/*
 * Copyright (c) 2000-2012 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 * kern/ast.h: Definitions for Asynchronous System Traps.
 */

#ifndef _KERN_AST_H_
#define _KERN_AST_H_


#include <kern/assert.h>
#include <kern/macro_help.h>
#include <kern/spl.h>

/*
 * A processor detects an AST when it is about to return from an
 * interrupt context, and calls ast_taken_kernel or ast_taken_user
 * depending on whether it was returning from userspace or kernelspace.
 *
 * Machine-dependent code is responsible for maintaining
 * a set of reasons for an AST.
 */
typedef uint32_t ast_t;

/*
 * When returning from interrupt/trap context to kernel mode,
 * if AST_URGENT is set, then ast_taken_kernel is called, for
 * instance to effect preemption of a kernel thread by a realtime
 * thread.
 *
 * This is also done when re-enabling preemption or re-enabling
 * interrupts, since an AST may have been set while preemption
 * was disabled, and it should take effect as soon as possible.
 *
 * When returning from interrupt/trap/syscall context to user
 * mode, any and all ASTs that are pending should be handled by
 * calling ast_taken_user.
 *
 * If a thread context switches, only ASTs not in AST_PER_THREAD
 * remain active. The per-thread ASTs are stored in the thread_t
 * and re-enabled when the thread context switches back.
 *
 * Typically the preemption ASTs are set as a result of threads
 * becoming runnable, threads changing priority, or quantum
 * expiration. If a thread becomes runnable and is chosen
 * to run on another processor, cause_ast_check() may be called
 * to IPI that processor and request csw_check() be run there.
 */

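/*
 * Illustrative sketch only, not part of this interface: a hypothetical,
 * machine-dependent interrupt/trap return path might consult the pending
 * mask roughly as follows (the function name and the 'returning_to_user'
 * flag are invented for this example):
 *
 *	static void
 *	example_interrupt_return(bool returning_to_user)
 *	{
 *		if (returning_to_user) {
 *			ast_taken_user();	// handle every pending AST before user mode
 *		} else if (*ast_pending() & AST_URGENT) {
 *			ast_taken_kernel();	// only urgent preemption is acted on in kernel mode
 *		}
 *	}
 */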
/*
 * Bits for reasons
 * TODO: Split the context switch and return-to-user AST namespaces
 * NOTE: Some of these are exported as the 'reason' code in scheduler tracepoints
 */
#define AST_PREEMPT             0x01
#define AST_QUANTUM             0x02
#define AST_URGENT              0x04
#define AST_HANDOFF             0x08
#define AST_YIELD               0x10
#define AST_APC                 0x20     /* migration APC hook */
#define AST_LEDGER              0x40
#define AST_BSD                 0x80
#define AST_KPERF               0x100    /* kernel profiling */
#define AST_MACF                0x200    /* MACF user ret pending */
#define AST_RESET_PCS           0x400    /* restartable ranges */
#define AST_ARCADE              0x800    /* arcade subscription support */
#define AST_GUARD               0x1000
#define AST_TELEMETRY_USER      0x2000   /* telemetry sample requested on interrupt from userspace */
#define AST_TELEMETRY_KERNEL    0x4000   /* telemetry sample requested on interrupt from kernel */
#define AST_TELEMETRY_PMI       0x8000   /* telemetry sample requested on PMI */
#define AST_SFI                 0x10000  /* Evaluate if SFI wait is needed before return to userspace */
#define AST_DTRACE              0x20000
#define AST_TELEMETRY_IO        0x40000  /* telemetry sample requested for I/O */
#define AST_KEVENT              0x80000
#define AST_REBALANCE           0x100000 /* thread context switched due to rebalancing */
#define AST_UNQUIESCE           0x200000 /* catch unquiesced processor before returning to userspace */

#define AST_NONE                0x00
#define AST_ALL                 (~AST_NONE)

#define AST_SCHEDULING          (AST_PREEMPTION | AST_YIELD | AST_HANDOFF)
#define AST_PREEMPTION          (AST_PREEMPT | AST_QUANTUM | AST_URGENT)

#define AST_TELEMETRY_ALL       (AST_TELEMETRY_USER | AST_TELEMETRY_KERNEL | \
                                 AST_TELEMETRY_PMI | AST_TELEMETRY_IO)

/* Per-thread ASTs follow the thread at context-switch time. */
#define AST_PER_THREAD          (AST_APC | AST_BSD | AST_MACF | AST_RESET_PCS | \
                                 AST_ARCADE | AST_LEDGER | AST_GUARD | AST_TELEMETRY_ALL | AST_KEVENT)

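/*
 * Illustrative sketch only: because per-thread ASTs are stored in the
 * thread_t (see the block comment above), a context switch conceptually
 * drops the outgoing thread's per-thread bits from the processor's pending
 * mask and re-arms the incoming thread's saved bits, roughly:
 *
 *	ast_t *pending = ast_pending();
 *	*pending = (*pending & ~AST_PER_THREAD) | (new_thread->ast & AST_PER_THREAD);
 *
 * This is what ast_context(), declared below, is for at context switch;
 * 'new_thread' is a placeholder name, and the real code may differ.
 */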
/* Handle AST_URGENT detected while in the kernel */
extern void ast_taken_kernel(void);

/* Handle an AST flag set while returning to user mode (may continue via thread_exception_return) */
extern void ast_taken_user(void);

/* Check for pending ASTs */
extern void ast_check(processor_t processor);

/* Pending ast mask for the current processor */
extern ast_t *ast_pending(void);

/* Set AST flags on current processor */
extern void ast_on(ast_t reasons);

/* Clear AST flags on current processor */
extern void ast_off(ast_t reasons);

/* Consume specified AST flags from current processor */
extern ast_t ast_consume(ast_t reasons);

/* Read specified AST flags from current processor */
extern ast_t ast_peek(ast_t reasons);

/* Re-set current processor's per-thread AST flags to those set on thread */
extern void ast_context(thread_t thread);

/* Propagate ASTs set on a thread to the current processor */
extern void ast_propagate(thread_t thread);

/*
 * Set an AST on a thread with thread_ast_set.
 *
 * You can then propagate it to the current processor with ast_propagate(),
 * or tell another processor to act on it with cause_ast_check().
 *
 * See act_set_ast() for an example.
 */
#define thread_ast_set(act, reason)   ((void)os_atomic_or(&(act)->ast, (reason), relaxed))
#define thread_ast_clear(act, reason) ((void)os_atomic_andnot(&(act)->ast, (reason), relaxed))
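/*
 * Condensed sketch of that pattern (locking and interrupt-disable details
 * are omitted, and 'processor' is a placeholder for wherever the target
 * thread is currently running; see act_set_ast() for the real thing):
 *
 *	thread_ast_set(thread, AST_BSD);
 *	if (thread == current_thread()) {
 *		ast_propagate(thread);		// noticed on this processor's next user return
 *	} else {
 *		cause_ast_check(processor);	// IPI the remote processor to re-check ASTs
 *	}
 */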

#ifdef MACH_BSD

extern void act_set_astbsd(thread_t);
extern void bsd_ast(thread_t);

#endif /* MACH_BSD */

#ifdef CONFIG_DTRACE
extern void ast_dtrace_on(void);
extern void dtrace_ast(void);
#endif /* CONFIG_DTRACE */

extern void kevent_ast(thread_t thread, uint16_t bits);
extern void act_set_astkevent(thread_t thread, uint16_t bits);
extern uint16_t act_clear_astkevent(thread_t thread, uint16_t bits);
extern void act_set_ast_reset_pcs(thread_t thread);

#endif /* _KERN_AST_H_ */