/* osfmk/kern/ast.c — Apple XNU (xnu-3247.10.11) */
1/*
2 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58
59/*
60 *
61 * This file contains routines to check whether an ast is needed.
62 *
63 * ast_check() - check whether ast is needed for interrupt or context
64 * switch. Usually called by clock interrupt handler.
65 *
66 */
67
68#include <kern/ast.h>
69#include <kern/counters.h>
70#include <kern/cpu_number.h>
71#include <kern/misc_protos.h>
72#include <kern/queue.h>
73#include <kern/sched_prim.h>
74#include <kern/thread.h>
75#include <kern/processor.h>
76#include <kern/spl.h>
77#include <kern/sfi.h>
78#if CONFIG_TELEMETRY
79#include <kern/telemetry.h>
80#endif
81#include <kern/waitq.h>
82#include <kern/ledger.h>
83#include <mach/policy.h>
84#include <machine/trap.h> // for CHUD AST hook
85#include <machine/pal_routines.h>
86#include <security/mac_mach_internal.h> // for MACF AST hook
87
88volatile perfASTCallback perfASTHook;
89
90
/*
 * ast_init:
 *
 *	AST subsystem bootstrap hook.  Intentionally empty in this
 *	configuration; kept so startup code has a stable entry point
 *	to call during initialization.
 */
void
ast_init(void)
{
}
95
96extern void chudxnu_thread_ast(thread_t); // XXX this should probably be in a header...
97
/*
 * ast_taken:
 *
 *	Handle the pending ASTs named in 'reasons' for the current thread,
 *	then leave interrupts in the state given by 'enable'.
 *
 *	reasons: AST bits the caller wants serviced.  The special value
 *	         AST_PREEMPTION (exactly) marks a kernel preempt trap,
 *	         which services only preemption and skips the user-level
 *	         AST handlers below.
 *	enable:  interrupt-enable state to restore; also applied while
 *	         the (potentially blocking) non-preemption handlers run.
 *
 *	Called at splsched.
 */
void
ast_taken(
	ast_t		reasons,
	boolean_t	enable
)
{
	boolean_t		preempt_trap = (reasons == AST_PREEMPTION);
	ast_t			*myast = ast_pending();
	thread_t		thread = current_thread();
	perfASTCallback	perf_hook = perfASTHook;

	/*
	 * CHUD hook - all threads including idle processor threads.
	 * The hook may consume AST bits via 'myast'; if nothing remains
	 * pending afterwards there is no more work to do.
	 */
	if (perf_hook) {
		if (*myast & AST_CHUD_ALL) {
			(*perf_hook)(reasons, myast);

			if (*myast == AST_NONE)
				return;
		}
	}
	else
		*myast &= ~AST_CHUD_ALL;	/* no hook installed: drop stale CHUD bits */

	/* Service only reasons that are both requested and pending; clear them. */
	reasons &= *myast;
	*myast &= ~reasons;

	/*
	 * Handle ASTs for all threads
	 * except idle processor threads.
	 */
	if (!(thread->state & TH_IDLE)) {
		/*
		 * Check for urgent preemption.  Only block here if the
		 * thread is in a state where blocking is possible.
		 */
		if (	(reasons & AST_URGENT)				&&
				waitq_wait_possible(thread)		) {
			if (reasons & AST_PREEMPT) {
				counter(c_ast_taken_block++);
				thread_block_reason(THREAD_CONTINUE_NULL, NULL,
										reasons & AST_PREEMPTION);
			}

			reasons &= ~AST_PREEMPTION;
		}

		/*
		 * The kernel preempt traps
		 * skip all other ASTs.
		 */
		if (!preempt_trap) {
			/* Handlers below may block; run them with the caller's
			 * interrupt state. */
			ml_set_interrupts_enabled(enable);

#ifdef	MACH_BSD
			/*
			 * Handle BSD hook.
			 */
			if (reasons & AST_BSD) {
				thread_ast_clear(thread, AST_BSD);
				bsd_ast(thread);
			}
#endif
#if CONFIG_MACF
			/*
			 * Handle MACF hook.
			 */
			if (reasons & AST_MACF) {
				thread_ast_clear(thread, AST_MACF);
				mac_thread_userret(thread);
			}
#endif
			/*
			 * Thread APC hook.
			 */
			if (reasons & AST_APC) {
				thread_ast_clear(thread, AST_APC);
				special_handler(thread);
			}

			if (reasons & AST_GUARD) {
				thread_ast_clear(thread, AST_GUARD);
				guard_ast(thread);
			}

			if (reasons & AST_LEDGER) {
				thread_ast_clear(thread, AST_LEDGER);
				ledger_ast(thread);
			}

			/*
			 * Kernel Profiling Hook
			 */
			if (reasons & AST_KPERF) {
				thread_ast_clear(thread, AST_KPERF);
				chudxnu_thread_ast(thread);
			}

#if CONFIG_TELEMETRY
			if (reasons & AST_TELEMETRY_ALL) {
				boolean_t interrupted_userspace = FALSE;
				boolean_t is_windowed = FALSE;

				assert((reasons & AST_TELEMETRY_ALL) != AST_TELEMETRY_ALL); /* only one is valid at a time */
				interrupted_userspace = (reasons & AST_TELEMETRY_USER) ? TRUE : FALSE;
				is_windowed = ((reasons & AST_TELEMETRY_WINDOWED) ? TRUE : FALSE);
				thread_ast_clear(thread, AST_TELEMETRY_ALL);
				telemetry_ast(thread, interrupted_userspace, is_windowed);
			}
#endif

			/* Back to interrupts-disabled for the locked section below. */
			ml_set_interrupts_enabled(FALSE);

#if CONFIG_SCHED_SFI
			if (reasons & AST_SFI) {
				sfi_ast(thread);
			}
#endif

			/*
			 * Check for preemption. Conditions may have changed from when the AST_PREEMPT was originally set.
			 */
			thread_lock(thread);
			if (reasons & AST_PREEMPT)
				reasons = csw_check(current_processor(), reasons & AST_QUANTUM);
			thread_unlock(thread);

			assert(waitq_wait_possible(thread));

			if (reasons & AST_PREEMPT) {
				counter(c_ast_taken_block++);
				thread_block_reason((thread_continue_t)thread_exception_return, NULL, reasons & AST_PREEMPTION);
			}
		}
	}

	ml_set_interrupts_enabled(enable);
}
239
/*
 * ast_check:
 *
 *	Check whether the thread running on 'processor' should be
 *	preempted, and if so post the appropriate AST bits on the
 *	current processor.  Usually invoked from the clock interrupt
 *	path (see file header).  Only acts while the processor is
 *	RUNNING or SHUTDOWN.
 *
 *	Called at splsched.
 */
void
ast_check(
	processor_t		processor)
{
	thread_t			thread = processor->active_thread;

	if (processor->state == PROCESSOR_RUNNING ||
	    processor->state == PROCESSOR_SHUTDOWN) {
		ast_t			preempt;

		/*
		 * Propagate thread ast to processor.
		 */
		pal_ast_check(thread);

		ast_propagate(thread->ast);

		/*
		 * Context switch check.  Refresh the processor's cached
		 * view of the thread's priority/mode/SFI class under the
		 * thread lock before asking csw_check().
		 */
		thread_lock(thread);

		processor->current_pri = thread->sched_pri;
		processor->current_thmode = thread->sched_mode;
		processor->current_sfi_class = thread->sfi_class = sfi_thread_classify(thread);

		if ((preempt = csw_check(processor, AST_NONE)) != AST_NONE)
			ast_on(preempt);

		thread_unlock(thread);
	}
}
275
276/*
277 * Set AST flags on current processor
278 * Called at splsched
279 */
280void
281ast_on(ast_t reasons)
282{
283 ast_t *pending_ast = ast_pending();
284
285 *pending_ast |= reasons;
286}
287
288/*
289 * Clear AST flags on current processor
290 * Called at splsched
291 */
292void
293ast_off(ast_t reasons)
294{
295 ast_t *pending_ast = ast_pending();
296
297 *pending_ast &= ~reasons;
298}
299
300/*
301 * Re-set current processor's per-thread AST flags to those set on thread
302 * Called at splsched
303 */
304void
305ast_context(thread_t thread)
306{
307 ast_t *pending_ast = ast_pending();
308
309 *pending_ast = ((*pending_ast & ~AST_PER_THREAD) | thread->ast);
310}
311
312