/*
 * osfmk/kern/ast.c — Apple XNU kernel (release xnu-3789.70.16).
 */
1/*
2 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58
59/*
60 *
61 * This file contains routines to check whether an ast is needed.
62 *
63 * ast_check() - check whether ast is needed for interrupt or context
64 * switch. Usually called by clock interrupt handler.
65 *
66 */
67
68#include <kern/ast.h>
69#include <kern/counters.h>
70#include <kern/cpu_number.h>
71#include <kern/misc_protos.h>
72#include <kern/queue.h>
73#include <kern/sched_prim.h>
74#include <kern/thread.h>
75#include <kern/processor.h>
76#include <kern/spl.h>
77#include <kern/sfi.h>
78#if CONFIG_TELEMETRY
79#include <kern/telemetry.h>
80#endif
81#include <kern/waitq.h>
82#include <kern/ledger.h>
83#include <kperf/kperf_kpc.h>
84#include <mach/policy.h>
85#include <machine/trap.h> // for CHUD AST hook
86#include <machine/pal_routines.h>
87#include <security/mac_mach_internal.h> // for MACF AST hook
88
89volatile perfASTCallback perfASTHook;
90
91
/*
 * ast_init:
 *
 * Module initialization.  Currently nothing to set up; the pending-AST
 * state is per-processor and established elsewhere.
 */
void
ast_init(void)
{
}
96
97#ifdef CONFIG_DTRACE
98extern void dtrace_ast(void);
99#endif
100
101/*
102 * Called at splsched.
103 */
/*
 * ast_taken:
 *
 * Handle the pending ASTs selected by 'reasons' for the current thread,
 * then restore the interrupt-enable state given by 'enable'.
 *
 * Called at splsched (interrupts disabled).  The kernel preemption trap
 * passes reasons == AST_PREEMPTION; that path handles only preemption
 * and skips every other AST class.
 */
void
ast_taken(
	ast_t		reasons,
	boolean_t	enable
)
{
	boolean_t		preempt_trap = (reasons == AST_PREEMPTION);
	ast_t			*myast = ast_pending();
	thread_t		thread = current_thread();
	perfASTCallback	perf_hook = perfASTHook;

	/*
	 * CHUD hook - all threads including idle processor threads
	 */
	if (perf_hook) {
		if (*myast & AST_CHUD_ALL) {
			(*perf_hook)(reasons, myast);

			/* The hook may have consumed every pending AST. */
			if (*myast == AST_NONE)
				return;
		}
	}
	else
		*myast &= ~AST_CHUD_ALL;

	/*
	 * Act only on ASTs that are both pending and requested by the
	 * caller, and remove those from the pending set before handling.
	 */
	reasons &= *myast;
	*myast &= ~reasons;

	/*
	 * Handle ASTs for all threads
	 * except idle processor threads.
	 */
	if (!(thread->state & TH_IDLE)) {
		/*
		 * Check for urgent preemption.
		 */
		if ((reasons & AST_URGENT) &&
		    waitq_wait_possible(thread)) {
			if (reasons & AST_PREEMPT) {
				counter(c_ast_taken_block++);
				thread_block_reason(THREAD_CONTINUE_NULL, NULL,
				    reasons & AST_PREEMPTION);
			}

			/* Urgent preemption handled; drop it from the mask. */
			reasons &= ~AST_PREEMPTION;
		}

		/*
		 * The kernel preempt traps
		 * skip all other ASTs.
		 */
		if (!preempt_trap) {
			/*
			 * Re-enable interrupts while running the per-thread
			 * handlers below; they are masked again before the
			 * final context-switch check.
			 */
			ml_set_interrupts_enabled(enable);

#if CONFIG_DTRACE
			if (reasons & AST_DTRACE) {
				dtrace_ast();
			}
#endif

#ifdef	MACH_BSD
			/*
			 * Handle BSD hook.
			 */
			if (reasons & AST_BSD) {
				thread_ast_clear(thread, AST_BSD);
				bsd_ast(thread);
			}
#endif
#if CONFIG_MACF
			/*
			 * Handle MACF hook.
			 */
			if (reasons & AST_MACF) {
				thread_ast_clear(thread, AST_MACF);
				mac_thread_userret(thread);
			}
#endif
			/*
			 * Thread APC hook.
			 */
			if (reasons & AST_APC) {
				thread_ast_clear(thread, AST_APC);
				thread_apc_ast(thread);
			}

			if (reasons & AST_GUARD) {
				thread_ast_clear(thread, AST_GUARD);
				guard_ast(thread);
			}

			if (reasons & AST_LEDGER) {
				thread_ast_clear(thread, AST_LEDGER);
				ledger_ast(thread);
			}

			/*
			 * Kernel Profiling Hook
			 */
			if (reasons & AST_KPERF) {
				thread_ast_clear(thread, AST_KPERF);
				kperf_kpc_thread_ast(thread);
			}

#if CONFIG_TELEMETRY
			if (reasons & AST_TELEMETRY_ALL) {
				boolean_t interrupted_userspace = FALSE;
				boolean_t io_telemetry = FALSE;

				assert((reasons & AST_TELEMETRY_ALL) != AST_TELEMETRY_ALL); /* only one is valid at a time */
				interrupted_userspace = (reasons & AST_TELEMETRY_USER) ? TRUE : FALSE;
				io_telemetry = ((reasons & AST_TELEMETRY_IO) ? TRUE : FALSE);
				thread_ast_clear(thread, AST_TELEMETRY_ALL);
				telemetry_ast(thread, interrupted_userspace, io_telemetry);
			}
#endif

			/* Mask interrupts again for SFI and the switch check. */
			ml_set_interrupts_enabled(FALSE);

#if CONFIG_SCHED_SFI
			if (reasons & AST_SFI) {
				sfi_ast(thread);
			}
#endif

			/*
			 * Check for preemption. Conditions may have changed from when the AST_PREEMPT was originally set.
			 */
			thread_lock(thread);
			if (reasons & AST_PREEMPT)
				reasons = csw_check(current_processor(), reasons & AST_QUANTUM);
			thread_unlock(thread);

			assert(waitq_wait_possible(thread));

			if (reasons & AST_PREEMPT) {
				counter(c_ast_taken_block++);
				/* Resume in user mode after the preemption block. */
				thread_block_reason((thread_continue_t)thread_exception_return, NULL, reasons & AST_PREEMPTION);
			}
		}
	}

	ml_set_interrupts_enabled(enable);
}
248
249/*
250 * Called at splsched.
251 */
/*
 * ast_check:
 *
 * Propagate the active thread's per-thread ASTs to the processor and
 * re-run the context-switch check, posting any resulting preemption
 * ASTs.  Usually called by the clock interrupt handler, at splsched.
 */
void
ast_check(
	processor_t		processor)
{
	thread_t			thread = processor->active_thread;

	/* Only examine processors that are actually running a thread. */
	if (processor->state == PROCESSOR_RUNNING ||
	    processor->state == PROCESSOR_SHUTDOWN) {
		ast_t			preempt;

		/*
		 * Propagate thread ast to processor.
		 */
		pal_ast_check(thread);

		ast_propagate(thread->ast);

		/*
		 * Context switch check.
		 */
		thread_lock(thread);

		/*
		 * Refresh the processor's cached view of the thread's
		 * priority, scheduling mode and SFI class while the
		 * thread lock is held.
		 */
		processor->current_pri = thread->sched_pri;
		processor->current_thmode = thread->sched_mode;
		processor->current_sfi_class = thread->sfi_class = sfi_thread_classify(thread);

		if ((preempt = csw_check(processor, AST_NONE)) != AST_NONE)
			ast_on(preempt);

		thread_unlock(thread);
	}
}
284
285/*
286 * Set AST flags on current processor
287 * Called at splsched
288 */
289void
290ast_on(ast_t reasons)
291{
292 ast_t *pending_ast = ast_pending();
293
294 *pending_ast |= reasons;
295}
296
297/*
298 * Clear AST flags on current processor
299 * Called at splsched
300 */
301void
302ast_off(ast_t reasons)
303{
304 ast_t *pending_ast = ast_pending();
305
306 *pending_ast &= ~reasons;
307}
308
309/*
310 * Re-set current processor's per-thread AST flags to those set on thread
311 * Called at splsched
312 */
313void
314ast_context(thread_t thread)
315{
316 ast_t *pending_ast = ast_pending();
317
318 *pending_ast = ((*pending_ast & ~AST_PER_THREAD) | thread->ast);
319}
320
321void
322ast_dtrace_on(void)
323{
324 ast_on(AST_DTRACE);
325}
326