/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	This file contains routines to check whether an ast is needed.
 *
 *	ast_check() - check whether ast is needed for interrupt or context
 *	switch.  Usually called by clock interrupt handler.
 */
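
/*
 * Illustrative sketch of the call pattern described above.  The handler
 * name below is hypothetical and shown only for orientation; ast_check()
 * and current_processor() are the real interfaces involved:
 *
 *	static void
 *	example_clock_tick(void)
 *	{
 *		// runs at splsched from the clock interrupt path
 *		ast_check(current_processor());
 *	}
 */
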
#include <kern/ast.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/misc_protos.h>
#include <kern/queue.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/spl.h>
#include <kern/sfi.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
#include <kern/waitq.h>
#include <kern/ledger.h>
#include <kperf/kperf_kpc.h>
#include <mach/policy.h>
#include <machine/trap.h>		// for CHUD AST hook
#include <machine/pal_routines.h>
#include <security/mac_mach_internal.h>	// for MACF AST hook
volatile perfASTCallback	perfASTHook;

extern void dtrace_ast(void);
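
/*
 * ast_taken:
 *
 *	Dispatch the ASTs in "reasons" that are pending for the current
 *	thread, clearing each one as it is handled; "enable" is the
 *	interrupt-enable state to restore via ml_set_interrupts_enabled().
 */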
/*
 * Called at splsched.
 */
void
ast_taken(
	ast_t		reasons,
	boolean_t	enable)
{
	boolean_t	preempt_trap = (reasons == AST_PREEMPTION);
	ast_t		*myast = ast_pending();
	thread_t	thread = current_thread();
	perfASTCallback	perf_hook = perfASTHook;

	/*
	 * CHUD hook - all threads including idle processor threads
	 */
	if (perf_hook != NULL) {
		if (*myast & AST_CHUD_ALL) {
			(*perf_hook)(reasons, myast);

			if (*myast == AST_NONE)
				return;
		}
	}
	else
		*myast &= ~AST_CHUD_ALL;

	reasons &= *myast;
	*myast &= ~reasons;

	/*
	 * Handle ASTs for all threads
	 * except idle processor threads.
	 */
	if (!(thread->state & TH_IDLE)) {
		/*
		 * Check for urgent preemption.
		 */
		if ((reasons & AST_URGENT) &&
		    waitq_wait_possible(thread)) {
			if (reasons & AST_PREEMPT) {
				counter(c_ast_taken_block++);
				thread_block_reason(THREAD_CONTINUE_NULL, NULL,
				    reasons & AST_PREEMPTION);
			}

			reasons &= ~AST_PREEMPTION;
		}

		/*
		 * The kernel preempt traps
		 * skip all other ASTs.
		 */
		if (!preempt_trap) {
			ml_set_interrupts_enabled(enable);

#if CONFIG_DTRACE
			if (reasons & AST_DTRACE) {
				dtrace_ast();
			}
#endif

#ifdef MACH_BSD
			/*
			 * Handle BSD hook.
			 */
			if (reasons & AST_BSD) {
				thread_ast_clear(thread, AST_BSD);
				bsd_ast(thread);
			}
#endif

#if CONFIG_MACF
			/*
			 * Handle MACF hook.
			 */
			if (reasons & AST_MACF) {
				thread_ast_clear(thread, AST_MACF);
				mac_thread_userret(thread);
			}
#endif

			/*
			 * Thread APC hook.
			 */
			if (reasons & AST_APC) {
				thread_ast_clear(thread, AST_APC);
				thread_apc_ast(thread);
			}

			if (reasons & AST_GUARD) {
				thread_ast_clear(thread, AST_GUARD);
				guard_ast(thread);
			}

			if (reasons & AST_LEDGER) {
				thread_ast_clear(thread, AST_LEDGER);
				ledger_ast(thread);
			}

			/*
			 * Kernel Profiling Hook
			 */
			if (reasons & AST_KPERF) {
				thread_ast_clear(thread, AST_KPERF);
				kperf_kpc_thread_ast(thread);
			}

#if CONFIG_TELEMETRY
			if (reasons & AST_TELEMETRY_ALL) {
				boolean_t interrupted_userspace = FALSE;
				boolean_t io_telemetry = FALSE;

				assert((reasons & AST_TELEMETRY_ALL) != AST_TELEMETRY_ALL); /* only one is valid at a time */
				interrupted_userspace = (reasons & AST_TELEMETRY_USER) ? TRUE : FALSE;
				io_telemetry = ((reasons & AST_TELEMETRY_IO) ? TRUE : FALSE);
				thread_ast_clear(thread, AST_TELEMETRY_ALL);
				telemetry_ast(thread, interrupted_userspace, io_telemetry);
			}
#endif

			ml_set_interrupts_enabled(FALSE);

#if CONFIG_SCHED_SFI
			if (reasons & AST_SFI) {
				sfi_ast(thread);
			}
#endif

			/*
			 * Check for preemption. Conditions may have changed from when the AST_PREEMPT was originally set.
			 */
			thread_lock(thread);
			if (reasons & AST_PREEMPT)
				reasons = csw_check(current_processor(), reasons & AST_QUANTUM);
			thread_unlock(thread);

			assert(waitq_wait_possible(thread));

			if (reasons & AST_PREEMPT) {
				counter(c_ast_taken_block++);
				thread_block_reason((thread_continue_t)thread_exception_return,
				    NULL, reasons & AST_PREEMPTION);
			}
		}
	}

	ml_set_interrupts_enabled(enable);
}
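
/*
 * ast_check:
 *
 *	Examine the thread active on "processor", propagate its thread-level
 *	ASTs to the processor, and request a context-switch AST if
 *	csw_check() says one is warranted.  Per the file header above, this
 *	is usually driven from the clock interrupt handler.
 */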
/*
 * Called at splsched.
 */
void
ast_check(
	processor_t		processor)
{
	thread_t		thread = processor->active_thread;

	if (processor->state == PROCESSOR_RUNNING ||
	    processor->state == PROCESSOR_SHUTDOWN) {
		ast_t		preempt;

		/*
		 *	Propagate thread ast to processor.
		 */
		pal_ast_check(thread);

		ast_propagate(thread->ast);

		/*
		 *	Context switch check.
		 */
		thread_lock(thread);

		processor->current_pri = thread->sched_pri;
		processor->current_thmode = thread->sched_mode;
		processor->current_sfi_class = thread->sfi_class =
		    sfi_thread_classify(thread);

		if ((preempt = csw_check(processor, AST_NONE)) != AST_NONE)
			ast_on(preempt);

		thread_unlock(thread);
	}
}
/*
 * Set AST flags on current processor
 * Called at splsched
 */
void
ast_on(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	*pending_ast |= reasons;
}
/*
 * Clear AST flags on current processor
 * Called at splsched
 */
void
ast_off(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	*pending_ast &= ~reasons;
}
/*
 * Re-set current processor's per-thread AST flags to those set on thread
 * Called at splsched
 */
void
ast_context(thread_t thread)
{
	ast_t *pending_ast = ast_pending();

	*pending_ast = ((*pending_ast & ~AST_PER_THREAD) | thread->ast);
}
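
/*
 * Illustrative sketch of how the helpers above compose.  The caller shown
 * here is hypothetical; ast_on(), ast_off() and AST_BSD are real:
 *
 *	static void
 *	example_request_then_cancel(void)
 *	{
 *		// runs at splsched; mark AST_BSD pending on this processor
 *		ast_on(AST_BSD);
 *
 *		// later, withdraw the request if it is no longer needed
 *		ast_off(AST_BSD);
 *	}
 *
 * ast_context() is the piece used when a different thread becomes current,
 * so that the per-thread bits in the pending set track thread->ast.
 */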