/* apple/xnu: osfmk/kern/ast.c (blob 21fb3f55414250b814a43c5359cfaf3cbdc88906) */
/*
 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <kern/ast.h>
#include <kern/counters.h>
#include <kern/cpu_quiesce.h>
#include <kern/misc_protos.h>
#include <kern/queue.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/spl.h>
#include <kern/sfi.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
#include <kern/waitq.h>
#include <kern/ledger.h>
#include <kern/machine.h>
#include <kperf/kperf_kpc.h>
#include <mach/policy.h>
#include <security/mac_mach_internal.h> // for MACF AST hook
#include <stdatomic.h>

static void __attribute__((noinline, noreturn, disable_tail_calls))
thread_preempted(__unused void* parameter, __unused wait_result_t result)
{
	/*
	 * We've been scheduled again after a userspace preemption,
	 * try again to return to userspace.
	 */
	thread_exception_return();
}
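
/*
 * Illustrative sketch (not part of this file, not compiled): the
 * continuation pattern that thread_preempted() exists for. A thread that
 * blocks with a continuation is not resumed at the call site; when it next
 * runs it starts over in the continuation function on a fresh kernel stack,
 * which is why thread_preempted() simply retries the return to userspace.
 * example_take_preemption() is a hypothetical name; the real call sites are
 * in ast_taken_user() below.
 */
#if 0
static void
example_take_preemption(ast_t reasons)
{
	/* Give up the processor; resume in thread_preempted() when rescheduled. */
	thread_block_reason(thread_preempted, NULL, reasons);
	/* NOTREACHED - the continuation, not this line, runs next. */
}
#endif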

/*
 * AST_URGENT was detected while in kernel mode
 * Called with interrupts disabled, returns the same way
 * Must return to caller
 */
void
ast_taken_kernel(void)
{
	assert(ml_get_interrupts_enabled() == FALSE);

	thread_t thread = current_thread();

	/* Idle threads handle preemption themselves */
	if ((thread->state & TH_IDLE)) {
		ast_off(AST_PREEMPTION);
		return;
	}

	/*
	 * It's possible for this to be called after AST_URGENT
	 * has already been handled, due to races in enable_preemption
	 */
	if (ast_peek(AST_URGENT) != AST_URGENT) {
		return;
	}

	/*
	 * Don't preempt if the thread is already preparing to block.
	 * TODO: the thread can cheese this with clear_wait()
	 */
	if (waitq_wait_possible(thread) == FALSE) {
		/* Consume AST_URGENT or the interrupt will call us again */
		ast_consume(AST_URGENT);
		return;
	}

	/* TODO: Should we csw_check again to notice if conditions have changed? */

	ast_t urgent_reason = ast_consume(AST_PREEMPTION);

	assert(urgent_reason & AST_PREEMPT);

	counter(c_ast_taken_block++);

	thread_block_reason(THREAD_CONTINUE_NULL, NULL, urgent_reason);

	assert(ml_get_interrupts_enabled() == FALSE);
}
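
/*
 * Illustrative sketch (not part of this file, not compiled, simplified):
 * roughly how AST_URGENT reaches ast_taken_kernel(). A scheduling decision
 * marks the current processor with ast_on(AST_URGENT | AST_PREEMPT); later,
 * machine-dependent code calls ast_taken_kernel() with interrupts disabled,
 * e.g. when the running thread re-enables preemption. The exact call sites
 * and any IPI handling live in platform code and are only assumed here.
 */
#if 0
static void
example_request_urgent_preemption(void)
{
	spl_t s = splsched();

	/* Ask the running thread to preempt as soon as it safely can. */
	ast_on(AST_URGENT | AST_PREEMPT);

	splx(s);

	/* ...later, from the machine-dependent preemption path... */
	/* ast_taken_kernel(); */
}
#endif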

/*
 * An AST flag was set while returning to user mode
 * Called with interrupts disabled, returns with interrupts enabled
 * May call continuation instead of returning
 */
void
ast_taken_user(void)
{
	assert(ml_get_interrupts_enabled() == FALSE);

	thread_t thread = current_thread();

	/* We are about to return to userspace, there must not be a pending wait */
	assert(waitq_wait_possible(thread));
	assert((thread->state & TH_IDLE) == 0);

	/* TODO: Add more 'return to userspace' assertions here */

	/*
	 * If this thread was urgently preempted in userspace,
	 * take the preemption before processing the ASTs.
	 * The trap handler will call us again if we have more ASTs, so it's
	 * safe to block in a continuation here.
	 */
	if (ast_peek(AST_URGENT) == AST_URGENT) {
		ast_t urgent_reason = ast_consume(AST_PREEMPTION);

		assert(urgent_reason & AST_PREEMPT);

		/* TODO: Should we csw_check again to notice if conditions have changed? */

		thread_block_reason(thread_preempted, NULL, urgent_reason);
		/* NOTREACHED */
	}

	/*
	 * AST_KEVENT does not send an IPI when setting the ast for a thread running in parallel
	 * on a different processor. Only the ast bit on the thread will be set.
	 *
	 * Force a propagate for concurrent updates without an IPI.
	 */
	ast_propagate(thread);

	/*
	 * Consume all non-preemption processor ASTs matching reasons
	 * because we're handling them here.
	 *
	 * If one of the AST handlers blocks in a continuation,
	 * we'll reinstate the unserviced thread-level AST flags
	 * from the thread to the processor on context switch.
	 * If one of the AST handlers sets another AST,
	 * the trap handler will call ast_taken_user again.
	 *
	 * We expect the AST handlers not to thread_exception_return
	 * without an ast_propagate or context switch to reinstate
	 * the per-processor ASTs.
	 *
	 * TODO: Why are AST_DTRACE and AST_KPERF not per-thread ASTs?
	 */
	ast_t reasons = ast_consume(AST_PER_THREAD | AST_KPERF | AST_DTRACE);

	ml_set_interrupts_enabled(TRUE);

#if CONFIG_DTRACE
	if (reasons & AST_DTRACE) {
		dtrace_ast();
	}
#endif

#ifdef MACH_BSD
	if (reasons & AST_BSD) {
		thread_ast_clear(thread, AST_BSD);
		bsd_ast(thread);
	}
#endif

#if CONFIG_MACF
	if (reasons & AST_MACF) {
		thread_ast_clear(thread, AST_MACF);
		mac_thread_userret(thread);
	}
#endif

	if (reasons & AST_APC) {
		thread_ast_clear(thread, AST_APC);
		thread_apc_ast(thread);
	}

	if (reasons & AST_GUARD) {
		thread_ast_clear(thread, AST_GUARD);
		guard_ast(thread);
	}

	if (reasons & AST_LEDGER) {
		thread_ast_clear(thread, AST_LEDGER);
		ledger_ast(thread);
	}

	if (reasons & AST_KPERF) {
		thread_ast_clear(thread, AST_KPERF);
		kperf_kpc_thread_ast(thread);
	}

	if (reasons & AST_KEVENT) {
		thread_ast_clear(thread, AST_KEVENT);
		uint16_t bits = atomic_exchange(&thread->kevent_ast_bits, 0);
		if (bits) {
			kevent_ast(thread, bits);
		}
	}

#if CONFIG_TELEMETRY
	if (reasons & AST_TELEMETRY_ALL) {
		ast_t telemetry_reasons = reasons & AST_TELEMETRY_ALL;
		thread_ast_clear(thread, AST_TELEMETRY_ALL);
		telemetry_ast(thread, telemetry_reasons);
	}
#endif

	spl_t s = splsched();

#if CONFIG_SCHED_SFI
	/*
	 * SFI is currently a per-processor AST, not a per-thread AST
	 * TODO: SFI should be a per-thread AST
	 */
	if (ast_consume(AST_SFI) == AST_SFI) {
		sfi_ast(thread);
	}
#endif

	/* We are about to return to userspace, there must not be a pending wait */
	assert(waitq_wait_possible(thread));

	/*
	 * We've handled all per-thread ASTs, time to handle non-urgent preemption.
	 *
	 * We delay reading the preemption bits until now in case the thread
	 * blocks while handling per-thread ASTs.
	 *
	 * If one of the AST handlers had managed to set a new AST bit,
	 * thread_exception_return will call ast_taken again.
	 */
	ast_t preemption_reasons = ast_consume(AST_PREEMPTION);

	if (preemption_reasons & AST_PREEMPT) {
		/* Conditions may have changed from when the AST_PREEMPT was originally set, so re-check. */

		thread_lock(thread);
		preemption_reasons = csw_check(thread, current_processor(), (preemption_reasons & AST_QUANTUM));
		thread_unlock(thread);

#if CONFIG_SCHED_SFI
		/* csw_check might tell us that SFI is needed */
		if (preemption_reasons & AST_SFI) {
			sfi_ast(thread);
		}
#endif

		if (preemption_reasons & AST_PREEMPT) {
			counter(c_ast_taken_block++);
			/* switching to a continuation implicitly re-enables interrupts */
			thread_block_reason(thread_preempted, NULL, preemption_reasons);
			/* NOTREACHED */
		}
	}

	if (ast_consume(AST_UNQUIESCE) == AST_UNQUIESCE) {
		cpu_quiescent_counter_ast();
	}

	cpu_quiescent_counter_assert_ast();

	splx(s);

	/*
	 * Here's a good place to put assertions of things which must be true
	 * upon return to userspace.
	 */
	assert((thread->sched_flags & TH_SFLAG_WAITQ_PROMOTED) == 0);
	assert((thread->sched_flags & TH_SFLAG_RW_PROMOTED) == 0);
	assert((thread->sched_flags & TH_SFLAG_EXEC_PROMOTED) == 0);
	assert((thread->sched_flags & TH_SFLAG_PROMOTED) == 0);
	assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

	assert(thread->promotions == 0);
	assert(thread->was_promoted_on_wakeup == 0);
	assert(thread->waiting_for_mutex == NULL);
	assert(thread->rwlock_count == 0);
}
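
/*
 * Illustrative sketch (not part of this file, not compiled): the per-thread
 * AST handling pattern used in ast_taken_user() above. Each handler checks
 * its bit in the snapshot returned by ast_consume(), clears the thread-level
 * copy with thread_ast_clear(), and then runs its callback with interrupts
 * enabled. AST_EXAMPLE and example_ast() are hypothetical names, not real
 * XNU flags or handlers.
 */
#if 0
static void
example_handle_thread_ast(thread_t thread, ast_t reasons)
{
	if (reasons & AST_EXAMPLE) {
		/* Clear the thread-level bit so it isn't reinstated on context switch. */
		thread_ast_clear(thread, AST_EXAMPLE);
		example_ast(thread);
	}
}
#endif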

/*
 * Set AST flags on current processor
 * Called at splsched
 */
void
ast_on(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	*pending_ast |= reasons;
}

/*
 * Clear AST flags on current processor
 * Called at splsched
 */
void
ast_off(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	*pending_ast &= ~reasons;
}

/*
 * Consume the requested subset of the AST flags set on the processor
 * Return the bits that were set
 * Called at splsched
 */
ast_t
ast_consume(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	reasons &= *pending_ast;
	*pending_ast &= ~reasons;

	return reasons;
}

/*
 * Read the requested subset of the AST flags set on the processor
 * Return the bits that were set, don't modify the processor
 * Called at splsched
 */
ast_t
ast_peek(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	reasons &= *pending_ast;

	return reasons;
}
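
/*
 * Illustrative sketch (not part of this file, not compiled): the difference
 * between ast_peek() and ast_consume(). Peeking leaves the pending bits in
 * place so the check can be repeated or abandoned; consuming clears the
 * requested subset, making the caller responsible for acting on it. Both
 * operate on the current processor and must be called at splsched.
 */
#if 0
static void
example_peek_then_consume(void)
{
	spl_t s = splsched();

	if (ast_peek(AST_URGENT) == AST_URGENT) {
		/* AST_URGENT is still pending at this point. */
		ast_t taken = ast_consume(AST_PREEMPTION);

		/* 'taken' holds the preemption bits that were pending; they are
		 * now cleared on this processor, so we must handle them. */
		(void)taken;
	}

	splx(s);
}
#endif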

/*
 * Re-set current processor's per-thread AST flags to those set on thread
 * Called at splsched
 */
void
ast_context(thread_t thread)
{
	ast_t *pending_ast = ast_pending();

	*pending_ast = ((*pending_ast & ~AST_PER_THREAD) | thread->ast);
}

/*
 * Propagate ASTs set on a thread to the current processor
 * Called at splsched
 */
void
ast_propagate(thread_t thread)
{
	ast_on(thread->ast);
}
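
/*
 * Illustrative sketch (not part of this file, not compiled, simplified):
 * how a thread-level AST becomes visible to the processor. A setter marks
 * the thread (thread_ast_set() is assumed here as the thread-level
 * counterpart of ast_on()), then either propagates immediately, as the
 * AST_KEVENT path in ast_taken_user() does, or relies on ast_context()
 * reloading the per-thread bits at the next context switch.
 */
#if 0
static void
example_set_thread_level_ast(thread_t thread, ast_t reason)
{
	spl_t s = splsched();

	thread_ast_set(thread, reason);

	if (thread == current_thread()) {
		/* Mirror the thread's AST bits onto this processor right away. */
		ast_propagate(thread);
	}

	splx(s);
}
#endif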

void
ast_dtrace_on(void)
{
	ast_on(AST_DTRACE);
}