[apple/xnu.git] / osfmk / kern / ast.c
/*
 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <kern/ast.h>
#include <kern/counters.h>
#include <kern/cpu_quiesce.h>
#include <kern/misc_protos.h>
#include <kern/queue.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/restartable.h>
#include <kern/spl.h>
#include <kern/sfi.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
#include <kern/waitq.h>
#include <kern/ledger.h>
#include <kern/machine.h>
#include <kperf/kperf_kpc.h>
#include <mach/policy.h>
#include <security/mac_mach_internal.h> // for MACF AST hook
#include <stdatomic.h>

#if CONFIG_ARCADE
#include <kern/arcade.h>
#endif

static void __attribute__((noinline, noreturn, disable_tail_calls))
thread_preempted(__unused void* parameter, __unused wait_result_t result)
{
	/*
	 * We've been scheduled again after a userspace preemption,
	 * try again to return to userspace.
	 */
	thread_exception_return();
}

/*
 * AST_URGENT was detected while in kernel mode
 * Called with interrupts disabled, returns the same way
 * Must return to caller
 */
void
ast_taken_kernel(void)
{
	assert(ml_get_interrupts_enabled() == FALSE);

	thread_t thread = current_thread();

	/* Idle threads handle preemption themselves */
	if ((thread->state & TH_IDLE)) {
		ast_off(AST_PREEMPTION);
		return;
	}

	/*
	 * It's possible for this to be called after AST_URGENT
	 * has already been handled, due to races in enable_preemption
	 */
	if (ast_peek(AST_URGENT) != AST_URGENT) {
		return;
	}

	/*
	 * Don't preempt if the thread is already preparing to block.
	 * TODO: the thread can cheese this with clear_wait()
	 */
	if (waitq_wait_possible(thread) == FALSE) {
		/* Consume AST_URGENT or the interrupt will call us again */
		ast_consume(AST_URGENT);
		return;
	}

	/* TODO: Should we csw_check again to notice if conditions have changed? */

	ast_t urgent_reason = ast_consume(AST_PREEMPTION);

	assert(urgent_reason & AST_PREEMPT);

	counter(c_ast_taken_block++);

	thread_block_reason(THREAD_CONTINUE_NULL, NULL, urgent_reason);

	assert(ml_get_interrupts_enabled() == FALSE);
}
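
/*
 * Illustrative sketch (not from this file): ast_taken_kernel() is reached
 * from the machine-dependent trap/interrupt return path when execution is
 * about to resume in the kernel with an urgent AST pending.  Assuming such
 * a call site, it would look roughly like:
 *
 *	// interrupts still disabled, resuming kernel-mode execution
 *	if (ast_peek(AST_URGENT) == AST_URGENT) {
 *		ast_taken_kernel();
 *	}
 *
 * The peek-then-handle split mirrors the checks performed inside the
 * function itself; the real call sites live in platform trap code.
 */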

/*
 * An AST flag was set while returning to user mode
 * Called with interrupts disabled, returns with interrupts enabled
 * May call continuation instead of returning
 */
void
ast_taken_user(void)
{
	assert(ml_get_interrupts_enabled() == FALSE);

	thread_t thread = current_thread();

	/* We are about to return to userspace, there must not be a pending wait */
	assert(waitq_wait_possible(thread));
	assert((thread->state & TH_IDLE) == 0);

	/* TODO: Add more 'return to userspace' assertions here */

	/*
	 * If this thread was urgently preempted in userspace,
	 * take the preemption before processing the ASTs.
	 * The trap handler will call us again if we have more ASTs, so it's
	 * safe to block in a continuation here.
	 */
	if (ast_peek(AST_URGENT) == AST_URGENT) {
		ast_t urgent_reason = ast_consume(AST_PREEMPTION);

		assert(urgent_reason & AST_PREEMPT);

		/* TODO: Should we csw_check again to notice if conditions have changed? */

		thread_block_reason(thread_preempted, NULL, urgent_reason);
		/* NOTREACHED */
	}

	/*
	 * AST_KEVENT does not send an IPI when setting the ast for a thread running in parallel
	 * on a different processor. Only the ast bit on the thread will be set.
	 *
	 * Force a propagate for concurrent updates without an IPI.
	 */
	ast_propagate(thread);
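	/*
	 * (ast_propagate() simply ORs thread->ast into this processor's
	 * pending set -- see its definition near the bottom of this file --
	 * so thread-level bits posted without an IPI are visible below.)
	 */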

	/*
	 * Consume all non-preemption processor ASTs matching reasons
	 * because we're handling them here.
	 *
	 * If one of the AST handlers blocks in a continuation,
	 * we'll reinstate the unserviced thread-level AST flags
	 * from the thread to the processor on context switch.
	 * If one of the AST handlers sets another AST,
	 * the trap handler will call ast_taken_user again.
	 *
	 * We expect the AST handlers not to thread_exception_return
	 * without an ast_propagate or context switch to reinstate
	 * the per-processor ASTs.
	 *
	 * TODO: Why are AST_DTRACE and AST_KPERF not per-thread ASTs?
	 */
	ast_t reasons = ast_consume(AST_PER_THREAD | AST_KPERF | AST_DTRACE);

	ml_set_interrupts_enabled(TRUE);

#if CONFIG_DTRACE
	if (reasons & AST_DTRACE) {
		dtrace_ast();
	}
#endif

#ifdef MACH_BSD
	if (reasons & AST_BSD) {
		thread_ast_clear(thread, AST_BSD);
		bsd_ast(thread);
	}
#endif

#if CONFIG_MACF
	if (reasons & AST_MACF) {
		thread_ast_clear(thread, AST_MACF);
		mac_thread_userret(thread);
	}
#endif

#if CONFIG_ARCADE
	if (reasons & AST_ARCADE) {
		thread_ast_clear(thread, AST_ARCADE);
		arcade_ast(thread);
	}
#endif

	if (reasons & AST_APC) {
		thread_ast_clear(thread, AST_APC);
		thread_apc_ast(thread);
	}

	if (reasons & AST_GUARD) {
		thread_ast_clear(thread, AST_GUARD);
		guard_ast(thread);
	}

	if (reasons & AST_LEDGER) {
		thread_ast_clear(thread, AST_LEDGER);
		ledger_ast(thread);
	}

	if (reasons & AST_KPERF) {
		thread_ast_clear(thread, AST_KPERF);
		kperf_kpc_thread_ast(thread);
	}

	if (reasons & AST_RESET_PCS) {
		thread_ast_clear(thread, AST_RESET_PCS);
		thread_reset_pcs_ast(thread);
	}

	if (reasons & AST_KEVENT) {
		thread_ast_clear(thread, AST_KEVENT);
		uint16_t bits = atomic_exchange(&thread->kevent_ast_bits, 0);
		if (bits) {
			kevent_ast(thread, bits);
		}
	}
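	/*
	 * Note: the atomic_exchange above drains kevent_ast_bits in a single
	 * step, so bits posted concurrently from another processor are either
	 * picked up by this pass or left behind for a later AST_KEVENT pass.
	 */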

#if CONFIG_TELEMETRY
	if (reasons & AST_TELEMETRY_ALL) {
		ast_t telemetry_reasons = reasons & AST_TELEMETRY_ALL;
		thread_ast_clear(thread, AST_TELEMETRY_ALL);
		telemetry_ast(thread, telemetry_reasons);
	}
#endif

	spl_t s = splsched();

#if CONFIG_SCHED_SFI
	/*
	 * SFI is currently a per-processor AST, not a per-thread AST
	 * TODO: SFI should be a per-thread AST
	 */
	if (ast_consume(AST_SFI) == AST_SFI) {
		sfi_ast(thread);
	}
#endif

	/* We are about to return to userspace, there must not be a pending wait */
	assert(waitq_wait_possible(thread));

	/*
	 * We've handled all per-thread ASTs, time to handle non-urgent preemption.
	 *
	 * We delay reading the preemption bits until now in case the thread
	 * blocks while handling per-thread ASTs.
	 *
	 * If one of the AST handlers had managed to set a new AST bit,
	 * thread_exception_return will call ast_taken again.
	 */
	ast_t preemption_reasons = ast_consume(AST_PREEMPTION);

	if (preemption_reasons & AST_PREEMPT) {
		/* Conditions may have changed from when the AST_PREEMPT was originally set, so re-check. */

		thread_lock(thread);
		preemption_reasons = csw_check(thread, current_processor(), (preemption_reasons & AST_QUANTUM));
		thread_unlock(thread);

#if CONFIG_SCHED_SFI
		/* csw_check might tell us that SFI is needed */
		if (preemption_reasons & AST_SFI) {
			sfi_ast(thread);
		}
#endif

		if (preemption_reasons & AST_PREEMPT) {
			counter(c_ast_taken_block++);
			/* switching to a continuation implicitly re-enables interrupts */
			thread_block_reason(thread_preempted, NULL, preemption_reasons);
			/* NOTREACHED */
		}
	}

	if (ast_consume(AST_UNQUIESCE) == AST_UNQUIESCE) {
		cpu_quiescent_counter_ast();
	}

	cpu_quiescent_counter_assert_ast();

	splx(s);

	/*
	 * Here's a good place to put assertions of things which must be true
	 * upon return to userspace.
	 */
	assert((thread->sched_flags & TH_SFLAG_WAITQ_PROMOTED) == 0);
	assert((thread->sched_flags & TH_SFLAG_RW_PROMOTED) == 0);
	assert((thread->sched_flags & TH_SFLAG_EXEC_PROMOTED) == 0);
	assert((thread->sched_flags & TH_SFLAG_PROMOTED) == 0);
	assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

	assert(thread->kern_promotion_schedpri == 0);
	assert(thread->waiting_for_mutex == NULL);
	assert(thread->rwlock_count == 0);
}

/*
 * Set AST flags on current processor
 * Called at splsched
 */
void
ast_on(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	*pending_ast |= reasons;
}
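
/*
 * Illustrative usage (compare ast_dtrace_on() at the bottom of this file,
 * which is just ast_on(AST_DTRACE)); AST_BSD here is only an example reason.
 * Per the comment above, callers are expected to be at splsched:
 *
 *	spl_t s = splsched();
 *	ast_on(AST_BSD);
 *	splx(s);
 */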

/*
 * Clear AST flags on current processor
 * Called at splsched
 */
void
ast_off(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	*pending_ast &= ~reasons;
}
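
/*
 * For an in-tree usage example, see ast_taken_kernel() above, where an idle
 * thread clears pending preemption with ast_off(AST_PREEMPTION) rather than
 * taking the preemption path itself.
 */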

/*
 * Consume the requested subset of the AST flags set on the processor
 * Return the bits that were set
 * Called at splsched
 */
ast_t
ast_consume(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	reasons &= *pending_ast;
	*pending_ast &= ~reasons;

	return reasons;
}
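
/*
 * Illustrative caller pattern (a condensed sketch of what ast_taken_kernel()
 * above already does; not an additional call site):
 *
 *	ast_t taken = ast_consume(AST_PREEMPTION);
 *	if (taken & AST_PREEMPT) {
 *		thread_block_reason(THREAD_CONTINUE_NULL, NULL, taken);
 *	}
 */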

/*
 * Read the requested subset of the AST flags set on the processor
 * Return the bits that were set, don't modify the processor
 * Called at splsched
 */
ast_t
ast_peek(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	reasons &= *pending_ast;

	return reasons;
}
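
/*
 * ast_peek() differs from ast_consume() only in leaving the pending bits
 * set.  ast_taken_kernel() above uses the pair together: it peeks at
 * AST_URGENT to decide whether there is anything to do, and consumes the
 * bits only on the paths where it actually acts on (or deliberately
 * suppresses) the pending preemption.
 */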

/*
 * Re-set current processor's per-thread AST flags to those set on thread
 * Called at splsched
 */
void
ast_context(thread_t thread)
{
	ast_t *pending_ast = ast_pending();

	*pending_ast = ((*pending_ast & ~AST_PER_THREAD) | thread->ast);
}
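
/*
 * In other words, ast_context() installs the incoming thread's per-thread
 * bits while preserving the processor-level (non-AST_PER_THREAD) bits; this
 * is what reinstates unserviced thread-level ASTs across a context switch,
 * as described in the large comment in ast_taken_user() above.
 */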

/*
 * Propagate ASTs set on a thread to the current processor
 * Called at splsched
 */
void
ast_propagate(thread_t thread)
{
	ast_on(thread->ast);
}
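
/*
 * For an in-tree usage example, see ast_taken_user() above, which calls
 * ast_propagate(thread) to pick up thread-level bits (such as AST_KEVENT)
 * that were set by another processor without an IPI.
 */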

void
ast_dtrace_on(void)
{
	ast_on(AST_DTRACE);
}