/*
 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <kern/ast.h>
#include <kern/counter.h>
#include <kern/cpu_quiesce.h>
#include <kern/misc_protos.h>
#include <kern/queue.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/restartable.h>
#include <kern/spl.h>
#include <kern/sfi.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
#include <kern/waitq.h>
#include <kern/ledger.h>
#include <kern/machine.h>
#include <kperf/kperf_kpc.h>
#include <mach/policy.h>
#include <security/mac_mach_internal.h> // for MACF AST hook
#include <stdatomic.h>

#if CONFIG_ARCADE
#include <kern/arcade.h>
#endif

static void __attribute__((noinline, noreturn, disable_tail_calls))
thread_preempted(__unused void* parameter, __unused wait_result_t result)
{
	/*
	 * We've been scheduled again after a userspace preemption;
	 * try again to return to userspace.
	 */
	thread_exception_return();
}

/*
 * AST_URGENT was detected while in kernel mode
 * Called with interrupts disabled, returns the same way
 * Must return to caller
 */
void
ast_taken_kernel(void)
{
	assert(ml_get_interrupts_enabled() == FALSE);

	thread_t thread = current_thread();

	/* Idle threads handle preemption themselves */
	if ((thread->state & TH_IDLE)) {
		ast_off(AST_PREEMPTION);
		return;
	}

	/*
	 * It's possible for this to be called after AST_URGENT
	 * has already been handled, due to races in enable_preemption
	 */
	if (ast_peek(AST_URGENT) != AST_URGENT) {
		return;
	}

	/*
	 * Don't preempt if the thread is already preparing to block.
	 * TODO: the thread can cheese this with clear_wait()
	 */
	if (waitq_wait_possible(thread) == FALSE) {
		/* Consume AST_URGENT or the interrupt will call us again */
		ast_consume(AST_URGENT);
		return;
	}

	/* TODO: Should we csw_check again to notice if conditions have changed? */

	ast_t urgent_reason = ast_consume(AST_PREEMPTION);

	assert(urgent_reason & AST_PREEMPT);

	thread_block_reason(THREAD_CONTINUE_NULL, NULL, urgent_reason);

	assert(ml_get_interrupts_enabled() == FALSE);
}
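
/*
 * Illustrative sketch, compiled out: ast_taken_kernel() is invoked from
 * the machine-dependent interrupt/trap return path when AST_URGENT is
 * pending. The function below is hypothetical (the real call sites live
 * in per-architecture trap code) and only shows the calling discipline.
 */
#if 0
static void
example_kernel_interrupt_return(void)
{
	/* Interrupts must be disabled across the call */
	assert(ml_get_interrupts_enabled() == FALSE);

	ast_taken_kernel();     /* may context switch, but always returns here */

	/* ... restore register state and resume the interrupted kernel code ... */
}
#endif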

/*
 * An AST flag was set while returning to user mode
 * Called with interrupts disabled, returns with interrupts enabled
 * May call continuation instead of returning
 */
void
ast_taken_user(void)
{
	assert(ml_get_interrupts_enabled() == FALSE);

	thread_t thread = current_thread();

	/* We are about to return to userspace; there must not be a pending wait */
	assert(waitq_wait_possible(thread));
	assert((thread->state & TH_IDLE) == 0);

	/* TODO: Add more 'return to userspace' assertions here */

	/*
	 * If this thread was urgently preempted in userspace,
	 * take the preemption before processing the ASTs.
	 * The trap handler will call us again if we have more ASTs, so it's
	 * safe to block in a continuation here.
	 */
	if (ast_peek(AST_URGENT) == AST_URGENT) {
		ast_t urgent_reason = ast_consume(AST_PREEMPTION);

		assert(urgent_reason & AST_PREEMPT);

		/* TODO: Should we csw_check again to notice if conditions have changed? */

		thread_block_reason(thread_preempted, NULL, urgent_reason);
		/* NOTREACHED */
	}

	/*
	 * AST_KEVENT does not send an IPI when setting the AST for a thread
	 * running in parallel on a different processor. Only the AST bit on
	 * the thread will be set.
	 *
	 * Force a propagate for concurrent updates without an IPI.
	 */
	ast_propagate(thread);

	/*
	 * Consume all non-preemption processor ASTs matching reasons
	 * because we're handling them here.
	 *
	 * If one of the AST handlers blocks in a continuation,
	 * we'll reinstate the unserviced thread-level AST flags
	 * from the thread to the processor on context switch.
	 * If one of the AST handlers sets another AST,
	 * the trap handler will call ast_taken_user again.
	 *
	 * We expect the AST handlers not to thread_exception_return
	 * without an ast_propagate or context switch to reinstate
	 * the per-processor ASTs.
	 *
	 * TODO: Why are AST_DTRACE and AST_KPERF not per-thread ASTs?
	 */
	ast_t reasons = ast_consume(AST_PER_THREAD | AST_KPERF | AST_DTRACE);

	ml_set_interrupts_enabled(TRUE);

#if CONFIG_DTRACE
	if (reasons & AST_DTRACE) {
		dtrace_ast();
	}
#endif

#ifdef MACH_BSD
	if (reasons & AST_BSD) {
		thread_ast_clear(thread, AST_BSD);
		bsd_ast(thread);
	}
#endif

#if CONFIG_MACF
	if (reasons & AST_MACF) {
		thread_ast_clear(thread, AST_MACF);
		mac_thread_userret(thread);
	}
#endif

#if CONFIG_ARCADE
	if (reasons & AST_ARCADE) {
		thread_ast_clear(thread, AST_ARCADE);
		arcade_ast(thread);
	}
#endif

	if (reasons & AST_APC) {
		thread_ast_clear(thread, AST_APC);
		thread_apc_ast(thread);
	}

	if (reasons & AST_GUARD) {
		thread_ast_clear(thread, AST_GUARD);
		guard_ast(thread);
	}

	if (reasons & AST_LEDGER) {
		thread_ast_clear(thread, AST_LEDGER);
		ledger_ast(thread);
	}

	if (reasons & AST_KPERF) {
		thread_ast_clear(thread, AST_KPERF);
		kperf_kpc_thread_ast(thread);
	}

	if (reasons & AST_RESET_PCS) {
		thread_ast_clear(thread, AST_RESET_PCS);
		thread_reset_pcs_ast(thread);
	}

	if (reasons & AST_KEVENT) {
		thread_ast_clear(thread, AST_KEVENT);
		uint16_t bits = atomic_exchange(&thread->kevent_ast_bits, 0);
		if (bits) {
			kevent_ast(thread, bits);
		}
	}

#if CONFIG_TELEMETRY
	if (reasons & AST_TELEMETRY_ALL) {
		ast_t telemetry_reasons = reasons & AST_TELEMETRY_ALL;
		thread_ast_clear(thread, AST_TELEMETRY_ALL);
		telemetry_ast(thread, telemetry_reasons);
	}
#endif

	spl_t s = splsched();

#if CONFIG_SCHED_SFI
	/*
	 * SFI is currently a per-processor AST, not a per-thread AST
	 * TODO: SFI should be a per-thread AST
	 */
	if (ast_consume(AST_SFI) == AST_SFI) {
		sfi_ast(thread);
	}
#endif

	/* We are about to return to userspace; there must not be a pending wait */
	assert(waitq_wait_possible(thread));

	/*
	 * We've handled all per-thread ASTs; time to handle non-urgent preemption.
	 *
	 * We delay reading the preemption bits until now in case the thread
	 * blocks while handling per-thread ASTs.
	 *
	 * If one of the AST handlers had managed to set a new AST bit,
	 * thread_exception_return will call ast_taken_user again.
	 */
	ast_t preemption_reasons = ast_consume(AST_PREEMPTION);

	if (preemption_reasons & AST_PREEMPT) {
		/* Conditions may have changed from when the AST_PREEMPT was originally set, so re-check. */

		thread_lock(thread);
		preemption_reasons = csw_check(thread, current_processor(), (preemption_reasons & AST_QUANTUM));
		thread_unlock(thread);

#if CONFIG_SCHED_SFI
		/* csw_check might tell us that SFI is needed */
		if (preemption_reasons & AST_SFI) {
			sfi_ast(thread);
		}
#endif

		if (preemption_reasons & AST_PREEMPT) {
			/* switching to a continuation implicitly re-enables interrupts */
			thread_block_reason(thread_preempted, NULL, preemption_reasons);
			/* NOTREACHED */
		}
	}

	if (ast_consume(AST_UNQUIESCE) == AST_UNQUIESCE) {
		cpu_quiescent_counter_ast();
	}

	cpu_quiescent_counter_assert_ast();

	splx(s);

	/*
	 * Here's a good place to put assertions of things which must be true
	 * upon return to userspace.
	 */
	assert((thread->sched_flags & TH_SFLAG_WAITQ_PROMOTED) == 0);
	assert((thread->sched_flags & TH_SFLAG_RW_PROMOTED) == 0);
	assert((thread->sched_flags & TH_SFLAG_EXEC_PROMOTED) == 0);
	assert((thread->sched_flags & TH_SFLAG_PROMOTED) == 0);
	assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

	assert(thread->kern_promotion_schedpri == 0);
	assert(thread->waiting_for_mutex == NULL);
	assert(thread->rwlock_count == 0);
}
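
/*
 * Illustrative sketch, compiled out: each per-thread AST serviced in
 * ast_taken_user() follows the same clear-then-dispatch shape. A new
 * reason (AST_EXAMPLE and example_ast() are hypothetical names) would
 * slot into the function body as:
 */
#if 0
	if (reasons & AST_EXAMPLE) {
		/* Clear the thread-level bit before running the handler */
		thread_ast_clear(thread, AST_EXAMPLE);
		/* The handler may block; the trap handler re-enters ast_taken_user */
		example_ast(thread);
	}
#endif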

/*
 * Set AST flags on current processor
 * Called at splsched
 */
void
ast_on(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	*pending_ast |= reasons;
}

/*
 * Clear AST flags on current processor
 * Called at splsched
 */
void
ast_off(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	*pending_ast &= ~reasons;
}
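
/*
 * Illustrative sketch, compiled out: ast_on()/ast_off() touch the current
 * processor's pending set, so callers must be at splsched, which keeps
 * them bound to this processor with interrupts disabled. A hypothetical
 * caller requesting a BSD AST might look like:
 */
#if 0
static void
example_request_ast(void)
{
	spl_t s = splsched();

	ast_on(AST_BSD);        /* serviced on the next return to user mode */

	splx(s);
}
#endif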

/*
 * Consume the requested subset of the AST flags set on the processor
 * Return the bits that were set
 * Called at splsched
 */
ast_t
ast_consume(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	reasons &= *pending_ast;
	*pending_ast &= ~reasons;

	return reasons;
}

/*
 * Read the requested subset of the AST flags set on the processor
 * Return the bits that were set, don't modify the processor
 * Called at splsched
 */
ast_t
ast_peek(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	reasons &= *pending_ast;

	return reasons;
}
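
/*
 * Illustrative sketch, compiled out: ast_peek() tests for a reason without
 * claiming it, while ast_consume() removes what it returns from the pending
 * set. This mirrors the AST_URGENT handling in ast_taken_kernel() above.
 */
#if 0
static void
example_peek_then_consume(void)
{
	spl_t s = splsched();

	if (ast_peek(AST_URGENT) == AST_URGENT) {
		/* Claim the whole preemption set now that we intend to act on it */
		ast_t reasons = ast_consume(AST_PREEMPTION);
		assert(reasons & AST_PREEMPT);
	}

	splx(s);
}
#endif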

/*
 * Re-set current processor's per-thread AST flags to those set on thread
 * Called at splsched
 */
void
ast_context(thread_t thread)
{
	ast_t *pending_ast = ast_pending();

	*pending_ast = ((*pending_ast & ~AST_PER_THREAD) | thread->ast);
}

/*
 * Propagate ASTs set on a thread to the current processor
 * Called at splsched
 */
void
ast_propagate(thread_t thread)
{
	ast_on(thread->ast);
}
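
/*
 * Illustrative sketch, compiled out: at context switch the scheduler uses
 * ast_context() to replace the processor's per-thread AST bits with those
 * of the incoming thread, which is how unserviced thread-level ASTs are
 * reinstated when a thread that blocked runs again. The function below is
 * a hypothetical stand-in for the machine-dependent switch tail.
 */
#if 0
static void
example_context_switch_tail(thread_t new_thread)
{
	/* Drop the outgoing thread's per-thread bits, adopt the new thread's */
	ast_context(new_thread);
}
#endif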

void
ast_dtrace_on(void)
{
	ast_on(AST_DTRACE);
}