/*
 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <kern/ast.h>
#include <kern/counters.h>
#include <kern/misc_protos.h>
#include <kern/queue.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/spl.h>
#include <kern/sfi.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
#include <kern/waitq.h>
#include <kern/ledger.h>
#include <kern/machine.h>
#include <kperf/kperf_kpc.h>
#include <mach/policy.h>
#include <security/mac_mach_internal.h> // for MACF AST hook
#include <stdatomic.h>

static void __attribute__((noinline, noreturn, disable_tail_calls))
thread_preempted(__unused void* parameter, __unused wait_result_t result)
{
	/*
	 * We've been scheduled again after a userspace preemption,
	 * try again to return to userspace.
	 */
	thread_exception_return();
}
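
/*
 * Illustrative note: within this file thread_preempted() is used only as a
 * continuation passed to thread_block_reason(), e.g.
 *
 *	thread_block_reason(thread_preempted, NULL, reasons);
 *
 * so a thread preempted on its way back to user mode resumes here when it is
 * next scheduled and retries the return via thread_exception_return().
 */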

/*
 * AST_URGENT was detected while in kernel mode
 * Called with interrupts disabled, returns the same way
 * Must return to caller
 */
void
ast_taken_kernel(void)
{
	assert(ml_get_interrupts_enabled() == FALSE);

	thread_t thread = current_thread();

	/* Idle threads handle preemption themselves */
	if ((thread->state & TH_IDLE)) {
		ast_off(AST_PREEMPTION);
		return;
	}

	/*
	 * It's possible for this to be called after AST_URGENT
	 * has already been handled, due to races in enable_preemption
	 */
	if (ast_peek(AST_URGENT) != AST_URGENT)
		return;

	/*
	 * Don't preempt if the thread is already preparing to block.
	 * TODO: the thread can cheese this with clear_wait()
	 */
	if (waitq_wait_possible(thread) == FALSE) {
		/* Consume AST_URGENT or the interrupt will call us again */
		ast_consume(AST_URGENT);
		return;
	}

	/* TODO: Should we csw_check again to notice if conditions have changed? */

	ast_t urgent_reason = ast_consume(AST_PREEMPTION);

	assert(urgent_reason & AST_PREEMPT);

	counter(c_ast_taken_block++);

	thread_block_reason(THREAD_CONTINUE_NULL, NULL, urgent_reason);

	assert(ml_get_interrupts_enabled() == FALSE);
}

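/*
 * Illustrative sketch (assumed call site, not part of this file): the
 * machine-dependent kernel-preemption path is expected to invoke
 * ast_taken_kernel() when AST_URGENT is pending while running in the
 * kernel, roughly:
 *
 *	// in the per-architecture interrupt-return / preemption-check path,
 *	// with interrupts disabled:
 *	if (ast_peek(AST_URGENT) == AST_URGENT) {
 *		ast_taken_kernel();
 *	}
 *
 * The real call sites live in the per-architecture trap code; the shape
 * above is an assumption for illustration only.
 */
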
/*
 * An AST flag was set while returning to user mode
 * Called with interrupts disabled, returns with interrupts enabled
 * May call continuation instead of returning
 */
void
ast_taken_user(void)
{
	assert(ml_get_interrupts_enabled() == FALSE);

	thread_t thread = current_thread();

	/* We are about to return to userspace, there must not be a pending wait */
	assert(waitq_wait_possible(thread));
	assert((thread->state & TH_IDLE) == 0);

	/* TODO: Add more 'return to userspace' assertions here */

	/*
	 * If this thread was urgently preempted in userspace,
	 * take the preemption before processing the ASTs.
	 * The trap handler will call us again if we have more ASTs, so it's
	 * safe to block in a continuation here.
	 */
	if (ast_peek(AST_URGENT) == AST_URGENT) {
		ast_t urgent_reason = ast_consume(AST_PREEMPTION);

		assert(urgent_reason & AST_PREEMPT);

		/* TODO: Should we csw_check again to notice if conditions have changed? */

		thread_block_reason(thread_preempted, NULL, urgent_reason);
		/* NOTREACHED */
	}

	/*
	 * AST_KEVENT does not send an IPI when setting the AST for a thread running in parallel
	 * on a different processor. Only the AST bit on the thread will be set.
	 *
	 * Force a propagate for concurrent updates without an IPI.
	 */
	ast_propagate(thread);

	/*
	 * Consume all non-preemption processor ASTs matching reasons
	 * because we're handling them here.
	 *
	 * If one of the AST handlers blocks in a continuation,
	 * we'll reinstate the unserviced thread-level AST flags
	 * from the thread to the processor on context switch.
	 * If one of the AST handlers sets another AST,
	 * the trap handler will call ast_taken_user again.
	 *
	 * We expect the AST handlers not to thread_exception_return
	 * without an ast_propagate or context switch to reinstate
	 * the per-processor ASTs.
	 *
	 * TODO: Why are AST_DTRACE and AST_KPERF not per-thread ASTs?
	 */
	ast_t reasons = ast_consume(AST_PER_THREAD | AST_KPERF | AST_DTRACE);

	ml_set_interrupts_enabled(TRUE);

#if CONFIG_DTRACE
	if (reasons & AST_DTRACE) {
		dtrace_ast();
	}
#endif

#ifdef MACH_BSD
	if (reasons & AST_BSD) {
		thread_ast_clear(thread, AST_BSD);
		bsd_ast(thread);
	}
#endif

#if CONFIG_MACF
	if (reasons & AST_MACF) {
		thread_ast_clear(thread, AST_MACF);
		mac_thread_userret(thread);
	}
#endif

	if (reasons & AST_APC) {
		thread_ast_clear(thread, AST_APC);
		thread_apc_ast(thread);
	}

	if (reasons & AST_GUARD) {
		thread_ast_clear(thread, AST_GUARD);
		guard_ast(thread);
	}

	if (reasons & AST_LEDGER) {
		thread_ast_clear(thread, AST_LEDGER);
		ledger_ast(thread);
	}

	if (reasons & AST_KPERF) {
		thread_ast_clear(thread, AST_KPERF);
		kperf_kpc_thread_ast(thread);
	}

	if (reasons & AST_KEVENT) {
		thread_ast_clear(thread, AST_KEVENT);
		uint16_t bits = atomic_exchange(&thread->kevent_ast_bits, 0);
		if (bits) kevent_ast(thread, bits);
	}

#if CONFIG_TELEMETRY
	if (reasons & AST_TELEMETRY_ALL) {
		ast_t telemetry_reasons = reasons & AST_TELEMETRY_ALL;
		thread_ast_clear(thread, AST_TELEMETRY_ALL);
		telemetry_ast(thread, telemetry_reasons);
	}
#endif

	spl_t s = splsched();

#if CONFIG_SCHED_SFI
	/*
	 * SFI is currently a per-processor AST, not a per-thread AST
	 * TODO: SFI should be a per-thread AST
	 */
	if (ast_consume(AST_SFI) == AST_SFI) {
		sfi_ast(thread);
	}
#endif

	/* We are about to return to userspace, there must not be a pending wait */
	assert(waitq_wait_possible(thread));

	/*
	 * We've handled all per-thread ASTs, time to handle non-urgent preemption.
	 *
	 * We delay reading the preemption bits until now in case the thread
	 * blocks while handling per-thread ASTs.
	 *
	 * If one of the AST handlers had managed to set a new AST bit,
	 * thread_exception_return will call ast_taken_user again.
	 */
	ast_t preemption_reasons = ast_consume(AST_PREEMPTION);

	if (preemption_reasons & AST_PREEMPT) {
		/* Conditions may have changed from when the AST_PREEMPT was originally set, so re-check. */

		thread_lock(thread);
		preemption_reasons = csw_check(current_processor(), (preemption_reasons & AST_QUANTUM));
		thread_unlock(thread);

#if CONFIG_SCHED_SFI
		/* csw_check might tell us that SFI is needed */
		if (preemption_reasons & AST_SFI) {
			sfi_ast(thread);
		}
#endif

		if (preemption_reasons & AST_PREEMPT) {
			counter(c_ast_taken_block++);
			/* switching to a continuation implicitly re-enables interrupts */
			thread_block_reason(thread_preempted, NULL, preemption_reasons);
			/* NOTREACHED */
		}
	}

	splx(s);
}

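/*
 * Illustrative sketch (assumed call site, not part of this file): the
 * return-to-user path is expected to keep calling ast_taken_user() until no
 * AST bits remain, roughly:
 *
 *	// with interrupts disabled, just before dropping to user mode:
 *	while (ast_peek(AST_ALL) != AST_NONE) {
 *		ast_taken_user();	// may enable interrupts, block, or not return
 *		ml_set_interrupts_enabled(FALSE);
 *	}
 *
 * The real loop lives in each architecture's trap/exception-return code and
 * in thread_exception_return(); the loop shape above is an assumption for
 * illustration only.
 */
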
/*
 * Handle preemption IPI or IPI in response to setting an AST flag
 * Triggered by cause_ast_check
 * Called at splsched
 */
void
ast_check(processor_t processor)
{
	if (processor->state != PROCESSOR_RUNNING &&
	    processor->state != PROCESSOR_SHUTDOWN)
		return;

	thread_t thread = processor->active_thread;

	assert(thread == current_thread());

	thread_lock(thread);

	/*
	 * Propagate thread ast to processor.
	 * (handles IPI in response to setting AST flag)
	 */
	ast_propagate(thread);

	boolean_t needs_callout = false;
	processor->current_pri = thread->sched_pri;
	processor->current_sfi_class = thread->sfi_class = sfi_thread_classify(thread);
	processor->current_recommended_pset_type = recommended_pset_type(thread);
	perfcontrol_class_t thread_class = thread_get_perfcontrol_class(thread);
	if (thread_class != processor->current_perfctl_class) {
		/*
		 * We updated the perfctl class of this thread from another core.
		 * Since we don't do CLPC callouts from another core, do a callout
		 * here to let CLPC know that the currently running thread has a new
		 * class.
		 */
		needs_callout = true;
	}
	processor->current_perfctl_class = thread_class;

	ast_t preempt;

	if ((preempt = csw_check(processor, AST_NONE)) != AST_NONE)
		ast_on(preempt);

	thread_unlock(thread);

	if (needs_callout) {
		machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE,
			mach_approximate_time(), 0, thread);
	}
}

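/*
 * Illustrative sketch (assumed caller, not part of this file): a core that
 * changes the state of a thread running on another processor (priority, AST
 * bits, perfcontrol class) nudges that processor with cause_ast_check(),
 * which sends the IPI that ends up running ast_check() above, roughly:
 *
 *	// caller holds the thread lock and has just updated thread state
 *	if (thread->last_processor != PROCESSOR_NULL &&
 *	    thread == thread->last_processor->active_thread) {
 *		cause_ast_check(thread->last_processor);
 *	}
 *
 * The exact conditions are an assumption for illustration; see the scheduler
 * code (kern/sched_prim.c) for the real call sites.
 */
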
/*
 * Set AST flags on current processor
 * Called at splsched
 */
void
ast_on(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	*pending_ast |= reasons;
}

/*
 * Clear AST flags on current processor
 * Called at splsched
 */
void
ast_off(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	*pending_ast &= ~reasons;
}

/*
 * Consume the requested subset of the AST flags set on the processor
 * Return the bits that were set
 * Called at splsched
 */
ast_t
ast_consume(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	reasons &= *pending_ast;
	*pending_ast &= ~reasons;

	return reasons;
}

/*
 * Read the requested subset of the AST flags set on the processor
 * Return the bits that were set, don't modify the processor
 * Called at splsched
 */
ast_t
ast_peek(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	reasons &= *pending_ast;

	return reasons;
}

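/*
 * Usage note (illustrative): ast_peek() is a non-destructive test, while
 * ast_consume() clears whatever bits it returns, making the caller
 * responsible for handling them. The pattern used by ast_taken_kernel() and
 * ast_taken_user() above is roughly:
 *
 *	// at splsched
 *	if (ast_peek(AST_URGENT) == AST_URGENT) {		// still pending afterwards
 *		ast_t reasons = ast_consume(AST_PREEMPTION);	// cleared; must be handled now
 *		thread_block_reason(THREAD_CONTINUE_NULL, NULL, reasons);
 *	}
 */
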
/*
 * Re-set current processor's per-thread AST flags to those set on thread
 * Called at splsched
 */
void
ast_context(thread_t thread)
{
	ast_t *pending_ast = ast_pending();

	*pending_ast = ((*pending_ast & ~AST_PER_THREAD) | thread->ast);
}

/*
 * Propagate ASTs set on a thread to the current processor
 * Called at splsched
 */
void
ast_propagate(thread_t thread)
{
	ast_on(thread->ast);
}

/*
 * Set AST_DTRACE on the current processor
 */
void
ast_dtrace_on(void)
{
	ast_on(AST_DTRACE);
}