/*
 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <kern/ast.h>
#include <kern/counters.h>
#include <kern/cpu_quiesce.h>
#include <kern/misc_protos.h>
#include <kern/queue.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/spl.h>
#include <kern/sfi.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
#include <kern/waitq.h>
#include <kern/ledger.h>
#include <kern/machine.h>
#include <kperf/kperf_kpc.h>
#include <mach/policy.h>
#include <security/mac_mach_internal.h> // for MACF AST hook
#include <stdatomic.h>
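
/*
 * ASTs (Asynchronous System Traps) are pending-work flags kept per thread
 * and per processor. They are checked at well-defined points, typically on
 * return to user mode or at kernel preemption points, and dispatched here.
 */
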
static void __attribute__((noinline, noreturn, disable_tail_calls))
thread_preempted(__unused void* parameter, __unused wait_result_t result)
{
    /*
     * We've been scheduled again after a userspace preemption,
     * try again to return to userspace.
     */
    thread_exception_return();
}

/*
 * AST_URGENT was detected while in kernel mode
 * Called with interrupts disabled, returns the same way
 * Must return to caller
 */
void
ast_taken_kernel(void)
{
    assert(ml_get_interrupts_enabled() == FALSE);

    thread_t thread = current_thread();

    /* Idle threads handle preemption themselves */
    if ((thread->state & TH_IDLE)) {
        ast_off(AST_PREEMPTION);
        return;
    }

    /*
     * It's possible for this to be called after AST_URGENT
     * has already been handled, due to races in enable_preemption
     */
    if (ast_peek(AST_URGENT) != AST_URGENT)
        return;

    /*
     * Don't preempt if the thread is already preparing to block.
     * TODO: the thread can cheese this with clear_wait()
     */
    if (waitq_wait_possible(thread) == FALSE) {
        /* Consume AST_URGENT or the interrupt will call us again */
        ast_consume(AST_URGENT);
        return;
    }

    /* TODO: Should we csw_check again to notice if conditions have changed? */

    ast_t urgent_reason = ast_consume(AST_PREEMPTION);

    assert(urgent_reason & AST_PREEMPT);

    counter(c_ast_taken_block++);

    thread_block_reason(THREAD_CONTINUE_NULL, NULL, urgent_reason);
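
    /* We resume here once we've been rescheduled; interrupts are still disabled */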
    assert(ml_get_interrupts_enabled() == FALSE);
}

/*
 * An AST flag was set while returning to user mode
 * Called with interrupts disabled, returns with interrupts enabled
 * May call continuation instead of returning
 */
void
ast_taken_user(void)
{
    assert(ml_get_interrupts_enabled() == FALSE);

    thread_t thread = current_thread();

    /* We are about to return to userspace, there must not be a pending wait */
    assert(waitq_wait_possible(thread));
    assert((thread->state & TH_IDLE) == 0);

    /* TODO: Add more 'return to userspace' assertions here */

    /*
     * If this thread was urgently preempted in userspace,
     * take the preemption before processing the ASTs.
     * The trap handler will call us again if we have more ASTs, so it's
     * safe to block in a continuation here.
     */
    if (ast_peek(AST_URGENT) == AST_URGENT) {
        ast_t urgent_reason = ast_consume(AST_PREEMPTION);

        assert(urgent_reason & AST_PREEMPT);

        /* TODO: Should we csw_check again to notice if conditions have changed? */

        thread_block_reason(thread_preempted, NULL, urgent_reason);
        /* NOTREACHED */
    }

    /*
     * AST_KEVENT does not send an IPI when setting the ast for a thread running in parallel
     * on a different processor. Only the ast bit on the thread will be set.
     *
     * Force a propagate for concurrent updates without an IPI.
     */
    ast_propagate(thread);

    /*
     * Consume all non-preemption processor ASTs matching reasons
     * because we're handling them here.
     *
     * If one of the AST handlers blocks in a continuation,
     * we'll reinstate the unserviced thread-level AST flags
     * from the thread to the processor on context switch.
     * If one of the AST handlers sets another AST,
     * the trap handler will call ast_taken_user again.
     *
     * We expect the AST handlers not to thread_exception_return
     * without an ast_propagate or context switch to reinstate
     * the per-processor ASTs.
     *
     * TODO: Why are AST_DTRACE and AST_KPERF not per-thread ASTs?
     */
    ast_t reasons = ast_consume(AST_PER_THREAD | AST_KPERF | AST_DTRACE);

    ml_set_interrupts_enabled(TRUE);
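
    /* The individual handlers below may block or run for a while, so they run with interrupts enabled */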
#if CONFIG_DTRACE
    if (reasons & AST_DTRACE) {
        dtrace_ast();
    }
#endif /* CONFIG_DTRACE */

#ifdef MACH_BSD
    if (reasons & AST_BSD) {
        thread_ast_clear(thread, AST_BSD);
        bsd_ast(thread);
    }
#endif /* MACH_BSD */

#if CONFIG_MACF
    if (reasons & AST_MACF) {
        thread_ast_clear(thread, AST_MACF);
        mac_thread_userret(thread);
    }
#endif /* CONFIG_MACF */

    if (reasons & AST_APC) {
        thread_ast_clear(thread, AST_APC);
        thread_apc_ast(thread);
    }

    if (reasons & AST_GUARD) {
        thread_ast_clear(thread, AST_GUARD);
        guard_ast(thread);
    }

    if (reasons & AST_LEDGER) {
        thread_ast_clear(thread, AST_LEDGER);
        ledger_ast(thread);
    }

    if (reasons & AST_KPERF) {
        thread_ast_clear(thread, AST_KPERF);
        kperf_kpc_thread_ast(thread);
    }

    if (reasons & AST_KEVENT) {
        thread_ast_clear(thread, AST_KEVENT);
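        /*
         * Another CPU may set kevent_ast_bits concurrently (no IPI is
         * sent), so claim the pending bits with an atomic exchange.
         */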
        uint16_t bits = atomic_exchange(&thread->kevent_ast_bits, 0);
        if (bits)
            kevent_ast(thread, bits);
    }

#if CONFIG_TELEMETRY
    if (reasons & AST_TELEMETRY_ALL) {
        ast_t telemetry_reasons = reasons & AST_TELEMETRY_ALL;
        thread_ast_clear(thread, AST_TELEMETRY_ALL);
        telemetry_ast(thread, telemetry_reasons);
    }
#endif /* CONFIG_TELEMETRY */
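
    /*
     * Raise back to splsched: the preemption and SFI checks below examine
     * scheduler state and must not be interrupted
     */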
    spl_t s = splsched();

#if CONFIG_SCHED_SFI
    /*
     * SFI is currently a per-processor AST, not a per-thread AST
     * TODO: SFI should be a per-thread AST
     */
    if (ast_consume(AST_SFI) == AST_SFI) {
        sfi_ast(thread);
    }
#endif /* CONFIG_SCHED_SFI */

    /* We are about to return to userspace, there must not be a pending wait */
    assert(waitq_wait_possible(thread));

    /*
     * We've handled all per-thread ASTs, time to handle non-urgent preemption.
     *
     * We delay reading the preemption bits until now in case the thread
     * blocks while handling per-thread ASTs.
     *
     * If one of the AST handlers had managed to set a new AST bit,
     * thread_exception_return will call ast_taken again.
     */
    ast_t preemption_reasons = ast_consume(AST_PREEMPTION);

    if (preemption_reasons & AST_PREEMPT) {
        /* Conditions may have changed from when the AST_PREEMPT was originally set, so re-check. */

        thread_lock(thread);
        preemption_reasons = csw_check(thread, current_processor(), (preemption_reasons & AST_QUANTUM));
        thread_unlock(thread);

#if CONFIG_SCHED_SFI
        /* csw_check might tell us that SFI is needed */
        if (preemption_reasons & AST_SFI) {
            sfi_ast(thread);
        }
#endif /* CONFIG_SCHED_SFI */

        if (preemption_reasons & AST_PREEMPT) {
            counter(c_ast_taken_block++);
            /* switching to a continuation implicitly re-enables interrupts */
            thread_block_reason(thread_preempted, NULL, preemption_reasons);
            /* NOTREACHED */
        }
    }

    if (ast_consume(AST_UNQUIESCE) == AST_UNQUIESCE) {
        cpu_quiescent_counter_ast();
    }

    cpu_quiescent_counter_assert_ast();

    splx(s);
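    /* splx() drops back from splsched; interrupts are enabled again for the return to user mode */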

    /*
     * Here's a good place to put assertions of things which must be true
     * upon return to userspace.
     */
    assert((thread->sched_flags & TH_SFLAG_WAITQ_PROMOTED) == 0);
    assert((thread->sched_flags & TH_SFLAG_RW_PROMOTED) == 0);
    assert((thread->sched_flags & TH_SFLAG_EXEC_PROMOTED) == 0);
    assert((thread->sched_flags & TH_SFLAG_PROMOTED) == 0);
    assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

    assert(thread->promotions == 0);
    assert(thread->was_promoted_on_wakeup == 0);
    assert(thread->waiting_for_mutex == NULL);
    assert(thread->rwlock_count == 0);
}

/*
 * Set AST flags on current processor
 * Called at splsched
 */
void
ast_on(ast_t reasons)
{
    ast_t *pending_ast = ast_pending();

    *pending_ast |= reasons;
}

/*
 * Clear AST flags on current processor
 * Called at splsched
 */
void
ast_off(ast_t reasons)
{
    ast_t *pending_ast = ast_pending();

    *pending_ast &= ~reasons;
}

/*
 * Consume the requested subset of the AST flags set on the processor
 * Return the bits that were set
 * Called at splsched
 */
ast_t
ast_consume(ast_t reasons)
{
    ast_t *pending_ast = ast_pending();

    reasons &= *pending_ast;
    *pending_ast &= ~reasons;

    return reasons;
}

/*
 * Read the requested subset of the AST flags set on the processor
 * Return the bits that were set, don't modify the processor
 * Called at splsched
 */
ast_t
ast_peek(ast_t reasons)
{
    ast_t *pending_ast = ast_pending();

    reasons &= *pending_ast;

    return reasons;
}

/*
 * Re-set current processor's per-thread AST flags to those set on thread
 * Called at splsched
 */
void
ast_context(thread_t thread)
{
    ast_t *pending_ast = ast_pending();

    *pending_ast = ((*pending_ast & ~AST_PER_THREAD) | thread->ast);
}

/*
 * Propagate ASTs set on a thread to the current processor
 * Called at splsched
 */
void
ast_propagate(thread_t thread)
{
    ast_on(thread->ast);
}
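
/*
 * Rough call-flow sketch: the machine-dependent trap and interrupt return
 * paths check the processor's pending AST bits. On return to user mode,
 * pending ASTs are handled via ast_taken_user(); when AST_URGENT is posted
 * against a thread running in kernel mode, the interrupt exit path invokes
 * ast_taken_kernel() to take the urgent preemption.
 */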