/*
 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <kern/ast.h>
#include <kern/counter.h>
#include <kern/cpu_quiesce.h>
#include <kern/misc_protos.h>
#include <kern/queue.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/restartable.h>
#include <kern/spl.h>
#include <kern/sfi.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
#include <kern/waitq.h>
#include <kern/ledger.h>
#include <kern/machine.h>
#include <kperf/kperf_kpc.h>
#include <mach/policy.h>
#include <security/mac_mach_internal.h> // for MACF AST hook
#include <stdatomic.h>

#if CONFIG_ARCADE
#include <kern/arcade.h>
#endif

static void __attribute__((noinline, noreturn, disable_tail_calls))
thread_preempted(__unused void* parameter, __unused wait_result_t result)
{
	/*
	 * We've been scheduled again after a userspace preemption,
	 * try again to return to userspace.
	 */
	thread_exception_return();
}

/*
 * AST_URGENT was detected while in kernel mode
 * Called with interrupts disabled, returns the same way
 * Must return to caller
 */
void
ast_taken_kernel(void)
{
	assert(ml_get_interrupts_enabled() == FALSE);

	thread_t thread = current_thread();

	/* Idle threads handle preemption themselves */
	if ((thread->state & TH_IDLE)) {
		ast_off(AST_PREEMPTION);
		return;
	}

	/*
	 * It's possible for this to be called after AST_URGENT
	 * has already been handled, due to races in enable_preemption
	 */
	if (ast_peek(AST_URGENT) != AST_URGENT) {
		return;
	}

	/*
	 * Don't preempt if the thread is already preparing to block.
	 * TODO: the thread can cheese this with clear_wait()
	 */
	if (waitq_wait_possible(thread) == FALSE) {
		/* Consume AST_URGENT or the interrupt will call us again */
		ast_consume(AST_URGENT);
		return;
	}

	/* TODO: Should we csw_check again to notice if conditions have changed? */

	ast_t urgent_reason = ast_consume(AST_PREEMPTION);

	assert(urgent_reason & AST_PREEMPT);

	thread_block_reason(THREAD_CONTINUE_NULL, NULL, urgent_reason);

	assert(ml_get_interrupts_enabled() == FALSE);
}
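
/*
 * Illustrative sketch (hypothetical, not part of this file): an
 * architecture's interrupt-exit or preemption-enable path would
 * typically reach ast_taken_kernel() roughly like
 *
 *	if (!ml_get_interrupts_enabled() &&
 *	    ast_peek(AST_URGENT) == AST_URGENT &&
 *	    interrupted_context_was_kernel) {
 *		ast_taken_kernel();
 *	}
 *
 * where interrupted_context_was_kernel stands in for whatever
 * per-architecture check distinguishes a kernel-mode interrupt frame.
 */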

/*
 * An AST flag was set while returning to user mode
 * Called with interrupts disabled, returns with interrupts enabled
 * May call continuation instead of returning
 */
void
ast_taken_user(void)
{
	assert(ml_get_interrupts_enabled() == FALSE);

	thread_t thread = current_thread();

	/* We are about to return to userspace, there must not be a pending wait */
	assert(waitq_wait_possible(thread));
	assert((thread->state & TH_IDLE) == 0);

	/* TODO: Add more 'return to userspace' assertions here */

	/*
	 * If this thread was urgently preempted in userspace,
	 * take the preemption before processing the ASTs.
	 * The trap handler will call us again if we have more ASTs, so it's
	 * safe to block in a continuation here.
	 */
	if (ast_peek(AST_URGENT) == AST_URGENT) {
		ast_t urgent_reason = ast_consume(AST_PREEMPTION);

		assert(urgent_reason & AST_PREEMPT);

		/* TODO: Should we csw_check again to notice if conditions have changed? */

		thread_block_reason(thread_preempted, NULL, urgent_reason);
		/* NOTREACHED */
	}

	/*
	 * AST_KEVENT does not send an IPI when setting the ast for a thread running in parallel
	 * on a different processor. Only the ast bit on the thread will be set.
	 *
	 * Force a propagate for concurrent updates without an IPI.
	 */
	ast_propagate(thread);

	/*
	 * Consume all non-preemption processor ASTs matching reasons
	 * because we're handling them here.
	 *
	 * If one of the AST handlers blocks in a continuation,
	 * we'll reinstate the unserviced thread-level AST flags
	 * from the thread to the processor on context switch.
	 * If one of the AST handlers sets another AST,
	 * the trap handler will call ast_taken_user again.
	 *
	 * We expect the AST handlers not to thread_exception_return
	 * without an ast_propagate or context switch to reinstate
	 * the per-processor ASTs.
	 *
	 * TODO: Why are AST_DTRACE and AST_KPERF not per-thread ASTs?
	 */
	ast_t reasons = ast_consume(AST_PER_THREAD | AST_KPERF | AST_DTRACE);

	ml_set_interrupts_enabled(TRUE);

#if CONFIG_DTRACE
	if (reasons & AST_DTRACE) {
		dtrace_ast();
	}
#endif

#ifdef MACH_BSD
	if (reasons & AST_BSD) {
		thread_ast_clear(thread, AST_BSD);
		bsd_ast(thread);
	}
#endif

#if CONFIG_MACF
	if (reasons & AST_MACF) {
		thread_ast_clear(thread, AST_MACF);
		mac_thread_userret(thread);
	}
#endif

#if CONFIG_ARCADE
	if (reasons & AST_ARCADE) {
		thread_ast_clear(thread, AST_ARCADE);
		arcade_ast(thread);
	}
#endif

	if (reasons & AST_APC) {
		thread_ast_clear(thread, AST_APC);
		thread_apc_ast(thread);
	}

	if (reasons & AST_GUARD) {
		thread_ast_clear(thread, AST_GUARD);
		guard_ast(thread);
	}

	if (reasons & AST_LEDGER) {
		thread_ast_clear(thread, AST_LEDGER);
		ledger_ast(thread);
	}

	if (reasons & AST_KPERF) {
		thread_ast_clear(thread, AST_KPERF);
		kperf_kpc_thread_ast(thread);
	}

	if (reasons & AST_RESET_PCS) {
		thread_ast_clear(thread, AST_RESET_PCS);
		thread_reset_pcs_ast(thread);
	}

	if (reasons & AST_KEVENT) {
		thread_ast_clear(thread, AST_KEVENT);
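		/*
		 * Take and clear any pending kevent bits in one atomic step;
		 * per the AST_KEVENT note above, they may have been published
		 * from another processor without an IPI.
		 */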
		uint16_t bits = atomic_exchange(&thread->kevent_ast_bits, 0);
		if (bits) {
			kevent_ast(thread, bits);
		}
	}

#if CONFIG_TELEMETRY
	if (reasons & AST_TELEMETRY_ALL) {
		ast_t telemetry_reasons = reasons & AST_TELEMETRY_ALL;
		thread_ast_clear(thread, AST_TELEMETRY_ALL);
		telemetry_ast(thread, telemetry_reasons);
	}
#endif

	spl_t s = splsched();

#if CONFIG_SCHED_SFI
	/*
	 * SFI is currently a per-processor AST, not a per-thread AST
	 * TODO: SFI should be a per-thread AST
	 */
	if (ast_consume(AST_SFI) == AST_SFI) {
		sfi_ast(thread);
	}
#endif

	/* We are about to return to userspace, there must not be a pending wait */
	assert(waitq_wait_possible(thread));

	/*
	 * We've handled all per-thread ASTs, time to handle non-urgent preemption.
	 *
	 * We delay reading the preemption bits until now in case the thread
	 * blocks while handling per-thread ASTs.
	 *
	 * If one of the AST handlers had managed to set a new AST bit,
	 * thread_exception_return will call ast_taken again.
	 */
	ast_t preemption_reasons = ast_consume(AST_PREEMPTION);

	if (preemption_reasons & AST_PREEMPT) {
		/* Conditions may have changed from when the AST_PREEMPT was originally set, so re-check. */

		thread_lock(thread);
		preemption_reasons = csw_check(thread, current_processor(), (preemption_reasons & AST_QUANTUM));
		thread_unlock(thread);

#if CONFIG_SCHED_SFI
		/* csw_check might tell us that SFI is needed */
		if (preemption_reasons & AST_SFI) {
			sfi_ast(thread);
		}
#endif

		if (preemption_reasons & AST_PREEMPT) {
			/* switching to a continuation implicitly re-enables interrupts */
			thread_block_reason(thread_preempted, NULL, preemption_reasons);
			/* NOTREACHED */
		}
	}

	if (ast_consume(AST_UNQUIESCE) == AST_UNQUIESCE) {
		cpu_quiescent_counter_ast();
	}

	cpu_quiescent_counter_assert_ast();

	splx(s);

	/*
	 * Here's a good place to put assertions of things which must be true
	 * upon return to userspace.
	 */
	assert((thread->sched_flags & TH_SFLAG_WAITQ_PROMOTED) == 0);
	assert((thread->sched_flags & TH_SFLAG_RW_PROMOTED) == 0);
	assert((thread->sched_flags & TH_SFLAG_EXEC_PROMOTED) == 0);
	assert((thread->sched_flags & TH_SFLAG_PROMOTED) == 0);
	assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

	assert(thread->kern_promotion_schedpri == 0);
	assert(thread->waiting_for_mutex == NULL);
	assert(thread->rwlock_count == 0);
}
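
/*
 * Illustrative sketch (hypothetical; the real code is per-architecture and
 * lives outside this file): a return-to-user trap path typically keeps
 * calling ast_taken_user() until no ASTs remain, roughly like
 *
 *	ml_set_interrupts_enabled(FALSE);
 *	while (an AST is still pending for this thread or processor) {
 *		ast_taken_user();	// may block or take a continuation
 *		ml_set_interrupts_enabled(FALSE);
 *	}
 *	return_to_user();
 *
 * return_to_user and the exact re-check condition are stand-ins for the
 * architecture-specific exit code referenced by the comments above.
 */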

/*
 * Set AST flags on current processor
 * Called at splsched
 */
void
ast_on(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	*pending_ast |= reasons;
}

/*
 * Clear AST flags on current processor
 * Called at splsched
 */
void
ast_off(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	*pending_ast &= ~reasons;
}

/*
 * Consume the requested subset of the AST flags set on the processor
 * Return the bits that were set
 * Called at splsched
 */
ast_t
ast_consume(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	reasons &= *pending_ast;
	*pending_ast &= ~reasons;

	return reasons;
}

/*
 * Read the requested subset of the AST flags set on the processor
 * Return the bits that were set, don't modify the processor
 * Called at splsched
 */
ast_t
ast_peek(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	reasons &= *pending_ast;

	return reasons;
}
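
/*
 * Usage note: ast_peek() and ast_consume() are typically paired, as in
 * ast_taken_kernel() above -- peek to cheaply test whether an urgent AST
 * is pending, then consume the whole AST_PREEMPTION set only once it is
 * actually safe to block, e.g.
 *
 *	if (ast_peek(AST_URGENT) == AST_URGENT) {
 *		ast_t reasons = ast_consume(AST_PREEMPTION);
 *		thread_block_reason(THREAD_CONTINUE_NULL, NULL, reasons);
 *	}
 */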

/*
 * Re-set current processor's per-thread AST flags to those set on thread
 * Called at splsched
 */
void
ast_context(thread_t thread)
{
	ast_t *pending_ast = ast_pending();

	*pending_ast = ((*pending_ast & ~AST_PER_THREAD) | thread->ast);
}
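
/*
 * ast_context() is typically invoked from the context-switch path (outside
 * this file) when the processor picks up a new thread, so that the
 * processor-pending per-thread AST bits track the incoming thread while
 * the processor-only bits are preserved.
 */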

/*
 * Propagate ASTs set on a thread to the current processor
 * Called at splsched
 */
void
ast_propagate(thread_t thread)
{
	ast_on(thread->ast);
}

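/*
 * Request an AST_DTRACE on the current processor, so that dtrace_ast()
 * can run in ast_taken_user() on the way back to user mode (see above).
 */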
void
ast_dtrace_on(void)
{
	ast_on(AST_DTRACE);
}