/*
 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *     Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *     School of Computer Science
 *     Carnegie Mellon University
 *     Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <kern/ast.h>
#include <kern/counters.h>
#include <kern/misc_protos.h>
#include <kern/queue.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/spl.h>
#include <kern/sfi.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
#include <kern/waitq.h>
#include <kern/ledger.h>
#include <kern/machine.h>
#include <kperf/kperf_kpc.h>
#include <mach/policy.h>
#include <security/mac_mach_internal.h> // for MACF AST hook
#include <stdatomic.h>

static void __attribute__((noinline, noreturn, disable_tail_calls))
thread_preempted(__unused void* parameter, __unused wait_result_t result)
{
    /*
     * We've been scheduled again after a userspace preemption;
     * try again to return to userspace.
     */
    thread_exception_return();
}

/*
 * AST_URGENT was detected while in kernel mode
 * Called with interrupts disabled, returns the same way
 * Must return to caller
 */
void
ast_taken_kernel(void)
{
    assert(ml_get_interrupts_enabled() == FALSE);

    thread_t thread = current_thread();

    /* Idle threads handle preemption themselves */
    if ((thread->state & TH_IDLE)) {
        ast_off(AST_PREEMPTION);
        return;
    }

    /*
     * It's possible for this to be called after AST_URGENT
     * has already been handled, due to races in enable_preemption
     */
    if (ast_peek(AST_URGENT) != AST_URGENT)
        return;

    /*
     * Don't preempt if the thread is already preparing to block.
     * TODO: the thread can cheese this with clear_wait()
     */
    if (waitq_wait_possible(thread) == FALSE) {
        /* Consume AST_URGENT or the interrupt will call us again */
        ast_consume(AST_URGENT);
        return;
    }

    /* TODO: Should we csw_check again to notice if conditions have changed? */

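    /*
     * Consuming the whole AST_PREEMPTION mask (which, per the definitions in
     * osfmk/kern/ast.h, includes AST_URGENT alongside AST_PREEMPT and
     * AST_QUANTUM) also clears the urgent bit, so the interrupt path will not
     * immediately re-enter this function.
     */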
    ast_t urgent_reason = ast_consume(AST_PREEMPTION);

    assert(urgent_reason & AST_PREEMPT);

    counter(c_ast_taken_block++);

    thread_block_reason(THREAD_CONTINUE_NULL, NULL, urgent_reason);

    assert(ml_get_interrupts_enabled() == FALSE);
}

/*
 * An AST flag was set while returning to user mode
 * Called with interrupts disabled, returns with interrupts enabled
 * May call continuation instead of returning
 */
void
ast_taken_user(void)
{
    assert(ml_get_interrupts_enabled() == FALSE);

    thread_t thread = current_thread();

    /* We are about to return to userspace, there must not be a pending wait */
    assert(waitq_wait_possible(thread));
    assert((thread->state & TH_IDLE) == 0);

    /* TODO: Add more 'return to userspace' assertions here */

    /*
     * If this thread was urgently preempted in userspace,
     * take the preemption before processing the ASTs.
     * The trap handler will call us again if we have more ASTs, so it's
     * safe to block in a continuation here.
     */
    if (ast_peek(AST_URGENT) == AST_URGENT) {
        ast_t urgent_reason = ast_consume(AST_PREEMPTION);

        assert(urgent_reason & AST_PREEMPT);

        /* TODO: Should we csw_check again to notice if conditions have changed? */

        thread_block_reason(thread_preempted, NULL, urgent_reason);
        /* NOTREACHED */
    }

    /*
     * AST_KEVENT does not send an IPI when setting the ast for a thread running in parallel
     * on a different processor. Only the ast bit on the thread will be set.
     *
     * Force a propagate for concurrent updates without an IPI.
     */
    ast_propagate(thread);

    /*
     * Consume all non-preemption processor ASTs matching reasons
     * because we're handling them here.
     *
     * If one of the AST handlers blocks in a continuation,
     * we'll reinstate the unserviced thread-level AST flags
     * from the thread to the processor on context switch.
     * If one of the AST handlers sets another AST,
     * the trap handler will call ast_taken_user again.
     *
     * We expect the AST handlers not to thread_exception_return
     * without an ast_propagate or context switch to reinstate
     * the per-processor ASTs.
     *
     * TODO: Why are AST_DTRACE and AST_KPERF not per-thread ASTs?
     */
    ast_t reasons = ast_consume(AST_PER_THREAD | AST_KPERF | AST_DTRACE);

    ml_set_interrupts_enabled(TRUE);

#if CONFIG_DTRACE
    if (reasons & AST_DTRACE) {
        dtrace_ast();
    }
#endif

#ifdef MACH_BSD
    if (reasons & AST_BSD) {
        thread_ast_clear(thread, AST_BSD);
        bsd_ast(thread);
    }
#endif

#if CONFIG_MACF
    if (reasons & AST_MACF) {
        thread_ast_clear(thread, AST_MACF);
        mac_thread_userret(thread);
    }
#endif

    if (reasons & AST_APC) {
        thread_ast_clear(thread, AST_APC);
        thread_apc_ast(thread);
    }

    if (reasons & AST_GUARD) {
        thread_ast_clear(thread, AST_GUARD);
        guard_ast(thread);
    }

    if (reasons & AST_LEDGER) {
        thread_ast_clear(thread, AST_LEDGER);
        ledger_ast(thread);
    }

    if (reasons & AST_KPERF) {
        thread_ast_clear(thread, AST_KPERF);
        kperf_kpc_thread_ast(thread);
    }

    if (reasons & AST_KEVENT) {
        thread_ast_clear(thread, AST_KEVENT);
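        /*
         * Fetch and clear the pending kevent bits in a single atomic step;
         * a thread on another processor may still be posting new bits
         * concurrently, since AST_KEVENT is set without an IPI (see the
         * comment above ast_propagate earlier in this function).
         */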
        uint16_t bits = atomic_exchange(&thread->kevent_ast_bits, 0);
        if (bits) kevent_ast(thread, bits);
    }

#if CONFIG_TELEMETRY
    if (reasons & AST_TELEMETRY_ALL) {
        ast_t telemetry_reasons = reasons & AST_TELEMETRY_ALL;
        thread_ast_clear(thread, AST_TELEMETRY_ALL);
        telemetry_ast(thread, telemetry_reasons);
    }
#endif

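    /*
     * Interrupts were enabled while the handlers above ran;
     * raise back to splsched before looking at the processor-level
     * SFI and preemption ASTs below.
     */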
    spl_t s = splsched();

#if CONFIG_SCHED_SFI
    /*
     * SFI is currently a per-processor AST, not a per-thread AST
     * TODO: SFI should be a per-thread AST
     */
    if (ast_consume(AST_SFI) == AST_SFI) {
        sfi_ast(thread);
    }
#endif

    /* We are about to return to userspace, there must not be a pending wait */
    assert(waitq_wait_possible(thread));

    /*
     * We've handled all per-thread ASTs; time to handle non-urgent preemption.
     *
     * We delay reading the preemption bits until now in case the thread
     * blocks while handling per-thread ASTs.
     *
     * If one of the AST handlers had managed to set a new AST bit,
     * thread_exception_return will call ast_taken again.
     */
    ast_t preemption_reasons = ast_consume(AST_PREEMPTION);

    if (preemption_reasons & AST_PREEMPT) {
        /* Conditions may have changed from when the AST_PREEMPT was originally set, so re-check. */

        thread_lock(thread);
        preemption_reasons = csw_check(current_processor(), (preemption_reasons & AST_QUANTUM));
        thread_unlock(thread);

#if CONFIG_SCHED_SFI
        /* csw_check might tell us that SFI is needed */
        if (preemption_reasons & AST_SFI) {
            sfi_ast(thread);
        }
#endif

        if (preemption_reasons & AST_PREEMPT) {
            counter(c_ast_taken_block++);
            /* switching to a continuation implicitly re-enables interrupts */
            thread_block_reason(thread_preempted, NULL, preemption_reasons);
            /* NOTREACHED */
        }
    }

    splx(s);
}

/*
 * Handle preemption IPI or IPI in response to setting an AST flag
 * Triggered by cause_ast_check
 * Called at splsched
 */
void
ast_check(processor_t processor)
{
    if (processor->state != PROCESSOR_RUNNING &&
        processor->state != PROCESSOR_SHUTDOWN)
        return;

    thread_t thread = processor->active_thread;

    assert(thread == current_thread());

    thread_lock(thread);

    /*
     * Propagate thread ast to processor.
     * (handles IPI in response to setting AST flag)
     */
    ast_propagate(thread);

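    /*
     * Refresh the processor's cached view of the running thread's
     * scheduling state (priority, SFI class, recommended pset type,
     * perfcontrol class).
     */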
    boolean_t needs_callout = false;
    processor->current_pri = thread->sched_pri;
    processor->current_sfi_class = thread->sfi_class = sfi_thread_classify(thread);
    processor->current_recommended_pset_type = recommended_pset_type(thread);
    perfcontrol_class_t thread_class = thread_get_perfcontrol_class(thread);
    if (thread_class != processor->current_perfctl_class) {
        /*
         * We updated the perfctl class of this thread from another core.
         * Since we don't do CLPC callouts from another core, do a callout
         * here to let CLPC know that the currently running thread has a new
         * class.
         */
        needs_callout = true;
    }
    processor->current_perfctl_class = thread_class;

    ast_t preempt;

    if ((preempt = csw_check(processor, AST_NONE)) != AST_NONE)
        ast_on(preempt);

    thread_unlock(thread);

    if (needs_callout) {
        machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE,
            mach_approximate_time(), 0, thread);
    }
}

/*
 * Set AST flags on current processor
 * Called at splsched
 */
void
ast_on(ast_t reasons)
{
    ast_t *pending_ast = ast_pending();

    *pending_ast |= reasons;
}

/*
 * Clear AST flags on current processor
 * Called at splsched
 */
void
ast_off(ast_t reasons)
{
    ast_t *pending_ast = ast_pending();

    *pending_ast &= ~reasons;
}

/*
 * Consume the requested subset of the AST flags set on the processor
 * Return the bits that were set
 * Called at splsched
 */
ast_t
ast_consume(ast_t reasons)
{
    ast_t *pending_ast = ast_pending();

    reasons &= *pending_ast;
    *pending_ast &= ~reasons;

    return reasons;
}

/*
 * Read the requested subset of the AST flags set on the processor
 * Return the bits that were set, don't modify the processor
 * Called at splsched
 */
ast_t
ast_peek(ast_t reasons)
{
    ast_t *pending_ast = ast_pending();

    reasons &= *pending_ast;

    return reasons;
}

/*
 * Re-set current processor's per-thread AST flags to those set on thread
 * Called at splsched
 */
void
ast_context(thread_t thread)
{
    ast_t *pending_ast = ast_pending();

    *pending_ast = ((*pending_ast & ~AST_PER_THREAD) | thread->ast);
}

/*
 * Propagate ASTs set on a thread to the current processor
 * Called at splsched
 */
void
ast_propagate(thread_t thread)
{
    ast_on(thread->ast);
}

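/*
 * Request a DTrace AST on the current processor
 * Thin wrapper around ast_on(AST_DTRACE), so it inherits ast_on's
 * requirement of being called at splsched
 */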
void
ast_dtrace_on(void)
{
    ast_on(AST_DTRACE);
}