/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <kern/assert.h>
#include <kern/clock.h>
#include <kern/coalition.h>
#include <kern/debug.h>
#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/kern_types.h>
#include <kern/machine.h>
#include <kern/simple_lock.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/sfi.h>
#include <kern/timer_call.h>
#include <kern/waitq.h>
#include <kern/ledger.h>
#include <pexpert/pexpert.h>

#include <libkern/kernel_mach_header.h>

#include <sys/kdebug.h>

#if CONFIG_SCHED_SFI

#define SFI_DEBUG 0

#if SFI_DEBUG
#define dprintf(...) kprintf(__VA_ARGS__)
#else
#define dprintf(...) do { } while(0)
#endif

#ifdef MACH_BSD
extern sched_call_t workqueue_get_sched_callback(void);
#endif /* MACH_BSD */

/*
 * SFI (Selective Forced Idle) operates by enabling a global
 * timer on the SFI window interval. When it fires, all processors
 * running a thread that should be SFI-ed are sent an AST.
 * As threads become runnable while in their "off phase", they
 * are placed on a deferred ready queue. When a per-class
 * "on timer" fires, the ready threads for that class are
 * re-enqueued for running. As an optimization to avoid spurious
 * wakeups, the timer may be lazily programmed.
 */
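
/*
 * Illustrative timeline (an editorial sketch, not part of the original
 * comments; assumes a 1 ms window with a single enabled class whose
 * off time is 250 us):
 *
 *   t = 0          global "off" timer fires; the class enters its off
 *                  phase and its "on" timer is armed for t = 250 us
 *   t = 0..250us   threads of that class becoming runnable are parked
 *                  on the class waitq instead of running
 *   t = 250us      per-class "on" timer fires; parked threads are
 *                  woken and re-enqueued
 *   t = 1 ms       global "off" timer fires again and the cycle repeats
 */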

/*
 * The "sfi_lock" simple lock guards access to static configuration
 * parameters (as specified by userspace), dynamic state changes
 * (as updated by the timer event routine), and timer data structures.
 * Since it can be taken with interrupts disabled in some cases, all
 * uses should be at splsched() with interrupts disabled. The
 * "sfi_lock" also guards the "sfi_wait_class" field of thread_t, which
 * must only be accessed with it held.
 *
 * When an "on timer" fires, we must deterministically be able to drain
 * the wait queue, since if any threads are added to the queue afterwards,
 * they may never get woken out of SFI wait. So sfi_lock must be
 * taken before the wait queue's own spinlock.
 *
 * The wait queue will take the thread's scheduling lock. We may also take
 * the thread_lock directly to update the "sfi_class" field and determine
 * if the thread should block in the wait queue, but the lock will be
 * released before doing so.
 *
 * The pset lock may also be taken, but not while any other locks are held.
 *
 * splsched ---> sfi_lock ---> waitq ---> thread_lock
 *        \  \              \__ thread_lock (*)
 *         \  \__ pset_lock
 *          \
 *           \__ thread_lock
 */
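
/*
 * A minimal sketch of the acquisition pattern this ordering implies
 * (editorial illustration; it mirrors the "on" timer path implemented
 * in sfi_timer_per_class_on() below):
 *
 *     spl_t s = splsched();
 *     simple_lock(&sfi_lock);
 *     ... waitq_wakeup64_all() takes the waitq lock, and each woken
 *         thread's thread_lock, strictly after sfi_lock ...
 *     simple_unlock(&sfi_lock);
 *     splx(s);
 */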

decl_simple_lock_data(static,sfi_lock);
static timer_call_data_t sfi_timer_call_entry;
volatile boolean_t sfi_is_enabled;

boolean_t sfi_window_is_set;
uint64_t sfi_window_usecs;
uint64_t sfi_window_interval;
uint64_t sfi_next_off_deadline;

typedef struct {
	sfi_class_id_t		class_id;
	thread_continue_t	class_continuation;
	const char		*class_name;
	const char		*class_ledger_name;
} sfi_class_registration_t;

/*
 * To add a new SFI class:
 *
 * 1) Raise MAX_SFI_CLASS_ID in mach/sfi_class.h
 * 2) Add a #define for it to mach/sfi_class.h. It need not be inserted in order of restrictiveness.
 * 3) Add a call to SFI_CLASS_REGISTER below
 * 4) Augment sfi_thread_classify to categorize threads into the new class, testing the most restrictive classes as early as possible.
 * 5) Modify thermald to use the SFI class
 */

static inline void _sfi_wait_cleanup(sched_call_t callback);

#define SFI_CLASS_REGISTER(class_id, ledger_name) \
extern char compile_time_assert_ ## class_id[SFI_CLASS_ ## class_id < MAX_SFI_CLASS_ID ? 1 : -1]; \
void __attribute__((noinline,noreturn)) SFI_ ## class_id ## _THREAD_IS_WAITING(void *callback, wait_result_t wret __unused); \
void SFI_ ## class_id ## _THREAD_IS_WAITING(void *callback, wait_result_t wret __unused) \
{ \
	_sfi_wait_cleanup(callback); \
	thread_exception_return(); \
} \
\
sfi_class_registration_t SFI_ ## class_id ## _registration __attribute__((section("__DATA,__sfi_class_reg"),used)) = { SFI_CLASS_ ## class_id, SFI_ ## class_id ## _THREAD_IS_WAITING, "SFI_CLASS_" # class_id, "SFI_CLASS_" # ledger_name };
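
/*
 * For reference, a sketch of what SFI_CLASS_REGISTER(MAINTENANCE, MAINTENANCE)
 * expands to (editorial illustration; the separate prototype and the
 * noinline/noreturn and section attributes are elided, whitespace adjusted):
 *
 *     extern char compile_time_assert_MAINTENANCE[
 *         SFI_CLASS_MAINTENANCE < MAX_SFI_CLASS_ID ? 1 : -1];
 *     void SFI_MAINTENANCE_THREAD_IS_WAITING(void *callback, wait_result_t wret)
 *     {
 *         _sfi_wait_cleanup(callback);
 *         thread_exception_return();
 *     }
 *     sfi_class_registration_t SFI_MAINTENANCE_registration = {
 *         SFI_CLASS_MAINTENANCE, SFI_MAINTENANCE_THREAD_IS_WAITING,
 *         "SFI_CLASS_MAINTENANCE", "SFI_CLASS_MAINTENANCE"
 *     };
 *
 * The negative array size turns an out-of-range class_id into a
 * compile-time error, and the section attribute gathers every
 * registration into __DATA,__sfi_class_reg, which
 * sfi_get_registration_data() walks at boot.
 */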

/* SFI_CLASS_UNSPECIFIED not included here */
SFI_CLASS_REGISTER(MAINTENANCE, MAINTENANCE)
SFI_CLASS_REGISTER(DARWIN_BG, DARWIN_BG)
SFI_CLASS_REGISTER(APP_NAP, APP_NAP)
SFI_CLASS_REGISTER(MANAGED_FOCAL, MANAGED)
SFI_CLASS_REGISTER(MANAGED_NONFOCAL, MANAGED)
SFI_CLASS_REGISTER(UTILITY, UTILITY)
SFI_CLASS_REGISTER(DEFAULT_FOCAL, DEFAULT)
SFI_CLASS_REGISTER(DEFAULT_NONFOCAL, DEFAULT)
SFI_CLASS_REGISTER(LEGACY_FOCAL, LEGACY)
SFI_CLASS_REGISTER(LEGACY_NONFOCAL, LEGACY)
SFI_CLASS_REGISTER(USER_INITIATED_FOCAL, USER_INITIATED)
SFI_CLASS_REGISTER(USER_INITIATED_NONFOCAL, USER_INITIATED)
SFI_CLASS_REGISTER(USER_INTERACTIVE_FOCAL, USER_INTERACTIVE)
SFI_CLASS_REGISTER(USER_INTERACTIVE_NONFOCAL, USER_INTERACTIVE)
SFI_CLASS_REGISTER(KERNEL, OPTED_OUT)
SFI_CLASS_REGISTER(OPTED_OUT, OPTED_OUT)

struct sfi_class_state {
	uint64_t	off_time_usecs;
	uint64_t	off_time_interval;

	timer_call_data_t	on_timer;
	uint64_t	on_timer_deadline;
	boolean_t	on_timer_programmed;

	boolean_t	class_sfi_is_enabled;
	volatile boolean_t	class_in_on_phase;

	struct waitq	waitq;		/* threads in ready state */
	thread_continue_t	continuation;

	const char	*class_name;
	const char	*class_ledger_name;
};

/* Static configuration performed in sfi_early_init() */
struct sfi_class_state sfi_classes[MAX_SFI_CLASS_ID];

int sfi_enabled_class_count;

static void sfi_timer_global_off(
	timer_call_param_t	param0,
	timer_call_param_t	param1);

static void sfi_timer_per_class_on(
	timer_call_param_t	param0,
	timer_call_param_t	param1);

static sfi_class_registration_t *
sfi_get_registration_data(unsigned long *count)
{
	unsigned long sectlen = 0;
	void *sectdata;

	sectdata = getsectdatafromheader(&_mh_execute_header, "__DATA", "__sfi_class_reg", &sectlen);
	if (sectdata) {
		if (sectlen % sizeof(sfi_class_registration_t) != 0) {
			/* corrupt data? */
			panic("__sfi_class_reg section has invalid size %lu", sectlen);
			__builtin_unreachable();
		}

		*count = sectlen / sizeof(sfi_class_registration_t);
		return (sfi_class_registration_t *)sectdata;
	} else {
		panic("__sfi_class_reg section not found");
		__builtin_unreachable();
	}
}

/* Called early in boot, when kernel is single-threaded */
void sfi_early_init(void)
{
	unsigned long i, count;
	sfi_class_registration_t *registrations;

	registrations = sfi_get_registration_data(&count);
	for (i = 0; i < count; i++) {
		sfi_class_id_t class_id = registrations[i].class_id;

		assert(class_id < MAX_SFI_CLASS_ID); /* should be caught at compile-time */
		if (class_id < MAX_SFI_CLASS_ID) {
			if (sfi_classes[class_id].continuation != NULL) {
				panic("Duplicate SFI registration for class 0x%x", class_id);
			}
			sfi_classes[class_id].class_sfi_is_enabled = FALSE;
			sfi_classes[class_id].class_in_on_phase = TRUE;
			sfi_classes[class_id].continuation = registrations[i].class_continuation;
			sfi_classes[class_id].class_name = registrations[i].class_name;
			sfi_classes[class_id].class_ledger_name = registrations[i].class_ledger_name;
		}
	}
}

void sfi_init(void)
{
	sfi_class_id_t i;
	kern_return_t kret;

	simple_lock_init(&sfi_lock, 0);
	timer_call_setup(&sfi_timer_call_entry, sfi_timer_global_off, NULL);
	sfi_window_is_set = FALSE;
	sfi_enabled_class_count = 0;
	sfi_is_enabled = FALSE;

	for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
		/* If the class was set up in sfi_early_init(), initialize remaining fields */
		if (sfi_classes[i].continuation) {
			timer_call_setup(&sfi_classes[i].on_timer, sfi_timer_per_class_on, (void *)(uintptr_t)i);
			sfi_classes[i].on_timer_programmed = FALSE;

			kret = waitq_init(&sfi_classes[i].waitq, SYNC_POLICY_FIFO|SYNC_POLICY_DISABLE_IRQ);
			assert(kret == KERN_SUCCESS);
		} else {
			/* The only allowed gap is for SFI_CLASS_UNSPECIFIED */
			if (i != SFI_CLASS_UNSPECIFIED) {
				panic("Gap in registered SFI classes");
			}
		}
	}
}

/* Can be called before sfi_init() by task initialization, but after sfi_early_init() */
sfi_class_id_t
sfi_get_ledger_alias_for_class(sfi_class_id_t class_id)
{
	sfi_class_id_t i;
	const char *ledger_name = NULL;

	ledger_name = sfi_classes[class_id].class_ledger_name;

	/* Find the first class in the registration table with this ledger name */
	if (ledger_name) {
		for (i = SFI_CLASS_UNSPECIFIED + 1; i < class_id; i++) {
			if (0 == strcmp(sfi_classes[i].class_ledger_name, ledger_name)) {
				dprintf("sfi_get_ledger_alias_for_class(0x%x) -> 0x%x\n", class_id, i);
				return i;
			}
		}

		/* This class is the primary one for the ledger, so there is no alias */
		dprintf("sfi_get_ledger_alias_for_class(0x%x) -> 0x%x\n", class_id, SFI_CLASS_UNSPECIFIED);
		return SFI_CLASS_UNSPECIFIED;
	}

	/* We are permissive on SFI class lookup failures. In sfi_init(), we assert more */
	return SFI_CLASS_UNSPECIFIED;
}
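
/*
 * Example (editorial note, assuming the _FOCAL variant of each pair has
 * the lower class ID, matching the registration order above): both
 * SFI_CLASS_MANAGED_FOCAL and SFI_CLASS_MANAGED_NONFOCAL register the
 * ledger name "SFI_CLASS_MANAGED", so
 * sfi_get_ledger_alias_for_class(SFI_CLASS_MANAGED_NONFOCAL) returns
 * SFI_CLASS_MANAGED_FOCAL, while the focal class itself is the primary
 * holder of the ledger and returns SFI_CLASS_UNSPECIFIED.
 */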

int
sfi_ledger_entry_add(ledger_template_t template, sfi_class_id_t class_id)
{
	const char *ledger_name = NULL;

	ledger_name = sfi_classes[class_id].class_ledger_name;

	dprintf("sfi_ledger_entry_add(%p, 0x%x) -> %s\n", template, class_id, ledger_name);
	return ledger_entry_add(template, ledger_name, "sfi", "MATUs");
}

static void sfi_timer_global_off(
	timer_call_param_t param0 __unused,
	timer_call_param_t param1 __unused)
{
	uint64_t now = mach_absolute_time();
	sfi_class_id_t i;
	processor_set_t pset, nset;
	processor_t processor;
	uint32_t needs_cause_ast_mask = 0x0;
	spl_t s;

	s = splsched();

	simple_lock(&sfi_lock);
	if (!sfi_is_enabled) {
		/* If SFI has been disabled, let all "on" timers drain naturally */
		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_OFF_TIMER) | DBG_FUNC_NONE, 1, 0, 0, 0, 0);

		simple_unlock(&sfi_lock);
		splx(s);
		return;
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_OFF_TIMER) | DBG_FUNC_START, 0, 0, 0, 0, 0);

	/* First set all configured classes into the off state, and program their "on" timer */
	for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
		if (sfi_classes[i].class_sfi_is_enabled) {
			uint64_t on_timer_deadline;

			sfi_classes[i].class_in_on_phase = FALSE;
			sfi_classes[i].on_timer_programmed = TRUE;

			/* Push out on-timer */
			on_timer_deadline = now + sfi_classes[i].off_time_interval;
			sfi_classes[i].on_timer_deadline = on_timer_deadline;

			timer_call_enter1(&sfi_classes[i].on_timer, NULL, on_timer_deadline, TIMER_CALL_SYS_CRITICAL);
		} else {
			/* If this class no longer needs SFI, make sure the timer is cancelled */
			sfi_classes[i].class_in_on_phase = TRUE;
			if (sfi_classes[i].on_timer_programmed) {
				sfi_classes[i].on_timer_programmed = FALSE;
				sfi_classes[i].on_timer_deadline = ~0ULL;
				timer_call_cancel(&sfi_classes[i].on_timer);
			}
		}
	}
	simple_unlock(&sfi_lock);

	/* Iterate over processors, call cause_ast_check() on ones running a thread that should be in an off phase */
	processor = processor_list;
	pset = processor->processor_set;

	pset_lock(pset);

	do {
		nset = processor->processor_set;
		if (nset != pset) {
			pset_unlock(pset);
			pset = nset;
			pset_lock(pset);
		}

		/* "processor" and its pset are locked */
		if (processor->state == PROCESSOR_RUNNING) {
			if (AST_NONE != sfi_processor_needs_ast(processor)) {
				needs_cause_ast_mask |= (1U << processor->cpu_id);
			}
		}
	} while ((processor = processor->processor_list) != NULL);

	pset_unlock(pset);

	processor = processor_list;
	do {
		if (needs_cause_ast_mask & (1U << processor->cpu_id)) {
			if (processor == current_processor())
				ast_on(AST_SFI);
			else
				cause_ast_check(processor);
		}
	} while ((processor = processor->processor_list) != NULL);

	/* Re-arm timer if still enabled */
	simple_lock(&sfi_lock);
	if (sfi_is_enabled) {
		clock_deadline_for_periodic_event(sfi_window_interval,
		    now,
		    &sfi_next_off_deadline);
		timer_call_enter1(&sfi_timer_call_entry,
		    NULL,
		    sfi_next_off_deadline,
		    TIMER_CALL_SYS_CRITICAL);
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_OFF_TIMER) | DBG_FUNC_END, 0, 0, 0, 0, 0);

	simple_unlock(&sfi_lock);

	splx(s);
}

static void sfi_timer_per_class_on(
	timer_call_param_t param0,
	timer_call_param_t param1 __unused)
{
	sfi_class_id_t sfi_class_id = (sfi_class_id_t)(uintptr_t)param0;
	struct sfi_class_state *sfi_class = &sfi_classes[sfi_class_id];
	kern_return_t kret;
	spl_t s;

	s = splsched();

	simple_lock(&sfi_lock);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_ON_TIMER) | DBG_FUNC_START, sfi_class_id, 0, 0, 0, 0);

	/*
	 * Any threads that may have accumulated in the ready queue for this class should get re-enqueued.
	 * Since we have the sfi_lock held and have changed "class_in_on_phase", we expect
	 * no new threads to be put on this wait queue until the global "off timer" has fired.
	 */

	sfi_class->class_in_on_phase = TRUE;
	sfi_class->on_timer_programmed = FALSE;

	kret = waitq_wakeup64_all(&sfi_class->waitq,
	    CAST_EVENT64_T(sfi_class_id),
	    THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
	assert(kret == KERN_SUCCESS || kret == KERN_NOT_WAITING);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_ON_TIMER) | DBG_FUNC_END, 0, 0, 0, 0, 0);

	simple_unlock(&sfi_lock);

	splx(s);
}

kern_return_t sfi_set_window(uint64_t window_usecs)
{
	uint64_t interval, deadline;
	uint64_t now = mach_absolute_time();
	sfi_class_id_t i;
	spl_t s;
	uint64_t largest_class_off_interval = 0;

	if (window_usecs < MIN_SFI_WINDOW_USEC)
		window_usecs = MIN_SFI_WINDOW_USEC;

	if (window_usecs > UINT32_MAX)
		return (KERN_INVALID_ARGUMENT);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_SET_WINDOW), window_usecs, 0, 0, 0, 0);

	clock_interval_to_absolutetime_interval((uint32_t)window_usecs, NSEC_PER_USEC, &interval);
	deadline = now + interval;

	s = splsched();

	simple_lock(&sfi_lock);

	/* Check that we are not bringing in the SFI window smaller than any class */
	for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
		if (sfi_classes[i].class_sfi_is_enabled) {
			largest_class_off_interval = MAX(largest_class_off_interval, sfi_classes[i].off_time_interval);
		}
	}

	/*
	 * The off window must be strictly greater than every enabled class's off time,
	 * otherwise threads would build up on the ready queue and never be able to run.
	 */
	if (interval <= largest_class_off_interval) {
		simple_unlock(&sfi_lock);
		splx(s);
		return (KERN_INVALID_ARGUMENT);
	}

	/*
	 * If the new "off" deadline is further out than the currently programmed timer,
	 * just let the current one expire (and the new cadence will be established thereafter).
	 * If the new "off" deadline is nearer than the current one, bring it in, so we
	 * can start the new behavior sooner. Note that this may cause the "off" timer to
	 * fire before some of the class "on" timers have fired.
	 */
	sfi_window_usecs = window_usecs;
	sfi_window_interval = interval;
	sfi_window_is_set = TRUE;

	if (sfi_enabled_class_count == 0) {
		/* Can't program timer yet */
	} else if (!sfi_is_enabled) {
		sfi_is_enabled = TRUE;
		sfi_next_off_deadline = deadline;
		timer_call_enter1(&sfi_timer_call_entry,
		    NULL,
		    sfi_next_off_deadline,
		    TIMER_CALL_SYS_CRITICAL);
	} else if (deadline >= sfi_next_off_deadline) {
		sfi_next_off_deadline = deadline;
	} else {
		sfi_next_off_deadline = deadline;
		timer_call_enter1(&sfi_timer_call_entry,
		    NULL,
		    sfi_next_off_deadline,
		    TIMER_CALL_SYS_CRITICAL);
	}

	simple_unlock(&sfi_lock);
	splx(s);

	return (KERN_SUCCESS);
}

kern_return_t sfi_window_cancel(void)
{
	spl_t s;

	s = splsched();

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_CANCEL_WINDOW), 0, 0, 0, 0, 0);

	/* Disable globals so that the global "off timer" is not re-armed */
	simple_lock(&sfi_lock);
	sfi_window_is_set = FALSE;
	sfi_window_usecs = 0;
	sfi_window_interval = 0;
	sfi_next_off_deadline = 0;
	sfi_is_enabled = FALSE;
	simple_unlock(&sfi_lock);

	splx(s);

	return (KERN_SUCCESS);
}

/*
 * Defers the SFI "off" and per-class "on" timers (if live) by the specified
 * interval in Mach Absolute Time Units. Currently invoked to align with the
 * global forced idle (GFI) mechanism. Making some simplifying assumptions,
 * the iterative GFI-induced SFI on+off deferrals form a geometric series
 * that converges to yield an effective SFI duty cycle that is scaled by the
 * GFI duty cycle. Initial phase alignment and congruency of the SFI/GFI
 * periods can distort this to some extent.
 */
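
/*
 * Rough sketch of the convergence claim (an editorial illustration under
 * the same simplifying assumptions): let W be the SFI window, d the
 * configured off fraction, and g the GFI duty cycle. If each window is
 * stretched by deferrals amounting to fraction g of the effective window
 * W', then
 *
 *     W' = W + g*W'  =>  W' = W * (1 + g + g^2 + ...) = W / (1 - g)
 *
 * so the effective off fraction becomes d*W / W' = d * (1 - g), i.e. the
 * SFI duty cycle is scaled by the GFI duty cycle, modulo the phase and
 * congruency caveats above.
 */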

kern_return_t sfi_defer(uint64_t sfi_defer_matus)
{
	spl_t s;
	kern_return_t kr = KERN_FAILURE;

	s = splsched();

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_GLOBAL_DEFER), sfi_defer_matus, 0, 0, 0, 0);

	simple_lock(&sfi_lock);
	if (!sfi_is_enabled) {
		goto sfi_defer_done;
	}

	assert(sfi_next_off_deadline != 0);

	sfi_next_off_deadline += sfi_defer_matus;
	timer_call_enter1(&sfi_timer_call_entry, NULL, sfi_next_off_deadline, TIMER_CALL_SYS_CRITICAL);

	int i;
	for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
		if (sfi_classes[i].class_sfi_is_enabled) {
			if (sfi_classes[i].on_timer_programmed) {
				uint64_t new_on_deadline = sfi_classes[i].on_timer_deadline + sfi_defer_matus;
				sfi_classes[i].on_timer_deadline = new_on_deadline;
				timer_call_enter1(&sfi_classes[i].on_timer, NULL, new_on_deadline, TIMER_CALL_SYS_CRITICAL);
			}
		}
	}

	kr = KERN_SUCCESS;
sfi_defer_done:
	simple_unlock(&sfi_lock);

	splx(s);

	return (kr);
}

kern_return_t sfi_get_window(uint64_t *window_usecs)
{
	spl_t s;
	uint64_t off_window_us;

	s = splsched();
	simple_lock(&sfi_lock);

	off_window_us = sfi_window_usecs;

	simple_unlock(&sfi_lock);
	splx(s);

	*window_usecs = off_window_us;

	return (KERN_SUCCESS);
}

kern_return_t sfi_set_class_offtime(sfi_class_id_t class_id, uint64_t offtime_usecs)
{
	uint64_t interval;
	spl_t s;
	uint64_t off_window_interval;

	if (offtime_usecs < MIN_SFI_WINDOW_USEC)
		offtime_usecs = MIN_SFI_WINDOW_USEC;

	if (class_id == SFI_CLASS_UNSPECIFIED || class_id >= MAX_SFI_CLASS_ID)
		return (KERN_INVALID_ARGUMENT);

	if (offtime_usecs > UINT32_MAX)
		return (KERN_INVALID_ARGUMENT);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_SET_CLASS_OFFTIME), offtime_usecs, class_id, 0, 0, 0);

	clock_interval_to_absolutetime_interval((uint32_t)offtime_usecs, NSEC_PER_USEC, &interval);

	s = splsched();

	simple_lock(&sfi_lock);
	off_window_interval = sfi_window_interval;

	/* Check that we are not bringing in class off-time larger than the SFI window */
	if (off_window_interval && (interval >= off_window_interval)) {
		simple_unlock(&sfi_lock);
		splx(s);
		return (KERN_INVALID_ARGUMENT);
	}

	/* We never re-program the per-class on-timer, but rather just let it expire naturally */
	if (!sfi_classes[class_id].class_sfi_is_enabled) {
		sfi_enabled_class_count++;
	}
	sfi_classes[class_id].off_time_usecs = offtime_usecs;
	sfi_classes[class_id].off_time_interval = interval;
	sfi_classes[class_id].class_sfi_is_enabled = TRUE;

	if (sfi_window_is_set && !sfi_is_enabled) {
		/* start global off timer */
		sfi_is_enabled = TRUE;
		sfi_next_off_deadline = mach_absolute_time() + sfi_window_interval;
		timer_call_enter1(&sfi_timer_call_entry,
		    NULL,
		    sfi_next_off_deadline,
		    TIMER_CALL_SYS_CRITICAL);
	}

	simple_unlock(&sfi_lock);

	splx(s);

	return (KERN_SUCCESS);
}
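
/*
 * Worked example (editorial note, with illustrative numbers): a policy
 * daemon such as thermald (per the comment near the top of this file)
 * that calls sfi_set_window(10000) and then
 * sfi_set_class_offtime(SFI_CLASS_MANAGED_FOCAL, 2500) establishes a
 * 10 ms window in which managed focal threads are forced idle for the
 * first 2.5 ms, i.e. a 25% off duty cycle. The off time must stay
 * strictly smaller than the window: both entry points reject a
 * violating configuration with KERN_INVALID_ARGUMENT.
 */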

kern_return_t sfi_class_offtime_cancel(sfi_class_id_t class_id)
{
	spl_t s;

	if (class_id == SFI_CLASS_UNSPECIFIED || class_id >= MAX_SFI_CLASS_ID)
		return (KERN_INVALID_ARGUMENT);

	s = splsched();

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_CANCEL_CLASS_OFFTIME), class_id, 0, 0, 0, 0);

	simple_lock(&sfi_lock);

	/* We never re-program the per-class on-timer, but rather just let it expire naturally */
	if (sfi_classes[class_id].class_sfi_is_enabled) {
		sfi_enabled_class_count--;
	}
	sfi_classes[class_id].off_time_usecs = 0;
	sfi_classes[class_id].off_time_interval = 0;
	sfi_classes[class_id].class_sfi_is_enabled = FALSE;

	if (sfi_enabled_class_count == 0) {
		sfi_is_enabled = FALSE;
	}

	simple_unlock(&sfi_lock);

	splx(s);

	return (KERN_SUCCESS);
}

kern_return_t sfi_get_class_offtime(sfi_class_id_t class_id, uint64_t *offtime_usecs)
{
	uint64_t off_time_us;
	spl_t s;

	if (class_id == SFI_CLASS_UNSPECIFIED || class_id >= MAX_SFI_CLASS_ID)
		return (KERN_INVALID_ARGUMENT);

	s = splsched();

	simple_lock(&sfi_lock);
	off_time_us = sfi_classes[class_id].off_time_usecs;
	simple_unlock(&sfi_lock);

	splx(s);

	*offtime_usecs = off_time_us;

	return (KERN_SUCCESS);
}

/*
 * sfi_thread_classify and sfi_processor_active_thread_classify perform the critical
 * role of quickly categorizing a thread into its SFI class so that an AST_SFI can be
 * set. As the thread is unwinding to userspace, sfi_ast() performs full locking
 * and determines whether the thread should enter an SFI wait state. Because of
 * the inherent races between the time the AST is set and when it is evaluated,
 * thread classification can be inaccurate (but should always be safe). This is
 * especially the case for sfi_processor_active_thread_classify, which must
 * classify the active thread on a remote processor without taking the thread lock.
 * When in doubt, classification should err on the side of *not* classifying a
 * thread at all, and wait for the thread itself to either hit a quantum expiration
 * or block inside the kernel.
 */

/*
 * Thread must be locked. Ultimately, the real decision to enter
 * SFI wait happens at the AST boundary.
 */
sfi_class_id_t sfi_thread_classify(thread_t thread)
{
	task_t task = thread->task;
	boolean_t is_kernel_thread = (task == kernel_task);
	sched_mode_t thmode = thread->sched_mode;
	int latency_qos = proc_get_effective_task_policy(task, TASK_POLICY_LATENCY_QOS);
	int task_role = proc_get_effective_task_policy(task, TASK_POLICY_ROLE);
	int thread_bg = proc_get_effective_thread_policy(thread, TASK_POLICY_DARWIN_BG);
	int managed_task = proc_get_effective_task_policy(task, TASK_POLICY_SFI_MANAGED);
	int thread_qos = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS);
	boolean_t focal = FALSE;

	/* kernel threads never reach the user AST boundary, and are in a separate world for SFI */
	if (is_kernel_thread) {
		return SFI_CLASS_KERNEL;
	}

	if (thread_qos == THREAD_QOS_MAINTENANCE)
		return SFI_CLASS_MAINTENANCE;

	if (thread_bg || thread_qos == THREAD_QOS_BACKGROUND) {
		return SFI_CLASS_DARWIN_BG;
	}

	if (latency_qos != 0) {
		int latency_qos_wtf = latency_qos - 1;

		if ((latency_qos_wtf >= 4) && (latency_qos_wtf <= 5)) {
			return SFI_CLASS_APP_NAP;
		}
	}

	/*
	 * Realtime and fixed priority threads express their duty cycle constraints
	 * via other mechanisms, and are opted out of (most) forms of SFI
	 */
	if (thmode == TH_MODE_REALTIME || thmode == TH_MODE_FIXED || task_role == TASK_GRAPHICS_SERVER) {
		return SFI_CLASS_OPTED_OUT;
	}

	/*
	 * Threads with unspecified, legacy, or user-initiated QOS class can be individually managed.
	 */
	switch (task_role) {
	case TASK_CONTROL_APPLICATION:
	case TASK_FOREGROUND_APPLICATION:
		focal = TRUE;
		break;
	case TASK_BACKGROUND_APPLICATION:
	case TASK_DEFAULT_APPLICATION:
	case TASK_THROTTLE_APPLICATION:
	case TASK_UNSPECIFIED:
		/* Focal if the task is in a coalition with a FG/focal app */
		if (task_coalition_focal_count(thread->task) > 0)
			focal = TRUE;
		break;
	default:
		break;
	}

	if (managed_task) {
		switch (thread_qos) {
		case THREAD_QOS_UNSPECIFIED:
		case THREAD_QOS_LEGACY:
		case THREAD_QOS_USER_INITIATED:
			if (focal)
				return SFI_CLASS_MANAGED_FOCAL;
			else
				return SFI_CLASS_MANAGED_NONFOCAL;
		default:
			break;
		}
	}

	if (thread_qos == THREAD_QOS_UTILITY)
		return SFI_CLASS_UTILITY;

	/*
	 * Classify threads in non-managed tasks
	 */
	if (focal) {
		switch (thread_qos) {
		case THREAD_QOS_USER_INTERACTIVE:
			return SFI_CLASS_USER_INTERACTIVE_FOCAL;
		case THREAD_QOS_USER_INITIATED:
			return SFI_CLASS_USER_INITIATED_FOCAL;
		case THREAD_QOS_LEGACY:
			return SFI_CLASS_LEGACY_FOCAL;
		default:
			return SFI_CLASS_DEFAULT_FOCAL;
		}
	} else {
		switch (thread_qos) {
		case THREAD_QOS_USER_INTERACTIVE:
			return SFI_CLASS_USER_INTERACTIVE_NONFOCAL;
		case THREAD_QOS_USER_INITIATED:
			return SFI_CLASS_USER_INITIATED_NONFOCAL;
		case THREAD_QOS_LEGACY:
			return SFI_CLASS_LEGACY_NONFOCAL;
		default:
			return SFI_CLASS_DEFAULT_NONFOCAL;
		}
	}
}

/*
 * pset must be locked.
 */
sfi_class_id_t sfi_processor_active_thread_classify(processor_t processor)
{
	return processor->current_sfi_class;
}

/*
 * thread must be locked. This is inherently racy, with the intent that
 * at the AST boundary, it will be fully evaluated whether the thread
 * actually needs to enter an SFI wait.
 */
ast_t sfi_thread_needs_ast(thread_t thread, sfi_class_id_t *out_class)
{
	sfi_class_id_t class_id;

	class_id = sfi_thread_classify(thread);

	if (out_class)
		*out_class = class_id;

	/* No lock taken, so a stale value may be used. */
	if (!sfi_classes[class_id].class_in_on_phase)
		return AST_SFI;
	else
		return AST_NONE;
}

/*
 * pset must be locked. We take the SFI class for
 * the currently running thread which is cached on
 * the processor_t, and assume it is accurate. In the
 * worst case, the processor will get an IPI and be asked
 * to evaluate if the current running thread at that
 * later point in time should be in an SFI wait.
 */
ast_t sfi_processor_needs_ast(processor_t processor)
{
	sfi_class_id_t class_id;

	class_id = sfi_processor_active_thread_classify(processor);

	/* No lock taken, so a stale value may be used. */
	if (!sfi_classes[class_id].class_in_on_phase)
		return AST_SFI;
	else
		return AST_NONE;
}

static inline void _sfi_wait_cleanup(sched_call_t callback)
{
	thread_t self = current_thread();
	sfi_class_id_t current_sfi_wait_class = SFI_CLASS_UNSPECIFIED;
	int64_t sfi_wait_time, sfi_wait_begin = 0;

	spl_t s = splsched();
	thread_lock(self);
	if (callback) {
		thread_sched_call(self, callback);
	}
	sfi_wait_begin = self->wait_sfi_begin_time;
	thread_unlock(self);

	simple_lock(&sfi_lock);
	sfi_wait_time = mach_absolute_time() - sfi_wait_begin;
	current_sfi_wait_class = self->sfi_wait_class;
	self->sfi_wait_class = SFI_CLASS_UNSPECIFIED;
	simple_unlock(&sfi_lock);
	splx(s);

	assert((SFI_CLASS_UNSPECIFIED < current_sfi_wait_class) && (current_sfi_wait_class < MAX_SFI_CLASS_ID));
	ledger_credit(self->task->ledger, task_ledgers.sfi_wait_times[current_sfi_wait_class], sfi_wait_time);
}

/*
 * Called at AST context to fully evaluate if the current thread
 * (which is obviously running) should instead block in an SFI wait.
 * We must take the sfi_lock to check whether we are in the "off" period
 * for the class, and if so, block.
 */
void sfi_ast(thread_t thread)
{
	sfi_class_id_t class_id;
	spl_t s;
	struct sfi_class_state *sfi_class;
	wait_result_t waitret;
	boolean_t did_wait = FALSE;
	uint64_t tid;
	thread_continue_t continuation;
	sched_call_t workq_callback = workqueue_get_sched_callback();
	boolean_t did_clear_wq = FALSE;

	s = splsched();

	simple_lock(&sfi_lock);

	if (!sfi_is_enabled) {
		/*
		 * SFI is not enabled, or has recently been disabled.
		 * There is no point putting this thread on a deferred ready
		 * queue, even if it were classified as needing it, since
		 * SFI will truly be off at the next global off timer
		 */
		simple_unlock(&sfi_lock);
		splx(s);

		return;
	}

	thread_lock(thread);
	thread->sfi_class = class_id = sfi_thread_classify(thread);
	tid = thread_tid(thread);

	/*
	 * Once the sfi_lock is taken and the thread's ->sfi_class field is updated, we
	 * are committed to transitioning to whatever state is indicated by "->class_in_on_phase".
	 * If another thread tries to call sfi_reevaluate() after this point, it will take the
	 * sfi_lock and see the thread in this wait state. If another thread calls
	 * sfi_reevaluate() before this point, it would see a runnable thread and at most
	 * attempt to send an AST to this processor, but we would have the most accurate
	 * classification.
	 */

	/* Optimistically clear workq callback while thread is already locked */
	if (workq_callback && (thread->sched_call == workq_callback)) {
		thread_sched_call(thread, NULL);
		did_clear_wq = TRUE;
	}
	thread_unlock(thread);

	sfi_class = &sfi_classes[class_id];
	if (!sfi_class->class_in_on_phase) {
		/* Need to block thread in wait queue */
		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_THREAD_DEFER), tid, class_id, 0, 0, 0);

		waitret = waitq_assert_wait64(&sfi_class->waitq,
		    CAST_EVENT64_T(class_id),
		    THREAD_INTERRUPTIBLE,
		    0);
		if (waitret == THREAD_WAITING) {
			thread->sfi_wait_class = class_id;
			did_wait = TRUE;
			continuation = sfi_class->continuation;
		} else {
			/* thread may be exiting already, all other errors are unexpected */
			assert(waitret == THREAD_INTERRUPTED);
		}
	}
	simple_unlock(&sfi_lock);

	splx(s);

	if (did_wait) {
		thread_block_reason(continuation, did_clear_wq ? workq_callback : NULL, AST_SFI);
	} else if (did_clear_wq) {
		s = splsched();
		thread_lock(thread);
		thread_sched_call(thread, workq_callback);
		thread_unlock(thread);
		splx(s);
	}
}

/* Thread must be unlocked */
void sfi_reevaluate(thread_t thread)
{
	kern_return_t kret;
	spl_t s;
	sfi_class_id_t class_id, current_class_id;
	ast_t sfi_ast;

	s = splsched();

	simple_lock(&sfi_lock);

	thread_lock(thread);
	sfi_ast = sfi_thread_needs_ast(thread, &class_id);
	thread->sfi_class = class_id;

	/*
	 * This routine chiefly exists to boost threads out of an SFI wait
	 * if their classification changes before the "on" timer fires.
	 *
	 * If we calculate that a thread is in a different ->sfi_wait_class
	 * than we think it should be (including no-SFI-wait), we need to
	 * correct that:
	 *
	 * If the thread is in SFI wait and should not be (or should be waiting
	 * on a different class' "on" timer), we wake it up. If needed, the
	 * thread may immediately block again in the different SFI wait state.
	 *
	 * If the thread is not in an SFI wait state and it should be, we need
	 * to get that thread's attention, possibly by sending an AST to another
	 * processor.
	 */

	if ((current_class_id = thread->sfi_wait_class) != SFI_CLASS_UNSPECIFIED) {
		thread_unlock(thread); /* not needed anymore */

		assert(current_class_id < MAX_SFI_CLASS_ID);

		if ((sfi_ast == AST_NONE) || (class_id != current_class_id)) {
			struct sfi_class_state *sfi_class = &sfi_classes[current_class_id];

			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_WAIT_CANCELED), thread_tid(thread), current_class_id, class_id, 0, 0);

			kret = waitq_wakeup64_thread(&sfi_class->waitq,
			    CAST_EVENT64_T(current_class_id),
			    thread,
			    THREAD_AWAKENED);
			assert(kret == KERN_SUCCESS || kret == KERN_NOT_WAITING);
		}
	} else {
		/*
		 * Thread's current SFI wait class is not set, and because we
		 * have the sfi_lock, it won't get set.
		 */

		if ((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN) {
			if (sfi_ast != AST_NONE) {
				if (thread == current_thread())
					ast_on(sfi_ast);
				else {
					processor_t processor = thread->last_processor;

					if (processor != PROCESSOR_NULL &&
					    processor->state == PROCESSOR_RUNNING &&
					    processor->active_thread == thread) {
						cause_ast_check(processor);
					} else {
						/*
						 * Runnable thread that's not on a CPU currently. When a processor
						 * does context switch to it, the AST will get set based on whether
						 * the thread is in its "off time".
						 */
					}
				}
			}
		}

		thread_unlock(thread);
	}

	simple_unlock(&sfi_lock);
	splx(s);
}

#else /* !CONFIG_SCHED_SFI */

kern_return_t sfi_set_window(uint64_t window_usecs __unused)
{
	return (KERN_NOT_SUPPORTED);
}

kern_return_t sfi_window_cancel(void)
{
	return (KERN_NOT_SUPPORTED);
}

kern_return_t sfi_get_window(uint64_t *window_usecs __unused)
{
	return (KERN_NOT_SUPPORTED);
}

kern_return_t sfi_set_class_offtime(sfi_class_id_t class_id __unused, uint64_t offtime_usecs __unused)
{
	return (KERN_NOT_SUPPORTED);
}

kern_return_t sfi_class_offtime_cancel(sfi_class_id_t class_id __unused)
{
	return (KERN_NOT_SUPPORTED);
}

kern_return_t sfi_get_class_offtime(sfi_class_id_t class_id __unused, uint64_t *offtime_usecs __unused)
{
	return (KERN_NOT_SUPPORTED);
}

void sfi_reevaluate(thread_t thread __unused)
{
	return;
}

sfi_class_id_t sfi_thread_classify(thread_t thread)
{
	task_t task = thread->task;
	boolean_t is_kernel_thread = (task == kernel_task);

	if (is_kernel_thread) {
		return SFI_CLASS_KERNEL;
	}

	return SFI_CLASS_OPTED_OUT;
}

#endif /* !CONFIG_SCHED_SFI */