/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <kern/assert.h>
#include <kern/clock.h>
#include <kern/coalition.h>
#include <kern/debug.h>
#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/kern_types.h>
#include <kern/machine.h>
#include <kern/simple_lock.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/sfi.h>
#include <kern/timer_call.h>
#include <kern/waitq.h>
#include <kern/ledger.h>
#include <kern/policy_internal.h>

#include <pexpert/pexpert.h>

#include <libkern/kernel_mach_header.h>

#include <sys/kdebug.h>
#if CONFIG_SCHED_SFI

#define SFI_DEBUG	0

#if SFI_DEBUG
#define dprintf(...) kprintf(__VA_ARGS__)
#else
#define dprintf(...) do { } while(0)
#endif
extern sched_call_t workqueue_get_sched_callback(void);
/*
 * SFI (Selective Forced Idle) operates by enabling a global
 * timer on the SFI window interval. When it fires, all processors
 * running a thread that should be SFI-ed are sent an AST.
 * As threads become runnable while in their "off phase", they
 * are placed on a deferred ready queue. When a per-class
 * "on timer" fires, the ready threads for that class are
 * re-enqueued for running. As an optimization to avoid spurious
 * wakeups, the timer may be lazily programmed.
 */
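/*
 * Illustrative configuration (a sketch of the userspace-visible knobs
 * implemented below, normally driven by thermald rather than called
 * directly; the specific values are examples only): a 100ms window with
 * a 20ms off-time for the utility class forces SFI_CLASS_UTILITY threads
 * idle for 20% of each window:
 *
 *	kern_return_t kr;
 *	kr = sfi_set_window(100000);                          // 100ms SFI window, in usecs
 *	assert(kr == KERN_SUCCESS);
 *	kr = sfi_set_class_offtime(SFI_CLASS_UTILITY, 20000); // 20ms forced-idle per window
 *	assert(kr == KERN_SUCCESS);
 */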
/*
 * The "sfi_lock" simple lock guards access to static configuration
 * parameters (as specified by userspace), dynamic state changes
 * (as updated by the timer event routine), and timer data structures.
 * Since it can be taken with interrupts disabled in some cases, all
 * uses should be taken with interrupts disabled at splsched(). The
 * "sfi_lock" also guards the "sfi_wait_class" field of thread_t, which
 * must only be accessed with the lock held.
 *
 * When an "on timer" fires, we must deterministically be able to drain
 * the wait queue, since if any threads are added to the queue afterwards,
 * they may never get woken out of SFI wait. So sfi_lock must be
 * taken before the wait queue's own spinlock.
 *
 * The wait queue will take the thread's scheduling lock. We may also take
 * the thread_lock directly to update the "sfi_class" field and determine
 * if the thread should block in the wait queue, but the lock will be
 * released before doing so.
 *
 * The pset lock may also be taken, but not while any other locks are held.
 *
 * The task and thread mutex may also be held while reevaluating sfi state.
 *
 * Lock ordering:
 *
 *	splsched ---> sfi_lock ---> waitq ---> thread_lock
 *	       \  \             \__ thread_lock (*)
 *	        \  \__ pset_lock
 *	         \
 *	          \__ thread_lock
 */
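/*
 * For example, the acquisition sequence used by sfi_ast() below follows
 * the left spine of the diagram:
 *
 *	s = splsched();
 *	simple_lock(&sfi_lock);
 *	thread_lock(thread);
 *	...
 *	thread_unlock(thread);
 *	...
 *	simple_unlock(&sfi_lock);
 *	splx(s);
 */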
decl_simple_lock_data(static, sfi_lock);
static timer_call_data_t	sfi_timer_call_entry;
volatile boolean_t	sfi_is_enabled;

boolean_t	sfi_window_is_set;
uint64_t	sfi_window_usecs;
uint64_t	sfi_window_interval;
uint64_t	sfi_next_off_deadline;

typedef struct {
	sfi_class_id_t		class_id;
	thread_continue_t	class_continuation;
	const char		*class_name;
	const char		*class_ledger_name;
} sfi_class_registration_t;
/*
 * To add a new SFI class:
 *
 * 1) Raise MAX_SFI_CLASS_ID in mach/sfi_class.h
 * 2) Add a #define for it to mach/sfi_class.h. It need not be inserted in order of restrictiveness.
 * 3) Add a call to SFI_CLASS_REGISTER below
 * 4) Augment sfi_thread_classify to categorize threads, testing for the most restrictive applicable class as early as possible
 * 5) Modify thermald to use the SFI class
 */
static inline void _sfi_wait_cleanup(sched_call_t callback);
#define SFI_CLASS_REGISTER(class_id, ledger_name)							\
extern char compile_time_assert_ ## class_id[SFI_CLASS_ ## class_id < MAX_SFI_CLASS_ID ? 1 : -1];	\
void __attribute__((noinline, noreturn)) SFI_ ## class_id ## _THREAD_IS_WAITING(void *callback, wait_result_t wret __unused);	\
void SFI_ ## class_id ## _THREAD_IS_WAITING(void *callback, wait_result_t wret __unused)		\
{													\
	_sfi_wait_cleanup(callback);									\
	thread_exception_return();									\
}													\
													\
sfi_class_registration_t SFI_ ## class_id ## _registration __attribute__((section("__DATA,__sfi_class_reg"), used)) = { SFI_CLASS_ ## class_id, SFI_ ## class_id ## _THREAD_IS_WAITING, "SFI_CLASS_" # class_id, "SFI_CLASS_" # ledger_name };
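/*
 * For reference, SFI_CLASS_REGISTER(MAINTENANCE, MAINTENANCE) below expands
 * to (roughly):
 *
 *	extern char compile_time_assert_MAINTENANCE[...];     // compile-time range check
 *	void SFI_MAINTENANCE_THREAD_IS_WAITING(void *callback, wait_result_t wret)
 *	{
 *		_sfi_wait_cleanup(callback);
 *		thread_exception_return();
 *	}
 *	sfi_class_registration_t SFI_MAINTENANCE_registration = {
 *		SFI_CLASS_MAINTENANCE, SFI_MAINTENANCE_THREAD_IS_WAITING,
 *		"SFI_CLASS_MAINTENANCE", "SFI_CLASS_MAINTENANCE"
 *	};
 *
 * placing the registration record in the __DATA,__sfi_class_reg section that
 * sfi_get_registration_data() walks at boot.
 */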
/* SFI_CLASS_UNSPECIFIED not included here */
SFI_CLASS_REGISTER(MAINTENANCE, MAINTENANCE)
SFI_CLASS_REGISTER(DARWIN_BG, DARWIN_BG)
SFI_CLASS_REGISTER(APP_NAP, APP_NAP)
SFI_CLASS_REGISTER(MANAGED_FOCAL, MANAGED)
SFI_CLASS_REGISTER(MANAGED_NONFOCAL, MANAGED)
SFI_CLASS_REGISTER(UTILITY, UTILITY)
SFI_CLASS_REGISTER(DEFAULT_FOCAL, DEFAULT)
SFI_CLASS_REGISTER(DEFAULT_NONFOCAL, DEFAULT)
SFI_CLASS_REGISTER(LEGACY_FOCAL, LEGACY)
SFI_CLASS_REGISTER(LEGACY_NONFOCAL, LEGACY)
SFI_CLASS_REGISTER(USER_INITIATED_FOCAL, USER_INITIATED)
SFI_CLASS_REGISTER(USER_INITIATED_NONFOCAL, USER_INITIATED)
SFI_CLASS_REGISTER(USER_INTERACTIVE_FOCAL, USER_INTERACTIVE)
SFI_CLASS_REGISTER(USER_INTERACTIVE_NONFOCAL, USER_INTERACTIVE)
SFI_CLASS_REGISTER(KERNEL, OPTED_OUT)
SFI_CLASS_REGISTER(OPTED_OUT, OPTED_OUT)
struct sfi_class_state {
	uint64_t	off_time_usecs;		/* saved off-time period in usecs */
	uint64_t	off_time_interval;	/* saved off-time period in absolute time */

	timer_call_data_t	on_timer;	/* timer to fire at the end of this class's off time */
	uint64_t	on_timer_deadline;
	boolean_t	on_timer_programmed;

	boolean_t	class_sfi_is_enabled;
	volatile boolean_t	class_in_on_phase;

	struct waitq	waitq;			/* threads in ready state */
	thread_continue_t	continuation;	/* set at registration time */

	const char	*class_name;
	const char	*class_ledger_name;
};

/* Static configuration performed in sfi_early_init() */
struct sfi_class_state sfi_classes[MAX_SFI_CLASS_ID];

int sfi_enabled_class_count;
static void sfi_timer_global_off(
	timer_call_param_t	param0,
	timer_call_param_t	param1);

static void sfi_timer_per_class_on(
	timer_call_param_t	param0,
	timer_call_param_t	param1);
static sfi_class_registration_t *
sfi_get_registration_data(unsigned long *count)
{
	unsigned long sectlen = 0;
	void *sectdata;

	sectdata = getsectdatafromheader(&_mh_execute_header, "__DATA", "__sfi_class_reg", &sectlen);
	if (sectdata) {
		if (sectlen % sizeof(sfi_class_registration_t) != 0) {
			/* corrupt data? */
			panic("__sfi_class_reg section has invalid size %lu", sectlen);
			__builtin_unreachable();
		}

		*count = sectlen / sizeof(sfi_class_registration_t);
		return (sfi_class_registration_t *)sectdata;
	} else {
		panic("__sfi_class_reg section not found");
		__builtin_unreachable();
	}
}
/* Called early in boot, when kernel is single-threaded */
void sfi_early_init(void)
{
	unsigned long i, count;
	sfi_class_registration_t *registrations;

	registrations = sfi_get_registration_data(&count);
	for (i = 0; i < count; i++) {
		sfi_class_id_t class_id = registrations[i].class_id;

		assert(class_id < MAX_SFI_CLASS_ID); /* should be caught at compile-time */
		if (class_id < MAX_SFI_CLASS_ID) {
			if (sfi_classes[class_id].continuation != NULL) {
				panic("Duplicate SFI registration for class 0x%x", class_id);
			}
			sfi_classes[class_id].class_sfi_is_enabled = FALSE;
			sfi_classes[class_id].class_in_on_phase = TRUE;
			sfi_classes[class_id].continuation = registrations[i].class_continuation;
			sfi_classes[class_id].class_name = registrations[i].class_name;
			sfi_classes[class_id].class_ledger_name = registrations[i].class_ledger_name;
		}
	}
}
void sfi_init(void)
{
	sfi_class_id_t i;
	kern_return_t kret;

	simple_lock_init(&sfi_lock, 0);
	timer_call_setup(&sfi_timer_call_entry, sfi_timer_global_off, NULL);
	sfi_window_is_set = FALSE;
	sfi_enabled_class_count = 0;
	sfi_is_enabled = FALSE;

	for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
		/* If the class was set up in sfi_early_init(), initialize remaining fields */
		if (sfi_classes[i].continuation) {
			timer_call_setup(&sfi_classes[i].on_timer, sfi_timer_per_class_on, (void *)(uintptr_t)i);
			sfi_classes[i].on_timer_programmed = FALSE;

			kret = waitq_init(&sfi_classes[i].waitq, SYNC_POLICY_FIFO | SYNC_POLICY_DISABLE_IRQ);
			assert(kret == KERN_SUCCESS);
		} else {
			/* The only allowed gap is for SFI_CLASS_UNSPECIFIED */
			if (i != SFI_CLASS_UNSPECIFIED) {
				panic("Gap in registered SFI classes");
			}
		}
	}
}
/* Can be called before sfi_init() by task initialization, but after sfi_early_init() */
sfi_class_id_t
sfi_get_ledger_alias_for_class(sfi_class_id_t class_id)
{
	sfi_class_id_t i;
	const char *ledger_name = NULL;

	ledger_name = sfi_classes[class_id].class_ledger_name;

	/* Find the first class in the registration table with this ledger name */
	if (ledger_name) {
		for (i = SFI_CLASS_UNSPECIFIED + 1; i < class_id; i++) {
			if (0 == strcmp(sfi_classes[i].class_ledger_name, ledger_name)) {
				dprintf("sfi_get_ledger_alias_for_class(0x%x) -> 0x%x\n", class_id, i);
				return i;
			}
		}

		/* This class is the primary one for the ledger, so there is no alias */
		dprintf("sfi_get_ledger_alias_for_class(0x%x) -> 0x%x\n", class_id, SFI_CLASS_UNSPECIFIED);
		return SFI_CLASS_UNSPECIFIED;
	}

	/* We are permissive on SFI class lookup failures. In sfi_init(), we assert more */
	return SFI_CLASS_UNSPECIFIED;
}
int
sfi_ledger_entry_add(ledger_template_t template, sfi_class_id_t class_id)
{
	const char *ledger_name = NULL;

	ledger_name = sfi_classes[class_id].class_ledger_name;

	dprintf("sfi_ledger_entry_add(%p, 0x%x) -> %s\n", template, class_id, ledger_name);
	return ledger_entry_add(template, ledger_name, "sfi", "MATUs");
}
static void sfi_timer_global_off(
	timer_call_param_t	param0 __unused,
	timer_call_param_t	param1 __unused)
{
	uint64_t	now = mach_absolute_time();
	sfi_class_id_t	i;
	processor_set_t	pset, nset;
	processor_t	processor;
	uint32_t	needs_cause_ast_mask = 0x0;
	spl_t		s;

	s = splsched();

	simple_lock(&sfi_lock);
	if (!sfi_is_enabled) {
		/* If SFI has been disabled, let all "on" timers drain naturally */
		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_OFF_TIMER) | DBG_FUNC_NONE, 1, 0, 0, 0, 0);

		simple_unlock(&sfi_lock);
		splx(s);
		return;
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_OFF_TIMER) | DBG_FUNC_START, 0, 0, 0, 0, 0);

	/* First set all configured classes into the off state, and program their "on" timer */
	for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
		if (sfi_classes[i].class_sfi_is_enabled) {
			uint64_t on_timer_deadline;

			sfi_classes[i].class_in_on_phase = FALSE;
			sfi_classes[i].on_timer_programmed = TRUE;

			/* Push out on-timer */
			on_timer_deadline = now + sfi_classes[i].off_time_interval;
			sfi_classes[i].on_timer_deadline = on_timer_deadline;

			timer_call_enter1(&sfi_classes[i].on_timer, NULL, on_timer_deadline, TIMER_CALL_SYS_CRITICAL);
		} else {
			/* If this class no longer needs SFI, make sure the timer is cancelled */
			sfi_classes[i].class_in_on_phase = TRUE;
			if (sfi_classes[i].on_timer_programmed) {
				sfi_classes[i].on_timer_programmed = FALSE;
				sfi_classes[i].on_timer_deadline = ~0ULL;
				timer_call_cancel(&sfi_classes[i].on_timer);
			}
		}
	}
	simple_unlock(&sfi_lock);

	/* Iterate over processors, call cause_ast_check() on ones running a thread that should be in an off phase */
	processor = processor_list;
	pset = processor->processor_set;

	pset_lock(pset);

	do {
		nset = processor->processor_set;
		if (nset != pset) {
			pset_unlock(pset);
			pset = nset;
			pset_lock(pset);
		}

		/* "processor" and its pset are locked */
		if (processor->state == PROCESSOR_RUNNING) {
			if (AST_NONE != sfi_processor_needs_ast(processor)) {
				needs_cause_ast_mask |= (1U << processor->cpu_id);
			}
		}
	} while ((processor = processor->processor_list) != NULL);

	pset_unlock(pset);

	for (int cpuid = lsb_first(needs_cause_ast_mask); cpuid >= 0; cpuid = lsb_next(needs_cause_ast_mask, cpuid)) {
		processor = processor_array[cpuid];
		if (processor == current_processor()) {
			ast_on(AST_SFI);
		} else {
			cause_ast_check(processor);
		}
	}

	/* Re-arm timer if still enabled */
	simple_lock(&sfi_lock);
	if (sfi_is_enabled) {
		clock_deadline_for_periodic_event(sfi_window_interval,
						  now,
						  &sfi_next_off_deadline);
		timer_call_enter1(&sfi_timer_call_entry,
				  NULL,
				  sfi_next_off_deadline,
				  TIMER_CALL_SYS_CRITICAL);
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_OFF_TIMER) | DBG_FUNC_END, 0, 0, 0, 0, 0);

	simple_unlock(&sfi_lock);
	splx(s);
}
static void sfi_timer_per_class_on(
	timer_call_param_t	param0,
	timer_call_param_t	param1 __unused)
{
	sfi_class_id_t sfi_class_id = (sfi_class_id_t)(uintptr_t)param0;
	struct sfi_class_state *sfi_class = &sfi_classes[sfi_class_id];
	kern_return_t kret;
	spl_t s;

	s = splsched();

	simple_lock(&sfi_lock);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_ON_TIMER) | DBG_FUNC_START, sfi_class_id, 0, 0, 0, 0);

	/*
	 * Any threads that may have accumulated in the ready queue for this class should get re-enqueued.
	 * Since we have the sfi_lock held and have changed "class_in_on_phase", we expect
	 * no new threads to be put on this wait queue until the global "off timer" has fired.
	 */

	sfi_class->class_in_on_phase = TRUE;
	sfi_class->on_timer_programmed = FALSE;

	kret = waitq_wakeup64_all(&sfi_class->waitq,
				  CAST_EVENT64_T(sfi_class_id),
				  THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
	assert(kret == KERN_SUCCESS || kret == KERN_NOT_WAITING);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_ON_TIMER) | DBG_FUNC_END, 0, 0, 0, 0, 0);

	simple_unlock(&sfi_lock);
	splx(s);
}
kern_return_t sfi_set_window(uint64_t window_usecs)
{
	uint64_t	interval, deadline;
	uint64_t	now = mach_absolute_time();
	sfi_class_id_t	i;
	spl_t		s;
	uint64_t	largest_class_off_interval = 0;

	if (window_usecs < MIN_SFI_WINDOW_USEC)
		window_usecs = MIN_SFI_WINDOW_USEC;

	if (window_usecs > UINT32_MAX)
		return (KERN_INVALID_ARGUMENT);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_SET_WINDOW), window_usecs, 0, 0, 0, 0);

	clock_interval_to_absolutetime_interval((uint32_t)window_usecs, NSEC_PER_USEC, &interval);
	deadline = now + interval;

	s = splsched();

	simple_lock(&sfi_lock);

	/* Check that we are not bringing in the SFI window smaller than any class */
	for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
		if (sfi_classes[i].class_sfi_is_enabled) {
			largest_class_off_interval = MAX(largest_class_off_interval, sfi_classes[i].off_time_interval);
		}
	}

	/*
	 * Off window must be strictly greater than all enabled classes,
	 * otherwise threads would build up on ready queue and never be able to run.
	 */
	if (interval <= largest_class_off_interval) {
		simple_unlock(&sfi_lock);
		splx(s);
		return (KERN_INVALID_ARGUMENT);
	}

	/*
	 * If the new "off" deadline is further out than the current programmed timer,
	 * just let the current one expire (and the new cadence will be established thereafter).
	 * If the new "off" deadline is nearer than the current one, bring it in, so we
	 * can start the new behavior sooner. Note that this may cause the "off" timer to
	 * fire before some of the class "on" timers have fired.
	 */
	sfi_window_usecs = window_usecs;
	sfi_window_interval = interval;
	sfi_window_is_set = TRUE;

	if (sfi_enabled_class_count == 0) {
		/* Can't program timer yet */
	} else if (!sfi_is_enabled) {
		sfi_is_enabled = TRUE;
		sfi_next_off_deadline = deadline;
		timer_call_enter1(&sfi_timer_call_entry,
				  NULL,
				  sfi_next_off_deadline,
				  TIMER_CALL_SYS_CRITICAL);
	} else if (deadline >= sfi_next_off_deadline) {
		sfi_next_off_deadline = deadline;
	} else {
		sfi_next_off_deadline = deadline;
		timer_call_enter1(&sfi_timer_call_entry,
				  NULL,
				  sfi_next_off_deadline,
				  TIMER_CALL_SYS_CRITICAL);
	}

	simple_unlock(&sfi_lock);
	splx(s);

	return (KERN_SUCCESS);
}
kern_return_t sfi_window_cancel(void)
{
	spl_t s;

	s = splsched();

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_CANCEL_WINDOW), 0, 0, 0, 0, 0);

	/* Disable globals so that global "off-timer" is not re-armed */
	simple_lock(&sfi_lock);
	sfi_window_is_set = FALSE;
	sfi_window_usecs = 0;
	sfi_window_interval = 0;
	sfi_next_off_deadline = 0;
	sfi_is_enabled = FALSE;
	simple_unlock(&sfi_lock);

	splx(s);

	return (KERN_SUCCESS);
}
/*
 * Defers SFI off and per-class on timers (if live) by the specified interval
 * in Mach Absolute Time Units. Currently invoked to align with the global
 * forced idle (GFI) mechanism. Making some simplifying assumptions, the
 * iterative GFI-induced SFI on+off deferrals form a geometric series that
 * converges to yield an effective SFI duty cycle that is scaled by the GFI
 * duty cycle. Initial phase alignment and congruency of the SFI/GFI periods
 * can distort this to some extent.
 */
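/*
 * One way to see the duty-cycle claim above (an idealized sketch, assuming
 * uniform deferral and ignoring the phase effects just mentioned): let GFI
 * idle the system for a fraction g of each SFI window of nominal length W.
 * The first deferral pushes the timers out by g*W; the deferred stretch is
 * itself subject to GFI, adding g*(g*W), and so on:
 *
 *	total stretch = g*W * (1 + g + g^2 + ...) = g*W / (1 - g)
 *
 * so the effective window converges to W / (1 - g), and a nominal off-time
 * fraction f is realized as f * (1 - g), i.e. scaled by the GFI duty cycle.
 */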
kern_return_t sfi_defer(uint64_t sfi_defer_matus)
{
	kern_return_t kr = KERN_FAILURE;
	sfi_class_id_t i;
	spl_t s;

	s = splsched();

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_GLOBAL_DEFER), sfi_defer_matus, 0, 0, 0, 0);

	simple_lock(&sfi_lock);
	if (!sfi_is_enabled) {
		simple_unlock(&sfi_lock);
		splx(s);
		return (kr);
	}

	assert(sfi_next_off_deadline != 0);

	sfi_next_off_deadline += sfi_defer_matus;
	timer_call_enter1(&sfi_timer_call_entry, NULL, sfi_next_off_deadline, TIMER_CALL_SYS_CRITICAL);

	for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
		if (sfi_classes[i].class_sfi_is_enabled) {
			if (sfi_classes[i].on_timer_programmed) {
				uint64_t new_on_deadline = sfi_classes[i].on_timer_deadline + sfi_defer_matus;
				sfi_classes[i].on_timer_deadline = new_on_deadline;
				timer_call_enter1(&sfi_classes[i].on_timer, NULL, new_on_deadline, TIMER_CALL_SYS_CRITICAL);
			}
		}
	}

	kr = KERN_SUCCESS;

	simple_unlock(&sfi_lock);

	splx(s);

	return (kr);
}
kern_return_t sfi_get_window(uint64_t *window_usecs)
{
	uint64_t off_window_us;
	spl_t s;

	s = splsched();

	simple_lock(&sfi_lock);

	off_window_us = sfi_window_usecs;

	simple_unlock(&sfi_lock);

	splx(s);

	*window_usecs = off_window_us;

	return (KERN_SUCCESS);
}
kern_return_t sfi_set_class_offtime(sfi_class_id_t class_id, uint64_t offtime_usecs)
{
	uint64_t interval;
	spl_t s;
	uint64_t off_window_interval;

	if (offtime_usecs < MIN_SFI_WINDOW_USEC)
		offtime_usecs = MIN_SFI_WINDOW_USEC;

	if (class_id == SFI_CLASS_UNSPECIFIED || class_id >= MAX_SFI_CLASS_ID)
		return (KERN_INVALID_ARGUMENT);

	if (offtime_usecs > UINT32_MAX)
		return (KERN_INVALID_ARGUMENT);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_SET_CLASS_OFFTIME), offtime_usecs, class_id, 0, 0, 0);

	clock_interval_to_absolutetime_interval((uint32_t)offtime_usecs, NSEC_PER_USEC, &interval);

	s = splsched();

	simple_lock(&sfi_lock);
	off_window_interval = sfi_window_interval;

	/* Check that we are not bringing in class off-time larger than the SFI window */
	if (off_window_interval && (interval >= off_window_interval)) {
		simple_unlock(&sfi_lock);
		splx(s);
		return (KERN_INVALID_ARGUMENT);
	}

	/* We never re-program the per-class on-timer, but rather just let it expire naturally */
	if (!sfi_classes[class_id].class_sfi_is_enabled) {
		sfi_enabled_class_count++;
	}
	sfi_classes[class_id].off_time_usecs = offtime_usecs;
	sfi_classes[class_id].off_time_interval = interval;
	sfi_classes[class_id].class_sfi_is_enabled = TRUE;

	if (sfi_window_is_set && !sfi_is_enabled) {
		/* start global off timer */
		sfi_is_enabled = TRUE;
		sfi_next_off_deadline = mach_absolute_time() + sfi_window_interval;
		timer_call_enter1(&sfi_timer_call_entry,
				  NULL,
				  sfi_next_off_deadline,
				  TIMER_CALL_SYS_CRITICAL);
	}

	simple_unlock(&sfi_lock);

	splx(s);

	return (KERN_SUCCESS);
}
kern_return_t sfi_class_offtime_cancel(sfi_class_id_t class_id)
{
	spl_t s;

	if (class_id == SFI_CLASS_UNSPECIFIED || class_id >= MAX_SFI_CLASS_ID)
		return (KERN_INVALID_ARGUMENT);

	s = splsched();

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_CANCEL_CLASS_OFFTIME), class_id, 0, 0, 0, 0);

	simple_lock(&sfi_lock);

	/* We never re-program the per-class on-timer, but rather just let it expire naturally */
	if (sfi_classes[class_id].class_sfi_is_enabled) {
		sfi_enabled_class_count--;
	}
	sfi_classes[class_id].off_time_usecs = 0;
	sfi_classes[class_id].off_time_interval = 0;
	sfi_classes[class_id].class_sfi_is_enabled = FALSE;

	if (sfi_enabled_class_count == 0) {
		sfi_is_enabled = FALSE;
	}

	simple_unlock(&sfi_lock);

	splx(s);

	return (KERN_SUCCESS);
}
kern_return_t sfi_get_class_offtime(sfi_class_id_t class_id, uint64_t *offtime_usecs)
{
	uint64_t off_time_us;
	spl_t s;

	if (class_id == SFI_CLASS_UNSPECIFIED || class_id >= MAX_SFI_CLASS_ID)
		return (KERN_INVALID_ARGUMENT);

	s = splsched();

	simple_lock(&sfi_lock);
	off_time_us = sfi_classes[class_id].off_time_usecs;
	simple_unlock(&sfi_lock);

	splx(s);

	*offtime_usecs = off_time_us;

	return (KERN_SUCCESS);
}
/*
 * sfi_thread_classify and sfi_processor_active_thread_classify perform the critical
 * role of quickly categorizing a thread into its SFI class so that an AST_SFI can be
 * set. As the thread is unwinding to userspace, sfi_ast() performs full locking
 * and determines whether the thread should enter an SFI wait state. Because of
 * the inherent races between the time the AST is set and when it is evaluated,
 * thread classification can be inaccurate (but should always be safe). This is
 * especially the case for sfi_processor_active_thread_classify, which must
 * classify the active thread on a remote processor without taking the thread lock.
 * When in doubt, classification should err on the side of *not* classifying a
 * thread at all, and wait for the thread itself to either hit a quantum expiration
 * or block inside the kernel.
 */
/*
 * Thread must be locked. Ultimately, the real decision to enter
 * SFI wait happens at the AST boundary.
 */
sfi_class_id_t sfi_thread_classify(thread_t thread)
{
	task_t task = thread->task;
	boolean_t is_kernel_thread = (task == kernel_task);
	sched_mode_t thmode = thread->sched_mode;
	boolean_t focal = FALSE;

	int task_role = proc_get_effective_task_policy(task, TASK_POLICY_ROLE);
	int latency_qos = proc_get_effective_task_policy(task, TASK_POLICY_LATENCY_QOS);
	int managed_task = proc_get_effective_task_policy(task, TASK_POLICY_SFI_MANAGED);

	int thread_qos = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS);
	int thread_bg = proc_get_effective_thread_policy(thread, TASK_POLICY_DARWIN_BG);

	/* kernel threads never reach the user AST boundary, and are in a separate world for SFI */
	if (is_kernel_thread) {
		return SFI_CLASS_KERNEL;
	}

	if (thread_qos == THREAD_QOS_MAINTENANCE)
		return SFI_CLASS_MAINTENANCE;

	if (thread_bg || thread_qos == THREAD_QOS_BACKGROUND) {
		return SFI_CLASS_DARWIN_BG;
	}

	if (latency_qos != 0) {
		int latency_qos_wtf = latency_qos - 1;	/* task latency QoS is stored biased by 1 */

		/* Latency QoS tiers 4 and 5 indicate App Nap */
		if ((latency_qos_wtf >= 4) && (latency_qos_wtf <= 5)) {
			return SFI_CLASS_APP_NAP;
		}
	}

	/*
	 * Realtime and fixed priority threads express their duty cycle constraints
	 * via other mechanisms, and are opted out of (most) forms of SFI
	 */
	if (thmode == TH_MODE_REALTIME || thmode == TH_MODE_FIXED || task_role == TASK_GRAPHICS_SERVER) {
		return SFI_CLASS_OPTED_OUT;
	}

	/*
	 * Threads with unspecified, legacy, or user-initiated QOS class can be individually managed.
	 */
	switch (task_role) {
	case TASK_CONTROL_APPLICATION:
	case TASK_FOREGROUND_APPLICATION:
		focal = TRUE;
		break;

	case TASK_BACKGROUND_APPLICATION:
	case TASK_DEFAULT_APPLICATION:
	case TASK_THROTTLE_APPLICATION:
	case TASK_UNSPECIFIED:
		/* Focal if the task is in a coalition with a FG/focal app */
		if (task_coalition_focal_count(thread->task) > 0)
			focal = TRUE;
		break;

	default:
		break;
	}

	if (managed_task) {
		switch (thread_qos) {
		case THREAD_QOS_UNSPECIFIED:
		case THREAD_QOS_LEGACY:
		case THREAD_QOS_USER_INITIATED:
			if (focal)
				return SFI_CLASS_MANAGED_FOCAL;
			else
				return SFI_CLASS_MANAGED_NONFOCAL;
		default:
			break;
		}
	}

	if (thread_qos == THREAD_QOS_UTILITY)
		return SFI_CLASS_UTILITY;

	/*
	 * Classify threads in non-managed tasks
	 */
	if (focal) {
		switch (thread_qos) {
		case THREAD_QOS_USER_INTERACTIVE:
			return SFI_CLASS_USER_INTERACTIVE_FOCAL;
		case THREAD_QOS_USER_INITIATED:
			return SFI_CLASS_USER_INITIATED_FOCAL;
		case THREAD_QOS_LEGACY:
			return SFI_CLASS_LEGACY_FOCAL;
		default:
			return SFI_CLASS_DEFAULT_FOCAL;
		}
	} else {
		switch (thread_qos) {
		case THREAD_QOS_USER_INTERACTIVE:
			return SFI_CLASS_USER_INTERACTIVE_NONFOCAL;
		case THREAD_QOS_USER_INITIATED:
			return SFI_CLASS_USER_INITIATED_NONFOCAL;
		case THREAD_QOS_LEGACY:
			return SFI_CLASS_LEGACY_NONFOCAL;
		default:
			return SFI_CLASS_DEFAULT_NONFOCAL;
		}
	}
}
/*
 * pset must be locked.
 */
sfi_class_id_t sfi_processor_active_thread_classify(processor_t processor)
{
	return processor->current_sfi_class;
}
/*
 * thread must be locked. This is inherently racy, with the intent that
 * at the AST boundary, it will be fully evaluated whether we need to
 * perform an AST wait
 */
ast_t sfi_thread_needs_ast(thread_t thread, sfi_class_id_t *out_class)
{
	sfi_class_id_t class_id;

	class_id = sfi_thread_classify(thread);

	if (out_class)
		*out_class = class_id;

	/* No lock taken, so a stale value may be used. */
	if (!sfi_classes[class_id].class_in_on_phase)
		return AST_SFI;
	else
		return AST_NONE;
}
/*
 * pset must be locked. We take the SFI class for
 * the currently running thread which is cached on
 * the processor_t, and assume it is accurate. In the
 * worst case, the processor will get an IPI and be asked
 * to evaluate if the current running thread at that
 * later point in time should be in an SFI wait.
 */
ast_t sfi_processor_needs_ast(processor_t processor)
{
	sfi_class_id_t class_id;

	class_id = sfi_processor_active_thread_classify(processor);

	/* No lock taken, so a stale value may be used. */
	if (!sfi_classes[class_id].class_in_on_phase)
		return AST_SFI;
	else
		return AST_NONE;
}
static inline void _sfi_wait_cleanup(sched_call_t callback)
{
	thread_t self = current_thread();
	sfi_class_id_t current_sfi_wait_class = SFI_CLASS_UNSPECIFIED;
	int64_t sfi_wait_time, sfi_wait_begin = 0;

	spl_t s = splsched();
	thread_lock(self);
	if (callback) {
		thread_sched_call(self, callback);
	}
	sfi_wait_begin = self->wait_sfi_begin_time;
	thread_unlock(self);

	simple_lock(&sfi_lock);
	sfi_wait_time = mach_absolute_time() - sfi_wait_begin;
	current_sfi_wait_class = self->sfi_wait_class;
	self->sfi_wait_class = SFI_CLASS_UNSPECIFIED;
	simple_unlock(&sfi_lock);
	splx(s);

	assert((SFI_CLASS_UNSPECIFIED < current_sfi_wait_class) && (current_sfi_wait_class < MAX_SFI_CLASS_ID));
#if !CONFIG_EMBEDDED
	ledger_credit(self->task->ledger, task_ledgers.sfi_wait_times[current_sfi_wait_class], sfi_wait_time);
#endif /* !CONFIG_EMBEDDED */
}
/*
 * Called at AST context to fully evaluate if the current thread
 * (which is obviously running) should instead block in an SFI wait.
 * We must take the sfi_lock to check whether we are in the "off" period
 * for the class, and if so, block.
 */
void sfi_ast(thread_t thread)
{
	sfi_class_id_t class_id;
	spl_t s;
	struct sfi_class_state *sfi_class;
	wait_result_t waitret;
	boolean_t did_wait = FALSE;
	uint64_t tid;
	thread_continue_t continuation;
	sched_call_t workq_callback = workqueue_get_sched_callback();

	s = splsched();

	simple_lock(&sfi_lock);

	if (!sfi_is_enabled) {
		/*
		 * SFI is not enabled, or has recently been disabled.
		 * There is no point putting this thread on a deferred ready
		 * queue, even if it were classified as needing it, since
		 * SFI will truly be off at the next global off timer
		 */
		simple_unlock(&sfi_lock);
		splx(s);

		return;
	}

	thread_lock(thread);
	thread->sfi_class = class_id = sfi_thread_classify(thread);
	tid = thread_tid(thread);

	/*
	 * Once the sfi_lock is taken and the thread's ->sfi_class field is updated, we
	 * are committed to transitioning to whatever state is indicated by "->class_in_on_phase".
	 * If another thread tries to call sfi_reevaluate() after this point, it will take the
	 * sfi_lock and see the thread in this wait state. If another thread calls
	 * sfi_reevaluate() before this point, it would see a runnable thread and at most
	 * attempt to send an AST to this processor, but we would have the most accurate
	 * classification.
	 */

	/* Optimistically clear workq callback while thread is already locked */
	if (workq_callback && (thread->sched_call == workq_callback)) {
		thread_sched_call(thread, NULL);
	} else {
		workq_callback = NULL;
	}
	thread_unlock(thread);

	sfi_class = &sfi_classes[class_id];
	if (!sfi_class->class_in_on_phase) {
		/* Need to block thread in wait queue */
		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_THREAD_DEFER), tid, class_id, 0, 0, 0);

		waitret = waitq_assert_wait64(&sfi_class->waitq,
					      CAST_EVENT64_T(class_id),
					      THREAD_INTERRUPTIBLE,
					      0);
		if (waitret == THREAD_WAITING) {
			thread->sfi_wait_class = class_id;
			did_wait = TRUE;
			continuation = sfi_class->continuation;
		} else {
			/* thread may be exiting already, all other errors are unexpected */
			assert(waitret == THREAD_INTERRUPTED);
		}
	}
	simple_unlock(&sfi_lock);

	splx(s);

	if (did_wait) {
		thread_block_reason(continuation, workq_callback, AST_SFI);
	} else if (workq_callback) {
		thread_reenable_sched_call(thread, workq_callback);
	}
}
1008 void sfi_reevaluate(thread_t thread
)
1012 sfi_class_id_t class_id
, current_class_id
;
1017 simple_lock(&sfi_lock
);
1019 thread_lock(thread
);
1020 sfi_ast
= sfi_thread_needs_ast(thread
, &class_id
);
1021 thread
->sfi_class
= class_id
;
1024 * This routine chiefly exists to boost threads out of an SFI wait
1025 * if their classification changes before the "on" timer fires.
1027 * If we calculate that a thread is in a different ->sfi_wait_class
1028 * than we think it should be (including no-SFI-wait), we need to
1031 * If the thread is in SFI wait and should not be (or should be waiting
1032 * on a different class' "on" timer), we wake it up. If needed, the
1033 * thread may immediately block again in the different SFI wait state.
1035 * If the thread is not in an SFI wait state and it should be, we need
1036 * to get that thread's attention, possibly by sending an AST to another
1040 if ((current_class_id
= thread
->sfi_wait_class
) != SFI_CLASS_UNSPECIFIED
) {
1042 thread_unlock(thread
); /* not needed anymore */
1044 assert(current_class_id
< MAX_SFI_CLASS_ID
);
1046 if ((sfi_ast
== AST_NONE
) || (class_id
!= current_class_id
)) {
1047 struct sfi_class_state
*sfi_class
= &sfi_classes
[current_class_id
];
1049 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI
, SFI_WAIT_CANCELED
), thread_tid(thread
), current_class_id
, class_id
, 0, 0);
1051 kret
= waitq_wakeup64_thread(&sfi_class
->waitq
,
1052 CAST_EVENT64_T(current_class_id
),
1055 assert(kret
== KERN_SUCCESS
|| kret
== KERN_NOT_WAITING
);
1059 * Thread's current SFI wait class is not set, and because we
1060 * have the sfi_lock, it won't get set.
1063 if ((thread
->state
& (TH_RUN
| TH_IDLE
)) == TH_RUN
) {
1064 if (sfi_ast
!= AST_NONE
) {
1065 if (thread
== current_thread())
1068 processor_t processor
= thread
->last_processor
;
1070 if (processor
!= PROCESSOR_NULL
&&
1071 processor
->state
== PROCESSOR_RUNNING
&&
1072 processor
->active_thread
== thread
) {
1073 cause_ast_check(processor
);
1076 * Runnable thread that's not on a CPU currently. When a processor
1077 * does context switch to it, the AST will get set based on whether
1078 * the thread is in its "off time".
1085 thread_unlock(thread
);
1088 simple_unlock(&sfi_lock
);
#else /* !CONFIG_SCHED_SFI */

kern_return_t sfi_set_window(uint64_t window_usecs __unused)
{
	return (KERN_NOT_SUPPORTED);
}

kern_return_t sfi_window_cancel(void)
{
	return (KERN_NOT_SUPPORTED);
}

kern_return_t sfi_get_window(uint64_t *window_usecs __unused)
{
	return (KERN_NOT_SUPPORTED);
}

kern_return_t sfi_set_class_offtime(sfi_class_id_t class_id __unused, uint64_t offtime_usecs __unused)
{
	return (KERN_NOT_SUPPORTED);
}

kern_return_t sfi_class_offtime_cancel(sfi_class_id_t class_id __unused)
{
	return (KERN_NOT_SUPPORTED);
}

kern_return_t sfi_get_class_offtime(sfi_class_id_t class_id __unused, uint64_t *offtime_usecs __unused)
{
	return (KERN_NOT_SUPPORTED);
}

void sfi_reevaluate(thread_t thread __unused)
{
	return;
}

sfi_class_id_t sfi_thread_classify(thread_t thread)
{
	task_t task = thread->task;
	boolean_t is_kernel_thread = (task == kernel_task);

	if (is_kernel_thread) {
		return SFI_CLASS_KERNEL;
	}

	return SFI_CLASS_OPTED_OUT;
}

#endif /* !CONFIG_SCHED_SFI */