/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *  Author: Avadis Tevanian, Jr.
 *
 *  Scheduling primitives
 *
 */
#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <mach/thread_act.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>
#include <machine/limits.h>
#include <machine/atomic.h>

#include <machine/commpage.h>

#include <kern/kern_types.h>
#include <kern/backtrace.h>
#include <kern/clock.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/sfi.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/ledger.h>
#include <kern/timer_queue.h>
#include <kern/waitq.h>
#include <kern/policy_internal.h>
#include <kern/cpu_quiesce.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>

#include <mach/sdt.h>
#include <mach/mach_host.h>
#include <mach/host_info.h>

#include <sys/kdebug.h>
#include <kperf/kperf.h>
#include <kern/kpc.h>
#include <san/kasan.h>
#include <kern/pms.h>
#include <kern/host.h>
#include <stdatomic.h>
struct sched_statistics PERCPU_DATA(sched_stats);
bool sched_stats_active;

int
rt_runq_count(processor_set_t pset)
{
    return atomic_load_explicit(&SCHED(rt_runq)(pset)->count, memory_order_relaxed);
}

void
rt_runq_count_incr(processor_set_t pset)
{
    atomic_fetch_add_explicit(&SCHED(rt_runq)(pset)->count, 1, memory_order_relaxed);
}

void
rt_runq_count_decr(processor_set_t pset)
{
    atomic_fetch_sub_explicit(&SCHED(rt_runq)(pset)->count, 1, memory_order_relaxed);
}
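/*
 * Illustrative note (not part of the original source): these accessors use
 * memory_order_relaxed presumably because the count is consulted only as a
 * hint; callers such as thread_select() simply test it, e.g.
 *
 *     if (rt_runq_count(pset) > 0) {
 *         // consider pulling a real-time thread off the RT run queue
 *     }
 *
 * and still take the pset lock before touching the queue itself.
 */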
#define DEFAULT_PREEMPTION_RATE 100             /* (1/s) */
TUNABLE(int, default_preemption_rate, "preempt", DEFAULT_PREEMPTION_RATE);

#define DEFAULT_BG_PREEMPTION_RATE 400          /* (1/s) */
TUNABLE(int, default_bg_preemption_rate, "bg_preempt", DEFAULT_BG_PREEMPTION_RATE);

#define MAX_UNSAFE_QUANTA 800
TUNABLE(int, max_unsafe_quanta, "unsafe", MAX_UNSAFE_QUANTA);

#define MAX_POLL_QUANTA 2
TUNABLE(int, max_poll_quanta, "poll", MAX_POLL_QUANTA);

#define SCHED_POLL_YIELD_SHIFT 4                /* 1/16 */
int sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT;

uint64_t max_poll_computation;

uint64_t max_unsafe_computation;
uint64_t sched_safe_duration;
#if defined(CONFIG_SCHED_TIMESHARE_CORE)

uint32_t std_quantum;
uint32_t min_std_quantum;
uint32_t bg_quantum;

uint32_t std_quantum_us;
uint32_t bg_quantum_us;

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

uint32_t thread_depress_time;
uint32_t default_timeshare_computation;
uint32_t default_timeshare_constraint;

uint32_t max_rt_quantum;
uint32_t min_rt_quantum;

uint32_t rt_constraint_threshold;

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

uint32_t sched_tick_interval;

/* Timeshare load calculation interval (15ms) */
uint32_t sched_load_compute_interval_us = 15000;
uint64_t sched_load_compute_interval_abs;
static _Atomic uint64_t sched_load_compute_deadline;

uint32_t sched_pri_shifts[TH_BUCKET_MAX];
uint32_t sched_fixed_shift;

uint32_t sched_decay_usage_age_factor = 1; /* accelerate 5/8^n usage aging */

/* Allow foreground to decay past default to resolve inversions */
#define DEFAULT_DECAY_BAND_LIMIT ((BASEPRI_FOREGROUND - BASEPRI_DEFAULT) + 2)
int sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;

/* Defaults for timer deadline profiling */
#define TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT 2000000   /* Timers with deadlines <= 2ms */
#define TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT 5000000   /* Timers with deadlines <= 5ms */

uint64_t timer_deadline_tracking_bin_1;
uint64_t timer_deadline_tracking_bin_2;

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

thread_t sched_maintenance_thread;
/* interrupts disabled lock to guard recommended cores state */
decl_simple_lock_data(static, sched_recommended_cores_lock);
static uint64_t usercontrol_requested_recommended_cores = ALL_CORES_RECOMMENDED;
static void sched_update_recommended_cores(uint64_t recommended_cores);

#if __arm__ || __arm64__
static void sched_recommended_cores_maintenance(void);
uint64_t perfcontrol_failsafe_starvation_threshold;
extern char *proc_name_address(struct proc *p);
#endif /* __arm__ || __arm64__ */

uint64_t sched_one_second_interval;
boolean_t allow_direct_handoff = TRUE;

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

static void load_shift_init(void);
static void preempt_pri_init(void);

#endif /* CONFIG_SCHED_TIMESHARE_CORE */
thread_t processor_idle(
    thread_t                thread,
    processor_t             processor);

static ast_t
csw_check_locked(
    thread_t                thread,
    processor_t             processor,
    processor_set_t         pset,
    ast_t                   check_reason);

static void processor_setrun(
    processor_t             processor,
    thread_t                thread,
    integer_t               options);

static void
sched_realtime_timebase_init(void);

static void
sched_timer_deadline_tracking_init(void);

#if DEBUG
extern int debug_task;
#define TLOG(a, fmt, args...) if(debug_task & a) kprintf(fmt, ## args)
#else
#define TLOG(a, fmt, args...) do {} while (0)
#endif

static processor_t
thread_bind_internal(
    thread_t                thread,
    processor_t             processor);

static void
sched_vm_group_maintenance(void);

#if defined(CONFIG_SCHED_TIMESHARE_CORE)
int8_t sched_load_shifts[NRQS];
bitmap_t sched_preempt_pri[BITMAP_LEN(NRQS_MAX)];
#endif /* CONFIG_SCHED_TIMESHARE_CORE */
/*
 * Statically allocate a buffer to hold the longest possible
 * scheduler description string, as currently implemented.
 * bsd/kern/kern_sysctl.c has a corresponding definition in bsd/
 * to export to userspace via sysctl(3). If either version
 * changes, update the other.
 *
 * Note that in addition to being an upper bound on the strings
 * in the kernel, it's also an exact parameter to PE_get_default(),
 * which interrogates the device tree on some platforms. That
 * API requires the caller know the exact size of the device tree
 * property, so we need both a legacy size (32) and the current size
 * (48) to deal with old and new device trees. The device tree property
 * is similarly padded to a fixed size so that the same kernel image
 * can run on multiple devices with different schedulers configured
 * in the device tree.
 */
char sched_string[SCHED_STRING_MAX_LENGTH];

uint32_t sched_debug_flags = SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS;

/* Global flag which indicates whether Background Stepper Context is enabled */
static int cpu_throttle_enabled = 1;
void
sched_init(void)
{
    boolean_t direct_handoff = FALSE;
    kprintf("Scheduler: Default of %s\n", SCHED(sched_name));

    if (!PE_parse_boot_argn("sched_pri_decay_limit", &sched_pri_decay_band_limit, sizeof(sched_pri_decay_band_limit))) {
        /* No boot-args, check in device tree */
        if (!PE_get_default("kern.sched_pri_decay_limit",
            &sched_pri_decay_band_limit,
            sizeof(sched_pri_decay_band_limit))) {
            /* Allow decay all the way to normal limits */
            sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;
        }
    }

    kprintf("Setting scheduler priority decay band limit %d\n", sched_pri_decay_band_limit);

    if (PE_parse_boot_argn("sched_debug", &sched_debug_flags, sizeof(sched_debug_flags))) {
        kprintf("Scheduler: Debug flags 0x%08x\n", sched_debug_flags);
    }
    strlcpy(sched_string, SCHED(sched_name), sizeof(sched_string));

    cpu_quiescent_counter_init();

    SCHED(rt_init)(&pset0);
    sched_timer_deadline_tracking_init();

    SCHED(pset_init)(&pset0);
    SCHED(processor_init)(master_processor);

    if (PE_parse_boot_argn("direct_handoff", &direct_handoff, sizeof(direct_handoff))) {
        allow_direct_handoff = direct_handoff;
    }
}
void
sched_timebase_init(void)
{
    uint64_t abstime;

    clock_interval_to_absolutetime_interval(1, NSEC_PER_SEC, &abstime);
    sched_one_second_interval = abstime;

    SCHED(timebase_init)();
    sched_realtime_timebase_init();
}
#if defined(CONFIG_SCHED_TIMESHARE_CORE)

void
sched_timeshare_init(void)
{
    /*
     * Calculate the timeslicing quantum.
     */
    if (default_preemption_rate < 1) {
        default_preemption_rate = DEFAULT_PREEMPTION_RATE;
    }
    std_quantum_us = (1000 * 1000) / default_preemption_rate;

    printf("standard timeslicing quantum is %d us\n", std_quantum_us);

    if (default_bg_preemption_rate < 1) {
        default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;
    }
    bg_quantum_us = (1000 * 1000) / default_bg_preemption_rate;

    printf("standard background quantum is %d us\n", bg_quantum_us);
}
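/*
 * Worked arithmetic (illustrative, not part of the original source): with the
 * default preemption rate of 100/s, std_quantum_us = 1,000,000 / 100 =
 * 10,000 us (10 ms); with the default background rate of 400/s,
 * bg_quantum_us = 1,000,000 / 400 = 2,500 us (2.5 ms).
 */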
void
sched_timeshare_timebase_init(void)
{
    uint64_t        abstime;
    uint32_t        shift;

    /* standard timeslicing quantum */
    clock_interval_to_absolutetime_interval(
        std_quantum_us, NSEC_PER_USEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    std_quantum = (uint32_t)abstime;

    /* smallest remaining quantum (250 us) */
    clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    min_std_quantum = (uint32_t)abstime;

    /* quantum for background tasks */
    clock_interval_to_absolutetime_interval(
        bg_quantum_us, NSEC_PER_USEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    bg_quantum = (uint32_t)abstime;

    /* scheduler tick interval */
    clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT,
        NSEC_PER_USEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    sched_tick_interval = (uint32_t)abstime;

    /* timeshare load calculation interval & deadline initialization */
    clock_interval_to_absolutetime_interval(sched_load_compute_interval_us, NSEC_PER_USEC, &sched_load_compute_interval_abs);
    os_atomic_init(&sched_load_compute_deadline, sched_load_compute_interval_abs);

    /*
     * Compute conversion factor from usage to
     * timesharing priorities with 5/8 ** n aging.
     */
    abstime = (abstime * 5) / 3;
    for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift) {
        abstime >>= 1;
    }
    sched_fixed_shift = shift;

    for (uint32_t i = 0; i < TH_BUCKET_MAX; i++) {
        sched_pri_shifts[i] = INT8_MAX;
    }

    max_unsafe_computation = ((uint64_t)max_unsafe_quanta) * std_quantum;
    sched_safe_duration = 2 * ((uint64_t)max_unsafe_quanta) * std_quantum;

    max_poll_computation = ((uint64_t)max_poll_quanta) * std_quantum;
    thread_depress_time = 1 * std_quantum;
    default_timeshare_computation = std_quantum / 2;
    default_timeshare_constraint = std_quantum;

#if __arm__ || __arm64__
    perfcontrol_failsafe_starvation_threshold = (2 * sched_tick_interval);
#endif /* __arm__ || __arm64__ */
}

#endif /* CONFIG_SCHED_TIMESHARE_CORE */
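/*
 * Worked example (illustrative, not part of the original source): the
 * sched_fixed_shift loop above halves the 5/3-scaled scheduler-tick interval
 * until it drops to BASEPRI_DEFAULT or below, counting the halvings. Assuming
 * a 24 MHz timebase and a 125 ms tick (USEC_PER_SEC >> SCHED_TICK_SHIFT when
 * the shift is 3), the tick is ~3,000,000 absolute-time units; scaled by 5/3
 * that is ~5,000,000, which needs 18 right-shifts to fall to 19, so
 * sched_fixed_shift would be 18 on such a machine.
 */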
void
pset_rt_init(processor_set_t pset)
{
    os_atomic_init(&pset->rt_runq.count, 0);
    queue_init(&pset->rt_runq.queue);
    memset(&pset->rt_runq.runq_stats, 0, sizeof pset->rt_runq.runq_stats);
}
static void
sched_realtime_timebase_init(void)
{
    uint64_t abstime;

    /* smallest rt computation (50 us) */
    clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    min_rt_quantum = (uint32_t)abstime;

    /* maximum rt computation (50 ms) */
    clock_interval_to_absolutetime_interval(
        50, 1000 * NSEC_PER_USEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    max_rt_quantum = (uint32_t)abstime;

    /* constraint threshold for sending backup IPIs (4 ms) */
    clock_interval_to_absolutetime_interval(4, NSEC_PER_MSEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    rt_constraint_threshold = (uint32_t)abstime;
}
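/*
 * Context sketch (illustrative, not part of this file): the quanta computed
 * above bound the values userspace may request via the time-constraint
 * policy, e.g.
 *
 *     thread_time_constraint_policy_data_t pol = {
 *         .period      = 0,      // in mach_absolute_time() units
 *         .computation = 0,      // expected to fall between min_rt_quantum
 *                                // (50 us) and max_rt_quantum (50 ms)
 *         .constraint  = 0,
 *         .preemptible = TRUE,
 *     };
 *     thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
 *         (thread_policy_t)&pol, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
 */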
bool
sched_check_spill(processor_set_t pset, thread_t thread)
{
    (void)pset;
    (void)thread;

    return false;
}

bool
sched_thread_should_yield(processor_t processor, thread_t thread)
{
    (void)thread;

    return !SCHED(processor_queue_empty)(processor) || rt_runq_count(processor->processor_set) > 0;
}

/* Default implementations of .steal_thread_enabled */
bool
sched_steal_thread_DISABLED(processor_set_t pset)
{
    (void)pset;
    return false;
}

bool
sched_steal_thread_enabled(processor_set_t pset)
{
    return bit_count(pset->node->pset_map) > 1;
}
#if defined(CONFIG_SCHED_TIMESHARE_CORE)

/*
 * Set up values for timeshare
 * loading factors.
 */
static void
load_shift_init(void)
{
    int8_t k, *p = sched_load_shifts;
    uint32_t i, j;

    uint32_t sched_decay_penalty = 1;

    if (PE_parse_boot_argn("sched_decay_penalty", &sched_decay_penalty, sizeof(sched_decay_penalty))) {
        kprintf("Overriding scheduler decay penalty %u\n", sched_decay_penalty);
    }

    if (PE_parse_boot_argn("sched_decay_usage_age_factor", &sched_decay_usage_age_factor, sizeof(sched_decay_usage_age_factor))) {
        kprintf("Overriding scheduler decay usage age factor %u\n", sched_decay_usage_age_factor);
    }

    if (sched_decay_penalty == 0) {
        /*
         * There is no penalty for timeshare threads for using too much
         * CPU, so set all load shifts to INT8_MIN. Even under high load,
         * sched_pri_shift will be >INT8_MAX, and there will be no
         * penalty applied to threads (nor will sched_usage be updated per
         * thread).
         */
        for (i = 0; i < NRQS; i++) {
            sched_load_shifts[i] = INT8_MIN;
        }

        return;
    }

    *p++ = INT8_MIN; *p++ = 0;

    /*
     * For a given system load "i", the per-thread priority
     * penalty per quantum of CPU usage is ~2^k priority
     * levels. "sched_decay_penalty" can cause more
     * array entries to be filled with smaller "k" values
     */
    for (i = 2, j = 1 << sched_decay_penalty, k = 1; i < NRQS; ++k) {
        for (j <<= 1; (i < j) && (i < NRQS); ++i) {
            *p++ = k;
        }
    }
}
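/*
 * Worked example (illustrative, not part of the original source): with the
 * default sched_decay_penalty of 1, the table starts as
 *   sched_load_shifts[0] = INT8_MIN, [1] = 0,
 * and the nested loops then fill [2..3] = 1, [4..7] = 2, [8..15] = 3, ...
 * i.e. roughly floor(log2(load)), so doubling the runnable load adds about
 * one extra priority level of decay per quantum of CPU used.
 */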
static void
preempt_pri_init(void)
{
    bitmap_t *p = sched_preempt_pri;

    for (int i = BASEPRI_FOREGROUND; i < MINPRI_KERNEL; ++i) {
        bitmap_set(p, i);
    }

    for (int i = BASEPRI_PREEMPT; i <= MAXPRI; ++i) {
        bitmap_set(p, i);
    }
}

#endif /* CONFIG_SCHED_TIMESHARE_CORE */
/*
 * Thread wait timer expiration.
 */
void
thread_timer_expire(
    void            *p0,
    __unused void   *p1)
{
    thread_t        thread = p0;
    spl_t           s;

    assert_thread_magic(thread);

    s = splsched();
    thread_lock(thread);
    if (--thread->wait_timer_active == 0) {
        if (thread->wait_timer_is_set) {
            thread->wait_timer_is_set = FALSE;
            clear_wait_internal(thread, THREAD_TIMED_OUT);
        }
    }
    thread_unlock(thread);
    splx(s);
}
/*
 * thread_unblock:
 *
 * Unblock thread on wake up.
 *
 * Returns TRUE if the thread should now be placed on the runqueue.
 *
 * Thread must be locked.
 *
 * Called at splsched().
 */
boolean_t
thread_unblock(
    thread_t        thread,
    wait_result_t   wresult)
{
    boolean_t       ready_for_runq = FALSE;
    thread_t        cthread = current_thread();
    uint32_t        new_run_count;
    int             old_thread_state;

    thread->wait_result = wresult;

    /*
     * Cancel pending wait timer.
     */
    if (thread->wait_timer_is_set) {
        if (timer_call_cancel(&thread->wait_timer)) {
            thread->wait_timer_active--;
        }
        thread->wait_timer_is_set = FALSE;
    }

    boolean_t aticontext, pidle;
    ml_get_power_state(&aticontext, &pidle);

    /*
     * Update scheduling state: not waiting,
     * set running.
     */
    old_thread_state = thread->state;
    thread->state = (old_thread_state | TH_RUN) &
        ~(TH_WAIT | TH_UNINT | TH_WAIT_REPORT);

    if ((old_thread_state & TH_RUN) == 0) {
        uint64_t ctime = mach_approximate_time();
        thread->last_made_runnable_time = thread->last_basepri_change_time = ctime;
        timer_start(&thread->runnable_timer, ctime);

        ready_for_runq = TRUE;

        if (old_thread_state & TH_WAIT_REPORT) {
            (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
        }

        /* Update the runnable thread count */
        new_run_count = SCHED(run_count_incr)(thread);

#if CONFIG_SCHED_AUTO_JOIN
        if (aticontext == FALSE && work_interval_should_propagate(cthread, thread)) {
            work_interval_auto_join_propagate(cthread, thread);
        }
#endif /* CONFIG_SCHED_AUTO_JOIN */
    } else {
        /*
         * Either the thread is idling in place on another processor,
         * or it hasn't finished context switching yet.
         */
        assert((thread->state & TH_IDLE) == 0);
        /*
         * The run count is only dropped after the context switch completes
         * and the thread is still waiting, so we should not run_incr here
         */
        new_run_count = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
    }

    /*
     * Calculate deadline for real-time threads.
     */
    if (thread->sched_mode == TH_MODE_REALTIME) {
        uint64_t ctime;

        ctime = mach_absolute_time();
        thread->realtime.deadline = thread->realtime.constraint + ctime;
        KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SET_RT_DEADLINE) | DBG_FUNC_NONE,
            (uintptr_t)thread_tid(thread), thread->realtime.deadline, thread->realtime.computation, 0);
    }

    /*
     * Clear old quantum, fail-safe computation, etc.
     */
    thread->quantum_remaining = 0;
    thread->computation_metered = 0;
    thread->reason = AST_NONE;
    thread->block_hint = kThreadWaitNone;

    /* Obtain power-relevant interrupt and "platform-idle exit" statistics.
     * We also account for "double hop" thread signaling via
     * the thread callout infrastructure.
     * DRK: consider removing the callout wakeup counters in the future;
     * they're present for verification at the moment.
     */
    if (__improbable(aticontext && !(thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT))) {
        DTRACE_SCHED2(iwakeup, struct thread *, thread, struct proc *, thread->task->bsd_info);

        uint64_t ttd = current_processor()->timer_call_ttd;

        if (ttd) {
            if (ttd <= timer_deadline_tracking_bin_1) {
                thread->thread_timer_wakeups_bin_1++;
            } else if (ttd <= timer_deadline_tracking_bin_2) {
                thread->thread_timer_wakeups_bin_2++;
            }
        }

        ledger_credit_thread(thread, thread->t_ledger,
            task_ledgers.interrupt_wakeups, 1);
        if (pidle) {
            ledger_credit_thread(thread, thread->t_ledger,
                task_ledgers.platform_idle_wakeups, 1);
        }
    } else if (thread_get_tag_internal(cthread) & THREAD_TAG_CALLOUT) {
        /* TODO: what about an interrupt that does a wake taken on a callout thread? */
        if (cthread->callout_woken_from_icontext) {
            ledger_credit_thread(thread, thread->t_ledger,
                task_ledgers.interrupt_wakeups, 1);
            thread->thread_callout_interrupt_wakeups++;

            if (cthread->callout_woken_from_platform_idle) {
                ledger_credit_thread(thread, thread->t_ledger,
                    task_ledgers.platform_idle_wakeups, 1);
                thread->thread_callout_platform_idle_wakeups++;
            }

            cthread->callout_woke_thread = TRUE;
        }
    }

    if (thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT) {
        thread->callout_woken_from_icontext = !!aticontext;
        thread->callout_woken_from_platform_idle = !!pidle;
        thread->callout_woke_thread = FALSE;
    }

#if KPERF
    if (ready_for_runq) {
        kperf_make_runnable(thread, aticontext);
    }
#endif /* KPERF */

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE,
        (uintptr_t)thread_tid(thread), thread->sched_pri, thread->wait_result,
        sched_run_buckets[TH_BUCKET_RUN], 0);

    DTRACE_SCHED2(wakeup, struct thread *, thread, struct proc *, thread->task->bsd_info);

    return ready_for_runq;
}
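/*
 * Call-path sketch (illustrative, not part of the original source): a typical
 * wakeup flows through thread_wakeup_prim() -> waitq_wakeup64_*() ->
 * thread_go() -> thread_unblock(); when thread_unblock() returns TRUE the
 * caller (thread_go) places the thread on a run queue via thread_setrun().
 */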
/*
 * Routine: thread_allowed_for_handoff
 * Purpose:
 *	Check if the thread is allowed for handoff operation
 * Conditions:
 *	thread lock held, IPC locks may be held.
 * TODO: In future, do not allow handoff if threads have different cluster
 * recommendations.
 */
boolean_t
thread_allowed_for_handoff(
    thread_t        thread)
{
    thread_t self = current_thread();

    if (allow_direct_handoff &&
        thread->sched_mode == TH_MODE_REALTIME &&
        self->sched_mode == TH_MODE_REALTIME) {
        return TRUE;
    }

    return FALSE;
}
/*
 * Routine: thread_go
 * Purpose:
 *	Unblock and dispatch thread.
 * Conditions:
 *	thread lock held, IPC locks may be held.
 *	thread must have been pulled from wait queue under same lock hold.
 *	thread must have been waiting
 * Returns:
 *	KERN_SUCCESS - Thread was set running
 *
 * TODO: This should return void
 */
kern_return_t
thread_go(
    thread_t        thread,
    wait_result_t   wresult,
    waitq_options_t option)
{
    thread_t self = current_thread();

    assert_thread_magic(thread);

    assert(thread->at_safe_point == FALSE);
    assert(thread->wait_event == NO_EVENT64);
    assert(thread->waitq == NULL);

    assert(!(thread->state & (TH_TERMINATE | TH_TERMINATE2)));
    assert(thread->state & TH_WAIT);

    if (thread_unblock(thread, wresult)) {
#if SCHED_TRACE_THREAD_WAKEUPS
        backtrace(&thread->thread_wakeup_bt[0],
            (sizeof(thread->thread_wakeup_bt) / sizeof(uintptr_t)), NULL);
#endif
        if ((option & WQ_OPTION_HANDOFF) &&
            thread_allowed_for_handoff(thread)) {
            thread_reference(thread);
            assert(self->handoff_thread == NULL);
            self->handoff_thread = thread;
        } else {
            thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
        }
    }

    return KERN_SUCCESS;
}
/*
 * Routine: thread_mark_wait_locked
 * Purpose:
 *	Mark a thread as waiting. If, given the circumstances,
 *	it doesn't want to wait (i.e. already aborted), then
 *	indicate that in the return value.
 * Conditions:
 *	at splsched() and thread is locked.
 */
__private_extern__
wait_result_t
thread_mark_wait_locked(
    thread_t            thread,
    wait_interrupt_t    interruptible_orig)
{
    boolean_t           at_safe_point;
    wait_interrupt_t    interruptible = interruptible_orig;

    if (thread->state & TH_IDLE) {
        panic("Invalid attempt to wait while running the idle thread");
    }

    assert(!(thread->state & (TH_WAIT | TH_IDLE | TH_UNINT | TH_TERMINATE2 | TH_WAIT_REPORT)));

    /*
     * The thread may have certain types of interrupts/aborts masked
     * off. Even if the wait location says these types of interrupts
     * are OK, we have to honor mask settings (outer-scoped code may
     * not be able to handle aborts at the moment).
     */
    interruptible &= TH_OPT_INTMASK;
    if (interruptible > (thread->options & TH_OPT_INTMASK)) {
        interruptible = thread->options & TH_OPT_INTMASK;
    }

    at_safe_point = (interruptible == THREAD_ABORTSAFE);

    if (interruptible == THREAD_UNINT ||
        !(thread->sched_flags & TH_SFLAG_ABORT) ||
        (!at_safe_point &&
        (thread->sched_flags & TH_SFLAG_ABORTSAFELY))) {
        if (!(thread->state & TH_TERMINATE)) {
            DTRACE_SCHED(sleep);
        }

        int state_bits = TH_WAIT;
        if (!interruptible) {
            state_bits |= TH_UNINT;
        }
        if (thread->sched_call) {
            wait_interrupt_t mask = THREAD_WAIT_NOREPORT_USER;
            if (is_kerneltask(thread->task)) {
                mask = THREAD_WAIT_NOREPORT_KERNEL;
            }
            if ((interruptible_orig & mask) == 0) {
                state_bits |= TH_WAIT_REPORT;
            }
        }
        thread->state |= state_bits;
        thread->at_safe_point = at_safe_point;

        /* TODO: pass this through assert_wait instead, have
         * assert_wait just take a struct as an argument */
        assert(!thread->block_hint);
        thread->block_hint = thread->pending_block_hint;
        thread->pending_block_hint = kThreadWaitNone;

        return thread->wait_result = THREAD_WAITING;
    } else {
        if (thread->sched_flags & TH_SFLAG_ABORTSAFELY) {
            thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
        }
    }
    thread->pending_block_hint = kThreadWaitNone;

    return thread->wait_result = THREAD_INTERRUPTED;
}
/*
 * Routine: thread_interrupt_level
 * Purpose:
 *	Set the maximum interruptible state for the
 *	current thread. The effective value of any
 *	interruptible flag passed into assert_wait
 *	will never exceed this.
 *
 *	Useful for code that must not be interrupted,
 *	but which calls code that doesn't know that.
 * Returns:
 *	The old interrupt level for the thread.
 */
__private_extern__
wait_interrupt_t
thread_interrupt_level(
    wait_interrupt_t new_level)
{
    thread_t thread = current_thread();
    wait_interrupt_t result = thread->options & TH_OPT_INTMASK;

    thread->options = (thread->options & ~TH_OPT_INTMASK) | (new_level & TH_OPT_INTMASK);

    return result;
}
/*
 * assert_wait:
 *
 * Assert that the current thread is about to go to
 * sleep until the specified event occurs.
 */
wait_result_t
assert_wait(
    event_t             event,
    wait_interrupt_t    interruptible)
{
    if (__improbable(event == NO_EVENT)) {
        panic("%s() called with NO_EVENT", __func__);
    }

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
        VM_KERNEL_UNSLIDE_OR_PERM(event), 0, 0, 0, 0);

    struct waitq *waitq;
    waitq = global_eventq(event);
    return waitq_assert_wait64(waitq, CAST_EVENT64_T(event), interruptible, TIMEOUT_WAIT_FOREVER);
}
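/*
 * Usage sketch (illustrative, not part of the original source): the classic
 * wait/wakeup pattern built on these primitives looks like
 *
 *     wait_result_t wr = assert_wait((event_t)&some_object, THREAD_UNINT);
 *     if (wr == THREAD_WAITING) {
 *         wr = thread_block(THREAD_CONTINUE_NULL);
 *     }
 *     ...
 *     // elsewhere, once the condition is satisfied:
 *     thread_wakeup((event_t)&some_object);
 *
 * `some_object` is a placeholder; any unique kernel address serves as an event.
 */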
/*
 * assert_wait_queue:
 *
 * Return the global waitq for the specified event
 */
struct waitq *
assert_wait_queue(
    event_t     event)
{
    return global_eventq(event);
}
wait_result_t
assert_wait_timeout(
    event_t             event,
    wait_interrupt_t    interruptible,
    uint32_t            interval,
    uint32_t            scale_factor)
{
    thread_t            thread = current_thread();
    wait_result_t       wresult;
    uint64_t            deadline;
    spl_t               s;

    if (__improbable(event == NO_EVENT)) {
        panic("%s() called with NO_EVENT", __func__);
    }

    struct waitq *waitq;
    waitq = global_eventq(event);

    s = splsched();
    waitq_lock(waitq);

    clock_interval_to_deadline(interval, scale_factor, &deadline);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
        VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);

    wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
        interruptible,
        TIMEOUT_URGENCY_SYS_NORMAL,
        deadline, TIMEOUT_NO_LEEWAY,
        thread);

    waitq_unlock(waitq);
    splx(s);
    return wresult;
}
wait_result_t
assert_wait_timeout_with_leeway(
    event_t                 event,
    wait_interrupt_t        interruptible,
    wait_timeout_urgency_t  urgency,
    uint32_t                interval,
    uint32_t                leeway,
    uint32_t                scale_factor)
{
    thread_t                thread = current_thread();
    wait_result_t           wresult;
    uint64_t                deadline;
    uint64_t                abstime;
    uint64_t                slop;
    uint64_t                now;
    spl_t                   s;

    if (__improbable(event == NO_EVENT)) {
        panic("%s() called with NO_EVENT", __func__);
    }

    now = mach_absolute_time();
    clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
    deadline = now + abstime;

    clock_interval_to_absolutetime_interval(leeway, scale_factor, &slop);

    struct waitq *waitq;
    waitq = global_eventq(event);

    s = splsched();
    waitq_lock(waitq);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
        VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);

    wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
        interruptible,
        urgency, deadline, slop,
        thread);

    waitq_unlock(waitq);
    splx(s);
    return wresult;
}
wait_result_t
assert_wait_deadline(
    event_t             event,
    wait_interrupt_t    interruptible,
    uint64_t            deadline)
{
    thread_t            thread = current_thread();
    wait_result_t       wresult;
    spl_t               s;

    if (__improbable(event == NO_EVENT)) {
        panic("%s() called with NO_EVENT", __func__);
    }

    struct waitq *waitq;
    waitq = global_eventq(event);

    s = splsched();
    waitq_lock(waitq);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
        VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);

    wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
        interruptible,
        TIMEOUT_URGENCY_SYS_NORMAL, deadline,
        TIMEOUT_NO_LEEWAY, thread);
    waitq_unlock(waitq);
    splx(s);
    return wresult;
}
wait_result_t
assert_wait_deadline_with_leeway(
    event_t                 event,
    wait_interrupt_t        interruptible,
    wait_timeout_urgency_t  urgency,
    uint64_t                deadline,
    uint64_t                leeway)
{
    thread_t                thread = current_thread();
    wait_result_t           wresult;
    spl_t                   s;

    if (__improbable(event == NO_EVENT)) {
        panic("%s() called with NO_EVENT", __func__);
    }

    struct waitq *waitq;
    waitq = global_eventq(event);

    s = splsched();
    waitq_lock(waitq);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
        VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);

    wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
        interruptible,
        urgency, deadline, leeway,
        thread);
    waitq_unlock(waitq);
    splx(s);
    return wresult;
}
/*
 * thread_isoncpu:
 *
 * Return TRUE if a thread is running on a processor such that an AST
 * is needed to pull it out of userspace execution, or if executing in
 * the kernel, bring to a context switch boundary that would cause
 * thread state to be serialized in the thread PCB.
 *
 * Thread locked, returns the same way. While locked, fields
 * like "state" cannot change. "runq" can change only from set to unset.
 */
static inline boolean_t
thread_isoncpu(thread_t thread)
{
    /* Not running or runnable */
    if (!(thread->state & TH_RUN)) {
        return FALSE;
    }

    /* Waiting on a runqueue, not currently running */
    /* TODO: This is invalid - it can get dequeued without thread lock, but not context switched. */
    if (thread->runq != PROCESSOR_NULL) {
        return FALSE;
    }

    /*
     * Thread does not have a stack yet
     * It could be on the stack alloc queue or preparing to be invoked
     */
    if (!thread->kernel_stack) {
        return FALSE;
    }

    /*
     * Thread must be running on a processor, or
     * about to run, or just did run. In all these
     * cases, an AST to the processor is needed
     * to guarantee that the thread is kicked out
     * of userspace and the processor has
     * context switched (and saved register state).
     */
    return TRUE;
}
/*
 * thread_stop:
 *
 * Force a preemption point for a thread and wait
 * for it to stop running on a CPU. If a stronger
 * guarantee is requested, wait until no longer
 * runnable. Arbitrates access among
 * multiple stop requests. (released by unstop)
 *
 * The thread must enter a wait state and stop via a
 * separate means.
 *
 * Returns FALSE if interrupted.
 */
boolean_t
thread_stop(
    thread_t        thread,
    boolean_t       until_not_runnable)
{
    wait_result_t   wresult;
    spl_t           s = splsched();
    boolean_t       oncpu;

    wake_lock(thread);
    thread_lock(thread);

    while (thread->state & TH_SUSP) {
        thread->wake_active = TRUE;
        thread_unlock(thread);

        wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
        wake_unlock(thread);
        splx(s);

        if (wresult == THREAD_WAITING) {
            wresult = thread_block(THREAD_CONTINUE_NULL);
        }

        if (wresult != THREAD_AWAKENED) {
            return FALSE;
        }

        s = splsched();
        wake_lock(thread);
        thread_lock(thread);
    }

    thread->state |= TH_SUSP;

    while ((oncpu = thread_isoncpu(thread)) ||
        (until_not_runnable && (thread->state & TH_RUN))) {
        processor_t processor;

        if (oncpu) {
            assert(thread->state & TH_RUN);
            processor = thread->chosen_processor;
            cause_ast_check(processor);
        }

        thread->wake_active = TRUE;
        thread_unlock(thread);

        wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
        wake_unlock(thread);
        splx(s);

        if (wresult == THREAD_WAITING) {
            wresult = thread_block(THREAD_CONTINUE_NULL);
        }

        if (wresult != THREAD_AWAKENED) {
            thread_unstop(thread);
            return FALSE;
        }

        s = splsched();
        wake_lock(thread);
        thread_lock(thread);
    }

    thread_unlock(thread);
    wake_unlock(thread);
    splx(s);

    /*
     * We return with the thread unlocked. To prevent it from
     * transitioning to a runnable state (or from TH_RUN to
     * being on the CPU), the caller must ensure the thread
     * is stopped via an external means (such as an AST)
     */

    return TRUE;
}
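/*
 * Usage sketch (illustrative, not part of the original source): callers pair
 * thread_stop() with thread_unstop(), e.g.
 *
 *     if (thread_stop(thread, FALSE)) {
 *         // thread is off-CPU here; inspect or manipulate its state
 *         thread_unstop(thread);
 *     } else {
 *         // stop was interrupted; no matching unstop is required
 *     }
 */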
/*
 * thread_unstop:
 *
 * Release a previous stop request and set
 * the thread running if appropriate.
 *
 * Use only after a successful stop operation.
 */
void
thread_unstop(
    thread_t        thread)
{
    spl_t s = splsched();

    wake_lock(thread);
    thread_lock(thread);

    assert((thread->state & (TH_RUN | TH_WAIT | TH_SUSP)) != TH_SUSP);

    if (thread->state & TH_SUSP) {
        thread->state &= ~TH_SUSP;

        if (thread->wake_active) {
            thread->wake_active = FALSE;
            thread_unlock(thread);

            thread_wakeup(&thread->wake_active);
            wake_unlock(thread);
            splx(s);

            return;
        }
    }

    thread_unlock(thread);
    wake_unlock(thread);
    splx(s);
}
/*
 * thread_wait:
 *
 * Wait for a thread to stop running. (non-interruptible)
 *
 */
void
thread_wait(
    thread_t        thread,
    boolean_t       until_not_runnable)
{
    wait_result_t   wresult;
    boolean_t       oncpu;
    processor_t     processor;
    spl_t           s = splsched();

    wake_lock(thread);
    thread_lock(thread);

    /*
     * Wait until not running on a CPU. If stronger requirement
     * desired, wait until not runnable. Assumption: if thread is
     * on CPU, then TH_RUN is set, so we're not waiting in any case
     * where the original, pure "TH_RUN" check would have let us
     * finish.
     */
    while ((oncpu = thread_isoncpu(thread)) ||
        (until_not_runnable && (thread->state & TH_RUN))) {
        if (oncpu) {
            assert(thread->state & TH_RUN);
            processor = thread->chosen_processor;
            cause_ast_check(processor);
        }

        thread->wake_active = TRUE;
        thread_unlock(thread);

        wresult = assert_wait(&thread->wake_active, THREAD_UNINT);
        wake_unlock(thread);
        splx(s);

        if (wresult == THREAD_WAITING) {
            thread_block(THREAD_CONTINUE_NULL);
        }

        s = splsched();
        wake_lock(thread);
        thread_lock(thread);
    }

    thread_unlock(thread);
    wake_unlock(thread);
    splx(s);
}
/*
 * Routine: clear_wait_internal
 *
 *	Clear the wait condition for the specified thread.
 *	Start the thread executing if that is appropriate.
 * Arguments:
 *	thread		thread to awaken
 *	result		Wakeup result the thread should see
 * Conditions:
 *	At splsched
 *	the thread is locked.
 * Returns:
 *	KERN_SUCCESS		thread was rousted out a wait
 *	KERN_FAILURE		thread was waiting but could not be rousted
 *	KERN_NOT_WAITING	thread was not waiting
 */
__private_extern__ kern_return_t
clear_wait_internal(
    thread_t        thread,
    wait_result_t   wresult)
{
    uint32_t        i = LockTimeOutUsec;
    struct waitq    *waitq = thread->waitq;

    do {
        if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT)) {
            return KERN_FAILURE;
        }

        if (waitq != NULL) {
            if (!waitq_pull_thread_locked(waitq, thread)) {
                thread_unlock(thread);
                delay(1);
                if (i > 0 && !machine_timeout_suspended()) {
                    i--;
                }
                thread_lock(thread);
                if (waitq != thread->waitq) {
                    return KERN_NOT_WAITING;
                }
                continue;
            }
        }

        /* TODO: Can we instead assert TH_TERMINATE is not set? */
        if ((thread->state & (TH_WAIT | TH_TERMINATE)) == TH_WAIT) {
            return thread_go(thread, wresult, WQ_OPTION_NONE);
        } else {
            return KERN_NOT_WAITING;
        }
    } while (i > 0);

    panic("clear_wait_internal: deadlock: thread=%p, wq=%p, cpu=%d\n",
        thread, waitq, cpu_number());

    return KERN_FAILURE;
}
/*
 * clear_wait:
 *
 *	Clear the wait condition for the specified thread. Start the thread
 *	executing if that is appropriate.
 *
 * parameters:
 *	thread		thread to awaken
 *	result		Wakeup result the thread should see
 */
kern_return_t
clear_wait(
    thread_t        thread,
    wait_result_t   result)
{
    kern_return_t   ret;
    spl_t           s;

    s = splsched();
    thread_lock(thread);
    ret = clear_wait_internal(thread, result);
    thread_unlock(thread);
    splx(s);
    return ret;
}
/*
 * thread_wakeup_prim:
 *
 * Common routine for thread_wakeup, thread_wakeup_with_result,
 * and thread_wakeup_one.
 *
 */
kern_return_t
thread_wakeup_prim(
    event_t         event,
    boolean_t       one_thread,
    wait_result_t   result)
{
    if (__improbable(event == NO_EVENT)) {
        panic("%s() called with NO_EVENT", __func__);
    }

    struct waitq *wq = global_eventq(event);

    if (one_thread) {
        return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), result, WAITQ_ALL_PRIORITIES);
    } else {
        return waitq_wakeup64_all(wq, CAST_EVENT64_T(event), result, WAITQ_ALL_PRIORITIES);
    }
}
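/*
 * Usage sketch (illustrative, not part of the original source): the
 * thread_wakeup()/thread_wakeup_one() macros defined elsewhere expand to
 * calls of this routine, roughly
 *
 *     thread_wakeup(x)     -> thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
 *     thread_wakeup_one(x) -> thread_wakeup_prim((x), TRUE,  THREAD_AWAKENED)
 */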
/*
 * Wakeup a specified thread if and only if it's waiting for this event
 */
kern_return_t
thread_wakeup_thread(
    event_t         event,
    thread_t        thread)
{
    if (__improbable(event == NO_EVENT)) {
        panic("%s() called with NO_EVENT", __func__);
    }

    if (__improbable(thread == THREAD_NULL)) {
        panic("%s() called with THREAD_NULL", __func__);
    }

    struct waitq *wq = global_eventq(event);

    return waitq_wakeup64_thread(wq, CAST_EVENT64_T(event), thread, THREAD_AWAKENED);
}
/*
 * Wakeup a thread waiting on an event and promote it to a priority.
 *
 * Requires woken thread to un-promote itself when done.
 */
kern_return_t
thread_wakeup_one_with_pri(
    event_t         event,
    int             priority)
{
    if (__improbable(event == NO_EVENT)) {
        panic("%s() called with NO_EVENT", __func__);
    }

    struct waitq *wq = global_eventq(event);

    return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), THREAD_AWAKENED, priority);
}
/*
 * Wakeup a thread waiting on an event,
 * promote it to a priority,
 * and return a reference to the woken thread.
 *
 * Requires woken thread to un-promote itself when done.
 */
thread_t
thread_wakeup_identify(event_t  event,
    int      priority)
{
    if (__improbable(event == NO_EVENT)) {
        panic("%s() called with NO_EVENT", __func__);
    }

    struct waitq *wq = global_eventq(event);

    return waitq_wakeup64_identify(wq, CAST_EVENT64_T(event), THREAD_AWAKENED, priority);
}
/*
 * thread_bind:
 *
 * Force the current thread to execute on the specified processor.
 * Takes effect after the next thread_block().
 *
 * Returns the previous binding. PROCESSOR_NULL means
 * not bound.
 *
 * XXX - DO NOT export this to users - XXX
 */
processor_t
thread_bind(
    processor_t     processor)
{
    thread_t        self = current_thread();
    spl_t           s;
    processor_t     prev;

    s = splsched();
    thread_lock(self);

    prev = thread_bind_internal(self, processor);

    thread_unlock(self);
    splx(s);

    return prev;
}
/*
 * thread_bind_internal:
 *
 * If the specified thread is not the current thread, and it is currently
 * running on another CPU, a remote AST must be sent to that CPU to cause
 * the thread to migrate to its bound processor. Otherwise, the migration
 * will occur at the next quantum expiration or blocking point.
 *
 * When the thread is the current thread, an explicit thread_block() should
 * be used to force the current processor to context switch away and
 * let the thread migrate to the bound processor.
 *
 * Thread must be locked, and at splsched.
 */
static processor_t
thread_bind_internal(
    thread_t        thread,
    processor_t     processor)
{
    processor_t     prev;

    /* <rdar://problem/15102234> */
    assert(thread->sched_pri < BASEPRI_RTQUEUES);
    /* A thread can't be bound if it's sitting on a (potentially incorrect) runqueue */
    assert(thread->runq == PROCESSOR_NULL);

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_THREAD_BIND), thread_tid(thread), processor ? (uintptr_t)processor->cpu_id : (uintptr_t)-1, 0, 0, 0);

    prev = thread->bound_processor;
    thread->bound_processor = processor;

    return prev;
}
/*
 * thread_vm_bind_group_add:
 *
 * The "VM bind group" is a special mechanism to mark a collection
 * of threads from the VM subsystem that, in general, should be scheduled
 * with only one CPU of parallelism. To accomplish this, we initially
 * bind all the threads to the master processor, which has the effect
 * that only one of the threads in the group can execute at once, including
 * preempting threads in the group that are a lower priority. Future
 * implementations may use more dynamic mechanisms to prevent the collection
 * of VM threads from using more CPU time than desired.
 *
 * The current implementation can result in priority inversions where
 * compute-bound priority 95 or realtime threads that happen to have
 * landed on the master processor prevent the VM threads from running.
 * When this situation is detected, we unbind the threads for one
 * scheduler tick to allow the scheduler to run the threads on
 * additional CPUs, before restoring the binding (assuming high latency
 * is no longer a problem).
 */

/*
 * The current max is provisioned for:
 * vm_compressor_swap_trigger_thread (92)
 * 2 x vm_pageout_iothread_internal (92) when vm_restricted_to_single_processor==TRUE
 * vm_pageout_continue (92)
 * memorystatus_thread (95)
 */
#define MAX_VM_BIND_GROUP_COUNT (5)
decl_simple_lock_data(static, sched_vm_group_list_lock);
static thread_t sched_vm_group_thread_list[MAX_VM_BIND_GROUP_COUNT];
static int sched_vm_group_thread_count;
static boolean_t sched_vm_group_temporarily_unbound = FALSE;

void
thread_vm_bind_group_add(void)
{
    thread_t self = current_thread();

    thread_reference_internal(self);
    self->options |= TH_OPT_SCHED_VM_GROUP;

    simple_lock(&sched_vm_group_list_lock, LCK_GRP_NULL);
    assert(sched_vm_group_thread_count < MAX_VM_BIND_GROUP_COUNT);
    sched_vm_group_thread_list[sched_vm_group_thread_count++] = self;
    simple_unlock(&sched_vm_group_list_lock);

    thread_bind(master_processor);

    /* Switch to bound processor if not already there */
    thread_block(THREAD_CONTINUE_NULL);
}
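/*
 * Usage sketch (illustrative, not part of the original source): a VM worker
 * thread opts into the bind group from its own context early in its
 * continuation, e.g.
 *
 *     static void
 *     vm_some_worker_thread(void)          // hypothetical worker
 *     {
 *         thread_vm_bind_group_add();      // binds to master_processor
 *         for (;;) {
 *             // ... do paging work, block, repeat ...
 *         }
 *     }
 */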
static void
sched_vm_group_maintenance(void)
{
    uint64_t ctime = mach_absolute_time();
    uint64_t longtime = ctime - sched_tick_interval;
    int i;
    spl_t s;
    boolean_t high_latency_observed = FALSE;
    boolean_t runnable_and_not_on_runq_observed = FALSE;
    boolean_t bind_target_changed = FALSE;
    processor_t bind_target = PROCESSOR_NULL;

    /* Make sure nobody attempts to add new threads while we are enumerating them */
    simple_lock(&sched_vm_group_list_lock, LCK_GRP_NULL);

    s = splsched();

    for (i = 0; i < sched_vm_group_thread_count; i++) {
        thread_t thread = sched_vm_group_thread_list[i];
        assert(thread != THREAD_NULL);
        thread_lock(thread);
        if ((thread->state & (TH_RUN | TH_WAIT)) == TH_RUN) {
            if (thread->runq != PROCESSOR_NULL && thread->last_made_runnable_time < longtime) {
                high_latency_observed = TRUE;
            } else if (thread->runq == PROCESSOR_NULL) {
                /* There are some cases where a thread may be transitioning that also fall into this case */
                runnable_and_not_on_runq_observed = TRUE;
            }
        }
        thread_unlock(thread);

        if (high_latency_observed && runnable_and_not_on_runq_observed) {
            /* All the things we are looking for are true, stop looking */
            break;
        }
    }

    splx(s);

    if (sched_vm_group_temporarily_unbound) {
        /* If we turned off binding, make sure everything is OK before rebinding */
        if (!high_latency_observed) {
            bind_target_changed = TRUE;
            bind_target = master_processor;
            sched_vm_group_temporarily_unbound = FALSE; /* might be reset to TRUE if change cannot be completed */
        }
    } else {
        /*
         * Check if we're in a bad state, which is defined by high
         * latency with no core currently executing a thread. If a
         * single thread is making progress on a CPU, that means the
         * binding concept to reduce parallelism is working as
         * designed.
         */
        if (high_latency_observed && !runnable_and_not_on_runq_observed) {
            bind_target_changed = TRUE;
            bind_target = PROCESSOR_NULL;
            sched_vm_group_temporarily_unbound = TRUE;
        }
    }

    if (bind_target_changed) {
        s = splsched();
        for (i = 0; i < sched_vm_group_thread_count; i++) {
            thread_t thread = sched_vm_group_thread_list[i];
            boolean_t removed;
            assert(thread != THREAD_NULL);

            thread_lock(thread);
            removed = thread_run_queue_remove(thread);
            if (removed || ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT)) {
                thread_bind_internal(thread, bind_target);
            } else {
                /*
                 * Thread was in the middle of being context-switched-to,
                 * or was in the process of blocking. To avoid switching the bind
                 * state out mid-flight, defer the change if possible.
                 */
                if (bind_target == PROCESSOR_NULL) {
                    thread_bind_internal(thread, bind_target);
                } else {
                    sched_vm_group_temporarily_unbound = TRUE; /* next pass will try again */
                }
            }

            if (removed) {
                thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
            }
            thread_unlock(thread);
        }
        splx(s);
    }

    simple_unlock(&sched_vm_group_list_lock);
}
/* Invoked prior to idle entry to determine if, on SMT capable processors, an SMT
 * rebalancing opportunity exists when a core is (instantaneously) idle, but
 * other SMT-capable cores may be over-committed. TODO: some possible negatives:
 * IPI thrash if this core does not remain idle following the load balancing ASTs
 * Idle "thrash", when IPI issue is followed by idle entry/core power down
 * followed by a wakeup shortly thereafter.
 */

#if (DEVELOPMENT || DEBUG)
int sched_smt_balance = 1;
#endif

/* Invoked with pset locked, returns with pset unlocked */
void
sched_SMT_balance(processor_t cprocessor, processor_set_t cpset)
{
    processor_t ast_processor = NULL;

#if (DEVELOPMENT || DEBUG)
    if (__improbable(sched_smt_balance == 0)) {
        goto smt_balance_exit;
    }
#endif

    assert(cprocessor == current_processor());
    if (cprocessor->is_SMT == FALSE) {
        goto smt_balance_exit;
    }

    processor_t sib_processor = cprocessor->processor_secondary ? cprocessor->processor_secondary : cprocessor->processor_primary;

    /* Determine if both this processor and its sibling are idle,
     * indicating an SMT rebalancing opportunity.
     */
    if (sib_processor->state != PROCESSOR_IDLE) {
        goto smt_balance_exit;
    }

    processor_t sprocessor;

    sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
    uint64_t running_secondary_map = (cpset->cpu_state_map[PROCESSOR_RUNNING] &
        ~cpset->primary_map);
    for (int cpuid = lsb_first(running_secondary_map); cpuid >= 0; cpuid = lsb_next(running_secondary_map, cpuid)) {
        sprocessor = processor_array[cpuid];
        if ((sprocessor->processor_primary->state == PROCESSOR_RUNNING) &&
            (sprocessor->current_pri < BASEPRI_RTQUEUES)) {
            ipi_type = sched_ipi_action(sprocessor, NULL, false, SCHED_IPI_EVENT_SMT_REBAL);
            if (ipi_type != SCHED_IPI_NONE) {
                assert(sprocessor != cprocessor);
                ast_processor = sprocessor;
                break;
            }
        }
    }

smt_balance_exit:
    pset_unlock(cpset);

    if (ast_processor) {
        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_SMT_BALANCE), ast_processor->cpu_id, ast_processor->state, ast_processor->processor_primary->state, 0, 0);
        sched_ipi_perform(ast_processor, ipi_type);
    }
}
static cpumap_t
pset_available_cpumap(processor_set_t pset)
{
    return (pset->cpu_state_map[PROCESSOR_IDLE] | pset->cpu_state_map[PROCESSOR_DISPATCHING] | pset->cpu_state_map[PROCESSOR_RUNNING]) &
           pset->recommended_bitmask;
}

static cpumap_t
pset_available_but_not_running_cpumap(processor_set_t pset)
{
    return (pset->cpu_state_map[PROCESSOR_IDLE] | pset->cpu_state_map[PROCESSOR_DISPATCHING]) &
           pset->recommended_bitmask;
}

bool
pset_has_stealable_threads(processor_set_t pset)
{
    pset_assert_locked(pset);

    cpumap_t avail_map = pset_available_but_not_running_cpumap(pset);
    /*
     * Secondary CPUs never steal, so allow stealing of threads if there are more threads than
     * available primary CPUs
     */
    avail_map &= pset->primary_map;

    return (pset->pset_runq.count > 0) && ((pset->pset_runq.count + rt_runq_count(pset)) > bit_count(avail_map));
}
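/*
 * Worked example (illustrative, not part of the original source): with
 * primary_map = 0b1111 and only one primary CPU both recommended and
 * idle-or-dispatching, bit_count(avail_map) == 1; if the pset run queue holds
 * 2 threads and the RT run queue none, then 2 + 0 > 1, so
 * pset_has_stealable_threads() returns true.
 */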
/*
 * Called with pset locked, on a processor that is committing to run a new thread
 * Will transition an idle or dispatching processor to running as it picks up
 * the first new thread from the idle thread.
 */
static void
pset_commit_processor_to_new_thread(processor_set_t pset, processor_t processor, thread_t new_thread)
{
    pset_assert_locked(pset);

    if (processor->state == PROCESSOR_DISPATCHING || processor->state == PROCESSOR_IDLE) {
        assert(current_thread() == processor->idle_thread);

        /*
         * Dispatching processor is now committed to running new_thread,
         * so change its state to PROCESSOR_RUNNING.
         */
        pset_update_processor_state(pset, processor, PROCESSOR_RUNNING);
    } else {
        assert((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_SHUTDOWN));
    }

    processor_state_update_from_thread(processor, new_thread);

    if (new_thread->sched_pri >= BASEPRI_RTQUEUES) {
        bit_set(pset->realtime_map, processor->cpu_id);
    } else {
        bit_clear(pset->realtime_map, processor->cpu_id);
    }

    pset_node_t node = pset->node;

    if (bit_count(node->pset_map) == 1) {
        /* Node has only a single pset, so skip node pset map updates */
        return;
    }

    cpumap_t avail_map = pset_available_cpumap(pset);

    if (new_thread->sched_pri >= BASEPRI_RTQUEUES) {
        if ((avail_map & pset->realtime_map) == avail_map) {
            /* No more non-RT CPUs in this pset */
            atomic_bit_clear(&node->pset_non_rt_map, pset->pset_id, memory_order_relaxed);
        }
        avail_map &= pset->primary_map;
        if ((avail_map & pset->realtime_map) == avail_map) {
            /* No more non-RT primary CPUs in this pset */
            atomic_bit_clear(&node->pset_non_rt_primary_map, pset->pset_id, memory_order_relaxed);
        }
    } else {
        if ((avail_map & pset->realtime_map) != avail_map) {
            if (!bit_test(atomic_load(&node->pset_non_rt_map), pset->pset_id)) {
                atomic_bit_set(&node->pset_non_rt_map, pset->pset_id, memory_order_relaxed);
            }
        }
        avail_map &= pset->primary_map;
        if ((avail_map & pset->realtime_map) != avail_map) {
            if (!bit_test(atomic_load(&node->pset_non_rt_primary_map), pset->pset_id)) {
                atomic_bit_set(&node->pset_non_rt_primary_map, pset->pset_id, memory_order_relaxed);
            }
        }
    }
}
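/*
 * Worked example (illustrative, not part of the original source): suppose
 * avail_map = 0b1111 and, after committing an RT thread to CPU 0,
 * realtime_map = 0b0001. Then (avail_map & realtime_map) != avail_map, so the
 * pset keeps (or re-sets) its bit in node->pset_non_rt_map, advertising that
 * it still has non-RT CPUs available; only when every available CPU is
 * running an RT thread is the bit cleared.
 */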
static processor_t choose_processor_for_realtime_thread(processor_set_t pset, processor_t skip_processor, bool consider_secondaries);
static bool all_available_primaries_are_running_realtime_threads(processor_set_t pset);
#if defined(__x86_64__)
static bool these_processors_are_running_realtime_threads(processor_set_t pset, uint64_t these_map);
#endif
static bool sched_ok_to_run_realtime_thread(processor_set_t pset, processor_t processor);
static bool processor_is_fast_track_candidate_for_realtime_thread(processor_set_t pset, processor_t processor);
int sched_allow_rt_smt = 1;
int sched_avoid_cpu0 = 1;
1924 * Select a new thread for the current processor to execute.
1926 * May select the current thread, which must be locked.
1929 thread_select(thread_t thread
,
1930 processor_t processor
,
1933 processor_set_t pset
= processor
->processor_set
;
1934 thread_t new_thread
= THREAD_NULL
;
1936 assert(processor
== current_processor());
1937 assert((thread
->state
& (TH_RUN
| TH_TERMINATE2
)) == TH_RUN
);
1941 * Update the priority.
1943 if (SCHED(can_update_priority
)(thread
)) {
1944 SCHED(update_priority
)(thread
);
1949 processor_state_update_from_thread(processor
, thread
);
1952 /* Acknowledge any pending IPIs here with pset lock held */
1953 bit_clear(pset
->pending_AST_URGENT_cpu_mask
, processor
->cpu_id
);
1954 bit_clear(pset
->pending_AST_PREEMPT_cpu_mask
, processor
->cpu_id
);
1956 #if defined(CONFIG_SCHED_DEFERRED_AST)
1957 bit_clear(pset
->pending_deferred_AST_cpu_mask
, processor
->cpu_id
);
1960 bool secondary_can_only_run_realtime_thread
= false;
1962 assert(processor
->state
!= PROCESSOR_OFF_LINE
);
1964 if (!processor
->is_recommended
) {
1966 * The performance controller has provided a hint to not dispatch more threads,
1967 * unless they are bound to us (and thus we are the only option
1969 if (!SCHED(processor_bound_count
)(processor
)) {
1972 } else if (processor
->processor_primary
!= processor
) {
1974 * Should this secondary SMT processor attempt to find work? For pset runqueue systems,
1975 * we should look for work only under the same conditions that choose_processor()
1976 * would have assigned work, which is when all primary processors have been assigned work.
1978 * An exception is that bound threads are dispatched to a processor without going through
1979 * choose_processor(), so in those cases we should continue trying to dequeue work.
1981 if (!SCHED(processor_bound_count
)(processor
)) {
1982 if ((pset
->recommended_bitmask
& pset
->primary_map
& pset
->cpu_state_map
[PROCESSOR_IDLE
]) != 0) {
1987 * TODO: What if a secondary core beat an idle primary to waking up from an IPI?
1988 * Should it dequeue immediately, or spin waiting for the primary to wake up?
1991 /* There are no idle primaries */
1993 if (processor
->processor_primary
->current_pri
>= BASEPRI_RTQUEUES
) {
1994 bool secondary_can_run_realtime_thread
= sched_allow_rt_smt
&& rt_runq_count(pset
) && all_available_primaries_are_running_realtime_threads(pset
);
1995 if (!secondary_can_run_realtime_thread
) {
1998 secondary_can_only_run_realtime_thread
= true;
2004 * Test to see if the current thread should continue
2005 * to run on this processor. Must not be attempting to wait, and not
2006 * bound to a different processor, nor be in the wrong
2007 * processor set, nor be forced to context switch by TH_SUSP.
2009 * Note that there are never any RT threads in the regular runqueue.
2011 * This code is very insanely tricky.
2014 /* i.e. not waiting, not TH_SUSP'ed */
2015 bool still_running
= ((thread
->state
& (TH_TERMINATE
| TH_IDLE
| TH_WAIT
| TH_RUN
| TH_SUSP
)) == TH_RUN
);
2018 * Threads running on SMT processors are forced to context switch. Don't rebalance realtime threads.
2019 * TODO: This should check if it's worth it to rebalance, i.e. 'are there any idle primary processors'
2020 * <rdar://problem/47907700>
2022 * A yielding thread shouldn't be forced to context switch.
2025 bool is_yielding
= (*reason
& AST_YIELD
) == AST_YIELD
;
2027 bool needs_smt_rebalance
= !is_yielding
&& thread
->sched_pri
< BASEPRI_RTQUEUES
&& processor
->processor_primary
!= processor
;
2029 bool affinity_mismatch
= thread
->affinity_set
!= AFFINITY_SET_NULL
&& thread
->affinity_set
->aset_pset
!= pset
;
2031 bool bound_elsewhere
= thread
->bound_processor
!= PROCESSOR_NULL
&& thread
->bound_processor
!= processor
;
2033 bool avoid_processor
= !is_yielding
&& SCHED(avoid_processor_enabled
) && SCHED(thread_avoid_processor
)(processor
, thread
);
2035 if (still_running
&& !needs_smt_rebalance
&& !affinity_mismatch
&& !bound_elsewhere
&& !avoid_processor
) {
2037 * This thread is eligible to keep running on this processor.
2039 * RT threads with un-expired quantum stay on processor,
2040 * unless there's a valid RT thread with an earlier deadline.
2042 if (thread
->sched_pri
>= BASEPRI_RTQUEUES
&& processor
->first_timeslice
) {
2043 if (rt_runq_count(pset
) > 0) {
2044 thread_t next_rt
= qe_queue_first(&SCHED(rt_runq
)(pset
)->queue
, struct thread
, runq_links
);
2046 if (next_rt
->realtime
.deadline
< processor
->deadline
&&
2047 (next_rt
->bound_processor
== PROCESSOR_NULL
||
2048 next_rt
->bound_processor
== processor
)) {
2049 /* The next RT thread is better, so pick it off the runqueue. */
2050 goto pick_new_rt_thread
;
2054 /* This is still the best RT thread to run. */
2055 processor
->deadline
= thread
->realtime
.deadline
;
2057 sched_update_pset_load_average(pset
, 0);
2059 processor_t next_rt_processor
= PROCESSOR_NULL
;
2060 sched_ipi_type_t next_rt_ipi_type
= SCHED_IPI_NONE
;
2062 if (rt_runq_count(pset
) - bit_count(pset
->pending_AST_URGENT_cpu_mask
) > 0) {
2063 next_rt_processor
= choose_processor_for_realtime_thread(pset
, processor
, true);
2064 if (next_rt_processor
) {
2065 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED
, MACH_SCHED_CHOOSE_PROCESSOR
) | DBG_FUNC_NONE
,
2066 (uintptr_t)0, (uintptr_t)-4, next_rt_processor
->cpu_id
, next_rt_processor
->state
, 0);
2067 if (next_rt_processor
->state
== PROCESSOR_IDLE
) {
2068 pset_update_processor_state(pset
, next_rt_processor
, PROCESSOR_DISPATCHING
);
2070 next_rt_ipi_type
= sched_ipi_action(next_rt_processor
, NULL
, false, SCHED_IPI_EVENT_PREEMPT
);
2075 if (next_rt_processor
) {
2076 sched_ipi_perform(next_rt_processor
, next_rt_ipi_type
);
2082 if ((rt_runq_count(pset
) == 0) &&
2083 SCHED(processor_queue_has_priority
)(processor
, thread
->sched_pri
, TRUE
) == FALSE
) {
2084 /* This thread is still the highest priority runnable (non-idle) thread */
2085 processor
->deadline
= UINT64_MAX
;
2087 sched_update_pset_load_average(pset
, 0);
2094 * This processor must context switch.
2095 * If it's due to a rebalance, we should aggressively find this thread a new home.
2097 if (needs_smt_rebalance
|| affinity_mismatch
|| bound_elsewhere
|| avoid_processor
) {
2098 *reason
|= AST_REBALANCE
;
2102 bool secondary_forced_idle
= ((processor
->processor_secondary
!= PROCESSOR_NULL
) &&
2103 (thread_no_smt(thread
) || (thread
->sched_pri
>= BASEPRI_RTQUEUES
)) &&
2104 (processor
->processor_secondary
->state
== PROCESSOR_IDLE
));
		/* OK, so we're not going to run the current thread. Look at the RT queue. */
		bool ok_to_run_realtime_thread = sched_ok_to_run_realtime_thread(pset, processor);
		if ((rt_runq_count(pset) > 0) && ok_to_run_realtime_thread) {
			thread_t next_rt = qe_queue_first(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);

			if (__probable((next_rt->bound_processor == PROCESSOR_NULL ||
			    (next_rt->bound_processor == processor)))) {
pick_new_rt_thread:
				new_thread = qe_dequeue_head(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);

				new_thread->runq = PROCESSOR_NULL;
				SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
				rt_runq_count_decr(pset);

				processor->deadline = new_thread->realtime.deadline;

				pset_commit_processor_to_new_thread(pset, processor, new_thread);

				sched_update_pset_load_average(pset, 0);

				processor_t ast_processor = PROCESSOR_NULL;
				processor_t next_rt_processor = PROCESSOR_NULL;
				sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
				sched_ipi_type_t next_rt_ipi_type = SCHED_IPI_NONE;

				if (processor->processor_secondary != NULL) {
					processor_t sprocessor = processor->processor_secondary;
					if ((sprocessor->state == PROCESSOR_RUNNING) || (sprocessor->state == PROCESSOR_DISPATCHING)) {
						ipi_type = sched_ipi_action(sprocessor, NULL, false, SCHED_IPI_EVENT_SMT_REBAL);
						ast_processor = sprocessor;
					}
				}

				if (rt_runq_count(pset) - bit_count(pset->pending_AST_URGENT_cpu_mask) > 0) {
					next_rt_processor = choose_processor_for_realtime_thread(pset, processor, true);
					if (next_rt_processor) {
						SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
						    (uintptr_t)0, (uintptr_t)-5, next_rt_processor->cpu_id, next_rt_processor->state, 0);
						if (next_rt_processor->state == PROCESSOR_IDLE) {
							pset_update_processor_state(pset, next_rt_processor, PROCESSOR_DISPATCHING);
						}
						next_rt_ipi_type = sched_ipi_action(next_rt_processor, NULL, false, SCHED_IPI_EVENT_PREEMPT);
					}
				}
				pset_unlock(pset);

				if (ast_processor) {
					sched_ipi_perform(ast_processor, ipi_type);
				}

				if (next_rt_processor) {
					sched_ipi_perform(next_rt_processor, next_rt_ipi_type);
				}

				return new_thread;
			}
		}
		if (secondary_can_only_run_realtime_thread) {
			goto idle;
		}

		processor->deadline = UINT64_MAX;
		/* No RT threads, so let's look at the regular threads. */
		if ((new_thread = SCHED(choose_thread)(processor, MINPRI, *reason)) != THREAD_NULL) {
			pset_commit_processor_to_new_thread(pset, processor, new_thread);
			sched_update_pset_load_average(pset, 0);

			processor_t ast_processor = PROCESSOR_NULL;
			sched_ipi_type_t ipi_type = SCHED_IPI_NONE;

			processor_t sprocessor = processor->processor_secondary;
			if ((sprocessor != NULL) && (sprocessor->state == PROCESSOR_RUNNING)) {
				if (thread_no_smt(new_thread)) {
					ipi_type = sched_ipi_action(sprocessor, NULL, false, SCHED_IPI_EVENT_SMT_REBAL);
					ast_processor = sprocessor;
				}
			} else if (secondary_forced_idle && !thread_no_smt(new_thread) && pset_has_stealable_threads(pset)) {
				pset_update_processor_state(pset, sprocessor, PROCESSOR_DISPATCHING);
				ipi_type = sched_ipi_action(sprocessor, NULL, true, SCHED_IPI_EVENT_PREEMPT);
				ast_processor = sprocessor;
			}
			pset_unlock(pset);

			if (ast_processor) {
				sched_ipi_perform(ast_processor, ipi_type);
			}

			return new_thread;
		}
		if (processor->must_idle) {
			processor->must_idle = false;
			goto idle;
		}
		if (SCHED(steal_thread_enabled)(pset) && (processor->processor_primary == processor)) {
			/*
			 * No runnable threads, attempt to steal
			 * from other processors. Returns with pset lock dropped.
			 */

			if ((new_thread = SCHED(steal_thread)(pset)) != THREAD_NULL) {
				/*
				 * Avoid taking the pset_lock unless it is necessary to change state.
				 * It's safe to read processor->state here, as only the current processor can change state
				 * from this point (interrupts are disabled and this processor is committed to run new_thread).
				 */
				if (processor->state == PROCESSOR_DISPATCHING || processor->state == PROCESSOR_IDLE) {
					pset_lock(pset);
					pset_commit_processor_to_new_thread(pset, processor, new_thread);
					pset_unlock(pset);
				} else {
					assert((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_SHUTDOWN));
					processor_state_update_from_thread(processor, new_thread);
				}

				return new_thread;
			}

			/*
			 * If other threads have appeared, shortcut
			 * around again.
			 */
			if (!SCHED(processor_queue_empty)(processor) || (ok_to_run_realtime_thread && (rt_runq_count(pset) > 0))) {
				continue;
			}

			pset_lock(pset);

			/* Someone selected this processor while we had dropped the lock */
			if (bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
				goto restart;
			}
		}

idle:
		/*
		 * Nothing is runnable, so set this processor idle if it
		 * was running.
		 */
		if ((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_DISPATCHING)) {
			pset_update_processor_state(pset, processor, PROCESSOR_IDLE);
			processor_state_update_idle(processor);
		}

		/* Invoked with pset locked, returns with pset unlocked */
		SCHED(processor_balance)(processor, pset);

		new_thread = processor->idle_thread;
	} while (new_thread == THREAD_NULL);

	return new_thread;
}
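/*
 * Note on the selection order above: a processor first tries to keep its
 * current thread, then drains the pset's realtime run queue (earliest
 * deadline first), then the regular run queues, then attempts to steal from
 * other processors, and finally parks in its idle thread. The surrounding
 * do/while loop repeats the whole sequence whenever a candidate evaporates
 * while the pset lock was dropped.
 */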
/*
 *	thread_invoke
 *
 *	Called at splsched with neither thread locked.
 *
 *	Perform a context switch and start executing the new thread.
 *
 *	Returns FALSE when the context switch didn't happen.
 *	The reference to the new thread is still consumed.
 *
 *	"self" is what is currently running on the processor,
 *	"thread" is the new thread to context switch to
 *	(which may be the same thread in some cases)
 */
static boolean_t
thread_invoke(
	thread_t        self,
	thread_t        thread,
	ast_t           reason)
{
	if (__improbable(get_preemption_level() != 0)) {
		int pl = get_preemption_level();
		panic("thread_invoke: preemption_level %d, possible cause: %s",
		    pl, (pl < 0 ? "unlocking an unlocked mutex or spinlock" :
		    "blocking while holding a spinlock, or within interrupt context"));
	}

	thread_continue_t       continuation = self->continuation;
	void                    *parameter = self->parameter;
	processor_t             processor;

	uint64_t ctime = mach_absolute_time();

#ifdef CONFIG_MACH_APPROXIMATE_TIME
	commpage_update_mach_approximate_time(ctime);
#endif

#if defined(CONFIG_SCHED_TIMESHARE_CORE)
	if (!((thread->state & TH_IDLE) != 0 ||
	    ((reason & AST_HANDOFF) && self->sched_mode == TH_MODE_REALTIME))) {
		sched_timeshare_consider_maintenance(ctime);
	}
#endif

#if MONOTONIC
	mt_sched_update(self);
#endif /* MONOTONIC */

	assert_thread_magic(self);
	assert(self == current_thread());
	assert(self->runq == PROCESSOR_NULL);
	assert((self->state & (TH_RUN | TH_TERMINATE2)) == TH_RUN);

	thread_lock(thread);

	assert_thread_magic(thread);
	assert((thread->state & (TH_RUN | TH_WAIT | TH_UNINT | TH_TERMINATE | TH_TERMINATE2)) == TH_RUN);
	assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == current_processor());
	assert(thread->runq == PROCESSOR_NULL);

	/* Reload precise timing global policy to thread-local policy */
	thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);

	/* Update SFI class based on other factors */
	thread->sfi_class = sfi_thread_classify(thread);

	/* Update the same_pri_latency for the thread (used by perfcontrol callouts) */
	thread->same_pri_latency = ctime - thread->last_basepri_change_time;
	/*
	 * In case a base_pri update happened between the timestamp and
	 * taking the thread lock
	 */
	if (ctime <= thread->last_basepri_change_time) {
		thread->same_pri_latency = ctime - thread->last_made_runnable_time;
	}

	/* Allow realtime threads to hang onto a stack. */
	if ((self->sched_mode == TH_MODE_REALTIME) && !self->reserved_stack) {
		self->reserved_stack = self->kernel_stack;
	}

	/* Prepare for spin debugging */
#if INTERRUPT_MASKED_DEBUG
	ml_spin_debug_clear(thread);
#endif
	if (continuation != NULL) {
		if (!thread->kernel_stack) {
			/*
			 * If we are using a privileged stack,
			 * check to see whether we can exchange it with
			 * that of the other thread.
			 */
			if (self->kernel_stack == self->reserved_stack && !thread->reserved_stack) {
				goto need_stack;
			}

			/*
			 * Context switch by performing a stack handoff.
			 * Requires both threads to be parked in a continuation.
			 */
			continuation = thread->continuation;
			parameter = thread->parameter;

			processor = current_processor();
			processor->active_thread = thread;
			processor_state_update_from_thread(processor, thread);

			if (thread->last_processor != processor && thread->last_processor != NULL) {
				if (thread->last_processor->processor_set != processor->processor_set) {
					thread->ps_switch++;
				}
			}
			thread->last_processor = processor;
			ast_context(thread);

			thread_unlock(thread);

			self->reason = reason;

			processor->last_dispatch = ctime;
			self->last_run_time = ctime;
			processor_timer_switch_thread(ctime, &thread->system_timer);
			timer_update(&thread->runnable_timer, ctime);
			processor->kernel_timer = &thread->system_timer;

			/*
			 * Since non-precise user/kernel time doesn't update the state timer
			 * during privilege transitions, synthesize an event now.
			 */
			if (!thread->precise_user_kernel_time) {
				timer_update(processor->current_state, ctime);
			}

			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			    MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
			    self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);

			if ((thread->chosen_processor != processor) && (thread->chosen_processor != PROCESSOR_NULL)) {
				SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED) | DBG_FUNC_NONE,
				    (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
			}

			DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);

			SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);

			kperf_off_cpu(self);

			/*
			 * This is where we actually switch thread identity,
			 * and address space if required. However, register
			 * state is not switched - this routine leaves the
			 * stack and register state active on the current CPU.
			 */
			TLOG(1, "thread_invoke: calling stack_handoff\n");
			stack_handoff(self, thread);

			/* 'self' is now off core */
			assert(thread == current_thread_volatile());

			DTRACE_SCHED(on__cpu);

			kperf_on_cpu(thread, continuation, NULL);

			thread_dispatch(self, thread);

			/* Old thread's stack has been moved to the new thread, so explicitly
			 * unpoison it */
			kasan_unpoison_stack(thread->kernel_stack, kernel_stack_size);

			thread->continuation = thread->parameter = NULL;

			boolean_t enable_interrupts = TRUE;

			/* idle thread needs to stay interrupts-disabled */
			if ((thread->state & TH_IDLE)) {
				enable_interrupts = FALSE;
			}

			assert(continuation);
			call_continuation(continuation, parameter,
			    thread->wait_result, enable_interrupts);
			/*NOTREACHED*/
		} else if (thread == self) {
			/* same thread but with continuation */
			ast_context(self);

			thread_unlock(self);

			kperf_on_cpu(thread, continuation, NULL);

			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			    MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
			    self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);

			/* stack handoff to self - no thread_dispatch(), so clear the stack
			 * and free the fakestack directly */
			kasan_fakestack_drop(self);
			kasan_fakestack_gc(self);
			kasan_unpoison_stack(self->kernel_stack, kernel_stack_size);

			self->continuation = self->parameter = NULL;

			boolean_t enable_interrupts = TRUE;

			/* idle thread needs to stay interrupts-disabled */
			if ((self->state & TH_IDLE)) {
				enable_interrupts = FALSE;
			}

			call_continuation(continuation, parameter,
			    self->wait_result, enable_interrupts);
			/*NOTREACHED*/
		}
	} else {
		/*
		 * Check that the other thread has a stack
		 */
		if (!thread->kernel_stack) {
need_stack:
			if (!stack_alloc_try(thread)) {
				thread_unlock(thread);
				thread_stack_enqueue(thread);
				return FALSE;
			}
		} else if (thread == self) {
			ast_context(self);
			thread_unlock(self);

			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			    MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
			    self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);

			return TRUE;
		}
	}
	/*
	 * Context switch by full context save.
	 */
	processor = current_processor();
	processor->active_thread = thread;
	processor_state_update_from_thread(processor, thread);

	if (thread->last_processor != processor && thread->last_processor != NULL) {
		if (thread->last_processor->processor_set != processor->processor_set) {
			thread->ps_switch++;
		}
	}
	thread->last_processor = processor;
	ast_context(thread);

	thread_unlock(thread);

	self->reason = reason;

	processor->last_dispatch = ctime;
	self->last_run_time = ctime;
	processor_timer_switch_thread(ctime, &thread->system_timer);
	timer_update(&thread->runnable_timer, ctime);
	processor->kernel_timer = &thread->system_timer;

	/*
	 * Since non-precise user/kernel time doesn't update the state timer
	 * during privilege transitions, synthesize an event now.
	 */
	if (!thread->precise_user_kernel_time) {
		timer_update(processor->current_state, ctime);
	}

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
	    self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);

	if ((thread->chosen_processor != processor) && (thread->chosen_processor != NULL)) {
		SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED) | DBG_FUNC_NONE,
		    (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
	}

	DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);

	SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);

	kperf_off_cpu(self);

	/*
	 * This is where we actually switch register context,
	 * and address space if required. We will next run
	 * as a result of a subsequent context switch.
	 *
	 * Once registers are switched and the processor is running "thread",
	 * the stack variables and non-volatile registers will contain whatever
	 * was there the last time that thread blocked. No local variables should
	 * be used after this point, except for the special case of "thread", which
	 * the platform layer returns as the previous thread running on the processor
	 * via the function call ABI as a return register, and "self", which may have
	 * been stored on the stack or a non-volatile register, but a stale idea of
	 * what was on the CPU is newly-accurate because that thread is again
	 * running on the CPU.
	 *
	 * If one of the threads is using a continuation, thread_continue
	 * is used to stitch up its context.
	 *
	 * If we are invoking a thread which is resuming from a continuation,
	 * the CPU will invoke thread_continue next.
	 *
	 * If the current thread is parking in a continuation, then its state
	 * won't be saved and the stack will be discarded. When the stack is
	 * re-allocated, it will be configured to resume from thread_continue.
	 */
	assert(continuation == self->continuation);
	thread = machine_switch_context(self, continuation, thread);
	assert(self == current_thread_volatile());
	TLOG(1, "thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self, continuation, thread);

	assert(continuation == NULL && self->continuation == NULL);

	DTRACE_SCHED(on__cpu);

	kperf_on_cpu(self, NULL, __builtin_frame_address(0));

	/* We have been resumed and are set to run. */
	thread_dispatch(thread, self);

	return TRUE;
}
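/*
 * Summary of the three switch paths in thread_invoke():
 *   1. Stack handoff: both threads are parked in continuations, so the old
 *      thread donates its kernel stack and call_continuation() never returns.
 *   2. Same thread resuming a continuation: no dispatch is needed, the
 *      continuation is called directly on the current stack.
 *   3. Full save/restore via machine_switch_context(), used when the incoming
 *      thread needs (or already has) its own kernel stack; if a stack cannot
 *      be allocated immediately the switch is abandoned and the thread is
 *      queued until a stack becomes available (thread_stack_enqueue()).
 */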
#if defined(CONFIG_SCHED_DEFERRED_AST)
/*
 * pset_cancel_deferred_dispatch:
 *
 * Cancels all ASTs that we can cancel for the given processor set
 * if the current processor is running the last runnable thread in the
 * system.
 *
 * This function assumes the current thread is runnable. This must
 * be called with the pset unlocked.
 */
static void
pset_cancel_deferred_dispatch(
	processor_set_t pset,
	processor_t     processor)
{
	processor_t     active_processor = NULL;
	uint32_t        sampled_sched_run_count;

	pset_lock(pset);
	sampled_sched_run_count = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed);

	/*
	 * If we have emptied the run queue, and our current thread is runnable, we
	 * should tell any processors that are still DISPATCHING that they will
	 * probably not have any work to do. In the event that there are no
	 * pending signals that we can cancel, this is also uninteresting.
	 *
	 * In the unlikely event that another thread becomes runnable while we are
	 * doing this (sched_run_count is atomically updated, not guarded), the
	 * codepath making it runnable SHOULD (a dangerous word) need the pset lock
	 * in order to dispatch it to a processor in our pset. So, the other
	 * codepath will wait while we squash all cancelable ASTs, get the pset
	 * lock, and then dispatch the freshly runnable thread. So this should be
	 * correct (we won't accidentally have a runnable thread that hasn't been
	 * dispatched to an idle processor), if not ideal (we may be restarting the
	 * dispatch process, which could have some overhead).
	 */

	if ((sampled_sched_run_count == 1) && (pset->pending_deferred_AST_cpu_mask)) {
		uint64_t dispatching_map = (pset->cpu_state_map[PROCESSOR_DISPATCHING] &
		    pset->pending_deferred_AST_cpu_mask &
		    ~pset->pending_AST_URGENT_cpu_mask);
		for (int cpuid = lsb_first(dispatching_map); cpuid >= 0; cpuid = lsb_next(dispatching_map, cpuid)) {
			active_processor = processor_array[cpuid];
			/*
			 * If a processor is DISPATCHING, it could be because of
			 * a cancelable signal.
			 *
			 * IF the processor is not our
			 * current processor (the current processor should not
			 * be DISPATCHING, so this is a bit paranoid), AND there
			 * is a cancelable signal pending on the processor, AND
			 * there is no non-cancelable signal pending (as there is
			 * no point trying to backtrack on bringing the processor
			 * up if a signal we cannot cancel is outstanding), THEN
			 * it should make sense to roll back the processor state
			 * to the IDLE state.
			 *
			 * If the racey nature of this approach (as the signal
			 * will be arbitrated by hardware, and can fire as we
			 * roll back state) results in the core responding
			 * despite being pushed back to the IDLE state, it
			 * should be no different than if the core took some
			 * interrupt while IDLE.
			 */
			if (active_processor != processor) {
				/*
				 * Squash all of the processor state back to some
				 * reasonable facsimile of PROCESSOR_IDLE.
				 */

				processor_state_update_idle(active_processor);
				active_processor->deadline = UINT64_MAX;
				pset_update_processor_state(pset, active_processor, PROCESSOR_IDLE);
				bit_clear(pset->pending_deferred_AST_cpu_mask, active_processor->cpu_id);
				machine_signal_idle_cancel(active_processor);
			}
		}
	}

	pset_unlock(pset);
}
#else
/* We don't support deferred ASTs; everything is candycanes and sunshine. */
#endif /* CONFIG_SCHED_DEFERRED_AST */
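/*
 * The deferred-AST cancellation above is an optimization only: a core that
 * was poked with a cancelable (deferred) wakeup but now has nothing left to
 * run is rolled back to PROCESSOR_IDLE before the wakeup is wasted. A race
 * where the hardware delivers the signal anyway is harmless and looks like a
 * spurious interrupt taken while idle.
 */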
static void
thread_csw_callout(
	thread_t        old,
	thread_t        new,
	uint64_t        timestamp)
{
	perfcontrol_event event = (new->state & TH_IDLE) ? IDLE : CONTEXT_SWITCH;
	uint64_t same_pri_latency = (new->state & TH_IDLE) ? 0 : new->same_pri_latency;

	machine_switch_perfcontrol_context(event, timestamp, 0,
	    same_pri_latency, old, new);
}
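/*
 * The context-switch perfcontrol callout reports IDLE when the incoming
 * thread is the idle thread, and otherwise passes along how long the thread
 * waited at its current base priority (same_pri_latency) so the platform
 * performance controller can account for scheduling latency.
 */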
/*
 *	thread_dispatch:
 *
 *	Handle threads at context switch. Re-dispatch other thread
 *	if still running, otherwise update run state and perform
 *	special actions. Update quantum for other thread and begin
 *	the quantum for ourselves.
 *
 *	"thread" is the old thread that we have switched away from.
 *	"self" is the new current thread that we have context switched to
 *
 *	Called at splsched.
 */
void
thread_dispatch(
	thread_t        thread,
	thread_t        self)
{
	processor_t     processor = self->last_processor;
	bool was_idle = false;

	assert(processor == current_processor());
	assert(self == current_thread_volatile());
	assert(thread != self);

	if (thread != THREAD_NULL) {
		/*
		 * Do the perfcontrol callout for context switch.
		 * The reason we do this here is:
		 * - thread_dispatch() is called from various places that are not
		 *   the direct context switch path for eg. processor shutdown etc.
		 *   So adding the callout here covers all those cases.
		 * - We want this callout as early as possible to be close
		 *   to the timestamp taken in thread_invoke()
		 * - We want to avoid holding the thread lock while doing the
		 *   callout
		 * - We do not want to callout if "thread" is NULL.
		 */
		thread_csw_callout(thread, self, processor->last_dispatch);

		if (thread->continuation != NULL) {
			/*
			 * Thread has a continuation and the normal stack is going away.
			 * Unpoison the stack and mark all fakestack objects as unused.
			 */
			kasan_fakestack_drop(thread);
			if (thread->kernel_stack) {
				kasan_unpoison_stack(thread->kernel_stack, kernel_stack_size);
			}
		}

		/*
		 * Free all unused fakestack objects.
		 */
		kasan_fakestack_gc(thread);

		/*
		 * If blocked at a continuation, discard
		 * the stack.
		 */
		if (thread->continuation != NULL && thread->kernel_stack != 0) {
			stack_free(thread);
		}
		if (thread->state & TH_IDLE) {
			was_idle = true;
			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			    MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE,
			    (uintptr_t)thread_tid(thread), 0, thread->state,
			    sched_run_buckets[TH_BUCKET_RUN], 0);
		} else {
			int64_t consumed;
			int64_t remainder = 0;

			if (processor->quantum_end > processor->last_dispatch) {
				remainder = processor->quantum_end -
				    processor->last_dispatch;
			}

			consumed = thread->quantum_remaining - remainder;

			if ((thread->reason & AST_LEDGER) == 0) {
				/*
				 * Bill CPU time to both the task and
				 * the individual thread.
				 */
				ledger_credit_thread(thread, thread->t_ledger,
				    task_ledgers.cpu_time, consumed);
				ledger_credit_thread(thread, thread->t_threadledger,
				    thread_ledgers.cpu_time, consumed);
				if (thread->t_bankledger) {
					ledger_credit_thread(thread, thread->t_bankledger,
					    bank_ledgers.cpu_time,
					    (consumed - thread->t_deduct_bank_ledger_time));
				}
				thread->t_deduct_bank_ledger_time = 0;
				if (consumed > 0) {
					/*
					 * This should never be negative, but in traces we are seeing some instances
					 * of consumed being negative.
					 * <rdar://problem/57782596> thread_dispatch() thread CPU consumed calculation sometimes results in negative value
					 */
					sched_update_pset_avg_execution_time(current_processor()->processor_set, consumed, processor->last_dispatch, thread->th_sched_bucket);
				}
			}
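			/*
			 * Illustrative quantum arithmetic (hypothetical numbers): if the
			 * thread came on core with quantum_remaining = 10ms and the quantum
			 * timer still had remainder = 4ms to go at switch-out, then
			 * consumed = 10ms - 4ms = 6ms of CPU time is billed to the task,
			 * thread, and (if present) bank ledgers above.
			 */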
			wake_lock(thread);
			thread_lock(thread);

			/*
			 * Apply a priority floor if the thread holds a kernel resource
			 * Do this before checking starting_pri to avoid overpenalizing
			 * repeated rwlock blockers.
			 */
			if (__improbable(thread->rwlock_count != 0)) {
				lck_rw_set_promotion_locked(thread);
			}

			boolean_t keep_quantum = processor->first_timeslice;

			/*
			 * Treat a thread which has dropped priority since it got on core
			 * as having expired its quantum.
			 */
			if (processor->starting_pri > thread->sched_pri) {
				keep_quantum = FALSE;
			}

			/* Compute remainder of current quantum. */
			if (keep_quantum &&
			    processor->quantum_end > processor->last_dispatch) {
				thread->quantum_remaining = (uint32_t)remainder;
			} else {
				thread->quantum_remaining = 0;
			}

			if (thread->sched_mode == TH_MODE_REALTIME) {
				/*
				 * Cancel the deadline if the thread has
				 * consumed the entire quantum.
				 */
				if (thread->quantum_remaining == 0) {
					KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_CANCEL_RT_DEADLINE) | DBG_FUNC_NONE,
					    (uintptr_t)thread_tid(thread), thread->realtime.deadline, thread->realtime.computation, 0);
					thread->realtime.deadline = UINT64_MAX;
				}
			} else {
#if defined(CONFIG_SCHED_TIMESHARE_CORE)
				/*
				 * For non-realtime threads treat a tiny
				 * remaining quantum as an expired quantum
				 * but include what's left next time.
				 */
				if (thread->quantum_remaining < min_std_quantum) {
					thread->reason |= AST_QUANTUM;
					thread->quantum_remaining += SCHED(initial_quantum_size)(thread);
				}
#endif /* CONFIG_SCHED_TIMESHARE_CORE */
			}

			/*
			 * If we are doing a direct handoff then
			 * take the remainder of the quantum.
			 */
			if ((thread->reason & (AST_HANDOFF | AST_QUANTUM)) == AST_HANDOFF) {
				self->quantum_remaining = thread->quantum_remaining;
				thread->reason |= AST_QUANTUM;
				thread->quantum_remaining = 0;
			} else {
#if defined(CONFIG_SCHED_MULTIQ)
				if (SCHED(sched_groups_enabled) &&
				    thread->sched_group == self->sched_group) {
					KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
					    MACHDBG_CODE(DBG_MACH_SCHED, MACH_QUANTUM_HANDOFF),
					    self->reason, (uintptr_t)thread_tid(thread),
					    self->quantum_remaining, thread->quantum_remaining, 0);

					self->quantum_remaining = thread->quantum_remaining;
					thread->quantum_remaining = 0;
					/* Don't set AST_QUANTUM here - old thread might still want to preempt someone else */
				}
#endif /* defined(CONFIG_SCHED_MULTIQ) */
			}

			thread->computation_metered += (processor->last_dispatch - thread->computation_epoch);

			if (!(thread->state & TH_WAIT)) {
				/*
				 * Still runnable.
				 */
				thread->last_made_runnable_time = thread->last_basepri_change_time = processor->last_dispatch;

				machine_thread_going_off_core(thread, FALSE, processor->last_dispatch, TRUE);
				ast_t reason = thread->reason;
				sched_options_t options = SCHED_NONE;

				if (reason & AST_REBALANCE) {
					options |= SCHED_REBALANCE;
					if (reason & AST_QUANTUM) {
						/*
						 * Having gone to the trouble of forcing this thread off a less preferred core,
						 * we should force the preferable core to reschedule immediately to give this
						 * thread a chance to run instead of just sitting on the run queue where
						 * it may just be stolen back by the idle core we just forced it off.
						 * But only do this at the end of a quantum to prevent cascading effects.
						 */
						options |= SCHED_PREEMPT;
					}
				}

				if (reason & AST_QUANTUM) {
					options |= SCHED_TAILQ;
				} else if (reason & AST_PREEMPT) {
					options |= SCHED_HEADQ;
				} else {
					options |= (SCHED_PREEMPT | SCHED_TAILQ);
				}

				thread_setrun(thread, options);

				KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
				    MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE,
				    (uintptr_t)thread_tid(thread), thread->reason, thread->state,
				    sched_run_buckets[TH_BUCKET_RUN], 0);

				if (thread->wake_active) {
					thread->wake_active = FALSE;
					thread_unlock(thread);

					thread_wakeup(&thread->wake_active);
				} else {
					thread_unlock(thread);
				}

				wake_unlock(thread);
			} else {
				boolean_t should_terminate = FALSE;
				uint32_t new_run_count;
				int thread_state = thread->state;

				/* Only the first call to thread_dispatch
				 * after explicit termination should add
				 * the thread to the termination queue
				 */
				if ((thread_state & (TH_TERMINATE | TH_TERMINATE2)) == TH_TERMINATE) {
					should_terminate = TRUE;
					thread_state |= TH_TERMINATE2;
				}

				timer_stop(&thread->runnable_timer, processor->last_dispatch);

				thread_state &= ~TH_RUN;
				thread->state = thread_state;

				thread->last_made_runnable_time = thread->last_basepri_change_time = THREAD_NOT_RUNNABLE;
				thread->chosen_processor = PROCESSOR_NULL;

				new_run_count = SCHED(run_count_decr)(thread);

#if CONFIG_SCHED_AUTO_JOIN
				if ((thread->sched_flags & TH_SFLAG_THREAD_GROUP_AUTO_JOIN) != 0) {
					work_interval_auto_join_unwind(thread);
				}
#endif /* CONFIG_SCHED_AUTO_JOIN */

#if CONFIG_SCHED_SFI
				if (thread->reason & AST_SFI) {
					thread->wait_sfi_begin_time = processor->last_dispatch;
				}
#endif
				machine_thread_going_off_core(thread, should_terminate, processor->last_dispatch, FALSE);

				KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
				    MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE,
				    (uintptr_t)thread_tid(thread), thread->reason, thread_state,
				    new_run_count, 0);

				if (thread_state & TH_WAIT_REPORT) {
					(*thread->sched_call)(SCHED_CALL_BLOCK, thread);
				}

				if (thread->wake_active) {
					thread->wake_active = FALSE;
					thread_unlock(thread);

					thread_wakeup(&thread->wake_active);
				} else {
					thread_unlock(thread);
				}

				wake_unlock(thread);

				if (should_terminate) {
					thread_terminate_enqueue(thread);
				}
			}
		}

		/*
		 * The thread could have been added to the termination queue, so it's
		 * unsafe to use after this point.
		 */
		thread = THREAD_NULL;
	}
	int urgency = THREAD_URGENCY_NONE;
	uint64_t latency = 0;

	/* Update (new) current thread and reprogram running timers */
	thread_lock(self);

	if (!(self->state & TH_IDLE)) {
		uint64_t        arg1, arg2;

#if CONFIG_SCHED_SFI
		ast_t new_ast;

		new_ast = sfi_thread_needs_ast(self, NULL);

		if (new_ast != AST_NONE) {
			ast_on(new_ast);
		}
#endif

		assertf(processor->last_dispatch >= self->last_made_runnable_time,
		    "Non-monotonic time? dispatch at 0x%llx, runnable at 0x%llx",
		    processor->last_dispatch, self->last_made_runnable_time);

		assert(self->last_made_runnable_time <= self->last_basepri_change_time);

		latency = processor->last_dispatch - self->last_made_runnable_time;
		assert(latency >= self->same_pri_latency);

		urgency = thread_get_urgency(self, &arg1, &arg2);

		thread_tell_urgency(urgency, arg1, arg2, latency, self);

		/*
		 * Get a new quantum if none remaining.
		 */
		if (self->quantum_remaining == 0) {
			thread_quantum_init(self);
		}

		/*
		 * Set up quantum timer and timeslice.
		 */
		processor->quantum_end = processor->last_dispatch +
		    self->quantum_remaining;

		running_timer_setup(processor, RUNNING_TIMER_QUANTUM, self,
		    processor->quantum_end, processor->last_dispatch);
		if (was_idle) {
			/*
			 * kperf's running timer is active whenever the idle thread for a
			 * CPU is not running.
			 */
			kperf_running_setup(processor, processor->last_dispatch);
		}
		running_timers_activate(processor);
		processor->first_timeslice = TRUE;
	} else {
		running_timers_deactivate(processor);
		processor->first_timeslice = FALSE;
		thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0, 0, self);
	}

	assert(self->block_hint == kThreadWaitNone);
	self->computation_epoch = processor->last_dispatch;
	self->reason = AST_NONE;
	processor->starting_pri = self->sched_pri;

	thread_unlock(self);

	machine_thread_going_on_core(self, urgency, latency, self->same_pri_latency,
	    processor->last_dispatch);

#if defined(CONFIG_SCHED_DEFERRED_AST)
	/*
	 * TODO: Can we state that redispatching our old thread is also
	 * uninteresting?
	 */
	if ((os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed) == 1) && !(self->state & TH_IDLE)) {
		pset_cancel_deferred_dispatch(processor->processor_set, processor);
	}
#endif
}
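/*
 * At this point the outgoing thread has either been re-queued, left parked on
 * its wait queue, or sent to the termination queue, and the incoming "self"
 * thread has a fresh quantum, an armed quantum timer, and its urgency and
 * scheduling latency reported to the platform layer (unless it is the idle
 * thread, whose running timers are deactivated instead).
 */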
/*
 *	thread_block_reason:
 *
 *	Forces a reschedule, blocking the caller if a wait
 *	has been asserted.
 *
 *	If a continuation is specified, then thread_invoke will
 *	attempt to discard the thread's kernel stack. When the
 *	thread resumes, it will execute the continuation function
 *	on a new kernel stack.
 */
wait_result_t
thread_block_reason(
	thread_continue_t       continuation,
	void                    *parameter,
	ast_t                   reason)
{
	thread_t        self = current_thread();
	processor_t     processor;
	thread_t        new_thread;
	spl_t           s;

	s = splsched();

	processor = current_processor();

	/* If we're explicitly yielding, force a subsequent quantum */
	if (reason & AST_YIELD) {
		processor->first_timeslice = FALSE;
	}

	/* We're handling all scheduling AST's */
	ast_off(AST_SCHEDULING);

	if ((continuation != NULL) && (self->task != kernel_task)) {
		if (uthread_get_proc_refcount(self->uthread) != 0) {
			panic("thread_block_reason with continuation uthread %p with uu_proc_refcount != 0", self->uthread);
		}
	}

	self->continuation = continuation;
	self->parameter = parameter;

	if (self->state & ~(TH_RUN | TH_IDLE)) {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_SCHED, MACH_BLOCK),
		    reason, VM_KERNEL_UNSLIDE(continuation), 0, 0, 0);
	}

	do {
		thread_lock(self);
		new_thread = thread_select(self, processor, &reason);
		thread_unlock(self);
	} while (!thread_invoke(self, new_thread, reason));

	splx(s);

	return self->wait_result;
}
/*
 *	thread_block:
 *
 *	Block the current thread if a wait has been asserted.
 */
wait_result_t
thread_block(
	thread_continue_t       continuation)
{
	return thread_block_reason(continuation, NULL, AST_NONE);
}

wait_result_t
thread_block_parameter(
	thread_continue_t       continuation,
	void                    *parameter)
{
	return thread_block_reason(continuation, parameter, AST_NONE);
}
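/*
 * Typical usage sketch (assumption: the standard wait pattern used elsewhere
 * in the kernel): a caller asserts a wait with assert_wait() and then calls
 * thread_block(continuation); the wait_result_t returned here tells the
 * resumed thread whether it was awakened, interrupted, or timed out.
 */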
/*
 *	thread_run:
 *
 *	Switch directly from the current thread to the
 *	new thread, handing off our quantum if appropriate.
 *
 *	New thread must be runnable, and not on a run queue.
 *
 *	Called at splsched.
 */
int
thread_run(
	thread_t                self,
	thread_continue_t       continuation,
	void                    *parameter,
	thread_t                new_thread)
{
	ast_t reason = AST_NONE;

	if ((self->state & TH_IDLE) == 0) {
		reason = AST_HANDOFF;
	}

	/*
	 * If this thread hadn't been setrun'ed, it
	 * might not have a chosen processor, so give it one
	 */
	if (new_thread->chosen_processor == NULL) {
		new_thread->chosen_processor = current_processor();
	}

	self->continuation = continuation;
	self->parameter = parameter;

	while (!thread_invoke(self, new_thread, reason)) {
		/* the handoff failed, so we have to fall back to the normal block path */
		processor_t processor = current_processor();

		reason = AST_NONE;

		thread_lock(self);
		new_thread = thread_select(self, processor, &reason);
		thread_unlock(self);
	}

	return self->wait_result;
}
/*
 *	thread_continue:
 *
 *	Called at splsched when a thread first receives
 *	a new stack after a continuation.
 *
 *	Called with THREAD_NULL as the old thread when
 *	invoked by machine_load_context.
 */
void
thread_continue(
	thread_t        thread)
{
	thread_t                self = current_thread();
	thread_continue_t       continuation;
	void                    *parameter;

	DTRACE_SCHED(on__cpu);

	continuation = self->continuation;
	parameter = self->parameter;

	assert(continuation != NULL);

	kperf_on_cpu(self, continuation, NULL);

	thread_dispatch(thread, self);

	self->continuation = self->parameter = NULL;

#if INTERRUPT_MASKED_DEBUG
	/* Reset interrupt-masked spin debugging timeout */
	ml_spin_debug_clear(self);
#endif

	TLOG(1, "thread_continue: calling call_continuation\n");

	boolean_t enable_interrupts = TRUE;

	/* bootstrap thread, idle thread need to stay interrupts-disabled */
	if (thread == THREAD_NULL || (self->state & TH_IDLE)) {
		enable_interrupts = FALSE;
	}

	call_continuation(continuation, parameter, self->wait_result, enable_interrupts);
	/*NOTREACHED*/
}
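/*
 * thread_continue() is the landing point for continuation-based blocking:
 * whatever stack the thread is given next, execution resumes here, the old
 * thread is dispatched, and the saved continuation function is invoked with
 * the saved parameter and the thread's wait result.
 */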
void
thread_quantum_init(thread_t thread)
{
	if (thread->sched_mode == TH_MODE_REALTIME) {
		thread->quantum_remaining = thread->realtime.computation;
	} else {
		thread->quantum_remaining = SCHED(initial_quantum_size)(thread);
	}
}

uint32_t
sched_timeshare_initial_quantum_size(thread_t thread)
{
	if ((thread != THREAD_NULL) && thread->th_sched_bucket == TH_BUCKET_SHARE_BG) {
		return bg_quantum;
	} else {
		return std_quantum;
	}
}
/*
 *	run_queue_init:
 *
 *	Initialize a run queue before first use.
 */
void
run_queue_init(
	run_queue_t     rq)
{
	rq->highq = NOPRI;
	for (u_int i = 0; i < BITMAP_LEN(NRQS); i++) {
		rq->bitmap[i] = 0;
	}
	rq->urgency = rq->count = 0;
	for (int i = 0; i < NRQS; i++) {
		circle_queue_init(&rq->queues[i]);
	}
}
/*
 *	run_queue_dequeue:
 *
 *	Perform a dequeue operation on a run queue,
 *	and return the resulting thread.
 *
 *	The run queue must be locked (see thread_run_queue_remove()
 *	for more info), and not empty.
 */
thread_t
run_queue_dequeue(
	run_queue_t     rq,
	sched_options_t options)
{
	thread_t        thread;
	circle_queue_t  queue = &rq->queues[rq->highq];

	if (options & SCHED_HEADQ) {
		thread = cqe_dequeue_head(queue, struct thread, runq_links);
	} else {
		thread = cqe_dequeue_tail(queue, struct thread, runq_links);
	}

	assert(thread != THREAD_NULL);
	assert_thread_magic(thread);

	thread->runq = PROCESSOR_NULL;
	SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
	rq->count--;
	if (SCHED(priority_is_urgent)(rq->highq)) {
		rq->urgency--; assert(rq->urgency >= 0);
	}
	if (circle_queue_empty(queue)) {
		bitmap_clear(rq->bitmap, rq->highq);
		rq->highq = bitmap_first(rq->bitmap, NRQS);
	}

	return thread;
}
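/*
 * The run queue is an array of circular queues indexed by priority plus a
 * bitmap of non-empty levels; rq->highq caches the highest set bit, so a
 * dequeue is constant time and refreshing highq after emptying a level is a
 * single bitmap_first() scan.
 */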
/*
 *	run_queue_enqueue:
 *
 *	Perform a enqueue operation on a run queue.
 *
 *	The run queue must be locked (see thread_run_queue_remove()
 *	for more info).
 */
boolean_t
run_queue_enqueue(
	run_queue_t     rq,
	thread_t        thread,
	sched_options_t options)
{
	circle_queue_t  queue = &rq->queues[thread->sched_pri];
	boolean_t       result = FALSE;

	assert_thread_magic(thread);

	if (circle_queue_empty(queue)) {
		circle_enqueue_tail(queue, &thread->runq_links);

		rq_bitmap_set(rq->bitmap, thread->sched_pri);
		if (thread->sched_pri > rq->highq) {
			rq->highq = thread->sched_pri;
			result = TRUE;
		}
	} else {
		if (options & SCHED_TAILQ) {
			circle_enqueue_tail(queue, &thread->runq_links);
		} else {
			circle_enqueue_head(queue, &thread->runq_links);
		}
	}
	if (SCHED(priority_is_urgent)(thread->sched_pri)) {
		rq->urgency++;
	}
	SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
	rq->count++;

	return result;
}
/*
 *	run_queue_remove:
 *
 *	Remove a specific thread from a runqueue.
 *
 *	The run queue must be locked.
 */
void
run_queue_remove(
	run_queue_t     rq,
	thread_t        thread)
{
	circle_queue_t  queue = &rq->queues[thread->sched_pri];

	assert(thread->runq != PROCESSOR_NULL);
	assert_thread_magic(thread);

	circle_dequeue(queue, &thread->runq_links);
	SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
	rq->count--;
	if (SCHED(priority_is_urgent)(thread->sched_pri)) {
		rq->urgency--; assert(rq->urgency >= 0);
	}

	if (circle_queue_empty(queue)) {
		/* update run queue status */
		bitmap_clear(rq->bitmap, thread->sched_pri);
		rq->highq = bitmap_first(rq->bitmap, NRQS);
	}

	thread->runq = PROCESSOR_NULL;
}
/*
 *	run_queue_peek:
 *
 *	Peek at the runq and return the highest
 *	priority thread from the runq.
 *
 *	The run queue must be locked.
 */
thread_t
run_queue_peek(
	run_queue_t     rq)
{
	if (rq->count > 0) {
		circle_queue_t queue = &rq->queues[rq->highq];
		thread_t thread = cqe_queue_first(queue, struct thread, runq_links);
		assert_thread_magic(thread);
		return thread;
	} else {
		return THREAD_NULL;
	}
}
rt_queue_t
sched_rtlocal_runq(processor_set_t pset)
{
	return &pset->rt_runq;
}

void
sched_rtlocal_init(processor_set_t pset)
{
	pset_rt_init(pset);
}

void
sched_rtlocal_queue_shutdown(processor_t processor)
{
	processor_set_t pset = processor->processor_set;
	thread_t        thread;
	queue_head_t    tqueue;

	pset_lock(pset);

	/* We only need to migrate threads if this is the last active or last recommended processor in the pset */
	if ((pset->online_processor_count > 0) && pset_is_recommended(pset)) {
		pset_unlock(pset);
		return;
	}

	queue_init(&tqueue);

	while (rt_runq_count(pset) > 0) {
		thread = qe_dequeue_head(&pset->rt_runq.queue, struct thread, runq_links);
		thread->runq = PROCESSOR_NULL;
		SCHED_STATS_RUNQ_CHANGE(&pset->rt_runq.runq_stats, rt_runq_count(pset));
		rt_runq_count_decr(pset);
		enqueue_tail(&tqueue, &thread->runq_links);
	}
	sched_update_pset_load_average(pset, 0);
	pset_unlock(pset);

	qe_foreach_element_safe(thread, &tqueue, runq_links) {
		remqueue(&thread->runq_links);

		thread_lock(thread);

		thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
	}
}
/* Assumes RT lock is not held, and acquires splsched/rt_lock itself */
void
sched_rtlocal_runq_scan(sched_update_scan_context_t scan_context)
{
	thread_t        thread;

	pset_node_t node = &pset_node0;
	processor_set_t pset = node->psets;

	spl_t s = splsched();
	do {
		while (pset != NULL) {
			pset_lock(pset);

			qe_foreach_element_safe(thread, &pset->rt_runq.queue, runq_links) {
				if (thread->last_made_runnable_time < scan_context->earliest_rt_make_runnable_time) {
					scan_context->earliest_rt_make_runnable_time = thread->last_made_runnable_time;
				}
			}

			pset_unlock(pset);

			pset = pset->pset_list;
		}
	} while (((node = node->node_list) != NULL) && ((pset = node->psets) != NULL));
	splx(s);
}

int64_t
sched_rtlocal_runq_count_sum(void)
{
	pset_node_t node = &pset_node0;
	processor_set_t pset = node->psets;
	int64_t count = 0;

	do {
		while (pset != NULL) {
			count += pset->rt_runq.runq_stats.count_sum;

			pset = pset->pset_list;
		}
	} while (((node = node->node_list) != NULL) && ((pset = node->psets) != NULL));

	return count;
}
/*
 *	realtime_queue_insert:
 *
 *	Enqueue a thread for realtime execution.
 */
static boolean_t
realtime_queue_insert(processor_t processor, processor_set_t pset, thread_t thread)
{
	queue_t     queue       = &SCHED(rt_runq)(pset)->queue;
	uint64_t    deadline    = thread->realtime.deadline;
	boolean_t   preempt     = FALSE;

	pset_assert_locked(pset);

	if (queue_empty(queue)) {
		enqueue_tail(queue, &thread->runq_links);
		preempt = TRUE;
	} else {
		/* Insert into rt_runq in thread deadline order */
		queue_entry_t iter;
		qe_foreach(iter, queue) {
			thread_t iter_thread = qe_element(iter, struct thread, runq_links);
			assert_thread_magic(iter_thread);

			if (deadline < iter_thread->realtime.deadline) {
				if (iter == queue_first(queue)) {
					preempt = TRUE;
				}
				insque(&thread->runq_links, queue_prev(iter));
				break;
			} else if (iter == queue_last(queue)) {
				enqueue_tail(queue, &thread->runq_links);
				break;
			}
		}
	}

	thread->runq = processor;
	SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
	rt_runq_count_incr(pset);

	return preempt;
}
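/*
 * The realtime run queue is kept sorted by thread->realtime.deadline
 * (earliest deadline first); the boolean result reports whether the new
 * entry landed at the head of the queue, i.e. whether it should preempt the
 * currently committed deadline.
 */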
#define MAX_BACKUP_PROCESSORS 7
#if defined(__x86_64__)
#define DEFAULT_BACKUP_PROCESSORS 1
#else
#define DEFAULT_BACKUP_PROCESSORS 0
#endif

int sched_rt_n_backup_processors = DEFAULT_BACKUP_PROCESSORS;

int
sched_get_rt_n_backup_processors(void)
{
	return sched_rt_n_backup_processors;
}

void
sched_set_rt_n_backup_processors(int n)
{
	if (n < 0) {
		n = 0;
	} else if (n > MAX_BACKUP_PROCESSORS) {
		n = MAX_BACKUP_PROCESSORS;
	}

	sched_rt_n_backup_processors = n;
}
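/*
 * Example of the clamping behavior (hypothetical values): a request of -2 is
 * stored as 0, 3 is stored as 3, and 50 is clamped to MAX_BACKUP_PROCESSORS
 * (7). x86_64 defaults to one backup processor per realtime dispatch; other
 * platforms default to none.
 */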
/*
 *	realtime_setrun:
 *
 *	Dispatch a thread for realtime execution.
 *
 *	Thread must be locked. Associated pset must
 *	be locked, and is returned unlocked.
 */
static void
realtime_setrun(
	processor_t     chosen_processor,
	thread_t        thread)
{
	processor_set_t pset = chosen_processor->processor_set;
	pset_assert_locked(pset);
	ast_t preempt;

	int n_backup = 0;

	if (thread->realtime.constraint <= rt_constraint_threshold) {
		n_backup = sched_rt_n_backup_processors;
	}
	assert((n_backup >= 0) && (n_backup <= MAX_BACKUP_PROCESSORS));

	sched_ipi_type_t ipi_type[MAX_BACKUP_PROCESSORS + 1] = {};
	processor_t ipi_processor[MAX_BACKUP_PROCESSORS + 1] = {};

	thread->chosen_processor = chosen_processor;

	/* <rdar://problem/15102234> */
	assert(thread->bound_processor == PROCESSOR_NULL);

	realtime_queue_insert(chosen_processor, pset, thread);

	processor_t processor = chosen_processor;
	bool chosen_process_is_secondary = chosen_processor->processor_primary != chosen_processor;

	int count = 0;
	for (int i = 0; i <= n_backup; i++) {
		if (i > 0) {
			processor = choose_processor_for_realtime_thread(pset, chosen_processor, chosen_process_is_secondary);
			if ((processor == PROCESSOR_NULL) || (sched_avoid_cpu0 && (processor->cpu_id == 0))) {
				break;
			}
			SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
			    (uintptr_t)thread_tid(thread), (uintptr_t)-3, processor->cpu_id, processor->state, 0);
		}
		ipi_type[i] = SCHED_IPI_NONE;
		ipi_processor[i] = processor;
		count++;

		if (processor->current_pri < BASEPRI_RTQUEUES) {
			preempt = (AST_PREEMPT | AST_URGENT);
		} else if (thread->realtime.deadline < processor->deadline) {
			preempt = (AST_PREEMPT | AST_URGENT);
		} else {
			preempt = AST_NONE;
		}

		if (preempt != AST_NONE) {
			if (processor->state == PROCESSOR_IDLE) {
				processor_state_update_from_thread(processor, thread);
				processor->deadline = thread->realtime.deadline;
				pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING);
				if (processor == current_processor()) {
					ast_on(preempt);

					if ((preempt & AST_URGENT) == AST_URGENT) {
						bit_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
					}

					if ((preempt & AST_PREEMPT) == AST_PREEMPT) {
						bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
					}
				} else {
					ipi_type[i] = sched_ipi_action(processor, thread, true, SCHED_IPI_EVENT_PREEMPT);
				}
			} else if (processor->state == PROCESSOR_DISPATCHING) {
				if ((processor->current_pri < thread->sched_pri) || (processor->deadline > thread->realtime.deadline)) {
					processor_state_update_from_thread(processor, thread);
					processor->deadline = thread->realtime.deadline;
				}
			} else {
				if (processor == current_processor()) {
					ast_on(preempt);

					if ((preempt & AST_URGENT) == AST_URGENT) {
						bit_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
					}

					if ((preempt & AST_PREEMPT) == AST_PREEMPT) {
						bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
					}
				} else {
					ipi_type[i] = sched_ipi_action(processor, thread, false, SCHED_IPI_EVENT_PREEMPT);
				}
			}
		} else {
			/* Selected processor was too busy, just keep thread enqueued and let other processors drain it naturally. */
		}
	}

	pset_unlock(pset);

	assert((count > 0) && (count <= (n_backup + 1)));
	for (int i = 0; i < count; i++) {
		assert(ipi_processor[i] != PROCESSOR_NULL);
		sched_ipi_perform(ipi_processor[i], ipi_type[i]);
	}
}
sched_ipi_type_t
sched_ipi_deferred_policy(processor_set_t pset, processor_t dst,
    __unused sched_ipi_event_t event)
{
#if defined(CONFIG_SCHED_DEFERRED_AST)
	if (!bit_test(pset->pending_deferred_AST_cpu_mask, dst->cpu_id)) {
		return SCHED_IPI_DEFERRED;
	}
#else /* CONFIG_SCHED_DEFERRED_AST */
	panic("Request for deferred IPI on an unsupported platform; pset: %p CPU: %d", pset, dst->cpu_id);
#endif /* CONFIG_SCHED_DEFERRED_AST */
	return SCHED_IPI_NONE;
}
sched_ipi_type_t
sched_ipi_action(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event)
{
	sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
	assert(dst != NULL);

	processor_set_t pset = dst->processor_set;
	if (current_processor() == dst) {
		return SCHED_IPI_NONE;
	}

	if (bit_test(pset->pending_AST_URGENT_cpu_mask, dst->cpu_id)) {
		return SCHED_IPI_NONE;
	}

	ipi_type = SCHED(ipi_policy)(dst, thread, dst_idle, event);
	switch (ipi_type) {
	case SCHED_IPI_NONE:
		return SCHED_IPI_NONE;
#if defined(CONFIG_SCHED_DEFERRED_AST)
	case SCHED_IPI_DEFERRED:
		bit_set(pset->pending_deferred_AST_cpu_mask, dst->cpu_id);
		break;
#endif /* CONFIG_SCHED_DEFERRED_AST */
	default:
		bit_set(pset->pending_AST_URGENT_cpu_mask, dst->cpu_id);
		bit_set(pset->pending_AST_PREEMPT_cpu_mask, dst->cpu_id);
		break;
	}
	return ipi_type;
}
sched_ipi_type_t
sched_ipi_policy(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event)
{
	sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
	boolean_t deferred_ipi_supported = false;
	processor_set_t pset = dst->processor_set;

#if defined(CONFIG_SCHED_DEFERRED_AST)
	deferred_ipi_supported = true;
#endif /* CONFIG_SCHED_DEFERRED_AST */

	switch (event) {
	case SCHED_IPI_EVENT_SPILL:
	case SCHED_IPI_EVENT_SMT_REBAL:
	case SCHED_IPI_EVENT_REBALANCE:
	case SCHED_IPI_EVENT_BOUND_THR:
		/*
		 * The spill, SMT rebalance, rebalance and the bound thread
		 * scenarios use immediate IPIs always.
		 */
		ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
		break;
	case SCHED_IPI_EVENT_PREEMPT:
		/* In the preemption case, use immediate IPIs for RT threads */
		if (thread && (thread->sched_pri >= BASEPRI_RTQUEUES)) {
			ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
			break;
		}

		/*
		 * For Non-RT threads preemption,
		 * If the core is active, use immediate IPIs.
		 * If the core is idle, use deferred IPIs if supported; otherwise immediate IPI.
		 */
		if (deferred_ipi_supported && dst_idle) {
			return sched_ipi_deferred_policy(pset, dst, event);
		}
		ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
		break;
	default:
		panic("Unrecognized scheduler IPI event type %d", event);
	}
	assert(ipi_type != SCHED_IPI_NONE);
	return ipi_type;
}
void
sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi)
{
	switch (ipi) {
	case SCHED_IPI_NONE:
		break;
	case SCHED_IPI_IDLE:
		machine_signal_idle(dst);
		break;
	case SCHED_IPI_IMMEDIATE:
		cause_ast_check(dst);
		break;
	case SCHED_IPI_DEFERRED:
		machine_signal_idle_deferred(dst);
		break;
	default:
		panic("Unrecognized scheduler IPI type: %d", ipi);
	}
}
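/*
 * Mapping of IPI types to machine primitives: SCHED_IPI_IDLE wakes an idle
 * core via machine_signal_idle(), SCHED_IPI_IMMEDIATE forces an AST check on
 * a running core via cause_ast_check(), and SCHED_IPI_DEFERRED uses the
 * deferrable wakeup path, which may later be rolled back or coalesced (see
 * pset_cancel_deferred_dispatch() above).
 */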
#if defined(CONFIG_SCHED_TIMESHARE_CORE)

boolean_t
priority_is_urgent(int priority)
{
	return bitmap_test(sched_preempt_pri, priority) ? TRUE : FALSE;
}

#endif /* CONFIG_SCHED_TIMESHARE_CORE */
/*
 *	processor_setrun:
 *
 *	Dispatch a thread for execution on a
 *	processor.
 *
 *	Thread must be locked. Associated pset must
 *	be locked, and is returned unlocked.
 */
static void
processor_setrun(
	processor_t     processor,
	thread_t        thread,
	integer_t       options)
{
	processor_set_t pset = processor->processor_set;
	pset_assert_locked(pset);
	ast_t preempt;
	enum { eExitIdle, eInterruptRunning, eDoNothing } ipi_action = eDoNothing;

	sched_ipi_type_t ipi_type = SCHED_IPI_NONE;

	thread->chosen_processor = processor;

	/*
	 *	Set preemption mode.
	 */
#if defined(CONFIG_SCHED_DEFERRED_AST)
	/* TODO: Do we need to care about urgency (see rdar://problem/20136239)? */
#endif
	if (SCHED(priority_is_urgent)(thread->sched_pri) && thread->sched_pri > processor->current_pri) {
		preempt = (AST_PREEMPT | AST_URGENT);
	} else if (processor->current_is_eagerpreempt) {
		preempt = (AST_PREEMPT | AST_URGENT);
	} else if ((thread->sched_mode == TH_MODE_TIMESHARE) && (thread->sched_pri < thread->base_pri)) {
		if (SCHED(priority_is_urgent)(thread->base_pri) && thread->sched_pri > processor->current_pri) {
			preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;
		} else {
			preempt = AST_NONE;
		}
	} else {
		preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;
	}

	if ((options & (SCHED_PREEMPT | SCHED_REBALANCE)) == (SCHED_PREEMPT | SCHED_REBALANCE)) {
		/*
		 * Having gone to the trouble of forcing this thread off a less preferred core,
		 * we should force the preferable core to reschedule immediately to give this
		 * thread a chance to run instead of just sitting on the run queue where
		 * it may just be stolen back by the idle core we just forced it off.
		 */
		preempt |= AST_PREEMPT;
	}

	SCHED(processor_enqueue)(processor, thread, options);
	sched_update_pset_load_average(pset, 0);
	if (preempt != AST_NONE) {
		if (processor->state == PROCESSOR_IDLE) {
			processor_state_update_from_thread(processor, thread);
			processor->deadline = UINT64_MAX;
			pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING);
			ipi_action = eExitIdle;
		} else if (processor->state == PROCESSOR_DISPATCHING) {
			if (processor->current_pri < thread->sched_pri) {
				processor_state_update_from_thread(processor, thread);
				processor->deadline = UINT64_MAX;
			}
		} else if ((processor->state == PROCESSOR_RUNNING ||
		    processor->state == PROCESSOR_SHUTDOWN) &&
		    (thread->sched_pri >= processor->current_pri)) {
			ipi_action = eInterruptRunning;
		}
	} else {
		/*
		 * New thread is not important enough to preempt what is running, but
		 * special processor states may need special handling
		 */
		if (processor->state == PROCESSOR_SHUTDOWN &&
		    thread->sched_pri >= processor->current_pri) {
			ipi_action = eInterruptRunning;
		} else if (processor->state == PROCESSOR_IDLE) {
			processor_state_update_from_thread(processor, thread);
			processor->deadline = UINT64_MAX;
			pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING);

			ipi_action = eExitIdle;
		}
	}

	if (ipi_action != eDoNothing) {
		if (processor == current_processor()) {
			if ((preempt = csw_check_locked(processor->active_thread, processor, pset, AST_NONE)) != AST_NONE) {
				ast_on(preempt);
			}

			if ((preempt & AST_URGENT) == AST_URGENT) {
				bit_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
			} else {
				bit_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
			}

			if ((preempt & AST_PREEMPT) == AST_PREEMPT) {
				bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
			} else {
				bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
			}
		} else {
			sched_ipi_event_t event = (options & SCHED_REBALANCE) ? SCHED_IPI_EVENT_REBALANCE : SCHED_IPI_EVENT_PREEMPT;
			ipi_type = sched_ipi_action(processor, thread, (ipi_action == eExitIdle), event);
		}
	}
	pset_unlock(pset);

	sched_ipi_perform(processor, ipi_type);
}
/*
 *	choose_next_pset:
 *
 *	Return the next sibling pset containing
 *	available processors.
 *
 *	Returns the original pset if none other is
 *	suitable.
 */
static processor_set_t
choose_next_pset(
	processor_set_t pset)
{
	processor_set_t nset = pset;

	do {
		nset = next_pset(nset);
	} while (nset->online_processor_count < 1 && nset != pset);

	return nset;
}

inline static processor_set_t
change_locked_pset(processor_set_t current_pset, processor_set_t new_pset)
{
	if (current_pset != new_pset) {
		pset_unlock(current_pset);
		pset_lock(new_pset);
	}

	return new_pset;
}
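/*
 * change_locked_pset() is a small lock-juggling helper: when the two psets
 * differ it drops the lock currently held and takes the lock on the new one,
 * so callers iterating across psets always hold exactly one pset lock at a
 * time.
 */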
/*
 *	choose_processor:
 *
 *	Choose a processor for the thread, beginning at
 *	the pset. Accepts an optional processor hint in
 *	the pset.
 *
 *	Returns a processor, possibly from a different pset.
 *
 *	The thread must be locked. The pset must be locked,
 *	and the resulting pset is locked on return.
 */
processor_t
choose_processor(
	processor_set_t starting_pset,
	processor_t     processor,
	thread_t        thread)
{
	processor_set_t pset = starting_pset;
	processor_set_t nset;

	assert(thread->sched_pri <= BASEPRI_RTQUEUES);

	/*
	 * Prefer the hinted processor, when appropriate.
	 */

	/* Fold last processor hint from secondary processor to its primary */
	if (processor != PROCESSOR_NULL) {
		processor = processor->processor_primary;
	}

	/*
	 * Only consult platform layer if pset is active, which
	 * it may not be in some cases when a multi-set system
	 * is going to sleep.
	 */
	if (pset->online_processor_count) {
		if ((processor == PROCESSOR_NULL) || (processor->processor_set == pset && processor->state == PROCESSOR_IDLE)) {
			processor_t mc_processor = machine_choose_processor(pset, processor);
			if (mc_processor != PROCESSOR_NULL) {
				processor = mc_processor->processor_primary;
			}
		}
	}

	/*
	 * At this point, we may have a processor hint, and we may have
	 * an initial starting pset. If the hint is not in the pset, or
	 * if the hint is for a processor in an invalid state, discard
	 * the hint.
	 */
	if (processor != PROCESSOR_NULL) {
		if (processor->processor_set != pset) {
			processor = PROCESSOR_NULL;
		} else if (!processor->is_recommended) {
			processor = PROCESSOR_NULL;
		} else {
			switch (processor->state) {
			case PROCESSOR_START:
			case PROCESSOR_SHUTDOWN:
			case PROCESSOR_OFF_LINE:
				/*
				 * Hint is for a processor that cannot support running new threads.
				 */
				processor = PROCESSOR_NULL;
				break;
			case PROCESSOR_IDLE:
				/*
				 * Hint is for an idle processor. Assume it is no worse than any other
				 * idle processor. The platform layer had an opportunity to provide
				 * the "least cost idle" processor above.
				 */
				if ((thread->sched_pri < BASEPRI_RTQUEUES) || processor_is_fast_track_candidate_for_realtime_thread(pset, processor)) {
					return processor;
				}
				processor = PROCESSOR_NULL;
				break;
			case PROCESSOR_RUNNING:
			case PROCESSOR_DISPATCHING:
				/*
				 * Hint is for an active CPU. This fast-path allows
				 * realtime threads to preempt non-realtime threads
				 * to regain their previous executing processor.
				 */
				if ((thread->sched_pri >= BASEPRI_RTQUEUES) &&
				    processor_is_fast_track_candidate_for_realtime_thread(pset, processor)) {
					return processor;
				}

				/* Otherwise, use hint as part of search below */
				break;
			default:
				processor = PROCESSOR_NULL;
				break;
			}
		}
	}
	/*
	 * Iterate through the processor sets to locate
	 * an appropriate processor. Seed results with
	 * a last-processor hint, if available, so that
	 * a search must find something strictly better
	 * to replace it.
	 *
	 * A primary/secondary pair of SMT processors are
	 * "unpaired" if the primary is busy but its
	 * corresponding secondary is idle (so the physical
	 * core has full use of its resources).
	 */

	integer_t lowest_priority = MAXPRI + 1;
	integer_t lowest_secondary_priority = MAXPRI + 1;
	integer_t lowest_unpaired_primary_priority = MAXPRI + 1;
	integer_t lowest_idle_secondary_priority = MAXPRI + 1;
	integer_t lowest_count = INT_MAX;
	uint64_t  furthest_deadline = 1;
	processor_t lp_processor = PROCESSOR_NULL;
	processor_t lp_unpaired_primary_processor = PROCESSOR_NULL;
	processor_t lp_idle_secondary_processor = PROCESSOR_NULL;
	processor_t lp_paired_secondary_processor = PROCESSOR_NULL;
	processor_t lc_processor = PROCESSOR_NULL;
	processor_t fd_processor = PROCESSOR_NULL;

	if (processor != PROCESSOR_NULL) {
		/* All other states should be enumerated above. */
		assert(processor->state == PROCESSOR_RUNNING || processor->state == PROCESSOR_DISPATCHING);

		lowest_priority = processor->current_pri;
		lp_processor = processor;

		if (processor->current_pri >= BASEPRI_RTQUEUES) {
			furthest_deadline = processor->deadline;
			fd_processor = processor;
		}

		lowest_count = SCHED(processor_runq_count)(processor);
		lc_processor = processor;
	}

	if (thread->sched_pri >= BASEPRI_RTQUEUES) {
		pset_node_t node = pset->node;
		int consider_secondaries = (!pset->is_SMT) || (bit_count(node->pset_map) == 1) || (node->pset_non_rt_primary_map == 0);
		for (; consider_secondaries < 2; consider_secondaries++) {
			pset = change_locked_pset(pset, starting_pset);
			do {
				processor = choose_processor_for_realtime_thread(pset, PROCESSOR_NULL, consider_secondaries);
				if (processor) {
					return processor;
				}

				/* NRG Collect processor stats for furthest deadline etc. here */

				nset = next_pset(pset);

				if (nset != starting_pset) {
					pset = change_locked_pset(pset, nset);
				}
			} while (nset != starting_pset);
		}
		/* Or we could just let it change to starting_pset in the loop above */
		pset = change_locked_pset(pset, starting_pset);
	}
	do {
		/*
		 * Choose an idle processor, in pset traversal order
		 */

		uint64_t idle_primary_map = (pset->cpu_state_map[PROCESSOR_IDLE] &
		    pset->primary_map &
		    pset->recommended_bitmask);

		/* there shouldn't be a pending AST if the processor is idle */
		assert((idle_primary_map & pset->pending_AST_URGENT_cpu_mask) == 0);

		int cpuid = lsb_first(idle_primary_map);
		if (cpuid >= 0) {
			processor = processor_array[cpuid];
			return processor;
		}
		/*
		 * Otherwise, enumerate active and idle processors to find primary candidates
		 * with lower priority/etc.
		 */

		uint64_t active_map = ((pset->cpu_state_map[PROCESSOR_RUNNING] | pset->cpu_state_map[PROCESSOR_DISPATCHING]) &
		    pset->recommended_bitmask &
		    ~pset->pending_AST_URGENT_cpu_mask);

		if (SCHED(priority_is_urgent)(thread->sched_pri) == FALSE) {
			active_map &= ~pset->pending_AST_PREEMPT_cpu_mask;
		}

		active_map = bit_ror64(active_map, (pset->last_chosen + 1));
		for (int rotid = lsb_first(active_map); rotid >= 0; rotid = lsb_next(active_map, rotid)) {
			cpuid = ((rotid + pset->last_chosen + 1) & 63);
			processor = processor_array[cpuid];

			integer_t cpri = processor->current_pri;
			processor_t primary = processor->processor_primary;
			if (primary != processor) {
				/* If primary is running a NO_SMT thread, don't choose its secondary */
				if (!((primary->state == PROCESSOR_RUNNING) && processor_active_thread_no_smt(primary))) {
					if (cpri < lowest_secondary_priority) {
						lowest_secondary_priority = cpri;
						lp_paired_secondary_processor = processor;
					}
				}
			} else {
				if (cpri < lowest_priority) {
					lowest_priority = cpri;
					lp_processor = processor;
				}
			}

			if ((cpri >= BASEPRI_RTQUEUES) && (processor->deadline > furthest_deadline)) {
				furthest_deadline = processor->deadline;
				fd_processor = processor;
			}

			integer_t ccount = SCHED(processor_runq_count)(processor);
			if (ccount < lowest_count) {
				lowest_count = ccount;
				lc_processor = processor;
			}
		}
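		/*
		 * Illustrative note (not from the original source): the bit_ror64()
		 * rotation above is what turns a plain lsb_first() scan into a
		 * search that starts just after pset->last_chosen, so successive
		 * placements round-robin across the pset. A minimal sketch of the
		 * same idea, assuming a 64-bit candidate map:
		 *
		 *	uint64_t map = 0x16;              // candidate CPUs 1, 2, 4
		 *	int last_chosen = 2;
		 *	uint64_t rotated = bit_ror64(map, last_chosen + 1);
		 *	// lsb_first(rotated) now yields the candidate closest
		 *	// *after* CPU 2 (here CPU 4, once un-rotated with
		 *	// cpuid = (rotid + last_chosen + 1) & 63).
		 */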
		/*
		 * For SMT configs, these idle secondary processors must have active primary. Otherwise
		 * the idle primary would have short-circuited the loop above
		 */
		uint64_t idle_secondary_map = (pset->cpu_state_map[PROCESSOR_IDLE] &
		    ~pset->primary_map &
		    pset->recommended_bitmask);

		/* there shouldn't be a pending AST if the processor is idle */
		assert((idle_secondary_map & pset->pending_AST_URGENT_cpu_mask) == 0);
		assert((idle_secondary_map & pset->pending_AST_PREEMPT_cpu_mask) == 0);

		for (cpuid = lsb_first(idle_secondary_map); cpuid >= 0; cpuid = lsb_next(idle_secondary_map, cpuid)) {
			processor = processor_array[cpuid];

			processor_t cprimary = processor->processor_primary;

			integer_t primary_pri = cprimary->current_pri;

			/*
			 * TODO: This should also make the same decisions
			 * as secondary_can_run_realtime_thread
			 *
			 * TODO: Keep track of the pending preemption priority
			 * of the primary to make this more accurate.
			 */

			/* If the primary is running a no-smt thread, then don't choose its secondary */
			if (cprimary->state == PROCESSOR_RUNNING &&
			    processor_active_thread_no_smt(cprimary)) {
				continue;
			}

			/*
			 * Find the idle secondary processor with the lowest priority primary
			 *
			 * We will choose this processor as a fallback if we find no better
			 * primary to preempt.
			 */
			if (primary_pri < lowest_idle_secondary_priority) {
				lp_idle_secondary_processor = processor;
				lowest_idle_secondary_priority = primary_pri;
			}

			/* Find the lowest priority active primary with idle secondary */
			if (primary_pri < lowest_unpaired_primary_priority) {
				/* If the primary processor is offline or starting up, it's not a candidate for this path */
				if (cprimary->state != PROCESSOR_RUNNING &&
				    cprimary->state != PROCESSOR_DISPATCHING) {
					continue;
				}

				if (!cprimary->is_recommended) {
					continue;
				}

				/* if the primary is pending preemption, don't try to re-preempt it */
				if (bit_test(pset->pending_AST_URGENT_cpu_mask, cprimary->cpu_id)) {
					continue;
				}

				if (SCHED(priority_is_urgent)(thread->sched_pri) == FALSE &&
				    bit_test(pset->pending_AST_PREEMPT_cpu_mask, cprimary->cpu_id)) {
					continue;
				}

				lowest_unpaired_primary_priority = primary_pri;
				lp_unpaired_primary_processor = cprimary;
			}
		}
		/*
		 * We prefer preempting a primary processor over waking up its secondary.
		 * The secondary will then be woken up by the preempted thread.
		 */
		if (thread->sched_pri > lowest_unpaired_primary_priority) {
			pset->last_chosen = lp_unpaired_primary_processor->cpu_id;
			return lp_unpaired_primary_processor;
		}

		/*
		 * We prefer preempting a lower priority active processor over directly
		 * waking up an idle secondary.
		 * The preempted thread will then find the idle secondary.
		 */
		if (thread->sched_pri > lowest_priority) {
			pset->last_chosen = lp_processor->cpu_id;
			return lp_processor;
		}

		if (thread->sched_pri >= BASEPRI_RTQUEUES) {
			/*
			 * For realtime threads, the most important aspect is
			 * scheduling latency, so we will pick an active
			 * secondary processor in this pset, or preempt
			 * another RT thread with a further deadline before
			 * going to the next pset.
			 */

			if (sched_allow_rt_smt && (thread->sched_pri > lowest_secondary_priority)) {
				pset->last_chosen = lp_paired_secondary_processor->cpu_id;
				return lp_paired_secondary_processor;
			}

			if (thread->realtime.deadline < furthest_deadline) {
				return fd_processor;
			}
		}
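		/*
		 * Illustrative summary (not from the original source) of the
		 * preference order applied above for a candidate in this pset:
		 *
		 *	1. preempt the lowest-priority "unpaired" primary
		 *	2. preempt the lowest-priority active processor
		 *	3. (realtime, with sched_allow_rt_smt) preempt the
		 *	   lowest-priority paired secondary
		 *	4. (realtime) preempt the RT thread with the furthest
		 *	   deadline, if this thread's deadline is earlier
		 *
		 * e.g. a priority-97 RT thread arriving while every primary runs
		 * priority-80 work is placed via case 2 rather than waking an
		 * idle secondary, so the displaced thread can take the secondary.
		 */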
		/*
		 * lc_processor is used to indicate the best processor set run queue
		 * on which to enqueue a thread when all available CPUs are busy with
		 * higher priority threads, so try to make sure it is initialized.
		 */
		if (lc_processor == PROCESSOR_NULL) {
			cpumap_t available_map = ((pset->cpu_state_map[PROCESSOR_IDLE] |
			    pset->cpu_state_map[PROCESSOR_RUNNING] |
			    pset->cpu_state_map[PROCESSOR_DISPATCHING]) &
			    pset->recommended_bitmask);
			cpuid = lsb_first(available_map);
			if (cpuid >= 0) {
				lc_processor = processor_array[cpuid];
				lowest_count = SCHED(processor_runq_count)(lc_processor);
			}
		}
		/*
		 * Move onto the next processor set.
		 *
		 * If all primary processors in this pset are running a higher
		 * priority thread, move on to next pset. Only when we have
		 * exhausted the search for primary processors do we
		 * fall back to secondaries.
		 */
		nset = next_pset(pset);

		if (nset != starting_pset) {
			pset = change_locked_pset(pset, nset);
		}
	} while (nset != starting_pset);
	/*
	 * Make sure that we pick a running processor,
	 * and that the correct processor set is locked.
	 * Since we may have unlocked the candidate processor's
	 * pset, it may have changed state.
	 *
	 * All primary processors are running a higher priority
	 * thread, so the only options left are enqueuing on
	 * the secondary processor that would perturb the least priority
	 * primary, or the least busy primary.
	 */
	boolean_t fallback_processor = false;
	do {
		/* lowest_priority is evaluated in the main loops above */
		if (lp_idle_secondary_processor != PROCESSOR_NULL) {
			processor = lp_idle_secondary_processor;
			lp_idle_secondary_processor = PROCESSOR_NULL;
		} else if (lp_paired_secondary_processor != PROCESSOR_NULL) {
			processor = lp_paired_secondary_processor;
			lp_paired_secondary_processor = PROCESSOR_NULL;
		} else if (lc_processor != PROCESSOR_NULL) {
			processor = lc_processor;
			lc_processor = PROCESSOR_NULL;
		} else {
			/*
			 * All processors are executing higher priority threads, and
			 * the lowest_count candidate was not usable.
			 *
			 * For AMP platforms running the clutch scheduler always
			 * return a processor from the requested pset to allow the
			 * thread to be enqueued in the correct runq. For non-AMP
			 * platforms, simply return the master_processor.
			 */
			fallback_processor = true;
#if CONFIG_SCHED_EDGE
			processor = processor_array[lsb_first(starting_pset->primary_map)];
#else /* CONFIG_SCHED_EDGE */
			processor = master_processor;
#endif /* CONFIG_SCHED_EDGE */
		}
		/*
		 * Check that the correct processor set is
		 * returned locked.
		 */
		pset = change_locked_pset(pset, processor->processor_set);

		/*
		 * We must verify that the chosen processor is still available.
		 * The cases where we pick the master_processor or the fallback
		 * processor are exceptions, since we may need to enqueue a thread
		 * on its runqueue if this is the last remaining processor
		 * during pset shutdown.
		 *
		 * <rdar://problem/47559304> would really help here since it
		 * gets rid of the weird last processor SHUTDOWN case where
		 * the pset is still schedulable.
		 */
		if (processor != master_processor && (fallback_processor == false) && (processor->state == PROCESSOR_SHUTDOWN || processor->state == PROCESSOR_OFF_LINE)) {
			processor = PROCESSOR_NULL;
		}
	} while (processor == PROCESSOR_NULL);

	pset->last_chosen = processor->cpu_id;
	return processor;
}
/*
 * Default implementation of SCHED(choose_node)()
 * for single node systems
 */
pset_node_t
sched_choose_node(__unused thread_t thread)
{
	return &pset_node0;
}
/*
 * choose_starting_pset:
 *
 * Choose a starting processor set for the thread.
 * May return a processor hint within the pset.
 *
 * Returns a starting processor set, to be used by
 * choose_processor().
 *
 * The thread must be locked. The resulting pset is unlocked on return,
 * and is chosen without taking any pset locks.
 */
processor_set_t
choose_starting_pset(pset_node_t node, thread_t thread, processor_t *processor_hint)
{
	processor_set_t pset;
	processor_t processor = PROCESSOR_NULL;

	if (thread->affinity_set != AFFINITY_SET_NULL) {
		/*
		 * Use affinity set policy hint.
		 */
		pset = thread->affinity_set->aset_pset;
	} else if (thread->last_processor != PROCESSOR_NULL) {
		/*
		 * Simple (last processor) affinity case.
		 */
		processor = thread->last_processor;
		pset = processor->processor_set;
	} else {
		/*
		 * Utilize a per task hint to spread threads
		 * among the available processor sets.
		 * NRG this seems like the wrong thing to do.
		 * See also task->pset_hint = pset in thread_setrun()
		 */
		task_t task = thread->task;

		pset = task->pset_hint;
		if (pset == PROCESSOR_SET_NULL) {
			pset = current_processor()->processor_set;
		}

		pset = choose_next_pset(pset);
	}

	if (!bit_test(node->pset_map, pset->pset_id)) {
		/* pset is not from this node so choose one that is */
		int id = lsb_first(node->pset_map);
		assert(id >= 0);
		pset = pset_array[id];
	}

	if (bit_count(node->pset_map) == 1) {
		/* Only a single pset in this node */
		goto out;
	}
	bool avoid_cpu0 = false;

#if defined(__x86_64__)
	if ((thread->sched_pri >= BASEPRI_RTQUEUES) && sched_avoid_cpu0) {
		/* Avoid the pset containing cpu0 */
		avoid_cpu0 = true;
		/* Assert that cpu0 is in pset0. I expect this to be true on __x86_64__ */
		assert(bit_test(pset_array[0]->cpu_bitmask, 0));
	}
#endif /* defined(__x86_64__) */
	if (thread->sched_pri >= BASEPRI_RTQUEUES) {
		pset_map_t rt_target_map = atomic_load(&node->pset_non_rt_primary_map);
		if ((avoid_cpu0 && pset->pset_id == 0) || !bit_test(rt_target_map, pset->pset_id)) {
			if (avoid_cpu0) {
				rt_target_map = bit_ror64(rt_target_map, 1);
			}
			int rotid = lsb_first(rt_target_map);
			if (rotid >= 0) {
				int id = avoid_cpu0 ? ((rotid + 1) & 63) : rotid;
				pset = pset_array[id];
				goto out;
			}
		}
		if (!pset->is_SMT || !sched_allow_rt_smt) {
			/* All psets are full of RT threads - fall back to choose processor to find the furthest deadline RT thread */
			goto out;
		}
		rt_target_map = atomic_load(&node->pset_non_rt_map);
		if ((avoid_cpu0 && pset->pset_id == 0) || !bit_test(rt_target_map, pset->pset_id)) {
			if (avoid_cpu0) {
				rt_target_map = bit_ror64(rt_target_map, 1);
			}
			int rotid = lsb_first(rt_target_map);
			if (rotid >= 0) {
				int id = avoid_cpu0 ? ((rotid + 1) & 63) : rotid;
				pset = pset_array[id];
				goto out;
			}
		}
		/* All psets are full of RT threads - fall back to choose processor to find the furthest deadline RT thread */
	} else {
		pset_map_t idle_map = atomic_load(&node->pset_idle_map);
		if (!bit_test(idle_map, pset->pset_id)) {
			int next_idle_pset_id = lsb_first(idle_map);
			if (next_idle_pset_id >= 0) {
				pset = pset_array[next_idle_pset_id];
			}
		}
	}
out:
	if ((processor != PROCESSOR_NULL) && (processor->processor_set != pset)) {
		processor = PROCESSOR_NULL;
	}
	if (processor != PROCESSOR_NULL) {
		*processor_hint = processor;
	}

	return pset;
}
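/*
 * Illustrative usage sketch (not from the original source): a caller such as
 * the unbound path of thread_setrun() below combines this routine with
 * SCHED(choose_processor)() roughly as follows, threading the hint through
 * so the search can seed itself from the last processor:
 *
 *	processor_t hint = PROCESSOR_NULL;
 *	pset_node_t node = SCHED(choose_node)(thread);
 *	processor_set_t starting_pset = choose_starting_pset(node, thread, &hint);
 *	pset_lock(starting_pset);
 *	processor_t chosen = SCHED(choose_processor)(starting_pset, hint, thread);
 *	// chosen->processor_set is returned locked; enqueue or preempt from here.
 */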
/*
 *	thread_setrun:
 *
 *	Dispatch thread for execution, onto an idle
 *	processor or run queue, and signal a preemption
 *	as appropriate.
 *
 *	Thread must be locked.
 */
void
thread_setrun(
	thread_t                        thread,
	sched_options_t                 options)
{
	processor_t processor;
	processor_set_t pset;

	assert((thread->state & (TH_RUN | TH_WAIT | TH_UNINT | TH_TERMINATE | TH_TERMINATE2)) == TH_RUN);
	assert(thread->runq == PROCESSOR_NULL);

	/*
	 *	Update priority if needed.
	 */
	if (SCHED(can_update_priority)(thread)) {
		SCHED(update_priority)(thread);
	}

	thread->sfi_class = sfi_thread_classify(thread);

	assert(thread->runq == PROCESSOR_NULL);
	if (thread->bound_processor == PROCESSOR_NULL) {
		/*
		 *	Unbound case.
		 */
		processor_t processor_hint = PROCESSOR_NULL;
		pset_node_t node = SCHED(choose_node)(thread);
		processor_set_t starting_pset = choose_starting_pset(node, thread, &processor_hint);

		pset_lock(starting_pset);

		processor = SCHED(choose_processor)(starting_pset, processor_hint, thread);
		pset = processor->processor_set;
		task_t task = thread->task;
		task->pset_hint = pset; /* NRG this is done without holding the task lock */

		SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
		    (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0);
	} else {
		/*
		 *	Bound case:
		 *
		 *	Unconditionally dispatch on the processor.
		 */
		processor = thread->bound_processor;
		pset = processor->processor_set;
		pset_lock(pset);

		SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
		    (uintptr_t)thread_tid(thread), (uintptr_t)-2, processor->cpu_id, processor->state, 0);
	}
	/*
	 *	Dispatch the thread on the chosen processor.
	 *	TODO: This should be based on sched_mode, not sched_pri
	 */
	if (thread->sched_pri >= BASEPRI_RTQUEUES) {
		realtime_setrun(processor, thread);
	} else {
		processor_setrun(processor, thread, options);
	}
	/* pset is now unlocked */
	if (thread->bound_processor == PROCESSOR_NULL) {
		SCHED(check_spill)(pset, thread);
	}
}
processor_set_t
task_choose_pset(
	task_t          task)
{
	processor_set_t pset = task->pset_hint;

	if (pset != PROCESSOR_SET_NULL) {
		pset = choose_next_pset(pset);
	}

	return pset;
}
/*
 *	Check for a preemption point in
 *	the current context.
 *
 *	Called at splsched with thread locked.
 */
ast_t
csw_check(
	thread_t                thread,
	processor_t             processor,
	ast_t                   check_reason)
{
	processor_set_t pset = processor->processor_set;

	assert(thread == processor->active_thread);

	pset_lock(pset);

	processor_state_update_from_thread(processor, thread);

	ast_t preempt = csw_check_locked(thread, processor, pset, check_reason);

	/* Acknowledge the IPI if we decided not to preempt */

	if ((preempt & AST_URGENT) == 0) {
		bit_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
	}

	if ((preempt & AST_PREEMPT) == 0) {
		bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
	}

	pset_unlock(pset);

	return preempt;
}
/*
 * Check for preemption at splsched with
 * pset and thread locked
 */
ast_t
csw_check_locked(
	thread_t                thread,
	processor_t             processor,
	processor_set_t         pset,
	ast_t                   check_reason)
{
	ast_t result;

	if (processor->first_timeslice) {
		if (rt_runq_count(pset) > 0) {
			return check_reason | AST_PREEMPT | AST_URGENT;
		}
	} else {
		if (rt_runq_count(pset) > 0) {
			if (BASEPRI_RTQUEUES > processor->current_pri) {
				return check_reason | AST_PREEMPT | AST_URGENT;
			} else {
				return check_reason | AST_PREEMPT;
			}
		}
	}

	/*
	 * If the current thread is running on a processor that is no longer recommended,
	 * urgently preempt it, at which point thread_select() should
	 * try to idle the processor and re-dispatch the thread to a recommended processor.
	 */
	if (!processor->is_recommended) {
		return check_reason | AST_PREEMPT | AST_URGENT;
	}

	result = SCHED(processor_csw_check)(processor);
	if (result != AST_NONE) {
		return check_reason | result | (thread_is_eager_preempt(thread) ? AST_URGENT : AST_NONE);
	}

	/*
	 * Same for avoid-processor
	 *
	 * TODO: Should these set AST_REBALANCE?
	 */
	if (SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread)) {
		return check_reason | AST_PREEMPT;
	}

	/*
	 * Even though we could continue executing on this processor, a
	 * secondary SMT core should try to shed load to another primary core.
	 *
	 * TODO: Should this do the same check that thread_select does? i.e.
	 * if no bound threads target this processor, and idle primaries exist, preempt
	 * The case of RT threads existing is already taken care of above
	 */

	if (processor->current_pri < BASEPRI_RTQUEUES &&
	    processor->processor_primary != processor) {
		return check_reason | AST_PREEMPT;
	}

	if (thread->state & TH_SUSP) {
		return check_reason | AST_PREEMPT;
	}

#if CONFIG_SCHED_SFI
	/*
	 * Current thread may not need to be preempted, but maybe needs
	 * an SFI wait?
	 */
	result = sfi_thread_needs_ast(thread, NULL);
	if (result != AST_NONE) {
		return check_reason | result;
	}
#endif /* CONFIG_SCHED_SFI */

	return AST_NONE;
}
/*
 * Handle preemption IPI or IPI in response to setting an AST flag
 * Triggered by cause_ast_check
 * Called at splsched
 */
void
ast_check(processor_t processor)
{
	if (processor->state != PROCESSOR_RUNNING &&
	    processor->state != PROCESSOR_SHUTDOWN) {
		return;
	}

	thread_t thread = processor->active_thread;

	assert(thread == current_thread());

	thread_lock(thread);

	/*
	 * Propagate thread ast to processor.
	 * (handles IPI in response to setting AST flag)
	 */
	ast_propagate(thread);

	/*
	 * Stash the old urgency and perfctl values to find out if
	 * csw_check updates them.
	 */
	thread_urgency_t old_urgency = processor->current_urgency;
	perfcontrol_class_t old_perfctl_class = processor->current_perfctl_class;

	ast_t preempt;

	if ((preempt = csw_check(thread, processor, AST_NONE)) != AST_NONE) {
		ast_on(preempt);
	}

	if (old_urgency != processor->current_urgency) {
		/*
		 * Urgency updates happen with the thread lock held (ugh).
		 * TODO: This doesn't notice QoS changes...
		 */
		uint64_t urgency_param1, urgency_param2;

		thread_urgency_t urgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
		thread_tell_urgency(urgency, urgency_param1, urgency_param2, 0, thread);
	}

	thread_unlock(thread);

	if (old_perfctl_class != processor->current_perfctl_class) {
		/*
		 * We updated the perfctl class of this thread from another core.
		 * Let CLPC know that the currently running thread has a new
		 * perfctl class.
		 */
		machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE,
		    mach_approximate_time(), 0, thread);
	}
}
/*
 *	set_sched_pri:
 *
 *	Set the scheduled priority of the specified thread.
 *
 *	This may cause the thread to change queues.
 *
 *	Thread must be locked.
 */
void
set_sched_pri(
	thread_t                thread,
	int16_t                 new_priority,
	set_sched_pri_options_t options)
{
	bool is_current_thread = (thread == current_thread());
	bool removed_from_runq = false;
	bool lazy_update = ((options & SETPRI_LAZY) == SETPRI_LAZY);

	int16_t old_priority = thread->sched_pri;

	/* If we're already at this priority, no need to mess with the runqueue */
	if (new_priority == old_priority) {
#if CONFIG_SCHED_CLUTCH
		/* For the first thread in the system, the priority is correct but
		 * th_sched_bucket is still TH_BUCKET_RUN. Since the clutch
		 * scheduler relies on the bucket being set for all threads, update
		 * its bucket here.
		 */
		if (thread->th_sched_bucket == TH_BUCKET_RUN) {
			assert(is_current_thread);
			SCHED(update_thread_bucket)(thread);
		}
#endif /* CONFIG_SCHED_CLUTCH */

		return;
	}

	if (is_current_thread) {
		assert(thread->state & TH_RUN);
		assert(thread->runq == PROCESSOR_NULL);
	} else {
		removed_from_runq = thread_run_queue_remove(thread);
	}

	thread->sched_pri = new_priority;

#if CONFIG_SCHED_CLUTCH
	/*
	 * Since for the clutch scheduler, the thread's bucket determines its runq
	 * in the hierarchy it is important to update the bucket when the thread
	 * lock is held and the thread has been removed from the runq hierarchy.
	 */
	SCHED(update_thread_bucket)(thread);

#endif /* CONFIG_SCHED_CLUTCH */

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
	    (uintptr_t)thread_tid(thread),
	    thread->base_pri,
	    thread->sched_pri,
	    thread->sched_usage,
	    0);

	if (removed_from_runq) {
		thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
	} else if (is_current_thread) {
		processor_t processor = thread->last_processor;
		assert(processor == current_processor());

		thread_urgency_t old_urgency = processor->current_urgency;

		/*
		 * When dropping in priority, check if the thread no longer belongs on core.
		 * If a thread raises its own priority, don't aggressively rebalance it.
		 * <rdar://problem/31699165>
		 *
		 * csw_check does a processor_state_update_from_thread, but
		 * we should do our own if we're being lazy.
		 */
		if (!lazy_update && new_priority < old_priority) {
			ast_t preempt;

			if ((preempt = csw_check(thread, processor, AST_NONE)) != AST_NONE) {
				ast_on(preempt);
			}
		} else {
			processor_state_update_from_thread(processor, thread);
		}

		/*
		 * set_sched_pri doesn't alter RT params. We expect direct base priority/QoS
		 * class alterations from user space to occur relatively infrequently, hence
		 * those are lazily handled. QoS classes have distinct priority bands, and QoS
		 * inheritance is expected to involve priority changes.
		 */
		if (processor->current_urgency != old_urgency) {
			uint64_t urgency_param1, urgency_param2;

			thread_urgency_t new_urgency = thread_get_urgency(thread,
			    &urgency_param1, &urgency_param2);

			thread_tell_urgency(new_urgency, urgency_param1,
			    urgency_param2, 0, thread);
		}

		/* TODO: only call this if current_perfctl_class changed */
		uint64_t ctime = mach_approximate_time();
		machine_thread_going_on_core(thread, processor->current_urgency, 0, 0, ctime);
	} else if (thread->state & TH_RUN) {
		processor_t processor = thread->last_processor;

		if (!lazy_update &&
		    processor != PROCESSOR_NULL &&
		    processor != current_processor() &&
		    processor->active_thread == thread) {
			cause_ast_check(processor);
		}
	}
}
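/*
 * Illustrative usage sketch (not from the original source): callers adjust a
 * thread's scheduled priority with the thread locked at splsched, e.g.
 *
 *	spl_t s = splsched();
 *	thread_lock(thread);
 *	set_sched_pri(thread, priority, SETPRI_DEFAULT);
 *	thread_unlock(thread);
 *	splx(s);
 *
 * Passing SETPRI_LAZY instead skips the immediate csw_check() when a thread
 * lowers its own priority and a later context switch is good enough.
 */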
/*
 * thread_run_queue_remove_for_handoff
 *
 * Pull a thread or its (recursive) push target out of the runqueue
 * so that it is ready for thread_run()
 *
 * Called at splsched
 *
 * Returns the thread that was pulled or THREAD_NULL if no thread could be pulled.
 * This may be different than the thread that was passed in.
 */
thread_t
thread_run_queue_remove_for_handoff(thread_t thread)
{
	thread_t pulled_thread = THREAD_NULL;

	thread_lock(thread);

	/*
	 * Check that the thread is not bound to a different processor,
	 * NO_SMT flag is not set on the thread, cluster type of
	 * processor matches with thread if the thread is pinned to a
	 * particular cluster and that realtime is not involved.
	 *
	 * Next, pull it off its run queue. If it doesn't come, it's not eligible.
	 */
	processor_t processor = current_processor();
	if ((thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor)
	    && (!thread_no_smt(thread))
	    && (processor->current_pri < BASEPRI_RTQUEUES)
	    && (thread->sched_pri < BASEPRI_RTQUEUES)
#if __AMP__
	    && ((!(thread->sched_flags & TH_SFLAG_PCORE_ONLY)) ||
	    processor->processor_set->pset_cluster_type == PSET_AMP_P)
	    && ((!(thread->sched_flags & TH_SFLAG_ECORE_ONLY)) ||
	    processor->processor_set->pset_cluster_type == PSET_AMP_E)
#endif /* __AMP__ */
	    ) {
		if (thread_run_queue_remove(thread)) {
			pulled_thread = thread;
		}
	}

	thread_unlock(thread);

	return pulled_thread;
}
/*
 * thread_prepare_for_handoff
 *
 * Make the thread ready for handoff.
 * If the thread was runnable then pull it off the runq, if the thread could
 * not be pulled, return NULL.
 *
 * If the thread was woken up from wait for handoff, make sure it is not bound to
 * different processor.
 *
 * Called at splsched
 *
 * Returns the thread that was pulled or THREAD_NULL if no thread could be pulled.
 * This may be different than the thread that was passed in.
 */
thread_t
thread_prepare_for_handoff(thread_t thread, thread_handoff_option_t option)
{
	thread_t pulled_thread = THREAD_NULL;

	if (option & THREAD_HANDOFF_SETRUN_NEEDED) {
		processor_t processor = current_processor();
		thread_lock(thread);

		/*
		 * Check that the thread is not bound to a different processor,
		 * NO_SMT flag is not set on the thread and cluster type of
		 * processor matches with thread if the thread is pinned to a
		 * particular cluster. Call setrun instead if above conditions
		 * are not satisfied.
		 */
		if ((thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor)
		    && (!thread_no_smt(thread))
#if __AMP__
		    && ((!(thread->sched_flags & TH_SFLAG_PCORE_ONLY)) ||
		    processor->processor_set->pset_cluster_type == PSET_AMP_P)
		    && ((!(thread->sched_flags & TH_SFLAG_ECORE_ONLY)) ||
		    processor->processor_set->pset_cluster_type == PSET_AMP_E)
#endif /* __AMP__ */
		    ) {
			pulled_thread = thread;
		} else {
			thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
		}
		thread_unlock(thread);
	} else {
		pulled_thread = thread_run_queue_remove_for_handoff(thread);
	}

	return pulled_thread;
}
/*
 *	thread_run_queue_remove:
 *
 *	Remove a thread from its current run queue and
 *	return TRUE if successful.
 *
 *	Thread must be locked.
 *
 *	If thread->runq is PROCESSOR_NULL, the thread will not re-enter the
 *	run queues because the caller locked the thread.  Otherwise
 *	the thread is on a run queue, but could be chosen for dispatch
 *	and removed by another processor under a different lock, which
 *	will set thread->runq to PROCESSOR_NULL.
 *
 *	Hence the thread select path must not rely on anything that could
 *	be changed under the thread lock after calling this function,
 *	most importantly thread->sched_pri.
 */
boolean_t
thread_run_queue_remove(
	thread_t        thread)
{
	boolean_t removed = FALSE;
	processor_t processor = thread->runq;

	if ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT) {
		/* Thread isn't runnable */
		assert(thread->runq == PROCESSOR_NULL);
		return FALSE;
	}

	if (processor == PROCESSOR_NULL) {
		/*
		 * The thread is either not on the runq,
		 * or is in the midst of being removed from the runq.
		 *
		 * runq is set to NULL under the pset lock, not the thread
		 * lock, so the thread may still be in the process of being dequeued
		 * from the runq. It will wait in invoke for the thread lock to be
		 * dropped.
		 */
		return FALSE;
	}

	if (thread->sched_pri < BASEPRI_RTQUEUES) {
		return SCHED(processor_queue_remove)(processor, thread);
	}

	processor_set_t pset = processor->processor_set;

	pset_lock(pset);

	if (thread->runq != PROCESSOR_NULL) {
		/*
		 *	Thread is on the RT run queue and we have a lock on
		 *	that run queue.
		 */

		remqueue(&thread->runq_links);
		SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
		rt_runq_count_decr(pset);

		thread->runq = PROCESSOR_NULL;

		removed = TRUE;
	}

	pset_unlock(pset);

	return removed;
}
/*
 * Put the thread back where it goes after a thread_run_queue_remove
 *
 * Thread must have been removed under the same thread lock hold
 *
 * thread locked, at splsched
 */
void
thread_run_queue_reinsert(thread_t thread, sched_options_t options)
{
	assert(thread->runq == PROCESSOR_NULL);
	assert(thread->state & (TH_RUN));

	thread_setrun(thread, options);
}
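/*
 * Illustrative usage sketch (not from the original source): the remove and
 * reinsert calls are intended to bracket a scheduling-state change made
 * under a single thread lock hold, e.g.
 *
 *	spl_t s = splsched();
 *	thread_lock(thread);
 *	boolean_t removed = thread_run_queue_remove(thread);
 *	// ... mutate scheduling state while the thread is off its runq ...
 *	if (removed) {
 *		thread_run_queue_reinsert(thread, SCHED_TAILQ);
 *	}
 *	thread_unlock(thread);
 *	splx(s);
 */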
void
sys_override_cpu_throttle(boolean_t enable_override)
{
	if (enable_override) {
		cpu_throttle_enabled = 0;
	} else {
		cpu_throttle_enabled = 1;
	}
}
thread_urgency_t
thread_get_urgency(thread_t thread, uint64_t *arg1, uint64_t *arg2)
{
	uint64_t urgency_param1 = 0, urgency_param2 = 0;

	thread_urgency_t urgency;

	if (thread == NULL || (thread->state & TH_IDLE)) {
		urgency = THREAD_URGENCY_NONE;
	} else if (thread->sched_mode == TH_MODE_REALTIME) {
		urgency_param1 = thread->realtime.period;
		urgency_param2 = thread->realtime.deadline;

		urgency = THREAD_URGENCY_REAL_TIME;
	} else if (cpu_throttle_enabled &&
	    (thread->sched_pri <= MAXPRI_THROTTLE) &&
	    (thread->base_pri <= MAXPRI_THROTTLE)) {
		/*
		 * Threads that are running at low priority but are not
		 * tagged with a specific QoS are separated out from
		 * the "background" urgency. Performance management
		 * subsystem can decide to either treat these threads
		 * as normal threads or look at other signals like thermal
		 * levels for optimal power/perf tradeoffs for a platform.
		 */
		boolean_t thread_lacks_qos = (proc_get_effective_thread_policy(thread, TASK_POLICY_QOS) == THREAD_QOS_UNSPECIFIED); //thread_has_qos_policy(thread);
		boolean_t task_is_suppressed = (proc_get_effective_task_policy(thread->task, TASK_POLICY_SUP_ACTIVE) == 0x1);

		/*
		 * Background urgency applied when thread priority is
		 * MAXPRI_THROTTLE or lower and thread is not promoted
		 * and thread has a QoS specified
		 */
		urgency_param1 = thread->sched_pri;
		urgency_param2 = thread->base_pri;

		if (thread_lacks_qos && !task_is_suppressed) {
			urgency = THREAD_URGENCY_LOWPRI;
		} else {
			urgency = THREAD_URGENCY_BACKGROUND;
		}
	} else {
		/* For otherwise unclassified threads, report throughput QoS parameters */
		urgency_param1 = proc_get_effective_thread_policy(thread, TASK_POLICY_THROUGH_QOS);
		urgency_param2 = proc_get_effective_task_policy(thread->task, TASK_POLICY_THROUGH_QOS);
		urgency = THREAD_URGENCY_NORMAL;
	}

	if (arg1 != NULL) {
		*arg1 = urgency_param1;
	}
	if (arg2 != NULL) {
		*arg2 = urgency_param2;
	}

	return urgency;
}
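/*
 * Illustrative usage sketch (not from the original source): consumers such as
 * the urgency notification path query the classification and forward it to
 * the performance controller, e.g.
 *
 *	uint64_t arg1, arg2;
 *	thread_urgency_t urgency = thread_get_urgency(thread, &arg1, &arg2);
 *	thread_tell_urgency(urgency, arg1, arg2, 0, thread);
 *
 * For THREAD_URGENCY_REAL_TIME the two parameters carry the realtime period
 * and deadline; for the background/low-priority cases they carry sched_pri
 * and base_pri.
 */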
perfcontrol_class_t
thread_get_perfcontrol_class(thread_t thread)
{
	/* Special case handling */
	if (thread->state & TH_IDLE) {
		return PERFCONTROL_CLASS_IDLE;
	}
	if (thread->task == kernel_task) {
		return PERFCONTROL_CLASS_KERNEL;
	}
	if (thread->sched_mode == TH_MODE_REALTIME) {
		return PERFCONTROL_CLASS_REALTIME;
	}

	/* perfcontrol_class based on base_pri */
	if (thread->base_pri <= MAXPRI_THROTTLE) {
		return PERFCONTROL_CLASS_BACKGROUND;
	} else if (thread->base_pri <= BASEPRI_UTILITY) {
		return PERFCONTROL_CLASS_UTILITY;
	} else if (thread->base_pri <= BASEPRI_DEFAULT) {
		return PERFCONTROL_CLASS_NONUI;
	} else if (thread->base_pri <= BASEPRI_FOREGROUND) {
		return PERFCONTROL_CLASS_UI;
	} else {
		return PERFCONTROL_CLASS_ABOVEUI;
	}
}
/*
 *	This is the processor idle loop, which just looks for other threads
 *	to execute.  Processor idle threads invoke this without supplying a
 *	current thread to idle without an asserted wait state.
 *
 *	Returns the next thread to execute if dispatched directly.
 */

#if 0
#define IDLE_KERNEL_DEBUG_CONSTANT(...) KERNEL_DEBUG_CONSTANT(__VA_ARGS__)
#else
#define IDLE_KERNEL_DEBUG_CONSTANT(...) do { } while(0)
#endif
thread_t
processor_idle(
	thread_t                        thread,
	processor_t                     processor)
{
	processor_set_t pset = processor->processor_set;

	(void)splsched();

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_START,
	    (uintptr_t)thread_tid(thread), 0, 0, 0, 0);

	SCHED_STATS_INC(idle_transitions);
	assert(processor->running_timers_active == false);

	uint64_t ctime = mach_absolute_time();

	timer_switch(&processor->system_state, ctime, &processor->idle_state);
	processor->current_state = &processor->idle_state;

	cpu_quiescent_counter_leave(ctime);

	while (1) {
		/*
		 * Ensure that updates to my processor and pset state,
		 * made by the IPI source processor before sending the IPI,
		 * are visible on this processor now (even though we don't
		 * take the pset lock yet).
		 */
		atomic_thread_fence(memory_order_acquire);

		if (processor->state != PROCESSOR_IDLE) {
			break;
		}
		if (bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
			break;
		}
#if defined(CONFIG_SCHED_DEFERRED_AST)
		if (bit_test(pset->pending_deferred_AST_cpu_mask, processor->cpu_id)) {
			break;
		}
#endif /* CONFIG_SCHED_DEFERRED_AST */
		if (processor->is_recommended && (processor->processor_primary == processor)) {
			if (rt_runq_count(pset)) {
				break;
			}
		} else {
			if (SCHED(processor_bound_count)(processor)) {
				break;
			}
		}

		IDLE_KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -1, 0);

		machine_track_platform_idle(TRUE);

		machine_idle();
		/* returns with interrupts enabled */

		machine_track_platform_idle(FALSE);

		(void)splsched();

		/*
		 * Check if we should call sched_timeshare_consider_maintenance() here.
		 * The CPU was woken out of idle due to an interrupt and we should do the
		 * call only if the processor is still idle. If the processor is non-idle,
		 * the threads running on the processor would do the call as part of
		 * context switching.
		 */
		if (processor->state == PROCESSOR_IDLE) {
			sched_timeshare_consider_maintenance(mach_absolute_time());
		}

		IDLE_KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -2, 0);

		if (!SCHED(processor_queue_empty)(processor)) {
			/* Secondary SMT processors respond to directed wakeups
			 * exclusively. Some platforms induce 'spurious' SMT wakeups.
			 */
			if (processor->processor_primary == processor) {
				break;
			}
		}
	}

	ctime = mach_absolute_time();

	timer_switch(&processor->idle_state, ctime, &processor->system_state);
	processor->current_state = &processor->system_state;

	cpu_quiescent_counter_join(ctime);

	ast_t reason = AST_NONE;

	/* We're handling all scheduling AST's */
	ast_off(AST_SCHEDULING);

	/*
	 * thread_select will move the processor from dispatching to running,
	 * or put it in idle if there's nothing to do.
	 */
	thread_t current_thread = current_thread();

	thread_lock(current_thread);
	thread_t new_thread = thread_select(current_thread, processor, &reason);
	thread_unlock(current_thread);

	assert(processor->running_timers_active == false);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_END,
	    (uintptr_t)thread_tid(thread), processor->state, (uintptr_t)thread_tid(new_thread), reason, 0);

	return new_thread;
}
/*
 *	Each processor has a dedicated thread which
 *	executes the idle loop when there is no suitable
 *	previous context.
 *
 *	This continuation is entered with interrupts disabled.
 */
void
idle_thread(__assert_only void* parameter,
    __unused wait_result_t result)
{
	assert(ml_get_interrupts_enabled() == FALSE);
	assert(parameter == NULL);

	processor_t processor = current_processor();

	/*
	 * Ensure that anything running in idle context triggers
	 * preemption-disabled checks.
	 */
	disable_preemption();

	/*
	 * Enable interrupts temporarily to handle any pending interrupts
	 * or IPIs before deciding to sleep
	 */
	spllo();

	thread_t new_thread = processor_idle(THREAD_NULL, processor);
	/* returns with interrupts disabled */

	enable_preemption();

	if (new_thread != THREAD_NULL) {
		thread_run(processor->idle_thread,
		    idle_thread, NULL, new_thread);
	}

	thread_block(idle_thread);
}
kern_return_t
idle_thread_create(
	processor_t             processor)
{
	kern_return_t   result;
	thread_t        thread;
	spl_t           s;
	char            name[MAXTHREADNAMESIZE];

	result = kernel_thread_create(idle_thread, NULL, MAXPRI_KERNEL, &thread);
	if (result != KERN_SUCCESS) {
		return result;
	}

	snprintf(name, sizeof(name), "idle #%d", processor->cpu_id);
	thread_set_thread_name(thread, name);

	s = splsched();
	thread_lock(thread);
	thread->bound_processor = processor;
	processor->idle_thread = thread;
	thread->sched_pri = thread->base_pri = IDLEPRI;
	thread->state = (TH_RUN | TH_IDLE);
	thread->options |= TH_OPT_IDLE_THREAD;
	thread_unlock(thread);
	splx(s);

	thread_deallocate(thread);

	return KERN_SUCCESS;
}
/*
 * sched_startup:
 *
 * Kicks off scheduler services.
 *
 * Called at splsched.
 */
void
sched_startup(void)
{
	kern_return_t   result;
	thread_t        thread;

	simple_lock_init(&sched_vm_group_list_lock, 0);

#if __arm__ || __arm64__
	simple_lock_init(&sched_recommended_cores_lock, 0);
#endif /* __arm__ || __arm64__ */

	result = kernel_thread_start_priority((thread_continue_t)sched_init_thread,
	    NULL, MAXPRI_KERNEL, &thread);
	if (result != KERN_SUCCESS) {
		panic("sched_startup");
	}

	thread_deallocate(thread);

	assert_thread_magic(thread);

	/*
	 * Yield to the sched_init_thread once, to
	 * initialize our own thread after being switched
	 * back to.
	 *
	 * The current thread is the only other thread
	 * active at this point.
	 */
	thread_block(THREAD_CONTINUE_NULL);
}
#if __arm64__
static _Atomic uint64_t sched_perfcontrol_callback_deadline;
#endif /* __arm64__ */


#if defined(CONFIG_SCHED_TIMESHARE_CORE)

static volatile uint64_t        sched_maintenance_deadline;
static uint64_t                 sched_tick_last_abstime;
static uint64_t                 sched_tick_delta;
uint64_t                        sched_tick_max_delta;
/*
 *	sched_init_thread:
 *
 *	Perform periodic bookkeeping functions about ten
 *	times per second.
 */
void
sched_timeshare_maintenance_continue(void)
{
	uint64_t        sched_tick_ctime, late_time;

	struct sched_update_scan_context scan_context = {
		.earliest_bg_make_runnable_time = UINT64_MAX,
		.earliest_normal_make_runnable_time = UINT64_MAX,
		.earliest_rt_make_runnable_time = UINT64_MAX
	};

	sched_tick_ctime = mach_absolute_time();
== 0)) {
5540 sched_tick_last_abstime
= sched_tick_ctime
;
5542 sched_tick_delta
= 1;
5544 late_time
= sched_tick_ctime
- sched_tick_last_abstime
;
5545 sched_tick_delta
= late_time
/ sched_tick_interval
;
5546 /* Ensure a delta of 1, since the interval could be slightly
5547 * smaller than the sched_tick_interval due to dispatch
5550 sched_tick_delta
= MAX(sched_tick_delta
, 1);
5552 /* In the event interrupt latencies or platform
5553 * idle events that advanced the timebase resulted
5554 * in periods where no threads were dispatched,
5555 * cap the maximum "tick delta" at SCHED_TICK_MAX_DELTA
5558 sched_tick_delta
= MIN(sched_tick_delta
, SCHED_TICK_MAX_DELTA
);
5560 sched_tick_last_abstime
= sched_tick_ctime
;
5561 sched_tick_max_delta
= MAX(sched_tick_delta
, sched_tick_max_delta
);
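	/*
	 * Illustrative worked example (not from the original source): with a
	 * 125ms sched_tick_interval, waking up 510ms after the last tick gives
	 * late_time / sched_tick_interval = 4 pseudo-ticks, which is then
	 * clamped to the range [1, SCHED_TICK_MAX_DELTA] before being added
	 * to sched_tick below.
	 */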
	scan_context.sched_tick_last_abstime = sched_tick_last_abstime;
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE) | DBG_FUNC_START,
	    sched_tick_delta, late_time, 0, 0, 0);

	/* Add a number of pseudo-ticks corresponding to the elapsed interval
	 * This could be greater than 1 if substantial intervals where
	 * all processors are idle occur, which rarely occurs in practice.
	 */
	sched_tick += sched_tick_delta;
	/*
	 *  Compute various averages.
	 */
	compute_averages(sched_tick_delta);

	/*
	 *  Scan the run queues for threads which
	 *  may need to be updated, and find the earliest runnable thread on the runqueue
	 *  to report its latency.
	 */
	SCHED(thread_update_scan)(&scan_context);

	SCHED(rt_runq_scan)(&scan_context);

	uint64_t ctime = mach_absolute_time();

	uint64_t bg_max_latency = (ctime > scan_context.earliest_bg_make_runnable_time) ?
	    ctime - scan_context.earliest_bg_make_runnable_time : 0;

	uint64_t default_max_latency = (ctime > scan_context.earliest_normal_make_runnable_time) ?
	    ctime - scan_context.earliest_normal_make_runnable_time : 0;

	uint64_t realtime_max_latency = (ctime > scan_context.earliest_rt_make_runnable_time) ?
	    ctime - scan_context.earliest_rt_make_runnable_time : 0;

	machine_max_runnable_latency(bg_max_latency, default_max_latency, realtime_max_latency);

	/*
	 * Check to see if the special sched VM group needs attention.
	 */
	sched_vm_group_maintenance();

#if __arm__ || __arm64__
	/* Check to see if the recommended cores failsafe is active */
	sched_recommended_cores_maintenance();
#endif /* __arm__ || __arm64__ */


#if DEBUG || DEVELOPMENT
#if __x86_64__
#include <i386/misc_protos.h>
	/* Check for long-duration interrupts */
	mp_interrupt_watchdog();
#endif /* __x86_64__ */
#endif /* DEBUG || DEVELOPMENT */

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE) | DBG_FUNC_END,
	    sched_pri_shifts[TH_BUCKET_SHARE_FG], sched_pri_shifts[TH_BUCKET_SHARE_BG],
	    sched_pri_shifts[TH_BUCKET_SHARE_UT], sched_pri_shifts[TH_BUCKET_SHARE_DF], 0);

	assert_wait((event_t)sched_timeshare_maintenance_continue, THREAD_UNINT);
	thread_block((thread_continue_t)sched_timeshare_maintenance_continue);
}
static uint64_t sched_maintenance_wakeups;

/*
 * Determine if the set of routines formerly driven by a maintenance timer
 * must be invoked, based on a deadline comparison.  Signals the scheduler
 * maintenance thread on deadline expiration.  Must be invoked at an interval
 * lower than the "sched_tick_interval", currently accomplished by
 * invocation via the quantum expiration timer and at context switch time.
 * Performance matters: this routine reuses a timestamp approximating the
 * current absolute time received from the caller, and should perform
 * no more than a comparison against the deadline in the common case.
 */
void
sched_timeshare_consider_maintenance(uint64_t ctime)
{
	cpu_quiescent_counter_checkin(ctime);

	uint64_t deadline = sched_maintenance_deadline;

	if (__improbable(ctime >= deadline)) {
		if (__improbable(current_thread() == sched_maintenance_thread)) {
			return;
		}

		uint64_t ndeadline = ctime + sched_tick_interval;

		if (__probable(os_atomic_cmpxchg(&sched_maintenance_deadline, deadline, ndeadline, seq_cst))) {
			thread_wakeup((event_t)sched_timeshare_maintenance_continue);
			sched_maintenance_wakeups++;
		}
	}

#if !CONFIG_SCHED_CLUTCH
	/*
	 * Only non-clutch schedulers use the global load calculation EWMA algorithm. For clutch
	 * scheduler, the load is maintained at the thread group and bucket level.
	 */
	uint64_t load_compute_deadline = os_atomic_load_wide(&sched_load_compute_deadline, relaxed);

	if (__improbable(load_compute_deadline && ctime >= load_compute_deadline)) {
		uint64_t new_deadline = 0;
		if (os_atomic_cmpxchg(&sched_load_compute_deadline, load_compute_deadline, new_deadline, relaxed)) {
			compute_sched_load();
			new_deadline = ctime + sched_load_compute_interval_abs;
			os_atomic_store_wide(&sched_load_compute_deadline, new_deadline, relaxed);
		}
	}
#endif /* CONFIG_SCHED_CLUTCH */

#if __arm64__
	uint64_t perf_deadline = os_atomic_load(&sched_perfcontrol_callback_deadline, relaxed);

	if (__improbable(perf_deadline && ctime >= perf_deadline)) {
		/* CAS in 0, if success, make callback. Otherwise let the next context switch check again. */
		if (os_atomic_cmpxchg(&sched_perfcontrol_callback_deadline, perf_deadline, 0, relaxed)) {
			machine_perfcontrol_deadline_passed(perf_deadline);
		}
	}
#endif /* __arm64__ */
}

#endif /* CONFIG_SCHED_TIMESHARE_CORE */
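/*
 * Illustrative note (not from the original source): each deadline check in
 * sched_timeshare_consider_maintenance() uses the same lock-free pattern, so
 * only one CPU acts on an expired deadline while everyone else pays a single
 * load and compare:
 *
 *	uint64_t deadline = os_atomic_load(&some_deadline, relaxed);
 *	if (ctime >= deadline &&
 *	    os_atomic_cmpxchg(&some_deadline, deadline, new_value, relaxed)) {
 *		// exactly one winner performs the expensive work here
 *	}
 *
 * `some_deadline` is a placeholder for this sketch, not a real symbol.
 */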
void
sched_init_thread(void)
{
	thread_block(THREAD_CONTINUE_NULL);

	thread_t thread = current_thread();

	thread_set_thread_name(thread, "sched_maintenance_thread");

	sched_maintenance_thread = thread;

	SCHED(maintenance_continuation)();
}
#if defined(CONFIG_SCHED_TIMESHARE_CORE)

/*
 *	thread_update_scan / runq_scan:
 *
 *	Scan the run queues to account for timesharing threads
 *	which need to be updated.
 *
 *	Scanner runs in two passes.  Pass one squirrels likely
 *	threads away in an array, pass two does the update.
 *
 *	This is necessary because the run queue is locked for
 *	the candidate scan, but the thread is locked for the update.
 *
 *	Array should be sized to make forward progress, without
 *	disabling preemption for long periods.
 */

#define THREAD_UPDATE_SIZE              128

static thread_t thread_update_array[THREAD_UPDATE_SIZE];
static uint32_t thread_update_count = 0;
/* Returns TRUE if thread was added, FALSE if thread_update_array is full */
static boolean_t
thread_update_add_thread(thread_t thread)
{
	if (thread_update_count == THREAD_UPDATE_SIZE) {
		return FALSE;
	}

	thread_update_array[thread_update_count++] = thread;
	thread_reference_internal(thread);
	return TRUE;
}
static void
thread_update_process_threads(void)
{
	assert(thread_update_count <= THREAD_UPDATE_SIZE);

	for (uint32_t i = 0; i < thread_update_count; i++) {
		thread_t thread = thread_update_array[i];
		assert_thread_magic(thread);
		thread_update_array[i] = THREAD_NULL;

		spl_t s = splsched();
		thread_lock(thread);
		if (!(thread->state & (TH_WAIT)) && thread->sched_stamp != sched_tick) {
			SCHED(update_priority)(thread);
		}
		thread_unlock(thread);
		splx(s);

		thread_deallocate(thread);
	}

	thread_update_count = 0;
}
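/*
 * Illustrative sketch (not from the original source) of the two-pass scheme
 * described above, roughly as a scheduler's thread_update_scan() might drive
 * it; `runq` and `scan_context` stand in for the caller's run queue and
 * context:
 *
 *	// Pass one: under the run-queue lock, stash candidate threads.
 *	while (runq_scan(runq, scan_context)) {
 *		// Array filled up: drop the runq lock, then...
 *		thread_update_process_threads();   // pass two: thread locks
 *		// ...retake the runq lock and rescan for the remainder.
 *	}
 *	thread_update_process_threads();
 *
 * Splitting the passes keeps run-queue lock hold times and preemption
 * disablement short.
 */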
static boolean_t
runq_scan_thread(
	thread_t                        thread,
	sched_update_scan_context_t     scan_context)
{
	assert_thread_magic(thread);

	if (thread->sched_stamp != sched_tick &&
	    thread->sched_mode == TH_MODE_TIMESHARE) {
		if (thread_update_add_thread(thread) == FALSE) {
			return TRUE;
		}
	}

	if (cpu_throttle_enabled && ((thread->sched_pri <= MAXPRI_THROTTLE) && (thread->base_pri <= MAXPRI_THROTTLE))) {
		if (thread->last_made_runnable_time < scan_context->earliest_bg_make_runnable_time) {
			scan_context->earliest_bg_make_runnable_time = thread->last_made_runnable_time;
		}
	} else {
		if (thread->last_made_runnable_time < scan_context->earliest_normal_make_runnable_time) {
			scan_context->earliest_normal_make_runnable_time = thread->last_made_runnable_time;
		}
	}

	return FALSE;
}
/*
 *	Scan a runq for candidate threads.
 *
 *	Returns TRUE if retry is needed.
 */
boolean_t
runq_scan(
	run_queue_t                     runq,
	sched_update_scan_context_t     scan_context)
{
	int count = runq->count;

	if (count == 0) {
		return FALSE;
	}

	thread_t thread;
	int queue_index;

	for (queue_index = bitmap_first(runq->bitmap, NRQS);
	    queue_index >= 0;
	    queue_index = bitmap_next(runq->bitmap, queue_index)) {
		circle_queue_t queue = &runq->queues[queue_index];

		cqe_foreach_element(thread, queue, runq_links) {
			if (runq_scan_thread(thread, scan_context) == TRUE) {
				return TRUE;
			}
			count--;
		}
	}

	return FALSE;
}
#if CONFIG_SCHED_CLUTCH

boolean_t
sched_clutch_timeshare_scan(
	queue_t                         thread_queue,
	uint16_t                        thread_count,
	sched_update_scan_context_t     scan_context)
{
	if (thread_count == 0) {
		return FALSE;
	}

	thread_t thread;
	qe_foreach_element_safe(thread, thread_queue, th_clutch_timeshare_link) {
		if (runq_scan_thread(thread, scan_context) == TRUE) {
			return TRUE;
		}
		thread_count--;
	}

	assert(thread_count == 0);
	return FALSE;
}

#endif /* CONFIG_SCHED_CLUTCH */

#endif /* CONFIG_SCHED_TIMESHARE_CORE */
bool
thread_is_eager_preempt(thread_t thread)
{
	return thread->sched_flags & TH_SFLAG_EAGERPREEMPT;
}
void
thread_set_eager_preempt(thread_t thread)
{
	spl_t s = splsched();
	thread_lock(thread);

	assert(!thread_is_eager_preempt(thread));

	thread->sched_flags |= TH_SFLAG_EAGERPREEMPT;

	if (thread == current_thread()) {
		/* csw_check updates current_is_eagerpreempt on the processor */
		ast_t ast = csw_check(thread, current_processor(), AST_NONE);

		thread_unlock(thread);

		if (ast != AST_NONE) {
			thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast);
		}
	} else {
		processor_t last_processor = thread->last_processor;

		if (last_processor != PROCESSOR_NULL &&
		    last_processor->state == PROCESSOR_RUNNING &&
		    last_processor->active_thread == thread) {
			cause_ast_check(last_processor);
		}

		thread_unlock(thread);
	}

	splx(s);
}
void
thread_clear_eager_preempt(thread_t thread)
{
	spl_t s = splsched();
	thread_lock(thread);

	assert(thread_is_eager_preempt(thread));

	thread->sched_flags &= ~TH_SFLAG_EAGERPREEMPT;

	if (thread == current_thread()) {
		current_processor()->current_is_eagerpreempt = false;
	}

	thread_unlock(thread);
	splx(s);
}
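/*
 * Illustrative usage sketch (not from the original source): a subsystem that
 * wants its thread to yield the CPU promptly whenever other runnable work
 * appears brackets the sensitive region with the pair above:
 *
 *	thread_set_eager_preempt(current_thread());
 *	// ... latency-sensitive polling or draining work ...
 *	thread_clear_eager_preempt(current_thread());
 *
 * While the flag is set, csw_check_locked() upgrades an ordinary preemption
 * AST for this thread to AST_URGENT.
 */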
/*
 *	Scheduling statistics
 */
void
sched_stats_handle_csw(processor_t processor, int reasons, int selfpri, int otherpri)
{
	struct sched_statistics *stats;
	boolean_t to_realtime = FALSE;

	stats = PERCPU_GET_RELATIVE(sched_stats, processor, processor);
	stats->csw_count++;

	if (otherpri >= BASEPRI_REALTIME) {
		stats->rt_sched_count++;
		to_realtime = TRUE;
	}

	if ((reasons & AST_PREEMPT) != 0) {
		stats->preempt_count++;

		if (selfpri >= BASEPRI_REALTIME) {
			stats->preempted_rt_count++;
		}

		if (to_realtime) {
			stats->preempted_by_rt_count++;
		}
	}
}
void
sched_stats_handle_runq_change(struct runq_stats *stats, int old_count)
{
	uint64_t timestamp = mach_absolute_time();

	stats->count_sum += (timestamp - stats->last_change_timestamp) * old_count;
	stats->last_change_timestamp = timestamp;
}
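/*
 * Illustrative note (not from the original source): count_sum accumulates a
 * time-weighted integral of run-queue depth, so the average depth over an
 * interval can be recovered as
 *
 *	avg_depth = (count_sum_end - count_sum_start) / (t_end - t_start);
 *
 * e.g. a queue that held 3 threads for 400us and then 1 thread for 100us
 * contributes 3*400 + 1*100 = 1300 "thread-us", an average depth of 2.6
 * over the 500us window.
 */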
/*
 *	For calls from assembly code
 */
#undef thread_wakeup
void
thread_wakeup(
	event_t         x);

void
thread_wakeup(
	event_t         x)
{
	thread_wakeup_with_result(x, THREAD_AWAKENED);
}

boolean_t
preemption_enabled(void)
{
	return get_preemption_level() == 0 && ml_get_interrupts_enabled();
}
static void
sched_timer_deadline_tracking_init(void)
{
	nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT, &timer_deadline_tracking_bin_1);
	nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT, &timer_deadline_tracking_bin_2);
}
#if __arm__ || __arm64__

uint32_t        perfcontrol_requested_recommended_cores = ALL_CORES_RECOMMENDED;
uint32_t        perfcontrol_requested_recommended_core_count = MAX_CPUS;
bool            perfcontrol_failsafe_active = false;
bool            perfcontrol_sleep_override = false;

uint64_t        perfcontrol_failsafe_maintenance_runnable_time;
uint64_t        perfcontrol_failsafe_activation_time;
uint64_t        perfcontrol_failsafe_deactivation_time;

/* data covering who likely caused it and how long they ran */
#define FAILSAFE_NAME_LEN       33 /* (2*MAXCOMLEN)+1 from size of p_name */
char            perfcontrol_failsafe_name[FAILSAFE_NAME_LEN];
int             perfcontrol_failsafe_pid;
uint64_t        perfcontrol_failsafe_tid;
uint64_t        perfcontrol_failsafe_thread_timer_at_start;
uint64_t        perfcontrol_failsafe_thread_timer_last_seen;
uint32_t        perfcontrol_failsafe_recommended_at_trigger;
/*
 * Perf controller calls here to update the recommended core bitmask.
 * If the failsafe is active, we don't immediately apply the new value.
 * Instead, we store the new request and use it after the failsafe deactivates.
 *
 * If the failsafe is not active, immediately apply the update.
 *
 * No scheduler locks are held, no other locks are held that scheduler might depend on,
 * interrupts are enabled
 *
 * currently prototype is in osfmk/arm/machine_routines.h
 */
void
sched_perfcontrol_update_recommended_cores(uint32_t recommended_cores)
{
	assert(preemption_enabled());

	spl_t s = splsched();
	simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);

	perfcontrol_requested_recommended_cores = recommended_cores;
	perfcontrol_requested_recommended_core_count = __builtin_popcountll(recommended_cores);

	if ((perfcontrol_failsafe_active == false) && (perfcontrol_sleep_override == false)) {
		sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores);
	} else {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_NONE,
		    perfcontrol_requested_recommended_cores,
		    sched_maintenance_thread->last_made_runnable_time, 0, 0, 0);
	}

	simple_unlock(&sched_recommended_cores_lock);
	splx(s);
}
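/*
 * Illustrative usage sketch (not from the original source): the perf
 * controller hands in a plain CPU bitmask, e.g. to recommend only CPUs 0-3:
 *
 *	sched_perfcontrol_update_recommended_cores(0x0F);
 *
 * The request is always recorded; it is only applied immediately when
 * neither the failsafe nor the sleep override is holding all cores online.
 */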
void
sched_override_recommended_cores_for_sleep(void)
{
	spl_t s = splsched();
	simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);

	if (perfcontrol_sleep_override == false) {
		perfcontrol_sleep_override = true;
		sched_update_recommended_cores(ALL_CORES_RECOMMENDED);
	}

	simple_unlock(&sched_recommended_cores_lock);
	splx(s);
}
void
sched_restore_recommended_cores_after_sleep(void)
{
	spl_t s = splsched();
	simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);

	if (perfcontrol_sleep_override == true) {
		perfcontrol_sleep_override = false;
		sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores);
	}

	simple_unlock(&sched_recommended_cores_lock);
	splx(s);
}
/*
 * Consider whether we need to activate the recommended cores failsafe
 *
 * Called from quantum timer interrupt context of a realtime thread
 * No scheduler locks are held, interrupts are disabled
 */
void
sched_consider_recommended_cores(uint64_t ctime, thread_t cur_thread)
{
	/*
	 * Check if a realtime thread is starving the system
	 * and bringing up non-recommended cores would help
	 *
	 * TODO: Is this the correct check for recommended == possible cores?
	 * TODO: Validate the checks without the relevant lock are OK.
	 */

	if (__improbable(perfcontrol_failsafe_active == TRUE)) {
		/* keep track of how long the responsible thread runs */

		simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);

		if (perfcontrol_failsafe_active == TRUE &&
		    cur_thread->thread_id == perfcontrol_failsafe_tid) {
			perfcontrol_failsafe_thread_timer_last_seen = timer_grab(&cur_thread->user_timer) +
			    timer_grab(&cur_thread->system_timer);
		}

		simple_unlock(&sched_recommended_cores_lock);

		/* we're already trying to solve the problem, so bail */
		return;
	}

	/* The failsafe won't help if there are no more processors to enable */
	if (__probable(perfcontrol_requested_recommended_core_count >= processor_count)) {
		return;
	}

	uint64_t too_long_ago = ctime - perfcontrol_failsafe_starvation_threshold;

	/* Use the maintenance thread as our canary in the coal mine */
	thread_t m_thread = sched_maintenance_thread;

	/* If it doesn't look bad, nothing to see here */
	if (__probable(m_thread->last_made_runnable_time >= too_long_ago)) {
		return;
	}

	/* It looks bad, take the lock to be sure */
	thread_lock(m_thread);

	if (m_thread->runq == PROCESSOR_NULL ||
	    (m_thread->state & (TH_RUN | TH_WAIT)) != TH_RUN ||
	    m_thread->last_made_runnable_time >= too_long_ago) {
		/*
		 * Maintenance thread is either on cpu or blocked, and
		 * therefore wouldn't benefit from more cores
		 */
		thread_unlock(m_thread);
		return;
	}

	uint64_t maintenance_runnable_time = m_thread->last_made_runnable_time;

	thread_unlock(m_thread);

	/*
	 * There are cores disabled at perfcontrol's recommendation, but the
	 * system is so overloaded that the maintenance thread can't run.
	 * That likely means that perfcontrol can't run either, so it can't fix
	 * the recommendation.  We have to kick in a failsafe to keep from starving.
	 *
	 * When the maintenance thread has been starved for too long,
	 * ignore the recommendation from perfcontrol and light up all the cores.
	 *
	 * TODO: Consider weird states like boot, sleep, or debugger
	 */

	simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);

	if (perfcontrol_failsafe_active == TRUE) {
		simple_unlock(&sched_recommended_cores_lock);
		return;
	}

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_START,
	    perfcontrol_requested_recommended_cores, maintenance_runnable_time, 0, 0, 0);

	perfcontrol_failsafe_active = TRUE;
	perfcontrol_failsafe_activation_time = mach_absolute_time();
	perfcontrol_failsafe_maintenance_runnable_time = maintenance_runnable_time;
	perfcontrol_failsafe_recommended_at_trigger = perfcontrol_requested_recommended_cores;

	/* Capture some data about who screwed up (assuming that the thread on core is at fault) */
	task_t task = cur_thread->task;
	perfcontrol_failsafe_pid = task_pid(task);
	strlcpy(perfcontrol_failsafe_name, proc_name_address(task->bsd_info), sizeof(perfcontrol_failsafe_name));

	perfcontrol_failsafe_tid = cur_thread->thread_id;

	/* Blame the thread for time it has run recently */
	uint64_t recent_computation = (ctime - cur_thread->computation_epoch) + cur_thread->computation_metered;

	uint64_t last_seen = timer_grab(&cur_thread->user_timer) + timer_grab(&cur_thread->system_timer);

	/* Compute the start time of the bad behavior in terms of the thread's on core time */
	perfcontrol_failsafe_thread_timer_at_start  = last_seen - recent_computation;
	perfcontrol_failsafe_thread_timer_last_seen = last_seen;

	/* Ignore the previously recommended core configuration */
	sched_update_recommended_cores(ALL_CORES_RECOMMENDED);

	simple_unlock(&sched_recommended_cores_lock);
}
/*
 * Now that our bacon has been saved by the failsafe, consider whether to turn it off
 *
 * Runs in the context of the maintenance thread, no locks held
 */
static void
sched_recommended_cores_maintenance(void)
{
	/* Common case - no failsafe, nothing to be done here */
	if (__probable(perfcontrol_failsafe_active == FALSE)) {
		return;
	}

	uint64_t ctime = mach_absolute_time();

	boolean_t print_diagnostic = FALSE;
	char p_name[FAILSAFE_NAME_LEN] = "";

	spl_t s = splsched();
	simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);

	/* Check again, under the lock, to avoid races */
	if (perfcontrol_failsafe_active == FALSE) {
		goto out;
	}

	/*
	 * Ensure that the other cores get another few ticks to run some threads
	 * If we don't have this hysteresis, the maintenance thread is the first
	 * to run, and then it immediately kills the other cores
	 */
	if ((ctime - perfcontrol_failsafe_activation_time) < perfcontrol_failsafe_starvation_threshold) {
		goto out;
	}

	/* Capture some diagnostic state under the lock so we can print it out later */

	int pid = perfcontrol_failsafe_pid;
	uint64_t tid = perfcontrol_failsafe_tid;

	uint64_t thread_usage = perfcontrol_failsafe_thread_timer_last_seen -
	    perfcontrol_failsafe_thread_timer_at_start;
	uint32_t rec_cores_before = perfcontrol_failsafe_recommended_at_trigger;
	uint32_t rec_cores_after = perfcontrol_requested_recommended_cores;
	uint64_t failsafe_duration = ctime - perfcontrol_failsafe_activation_time;
	strlcpy(p_name, perfcontrol_failsafe_name, sizeof(p_name));

	print_diagnostic = TRUE;

	/* Deactivate the failsafe and reinstate the requested recommendation settings */

	perfcontrol_failsafe_deactivation_time = ctime;
	perfcontrol_failsafe_active = FALSE;

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_END,
	    perfcontrol_requested_recommended_cores, failsafe_duration, 0, 0, 0);

	sched_update_recommended_cores(perfcontrol_requested_recommended_cores &
	    usercontrol_requested_recommended_cores);

out:
	simple_unlock(&sched_recommended_cores_lock);
	splx(s);

	if (print_diagnostic) {
		uint64_t failsafe_duration_ms = 0, thread_usage_ms = 0;

		absolutetime_to_nanoseconds(failsafe_duration, &failsafe_duration_ms);
		failsafe_duration_ms = failsafe_duration_ms / NSEC_PER_MSEC;

		absolutetime_to_nanoseconds(thread_usage, &thread_usage_ms);
		thread_usage_ms = thread_usage_ms / NSEC_PER_MSEC;

		printf("recommended core failsafe kicked in for %lld ms "
		    "likely due to %s[%d] thread 0x%llx spending "
		    "%lld ms on cpu at realtime priority - "
		    "new recommendation: 0x%x -> 0x%x\n",
		    failsafe_duration_ms, p_name, pid, tid, thread_usage_ms,
		    rec_cores_before, rec_cores_after);
	}
}
#endif /* __arm__ || __arm64__ */

kern_return_t
sched_processor_enable(processor_t processor, boolean_t enable)
{
	assert(preemption_enabled());

	spl_t s = splsched();
	simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);

	if (enable) {
		bit_set(usercontrol_requested_recommended_cores, processor->cpu_id);
	} else {
		bit_clear(usercontrol_requested_recommended_cores, processor->cpu_id);
	}

#if __arm__ || __arm64__
	if ((perfcontrol_failsafe_active == false) && (perfcontrol_sleep_override == false)) {
		sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores);
	} else {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_NONE,
		    perfcontrol_requested_recommended_cores,
		    sched_maintenance_thread->last_made_runnable_time, 0, 0, 0);
	}
#else /* __arm__ || __arm64__ */
	sched_update_recommended_cores(usercontrol_requested_recommended_cores);
#endif /* !__arm__ || __arm64__ */

	simple_unlock(&sched_recommended_cores_lock);
	splx(s);

	return KERN_SUCCESS;
}
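/*
 * Illustrative example of the mask intersection above (hypothetical masks):
 * with four CPUs, if perfcontrol has requested 0b1111 and user control has
 * disabled cpu3 (usercontrol mask 0b0111), the applied set is the intersection
 * 0b0111, so a core stays derecommended if either side has ruled it out.
 * While the failsafe or the sleep override is active on ARM, the new user mask
 * is only recorded here and takes effect once the override is lifted.
 */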
/*
 * Apply a new recommended cores mask to the processors it affects
 * Runs after considering failsafes and such
 *
 * Iterate over processors and update their ->is_recommended field.
 * If a processor is running, we let it drain out at its next
 * quantum expiration or blocking point. If a processor is idle, there
 * may be more work for it to do, so IPI it.
 *
 * interrupts disabled, sched_recommended_cores_lock is held
 */
static void
sched_update_recommended_cores(uint64_t recommended_cores)
{
	processor_set_t pset, nset;
	processor_t processor;
	uint64_t needs_exit_idle_mask = 0x0;
	uint32_t avail_count;

	processor = processor_list;
	pset = processor->processor_set;

	KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_UPDATE_REC_CORES) | DBG_FUNC_START,
	    recommended_cores,
#if __arm__ || __arm64__
	    perfcontrol_failsafe_active, 0, 0);
#else /* __arm__ || __arm64__ */
	    0, 0, 0);
#endif /* ! __arm__ || __arm64__ */

	if (__builtin_popcountll(recommended_cores) == 0) {
		bit_set(recommended_cores, master_processor->cpu_id); /* add boot processor or we hang */
	}

	/* First set recommended cores */
	pset_lock(pset);
	avail_count = 0;
	do {
		nset = processor->processor_set;
		if (nset != pset) {
			pset_unlock(pset);
			pset = nset;
			pset_lock(pset);
		}

		if (bit_test(recommended_cores, processor->cpu_id)) {
			processor->is_recommended = TRUE;
			bit_set(pset->recommended_bitmask, processor->cpu_id);

			if (processor->state == PROCESSOR_IDLE) {
				if (processor != current_processor()) {
					bit_set(needs_exit_idle_mask, processor->cpu_id);
				}
			}
			if (processor->state != PROCESSOR_OFF_LINE) {
				avail_count++;
				SCHED(pset_made_schedulable)(processor, pset, false);
			}
		}
	} while ((processor = processor->processor_list) != NULL);
	pset_unlock(pset);

	/* Now shutdown not recommended cores */
	processor = processor_list;
	pset = processor->processor_set;

	pset_lock(pset);
	do {
		nset = processor->processor_set;
		if (nset != pset) {
			pset_unlock(pset);
			pset = nset;
			pset_lock(pset);
		}

		if (!bit_test(recommended_cores, processor->cpu_id)) {
			sched_ipi_type_t ipi_type = SCHED_IPI_NONE;

			processor->is_recommended = FALSE;
			bit_clear(pset->recommended_bitmask, processor->cpu_id);

			if ((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_DISPATCHING)) {
				ipi_type = SCHED_IPI_IMMEDIATE;
			}
			SCHED(processor_queue_shutdown)(processor);
			/* pset unlocked */

			SCHED(rt_queue_shutdown)(processor);

			if (ipi_type != SCHED_IPI_NONE) {
				if (processor == current_processor()) {
					ast_on(AST_PREEMPT);
				} else {
					sched_ipi_perform(processor, ipi_type);
				}
			}

			pset_lock(pset);
		}
	} while ((processor = processor->processor_list) != NULL);

	processor_avail_count_user = avail_count;
#if defined(__x86_64__)
	commpage_update_active_cpus();
#endif

	pset_unlock(pset);

	/* Issue all pending IPIs now that the pset lock has been dropped */
	for (int cpuid = lsb_first(needs_exit_idle_mask); cpuid >= 0; cpuid = lsb_next(needs_exit_idle_mask, cpuid)) {
		processor = processor_array[cpuid];
		machine_signal_idle(processor);
	}

	KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_UPDATE_REC_CORES) | DBG_FUNC_END,
	    needs_exit_idle_mask, 0, 0, 0);
}
void
thread_set_options(uint32_t thopt)
{
	spl_t x;
	thread_t t = current_thread();

	x = splsched();
	thread_lock(t);

	t->options |= thopt;

	thread_unlock(t);
	splx(x);
}

void
thread_set_pending_block_hint(thread_t thread, block_hint_t block_hint)
{
	thread->pending_block_hint = block_hint;
}
uint32_t
qos_max_parallelism(int qos, uint64_t options)
{
	return SCHED(qos_max_parallelism)(qos, options);
}

uint32_t
sched_qos_max_parallelism(__unused int qos, uint64_t options)
{
	host_basic_info_data_t hinfo;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
	/* Query the machine layer for core information */
	__assert_only kern_return_t kret = host_info(host_self(), HOST_BASIC_INFO,
	    (host_info_t)&hinfo, &count);
	assert(kret == KERN_SUCCESS);

	if (options & QOS_PARALLELISM_COUNT_LOGICAL) {
		return hinfo.logical_cpu;
	} else {
		return hinfo.physical_cpu;
	}
}
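/*
 * Usage sketch (hypothetical caller, shown for illustration only): a client
 * sizing a worker pool for a QoS class could ask for the logical CPU width,
 *
 *     uint32_t width = qos_max_parallelism(THREAD_QOS_USER_INITIATED,
 *         QOS_PARALLELISM_COUNT_LOGICAL);
 *
 * and would get the physical core count by omitting
 * QOS_PARALLELISM_COUNT_LOGICAL from options. Note that this default
 * implementation ignores the qos argument; platform schedulers that override
 * SCHED(qos_max_parallelism) may not.
 */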
int sched_allow_NO_SMT_threads = 1;

bool
thread_no_smt(thread_t thread)
{
	return sched_allow_NO_SMT_threads &&
	    (thread->bound_processor == PROCESSOR_NULL) &&
	    ((thread->sched_flags & TH_SFLAG_NO_SMT) || (thread->task->t_flags & TF_NO_SMT));
}

bool
processor_active_thread_no_smt(processor_t processor)
{
	return sched_allow_NO_SMT_threads && !processor->current_is_bound && processor->current_is_NO_SMT;
}
#if __arm64__

/*
 * Set up or replace old timer with new timer
 *
 * Returns true if it canceled the old timer, false if it did not
 */
boolean_t
sched_perfcontrol_update_callback_deadline(uint64_t new_deadline)
{
	/*
	 * Exchange deadline for new deadline, if old deadline was nonzero,
	 * then I cancelled the callback, otherwise I didn't
	 */

	return os_atomic_xchg(&sched_perfcontrol_callback_deadline, new_deadline,
	           relaxed) != 0;
}

#endif /* __arm64__ */
#if CONFIG_SCHED_EDGE

#define SCHED_PSET_LOAD_EWMA_TC_NSECS 10000000u

/*
 * sched_edge_pset_running_higher_bucket()
 *
 * Routine to calculate cumulative running counts for each scheduling
 * bucket. This effectively lets the load calculation determine whether a
 * cluster is running any threads at a QoS lower than the thread being
 * migrated.
 */
static void
sched_edge_pset_running_higher_bucket(processor_set_t pset, uint32_t *running_higher)
{
	bitmap_t *active_map = &pset->cpu_state_map[PROCESSOR_RUNNING];

	/* Edge Scheduler Optimization */
	for (int cpu = bitmap_first(active_map, MAX_CPUS); cpu >= 0; cpu = bitmap_next(active_map, cpu)) {
		sched_bucket_t cpu_bucket = os_atomic_load(&pset->cpu_running_buckets[cpu], relaxed);
		for (sched_bucket_t bucket = cpu_bucket; bucket < TH_BUCKET_SCHED_MAX; bucket++) {
			running_higher[bucket]++;
		}
	}
}
/*
 * sched_update_pset_load_average()
 *
 * Updates the load average for each sched bucket for a cluster.
 * This routine must be called with the pset lock held.
 */
void
sched_update_pset_load_average(processor_set_t pset, uint64_t curtime)
{
	if (pset->online_processor_count == 0) {
		/* Looks like the pset is not runnable any more; nothing to do here */
		return;
	}

	/*
	 * Edge Scheduler Optimization
	 *
	 * See if more callers of this routine can pass in timestamps to avoid the
	 * mach_absolute_time() call here.
	 */
	if (curtime == 0) {
		curtime = mach_absolute_time();
	}
	uint64_t last_update = os_atomic_load(&pset->pset_load_last_update, relaxed);
	int64_t delta_ticks = curtime - last_update;
	if (delta_ticks < 0) {
		return;
	}

	uint64_t delta_nsecs = 0;
	absolutetime_to_nanoseconds(delta_ticks, &delta_nsecs);

	if (__improbable(delta_nsecs > UINT32_MAX)) {
		delta_nsecs = UINT32_MAX;
	}

	uint32_t running_higher[TH_BUCKET_SCHED_MAX] = {0};
	sched_edge_pset_running_higher_bucket(pset, running_higher);

	for (sched_bucket_t sched_bucket = TH_BUCKET_FIXPRI; sched_bucket < TH_BUCKET_SCHED_MAX; sched_bucket++) {
		uint64_t old_load_average = os_atomic_load(&pset->pset_load_average[sched_bucket], relaxed);
		uint64_t old_load_average_factor = old_load_average * SCHED_PSET_LOAD_EWMA_TC_NSECS;
		uint32_t current_runq_depth = (sched_edge_cluster_cumulative_count(&pset->pset_clutch_root, sched_bucket) + rt_runq_count(pset) + running_higher[sched_bucket]) / pset->online_processor_count;

		/*
		 * For the new load average, multiply current_runq_depth by delta_nsecs (which results in a 32.0 value).
		 * Since we want to maintain the load average as a 24.8 fixed-point value for precision, the
		 * new load average needs to be shifted before it can be added to the old load average.
		 */
		uint64_t new_load_average_factor = (current_runq_depth * delta_nsecs) << SCHED_PSET_LOAD_EWMA_FRACTION_BITS;

		/*
		 * For extremely parallel workloads, it is important that the load average on a cluster moves from zero
		 * to non-zero instantly to allow threads to be migrated to other (potentially idle) clusters quickly.
		 * Hence use the EWMA when the system is already loaded; otherwise for an idle system use the latest
		 * load average immediately.
		 */
		int old_load_shifted = (int)((old_load_average + SCHED_PSET_LOAD_EWMA_ROUND_BIT) >> SCHED_PSET_LOAD_EWMA_FRACTION_BITS);
		boolean_t load_uptick = (old_load_shifted == 0) && (current_runq_depth != 0);
		boolean_t load_downtick = (old_load_shifted != 0) && (current_runq_depth == 0);
		uint64_t load_average;
		if (load_uptick || load_downtick) {
			load_average = (current_runq_depth << SCHED_PSET_LOAD_EWMA_FRACTION_BITS);
		} else {
			/* Indicates a loaded system; use EWMA for load average calculation */
			load_average = (old_load_average_factor + new_load_average_factor) / (delta_nsecs + SCHED_PSET_LOAD_EWMA_TC_NSECS);
		}
		os_atomic_store(&pset->pset_load_average[sched_bucket], load_average, relaxed);
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_LOAD_AVG) | DBG_FUNC_NONE, pset->pset_cluster_id, (load_average >> SCHED_PSET_LOAD_EWMA_FRACTION_BITS), load_average & SCHED_PSET_LOAD_EWMA_FRACTION_MASK, sched_bucket);
	}
	os_atomic_store(&pset->pset_load_last_update, curtime, relaxed);
}
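/*
 * Worked example of the 24.8 fixed-point EWMA above (hypothetical values):
 * with SCHED_PSET_LOAD_EWMA_TC_NSECS = 10ms, an old load average of 2.0
 * (512 in 24.8), a current_runq_depth of 4 and delta_nsecs of 10ms:
 *
 *   old_factor = 512 * 10,000,000            = 5,120,000,000
 *   new_factor = (4 * 10,000,000) << 8       = 10,240,000,000
 *   load_avg   = 15,360,000,000 / 20,000,000 = 768  (3.0 in 24.8)
 *
 * i.e. with delta equal to the time constant, the old and new samples get
 * equal weight. The uptick/downtick special case bypasses this: an idle
 * cluster (old average rounds to 0) that suddenly has depth 3 jumps straight
 * to 3 << 8 = 768 rather than ramping up through the EWMA.
 */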
void
sched_update_pset_avg_execution_time(processor_set_t pset, uint64_t execution_time, uint64_t curtime, sched_bucket_t sched_bucket)
{
	pset_execution_time_t old_execution_time_packed, new_execution_time_packed;
	uint64_t avg_thread_execution_time = 0;

	os_atomic_rmw_loop(&pset->pset_execution_time[sched_bucket].pset_execution_time_packed,
	    old_execution_time_packed.pset_execution_time_packed,
	    new_execution_time_packed.pset_execution_time_packed, relaxed, {
		uint64_t last_update = old_execution_time_packed.pset_execution_time_last_update;
		int64_t delta_ticks = curtime - last_update;
		if (delta_ticks < 0) {
		        /*
		         * It's possible that another CPU came in and updated the pset_execution_time
		         * before this CPU could do it. Since the average execution time is meant to
		         * be an approximate measure per cluster, ignore the older update.
		         */
		        os_atomic_rmw_loop_give_up(return );
		}
		uint64_t delta_nsecs = 0;
		absolutetime_to_nanoseconds(delta_ticks, &delta_nsecs);

		uint64_t nanotime = 0;
		absolutetime_to_nanoseconds(execution_time, &nanotime);
		uint64_t execution_time_us = nanotime / NSEC_PER_USEC;

		uint64_t old_execution_time = (old_execution_time_packed.pset_avg_thread_execution_time * SCHED_PSET_LOAD_EWMA_TC_NSECS);
		uint64_t new_execution_time = (execution_time_us * delta_nsecs);

		avg_thread_execution_time = (old_execution_time + new_execution_time) / (delta_nsecs + SCHED_PSET_LOAD_EWMA_TC_NSECS);
		new_execution_time_packed.pset_avg_thread_execution_time = avg_thread_execution_time;
		new_execution_time_packed.pset_execution_time_last_update = curtime;
	});
	KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PSET_AVG_EXEC_TIME) | DBG_FUNC_NONE, pset->pset_cluster_id, avg_thread_execution_time, sched_bucket);
}
#else /* CONFIG_SCHED_EDGE */

void
sched_update_pset_load_average(processor_set_t pset, __unused uint64_t curtime)
{
	int non_rt_load = pset->pset_runq.count;
	int load = ((bit_count(pset->cpu_state_map[PROCESSOR_RUNNING]) + non_rt_load + rt_runq_count(pset)) << PSET_LOAD_NUMERATOR_SHIFT);
	int new_load_average = ((int)pset->load_average + load) >> 1;

	pset->load_average = new_load_average;
#if (DEVELOPMENT || DEBUG)
#if __AMP__
	if (pset->pset_cluster_type == PSET_AMP_P) {
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PSET_LOAD_AVERAGE) | DBG_FUNC_NONE, sched_get_pset_load_average(pset, 0), (bit_count(pset->cpu_state_map[PROCESSOR_RUNNING]) + pset->pset_runq.count + rt_runq_count(pset)));
	}
#endif /* __AMP__ */
#endif /* (DEVELOPMENT || DEBUG) */
}

void
sched_update_pset_avg_execution_time(__unused processor_set_t pset, __unused uint64_t execution_time, __unused uint64_t curtime, __unused sched_bucket_t sched_bucket)
{
}
#endif /* CONFIG_SCHED_EDGE */
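/*
 * Worked example for the non-Edge average above (hypothetical values): each
 * call computes new = (old + (nthreads << PSET_LOAD_NUMERATOR_SHIFT)) >> 1, so
 * history decays by half per update. Starting from 0 with a constant 3
 * running/runnable threads, the average converges toward
 * 3 << PSET_LOAD_NUMERATOR_SHIFT: 1.5, 2.25, 2.625, ... (in thread units).
 */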
/* pset is locked */
static bool
processor_is_fast_track_candidate_for_realtime_thread(processor_set_t pset, processor_t processor)
{
	int cpuid = processor->cpu_id;
#if defined(__x86_64__)
	if (sched_avoid_cpu0 && (cpuid == 0)) {
		return false;
	}
#endif

	cpumap_t fasttrack_map = pset_available_cpumap(pset) & ~pset->pending_AST_URGENT_cpu_mask & ~pset->realtime_map;

	return bit_test(fasttrack_map, cpuid);
}
/* pset is locked */
static processor_t
choose_processor_for_realtime_thread(processor_set_t pset, processor_t skip_processor, bool consider_secondaries)
{
#if defined(__x86_64__)
	bool avoid_cpu0 = sched_avoid_cpu0 && bit_test(pset->cpu_bitmask, 0);
#else
	const bool avoid_cpu0 = false;
#endif

	cpumap_t cpu_map = pset_available_cpumap(pset) & ~pset->pending_AST_URGENT_cpu_mask & ~pset->realtime_map;
	if (skip_processor) {
		bit_clear(cpu_map, skip_processor->cpu_id);
	}

	cpumap_t primary_map = cpu_map & pset->primary_map;
	if (avoid_cpu0) {
		primary_map = bit_ror64(primary_map, 1);
	}

	int rotid = lsb_first(primary_map);
	if (rotid >= 0) {
		int cpuid = avoid_cpu0 ? ((rotid + 1) & 63) : rotid;

		processor_t processor = processor_array[cpuid];

		return processor;
	}

	if (!pset->is_SMT || !sched_allow_rt_smt || !consider_secondaries) {
		goto out;
	}

	/* Consider secondary processors */
	cpumap_t secondary_map = cpu_map & ~pset->primary_map;
	if (avoid_cpu0) {
		/* Also avoid cpu1 */
		secondary_map = bit_ror64(secondary_map, 2);
	}
	rotid = lsb_first(secondary_map);
	if (rotid >= 0) {
		int cpuid = avoid_cpu0 ? ((rotid + 2) & 63) : rotid;

		processor_t processor = processor_array[cpuid];

		return processor;
	}

out:
	if (skip_processor) {
		return PROCESSOR_NULL;
	}

	/*
	 * If we didn't find an obvious processor to choose, but there are still more CPUs
	 * not already running realtime threads than realtime threads in the realtime run queue,
	 * this thread belongs in this pset, so choose some other processor in this pset
	 * to ensure the thread is enqueued here.
	 */
	cpumap_t non_realtime_map = pset_available_cpumap(pset) & pset->primary_map & ~pset->realtime_map;
	if (bit_count(non_realtime_map) > rt_runq_count(pset)) {
		cpu_map = non_realtime_map;
		assert(cpu_map != 0);
		int cpuid = bit_first(cpu_map);
		assert(cpuid >= 0);
		return processor_array[cpuid];
	}

	if (!pset->is_SMT || !sched_allow_rt_smt || !consider_secondaries) {
		goto skip_secondaries;
	}

	non_realtime_map = pset_available_cpumap(pset) & ~pset->realtime_map;
	if (bit_count(non_realtime_map) > rt_runq_count(pset)) {
		cpu_map = non_realtime_map;
		assert(cpu_map != 0);
		int cpuid = bit_first(cpu_map);
		assert(cpuid >= 0);
		return processor_array[cpuid];
	}

skip_secondaries:
	return PROCESSOR_NULL;
}
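/*
 * Worked example of the avoid_cpu0 rotation above (hypothetical map): if the
 * candidate primary_map has bits {0, 3, 5} set, bit_ror64(map, 1) moves them
 * to {63, 2, 4}, lsb_first() returns 2, and (rotid + 1) & 63 selects cpu3, so
 * cpu0 is only chosen when it is the sole candidate (rotid == 63 maps back to
 * 0). The secondary pass rotates by 2 and adds 2 back, skipping cpu0 and cpu1.
 */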
/* pset is locked */
static bool
all_available_primaries_are_running_realtime_threads(processor_set_t pset)
{
	cpumap_t cpu_map = pset_available_cpumap(pset) & pset->primary_map & ~pset->realtime_map;
	return rt_runq_count(pset) > bit_count(cpu_map);
}

#if defined(__x86_64__)
/* pset is locked */
static bool
these_processors_are_running_realtime_threads(processor_set_t pset, uint64_t these_map)
{
	cpumap_t cpu_map = pset_available_cpumap(pset) & these_map & ~pset->realtime_map;
	return rt_runq_count(pset) > bit_count(cpu_map);
}
#endif /* defined(__x86_64__) */
static bool
sched_ok_to_run_realtime_thread(processor_set_t pset, processor_t processor)
{
	bool ok_to_run_realtime_thread = true;
#if defined(__x86_64__)
	if (sched_avoid_cpu0 && processor->cpu_id == 0) {
		ok_to_run_realtime_thread = these_processors_are_running_realtime_threads(pset, pset->primary_map & ~0x1);
	} else if (sched_avoid_cpu0 && (processor->cpu_id == 1) && processor->is_SMT) {
		ok_to_run_realtime_thread = sched_allow_rt_smt && these_processors_are_running_realtime_threads(pset, ~0x2);
	} else if (processor->processor_primary != processor) {
		ok_to_run_realtime_thread = (sched_allow_rt_smt && all_available_primaries_are_running_realtime_threads(pset));
	}
#else
	(void)pset;
	(void)processor;
#endif
	return ok_to_run_realtime_thread;
}
void
sched_pset_made_schedulable(__unused processor_t processor, processor_set_t pset, boolean_t drop_lock)
{
	if (drop_lock) {
		pset_unlock(pset);
	}
}
void
thread_set_no_smt(bool set)
{
	if (!system_is_SMT) {
		/* Not a machine that supports SMT */
		return;
	}

	thread_t thread = current_thread();

	spl_t s = splsched();
	thread_lock(thread);
	if (set) {
		thread->sched_flags |= TH_SFLAG_NO_SMT;
	}
	thread_unlock(thread);
	splx(s);
}

bool
thread_get_no_smt(void)
{
	return current_thread()->sched_flags & TH_SFLAG_NO_SMT;
}
extern void task_set_no_smt(task_t);
void
task_set_no_smt(task_t task)
{
	if (!system_is_SMT) {
		/* Not a machine that supports SMT */
		return;
	}

	if (task == TASK_NULL) {
		task = current_task();
	}

	task_lock(task);
	task->t_flags |= TF_NO_SMT;
	task_unlock(task);
}
#if DEBUG || DEVELOPMENT
extern void sysctl_task_set_no_smt(char no_smt);
void
sysctl_task_set_no_smt(char no_smt)
{
	if (!system_is_SMT) {
		/* Not a machine that supports SMT */
		return;
	}

	task_t task = current_task();

	task_lock(task);
	if (no_smt == '1') {
		task->t_flags |= TF_NO_SMT;
	}
	task_unlock(task);
}

extern char sysctl_task_get_no_smt(void);
char
sysctl_task_get_no_smt(void)
{
	task_t task = current_task();

	if (task->t_flags & TF_NO_SMT) {
		return '1';
	}
	return '0';
}
#endif /* DEVELOPMENT || DEBUG */
__private_extern__ void
thread_bind_cluster_type(thread_t thread, char cluster_type, bool soft_bound)
{
#if __AMP__
	spl_t s = splsched();
	thread_lock(thread);
	thread->sched_flags &= ~(TH_SFLAG_ECORE_ONLY | TH_SFLAG_PCORE_ONLY | TH_SFLAG_BOUND_SOFT);
	if (soft_bound) {
		thread->sched_flags |= TH_SFLAG_BOUND_SOFT;
	}
	switch (cluster_type) {
	case 'e':
	case 'E':
		thread->sched_flags |= TH_SFLAG_ECORE_ONLY;
		break;
	case 'p':
	case 'P':
		thread->sched_flags |= TH_SFLAG_PCORE_ONLY;
		break;
	default:
		break;
	}
	thread_unlock(thread);
	splx(s);

	if (thread == current_thread()) {
		thread_block(THREAD_CONTINUE_NULL);
	}
#else /* __AMP__ */
	(void)thread;
	(void)cluster_type;
	(void)soft_bound;
#endif /* __AMP__ */
}
#if DEVELOPMENT || DEBUG
extern int32_t sysctl_get_bound_cpuid(void);
int32_t
sysctl_get_bound_cpuid(void)
{
	int32_t cpuid = -1;
	thread_t self = current_thread();

	processor_t processor = self->bound_processor;
	if (processor == NULL) {
		cpuid = -1;
	} else {
		cpuid = processor->cpu_id;
	}

	return cpuid;
}
extern kern_return_t sysctl_thread_bind_cpuid(int32_t cpuid);
kern_return_t
sysctl_thread_bind_cpuid(int32_t cpuid)
{
	processor_t processor = PROCESSOR_NULL;

	if (cpuid == -1) {
		goto unbind;
	}

	if (cpuid < 0 || cpuid >= MAX_SCHED_CPUS) {
		return KERN_INVALID_VALUE;
	}

	processor = processor_array[cpuid];
	if (processor == PROCESSOR_NULL) {
		return KERN_INVALID_VALUE;
	}

#if __AMP__

	thread_t thread = current_thread();

	if (thread->sched_flags & (TH_SFLAG_ECORE_ONLY | TH_SFLAG_PCORE_ONLY)) {
		if ((thread->sched_flags & TH_SFLAG_BOUND_SOFT) == 0) {
			/* Cannot hard-bind an already hard-cluster-bound thread */
			return KERN_NOT_SUPPORTED;
		}
	}

#endif /* __AMP__ */

unbind:
	thread_bind(processor);

	thread_block(THREAD_CONTINUE_NULL);
	return KERN_SUCCESS;
}
#endif /* DEVELOPMENT || DEBUG */