/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	sched_prim.c
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Scheduling primitives
 *
 */
#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <mach/thread_act.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>
#include <machine/machlimits.h>
#include <machine/atomic.h>

#include <machine/commpage.h>

#include <kern/kern_types.h>
#include <kern/backtrace.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>

#include <kern/debug.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/sfi.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/ledger.h>
#include <kern/timer_queue.h>
#include <kern/waitq.h>
#include <kern/policy_internal.h>
#include <kern/cpu_quiesce.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>

#include <mach/sdt.h>
#include <mach/mach_host.h>
#include <mach/host_info.h>

#include <sys/kdebug.h>
#include <kperf/kperf.h>
#include <kern/kpc.h>
#include <san/kasan.h>
#include <kern/pms.h>
#include <kern/host.h>
#include <stdatomic.h>
int
rt_runq_count(processor_set_t pset)
{
    return atomic_load_explicit(&SCHED(rt_runq)(pset)->count, memory_order_relaxed);
}

void
rt_runq_count_incr(processor_set_t pset)
{
    atomic_fetch_add_explicit(&SCHED(rt_runq)(pset)->count, 1, memory_order_relaxed);
}

void
rt_runq_count_decr(processor_set_t pset)
{
    atomic_fetch_sub_explicit(&SCHED(rt_runq)(pset)->count, 1, memory_order_relaxed);
}
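
/*
 * These helpers keep the realtime runqueue occupancy count in a C11 atomic so
 * that it can be sampled with relaxed ordering and without holding the RT
 * queue lock. Callers that need a consistent view of the queue contents (for
 * example the RT dequeue paths in thread_select() below) still take the rt
 * lock and re-check the count under it.
 */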
#define DEFAULT_PREEMPTION_RATE     100             /* (1/s) */
int default_preemption_rate = DEFAULT_PREEMPTION_RATE;

#define DEFAULT_BG_PREEMPTION_RATE  400             /* (1/s) */
int default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;

#define MAX_UNSAFE_QUANTA           800
int max_unsafe_quanta = MAX_UNSAFE_QUANTA;

#define MAX_POLL_QUANTA             2
int max_poll_quanta = MAX_POLL_QUANTA;

#define SCHED_POLL_YIELD_SHIFT      4               /* 1/16 */
int sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT;

uint64_t    max_poll_computation;

uint64_t    max_unsafe_computation;
uint64_t    sched_safe_duration;
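
/*
 * Rough arithmetic for the defaults above: a preemption rate of 100/s yields a
 * standard quantum of 1,000,000 / 100 = 10,000 us (10 ms), and the background
 * rate of 400/s yields 2,500 us (2.5 ms); see sched_timeshare_init(). With
 * MAX_UNSAFE_QUANTA = 800, max_unsafe_computation works out to roughly 8 s of
 * CPU time at the standard quantum (computed in sched_timeshare_timebase_init()).
 */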
#if defined(CONFIG_SCHED_TIMESHARE_CORE)

uint32_t    std_quantum;
uint32_t    min_std_quantum;
uint32_t    bg_quantum;

uint32_t    std_quantum_us;
uint32_t    bg_quantum_us;

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

uint32_t    thread_depress_time;
uint32_t    default_timeshare_computation;
uint32_t    default_timeshare_constraint;

uint32_t    max_rt_quantum;
uint32_t    min_rt_quantum;
#if defined(CONFIG_SCHED_TIMESHARE_CORE)

uint32_t    sched_tick;
uint32_t    sched_tick_interval;

/* Timeshare load calculation interval (15ms) */
uint32_t    sched_load_compute_interval_us = 15000;
uint64_t    sched_load_compute_interval_abs;
static _Atomic uint64_t sched_load_compute_deadline;

uint32_t    sched_pri_shifts[TH_BUCKET_MAX];
uint32_t    sched_fixed_shift;

uint32_t    sched_decay_usage_age_factor = 1; /* accelerate 5/8^n usage aging */

/* Allow foreground to decay past default to resolve inversions */
#define DEFAULT_DECAY_BAND_LIMIT ((BASEPRI_FOREGROUND - BASEPRI_DEFAULT) + 2)
int         sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;

/* Defaults for timer deadline profiling */
#define TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT 2000000 /* Timers with deadlines <= 2ms */
#define TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT 5000000 /* Timers with deadlines <= 5ms */

uint64_t timer_deadline_tracking_bin_1;
uint64_t timer_deadline_tracking_bin_2;

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

thread_t sched_maintenance_thread;

/* interrupts disabled lock to guard recommended cores state */
decl_simple_lock_data(static, sched_recommended_cores_lock);
static uint64_t usercontrol_requested_recommended_cores = ALL_CORES_RECOMMENDED;
static void sched_update_recommended_cores(uint64_t recommended_cores);

#if __arm__ || __arm64__
static void sched_recommended_cores_maintenance(void);
uint64_t    perfcontrol_failsafe_starvation_threshold;
extern char *proc_name_address(struct proc *p);
#endif /* __arm__ || __arm64__ */

uint64_t    sched_one_second_interval;
#if defined(CONFIG_SCHED_TIMESHARE_CORE)

static void load_shift_init(void);
static void preempt_pri_init(void);

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

#if CONFIG_SCHED_IDLE_IN_PLACE
static thread_t thread_select_idle(
    thread_t            thread,
    processor_t         processor);
#endif

thread_t processor_idle(
    thread_t            thread,
    processor_t         processor);

static ast_t csw_check_locked(
    thread_t            thread,
    processor_t         processor,
    processor_set_t     pset,
    ast_t               check_reason);

static void processor_setrun(
    processor_t         processor,
    thread_t            thread,
    integer_t           options);

static void
sched_realtime_timebase_init(void);

static void
sched_timer_deadline_tracking_init(void);

#if DEBUG
extern int debug_task;
#define TLOG(a, fmt, args...) if(debug_task & a) kprintf(fmt, ## args)
#else
#define TLOG(a, fmt, args...) do {} while (0)
#endif

static processor_t
thread_bind_internal(
    thread_t            thread,
    processor_t         processor);

static void
sched_vm_group_maintenance(void);
#if defined(CONFIG_SCHED_TIMESHARE_CORE)
int8_t      sched_load_shifts[NRQS];
bitmap_t    sched_preempt_pri[BITMAP_LEN(NRQS)];
#endif /* CONFIG_SCHED_TIMESHARE_CORE */

const struct sched_dispatch_table *sched_current_dispatch = NULL;
/*
 * Statically allocate a buffer to hold the longest possible
 * scheduler description string, as currently implemented.
 * bsd/kern/kern_sysctl.c has a corresponding definition in bsd/
 * to export to userspace via sysctl(3). If either version
 * changes, update the other.
 *
 * Note that in addition to being an upper bound on the strings
 * in the kernel, it's also an exact parameter to PE_get_default(),
 * which interrogates the device tree on some platforms. That
 * API requires the caller know the exact size of the device tree
 * property, so we need both a legacy size (32) and the current size
 * (48) to deal with old and new device trees. The device tree property
 * is similarly padded to a fixed size so that the same kernel image
 * can run on multiple devices with different schedulers configured
 * in the device tree.
 */
char sched_string[SCHED_STRING_MAX_LENGTH];
uint32_t sched_debug_flags = SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS;

/* Global flag which indicates whether Background Stepper Context is enabled */
static int cpu_throttle_enabled = 1;
#if DEBUG

/* Since using the indirect function dispatch table has a negative impact on
 * context switch performance, only allow DEBUG kernels to use that mechanism.
 */
static void
sched_init_override(void)
{
    char sched_arg[SCHED_STRING_MAX_LENGTH] = { '\0' };

    /* Check for runtime selection of the scheduler algorithm */
    if (!PE_parse_boot_argn("sched", sched_arg, sizeof(sched_arg))) {
        sched_arg[0] = '\0';
    }
    if (strlen(sched_arg) > 0) {
        if (0) {
            /* Allow pattern below */
#if defined(CONFIG_SCHED_TRADITIONAL)
        } else if (0 == strcmp(sched_arg, sched_traditional_dispatch.sched_name)) {
            sched_current_dispatch = &sched_traditional_dispatch;
        } else if (0 == strcmp(sched_arg, sched_traditional_with_pset_runqueue_dispatch.sched_name)) {
            sched_current_dispatch = &sched_traditional_with_pset_runqueue_dispatch;
#endif
#if defined(CONFIG_SCHED_MULTIQ)
        } else if (0 == strcmp(sched_arg, sched_multiq_dispatch.sched_name)) {
            sched_current_dispatch = &sched_multiq_dispatch;
        } else if (0 == strcmp(sched_arg, sched_dualq_dispatch.sched_name)) {
            sched_current_dispatch = &sched_dualq_dispatch;
#endif
        } else {
#if defined(CONFIG_SCHED_TRADITIONAL)
            printf("Unrecognized scheduler algorithm: %s\n", sched_arg);
            printf("Scheduler: Using instead: %s\n", sched_traditional_with_pset_runqueue_dispatch.sched_name);
            sched_current_dispatch = &sched_traditional_with_pset_runqueue_dispatch;
#else
            panic("Unrecognized scheduler algorithm: %s", sched_arg);
#endif
        }
        kprintf("Scheduler: Runtime selection of %s\n", SCHED(sched_name));
    } else {
#if defined(CONFIG_SCHED_MULTIQ)
        sched_current_dispatch = &sched_dualq_dispatch;
#elif defined(CONFIG_SCHED_TRADITIONAL)
        sched_current_dispatch = &sched_traditional_with_pset_runqueue_dispatch;
#else
#error No default scheduler implementation
#endif
        kprintf("Scheduler: Default of %s\n", SCHED(sched_name));
    }
}

#endif /* DEBUG */
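
/*
 * On DEBUG kernels the scheduler can be chosen at boot with the "sched"
 * boot-arg, e.g. sched=dualq or sched=traditional_with_pset_runqueue; the
 * accepted strings are whatever each compiled-in dispatch table reports in
 * its sched_name field, so the exact names depend on the kernel configuration.
 */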
void
sched_init(void)
{
#if DEBUG
    sched_init_override();
#else /* DEBUG */
    kprintf("Scheduler: Default of %s\n", SCHED(sched_name));
#endif /* DEBUG */

    if (!PE_parse_boot_argn("sched_pri_decay_limit", &sched_pri_decay_band_limit, sizeof(sched_pri_decay_band_limit))) {
        /* No boot-args, check in device tree */
        if (!PE_get_default("kern.sched_pri_decay_limit",
            &sched_pri_decay_band_limit,
            sizeof(sched_pri_decay_band_limit))) {
            /* Allow decay all the way to normal limits */
            sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;
        }
    }

    kprintf("Setting scheduler priority decay band limit %d\n", sched_pri_decay_band_limit);

    if (PE_parse_boot_argn("sched_debug", &sched_debug_flags, sizeof(sched_debug_flags))) {
        kprintf("Scheduler: Debug flags 0x%08x\n", sched_debug_flags);
    }
    strlcpy(sched_string, SCHED(sched_name), sizeof(sched_string));

    cpu_quiescent_counter_init();

    SCHED(init)();
    SCHED(rt_init)(&pset0);
    sched_timer_deadline_tracking_init();

    SCHED(pset_init)(&pset0);
    SCHED(processor_init)(master_processor);
}
void
sched_timebase_init(void)
{
    uint64_t abstime;

    clock_interval_to_absolutetime_interval(1, NSEC_PER_SEC, &abstime);
    sched_one_second_interval = abstime;

    SCHED(timebase_init)();
    sched_realtime_timebase_init();
}
#if defined(CONFIG_SCHED_TIMESHARE_CORE)

void
sched_timeshare_init(void)
{
    /*
     * Calculate the timeslicing quantum
     * in us.
     */
    if (default_preemption_rate < 1) {
        default_preemption_rate = DEFAULT_PREEMPTION_RATE;
    }
    std_quantum_us = (1000 * 1000) / default_preemption_rate;

    printf("standard timeslicing quantum is %d us\n", std_quantum_us);

    if (default_bg_preemption_rate < 1) {
        default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;
    }
    bg_quantum_us = (1000 * 1000) / default_bg_preemption_rate;

    printf("standard background quantum is %d us\n", bg_quantum_us);

    load_shift_init();
    preempt_pri_init();
    sched_tick = 0;
}
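
/*
 * With the default rates this computes std_quantum_us = 1000 * 1000 / 100
 * = 10000 us and bg_quantum_us = 1000 * 1000 / 400 = 2500 us; the values are
 * converted to absolute-time units in sched_timeshare_timebase_init() below.
 */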
void
sched_timeshare_timebase_init(void)
{
    uint64_t    abstime;
    uint32_t    shift;

    /* standard timeslicing quantum */
    clock_interval_to_absolutetime_interval(
        std_quantum_us, NSEC_PER_USEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    std_quantum = (uint32_t)abstime;

    /* smallest remaining quantum (250 us) */
    clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    min_std_quantum = (uint32_t)abstime;

    /* quantum for background tasks */
    clock_interval_to_absolutetime_interval(
        bg_quantum_us, NSEC_PER_USEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    bg_quantum = (uint32_t)abstime;

    /* scheduler tick interval */
    clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT,
        NSEC_PER_USEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    sched_tick_interval = (uint32_t)abstime;

    /* timeshare load calculation interval & deadline initialization */
    clock_interval_to_absolutetime_interval(sched_load_compute_interval_us, NSEC_PER_USEC, &sched_load_compute_interval_abs);
    sched_load_compute_deadline = sched_load_compute_interval_abs;

    /*
     * Compute conversion factor from usage to
     * timesharing priorities with 5/8 ** n aging.
     */
    abstime = (abstime * 5) / 3;
    for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift) {
        abstime >>= 1;
    }
    sched_fixed_shift = shift;

    for (uint32_t i = 0; i < TH_BUCKET_MAX; i++) {
        sched_pri_shifts[i] = INT8_MAX;
    }

    max_unsafe_computation = ((uint64_t)max_unsafe_quanta) * std_quantum;
    sched_safe_duration = 2 * ((uint64_t)max_unsafe_quanta) * std_quantum;

    max_poll_computation = ((uint64_t)max_poll_quanta) * std_quantum;
    thread_depress_time = 1 * std_quantum;
    default_timeshare_computation = std_quantum / 2;
    default_timeshare_constraint = std_quantum;

#if __arm__ || __arm64__
    perfcontrol_failsafe_starvation_threshold = (2 * sched_tick_interval);
#endif /* __arm__ || __arm64__ */
}

#endif /* CONFIG_SCHED_TIMESHARE_CORE */
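
/*
 * At this point all timeshare timebase-derived values are in mach absolute-time
 * units: std_quantum/min_std_quantum/bg_quantum bound thread quanta,
 * sched_tick_interval paces the periodic maintenance pass, and
 * sched_fixed_shift is sized by the loop above so that accumulated CPU usage
 * shifts down into the priority range used by the 5/8^n decay scheme.
 */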
void
pset_rt_init(processor_set_t pset)
{
    rt_lock_init(pset);

    pset->rt_runq.count = 0;
    queue_init(&pset->rt_runq.queue);
    memset(&pset->rt_runq.runq_stats, 0, sizeof pset->rt_runq.runq_stats);
}
rt_queue_t
sched_rtglobal_runq(processor_set_t pset)
{
    (void)pset;

    return &pset0.rt_runq;
}
void
sched_rtglobal_init(processor_set_t pset)
{
    if (pset == &pset0) {
        return pset_rt_init(pset);
    }

    /* Only pset0 rt_runq is used, so make it easy to detect
     * buggy accesses to others.
     */
    memset(&pset->rt_runq, 0xfd, sizeof pset->rt_runq);
}
void
sched_rtglobal_queue_shutdown(processor_t processor)
{
    (void)processor;
}
static void
sched_realtime_timebase_init(void)
{
    uint64_t abstime;

    /* smallest rt computation (50 us) */
    clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    min_rt_quantum = (uint32_t)abstime;

    /* maximum rt computation (50 ms) */
    clock_interval_to_absolutetime_interval(
        50, 1000 * NSEC_PER_USEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    max_rt_quantum = (uint32_t)abstime;
}
bool
sched_check_spill(processor_set_t pset, thread_t thread)
{
    (void)pset;
    (void)thread;

    return false;
}

bool
sched_thread_should_yield(processor_t processor, thread_t thread)
{
    (void)thread;

    return !SCHED(processor_queue_empty)(processor) || rt_runq_count(processor->processor_set) > 0;
}
/* Default implementations of .steal_thread_enabled */
bool
sched_steal_thread_DISABLED(processor_set_t pset)
{
    (void)pset;
    return false;
}

bool
sched_steal_thread_enabled(processor_set_t pset)
{
    return pset->node->pset_count > 1;
}
#if defined(CONFIG_SCHED_TIMESHARE_CORE)

/*
 * Set up values for timeshare
 * loading factors.
 */
static void
load_shift_init(void)
{
    int8_t      k, *p = sched_load_shifts;
    uint32_t    i, j;

    uint32_t    sched_decay_penalty = 1;

    if (PE_parse_boot_argn("sched_decay_penalty", &sched_decay_penalty, sizeof(sched_decay_penalty))) {
        kprintf("Overriding scheduler decay penalty %u\n", sched_decay_penalty);
    }

    if (PE_parse_boot_argn("sched_decay_usage_age_factor", &sched_decay_usage_age_factor, sizeof(sched_decay_usage_age_factor))) {
        kprintf("Overriding scheduler decay usage age factor %u\n", sched_decay_usage_age_factor);
    }

    if (sched_decay_penalty == 0) {
        /*
         * There is no penalty for timeshare threads for using too much
         * CPU, so set all load shifts to INT8_MIN. Even under high load,
         * sched_pri_shift will be >INT8_MAX, and there will be no
         * penalty applied to threads (nor will sched_usage be updated per
         * thread).
         */
        for (i = 0; i < NRQS; i++) {
            sched_load_shifts[i] = INT8_MIN;
        }

        return;
    }

    *p++ = INT8_MIN; *p++ = 0;

    /*
     * For a given system load "i", the per-thread priority
     * penalty per quantum of CPU usage is ~2^k priority
     * levels. "sched_decay_penalty" can cause more
     * array entries to be filled with smaller "k" values.
     */
    for (i = 2, j = 1 << sched_decay_penalty, k = 1; i < NRQS; ++k) {
        for (j <<= 1; (i < j) && (i < NRQS); ++i) {
            *p++ = k;
        }
    }
}
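
/*
 * Worked example with the default sched_decay_penalty of 1: the loop above
 * fills sched_load_shifts[] as p[0] = INT8_MIN, p[1] = 0, p[2..3] = 1,
 * p[4..7] = 2, p[8..15] = 3, and so on, doubling the width of each band, so
 * the per-quantum priority penalty grows roughly with log2 of the run-queue
 * load.
 */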
static void
preempt_pri_init(void)
{
    bitmap_t *p = sched_preempt_pri;

    for (int i = BASEPRI_FOREGROUND; i < MINPRI_KERNEL; ++i) {
        bitmap_set(p, i);
    }

    for (int i = BASEPRI_PREEMPT; i <= MAXPRI; ++i) {
        bitmap_set(p, i);
    }
}

#endif /* CONFIG_SCHED_TIMESHARE_CORE */
/*
 *	Thread wait timer expiration.
 */
void
thread_timer_expire(
    void            *p0,
    __unused void   *p1)
{
    thread_t        thread = p0;
    spl_t           s;

    assert_thread_magic(thread);

    s = splsched();
    thread_lock(thread);
    if (--thread->wait_timer_active == 0) {
        if (thread->wait_timer_is_set) {
            thread->wait_timer_is_set = FALSE;
            clear_wait_internal(thread, THREAD_TIMED_OUT);
        }
    }
    thread_unlock(thread);
    splx(s);
}
/*
 *	thread_unblock:
 *
 *	Unblock thread on wake up.
 *
 *	Returns TRUE if the thread should now be placed on the runqueue.
 *
 *	Thread must be locked.
 *
 *	Called at splsched().
 */
boolean_t
thread_unblock(
    thread_t        thread,
    wait_result_t   wresult)
{
    boolean_t       ready_for_runq = FALSE;
    thread_t        cthread = current_thread();
    uint32_t        new_run_count;
    int             old_thread_state;

    /*
     *	Set wait_result.
     */
    thread->wait_result = wresult;

    /*
     *	Cancel pending wait timer.
     */
    if (thread->wait_timer_is_set) {
        if (timer_call_cancel(&thread->wait_timer)) {
            thread->wait_timer_active--;
        }
        thread->wait_timer_is_set = FALSE;
    }

    /*
     *	Update scheduling state: not waiting,
     *	set running.
     */
    old_thread_state = thread->state;
    thread->state = (old_thread_state | TH_RUN) &
        ~(TH_WAIT | TH_UNINT | TH_WAIT_REPORT);

    if ((old_thread_state & TH_RUN) == 0) {
        uint64_t ctime = mach_approximate_time();
        thread->last_made_runnable_time = thread->last_basepri_change_time = ctime;
        timer_start(&thread->runnable_timer, ctime);

        ready_for_runq = TRUE;

        if (old_thread_state & TH_WAIT_REPORT) {
            (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
        }

        /* Update the runnable thread count */
        new_run_count = sched_run_incr(thread);
    } else {
        /*
         * Either the thread is idling in place on another processor,
         * or it hasn't finished context switching yet.
         */
#if CONFIG_SCHED_IDLE_IN_PLACE
        if (thread->state & TH_IDLE) {
            processor_t processor = thread->last_processor;

            if (processor != current_processor()) {
                machine_signal_idle(processor);
            }
        }
#else
        assert((thread->state & TH_IDLE) == 0);
#endif
        /*
         * The run count is only dropped after the context switch completes
         * and the thread is still waiting, so we should not run_incr here
         */
        new_run_count = sched_run_buckets[TH_BUCKET_RUN];
    }

    /*
     * Calculate deadline for real-time threads.
     */
    if (thread->sched_mode == TH_MODE_REALTIME) {
        uint64_t ctime;

        ctime = mach_absolute_time();
        thread->realtime.deadline = thread->realtime.constraint + ctime;
    }

    /*
     * Clear old quantum, fail-safe computation, etc.
     */
    thread->quantum_remaining = 0;
    thread->computation_metered = 0;
    thread->reason = AST_NONE;
    thread->block_hint = kThreadWaitNone;

    /* Obtain power-relevant interrupt and "platform-idle exit" statistics.
     * We also account for "double hop" thread signaling via
     * the thread callout infrastructure.
     * DRK: consider removing the callout wakeup counters in the future
     * they're present for verification at the moment.
     */
    boolean_t aticontext, pidle;
    ml_get_power_state(&aticontext, &pidle);

    if (__improbable(aticontext && !(thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT))) {
        DTRACE_SCHED2(iwakeup, struct thread *, thread, struct proc *, thread->task->bsd_info);

        uint64_t ttd = PROCESSOR_DATA(current_processor(), timer_call_ttd);

        if (ttd) {
            if (ttd <= timer_deadline_tracking_bin_1) {
                thread->thread_timer_wakeups_bin_1++;
            } else if (ttd <= timer_deadline_tracking_bin_2) {
                thread->thread_timer_wakeups_bin_2++;
            }
        }

        ledger_credit_thread(thread, thread->t_ledger,
            task_ledgers.interrupt_wakeups, 1);
        if (pidle) {
            ledger_credit_thread(thread, thread->t_ledger,
                task_ledgers.platform_idle_wakeups, 1);
        }
    } else if (thread_get_tag_internal(cthread) & THREAD_TAG_CALLOUT) {
        /* TODO: what about an interrupt that does a wake taken on a callout thread? */
        if (cthread->callout_woken_from_icontext) {
            ledger_credit_thread(thread, thread->t_ledger,
                task_ledgers.interrupt_wakeups, 1);
            thread->thread_callout_interrupt_wakeups++;

            if (cthread->callout_woken_from_platform_idle) {
                ledger_credit_thread(thread, thread->t_ledger,
                    task_ledgers.platform_idle_wakeups, 1);
                thread->thread_callout_platform_idle_wakeups++;
            }

            cthread->callout_woke_thread = TRUE;
        }
    }

    if (thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT) {
        thread->callout_woken_from_icontext = aticontext;
        thread->callout_woken_from_platform_idle = pidle;
        thread->callout_woke_thread = FALSE;
    }

#if KPERF
    if (ready_for_runq) {
        kperf_make_runnable(thread, aticontext);
    }
#endif /* KPERF */

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE,
        (uintptr_t)thread_tid(thread), thread->sched_pri, thread->wait_result,
        sched_run_buckets[TH_BUCKET_RUN], 0);

    DTRACE_SCHED2(wakeup, struct thread *, thread, struct proc *, thread->task->bsd_info);

    return ready_for_runq;
}
/*
 *	Routine:	thread_go
 *	Purpose:
 *		Unblock and dispatch thread.
 *	Conditions:
 *		thread lock held, IPC locks may be held.
 *		thread must have been pulled from wait queue under same lock hold.
 *		thread must have been waiting
 *	Returns:
 *		KERN_SUCCESS - Thread was set running
 *
 * TODO: This should return void
 */
kern_return_t
thread_go(
    thread_t        thread,
    wait_result_t   wresult)
{
    assert_thread_magic(thread);

    assert(thread->at_safe_point == FALSE);
    assert(thread->wait_event == NO_EVENT64);
    assert(thread->waitq == NULL);

    assert(!(thread->state & (TH_TERMINATE | TH_TERMINATE2)));
    assert(thread->state & TH_WAIT);

    if (thread_unblock(thread, wresult)) {
#if SCHED_TRACE_THREAD_WAKEUPS
        backtrace(&thread->thread_wakeup_bt[0],
            (sizeof(thread->thread_wakeup_bt) / sizeof(uintptr_t)));
#endif
        thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
    }

    return KERN_SUCCESS;
}
/*
 *	Routine:	thread_mark_wait_locked
 *	Purpose:
 *		Mark a thread as waiting.  If, given the circumstances,
 *		it doesn't want to wait (i.e. already aborted), then
 *		indicate that in the return value.
 *	Conditions:
 *		at splsched() and thread is locked.
 */
__private_extern__
wait_result_t
thread_mark_wait_locked(
    thread_t            thread,
    wait_interrupt_t    interruptible_orig)
{
    boolean_t           at_safe_point;
    wait_interrupt_t    interruptible = interruptible_orig;

    assert(!(thread->state & (TH_WAIT | TH_IDLE | TH_UNINT | TH_TERMINATE2 | TH_WAIT_REPORT)));

    /*
     *	The thread may have certain types of interrupts/aborts masked
     *	off.  Even if the wait location says these types of interrupts
     *	are OK, we have to honor mask settings (outer-scoped code may
     *	not be able to handle aborts at the moment).
     */
    interruptible &= TH_OPT_INTMASK;
    if (interruptible > (thread->options & TH_OPT_INTMASK)) {
        interruptible = thread->options & TH_OPT_INTMASK;
    }

    at_safe_point = (interruptible == THREAD_ABORTSAFE);

    if (interruptible == THREAD_UNINT ||
        !(thread->sched_flags & TH_SFLAG_ABORT) ||
        (!at_safe_point &&
        (thread->sched_flags & TH_SFLAG_ABORTSAFELY))) {
        if (!(thread->state & TH_TERMINATE)) {
            DTRACE_SCHED(sleep);
        }

        int state_bits = TH_WAIT;
        if (!interruptible) {
            state_bits |= TH_UNINT;
        }
        if (thread->sched_call) {
            wait_interrupt_t mask = THREAD_WAIT_NOREPORT_USER;
            if (is_kerneltask(thread->task)) {
                mask = THREAD_WAIT_NOREPORT_KERNEL;
            }
            if ((interruptible_orig & mask) == 0) {
                state_bits |= TH_WAIT_REPORT;
            }
        }
        thread->state |= state_bits;
        thread->at_safe_point = at_safe_point;

        /* TODO: pass this through assert_wait instead, have
         * assert_wait just take a struct as an argument */
        assert(!thread->block_hint);
        thread->block_hint = thread->pending_block_hint;
        thread->pending_block_hint = kThreadWaitNone;

        return thread->wait_result = THREAD_WAITING;
    } else {
        if (thread->sched_flags & TH_SFLAG_ABORTSAFELY) {
            thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
        }
    }
    thread->pending_block_hint = kThreadWaitNone;

    return thread->wait_result = THREAD_INTERRUPTED;
}
/*
 *	Routine:	thread_interrupt_level
 *	Purpose:
 *		Set the maximum interruptible state for the
 *		current thread.  The effective value of any
 *		interruptible flag passed into assert_wait
 *		will never exceed this.
 *
 *		Useful for code that must not be interrupted,
 *		but which calls code that doesn't know that.
 *	Returns:
 *		The old interrupt level for the thread.
 */
__private_extern__
wait_interrupt_t
thread_interrupt_level(
    wait_interrupt_t new_level)
{
    thread_t thread = current_thread();
    wait_interrupt_t result = thread->options & TH_OPT_INTMASK;

    thread->options = (thread->options & ~TH_OPT_INTMASK) | (new_level & TH_OPT_INTMASK);

    return result;
}
/*
 *	assert_wait:
 *
 *	Assert that the current thread is about to go to
 *	sleep until the specified event occurs.
 */
wait_result_t
assert_wait(
    event_t             event,
    wait_interrupt_t    interruptible)
{
    if (__improbable(event == NO_EVENT)) {
        panic("%s() called with NO_EVENT", __func__);
    }

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
        VM_KERNEL_UNSLIDE_OR_PERM(event), 0, 0, 0, 0);

    struct waitq *waitq;
    waitq = global_eventq(event);
    return waitq_assert_wait64(waitq, CAST_EVENT64_T(event), interruptible, TIMEOUT_WAIT_FOREVER);
}
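
/*
 * Typical use of the assert_wait()/thread_block() protocol, sketched here for
 * reference (the event pointer is an arbitrary illustrative address; the same
 * pattern appears in thread_stop()/thread_unstop() below):
 *
 *	wait_result_t wr = assert_wait((event_t)&object->flag, THREAD_UNINT);
 *	// ...drop any locks protecting the waited-on condition...
 *	if (wr == THREAD_WAITING)
 *		wr = thread_block(THREAD_CONTINUE_NULL);
 *
 * and on the waking side, once the condition is satisfied:
 *
 *	thread_wakeup((event_t)&object->flag);
 */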
/*
 *	assert_wait_queue:
 *
 *	Return the global waitq for the specified event
 */
struct waitq *
assert_wait_queue(
    event_t             event)
{
    return global_eventq(event);
}
wait_result_t
assert_wait_timeout(
    event_t             event,
    wait_interrupt_t    interruptible,
    uint32_t            interval,
    uint32_t            scale_factor)
{
    thread_t            thread = current_thread();
    wait_result_t       wresult;
    uint64_t            deadline;
    spl_t               s;

    if (__improbable(event == NO_EVENT)) {
        panic("%s() called with NO_EVENT", __func__);
    }

    struct waitq *waitq;
    waitq = global_eventq(event);

    s = splsched();
    waitq_lock(waitq);

    clock_interval_to_deadline(interval, scale_factor, &deadline);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
        VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);

    wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
        interruptible,
        TIMEOUT_URGENCY_SYS_NORMAL,
        deadline, TIMEOUT_NO_LEEWAY,
        thread);

    waitq_unlock(waitq);
    splx(s);
    return wresult;
}
wait_result_t
assert_wait_timeout_with_leeway(
    event_t             event,
    wait_interrupt_t    interruptible,
    wait_timeout_urgency_t urgency,
    uint32_t            interval,
    uint32_t            leeway,
    uint32_t            scale_factor)
{
    thread_t            thread = current_thread();
    wait_result_t       wresult;
    uint64_t            deadline;
    uint64_t            abstime;
    uint64_t            slop;
    uint64_t            now;
    spl_t               s;

    if (__improbable(event == NO_EVENT)) {
        panic("%s() called with NO_EVENT", __func__);
    }

    now = mach_absolute_time();
    clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
    deadline = now + abstime;

    clock_interval_to_absolutetime_interval(leeway, scale_factor, &slop);

    struct waitq *waitq;
    waitq = global_eventq(event);

    s = splsched();
    waitq_lock(waitq);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
        VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);

    wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
        interruptible,
        urgency, deadline, slop,
        thread);

    waitq_unlock(waitq);
    splx(s);
    return wresult;
}
wait_result_t
assert_wait_deadline(
    event_t             event,
    wait_interrupt_t    interruptible,
    uint64_t            deadline)
{
    thread_t            thread = current_thread();
    wait_result_t       wresult;
    spl_t               s;

    if (__improbable(event == NO_EVENT)) {
        panic("%s() called with NO_EVENT", __func__);
    }

    struct waitq *waitq;
    waitq = global_eventq(event);

    s = splsched();
    waitq_lock(waitq);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
        VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);

    wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
        interruptible,
        TIMEOUT_URGENCY_SYS_NORMAL, deadline,
        TIMEOUT_NO_LEEWAY, thread);
    waitq_unlock(waitq);
    splx(s);
    return wresult;
}
wait_result_t
assert_wait_deadline_with_leeway(
    event_t             event,
    wait_interrupt_t    interruptible,
    wait_timeout_urgency_t urgency,
    uint64_t            deadline,
    uint64_t            leeway)
{
    thread_t            thread = current_thread();
    wait_result_t       wresult;
    spl_t               s;

    if (__improbable(event == NO_EVENT)) {
        panic("%s() called with NO_EVENT", __func__);
    }

    struct waitq *waitq;
    waitq = global_eventq(event);

    s = splsched();
    waitq_lock(waitq);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
        VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);

    wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
        interruptible,
        urgency, deadline, leeway,
        thread);

    waitq_unlock(waitq);
    splx(s);
    return wresult;
}
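
/*
 * Summary of the assert_wait variants above: the _timeout forms convert an
 * interval scaled by scale_factor into an absolute deadline, the _deadline
 * forms take the deadline directly, and the _with_leeway forms additionally
 * pass a timer-coalescing urgency and leeway through to
 * waitq_assert_wait64_locked(); all of them park the current thread on the
 * global event waitq for the supplied event.
 */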
/*
 * thread_isoncpu:
 *
 * Return TRUE if a thread is running on a processor such that an AST
 * is needed to pull it out of userspace execution, or if executing in
 * the kernel, bring to a context switch boundary that would cause
 * thread state to be serialized in the thread PCB.
 *
 * Thread locked, returns the same way. While locked, fields
 * like "state" cannot change. "runq" can change only from set to unset.
 */
static inline boolean_t
thread_isoncpu(thread_t thread)
{
    /* Not running or runnable */
    if (!(thread->state & TH_RUN)) {
        return FALSE;
    }

    /* Waiting on a runqueue, not currently running */
    /* TODO: This is invalid - it can get dequeued without thread lock, but not context switched. */
    if (thread->runq != PROCESSOR_NULL) {
        return FALSE;
    }

    /*
     * Thread does not have a stack yet
     * It could be on the stack alloc queue or preparing to be invoked
     */
    if (!thread->kernel_stack) {
        return FALSE;
    }

    /*
     * Thread must be running on a processor, or
     * about to run, or just did run. In all these
     * cases, an AST to the processor is needed
     * to guarantee that the thread is kicked out
     * of userspace and the processor has
     * context switched (and saved register state).
     */
    return TRUE;
}
/*
 * thread_stop:
 *
 * Force a preemption point for a thread and wait
 * for it to stop running on a CPU. If a stronger
 * guarantee is requested, wait until no longer
 * runnable. Arbitrates access among
 * multiple stop requests. (released by unstop)
 *
 * The thread must enter a wait state and stop via a
 * separate means.
 *
 * Returns FALSE if interrupted.
 */
boolean_t
thread_stop(
    thread_t        thread,
    boolean_t       until_not_runnable)
{
    wait_result_t   wresult;
    spl_t           s = splsched();
    boolean_t       oncpu;

    wake_lock(thread);
    thread_lock(thread);

    while (thread->state & TH_SUSP) {
        thread->wake_active = TRUE;
        thread_unlock(thread);

        wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
        wake_unlock(thread);
        splx(s);

        if (wresult == THREAD_WAITING) {
            wresult = thread_block(THREAD_CONTINUE_NULL);
        }

        if (wresult != THREAD_AWAKENED) {
            return FALSE;
        }

        s = splsched();
        wake_lock(thread);
        thread_lock(thread);
    }

    thread->state |= TH_SUSP;

    while ((oncpu = thread_isoncpu(thread)) ||
        (until_not_runnable && (thread->state & TH_RUN))) {
        processor_t processor;

        if (oncpu) {
            assert(thread->state & TH_RUN);
            processor = thread->chosen_processor;
            cause_ast_check(processor);
        }

        thread->wake_active = TRUE;
        thread_unlock(thread);

        wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
        wake_unlock(thread);
        splx(s);

        if (wresult == THREAD_WAITING) {
            wresult = thread_block(THREAD_CONTINUE_NULL);
        }

        if (wresult != THREAD_AWAKENED) {
            thread_unstop(thread);
            return FALSE;
        }

        s = splsched();
        wake_lock(thread);
        thread_lock(thread);
    }

    thread_unlock(thread);
    wake_unlock(thread);
    splx(s);

    /*
     * We return with the thread unlocked. To prevent it from
     * transitioning to a runnable state (or from TH_RUN to
     * being on the CPU), the caller must ensure the thread
     * is stopped via an external means (such as an AST)
     */

    return TRUE;
}
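
/*
 * The stop/unstop protocol above arbitrates via TH_SUSP and the wake_active
 * flag: thread_stop() sets TH_SUSP, pokes the target with cause_ast_check()
 * while it remains on a CPU, and sleeps on &thread->wake_active;
 * thread_unstop() clears TH_SUSP and issues the matching thread_wakeup().
 * thread_wait() below reuses the same waiting machinery but does not
 * arbitrate ownership.
 */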
/*
 * thread_unstop:
 *
 * Release a previous stop request and set
 * the thread running if appropriate.
 *
 * Use only after a successful stop operation.
 */
void
thread_unstop(
    thread_t    thread)
{
    spl_t       s = splsched();

    wake_lock(thread);
    thread_lock(thread);

    assert((thread->state & (TH_RUN | TH_WAIT | TH_SUSP)) != TH_SUSP);

    if (thread->state & TH_SUSP) {
        thread->state &= ~TH_SUSP;

        if (thread->wake_active) {
            thread->wake_active = FALSE;
            thread_unlock(thread);

            thread_wakeup(&thread->wake_active);
            wake_unlock(thread);
            splx(s);

            return;
        }
    }

    thread_unlock(thread);
    wake_unlock(thread);
    splx(s);
}
/*
 * thread_wait:
 *
 * Wait for a thread to stop running. (non-interruptible)
 *
 */
void
thread_wait(
    thread_t    thread,
    boolean_t   until_not_runnable)
{
    wait_result_t   wresult;
    boolean_t       oncpu;
    processor_t     processor;
    spl_t           s = splsched();

    wake_lock(thread);
    thread_lock(thread);

    /*
     * Wait until not running on a CPU.  If stronger requirement
     * desired, wait until not runnable.  Assumption: if thread is
     * on CPU, then TH_RUN is set, so we're not waiting in any case
     * where the original, pure "TH_RUN" check would have let us
     * finish.
     */
    while ((oncpu = thread_isoncpu(thread)) ||
        (until_not_runnable && (thread->state & TH_RUN))) {
        if (oncpu) {
            assert(thread->state & TH_RUN);
            processor = thread->chosen_processor;
            cause_ast_check(processor);
        }

        thread->wake_active = TRUE;
        thread_unlock(thread);

        wresult = assert_wait(&thread->wake_active, THREAD_UNINT);
        wake_unlock(thread);
        splx(s);

        if (wresult == THREAD_WAITING) {
            thread_block(THREAD_CONTINUE_NULL);
        }

        s = splsched();
        wake_lock(thread);
        thread_lock(thread);
    }

    thread_unlock(thread);
    wake_unlock(thread);
    splx(s);
}
/*
 *	Routine: clear_wait_internal
 *
 *		Clear the wait condition for the specified thread.
 *		Start the thread executing if that is appropriate.
 *	Arguments:
 *		thread		thread to awaken
 *		result		Wakeup result the thread should see
 *	Conditions:
 *		At splsched
 *		the thread is locked.
 *	Returns:
 *		KERN_SUCCESS		thread was rousted out a wait
 *		KERN_FAILURE		thread was waiting but could not be rousted
 *		KERN_NOT_WAITING	thread was not waiting
 */
__private_extern__ kern_return_t
clear_wait_internal(
    thread_t        thread,
    wait_result_t   wresult)
{
    uint32_t        i = LockTimeOutUsec;
    struct waitq    *waitq = thread->waitq;

    do {
        if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT)) {
            return KERN_FAILURE;
        }

        if (waitq != NULL) {
            if (!waitq_pull_thread_locked(waitq, thread)) {
                thread_unlock(thread);
                delay(1);
                if (i > 0 && !machine_timeout_suspended()) {
                    i--;
                }
                thread_lock(thread);
                if (waitq != thread->waitq) {
                    return KERN_NOT_WAITING;
                }
                continue;
            }
        }

        /* TODO: Can we instead assert TH_TERMINATE is not set? */
        if ((thread->state & (TH_WAIT | TH_TERMINATE)) == TH_WAIT) {
            return thread_go(thread, wresult);
        } else {
            return KERN_NOT_WAITING;
        }
    } while (i > 0);

    panic("clear_wait_internal: deadlock: thread=%p, wq=%p, cpu=%d\n",
        thread, waitq, cpu_number());

    return KERN_FAILURE;
}
/*
 *	clear_wait:
 *
 *	Clear the wait condition for the specified thread.  Start the thread
 *	executing if that is appropriate.
 *
 *	parameters:
 *	  thread		thread to awaken
 *	  result		Wakeup result the thread should see
 */
kern_return_t
clear_wait(
    thread_t        thread,
    wait_result_t   result)
{
    kern_return_t   ret;
    spl_t           s;

    s = splsched();
    thread_lock(thread);
    ret = clear_wait_internal(thread, result);
    thread_unlock(thread);
    splx(s);
    return ret;
}
/*
 *	thread_wakeup_prim:
 *
 *	Common routine for thread_wakeup, thread_wakeup_with_result,
 *	and thread_wakeup_one.
 *
 */
kern_return_t
thread_wakeup_prim(
    event_t         event,
    boolean_t       one_thread,
    wait_result_t   result)
{
    if (__improbable(event == NO_EVENT)) {
        panic("%s() called with NO_EVENT", __func__);
    }

    struct waitq *wq = global_eventq(event);

    if (one_thread) {
        return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), result, WAITQ_ALL_PRIORITIES);
    } else {
        return waitq_wakeup64_all(wq, CAST_EVENT64_T(event), result, WAITQ_ALL_PRIORITIES);
    }
}
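
/*
 * The thread_wakeup(), thread_wakeup_with_result() and thread_wakeup_one()
 * forms used throughout the kernel are expected to be thin wrappers (macros
 * declared in kern/sched_prim.h) that funnel into thread_wakeup_prim() with
 * the appropriate one_thread/result arguments.
 */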
/*
 * Wakeup a specified thread if and only if it's waiting for this event
 */
kern_return_t
thread_wakeup_thread(
    event_t     event,
    thread_t    thread)
{
    if (__improbable(event == NO_EVENT)) {
        panic("%s() called with NO_EVENT", __func__);
    }

    if (__improbable(thread == THREAD_NULL)) {
        panic("%s() called with THREAD_NULL", __func__);
    }

    struct waitq *wq = global_eventq(event);

    return waitq_wakeup64_thread(wq, CAST_EVENT64_T(event), thread, THREAD_AWAKENED);
}
/*
 * Wakeup a thread waiting on an event and promote it to a priority.
 *
 * Requires woken thread to un-promote itself when done.
 */
kern_return_t
thread_wakeup_one_with_pri(
    event_t     event,
    int         priority)
{
    if (__improbable(event == NO_EVENT)) {
        panic("%s() called with NO_EVENT", __func__);
    }

    struct waitq *wq = global_eventq(event);

    return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), THREAD_AWAKENED, priority);
}
/*
 * Wakeup a thread waiting on an event,
 * promote it to a priority,
 * and return a reference to the woken thread.
 *
 * Requires woken thread to un-promote itself when done.
 */
thread_t
thread_wakeup_identify(event_t  event,
    int     priority)
{
    if (__improbable(event == NO_EVENT)) {
        panic("%s() called with NO_EVENT", __func__);
    }

    struct waitq *wq = global_eventq(event);

    return waitq_wakeup64_identify(wq, CAST_EVENT64_T(event), THREAD_AWAKENED, priority);
}
/*
 *	thread_bind:
 *
 *	Force the current thread to execute on the specified processor.
 *	Takes effect after the next thread_block().
 *
 *	Returns the previous binding.  PROCESSOR_NULL means
 *	not bound.
 *
 *	XXX - DO NOT export this to users - XXX
 */
processor_t
thread_bind(
    processor_t     processor)
{
    thread_t        self = current_thread();
    processor_t     prev;
    spl_t           s;

    s = splsched();
    thread_lock(self);

    prev = thread_bind_internal(self, processor);

    thread_unlock(self);
    splx(s);

    return prev;
}
/*
 * thread_bind_internal:
 *
 * If the specified thread is not the current thread, and it is currently
 * running on another CPU, a remote AST must be sent to that CPU to cause
 * the thread to migrate to its bound processor. Otherwise, the migration
 * will occur at the next quantum expiration or blocking point.
 *
 * When the thread is the current thread, an explicit thread_block() should
 * be used to force the current processor to context switch away and
 * let the thread migrate to the bound processor.
 *
 * Thread must be locked, and at splsched.
 */

static processor_t
thread_bind_internal(
    thread_t        thread,
    processor_t     processor)
{
    processor_t     prev;

    /* <rdar://problem/15102234> */
    assert(thread->sched_pri < BASEPRI_RTQUEUES);
    /* A thread can't be bound if it's sitting on a (potentially incorrect) runqueue */
    assert(thread->runq == PROCESSOR_NULL);

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_THREAD_BIND), thread_tid(thread), processor ? (uintptr_t)processor->cpu_id : (uintptr_t)-1, 0, 0, 0);

    prev = thread->bound_processor;
    thread->bound_processor = processor;

    return prev;
}
/*
 * thread_vm_bind_group_add:
 *
 * The "VM bind group" is a special mechanism to mark a collection
 * of threads from the VM subsystem that, in general, should be scheduled
 * with only one CPU of parallelism. To accomplish this, we initially
 * bind all the threads to the master processor, which has the effect
 * that only one of the threads in the group can execute at once, including
 * preempting threads in the group that are a lower priority. Future
 * mechanisms may use more dynamic mechanisms to prevent the collection
 * of VM threads from using more CPU time than desired.
 *
 * The current implementation can result in priority inversions where
 * compute-bound priority 95 or realtime threads that happen to have
 * landed on the master processor prevent the VM threads from running.
 * When this situation is detected, we unbind the threads for one
 * scheduler tick to allow the scheduler to run the threads on
 * additional CPUs, before restoring the binding (assuming high latency
 * is no longer a problem).
 */

/*
 * The current max is provisioned for:
 * vm_compressor_swap_trigger_thread (92)
 * 2 x vm_pageout_iothread_internal (92) when vm_restricted_to_single_processor==TRUE
 * vm_pageout_continue (92)
 * memorystatus_thread (95)
 */
#define MAX_VM_BIND_GROUP_COUNT (5)
decl_simple_lock_data(static, sched_vm_group_list_lock);
static thread_t sched_vm_group_thread_list[MAX_VM_BIND_GROUP_COUNT];
static int sched_vm_group_thread_count;
static boolean_t sched_vm_group_temporarily_unbound = FALSE;

void
thread_vm_bind_group_add(void)
{
    thread_t self = current_thread();

    thread_reference_internal(self);
    self->options |= TH_OPT_SCHED_VM_GROUP;

    simple_lock(&sched_vm_group_list_lock, LCK_GRP_NULL);
    assert(sched_vm_group_thread_count < MAX_VM_BIND_GROUP_COUNT);
    sched_vm_group_thread_list[sched_vm_group_thread_count++] = self;
    simple_unlock(&sched_vm_group_list_lock);

    thread_bind(master_processor);

    /* Switch to bound processor if not already there */
    thread_block(THREAD_CONTINUE_NULL);
}
static void
sched_vm_group_maintenance(void)
{
    uint64_t ctime = mach_absolute_time();
    uint64_t longtime = ctime - sched_tick_interval;
    int i;
    spl_t s;
    boolean_t high_latency_observed = FALSE;
    boolean_t runnable_and_not_on_runq_observed = FALSE;
    boolean_t bind_target_changed = FALSE;
    processor_t bind_target = PROCESSOR_NULL;

    /* Make sure nobody attempts to add new threads while we are enumerating them */
    simple_lock(&sched_vm_group_list_lock, LCK_GRP_NULL);

    s = splsched();

    for (i = 0; i < sched_vm_group_thread_count; i++) {
        thread_t thread = sched_vm_group_thread_list[i];
        assert(thread != THREAD_NULL);
        thread_lock(thread);
        if ((thread->state & (TH_RUN | TH_WAIT)) == TH_RUN) {
            if (thread->runq != PROCESSOR_NULL && thread->last_made_runnable_time < longtime) {
                high_latency_observed = TRUE;
            } else if (thread->runq == PROCESSOR_NULL) {
                /* There are some cases where a thread may be transitioning that also fall into this case */
                runnable_and_not_on_runq_observed = TRUE;
            }
        }
        thread_unlock(thread);

        if (high_latency_observed && runnable_and_not_on_runq_observed) {
            /* All the things we are looking for are true, stop looking */
            break;
        }
    }

    splx(s);

    if (sched_vm_group_temporarily_unbound) {
        /* If we turned off binding, make sure everything is OK before rebinding */
        if (!high_latency_observed) {
            /* rebind */
            bind_target_changed = TRUE;
            bind_target = master_processor;
            sched_vm_group_temporarily_unbound = FALSE; /* might be reset to TRUE if change cannot be completed */
        }
    } else {
        /*
         * Check if we're in a bad state, which is defined by high
         * latency with no core currently executing a thread. If a
         * single thread is making progress on a CPU, that means the
         * binding concept to reduce parallelism is working as
         * designed.
         */
        if (high_latency_observed && !runnable_and_not_on_runq_observed) {
            /* unbind */
            bind_target_changed = TRUE;
            bind_target = PROCESSOR_NULL;
            sched_vm_group_temporarily_unbound = TRUE;
        }
    }

    if (bind_target_changed) {
        s = splsched();
        for (i = 0; i < sched_vm_group_thread_count; i++) {
            thread_t thread = sched_vm_group_thread_list[i];
            boolean_t removed;
            assert(thread != THREAD_NULL);

            thread_lock(thread);
            removed = thread_run_queue_remove(thread);
            if (removed || ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT)) {
                thread_bind_internal(thread, bind_target);
            } else {
                /*
                 * Thread was in the middle of being context-switched-to,
                 * or was in the process of blocking. To avoid switching the bind
                 * state out mid-flight, defer the change if possible.
                 */
                if (bind_target == PROCESSOR_NULL) {
                    thread_bind_internal(thread, bind_target);
                } else {
                    sched_vm_group_temporarily_unbound = TRUE; /* next pass will try again */
                }
            }

            if (removed) {
                thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
            }
            thread_unlock(thread);
        }
        splx(s);
    }

    simple_unlock(&sched_vm_group_list_lock);
}
/* Invoked prior to idle entry to determine if, on SMT capable processors, an SMT
 * rebalancing opportunity exists when a core is (instantaneously) idle, but
 * other SMT-capable cores may be over-committed. TODO: some possible negatives:
 * IPI thrash if this core does not remain idle following the load balancing ASTs
 * Idle "thrash", when IPI issue is followed by idle entry/core power down
 * followed by a wakeup shortly thereafter.
 */

#if (DEVELOPMENT || DEBUG)
int sched_smt_balance = 1;
#endif
#if __SMP__
/* Invoked with pset locked, returns with pset unlocked */
void
sched_SMT_balance(processor_t cprocessor, processor_set_t cpset)
{
    processor_t ast_processor = NULL;

#if (DEVELOPMENT || DEBUG)
    if (__improbable(sched_smt_balance == 0)) {
        goto smt_balance_exit;
    }
#endif

    assert(cprocessor == current_processor());
    if (cprocessor->is_SMT == FALSE) {
        goto smt_balance_exit;
    }

    processor_t sib_processor = cprocessor->processor_secondary ? cprocessor->processor_secondary : cprocessor->processor_primary;

    /* Determine if both this processor and its sibling are idle,
     * indicating an SMT rebalancing opportunity.
     */
    if (sib_processor->state != PROCESSOR_IDLE) {
        goto smt_balance_exit;
    }

    processor_t sprocessor;

    sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
    uint64_t running_secondary_map = (cpset->cpu_state_map[PROCESSOR_RUNNING] &
        ~cpset->primary_map);
    for (int cpuid = lsb_first(running_secondary_map); cpuid >= 0; cpuid = lsb_next(running_secondary_map, cpuid)) {
        sprocessor = processor_array[cpuid];
        if ((sprocessor->processor_primary->state == PROCESSOR_RUNNING) &&
            (sprocessor->current_pri < BASEPRI_RTQUEUES)) {
            ipi_type = sched_ipi_action(sprocessor, NULL, false, SCHED_IPI_EVENT_SMT_REBAL);
            if (ipi_type != SCHED_IPI_NONE) {
                assert(sprocessor != cprocessor);
                ast_processor = sprocessor;
                break;
            }
        }
    }

smt_balance_exit:
    pset_unlock(cpset);

    if (ast_processor) {
        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_SMT_BALANCE), ast_processor->cpu_id, ast_processor->state, ast_processor->processor_primary->state, 0, 0);
        sched_ipi_perform(ast_processor, ipi_type);
    }
}
#else
/* Invoked with pset locked, returns with pset unlocked */
void
sched_SMT_balance(__unused processor_t cprocessor, processor_set_t cpset)
{
    pset_unlock(cpset);
}
#endif /* __SMP__ */
/*
 * Called with pset locked, on a processor that is committing to run a new thread
 * Will transition an idle or dispatching processor to running as it picks up
 * the first new thread from the idle thread.
 */
static void
pset_commit_processor_to_new_thread(processor_set_t pset, processor_t processor, thread_t new_thread)
{
    if (processor->state == PROCESSOR_DISPATCHING || processor->state == PROCESSOR_IDLE) {
        assert(current_thread() == processor->idle_thread);

        /*
         * Dispatching processor is now committed to running new_thread,
         * so change its state to PROCESSOR_RUNNING.
         */
        pset_update_processor_state(pset, processor, PROCESSOR_RUNNING);
    } else {
        assert((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_SHUTDOWN));
    }

    processor_state_update_from_thread(processor, new_thread);
}
static processor_t choose_processor_for_realtime_thread(processor_set_t pset);
static bool all_available_primaries_are_running_realtime_threads(processor_set_t pset);
static bool these_processors_are_running_realtime_threads(processor_set_t pset, uint64_t these_map);
static bool sched_ok_to_run_realtime_thread(processor_set_t pset, processor_t processor);
int sched_allow_rt_smt = 1;
int sched_avoid_cpu0 = 1;
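
/*
 * Tunables for realtime placement: sched_allow_rt_smt permits a secondary SMT
 * processor to pick up a realtime thread when every available primary is
 * already running realtime work (see the checks in thread_select() below);
 * sched_avoid_cpu0 presumably biases choose_processor_for_realtime_thread()
 * away from cpu 0, although that helper's body is outside this excerpt.
 */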
/*
 *	thread_select:
 *
 *	Select a new thread for the current processor to execute.
 *
 *	May select the current thread, which must be locked.
 */
static thread_t
thread_select(thread_t          thread,
    processor_t       processor,
    ast_t            *reason)
{
    processor_set_t pset = processor->processor_set;
    thread_t        new_thread = THREAD_NULL;

    assert(processor == current_processor());
    assert((thread->state & (TH_RUN | TH_TERMINATE2)) == TH_RUN);

    do {
        /*
         *	Update the priority.
         */
        if (SCHED(can_update_priority)(thread)) {
            SCHED(update_priority)(thread);
        }

        processor_state_update_from_thread(processor, thread);

        pset_lock(pset);

restart:
        /* Acknowledge any pending IPIs here with pset lock held */
        bit_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
        bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);

#if defined(CONFIG_SCHED_DEFERRED_AST)
        bit_clear(pset->pending_deferred_AST_cpu_mask, processor->cpu_id);
#endif

        bool secondary_can_only_run_realtime_thread = false;

        assert(processor->state != PROCESSOR_OFF_LINE);

        if (!processor->is_recommended) {
            /*
             * The performance controller has provided a hint to not dispatch more threads,
             * unless they are bound to us (and thus we are the only option).
             */
            if (!SCHED(processor_bound_count)(processor)) {
                goto idle;
            }
        } else if (processor->processor_primary != processor) {
            /*
             * Should this secondary SMT processor attempt to find work? For pset runqueue systems,
             * we should look for work only under the same conditions that choose_processor()
             * would have assigned work, which is when all primary processors have been assigned work.
             *
             * An exception is that bound threads are dispatched to a processor without going through
             * choose_processor(), so in those cases we should continue trying to dequeue work.
             */
            if (!SCHED(processor_bound_count)(processor)) {
                if ((pset->recommended_bitmask & pset->primary_map & pset->cpu_state_map[PROCESSOR_IDLE]) != 0) {
                    goto idle;
                }

                /*
                 * TODO: What if a secondary core beat an idle primary to waking up from an IPI?
                 * Should it dequeue immediately, or spin waiting for the primary to wake up?
                 */

                /* There are no idle primaries */

                if (processor->processor_primary->current_pri >= BASEPRI_RTQUEUES) {
                    bool secondary_can_run_realtime_thread = sched_allow_rt_smt && rt_runq_count(pset) && all_available_primaries_are_running_realtime_threads(pset);
                    if (!secondary_can_run_realtime_thread) {
                        goto idle;
                    }
                    secondary_can_only_run_realtime_thread = true;
                }
            }
        }
        /*
         *	Test to see if the current thread should continue
         *	to run on this processor.  Must not be attempting to wait, and not
         *	bound to a different processor, nor be in the wrong
         *	processor set, nor be forced to context switch by TH_SUSP.
         *
         *	Note that there are never any RT threads in the regular runqueue.
         *
         *	This code is very insanely tricky.
         */

        /* i.e. not waiting, not TH_SUSP'ed */
        bool still_running = ((thread->state & (TH_TERMINATE | TH_IDLE | TH_WAIT | TH_RUN | TH_SUSP)) == TH_RUN);

        /*
         * Threads running on SMT processors are forced to context switch. Don't rebalance realtime threads.
         * TODO: This should check if it's worth it to rebalance, i.e. 'are there any idle primary processors'
         *       <rdar://problem/47907700>
         *
         * A yielding thread shouldn't be forced to context switch.
         */

        bool is_yielding         = (*reason & AST_YIELD) == AST_YIELD;

        bool needs_smt_rebalance = !is_yielding && thread->sched_pri < BASEPRI_RTQUEUES && processor->processor_primary != processor;

        bool affinity_mismatch   = thread->affinity_set != AFFINITY_SET_NULL && thread->affinity_set->aset_pset != pset;

        bool bound_elsewhere     = thread->bound_processor != PROCESSOR_NULL && thread->bound_processor != processor;

        bool avoid_processor     = !is_yielding && SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread);

        if (still_running && !needs_smt_rebalance && !affinity_mismatch && !bound_elsewhere && !avoid_processor) {
            /*
             * This thread is eligible to keep running on this processor.
             *
             * RT threads with un-expired quantum stay on processor,
             * unless there's a valid RT thread with an earlier deadline.
             */
            if (thread->sched_pri >= BASEPRI_RTQUEUES && processor->first_timeslice) {
                if (rt_runq_count(pset) > 0) {
                    rt_lock_lock(pset);

                    if (rt_runq_count(pset) > 0) {
                        thread_t next_rt = qe_queue_first(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);

                        if (next_rt->realtime.deadline < processor->deadline &&
                            (next_rt->bound_processor == PROCESSOR_NULL ||
                            next_rt->bound_processor == processor)) {
                            /* The next RT thread is better, so pick it off the runqueue. */
                            goto pick_new_rt_thread;
                        }
                    }

                    rt_lock_unlock(pset);
                }

                /* This is still the best RT thread to run. */
                processor->deadline = thread->realtime.deadline;

                sched_update_pset_load_average(pset);

                processor_t next_rt_processor = PROCESSOR_NULL;
                sched_ipi_type_t next_rt_ipi_type = SCHED_IPI_NONE;

                if (rt_runq_count(pset) > 0) {
                    next_rt_processor = choose_processor_for_realtime_thread(pset);
                    if (next_rt_processor) {
                        if (next_rt_processor->state == PROCESSOR_IDLE) {
                            pset_update_processor_state(pset, next_rt_processor, PROCESSOR_DISPATCHING);
                        }
                        next_rt_ipi_type = sched_ipi_action(next_rt_processor, NULL, false, SCHED_IPI_EVENT_PREEMPT);
                    }
                }
                pset_unlock(pset);

                if (next_rt_processor) {
                    sched_ipi_perform(next_rt_processor, next_rt_ipi_type);
                }

                return thread;
            }

            if ((rt_runq_count(pset) == 0) &&
                SCHED(processor_queue_has_priority)(processor, thread->sched_pri, TRUE) == FALSE) {
                /* This thread is still the highest priority runnable (non-idle) thread */
                processor->deadline = UINT64_MAX;

                sched_update_pset_load_average(pset);
                pset_unlock(pset);

                return thread;
            }
        } else {
            /*
             * This processor must context switch.
             * If it's due to a rebalance, we should aggressively find this thread a new home.
             */
            if (needs_smt_rebalance || affinity_mismatch || bound_elsewhere || avoid_processor) {
                *reason |= AST_REBALANCE;
            }
        }
        /* OK, so we're not going to run the current thread. Look at the RT queue. */
        bool ok_to_run_realtime_thread = sched_ok_to_run_realtime_thread(pset, processor);
        if ((rt_runq_count(pset) > 0) && ok_to_run_realtime_thread) {
            rt_lock_lock(pset);

            if ((rt_runq_count(pset) > 0) && ok_to_run_realtime_thread) {
                thread_t next_rt = qe_queue_first(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);

                if (__probable((next_rt->bound_processor == PROCESSOR_NULL ||
                    (next_rt->bound_processor == processor)))) {
pick_new_rt_thread:
                    new_thread = qe_dequeue_head(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);

                    new_thread->runq = PROCESSOR_NULL;
                    SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
                    rt_runq_count_decr(pset);

                    processor->deadline = new_thread->realtime.deadline;

                    pset_commit_processor_to_new_thread(pset, processor, new_thread);

                    rt_lock_unlock(pset);
                    sched_update_pset_load_average(pset);

                    processor_t ast_processor = PROCESSOR_NULL;
                    processor_t next_rt_processor = PROCESSOR_NULL;
                    sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
                    sched_ipi_type_t next_rt_ipi_type = SCHED_IPI_NONE;

                    if (processor->processor_secondary != NULL) {
                        processor_t sprocessor = processor->processor_secondary;
                        if ((sprocessor->state == PROCESSOR_RUNNING) || (sprocessor->state == PROCESSOR_DISPATCHING)) {
                            ipi_type = sched_ipi_action(sprocessor, NULL, false, SCHED_IPI_EVENT_SMT_REBAL);
                            ast_processor = sprocessor;
                        }
                    }
                    if (rt_runq_count(pset) > 0) {
                        next_rt_processor = choose_processor_for_realtime_thread(pset);
                        if (next_rt_processor) {
                            if (next_rt_processor->state == PROCESSOR_IDLE) {
                                pset_update_processor_state(pset, next_rt_processor, PROCESSOR_DISPATCHING);
                            }
                            next_rt_ipi_type = sched_ipi_action(next_rt_processor, NULL, false, SCHED_IPI_EVENT_PREEMPT);
                        }
                    }
                    pset_unlock(pset);

                    if (ast_processor) {
                        sched_ipi_perform(ast_processor, ipi_type);
                    }

                    if (next_rt_processor) {
                        sched_ipi_perform(next_rt_processor, next_rt_ipi_type);
                    }

                    return new_thread;
                }
            }

            rt_lock_unlock(pset);
        }
        if (secondary_can_only_run_realtime_thread) {
            goto idle;
        }

        processor->deadline = UINT64_MAX;
    /* No RT threads, so let's look at the regular threads. */
    if ((new_thread = SCHED(choose_thread)(processor, MINPRI, *reason)) != THREAD_NULL) {
        sched_update_pset_load_average(pset);

        pset_commit_processor_to_new_thread(pset, processor, new_thread);

        processor_t ast_processor = PROCESSOR_NULL;
        sched_ipi_type_t ipi_type = SCHED_IPI_NONE;

        processor_t sprocessor = processor->processor_secondary;
        if ((sprocessor != NULL) && (sprocessor->state == PROCESSOR_RUNNING)) {
            if (thread_no_smt(new_thread)) {
                ipi_type = sched_ipi_action(sprocessor, NULL, false, SCHED_IPI_EVENT_SMT_REBAL);
                ast_processor = sprocessor;

        if (ast_processor) {
            sched_ipi_perform(ast_processor, ipi_type);

    if (processor->must_idle) {
        processor->must_idle = false;

    if (SCHED(steal_thread_enabled)(pset)) {
        /*
         * No runnable threads, attempt to steal
         * from other processors. Returns with pset lock dropped.
         */
        if ((new_thread = SCHED(steal_thread)(pset)) != THREAD_NULL) {
            /*
             * Avoid taking the pset_lock unless it is necessary to change state.
             * It's safe to read processor->state here, as only the current processor can change state
             * from this point (interrupts are disabled and this processor is committed to run new_thread).
             */
            if (processor->state == PROCESSOR_DISPATCHING || processor->state == PROCESSOR_IDLE) {
                pset_commit_processor_to_new_thread(pset, processor, new_thread);

                assert((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_SHUTDOWN));
                processor_state_update_from_thread(processor, new_thread);

        /*
         * If other threads have appeared, shortcut
         */
        if (!SCHED(processor_queue_empty)(processor) || (ok_to_run_realtime_thread && (rt_runq_count(pset) > 0))) {

        /* Someone selected this processor while we had dropped the lock */
        if (bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {

        /*
         * Nothing is runnable, so set this processor idle if it
         */
        if ((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_DISPATCHING)) {
            pset_update_processor_state(pset, processor, PROCESSOR_IDLE);
            processor_state_update_idle(processor);

        /* Invoked with pset locked, returns with pset unlocked */
        SCHED(processor_balance)(processor, pset);

#if CONFIG_SCHED_IDLE_IN_PLACE
        /*
         * Choose idle thread if fast idle is not possible.
         */
        if (processor->processor_primary != processor) {
            return processor->idle_thread;

        if ((thread->state & (TH_IDLE | TH_TERMINATE | TH_SUSP)) || !(thread->state & TH_WAIT) || thread->wake_active || thread->sched_pri >= BASEPRI_RTQUEUES) {
            return processor->idle_thread;

        /*
         * Perform idling activities directly without a
         * context switch. Return dispatched thread,
         * else check again for a runnable thread.
         */
        new_thread = thread_select_idle(thread, processor);

#else /* !CONFIG_SCHED_IDLE_IN_PLACE */

        /*
         * Do a full context switch to idle so that the current
         * thread can start running on another processor without
         * waiting for the fast-idled processor to wake up.
         */
        new_thread = processor->idle_thread;

#endif /* !CONFIG_SCHED_IDLE_IN_PLACE */
    } while (new_thread == THREAD_NULL);
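/*
 * Illustrative sketch only (not part of the original file): the selection loop
 * above tries candidate sources in a fixed order until one produces a thread.
 * The example_* names below are hypothetical and exist purely to show that
 * ordering; they are not kernel APIs.
 */
#if 0   /* illustration, not compiled */
struct example_thread;

static struct example_thread *
example_select_order(struct example_thread *rt_candidate,
    struct example_thread *timeshare_candidate,
    struct example_thread *stolen_candidate,
    struct example_thread *idle_thread)
{
    if (rt_candidate) {            /* earliest-deadline realtime thread */
        return rt_candidate;
    }
    if (timeshare_candidate) {     /* highest-priority thread on this run queue */
        return timeshare_candidate;
    }
    if (stolen_candidate) {        /* work stolen from another processor */
        return stolen_candidate;
    }
    return idle_thread;            /* nothing runnable: run the idle thread */
}
#endif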
#if CONFIG_SCHED_IDLE_IN_PLACE
/*
 * thread_select_idle:
 *
 * Idle the processor using the current thread context.
 *
 * Called with thread locked, then dropped and relocked.
 */
    processor_t processor)
    thread_t new_thread;
    uint64_t arg1, arg2;

    sched_run_decr(thread);

    thread->state |= TH_IDLE;
    processor_state_update_idle(processor);
    /* Reload precise timing global policy to thread-local policy */
    thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);

    thread_unlock(thread);

    /*
     * Switch execution timing to processor idle thread.
     */
    processor->last_dispatch = mach_absolute_time();

#ifdef CONFIG_MACH_APPROXIMATE_TIME
    commpage_update_mach_approximate_time(processor->last_dispatch);

    thread->last_run_time = processor->last_dispatch;
    processor_timer_switch_thread(processor->last_dispatch,
        &processor->idle_thread->system_timer);
    PROCESSOR_DATA(processor, kernel_timer) = &processor->idle_thread->system_timer;

    /*
     * Cancel the quantum timer while idling.
     */
    timer_call_quantum_timer_cancel(&processor->quantum_timer);
    processor->first_timeslice = FALSE;

    if (thread->sched_call) {
        (*thread->sched_call)(SCHED_CALL_BLOCK, thread);

    thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0, 0, NULL);

    /*
     * Enable interrupts and perform idling activities. No
     * preemption due to TH_IDLE being set.
     */
    spllo(); new_thread = processor_idle(thread, processor);

    /*
     * Return at splsched.
     */
    if (thread->sched_call) {
        (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);

    thread_lock(thread);

    /*
     * If awakened, switch to thread timer and start a new quantum.
     * Otherwise skip; we will context switch to another thread or return here.
     */
    if (!(thread->state & TH_WAIT)) {
        uint64_t time_now = processor->last_dispatch = mach_absolute_time();
        processor_timer_switch_thread(time_now, &thread->system_timer);
        timer_update(&thread->runnable_timer, time_now);
        PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
        thread_quantum_init(thread);
        processor->quantum_end = time_now + thread->quantum_remaining;
        timer_call_quantum_timer_enter(&processor->quantum_timer,
            thread, processor->quantum_end, time_now);
        processor->first_timeslice = TRUE;

        thread->computation_epoch = time_now;

    thread->state &= ~TH_IDLE;

    urgency = thread_get_urgency(thread, &arg1, &arg2);

    thread_tell_urgency(urgency, arg1, arg2, 0, new_thread);

    sched_run_incr(thread);

#endif /* CONFIG_SCHED_IDLE_IN_PLACE */
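/*
 * Illustrative sketch only: when the in-place idled thread wakes up above, the
 * next quantum expiry is simply "now + remaining quantum", both expressed in
 * mach_absolute_time() units. The helper below is hypothetical and just shows
 * that arithmetic.
 */
#if 0   /* illustration, not compiled */
#include <stdint.h>

static uint64_t
example_quantum_end(uint64_t time_now, uint32_t quantum_remaining)
{
    /* absolute time at which the quantum timer should fire */
    return time_now + (uint64_t)quantum_remaining;
}
#endif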
2376 * Called at splsched with neither thread locked.
2378 * Perform a context switch and start executing the new thread.
2380 * Returns FALSE when the context switch didn't happen.
2381 * The reference to the new thread is still consumed.
2383 * "self" is what is currently running on the processor,
2384 * "thread" is the new thread to context switch to
2385 * (which may be the same thread in some cases)
2393 if (__improbable(get_preemption_level() != 0)) {
2394 int pl
= get_preemption_level();
2395 panic("thread_invoke: preemption_level %d, possible cause: %s",
2396 pl
, (pl
< 0 ? "unlocking an unlocked mutex or spinlock" :
2397 "blocking while holding a spinlock, or within interrupt context"));
2400 thread_continue_t continuation
= self
->continuation
;
2401 void *parameter
= self
->parameter
;
2402 processor_t processor
;
2404 uint64_t ctime
= mach_absolute_time();
2406 #ifdef CONFIG_MACH_APPROXIMATE_TIME
2407 commpage_update_mach_approximate_time(ctime
);
2410 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
2411 if ((thread
->state
& TH_IDLE
) == 0) {
2412 sched_timeshare_consider_maintenance(ctime
);
2417 mt_sched_update(self
);
2418 #endif /* MONOTONIC */
2420 assert_thread_magic(self
);
2421 assert(self
== current_thread());
2422 assert(self
->runq
== PROCESSOR_NULL
);
2423 assert((self
->state
& (TH_RUN
| TH_TERMINATE2
)) == TH_RUN
);
2425 thread_lock(thread
);
2427 assert_thread_magic(thread
);
2428 assert((thread
->state
& (TH_RUN
| TH_WAIT
| TH_UNINT
| TH_TERMINATE
| TH_TERMINATE2
)) == TH_RUN
);
2429 assert(thread
->bound_processor
== PROCESSOR_NULL
|| thread
->bound_processor
== current_processor());
2430 assert(thread
->runq
== PROCESSOR_NULL
);
2432 /* Reload precise timing global policy to thread-local policy */
2433 thread
->precise_user_kernel_time
= use_precise_user_kernel_time(thread
);
2435 /* Update SFI class based on other factors */
2436 thread
->sfi_class
= sfi_thread_classify(thread
);
2438 /* Update the same_pri_latency for the thread (used by perfcontrol callouts) */
2439 thread
->same_pri_latency
= ctime
- thread
->last_basepri_change_time
;
2441 * In case a base_pri update happened between the timestamp and
2442 * taking the thread lock
2444 if (ctime
<= thread
->last_basepri_change_time
) {
2445 thread
->same_pri_latency
= ctime
- thread
->last_made_runnable_time
;
2448 /* Allow realtime threads to hang onto a stack. */
2449 if ((self
->sched_mode
== TH_MODE_REALTIME
) && !self
->reserved_stack
) {
2450 self
->reserved_stack
= self
->kernel_stack
;
2453 /* Prepare for spin debugging */
2454 #if INTERRUPT_MASKED_DEBUG
2455 ml_spin_debug_clear(thread
);
2458 if (continuation
!= NULL
) {
2459 if (!thread
->kernel_stack
) {
2461 * If we are using a privileged stack,
2462 * check to see whether we can exchange it with
2463 * that of the other thread.
2465 if (self
->kernel_stack
== self
->reserved_stack
&& !thread
->reserved_stack
) {
2470 * Context switch by performing a stack handoff.
2472 continuation
= thread
->continuation
;
2473 parameter
= thread
->parameter
;
2475 processor
= current_processor();
2476 processor
->active_thread
= thread
;
2477 processor_state_update_from_thread(processor
, thread
);
2479 if (thread
->last_processor
!= processor
&& thread
->last_processor
!= NULL
) {
2480 if (thread
->last_processor
->processor_set
!= processor
->processor_set
) {
2481 thread
->ps_switch
++;
2485 thread
->last_processor
= processor
;
2487 ast_context(thread
);
2489 thread_unlock(thread
);
2491 self
->reason
= reason
;
2493 processor
->last_dispatch
= ctime
;
2494 self
->last_run_time
= ctime
;
2495 processor_timer_switch_thread(ctime
, &thread
->system_timer
);
2496 timer_update(&thread
->runnable_timer
, ctime
);
2497 PROCESSOR_DATA(processor
, kernel_timer
) = &thread
->system_timer
;
2500 * Since non-precise user/kernel time doesn't update the state timer
2501 * during privilege transitions, synthesize an event now.
2503 if (!thread
->precise_user_kernel_time
) {
2504 timer_update(PROCESSOR_DATA(processor
, current_state
), ctime
);
2507 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
2508 MACHDBG_CODE(DBG_MACH_SCHED
, MACH_STACK_HANDOFF
) | DBG_FUNC_NONE
,
2509 self
->reason
, (uintptr_t)thread_tid(thread
), self
->sched_pri
, thread
->sched_pri
, 0);
2511 if ((thread
->chosen_processor
!= processor
) && (thread
->chosen_processor
!= PROCESSOR_NULL
)) {
2512 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED
, MACH_MOVED
) | DBG_FUNC_NONE
,
2513 (uintptr_t)thread_tid(thread
), (uintptr_t)thread
->chosen_processor
->cpu_id
, 0, 0, 0);
2516 DTRACE_SCHED2(off__cpu
, struct thread
*, thread
, struct proc
*, thread
->task
->bsd_info
);
2518 SCHED_STATS_CSW(processor
, self
->reason
, self
->sched_pri
, thread
->sched_pri
);
2521 kperf_off_cpu(self
);
2524 TLOG(1, "thread_invoke: calling stack_handoff\n");
2525 stack_handoff(self
, thread
);
2527 /* 'self' is now off core */
2528 assert(thread
== current_thread_volatile());
2530 DTRACE_SCHED(on__cpu
);
2533 kperf_on_cpu(thread
, continuation
, NULL
);
2536 thread_dispatch(self
, thread
);
2539 /* Old thread's stack has been moved to the new thread, so explicitly
2541 kasan_unpoison_stack(thread
->kernel_stack
, kernel_stack_size
);
2544 thread
->continuation
= thread
->parameter
= NULL
;
2546 counter(c_thread_invoke_hits
++);
2548 assert(continuation
);
2549 call_continuation(continuation
, parameter
, thread
->wait_result
, TRUE
);
2551 } else if (thread
== self
) {
2552 /* same thread but with continuation */
2554 counter(++c_thread_invoke_same
);
2556 thread_unlock(self
);
2559 kperf_on_cpu(thread
, continuation
, NULL
);
2562 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
2563 MACHDBG_CODE(DBG_MACH_SCHED
, MACH_SCHED
) | DBG_FUNC_NONE
,
2564 self
->reason
, (uintptr_t)thread_tid(thread
), self
->sched_pri
, thread
->sched_pri
, 0);
2567 /* stack handoff to self - no thread_dispatch(), so clear the stack
2568 * and free the fakestack directly */
2569 kasan_fakestack_drop(self
);
2570 kasan_fakestack_gc(self
);
2571 kasan_unpoison_stack(self
->kernel_stack
, kernel_stack_size
);
2574 self
->continuation
= self
->parameter
= NULL
;
2576 call_continuation(continuation
, parameter
, self
->wait_result
, TRUE
);
2581 * Check that the other thread has a stack
2583 if (!thread
->kernel_stack
) {
2585 if (!stack_alloc_try(thread
)) {
2586 counter(c_thread_invoke_misses
++);
2587 thread_unlock(thread
);
2588 thread_stack_enqueue(thread
);
2591 } else if (thread
== self
) {
2593 counter(++c_thread_invoke_same
);
2594 thread_unlock(self
);
2596 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
2597 MACHDBG_CODE(DBG_MACH_SCHED
, MACH_SCHED
) | DBG_FUNC_NONE
,
2598 self
->reason
, (uintptr_t)thread_tid(thread
), self
->sched_pri
, thread
->sched_pri
, 0);
2605 * Context switch by full context save.
2607 processor
= current_processor();
2608 processor
->active_thread
= thread
;
2609 processor_state_update_from_thread(processor
, thread
);
2611 if (thread
->last_processor
!= processor
&& thread
->last_processor
!= NULL
) {
2612 if (thread
->last_processor
->processor_set
!= processor
->processor_set
) {
2613 thread
->ps_switch
++;
2617 thread
->last_processor
= processor
;
2619 ast_context(thread
);
2621 thread_unlock(thread
);
2623 counter(c_thread_invoke_csw
++);
2625 self
->reason
= reason
;
2627 processor
->last_dispatch
= ctime
;
2628 self
->last_run_time
= ctime
;
2629 processor_timer_switch_thread(ctime
, &thread
->system_timer
);
2630 timer_update(&thread
->runnable_timer
, ctime
);
2631 PROCESSOR_DATA(processor
, kernel_timer
) = &thread
->system_timer
;
2634 * Since non-precise user/kernel time doesn't update the state timer
2635 * during privilege transitions, synthesize an event now.
2637 if (!thread
->precise_user_kernel_time
) {
2638 timer_update(PROCESSOR_DATA(processor
, current_state
), ctime
);
2641 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
2642 MACHDBG_CODE(DBG_MACH_SCHED
, MACH_SCHED
) | DBG_FUNC_NONE
,
2643 self
->reason
, (uintptr_t)thread_tid(thread
), self
->sched_pri
, thread
->sched_pri
, 0);
2645 if ((thread
->chosen_processor
!= processor
) && (thread
->chosen_processor
!= NULL
)) {
2646 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED
, MACH_MOVED
) | DBG_FUNC_NONE
,
2647 (uintptr_t)thread_tid(thread
), (uintptr_t)thread
->chosen_processor
->cpu_id
, 0, 0, 0);
2650 DTRACE_SCHED2(off__cpu
, struct thread
*, thread
, struct proc
*, thread
->task
->bsd_info
);
2652 SCHED_STATS_CSW(processor
, self
->reason
, self
->sched_pri
, thread
->sched_pri
);
2655 kperf_off_cpu(self
);
2659 * This is where we actually switch register context,
2660 * and address space if required. We will next run
2661 * as a result of a subsequent context switch.
2663 * Once registers are switched and the processor is running "thread",
2664 * the stack variables and non-volatile registers will contain whatever
2665 * was there the last time that thread blocked. No local variables should
2666 * be used after this point, except for the special case of "thread", which
2667 * the platform layer returns as the previous thread running on the processor
2668 * via the function call ABI as a return register, and "self", which may have
2669 * been stored on the stack or a non-volatile register, but a stale idea of
2670 * what was on the CPU is newly-accurate because that thread is again
2671 * running on the CPU.
2673 assert(continuation
== self
->continuation
);
2674 thread
= machine_switch_context(self
, continuation
, thread
);
2675 assert(self
== current_thread_volatile());
2676 TLOG(1, "thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self
, continuation
, thread
);
2678 DTRACE_SCHED(on__cpu
);
2681 kperf_on_cpu(self
, NULL
, __builtin_frame_address(0));
2685 * We have been resumed and are set to run.
2687 thread_dispatch(thread
, self
);
2690 self
->continuation
= self
->parameter
= NULL
;
2692 call_continuation(continuation
, parameter
, self
->wait_result
, TRUE
);
#if defined(CONFIG_SCHED_DEFERRED_AST)
/*
 * pset_cancel_deferred_dispatch:
 *
 * Cancels all ASTs that we can cancel for the given processor set
 * if the current processor is running the last runnable thread in the
 *
 * This function assumes the current thread is runnable. This must
 * be called with the pset unlocked.
 */
pset_cancel_deferred_dispatch(
    processor_set_t pset,
    processor_t processor)
    processor_t active_processor = NULL;
    uint32_t sampled_sched_run_count;

    sampled_sched_run_count = (volatile uint32_t) sched_run_buckets[TH_BUCKET_RUN];

    /*
     * If we have emptied the run queue, and our current thread is runnable, we
     * should tell any processors that are still DISPATCHING that they will
     * probably not have any work to do. In the event that there are no
     * pending signals that we can cancel, this is also uninteresting.
     *
     * In the unlikely event that another thread becomes runnable while we are
     * doing this (sched_run_count is atomically updated, not guarded), the
     * codepath making it runnable SHOULD (a dangerous word) need the pset lock
     * in order to dispatch it to a processor in our pset. So, the other
     * codepath will wait while we squash all cancelable ASTs, get the pset
     * lock, and then dispatch the freshly runnable thread. So this should be
     * correct (we won't accidentally have a runnable thread that hasn't been
     * dispatched to an idle processor), if not ideal (we may be restarting the
     * dispatch process, which could have some overhead).
     */

    if ((sampled_sched_run_count == 1) && (pset->pending_deferred_AST_cpu_mask)) {
        uint64_t dispatching_map = (pset->cpu_state_map[PROCESSOR_DISPATCHING] &
            pset->pending_deferred_AST_cpu_mask &
            ~pset->pending_AST_URGENT_cpu_mask);
        for (int cpuid = lsb_first(dispatching_map); cpuid >= 0; cpuid = lsb_next(dispatching_map, cpuid)) {
            active_processor = processor_array[cpuid];
            /*
             * If a processor is DISPATCHING, it could be because of
             * a cancelable signal.
             *
             * IF the processor is not our
             * current processor (the current processor should not
             * be DISPATCHING, so this is a bit paranoid), AND there
             * is a cancelable signal pending on the processor, AND
             * there is no non-cancelable signal pending (as there is
             * no point trying to backtrack on bringing the processor
             * up if a signal we cannot cancel is outstanding), THEN
             * it should make sense to roll back the processor state
             * to the IDLE state.
             *
             * If the racey nature of this approach (as the signal
             * will be arbitrated by hardware, and can fire as we
             * roll back state) results in the core responding
             * despite being pushed back to the IDLE state, it
             * should be no different than if the core took some
             * interrupt while IDLE.
             */
            if (active_processor != processor) {
                /*
                 * Squash all of the processor state back to some
                 * reasonable facsimile of PROCESSOR_IDLE.
                 */

                assert(active_processor->next_thread == THREAD_NULL);
                processor_state_update_idle(active_processor);
                active_processor->deadline = UINT64_MAX;
                pset_update_processor_state(pset, active_processor, PROCESSOR_IDLE);
                bit_clear(pset->pending_deferred_AST_cpu_mask, active_processor->cpu_id);
                machine_signal_idle_cancel(active_processor);

/* We don't support deferred ASTs; everything is candycanes and sunshine. */
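/*
 * Illustrative sketch only: the cancellation path above walks a 64-bit mask of
 * DISPATCHING CPUs that still have a cancelable deferred AST pending. A
 * standalone equivalent of that bit walk, using compiler builtins in place of
 * the kernel's lsb_first()/lsb_next(), is sketched below (example_* names are
 * hypothetical).
 */
#if 0   /* illustration, not compiled */
#include <stdint.h>

static int
example_collect_set_cpus(uint64_t dispatching_map, int out_cpuids[64])
{
    int n = 0;

    while (dispatching_map != 0) {
        out_cpuids[n++] = __builtin_ctzll(dispatching_map); /* lowest set bit = CPU id */
        dispatching_map &= dispatching_map - 1;             /* clear that bit */
    }
    return n;
}
#endif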
2793 perfcontrol_event event
= (new->state
& TH_IDLE
) ? IDLE
: CONTEXT_SWITCH
;
2794 uint64_t same_pri_latency
= (new->state
& TH_IDLE
) ? 0 : new->same_pri_latency
;
2795 machine_switch_perfcontrol_context(event
, timestamp
, 0,
2796 same_pri_latency
, old
, new);
2803 * Handle threads at context switch. Re-dispatch other thread
2804 * if still running, otherwise update run state and perform
2805 * special actions. Update quantum for other thread and begin
2806 * the quantum for ourselves.
2808 * "thread" is the old thread that we have switched away from.
2809 * "self" is the new current thread that we have context switched to
2811 * Called at splsched.
2818 processor_t processor
= self
->last_processor
;
2820 assert(processor
== current_processor());
2821 assert(self
== current_thread_volatile());
2822 assert(thread
!= self
);
2824 if (thread
!= THREAD_NULL
) {
2826 * Do the perfcontrol callout for context switch.
2827 * The reason we do this here is:
2828 * - thread_dispatch() is called from various places that are not
2829 * the direct context switch path for eg. processor shutdown etc.
2830 * So adding the callout here covers all those cases.
2831 * - We want this callout as early as possible to be close
2832 * to the timestamp taken in thread_invoke()
2833 * - We want to avoid holding the thread lock while doing the
2835 * - We do not want to callout if "thread" is NULL.
2837 thread_csw_callout(thread
, self
, processor
->last_dispatch
);
2840 if (thread
->continuation
!= NULL
) {
2842 * Thread has a continuation and the normal stack is going away.
2843 * Unpoison the stack and mark all fakestack objects as unused.
2845 kasan_fakestack_drop(thread
);
2846 if (thread
->kernel_stack
) {
2847 kasan_unpoison_stack(thread
->kernel_stack
, kernel_stack_size
);
2852 * Free all unused fakestack objects.
2854 kasan_fakestack_gc(thread
);
2858 * If blocked at a continuation, discard
2861 if (thread
->continuation
!= NULL
&& thread
->kernel_stack
!= 0) {
2865 if (thread
->state
& TH_IDLE
) {
2866 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
2867 MACHDBG_CODE(DBG_MACH_SCHED
, MACH_DISPATCH
) | DBG_FUNC_NONE
,
2868 (uintptr_t)thread_tid(thread
), 0, thread
->state
,
2869 sched_run_buckets
[TH_BUCKET_RUN
], 0);
2872 int64_t remainder
= 0;
2874 if (processor
->quantum_end
> processor
->last_dispatch
) {
2875 remainder
= processor
->quantum_end
-
2876 processor
->last_dispatch
;
2879 consumed
= thread
->quantum_remaining
- remainder
;
2881 if ((thread
->reason
& AST_LEDGER
) == 0) {
2883 * Bill CPU time to both the task and
2884 * the individual thread.
2886 ledger_credit_thread(thread
, thread
->t_ledger
,
2887 task_ledgers
.cpu_time
, consumed
);
2888 ledger_credit_thread(thread
, thread
->t_threadledger
,
2889 thread_ledgers
.cpu_time
, consumed
);
2890 if (thread
->t_bankledger
) {
2891 ledger_credit_thread(thread
, thread
->t_bankledger
,
2892 bank_ledgers
.cpu_time
,
2893 (consumed
- thread
->t_deduct_bank_ledger_time
));
2895 thread
->t_deduct_bank_ledger_time
= 0;
2899 thread_lock(thread
);
2902 * Apply a priority floor if the thread holds a kernel resource
2903 * Do this before checking starting_pri to avoid overpenalizing
2904 * repeated rwlock blockers.
2906 if (__improbable(thread
->rwlock_count
!= 0)) {
2907 lck_rw_set_promotion_locked(thread
);
2910 boolean_t keep_quantum
= processor
->first_timeslice
;
2913 * Treat a thread which has dropped priority since it got on core
2914 * as having expired its quantum.
2916 if (processor
->starting_pri
> thread
->sched_pri
) {
2917 keep_quantum
= FALSE
;
2920 /* Compute remainder of current quantum. */
2922 processor
->quantum_end
> processor
->last_dispatch
) {
2923 thread
->quantum_remaining
= (uint32_t)remainder
;
2925 thread
->quantum_remaining
= 0;
2928 if (thread
->sched_mode
== TH_MODE_REALTIME
) {
2930 * Cancel the deadline if the thread has
2931 * consumed the entire quantum.
2933 if (thread
->quantum_remaining
== 0) {
2934 thread
->realtime
.deadline
= UINT64_MAX
;
2937 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
2939 * For non-realtime threads treat a tiny
2940 * remaining quantum as an expired quantum
2941 * but include what's left next time.
2943 if (thread
->quantum_remaining
< min_std_quantum
) {
2944 thread
->reason
|= AST_QUANTUM
;
2945 thread
->quantum_remaining
+= SCHED(initial_quantum_size
)(thread
);
2947 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
2951 * If we are doing a direct handoff then
2952 * take the remainder of the quantum.
2954 if ((thread
->reason
& (AST_HANDOFF
| AST_QUANTUM
)) == AST_HANDOFF
) {
2955 self
->quantum_remaining
= thread
->quantum_remaining
;
2956 thread
->reason
|= AST_QUANTUM
;
2957 thread
->quantum_remaining
= 0;
2959 #if defined(CONFIG_SCHED_MULTIQ)
2960 if (SCHED(sched_groups_enabled
) &&
2961 thread
->sched_group
== self
->sched_group
) {
2962 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
2963 MACHDBG_CODE(DBG_MACH_SCHED
, MACH_QUANTUM_HANDOFF
),
2964 self
->reason
, (uintptr_t)thread_tid(thread
),
2965 self
->quantum_remaining
, thread
->quantum_remaining
, 0);
2967 self
->quantum_remaining
= thread
->quantum_remaining
;
2968 thread
->quantum_remaining
= 0;
2969 /* Don't set AST_QUANTUM here - old thread might still want to preempt someone else */
2971 #endif /* defined(CONFIG_SCHED_MULTIQ) */
2974 thread
->computation_metered
+= (processor
->last_dispatch
- thread
->computation_epoch
);
2976 if (!(thread
->state
& TH_WAIT
)) {
2980 thread
->last_made_runnable_time
= thread
->last_basepri_change_time
= processor
->last_dispatch
;
2982 machine_thread_going_off_core(thread
, FALSE
, processor
->last_dispatch
, TRUE
);
2984 ast_t reason
= thread
->reason
;
2985 sched_options_t options
= SCHED_NONE
;
2987 if (reason
& AST_REBALANCE
) {
2988 options
|= SCHED_REBALANCE
;
2989 if (reason
& AST_QUANTUM
) {
2991 * Having gone to the trouble of forcing this thread off a less preferred core,
2992 * we should force the preferable core to reschedule immediately to give this
2993 * thread a chance to run instead of just sitting on the run queue where
2994 * it may just be stolen back by the idle core we just forced it off.
2995 * But only do this at the end of a quantum to prevent cascading effects.
2997 options
|= SCHED_PREEMPT
;
3001 if (reason
& AST_QUANTUM
) {
3002 options
|= SCHED_TAILQ
;
3003 } else if (reason
& AST_PREEMPT
) {
3004 options
|= SCHED_HEADQ
;
3006 options
|= (SCHED_PREEMPT
| SCHED_TAILQ
);
3009 thread_setrun(thread
, options
);
3011 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
3012 MACHDBG_CODE(DBG_MACH_SCHED
, MACH_DISPATCH
) | DBG_FUNC_NONE
,
3013 (uintptr_t)thread_tid(thread
), thread
->reason
, thread
->state
,
3014 sched_run_buckets
[TH_BUCKET_RUN
], 0);
3016 if (thread
->wake_active
) {
3017 thread
->wake_active
= FALSE
;
3018 thread_unlock(thread
);
3020 thread_wakeup(&thread
->wake_active
);
3022 thread_unlock(thread
);
3025 wake_unlock(thread
);
3030 boolean_t should_terminate
= FALSE
;
3031 uint32_t new_run_count
;
3032 int thread_state
= thread
->state
;
3034 /* Only the first call to thread_dispatch
3035 * after explicit termination should add
3036 * the thread to the termination queue
3038 if ((thread_state
& (TH_TERMINATE
| TH_TERMINATE2
)) == TH_TERMINATE
) {
3039 should_terminate
= TRUE
;
3040 thread_state
|= TH_TERMINATE2
;
3043 timer_stop(&thread
->runnable_timer
, processor
->last_dispatch
);
3045 thread_state
&= ~TH_RUN
;
3046 thread
->state
= thread_state
;
3048 thread
->last_made_runnable_time
= thread
->last_basepri_change_time
= THREAD_NOT_RUNNABLE
;
3049 thread
->chosen_processor
= PROCESSOR_NULL
;
3051 new_run_count
= sched_run_decr(thread
);
3053 #if CONFIG_SCHED_SFI
3054 if (thread
->reason
& AST_SFI
) {
3055 thread
->wait_sfi_begin_time
= processor
->last_dispatch
;
3058 machine_thread_going_off_core(thread
, should_terminate
, processor
->last_dispatch
, FALSE
);
3060 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
3061 MACHDBG_CODE(DBG_MACH_SCHED
, MACH_DISPATCH
) | DBG_FUNC_NONE
,
3062 (uintptr_t)thread_tid(thread
), thread
->reason
, thread_state
,
3065 if (thread_state
& TH_WAIT_REPORT
) {
3066 (*thread
->sched_call
)(SCHED_CALL_BLOCK
, thread
);
3069 if (thread
->wake_active
) {
3070 thread
->wake_active
= FALSE
;
3071 thread_unlock(thread
);
3073 thread_wakeup(&thread
->wake_active
);
3075 thread_unlock(thread
);
3078 wake_unlock(thread
);
3080 if (should_terminate
) {
3081 thread_terminate_enqueue(thread
);
3087 int urgency
= THREAD_URGENCY_NONE
;
3088 uint64_t latency
= 0;
3090 /* Update (new) current thread and reprogram quantum timer */
3093 if (!(self
->state
& TH_IDLE
)) {
3094 uint64_t arg1
, arg2
;
3096 #if CONFIG_SCHED_SFI
3099 new_ast
= sfi_thread_needs_ast(self
, NULL
);
3101 if (new_ast
!= AST_NONE
) {
3106 assertf(processor
->last_dispatch
>= self
->last_made_runnable_time
,
3107 "Non-monotonic time? dispatch at 0x%llx, runnable at 0x%llx",
3108 processor
->last_dispatch
, self
->last_made_runnable_time
);
3110 assert(self
->last_made_runnable_time
<= self
->last_basepri_change_time
);
3112 latency
= processor
->last_dispatch
- self
->last_made_runnable_time
;
3113 assert(latency
>= self
->same_pri_latency
);
3115 urgency
= thread_get_urgency(self
, &arg1
, &arg2
);
3117 thread_tell_urgency(urgency
, arg1
, arg2
, latency
, self
);
3120 * Get a new quantum if none remaining.
3122 if (self
->quantum_remaining
== 0) {
3123 thread_quantum_init(self
);
3127 * Set up quantum timer and timeslice.
3129 processor
->quantum_end
= processor
->last_dispatch
+ self
->quantum_remaining
;
3130 timer_call_quantum_timer_enter(&processor
->quantum_timer
, self
,
3131 processor
->quantum_end
, processor
->last_dispatch
);
3133 processor
->first_timeslice
= TRUE
;
3135 timer_call_quantum_timer_cancel(&processor
->quantum_timer
);
3136 processor
->first_timeslice
= FALSE
;
3138 thread_tell_urgency(THREAD_URGENCY_NONE
, 0, 0, 0, self
);
3141 assert(self
->block_hint
== kThreadWaitNone
);
3142 self
->computation_epoch
= processor
->last_dispatch
;
3143 self
->reason
= AST_NONE
;
3144 processor
->starting_pri
= self
->sched_pri
;
3146 thread_unlock(self
);
3148 machine_thread_going_on_core(self
, urgency
, latency
, self
->same_pri_latency
,
3149 processor
->last_dispatch
);
3151 #if defined(CONFIG_SCHED_DEFERRED_AST)
3153 * TODO: Can we state that redispatching our old thread is also
3156 if ((((volatile uint32_t)sched_run_buckets
[TH_BUCKET_RUN
]) == 1) &&
3157 !(self
->state
& TH_IDLE
)) {
3158 pset_cancel_deferred_dispatch(processor
->processor_set
, processor
);
/*
 * thread_block_reason:
 *
 * Forces a reschedule, blocking the caller if a wait
 * has been asserted.
 *
 * If a continuation is specified, then thread_invoke will
 * attempt to discard the thread's kernel stack. When the
 * thread resumes, it will execute the continuation function
 * on a new kernel stack.
 */
counter(mach_counter_t c_thread_block_calls = 0; )

thread_block_reason(
    thread_continue_t continuation,

    thread_t self = current_thread();
    processor_t processor;
    thread_t new_thread;

    counter(++c_thread_block_calls);

    processor = current_processor();

    /* If we're explicitly yielding, force a subsequent quantum */
    if (reason & AST_YIELD) {
        processor->first_timeslice = FALSE;

    /* We're handling all scheduling AST's */
    ast_off(AST_SCHEDULING);

    if ((continuation != NULL) && (self->task != kernel_task)) {
        if (uthread_get_proc_refcount(self->uthread) != 0) {
            panic("thread_block_reason with continuation uthread %p with uu_proc_refcount != 0", self->uthread);

    self->continuation = continuation;
    self->parameter = parameter;

    if (self->state & ~(TH_RUN | TH_IDLE)) {
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            MACHDBG_CODE(DBG_MACH_SCHED, MACH_BLOCK),
            reason, VM_KERNEL_UNSLIDE(continuation), 0, 0, 0);

        new_thread = thread_select(self, processor, &reason);
        thread_unlock(self);
    } while (!thread_invoke(self, new_thread, reason));

    return self->wait_result;

/*
 * Block the current thread if a wait has been asserted.
 */
    thread_continue_t continuation)

    return thread_block_reason(continuation, NULL, AST_NONE);

thread_block_parameter(
    thread_continue_t continuation,

    return thread_block_reason(continuation, parameter, AST_NONE);
/*
 * Switch directly from the current thread to the
 * new thread, handing off our quantum if appropriate.
 *
 * New thread must be runnable, and not on a run queue.
 *
 * Called at splsched.
 */
    thread_continue_t continuation,
    thread_t new_thread)

    ast_t reason = AST_NONE;

    if ((self->state & TH_IDLE) == 0) {
        reason = AST_HANDOFF;

    self->continuation = continuation;
    self->parameter = parameter;

    while (!thread_invoke(self, new_thread, reason)) {
        /* the handoff failed, so we have to fall back to the normal block path */
        processor_t processor = current_processor();

        new_thread = thread_select(self, processor, &reason);
        thread_unlock(self);

    return self->wait_result;
/*
 * Called at splsched when a thread first receives
 * a new stack after a continuation.
 */
    thread_t self = current_thread();
    thread_continue_t continuation;

    DTRACE_SCHED(on__cpu);

    continuation = self->continuation;
    parameter = self->parameter;

    kperf_on_cpu(self, continuation, NULL);

    thread_dispatch(thread, self);

    self->continuation = self->parameter = NULL;

#if INTERRUPT_MASKED_DEBUG
    /* Reset interrupt-masked spin debugging timeout */
    ml_spin_debug_clear(self);

    TLOG(1, "thread_continue: calling call_continuation\n");

    boolean_t enable_interrupts = thread != THREAD_NULL;
    call_continuation(continuation, parameter, self->wait_result, enable_interrupts);
thread_quantum_init(thread_t thread)
    if (thread->sched_mode == TH_MODE_REALTIME) {
        thread->quantum_remaining = thread->realtime.computation;

        thread->quantum_remaining = SCHED(initial_quantum_size)(thread);

sched_timeshare_initial_quantum_size(thread_t thread)
    if ((thread != THREAD_NULL) && thread->th_sched_bucket == TH_BUCKET_SHARE_BG) {
/*
 * Initialize a run queue before first use.
 */
    for (u_int i = 0; i < BITMAP_LEN(NRQS); i++) {

    rq->urgency = rq->count = 0;
    for (int i = 0; i < NRQS; i++) {
        queue_init(&rq->queues[i]);

/*
 * run_queue_dequeue:
 *
 * Perform a dequeue operation on a run queue,
 * and return the resulting thread.
 *
 * The run queue must be locked (see thread_run_queue_remove()
 * for more info), and not empty.
 */
    queue_t queue = &rq->queues[rq->highq];

    if (options & SCHED_PEEK) {
        if (options & SCHED_HEADQ) {
            thread = qe_queue_first(queue, struct thread, runq_links);

            thread = qe_queue_last(queue, struct thread, runq_links);

        if (options & SCHED_HEADQ) {
            thread = qe_dequeue_head(queue, struct thread, runq_links);

            thread = qe_dequeue_tail(queue, struct thread, runq_links);

        assert(thread != THREAD_NULL);
        assert_thread_magic(thread);

        thread->runq = PROCESSOR_NULL;
        SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);

        if (SCHED(priority_is_urgent)(rq->highq)) {
            rq->urgency--; assert(rq->urgency >= 0);

        if (queue_empty(queue)) {
            bitmap_clear(rq->bitmap, rq->highq);
            rq->highq = bitmap_first(rq->bitmap, NRQS);
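/*
 * Illustrative sketch only: a minimal standalone model of the bitmap-indexed
 * run queue used above. "highq" is recomputed from the bitmap when the
 * highest-priority bucket drains, mirroring the bitmap_clear()/bitmap_first()
 * pair in run_queue_dequeue(). The example_* names are hypothetical.
 */
#if 0   /* illustration, not compiled */
#include <stdint.h>

#define EXAMPLE_NRQS 128

struct example_runq {
    uint64_t bitmap[2];           /* one bit per priority level */
    int      count[EXAMPLE_NRQS]; /* threads queued at each level */
    int      highq;               /* highest non-empty priority, -1 if empty */
};

static int
example_bitmap_first(const uint64_t bitmap[2])
{
    for (int word = 1; word >= 0; word--) {
        if (bitmap[word] != 0) {
            return word * 64 + (63 - __builtin_clzll(bitmap[word]));
        }
    }
    return -1;
}

static void
example_runq_dequeue_one(struct example_runq *rq)
{
    int pri = rq->highq;

    if (pri < 0 || rq->count[pri] == 0) {
        return;                                        /* empty: nothing to dequeue */
    }
    rq->count[pri]--;
    if (rq->count[pri] == 0) {
        rq->bitmap[pri / 64] &= ~(1ULL << (pri % 64)); /* bucket drained */
        rq->highq = example_bitmap_first(rq->bitmap);  /* recompute highq */
    }
}
#endif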
/*
 * run_queue_enqueue:
 *
 * Perform an enqueue operation on a run queue.
 *
 * The run queue must be locked (see thread_run_queue_remove()
 */
    queue_t queue = &rq->queues[thread->sched_pri];
    boolean_t result = FALSE;

    assert_thread_magic(thread);

    if (queue_empty(queue)) {
        enqueue_tail(queue, &thread->runq_links);

        rq_bitmap_set(rq->bitmap, thread->sched_pri);
        if (thread->sched_pri > rq->highq) {
            rq->highq = thread->sched_pri;

        if (options & SCHED_TAILQ) {
            enqueue_tail(queue, &thread->runq_links);

            enqueue_head(queue, &thread->runq_links);

        if (SCHED(priority_is_urgent)(thread->sched_pri)) {

    SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
/*
 * Remove a specific thread from a runqueue.
 *
 * The run queue must be locked.
 */
    assert(thread->runq != PROCESSOR_NULL);
    assert_thread_magic(thread);

    remqueue(&thread->runq_links);
    SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);

    if (SCHED(priority_is_urgent)(thread->sched_pri)) {
        rq->urgency--; assert(rq->urgency >= 0);

    if (queue_empty(&rq->queues[thread->sched_pri])) {
        /* update run queue status */
        bitmap_clear(rq->bitmap, thread->sched_pri);
        rq->highq = bitmap_first(rq->bitmap, NRQS);

    thread->runq = PROCESSOR_NULL;
/* Assumes RT lock is not held, and acquires splsched/rt_lock itself */
sched_rtglobal_runq_scan(sched_update_scan_context_t scan_context)
    processor_set_t pset = &pset0;

    qe_foreach_element_safe(thread, &pset->rt_runq.queue, runq_links) {
        if (thread->last_made_runnable_time < scan_context->earliest_rt_make_runnable_time) {
            scan_context->earliest_rt_make_runnable_time = thread->last_made_runnable_time;

    rt_lock_unlock(pset);

sched_rtglobal_runq_count_sum(void)
    return pset0.rt_runq.runq_stats.count_sum;
/*
 * realtime_queue_insert:
 *
 * Enqueue a thread for realtime execution.
 */
realtime_queue_insert(processor_t processor, processor_set_t pset, thread_t thread)
    queue_t queue = &SCHED(rt_runq)(pset)->queue;
    uint64_t deadline = thread->realtime.deadline;
    boolean_t preempt = FALSE;

    if (queue_empty(queue)) {
        enqueue_tail(queue, &thread->runq_links);

        /* Insert into rt_runq in thread deadline order */

        qe_foreach(iter, queue) {
            thread_t iter_thread = qe_element(iter, struct thread, runq_links);
            assert_thread_magic(iter_thread);

            if (deadline < iter_thread->realtime.deadline) {
                if (iter == queue_first(queue)) {

                    insque(&thread->runq_links, queue_prev(iter));

            } else if (iter == queue_last(queue)) {
                enqueue_tail(queue, &thread->runq_links);

    thread->runq = processor;
    SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
    rt_runq_count_incr(pset);

    rt_lock_unlock(pset);
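/*
 * Illustrative sketch only: realtime_queue_insert() above keeps the RT run
 * queue sorted by ascending deadline (earliest deadline first) and walks the
 * queue to find the insertion point. A standalone equivalent over a simple
 * singly linked list, with hypothetical example_* types, looks like this.
 */
#if 0   /* illustration, not compiled */
#include <stdint.h>
#include <stddef.h>

struct example_rt_thread {
    uint64_t deadline;                 /* absolute deadline */
    struct example_rt_thread *next;
};

static void
example_rt_insert(struct example_rt_thread **head, struct example_rt_thread *t)
{
    struct example_rt_thread **link = head;

    /* advance past entries whose deadline is earlier than or equal to ours */
    while (*link != NULL && (*link)->deadline <= t->deadline) {
        link = &(*link)->next;
    }
    t->next = *link;
    *link = t;
}
#endif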
3567 * Dispatch a thread for realtime execution.
3569 * Thread must be locked. Associated pset must
3570 * be locked, and is returned unlocked.
3574 processor_t processor
,
3577 processor_set_t pset
= processor
->processor_set
;
3578 pset_assert_locked(pset
);
3581 sched_ipi_type_t ipi_type
= SCHED_IPI_NONE
;
3583 thread
->chosen_processor
= processor
;
3585 /* <rdar://problem/15102234> */
3586 assert(thread
->bound_processor
== PROCESSOR_NULL
);
3589 * Dispatch directly onto idle processor.
3591 if ((thread
->bound_processor
== processor
)
3592 && processor
->state
== PROCESSOR_IDLE
) {
3593 processor
->next_thread
= thread
;
3594 processor_state_update_from_thread(processor
, thread
);
3595 processor
->deadline
= thread
->realtime
.deadline
;
3596 pset_update_processor_state(pset
, processor
, PROCESSOR_DISPATCHING
);
3598 ipi_type
= sched_ipi_action(processor
, thread
, true, SCHED_IPI_EVENT_BOUND_THR
);
3600 sched_ipi_perform(processor
, ipi_type
);
3604 if (processor
->current_pri
< BASEPRI_RTQUEUES
) {
3605 preempt
= (AST_PREEMPT
| AST_URGENT
);
3606 } else if (thread
->realtime
.deadline
< processor
->deadline
) {
3607 preempt
= (AST_PREEMPT
| AST_URGENT
);
3612 realtime_queue_insert(processor
, pset
, thread
);
3614 ipi_type
= SCHED_IPI_NONE
;
3615 if (preempt
!= AST_NONE
) {
3616 if (processor
->state
== PROCESSOR_IDLE
) {
3617 processor
->next_thread
= THREAD_NULL
;
3618 processor_state_update_from_thread(processor
, thread
);
3619 processor
->deadline
= thread
->realtime
.deadline
;
3620 pset_update_processor_state(pset
, processor
, PROCESSOR_DISPATCHING
);
3621 if (processor
== current_processor()) {
3624 ipi_type
= sched_ipi_action(processor
, thread
, true, SCHED_IPI_EVENT_PREEMPT
);
3626 } else if (processor
->state
== PROCESSOR_DISPATCHING
) {
3627 if ((processor
->next_thread
== THREAD_NULL
) && ((processor
->current_pri
< thread
->sched_pri
) || (processor
->deadline
> thread
->realtime
.deadline
))) {
3628 processor_state_update_from_thread(processor
, thread
);
3629 processor
->deadline
= thread
->realtime
.deadline
;
3632 if (processor
== current_processor()) {
3635 if ((preempt
& AST_URGENT
) == AST_URGENT
) {
3636 bit_set(pset
->pending_AST_URGENT_cpu_mask
, processor
->cpu_id
);
3639 if ((preempt
& AST_PREEMPT
) == AST_PREEMPT
) {
3640 bit_set(pset
->pending_AST_PREEMPT_cpu_mask
, processor
->cpu_id
);
3643 ipi_type
= sched_ipi_action(processor
, thread
, false, SCHED_IPI_EVENT_PREEMPT
);
3647 /* Selected processor was too busy, just keep thread enqueued and let other processors drain it naturally. */
3651 sched_ipi_perform(processor
, ipi_type
);
sched_ipi_deferred_policy(processor_set_t pset, processor_t dst,
    __unused sched_ipi_event_t event)
#if defined(CONFIG_SCHED_DEFERRED_AST)
    if (!bit_test(pset->pending_deferred_AST_cpu_mask, dst->cpu_id)) {
        return SCHED_IPI_DEFERRED;
#else /* CONFIG_SCHED_DEFERRED_AST */
    panic("Request for deferred IPI on an unsupported platform; pset: %p CPU: %d", pset, dst->cpu_id);
#endif /* CONFIG_SCHED_DEFERRED_AST */
    return SCHED_IPI_NONE;
sched_ipi_action(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event)
    sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
    assert(dst != NULL);

    processor_set_t pset = dst->processor_set;
    if (current_processor() == dst) {
        return SCHED_IPI_NONE;

    if (bit_test(pset->pending_AST_URGENT_cpu_mask, dst->cpu_id)) {
        return SCHED_IPI_NONE;

    ipi_type = SCHED(ipi_policy)(dst, thread, dst_idle, event);

    case SCHED_IPI_NONE:
        return SCHED_IPI_NONE;
#if defined(CONFIG_SCHED_DEFERRED_AST)
    case SCHED_IPI_DEFERRED:
        bit_set(pset->pending_deferred_AST_cpu_mask, dst->cpu_id);
#endif /* CONFIG_SCHED_DEFERRED_AST */

        bit_set(pset->pending_AST_URGENT_cpu_mask, dst->cpu_id);
        bit_set(pset->pending_AST_PREEMPT_cpu_mask, dst->cpu_id);
sched_ipi_policy(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event)
    sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
    boolean_t deferred_ipi_supported = false;
    processor_set_t pset = dst->processor_set;

#if defined(CONFIG_SCHED_DEFERRED_AST)
    deferred_ipi_supported = true;
#endif /* CONFIG_SCHED_DEFERRED_AST */

    case SCHED_IPI_EVENT_SPILL:
    case SCHED_IPI_EVENT_SMT_REBAL:
    case SCHED_IPI_EVENT_REBALANCE:
    case SCHED_IPI_EVENT_BOUND_THR:
        /*
         * The spill, SMT rebalance, rebalance and the bound thread
         * scenarios use immediate IPIs always.
         */
        ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
    case SCHED_IPI_EVENT_PREEMPT:
        /* In the preemption case, use immediate IPIs for RT threads */
        if (thread && (thread->sched_pri >= BASEPRI_RTQUEUES)) {
            ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;

        /*
         * For Non-RT threads preemption,
         * If the core is active, use immediate IPIs.
         * If the core is idle, use deferred IPIs if supported; otherwise immediate IPI.
         */
        if (deferred_ipi_supported && dst_idle) {
            return sched_ipi_deferred_policy(pset, dst, event);

        ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;

        panic("Unrecognized scheduler IPI event type %d", event);

    assert(ipi_type != SCHED_IPI_NONE);
sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi)

    case SCHED_IPI_NONE:

    case SCHED_IPI_IDLE:
        machine_signal_idle(dst);

    case SCHED_IPI_IMMEDIATE:
        cause_ast_check(dst);

    case SCHED_IPI_DEFERRED:
        machine_signal_idle_deferred(dst);

        panic("Unrecognized scheduler IPI type: %d", ipi);

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

priority_is_urgent(int priority)
    return bitmap_test(sched_preempt_pri, priority) ? TRUE : FALSE;

#endif /* CONFIG_SCHED_TIMESHARE_CORE */
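/*
 * Illustrative sketch only: the three routines above form a small pipeline.
 * sched_ipi_policy() picks an IPI flavor for an event, sched_ipi_action()
 * records the pending-AST state, and sched_ipi_perform() actually signals the
 * target CPU. A condensed, standalone model of the policy step (hypothetical
 * example_* names, with deferred IPIs assumed available) is:
 */
#if 0   /* illustration, not compiled */
#include <stdbool.h>

typedef enum {
    EXAMPLE_IPI_NONE,
    EXAMPLE_IPI_DEFERRED,
    EXAMPLE_IPI_IDLE,
    EXAMPLE_IPI_IMMEDIATE
} example_ipi_type_t;

static example_ipi_type_t
example_ipi_policy(bool dst_idle, bool is_realtime, bool deferred_supported)
{
    if (is_realtime) {
        /* RT preemption never waits: wake idle cores or interrupt busy ones */
        return dst_idle ? EXAMPLE_IPI_IDLE : EXAMPLE_IPI_IMMEDIATE;
    }
    if (dst_idle && deferred_supported) {
        /* non-RT work headed for an idle core can tolerate a deferred wakeup */
        return EXAMPLE_IPI_DEFERRED;
    }
    return dst_idle ? EXAMPLE_IPI_IDLE : EXAMPLE_IPI_IMMEDIATE;
}
#endif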
3780 * Dispatch a thread for execution on a
3783 * Thread must be locked. Associated pset must
3784 * be locked, and is returned unlocked.
3788 processor_t processor
,
3792 processor_set_t pset
= processor
->processor_set
;
3793 pset_assert_locked(pset
);
3795 enum { eExitIdle
, eInterruptRunning
, eDoNothing
} ipi_action
= eDoNothing
;
3797 sched_ipi_type_t ipi_type
= SCHED_IPI_NONE
;
3799 thread
->chosen_processor
= processor
;
3802 * Set preemption mode.
3804 #if defined(CONFIG_SCHED_DEFERRED_AST)
3805 /* TODO: Do we need to care about urgency (see rdar://problem/20136239)? */
3807 if (SCHED(priority_is_urgent
)(thread
->sched_pri
) && thread
->sched_pri
> processor
->current_pri
) {
3808 preempt
= (AST_PREEMPT
| AST_URGENT
);
3809 } else if (processor
->active_thread
&& thread_eager_preemption(processor
->active_thread
)) {
3810 preempt
= (AST_PREEMPT
| AST_URGENT
);
3811 } else if ((thread
->sched_mode
== TH_MODE_TIMESHARE
) && (thread
->sched_pri
< thread
->base_pri
)) {
3812 if (SCHED(priority_is_urgent
)(thread
->base_pri
) && thread
->sched_pri
> processor
->current_pri
) {
3813 preempt
= (options
& SCHED_PREEMPT
)? AST_PREEMPT
: AST_NONE
;
3818 preempt
= (options
& SCHED_PREEMPT
)? AST_PREEMPT
: AST_NONE
;
3821 if ((options
& (SCHED_PREEMPT
| SCHED_REBALANCE
)) == (SCHED_PREEMPT
| SCHED_REBALANCE
)) {
3823 * Having gone to the trouble of forcing this thread off a less preferred core,
3824 * we should force the preferable core to reschedule immediately to give this
3825 * thread a chance to run instead of just sitting on the run queue where
3826 * it may just be stolen back by the idle core we just forced it off.
3828 preempt
|= AST_PREEMPT
;
3831 SCHED(processor_enqueue
)(processor
, thread
, options
);
3832 sched_update_pset_load_average(pset
);
3834 if (preempt
!= AST_NONE
) {
3835 if (processor
->state
== PROCESSOR_IDLE
) {
3836 processor
->next_thread
= THREAD_NULL
;
3837 processor_state_update_from_thread(processor
, thread
);
3838 processor
->deadline
= UINT64_MAX
;
3839 pset_update_processor_state(pset
, processor
, PROCESSOR_DISPATCHING
);
3840 ipi_action
= eExitIdle
;
3841 } else if (processor
->state
== PROCESSOR_DISPATCHING
) {
3842 if ((processor
->next_thread
== THREAD_NULL
) && (processor
->current_pri
< thread
->sched_pri
)) {
3843 processor_state_update_from_thread(processor
, thread
);
3844 processor
->deadline
= UINT64_MAX
;
3846 } else if ((processor
->state
== PROCESSOR_RUNNING
||
3847 processor
->state
== PROCESSOR_SHUTDOWN
) &&
3848 (thread
->sched_pri
>= processor
->current_pri
)) {
3849 ipi_action
= eInterruptRunning
;
3853 * New thread is not important enough to preempt what is running, but
3854 * special processor states may need special handling
3856 if (processor
->state
== PROCESSOR_SHUTDOWN
&&
3857 thread
->sched_pri
>= processor
->current_pri
) {
3858 ipi_action
= eInterruptRunning
;
3859 } else if (processor
->state
== PROCESSOR_IDLE
) {
3860 processor
->next_thread
= THREAD_NULL
;
3861 processor_state_update_from_thread(processor
, thread
);
3862 processor
->deadline
= UINT64_MAX
;
3863 pset_update_processor_state(pset
, processor
, PROCESSOR_DISPATCHING
);
3865 ipi_action
= eExitIdle
;
3869 if (ipi_action
!= eDoNothing
) {
3870 if (processor
== current_processor()) {
3871 if ((preempt
= csw_check_locked(processor
->active_thread
, processor
, pset
, AST_NONE
)) != AST_NONE
) {
3875 if ((preempt
& AST_URGENT
) == AST_URGENT
) {
3876 bit_set(pset
->pending_AST_URGENT_cpu_mask
, processor
->cpu_id
);
3878 bit_clear(pset
->pending_AST_URGENT_cpu_mask
, processor
->cpu_id
);
3881 if ((preempt
& AST_PREEMPT
) == AST_PREEMPT
) {
3882 bit_set(pset
->pending_AST_PREEMPT_cpu_mask
, processor
->cpu_id
);
3884 bit_clear(pset
->pending_AST_PREEMPT_cpu_mask
, processor
->cpu_id
);
3887 sched_ipi_event_t event
= (options
& SCHED_REBALANCE
) ? SCHED_IPI_EVENT_REBALANCE
: SCHED_IPI_EVENT_PREEMPT
;
3888 ipi_type
= sched_ipi_action(processor
, thread
, (ipi_action
== eExitIdle
), event
);
3892 sched_ipi_perform(processor
, ipi_type
);
3898 * Return the next sibling pset containing
3899 * available processors.
3901 * Returns the original pset if none other is
3904 static processor_set_t
3906 processor_set_t pset
)
3908 processor_set_t nset
= pset
;
3911 nset
= next_pset(nset
);
3912 } while (nset
->online_processor_count
< 1 && nset
!= pset
);
3920 * Choose a processor for the thread, beginning at
3921 * the pset. Accepts an optional processor hint in
3924 * Returns a processor, possibly from a different pset.
3926 * The thread must be locked. The pset must be locked,
3927 * and the resulting pset is locked on return.
3931 processor_set_t starting_pset
,
3932 processor_t processor
,
3935 processor_set_t pset
= starting_pset
;
3936 processor_set_t nset
;
3938 assert(thread
->sched_pri
<= BASEPRI_RTQUEUES
);
3941 * Prefer the hinted processor, when appropriate.
3944 /* Fold last processor hint from secondary processor to its primary */
3945 if (processor
!= PROCESSOR_NULL
) {
3946 processor
= processor
->processor_primary
;
3950 * Only consult platform layer if pset is active, which
3951 * it may not be in some cases when a multi-set system
3952 * is going to sleep.
3954 if (pset
->online_processor_count
) {
3955 if ((processor
== PROCESSOR_NULL
) || (processor
->processor_set
== pset
&& processor
->state
== PROCESSOR_IDLE
)) {
3956 processor_t mc_processor
= machine_choose_processor(pset
, processor
);
3957 if (mc_processor
!= PROCESSOR_NULL
) {
3958 processor
= mc_processor
->processor_primary
;
3964 * At this point, we may have a processor hint, and we may have
3965 * an initial starting pset. If the hint is not in the pset, or
3966 * if the hint is for a processor in an invalid state, discard
3969 if (processor
!= PROCESSOR_NULL
) {
3970 if (processor
->processor_set
!= pset
) {
3971 processor
= PROCESSOR_NULL
;
3972 } else if (!processor
->is_recommended
) {
3973 processor
= PROCESSOR_NULL
;
3974 } else if ((thread
->sched_pri
>= BASEPRI_RTQUEUES
) && !sched_ok_to_run_realtime_thread(pset
, processor
)) {
3975 processor
= PROCESSOR_NULL
;
3977 switch (processor
->state
) {
3978 case PROCESSOR_START
:
3979 case PROCESSOR_SHUTDOWN
:
3980 case PROCESSOR_OFF_LINE
:
3982 * Hint is for a processor that cannot support running new threads.
3984 processor
= PROCESSOR_NULL
;
3986 case PROCESSOR_IDLE
:
3988 * Hint is for an idle processor. Assume it is no worse than any other
3989 * idle processor. The platform layer had an opportunity to provide
3990 * the "least cost idle" processor above.
3993 case PROCESSOR_RUNNING
:
3994 case PROCESSOR_DISPATCHING
:
3996 * Hint is for an active CPU. This fast-path allows
3997 * realtime threads to preempt non-realtime threads
3998 * to regain their previous executing processor.
4000 if ((thread
->sched_pri
>= BASEPRI_RTQUEUES
) &&
4001 (processor
->current_pri
< BASEPRI_RTQUEUES
)) {
4005 /* Otherwise, use hint as part of search below */
4008 processor
= PROCESSOR_NULL
;
4015 * Iterate through the processor sets to locate
4016 * an appropriate processor. Seed results with
4017 * a last-processor hint, if available, so that
4018 * a search must find something strictly better
4021 * A primary/secondary pair of SMT processors are
4022 * "unpaired" if the primary is busy but its
4023 * corresponding secondary is idle (so the physical
4024 * core has full use of its resources).
4027 integer_t lowest_priority
= MAXPRI
+ 1;
4028 integer_t lowest_secondary_priority
= MAXPRI
+ 1;
4029 integer_t lowest_unpaired_primary_priority
= MAXPRI
+ 1;
4030 integer_t lowest_idle_secondary_priority
= MAXPRI
+ 1;
4031 integer_t lowest_count
= INT_MAX
;
4032 uint64_t furthest_deadline
= 1;
4033 processor_t lp_processor
= PROCESSOR_NULL
;
4034 processor_t lp_unpaired_primary_processor
= PROCESSOR_NULL
;
4035 processor_t lp_idle_secondary_processor
= PROCESSOR_NULL
;
4036 processor_t lp_paired_secondary_processor
= PROCESSOR_NULL
;
4037 processor_t lc_processor
= PROCESSOR_NULL
;
4038 processor_t fd_processor
= PROCESSOR_NULL
;
4040 if (processor
!= PROCESSOR_NULL
) {
4041 /* All other states should be enumerated above. */
4042 assert(processor
->state
== PROCESSOR_RUNNING
|| processor
->state
== PROCESSOR_DISPATCHING
);
4044 lowest_priority
= processor
->current_pri
;
4045 lp_processor
= processor
;
4047 if (processor
->current_pri
>= BASEPRI_RTQUEUES
) {
4048 furthest_deadline
= processor
->deadline
;
4049 fd_processor
= processor
;
4052 lowest_count
= SCHED(processor_runq_count
)(processor
);
4053 lc_processor
= processor
;
4059 if (thread
->sched_pri
>= BASEPRI_RTQUEUES
) {
4060 processor
= choose_processor_for_realtime_thread(pset
);
4066 * Choose an idle processor, in pset traversal order
4069 uint64_t idle_primary_map
= (pset
->cpu_state_map
[PROCESSOR_IDLE
] &
4071 pset
->recommended_bitmask
);
4073 /* there shouldn't be a pending AST if the processor is idle */
4074 assert((idle_primary_map
& pset
->pending_AST_URGENT_cpu_mask
) == 0);
4076 cpuid
= lsb_first(idle_primary_map
);
4078 processor
= processor_array
[cpuid
];
4084 * Otherwise, enumerate active and idle processors to find primary candidates
4085 * with lower priority/etc.
4088 uint64_t active_map
= ((pset
->cpu_state_map
[PROCESSOR_RUNNING
] | pset
->cpu_state_map
[PROCESSOR_DISPATCHING
]) &
4089 pset
->recommended_bitmask
&
4090 ~pset
->pending_AST_URGENT_cpu_mask
);
4092 if (SCHED(priority_is_urgent
)(thread
->sched_pri
) == FALSE
) {
4093 active_map
&= ~pset
->pending_AST_PREEMPT_cpu_mask
;
4096 active_map
= bit_ror64(active_map
, (pset
->last_chosen
+ 1));
4097 for (int rotid
= lsb_first(active_map
); rotid
>= 0; rotid
= lsb_next(active_map
, rotid
)) {
4098 cpuid
= ((rotid
+ pset
->last_chosen
+ 1) & 63);
4099 processor
= processor_array
[cpuid
];
4101 integer_t cpri
= processor
->current_pri
;
4102 processor_t primary
= processor
->processor_primary
;
4103 if (primary
!= processor
) {
4104 /* If primary is running a NO_SMT thread, don't choose its secondary */
4105 if (!((primary
->state
== PROCESSOR_RUNNING
) && processor_active_thread_no_smt(primary
))) {
4106 if (cpri
< lowest_secondary_priority
) {
4107 lowest_secondary_priority
= cpri
;
4108 lp_paired_secondary_processor
= processor
;
4112 if (cpri
< lowest_priority
) {
4113 lowest_priority
= cpri
;
4114 lp_processor
= processor
;
4118 if ((cpri
>= BASEPRI_RTQUEUES
) && (processor
->deadline
> furthest_deadline
)) {
4119 furthest_deadline
= processor
->deadline
;
4120 fd_processor
= processor
;
4123 integer_t ccount
= SCHED(processor_runq_count
)(processor
);
4124 if (ccount
< lowest_count
) {
4125 lowest_count
= ccount
;
4126 lc_processor
= processor
;
4131 * For SMT configs, these idle secondary processors must have active primary. Otherwise
4132 * the idle primary would have short-circuited the loop above
4134 uint64_t idle_secondary_map
= (pset
->cpu_state_map
[PROCESSOR_IDLE
] &
4135 ~pset
->primary_map
&
4136 pset
->recommended_bitmask
);
4138 /* there shouldn't be a pending AST if the processor is idle */
4139 assert((idle_secondary_map
& pset
->pending_AST_URGENT_cpu_mask
) == 0);
4140 assert((idle_secondary_map
& pset
->pending_AST_PREEMPT_cpu_mask
) == 0);
4142 for (cpuid
= lsb_first(idle_secondary_map
); cpuid
>= 0; cpuid
= lsb_next(idle_secondary_map
, cpuid
)) {
4143 processor
= processor_array
[cpuid
];
4145 processor_t cprimary
= processor
->processor_primary
;
4147 integer_t primary_pri
= cprimary
->current_pri
;
4150 * TODO: This should also make the same decisions
4151 * as secondary_can_run_realtime_thread
4153 * TODO: Keep track of the pending preemption priority
4154 * of the primary to make this more accurate.
4157 /* If the primary is running a no-smt thread, then don't choose its secondary */
4158 if (cprimary
->state
== PROCESSOR_RUNNING
&&
4159 processor_active_thread_no_smt(cprimary
)) {
4164 * Find the idle secondary processor with the lowest priority primary
4166 * We will choose this processor as a fallback if we find no better
4167 * primary to preempt.
4169 if (primary_pri
< lowest_idle_secondary_priority
) {
4170 lp_idle_secondary_processor
= processor
;
4171 lowest_idle_secondary_priority
= primary_pri
;
/* Find the lowest priority active primary with idle secondary */
4175 if (primary_pri
< lowest_unpaired_primary_priority
) {
4176 /* If the primary processor is offline or starting up, it's not a candidate for this path */
4177 if (cprimary
->state
!= PROCESSOR_RUNNING
&&
4178 cprimary
->state
!= PROCESSOR_DISPATCHING
) {
4182 if (!cprimary
->is_recommended
) {
4186 /* if the primary is pending preemption, don't try to re-preempt it */
4187 if (bit_test(pset
->pending_AST_URGENT_cpu_mask
, cprimary
->cpu_id
)) {
4191 if (SCHED(priority_is_urgent
)(thread
->sched_pri
) == FALSE
&&
4192 bit_test(pset
->pending_AST_PREEMPT_cpu_mask
, cprimary
->cpu_id
)) {
4196 lowest_unpaired_primary_priority
= primary_pri
;
4197 lp_unpaired_primary_processor
= cprimary
;
4202 * We prefer preempting a primary processor over waking up its secondary.
4203 * The secondary will then be woken up by the preempted thread.
4205 if (thread
->sched_pri
> lowest_unpaired_primary_priority
) {
4206 pset
->last_chosen
= lp_unpaired_primary_processor
->cpu_id
;
4207 return lp_unpaired_primary_processor
;
4211 * We prefer preempting a lower priority active processor over directly
4212 * waking up an idle secondary.
4213 * The preempted thread will then find the idle secondary.
4215 if (thread
->sched_pri
> lowest_priority
) {
4216 pset
->last_chosen
= lp_processor
->cpu_id
;
4217 return lp_processor
;
4220 if (thread
->sched_pri
>= BASEPRI_RTQUEUES
) {
4222 * For realtime threads, the most important aspect is
4223 * scheduling latency, so we will pick an active
4224 * secondary processor in this pset, or preempt
4225 * another RT thread with a further deadline before
4226 * going to the next pset.
4229 if (sched_allow_rt_smt
&& (thread
->sched_pri
> lowest_secondary_priority
)) {
4230 pset
->last_chosen
= lp_paired_secondary_processor
->cpu_id
;
4231 return lp_paired_secondary_processor
;
4234 if (thread
->realtime
.deadline
< furthest_deadline
) {
4235 return fd_processor
;
4240 * If all primary processors in this pset are running a higher
4241 * priority thread, move on to next pset. Only when we have
4242 * exhausted the search for primary processors do we
4243 * fall back to secondaries.
4245 nset
= next_pset(pset
);
4247 if (nset
!= starting_pset
) {
4253 } while (nset
!= starting_pset
);
4256 * Make sure that we pick a running processor,
4257 * and that the correct processor set is locked.
4258 * Since we may have unlocked the candidate processor's
4259 * pset, it may have changed state.
4261 * All primary processors are running a higher priority
4262 * thread, so the only options left are enqueuing on
4263 * the secondary processor that would perturb the least priority
4264 * primary, or the least busy primary.
4267 /* lowest_priority is evaluated in the main loops above */
4268 if (lp_idle_secondary_processor
!= PROCESSOR_NULL
) {
4269 processor
= lp_idle_secondary_processor
;
4270 lp_idle_secondary_processor
= PROCESSOR_NULL
;
4271 } else if (lp_paired_secondary_processor
!= PROCESSOR_NULL
) {
4272 processor
= lp_paired_secondary_processor
;
4273 lp_paired_secondary_processor
= PROCESSOR_NULL
;
4274 } else if (lc_processor
!= PROCESSOR_NULL
) {
4275 processor
= lc_processor
;
4276 lc_processor
= PROCESSOR_NULL
;
4279 * All processors are executing higher
4280 * priority threads, and the lowest_count
4281 * candidate was not usable, so we pick a processor
4282 * to give this thread somewhere to be enqueued.
4284 * TODO: Need tracepoint or something to show when this happens
4285 * TODO: Prefer a processor in the original pset
4287 processor
= master_processor
;
4291 * Check that the correct processor set is
4294 if (pset
!= processor
->processor_set
) {
4296 pset
= processor
->processor_set
;
4301 * We must verify that the chosen processor is still available.
4302 * master_processor is an exception, since we may need to preempt
4303 * a running thread on it during processor shutdown (for sleep),
4304 * and that thread needs to be enqueued on its runqueue to run
4305 * when the processor is restarted.
4307 if (processor
!= master_processor
&& (processor
->state
== PROCESSOR_SHUTDOWN
|| processor
->state
== PROCESSOR_OFF_LINE
)) {
4308 processor
= PROCESSOR_NULL
;
4310 } while (processor
== PROCESSOR_NULL
);
4312 pset
->last_chosen
= processor
->cpu_id
;
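/*
 * In summary, when the scan above finds no idle primary, the preference
 * order is: preempt the lowest-priority unpaired primary; otherwise
 * preempt the lowest-priority active processor; for realtime threads,
 * use an active secondary (when sched_allow_rt_smt) or preempt the RT
 * thread with the furthest deadline; and only then fall back to an idle
 * secondary, a paired secondary, the least-loaded processor, or, as a
 * last resort, the master_processor.
 */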
/*
 *	thread_setrun:
 *
 *	Dispatch thread for execution, onto an idle
 *	processor or run queue, and signal a preemption.
 *
 *	Thread must be locked.
 */
void
thread_setrun(
	thread_t        thread,
	integer_t       options)
{
	processor_t processor;
	processor_set_t pset;

	assert((thread->state & (TH_RUN | TH_WAIT | TH_UNINT | TH_TERMINATE | TH_TERMINATE2)) == TH_RUN);
	assert(thread->runq == PROCESSOR_NULL);

	/*
	 *	Update priority if needed.
	 */
	if (SCHED(can_update_priority)(thread)) {
		SCHED(update_priority)(thread);
	}

	thread->sfi_class = sfi_thread_classify(thread);

	assert(thread->runq == PROCESSOR_NULL);

#if __SMP__
	if (thread->bound_processor == PROCESSOR_NULL) {
		/*
		 *	Unbound case.
		 */
		if (thread->affinity_set != AFFINITY_SET_NULL) {
			/*
			 * Use affinity set policy hint.
			 */
			pset = thread->affinity_set->aset_pset;
			pset_lock(pset);

			processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread);
			pset = processor->processor_set;

			SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
			    (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0);
		} else if (thread->last_processor != PROCESSOR_NULL) {
			/*
			 *	Simple (last processor) affinity case.
			 */
			processor = thread->last_processor;
			pset = processor->processor_set;
			pset_lock(pset);
			processor = SCHED(choose_processor)(pset, processor, thread);
			pset = processor->processor_set;

			SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
			    (uintptr_t)thread_tid(thread), thread->last_processor->cpu_id, processor->cpu_id, processor->state, 0);
		} else {
			/*
			 *	No affinity case:
			 *
			 *	Utilize a per task hint to spread threads
			 *	among the available processor sets.
			 */
			task_t task = thread->task;

			pset = task->pset_hint;
			if (pset == PROCESSOR_SET_NULL) {
				pset = current_processor()->processor_set;
			}

			pset = choose_next_pset(pset);
			pset_lock(pset);

			processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread);
			pset = processor->processor_set;
			task->pset_hint = pset;

			SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
			    (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0);
		}
	} else {
		/*
		 *	Bound case:
		 *
		 *	Unconditionally dispatch on the processor.
		 */
		processor = thread->bound_processor;
		pset = processor->processor_set;
		pset_lock(pset);

		SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
		    (uintptr_t)thread_tid(thread), (uintptr_t)-2, processor->cpu_id, processor->state, 0);
	}
#else /* !__SMP__ */
	/* Only one processor to choose */
	assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == master_processor);
	processor = master_processor;
	pset = processor->processor_set;
	pset_lock(pset);
#endif /* !__SMP__ */

	/*
	 *	Dispatch the thread on the chosen processor.
	 *	TODO: This should be based on sched_mode, not sched_pri
	 */
	if (thread->sched_pri >= BASEPRI_RTQUEUES) {
		realtime_setrun(processor, thread);
	} else {
		processor_setrun(processor, thread, options);
	}
	/* pset is now unlocked */
	if (thread->bound_processor == PROCESSOR_NULL) {
		SCHED(check_spill)(pset, thread);
	}
}
processor_set_t
task_choose_pset(
	task_t          task)
{
	processor_set_t pset = task->pset_hint;

	if (pset != PROCESSOR_SET_NULL) {
		pset = choose_next_pset(pset);
	}

	return pset;
}
/*
 *	Check for a preemption point in
 *	the current context.
 *
 *	Called at splsched with thread locked.
 */
ast_t
csw_check(
	thread_t        thread,
	processor_t     processor,
	ast_t           check_reason)
{
	processor_set_t pset = processor->processor_set;

	assert(thread == processor->active_thread);

	pset_lock(pset);

	processor_state_update_from_thread(processor, thread);

	ast_t preempt = csw_check_locked(thread, processor, pset, check_reason);

	/* Acknowledge the IPI if we decided not to preempt */

	if ((preempt & AST_URGENT) == 0) {
		bit_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
	}

	if ((preempt & AST_PREEMPT) == 0) {
		bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
	}

	pset_unlock(pset);

	return preempt;
}
/*
 * Check for preemption at splsched with
 * pset and thread locked
 */
ast_t
csw_check_locked(
	thread_t        thread,
	processor_t     processor,
	processor_set_t pset,
	ast_t           check_reason)
{
	ast_t result;

	if (processor->first_timeslice) {
		if (rt_runq_count(pset) > 0) {
			return check_reason | AST_PREEMPT | AST_URGENT;
		}
	} else {
		if (rt_runq_count(pset) > 0) {
			if (BASEPRI_RTQUEUES > processor->current_pri) {
				return check_reason | AST_PREEMPT | AST_URGENT;
			} else {
				return check_reason | AST_PREEMPT;
			}
		}
	}

	/*
	 * If the current thread is running on a processor that is no longer recommended,
	 * urgently preempt it, at which point thread_select() should
	 * try to idle the processor and re-dispatch the thread to a recommended processor.
	 */
	if (!processor->is_recommended) {
		return check_reason | AST_PREEMPT | AST_URGENT;
	}

	result = SCHED(processor_csw_check)(processor);
	if (result != AST_NONE) {
		return check_reason | result | (thread_eager_preemption(thread) ? AST_URGENT : AST_NONE);
	}

	/*
	 * Same for avoid-processor
	 *
	 * TODO: Should these set AST_REBALANCE?
	 */
	if (SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread)) {
		return check_reason | AST_PREEMPT;
	}

	/*
	 * Even though we could continue executing on this processor, a
	 * secondary SMT core should try to shed load to another primary core.
	 *
	 * TODO: Should this do the same check that thread_select does? i.e.
	 * if no bound threads target this processor, and idle primaries exist, preempt
	 * The case of RT threads existing is already taken care of above
	 */

	if (processor->current_pri < BASEPRI_RTQUEUES &&
	    processor->processor_primary != processor) {
		return check_reason | AST_PREEMPT;
	}

	if (thread->state & TH_SUSP) {
		return check_reason | AST_PREEMPT;
	}

#if CONFIG_SCHED_SFI
	/*
	 * Current thread may not need to be preempted, but maybe needs
	 * an SFI wait?
	 */
	result = sfi_thread_needs_ast(thread, NULL);
	if (result != AST_NONE) {
		return check_reason | result;
	}
#endif

	return AST_NONE;
}
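/*
 * The checks above run in a fixed order: pending realtime work on the
 * pset, loss of the processor's recommendation, the per-scheduler
 * processor_csw_check, the avoid-processor hook, shedding load from a
 * secondary SMT core, a suspended (TH_SUSP) thread, and finally an SFI
 * wait.  The first check that fires determines the returned AST bits.
 */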
/*
 * Handle preemption IPI or IPI in response to setting an AST flag
 * Triggered by cause_ast_check
 * Called at splsched
 */
void
ast_check(processor_t processor)
{
	if (processor->state != PROCESSOR_RUNNING &&
	    processor->state != PROCESSOR_SHUTDOWN) {
		return;
	}

	thread_t thread = processor->active_thread;

	assert(thread == current_thread());

	thread_lock(thread);

	/*
	 * Propagate thread ast to processor.
	 * (handles IPI in response to setting AST flag)
	 */
	ast_propagate(thread);

	/*
	 * Stash the old urgency and perfctl values to find out if
	 * csw_check updates them.
	 */
	thread_urgency_t old_urgency = processor->current_urgency;
	perfcontrol_class_t old_perfctl_class = processor->current_perfctl_class;

	ast_t preempt;

	if ((preempt = csw_check(thread, processor, AST_NONE)) != AST_NONE) {
		ast_on(preempt);
	}

	if (old_urgency != processor->current_urgency) {
		/*
		 * Urgency updates happen with the thread lock held (ugh).
		 * TODO: This doesn't notice QoS changes...
		 */
		uint64_t urgency_param1, urgency_param2;

		thread_urgency_t urgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
		thread_tell_urgency(urgency, urgency_param1, urgency_param2, 0, thread);
	}

	thread_unlock(thread);

	if (old_perfctl_class != processor->current_perfctl_class) {
		/*
		 * We updated the perfctl class of this thread from another core.
		 * Let CLPC know that the currently running thread has a new
		 * perfctl class.
		 */

		machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE,
		    mach_approximate_time(), 0, thread);
	}
}
/*
 *	set_sched_pri:
 *
 *	Set the scheduled priority of the specified thread.
 *
 *	This may cause the thread to change queues.
 *
 *	Thread must be locked.
 */
void
set_sched_pri(
	thread_t                thread,
	int                     new_priority,
	set_sched_pri_options_t options)
{
	bool is_current_thread = (thread == current_thread());
	bool removed_from_runq = false;
	bool lazy_update = ((options & SETPRI_LAZY) == SETPRI_LAZY);

	int old_priority = thread->sched_pri;

	/* If we're already at this priority, no need to mess with the runqueue */
	if (new_priority == old_priority) {
		return;
	}

	if (is_current_thread) {
		assert(thread->state & TH_RUN);
		assert(thread->runq == PROCESSOR_NULL);
	} else {
		removed_from_runq = thread_run_queue_remove(thread);
	}

	thread->sched_pri = new_priority;

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
	    (uintptr_t)thread_tid(thread),
	    thread->base_pri,
	    thread->sched_pri,
	    thread->sched_usage,
	    0);

	if (removed_from_runq) {
		thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
	} else if (is_current_thread) {
		processor_t processor = thread->last_processor;
		assert(processor == current_processor());

		thread_urgency_t old_urgency = processor->current_urgency;

		/*
		 * When dropping in priority, check if the thread no longer belongs on core.
		 * If a thread raises its own priority, don't aggressively rebalance it.
		 * <rdar://problem/31699165>
		 *
		 * csw_check does a processor_state_update_from_thread, but
		 * we should do our own if we're being lazy.
		 */
		if (!lazy_update && new_priority < old_priority) {
			ast_t preempt;

			if ((preempt = csw_check(thread, processor, AST_NONE)) != AST_NONE) {
				ast_on(preempt);
			}
		} else {
			processor_state_update_from_thread(processor, thread);
		}

		/*
		 * set_sched_pri doesn't alter RT params. We expect direct base priority/QoS
		 * class alterations from user space to occur relatively infrequently, hence
		 * those are lazily handled. QoS classes have distinct priority bands, and QoS
		 * inheritance is expected to involve priority changes.
		 */
		if (processor->current_urgency != old_urgency) {
			uint64_t urgency_param1, urgency_param2;

			thread_urgency_t new_urgency = thread_get_urgency(thread,
			    &urgency_param1, &urgency_param2);

			thread_tell_urgency(new_urgency, urgency_param1,
			    urgency_param2, 0, thread);
		}

		/* TODO: only call this if current_perfctl_class changed */
		uint64_t ctime = mach_approximate_time();
		machine_thread_going_on_core(thread, processor->current_urgency, 0, 0, ctime);
	} else if (thread->state & TH_RUN) {
		processor_t processor = thread->last_processor;

		if (!lazy_update &&
		    processor != PROCESSOR_NULL &&
		    processor != current_processor() &&
		    processor->active_thread == thread) {
			cause_ast_check(processor);
		}
	}
}
/*
 * thread_run_queue_remove_for_handoff
 *
 * Pull a thread or its (recursive) push target out of the runqueue
 * so that it is ready for thread_run()
 *
 * Called at splsched
 *
 * Returns the thread that was pulled or THREAD_NULL if no thread could be pulled.
 * This may be different than the thread that was passed in.
 */
thread_t
thread_run_queue_remove_for_handoff(thread_t thread)
{
	thread_t pulled_thread = THREAD_NULL;

	thread_lock(thread);

	/*
	 * Check that the thread is not bound
	 * to a different processor, and that realtime
	 * is not involved.
	 *
	 * Next, pull it off its run queue.  If it
	 * doesn't come, it's not eligible.
	 */

	processor_t processor = current_processor();
	if (processor->current_pri < BASEPRI_RTQUEUES && thread->sched_pri < BASEPRI_RTQUEUES &&
	    (thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor)) {
		if (thread_run_queue_remove(thread)) {
			pulled_thread = thread;
		}
	}

	thread_unlock(thread);

	return pulled_thread;
}
/*
 *	thread_run_queue_remove:
 *
 *	Remove a thread from its current run queue and
 *	return TRUE if successful.
 *
 *	Thread must be locked.
 *
 *	If thread->runq is PROCESSOR_NULL, the thread will not re-enter the
 *	run queues because the caller locked the thread.  Otherwise
 *	the thread is on a run queue, but could be chosen for dispatch
 *	and removed by another processor under a different lock, which
 *	will set thread->runq to PROCESSOR_NULL.
 *
 *	Hence the thread select path must not rely on anything that could
 *	be changed under the thread lock after calling this function,
 *	most importantly thread->sched_pri.
 */
boolean_t
thread_run_queue_remove(
	thread_t        thread)
{
	boolean_t removed = FALSE;
	processor_t processor = thread->runq;

	if ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT) {
		/* Thread isn't runnable */
		assert(thread->runq == PROCESSOR_NULL);
		return FALSE;
	}

	if (processor == PROCESSOR_NULL) {
		/*
		 * The thread is either not on the runq,
		 * or is in the midst of being removed from the runq.
		 *
		 * runq is set to NULL under the pset lock, not the thread
		 * lock, so the thread may still be in the process of being dequeued
		 * from the runq. It will wait in invoke for the thread lock to be
		 * dropped.
		 */
		return FALSE;
	}

	if (thread->sched_pri < BASEPRI_RTQUEUES) {
		return SCHED(processor_queue_remove)(processor, thread);
	}

	processor_set_t pset = processor->processor_set;

	rt_lock_lock(pset);

	if (thread->runq != PROCESSOR_NULL) {
		/*
		 *	Thread is on the RT run queue and we have a lock on
		 *	that run queue.
		 */

		remqueue(&thread->runq_links);
		SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
		rt_runq_count_decr(pset);

		thread->runq = PROCESSOR_NULL;

		removed = TRUE;
	}

	rt_lock_unlock(pset);

	return removed;
}
/*
 * Put the thread back where it goes after a thread_run_queue_remove
 *
 * Thread must have been removed under the same thread lock hold
 *
 * thread locked, at splsched
 */
void
thread_run_queue_reinsert(thread_t thread, integer_t options)
{
	assert(thread->runq == PROCESSOR_NULL);
	assert(thread->state & (TH_RUN));

	thread_setrun(thread, options);
}
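/*
 * A minimal sketch (not part of the original source) of how the
 * remove/reinsert pair above is meant to be used, mirroring the pattern
 * in set_sched_pri(): the thread stays locked at splsched across the
 * whole sequence and is only reinserted if it was actually pulled off a
 * run queue.  `new_priority' is a hypothetical value.
 */
#if 0
	boolean_t removed_from_runq = thread_run_queue_remove(thread);

	thread->sched_pri = new_priority;   /* state that influences queue placement */

	if (removed_from_runq) {
		thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
	}
#endif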
void
sys_override_cpu_throttle(boolean_t enable_override)
{
	if (enable_override) {
		cpu_throttle_enabled = 0;
	} else {
		cpu_throttle_enabled = 1;
	}
}
thread_urgency_t
thread_get_urgency(thread_t thread, uint64_t *arg1, uint64_t *arg2)
{
	uint64_t urgency_param1 = 0, urgency_param2 = 0;

	thread_urgency_t urgency;

	if (thread == NULL || (thread->state & TH_IDLE)) {
		urgency = THREAD_URGENCY_NONE;
	} else if (thread->sched_mode == TH_MODE_REALTIME) {
		urgency_param1 = thread->realtime.period;
		urgency_param2 = thread->realtime.deadline;

		urgency = THREAD_URGENCY_REAL_TIME;
	} else if (cpu_throttle_enabled &&
	    (thread->sched_pri <= MAXPRI_THROTTLE) &&
	    (thread->base_pri <= MAXPRI_THROTTLE)) {
		/*
		 * Threads that are running at low priority but are not
		 * tagged with a specific QoS are separated out from
		 * the "background" urgency. Performance management
		 * subsystem can decide to either treat these threads
		 * as normal threads or look at other signals like thermal
		 * levels for optimal power/perf tradeoffs for a platform.
		 */
		boolean_t thread_lacks_qos = (proc_get_effective_thread_policy(thread, TASK_POLICY_QOS) == THREAD_QOS_UNSPECIFIED); //thread_has_qos_policy(thread);
		boolean_t task_is_suppressed = (proc_get_effective_task_policy(thread->task, TASK_POLICY_SUP_ACTIVE) == 0x1);

		/*
		 * Background urgency applied when thread priority is
		 * MAXPRI_THROTTLE or lower and thread is not promoted
		 * and thread has a QoS specified
		 */
		urgency_param1 = thread->sched_pri;
		urgency_param2 = thread->base_pri;

		if (thread_lacks_qos && !task_is_suppressed) {
			urgency = THREAD_URGENCY_LOWPRI;
		} else {
			urgency = THREAD_URGENCY_BACKGROUND;
		}
	} else {
		/* For otherwise unclassified threads, report throughput QoS parameters */
		urgency_param1 = proc_get_effective_thread_policy(thread, TASK_POLICY_THROUGH_QOS);
		urgency_param2 = proc_get_effective_task_policy(thread->task, TASK_POLICY_THROUGH_QOS);
		urgency = THREAD_URGENCY_NORMAL;
	}

	if (arg1 != NULL) {
		*arg1 = urgency_param1;
	}
	if (arg2 != NULL) {
		*arg2 = urgency_param2;
	}

	return urgency;
}
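/*
 * For example, a realtime thread reports THREAD_URGENCY_REAL_TIME with
 * its period and deadline as the two parameters, while a low-priority
 * thread (sched_pri and base_pri both at or below MAXPRI_THROTTLE) with
 * no QoS class and no suppressed task reports THREAD_URGENCY_LOWPRI;
 * the same thread with a QoS class reports THREAD_URGENCY_BACKGROUND.
 */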
perfcontrol_class_t
thread_get_perfcontrol_class(thread_t thread)
{
	/* Special case handling */
	if (thread->state & TH_IDLE) {
		return PERFCONTROL_CLASS_IDLE;
	}
	if (thread->task == kernel_task) {
		return PERFCONTROL_CLASS_KERNEL;
	}
	if (thread->sched_mode == TH_MODE_REALTIME) {
		return PERFCONTROL_CLASS_REALTIME;
	}

	/* perfcontrol_class based on base_pri */
	if (thread->base_pri <= MAXPRI_THROTTLE) {
		return PERFCONTROL_CLASS_BACKGROUND;
	} else if (thread->base_pri <= BASEPRI_UTILITY) {
		return PERFCONTROL_CLASS_UTILITY;
	} else if (thread->base_pri <= BASEPRI_DEFAULT) {
		return PERFCONTROL_CLASS_NONUI;
	} else if (thread->base_pri <= BASEPRI_FOREGROUND) {
		return PERFCONTROL_CLASS_UI;
	} else {
		return PERFCONTROL_CLASS_ABOVEUI;
	}
}
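/*
 * Illustrative mapping, assuming the usual ordering of the priority band
 * constants (MAXPRI_THROTTLE < BASEPRI_UTILITY < BASEPRI_DEFAULT <
 * BASEPRI_FOREGROUND): a base_pri at or below MAXPRI_THROTTLE classifies
 * as BACKGROUND, at or below BASEPRI_UTILITY as UTILITY, at or below
 * BASEPRI_DEFAULT as NONUI, at or below BASEPRI_FOREGROUND as UI, and
 * anything higher as ABOVEUI -- unless one of the idle/kernel/realtime
 * special cases above applies first.
 */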
/*
 *	This is the processor idle loop, which just looks for other threads
 *	to execute.  Processor idle threads invoke this without supplying a
 *	current thread to idle without an asserted wait state.
 *
 *	Returns the next thread to execute if dispatched directly.
 */

#if 0
#define IDLE_KERNEL_DEBUG_CONSTANT(...) KERNEL_DEBUG_CONSTANT(__VA_ARGS__)
#else
#define IDLE_KERNEL_DEBUG_CONSTANT(...) do { } while(0)
#endif

thread_t
processor_idle(
	thread_t        thread,
	processor_t     processor)
{
	processor_set_t pset = processor->processor_set;

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_START,
	    (uintptr_t)thread_tid(thread), 0, 0, 0, 0);

	SCHED_STATS_CPU_IDLE_START(processor);

	uint64_t ctime = mach_absolute_time();

	timer_switch(&PROCESSOR_DATA(processor, system_state), ctime, &PROCESSOR_DATA(processor, idle_state));
	PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, idle_state);

	cpu_quiescent_counter_leave(ctime);

	while (1) {
		/*
		 * Ensure that updates to my processor and pset state,
		 * made by the IPI source processor before sending the IPI,
		 * are visible on this processor now (even though we don't
		 * take the pset lock yet).
		 */
		atomic_thread_fence(memory_order_acquire);

		if (processor->state != PROCESSOR_IDLE) {
			break;
		}
		if (bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
			break;
		}
#if defined(CONFIG_SCHED_DEFERRED_AST)
		if (bit_test(pset->pending_deferred_AST_cpu_mask, processor->cpu_id)) {
			break;
		}
#endif
		if (processor->is_recommended && (processor->processor_primary == processor)) {
			if (rt_runq_count(pset)) {
				break;
			}
		} else {
			if (SCHED(processor_bound_count)(processor)) {
				break;
			}
		}

#if CONFIG_SCHED_IDLE_IN_PLACE
		if (thread != THREAD_NULL) {
			/* Did idle-in-place thread wake up */
			if ((thread->state & (TH_WAIT | TH_SUSP)) != TH_WAIT || thread->wake_active) {
				break;
			}
		}
#endif

		IDLE_KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -1, 0);

		machine_track_platform_idle(TRUE);

		machine_idle();

		machine_track_platform_idle(FALSE);

		/*
		 * Check if we should call sched_timeshare_consider_maintenance() here.
		 * The CPU was woken out of idle due to an interrupt and we should do the
		 * call only if the processor is still idle. If the processor is non-idle,
		 * the threads running on the processor would do the call as part of
		 * context switching.
		 */
		if (processor->state == PROCESSOR_IDLE) {
			sched_timeshare_consider_maintenance(mach_absolute_time());
		}

		IDLE_KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -2, 0);

		if (!SCHED(processor_queue_empty)(processor)) {
			/* Secondary SMT processors respond to directed wakeups
			 * exclusively. Some platforms induce 'spurious' SMT wakeups.
			 */
			if (processor->processor_primary == processor) {
				break;
			}
		}
	}

	ctime = mach_absolute_time();

	timer_switch(&PROCESSOR_DATA(processor, idle_state), ctime, &PROCESSOR_DATA(processor, system_state));
	PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, system_state);

	cpu_quiescent_counter_join(ctime);

	assert(processor->next_thread == NULL);

	ast_t reason = AST_NONE;

	/* We're handling all scheduling AST's */
	ast_off(AST_SCHEDULING);

	/*
	 * thread_select will move the processor from dispatching to running,
	 * or put it in idle if there's nothing to do.
	 */
	thread_t current_thread = current_thread();

	thread_lock(current_thread);
	thread_t new_thread = thread_select(current_thread, processor, &reason);
	thread_unlock(current_thread);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_END,
	    (uintptr_t)thread_tid(thread), processor->state, (uintptr_t)thread_tid(new_thread), reason, 0);

	return new_thread;
}
/*
 *	Each processor has a dedicated thread which
 *	executes the idle loop when there is no suitable
 *	previous context.
 */
void
idle_thread(void)
{
	processor_t processor = current_processor();
	thread_t new_thread;

	new_thread = processor_idle(THREAD_NULL, processor);
	if (new_thread != THREAD_NULL) {
		thread_run(processor->idle_thread, (thread_continue_t)idle_thread, NULL, new_thread);
		/*NOTREACHED*/
	}

	thread_block((thread_continue_t)idle_thread);
	/*NOTREACHED*/
}
kern_return_t
idle_thread_create(
	processor_t     processor)
{
	kern_return_t result;
	thread_t thread;
	spl_t s;
	char name[MAXTHREADNAMESIZE];

	result = kernel_thread_create((thread_continue_t)idle_thread, NULL, MAXPRI_KERNEL, &thread);
	if (result != KERN_SUCCESS) {
		return result;
	}

	snprintf(name, sizeof(name), "idle #%d", processor->cpu_id);
	thread_set_thread_name(thread, name);

	s = splsched();
	thread_lock(thread);
	thread->bound_processor = processor;
	processor->idle_thread = thread;
	thread->sched_pri = thread->base_pri = IDLEPRI;
	thread->state = (TH_RUN | TH_IDLE);
	thread->options |= TH_OPT_IDLE_THREAD;
	thread_unlock(thread);
	splx(s);

	thread_deallocate(thread);

	return KERN_SUCCESS;
}
/*
 * Kicks off scheduler services.
 *
 * Called at splsched.
 */
void
sched_startup(void)
{
	kern_return_t result;
	thread_t thread;

	simple_lock_init(&sched_vm_group_list_lock, 0);

#if __arm__ || __arm64__
	simple_lock_init(&sched_recommended_cores_lock, 0);
#endif /* __arm__ || __arm64__ */

	result = kernel_thread_start_priority((thread_continue_t)sched_init_thread,
	    (void *)SCHED(maintenance_continuation), MAXPRI_KERNEL, &thread);
	if (result != KERN_SUCCESS) {
		panic("sched_startup");
	}

	thread_deallocate(thread);

	assert_thread_magic(thread);

	/*
	 * Yield to the sched_init_thread once, to
	 * initialize our own thread after being switched
	 * back to.
	 *
	 * The current thread is the only other thread
	 * active at this point.
	 */
	thread_block(THREAD_CONTINUE_NULL);
}
#if __arm64__
static _Atomic uint64_t sched_perfcontrol_callback_deadline;
#endif /* __arm64__ */


#if defined(CONFIG_SCHED_TIMESHARE_CORE)

static volatile uint64_t        sched_maintenance_deadline;
static uint64_t                 sched_tick_last_abstime;
static uint64_t                 sched_tick_delta;
uint64_t                        sched_tick_max_delta;
/*
 *	sched_timeshare_maintenance_continue:
 *
 *	Perform periodic bookkeeping functions about ten
 *	times per second.
 */
void
sched_timeshare_maintenance_continue(void)
{
	uint64_t sched_tick_ctime, late_time;

	struct sched_update_scan_context scan_context = {
		.earliest_bg_make_runnable_time = UINT64_MAX,
		.earliest_normal_make_runnable_time = UINT64_MAX,
		.earliest_rt_make_runnable_time = UINT64_MAX
	};

	sched_tick_ctime = mach_absolute_time();

	if (__improbable(sched_tick_last_abstime == 0)) {
		sched_tick_last_abstime = sched_tick_ctime;
		late_time = 0;
		sched_tick_delta = 1;
	} else {
		late_time = sched_tick_ctime - sched_tick_last_abstime;
		sched_tick_delta = late_time / sched_tick_interval;
		/* Ensure a delta of 1, since the interval could be slightly
		 * smaller than the sched_tick_interval due to dispatch
		 * latencies.
		 */
		sched_tick_delta = MAX(sched_tick_delta, 1);

		/* In the event interrupt latencies or platform
		 * idle events that advanced the timebase resulted
		 * in periods where no threads were dispatched,
		 * cap the maximum "tick delta" at SCHED_TICK_MAX_DELTA
		 * iterations.
		 */
		sched_tick_delta = MIN(sched_tick_delta, SCHED_TICK_MAX_DELTA);

		sched_tick_last_abstime = sched_tick_ctime;
		sched_tick_max_delta = MAX(sched_tick_delta, sched_tick_max_delta);
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE) | DBG_FUNC_START,
	    sched_tick_delta, late_time, 0, 0, 0);

	/* Add a number of pseudo-ticks corresponding to the elapsed interval
	 * This could be greater than 1 if substantial intervals where
	 * all processors are idle occur, which rarely occurs in practice.
	 */
	sched_tick += sched_tick_delta;

	/*
	 *  Compute various averages.
	 */
	compute_averages(sched_tick_delta);

	/*
	 *  Scan the run queues for threads which
	 *  may need to be updated, and find the earliest runnable thread on the runqueue
	 *  to report its latency.
	 */
	SCHED(thread_update_scan)(&scan_context);

	SCHED(rt_runq_scan)(&scan_context);

	uint64_t ctime = mach_absolute_time();

	uint64_t bg_max_latency = (ctime > scan_context.earliest_bg_make_runnable_time) ?
	    ctime - scan_context.earliest_bg_make_runnable_time : 0;

	uint64_t default_max_latency = (ctime > scan_context.earliest_normal_make_runnable_time) ?
	    ctime - scan_context.earliest_normal_make_runnable_time : 0;

	uint64_t realtime_max_latency = (ctime > scan_context.earliest_rt_make_runnable_time) ?
	    ctime - scan_context.earliest_rt_make_runnable_time : 0;

	machine_max_runnable_latency(bg_max_latency, default_max_latency, realtime_max_latency);

	/*
	 * Check to see if the special sched VM group needs attention.
	 */
	sched_vm_group_maintenance();

#if __arm__ || __arm64__
	/* Check to see if the recommended cores failsafe is active */
	sched_recommended_cores_maintenance();
#endif /* __arm__ || __arm64__ */

#if DEBUG || DEVELOPMENT
#if __x86_64__
#include <i386/misc_protos.h>
	/* Check for long-duration interrupts */
	mp_interrupt_watchdog();
#endif /* __x86_64__ */
#endif /* DEBUG || DEVELOPMENT */

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE) | DBG_FUNC_END,
	    sched_pri_shifts[TH_BUCKET_SHARE_FG], sched_pri_shifts[TH_BUCKET_SHARE_BG],
	    sched_pri_shifts[TH_BUCKET_SHARE_UT], sched_pri_shifts[TH_BUCKET_SHARE_DF], 0);

	assert_wait((event_t)sched_timeshare_maintenance_continue, THREAD_UNINT);
	thread_block((thread_continue_t)sched_timeshare_maintenance_continue);
	/*NOTREACHED*/
}
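/*
 * Worked example of the tick accounting above: if the maintenance thread
 * last ran 2.5 sched_tick_intervals ago, late_time / sched_tick_interval
 * yields 2, which is already >= 1 and (assuming SCHED_TICK_MAX_DELTA is
 * larger) is left uncapped, so sched_tick advances by 2 pseudo-ticks in
 * this pass.
 */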
static uint64_t sched_maintenance_wakeups;

/*
 * Determine if the set of routines formerly driven by a maintenance timer
 * must be invoked, based on a deadline comparison.  Signals the scheduler
 * maintenance thread on deadline expiration.  Must be invoked at an interval
 * lower than the "sched_tick_interval", currently accomplished by
 * invocation via the quantum expiration timer and at context switch time.
 * Performance matters: this routine reuses a timestamp approximating the
 * current absolute time received from the caller, and should perform
 * no more than a comparison against the deadline in the common case.
 */
void
sched_timeshare_consider_maintenance(uint64_t ctime)
{
	cpu_quiescent_counter_checkin(ctime);

	uint64_t deadline = sched_maintenance_deadline;

	if (__improbable(ctime >= deadline)) {
		if (__improbable(current_thread() == sched_maintenance_thread)) {
			return;
		}

		uint64_t ndeadline = ctime + sched_tick_interval;

		if (__probable(__sync_bool_compare_and_swap(&sched_maintenance_deadline, deadline, ndeadline))) {
			thread_wakeup((event_t)sched_timeshare_maintenance_continue);
			sched_maintenance_wakeups++;
		}
	}

	uint64_t load_compute_deadline = __c11_atomic_load(&sched_load_compute_deadline, memory_order_relaxed);

	if (__improbable(load_compute_deadline && ctime >= load_compute_deadline)) {
		uint64_t new_deadline = 0;
		if (__c11_atomic_compare_exchange_strong(&sched_load_compute_deadline, &load_compute_deadline, new_deadline,
		    memory_order_relaxed, memory_order_relaxed)) {
			compute_sched_load();
			new_deadline = ctime + sched_load_compute_interval_abs;
			__c11_atomic_store(&sched_load_compute_deadline, new_deadline, memory_order_relaxed);
		}
	}

#if __arm64__
	uint64_t perf_deadline = __c11_atomic_load(&sched_perfcontrol_callback_deadline, memory_order_relaxed);

	if (__improbable(perf_deadline && ctime >= perf_deadline)) {
		/* CAS in 0, if success, make callback. Otherwise let the next context switch check again. */
		if (__c11_atomic_compare_exchange_strong(&sched_perfcontrol_callback_deadline, &perf_deadline, 0,
		    memory_order_relaxed, memory_order_relaxed)) {
			machine_perfcontrol_deadline_passed(perf_deadline);
		}
	}
#endif /* __arm64__ */
}

#endif /* CONFIG_SCHED_TIMESHARE_CORE */
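/*
 * A self-contained sketch (not from the original source) of the lock-free
 * deadline pattern used by sched_timeshare_consider_maintenance(): every
 * caller does a cheap comparison, and only the caller that wins the CAS
 * advances the deadline and performs the expensive work.  The names below
 * (example_deadline, example_consider) are hypothetical.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic uint64_t example_deadline;

static bool
example_consider(uint64_t now, uint64_t interval)
{
	uint64_t deadline = atomic_load_explicit(&example_deadline, memory_order_relaxed);

	if (now < deadline) {
		return false;   /* common case: one load and one comparison */
	}

	/* Only the winner of the CAS performs the maintenance work */
	return atomic_compare_exchange_strong_explicit(&example_deadline,
	    &deadline, now + interval,
	    memory_order_relaxed, memory_order_relaxed);
}
#endif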
void
sched_init_thread(void (*continuation)(void))
{
	thread_block(THREAD_CONTINUE_NULL);

	thread_t thread = current_thread();

	thread_set_thread_name(thread, "sched_maintenance_thread");

	sched_maintenance_thread = thread;

	continuation();

	/*NOTREACHED*/
}
#if defined(CONFIG_SCHED_TIMESHARE_CORE)

/*
 *	thread_update_scan / runq_scan:
 *
 *	Scan the run queues to account for timesharing threads
 *	which need to be updated.
 *
 *	Scanner runs in two passes.  Pass one squirrels likely
 *	threads away in an array, pass two does the update.
 *
 *	This is necessary because the run queue is locked for
 *	the candidate scan, but the thread is locked for the update.
 *
 *	Array should be sized to make forward progress, without
 *	disabling preemption for long periods.
 */

#define THREAD_UPDATE_SIZE      128

static thread_t thread_update_array[THREAD_UPDATE_SIZE];
static uint32_t thread_update_count = 0;

/* Returns TRUE if thread was added, FALSE if thread_update_array is full */
boolean_t
thread_update_add_thread(thread_t thread)
{
	if (thread_update_count == THREAD_UPDATE_SIZE) {
		return FALSE;
	}

	thread_update_array[thread_update_count++] = thread;
	thread_reference_internal(thread);
	return TRUE;
}

void
thread_update_process_threads(void)
{
	assert(thread_update_count <= THREAD_UPDATE_SIZE);

	for (uint32_t i = 0; i < thread_update_count; i++) {
		thread_t thread = thread_update_array[i];
		assert_thread_magic(thread);
		thread_update_array[i] = THREAD_NULL;

		spl_t s = splsched();
		thread_lock(thread);
		if (!(thread->state & (TH_WAIT)) && thread->sched_stamp != sched_tick) {
			SCHED(update_priority)(thread);
		}
		thread_unlock(thread);
		splx(s);

		thread_deallocate(thread);
	}

	thread_update_count = 0;
}

/*
 *	Scan a runq for candidate threads.
 *
 *	Returns TRUE if retry is needed.
 */
boolean_t
runq_scan(
	run_queue_t                     runq,
	sched_update_scan_context_t     scan_context)
{
	int count = runq->count;
	int queue_index;

	if (count == 0) {
		return FALSE;
	}

	for (queue_index = bitmap_first(runq->bitmap, NRQS);
	    queue_index >= 0;
	    queue_index = bitmap_next(runq->bitmap, queue_index)) {
		thread_t thread;
		queue_t queue = &runq->queues[queue_index];

		qe_foreach_element(thread, queue, runq_links) {
			assert(count > 0);
			assert_thread_magic(thread);

			if (thread->sched_stamp != sched_tick &&
			    thread->sched_mode == TH_MODE_TIMESHARE) {
				if (thread_update_add_thread(thread) == FALSE) {
					return TRUE;
				}
			}

			if (cpu_throttle_enabled && ((thread->sched_pri <= MAXPRI_THROTTLE) && (thread->base_pri <= MAXPRI_THROTTLE))) {
				if (thread->last_made_runnable_time < scan_context->earliest_bg_make_runnable_time) {
					scan_context->earliest_bg_make_runnable_time = thread->last_made_runnable_time;
				}
			} else {
				if (thread->last_made_runnable_time < scan_context->earliest_normal_make_runnable_time) {
					scan_context->earliest_normal_make_runnable_time = thread->last_made_runnable_time;
				}
			}
			count--;
		}
	}

	return FALSE;
}

#endif /* CONFIG_SCHED_TIMESHARE_CORE */
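/*
 * Concretely: with THREAD_UPDATE_SIZE of 128, pass one (runq_scan) stashes
 * at most 128 candidate timeshare threads while holding the run-queue
 * lock, returning TRUE when the array fills so the caller can drain it
 * with thread_update_process_threads() and retry; pass two then takes
 * each thread lock and calls SCHED(update_priority) outside the run-queue
 * lock.
 */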
boolean_t
thread_eager_preemption(thread_t thread)
{
	return (thread->sched_flags & TH_SFLAG_EAGERPREEMPT) != 0;
}

void
thread_set_eager_preempt(thread_t thread)
{
	spl_t x;
	processor_t p;
	ast_t ast = AST_NONE;

	x = splsched();
	p = current_processor();

	thread_lock(thread);
	thread->sched_flags |= TH_SFLAG_EAGERPREEMPT;

	if (thread == current_thread()) {
		ast = csw_check(thread, p, AST_NONE);
		thread_unlock(thread);
		if (ast != AST_NONE) {
			(void) thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast);
		}
	} else {
		p = thread->last_processor;

		if (p != PROCESSOR_NULL && p->state == PROCESSOR_RUNNING &&
		    p->active_thread == thread) {
			cause_ast_check(p);
		}

		thread_unlock(thread);
	}

	splx(x);
}

void
thread_clear_eager_preempt(thread_t thread)
{
	spl_t x;

	x = splsched();
	thread_lock(thread);

	thread->sched_flags &= ~TH_SFLAG_EAGERPREEMPT;

	thread_unlock(thread);
	splx(x);
}
/*
 *	Scheduling statistics
 */
void
sched_stats_handle_csw(processor_t processor, int reasons, int selfpri, int otherpri)
{
	struct processor_sched_statistics *stats;
	boolean_t to_realtime = FALSE;

	stats = &processor->processor_data.sched_stats;
	stats->csw_count++;

	if (otherpri >= BASEPRI_REALTIME) {
		stats->rt_sched_count++;
		to_realtime = TRUE;
	}

	if ((reasons & AST_PREEMPT) != 0) {
		stats->preempt_count++;

		if (selfpri >= BASEPRI_REALTIME) {
			stats->preempted_rt_count++;
		}

		if (to_realtime) {
			stats->preempted_by_rt_count++;
		}
	}
}

void
sched_stats_handle_runq_change(struct runq_stats *stats, int old_count)
{
	uint64_t timestamp = mach_absolute_time();

	stats->count_sum += (timestamp - stats->last_change_timestamp) * old_count;
	stats->last_change_timestamp = timestamp;
}
/*
 *	For calls from assembly code
 */
#undef thread_wakeup
void
thread_wakeup(
	event_t         x);

void
thread_wakeup(
	event_t         x)
{
	thread_wakeup_with_result(x, THREAD_AWAKENED);
}

boolean_t
preemption_enabled(void)
{
	return get_preemption_level() == 0 && ml_get_interrupts_enabled();
}

static void
sched_timer_deadline_tracking_init(void)
{
	nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT, &timer_deadline_tracking_bin_1);
	nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT, &timer_deadline_tracking_bin_2);
}
#if __arm__ || __arm64__

uint32_t        perfcontrol_requested_recommended_cores = ALL_CORES_RECOMMENDED;
uint32_t        perfcontrol_requested_recommended_core_count = MAX_CPUS;
bool            perfcontrol_failsafe_active = false;
bool            perfcontrol_sleep_override = false;

uint64_t        perfcontrol_failsafe_maintenance_runnable_time;
uint64_t        perfcontrol_failsafe_activation_time;
uint64_t        perfcontrol_failsafe_deactivation_time;

/* data covering who likely caused it and how long they ran */
#define FAILSAFE_NAME_LEN       33 /* (2*MAXCOMLEN)+1 from size of p_name */
char            perfcontrol_failsafe_name[FAILSAFE_NAME_LEN];
int             perfcontrol_failsafe_pid;
uint64_t        perfcontrol_failsafe_tid;
uint64_t        perfcontrol_failsafe_thread_timer_at_start;
uint64_t        perfcontrol_failsafe_thread_timer_last_seen;
uint32_t        perfcontrol_failsafe_recommended_at_trigger;
/*
 * Perf controller calls here to update the recommended core bitmask.
 * If the failsafe is active, we don't immediately apply the new value.
 * Instead, we store the new request and use it after the failsafe deactivates.
 *
 * If the failsafe is not active, immediately apply the update.
 *
 * No scheduler locks are held, no other locks are held that scheduler might depend on,
 * interrupts are enabled
 *
 * currently prototype is in osfmk/arm/machine_routines.h
 */
void
sched_perfcontrol_update_recommended_cores(uint32_t recommended_cores)
{
	assert(preemption_enabled());

	spl_t s = splsched();
	simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);

	perfcontrol_requested_recommended_cores = recommended_cores;
	perfcontrol_requested_recommended_core_count = __builtin_popcountll(recommended_cores);

	if ((perfcontrol_failsafe_active == false) && (perfcontrol_sleep_override == false)) {
		sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores);
	} else {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_NONE,
		    perfcontrol_requested_recommended_cores,
		    sched_maintenance_thread->last_made_runnable_time, 0, 0, 0);
	}

	simple_unlock(&sched_recommended_cores_lock);
	splx(s);
}
void
sched_override_recommended_cores_for_sleep(void)
{
	spl_t s = splsched();
	simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);

	if (perfcontrol_sleep_override == false) {
		perfcontrol_sleep_override = true;
		sched_update_recommended_cores(ALL_CORES_RECOMMENDED);
	}

	simple_unlock(&sched_recommended_cores_lock);
	splx(s);
}

void
sched_restore_recommended_cores_after_sleep(void)
{
	spl_t s = splsched();
	simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);

	if (perfcontrol_sleep_override == true) {
		perfcontrol_sleep_override = false;
		sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores);
	}

	simple_unlock(&sched_recommended_cores_lock);
	splx(s);
}
/*
 * Consider whether we need to activate the recommended cores failsafe
 *
 * Called from quantum timer interrupt context of a realtime thread
 * No scheduler locks are held, interrupts are disabled
 */
void
sched_consider_recommended_cores(uint64_t ctime, thread_t cur_thread)
{
	/*
	 * Check if a realtime thread is starving the system
	 * and bringing up non-recommended cores would help
	 *
	 * TODO: Is this the correct check for recommended == possible cores?
	 * TODO: Validate the checks without the relevant lock are OK.
	 */

	if (__improbable(perfcontrol_failsafe_active == TRUE)) {
		/* keep track of how long the responsible thread runs */

		simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);

		if (perfcontrol_failsafe_active == TRUE &&
		    cur_thread->thread_id == perfcontrol_failsafe_tid) {
			perfcontrol_failsafe_thread_timer_last_seen = timer_grab(&cur_thread->user_timer) +
			    timer_grab(&cur_thread->system_timer);
		}

		simple_unlock(&sched_recommended_cores_lock);

		/* we're already trying to solve the problem, so bail */
		return;
	}

	/* The failsafe won't help if there are no more processors to enable */
	if (__probable(perfcontrol_requested_recommended_core_count >= processor_count)) {
		return;
	}

	uint64_t too_long_ago = ctime - perfcontrol_failsafe_starvation_threshold;

	/* Use the maintenance thread as our canary in the coal mine */
	thread_t m_thread = sched_maintenance_thread;

	/* If it doesn't look bad, nothing to see here */
	if (__probable(m_thread->last_made_runnable_time >= too_long_ago)) {
		return;
	}

	/* It looks bad, take the lock to be sure */
	thread_lock(m_thread);

	if (m_thread->runq == PROCESSOR_NULL ||
	    (m_thread->state & (TH_RUN | TH_WAIT)) != TH_RUN ||
	    m_thread->last_made_runnable_time >= too_long_ago) {
		/*
		 * Maintenance thread is either on cpu or blocked, and
		 * therefore wouldn't benefit from more cores
		 */
		thread_unlock(m_thread);
		return;
	}

	uint64_t maintenance_runnable_time = m_thread->last_made_runnable_time;

	thread_unlock(m_thread);

	/*
	 * There are cores disabled at perfcontrol's recommendation, but the
	 * system is so overloaded that the maintenance thread can't run.
	 * That likely means that perfcontrol can't run either, so it can't fix
	 * the recommendation.  We have to kick in a failsafe to keep from starving.
	 *
	 * When the maintenance thread has been starved for too long,
	 * ignore the recommendation from perfcontrol and light up all the cores.
	 *
	 * TODO: Consider weird states like boot, sleep, or debugger
	 */

	simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);

	if (perfcontrol_failsafe_active == TRUE) {
		simple_unlock(&sched_recommended_cores_lock);
		return;
	}

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_START,
	    perfcontrol_requested_recommended_cores, maintenance_runnable_time, 0, 0, 0);

	perfcontrol_failsafe_active = TRUE;
	perfcontrol_failsafe_activation_time = mach_absolute_time();
	perfcontrol_failsafe_maintenance_runnable_time = maintenance_runnable_time;
	perfcontrol_failsafe_recommended_at_trigger = perfcontrol_requested_recommended_cores;

	/* Capture some data about who screwed up (assuming that the thread on core is at fault) */
	task_t task = cur_thread->task;
	perfcontrol_failsafe_pid = task_pid(task);
	strlcpy(perfcontrol_failsafe_name, proc_name_address(task->bsd_info), sizeof(perfcontrol_failsafe_name));

	perfcontrol_failsafe_tid = cur_thread->thread_id;

	/* Blame the thread for time it has run recently */
	uint64_t recent_computation = (ctime - cur_thread->computation_epoch) + cur_thread->computation_metered;

	uint64_t last_seen = timer_grab(&cur_thread->user_timer) + timer_grab(&cur_thread->system_timer);

	/* Compute the start time of the bad behavior in terms of the thread's on core time */
	perfcontrol_failsafe_thread_timer_at_start = last_seen - recent_computation;
	perfcontrol_failsafe_thread_timer_last_seen = last_seen;

	/* Ignore the previously recommended core configuration */
	sched_update_recommended_cores(ALL_CORES_RECOMMENDED);

	simple_unlock(&sched_recommended_cores_lock);
}
/*
 * Now that our bacon has been saved by the failsafe, consider whether to turn it off
 *
 * Runs in the context of the maintenance thread, no locks held
 */
static void
sched_recommended_cores_maintenance(void)
{
	/* Common case - no failsafe, nothing to be done here */
	if (__probable(perfcontrol_failsafe_active == FALSE)) {
		return;
	}

	uint64_t ctime = mach_absolute_time();

	boolean_t print_diagnostic = FALSE;
	char p_name[FAILSAFE_NAME_LEN] = "";

	spl_t s = splsched();
	simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);

	/* Check again, under the lock, to avoid races */
	if (perfcontrol_failsafe_active == FALSE) {
		goto out;
	}

	/*
	 * Ensure that the other cores get another few ticks to run some threads
	 * If we don't have this hysteresis, the maintenance thread is the first
	 * to run, and then it immediately kills the other cores
	 */
	if ((ctime - perfcontrol_failsafe_activation_time) < perfcontrol_failsafe_starvation_threshold) {
		goto out;
	}

	/* Capture some diagnostic state under the lock so we can print it out later */

	int pid = perfcontrol_failsafe_pid;
	uint64_t tid = perfcontrol_failsafe_tid;

	uint64_t thread_usage = perfcontrol_failsafe_thread_timer_last_seen -
	    perfcontrol_failsafe_thread_timer_at_start;
	uint32_t rec_cores_before = perfcontrol_failsafe_recommended_at_trigger;
	uint32_t rec_cores_after = perfcontrol_requested_recommended_cores;
	uint64_t failsafe_duration = ctime - perfcontrol_failsafe_activation_time;
	strlcpy(p_name, perfcontrol_failsafe_name, sizeof(p_name));

	print_diagnostic = TRUE;

	/* Deactivate the failsafe and reinstate the requested recommendation settings */

	perfcontrol_failsafe_deactivation_time = ctime;
	perfcontrol_failsafe_active = FALSE;

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_END,
	    perfcontrol_requested_recommended_cores, failsafe_duration, 0, 0, 0);

	sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores);

out:
	simple_unlock(&sched_recommended_cores_lock);
	splx(s);

	if (print_diagnostic) {
		uint64_t failsafe_duration_ms = 0, thread_usage_ms = 0;

		absolutetime_to_nanoseconds(failsafe_duration, &failsafe_duration_ms);
		failsafe_duration_ms = failsafe_duration_ms / NSEC_PER_MSEC;

		absolutetime_to_nanoseconds(thread_usage, &thread_usage_ms);
		thread_usage_ms = thread_usage_ms / NSEC_PER_MSEC;

		printf("recommended core failsafe kicked in for %lld ms "
		    "likely due to %s[%d] thread 0x%llx spending "
		    "%lld ms on cpu at realtime priority - "
		    "new recommendation: 0x%x -> 0x%x\n",
		    failsafe_duration_ms, p_name, pid, tid, thread_usage_ms,
		    rec_cores_before, rec_cores_after);
	}
}

#endif /* __arm__ || __arm64__ */
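/*
 * The diagnostic math above converts mach absolute time to milliseconds
 * in two steps: absolutetime_to_nanoseconds() first rewrites
 * failsafe_duration and thread_usage as nanoseconds, and the subsequent
 * division by NSEC_PER_MSEC turns those into the millisecond values that
 * get printed (e.g. 250,000,000 ns of on-core time reports as 250 ms).
 */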
kern_return_t
sched_processor_enable(processor_t processor, boolean_t enable)
{
	assert(preemption_enabled());

	spl_t s = splsched();
	simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);

	if (enable) {
		bit_set(usercontrol_requested_recommended_cores, processor->cpu_id);
	} else {
		bit_clear(usercontrol_requested_recommended_cores, processor->cpu_id);
	}

#if __arm__ || __arm64__
	if ((perfcontrol_failsafe_active == false) && (perfcontrol_sleep_override == false)) {
		sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores);
	} else {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_NONE,
		    perfcontrol_requested_recommended_cores,
		    sched_maintenance_thread->last_made_runnable_time, 0, 0, 0);
	}
#else /* __arm__ || __arm64__ */
	sched_update_recommended_cores(usercontrol_requested_recommended_cores);
#endif /* !__arm__ || __arm64__ */

	simple_unlock(&sched_recommended_cores_lock);
	splx(s);

	return KERN_SUCCESS;
}
/*
 * Apply a new recommended cores mask to the processors it affects
 * Runs after considering failsafes and such
 *
 * Iterate over processors and update their ->is_recommended field.
 * If a processor is running, we let it drain out at its next
 * quantum expiration or blocking point. If a processor is idle, there
 * may be more work for it to do, so IPI it.
 *
 * interrupts disabled, sched_recommended_cores_lock is held
 */
static void
sched_update_recommended_cores(uint64_t recommended_cores)
{
	processor_set_t pset, nset;
	processor_t processor;
	uint64_t needs_exit_idle_mask = 0x0;
	uint32_t avail_count;

	processor = processor_list;
	pset = processor->processor_set;

	KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_UPDATE_REC_CORES) | DBG_FUNC_START,
	    recommended_cores,
#if __arm__ || __arm64__
	    perfcontrol_failsafe_active, 0, 0);
#else /* __arm__ || __arm64__ */
	    0, 0, 0);
#endif /* ! __arm__ || __arm64__ */

	if (__builtin_popcountll(recommended_cores) == 0) {
		bit_set(recommended_cores, master_processor->cpu_id); /* add boot processor or we hang */
	}

	/* First set recommended cores */
	pset_lock(pset);
	avail_count = 0;
	do {
		nset = processor->processor_set;
		if (nset != pset) {
			pset_unlock(pset);
			pset = nset;
			pset_lock(pset);
		}

		if (bit_test(recommended_cores, processor->cpu_id)) {
			processor->is_recommended = TRUE;
			bit_set(pset->recommended_bitmask, processor->cpu_id);

			if (processor->state == PROCESSOR_IDLE) {
				if (processor != current_processor()) {
					bit_set(needs_exit_idle_mask, processor->cpu_id);
				}
			}
			if (processor->state != PROCESSOR_OFF_LINE) {
				avail_count++;
			}
		}
	} while ((processor = processor->processor_list) != NULL);
	pset_unlock(pset);

	/* Now shutdown not recommended cores */
	processor = processor_list;
	pset = processor->processor_set;

	pset_lock(pset);
	do {
		nset = processor->processor_set;
		if (nset != pset) {
			pset_unlock(pset);
			pset = nset;
			pset_lock(pset);
		}

		if (!bit_test(recommended_cores, processor->cpu_id)) {
			sched_ipi_type_t ipi_type = SCHED_IPI_NONE;

			processor->is_recommended = FALSE;
			bit_clear(pset->recommended_bitmask, processor->cpu_id);

			if ((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_DISPATCHING)) {
				ipi_type = SCHED_IPI_IMMEDIATE;
			}
			SCHED(processor_queue_shutdown)(processor);
			/* pset unlocked */

			SCHED(rt_queue_shutdown)(processor);

			if (ipi_type != SCHED_IPI_NONE) {
				if (processor == current_processor()) {
					ast_on(AST_PREEMPT);
				} else {
					sched_ipi_perform(processor, ipi_type);
				}
			}

			pset_lock(pset);
		}
	} while ((processor = processor->processor_list) != NULL);

	processor_avail_count_user = avail_count;
#if defined(__x86_64__)
	commpage_update_active_cpus();
#endif

	pset_unlock(pset);

	/* Issue all pending IPIs now that the pset lock has been dropped */
	for (int cpuid = lsb_first(needs_exit_idle_mask); cpuid >= 0; cpuid = lsb_next(needs_exit_idle_mask, cpuid)) {
		processor = processor_array[cpuid];
		machine_signal_idle(processor);
	}

	KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_UPDATE_REC_CORES) | DBG_FUNC_END,
	    needs_exit_idle_mask, 0, 0, 0);
}
void
thread_set_options(uint32_t thopt)
{
	spl_t x;
	thread_t t = current_thread();

	x = splsched();
	thread_lock(t);

	t->options |= thopt;

	thread_unlock(t);
	splx(x);
}

void
thread_set_pending_block_hint(thread_t thread, block_hint_t block_hint)
{
	thread->pending_block_hint = block_hint;
}
uint32_t
qos_max_parallelism(int qos, uint64_t options)
{
	return SCHED(qos_max_parallelism)(qos, options);
}

uint32_t
sched_qos_max_parallelism(__unused int qos, uint64_t options)
{
	host_basic_info_data_t hinfo;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
	/* Query the machine layer for core information */
	__assert_only kern_return_t kret = host_info(host_self(), HOST_BASIC_INFO,
	    (host_info_t)&hinfo, &count);
	assert(kret == KERN_SUCCESS);

	/* We would not want multiple realtime threads running on the
	 * same physical core; even for SMT capable machines.
	 */
	if (options & QOS_PARALLELISM_REALTIME) {
		return hinfo.physical_cpu;
	}

	if (options & QOS_PARALLELISM_COUNT_LOGICAL) {
		return hinfo.logical_cpu;
	} else {
		return hinfo.physical_cpu;
	}
}
int sched_allow_NO_SMT_threads = 1;

bool
thread_no_smt(thread_t thread)
{
#if DEBUG || DEVELOPMENT
	return sched_allow_NO_SMT_threads && (thread->bound_processor == PROCESSOR_NULL) && ((thread->sched_flags & TH_SFLAG_NO_SMT) || (thread->task->t_flags & TF_NO_SMT));
#else
	return sched_allow_NO_SMT_threads && (thread->bound_processor == PROCESSOR_NULL) && (thread->sched_flags & TH_SFLAG_NO_SMT);
#endif
}

bool
processor_active_thread_no_smt(processor_t processor)
{
	return sched_allow_NO_SMT_threads && !processor->current_is_bound && processor->current_is_NO_SMT;
}
#if __arm64__

/*
 * Set up or replace old timer with new timer
 *
 * Returns true if canceled old timer, false if it did not
 */
boolean_t
sched_perfcontrol_update_callback_deadline(uint64_t new_deadline)
{
	/*
	 * Exchange deadline for new deadline, if old deadline was nonzero,
	 * then I cancelled the callback, otherwise I didn't
	 */

	uint64_t old_deadline = __c11_atomic_load(&sched_perfcontrol_callback_deadline,
	    memory_order_relaxed);

	while (!__c11_atomic_compare_exchange_weak(&sched_perfcontrol_callback_deadline,
	    &old_deadline, new_deadline,
	    memory_order_relaxed, memory_order_relaxed)) {
		;
	}

	/* now old_deadline contains previous value, which might not be the same if it raced */

	return (old_deadline != 0) ? TRUE : FALSE;
}

#endif /* __arm64__ */
void
sched_update_pset_load_average(processor_set_t pset)
{
	int load = ((bit_count(pset->cpu_state_map[PROCESSOR_RUNNING]) + pset->pset_runq.count + rt_runq_count(pset)) << PSET_LOAD_NUMERATOR_SHIFT);
	int new_load_average = (pset->load_average + load) >> 1;

	pset->load_average = new_load_average;

#if (DEVELOPMENT || DEBUG)
#endif
}
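/*
 * The update above is an exponentially weighted moving average with a
 * coefficient of one half: for example, with 3 running CPUs, 2 threads on
 * the pset run queue and 1 realtime thread, load is
 * (3 + 2 + 1) << PSET_LOAD_NUMERATOR_SHIFT, and the new load_average is
 * the midpoint of that value and the previous load_average.
 */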
/* pset is locked */
static processor_t
choose_processor_for_realtime_thread(processor_set_t pset)
{
#if defined(__x86_64__)
	bool avoid_cpu0 = sched_avoid_cpu0 && bit_test(pset->cpu_bitmask, 0);
#else
	const bool avoid_cpu0 = false;
#endif

	uint64_t cpu_map = (pset->cpu_bitmask & pset->recommended_bitmask & ~pset->pending_AST_URGENT_cpu_mask);
	if (avoid_cpu0) {
		cpu_map = bit_ror64(cpu_map, 1);
	}

	for (int rotid = lsb_first(cpu_map); rotid >= 0; rotid = lsb_next(cpu_map, rotid)) {
		int cpuid = avoid_cpu0 ? ((rotid + 1) & 63) : rotid;

		processor_t processor = processor_array[cpuid];

		if (processor->processor_primary != processor) {
			continue;
		}

		if (processor->state == PROCESSOR_IDLE) {
			return processor;
		}

		if ((processor->state != PROCESSOR_RUNNING) && (processor->state != PROCESSOR_DISPATCHING)) {
			continue;
		}

		if (processor->current_pri >= BASEPRI_RTQUEUES) {
			continue;
		}

		return processor;
	}

	if (!sched_allow_rt_smt) {
		return PROCESSOR_NULL;
	}

	/* Consider secondary processors */
	if (avoid_cpu0) {
		/* Also avoid cpu1 */
		cpu_map = bit_ror64(cpu_map, 1);
	}
	for (int rotid = lsb_first(cpu_map); rotid >= 0; rotid = lsb_next(cpu_map, rotid)) {
		int cpuid = avoid_cpu0 ? ((rotid + 2) & 63) : rotid;

		processor_t processor = processor_array[cpuid];

		if (processor->processor_primary == processor) {
			continue;
		}

		if (processor->state == PROCESSOR_IDLE) {
			return processor;
		}

		if ((processor->state != PROCESSOR_RUNNING) && (processor->state != PROCESSOR_DISPATCHING)) {
			continue;
		}

		if (processor->current_pri >= BASEPRI_RTQUEUES) {
			continue;
		}

		return processor;
	}

	return PROCESSOR_NULL;
}
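/*
 * The avoid_cpu0 rotation above works by rotating the candidate bitmap
 * right by one (and by one more before the secondary pass), so that
 * lsb_first() starts the search at cpu 1 (then cpu 2); the
 * ((rotid + 1) & 63) / ((rotid + 2) & 63) arithmetic maps each rotated
 * bit index back to the real cpu id, wrapping within the 64-bit map.
 */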
/* pset is locked */
static bool
all_available_primaries_are_running_realtime_threads(processor_set_t pset)
{
	return these_processors_are_running_realtime_threads(pset, pset->primary_map);
}

/* pset is locked */
static bool
these_processors_are_running_realtime_threads(processor_set_t pset, uint64_t these_map)
{
	uint64_t cpu_map = (pset->cpu_bitmask & pset->recommended_bitmask) & these_map;

	for (int cpuid = lsb_first(cpu_map); cpuid >= 0; cpuid = lsb_next(cpu_map, cpuid)) {
		processor_t processor = processor_array[cpuid];

		if (processor->state == PROCESSOR_IDLE) {
			return false;
		}

		if (processor->state == PROCESSOR_DISPATCHING) {
			return false;
		}

		if (processor->state != PROCESSOR_RUNNING) {
			/*
			 * All other processor states are considered unavailable to run
			 * realtime threads. In particular, we prefer an available secondary
			 * processor over the risk of leaving a realtime thread on the run queue
			 * while waiting for a processor in PROCESSOR_START state,
			 * which should anyway be a rare case.
			 */
			continue;
		}

		if (processor->current_pri < BASEPRI_RTQUEUES) {
			return false;
		}
	}

	return true;
}
static bool
sched_ok_to_run_realtime_thread(processor_set_t pset, processor_t processor)
{
	bool ok_to_run_realtime_thread = true;
#if defined(__x86_64__)
	if (sched_avoid_cpu0 && processor->cpu_id == 0) {
		ok_to_run_realtime_thread = these_processors_are_running_realtime_threads(pset, pset->primary_map & ~0x1);
	} else if (sched_avoid_cpu0 && (processor->cpu_id == 1) && processor->is_SMT) {
		ok_to_run_realtime_thread = sched_allow_rt_smt && these_processors_are_running_realtime_threads(pset, ~0x2);
	} else if (processor->processor_primary != processor) {
		ok_to_run_realtime_thread = sched_allow_rt_smt && all_available_primaries_are_running_realtime_threads(pset);
	}
#else
	(void)pset;
	(void)processor;
#endif
	return ok_to_run_realtime_thread;
}
void
thread_set_no_smt(bool set)
{
	thread_t thread = current_thread();

	spl_t s = splsched();
	thread_lock(thread);

	if (set) {
		thread->sched_flags |= TH_SFLAG_NO_SMT;
	} else {
		thread->sched_flags &= ~TH_SFLAG_NO_SMT;
	}

	thread_unlock(thread);
	splx(s);
}

bool
thread_get_no_smt(void)
{
	return current_thread()->sched_flags & TH_SFLAG_NO_SMT;
}
#if DEBUG || DEVELOPMENT
extern void sysctl_task_set_no_smt(char no_smt);
void
sysctl_task_set_no_smt(char no_smt)
{
	thread_t thread = current_thread();
	task_t task = thread->task;

	if (no_smt == '1') {
		task->t_flags |= TF_NO_SMT;
	} else {
		task->t_flags &= ~TF_NO_SMT;
	}
}

extern char sysctl_task_get_no_smt(void);
char
sysctl_task_get_no_smt(void)
{
	thread_t thread = current_thread();
	task_t task = thread->task;

	if (task->t_flags & TF_NO_SMT) {