/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *  Author: Avadis Tevanian, Jr.
 *
 *  Scheduling primitives
 */
#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <mach/thread_act.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>
#include <machine/machlimits.h>
#include <machine/atomic.h>

#ifdef CONFIG_MACH_APPROXIMATE_TIME
#include <machine/commpage.h>
#endif

#include <kern/kern_types.h>
#include <kern/backtrace.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/sfi.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/ledger.h>
#include <kern/timer_queue.h>
#include <kern/waitq.h>
#include <kern/policy_internal.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>

#include <mach/sdt.h>
#include <mach/mach_host.h>
#include <mach/host_info.h>

#include <sys/kdebug.h>
#include <kperf/kperf.h>
#include <kern/kpc.h>
#include <san/kasan.h>
#include <kern/pms.h>
#include <kern/host.h>
#include <stdatomic.h>
int rt_runq_count(processor_set_t pset)
{
    return atomic_load_explicit(&SCHED(rt_runq)(pset)->count, memory_order_relaxed);
}

void rt_runq_count_incr(processor_set_t pset)
{
    atomic_fetch_add_explicit(&SCHED(rt_runq)(pset)->count, 1, memory_order_relaxed);
}

void rt_runq_count_decr(processor_set_t pset)
{
    atomic_fetch_sub_explicit(&SCHED(rt_runq)(pset)->count, 1, memory_order_relaxed);
}
#define DEFAULT_PREEMPTION_RATE     100     /* (1/s) */
int default_preemption_rate = DEFAULT_PREEMPTION_RATE;

#define DEFAULT_BG_PREEMPTION_RATE  400     /* (1/s) */
int default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;

#define MAX_UNSAFE_QUANTA           800
int max_unsafe_quanta = MAX_UNSAFE_QUANTA;

#define MAX_POLL_QUANTA             2
int max_poll_quanta = MAX_POLL_QUANTA;

#define SCHED_POLL_YIELD_SHIFT      4       /* 1/16 */
int sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT;

uint64_t max_poll_computation;

uint64_t max_unsafe_computation;
uint64_t sched_safe_duration;
#if defined(CONFIG_SCHED_TIMESHARE_CORE)

uint32_t std_quantum;
uint32_t min_std_quantum;
uint32_t bg_quantum;

uint32_t std_quantum_us;
uint32_t bg_quantum_us;

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

uint32_t thread_depress_time;
uint32_t default_timeshare_computation;
uint32_t default_timeshare_constraint;

uint32_t max_rt_quantum;
uint32_t min_rt_quantum;

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

unsigned sched_tick;
uint32_t sched_tick_interval;

uint32_t sched_pri_shifts[TH_BUCKET_MAX];
uint32_t sched_fixed_shift;

uint32_t sched_decay_usage_age_factor = 1; /* accelerate 5/8^n usage aging */

/* Allow foreground to decay past default to resolve inversions */
#define DEFAULT_DECAY_BAND_LIMIT ((BASEPRI_FOREGROUND - BASEPRI_DEFAULT) + 2)
int sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;

/* Defaults for timer deadline profiling */
#define TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT 2000000 /* Timers with deadlines <= 2ms */
#define TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT 5000000 /* Timers with deadlines <= 5ms */

uint64_t timer_deadline_tracking_bin_1;
uint64_t timer_deadline_tracking_bin_2;

#endif /* CONFIG_SCHED_TIMESHARE_CORE */
thread_t sched_maintenance_thread;

#if __arm__ || __arm64__
/* interrupts disabled lock to guard recommended cores state */
decl_simple_lock_data(static, sched_recommended_cores_lock);
static void sched_recommended_cores_maintenance(void);
static void sched_update_recommended_cores(uint32_t recommended_cores);

uint64_t perfcontrol_failsafe_starvation_threshold;
extern char *proc_name_address(struct proc *p);

#endif /* __arm__ || __arm64__ */

uint64_t sched_one_second_interval;

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

static void load_shift_init(void);
static void preempt_pri_init(void);

#endif /* CONFIG_SCHED_TIMESHARE_CORE */
#if CONFIG_SCHED_IDLE_IN_PLACE
static thread_t thread_select_idle(
                    thread_t        thread,
                    processor_t     processor);
#endif

thread_t processor_idle(
                    thread_t        thread,
                    processor_t     processor);

ast_t
csw_check_locked(   processor_t     processor,
                    processor_set_t pset,
                    ast_t           check_reason);

static void processor_setrun(
                    processor_t     processor,
                    thread_t        thread,
                    integer_t       options);

static void
sched_realtime_timebase_init(void);

static void
sched_timer_deadline_tracking_init(void);

#if DEBUG
extern int debug_task;
#define TLOG(a, fmt, args...) if(debug_task & a) kprintf(fmt, ## args)
#else
#define TLOG(a, fmt, args...) do {} while (0)
#endif

static processor_t
thread_bind_internal(
    thread_t        thread,
    processor_t     processor);

static void
sched_vm_group_maintenance(void);
#if defined(CONFIG_SCHED_TIMESHARE_CORE)
int8_t      sched_load_shifts[NRQS];
bitmap_t    sched_preempt_pri[BITMAP_LEN(NRQS)];
#endif /* CONFIG_SCHED_TIMESHARE_CORE */

const struct sched_dispatch_table *sched_current_dispatch = NULL;
/*
 * Statically allocate a buffer to hold the longest possible
 * scheduler description string, as currently implemented.
 * bsd/kern/kern_sysctl.c has a corresponding definition of this length,
 * used to export the string to userspace via sysctl(3). If either
 * version changes, update the other.
 *
 * Note that in addition to being an upper bound on the strings
 * in the kernel, it's also an exact parameter to PE_get_default(),
 * which interrogates the device tree on some platforms. That
 * API requires the caller know the exact size of the device tree
 * property, so we need both a legacy size (32) and the current size
 * (48) to deal with old and new device trees. The device tree property
 * is similarly padded to a fixed size so that the same kernel image
 * can run on multiple devices with different schedulers configured
 * in the device tree.
 */
char sched_string[SCHED_STRING_MAX_LENGTH];

uint32_t sched_debug_flags = SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS;

/* Global flag which indicates whether Background Stepper Context is enabled */
static int cpu_throttle_enabled = 1;
/* Since using the indirect function dispatch table has a negative impact on
 * context switch performance, only allow DEBUG kernels to use that mechanism.
 */
static void
sched_init_override(void)
{
    char sched_arg[SCHED_STRING_MAX_LENGTH] = { '\0' };

    /* Check for runtime selection of the scheduler algorithm */
    if (!PE_parse_boot_argn("sched", sched_arg, sizeof (sched_arg))) {
        sched_arg[0] = '\0';
    }
    if (strlen(sched_arg) > 0) {
        if (0) {
            /* Allow pattern below */
#if defined(CONFIG_SCHED_TRADITIONAL)
        } else if (0 == strcmp(sched_arg, sched_traditional_dispatch.sched_name)) {
            sched_current_dispatch = &sched_traditional_dispatch;
        } else if (0 == strcmp(sched_arg, sched_traditional_with_pset_runqueue_dispatch.sched_name)) {
            sched_current_dispatch = &sched_traditional_with_pset_runqueue_dispatch;
#endif
#if defined(CONFIG_SCHED_MULTIQ)
        } else if (0 == strcmp(sched_arg, sched_multiq_dispatch.sched_name)) {
            sched_current_dispatch = &sched_multiq_dispatch;
        } else if (0 == strcmp(sched_arg, sched_dualq_dispatch.sched_name)) {
            sched_current_dispatch = &sched_dualq_dispatch;
#endif
        } else {
#if defined(CONFIG_SCHED_TRADITIONAL)
            printf("Unrecognized scheduler algorithm: %s\n", sched_arg);
            printf("Scheduler: Using instead: %s\n", sched_traditional_with_pset_runqueue_dispatch.sched_name);
            sched_current_dispatch = &sched_traditional_with_pset_runqueue_dispatch;
#else
            panic("Unrecognized scheduler algorithm: %s", sched_arg);
#endif
        }
        kprintf("Scheduler: Runtime selection of %s\n", SCHED(sched_name));
    } else {
#if   defined(CONFIG_SCHED_MULTIQ)
        sched_current_dispatch = &sched_multiq_dispatch;
#elif defined(CONFIG_SCHED_TRADITIONAL)
        sched_current_dispatch = &sched_traditional_with_pset_runqueue_dispatch;
#else
#error No default scheduler implementation
#endif
        kprintf("Scheduler: Default of %s\n", SCHED(sched_name));
    }
}
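/*
 * Illustrative note (editor's sketch, not part of the original source):
 * the runtime override above is driven by the "sched" boot-arg. For
 * example, booting a DEBUG kernel with
 *
 *      sched=dualq
 *
 * would select sched_dualq_dispatch, assuming CONFIG_SCHED_MULTIQ is
 * compiled in and "dualq" matches that table's sched_name. The exact
 * sched_name strings live in the per-scheduler dispatch tables, so the
 * value shown here is an assumption for illustration only.
 */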
void
sched_init(void)
{
    sched_init_override();

    kprintf("Scheduler: Default of %s\n", SCHED(sched_name));

    if (!PE_parse_boot_argn("sched_pri_decay_limit", &sched_pri_decay_band_limit, sizeof(sched_pri_decay_band_limit))) {
        /* No boot-args, check in device tree */
        if (!PE_get_default("kern.sched_pri_decay_limit",
                            &sched_pri_decay_band_limit,
                            sizeof(sched_pri_decay_band_limit))) {
            /* Allow decay all the way to normal limits */
            sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;
        }
    }

    kprintf("Setting scheduler priority decay band limit %d\n", sched_pri_decay_band_limit);

    if (PE_parse_boot_argn("sched_debug", &sched_debug_flags, sizeof(sched_debug_flags))) {
        kprintf("Scheduler: Debug flags 0x%08x\n", sched_debug_flags);
    }
    strlcpy(sched_string, SCHED(sched_name), sizeof(sched_string));

    SCHED(init)();
    SCHED(rt_init)(&pset0);
    sched_timer_deadline_tracking_init();

    SCHED(pset_init)(&pset0);
    SCHED(processor_init)(master_processor);
}
void
sched_timebase_init(void)
{
    uint64_t abstime;

    clock_interval_to_absolutetime_interval(1, NSEC_PER_SEC, &abstime);
    sched_one_second_interval = abstime;

    SCHED(timebase_init)();
    sched_realtime_timebase_init();
}
#if defined(CONFIG_SCHED_TIMESHARE_CORE)

void
sched_timeshare_init(void)
{
    /*
     * Calculate the timeslicing quantum
     * in us.
     */
    if (default_preemption_rate < 1)
        default_preemption_rate = DEFAULT_PREEMPTION_RATE;
    std_quantum_us = (1000 * 1000) / default_preemption_rate;

    printf("standard timeslicing quantum is %d us\n", std_quantum_us);

    if (default_bg_preemption_rate < 1)
        default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;
    bg_quantum_us = (1000 * 1000) / default_bg_preemption_rate;

    printf("standard background quantum is %d us\n", bg_quantum_us);

    load_shift_init();
    preempt_pri_init();
    sched_tick = 0;
}
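/*
 * Worked example (editor's sketch, not part of the original source):
 * with the default default_preemption_rate of 100 preemptions/sec,
 * std_quantum_us = (1000 * 1000) / 100 = 10000 us, i.e. a 10 ms
 * timeslice. With default_bg_preemption_rate of 400,
 * bg_quantum_us = (1000 * 1000) / 400 = 2500 us, so background threads
 * get a 2.5 ms quantum. These microsecond values are converted to mach
 * absolute-time units in sched_timeshare_timebase_init() below.
 */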
void
sched_timeshare_timebase_init(void)
{
    uint64_t abstime;
    uint32_t shift;

    /* standard timeslicing quantum */
    clock_interval_to_absolutetime_interval(
                            std_quantum_us, NSEC_PER_USEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    std_quantum = (uint32_t)abstime;

    /* smallest remaining quantum (250 us) */
    clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    min_std_quantum = (uint32_t)abstime;

    /* quantum for background tasks */
    clock_interval_to_absolutetime_interval(
                            bg_quantum_us, NSEC_PER_USEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    bg_quantum = (uint32_t)abstime;

    /* scheduler tick interval */
    clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT,
                            NSEC_PER_USEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    sched_tick_interval = (uint32_t)abstime;

    /*
     * Compute conversion factor from usage to
     * timesharing priorities with 5/8 ** n aging.
     */
    abstime = (abstime * 5) / 3;
    for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift)
        abstime >>= 1;
    sched_fixed_shift = shift;

    for (uint32_t i = 0; i < TH_BUCKET_MAX; i++)
        sched_pri_shifts[i] = INT8_MAX;

    max_unsafe_computation = ((uint64_t)max_unsafe_quanta) * std_quantum;
    sched_safe_duration = 2 * ((uint64_t)max_unsafe_quanta) * std_quantum;

    max_poll_computation = ((uint64_t)max_poll_quanta) * std_quantum;
    thread_depress_time = 1 * std_quantum;
    default_timeshare_computation = std_quantum / 2;
    default_timeshare_constraint = std_quantum;

#if __arm__ || __arm64__
    perfcontrol_failsafe_starvation_threshold = (2 * sched_tick_interval);
#endif /* __arm__ || __arm64__ */
}

#endif /* CONFIG_SCHED_TIMESHARE_CORE */
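/*
 * Worked example (editor's sketch, not part of the original source):
 * sched_tick_interval is (USEC_PER_SEC >> SCHED_TICK_SHIFT) us of
 * absolute time, e.g. 125 ms when SCHED_TICK_SHIFT is 3. On a platform
 * whose mach absolute-time tick is 1 ns (an assumption; the unit is
 * machine dependent), abstime starts near 208,000,000 after the *5/3
 * scaling, and roughly 23 halvings bring it at or below
 * BASEPRI_DEFAULT (31), so sched_fixed_shift ends up around 23 there.
 */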
void
pset_rt_init(processor_set_t pset)
{
    rt_lock_init(pset);

    pset->rt_runq.count = 0;
    queue_init(&pset->rt_runq.queue);
    memset(&pset->rt_runq.runq_stats, 0, sizeof pset->rt_runq.runq_stats);
}

rt_queue_t
sched_rtglobal_runq(processor_set_t pset)
{
    (void)pset;

    return &pset0.rt_runq;
}

void
sched_rtglobal_init(processor_set_t pset)
{
    if (pset == &pset0) {
        return pset_rt_init(pset);
    }

    /* Only pset0 rt_runq is used, so make it easy to detect
     * buggy accesses to others.
     */
    memset(&pset->rt_runq, 0xfd, sizeof pset->rt_runq);
}

void
sched_rtglobal_queue_shutdown(processor_t processor)
{
    (void)processor;
}
static void
sched_realtime_timebase_init(void)
{
    uint64_t abstime;

    /* smallest rt computation (50 us) */
    clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    min_rt_quantum = (uint32_t)abstime;

    /* maximum rt computation (50 ms) */
    clock_interval_to_absolutetime_interval(
        50, 1000*NSEC_PER_USEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    max_rt_quantum = (uint32_t)abstime;
}
bool
sched_check_spill(processor_set_t pset, thread_t thread)
{
    (void)pset;
    (void)thread;

    return false;
}

bool
sched_thread_should_yield(processor_t processor, thread_t thread)
{
    (void)thread;

    return (!SCHED(processor_queue_empty)(processor) || rt_runq_count(processor->processor_set) > 0);
}
#if defined(CONFIG_SCHED_TIMESHARE_CORE)

/*
 * Set up values for timeshare
 * loading factors.
 */
static void
load_shift_init(void)
{
    int8_t      k, *p = sched_load_shifts;
    uint32_t    i, j;

    uint32_t    sched_decay_penalty = 1;

    if (PE_parse_boot_argn("sched_decay_penalty", &sched_decay_penalty, sizeof (sched_decay_penalty))) {
        kprintf("Overriding scheduler decay penalty %u\n", sched_decay_penalty);
    }

    if (PE_parse_boot_argn("sched_decay_usage_age_factor", &sched_decay_usage_age_factor, sizeof (sched_decay_usage_age_factor))) {
        kprintf("Overriding scheduler decay usage age factor %u\n", sched_decay_usage_age_factor);
    }

    if (sched_decay_penalty == 0) {
        /*
         * There is no penalty for timeshare threads for using too much
         * CPU, so set all load shifts to INT8_MIN. Even under high load,
         * sched_pri_shift will be >INT8_MAX, and there will be no
         * penalty applied to threads (nor will sched_usage be updated per
         * thread).
         */
        for (i = 0; i < NRQS; i++) {
            sched_load_shifts[i] = INT8_MIN;
        }

        return;
    }

    *p++ = INT8_MIN; *p++ = 0;

    /*
     * For a given system load "i", the per-thread priority
     * penalty per quantum of CPU usage is ~2^k priority
     * levels. "sched_decay_penalty" can cause more
     * array entries to be filled with smaller "k" values
     */
    for (i = 2, j = 1 << sched_decay_penalty, k = 1; i < NRQS; ++k) {
        for (j <<= 1; (i < j) && (i < NRQS); ++i)
            *p++ = k;
    }
}

static void
preempt_pri_init(void)
{
    bitmap_t *p = sched_preempt_pri;

    for (int i = BASEPRI_FOREGROUND; i < MINPRI_KERNEL; ++i)
        bitmap_set(p, i);

    for (int i = BASEPRI_PREEMPT; i <= MAXPRI; ++i)
        bitmap_set(p, i);
}

#endif /* CONFIG_SCHED_TIMESHARE_CORE */
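/*
 * Worked example (editor's sketch, not part of the original source):
 * with the default sched_decay_penalty of 1, load_shift_init() fills
 * sched_load_shifts[] as
 *
 *      load 0      -> INT8_MIN
 *      load 1      -> 0
 *      loads 2-3   -> 1
 *      loads 4-7   -> 2
 *      loads 8-15  -> 3
 *      ...doubling ranges thereafter...
 *
 * so the per-quantum penalty grows roughly with log2(runnable load),
 * matching the "~2^k priority levels" comment above. Larger values of
 * sched_decay_penalty fill more entries with smaller "k", as the
 * comment in the loop notes.
 */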
/*
 *  thread_timer_expire:
 *
 *  Thread wait timer expiration.
 */
void
thread_timer_expire(
    void            *p0,
    __unused void   *p1)
{
    thread_t        thread = p0;
    spl_t           s;

    assert_thread_magic(thread);

    s = splsched();
    thread_lock(thread);
    if (--thread->wait_timer_active == 0) {
        if (thread->wait_timer_is_set) {
            thread->wait_timer_is_set = FALSE;
            clear_wait_internal(thread, THREAD_TIMED_OUT);
        }
    }
    thread_unlock(thread);
    splx(s);
}
/*
 *  thread_unblock:
 *
 *  Unblock thread on wake up.
 *
 *  Returns TRUE if the thread should now be placed on the runqueue.
 *
 *  Thread must be locked.
 *
 *  Called at splsched().
 */
boolean_t
thread_unblock(
    thread_t        thread,
    wait_result_t   wresult)
{
    boolean_t       ready_for_runq = FALSE;
    thread_t        cthread = current_thread();
    uint32_t        new_run_count;

    /*
     *  Set wait_result.
     */
    thread->wait_result = wresult;

    /*
     *  Cancel pending wait timer.
     */
    if (thread->wait_timer_is_set) {
        if (timer_call_cancel(&thread->wait_timer))
            thread->wait_timer_active--;
        thread->wait_timer_is_set = FALSE;
    }

    /*
     *  Update scheduling state: not waiting,
     *  set running.
     */
    thread->state &= ~(TH_WAIT|TH_UNINT);

    if (!(thread->state & TH_RUN)) {
        thread->state |= TH_RUN;
        thread->last_made_runnable_time = thread->last_basepri_change_time = mach_approximate_time();

        ready_for_runq = TRUE;

        (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);

        /* Update the runnable thread count */
        new_run_count = sched_run_incr(thread);
    } else {
        /*
         *  Either the thread is idling in place on another processor,
         *  or it hasn't finished context switching yet.
         */
#if CONFIG_SCHED_IDLE_IN_PLACE
        if (thread->state & TH_IDLE) {
            processor_t processor = thread->last_processor;

            if (processor != current_processor())
                machine_signal_idle(processor);
        }
#else
        assert((thread->state & TH_IDLE) == 0);
#endif
        /*
         * The run count is only dropped after the context switch completes
         * and the thread is still waiting, so we should not run_incr here
         */
        new_run_count = sched_run_buckets[TH_BUCKET_RUN];
    }

    /*
     * Calculate deadline for real-time threads.
     */
    if (thread->sched_mode == TH_MODE_REALTIME) {
        uint64_t ctime;

        ctime = mach_absolute_time();
        thread->realtime.deadline = thread->realtime.constraint + ctime;
    }

    /*
     * Clear old quantum, fail-safe computation, etc.
     */
    thread->quantum_remaining = 0;
    thread->computation_metered = 0;
    thread->reason = AST_NONE;
    thread->block_hint = kThreadWaitNone;

    /* Obtain power-relevant interrupt and "platform-idle exit" statistics.
     * We also account for "double hop" thread signaling via
     * the thread callout infrastructure.
     * DRK: consider removing the callout wakeup counters in the future
     * they're present for verification at the moment.
     */
    boolean_t aticontext, pidle;
    ml_get_power_state(&aticontext, &pidle);

    if (__improbable(aticontext && !(thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT))) {
        DTRACE_SCHED2(iwakeup, struct thread *, thread, struct proc *, thread->task->bsd_info);

        uint64_t ttd = PROCESSOR_DATA(current_processor(), timer_call_ttd);

        if (ttd) {
            if (ttd <= timer_deadline_tracking_bin_1)
                thread->thread_timer_wakeups_bin_1++;
            else
                if (ttd <= timer_deadline_tracking_bin_2)
                    thread->thread_timer_wakeups_bin_2++;
        }

        ledger_credit_thread(thread, thread->t_ledger,
                             task_ledgers.interrupt_wakeups, 1);
        if (pidle) {
            ledger_credit_thread(thread, thread->t_ledger,
                                 task_ledgers.platform_idle_wakeups, 1);
        }

    } else if (thread_get_tag_internal(cthread) & THREAD_TAG_CALLOUT) {
        /* TODO: what about an interrupt that does a wake taken on a callout thread? */
        if (cthread->callout_woken_from_icontext) {
            ledger_credit_thread(thread, thread->t_ledger,
                                 task_ledgers.interrupt_wakeups, 1);
            thread->thread_callout_interrupt_wakeups++;

            if (cthread->callout_woken_from_platform_idle) {
                ledger_credit_thread(thread, thread->t_ledger,
                                     task_ledgers.platform_idle_wakeups, 1);
                thread->thread_callout_platform_idle_wakeups++;
            }

            cthread->callout_woke_thread = TRUE;
        }
    }

    if (thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT) {
        thread->callout_woken_from_icontext = aticontext;
        thread->callout_woken_from_platform_idle = pidle;
        thread->callout_woke_thread = FALSE;
    }

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_SCHED,MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE,
        (uintptr_t)thread_tid(thread), thread->sched_pri, thread->wait_result,
        sched_run_buckets[TH_BUCKET_RUN], 0);

    DTRACE_SCHED2(wakeup, struct thread *, thread, struct proc *, thread->task->bsd_info);

    return (ready_for_runq);
}
/*
 *  Routine:    thread_go
 *  Purpose:
 *      Unblock and dispatch thread.
 *  Conditions:
 *      thread lock held, IPC locks may be held.
 *      thread must have been pulled from wait queue under same lock hold.
 *      thread must have been waiting
 *  Returns:
 *      KERN_SUCCESS - Thread was set running
 *
 * TODO: This should return void
 */
kern_return_t
thread_go(
    thread_t        thread,
    wait_result_t   wresult)
{
    assert_thread_magic(thread);

    assert(thread->at_safe_point == FALSE);
    assert(thread->wait_event == NO_EVENT64);
    assert(thread->waitq == NULL);

    assert(!(thread->state & (TH_TERMINATE|TH_TERMINATE2)));
    assert(thread->state & TH_WAIT);

    if (thread_unblock(thread, wresult)) {
#if SCHED_TRACE_THREAD_WAKEUPS
        backtrace(&thread->thread_wakeup_bt[0],
                  (sizeof(thread->thread_wakeup_bt)/sizeof(uintptr_t)));
#endif
        thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
    }

    return (KERN_SUCCESS);
}
/*
 *  Routine:    thread_mark_wait_locked
 *  Purpose:
 *      Mark a thread as waiting. If, given the circumstances,
 *      it doesn't want to wait (i.e. already aborted), then
 *      indicate that in the return value.
 *  Conditions:
 *      at splsched() and thread is locked.
 */
__private_extern__
wait_result_t
thread_mark_wait_locked(
    thread_t            thread,
    wait_interrupt_t    interruptible)
{
    boolean_t       at_safe_point;

    assert(!(thread->state & (TH_WAIT|TH_IDLE|TH_UNINT|TH_TERMINATE2)));

    /*
     *  The thread may have certain types of interrupts/aborts masked
     *  off. Even if the wait location says these types of interrupts
     *  are OK, we have to honor mask settings (outer-scoped code may
     *  not be able to handle aborts at the moment).
     */
    if (interruptible > (thread->options & TH_OPT_INTMASK))
        interruptible = thread->options & TH_OPT_INTMASK;

    at_safe_point = (interruptible == THREAD_ABORTSAFE);

    if (    interruptible == THREAD_UNINT           ||
            !(thread->sched_flags & TH_SFLAG_ABORT) ||
            (!at_safe_point &&
             (thread->sched_flags & TH_SFLAG_ABORTSAFELY))) {

        if ( !(thread->state & TH_TERMINATE))
            DTRACE_SCHED(sleep);

        thread->state |= (interruptible) ? TH_WAIT : (TH_WAIT | TH_UNINT);
        thread->at_safe_point = at_safe_point;

        /* TODO: pass this through assert_wait instead, have
         * assert_wait just take a struct as an argument */
        assert(!thread->block_hint);
        thread->block_hint = thread->pending_block_hint;
        thread->pending_block_hint = kThreadWaitNone;

        return (thread->wait_result = THREAD_WAITING);
    } else {
        if (thread->sched_flags & TH_SFLAG_ABORTSAFELY)
            thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
    }
    thread->pending_block_hint = kThreadWaitNone;

    return (thread->wait_result = THREAD_INTERRUPTED);
}
/*
 *  Routine:    thread_interrupt_level
 *  Purpose:
 *      Set the maximum interruptible state for the
 *      current thread. The effective value of any
 *      interruptible flag passed into assert_wait
 *      will never exceed this.
 *
 *      Useful for code that must not be interrupted,
 *      but which calls code that doesn't know that.
 *  Returns:
 *      The old interrupt level for the thread.
 */
__private_extern__
wait_interrupt_t
thread_interrupt_level(
    wait_interrupt_t new_level)
{
    thread_t thread = current_thread();
    wait_interrupt_t result = thread->options & TH_OPT_INTMASK;

    thread->options = (thread->options & ~TH_OPT_INTMASK) | (new_level & TH_OPT_INTMASK);

    return result;
}
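/*
 * Usage sketch (editor's illustration, not part of the original source):
 * callers typically save the returned level and restore it when the
 * non-interruptible region ends, e.g.
 *
 *      wait_interrupt_t saved = thread_interrupt_level(THREAD_UNINT);
 *      ... code that may sleep but must not be aborted ...
 *      thread_interrupt_level(saved);
 *
 * Any interruptible argument passed to assert_wait() in between is
 * clamped by the TH_OPT_INTMASK check in thread_mark_wait_locked()
 * above.
 */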
/*
 *  assert_wait:
 *
 *  Assert that the current thread is about to go to
 *  sleep until the specified event occurs.
 */
wait_result_t
assert_wait(
    event_t             event,
    wait_interrupt_t    interruptible)
{
    if (__improbable(event == NO_EVENT))
        panic("%s() called with NO_EVENT", __func__);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
        VM_KERNEL_UNSLIDE_OR_PERM(event), 0, 0, 0, 0);

    struct waitq *waitq;
    waitq = global_eventq(event);
    return waitq_assert_wait64(waitq, CAST_EVENT64_T(event), interruptible, TIMEOUT_WAIT_FOREVER);
}
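/*
 * Usage sketch (editor's illustration, not part of the original source):
 * assert_wait() only queues the current thread on the event's global
 * waitq; the thread does not actually sleep until it calls
 * thread_block(). The canonical pattern, using a hypothetical kernel
 * address &my_event as the event, is:
 *
 *      wait_result_t wr = assert_wait((event_t)&my_event, THREAD_UNINT);
 *      if (wr == THREAD_WAITING)
 *          wr = thread_block(THREAD_CONTINUE_NULL);
 *
 *      ... elsewhere, the waker side:
 *      thread_wakeup((event_t)&my_event);
 */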
/*
 *  assert_wait_queue:
 *
 *  Return the global waitq for the specified event
 */
struct waitq *
assert_wait_queue(
    event_t             event)
{
    return global_eventq(event);
}
wait_result_t
assert_wait_timeout(
    event_t             event,
    wait_interrupt_t    interruptible,
    uint32_t            interval,
    uint32_t            scale_factor)
{
    thread_t            thread = current_thread();
    wait_result_t       wresult;
    uint64_t            deadline;
    spl_t               s;

    if (__improbable(event == NO_EVENT))
        panic("%s() called with NO_EVENT", __func__);

    struct waitq *waitq;
    waitq = global_eventq(event);

    s = splsched();
    waitq_lock(waitq);

    clock_interval_to_deadline(interval, scale_factor, &deadline);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
        VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);

    wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
                         interruptible,
                         TIMEOUT_URGENCY_SYS_NORMAL,
                         deadline, TIMEOUT_NO_LEEWAY,
                         thread);

    waitq_unlock(waitq);
    splx(s);
    return wresult;
}
wait_result_t
assert_wait_timeout_with_leeway(
    event_t                 event,
    wait_interrupt_t        interruptible,
    wait_timeout_urgency_t  urgency,
    uint32_t                interval,
    uint32_t                leeway,
    uint32_t                scale_factor)
{
    thread_t            thread = current_thread();
    wait_result_t       wresult;
    uint64_t            deadline;
    uint64_t            abstime;
    uint64_t            slop;
    uint64_t            now;
    spl_t               s;

    if (__improbable(event == NO_EVENT))
        panic("%s() called with NO_EVENT", __func__);

    now = mach_absolute_time();
    clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
    deadline = now + abstime;

    clock_interval_to_absolutetime_interval(leeway, scale_factor, &slop);

    struct waitq *waitq;
    waitq = global_eventq(event);

    s = splsched();
    waitq_lock(waitq);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
        VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);

    wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
                         interruptible,
                         urgency, deadline, slop,
                         thread);

    waitq_unlock(waitq);
    splx(s);
    return wresult;
}
wait_result_t
assert_wait_deadline(
    event_t             event,
    wait_interrupt_t    interruptible,
    uint64_t            deadline)
{
    thread_t            thread = current_thread();
    wait_result_t       wresult;
    spl_t               s;

    if (__improbable(event == NO_EVENT))
        panic("%s() called with NO_EVENT", __func__);

    struct waitq *waitq;
    waitq = global_eventq(event);

    s = splsched();
    waitq_lock(waitq);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
        VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);

    wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
                         interruptible,
                         TIMEOUT_URGENCY_SYS_NORMAL, deadline,
                         TIMEOUT_NO_LEEWAY, thread);
    waitq_unlock(waitq);
    splx(s);
    return wresult;
}
wait_result_t
assert_wait_deadline_with_leeway(
    event_t                 event,
    wait_interrupt_t        interruptible,
    wait_timeout_urgency_t  urgency,
    uint64_t                deadline,
    uint64_t                leeway)
{
    thread_t            thread = current_thread();
    wait_result_t       wresult;
    spl_t               s;

    if (__improbable(event == NO_EVENT))
        panic("%s() called with NO_EVENT", __func__);

    struct waitq *waitq;
    waitq = global_eventq(event);

    s = splsched();
    waitq_lock(waitq);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
        VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);

    wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
                         interruptible,
                         urgency, deadline, leeway,
                         thread);

    waitq_unlock(waitq);
    splx(s);
    return wresult;
}
/*
 * thread_isoncpu:
 *
 * Return TRUE if a thread is running on a processor such that an AST
 * is needed to pull it out of userspace execution, or if executing in
 * the kernel, bring to a context switch boundary that would cause
 * thread state to be serialized in the thread PCB.
 *
 * Thread locked, returns the same way. While locked, fields
 * like "state" cannot change. "runq" can change only from set to unset.
 */
static inline boolean_t
thread_isoncpu(thread_t thread)
{
    /* Not running or runnable */
    if (!(thread->state & TH_RUN))
        return (FALSE);

    /* Waiting on a runqueue, not currently running */
    /* TODO: This is invalid - it can get dequeued without thread lock, but not context switched. */
    if (thread->runq != PROCESSOR_NULL)
        return (FALSE);

    /*
     * Thread does not have a stack yet
     * It could be on the stack alloc queue or preparing to be invoked
     */
    if (!thread->kernel_stack)
        return (FALSE);

    /*
     * Thread must be running on a processor, or
     * about to run, or just did run. In all these
     * cases, an AST to the processor is needed
     * to guarantee that the thread is kicked out
     * of userspace and the processor has
     * context switched (and saved register state).
     */
    return (TRUE);
}
/*
 *  thread_stop:
 *
 *  Force a preemption point for a thread and wait
 *  for it to stop running on a CPU. If a stronger
 *  guarantee is requested, wait until no longer
 *  runnable. Arbitrates access among
 *  multiple stop requests. (released by unstop)
 *
 *  The thread must enter a wait state and stop via a
 *  separate means.
 *
 *  Returns FALSE if interrupted.
 */
boolean_t
thread_stop(
    thread_t    thread,
    boolean_t   until_not_runnable)
{
    wait_result_t   wresult;
    spl_t           s = splsched();
    boolean_t       oncpu;

    wake_lock(thread);
    thread_lock(thread);

    while (thread->state & TH_SUSP) {
        thread->wake_active = TRUE;
        thread_unlock(thread);

        wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
        wake_unlock(thread);
        splx(s);

        if (wresult == THREAD_WAITING)
            wresult = thread_block(THREAD_CONTINUE_NULL);

        if (wresult != THREAD_AWAKENED)
            return (FALSE);

        s = splsched();
        wake_lock(thread);
        thread_lock(thread);
    }

    thread->state |= TH_SUSP;

    while ((oncpu = thread_isoncpu(thread)) ||
           (until_not_runnable && (thread->state & TH_RUN))) {
        processor_t processor;

        if (oncpu) {
            assert(thread->state & TH_RUN);
            processor = thread->chosen_processor;
            cause_ast_check(processor);
        }

        thread->wake_active = TRUE;
        thread_unlock(thread);

        wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
        wake_unlock(thread);
        splx(s);

        if (wresult == THREAD_WAITING)
            wresult = thread_block(THREAD_CONTINUE_NULL);

        if (wresult != THREAD_AWAKENED) {
            thread_unstop(thread);
            return (FALSE);
        }

        s = splsched();
        wake_lock(thread);
        thread_lock(thread);
    }

    thread_unlock(thread);
    wake_unlock(thread);
    splx(s);

    /*
     * We return with the thread unlocked. To prevent it from
     * transitioning to a runnable state (or from TH_RUN to
     * being on the CPU), the caller must ensure the thread
     * is stopped via an external means (such as an AST)
     */

    return (TRUE);
}
/*
 *  thread_unstop:
 *
 *  Release a previous stop request and set
 *  the thread running if appropriate.
 *
 *  Use only after a successful stop operation.
 */
void
thread_unstop(
    thread_t    thread)
{
    spl_t       s = splsched();

    wake_lock(thread);
    thread_lock(thread);

    assert((thread->state & (TH_RUN|TH_WAIT|TH_SUSP)) != TH_SUSP);

    if (thread->state & TH_SUSP) {
        thread->state &= ~TH_SUSP;

        if (thread->wake_active) {
            thread->wake_active = FALSE;
            thread_unlock(thread);

            thread_wakeup(&thread->wake_active);
            wake_unlock(thread);
            splx(s);

            return;
        }
    }

    thread_unlock(thread);
    wake_unlock(thread);
    splx(s);
}
/*
 *  thread_wait:
 *
 *  Wait for a thread to stop running. (non-interruptible)
 *
 */
void
thread_wait(
    thread_t    thread,
    boolean_t   until_not_runnable)
{
    wait_result_t   wresult;
    boolean_t       oncpu;
    processor_t     processor;
    spl_t           s = splsched();

    wake_lock(thread);
    thread_lock(thread);

    /*
     * Wait until not running on a CPU. If stronger requirement
     * desired, wait until not runnable. Assumption: if thread is
     * on CPU, then TH_RUN is set, so we're not waiting in any case
     * where the original, pure "TH_RUN" check would have let us
     * finish.
     */
    while ((oncpu = thread_isoncpu(thread)) ||
            (until_not_runnable && (thread->state & TH_RUN))) {

        if (oncpu) {
            assert(thread->state & TH_RUN);
            processor = thread->chosen_processor;
            cause_ast_check(processor);
        }

        thread->wake_active = TRUE;
        thread_unlock(thread);

        wresult = assert_wait(&thread->wake_active, THREAD_UNINT);
        wake_unlock(thread);
        splx(s);

        if (wresult == THREAD_WAITING)
            thread_block(THREAD_CONTINUE_NULL);

        s = splsched();
        wake_lock(thread);
        thread_lock(thread);
    }

    thread_unlock(thread);
    wake_unlock(thread);
    splx(s);
}
/*
 *  Routine: clear_wait_internal
 *
 *      Clear the wait condition for the specified thread.
 *      Start the thread executing if that is appropriate.
 *  Arguments:
 *      thread      thread to awaken
 *      result      Wakeup result the thread should see
 *  Conditions:
 *      At splsched
 *      the thread is locked.
 *  Returns:
 *      KERN_SUCCESS        thread was rousted out a wait
 *      KERN_FAILURE        thread was waiting but could not be rousted
 *      KERN_NOT_WAITING    thread was not waiting
 */
__private_extern__ kern_return_t
clear_wait_internal(
    thread_t        thread,
    wait_result_t   wresult)
{
    uint32_t        i = LockTimeOutUsec;
    struct waitq    *waitq = thread->waitq;

    do {
        if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT))
            return (KERN_FAILURE);

        if (waitq != NULL) {
            if (!waitq_pull_thread_locked(waitq, thread)) {
                thread_unlock(thread);
                delay(1);
                if (i > 0 && !machine_timeout_suspended())
                    i--;
                thread_lock(thread);
                if (waitq != thread->waitq)
                    return KERN_NOT_WAITING;
                continue;
            }
        }

        /* TODO: Can we instead assert TH_TERMINATE is not set?  */
        if ((thread->state & (TH_WAIT|TH_TERMINATE)) == TH_WAIT)
            return (thread_go(thread, wresult));
        else
            return (KERN_NOT_WAITING);
    } while (i > 0);

    panic("clear_wait_internal: deadlock: thread=%p, wq=%p, cpu=%d\n",
          thread, waitq, cpu_number());

    return (KERN_FAILURE);
}
/*
 *  clear_wait:
 *
 *  Clear the wait condition for the specified thread. Start the thread
 *  executing if that is appropriate.
 *
 *  parameters:
 *    thread        thread to awaken
 *    result        Wakeup result the thread should see
 */
kern_return_t
clear_wait(
    thread_t        thread,
    wait_result_t   result)
{
    kern_return_t   ret;
    spl_t           s;

    s = splsched();
    thread_lock(thread);
    ret = clear_wait_internal(thread, result);
    thread_unlock(thread);
    splx(s);
    return ret;
}
/*
 *  thread_wakeup_prim:
 *
 *  Common routine for thread_wakeup, thread_wakeup_with_result,
 *  and thread_wakeup_one.
 *
 */
kern_return_t
thread_wakeup_prim(
    event_t         event,
    boolean_t       one_thread,
    wait_result_t   result)
{
    if (__improbable(event == NO_EVENT))
        panic("%s() called with NO_EVENT", __func__);

    struct waitq *wq = global_eventq(event);

    if (one_thread)
        return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), result, WAITQ_ALL_PRIORITIES);
    else
        return waitq_wakeup64_all(wq, CAST_EVENT64_T(event), result, WAITQ_ALL_PRIORITIES);
}
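/*
 * Editor's note (not part of the original source): the thread_wakeup(),
 * thread_wakeup_with_result() and thread_wakeup_one() callers mentioned
 * above are, to my understanding, thin macros over this routine
 * (defined in sched_prim.h in this source tree), roughly:
 *
 *      thread_wakeup(x)               -> thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
 *      thread_wakeup_with_result(x,z) -> thread_wakeup_prim((x), FALSE, (z))
 *      thread_wakeup_one(x)           -> thread_wakeup_prim((x), TRUE,  THREAD_AWAKENED)
 *
 * so one_thread selects waitq_wakeup64_one() vs. waitq_wakeup64_all().
 */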
/*
 * Wakeup a specified thread if and only if it's waiting for this event
 */
kern_return_t
thread_wakeup_thread(
    event_t     event,
    thread_t    thread)
{
    if (__improbable(event == NO_EVENT))
        panic("%s() called with NO_EVENT", __func__);

    if (__improbable(thread == THREAD_NULL))
        panic("%s() called with THREAD_NULL", __func__);

    struct waitq *wq = global_eventq(event);

    return waitq_wakeup64_thread(wq, CAST_EVENT64_T(event), thread, THREAD_AWAKENED);
}
/*
 * Wakeup a thread waiting on an event and promote it to a priority.
 *
 * Requires woken thread to un-promote itself when done.
 */
kern_return_t
thread_wakeup_one_with_pri(
    event_t     event,
    int         priority)
{
    if (__improbable(event == NO_EVENT))
        panic("%s() called with NO_EVENT", __func__);

    struct waitq *wq = global_eventq(event);

    return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), THREAD_AWAKENED, priority);
}
/*
 * Wakeup a thread waiting on an event,
 * promote it to a priority,
 * and return a reference to the woken thread.
 *
 * Requires woken thread to un-promote itself when done.
 */
thread_t
thread_wakeup_identify(event_t  event,
                       int      priority)
{
    if (__improbable(event == NO_EVENT))
        panic("%s() called with NO_EVENT", __func__);

    struct waitq *wq = global_eventq(event);

    return waitq_wakeup64_identify(wq, CAST_EVENT64_T(event), THREAD_AWAKENED, priority);
}
/*
 *  thread_bind:
 *
 *  Force the current thread to execute on the specified processor.
 *  Takes effect after the next thread_block().
 *
 *  Returns the previous binding. PROCESSOR_NULL means
 *  not bound.
 *
 *  XXX - DO NOT export this to users - XXX
 */
processor_t
thread_bind(
    processor_t     processor)
{
    thread_t        self = current_thread();
    processor_t     prev;
    spl_t           s;

    s = splsched();
    thread_lock(self);

    prev = thread_bind_internal(self, processor);

    thread_unlock(self);
    splx(s);

    return (prev);
}
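/*
 * Usage sketch (editor's illustration, not part of the original source):
 * binding only takes effect at the next context switch, so callers that
 * want to migrate immediately block right after binding, as
 * thread_vm_bind_group_add() does below:
 *
 *      thread_bind(master_processor);
 *      thread_block(THREAD_CONTINUE_NULL);   // switch onto the bound CPU
 *      ...
 *      thread_bind(PROCESSOR_NULL);          // drop the binding later
 */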
/*
 * thread_bind_internal:
 *
 * If the specified thread is not the current thread, and it is currently
 * running on another CPU, a remote AST must be sent to that CPU to cause
 * the thread to migrate to its bound processor. Otherwise, the migration
 * will occur at the next quantum expiration or blocking point.
 *
 * When the thread is the current thread, an explicit thread_block() should
 * be used to force the current processor to context switch away and
 * let the thread migrate to the bound processor.
 *
 * Thread must be locked, and at splsched.
 */
static processor_t
thread_bind_internal(
    thread_t        thread,
    processor_t     processor)
{
    processor_t     prev;

    /* <rdar://problem/15102234> */
    assert(thread->sched_pri < BASEPRI_RTQUEUES);
    /* A thread can't be bound if it's sitting on a (potentially incorrect) runqueue */
    assert(thread->runq == PROCESSOR_NULL);

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_THREAD_BIND), thread_tid(thread), processor ? (uintptr_t)processor->cpu_id : (uintptr_t)-1, 0, 0, 0);

    prev = thread->bound_processor;
    thread->bound_processor = processor;

    return (prev);
}
/*
 * thread_vm_bind_group_add:
 *
 * The "VM bind group" is a special mechanism to mark a collection
 * of threads from the VM subsystem that, in general, should be scheduled
 * with only one CPU of parallelism. To accomplish this, we initially
 * bind all the threads to the master processor, which has the effect
 * that only one of the threads in the group can execute at once, including
 * preempting threads in the group that are a lower priority. Future
 * mechanisms may use more dynamic mechanisms to prevent the collection
 * of VM threads from using more CPU time than desired.
 *
 * The current implementation can result in priority inversions where
 * compute-bound priority 95 or realtime threads that happen to have
 * landed on the master processor prevent the VM threads from running.
 * When this situation is detected, we unbind the threads for one
 * scheduler tick to allow the scheduler to run the threads on
 * additional CPUs, before restoring the binding (assuming high latency
 * is no longer a problem).
 */

/*
 * The current max is provisioned for:
 * vm_compressor_swap_trigger_thread (92)
 * 2 x vm_pageout_iothread_internal (92) when vm_restricted_to_single_processor==TRUE
 * vm_pageout_continue (92)
 * memorystatus_thread (95)
 */
#define MAX_VM_BIND_GROUP_COUNT (5)
decl_simple_lock_data(static, sched_vm_group_list_lock);
static thread_t sched_vm_group_thread_list[MAX_VM_BIND_GROUP_COUNT];
static int sched_vm_group_thread_count;
static boolean_t sched_vm_group_temporarily_unbound = FALSE;

void
thread_vm_bind_group_add(void)
{
    thread_t self = current_thread();

    thread_reference_internal(self);
    self->options |= TH_OPT_SCHED_VM_GROUP;

    simple_lock(&sched_vm_group_list_lock);
    assert(sched_vm_group_thread_count < MAX_VM_BIND_GROUP_COUNT);
    sched_vm_group_thread_list[sched_vm_group_thread_count++] = self;
    simple_unlock(&sched_vm_group_list_lock);

    thread_bind(master_processor);

    /* Switch to bound processor if not already there */
    thread_block(THREAD_CONTINUE_NULL);
}
static void
sched_vm_group_maintenance(void)
{
    uint64_t ctime = mach_absolute_time();
    uint64_t longtime = ctime - sched_tick_interval;
    int i;
    spl_t s;
    boolean_t high_latency_observed = FALSE;
    boolean_t runnable_and_not_on_runq_observed = FALSE;
    boolean_t bind_target_changed = FALSE;
    processor_t bind_target = PROCESSOR_NULL;

    /* Make sure nobody attempts to add new threads while we are enumerating them */
    simple_lock(&sched_vm_group_list_lock);

    s = splsched();

    for (i = 0; i < sched_vm_group_thread_count; i++) {
        thread_t thread = sched_vm_group_thread_list[i];
        assert(thread != THREAD_NULL);
        thread_lock(thread);
        if ((thread->state & (TH_RUN|TH_WAIT)) == TH_RUN) {
            if (thread->runq != PROCESSOR_NULL && thread->last_made_runnable_time < longtime) {
                high_latency_observed = TRUE;
            } else if (thread->runq == PROCESSOR_NULL) {
                /* There are some cases where a thread may be transitioning that also fall into this case */
                runnable_and_not_on_runq_observed = TRUE;
            }
        }
        thread_unlock(thread);

        if (high_latency_observed && runnable_and_not_on_runq_observed) {
            /* All the things we are looking for are true, stop looking */
            break;
        }
    }

    splx(s);

    if (sched_vm_group_temporarily_unbound) {
        /* If we turned off binding, make sure everything is OK before rebinding */
        if (!high_latency_observed) {
            /* rebind */
            bind_target_changed = TRUE;
            bind_target = master_processor;
            sched_vm_group_temporarily_unbound = FALSE; /* might be reset to TRUE if change cannot be completed */
        }
    } else {
        /*
         * Check if we're in a bad state, which is defined by high
         * latency with no core currently executing a thread. If a
         * single thread is making progress on a CPU, that means the
         * binding concept to reduce parallelism is working as
         * designed.
         */
        if (high_latency_observed && !runnable_and_not_on_runq_observed) {
            /* unbind */
            bind_target_changed = TRUE;
            bind_target = PROCESSOR_NULL;
            sched_vm_group_temporarily_unbound = TRUE;
        }
    }

    if (bind_target_changed) {
        s = splsched();
        for (i = 0; i < sched_vm_group_thread_count; i++) {
            thread_t thread = sched_vm_group_thread_list[i];
            boolean_t removed;
            assert(thread != THREAD_NULL);

            thread_lock(thread);
            removed = thread_run_queue_remove(thread);
            if (removed || ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT)) {
                thread_bind_internal(thread, bind_target);
            } else {
                /*
                 * Thread was in the middle of being context-switched-to,
                 * or was in the process of blocking. To avoid switching the bind
                 * state out mid-flight, defer the change if possible.
                 */
                if (bind_target == PROCESSOR_NULL) {
                    thread_bind_internal(thread, bind_target);
                } else {
                    sched_vm_group_temporarily_unbound = TRUE; /* next pass will try again */
                }
            }

            if (removed) {
                thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
            }
            thread_unlock(thread);
        }
        splx(s);
    }

    simple_unlock(&sched_vm_group_list_lock);
}
/* Invoked prior to idle entry to determine if, on SMT capable processors, an SMT
 * rebalancing opportunity exists when a core is (instantaneously) idle, but
 * other SMT-capable cores may be over-committed. TODO: some possible negatives:
 * IPI thrash if this core does not remain idle following the load balancing ASTs
 * Idle "thrash", when IPI issue is followed by idle entry/core power down
 * followed by a wakeup shortly thereafter.
 */

#if (DEVELOPMENT || DEBUG)
int sched_smt_balance = 1;
#endif

#if __SMP__
/* Invoked with pset locked, returns with pset unlocked */
void
sched_SMT_balance(processor_t cprocessor, processor_set_t cpset) {
    processor_t ast_processor = NULL;

#if (DEVELOPMENT || DEBUG)
    if (__improbable(sched_smt_balance == 0))
        goto smt_balance_exit;
#endif

    assert(cprocessor == current_processor());
    if (cprocessor->is_SMT == FALSE)
        goto smt_balance_exit;

    processor_t sib_processor = cprocessor->processor_secondary ? cprocessor->processor_secondary : cprocessor->processor_primary;

    /* Determine if both this processor and its sibling are idle,
     * indicating an SMT rebalancing opportunity.
     */
    if (sib_processor->state != PROCESSOR_IDLE)
        goto smt_balance_exit;

    processor_t sprocessor;

    sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
    qe_foreach_element(sprocessor, &cpset->active_queue, processor_queue) {
        if ((sprocessor->state == PROCESSOR_RUNNING) &&
            (sprocessor->processor_primary != sprocessor) &&
            (sprocessor->processor_primary->state == PROCESSOR_RUNNING) &&
            (sprocessor->current_pri < BASEPRI_RTQUEUES)) {

            ipi_type = sched_ipi_action(sprocessor, NULL, false, SCHED_IPI_EVENT_SMT_REBAL);
            if (ipi_type != SCHED_IPI_NONE) {
                assert(sprocessor != cprocessor);
                ast_processor = sprocessor;
                break;
            }
        }
    }

smt_balance_exit:
    pset_unlock(cpset);

    if (ast_processor) {
        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_SMT_BALANCE), ast_processor->cpu_id, ast_processor->state, ast_processor->processor_primary->state, 0, 0);
        sched_ipi_perform(ast_processor, ipi_type);
    }
}
#else
/* Invoked with pset locked, returns with pset unlocked */
void
sched_SMT_balance(__unused processor_t cprocessor, processor_set_t cpset)
{
    pset_unlock(cpset);
}
#endif /* __SMP__ */
/*
 *  thread_select:
 *
 *  Select a new thread for the current processor to execute.
 *
 *  May select the current thread, which must be locked.
 */
static thread_t
thread_select(thread_t          thread,
              processor_t       processor,
              ast_t            *reason)
{
    processor_set_t pset = processor->processor_set;
    thread_t        new_thread = THREAD_NULL;

    assert(processor == current_processor());
    assert((thread->state & (TH_RUN|TH_TERMINATE2)) == TH_RUN);

    do {
        /*
         *  Update the priority.
         */
        if (SCHED(can_update_priority)(thread))
            SCHED(update_priority)(thread);

        processor_state_update_from_thread(processor, thread);

        pset_lock(pset);

        assert(processor->state != PROCESSOR_OFF_LINE);

        if (!processor->is_recommended) {
            /*
             * The performance controller has provided a hint to not dispatch more threads,
             * unless they are bound to us (and thus we are the only option
             */
            if (!SCHED(processor_bound_count)(processor)) {
                goto idle;
            }
        } else if (processor->processor_primary != processor) {
            /*
             * Should this secondary SMT processor attempt to find work? For pset runqueue systems,
             * we should look for work only under the same conditions that choose_processor()
             * would have assigned work, which is when all primary processors have been assigned work.
             *
             * An exception is that bound threads are dispatched to a processor without going through
             * choose_processor(), so in those cases we should continue trying to dequeue work.
             */
            if (!SCHED(processor_bound_count)(processor) &&
                !queue_empty(&pset->idle_queue) && !rt_runq_count(pset)) {
                goto idle;
            }
        }

        /*
         * Test to see if the current thread should continue
         * to run on this processor. Must not be attempting to wait, and not
         * bound to a different processor, nor be in the wrong
         * processor set, nor be forced to context switch by TH_SUSP.
         *
         * Note that there are never any RT threads in the regular runqueue.
         *
         * This code is very insanely tricky.
         */

        /* i.e. not waiting, not TH_SUSP'ed */
        boolean_t still_running = ((thread->state & (TH_TERMINATE|TH_IDLE|TH_WAIT|TH_RUN|TH_SUSP)) == TH_RUN);

        /*
         * Threads running on SMT processors are forced to context switch. Don't rebalance realtime threads.
         * TODO: This should check if it's worth it to rebalance, i.e. 'are there any idle primary processors'
         */
        boolean_t needs_smt_rebalance = (thread->sched_pri < BASEPRI_RTQUEUES && processor->processor_primary != processor);

        boolean_t affinity_mismatch = (thread->affinity_set != AFFINITY_SET_NULL && thread->affinity_set->aset_pset != pset);

        boolean_t bound_elsewhere = (thread->bound_processor != PROCESSOR_NULL && thread->bound_processor != processor);

        boolean_t avoid_processor = (SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread));

        if (still_running && !needs_smt_rebalance && !affinity_mismatch && !bound_elsewhere && !avoid_processor) {
            /*
             * This thread is eligible to keep running on this processor.
             *
             * RT threads with un-expired quantum stay on processor,
             * unless there's a valid RT thread with an earlier deadline.
             */
            if (thread->sched_pri >= BASEPRI_RTQUEUES && processor->first_timeslice) {
                if (rt_runq_count(pset) > 0) {

                    rt_lock_lock(pset);

                    if (rt_runq_count(pset) > 0) {

                        thread_t next_rt = qe_queue_first(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);

                        if (next_rt->realtime.deadline < processor->deadline &&
                            (next_rt->bound_processor == PROCESSOR_NULL ||
                             next_rt->bound_processor == processor)) {
                            /* The next RT thread is better, so pick it off the runqueue. */
                            goto pick_new_rt_thread;
                        }
                    }

                    rt_lock_unlock(pset);
                }

                /* This is still the best RT thread to run. */
                processor->deadline = thread->realtime.deadline;

                sched_update_pset_load_average(pset);
                pset_unlock(pset);

                return (thread);
            }

            if ((rt_runq_count(pset) == 0) &&
                SCHED(processor_queue_has_priority)(processor, thread->sched_pri, TRUE) == FALSE) {
                /* This thread is still the highest priority runnable (non-idle) thread */
                processor->deadline = UINT64_MAX;

                sched_update_pset_load_average(pset);
                pset_unlock(pset);

                return (thread);
            }
        } else {
            /*
             * This processor must context switch.
             * If it's due to a rebalance, we should aggressively find this thread a new home.
             */
            if (needs_smt_rebalance || affinity_mismatch || bound_elsewhere || avoid_processor)
                *reason |= AST_REBALANCE;
        }

        /* OK, so we're not going to run the current thread. Look at the RT queue. */
        if (rt_runq_count(pset) > 0) {

            rt_lock_lock(pset);

            if (rt_runq_count(pset) > 0) {
                thread_t next_rt = qe_queue_first(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);

                if (__probable((next_rt->bound_processor == PROCESSOR_NULL ||
                               (next_rt->bound_processor == processor)))) {
pick_new_rt_thread:
                    new_thread = qe_dequeue_head(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);

                    new_thread->runq = PROCESSOR_NULL;
                    SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
                    rt_runq_count_decr(pset);

                    processor->deadline = new_thread->realtime.deadline;

                    rt_lock_unlock(pset);
                    sched_update_pset_load_average(pset);
                    pset_unlock(pset);

                    return (new_thread);
                }
            }

            rt_lock_unlock(pset);
        }

        processor->deadline = UINT64_MAX;

        /* No RT threads, so let's look at the regular threads. */
        if ((new_thread = SCHED(choose_thread)(processor, MINPRI, *reason)) != THREAD_NULL) {
            sched_update_pset_load_average(pset);
            pset_unlock(pset);
            return (new_thread);
        }

#if __SMP__
        if (SCHED(steal_thread_enabled)) {
            /*
             * No runnable threads, attempt to steal
             * from other processors. Returns with pset lock dropped.
             */

            if ((new_thread = SCHED(steal_thread)(pset)) != THREAD_NULL) {
                return (new_thread);
            }

            /*
             * If other threads have appeared, shortcut
             * around again.
             */
            if (!SCHED(processor_queue_empty)(processor) || rt_runq_count(pset) > 0)
                continue;

            pset_lock(pset);
        }
#endif

    idle:
        /*
         *  Nothing is runnable, so set this processor idle if it
         *  was running.
         */
        if (processor->state == PROCESSOR_RUNNING) {
            processor->state = PROCESSOR_IDLE;

            if (!processor->is_recommended) {
                re_queue_head(&pset->unused_queue, &processor->processor_queue);
            } else if (processor->processor_primary == processor) {
                re_queue_head(&pset->idle_queue, &processor->processor_queue);
            } else {
                re_queue_head(&pset->idle_secondary_queue, &processor->processor_queue);
            }

            pset->active_processor_count--;
            sched_update_pset_load_average(pset);
        }

#if __SMP__
        /* Invoked with pset locked, returns with pset unlocked */
        SCHED(processor_balance)(processor, pset);
#else
        pset_unlock(pset);
#endif

#if CONFIG_SCHED_IDLE_IN_PLACE
        /*
         *  Choose idle thread if fast idle is not possible.
         */
        if (processor->processor_primary != processor)
            return (processor->idle_thread);

        if ((thread->state & (TH_IDLE|TH_TERMINATE|TH_SUSP)) || !(thread->state & TH_WAIT) || thread->wake_active || thread->sched_pri >= BASEPRI_RTQUEUES)
            return (processor->idle_thread);

        /*
         *  Perform idling activities directly without a
         *  context switch. Return dispatched thread,
         *  else check again for a runnable thread.
         */
        new_thread = thread_select_idle(thread, processor);

#else /* !CONFIG_SCHED_IDLE_IN_PLACE */

        /*
         * Do a full context switch to idle so that the current
         * thread can start running on another processor without
         * waiting for the fast-idled processor to wake up.
         */
        new_thread = processor->idle_thread;

#endif /* !CONFIG_SCHED_IDLE_IN_PLACE */

    } while (new_thread == THREAD_NULL);

    return (new_thread);
}
#if CONFIG_SCHED_IDLE_IN_PLACE
/*
 *  thread_select_idle:
 *
 *  Idle the processor using the current thread context.
 *
 *  Called with thread locked, then dropped and relocked.
 */
static thread_t
thread_select_idle(
    thread_t        thread,
    processor_t     processor)
{
    thread_t        new_thread;
    uint64_t        arg1, arg2;
    int             urgency;

    sched_run_decr(thread);

    thread->state |= TH_IDLE;
    processor_state_update_idle(processor);

    /* Reload precise timing global policy to thread-local policy */
    thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);

    thread_unlock(thread);

    /*
     *  Switch execution timing to processor idle thread.
     */
    processor->last_dispatch = mach_absolute_time();

#ifdef CONFIG_MACH_APPROXIMATE_TIME
    commpage_update_mach_approximate_time(processor->last_dispatch);
#endif

    thread->last_run_time = processor->last_dispatch;
    thread_timer_event(processor->last_dispatch, &processor->idle_thread->system_timer);
    PROCESSOR_DATA(processor, kernel_timer) = &processor->idle_thread->system_timer;

    /*
     *  Cancel the quantum timer while idling.
     */
    timer_call_quantum_timer_cancel(&processor->quantum_timer);
    processor->first_timeslice = FALSE;

    (*thread->sched_call)(SCHED_CALL_BLOCK, thread);

    thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0, 0, NULL);

    /*
     *  Enable interrupts and perform idling activities. No
     *  preemption due to TH_IDLE being set.
     */
    spllo(); new_thread = processor_idle(thread, processor);

    /*
     *  Return at splsched.
     */
    (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);

    thread_lock(thread);

    /*
     *  If awakened, switch to thread timer and start a new quantum.
     *  Otherwise skip; we will context switch to another thread or return here.
     */
    if (!(thread->state & TH_WAIT)) {
        processor->last_dispatch = mach_absolute_time();
        thread_timer_event(processor->last_dispatch, &thread->system_timer);
        PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
        thread_quantum_init(thread);
        processor->quantum_end = processor->last_dispatch + thread->quantum_remaining;
        timer_call_quantum_timer_enter(&processor->quantum_timer,
            thread, processor->quantum_end, processor->last_dispatch);
        processor->first_timeslice = TRUE;

        thread->computation_epoch = processor->last_dispatch;
    }

    thread->state &= ~TH_IDLE;

    urgency = thread_get_urgency(thread, &arg1, &arg2);

    thread_tell_urgency(urgency, arg1, arg2, 0, new_thread);

    sched_run_incr(thread);

    return (new_thread);
}
#endif /* CONFIG_SCHED_IDLE_IN_PLACE */
/*
 *  thread_invoke
 *
 *  Called at splsched with neither thread locked.
 *
 *  Perform a context switch and start executing the new thread.
 *
 *  Returns FALSE when the context switch didn't happen.
 *  The reference to the new thread is still consumed.
 *
 *  "self" is what is currently running on the processor,
 *  "thread" is the new thread to context switch to
 *  (which may be the same thread in some cases)
 */
static boolean_t
thread_invoke(
    thread_t        self,
    thread_t        thread,
    ast_t           reason)
{
    if (__improbable(get_preemption_level() != 0)) {
        int pl = get_preemption_level();
        panic("thread_invoke: preemption_level %d, possible cause: %s",
            pl, (pl < 0 ? "unlocking an unlocked mutex or spinlock" :
                "blocking while holding a spinlock, or within interrupt context"));
    }

    thread_continue_t continuation = self->continuation;
    void *parameter = self->parameter;
    processor_t processor;

    uint64_t ctime = mach_absolute_time();

#ifdef CONFIG_MACH_APPROXIMATE_TIME
    commpage_update_mach_approximate_time(ctime);
#endif

#if defined(CONFIG_SCHED_TIMESHARE_CORE)
    if ((thread->state & TH_IDLE) == 0)
        sched_timeshare_consider_maintenance(ctime);
#endif

#if MONOTONIC
    mt_sched_update(self);
#endif /* MONOTONIC */

    assert_thread_magic(self);
    assert(self == current_thread());
    assert(self->runq == PROCESSOR_NULL);
    assert((self->state & (TH_RUN|TH_TERMINATE2)) == TH_RUN);

    thread_lock(thread);

    assert_thread_magic(thread);
    assert((thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_TERMINATE|TH_TERMINATE2)) == TH_RUN);
    assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == current_processor());
    assert(thread->runq == PROCESSOR_NULL);
    /* Reload precise timing global policy to thread-local policy */
    thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);

    /* Update SFI class based on other factors */
    thread->sfi_class = sfi_thread_classify(thread);

    /* Update the same_pri_latency for the thread (used by perfcontrol callouts) */
    thread->same_pri_latency = ctime - thread->last_basepri_change_time;
    /*
     * In case a base_pri update happened between the timestamp and
     * taking the thread lock
     */
    if (ctime <= thread->last_basepri_change_time)
        thread->same_pri_latency = ctime - thread->last_made_runnable_time;

    /* Allow realtime threads to hang onto a stack. */
    if ((self->sched_mode == TH_MODE_REALTIME) && !self->reserved_stack)
        self->reserved_stack = self->kernel_stack;

    /* Prepare for spin debugging */
#if INTERRUPT_MASKED_DEBUG
    ml_spin_debug_clear(thread);
#endif
    if (continuation != NULL) {
        if (!thread->kernel_stack) {
            /*
             *  If we are using a privileged stack,
             *  check to see whether we can exchange it with
             *  that of the other thread.
             */
            if (self->kernel_stack == self->reserved_stack && !thread->reserved_stack)
                goto need_stack;

            /*
             *  Context switch by performing a stack handoff.
             */
            continuation = thread->continuation;
            parameter = thread->parameter;

            processor = current_processor();
            processor->active_thread = thread;
            processor_state_update_from_thread(processor, thread);

            if (thread->last_processor != processor && thread->last_processor != NULL) {
                if (thread->last_processor->processor_set != processor->processor_set)
                    thread->ps_switch++;
                thread->p_switch++;
            }
            thread->last_processor = processor;
            thread->c_switch++;
            ast_context(thread);

            thread_unlock(thread);

            self->reason = reason;

            processor->last_dispatch = ctime;
            self->last_run_time = ctime;
            thread_timer_event(ctime, &thread->system_timer);
            PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;

            /*
             * Since non-precise user/kernel time doesn't update the state timer
             * during privilege transitions, synthesize an event now.
             */
            if (!thread->precise_user_kernel_time) {
                timer_switch(PROCESSOR_DATA(processor, current_state),
                    ctime,
                    PROCESSOR_DATA(processor, current_state));
            }

            KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF)|DBG_FUNC_NONE,
                self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);

            if ((thread->chosen_processor != processor) && (thread->chosen_processor != PROCESSOR_NULL)) {
                SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED)|DBG_FUNC_NONE,
                    (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
            }

            DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);

            SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);

            TLOG(1, "thread_invoke: calling stack_handoff\n");
            stack_handoff(self, thread);

            /* 'self' is now off core */
            assert(thread == current_thread());

            DTRACE_SCHED(on__cpu);

#if KPERF
            kperf_on_cpu(thread, continuation, NULL);
#endif /* KPERF */

#if KASAN
            kasan_unpoison_fakestack(self);
            kasan_unpoison_stack(thread->kernel_stack, kernel_stack_size);
#endif

            thread_dispatch(self, thread);

            thread->continuation = thread->parameter = NULL;

            counter(c_thread_invoke_hits++);

            assert(continuation);
            call_continuation(continuation, parameter, thread->wait_result);
            /*NOTREACHED*/
        }
        else if (thread == self) {
            /* same thread but with continuation */
            ast_context(self);
            counter(++c_thread_invoke_same);

            thread_unlock(self);

#if KPERF
            kperf_on_cpu(thread, continuation, NULL);
#endif /* KPERF */

            KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
                self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);

#if KASAN
            kasan_unpoison_fakestack(self);
            kasan_unpoison_stack(self->kernel_stack, kernel_stack_size);
#endif

            self->continuation = self->parameter = NULL;

            call_continuation(continuation, parameter, self->wait_result);
            /*NOTREACHED*/
        }
    }

    /*
     *  Check that the other thread has a stack
     */
    if (!thread->kernel_stack) {
need_stack:
        if (!stack_alloc_try(thread)) {
            counter(c_thread_invoke_misses++);
            thread_unlock(thread);
            thread_stack_enqueue(thread);
            return (FALSE);
        }
    } else if (thread == self) {
        ast_context(self);
        counter(++c_thread_invoke_same);
        thread_unlock(self);

        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
            self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);

        return (TRUE);
    }
    /*
     *  Context switch by full context save.
     */
    processor = current_processor();
    processor->active_thread = thread;
    processor_state_update_from_thread(processor, thread);

    if (thread->last_processor != processor && thread->last_processor != NULL) {
        if (thread->last_processor->processor_set != processor->processor_set)
            thread->ps_switch++;
        thread->p_switch++;
    }
    thread->last_processor = processor;
    thread->c_switch++;
    ast_context(thread);

    thread_unlock(thread);

    counter(c_thread_invoke_csw++);

    self->reason = reason;

    processor->last_dispatch = ctime;
    self->last_run_time = ctime;
    thread_timer_event(ctime, &thread->system_timer);
    PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;

    /*
     * Since non-precise user/kernel time doesn't update the state timer
     * during privilege transitions, synthesize an event now.
     */
    if (!thread->precise_user_kernel_time) {
        timer_switch(PROCESSOR_DATA(processor, current_state),
            ctime,
            PROCESSOR_DATA(processor, current_state));
    }

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
        self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);

    if ((thread->chosen_processor != processor) && (thread->chosen_processor != NULL)) {
        SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED)|DBG_FUNC_NONE,
            (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
    }

    DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);

    SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);

    /*
     * This is where we actually switch register context,
     * and address space if required.  We will next run
     * as a result of a subsequent context switch.
     *
     * Once registers are switched and the processor is running "thread",
     * the stack variables and non-volatile registers will contain whatever
     * was there the last time that thread blocked. No local variables should
     * be used after this point, except for the special case of "thread", which
     * the platform layer returns as the previous thread running on the processor
     * via the function call ABI as a return register, and "self", which may have
     * been stored on the stack or a non-volatile register, but a stale idea of
     * what was on the CPU is newly-accurate because that thread is again
     * running on the CPU.
     */
    assert(continuation == self->continuation);
    thread = machine_switch_context(self, continuation, thread);
    assert(self == current_thread());
    TLOG(1,"thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self, continuation, thread);

    DTRACE_SCHED(on__cpu);

#if KPERF
    kperf_on_cpu(self, NULL, __builtin_frame_address(0));
#endif /* KPERF */

    /*
     * We have been resumed and are set to run.
     */
    thread_dispatch(thread, self);

    if (continuation) {
        self->continuation = self->parameter = NULL;

        call_continuation(continuation, parameter, self->wait_result);
        /*NOTREACHED*/
    }

    return (TRUE);
}
#if defined(CONFIG_SCHED_DEFERRED_AST)
/*
 * pset_cancel_deferred_dispatch:
 *
 * Cancels all ASTs that we can cancel for the given processor set
 * if the current processor is running the last runnable thread in the
 * system.
 *
 * This function assumes the current thread is runnable.  This must
 * be called with the pset unlocked.
 */
static void
pset_cancel_deferred_dispatch(
    processor_set_t pset,
    processor_t     processor)
{
    processor_t     active_processor = NULL;
    uint32_t        sampled_sched_run_count;

    pset_lock(pset);
    sampled_sched_run_count = (volatile uint32_t) sched_run_buckets[TH_BUCKET_RUN];

    /*
     * If we have emptied the run queue, and our current thread is runnable, we
     * should tell any processors that are still DISPATCHING that they will
     * probably not have any work to do.  In the event that there are no
     * pending signals that we can cancel, this is also uninteresting.
     *
     * In the unlikely event that another thread becomes runnable while we are
     * doing this (sched_run_count is atomically updated, not guarded), the
     * codepath making it runnable SHOULD (a dangerous word) need the pset lock
     * in order to dispatch it to a processor in our pset.  So, the other
     * codepath will wait while we squash all cancelable ASTs, get the pset
     * lock, and then dispatch the freshly runnable thread.  So this should be
     * correct (we won't accidentally have a runnable thread that hasn't been
     * dispatched to an idle processor), if not ideal (we may be restarting the
     * dispatch process, which could have some overhead).
     */
    if ((sampled_sched_run_count == 1) &&
        (pset->pending_deferred_AST_cpu_mask)) {
        qe_foreach_element_safe(active_processor, &pset->active_queue, processor_queue) {
            /*
             * If a processor is DISPATCHING, it could be because of
             * a cancelable signal.
             *
             * IF the processor is not our
             * current processor (the current processor should not
             * be DISPATCHING, so this is a bit paranoid), AND there
             * is a cancelable signal pending on the processor, AND
             * there is no non-cancelable signal pending (as there is
             * no point trying to backtrack on bringing the processor
             * up if a signal we cannot cancel is outstanding), THEN
             * it should make sense to roll back the processor state
             * to the IDLE state.
             *
             * If the racy nature of this approach (as the signal
             * will be arbitrated by hardware, and can fire as we
             * roll back state) results in the core responding
             * despite being pushed back to the IDLE state, it
             * should be no different than if the core took some
             * interrupt while IDLE.
             */
            if ((active_processor->state == PROCESSOR_DISPATCHING) &&
                (bit_test(pset->pending_deferred_AST_cpu_mask, active_processor->cpu_id)) &&
                (!bit_test(pset->pending_AST_cpu_mask, active_processor->cpu_id)) &&
                (active_processor != processor)) {
                /*
                 * Squash all of the processor state back to some
                 * reasonable facsimile of PROCESSOR_IDLE.
                 *
                 * TODO: What queue policy do we actually want here?
                 * We want to promote selection of a good processor
                 * to run on.  Do we want to enqueue at the head?
                 * The tail?  At the (relative) old position in the
                 * queue?  Or something else entirely?
                 */
                if (!active_processor->is_recommended) {
                    re_queue_head(&pset->unused_queue, &active_processor->processor_queue);
                } else if (active_processor->processor_primary == active_processor) {
                    re_queue_head(&pset->idle_queue, &active_processor->processor_queue);
                } else {
                    re_queue_head(&pset->idle_secondary_queue, &active_processor->processor_queue);
                }

                pset->active_processor_count--;
                sched_update_pset_load_average(pset);

                assert(active_processor->next_thread == THREAD_NULL);
                processor_state_update_idle(active_processor);
                active_processor->deadline = UINT64_MAX;
                active_processor->state = PROCESSOR_IDLE;
                bit_clear(pset->pending_deferred_AST_cpu_mask, active_processor->cpu_id);
                machine_signal_idle_cancel(active_processor);
            }
        }
    }

    pset_unlock(pset);
}
#else
/* We don't support deferred ASTs; everything is candycanes and sunshine. */
#endif /* CONFIG_SCHED_DEFERRED_AST */
static void
thread_csw_callout(
    thread_t    old,
    thread_t    new,
    uint64_t    timestamp)
{
    perfcontrol_event event = (new->state & TH_IDLE) ? IDLE : CONTEXT_SWITCH;
    uint64_t same_pri_latency = (new->state & TH_IDLE) ? 0 : new->same_pri_latency;
    machine_switch_perfcontrol_context(event, timestamp, 0,
        same_pri_latency, old, new);
}
/*
 *  thread_dispatch:
 *
 *  Handle threads at context switch.  Re-dispatch other thread
 *  if still running, otherwise update run state and perform
 *  special actions.  Update quantum for other thread and begin
 *  the quantum for ourselves.
 *
 *  "thread" is the old thread that we have switched away from.
 *  "self" is the new current thread that we have context switched to
 *
 *  Called at splsched.
 */
void
thread_dispatch(
    thread_t        thread,
    thread_t        self)
{
    processor_t     processor = self->last_processor;

    assert(processor == current_processor());
    assert(self == current_thread());
    assert(thread != self);

    if (thread != THREAD_NULL) {
        /*
         * Do the perfcontrol callout for context switch.
         * The reason we do this here is:
         * - thread_dispatch() is called from various places that are not
         *   the direct context switch path, e.g. processor shutdown.
         *   So adding the callout here covers all those cases.
         * - We want this callout as early as possible to be close
         *   to the timestamp taken in thread_invoke().
         * - We want to avoid holding the thread lock while doing the
         *   callout.
         * - We do not want to callout if "thread" is NULL.
         */
        thread_csw_callout(thread, self, processor->last_dispatch);

        /*
         *  If blocked at a continuation, discard
         *  the stack.
         */
        if (thread->continuation != NULL && thread->kernel_stack != 0)
            stack_free(thread);

        if (thread->state & TH_IDLE) {
            KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                MACHDBG_CODE(DBG_MACH_SCHED,MACH_DISPATCH) | DBG_FUNC_NONE,
                (uintptr_t)thread_tid(thread), 0, thread->state,
                sched_run_buckets[TH_BUCKET_RUN], 0);
        } else {
            int64_t consumed;
            int64_t remainder = 0;

            if (processor->quantum_end > processor->last_dispatch)
                remainder = processor->quantum_end -
                    processor->last_dispatch;

            consumed = thread->quantum_remaining - remainder;

            if ((thread->reason & AST_LEDGER) == 0) {
                /*
                 * Bill CPU time to both the task and
                 * the individual thread.
                 */
                ledger_credit_thread(thread, thread->t_ledger,
                    task_ledgers.cpu_time, consumed);
                ledger_credit_thread(thread, thread->t_threadledger,
                    thread_ledgers.cpu_time, consumed);
                if (thread->t_bankledger) {
                    ledger_credit_thread(thread, thread->t_bankledger,
                        bank_ledgers.cpu_time,
                        (consumed - thread->t_deduct_bank_ledger_time));
                }
                thread->t_deduct_bank_ledger_time = 0;
            }

            wake_lock(thread);
            thread_lock(thread);
            /*
             * Apply a priority floor if the thread holds a kernel resource.
             * Do this before checking starting_pri to avoid overpenalizing
             * repeated rwlock blockers.
             */
            if (__improbable(thread->rwlock_count != 0))
                lck_rw_set_promotion_locked(thread);

            boolean_t keep_quantum = processor->first_timeslice;

            /*
             * Treat a thread which has dropped priority since it got on core
             * as having expired its quantum.
             */
            if (processor->starting_pri > thread->sched_pri)
                keep_quantum = FALSE;

            /* Compute remainder of current quantum. */
            if (keep_quantum &&
                processor->quantum_end > processor->last_dispatch)
                thread->quantum_remaining = (uint32_t)remainder;
            else
                thread->quantum_remaining = 0;

            if (thread->sched_mode == TH_MODE_REALTIME) {
                /*
                 * Cancel the deadline if the thread has
                 * consumed the entire quantum.
                 */
                if (thread->quantum_remaining == 0) {
                    thread->realtime.deadline = UINT64_MAX;
                }
            } else {
#if defined(CONFIG_SCHED_TIMESHARE_CORE)
                /*
                 * For non-realtime threads treat a tiny
                 * remaining quantum as an expired quantum
                 * but include what's left next time.
                 */
                if (thread->quantum_remaining < min_std_quantum) {
                    thread->reason |= AST_QUANTUM;
                    thread->quantum_remaining += SCHED(initial_quantum_size)(thread);
                }
#endif /* CONFIG_SCHED_TIMESHARE_CORE */
            }

            /*
             * If we are doing a direct handoff then
             * take the remainder of the quantum.
             */
            if ((thread->reason & (AST_HANDOFF|AST_QUANTUM)) == AST_HANDOFF) {
                self->quantum_remaining = thread->quantum_remaining;
                thread->reason |= AST_QUANTUM;
                thread->quantum_remaining = 0;
            } else {
#if defined(CONFIG_SCHED_MULTIQ)
                if (SCHED(sched_groups_enabled) &&
                    thread->sched_group == self->sched_group) {
                    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                        MACHDBG_CODE(DBG_MACH_SCHED, MACH_QUANTUM_HANDOFF),
                        self->reason, (uintptr_t)thread_tid(thread),
                        self->quantum_remaining, thread->quantum_remaining, 0);

                    self->quantum_remaining = thread->quantum_remaining;
                    thread->quantum_remaining = 0;
                    /* Don't set AST_QUANTUM here - old thread might still want to preempt someone else */
                }
#endif /* defined(CONFIG_SCHED_MULTIQ) */
            }

            thread->computation_metered += (processor->last_dispatch - thread->computation_epoch);

            if (!(thread->state & TH_WAIT)) {
                /*
                 *  Still runnable.
                 */
                thread->last_made_runnable_time = thread->last_basepri_change_time = processor->last_dispatch;

                machine_thread_going_off_core(thread, FALSE, processor->last_dispatch);

                ast_t reason = thread->reason;
                sched_options_t options = SCHED_NONE;

                if (reason & AST_REBALANCE) {
                    options |= SCHED_REBALANCE;
                    if (reason & AST_QUANTUM) {
                        /* Having gone to the trouble of forcing this thread off a less preferred core,
                         * we should force the preferable core to reschedule immediately to give this
                         * thread a chance to run instead of just sitting on the run queue where
                         * it may just be stolen back by the idle core we just forced it off.
                         * But only do this at the end of a quantum to prevent cascading effects.
                         */
                        options |= SCHED_PREEMPT;
                    }
                }

                if (reason & AST_QUANTUM)
                    options |= SCHED_TAILQ;
                else if (reason & AST_PREEMPT)
                    options |= SCHED_HEADQ;
                else
                    options |= (SCHED_PREEMPT | SCHED_TAILQ);

                thread_setrun(thread, options);

                KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                    MACHDBG_CODE(DBG_MACH_SCHED,MACH_DISPATCH) | DBG_FUNC_NONE,
                    (uintptr_t)thread_tid(thread), thread->reason, thread->state,
                    sched_run_buckets[TH_BUCKET_RUN], 0);

                if (thread->wake_active) {
                    thread->wake_active = FALSE;
                    thread_unlock(thread);

                    thread_wakeup(&thread->wake_active);
                } else {
                    thread_unlock(thread);
                }

                wake_unlock(thread);
            } else {
                /*
                 *  Waiting.
                 */
                boolean_t should_terminate = FALSE;
                uint32_t new_run_count;

                /* Only the first call to thread_dispatch
                 * after explicit termination should add
                 * the thread to the termination queue
                 */
                if ((thread->state & (TH_TERMINATE|TH_TERMINATE2)) == TH_TERMINATE) {
                    should_terminate = TRUE;
                    thread->state |= TH_TERMINATE2;
                }

                thread->state &= ~TH_RUN;
                thread->last_made_runnable_time = thread->last_basepri_change_time = THREAD_NOT_RUNNABLE;
                thread->chosen_processor = PROCESSOR_NULL;

                new_run_count = sched_run_decr(thread);

#if CONFIG_SCHED_SFI
                if ((thread->state & (TH_WAIT | TH_TERMINATE)) == TH_WAIT) {
                    if (thread->reason & AST_SFI) {
                        thread->wait_sfi_begin_time = processor->last_dispatch;
                    }
                }
#endif

                machine_thread_going_off_core(thread, should_terminate, processor->last_dispatch);

                KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                    MACHDBG_CODE(DBG_MACH_SCHED,MACH_DISPATCH) | DBG_FUNC_NONE,
                    (uintptr_t)thread_tid(thread), thread->reason, thread->state,
                    new_run_count, 0);

                (*thread->sched_call)(SCHED_CALL_BLOCK, thread);

                if (thread->wake_active) {
                    thread->wake_active = FALSE;
                    thread_unlock(thread);

                    thread_wakeup(&thread->wake_active);
                } else {
                    thread_unlock(thread);
                }

                wake_unlock(thread);

                if (should_terminate)
                    thread_terminate_enqueue(thread);
            }
        }
    }
    int urgency = THREAD_URGENCY_NONE;
    uint64_t latency = 0;

    /* Update (new) current thread and reprogram quantum timer */
    thread_lock(self);

    if (!(self->state & TH_IDLE)) {
        uint64_t arg1, arg2;

#if CONFIG_SCHED_SFI
        ast_t new_ast;

        new_ast = sfi_thread_needs_ast(self, NULL);

        if (new_ast != AST_NONE) {
            ast_on(new_ast);
        }
#endif

        assertf(processor->last_dispatch >= self->last_made_runnable_time,
            "Non-monotonic time? dispatch at 0x%llx, runnable at 0x%llx",
            processor->last_dispatch, self->last_made_runnable_time);

        assert(self->last_made_runnable_time <= self->last_basepri_change_time);

        latency = processor->last_dispatch - self->last_made_runnable_time;
        assert(latency >= self->same_pri_latency);

        urgency = thread_get_urgency(self, &arg1, &arg2);

        thread_tell_urgency(urgency, arg1, arg2, latency, self);

        /*
         *  Get a new quantum if none remaining.
         */
        if (self->quantum_remaining == 0) {
            thread_quantum_init(self);
        }

        /*
         *  Set up quantum timer and timeslice.
         */
        processor->quantum_end = processor->last_dispatch + self->quantum_remaining;
        timer_call_quantum_timer_enter(&processor->quantum_timer, self,
            processor->quantum_end, processor->last_dispatch);

        processor->first_timeslice = TRUE;
    } else {
        timer_call_quantum_timer_cancel(&processor->quantum_timer);
        processor->first_timeslice = FALSE;

        thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0, 0, self);
    }

    assert(self->block_hint == kThreadWaitNone);
    self->computation_epoch = processor->last_dispatch;
    self->reason = AST_NONE;
    processor->starting_pri = self->sched_pri;

    thread_unlock(self);

    machine_thread_going_on_core(self, urgency, latency, self->same_pri_latency,
        processor->last_dispatch);

#if defined(CONFIG_SCHED_DEFERRED_AST)
    /*
     * TODO: Can we state that redispatching our old thread is also
     * uninteresting?
     */
    if ((((volatile uint32_t)sched_run_buckets[TH_BUCKET_RUN]) == 1) &&
        !(self->state & TH_IDLE)) {
        pset_cancel_deferred_dispatch(processor->processor_set, processor);
    }
#endif
}
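
/*
 * Illustrative sketch (not part of the kernel build): the quantum accounting
 * performed in thread_dispatch() above, reduced to plain arithmetic on
 * absolute-time values.  struct quantum_state and quantum_account() are
 * hypothetical names used only for this example; the thresholds are made up.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

struct quantum_state {
    uint64_t quantum_end;       /* absolute time at which the quantum expires */
    uint64_t last_dispatch;     /* absolute time of the last dispatch */
    uint32_t quantum_remaining; /* what the outgoing thread had left */
};

/* Returns CPU time consumed and updates the remaining quantum, mirroring the
 * remainder/consumed computation in thread_dispatch(). */
static int64_t
quantum_account(struct quantum_state *qs, int keep_quantum,
    uint32_t min_quantum, uint32_t initial_quantum)
{
    int64_t remainder = 0;

    if (qs->quantum_end > qs->last_dispatch)
        remainder = qs->quantum_end - qs->last_dispatch;

    int64_t consumed = (int64_t)qs->quantum_remaining - remainder;

    /* A thread keeps its unused quantum only when it is allowed to. */
    qs->quantum_remaining = keep_quantum ? (uint32_t)remainder : 0;

    /* A tiny leftover is treated as expired but credited to the next quantum. */
    if (qs->quantum_remaining != 0 && qs->quantum_remaining < min_quantum)
        qs->quantum_remaining += initial_quantum;

    return consumed;
}

int
main(void)
{
    struct quantum_state qs = { .quantum_end = 1000, .last_dispatch = 940,
        .quantum_remaining = 100 };
    int64_t consumed = quantum_account(&qs, 1, 25, 100);
    printf("consumed=%lld remaining=%u\n", (long long)consumed, qs.quantum_remaining);
    return 0;
}
#endif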
/*
 *  thread_block_reason:
 *
 *  Forces a reschedule, blocking the caller if a wait
 *  has been asserted.
 *
 *  If a continuation is specified, then thread_invoke will
 *  attempt to discard the thread's kernel stack.  When the
 *  thread resumes, it will execute the continuation function
 *  on a new kernel stack.
 */
counter(mach_counter_t  c_thread_block_calls = 0;)

wait_result_t
thread_block_reason(
    thread_continue_t   continuation,
    void                *parameter,
    ast_t               reason)
{
    thread_t        self = current_thread();
    processor_t     processor;
    thread_t        new_thread;
    spl_t           s;

    counter(++c_thread_block_calls);

    s = splsched();

    processor = current_processor();

    /* If we're explicitly yielding, force a subsequent quantum */
    if (reason & AST_YIELD)
        processor->first_timeslice = FALSE;

    /* We're handling all scheduling AST's */
    ast_off(AST_SCHEDULING);

#if PROC_REF_DEBUG
    if ((continuation != NULL) && (self->task != kernel_task)) {
        if (uthread_get_proc_refcount(self->uthread) != 0) {
            panic("thread_block_reason with continuation uthread %p with uu_proc_refcount != 0", self->uthread);
        }
    }
#endif

    self->continuation = continuation;
    self->parameter = parameter;

    if (self->state & ~(TH_RUN | TH_IDLE)) {
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            MACHDBG_CODE(DBG_MACH_SCHED,MACH_BLOCK),
            reason, VM_KERNEL_UNSLIDE(continuation), 0, 0, 0);
    }

    do {
        thread_lock(self);
        new_thread = thread_select(self, processor, &reason);
        thread_unlock(self);
    } while (!thread_invoke(self, new_thread, reason));

    splx(s);

    return (self->wait_result);
}

/*
 *  thread_block:
 *
 *  Block the current thread if a wait has been asserted.
 */
wait_result_t
thread_block(
    thread_continue_t   continuation)
{
    return thread_block_reason(continuation, NULL, AST_NONE);
}

wait_result_t
thread_block_parameter(
    thread_continue_t   continuation,
    void                *parameter)
{
    return thread_block_reason(continuation, parameter, AST_NONE);
}
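
/*
 * Illustrative sketch (not compiled): the shape of continuation-based blocking
 * built on thread_block_parameter() above.  Instead of keeping a kernel stack
 * while waiting, the caller packages its post-wakeup work into a continuation;
 * thread_invoke() may then discard the stack, and the thread resumes in the
 * continuation on a fresh stack.  example_state and example_continue are
 * hypothetical names used only here.
 */
#if 0
struct example_state {
    int step;
};

static void
example_continue(void *parameter, wait_result_t wresult)
{
    struct example_state *state = (struct example_state *)parameter;

    /* Everything needed after the wait must come from 'parameter' or globals;
     * locals from before the block are gone along with the old stack. */
    if (wresult == THREAD_AWAKENED)
        state->step++;

    /* A continuation never returns; it blocks again or terminates. */
    thread_block_parameter(example_continue, state);
    /*NOTREACHED*/
}

static void
example_wait(struct example_state *state)
{
    /* The caller is assumed to have already asserted a wait on some event. */
    thread_block_parameter(example_continue, state);
    /*NOTREACHED*/
}
#endif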
/*
 *  thread_run:
 *
 *  Switch directly from the current thread to the
 *  new thread, handing off our quantum if appropriate.
 *
 *  New thread must be runnable, and not on a run queue.
 *
 *  Called at splsched.
 */
int
thread_run(
    thread_t            self,
    thread_continue_t   continuation,
    void                *parameter,
    thread_t            new_thread)
{
    ast_t reason = AST_HANDOFF;

    self->continuation = continuation;
    self->parameter = parameter;

    while (!thread_invoke(self, new_thread, reason)) {
        /* the handoff failed, so we have to fall back to the normal block path */
        processor_t processor = current_processor();

        reason = AST_NONE;

        thread_lock(self);
        new_thread = thread_select(self, processor, &reason);
        thread_unlock(self);
    }

    return (self->wait_result);
}

/*
 *  thread_continue:
 *
 *  Called at splsched when a thread first receives
 *  a new stack after a continuation.
 */
void
thread_continue(
    thread_t    thread)
{
    thread_t            self = current_thread();
    thread_continue_t   continuation;
    void                *parameter;

    DTRACE_SCHED(on__cpu);

    continuation = self->continuation;
    parameter = self->parameter;

#if KPERF
    kperf_on_cpu(self, continuation, NULL);
#endif

    thread_dispatch(thread, self);

    self->continuation = self->parameter = NULL;

#if INTERRUPT_MASKED_DEBUG
    /* Reset interrupt-masked spin debugging timeout */
    ml_spin_debug_clear(self);
#endif

    if (thread != THREAD_NULL)
        (void)spllo();

    TLOG(1, "thread_continue: calling call_continuation\n");
    call_continuation(continuation, parameter, self->wait_result);
    /*NOTREACHED*/
}
void
thread_quantum_init(thread_t thread)
{
    if (thread->sched_mode == TH_MODE_REALTIME) {
        thread->quantum_remaining = thread->realtime.computation;
    } else {
        thread->quantum_remaining = SCHED(initial_quantum_size)(thread);
    }
}

uint32_t
sched_timeshare_initial_quantum_size(thread_t thread)
{
    if ((thread != THREAD_NULL) && thread->th_sched_bucket == TH_BUCKET_SHARE_BG)
        return bg_quantum;
    else
        return std_quantum;
}

/*
 *  run_queue_init:
 *
 *  Initialize a run queue before first use.
 */
void
run_queue_init(
    run_queue_t     rq)
{
    rq->highq = NOPRI;
    for (u_int i = 0; i < BITMAP_LEN(NRQS); i++)
        rq->bitmap[i] = 0;
    rq->urgency = rq->count = 0;
    for (int i = 0; i < NRQS; i++)
        queue_init(&rq->queues[i]);
}
/*
 *  run_queue_dequeue:
 *
 *  Perform a dequeue operation on a run queue,
 *  and return the resulting thread.
 *
 *  The run queue must be locked (see thread_run_queue_remove()
 *  for more info), and not empty.
 */
thread_t
run_queue_dequeue(
    run_queue_t     rq,
    integer_t       options)
{
    thread_t    thread;
    queue_t     queue = &rq->queues[rq->highq];

    if (options & SCHED_HEADQ) {
        thread = qe_dequeue_head(queue, struct thread, runq_links);
    } else {
        thread = qe_dequeue_tail(queue, struct thread, runq_links);
    }

    assert(thread != THREAD_NULL);
    assert_thread_magic(thread);

    thread->runq = PROCESSOR_NULL;
    SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
    rq->count--;
    if (SCHED(priority_is_urgent)(rq->highq)) {
        rq->urgency--; assert(rq->urgency >= 0);
    }
    if (queue_empty(queue)) {
        bitmap_clear(rq->bitmap, rq->highq);
        rq->highq = bitmap_first(rq->bitmap, NRQS);
    }

    return thread;
}

/*
 *  run_queue_enqueue:
 *
 *  Perform an enqueue operation on a run queue.
 *
 *  The run queue must be locked (see thread_run_queue_remove()
 *  for more info).
 */
boolean_t
run_queue_enqueue(
    run_queue_t     rq,
    thread_t        thread,
    integer_t       options)
{
    queue_t     queue = &rq->queues[thread->sched_pri];
    boolean_t   result = FALSE;

    assert_thread_magic(thread);

    if (queue_empty(queue)) {
        enqueue_tail(queue, &thread->runq_links);

        rq_bitmap_set(rq->bitmap, thread->sched_pri);
        if (thread->sched_pri > rq->highq) {
            rq->highq = thread->sched_pri;
            result = TRUE;
        }
    } else {
        if (options & SCHED_TAILQ)
            enqueue_tail(queue, &thread->runq_links);
        else
            enqueue_head(queue, &thread->runq_links);
    }
    if (SCHED(priority_is_urgent)(thread->sched_pri))
        rq->urgency++;
    SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
    rq->count++;

    return (result);
}
/*
 *  run_queue_remove:
 *
 *  Remove a specific thread from a runqueue.
 *
 *  The run queue must be locked.
 */
void
run_queue_remove(
    run_queue_t     rq,
    thread_t        thread)
{
    assert(thread->runq != PROCESSOR_NULL);
    assert_thread_magic(thread);

    remqueue(&thread->runq_links);
    SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
    rq->count--;
    if (SCHED(priority_is_urgent)(thread->sched_pri)) {
        rq->urgency--; assert(rq->urgency >= 0);
    }

    if (queue_empty(&rq->queues[thread->sched_pri])) {
        /* update run queue status */
        bitmap_clear(rq->bitmap, thread->sched_pri);
        rq->highq = bitmap_first(rq->bitmap, NRQS);
    }

    thread->runq = PROCESSOR_NULL;
}
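
/*
 * Illustrative sketch (not part of the build): the bitmap-indexed priority run
 * queue idea used by run_queue_enqueue()/run_queue_dequeue()/run_queue_remove()
 * above, reduced to a standalone toy with 64 priority levels.  All names here
 * (toy_runq, toy_enqueue, ...) are hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define TOY_NRQS 64

struct toy_runq {
    uint64_t bitmap;            /* bit i set => queue i non-empty */
    int      counts[TOY_NRQS];  /* stand-in for the per-priority queues */
    int      highq;             /* highest non-empty priority, -1 if none */
};

static int
toy_highest(uint64_t bitmap)
{
    /* analogue of bitmap_first(): index of the most significant set bit */
    return bitmap ? 63 - __builtin_clzll(bitmap) : -1;
}

static void
toy_enqueue(struct toy_runq *rq, int pri)
{
    rq->counts[pri]++;
    rq->bitmap |= 1ULL << pri;
    if (pri > rq->highq)
        rq->highq = pri;
}

static int
toy_dequeue(struct toy_runq *rq)
{
    int pri = rq->highq;
    if (pri < 0)
        return -1;
    if (--rq->counts[pri] == 0) {
        rq->bitmap &= ~(1ULL << pri);
        rq->highq = toy_highest(rq->bitmap);  /* like rq->highq = bitmap_first(...) */
    }
    return pri;
}

int
main(void)
{
    struct toy_runq rq = { .highq = -1 };
    toy_enqueue(&rq, 31);
    toy_enqueue(&rq, 47);
    int a = toy_dequeue(&rq);
    int b = toy_dequeue(&rq);
    printf("%d %d %d\n", a, b, rq.highq);  /* 47 31 -1 */
    return 0;
}
#endif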
/* Assumes RT lock is not held, and acquires splsched/rt_lock itself */
void
sched_rtglobal_runq_scan(sched_update_scan_context_t scan_context)
{
    spl_t       s;
    thread_t    thread;

    processor_set_t pset = &pset0;

    s = splsched();
    rt_lock_lock(pset);

    qe_foreach_element_safe(thread, &pset->rt_runq.queue, runq_links) {
        if (thread->last_made_runnable_time < scan_context->earliest_rt_make_runnable_time) {
            scan_context->earliest_rt_make_runnable_time = thread->last_made_runnable_time;
        }
    }

    rt_lock_unlock(pset);
    splx(s);
}

int64_t
sched_rtglobal_runq_count_sum(void)
{
    return pset0.rt_runq.runq_stats.count_sum;
}

/*
 *  realtime_queue_insert:
 *
 *  Enqueue a thread for realtime execution.
 */
static boolean_t
realtime_queue_insert(processor_t processor, processor_set_t pset, thread_t thread)
{
    queue_t     queue       = &SCHED(rt_runq)(pset)->queue;
    uint64_t    deadline    = thread->realtime.deadline;
    boolean_t   preempt     = FALSE;

    rt_lock_lock(pset);

    if (queue_empty(queue)) {
        enqueue_tail(queue, &thread->runq_links);
        preempt = TRUE;
    } else {
        /* Insert into rt_runq in thread deadline order */
        queue_entry_t iter;
        qe_foreach(iter, queue) {
            thread_t iter_thread = qe_element(iter, struct thread, runq_links);
            assert_thread_magic(iter_thread);

            if (deadline < iter_thread->realtime.deadline) {
                if (iter == queue_first(queue))
                    preempt = TRUE;
                insque(&thread->runq_links, queue_prev(iter));
                break;
            } else if (iter == queue_last(queue)) {
                enqueue_tail(queue, &thread->runq_links);
                break;
            }
        }
    }

    thread->runq = processor;
    SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
    rt_runq_count_incr(pset);

    rt_lock_unlock(pset);

    return (preempt);
}
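
/*
 * Illustrative sketch (not compiled): the earliest-deadline-first ordering that
 * realtime_queue_insert() maintains, using a plain singly linked list instead
 * of the kernel queue primitives.  rt_node and rt_insert are hypothetical names.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct rt_node {
    uint64_t        deadline;
    struct rt_node *next;
};

/* Insert in deadline order; returns true when the new entry became the head,
 * which is the analogue of the "preempt" hint returned above. */
static bool
rt_insert(struct rt_node **head, struct rt_node *node)
{
    struct rt_node **link = head;

    while (*link != NULL && (*link)->deadline <= node->deadline)
        link = &(*link)->next;

    node->next = *link;
    *link = node;
    return (link == head);
}

int
main(void)
{
    struct rt_node *head = NULL;
    struct rt_node a = { .deadline = 300 }, b = { .deadline = 100 }, c = { .deadline = 200 };

    printf("%d", rt_insert(&head, &a));   /* 1: empty queue, would preempt  */
    printf("%d", rt_insert(&head, &b));   /* 1: new earliest deadline       */
    printf("%d\n", rt_insert(&head, &c)); /* 0: lands behind the head       */

    for (struct rt_node *n = head; n != NULL; n = n->next)
        printf(" %llu", (unsigned long long)n->deadline); /* 100 200 300 */
    printf("\n");
    return 0;
}
#endif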
/*
 *  realtime_setrun:
 *
 *  Dispatch a thread for realtime execution.
 *
 *  Thread must be locked.  Associated pset must
 *  be locked, and is returned unlocked.
 */
static void
realtime_setrun(
    processor_t     processor,
    thread_t        thread)
{
    processor_set_t pset = processor->processor_set;
    ast_t           preempt;

    sched_ipi_type_t ipi_type = SCHED_IPI_NONE;

    thread->chosen_processor = processor;

    /* <rdar://problem/15102234> */
    assert(thread->bound_processor == PROCESSOR_NULL);

    /*
     *  Dispatch directly onto idle processor.
     */
    if ( (thread->bound_processor == processor)
        && processor->state == PROCESSOR_IDLE) {
        re_queue_tail(&pset->active_queue, &processor->processor_queue);

        pset->active_processor_count++;
        sched_update_pset_load_average(pset);

        processor->next_thread = thread;
        processor_state_update_from_thread(processor, thread);
        processor->deadline = thread->realtime.deadline;
        processor->state = PROCESSOR_DISPATCHING;

        ipi_type = sched_ipi_action(processor, thread, true, SCHED_IPI_EVENT_BOUND_THR);
        pset_unlock(pset);
        sched_ipi_perform(processor, ipi_type);
        return;
    }

    if (processor->current_pri < BASEPRI_RTQUEUES)
        preempt = (AST_PREEMPT | AST_URGENT);
    else if (thread->realtime.deadline < processor->deadline)
        preempt = (AST_PREEMPT | AST_URGENT);
    else
        preempt = AST_NONE;

    realtime_queue_insert(processor, pset, thread);

    ipi_type = SCHED_IPI_NONE;
    if (preempt != AST_NONE) {
        if (processor->state == PROCESSOR_IDLE) {
            re_queue_tail(&pset->active_queue, &processor->processor_queue);

            pset->active_processor_count++;
            sched_update_pset_load_average(pset);

            processor->next_thread = THREAD_NULL;
            processor_state_update_from_thread(processor, thread);
            processor->deadline = thread->realtime.deadline;
            processor->state = PROCESSOR_DISPATCHING;
            if (processor == current_processor()) {
                ast_on(preempt);
            } else {
                ipi_type = sched_ipi_action(processor, thread, true, SCHED_IPI_EVENT_PREEMPT);
            }
        } else if (processor->state == PROCESSOR_DISPATCHING) {
            if ((processor->next_thread == THREAD_NULL) && ((processor->current_pri < thread->sched_pri) || (processor->deadline > thread->realtime.deadline))) {
                processor_state_update_from_thread(processor, thread);
                processor->deadline = thread->realtime.deadline;
            }
        } else {
            if (processor == current_processor()) {
                ast_on(preempt);
            } else {
                ipi_type = sched_ipi_action(processor, thread, false, SCHED_IPI_EVENT_PREEMPT);
            }
        }
    } else {
        /* Selected processor was too busy, just keep thread enqueued and let other processors drain it naturally. */
    }

    pset_unlock(pset);
    sched_ipi_perform(processor, ipi_type);
}
sched_ipi_type_t sched_ipi_deferred_policy(processor_set_t pset, processor_t dst,
    __unused sched_ipi_event_t event)
{
#if defined(CONFIG_SCHED_DEFERRED_AST)
    if (!bit_test(pset->pending_deferred_AST_cpu_mask, dst->cpu_id)) {
        return SCHED_IPI_DEFERRED;
    }
#else /* CONFIG_SCHED_DEFERRED_AST */
    panic("Request for deferred IPI on an unsupported platform; pset: %p CPU: %d", pset, dst->cpu_id);
#endif /* CONFIG_SCHED_DEFERRED_AST */
    return SCHED_IPI_NONE;
}

sched_ipi_type_t sched_ipi_action(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event)
{
    sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
    assert(dst != NULL);

    processor_set_t pset = dst->processor_set;
    if (current_processor() == dst) {
        return SCHED_IPI_NONE;
    }

    if (bit_test(pset->pending_AST_cpu_mask, dst->cpu_id)) {
        return SCHED_IPI_NONE;
    }

    ipi_type = SCHED(ipi_policy)(dst, thread, dst_idle, event);
    switch (ipi_type) {
    case SCHED_IPI_NONE:
        return SCHED_IPI_NONE;
#if defined(CONFIG_SCHED_DEFERRED_AST)
    case SCHED_IPI_DEFERRED:
        bit_set(pset->pending_deferred_AST_cpu_mask, dst->cpu_id);
        break;
#endif /* CONFIG_SCHED_DEFERRED_AST */
    default:
        bit_set(pset->pending_AST_cpu_mask, dst->cpu_id);
        break;
    }
    return ipi_type;
}
sched_ipi_type_t sched_ipi_policy(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event)
{
    sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
    boolean_t deferred_ipi_supported = false;
    processor_set_t pset = dst->processor_set;

#if defined(CONFIG_SCHED_DEFERRED_AST)
    deferred_ipi_supported = true;
#endif /* CONFIG_SCHED_DEFERRED_AST */

    switch (event) {
    case SCHED_IPI_EVENT_SPILL:
    case SCHED_IPI_EVENT_SMT_REBAL:
    case SCHED_IPI_EVENT_REBALANCE:
    case SCHED_IPI_EVENT_BOUND_THR:
        /*
         * The spill, SMT rebalance, rebalance and the bound thread
         * scenarios always use immediate IPIs.
         */
        ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
        break;
    case SCHED_IPI_EVENT_PREEMPT:
        /* In the preemption case, use immediate IPIs for RT threads */
        if (thread && (thread->sched_pri >= BASEPRI_RTQUEUES)) {
            ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
            break;
        }

        /*
         * For non-RT thread preemption:
         * if the core is active, use immediate IPIs;
         * if the core is idle, use deferred IPIs if supported, otherwise an immediate IPI.
         */
        if (deferred_ipi_supported && dst_idle) {
            return sched_ipi_deferred_policy(pset, dst, event);
        }
        ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
        break;
    default:
        panic("Unrecognized scheduler IPI event type %d", event);
    }
    assert(ipi_type != SCHED_IPI_NONE);
    return ipi_type;
}

void sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi)
{
    switch (ipi) {
    case SCHED_IPI_NONE:
        break;
    case SCHED_IPI_IDLE:
        machine_signal_idle(dst);
        break;
    case SCHED_IPI_IMMEDIATE:
        cause_ast_check(dst);
        break;
    case SCHED_IPI_DEFERRED:
        machine_signal_idle_deferred(dst);
        break;
    default:
        panic("Unrecognized scheduler IPI type: %d", ipi);
    }
}

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

boolean_t
priority_is_urgent(int priority)
{
    return bitmap_test(sched_preempt_pri, priority) ? TRUE : FALSE;
}

#endif /* CONFIG_SCHED_TIMESHARE_CORE */
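
/*
 * Illustrative sketch (not compiled): the decision table implemented by
 * sched_ipi_policy() above, restated as a small standalone function.  The enum
 * values and pick_ipi() are hypothetical stand-ins for the kernel types.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

enum toy_ipi   { TOY_IPI_NONE, TOY_IPI_IDLE, TOY_IPI_IMMEDIATE, TOY_IPI_DEFERRED };
enum toy_event { TOY_EV_SPILL, TOY_EV_SMT_REBAL, TOY_EV_REBALANCE, TOY_EV_BOUND_THR,
                 TOY_EV_PREEMPT };

static enum toy_ipi
pick_ipi(enum toy_event event, bool dst_idle, bool rt_thread, bool deferred_supported)
{
    switch (event) {
    case TOY_EV_SPILL:
    case TOY_EV_SMT_REBAL:
    case TOY_EV_REBALANCE:
    case TOY_EV_BOUND_THR:
        /* These events always use an immediate-style IPI. */
        return dst_idle ? TOY_IPI_IDLE : TOY_IPI_IMMEDIATE;
    case TOY_EV_PREEMPT:
        if (rt_thread)
            return dst_idle ? TOY_IPI_IDLE : TOY_IPI_IMMEDIATE;
        /* Non-RT preemption of an idle core may be deferred when supported. */
        if (deferred_supported && dst_idle)
            return TOY_IPI_DEFERRED;
        return dst_idle ? TOY_IPI_IDLE : TOY_IPI_IMMEDIATE;
    }
    return TOY_IPI_NONE;
}

int
main(void)
{
    printf("%d\n", pick_ipi(TOY_EV_PREEMPT, true, false, true));  /* 3 = deferred  */
    printf("%d\n", pick_ipi(TOY_EV_PREEMPT, false, true, true));  /* 2 = immediate */
    return 0;
}
#endif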
/*
 *  processor_setrun:
 *
 *  Dispatch a thread for execution on a
 *  processor.
 *
 *  Thread must be locked.  Associated pset must
 *  be locked, and is returned unlocked.
 */
static void
processor_setrun(
    processor_t     processor,
    thread_t        thread,
    integer_t       options)
{
    processor_set_t pset = processor->processor_set;
    ast_t           preempt;
    enum { eExitIdle, eInterruptRunning, eDoNothing } ipi_action = eDoNothing;

    sched_ipi_type_t ipi_type = SCHED_IPI_NONE;

    thread->chosen_processor = processor;

    /*
     *  Dispatch directly onto idle processor.
     */
    if ( (SCHED(direct_dispatch_to_idle_processors) ||
          thread->bound_processor == processor)
        && processor->state == PROCESSOR_IDLE) {

        re_queue_tail(&pset->active_queue, &processor->processor_queue);

        pset->active_processor_count++;
        sched_update_pset_load_average(pset);

        processor->next_thread = thread;
        processor_state_update_from_thread(processor, thread);
        processor->deadline = UINT64_MAX;
        processor->state = PROCESSOR_DISPATCHING;

        ipi_type = sched_ipi_action(processor, thread, true, SCHED_IPI_EVENT_BOUND_THR);
        pset_unlock(pset);
        sched_ipi_perform(processor, ipi_type);
        return;
    }

    /*
     *  Set preemption mode.
     */
#if defined(CONFIG_SCHED_DEFERRED_AST)
    /* TODO: Do we need to care about urgency (see rdar://problem/20136239)? */
#endif
    if (SCHED(priority_is_urgent)(thread->sched_pri) && thread->sched_pri > processor->current_pri)
        preempt = (AST_PREEMPT | AST_URGENT);
    else if (processor->active_thread && thread_eager_preemption(processor->active_thread))
        preempt = (AST_PREEMPT | AST_URGENT);
    else if ((thread->sched_mode == TH_MODE_TIMESHARE) && (thread->sched_pri < thread->base_pri)) {
        if (SCHED(priority_is_urgent)(thread->base_pri) && thread->sched_pri > processor->current_pri) {
            preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;
        } else {
            preempt = AST_NONE;
        }
    } else
        preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;

    SCHED(processor_enqueue)(processor, thread, options);
    sched_update_pset_load_average(pset);

    if (preempt != AST_NONE) {
        if (processor->state == PROCESSOR_IDLE) {
            re_queue_tail(&pset->active_queue, &processor->processor_queue);
            pset->active_processor_count++;
            processor->next_thread = THREAD_NULL;
            processor_state_update_from_thread(processor, thread);
            processor->deadline = UINT64_MAX;
            processor->state = PROCESSOR_DISPATCHING;
            ipi_action = eExitIdle;
        } else if (processor->state == PROCESSOR_DISPATCHING) {
            if ((processor->next_thread == THREAD_NULL) && (processor->current_pri < thread->sched_pri)) {
                processor_state_update_from_thread(processor, thread);
                processor->deadline = UINT64_MAX;
            }
        } else if ( (processor->state == PROCESSOR_RUNNING ||
                     processor->state == PROCESSOR_SHUTDOWN) &&
                    (thread->sched_pri >= processor->current_pri)) {
            ipi_action = eInterruptRunning;
        }
    } else {
        /*
         * New thread is not important enough to preempt what is running, but
         * special processor states may need special handling
         */
        if (processor->state == PROCESSOR_SHUTDOWN &&
            thread->sched_pri >= processor->current_pri) {
            ipi_action = eInterruptRunning;
        } else if (processor->state == PROCESSOR_IDLE) {
            re_queue_tail(&pset->active_queue, &processor->processor_queue);

            pset->active_processor_count++;
            // sched_update_pset_load_average(pset);

            processor->next_thread = THREAD_NULL;
            processor_state_update_from_thread(processor, thread);
            processor->deadline = UINT64_MAX;
            processor->state = PROCESSOR_DISPATCHING;

            ipi_action = eExitIdle;
        }
    }

    if (ipi_action != eDoNothing) {
        if (processor == current_processor()) {
            if (csw_check_locked(processor, pset, AST_NONE) != AST_NONE)
                ast_on(preempt);
        } else {
            sched_ipi_event_t event = (options & SCHED_REBALANCE) ? SCHED_IPI_EVENT_REBALANCE : SCHED_IPI_EVENT_PREEMPT;
            ipi_type = sched_ipi_action(processor, thread, (ipi_action == eExitIdle), event);
        }
    }
    pset_unlock(pset);
    sched_ipi_perform(processor, ipi_type);
}
/*
 *  choose_next_pset:
 *
 *  Return the next sibling pset containing
 *  available processors.
 *
 *  Returns the original pset if none other is
 *  available.
 */
static processor_set_t
choose_next_pset(
    processor_set_t     pset)
{
    processor_set_t     nset = pset;

    do {
        nset = next_pset(nset);
    } while (nset->online_processor_count < 1 && nset != pset);

    return (nset);
}
/*
 *  choose_processor:
 *
 *  Choose a processor for the thread, beginning at
 *  the pset.  Accepts an optional processor hint in
 *  the pset.
 *
 *  Returns a processor, possibly from a different pset.
 *
 *  The thread must be locked.  The pset must be locked,
 *  and the resulting pset is locked on return.
 */
processor_t
choose_processor(
    processor_set_t     pset,
    processor_t         processor,
    thread_t            thread)
{
    processor_set_t     nset, cset = pset;

    assert(thread->sched_pri <= BASEPRI_RTQUEUES);

    /*
     *  Prefer the hinted processor, when appropriate.
     */

    /* Fold last processor hint from secondary processor to its primary */
    if (processor != PROCESSOR_NULL) {
        processor = processor->processor_primary;
    }

    /*
     * Only consult platform layer if pset is active, which
     * it may not be in some cases when a multi-set system
     * is going to sleep.
     */
    if (pset->online_processor_count) {
        if ((processor == PROCESSOR_NULL) || (processor->processor_set == pset && processor->state == PROCESSOR_IDLE)) {
            processor_t mc_processor = machine_choose_processor(pset, processor);
            if (mc_processor != PROCESSOR_NULL)
                processor = mc_processor->processor_primary;
        }
    }

    /*
     * At this point, we may have a processor hint, and we may have
     * an initial starting pset. If the hint is not in the pset, or
     * if the hint is for a processor in an invalid state, discard
     * the hint.
     */
    if (processor != PROCESSOR_NULL) {
        if (processor->processor_set != pset) {
            processor = PROCESSOR_NULL;
        } else if (!processor->is_recommended) {
            processor = PROCESSOR_NULL;
        } else {
            switch (processor->state) {
            case PROCESSOR_START:
            case PROCESSOR_SHUTDOWN:
            case PROCESSOR_OFF_LINE:
                /*
                 * Hint is for a processor that cannot support running new threads.
                 */
                processor = PROCESSOR_NULL;
                break;
            case PROCESSOR_IDLE:
                /*
                 * Hint is for an idle processor. Assume it is no worse than any other
                 * idle processor. The platform layer had an opportunity to provide
                 * the "least cost idle" processor above.
                 */
                return (processor);
            case PROCESSOR_RUNNING:
            case PROCESSOR_DISPATCHING:
                /*
                 * Hint is for an active CPU. This fast-path allows
                 * realtime threads to preempt non-realtime threads
                 * to regain their previous executing processor.
                 */
                if ((thread->sched_pri >= BASEPRI_RTQUEUES) &&
                    (processor->current_pri < BASEPRI_RTQUEUES))
                    return (processor);

                /* Otherwise, use hint as part of search below */
                break;
            default:
                processor = PROCESSOR_NULL;
                break;
            }
        }
    }

    /*
     * Iterate through the processor sets to locate
     * an appropriate processor. Seed results with
     * a last-processor hint, if available, so that
     * a search must find something strictly better
     * to replace it.
     *
     * A primary/secondary pair of SMT processors are
     * "unpaired" if the primary is busy but its
     * corresponding secondary is idle (so the physical
     * core has full use of its resources).
     */

    integer_t lowest_priority = MAXPRI + 1;
    integer_t lowest_unpaired_primary_priority = MAXPRI + 1;
    integer_t lowest_count = INT_MAX;
    uint64_t  furthest_deadline = 1;
    processor_t lp_processor = PROCESSOR_NULL;
    processor_t lp_unpaired_primary_processor = PROCESSOR_NULL;
    processor_t lp_unpaired_secondary_processor = PROCESSOR_NULL;
    processor_t lc_processor = PROCESSOR_NULL;
    processor_t fd_processor = PROCESSOR_NULL;

    if (processor != PROCESSOR_NULL) {
        /* All other states should be enumerated above. */
        assert(processor->state == PROCESSOR_RUNNING || processor->state == PROCESSOR_DISPATCHING);

        lowest_priority = processor->current_pri;
        lp_processor = processor;

        if (processor->current_pri >= BASEPRI_RTQUEUES) {
            furthest_deadline = processor->deadline;
            fd_processor = processor;
        }

        lowest_count = SCHED(processor_runq_count)(processor);
        lc_processor = processor;
    }

    do {
        /*
         * Choose an idle processor, in pset traversal order
         */
        qe_foreach_element(processor, &cset->idle_queue, processor_queue) {
            if (processor->is_recommended)
                return processor;
        }

        /*
         * Otherwise, enumerate active and idle processors to find candidates
         * with lower priority/etc.
         */

        qe_foreach_element(processor, &cset->active_queue, processor_queue) {

            if (!processor->is_recommended) {
                continue;
            }

            integer_t cpri = processor->current_pri;
            if (cpri < lowest_priority) {
                lowest_priority = cpri;
                lp_processor = processor;
            }

            if ((cpri >= BASEPRI_RTQUEUES) && (processor->deadline > furthest_deadline)) {
                furthest_deadline = processor->deadline;
                fd_processor = processor;
            }

            integer_t ccount = SCHED(processor_runq_count)(processor);
            if (ccount < lowest_count) {
                lowest_count = ccount;
                lc_processor = processor;
            }
        }

        /*
         * For SMT configs, these idle secondary processors must have an active primary.
         * Otherwise the idle primary would have short-circuited the loop above.
         */
        qe_foreach_element(processor, &cset->idle_secondary_queue, processor_queue) {

            if (!processor->is_recommended) {
                continue;
            }

            processor_t cprimary = processor->processor_primary;

            /* If the primary processor is offline or starting up, it's not a candidate for this path */
            if (cprimary->state == PROCESSOR_RUNNING || cprimary->state == PROCESSOR_DISPATCHING) {
                integer_t primary_pri = cprimary->current_pri;

                if (primary_pri < lowest_unpaired_primary_priority) {
                    lowest_unpaired_primary_priority = primary_pri;
                    lp_unpaired_primary_processor = cprimary;
                    lp_unpaired_secondary_processor = processor;
                }
            }
        }

        if (thread->sched_pri >= BASEPRI_RTQUEUES) {

            /*
             * For realtime threads, the most important aspect is
             * scheduling latency, so we attempt to assign threads
             * to good preemption candidates (assuming an idle primary
             * processor was not available above).
             */

            if (thread->sched_pri > lowest_unpaired_primary_priority) {
                /* Move to end of active queue so that the next thread doesn't also pick it */
                re_queue_tail(&cset->active_queue, &lp_unpaired_primary_processor->processor_queue);
                return lp_unpaired_primary_processor;
            }
            if (thread->sched_pri > lowest_priority) {
                /* Move to end of active queue so that the next thread doesn't also pick it */
                re_queue_tail(&cset->active_queue, &lp_processor->processor_queue);
                return lp_processor;
            }
            if (thread->realtime.deadline < furthest_deadline)
                return fd_processor;

            /*
             * If all primary and secondary CPUs are busy with realtime
             * threads with deadlines earlier than us, move on to next
             * pset.
             */
        } else {

            if (thread->sched_pri > lowest_unpaired_primary_priority) {
                /* Move to end of active queue so that the next thread doesn't also pick it */
                re_queue_tail(&cset->active_queue, &lp_unpaired_primary_processor->processor_queue);
                return lp_unpaired_primary_processor;
            }
            if (thread->sched_pri > lowest_priority) {
                /* Move to end of active queue so that the next thread doesn't also pick it */
                re_queue_tail(&cset->active_queue, &lp_processor->processor_queue);
                return lp_processor;
            }

            /*
             * If all primary processors in this pset are running a higher
             * priority thread, move on to next pset. Only when we have
             * exhausted this search do we fall back to other heuristics.
             */
        }

        /*
         * Move onto the next processor set.
         */
        nset = next_pset(cset);

        if (nset != pset) {
            pset_unlock(cset);

            cset = nset;
            pset_lock(cset);
        }
    } while (nset != pset);

    /*
     * Make sure that we pick a running processor,
     * and that the correct processor set is locked.
     * Since we may have unlocked the candidate processor's
     * pset, it may have changed state.
     *
     * All primary processors are running a higher priority
     * thread, so the only options left are enqueuing on
     * the secondary processor that would perturb the least priority
     * primary, or the least busy primary.
     */
    do {
        /* lowest_priority is evaluated in the main loops above */
        if (lp_unpaired_secondary_processor != PROCESSOR_NULL) {
            processor = lp_unpaired_secondary_processor;
            lp_unpaired_secondary_processor = PROCESSOR_NULL;
        } else if (lc_processor != PROCESSOR_NULL) {
            processor = lc_processor;
            lc_processor = PROCESSOR_NULL;
        } else {
            /*
             * All processors are executing higher
             * priority threads, and the lowest_count
             * candidate was not usable
             */
            processor = master_processor;
        }

        /*
         * Check that the correct processor set is
         * returned locked.
         */
        if (cset != processor->processor_set) {
            pset_unlock(cset);
            cset = processor->processor_set;
            pset_lock(cset);
        }

        /*
         * We must verify that the chosen processor is still available.
         * master_processor is an exception, since we may need to preempt
         * a running thread on it during processor shutdown (for sleep),
         * and that thread needs to be enqueued on its runqueue to run
         * when the processor is restarted.
         */
        if (processor != master_processor && (processor->state == PROCESSOR_SHUTDOWN || processor->state == PROCESSOR_OFF_LINE))
            processor = PROCESSOR_NULL;

    } while (processor == PROCESSOR_NULL);

    if (processor->state == PROCESSOR_RUNNING) {
        re_queue_tail(&cset->active_queue, &processor->processor_queue);
    }

    return (processor);
}
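
/*
 * Illustrative sketch (not compiled): the candidate tracking that
 * choose_processor() performs while walking the active queue, reduced to a
 * single pass over an array of CPUs.  toy_cpu, toy_choice and toy_scan are
 * hypothetical names used only here.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define TOY_MAXPRI 127

struct toy_cpu {
    int      current_pri;
    int      runq_count;
    uint64_t deadline;          /* only meaningful for realtime work */
};

struct toy_choice {
    int      lowest_pri, lp_idx;    /* lowest-priority candidate */
    int      lowest_count, lc_idx;  /* least-loaded candidate    */
    uint64_t furthest_deadline;     /* realtime candidate        */
    int      fd_idx;
};

static struct toy_choice
toy_scan(const struct toy_cpu *cpus, int ncpus)
{
    struct toy_choice c = { TOY_MAXPRI + 1, -1, INT32_MAX, -1, 1, -1 };

    for (int i = 0; i < ncpus; i++) {
        if (cpus[i].current_pri < c.lowest_pri) {
            c.lowest_pri = cpus[i].current_pri;
            c.lp_idx = i;
        }
        if (cpus[i].runq_count < c.lowest_count) {
            c.lowest_count = cpus[i].runq_count;
            c.lc_idx = i;
        }
        if (cpus[i].deadline > c.furthest_deadline) {
            c.furthest_deadline = cpus[i].deadline;
            c.fd_idx = i;
        }
    }
    return c;
}

int
main(void)
{
    struct toy_cpu cpus[] = {
        { .current_pri = 80, .runq_count = 3, .deadline = 500 },
        { .current_pri = 31, .runq_count = 5, .deadline = 900 },
        { .current_pri = 47, .runq_count = 1, .deadline = 700 },
    };
    struct toy_choice c = toy_scan(cpus, 3);
    printf("lp=%d lc=%d fd=%d\n", c.lp_idx, c.lc_idx, c.fd_idx); /* lp=1 lc=2 fd=1 */
    return 0;
}
#endif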
/*
 *  thread_setrun:
 *
 *  Dispatch thread for execution, onto an idle
 *  processor or run queue, and signal a preemption
 *  as appropriate.
 *
 *  Thread must be locked.
 */
void
thread_setrun(
    thread_t        thread,
    integer_t       options)
{
    processor_t     processor;
    processor_set_t pset;

    assert((thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_TERMINATE|TH_TERMINATE2)) == TH_RUN);
    assert(thread->runq == PROCESSOR_NULL);

    /*
     *  Update priority if needed.
     */
    if (SCHED(can_update_priority)(thread))
        SCHED(update_priority)(thread);

    thread->sfi_class = sfi_thread_classify(thread);

    assert(thread->runq == PROCESSOR_NULL);

#if __SMP__
    if (thread->bound_processor == PROCESSOR_NULL) {
        /*
         *  Unbound case.
         */
        if (thread->affinity_set != AFFINITY_SET_NULL) {
            /*
             * Use affinity set policy hint.
             */
            pset = thread->affinity_set->aset_pset;
            pset_lock(pset);

            processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread);
            pset = processor->processor_set;

            SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE,
                (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0);
        } else if (thread->last_processor != PROCESSOR_NULL) {
            /*
             *  Simple (last processor) affinity case.
             */
            processor = thread->last_processor;
            pset = processor->processor_set;
            pset_lock(pset);
            processor = SCHED(choose_processor)(pset, processor, thread);
            pset = processor->processor_set;

            SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE,
                (uintptr_t)thread_tid(thread), thread->last_processor->cpu_id, processor->cpu_id, processor->state, 0);
        } else {
            /*
             *  No affinity case:
             *
             *  Utilize a per-task hint to spread threads
             *  among the available processor sets.
             */
            task_t task = thread->task;

            pset = task->pset_hint;
            if (pset == PROCESSOR_SET_NULL)
                pset = current_processor()->processor_set;

            pset = choose_next_pset(pset);
            pset_lock(pset);

            processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread);
            pset = processor->processor_set;
            task->pset_hint = pset;

            SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE,
                (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0);
        }
    } else {
        /*
         *  Bound case:
         *
         *  Unconditionally dispatch on the processor.
         */
        processor = thread->bound_processor;
        pset = processor->processor_set;
        pset_lock(pset);

        SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE,
            (uintptr_t)thread_tid(thread), (uintptr_t)-2, processor->cpu_id, processor->state, 0);
    }
#else /* !__SMP__ */
    /* Only one processor to choose */
    assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == master_processor);
    processor = master_processor;
    pset = processor->processor_set;
    pset_lock(pset);
#endif /* !__SMP__ */

    /*
     *  Dispatch the thread on the chosen processor.
     *  TODO: This should be based on sched_mode, not sched_pri
     */
    if (thread->sched_pri >= BASEPRI_RTQUEUES) {
        realtime_setrun(processor, thread);
    } else {
        processor_setrun(processor, thread, options);
        /* pset is now unlocked */
        if (thread->bound_processor == PROCESSOR_NULL) {
            SCHED(check_spill)(pset, thread);
        }
    }
}

processor_set_t
task_choose_pset(
    task_t      task)
{
    processor_set_t pset = task->pset_hint;

    if (pset != PROCESSOR_SET_NULL)
        pset = choose_next_pset(pset);

    return (pset);
}
/*
 *  Check for a preemption point in
 *  the current context.
 *
 *  Called at splsched with thread locked.
 */
ast_t
csw_check(
    processor_t     processor,
    ast_t           check_reason)
{
    processor_set_t pset = processor->processor_set;
    ast_t           result;

    pset_lock(pset);

    /* If we were sent a remote AST and interrupted a running processor, acknowledge it here with pset lock held */
    bit_clear(pset->pending_AST_cpu_mask, processor->cpu_id);

    result = csw_check_locked(processor, pset, check_reason);

    pset_unlock(pset);

    return result;
}

/*
 * Check for preemption at splsched with
 * pset and thread locked
 */
ast_t
csw_check_locked(
    processor_t     processor,
    processor_set_t pset,
    ast_t           check_reason)
{
    ast_t           result;
    thread_t        thread = processor->active_thread;

    if (processor->first_timeslice) {
        if (rt_runq_count(pset) > 0)
            return (check_reason | AST_PREEMPT | AST_URGENT);
    } else {
        if (rt_runq_count(pset) > 0) {
            if (BASEPRI_RTQUEUES > processor->current_pri)
                return (check_reason | AST_PREEMPT | AST_URGENT);
            else
                return (check_reason | AST_PREEMPT);
        }
    }

    result = SCHED(processor_csw_check)(processor);
    if (result != AST_NONE)
        return (check_reason | result | (thread_eager_preemption(thread) ? AST_URGENT : AST_NONE));

    /*
     * If the current thread is running on a processor that is no longer recommended, gently
     * (non-urgently) get to a point and then block, at which point thread_select() should
     * try to idle the processor and re-dispatch the thread to a recommended processor.
     */
    if (!processor->is_recommended) {
        return (check_reason | AST_PREEMPT);
    }

    /*
     * Same for avoid-processor
     *
     * TODO: Should these set AST_REBALANCE?
     */
    if (SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread)) {
        return (check_reason | AST_PREEMPT);
    }

    /*
     * Even though we could continue executing on this processor, a
     * secondary SMT core should try to shed load to another primary core.
     *
     * TODO: Should this do the same check that thread_select does? i.e.
     * if no bound threads target this processor, and idle primaries exist, preempt
     * The case of RT threads existing is already taken care of above
     * Consider Capri in this scenario.
     *
     * if (!SCHED(processor_bound_count)(processor) && !queue_empty(&pset->idle_queue))
     *
     * TODO: Alternatively - check if only primary is idle, or check if primary's pri is lower than mine.
     */

    if (processor->current_pri < BASEPRI_RTQUEUES &&
        processor->processor_primary != processor)
        return (check_reason | AST_PREEMPT);

    if (thread->state & TH_SUSP)
        return (check_reason | AST_PREEMPT);

#if CONFIG_SCHED_SFI
    /*
     * Current thread may not need to be preempted, but maybe needs
     * an SFI wait?
     */
    result = sfi_thread_needs_ast(thread, NULL);
    if (result != AST_NONE)
        return (check_reason | result);
#endif

    return (AST_NONE);
}
/*
 *  set_sched_pri:
 *
 *  Set the scheduled priority of the specified thread.
 *
 *  This may cause the thread to change queues.
 *
 *  Thread must be locked.
 */
void
set_sched_pri(
    thread_t        thread,
    int             new_priority)
{
    thread_t cthread = current_thread();
    boolean_t is_current_thread = (thread == cthread) ? TRUE : FALSE;
    int curgency, nurgency;
    uint64_t urgency_param1, urgency_param2;
    boolean_t removed_from_runq = FALSE;

    int old_priority = thread->sched_pri;

    /* If we're already at this priority, no need to mess with the runqueue */
    if (new_priority == old_priority)
        return;

    if (is_current_thread) {
        assert(thread->runq == PROCESSOR_NULL);
        curgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
    } else {
        removed_from_runq = thread_run_queue_remove(thread);
    }

    thread->sched_pri = new_priority;

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
        (uintptr_t)thread_tid(thread),
        thread->base_pri,
        thread->sched_pri,
        0, /* eventually, 'reason' */
        0);

    if (is_current_thread) {
        nurgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
        /*
         * set_sched_pri doesn't alter RT params. We expect direct base priority/QoS
         * class alterations from user space to occur relatively infrequently, hence
         * those are lazily handled. QoS classes have distinct priority bands, and QoS
         * inheritance is expected to involve priority changes.
         */
        uint64_t ctime = mach_approximate_time();
        if (nurgency != curgency) {
            thread_tell_urgency(nurgency, urgency_param1, urgency_param2, 0, thread);
        }
        machine_thread_going_on_core(thread, nurgency, 0, 0, ctime);
    }

    if (removed_from_runq)
        thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
    else if (thread->state & TH_RUN) {
        processor_t processor = thread->last_processor;

        if (is_current_thread) {
            processor_state_update_from_thread(processor, thread);

            /*
             * When dropping in priority, check if the thread no longer belongs on core.
             * If a thread raises its own priority, don't aggressively rebalance it.
             * <rdar://problem/31699165>
             */
            if (new_priority < old_priority) {
                ast_t preempt;

                if ((preempt = csw_check(processor, AST_NONE)) != AST_NONE)
                    ast_on(preempt);
            }
        } else if (processor != PROCESSOR_NULL && processor->active_thread == thread) {
            cause_ast_check(processor);
        }
    }
}
4271 * thread_run_queue_remove_for_handoff
4273 * Pull a thread or its (recursive) push target out of the runqueue
4274 * so that it is ready for thread_run()
4276 * Called at splsched
4278 * Returns the thread that was pulled or THREAD_NULL if no thread could be pulled.
4279 * This may be different than the thread that was passed in.
4282 thread_run_queue_remove_for_handoff(thread_t thread
) {
4284 thread_t pulled_thread
= THREAD_NULL
;
4286 thread_lock(thread
);
4289 * Check that the thread is not bound
4290 * to a different processor, and that realtime
4293 * Next, pull it off its run queue. If it
4294 * doesn't come, it's not eligible.
4297 processor_t processor
= current_processor();
4298 if (processor
->current_pri
< BASEPRI_RTQUEUES
&& thread
->sched_pri
< BASEPRI_RTQUEUES
&&
4299 (thread
->bound_processor
== PROCESSOR_NULL
|| thread
->bound_processor
== processor
)) {
4301 if (thread_run_queue_remove(thread
))
4302 pulled_thread
= thread
;
4305 thread_unlock(thread
);
4307 return pulled_thread
;
4311 * thread_run_queue_remove:
4313 * Remove a thread from its current run queue and
4314 * return TRUE if successful.
4316 * Thread must be locked.
4318 * If thread->runq is PROCESSOR_NULL, the thread will not re-enter the
4319 * run queues because the caller locked the thread. Otherwise
4320 * the thread is on a run queue, but could be chosen for dispatch
4321 * and removed by another processor under a different lock, which
4322 * will set thread->runq to PROCESSOR_NULL.
4324 * Hence the thread select path must not rely on anything that could
4325 * be changed under the thread lock after calling this function,
4326 * most importantly thread->sched_pri.
4329 thread_run_queue_remove(
4332 boolean_t removed
= FALSE
;
4333 processor_t processor
= thread
->runq
;
4335 if ((thread
->state
& (TH_RUN
|TH_WAIT
)) == TH_WAIT
) {
4336 /* Thread isn't runnable */
4337 assert(thread
->runq
== PROCESSOR_NULL
);
4341 if (processor
== PROCESSOR_NULL
) {
4343 * The thread is either not on the runq,
4344 * or is in the midst of being removed from the runq.
4346 * runq is set to NULL under the pset lock, not the thread
4347 * lock, so the thread may still be in the process of being dequeued
4348 * from the runq. It will wait in invoke for the thread lock to be
4355 if (thread
->sched_pri
< BASEPRI_RTQUEUES
) {
4356 return SCHED(processor_queue_remove
)(processor
, thread
);
4359 processor_set_t pset
= processor
->processor_set
;
4363 if (thread
->runq
!= PROCESSOR_NULL
) {
4365 * Thread is on the RT run queue and we have a lock on
4369 remqueue(&thread
->runq_links
);
4370 SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq
)(pset
)->runq_stats
, rt_runq_count(pset
));
4371 rt_runq_count_decr(pset
);
4373 thread
->runq
= PROCESSOR_NULL
;
4378 rt_lock_unlock(pset
);
4384 * Put the thread back where it goes after a thread_run_queue_remove
4386 * Thread must have been removed under the same thread lock hold
4388 * thread locked, at splsched
4391 thread_run_queue_reinsert(thread_t thread
, integer_t options
)
4393 assert(thread
->runq
== PROCESSOR_NULL
);
4394 assert(thread
->state
& (TH_RUN
));
4396 thread_setrun(thread
, options
);
4400 sys_override_cpu_throttle(int flag
)
4402 if (flag
== CPU_THROTTLE_ENABLE
)
4403 cpu_throttle_enabled
= 1;
4404 if (flag
== CPU_THROTTLE_DISABLE
)
4405 cpu_throttle_enabled
= 0;
4409 thread_get_urgency(thread_t thread
, uint64_t *arg1
, uint64_t *arg2
)
4411 if (thread
== NULL
|| (thread
->state
& TH_IDLE
)) {
4415 return (THREAD_URGENCY_NONE
);
4416 } else if (thread
->sched_mode
== TH_MODE_REALTIME
) {
4417 *arg1
= thread
->realtime
.period
;
4418 *arg2
= thread
->realtime
.deadline
;
4420 return (THREAD_URGENCY_REAL_TIME
);
4421 } else if (cpu_throttle_enabled
&&
4422 ((thread
->sched_pri
<= MAXPRI_THROTTLE
) && (thread
->base_pri
<= MAXPRI_THROTTLE
))) {
4424 * Background urgency applied when thread priority is MAXPRI_THROTTLE or lower and thread is not promoted
4426 *arg1
= thread
->sched_pri
;
4427 *arg2
= thread
->base_pri
;
4429 return (THREAD_URGENCY_BACKGROUND
);
4431 /* For otherwise unclassified threads, report throughput QoS
4434 *arg1
= proc_get_effective_thread_policy(thread
, TASK_POLICY_THROUGH_QOS
);
4435 *arg2
= proc_get_effective_task_policy(thread
->task
, TASK_POLICY_THROUGH_QOS
);
4437 return (THREAD_URGENCY_NORMAL
);
4442 thread_get_perfcontrol_class(thread_t thread
)
4444 /* Special case handling */
4445 if (thread
->state
& TH_IDLE
)
4446 return PERFCONTROL_CLASS_IDLE
;
4447 if (thread
->task
== kernel_task
)
4448 return PERFCONTROL_CLASS_KERNEL
;
4449 if (thread
->sched_mode
== TH_MODE_REALTIME
)
4450 return PERFCONTROL_CLASS_REALTIME
;
4452 /* perfcontrol_class based on base_pri */
4453 if (thread
->base_pri
<= MAXPRI_THROTTLE
)
4454 return PERFCONTROL_CLASS_BACKGROUND
;
4455 else if (thread
->base_pri
<= BASEPRI_UTILITY
)
4456 return PERFCONTROL_CLASS_UTILITY
;
4457 else if (thread
->base_pri
<= BASEPRI_DEFAULT
)
4458 return PERFCONTROL_CLASS_NONUI
;
4459 else if (thread
->base_pri
<= BASEPRI_FOREGROUND
)
4460 return PERFCONTROL_CLASS_UI
;
4462 return PERFCONTROL_CLASS_ABOVEUI
;
4466 * This is the processor idle loop, which just looks for other threads
4467 * to execute. Processor idle threads invoke this without supplying a
4468 * current thread to idle without an asserted wait state.
4470 * Returns a the next thread to execute if dispatched directly.
4474 #define IDLE_KERNEL_DEBUG_CONSTANT(...) KERNEL_DEBUG_CONSTANT(__VA_ARGS__)
4476 #define IDLE_KERNEL_DEBUG_CONSTANT(...) do { } while(0)
4482 processor_t processor
)
4484 processor_set_t pset
= processor
->processor_set
;
4485 thread_t new_thread
;
4489 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
4490 MACHDBG_CODE(DBG_MACH_SCHED
,MACH_IDLE
) | DBG_FUNC_START
,
4491 (uintptr_t)thread_tid(thread
), 0, 0, 0, 0);
4493 SCHED_STATS_CPU_IDLE_START(processor
);
4495 timer_switch(&PROCESSOR_DATA(processor
, system_state
),
4496 mach_absolute_time(), &PROCESSOR_DATA(processor
, idle_state
));
4497 PROCESSOR_DATA(processor
, current_state
) = &PROCESSOR_DATA(processor
, idle_state
);
4501 * Ensure that updates to my processor and pset state,
4502 * made by the IPI source processor before sending the IPI,
4503 * are visible on this processor now (even though we don't
4504 * take the pset lock yet).
4506 atomic_thread_fence(memory_order_acquire
);
4508 if (processor
->state
!= PROCESSOR_IDLE
)
4510 if (bit_test(pset
->pending_AST_cpu_mask
, processor
->cpu_id
))
4512 #if defined(CONFIG_SCHED_DEFERRED_AST)
4513 if (bit_test(pset
->pending_deferred_AST_cpu_mask
, processor
->cpu_id
))
4516 if (processor
->is_recommended
) {
4517 if (rt_runq_count(pset
))
4520 if (SCHED(processor_bound_count
)(processor
))
4524 #if CONFIG_SCHED_IDLE_IN_PLACE
4525 if (thread
!= THREAD_NULL
) {
4526 /* Did idle-in-place thread wake up */
4527 if ((thread
->state
& (TH_WAIT
|TH_SUSP
)) != TH_WAIT
|| thread
->wake_active
)
4532 IDLE_KERNEL_DEBUG_CONSTANT(
4533 MACHDBG_CODE(DBG_MACH_SCHED
,MACH_IDLE
) | DBG_FUNC_NONE
, (uintptr_t)thread_tid(thread
), rt_runq_count(pset
), SCHED(processor_runq_count
)(processor
), -1, 0);
4535 machine_track_platform_idle(TRUE
);
4539 machine_track_platform_idle(FALSE
);
4543 IDLE_KERNEL_DEBUG_CONSTANT(
4544 MACHDBG_CODE(DBG_MACH_SCHED
,MACH_IDLE
) | DBG_FUNC_NONE
, (uintptr_t)thread_tid(thread
), rt_runq_count(pset
), SCHED(processor_runq_count
)(processor
), -2, 0);
4546 if (!SCHED(processor_queue_empty
)(processor
)) {
4547 /* Secondary SMT processors respond to directed wakeups
4548 * exclusively. Some platforms induce 'spurious' SMT wakeups.
4550 if (processor
->processor_primary
== processor
)
4555 timer_switch(&PROCESSOR_DATA(processor
, idle_state
),
4556 mach_absolute_time(), &PROCESSOR_DATA(processor
, system_state
));
4557 PROCESSOR_DATA(processor
, current_state
) = &PROCESSOR_DATA(processor
, system_state
);
4561 /* If we were sent a remote AST and came out of idle, acknowledge it here with pset lock held */
4562 bit_clear(pset
->pending_AST_cpu_mask
, processor
->cpu_id
);
4563 #if defined(CONFIG_SCHED_DEFERRED_AST)
4564 bit_clear(pset
->pending_deferred_AST_cpu_mask
, processor
->cpu_id
);
4567 state
= processor
->state
;
4568 if (state
== PROCESSOR_DISPATCHING
) {
4570 * Commmon case -- cpu dispatched.
4572 new_thread
= processor
->next_thread
;
4573 processor
->next_thread
= THREAD_NULL
;
4574 processor
->state
= PROCESSOR_RUNNING
;
4576 if ((new_thread
!= THREAD_NULL
) && (SCHED(processor_queue_has_priority
)(processor
, new_thread
->sched_pri
, FALSE
) ||
4577 (rt_runq_count(pset
) > 0)) ) {
4578 /* Something higher priority has popped up on the runqueue - redispatch this thread elsewhere */
4579 processor_state_update_idle(processor
);
4580 processor
->deadline
= UINT64_MAX
;
4584 thread_lock(new_thread
);
4585 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED
, MACH_REDISPATCH
), (uintptr_t)thread_tid(new_thread
), new_thread
->sched_pri
, rt_runq_count(pset
), 0, 0);
4586 thread_setrun(new_thread
, SCHED_HEADQ
);
4587 thread_unlock(new_thread
);
4589 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
4590 MACHDBG_CODE(DBG_MACH_SCHED
,MACH_IDLE
) | DBG_FUNC_END
,
4591 (uintptr_t)thread_tid(thread
), state
, 0, 0, 0);
4593 return (THREAD_NULL
);
4596 sched_update_pset_load_average(pset
);
4600 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
4601 MACHDBG_CODE(DBG_MACH_SCHED
,MACH_IDLE
) | DBG_FUNC_END
,
4602 (uintptr_t)thread_tid(thread
), state
, (uintptr_t)thread_tid(new_thread
), 0, 0);
4604 return (new_thread
);
4606 } else if (state
== PROCESSOR_IDLE
) {
4607 re_queue_tail(&pset
->active_queue
, &processor
->processor_queue
);
4609 pset
->active_processor_count
++;
4610 sched_update_pset_load_average(pset
);
4612 processor
->state
= PROCESSOR_RUNNING
;
4613 processor_state_update_idle(processor
);
4614 processor
->deadline
= UINT64_MAX
;
4616 } else if (state
== PROCESSOR_SHUTDOWN
) {
4618 * Going off-line. Force a
4621 if ((new_thread
= processor
->next_thread
) != THREAD_NULL
) {
4622 processor
->next_thread
= THREAD_NULL
;
4623 processor_state_update_idle(processor
);
4624 processor
->deadline
= UINT64_MAX
;
4628 thread_lock(new_thread
);
4629 thread_setrun(new_thread
, SCHED_HEADQ
);
4630 thread_unlock(new_thread
);
4632 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
4633 MACHDBG_CODE(DBG_MACH_SCHED
,MACH_IDLE
) | DBG_FUNC_END
,
4634 (uintptr_t)thread_tid(thread
), state
, 0, 0, 0);
4636 return (THREAD_NULL
);
4642 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
4643 MACHDBG_CODE(DBG_MACH_SCHED
,MACH_IDLE
) | DBG_FUNC_END
,
4644 (uintptr_t)thread_tid(thread
), state
, 0, 0, 0);
4646 return (THREAD_NULL
);
4650 * Each processor has a dedicated thread which
4651 * executes the idle loop when there is no suitable
4657 processor_t processor
= current_processor();
4658 thread_t new_thread
;
4660 new_thread
= processor_idle(THREAD_NULL
, processor
);
4661 if (new_thread
!= THREAD_NULL
) {
4662 thread_run(processor
->idle_thread
, (thread_continue_t
)idle_thread
, NULL
, new_thread
);
4666 thread_block((thread_continue_t
)idle_thread
);
4672 processor_t processor
)
4674 kern_return_t result
;
4677 char name
[MAXTHREADNAMESIZE
];
4679 result
= kernel_thread_create((thread_continue_t
)idle_thread
, NULL
, MAXPRI_KERNEL
, &thread
);
4680 if (result
!= KERN_SUCCESS
)
4683 snprintf(name
, sizeof(name
), "idle #%d", processor
->cpu_id
);
4684 thread_set_thread_name(thread
, name
);
4687 thread_lock(thread
);
4688 thread
->bound_processor
= processor
;
4689 processor
->idle_thread
= thread
;
4690 thread
->sched_pri
= thread
->base_pri
= IDLEPRI
;
4691 thread
->state
= (TH_RUN
| TH_IDLE
);
4692 thread
->options
|= TH_OPT_IDLE_THREAD
;
4693 thread_unlock(thread
);
4696 thread_deallocate(thread
);
4698 return (KERN_SUCCESS
);
4704 * Kicks off scheduler services.
4706 * Called at splsched.
4711 kern_return_t result
;
4714 simple_lock_init(&sched_vm_group_list_lock
, 0);
4716 #if __arm__ || __arm64__
4717 simple_lock_init(&sched_recommended_cores_lock
, 0);
4718 #endif /* __arm__ || __arm64__ */
4720 result
= kernel_thread_start_priority((thread_continue_t
)sched_init_thread
,
4721 (void *)SCHED(maintenance_continuation
), MAXPRI_KERNEL
, &thread
);
4722 if (result
!= KERN_SUCCESS
)
4723 panic("sched_startup");
4725 thread_deallocate(thread
);
4727 assert_thread_magic(thread
);
4730 * Yield to the sched_init_thread once, to
4731 * initialize our own thread after being switched
4734 * The current thread is the only other thread
4735 * active at this point.
4737 thread_block(THREAD_CONTINUE_NULL
);
4741 static _Atomic
uint64_t sched_perfcontrol_callback_deadline
;
4742 #endif /* __arm64__ */
4745 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
4747 static volatile uint64_t sched_maintenance_deadline
;
4748 static uint64_t sched_tick_last_abstime
;
4749 static uint64_t sched_tick_delta
;
4750 uint64_t sched_tick_max_delta
;
4754 * sched_init_thread:
4756 * Perform periodic bookkeeping functions about ten
4760 sched_timeshare_maintenance_continue(void)
4762 uint64_t sched_tick_ctime
, late_time
;
4764 struct sched_update_scan_context scan_context
= {
4765 .earliest_bg_make_runnable_time
= UINT64_MAX
,
4766 .earliest_normal_make_runnable_time
= UINT64_MAX
,
4767 .earliest_rt_make_runnable_time
= UINT64_MAX
4770 sched_tick_ctime
= mach_absolute_time();
4772 if (__improbable(sched_tick_last_abstime
== 0)) {
4773 sched_tick_last_abstime
= sched_tick_ctime
;
4775 sched_tick_delta
= 1;
4777 late_time
= sched_tick_ctime
- sched_tick_last_abstime
;
4778 sched_tick_delta
= late_time
/ sched_tick_interval
;
4779 /* Ensure a delta of 1, since the interval could be slightly
4780 * smaller than the sched_tick_interval due to dispatch
4783 sched_tick_delta
= MAX(sched_tick_delta
, 1);
4785 /* In the event interrupt latencies or platform
4786 * idle events that advanced the timebase resulted
4787 * in periods where no threads were dispatched,
4788 * cap the maximum "tick delta" at SCHED_TICK_MAX_DELTA
4791 sched_tick_delta
= MIN(sched_tick_delta
, SCHED_TICK_MAX_DELTA
);
4793 sched_tick_last_abstime
= sched_tick_ctime
;
4794 sched_tick_max_delta
= MAX(sched_tick_delta
, sched_tick_max_delta
);
4797 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED
, MACH_SCHED_MAINTENANCE
)|DBG_FUNC_START
,
4798 sched_tick_delta
, late_time
, 0, 0, 0);
4800 /* Add a number of pseudo-ticks corresponding to the elapsed interval
4801 * This could be greater than 1 if substantial intervals where
4802 * all processors are idle occur, which rarely occurs in practice.
4805 sched_tick
+= sched_tick_delta
;
4810 * Compute various averages.
4812 compute_averages(sched_tick_delta
);
4815 * Scan the run queues for threads which
4816 * may need to be updated, and find the earliest runnable thread on the runqueue
4817 * to report its latency.
4819 SCHED(thread_update_scan
)(&scan_context
);
4821 SCHED(rt_runq_scan
)(&scan_context
);
4823 uint64_t ctime
= mach_absolute_time();
4825 uint64_t bg_max_latency
= (ctime
> scan_context
.earliest_bg_make_runnable_time
) ?
4826 ctime
- scan_context
.earliest_bg_make_runnable_time
: 0;
4828 uint64_t default_max_latency
= (ctime
> scan_context
.earliest_normal_make_runnable_time
) ?
4829 ctime
- scan_context
.earliest_normal_make_runnable_time
: 0;
4831 uint64_t realtime_max_latency
= (ctime
> scan_context
.earliest_rt_make_runnable_time
) ?
4832 ctime
- scan_context
.earliest_rt_make_runnable_time
: 0;
4834 machine_max_runnable_latency(bg_max_latency
, default_max_latency
, realtime_max_latency
);
4837 * Check to see if the special sched VM group needs attention.
4839 sched_vm_group_maintenance();
4841 #if __arm__ || __arm64__
4842 /* Check to see if the recommended cores failsafe is active */
4843 sched_recommended_cores_maintenance();
4844 #endif /* __arm__ || __arm64__ */
4847 #if DEBUG || DEVELOPMENT
4849 #include <i386/misc_protos.h>
4850 /* Check for long-duration interrupts */
4851 mp_interrupt_watchdog();
4852 #endif /* __x86_64__ */
4853 #endif /* DEBUG || DEVELOPMENT */
4855 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED
, MACH_SCHED_MAINTENANCE
) | DBG_FUNC_END
,
4856 sched_pri_shifts
[TH_BUCKET_SHARE_FG
], sched_pri_shifts
[TH_BUCKET_SHARE_BG
],
4857 sched_pri_shifts
[TH_BUCKET_SHARE_UT
], 0, 0);
4859 assert_wait((event_t
)sched_timeshare_maintenance_continue
, THREAD_UNINT
);
4860 thread_block((thread_continue_t
)sched_timeshare_maintenance_continue
);
4864 static uint64_t sched_maintenance_wakeups
;
4867 * Determine if the set of routines formerly driven by a maintenance timer
4868 * must be invoked, based on a deadline comparison. Signals the scheduler
4869 * maintenance thread on deadline expiration. Must be invoked at an interval
4870 * lower than the "sched_tick_interval", currently accomplished by
4871 * invocation via the quantum expiration timer and at context switch time.
4872 * Performance matters: this routine reuses a timestamp approximating the
4873 * current absolute time received from the caller, and should perform
4874 * no more than a comparison against the deadline in the common case.
4877 sched_timeshare_consider_maintenance(uint64_t ctime
) {
4878 uint64_t ndeadline
, deadline
= sched_maintenance_deadline
;
4880 if (__improbable(ctime
>= deadline
)) {
4881 if (__improbable(current_thread() == sched_maintenance_thread
))
4885 ndeadline
= ctime
+ sched_tick_interval
;
4887 if (__probable(__sync_bool_compare_and_swap(&sched_maintenance_deadline
, deadline
, ndeadline
))) {
4888 thread_wakeup((event_t
)sched_timeshare_maintenance_continue
);
4889 sched_maintenance_wakeups
++;
4894 uint64_t perf_deadline
= __c11_atomic_load(&sched_perfcontrol_callback_deadline
, memory_order_relaxed
);
4896 if (__improbable(perf_deadline
&& ctime
>= perf_deadline
)) {
4897 /* CAS in 0, if success, make callback. Otherwise let the next context switch check again. */
4898 if (__c11_atomic_compare_exchange_strong(&sched_perfcontrol_callback_deadline
, &perf_deadline
, 0,
4899 memory_order_relaxed
, memory_order_relaxed
)) {
4900 machine_perfcontrol_deadline_passed(perf_deadline
);
4903 #endif /* __arm64__ */
4907 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
4910 sched_init_thread(void (*continuation
)(void))
4912 thread_block(THREAD_CONTINUE_NULL
);
4914 thread_t thread
= current_thread();
4916 thread_set_thread_name(thread
, "sched_maintenance_thread");
4918 sched_maintenance_thread
= thread
;
4925 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
4928 * thread_update_scan / runq_scan:
4930 * Scan the run queues to account for timesharing threads
4931 * which need to be updated.
4933 * Scanner runs in two passes. Pass one squirrels likely
4934 * threads away in an array, pass two does the update.
4936 * This is necessary because the run queue is locked for
4937 * the candidate scan, but the thread is locked for the update.
4939 * Array should be sized to make forward progress, without
4940 * disabling preemption for long periods.
4943 #define THREAD_UPDATE_SIZE 128
4945 static thread_t thread_update_array
[THREAD_UPDATE_SIZE
];
4946 static uint32_t thread_update_count
= 0;
4948 /* Returns TRUE if thread was added, FALSE if thread_update_array is full */
4950 thread_update_add_thread(thread_t thread
)
4952 if (thread_update_count
== THREAD_UPDATE_SIZE
)
4955 thread_update_array
[thread_update_count
++] = thread
;
4956 thread_reference_internal(thread
);
4961 thread_update_process_threads(void)
4963 assert(thread_update_count
<= THREAD_UPDATE_SIZE
);
4965 for (uint32_t i
= 0 ; i
< thread_update_count
; i
++) {
4966 thread_t thread
= thread_update_array
[i
];
4967 assert_thread_magic(thread
);
4968 thread_update_array
[i
] = THREAD_NULL
;
4970 spl_t s
= splsched();
4971 thread_lock(thread
);
4972 if (!(thread
->state
& (TH_WAIT
)) && thread
->sched_stamp
!= sched_tick
) {
4973 SCHED(update_priority
)(thread
);
4975 thread_unlock(thread
);
4978 thread_deallocate(thread
);
4981 thread_update_count
= 0;
4985 * Scan a runq for candidate threads.
4987 * Returns TRUE if retry is needed.
4992 sched_update_scan_context_t scan_context
)
4994 int count
= runq
->count
;
5002 for (queue_index
= bitmap_first(runq
->bitmap
, NRQS
);
5004 queue_index
= bitmap_next(runq
->bitmap
, queue_index
)) {
5007 queue_t queue
= &runq
->queues
[queue_index
];
5009 qe_foreach_element(thread
, queue
, runq_links
) {
5011 assert_thread_magic(thread
);
5013 if (thread
->sched_stamp
!= sched_tick
&&
5014 thread
->sched_mode
== TH_MODE_TIMESHARE
) {
5015 if (thread_update_add_thread(thread
) == FALSE
)
5019 if (cpu_throttle_enabled
&& ((thread
->sched_pri
<= MAXPRI_THROTTLE
) && (thread
->base_pri
<= MAXPRI_THROTTLE
))) {
5020 if (thread
->last_made_runnable_time
< scan_context
->earliest_bg_make_runnable_time
) {
5021 scan_context
->earliest_bg_make_runnable_time
= thread
->last_made_runnable_time
;
5024 if (thread
->last_made_runnable_time
< scan_context
->earliest_normal_make_runnable_time
) {
5025 scan_context
->earliest_normal_make_runnable_time
= thread
->last_made_runnable_time
;
5035 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
5038 thread_eager_preemption(thread_t thread
)
5040 return ((thread
->sched_flags
& TH_SFLAG_EAGERPREEMPT
) != 0);
5044 thread_set_eager_preempt(thread_t thread
)
5048 ast_t ast
= AST_NONE
;
5051 p
= current_processor();
5053 thread_lock(thread
);
5054 thread
->sched_flags
|= TH_SFLAG_EAGERPREEMPT
;
5056 if (thread
== current_thread()) {
5058 ast
= csw_check(p
, AST_NONE
);
5059 thread_unlock(thread
);
5060 if (ast
!= AST_NONE
) {
5061 (void) thread_block_reason(THREAD_CONTINUE_NULL
, NULL
, ast
);
5064 p
= thread
->last_processor
;
5066 if (p
!= PROCESSOR_NULL
&& p
->state
== PROCESSOR_RUNNING
&&
5067 p
->active_thread
== thread
) {
5071 thread_unlock(thread
);
5078 thread_clear_eager_preempt(thread_t thread
)
5083 thread_lock(thread
);
5085 thread
->sched_flags
&= ~TH_SFLAG_EAGERPREEMPT
;
5087 thread_unlock(thread
);
5092 * Scheduling statistics
5095 sched_stats_handle_csw(processor_t processor
, int reasons
, int selfpri
, int otherpri
)
5097 struct processor_sched_statistics
*stats
;
5098 boolean_t to_realtime
= FALSE
;
5100 stats
= &processor
->processor_data
.sched_stats
;
5103 if (otherpri
>= BASEPRI_REALTIME
) {
5104 stats
->rt_sched_count
++;
5108 if ((reasons
& AST_PREEMPT
) != 0) {
5109 stats
->preempt_count
++;
5111 if (selfpri
>= BASEPRI_REALTIME
) {
5112 stats
->preempted_rt_count
++;
5116 stats
->preempted_by_rt_count
++;
5123 sched_stats_handle_runq_change(struct runq_stats
*stats
, int old_count
)
5125 uint64_t timestamp
= mach_absolute_time();
5127 stats
->count_sum
+= (timestamp
- stats
->last_change_timestamp
) * old_count
;
5128 stats
->last_change_timestamp
= timestamp
;
5132 * For calls from assembly code
5134 #undef thread_wakeup
5143 thread_wakeup_with_result(x
, THREAD_AWAKENED
);
5147 preemption_enabled(void)
5149 return (get_preemption_level() == 0 && ml_get_interrupts_enabled());
5153 sched_timer_deadline_tracking_init(void) {
5154 nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT
, &timer_deadline_tracking_bin_1
);
5155 nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT
, &timer_deadline_tracking_bin_2
);
5158 #if __arm__ || __arm64__
5160 uint32_t perfcontrol_requested_recommended_cores
= ALL_CORES_RECOMMENDED
;
5161 uint32_t perfcontrol_requested_recommended_core_count
= MAX_CPUS
;
5162 boolean_t perfcontrol_failsafe_active
= FALSE
;
5164 uint64_t perfcontrol_failsafe_maintenance_runnable_time
;
5165 uint64_t perfcontrol_failsafe_activation_time
;
5166 uint64_t perfcontrol_failsafe_deactivation_time
;
5168 /* data covering who likely caused it and how long they ran */
5169 #define FAILSAFE_NAME_LEN 33 /* (2*MAXCOMLEN)+1 from size of p_name */
5170 char perfcontrol_failsafe_name
[FAILSAFE_NAME_LEN
];
5171 int perfcontrol_failsafe_pid
;
5172 uint64_t perfcontrol_failsafe_tid
;
5173 uint64_t perfcontrol_failsafe_thread_timer_at_start
;
5174 uint64_t perfcontrol_failsafe_thread_timer_last_seen
;
5175 uint32_t perfcontrol_failsafe_recommended_at_trigger
;
5178 * Perf controller calls here to update the recommended core bitmask.
5179 * If the failsafe is active, we don't immediately apply the new value.
5180 * Instead, we store the new request and use it after the failsafe deactivates.
5182 * If the failsafe is not active, immediately apply the update.
5184 * No scheduler locks are held, no other locks are held that scheduler might depend on,
5185 * interrupts are enabled
5187 * currently prototype is in osfmk/arm/machine_routines.h
5190 sched_perfcontrol_update_recommended_cores(uint32_t recommended_cores
)
5192 assert(preemption_enabled());
5194 spl_t s
= splsched();
5195 simple_lock(&sched_recommended_cores_lock
);
5197 perfcontrol_requested_recommended_cores
= recommended_cores
;
5198 perfcontrol_requested_recommended_core_count
= __builtin_popcountll(recommended_cores
);
5200 if (perfcontrol_failsafe_active
== FALSE
)
5201 sched_update_recommended_cores(perfcontrol_requested_recommended_cores
);
5203 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
5204 MACHDBG_CODE(DBG_MACH_SCHED
,MACH_REC_CORES_FAILSAFE
) | DBG_FUNC_NONE
,
5205 perfcontrol_requested_recommended_cores
,
5206 sched_maintenance_thread
->last_made_runnable_time
, 0, 0, 0);
5208 simple_unlock(&sched_recommended_cores_lock
);
5213 * Consider whether we need to activate the recommended cores failsafe
5215 * Called from quantum timer interrupt context of a realtime thread
5216 * No scheduler locks are held, interrupts are disabled
5219 sched_consider_recommended_cores(uint64_t ctime
, thread_t cur_thread
)
5222 * Check if a realtime thread is starving the system
5223 * and bringing up non-recommended cores would help
5225 * TODO: Is this the correct check for recommended == possible cores?
5226 * TODO: Validate the checks without the relevant lock are OK.
5229 if (__improbable(perfcontrol_failsafe_active
== TRUE
)) {
5230 /* keep track of how long the responsible thread runs */
5232 simple_lock(&sched_recommended_cores_lock
);
5234 if (perfcontrol_failsafe_active
== TRUE
&&
5235 cur_thread
->thread_id
== perfcontrol_failsafe_tid
) {
5236 perfcontrol_failsafe_thread_timer_last_seen
= timer_grab(&cur_thread
->user_timer
) +
5237 timer_grab(&cur_thread
->system_timer
);
5240 simple_unlock(&sched_recommended_cores_lock
);
5242 /* we're already trying to solve the problem, so bail */
5246 /* The failsafe won't help if there are no more processors to enable */
5247 if (__probable(perfcontrol_requested_recommended_core_count
>= processor_count
))
5250 uint64_t too_long_ago
= ctime
- perfcontrol_failsafe_starvation_threshold
;
5252 /* Use the maintenance thread as our canary in the coal mine */
5253 thread_t m_thread
= sched_maintenance_thread
;
5255 /* If it doesn't look bad, nothing to see here */
5256 if (__probable(m_thread
->last_made_runnable_time
>= too_long_ago
))
5259 /* It looks bad, take the lock to be sure */
5260 thread_lock(m_thread
);
5262 if (m_thread
->runq
== PROCESSOR_NULL
||
5263 (m_thread
->state
& (TH_RUN
|TH_WAIT
)) != TH_RUN
||
5264 m_thread
->last_made_runnable_time
>= too_long_ago
) {
5266 * Maintenance thread is either on cpu or blocked, and
5267 * therefore wouldn't benefit from more cores
5269 thread_unlock(m_thread
);
5273 uint64_t maintenance_runnable_time
= m_thread
->last_made_runnable_time
;
5275 thread_unlock(m_thread
);
5278 * There are cores disabled at perfcontrol's recommendation, but the
5279 * system is so overloaded that the maintenance thread can't run.
5280 * That likely means that perfcontrol can't run either, so it can't fix
5281 * the recommendation. We have to kick in a failsafe to keep from starving.
5283 * When the maintenance thread has been starved for too long,
5284 * ignore the recommendation from perfcontrol and light up all the cores.
5286 * TODO: Consider weird states like boot, sleep, or debugger
5289 simple_lock(&sched_recommended_cores_lock
);
5291 if (perfcontrol_failsafe_active
== TRUE
) {
5292 simple_unlock(&sched_recommended_cores_lock
);
5296 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
5297 MACHDBG_CODE(DBG_MACH_SCHED
,MACH_REC_CORES_FAILSAFE
) | DBG_FUNC_START
,
5298 perfcontrol_requested_recommended_cores
, maintenance_runnable_time
, 0, 0, 0);
5300 perfcontrol_failsafe_active
= TRUE
;
5301 perfcontrol_failsafe_activation_time
= mach_absolute_time();
5302 perfcontrol_failsafe_maintenance_runnable_time
= maintenance_runnable_time
;
5303 perfcontrol_failsafe_recommended_at_trigger
= perfcontrol_requested_recommended_cores
;
5305 /* Capture some data about who screwed up (assuming that the thread on core is at fault) */
5306 task_t task
= cur_thread
->task
;
5307 perfcontrol_failsafe_pid
= task_pid(task
);
5308 strlcpy(perfcontrol_failsafe_name
, proc_name_address(task
->bsd_info
), sizeof(perfcontrol_failsafe_name
));
5310 perfcontrol_failsafe_tid
= cur_thread
->thread_id
;
5312 /* Blame the thread for time it has run recently */
5313 uint64_t recent_computation
= (ctime
- cur_thread
->computation_epoch
) + cur_thread
->computation_metered
;
5315 uint64_t last_seen
= timer_grab(&cur_thread
->user_timer
) + timer_grab(&cur_thread
->system_timer
);
5317 /* Compute the start time of the bad behavior in terms of the thread's on core time */
5318 perfcontrol_failsafe_thread_timer_at_start
= last_seen
- recent_computation
;
5319 perfcontrol_failsafe_thread_timer_last_seen
= last_seen
;
5321 /* Ignore the previously recommended core configuration */
5322 sched_update_recommended_cores(ALL_CORES_RECOMMENDED
);
5324 simple_unlock(&sched_recommended_cores_lock
);
5328 * Now that our bacon has been saved by the failsafe, consider whether to turn it off
5330 * Runs in the context of the maintenance thread, no locks held
5333 sched_recommended_cores_maintenance(void)
5335 /* Common case - no failsafe, nothing to be done here */
5336 if (__probable(perfcontrol_failsafe_active
== FALSE
))
5339 uint64_t ctime
= mach_absolute_time();
5341 boolean_t print_diagnostic
= FALSE
;
5342 char p_name
[FAILSAFE_NAME_LEN
] = "";
5344 spl_t s
= splsched();
5345 simple_lock(&sched_recommended_cores_lock
);
5347 /* Check again, under the lock, to avoid races */
5348 if (perfcontrol_failsafe_active
== FALSE
)
5352 * Ensure that the other cores get another few ticks to run some threads
5353 * If we don't have this hysteresis, the maintenance thread is the first
5354 * to run, and then it immediately kills the other cores
5356 if ((ctime
- perfcontrol_failsafe_activation_time
) < perfcontrol_failsafe_starvation_threshold
)
5359 /* Capture some diagnostic state under the lock so we can print it out later */
5361 int pid
= perfcontrol_failsafe_pid
;
5362 uint64_t tid
= perfcontrol_failsafe_tid
;
5364 uint64_t thread_usage
= perfcontrol_failsafe_thread_timer_last_seen
-
5365 perfcontrol_failsafe_thread_timer_at_start
;
5366 uint32_t rec_cores_before
= perfcontrol_failsafe_recommended_at_trigger
;
5367 uint32_t rec_cores_after
= perfcontrol_requested_recommended_cores
;
5368 uint64_t failsafe_duration
= ctime
- perfcontrol_failsafe_activation_time
;
5369 strlcpy(p_name
, perfcontrol_failsafe_name
, sizeof(p_name
));
5371 print_diagnostic
= TRUE
;
5373 /* Deactivate the failsafe and reinstate the requested recommendation settings */
5375 perfcontrol_failsafe_deactivation_time
= ctime
;
5376 perfcontrol_failsafe_active
= FALSE
;
5378 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
5379 MACHDBG_CODE(DBG_MACH_SCHED
,MACH_REC_CORES_FAILSAFE
) | DBG_FUNC_END
,
5380 perfcontrol_requested_recommended_cores
, failsafe_duration
, 0, 0, 0);
5382 sched_update_recommended_cores(perfcontrol_requested_recommended_cores
);
5385 simple_unlock(&sched_recommended_cores_lock
);
5388 if (print_diagnostic
) {
5389 uint64_t failsafe_duration_ms
= 0, thread_usage_ms
= 0;
5391 absolutetime_to_nanoseconds(failsafe_duration
, &failsafe_duration_ms
);
5392 failsafe_duration_ms
= failsafe_duration_ms
/ NSEC_PER_MSEC
;
5394 absolutetime_to_nanoseconds(thread_usage
, &thread_usage_ms
);
5395 thread_usage_ms
= thread_usage_ms
/ NSEC_PER_MSEC
;
5397 printf("recommended core failsafe kicked in for %lld ms "
5398 "likely due to %s[%d] thread 0x%llx spending "
5399 "%lld ms on cpu at realtime priority - "
5400 "new recommendation: 0x%x -> 0x%x\n",
5401 failsafe_duration_ms
, p_name
, pid
, tid
, thread_usage_ms
,
5402 rec_cores_before
, rec_cores_after
);
5407 * Apply a new recommended cores mask to the processors it affects
5408 * Runs after considering failsafes and such
5410 * Iterate over processors and update their ->is_recommended field.
5411 * If a processor is running, we let it drain out at its next
5412 * quantum expiration or blocking point. If a processor is idle, there
5413 * may be more work for it to do, so IPI it.
5415 * interrupts disabled, sched_recommended_cores_lock is held
5418 sched_update_recommended_cores(uint32_t recommended_cores
)
5420 processor_set_t pset
, nset
;
5421 processor_t processor
;
5422 uint64_t needs_exit_idle_mask
= 0x0;
5424 processor
= processor_list
;
5425 pset
= processor
->processor_set
;
5427 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
5428 MACHDBG_CODE(DBG_MACH_SCHED
,MACH_SCHED_UPDATE_REC_CORES
) | DBG_FUNC_START
,
5429 recommended_cores
, perfcontrol_failsafe_active
, 0, 0, 0);
5431 if (__builtin_popcount(recommended_cores
) == 0) {
5432 recommended_cores
|= 0x1U
; /* add boot processor or we hang */
5435 /* First set recommended cores */
5439 nset
= processor
->processor_set
;
5446 pset
->recommended_bitmask
= recommended_cores
;
5448 if (recommended_cores
& (1ULL << processor
->cpu_id
)) {
5449 processor
->is_recommended
= TRUE
;
5451 if (processor
->state
== PROCESSOR_IDLE
) {
5452 if (processor
->processor_primary
== processor
) {
5453 re_queue_head(&pset
->idle_queue
, &processor
->processor_queue
);
5455 re_queue_head(&pset
->idle_secondary_queue
, &processor
->processor_queue
);
5457 if (processor
!= current_processor()) {
5458 needs_exit_idle_mask
|= (1ULL << processor
->cpu_id
);
5462 } while ((processor
= processor
->processor_list
) != NULL
);
5465 /* Now shutdown not recommended cores */
5466 processor
= processor_list
;
5467 pset
= processor
->processor_set
;
5472 nset
= processor
->processor_set
;
5479 if (!(recommended_cores
& (1ULL << processor
->cpu_id
))) {
5480 processor
->is_recommended
= FALSE
;
5481 if (processor
->state
== PROCESSOR_IDLE
) {
5482 re_queue_head(&pset
->unused_queue
, &processor
->processor_queue
);
5484 SCHED(processor_queue_shutdown
)(processor
);
5487 SCHED(rt_queue_shutdown
)(processor
);
5491 } while ((processor
= processor
->processor_list
) != NULL
);
5494 /* Issue all pending IPIs now that the pset lock has been dropped */
5495 for (int cpuid
= lsb_first(needs_exit_idle_mask
); cpuid
>= 0; cpuid
= lsb_next(needs_exit_idle_mask
, cpuid
)) {
5496 processor
= processor_array
[cpuid
];
5497 machine_signal_idle(processor
);
5500 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
5501 MACHDBG_CODE(DBG_MACH_SCHED
,MACH_SCHED_UPDATE_REC_CORES
) | DBG_FUNC_END
,
5502 needs_exit_idle_mask
, 0, 0, 0, 0);
5504 #endif /* __arm__ || __arm64__ */
5506 void thread_set_options(uint32_t thopt
) {
5508 thread_t t
= current_thread();
5513 t
->options
|= thopt
;
5519 void thread_set_pending_block_hint(thread_t thread
, block_hint_t block_hint
) {
5520 thread
->pending_block_hint
= block_hint
;
5523 uint32_t qos_max_parallelism(int qos
, uint64_t options
)
5525 return SCHED(qos_max_parallelism
)(qos
, options
);
5528 uint32_t sched_qos_max_parallelism(__unused
int qos
, uint64_t options
)
5530 host_basic_info_data_t hinfo
;
5531 mach_msg_type_number_t count
= HOST_BASIC_INFO_COUNT
;
5532 /* Query the machine layer for core information */
5533 __assert_only kern_return_t kret
= host_info(host_self(), HOST_BASIC_INFO
,
5534 (host_info_t
)&hinfo
, &count
);
5535 assert(kret
== KERN_SUCCESS
);
5537 /* We would not want multiple realtime threads running on the
5538 * same physical core; even for SMT capable machines.
5540 if (options
& QOS_PARALLELISM_REALTIME
) {
5541 return hinfo
.physical_cpu
;
5544 if (options
& QOS_PARALLELISM_COUNT_LOGICAL
) {
5545 return hinfo
.logical_cpu
;
5547 return hinfo
.physical_cpu
;
5554 * Set up or replace old timer with new timer
5556 * Returns true if canceled old timer, false if it did not
5559 sched_perfcontrol_update_callback_deadline(uint64_t new_deadline
)
5562 * Exchange deadline for new deadline, if old deadline was nonzero,
5563 * then I cancelled the callback, otherwise I didn't
5566 uint64_t old_deadline
= __c11_atomic_load(&sched_perfcontrol_callback_deadline
,
5567 memory_order_relaxed
);
5570 while (!__c11_atomic_compare_exchange_weak(&sched_perfcontrol_callback_deadline
,
5571 &old_deadline
, new_deadline
,
5572 memory_order_relaxed
, memory_order_relaxed
));
5575 /* now old_deadline contains previous value, which might not be the same if it raced */
5577 return (old_deadline
!= 0) ? TRUE
: FALSE
;
5580 #endif /* __arm64__ */
5583 sched_get_pset_load_average(processor_set_t pset
)
5585 return pset
->load_average
>> (PSET_LOAD_NUMERATOR_SHIFT
- PSET_LOAD_FRACTIONAL_SHIFT
);
5589 sched_update_pset_load_average(processor_set_t pset
)
5594 qe_foreach(iter
, &pset
->active_queue
) {
5597 assertf(count
== pset
->active_processor_count
, "count %d pset->active_processor_count %d\n", count
, pset
->active_processor_count
);
5600 int load
= ((pset
->active_processor_count
+ pset
->pset_runq
.count
+ rt_runq_count(pset
)) << PSET_LOAD_NUMERATOR_SHIFT
);
5601 int new_load_average
= (pset
->load_average
+ load
) >> 1;
5603 pset
->load_average
= new_load_average
;
5605 #if (DEVELOPMENT || DEBUG)