/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	kern/sched_prim.c
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Scheduling primitives
 *
 */
#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <mach/thread_act.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>
#include <machine/machlimits.h>

#ifdef CONFIG_MACH_APPROXIMATE_TIME
#include <machine/commpage.h>
#endif

#include <kern/kern_types.h>
#include <kern/backtrace.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/ledger.h>
#include <kern/timer_queue.h>
#include <kern/waitq.h>
#include <kern/policy_internal.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <mach/sdt.h>

#include <sys/kdebug.h>
#include <kperf/kperf.h>
#include <kern/kpc.h>

#include <kern/pms.h>
120 struct rt_queue rt_runq
;
122 uintptr_t sched_thread_on_rt_queue
= (uintptr_t)0xDEAFBEE0;
124 /* Lock RT runq, must be done with interrupts disabled (under splsched()) */
126 decl_simple_lock_data(static,rt_lock
);
127 #define rt_lock_init() simple_lock_init(&rt_lock, 0)
128 #define rt_lock_lock() simple_lock(&rt_lock)
129 #define rt_lock_unlock() simple_unlock(&rt_lock)
131 #define rt_lock_init() do { } while(0)
132 #define rt_lock_lock() do { } while(0)
133 #define rt_lock_unlock() do { } while(0)
136 #define DEFAULT_PREEMPTION_RATE 100 /* (1/s) */
137 int default_preemption_rate
= DEFAULT_PREEMPTION_RATE
;
139 #define DEFAULT_BG_PREEMPTION_RATE 400 /* (1/s) */
140 int default_bg_preemption_rate
= DEFAULT_BG_PREEMPTION_RATE
;
142 #define MAX_UNSAFE_QUANTA 800
143 int max_unsafe_quanta
= MAX_UNSAFE_QUANTA
;
145 #define MAX_POLL_QUANTA 2
146 int max_poll_quanta
= MAX_POLL_QUANTA
;
148 #define SCHED_POLL_YIELD_SHIFT 4 /* 1/16 */
149 int sched_poll_yield_shift
= SCHED_POLL_YIELD_SHIFT
;
151 uint64_t max_poll_computation
;
153 uint64_t max_unsafe_computation
;
154 uint64_t sched_safe_duration
;
156 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
158 uint32_t std_quantum
;
159 uint32_t min_std_quantum
;
162 uint32_t std_quantum_us
;
163 uint32_t bg_quantum_us
;
165 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
167 uint32_t thread_depress_time
;
168 uint32_t default_timeshare_computation
;
169 uint32_t default_timeshare_constraint
;
171 uint32_t max_rt_quantum
;
172 uint32_t min_rt_quantum
;
174 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
177 uint32_t sched_tick_interval
;
179 uint32_t sched_pri_shifts
[TH_BUCKET_MAX
];
180 uint32_t sched_fixed_shift
;
182 uint32_t sched_decay_usage_age_factor
= 1; /* accelerate 5/8^n usage aging */
184 /* Allow foreground to decay past default to resolve inversions */
185 #define DEFAULT_DECAY_BAND_LIMIT ((BASEPRI_FOREGROUND - BASEPRI_DEFAULT) + 2)
186 int sched_pri_decay_band_limit
= DEFAULT_DECAY_BAND_LIMIT
;
188 /* Defaults for timer deadline profiling */
189 #define TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT 2000000 /* Timers with deadlines <=
191 #define TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT 5000000 /* Timers with deadlines
194 uint64_t timer_deadline_tracking_bin_1
;
195 uint64_t timer_deadline_tracking_bin_2
;
197 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
199 thread_t sched_maintenance_thread
;
202 uint64_t sched_one_second_interval
;
#if defined(CONFIG_SCHED_TIMESHARE_CORE)

/* Forward declarations for timeshare-core initialization helpers. */
static void load_shift_init(void);
static void preempt_pri_init(void);

#endif /* CONFIG_SCHED_TIMESHARE_CORE */
213 static thread_t
thread_select(
215 processor_t processor
,
218 #if CONFIG_SCHED_IDLE_IN_PLACE
219 static thread_t
thread_select_idle(
221 processor_t processor
);
224 thread_t
processor_idle(
226 processor_t processor
);
229 csw_check_locked( processor_t processor
,
230 processor_set_t pset
,
233 static void processor_setrun(
234 processor_t processor
,
239 sched_realtime_init(void);
242 sched_realtime_timebase_init(void);
245 sched_timer_deadline_tracking_init(void);
248 extern int debug_task
;
249 #define TLOG(a, fmt, args...) if(debug_task & a) kprintf(fmt, ## args)
251 #define TLOG(a, fmt, args...) do {} while (0)
255 thread_bind_internal(
257 processor_t processor
);
260 sched_vm_group_maintenance(void);
262 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
263 int8_t sched_load_shifts
[NRQS
];
264 bitmap_t sched_preempt_pri
[BITMAP_LEN(NRQS
)];
265 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
267 const struct sched_dispatch_table
*sched_current_dispatch
= NULL
;
270 * Statically allocate a buffer to hold the longest possible
271 * scheduler description string, as currently implemented.
272 * bsd/kern/kern_sysctl.c has a corresponding definition in bsd/
273 * to export to userspace via sysctl(3). If either version
274 * changes, update the other.
276 * Note that in addition to being an upper bound on the strings
277 * in the kernel, it's also an exact parameter to PE_get_default(),
278 * which interrogates the device tree on some platforms. That
279 * API requires the caller know the exact size of the device tree
280 * property, so we need both a legacy size (32) and the current size
281 * (48) to deal with old and new device trees. The device tree property
282 * is similarly padded to a fixed size so that the same kernel image
283 * can run on multiple devices with different schedulers configured
284 * in the device tree.
286 char sched_string
[SCHED_STRING_MAX_LENGTH
];
288 uint32_t sched_debug_flags
;
290 /* Global flag which indicates whether Background Stepper Context is enabled */
291 static int cpu_throttle_enabled
= 1;
296 char sched_arg
[SCHED_STRING_MAX_LENGTH
] = { '\0' };
298 /* Check for runtime selection of the scheduler algorithm */
299 if (!PE_parse_boot_argn("sched", sched_arg
, sizeof (sched_arg
))) {
300 /* If no boot-args override, look in device tree */
301 if (!PE_get_default("kern.sched", sched_arg
,
302 SCHED_STRING_MAX_LENGTH
)) {
308 if (!PE_parse_boot_argn("sched_pri_decay_limit", &sched_pri_decay_band_limit
, sizeof(sched_pri_decay_band_limit
))) {
309 /* No boot-args, check in device tree */
310 if (!PE_get_default("kern.sched_pri_decay_limit",
311 &sched_pri_decay_band_limit
,
312 sizeof(sched_pri_decay_band_limit
))) {
313 /* Allow decay all the way to normal limits */
314 sched_pri_decay_band_limit
= DEFAULT_DECAY_BAND_LIMIT
;
318 kprintf("Setting scheduler priority decay band limit %d\n", sched_pri_decay_band_limit
);
320 if (strlen(sched_arg
) > 0) {
322 /* Allow pattern below */
323 #if defined(CONFIG_SCHED_TRADITIONAL)
324 } else if (0 == strcmp(sched_arg
, sched_traditional_dispatch
.sched_name
)) {
325 sched_current_dispatch
= &sched_traditional_dispatch
;
326 } else if (0 == strcmp(sched_arg
, sched_traditional_with_pset_runqueue_dispatch
.sched_name
)) {
327 sched_current_dispatch
= &sched_traditional_with_pset_runqueue_dispatch
;
329 #if defined(CONFIG_SCHED_PROTO)
330 } else if (0 == strcmp(sched_arg
, sched_proto_dispatch
.sched_name
)) {
331 sched_current_dispatch
= &sched_proto_dispatch
;
333 #if defined(CONFIG_SCHED_GRRR)
334 } else if (0 == strcmp(sched_arg
, sched_grrr_dispatch
.sched_name
)) {
335 sched_current_dispatch
= &sched_grrr_dispatch
;
337 #if defined(CONFIG_SCHED_MULTIQ)
338 } else if (0 == strcmp(sched_arg
, sched_multiq_dispatch
.sched_name
)) {
339 sched_current_dispatch
= &sched_multiq_dispatch
;
340 } else if (0 == strcmp(sched_arg
, sched_dualq_dispatch
.sched_name
)) {
341 sched_current_dispatch
= &sched_dualq_dispatch
;
344 #if defined(CONFIG_SCHED_TRADITIONAL)
345 printf("Unrecognized scheduler algorithm: %s\n", sched_arg
);
346 printf("Scheduler: Using instead: %s\n", sched_traditional_with_pset_runqueue_dispatch
.sched_name
);
347 sched_current_dispatch
= &sched_traditional_with_pset_runqueue_dispatch
;
349 panic("Unrecognized scheduler algorithm: %s", sched_arg
);
352 kprintf("Scheduler: Runtime selection of %s\n", SCHED(sched_name
));
354 #if defined(CONFIG_SCHED_MULTIQ)
355 sched_current_dispatch
= &sched_multiq_dispatch
;
356 #elif defined(CONFIG_SCHED_TRADITIONAL)
357 sched_current_dispatch
= &sched_traditional_with_pset_runqueue_dispatch
;
358 #elif defined(CONFIG_SCHED_PROTO)
359 sched_current_dispatch
= &sched_proto_dispatch
;
360 #elif defined(CONFIG_SCHED_GRRR)
361 sched_current_dispatch
= &sched_grrr_dispatch
;
363 #error No default scheduler implementation
365 kprintf("Scheduler: Default of %s\n", SCHED(sched_name
));
368 strlcpy(sched_string
, SCHED(sched_name
), sizeof(sched_string
));
370 if (PE_parse_boot_argn("sched_debug", &sched_debug_flags
, sizeof(sched_debug_flags
))) {
371 kprintf("Scheduler: Debug flags 0x%08x\n", sched_debug_flags
);
375 sched_realtime_init();
377 sched_timer_deadline_tracking_init();
379 SCHED(pset_init
)(&pset0
);
380 SCHED(processor_init
)(master_processor
);
384 sched_timebase_init(void)
388 clock_interval_to_absolutetime_interval(1, NSEC_PER_SEC
, &abstime
);
389 sched_one_second_interval
= abstime
;
391 SCHED(timebase_init
)();
392 sched_realtime_timebase_init();
395 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
398 sched_timeshare_init(void)
401 * Calculate the timeslicing quantum
404 if (default_preemption_rate
< 1)
405 default_preemption_rate
= DEFAULT_PREEMPTION_RATE
;
406 std_quantum_us
= (1000 * 1000) / default_preemption_rate
;
408 printf("standard timeslicing quantum is %d us\n", std_quantum_us
);
410 if (default_bg_preemption_rate
< 1)
411 default_bg_preemption_rate
= DEFAULT_BG_PREEMPTION_RATE
;
412 bg_quantum_us
= (1000 * 1000) / default_bg_preemption_rate
;
414 printf("standard background quantum is %d us\n", bg_quantum_us
);
422 sched_timeshare_timebase_init(void)
427 /* standard timeslicing quantum */
428 clock_interval_to_absolutetime_interval(
429 std_quantum_us
, NSEC_PER_USEC
, &abstime
);
430 assert((abstime
>> 32) == 0 && (uint32_t)abstime
!= 0);
431 std_quantum
= (uint32_t)abstime
;
433 /* smallest remaining quantum (250 us) */
434 clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC
, &abstime
);
435 assert((abstime
>> 32) == 0 && (uint32_t)abstime
!= 0);
436 min_std_quantum
= (uint32_t)abstime
;
438 /* quantum for background tasks */
439 clock_interval_to_absolutetime_interval(
440 bg_quantum_us
, NSEC_PER_USEC
, &abstime
);
441 assert((abstime
>> 32) == 0 && (uint32_t)abstime
!= 0);
442 bg_quantum
= (uint32_t)abstime
;
444 /* scheduler tick interval */
445 clock_interval_to_absolutetime_interval(USEC_PER_SEC
>> SCHED_TICK_SHIFT
,
446 NSEC_PER_USEC
, &abstime
);
447 assert((abstime
>> 32) == 0 && (uint32_t)abstime
!= 0);
448 sched_tick_interval
= (uint32_t)abstime
;
451 * Compute conversion factor from usage to
452 * timesharing priorities with 5/8 ** n aging.
454 abstime
= (abstime
* 5) / 3;
455 for (shift
= 0; abstime
> BASEPRI_DEFAULT
; ++shift
)
457 sched_fixed_shift
= shift
;
459 for (uint32_t i
= 0 ; i
< TH_BUCKET_MAX
; i
++)
460 sched_pri_shifts
[i
] = INT8_MAX
;
462 max_unsafe_computation
= ((uint64_t)max_unsafe_quanta
) * std_quantum
;
463 sched_safe_duration
= 2 * ((uint64_t)max_unsafe_quanta
) * std_quantum
;
465 max_poll_computation
= ((uint64_t)max_poll_quanta
) * std_quantum
;
466 thread_depress_time
= 1 * std_quantum
;
467 default_timeshare_computation
= std_quantum
/ 2;
468 default_timeshare_constraint
= std_quantum
;
472 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
475 sched_realtime_init(void)
480 queue_init(&rt_runq
.queue
);
484 sched_realtime_timebase_init(void)
488 /* smallest rt computaton (50 us) */
489 clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC
, &abstime
);
490 assert((abstime
>> 32) == 0 && (uint32_t)abstime
!= 0);
491 min_rt_quantum
= (uint32_t)abstime
;
493 /* maximum rt computation (50 ms) */
494 clock_interval_to_absolutetime_interval(
495 50, 1000*NSEC_PER_USEC
, &abstime
);
496 assert((abstime
>> 32) == 0 && (uint32_t)abstime
!= 0);
497 max_rt_quantum
= (uint32_t)abstime
;
501 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
504 * Set up values for timeshare
508 load_shift_init(void)
510 int8_t k
, *p
= sched_load_shifts
;
513 uint32_t sched_decay_penalty
= 1;
515 if (PE_parse_boot_argn("sched_decay_penalty", &sched_decay_penalty
, sizeof (sched_decay_penalty
))) {
516 kprintf("Overriding scheduler decay penalty %u\n", sched_decay_penalty
);
519 if (PE_parse_boot_argn("sched_decay_usage_age_factor", &sched_decay_usage_age_factor
, sizeof (sched_decay_usage_age_factor
))) {
520 kprintf("Overriding scheduler decay usage age factor %u\n", sched_decay_usage_age_factor
);
523 if (sched_decay_penalty
== 0) {
525 * There is no penalty for timeshare threads for using too much
526 * CPU, so set all load shifts to INT8_MIN. Even under high load,
527 * sched_pri_shift will be >INT8_MAX, and there will be no
528 * penalty applied to threads (nor will sched_usage be updated per
531 for (i
= 0; i
< NRQS
; i
++) {
532 sched_load_shifts
[i
] = INT8_MIN
;
538 *p
++ = INT8_MIN
; *p
++ = 0;
541 * For a given system load "i", the per-thread priority
542 * penalty per quantum of CPU usage is ~2^k priority
543 * levels. "sched_decay_penalty" can cause more
544 * array entries to be filled with smaller "k" values
546 for (i
= 2, j
= 1 << sched_decay_penalty
, k
= 1; i
< NRQS
; ++k
) {
547 for (j
<<= 1; (i
< j
) && (i
< NRQS
); ++i
)
553 preempt_pri_init(void)
555 bitmap_t
*p
= sched_preempt_pri
;
557 for (int i
= BASEPRI_FOREGROUND
; i
< MINPRI_KERNEL
; ++i
)
560 for (int i
= BASEPRI_PREEMPT
; i
<= MAXPRI
; ++i
)
564 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
567 * Thread wait timer expiration.
574 thread_t thread
= p0
;
577 assert_thread_magic(thread
);
581 if (--thread
->wait_timer_active
== 0) {
582 if (thread
->wait_timer_is_set
) {
583 thread
->wait_timer_is_set
= FALSE
;
584 clear_wait_internal(thread
, THREAD_TIMED_OUT
);
587 thread_unlock(thread
);
594 * Unblock thread on wake up.
596 * Returns TRUE if the thread should now be placed on the runqueue.
598 * Thread must be locked.
600 * Called at splsched().
605 wait_result_t wresult
)
607 boolean_t ready_for_runq
= FALSE
;
608 thread_t cthread
= current_thread();
609 uint32_t new_run_count
;
614 thread
->wait_result
= wresult
;
617 * Cancel pending wait timer.
619 if (thread
->wait_timer_is_set
) {
620 if (timer_call_cancel(&thread
->wait_timer
))
621 thread
->wait_timer_active
--;
622 thread
->wait_timer_is_set
= FALSE
;
626 * Update scheduling state: not waiting,
629 thread
->state
&= ~(TH_WAIT
|TH_UNINT
);
631 if (!(thread
->state
& TH_RUN
)) {
632 thread
->state
|= TH_RUN
;
633 thread
->last_made_runnable_time
= mach_approximate_time();
635 ready_for_runq
= TRUE
;
637 (*thread
->sched_call
)(SCHED_CALL_UNBLOCK
, thread
);
639 /* Update the runnable thread count */
640 new_run_count
= sched_run_incr(thread
);
643 * Either the thread is idling in place on another processor,
644 * or it hasn't finished context switching yet.
646 #if CONFIG_SCHED_IDLE_IN_PLACE
647 if (thread
->state
& TH_IDLE
) {
648 processor_t processor
= thread
->last_processor
;
650 if (processor
!= current_processor())
651 machine_signal_idle(processor
);
654 assert((thread
->state
& TH_IDLE
) == 0);
657 * The run count is only dropped after the context switch completes
658 * and the thread is still waiting, so we should not run_incr here
660 new_run_count
= sched_run_buckets
[TH_BUCKET_RUN
];
665 * Calculate deadline for real-time threads.
667 if (thread
->sched_mode
== TH_MODE_REALTIME
) {
670 ctime
= mach_absolute_time();
671 thread
->realtime
.deadline
= thread
->realtime
.constraint
+ ctime
;
675 * Clear old quantum, fail-safe computation, etc.
677 thread
->quantum_remaining
= 0;
678 thread
->computation_metered
= 0;
679 thread
->reason
= AST_NONE
;
681 /* Obtain power-relevant interrupt and "platform-idle exit" statistics.
682 * We also account for "double hop" thread signaling via
683 * the thread callout infrastructure.
684 * DRK: consider removing the callout wakeup counters in the future
685 * they're present for verification at the moment.
687 boolean_t aticontext
, pidle
;
688 ml_get_power_state(&aticontext
, &pidle
);
690 if (__improbable(aticontext
&& !(thread_get_tag_internal(thread
) & THREAD_TAG_CALLOUT
))) {
691 ledger_credit(thread
->t_ledger
, task_ledgers
.interrupt_wakeups
, 1);
692 DTRACE_SCHED2(iwakeup
, struct thread
*, thread
, struct proc
*, thread
->task
->bsd_info
);
694 uint64_t ttd
= PROCESSOR_DATA(current_processor(), timer_call_ttd
);
697 if (ttd
<= timer_deadline_tracking_bin_1
)
698 thread
->thread_timer_wakeups_bin_1
++;
700 if (ttd
<= timer_deadline_tracking_bin_2
)
701 thread
->thread_timer_wakeups_bin_2
++;
705 ledger_credit(thread
->t_ledger
, task_ledgers
.platform_idle_wakeups
, 1);
708 } else if (thread_get_tag_internal(cthread
) & THREAD_TAG_CALLOUT
) {
709 if (cthread
->callout_woken_from_icontext
) {
710 ledger_credit(thread
->t_ledger
, task_ledgers
.interrupt_wakeups
, 1);
711 thread
->thread_callout_interrupt_wakeups
++;
712 if (cthread
->callout_woken_from_platform_idle
) {
713 ledger_credit(thread
->t_ledger
, task_ledgers
.platform_idle_wakeups
, 1);
714 thread
->thread_callout_platform_idle_wakeups
++;
717 cthread
->callout_woke_thread
= TRUE
;
721 if (thread_get_tag_internal(thread
) & THREAD_TAG_CALLOUT
) {
722 thread
->callout_woken_from_icontext
= aticontext
;
723 thread
->callout_woken_from_platform_idle
= pidle
;
724 thread
->callout_woke_thread
= FALSE
;
727 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
728 MACHDBG_CODE(DBG_MACH_SCHED
,MACH_MAKE_RUNNABLE
) | DBG_FUNC_NONE
,
729 (uintptr_t)thread_tid(thread
), thread
->sched_pri
, thread
->wait_result
,
730 sched_run_buckets
[TH_BUCKET_RUN
], 0);
732 DTRACE_SCHED2(wakeup
, struct thread
*, thread
, struct proc
*, thread
->task
->bsd_info
);
734 return (ready_for_runq
);
740 * Unblock and dispatch thread.
742 * thread lock held, IPC locks may be held.
743 * thread must have been pulled from wait queue under same lock hold.
744 * thread must have been waiting
746 * KERN_SUCCESS - Thread was set running
748 * TODO: This should return void
753 wait_result_t wresult
)
755 assert_thread_magic(thread
);
757 assert(thread
->at_safe_point
== FALSE
);
758 assert(thread
->wait_event
== NO_EVENT64
);
759 assert(thread
->waitq
== NULL
);
761 assert(!(thread
->state
& (TH_TERMINATE
|TH_TERMINATE2
)));
762 assert(thread
->state
& TH_WAIT
);
765 if (thread_unblock(thread
, wresult
)) {
766 #if SCHED_TRACE_THREAD_WAKEUPS
767 backtrace(&thread
->thread_wakeup_bt
[0],
768 (sizeof(thread
->thread_wakeup_bt
)/sizeof(uintptr_t)));
770 thread_setrun(thread
, SCHED_PREEMPT
| SCHED_TAILQ
);
773 return (KERN_SUCCESS
);
777 * Routine: thread_mark_wait_locked
779 * Mark a thread as waiting. If, given the circumstances,
780 * it doesn't want to wait (i.e. already aborted), then
781 * indicate that in the return value.
783 * at splsched() and thread is locked.
787 thread_mark_wait_locked(
789 wait_interrupt_t interruptible
)
791 boolean_t at_safe_point
;
793 assert(!(thread
->state
& (TH_WAIT
|TH_IDLE
|TH_UNINT
|TH_TERMINATE2
)));
796 * The thread may have certain types of interrupts/aborts masked
797 * off. Even if the wait location says these types of interrupts
798 * are OK, we have to honor mask settings (outer-scoped code may
799 * not be able to handle aborts at the moment).
801 if (interruptible
> (thread
->options
& TH_OPT_INTMASK
))
802 interruptible
= thread
->options
& TH_OPT_INTMASK
;
804 at_safe_point
= (interruptible
== THREAD_ABORTSAFE
);
806 if ( interruptible
== THREAD_UNINT
||
807 !(thread
->sched_flags
& TH_SFLAG_ABORT
) ||
809 (thread
->sched_flags
& TH_SFLAG_ABORTSAFELY
))) {
811 if ( !(thread
->state
& TH_TERMINATE
))
814 thread
->state
|= (interruptible
) ? TH_WAIT
: (TH_WAIT
| TH_UNINT
);
815 thread
->at_safe_point
= at_safe_point
;
816 return (thread
->wait_result
= THREAD_WAITING
);
819 if (thread
->sched_flags
& TH_SFLAG_ABORTSAFELY
)
820 thread
->sched_flags
&= ~TH_SFLAG_ABORTED_MASK
;
822 return (thread
->wait_result
= THREAD_INTERRUPTED
);
826 * Routine: thread_interrupt_level
828 * Set the maximum interruptible state for the
829 * current thread. The effective value of any
830 * interruptible flag passed into assert_wait
831 * will never exceed this.
833 * Useful for code that must not be interrupted,
834 * but which calls code that doesn't know that.
836 * The old interrupt level for the thread.
840 thread_interrupt_level(
841 wait_interrupt_t new_level
)
843 thread_t thread
= current_thread();
844 wait_interrupt_t result
= thread
->options
& TH_OPT_INTMASK
;
846 thread
->options
= (thread
->options
& ~TH_OPT_INTMASK
) | (new_level
& TH_OPT_INTMASK
);
852 * Check to see if an assert wait is possible, without actually doing one.
853 * This is used by debug code in locks and elsewhere to verify that it is
854 * always OK to block when trying to take a blocking lock (since waiting
855 * for the actual assert_wait to catch the case may make it hard to detect
859 assert_wait_possible(void)
865 if(debug_mode
) return TRUE
; /* Always succeed in debug mode */
868 thread
= current_thread();
870 return (thread
== NULL
|| waitq_wait_possible(thread
));
876 * Assert that the current thread is about to go to
877 * sleep until the specified event occurs.
882 wait_interrupt_t interruptible
)
884 if (__improbable(event
== NO_EVENT
))
885 panic("%s() called with NO_EVENT", __func__
);
887 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
888 MACHDBG_CODE(DBG_MACH_SCHED
, MACH_WAIT
)|DBG_FUNC_NONE
,
889 VM_KERNEL_UNSLIDE_OR_PERM(event
), 0, 0, 0, 0);
892 waitq
= global_eventq(event
);
893 return waitq_assert_wait64(waitq
, CAST_EVENT64_T(event
), interruptible
, TIMEOUT_WAIT_FOREVER
);
899 * Return the global waitq for the specified event
905 return global_eventq(event
);
911 wait_interrupt_t interruptible
,
913 uint32_t scale_factor
)
915 thread_t thread
= current_thread();
916 wait_result_t wresult
;
920 if (__improbable(event
== NO_EVENT
))
921 panic("%s() called with NO_EVENT", __func__
);
924 waitq
= global_eventq(event
);
929 clock_interval_to_deadline(interval
, scale_factor
, &deadline
);
931 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
932 MACHDBG_CODE(DBG_MACH_SCHED
, MACH_WAIT
)|DBG_FUNC_NONE
,
933 VM_KERNEL_UNSLIDE_OR_PERM(event
), interruptible
, deadline
, 0, 0);
935 wresult
= waitq_assert_wait64_locked(waitq
, CAST_EVENT64_T(event
),
937 TIMEOUT_URGENCY_SYS_NORMAL
,
938 deadline
, TIMEOUT_NO_LEEWAY
,
947 assert_wait_timeout_with_leeway(
949 wait_interrupt_t interruptible
,
950 wait_timeout_urgency_t urgency
,
953 uint32_t scale_factor
)
955 thread_t thread
= current_thread();
956 wait_result_t wresult
;
963 if (__improbable(event
== NO_EVENT
))
964 panic("%s() called with NO_EVENT", __func__
);
966 now
= mach_absolute_time();
967 clock_interval_to_absolutetime_interval(interval
, scale_factor
, &abstime
);
968 deadline
= now
+ abstime
;
970 clock_interval_to_absolutetime_interval(leeway
, scale_factor
, &slop
);
973 waitq
= global_eventq(event
);
978 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
979 MACHDBG_CODE(DBG_MACH_SCHED
, MACH_WAIT
)|DBG_FUNC_NONE
,
980 VM_KERNEL_UNSLIDE_OR_PERM(event
), interruptible
, deadline
, 0, 0);
982 wresult
= waitq_assert_wait64_locked(waitq
, CAST_EVENT64_T(event
),
984 urgency
, deadline
, slop
,
993 assert_wait_deadline(
995 wait_interrupt_t interruptible
,
998 thread_t thread
= current_thread();
999 wait_result_t wresult
;
1002 if (__improbable(event
== NO_EVENT
))
1003 panic("%s() called with NO_EVENT", __func__
);
1005 struct waitq
*waitq
;
1006 waitq
= global_eventq(event
);
1011 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
1012 MACHDBG_CODE(DBG_MACH_SCHED
, MACH_WAIT
)|DBG_FUNC_NONE
,
1013 VM_KERNEL_UNSLIDE_OR_PERM(event
), interruptible
, deadline
, 0, 0);
1015 wresult
= waitq_assert_wait64_locked(waitq
, CAST_EVENT64_T(event
),
1017 TIMEOUT_URGENCY_SYS_NORMAL
, deadline
,
1018 TIMEOUT_NO_LEEWAY
, thread
);
1019 waitq_unlock(waitq
);
1025 assert_wait_deadline_with_leeway(
1027 wait_interrupt_t interruptible
,
1028 wait_timeout_urgency_t urgency
,
1032 thread_t thread
= current_thread();
1033 wait_result_t wresult
;
1036 if (__improbable(event
== NO_EVENT
))
1037 panic("%s() called with NO_EVENT", __func__
);
1039 struct waitq
*waitq
;
1040 waitq
= global_eventq(event
);
1045 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
1046 MACHDBG_CODE(DBG_MACH_SCHED
, MACH_WAIT
)|DBG_FUNC_NONE
,
1047 VM_KERNEL_UNSLIDE_OR_PERM(event
), interruptible
, deadline
, 0, 0);
1049 wresult
= waitq_assert_wait64_locked(waitq
, CAST_EVENT64_T(event
),
1051 urgency
, deadline
, leeway
,
1053 waitq_unlock(waitq
);
1061 * Return TRUE if a thread is running on a processor such that an AST
1062 * is needed to pull it out of userspace execution, or if executing in
1063 * the kernel, bring to a context switch boundary that would cause
1064 * thread state to be serialized in the thread PCB.
1066 * Thread locked, returns the same way. While locked, fields
1067 * like "state" cannot change. "runq" can change only from set to unset.
1069 static inline boolean_t
1070 thread_isoncpu(thread_t thread
)
1072 /* Not running or runnable */
1073 if (!(thread
->state
& TH_RUN
))
1076 /* Waiting on a runqueue, not currently running */
1077 /* TODO: This is invalid - it can get dequeued without thread lock, but not context switched. */
1078 if (thread
->runq
!= PROCESSOR_NULL
)
1082 * Thread does not have a stack yet
1083 * It could be on the stack alloc queue or preparing to be invoked
1085 if (!thread
->kernel_stack
)
1089 * Thread must be running on a processor, or
1090 * about to run, or just did run. In all these
1091 * cases, an AST to the processor is needed
1092 * to guarantee that the thread is kicked out
1093 * of userspace and the processor has
1094 * context switched (and saved register state).
1102 * Force a preemption point for a thread and wait
1103 * for it to stop running on a CPU. If a stronger
1104 * guarantee is requested, wait until no longer
1105 * runnable. Arbitrates access among
1106 * multiple stop requests. (released by unstop)
1108 * The thread must enter a wait state and stop via a
1111 * Returns FALSE if interrupted.
1116 boolean_t until_not_runnable
)
1118 wait_result_t wresult
;
1119 spl_t s
= splsched();
1123 thread_lock(thread
);
1125 while (thread
->state
& TH_SUSP
) {
1126 thread
->wake_active
= TRUE
;
1127 thread_unlock(thread
);
1129 wresult
= assert_wait(&thread
->wake_active
, THREAD_ABORTSAFE
);
1130 wake_unlock(thread
);
1133 if (wresult
== THREAD_WAITING
)
1134 wresult
= thread_block(THREAD_CONTINUE_NULL
);
1136 if (wresult
!= THREAD_AWAKENED
)
1141 thread_lock(thread
);
1144 thread
->state
|= TH_SUSP
;
1146 while ((oncpu
= thread_isoncpu(thread
)) ||
1147 (until_not_runnable
&& (thread
->state
& TH_RUN
))) {
1148 processor_t processor
;
1151 assert(thread
->state
& TH_RUN
);
1152 processor
= thread
->chosen_processor
;
1153 cause_ast_check(processor
);
1156 thread
->wake_active
= TRUE
;
1157 thread_unlock(thread
);
1159 wresult
= assert_wait(&thread
->wake_active
, THREAD_ABORTSAFE
);
1160 wake_unlock(thread
);
1163 if (wresult
== THREAD_WAITING
)
1164 wresult
= thread_block(THREAD_CONTINUE_NULL
);
1166 if (wresult
!= THREAD_AWAKENED
) {
1167 thread_unstop(thread
);
1173 thread_lock(thread
);
1176 thread_unlock(thread
);
1177 wake_unlock(thread
);
1181 * We return with the thread unlocked. To prevent it from
1182 * transitioning to a runnable state (or from TH_RUN to
1183 * being on the CPU), the caller must ensure the thread
1184 * is stopped via an external means (such as an AST)
1193 * Release a previous stop request and set
1194 * the thread running if appropriate.
1196 * Use only after a successful stop operation.
1202 spl_t s
= splsched();
1205 thread_lock(thread
);
1207 assert((thread
->state
& (TH_RUN
|TH_WAIT
|TH_SUSP
)) != TH_SUSP
);
1209 if (thread
->state
& TH_SUSP
) {
1210 thread
->state
&= ~TH_SUSP
;
1212 if (thread
->wake_active
) {
1213 thread
->wake_active
= FALSE
;
1214 thread_unlock(thread
);
1216 thread_wakeup(&thread
->wake_active
);
1217 wake_unlock(thread
);
1224 thread_unlock(thread
);
1225 wake_unlock(thread
);
1232 * Wait for a thread to stop running. (non-interruptible)
1238 boolean_t until_not_runnable
)
1240 wait_result_t wresult
;
1242 processor_t processor
;
1243 spl_t s
= splsched();
1246 thread_lock(thread
);
1249 * Wait until not running on a CPU. If stronger requirement
1250 * desired, wait until not runnable. Assumption: if thread is
1251 * on CPU, then TH_RUN is set, so we're not waiting in any case
1252 * where the original, pure "TH_RUN" check would have let us
1255 while ((oncpu
= thread_isoncpu(thread
)) ||
1256 (until_not_runnable
&& (thread
->state
& TH_RUN
))) {
1259 assert(thread
->state
& TH_RUN
);
1260 processor
= thread
->chosen_processor
;
1261 cause_ast_check(processor
);
1264 thread
->wake_active
= TRUE
;
1265 thread_unlock(thread
);
1267 wresult
= assert_wait(&thread
->wake_active
, THREAD_UNINT
);
1268 wake_unlock(thread
);
1271 if (wresult
== THREAD_WAITING
)
1272 thread_block(THREAD_CONTINUE_NULL
);
1276 thread_lock(thread
);
1279 thread_unlock(thread
);
1280 wake_unlock(thread
);
/*
 *	Routine: clear_wait_internal
 *
 *		Clear the wait condition for the specified thread.
 *		Start the thread executing if that is appropriate.
 *	Arguments:
 *		thread		thread to awaken
 *		result		Wakeup result the thread should see
 *	Conditions:
 *		At splsched
 *		the thread is locked.
 *	Returns:
 *		KERN_SUCCESS		thread was rousted out a wait
 *		KERN_FAILURE		thread was waiting but could not be rousted
 *		KERN_NOT_WAITING	thread was not waiting
 */
__private_extern__ kern_return_t
clear_wait_internal(
	thread_t		thread,
	wait_result_t	wresult)
{
	uint32_t	i = LockTimeOutUsec;
	struct waitq *waitq = thread->waitq;

	/*
	 * NOTE(review): reconstructed from a damaged listing; the retry
	 * back-off inside the pull-failure path was partially missing.
	 * Verify the delay/decrement details against upstream xnu.
	 */
	do {
		/* An uninterruptible wait refuses THREAD_INTERRUPTED wakeups. */
		if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT))
			return (KERN_FAILURE);

		if (waitq != NULL) {
			if (!waitq_pull_thread_locked(waitq, thread)) {
				/*
				 * The waitq is locked by someone else; drop the thread
				 * lock, spin briefly, and retry unless the thread has
				 * since moved to a different waitq.
				 */
				thread_unlock(thread);
				delay(1);
				if (i > 0 && !machine_timeout_suspended())
					i--;
				thread_lock(thread);
				if (waitq != thread->waitq)
					return KERN_NOT_WAITING;
				continue;
			}
		}

		/* TODO: Can we instead assert TH_TERMINATE is not set? */
		if ((thread->state & (TH_WAIT|TH_TERMINATE)) == TH_WAIT)
			return (thread_go(thread, wresult));
		else
			return (KERN_NOT_WAITING);
	} while (i > 0);

	/* Spun for the full timeout without making progress: declare deadlock. */
	panic("clear_wait_internal: deadlock: thread=%p, wq=%p, cpu=%d\n",
		  thread, waitq, cpu_number());

	return (KERN_FAILURE);
}
1342 * Clear the wait condition for the specified thread. Start the thread
1343 * executing if that is appropriate.
1346 * thread thread to awaken
1347 * result Wakeup result the thread should see
1352 wait_result_t result
)
1358 thread_lock(thread
);
1359 ret
= clear_wait_internal(thread
, result
);
1360 thread_unlock(thread
);
1367 * thread_wakeup_prim:
1369 * Common routine for thread_wakeup, thread_wakeup_with_result,
1370 * and thread_wakeup_one.
1376 boolean_t one_thread
,
1377 wait_result_t result
)
1379 if (__improbable(event
== NO_EVENT
))
1380 panic("%s() called with NO_EVENT", __func__
);
1382 struct waitq
*wq
= global_eventq(event
);
1385 return waitq_wakeup64_one(wq
, CAST_EVENT64_T(event
), result
, WAITQ_ALL_PRIORITIES
);
1387 return waitq_wakeup64_all(wq
, CAST_EVENT64_T(event
), result
, WAITQ_ALL_PRIORITIES
);
1391 * Wakeup a specified thread if and only if it's waiting for this event
1394 thread_wakeup_thread(
1398 if (__improbable(event
== NO_EVENT
))
1399 panic("%s() called with NO_EVENT", __func__
);
1401 struct waitq
*wq
= global_eventq(event
);
1403 return waitq_wakeup64_thread(wq
, CAST_EVENT64_T(event
), thread
, THREAD_AWAKENED
);
1407 * Wakeup a thread waiting on an event and promote it to a priority.
1409 * Requires woken thread to un-promote itself when done.
1412 thread_wakeup_one_with_pri(
1416 if (__improbable(event
== NO_EVENT
))
1417 panic("%s() called with NO_EVENT", __func__
);
1419 struct waitq
*wq
= global_eventq(event
);
1421 return waitq_wakeup64_one(wq
, CAST_EVENT64_T(event
), THREAD_AWAKENED
, priority
);
1425 * Wakeup a thread waiting on an event,
1426 * promote it to a priority,
1427 * and return a reference to the woken thread.
1429 * Requires woken thread to un-promote itself when done.
1432 thread_wakeup_identify(event_t event
,
1435 if (__improbable(event
== NO_EVENT
))
1436 panic("%s() called with NO_EVENT", __func__
);
1438 struct waitq
*wq
= global_eventq(event
);
1440 return waitq_wakeup64_identify(wq
, CAST_EVENT64_T(event
), THREAD_AWAKENED
, priority
);
/*
 *	thread_bind:
 *
 *	Force the current thread to execute on the specified processor.
 *	Takes effect after the next thread_block().
 *
 *	Returns the previous binding.  PROCESSOR_NULL means
 *	not bound.
 *
 *	XXX - DO NOT export this to users - XXX
 */
processor_t
thread_bind(
	processor_t		processor)
{
	thread_t		self = current_thread();
	processor_t		prev;
	spl_t			s;

	s = splsched();
	thread_lock(self);

	prev = thread_bind_internal(self, processor);

	thread_unlock(self);
	splx(s);

	return (prev);
}
/*
 *	thread_bind_internal:
 *
 *	If the specified thread is not the current thread, and it is currently
 *	running on another CPU, a remote AST must be sent to that CPU to cause
 *	the thread to migrate to its bound processor. Otherwise, the migration
 *	will occur at the next quantum expiration or blocking point.
 *
 *	When the thread is the current thread, an explicit thread_block()
 *	should be used to force the current processor to context switch away and
 *	let the thread migrate to the bound processor.
 *
 *	Thread must be locked, and at splsched.
 */
static processor_t
thread_bind_internal(
	thread_t		thread,
	processor_t		processor)
{
	processor_t		prev;

	/* <rdar://problem/15102234> */
	assert(thread->sched_pri < BASEPRI_RTQUEUES);
	/* A thread can't be bound if it's sitting on a (potentially incorrect) runqueue */
	assert(thread->runq == PROCESSOR_NULL);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_THREAD_BIND), thread_tid(thread), processor ? (uintptr_t)processor->cpu_id : (uintptr_t)-1, 0, 0, 0);

	prev = thread->bound_processor;
	thread->bound_processor = processor;

	return (prev);
}
/*
 * thread_vm_bind_group_add:
 *
 * The "VM bind group" is a special mechanism to mark a collection
 * of threads from the VM subsystem that, in general, should be scheduled
 * with only one CPU of parallelism. To accomplish this, we initially
 * bind all the threads to the master processor, which has the effect
 * that only one of the threads in the group can execute at once, including
 * preempting threads in the group that are a lower priority. Future
 * mechanisms may use more dynamic mechanisms to prevent the collection
 * of VM threads from using more CPU time than desired.
 *
 * The current implementation can result in priority inversions where
 * compute-bound priority 95 or realtime threads that happen to have
 * landed on the master processor prevent the VM threads from running.
 * When this situation is detected, we unbind the threads for one
 * scheduler tick to allow the scheduler to run the threads on
 * additional CPUs, before restoring the binding (assuming high latency
 * is no longer a problem).
 */

/*
 * The current max is provisioned for:
 * vm_compressor_swap_trigger_thread (92)
 * 2 x vm_pageout_iothread_internal (92) when vm_restricted_to_single_processor==TRUE
 * vm_pageout_continue (92)
 * memorystatus_thread (95)
 */
#define MAX_VM_BIND_GROUP_COUNT (5)
/* Protects the bind-group thread list and count below. */
decl_simple_lock_data(static,sched_vm_group_list_lock);
static thread_t sched_vm_group_thread_list[MAX_VM_BIND_GROUP_COUNT];
static int sched_vm_group_thread_count;
/* TRUE while the group's master-processor binding is temporarily lifted. */
static boolean_t sched_vm_group_temporarily_unbound = FALSE;
void
thread_vm_bind_group_add(void)
{
	thread_t self = current_thread();

	/* The group list holds a reference on each member thread. */
	thread_reference_internal(self);
	self->options |= TH_OPT_SCHED_VM_GROUP;

	simple_lock(&sched_vm_group_list_lock);
	assert(sched_vm_group_thread_count < MAX_VM_BIND_GROUP_COUNT);
	sched_vm_group_thread_list[sched_vm_group_thread_count++] = self;
	simple_unlock(&sched_vm_group_list_lock);

	thread_bind(master_processor);

	/* Switch to bound processor if not already there */
	thread_block(THREAD_CONTINUE_NULL);
}
/*
 * sched_vm_group_maintenance:
 *
 * Periodically inspect the VM bind group for scheduling latency.  If the
 * group's threads have been runnable but stuck on a runqueue for longer
 * than one scheduler tick while no member makes progress on a CPU, lift
 * the master-processor binding; once latency subsides, restore it.
 *
 * NOTE(review): reconstructed from a damaged listing; splsched bracketing
 * around the two enumeration passes was partially missing — verify against
 * upstream xnu.
 */
static void
sched_vm_group_maintenance(void)
{
	uint64_t ctime = mach_absolute_time();
	/* Anything made runnable before this cutoff has waited "too long". */
	uint64_t longtime = ctime - sched_tick_interval;
	int i;
	spl_t s;
	boolean_t high_latency_observed = FALSE;
	boolean_t runnable_and_not_on_runq_observed = FALSE;
	boolean_t bind_target_changed = FALSE;
	processor_t bind_target = PROCESSOR_NULL;

	/* Make sure nobody attempts to add new threads while we are enumerating them */
	simple_lock(&sched_vm_group_list_lock);

	s = splsched();

	/* Pass 1: observe group state without changing anything. */
	for (i=0; i < sched_vm_group_thread_count; i++) {
		thread_t thread = sched_vm_group_thread_list[i];
		assert(thread != THREAD_NULL);
		thread_lock(thread);
		if ((thread->state & (TH_RUN|TH_WAIT)) == TH_RUN) {
			if (thread->runq != PROCESSOR_NULL && thread->last_made_runnable_time < longtime) {
				/* Runnable, enqueued, and waiting beyond one tick. */
				high_latency_observed = TRUE;
			} else if (thread->runq == PROCESSOR_NULL) {
				/* There are some cases where a thread may be transitioning that also fall into this case */
				runnable_and_not_on_runq_observed = TRUE;
			}
		}
		thread_unlock(thread);

		if (high_latency_observed && runnable_and_not_on_runq_observed) {
			/* All the things we are looking for are true, stop looking */
			break;
		}
	}

	splx(s);

	if (sched_vm_group_temporarily_unbound) {
		/* If we turned off binding, make sure everything is OK before rebinding */
		if (!high_latency_observed) {
			/* rebind */
			bind_target_changed = TRUE;
			bind_target = master_processor;
			sched_vm_group_temporarily_unbound = FALSE; /* might be reset to TRUE if change cannot be completed */
		}
	} else {
		/*
		 * Check if we're in a bad state, which is defined by high
		 * latency with no core currently executing a thread. If a
		 * single thread is making progress on a CPU, that means the
		 * binding concept to reduce parallelism is working as
		 * designed.
		 */
		if (high_latency_observed && !runnable_and_not_on_runq_observed) {
			/* unbind */
			bind_target_changed = TRUE;
			bind_target = PROCESSOR_NULL;
			sched_vm_group_temporarily_unbound = TRUE;
		}
	}

	/* Pass 2: apply the new binding to every member we can. */
	if (bind_target_changed) {
		s = splsched();
		for (i=0; i < sched_vm_group_thread_count; i++) {
			thread_t thread = sched_vm_group_thread_list[i];
			boolean_t removed;
			assert(thread != THREAD_NULL);

			thread_lock(thread);
			removed = thread_run_queue_remove(thread);
			if (removed || ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT)) {
				/* Safe to rebind: off the runqueue, or blocked. */
				thread_bind_internal(thread, bind_target);
			} else {
				/*
				 * Thread was in the middle of being context-switched-to,
				 * or was in the process of blocking. To avoid switching the bind
				 * state out mid-flight, defer the change if possible.
				 */
				if (bind_target == PROCESSOR_NULL) {
					/* Unbinding mid-flight is harmless. */
					thread_bind_internal(thread, bind_target);
				} else {
					sched_vm_group_temporarily_unbound = TRUE; /* next pass will try again */
				}
			}

			if (removed) {
				thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
			}
			thread_unlock(thread);
		}
		splx(s);
	}

	simple_unlock(&sched_vm_group_list_lock);
}
/* Invoked prior to idle entry to determine if, on SMT capable processors, an SMT
 * rebalancing opportunity exists when a core is (instantaneously) idle, but
 * other SMT-capable cores may be over-committed. TODO: some possible negatives:
 * IPI thrash if this core does not remain idle following the load balancing ASTs
 * Idle "thrash", when IPI issue is followed by idle entry/core power down
 * followed by a wakeup shortly thereafter.
 */

#if (DEVELOPMENT || DEBUG)
/* Debug knob: set to 0 to disable SMT rebalancing entirely. */
int sched_smt_balance = 1;
#endif

#if __SMP__
/* Invoked with pset locked, returns with pset unlocked */
void
sched_SMT_balance(processor_t cprocessor, processor_set_t cpset) {
	processor_t ast_processor = NULL;

#if (DEVELOPMENT || DEBUG)
	if (__improbable(sched_smt_balance == 0))
		goto smt_balance_exit;
#endif

	assert(cprocessor == current_processor());
	if (cprocessor->is_SMT == FALSE)
		goto smt_balance_exit;

	processor_t sib_processor = cprocessor->processor_secondary ? cprocessor->processor_secondary : cprocessor->processor_primary;

	/* Determine if both this processor and its sibling are idle,
	 * indicating an SMT rebalancing opportunity.
	 */
	if (sib_processor->state != PROCESSOR_IDLE)
		goto smt_balance_exit;

	processor_t sprocessor;

	/*
	 * Look for a busy secondary whose primary sibling is also busy with a
	 * non-realtime thread and that has no AST already pending; that CPU's
	 * load could migrate to this fully-idle core pair.
	 */
	qe_foreach_element(sprocessor, &cpset->active_queue, processor_queue) {
		if ((sprocessor->state == PROCESSOR_RUNNING) &&
		    (sprocessor->processor_primary != sprocessor) &&
		    (sprocessor->processor_primary->state == PROCESSOR_RUNNING) &&
		    (sprocessor->current_pri < BASEPRI_RTQUEUES) &&
		    ((cpset->pending_AST_cpu_mask & (1ULL << sprocessor->cpu_id)) == 0)) {
			assert(sprocessor != cprocessor);
			ast_processor = sprocessor;
			break;
		}
	}

smt_balance_exit:
	pset_unlock(cpset);

	if (ast_processor) {
		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_SMT_BALANCE), ast_processor->cpu_id, ast_processor->state, ast_processor->processor_primary->state, 0, 0);
		/* Poke the over-committed CPU so it re-runs thread selection. */
		cause_ast_check(ast_processor);
	}
}
1716 #endif /* __SMP__ */
/*
 *	thread_select:
 *
 *	Select a new thread for the current processor to execute.
 *
 *	May select the current thread, which must be locked.
 */
static thread_t
thread_select(
	thread_t			thread,
	processor_t			processor,
	ast_t				reason)
{
	processor_set_t		pset = processor->processor_set;
	thread_t			new_thread = THREAD_NULL;

	assert(processor == current_processor());
	assert((thread->state & (TH_RUN|TH_TERMINATE2)) == TH_RUN);

	/*
	 * NOTE(review): reconstructed from a damaged listing; the pset and RT
	 * runqueue lock acquire/release lines were not all visible.  Verify the
	 * lock pairing and primitive names against upstream xnu.
	 */
	do {
		/*
		 *	Update the priority.
		 */
		if (SCHED(can_update_priority)(thread))
			SCHED(update_priority)(thread);

		/* Publish the outgoing thread's attributes for preemption checks. */
		processor->current_pri = thread->sched_pri;
		processor->current_thmode = thread->sched_mode;
		processor->current_sfi_class = thread->sfi_class;

		pset_lock(pset);

		assert(processor->state != PROCESSOR_OFF_LINE);

		if (!processor->is_recommended) {
			/*
			 * The performance controller has provided a hint to not dispatch more threads,
			 * unless they are bound to us (and thus we are the only option
			 */
			if (!SCHED(processor_bound_count)(processor)) {
				goto idle;
			}
		} else if (processor->processor_primary != processor) {
			/*
			 * Should this secondary SMT processor attempt to find work? For pset runqueue systems,
			 * we should look for work only under the same conditions that choose_processor()
			 * would have assigned work, which is when all primary processors have been assigned work.
			 *
			 * An exception is that bound threads are dispatched to a processor without going through
			 * choose_processor(), so in those cases we should continue trying to dequeue work.
			 */
			if (!SCHED(processor_bound_count)(processor) && !queue_empty(&pset->idle_queue) && !rt_runq.count) {
				goto idle;
			}
		}

		rt_lock_lock(); /* NOTE(review): restored call; confirm RT lock primitive name */

		/*
		 *	Test to see if the current thread should continue
		 *	to run on this processor.  Must not be attempting to wait, and not
		 *	bound to a different processor, nor be in the wrong
		 *	processor set, nor be forced to context switch by TH_SUSP.
		 *
		 *	Note that there are never any RT threads in the regular runqueue.
		 *
		 *	This code is very insanely tricky.
		 */
		if (((thread->state & (TH_TERMINATE|TH_IDLE|TH_WAIT|TH_RUN|TH_SUSP)) == TH_RUN) &&
		    (thread->sched_pri >= BASEPRI_RTQUEUES || processor->processor_primary == processor) &&
		    (thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor) &&
		    (thread->affinity_set == AFFINITY_SET_NULL || thread->affinity_set->aset_pset == pset)) {
			/*
			 * RT threads with un-expired quantum stay on processor,
			 * unless there's a valid RT thread with an earlier deadline.
			 */
			if (thread->sched_pri >= BASEPRI_RTQUEUES && processor->first_timeslice) {
				if (rt_runq.count > 0) {
					thread_t next_rt = qe_queue_first(&rt_runq.queue, struct thread, runq_links);

					assert(next_rt->runq == THREAD_ON_RT_RUNQ);

					if (next_rt->realtime.deadline < processor->deadline &&
					    (next_rt->bound_processor == PROCESSOR_NULL ||
					    next_rt->bound_processor == processor)) {
						/* The next RT thread is better, so pick it off the runqueue. */
						goto pick_new_rt_thread;
					}
				}

				/* This is still the best RT thread to run. */
				processor->deadline = thread->realtime.deadline;

				rt_lock_unlock();
				pset_unlock(pset);

				return (thread);
			}

			if ((rt_runq.count == 0) &&
			    SCHED(processor_queue_has_priority)(processor, thread->sched_pri, TRUE) == FALSE) {
				/* This thread is still the highest priority runnable (non-idle) thread */
				processor->deadline = UINT64_MAX;

				rt_lock_unlock();
				pset_unlock(pset);

				return (thread);
			}
		}

		/* OK, so we're not going to run the current thread. Look at the RT queue. */
		if (rt_runq.count > 0) {
			thread_t next_rt = qe_queue_first(&rt_runq.queue, struct thread, runq_links);

			assert(next_rt->runq == THREAD_ON_RT_RUNQ);

			if (__probable((next_rt->bound_processor == PROCESSOR_NULL ||
			    (next_rt->bound_processor == processor)))) {
pick_new_rt_thread:
				new_thread = qe_dequeue_head(&rt_runq.queue, struct thread, runq_links);

				new_thread->runq = PROCESSOR_NULL;
				SCHED_STATS_RUNQ_CHANGE(&rt_runq.runq_stats, rt_runq.count);
				rt_runq.count--;

				processor->deadline = new_thread->realtime.deadline;

				rt_lock_unlock();
				pset_unlock(pset);

				return (new_thread);
			}
		}

		processor->deadline = UINT64_MAX;
		rt_lock_unlock();

		/* No RT threads, so let's look at the regular threads. */
		if ((new_thread = SCHED(choose_thread)(processor, MINPRI, reason)) != THREAD_NULL) {
			pset_unlock(pset);
			return (new_thread);
		}

#if __SMP__
		if (SCHED(steal_thread_enabled)) {
			/*
			 * No runnable threads, attempt to steal
			 * from other processors. Returns with pset lock dropped.
			 */

			if ((new_thread = SCHED(steal_thread)(pset)) != THREAD_NULL) {
				return (new_thread);
			}

			/*
			 * If other threads have appeared, shortcut
			 * around again.
			 */
			if (!SCHED(processor_queue_empty)(processor) || rt_runq.count > 0)
				continue;

			pset_lock(pset);
		}
#endif

	idle:
		/*
		 *	Nothing is runnable, so set this processor idle if it
		 *	was running.
		 */
		if (processor->state == PROCESSOR_RUNNING) {
			processor->state = PROCESSOR_IDLE;

			if (processor->processor_primary == processor) {
				re_queue_head(&pset->idle_queue, &processor->processor_queue);
			} else {
				re_queue_head(&pset->idle_secondary_queue, &processor->processor_queue);
			}
		}

#if __SMP__
		/* Invoked with pset locked, returns with pset unlocked */
		sched_SMT_balance(processor, pset);
#else
		pset_unlock(pset);
#endif

#if CONFIG_SCHED_IDLE_IN_PLACE
		/*
		 *	Choose idle thread if fast idle is not possible.
		 */
		if (processor->processor_primary != processor)
			return (processor->idle_thread);

		if ((thread->state & (TH_IDLE|TH_TERMINATE|TH_SUSP)) || !(thread->state & TH_WAIT) || thread->wake_active || thread->sched_pri >= BASEPRI_RTQUEUES)
			return (processor->idle_thread);

		/*
		 *	Perform idling activities directly without a
		 *	context switch.  Return dispatched thread,
		 *	else check again for a runnable thread.
		 */
		new_thread = thread_select_idle(thread, processor);

#else /* !CONFIG_SCHED_IDLE_IN_PLACE */

		/*
		 * Do a full context switch to idle so that the current
		 * thread can start running on another processor without
		 * waiting for the fast-idled processor to wake up.
		 */
		new_thread = processor->idle_thread;

#endif /* !CONFIG_SCHED_IDLE_IN_PLACE */

	} while (new_thread == THREAD_NULL);

	return (new_thread);
}
#if CONFIG_SCHED_IDLE_IN_PLACE
/*
 *	thread_select_idle:
 *
 *	Idle the processor using the current thread context.
 *
 *	Called with thread locked, then dropped and relocked.
 */
static thread_t
thread_select_idle(
	thread_t		thread,
	processor_t		processor)
{
	thread_t		new_thread;
	uint64_t		arg1, arg2;
	int			urgency;

	/* The thread is leaving the run count while it idles in place. */
	sched_run_decr(thread);

	thread->state |= TH_IDLE;
	processor->current_pri = IDLEPRI;
	processor->current_thmode = TH_MODE_NONE;
	processor->current_sfi_class = SFI_CLASS_KERNEL;

	/* Reload precise timing global policy to thread-local policy */
	thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);

	thread_unlock(thread);

	/*
	 *	Switch execution timing to processor idle thread.
	 */
	processor->last_dispatch = mach_absolute_time();

#ifdef CONFIG_MACH_APPROXIMATE_TIME
	commpage_update_mach_approximate_time(processor->last_dispatch);
#endif

	thread->last_run_time = processor->last_dispatch;
	thread_timer_event(processor->last_dispatch, &processor->idle_thread->system_timer);
	PROCESSOR_DATA(processor, kernel_timer) = &processor->idle_thread->system_timer;

	/*
	 *	Cancel the quantum timer while idling.
	 */
	timer_call_cancel(&processor->quantum_timer);
	processor->first_timeslice = FALSE;

	(*thread->sched_call)(SCHED_CALL_BLOCK, thread);

	thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0, 0, NULL);

	/*
	 *	Enable interrupts and perform idling activities.  No
	 *	preemption due to TH_IDLE being set.
	 */
	spllo(); new_thread = processor_idle(thread, processor);

	/*
	 *	Return at splsched.
	 */
	(*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);

	thread_lock(thread);

	/*
	 *	If awakened, switch to thread timer and start a new quantum.
	 *	Otherwise skip; we will context switch to another thread or return here.
	 */
	if (!(thread->state & TH_WAIT)) {
		processor->last_dispatch = mach_absolute_time();
		thread_timer_event(processor->last_dispatch, &thread->system_timer);
		PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;

		thread_quantum_init(thread);
		processor->quantum_end = processor->last_dispatch + thread->quantum_remaining;
		timer_call_enter1(&processor->quantum_timer, thread, processor->quantum_end, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
		processor->first_timeslice = TRUE;

		thread->computation_epoch = processor->last_dispatch;
	}

	thread->state &= ~TH_IDLE;

	urgency = thread_get_urgency(thread, &arg1, &arg2);

	thread_tell_urgency(urgency, arg1, arg2, 0, new_thread);

	/* Re-enter the run count now that the thread is no longer idling. */
	sched_run_incr(thread);

	return (new_thread);
}
#endif /* CONFIG_SCHED_IDLE_IN_PLACE */
/*
 *	thread_invoke
 *
 *	Called at splsched with neither thread locked.
 *
 *	Perform a context switch and start executing the new thread.
 *
 *	Returns FALSE when the context switch didn't happen.
 *	The reference to the new thread is still consumed.
 *
 *	"self" is what is currently running on the processor,
 *	"thread" is the new thread to context switch to
 *	(which may be the same thread in some cases)
 */
static boolean_t
thread_invoke(
	thread_t			self,
	thread_t			thread,
	ast_t				reason)
{
	/*
	 * NOTE(review): reconstructed from a damaged listing; signature,
	 * counters (c_switch/p_switch) and some spllo()/return lines were
	 * restored from context — verify against upstream xnu.
	 */
	if (__improbable(get_preemption_level() != 0)) {
		int pl = get_preemption_level();
		panic("thread_invoke: preemption_level %d, possible cause: %s",
		    pl, (pl < 0 ? "unlocking an unlocked mutex or spinlock" :
		    "blocking while holding a spinlock, or within interrupt context"));
	}

	thread_continue_t	continuation = self->continuation;
	void			*parameter = self->parameter;
	processor_t		processor;

	uint64_t		ctime = mach_absolute_time();

#ifdef CONFIG_MACH_APPROXIMATE_TIME
	commpage_update_mach_approximate_time(ctime);
#endif

#if defined(CONFIG_SCHED_TIMESHARE_CORE)
	sched_timeshare_consider_maintenance(ctime);
#endif

	assert_thread_magic(self);
	assert(self == current_thread());
	assert(self->runq == PROCESSOR_NULL);
	assert((self->state & (TH_RUN|TH_TERMINATE2)) == TH_RUN);

	thread_lock(thread);

	assert_thread_magic(thread);
	assert((thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_TERMINATE|TH_TERMINATE2)) == TH_RUN);
	assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == current_processor());
	assert(thread->runq == PROCESSOR_NULL);

	/* Reload precise timing global policy to thread-local policy */
	thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);

	/* Update SFI class based on other factors */
	thread->sfi_class = sfi_thread_classify(thread);

	/* Allow realtime threads to hang onto a stack. */
	if ((self->sched_mode == TH_MODE_REALTIME) && !self->reserved_stack)
		self->reserved_stack = self->kernel_stack;

	if (continuation != NULL) {
		if (!thread->kernel_stack) {
			/*
			 *	If we are using a privileged stack,
			 *	check to see whether we can exchange it with
			 *	that of the other thread.
			 */
			if (self->kernel_stack == self->reserved_stack && !thread->reserved_stack)
				goto need_stack;

			/*
			 *	Context switch by performing a stack handoff.
			 */
			continuation = thread->continuation;
			parameter = thread->parameter;

			processor = current_processor();
			processor->active_thread = thread;
			processor->current_pri = thread->sched_pri;
			processor->current_thmode = thread->sched_mode;
			processor->current_sfi_class = thread->sfi_class;
			if (thread->last_processor != processor && thread->last_processor != NULL) {
				if (thread->last_processor->processor_set != processor->processor_set)
					thread->ps_switch++;
				thread->p_switch++;
			}
			thread->last_processor = processor;
			thread->c_switch++;
			ast_context(thread);

			thread_unlock(thread);

			self->reason = reason;

			processor->last_dispatch = ctime;
			self->last_run_time = ctime;
			thread_timer_event(ctime, &thread->system_timer);
			PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;

			/*
			 * Since non-precise user/kernel time doesn't update the state timer
			 * during privilege transitions, synthesize an event now.
			 */
			if (!thread->precise_user_kernel_time) {
				timer_switch(PROCESSOR_DATA(processor, current_state),
							ctime,
							 PROCESSOR_DATA(processor, current_state));
			}

			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
				MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF)|DBG_FUNC_NONE,
				self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);

			if ((thread->chosen_processor != processor) && (thread->chosen_processor != PROCESSOR_NULL)) {
				SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED)|DBG_FUNC_NONE,
						(uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
			}

			DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);

			SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);

			TLOG(1, "thread_invoke: calling stack_handoff\n");
			stack_handoff(self, thread);

			/* 'self' is now off core */
			assert(thread == current_thread());

			DTRACE_SCHED(on__cpu);

#if KPERF
			kperf_on_cpu(thread, continuation, NULL);
#endif /* KPERF */

			thread_dispatch(self, thread);

			thread->continuation = thread->parameter = NULL;

			counter(c_thread_invoke_hits++);

			(void) spllo();

			assert(continuation);
			call_continuation(continuation, parameter, thread->wait_result);
			/*NOTREACHED*/
		}
		else if (thread == self) {
			/* same thread but with continuation */
			ast_context(self);
			counter(++c_thread_invoke_same);

			thread_unlock(self);

#if KPERF
			kperf_on_cpu(thread, continuation, NULL);
#endif /* KPERF */

			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
				self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);

			self->continuation = self->parameter = NULL;

			(void) spllo();

			call_continuation(continuation, parameter, self->wait_result);
			/*NOTREACHED*/
		}
	} else {
		/*
		 *	Check that the other thread has a stack
		 */
		if (!thread->kernel_stack) {
need_stack:
			if (!stack_alloc_try(thread)) {
				counter(c_thread_invoke_misses++);
				thread_unlock(thread);
				thread_stack_enqueue(thread);
				return (FALSE);
			}
		} else if (thread == self) {
			ast_context(self);
			counter(++c_thread_invoke_same);
			thread_unlock(self);

			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
				self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);

			return (TRUE);
		}
	}

	/*
	 *	Context switch by full context save.
	 */
	processor = current_processor();
	processor->active_thread = thread;
	processor->current_pri = thread->sched_pri;
	processor->current_thmode = thread->sched_mode;
	processor->current_sfi_class = thread->sfi_class;
	if (thread->last_processor != processor && thread->last_processor != NULL) {
		if (thread->last_processor->processor_set != processor->processor_set)
			thread->ps_switch++;
		thread->p_switch++;
	}
	thread->last_processor = processor;
	thread->c_switch++;
	ast_context(thread);

	thread_unlock(thread);

	counter(c_thread_invoke_csw++);

	self->reason = reason;

	processor->last_dispatch = ctime;
	self->last_run_time = ctime;
	thread_timer_event(ctime, &thread->system_timer);
	PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;

	/*
	 * Since non-precise user/kernel time doesn't update the state timer
	 * during privilege transitions, synthesize an event now.
	 */
	if (!thread->precise_user_kernel_time) {
		timer_switch(PROCESSOR_DATA(processor, current_state),
					ctime,
					 PROCESSOR_DATA(processor, current_state));
	}

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
		self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);

	if ((thread->chosen_processor != processor) && (thread->chosen_processor != NULL)) {
		SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED)|DBG_FUNC_NONE,
				(uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
	}

	DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);

	SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);

	/*
	 * This is where we actually switch register context,
	 * and address space if required.  We will next run
	 * as a result of a subsequent context switch.
	 *
	 * Once registers are switched and the processor is running "thread",
	 * the stack variables and non-volatile registers will contain whatever
	 * was there the last time that thread blocked. No local variables should
	 * be used after this point, except for the special case of "thread", which
	 * the platform layer returns as the previous thread running on the processor
	 * via the function call ABI as a return register, and "self", which may have
	 * been stored on the stack or a non-volatile register, but a stale idea of
	 * what was on the CPU is newly-accurate because that thread is again
	 * running on the CPU.
	 */
	assert(continuation == self->continuation);
	thread = machine_switch_context(self, continuation, thread);
	assert(self == current_thread());
	TLOG(1,"thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self, continuation, thread);

	DTRACE_SCHED(on__cpu);

#if KPERF
	kperf_on_cpu(self, NULL, __builtin_frame_address(0));
#endif /* KPERF */

	/*
	 * We have been resumed and are set to run.
	 */
	thread_dispatch(thread, self);

	if (continuation) {
		self->continuation = self->parameter = NULL;

		(void) spllo();

		call_continuation(continuation, parameter, self->wait_result);
		/*NOTREACHED*/
	}

	return (TRUE);
}
2324 #if defined(CONFIG_SCHED_DEFERRED_AST)
2326 * pset_cancel_deferred_dispatch:
2328 * Cancels all ASTs that we can cancel for the given processor set
2329 * if the current processor is running the last runnable thread in the
2332 * This function assumes the current thread is runnable. This must
2333 * be called with the pset unlocked.
2336 pset_cancel_deferred_dispatch(
2337 processor_set_t pset
,
2338 processor_t processor
)
2340 processor_t active_processor
= NULL
;
2341 uint32_t sampled_sched_run_count
;
2344 sampled_sched_run_count
= (volatile uint32_t) sched_run_buckets
[TH_BUCKET_RUN
];
2347 * If we have emptied the run queue, and our current thread is runnable, we
2348 * should tell any processors that are still DISPATCHING that they will
2349 * probably not have any work to do. In the event that there are no
2350 * pending signals that we can cancel, this is also uninteresting.
2352 * In the unlikely event that another thread becomes runnable while we are
2353 * doing this (sched_run_count is atomically updated, not guarded), the
2354 * codepath making it runnable SHOULD (a dangerous word) need the pset lock
2355 * in order to dispatch it to a processor in our pset. So, the other
2356 * codepath will wait while we squash all cancelable ASTs, get the pset
2357 * lock, and then dispatch the freshly runnable thread. So this should be
2358 * correct (we won't accidentally have a runnable thread that hasn't been
2359 * dispatched to an idle processor), if not ideal (we may be restarting the
2360 * dispatch process, which could have some overhead).
2363 if ((sampled_sched_run_count
== 1) &&
2364 (pset
->pending_deferred_AST_cpu_mask
)) {
2365 qe_foreach_element_safe(active_processor
, &pset
->active_queue
, processor_queue
) {
2367 * If a processor is DISPATCHING, it could be because of
2368 * a cancelable signal.
2370 * IF the processor is not our
2371 * current processor (the current processor should not
2372 * be DISPATCHING, so this is a bit paranoid), AND there
2373 * is a cancelable signal pending on the processor, AND
2374 * there is no non-cancelable signal pending (as there is
2375 * no point trying to backtrack on bringing the processor
2376 * up if a signal we cannot cancel is outstanding), THEN
2377 * it should make sense to roll back the processor state
2378 * to the IDLE state.
2380 * If the racey nature of this approach (as the signal
2381 * will be arbitrated by hardware, and can fire as we
2382 * roll back state) results in the core responding
2383 * despite being pushed back to the IDLE state, it
2384 * should be no different than if the core took some
2385 * interrupt while IDLE.
2387 if ((active_processor
->state
== PROCESSOR_DISPATCHING
) &&
2388 (pset
->pending_deferred_AST_cpu_mask
& (1ULL << active_processor
->cpu_id
)) &&
2389 (!(pset
->pending_AST_cpu_mask
& (1ULL << active_processor
->cpu_id
))) &&
2390 (active_processor
!= processor
)) {
2392 * Squash all of the processor state back to some
2393 * reasonable facsimile of PROCESSOR_IDLE.
2395 * TODO: What queue policy do we actually want here?
2396 * We want to promote selection of a good processor
2397 * to run on. Do we want to enqueue at the head?
2398 * The tail? At the (relative) old position in the
2399 * queue? Or something else entirely?
2401 re_queue_head(&pset
->idle_queue
, &active_processor
->processor_queue
);
2403 assert(active_processor
->next_thread
== THREAD_NULL
);
2405 active_processor
->current_pri
= IDLEPRI
;
2406 active_processor
->current_thmode
= TH_MODE_FIXED
;
2407 active_processor
->current_sfi_class
= SFI_CLASS_KERNEL
;
2408 active_processor
->deadline
= UINT64_MAX
;
2409 active_processor
->state
= PROCESSOR_IDLE
;
2410 pset
->pending_deferred_AST_cpu_mask
&= ~(1U << active_processor
->cpu_id
);
2411 machine_signal_idle_cancel(active_processor
);
2420 /* We don't support deferred ASTs; everything is candycanes and sunshine. */
2426 * Handle threads at context switch. Re-dispatch other thread
2427 * if still running, otherwise update run state and perform
2428 * special actions. Update quantum for other thread and begin
2429 * the quantum for ourselves.
2431 * "thread" is the old thread that we have switched away from.
2432 * "self" is the new current thread that we have context switched to
2434 * Called at splsched.
2441 processor_t processor
= self
->last_processor
;
2443 assert(processor
== current_processor());
2444 assert(self
== current_thread());
2445 assert(thread
!= self
);
2447 if (thread
!= THREAD_NULL
) {
2449 * If blocked at a continuation, discard
2452 if (thread
->continuation
!= NULL
&& thread
->kernel_stack
!= 0)
2455 if (thread
->state
& TH_IDLE
) {
2456 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
2457 MACHDBG_CODE(DBG_MACH_SCHED
,MACH_DISPATCH
) | DBG_FUNC_NONE
,
2458 (uintptr_t)thread_tid(thread
), 0, thread
->state
,
2459 sched_run_buckets
[TH_BUCKET_RUN
], 0);
2462 int64_t remainder
= 0;
2464 if (processor
->quantum_end
> processor
->last_dispatch
)
2465 remainder
= processor
->quantum_end
-
2466 processor
->last_dispatch
;
2468 consumed
= thread
->quantum_remaining
- remainder
;
2470 if ((thread
->reason
& AST_LEDGER
) == 0) {
2472 * Bill CPU time to both the task and
2473 * the individual thread.
2475 ledger_credit(thread
->t_ledger
,
2476 task_ledgers
.cpu_time
, consumed
);
2477 ledger_credit(thread
->t_threadledger
,
2478 thread_ledgers
.cpu_time
, consumed
);
2480 if (thread
->t_bankledger
) {
2481 ledger_credit(thread
->t_bankledger
,
2482 bank_ledgers
.cpu_time
,
2483 (consumed
- thread
->t_deduct_bank_ledger_time
));
2486 thread
->t_deduct_bank_ledger_time
=0;
2491 thread_lock(thread
);
2494 * Apply a priority floor if the thread holds a kernel resource
2495 * Do this before checking starting_pri to avoid overpenalizing
2496 * repeated rwlock blockers.
2498 if (__improbable(thread
->rwlock_count
!= 0))
2499 lck_rw_set_promotion_locked(thread
);
2501 boolean_t keep_quantum
= processor
->first_timeslice
;
2504 * Treat a thread which has dropped priority since it got on core
2505 * as having expired its quantum.
2507 if (processor
->starting_pri
> thread
->sched_pri
)
2508 keep_quantum
= FALSE
;
2510 /* Compute remainder of current quantum. */
2512 processor
->quantum_end
> processor
->last_dispatch
)
2513 thread
->quantum_remaining
= (uint32_t)remainder
;
2515 thread
->quantum_remaining
= 0;
2517 if (thread
->sched_mode
== TH_MODE_REALTIME
) {
2519 * Cancel the deadline if the thread has
2520 * consumed the entire quantum.
2522 if (thread
->quantum_remaining
== 0) {
2523 thread
->realtime
.deadline
= UINT64_MAX
;
2526 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
2528 * For non-realtime threads treat a tiny
2529 * remaining quantum as an expired quantum
2530 * but include what's left next time.
2532 if (thread
->quantum_remaining
< min_std_quantum
) {
2533 thread
->reason
|= AST_QUANTUM
;
2534 thread
->quantum_remaining
+= SCHED(initial_quantum_size
)(thread
);
2536 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
2540 * If we are doing a direct handoff then
2541 * take the remainder of the quantum.
2543 if ((thread
->reason
& (AST_HANDOFF
|AST_QUANTUM
)) == AST_HANDOFF
) {
2544 self
->quantum_remaining
= thread
->quantum_remaining
;
2545 thread
->reason
|= AST_QUANTUM
;
2546 thread
->quantum_remaining
= 0;
2548 #if defined(CONFIG_SCHED_MULTIQ)
2549 if (SCHED(sched_groups_enabled
) &&
2550 thread
->sched_group
== self
->sched_group
) {
2551 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
2552 MACHDBG_CODE(DBG_MACH_SCHED
, MACH_QUANTUM_HANDOFF
),
2553 self
->reason
, (uintptr_t)thread_tid(thread
),
2554 self
->quantum_remaining
, thread
->quantum_remaining
, 0);
2556 self
->quantum_remaining
= thread
->quantum_remaining
;
2557 thread
->quantum_remaining
= 0;
2558 /* Don't set AST_QUANTUM here - old thread might still want to preempt someone else */
2560 #endif /* defined(CONFIG_SCHED_MULTIQ) */
2563 thread
->computation_metered
+= (processor
->last_dispatch
- thread
->computation_epoch
);
2565 if (!(thread
->state
& TH_WAIT
)) {
2569 thread
->last_made_runnable_time
= mach_approximate_time();
2571 machine_thread_going_off_core(thread
, FALSE
);
2573 if (thread
->reason
& AST_QUANTUM
)
2574 thread_setrun(thread
, SCHED_TAILQ
);
2575 else if (thread
->reason
& AST_PREEMPT
)
2576 thread_setrun(thread
, SCHED_HEADQ
);
2578 thread_setrun(thread
, SCHED_PREEMPT
| SCHED_TAILQ
);
2580 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
2581 MACHDBG_CODE(DBG_MACH_SCHED
,MACH_DISPATCH
) | DBG_FUNC_NONE
,
2582 (uintptr_t)thread_tid(thread
), thread
->reason
, thread
->state
,
2583 sched_run_buckets
[TH_BUCKET_RUN
], 0);
2585 if (thread
->wake_active
) {
2586 thread
->wake_active
= FALSE
;
2587 thread_unlock(thread
);
2589 thread_wakeup(&thread
->wake_active
);
2591 thread_unlock(thread
);
2594 wake_unlock(thread
);
2599 boolean_t should_terminate
= FALSE
;
2600 uint32_t new_run_count
;
2602 /* Only the first call to thread_dispatch
2603 * after explicit termination should add
2604 * the thread to the termination queue
2606 if ((thread
->state
& (TH_TERMINATE
|TH_TERMINATE2
)) == TH_TERMINATE
) {
2607 should_terminate
= TRUE
;
2608 thread
->state
|= TH_TERMINATE2
;
2611 thread
->state
&= ~TH_RUN
;
2612 thread
->last_made_runnable_time
= ~0ULL;
2613 thread
->chosen_processor
= PROCESSOR_NULL
;
2615 new_run_count
= sched_run_decr(thread
);
2617 #if CONFIG_SCHED_SFI
2618 if ((thread
->state
& (TH_WAIT
| TH_TERMINATE
)) == TH_WAIT
) {
2619 if (thread
->reason
& AST_SFI
) {
2620 thread
->wait_sfi_begin_time
= processor
->last_dispatch
;
2625 machine_thread_going_off_core(thread
, should_terminate
);
2627 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
2628 MACHDBG_CODE(DBG_MACH_SCHED
,MACH_DISPATCH
) | DBG_FUNC_NONE
,
2629 (uintptr_t)thread_tid(thread
), thread
->reason
, thread
->state
,
2632 (*thread
->sched_call
)(SCHED_CALL_BLOCK
, thread
);
2634 if (thread
->wake_active
) {
2635 thread
->wake_active
= FALSE
;
2636 thread_unlock(thread
);
2638 thread_wakeup(&thread
->wake_active
);
2640 thread_unlock(thread
);
2643 wake_unlock(thread
);
2645 if (should_terminate
)
2646 thread_terminate_enqueue(thread
);
2651 /* Update (new) current thread and reprogram quantum timer */
2653 if (!(self
->state
& TH_IDLE
)) {
2654 uint64_t arg1
, arg2
;
2658 #if CONFIG_SCHED_SFI
2661 new_ast
= sfi_thread_needs_ast(self
, NULL
);
2663 if (new_ast
!= AST_NONE
) {
2668 assertf(processor
->last_dispatch
>= self
->last_made_runnable_time
, "Non-monotonic time? dispatch at 0x%llx, runnable at 0x%llx", processor
->last_dispatch
, self
->last_made_runnable_time
);
2669 latency
= processor
->last_dispatch
- self
->last_made_runnable_time
;
2671 urgency
= thread_get_urgency(self
, &arg1
, &arg2
);
2673 thread_tell_urgency(urgency
, arg1
, arg2
, latency
, self
);
2675 machine_thread_going_on_core(self
, urgency
, latency
);
2678 * Get a new quantum if none remaining.
2680 if (self
->quantum_remaining
== 0) {
2681 thread_quantum_init(self
);
2685 * Set up quantum timer and timeslice.
2687 processor
->quantum_end
= processor
->last_dispatch
+ self
->quantum_remaining
;
2688 timer_call_enter1(&processor
->quantum_timer
, self
, processor
->quantum_end
, TIMER_CALL_SYS_CRITICAL
| TIMER_CALL_LOCAL
);
2690 processor
->first_timeslice
= TRUE
;
2692 timer_call_cancel(&processor
->quantum_timer
);
2693 processor
->first_timeslice
= FALSE
;
2695 thread_tell_urgency(THREAD_URGENCY_NONE
, 0, 0, 0, self
);
2696 machine_thread_going_on_core(self
, THREAD_URGENCY_NONE
, 0);
2699 self
->computation_epoch
= processor
->last_dispatch
;
2700 self
->reason
= AST_NONE
;
2701 processor
->starting_pri
= self
->sched_pri
;
2703 thread_unlock(self
);
2705 #if defined(CONFIG_SCHED_DEFERRED_AST)
2707 * TODO: Can we state that redispatching our old thread is also
2710 if ((((volatile uint32_t)sched_run_buckets
[TH_BUCKET_RUN
]) == 1) &&
2711 !(self
->state
& TH_IDLE
)) {
2712 pset_cancel_deferred_dispatch(processor
->processor_set
, processor
);
2719 * thread_block_reason:
2721 * Forces a reschedule, blocking the caller if a wait
2722 * has been asserted.
2724 * If a continuation is specified, then thread_invoke will
2725 * attempt to discard the thread's kernel stack. When the
2726 * thread resumes, it will execute the continuation function
2727 * on a new kernel stack.
2729 counter(mach_counter_t c_thread_block_calls
= 0;)
2732 thread_block_reason(
2733 thread_continue_t continuation
,
2737 thread_t self
= current_thread();
2738 processor_t processor
;
2739 thread_t new_thread
;
2742 counter(++c_thread_block_calls
);
2746 processor
= current_processor();
2748 /* If we're explicitly yielding, force a subsequent quantum */
2749 if (reason
& AST_YIELD
)
2750 processor
->first_timeslice
= FALSE
;
2752 /* We're handling all scheduling AST's */
2753 ast_off(AST_SCHEDULING
);
2756 if ((continuation
!= NULL
) && (self
->task
!= kernel_task
)) {
2757 if (uthread_get_proc_refcount(self
->uthread
) != 0) {
2758 panic("thread_block_reason with continuation uthread %p with uu_proc_refcount != 0", self
->uthread
);
2763 self
->continuation
= continuation
;
2764 self
->parameter
= parameter
;
2766 if (self
->state
& ~(TH_RUN
| TH_IDLE
)) {
2767 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
2768 MACHDBG_CODE(DBG_MACH_SCHED
,MACH_BLOCK
),
2769 reason
, VM_KERNEL_UNSLIDE(continuation
), 0, 0, 0);
2774 new_thread
= thread_select(self
, processor
, reason
);
2775 thread_unlock(self
);
2776 } while (!thread_invoke(self
, new_thread
, reason
));
2780 return (self
->wait_result
);
/*
 * thread_block:
 *
 * Block the current thread if a wait has been asserted.
 * Thin wrapper: forwards to thread_block_reason() with a NULL
 * parameter and no AST reason flags (AST_NONE).
 *
 * NOTE(review): fragmented extraction -- the return type and the
 * "thread_block(" name line are not visible here; leading digits
 * on some lines are original-file line numbers, not code.
 */
2786 * Block the current thread if a wait has been asserted.
2790 thread_continue_t continuation
)
2792 return thread_block_reason(continuation
, NULL
, AST_NONE
);
/*
 * thread_block_parameter:
 *
 * Same as thread_block(), but additionally forwards an opaque
 * parameter to thread_block_reason() so it can be handed to the
 * continuation when the thread resumes.
 *
 * NOTE(review): fragmented extraction -- the return type line and
 * the "parameter" argument declaration are not visible here.
 */
2796 thread_block_parameter(
2797 thread_continue_t continuation
,
2800 return thread_block_reason(continuation
, parameter
, AST_NONE
);
/*
 * thread_run:
 *
 * Switch directly from the current thread to a new runnable
 * thread, donating the remainder of our quantum via AST_HANDOFF.
 * If thread_invoke() fails to complete the handoff, fall back to
 * the normal thread_select()/thread_invoke() retry loop.
 * Returns the caller's wait_result once it resumes.
 *
 * NOTE(review): fragmented extraction -- the signature's leading
 * lines and closing braces are not visible here.
 */
2806 * Switch directly from the current thread to the
2807 * new thread, handing off our quantum if appropriate.
2809 * New thread must be runnable, and not on a run queue.
2811 * Called at splsched.
2816 thread_continue_t continuation
,
2818 thread_t new_thread
)
2820 ast_t handoff
= AST_HANDOFF
;
/* Record where to resume if our kernel stack is discarded. */
2822 self
->continuation
= continuation
;
2823 self
->parameter
= parameter
;
/* Retry thread selection until a context switch succeeds. */
2825 while (!thread_invoke(self
, new_thread
, handoff
)) {
2826 processor_t processor
= current_processor();
2829 new_thread
= thread_select(self
, processor
, AST_NONE
);
2830 thread_unlock(self
);
2834 return (self
->wait_result
);
/*
 * thread_continue:
 *
 * Entry point when a thread first runs on a new kernel stack after
 * blocking with a continuation.  Dispatches the old thread
 * (thread_dispatch), clears our saved continuation/parameter, and
 * invokes the continuation with the saved wait result.
 * call_continuation() does not return.
 *
 * NOTE(review): fragmented extraction -- the signature, locals'
 * declarations (e.g. "parameter"), and several structural lines
 * are not visible here.
 */
2840 * Called at splsched when a thread first receives
2841 * a new stack after a continuation.
2847 thread_t self
= current_thread();
2848 thread_continue_t continuation
;
2851 DTRACE_SCHED(on__cpu
);
/* Capture the continuation/parameter saved before blocking. */
2853 continuation
= self
->continuation
;
2854 parameter
= self
->parameter
;
2857 kperf_on_cpu(self
, continuation
, NULL
);
/* Finish switching away from the previous thread. */
2860 thread_dispatch(thread
, self
);
2862 self
->continuation
= self
->parameter
= NULL
;
/* presumably an unlock/enable-interrupt step follows when
 * thread != THREAD_NULL -- body not visible; TODO confirm. */
2864 if (thread
!= THREAD_NULL
)
2867 TLOG(1, "thread_continue: calling call_continuation \n");
2868 call_continuation(continuation
, parameter
, self
->wait_result
);
/*
 * thread_quantum_init:
 *
 * Reset a thread's remaining quantum.  Realtime threads receive
 * their reserved computation time; all other threads receive the
 * scheduler's standard initial quantum via
 * SCHED(initial_quantum_size)().
 *
 * NOTE(review): fragmented extraction -- the return type, the
 * "} else {" line, and the closing brace are not visible here.
 */
2873 thread_quantum_init(thread_t thread
)
2875 if (thread
->sched_mode
== TH_MODE_REALTIME
) {
2876 thread
->quantum_remaining
= thread
->realtime
.computation
;
2878 thread
->quantum_remaining
= SCHED(initial_quantum_size
)(thread
);
/*
 * sched_timeshare_initial_quantum_size:
 *
 * Return the initial quantum for a timeshare thread.  Threads in
 * the background share bucket (TH_BUCKET_SHARE_BG) are special-cased;
 * the return statements themselves are not visible in this
 * extraction (presumably a background quantum vs. the standard
 * quantum -- TODO confirm against the full source).
 */
2883 sched_timeshare_initial_quantum_size(thread_t thread
)
2885 if ((thread
!= THREAD_NULL
) && thread
->th_sched_bucket
== TH_BUCKET_SHARE_BG
)
/*
 * run_queue_init:
 *
 * Initialize a run queue before first use: clear the priority
 * bitmap, zero the urgency and thread counts, and initialize one
 * queue head per priority level (NRQS levels).
 *
 * NOTE(review): fragmented extraction -- the function signature,
 * the bitmap-clearing loop body, and the highq initialization are
 * not visible here.
 */
2894 * Initialize a run queue before first use.
/* Clear every word of the priority bitmap. */
2901 for (u_int i
= 0; i
< BITMAP_LEN(NRQS
); i
++)
2903 rq
->urgency
= rq
->count
= 0;
/* One FIFO queue head per priority level. */
2904 for (int i
= 0; i
< NRQS
; i
++)
2905 queue_init(&rq
->queues
[i
]);
/*
 * run_queue_dequeue:
 *
 * Remove and return the next thread from the highest-priority
 * non-empty level of a run queue.  SCHED_HEADQ selects the queue
 * head, otherwise the tail is taken.  Updates runq statistics,
 * the urgency count, and (when the level empties) the priority
 * bitmap and cached highq.
 *
 * The run queue must be locked and not empty.
 *
 * NOTE(review): fragmented extraction -- the signature, the
 * "} else {" between the head/tail branches, count decrement and
 * the final "return thread;" are not visible here.
 */
2909 * run_queue_dequeue:
2911 * Perform a dequeue operation on a run queue,
2912 * and return the resulting thread.
2914 * The run queue must be locked (see thread_run_queue_remove()
2915 * for more info), and not empty.
2923 queue_t queue
= &rq
->queues
[rq
->highq
];
/* SCHED_HEADQ: pop from the front; otherwise from the back. */
2925 if (options
& SCHED_HEADQ
) {
2926 thread
= qe_dequeue_head(queue
, struct thread
, runq_links
);
2928 thread
= qe_dequeue_tail(queue
, struct thread
, runq_links
);
2931 assert(thread
!= THREAD_NULL
);
2932 assert_thread_magic(thread
);
/* Thread is no longer on any run queue. */
2934 thread
->runq
= PROCESSOR_NULL
;
2935 SCHED_STATS_RUNQ_CHANGE(&rq
->runq_stats
, rq
->count
);
/* Urgent-priority bookkeeping must never go negative. */
2937 if (SCHED(priority_is_urgent
)(rq
->highq
)) {
2938 rq
->urgency
--; assert(rq
->urgency
>= 0);
/* Level drained: clear its bitmap bit and recompute highq. */
2940 if (queue_empty(queue
)) {
2941 bitmap_clear(rq
->bitmap
, rq
->highq
);
2942 rq
->highq
= bitmap_first(rq
->bitmap
, NRQS
);
/*
 * run_queue_enqueue:
 *
 * Insert a thread into a run queue at its sched_pri level.  If the
 * level was empty, set its bitmap bit and possibly raise the cached
 * highq; otherwise append at head or tail per SCHED_TAILQ.  Updates
 * urgency bookkeeping and runq statistics.
 *
 * The run queue must be locked.
 *
 * NOTE(review): fragmented extraction -- the signature, the
 * "} else {" between the empty/non-empty branches, the urgency
 * increment, count increment, and "return result;" are not
 * visible here.  `result` presumably reports whether a preemption
 * check is warranted -- TODO confirm against the full source.
 */
2949 * run_queue_enqueue:
2951 * Perform a enqueue operation on a run queue.
2953 * The run queue must be locked (see thread_run_queue_remove()
2962 queue_t queue
= &rq
->queues
[thread
->sched_pri
];
2963 boolean_t result
= FALSE
;
2965 assert_thread_magic(thread
);
/* First thread at this level: mark the bitmap, maybe raise highq. */
2967 if (queue_empty(queue
)) {
2968 enqueue_tail(queue
, &thread
->runq_links
);
2970 rq_bitmap_set(rq
->bitmap
, thread
->sched_pri
);
2971 if (thread
->sched_pri
> rq
->highq
) {
2972 rq
->highq
= thread
->sched_pri
;
/* Level already populated: honor head/tail placement option. */
2976 if (options
& SCHED_TAILQ
)
2977 enqueue_tail(queue
, &thread
->runq_links
);
2979 enqueue_head(queue
, &thread
->runq_links
);
2981 if (SCHED(priority_is_urgent
)(thread
->sched_pri
))
2983 SCHED_STATS_RUNQ_CHANGE(&rq
->runq_stats
, rq
->count
);
/*
 * run_queue_remove:
 *
 * Remove a specific thread from its run queue (not necessarily the
 * highest-priority one).  Updates runq statistics and urgency, and
 * if the thread's priority level empties, clears its bitmap bit and
 * recomputes the cached highq.  Finally marks the thread as no
 * longer enqueued (runq = PROCESSOR_NULL).
 *
 * The run queue must be locked.
 *
 * NOTE(review): fragmented extraction -- the signature and the
 * count decrement are not visible here.
 */
2992 * Remove a specific thread from a runqueue.
2994 * The run queue must be locked.
3001 assert(thread
->runq
!= PROCESSOR_NULL
);
3002 assert_thread_magic(thread
);
3004 remqueue(&thread
->runq_links
);
3005 SCHED_STATS_RUNQ_CHANGE(&rq
->runq_stats
, rq
->count
);
/* Urgency count must never go negative. */
3007 if (SCHED(priority_is_urgent
)(thread
->sched_pri
)) {
3008 rq
->urgency
--; assert(rq
->urgency
>= 0);
/* Level drained: update bitmap and cached highq. */
3011 if (queue_empty(&rq
->queues
[thread
->sched_pri
])) {
3012 /* update run queue status */
3013 bitmap_clear(rq
->bitmap
, thread
->sched_pri
);
3014 rq
->highq
= bitmap_first(rq
->bitmap
, NRQS
);
/* Thread is now off every run queue. */
3017 thread
->runq
= PROCESSOR_NULL
;
/*
 * rt_runq_scan:
 *
 * Walk the realtime run queue and record, into the supplied scan
 * context, the earliest last_made_runnable_time seen among enqueued
 * realtime threads (used by scheduler maintenance to detect
 * long-runnable-but-unscheduled RT threads).
 *
 * NOTE(review): fragmented extraction -- the return type, the
 * splsched/rt_lock acquire and release around the walk, and the
 * closing braces are not visible here.
 */
3020 /* Assumes RT lock is not held, and acquires splsched/rt_lock itself */
3022 rt_runq_scan(sched_update_scan_context_t scan_context
)
3030 qe_foreach_element_safe(thread
, &rt_runq
.queue
, runq_links
) {
/* Track the oldest make-runnable timestamp in the context. */
3031 if (thread
->last_made_runnable_time
< scan_context
->earliest_rt_make_runnable_time
) {
3032 scan_context
->earliest_rt_make_runnable_time
= thread
->last_made_runnable_time
;
/*
 * realtime_queue_insert:
 *
 * Enqueue a thread on the global realtime run queue in
 * earliest-deadline-first order.  An empty queue takes the thread
 * directly at the tail; otherwise the queue is walked to find the
 * first thread with a later deadline and the new thread is inserted
 * before it (or appended at the tail if its deadline is latest).
 * Marks the thread as resident on the RT runq and updates stats.
 *
 * NOTE(review): fragmented extraction -- the return type, the
 * rt_lock acquire/release, the `preempt = TRUE` assignments on
 * head insertion, count increment, and "return preempt;" are not
 * visible here.
 */
3042 * realtime_queue_insert:
3044 * Enqueue a thread for realtime execution.
3047 realtime_queue_insert(thread_t thread
)
3049 queue_t queue
= &rt_runq
.queue
;
3050 uint64_t deadline
= thread
->realtime
.deadline
;
3051 boolean_t preempt
= FALSE
;
/* Empty queue: trivial insert at the tail. */
3055 if (queue_empty(queue
)) {
3056 enqueue_tail(queue
, &thread
->runq_links
);
3059 /* Insert into rt_runq in thread deadline order */
3061 qe_foreach(iter
, queue
) {
3062 thread_t iter_thread
= qe_element(iter
, struct thread
, runq_links
);
3063 assert_thread_magic(iter_thread
);
/* Found the first later deadline: insert before it. */
3065 if (deadline
< iter_thread
->realtime
.deadline
) {
3066 if (iter
== queue_first(queue
))
3068 insque(&thread
->runq_links
, queue_prev(iter
));
/* Reached the end with the latest deadline: append. */
3070 } else if (iter
== queue_last(queue
)) {
3071 enqueue_tail(queue
, &thread
->runq_links
);
/* Thread now lives on the RT run queue. */
3077 thread
->runq
= THREAD_ON_RT_RUNQ
;
3078 SCHED_STATS_RUNQ_CHANGE(&rt_runq
.runq_stats
, rt_runq
.count
);
3089 * Dispatch a thread for realtime execution.
3091 * Thread must be locked. Associated pset must
3092 * be locked, and is returned unlocked.
3096 processor_t processor
,
3099 processor_set_t pset
= processor
->processor_set
;
3102 boolean_t do_signal_idle
= FALSE
, do_cause_ast
= FALSE
;
3104 thread
->chosen_processor
= processor
;
3106 /* <rdar://problem/15102234> */
3107 assert(thread
->bound_processor
== PROCESSOR_NULL
);
3110 * Dispatch directly onto idle processor.
3112 if ( (thread
->bound_processor
== processor
)
3113 && processor
->state
== PROCESSOR_IDLE
) {
3114 re_queue_tail(&pset
->active_queue
, &processor
->processor_queue
);
3116 processor
->next_thread
= thread
;
3117 processor
->current_pri
= thread
->sched_pri
;
3118 processor
->current_thmode
= thread
->sched_mode
;
3119 processor
->current_sfi_class
= thread
->sfi_class
;
3120 processor
->deadline
= thread
->realtime
.deadline
;
3121 processor
->state
= PROCESSOR_DISPATCHING
;
3123 if (processor
!= current_processor()) {
3124 if (!(pset
->pending_AST_cpu_mask
& (1ULL << processor
->cpu_id
))) {
3125 /* cleared on exit from main processor_idle() loop */
3126 pset
->pending_AST_cpu_mask
|= (1ULL << processor
->cpu_id
);
3127 do_signal_idle
= TRUE
;
3132 if (do_signal_idle
) {
3133 machine_signal_idle(processor
);
3138 if (processor
->current_pri
< BASEPRI_RTQUEUES
)
3139 preempt
= (AST_PREEMPT
| AST_URGENT
);
3140 else if (thread
->realtime
.deadline
< processor
->deadline
)
3141 preempt
= (AST_PREEMPT
| AST_URGENT
);
3145 realtime_queue_insert(thread
);
3147 if (preempt
!= AST_NONE
) {
3148 if (processor
->state
== PROCESSOR_IDLE
) {
3149 re_queue_tail(&pset
->active_queue
, &processor
->processor_queue
);
3151 processor
->next_thread
= THREAD_NULL
;
3152 processor
->current_pri
= thread
->sched_pri
;
3153 processor
->current_thmode
= thread
->sched_mode
;
3154 processor
->current_sfi_class
= thread
->sfi_class
;
3155 processor
->deadline
= thread
->realtime
.deadline
;
3156 processor
->state
= PROCESSOR_DISPATCHING
;
3157 if (processor
== current_processor()) {
3160 if (!(pset
->pending_AST_cpu_mask
& (1ULL << processor
->cpu_id
))) {
3161 /* cleared on exit from main processor_idle() loop */
3162 pset
->pending_AST_cpu_mask
|= (1ULL << processor
->cpu_id
);
3163 do_signal_idle
= TRUE
;
3166 } else if (processor
->state
== PROCESSOR_DISPATCHING
) {
3167 if ((processor
->next_thread
== THREAD_NULL
) && ((processor
->current_pri
< thread
->sched_pri
) || (processor
->deadline
> thread
->realtime
.deadline
))) {
3168 processor
->current_pri
= thread
->sched_pri
;
3169 processor
->current_thmode
= thread
->sched_mode
;
3170 processor
->current_sfi_class
= thread
->sfi_class
;
3171 processor
->deadline
= thread
->realtime
.deadline
;
3174 if (processor
== current_processor()) {
3177 if (!(pset
->pending_AST_cpu_mask
& (1ULL << processor
->cpu_id
))) {
3178 /* cleared after IPI causes csw_check() to be called */
3179 pset
->pending_AST_cpu_mask
|= (1ULL << processor
->cpu_id
);
3180 do_cause_ast
= TRUE
;
3185 /* Selected processor was too busy, just keep thread enqueued and let other processors drain it naturally. */
3190 if (do_signal_idle
) {
3191 machine_signal_idle(processor
);
3192 } else if (do_cause_ast
) {
3193 cause_ast_check(processor
);
3198 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
/*
 * priority_is_urgent:
 *
 * Return TRUE if the given priority level is marked urgent in the
 * sched_preempt_pri bitmap (i.e. threads at this priority warrant
 * urgent preemption), FALSE otherwise.
 *
 * NOTE(review): fragmented extraction -- the return type line and
 * braces are not visible here.
 */
3201 priority_is_urgent(int priority
)
3203 return bitmap_test(sched_preempt_pri
, priority
) ? TRUE
: FALSE
;
3206 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
3211 * Dispatch a thread for execution on a
3214 * Thread must be locked. Associated pset must
3215 * be locked, and is returned unlocked.
3219 processor_t processor
,
3223 processor_set_t pset
= processor
->processor_set
;
3225 enum { eExitIdle
, eInterruptRunning
, eDoNothing
} ipi_action
= eDoNothing
;
3226 enum { eNoSignal
, eDoSignal
, eDoDeferredSignal
} do_signal_idle
= eNoSignal
;
3228 boolean_t do_cause_ast
= FALSE
;
3230 thread
->chosen_processor
= processor
;
3233 * Dispatch directly onto idle processor.
3235 if ( (SCHED(direct_dispatch_to_idle_processors
) ||
3236 thread
->bound_processor
== processor
)
3237 && processor
->state
== PROCESSOR_IDLE
) {
3239 re_queue_tail(&pset
->active_queue
, &processor
->processor_queue
);
3241 processor
->next_thread
= thread
;
3242 processor
->current_pri
= thread
->sched_pri
;
3243 processor
->current_thmode
= thread
->sched_mode
;
3244 processor
->current_sfi_class
= thread
->sfi_class
;
3245 processor
->deadline
= UINT64_MAX
;
3246 processor
->state
= PROCESSOR_DISPATCHING
;
3248 if (!(pset
->pending_AST_cpu_mask
& (1ULL << processor
->cpu_id
))) {
3249 /* cleared on exit from main processor_idle() loop */
3250 pset
->pending_AST_cpu_mask
|= (1ULL << processor
->cpu_id
);
3251 do_signal_idle
= eDoSignal
;
3256 if (do_signal_idle
== eDoSignal
) {
3257 machine_signal_idle(processor
);
3264 * Set preemption mode.
3266 #if defined(CONFIG_SCHED_DEFERRED_AST)
3267 /* TODO: Do we need to care about urgency (see rdar://problem/20136239)? */
3269 if (SCHED(priority_is_urgent
)(thread
->sched_pri
) && thread
->sched_pri
> processor
->current_pri
)
3270 preempt
= (AST_PREEMPT
| AST_URGENT
);
3271 else if(processor
->active_thread
&& thread_eager_preemption(processor
->active_thread
))
3272 preempt
= (AST_PREEMPT
| AST_URGENT
);
3273 else if ((thread
->sched_mode
== TH_MODE_TIMESHARE
) && (thread
->sched_pri
< thread
->base_pri
)) {
3274 if(SCHED(priority_is_urgent
)(thread
->base_pri
) && thread
->sched_pri
> processor
->current_pri
) {
3275 preempt
= (options
& SCHED_PREEMPT
)? AST_PREEMPT
: AST_NONE
;
3280 preempt
= (options
& SCHED_PREEMPT
)? AST_PREEMPT
: AST_NONE
;
3282 SCHED(processor_enqueue
)(processor
, thread
, options
);
3284 if (preempt
!= AST_NONE
) {
3285 if (processor
->state
== PROCESSOR_IDLE
) {
3286 re_queue_tail(&pset
->active_queue
, &processor
->processor_queue
);
3288 processor
->next_thread
= THREAD_NULL
;
3289 processor
->current_pri
= thread
->sched_pri
;
3290 processor
->current_thmode
= thread
->sched_mode
;
3291 processor
->current_sfi_class
= thread
->sfi_class
;
3292 processor
->deadline
= UINT64_MAX
;
3293 processor
->state
= PROCESSOR_DISPATCHING
;
3295 ipi_action
= eExitIdle
;
3296 } else if ( processor
->state
== PROCESSOR_DISPATCHING
) {
3297 if ((processor
->next_thread
== THREAD_NULL
) && (processor
->current_pri
< thread
->sched_pri
)) {
3298 processor
->current_pri
= thread
->sched_pri
;
3299 processor
->current_thmode
= thread
->sched_mode
;
3300 processor
->current_sfi_class
= thread
->sfi_class
;
3301 processor
->deadline
= UINT64_MAX
;
3303 } else if ( (processor
->state
== PROCESSOR_RUNNING
||
3304 processor
->state
== PROCESSOR_SHUTDOWN
) &&
3305 (thread
->sched_pri
>= processor
->current_pri
)) {
3306 ipi_action
= eInterruptRunning
;
3310 * New thread is not important enough to preempt what is running, but
3311 * special processor states may need special handling
3313 if (processor
->state
== PROCESSOR_SHUTDOWN
&&
3314 thread
->sched_pri
>= processor
->current_pri
) {
3315 ipi_action
= eInterruptRunning
;
3316 } else if ( processor
->state
== PROCESSOR_IDLE
&&
3317 processor
!= current_processor() ) {
3318 re_queue_tail(&pset
->active_queue
, &processor
->processor_queue
);
3320 processor
->next_thread
= THREAD_NULL
;
3321 processor
->current_pri
= thread
->sched_pri
;
3322 processor
->current_thmode
= thread
->sched_mode
;
3323 processor
->current_sfi_class
= thread
->sfi_class
;
3324 processor
->deadline
= UINT64_MAX
;
3325 processor
->state
= PROCESSOR_DISPATCHING
;
3327 ipi_action
= eExitIdle
;
3331 switch (ipi_action
) {
3335 if (processor
== current_processor()) {
3336 if (csw_check_locked(processor
, pset
, AST_NONE
) != AST_NONE
)
3339 #if defined(CONFIG_SCHED_DEFERRED_AST)
3340 if (!(pset
->pending_deferred_AST_cpu_mask
& (1ULL << processor
->cpu_id
)) &&
3341 !(pset
->pending_AST_cpu_mask
& (1ULL << processor
->cpu_id
))) {
3342 /* cleared on exit from main processor_idle() loop */
3343 pset
->pending_deferred_AST_cpu_mask
|= (1ULL << processor
->cpu_id
);
3344 do_signal_idle
= eDoDeferredSignal
;
3347 if (!(pset
->pending_AST_cpu_mask
& (1ULL << processor
->cpu_id
))) {
3348 /* cleared on exit from main processor_idle() loop */
3349 pset
->pending_AST_cpu_mask
|= (1ULL << processor
->cpu_id
);
3350 do_signal_idle
= eDoSignal
;
3355 case eInterruptRunning
:
3356 if (processor
== current_processor()) {
3357 if (csw_check_locked(processor
, pset
, AST_NONE
) != AST_NONE
)
3360 if (!(pset
->pending_AST_cpu_mask
& (1ULL << processor
->cpu_id
))) {
3361 /* cleared after IPI causes csw_check() to be called */
3362 pset
->pending_AST_cpu_mask
|= (1ULL << processor
->cpu_id
);
3363 do_cause_ast
= TRUE
;
3371 if (do_signal_idle
== eDoSignal
) {
3372 machine_signal_idle(processor
);
3374 #if defined(CONFIG_SCHED_DEFERRED_AST)
3375 else if (do_signal_idle
== eDoDeferredSignal
) {
3377 * TODO: The ability to cancel this signal could make
3378 * sending it outside of the pset lock an issue. Do
3379 * we need to address this? Or would the only fallout
3380 * be that the core takes a signal? As long as we do
3381 * not run the risk of having a core marked as signal
3382 * outstanding, with no real signal outstanding, the
3383 * only result should be that we fail to cancel some
3386 machine_signal_idle_deferred(processor
);
3389 else if (do_cause_ast
) {
3390 cause_ast_check(processor
);
/*
 * choose_next_pset:
 *
 * Walk the sibling processor-set ring starting after `pset` and
 * return the first pset with at least one online processor.  If no
 * other pset qualifies, the walk wraps around and the original pset
 * is the result.
 *
 * NOTE(review): fragmented extraction -- the function name line,
 * the "do {" opener, and the final "return nset;" are not visible
 * here.
 */
3397 * Return the next sibling pset containing
3398 * available processors.
3400 * Returns the original pset if none other is
3403 static processor_set_t
3405 processor_set_t pset
)
3407 processor_set_t nset
= pset
;
3410 nset
= next_pset(nset
);
3411 } while (nset
->online_processor_count
< 1 && nset
!= pset
);
3419 * Choose a processor for the thread, beginning at
3420 * the pset. Accepts an optional processor hint in
3423 * Returns a processor, possibly from a different pset.
3425 * The thread must be locked. The pset must be locked,
3426 * and the resulting pset is locked on return.
3430 processor_set_t pset
,
3431 processor_t processor
,
3434 processor_set_t nset
, cset
= pset
;
3436 assert(thread
->sched_pri
<= BASEPRI_RTQUEUES
);
3439 * Prefer the hinted processor, when appropriate.
3442 /* Fold last processor hint from secondary processor to its primary */
3443 if (processor
!= PROCESSOR_NULL
) {
3444 processor
= processor
->processor_primary
;
3448 * Only consult platform layer if pset is active, which
3449 * it may not be in some cases when a multi-set system
3450 * is going to sleep.
3452 if (pset
->online_processor_count
) {
3453 if ((processor
== PROCESSOR_NULL
) || (processor
->processor_set
== pset
&& processor
->state
== PROCESSOR_IDLE
)) {
3454 processor_t mc_processor
= machine_choose_processor(pset
, processor
);
3455 if (mc_processor
!= PROCESSOR_NULL
)
3456 processor
= mc_processor
->processor_primary
;
3461 * At this point, we may have a processor hint, and we may have
3462 * an initial starting pset. If the hint is not in the pset, or
3463 * if the hint is for a processor in an invalid state, discard
3466 if (processor
!= PROCESSOR_NULL
) {
3467 if (processor
->processor_set
!= pset
) {
3468 processor
= PROCESSOR_NULL
;
3469 } else if (!processor
->is_recommended
) {
3470 processor
= PROCESSOR_NULL
;
3472 switch (processor
->state
) {
3473 case PROCESSOR_START
:
3474 case PROCESSOR_SHUTDOWN
:
3475 case PROCESSOR_OFF_LINE
:
3477 * Hint is for a processor that cannot support running new threads.
3479 processor
= PROCESSOR_NULL
;
3481 case PROCESSOR_IDLE
:
3483 * Hint is for an idle processor. Assume it is no worse than any other
3484 * idle processor. The platform layer had an opportunity to provide
3485 * the "least cost idle" processor above.
3488 case PROCESSOR_RUNNING
:
3489 case PROCESSOR_DISPATCHING
:
3491 * Hint is for an active CPU. This fast-path allows
3492 * realtime threads to preempt non-realtime threads
3493 * to regain their previous executing processor.
3495 if ((thread
->sched_pri
>= BASEPRI_RTQUEUES
) &&
3496 (processor
->current_pri
< BASEPRI_RTQUEUES
))
3499 /* Otherwise, use hint as part of search below */
3502 processor
= PROCESSOR_NULL
;
3509 * Iterate through the processor sets to locate
3510 * an appropriate processor. Seed results with
3511 * a last-processor hint, if available, so that
3512 * a search must find something strictly better
3515 * A primary/secondary pair of SMT processors are
3516 * "unpaired" if the primary is busy but its
3517 * corresponding secondary is idle (so the physical
3518 * core has full use of its resources).
3521 integer_t lowest_priority
= MAXPRI
+ 1;
3522 integer_t lowest_unpaired_primary_priority
= MAXPRI
+ 1;
3523 integer_t lowest_count
= INT_MAX
;
3524 uint64_t furthest_deadline
= 1;
3525 processor_t lp_processor
= PROCESSOR_NULL
;
3526 processor_t lp_unpaired_primary_processor
= PROCESSOR_NULL
;
3527 processor_t lp_unpaired_secondary_processor
= PROCESSOR_NULL
;
3528 processor_t lc_processor
= PROCESSOR_NULL
;
3529 processor_t fd_processor
= PROCESSOR_NULL
;
3531 if (processor
!= PROCESSOR_NULL
) {
3532 /* All other states should be enumerated above. */
3533 assert(processor
->state
== PROCESSOR_RUNNING
|| processor
->state
== PROCESSOR_DISPATCHING
);
3535 lowest_priority
= processor
->current_pri
;
3536 lp_processor
= processor
;
3538 if (processor
->current_pri
>= BASEPRI_RTQUEUES
) {
3539 furthest_deadline
= processor
->deadline
;
3540 fd_processor
= processor
;
3543 lowest_count
= SCHED(processor_runq_count
)(processor
);
3544 lc_processor
= processor
;
3550 * Choose an idle processor, in pset traversal order
3552 qe_foreach_element(processor
, &cset
->idle_queue
, processor_queue
) {
3553 if (processor
->is_recommended
)
3558 * Otherwise, enumerate active and idle processors to find candidates
3559 * with lower priority/etc.
3562 qe_foreach_element(processor
, &cset
->active_queue
, processor_queue
) {
3564 if (!processor
->is_recommended
) {
3568 integer_t cpri
= processor
->current_pri
;
3569 if (cpri
< lowest_priority
) {
3570 lowest_priority
= cpri
;
3571 lp_processor
= processor
;
3574 if ((cpri
>= BASEPRI_RTQUEUES
) && (processor
->deadline
> furthest_deadline
)) {
3575 furthest_deadline
= processor
->deadline
;
3576 fd_processor
= processor
;
3579 integer_t ccount
= SCHED(processor_runq_count
)(processor
);
3580 if (ccount
< lowest_count
) {
3581 lowest_count
= ccount
;
3582 lc_processor
= processor
;
3587 * For SMT configs, these idle secondary processors must have active primary. Otherwise
3588 * the idle primary would have short-circuited the loop above
3590 qe_foreach_element(processor
, &cset
->idle_secondary_queue
, processor_queue
) {
3592 if (!processor
->is_recommended
) {
3596 processor_t cprimary
= processor
->processor_primary
;
3598 /* If the primary processor is offline or starting up, it's not a candidate for this path */
3599 if (cprimary
->state
== PROCESSOR_RUNNING
|| cprimary
->state
== PROCESSOR_DISPATCHING
) {
3600 integer_t primary_pri
= cprimary
->current_pri
;
3602 if (primary_pri
< lowest_unpaired_primary_priority
) {
3603 lowest_unpaired_primary_priority
= primary_pri
;
3604 lp_unpaired_primary_processor
= cprimary
;
3605 lp_unpaired_secondary_processor
= processor
;
3611 if (thread
->sched_pri
>= BASEPRI_RTQUEUES
) {
3614 * For realtime threads, the most important aspect is
3615 * scheduling latency, so we attempt to assign threads
3616 * to good preemption candidates (assuming an idle primary
3617 * processor was not available above).
3620 if (thread
->sched_pri
> lowest_unpaired_primary_priority
) {
3621 /* Move to end of active queue so that the next thread doesn't also pick it */
3622 re_queue_tail(&cset
->active_queue
, &lp_unpaired_primary_processor
->processor_queue
);
3623 return lp_unpaired_primary_processor
;
3625 if (thread
->sched_pri
> lowest_priority
) {
3626 /* Move to end of active queue so that the next thread doesn't also pick it */
3627 re_queue_tail(&cset
->active_queue
, &lp_processor
->processor_queue
);
3628 return lp_processor
;
3630 if (thread
->realtime
.deadline
< furthest_deadline
)
3631 return fd_processor
;
3634 * If all primary and secondary CPUs are busy with realtime
3635 * threads with deadlines earlier than us, move on to next
3641 if (thread
->sched_pri
> lowest_unpaired_primary_priority
) {
3642 /* Move to end of active queue so that the next thread doesn't also pick it */
3643 re_queue_tail(&cset
->active_queue
, &lp_unpaired_primary_processor
->processor_queue
);
3644 return lp_unpaired_primary_processor
;
3646 if (thread
->sched_pri
> lowest_priority
) {
3647 /* Move to end of active queue so that the next thread doesn't also pick it */
3648 re_queue_tail(&cset
->active_queue
, &lp_processor
->processor_queue
);
3649 return lp_processor
;
3653 * If all primary processor in this pset are running a higher
3654 * priority thread, move on to next pset. Only when we have
3655 * exhausted this search do we fall back to other heuristics.
3660 * Move onto the next processor set.
3662 nset
= next_pset(cset
);
3670 } while (nset
!= pset
);
3673 * Make sure that we pick a running processor,
3674 * and that the correct processor set is locked.
3675 * Since we may have unlock the candidate processor's
3676 * pset, it may have changed state.
3678 * All primary processors are running a higher priority
3679 * thread, so the only options left are enqueuing on
3680 * the secondary processor that would perturb the least priority
3681 * primary, or the least busy primary.
3685 /* lowest_priority is evaluated in the main loops above */
3686 if (lp_unpaired_secondary_processor
!= PROCESSOR_NULL
) {
3687 processor
= lp_unpaired_secondary_processor
;
3688 lp_unpaired_secondary_processor
= PROCESSOR_NULL
;
3689 } else if (lc_processor
!= PROCESSOR_NULL
) {
3690 processor
= lc_processor
;
3691 lc_processor
= PROCESSOR_NULL
;
3694 * All processors are executing higher
3695 * priority threads, and the lowest_count
3696 * candidate was not usable
3698 processor
= master_processor
;
3702 * Check that the correct processor set is
3705 if (cset
!= processor
->processor_set
) {
3707 cset
= processor
->processor_set
;
3712 * We must verify that the chosen processor is still available.
3713 * master_processor is an exception, since we may need to preempt
3714 * a running thread on it during processor shutdown (for sleep),
3715 * and that thread needs to be enqueued on its runqueue to run
3716 * when the processor is restarted.
3718 if (processor
!= master_processor
&& (processor
->state
== PROCESSOR_SHUTDOWN
|| processor
->state
== PROCESSOR_OFF_LINE
))
3719 processor
= PROCESSOR_NULL
;
3721 } while (processor
== PROCESSOR_NULL
);
/*
 *	thread_setrun:
 *
 *	Dispatch thread for execution, onto an idle
 *	processor or run queue, and signal a preemption
 *	as appropriate.
 *
 *	Thread must be locked.
 *
 *	NOTE(review): this extraction has dropped the function signature and
 *	several structural lines (braces, #if __SMP__); comments below describe
 *	only the logic that is visible here.
 */
	processor_t			processor;
	processor_set_t		pset;

	/* Caller must hand us a runnable, not-terminating, not-enqueued thread. */
	assert((thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_TERMINATE|TH_TERMINATE2)) == TH_RUN);
	assert(thread->runq == PROCESSOR_NULL);

	/*
	 *	Update priority if needed.
	 */
	if (SCHED(can_update_priority)(thread))
		SCHED(update_priority)(thread);

	/* Re-classify for Selective Forced Idle before choosing a processor. */
	thread->sfi_class = sfi_thread_classify(thread);

	assert(thread->runq == PROCESSOR_NULL);

	if (thread->bound_processor == PROCESSOR_NULL) {
		/*
		 *	Unbound case: pick a pset, then let the scheduler policy
		 *	choose a processor within it.
		 */
		if (thread->affinity_set != AFFINITY_SET_NULL) {
			/*
			 *	Use affinity set policy hint.
			 */
			pset = thread->affinity_set->aset_pset;

			processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread);

			SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE,
			    (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0);
		} else if (thread->last_processor != PROCESSOR_NULL) {
			/*
			 *	Simple (last processor) affinity case.
			 */
			processor = thread->last_processor;
			pset = processor->processor_set;

			/* Hint the policy toward the last processor for cache warmth. */
			processor = SCHED(choose_processor)(pset, processor, thread);

			SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE,
			    (uintptr_t)thread_tid(thread), thread->last_processor->cpu_id, processor->cpu_id, processor->state, 0);

			/*
			 *	No affinity and no history: utilize a per-task hint to
			 *	spread threads among the available processor sets.
			 */
			task_t task = thread->task;

			pset = task->pset_hint;
			if (pset == PROCESSOR_SET_NULL)
				pset = current_processor()->processor_set;

			/* Rotate round-robin so successive threads spread out. */
			pset = choose_next_pset(pset);

			processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread);
			/* Remember where this task's thread landed for next time. */
			task->pset_hint = processor->processor_set;

			SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE,
			    (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0);

		/*
		 *	Bound case: unconditionally dispatch on the processor.
		 */
		processor = thread->bound_processor;
		pset = processor->processor_set;

		SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE,
		    (uintptr_t)thread_tid(thread), (uintptr_t)-2, processor->cpu_id, processor->state, 0);
#else /* !__SMP__ */
	/* Only one processor to choose */
	assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == master_processor);
	processor = master_processor;
	pset = processor->processor_set;
#endif /* !__SMP__ */

	/*
	 *	Dispatch the thread on the chosen processor.
	 *	TODO: This should be based on sched_mode, not sched_pri
	 */
	if (thread->sched_pri >= BASEPRI_RTQUEUES)
		realtime_setrun(processor, thread);
		processor_setrun(processor, thread, options);
	/*
	 * Start from the task's pset hint and rotate to the next pset so that
	 * a task's threads spread across processor sets over time.
	 * NOTE(review): the enclosing function header and return appear to
	 * have been dropped by extraction.
	 */
	processor_set_t		pset = task->pset_hint;

	/* Only rotate when a hint already exists; otherwise leave it unset. */
	if (pset != PROCESSOR_SET_NULL)
		pset = choose_next_pset(pset);
/*
 *	csw_check:
 *
 *	Check for a preemption point in
 *	the current context.
 *
 *	Called at splsched with thread locked.
 *
 *	NOTE(review): signature, locals and pset lock/unlock lines appear
 *	elided by extraction; visible logic acknowledges any pending remote
 *	AST and delegates the policy decision to csw_check_locked().
 */
	processor_t		processor,

	processor_set_t	pset = processor->processor_set;

	/* If we were sent a remote AST and interrupted a running processor, acknowledge it here with pset lock held */
	pset->pending_AST_cpu_mask &= ~(1ULL << processor->cpu_id);

	result = csw_check_locked(processor, pset, check_reason);
/*
 *	csw_check_locked:
 *
 *	Check for preemption at splsched with
 *	pset and thread locked.  Returns the AST flags (OR'ed with the
 *	caller-provided check_reason) describing why the current thread
 *	should yield, or falls through when no preemption is needed.
 */
	processor_t		processor,
	processor_set_t	pset __unused,

	thread_t thread = processor->active_thread;

	if (processor->first_timeslice) {
		/* Still in the first quantum: only realtime work preempts urgently. */
		if (rt_runq.count > 0)
			return (check_reason | AST_PREEMPT | AST_URGENT);

	/* Quantum expired: any queued realtime thread wins. */
	if (rt_runq.count > 0) {
		/* Urgent only if we are not already running at realtime priority. */
		if (BASEPRI_RTQUEUES > processor->current_pri)
			return (check_reason | AST_PREEMPT | AST_URGENT);
			return (check_reason | AST_PREEMPT);

	/* Ask the scheduler policy whether its runqueue warrants a switch. */
	result = SCHED(processor_csw_check)(processor);
	if (result != AST_NONE)
		return (check_reason | result | (thread_eager_preemption(thread) ? AST_URGENT : AST_NONE));

	/*
	 * If the current thread is running on a processor that is no longer recommended, gently
	 * (non-urgently) get to a point and then block, and which point thread_select() should
	 * try to idle the processor and re-dispatch the thread to a recommended processor.
	 */
	if (!processor->is_recommended)
		return (check_reason | AST_PREEMPT);

	/*
	 * Even though we could continue executing on this processor, a
	 * secondary SMT core should try to shed load to another primary core.
	 *
	 * TODO: Should this do the same check that thread_select does? i.e.
	 * if no bound threads target this processor, and idle primaries exist, preempt
	 * The case of RT threads existing is already taken care of above
	 * Consider Capri in this scenario.
	 *
	 * if (!SCHED(processor_bound_count)(processor) && !queue_empty(&pset->idle_queue))
	 *
	 * TODO: Alternatively - check if only primary is idle, or check if primary's pri is lower than mine.
	 */
	if (processor->current_pri < BASEPRI_RTQUEUES &&
	    processor->processor_primary != processor)
		return (check_reason | AST_PREEMPT);

	/* A suspended thread must get off-core so the suspension can take effect. */
	if (thread->state & TH_SUSP)
		return (check_reason | AST_PREEMPT);

#if CONFIG_SCHED_SFI
	/*
	 * Current thread may not need to be preempted, but maybe needs
	 * an SFI wait (deferred idle window).
	 */
	result = sfi_thread_needs_ast(thread, NULL);
	if (result != AST_NONE)
		return (check_reason | result);
/*
 *	set_sched_pri:
 *
 *	Set the scheduled priority of the specified thread.
 *
 *	This may cause the thread to change queues.
 *
 *	Thread must be locked.
 */
	thread_t cthread = current_thread();
	boolean_t is_current_thread = (thread == cthread) ? TRUE : FALSE;
	int curgency, nurgency;
	uint64_t urgency_param1, urgency_param2;
	boolean_t removed_from_runq = FALSE;

	/* If we're already at this priority, no need to mess with the runqueue */
	if (priority == thread->sched_pri)

	if (is_current_thread) {
		/* The running thread is never enqueued; capture urgency before the change. */
		assert(thread->runq == PROCESSOR_NULL);
		curgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);

		/* Pull the thread off its runqueue (if any) before repositioning it. */
		removed_from_runq = thread_run_queue_remove(thread);

	thread->sched_pri = priority;

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
	    (uintptr_t)thread_tid(thread),
	    0, /* eventually, 'reason' */

	if (is_current_thread) {
		nurgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
		/*
		 * set_sched_pri doesn't alter RT params. We expect direct base priority/QoS
		 * class alterations from user space to occur relatively infrequently, hence
		 * those are lazily handled. QoS classes have distinct priority bands, and QoS
		 * inheritance is expected to involve priority changes.
		 */
		if (nurgency != curgency) {
			thread_tell_urgency(nurgency, urgency_param1, urgency_param2, 0, thread);
			/* Inform the platform layer (e.g. for perf-control state). */
			machine_thread_going_on_core(thread, nurgency, 0);

	/* TODO: Should this be TAILQ if it went down, HEADQ if it went up? */
	if (removed_from_runq)
		thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
	else if (thread->state & TH_RUN) {
		/* Runnable but not queued: either on-core here or on another CPU. */
		processor_t processor = thread->last_processor;

		if (is_current_thread) {
			/* Keep the processor's cached snapshot of the running thread current. */
			processor->current_pri = priority;
			processor->current_thmode = thread->sched_mode;
			processor->current_sfi_class = thread->sfi_class = sfi_thread_classify(thread);
			/* The priority change may itself create a preemption point. */
			if ((preempt = csw_check(processor, AST_NONE)) != AST_NONE)
		} else if (processor != PROCESSOR_NULL && processor->active_thread == thread)
			/* Running remotely: poke that CPU so it re-evaluates preemption. */
			cause_ast_check(processor);
4023 * thread_run_queue_remove_for_handoff
4025 * Pull a thread or its (recursive) push target out of the runqueue
4026 * so that it is ready for thread_run()
4028 * Called at splsched
4030 * Returns the thread that was pulled or THREAD_NULL if no thread could be pulled.
4031 * This may be different than the thread that was passed in.
4034 thread_run_queue_remove_for_handoff(thread_t thread
) {
4036 thread_t pulled_thread
= THREAD_NULL
;
4038 thread_lock(thread
);
4041 * Check that the thread is not bound
4042 * to a different processor, and that realtime
4045 * Next, pull it off its run queue. If it
4046 * doesn't come, it's not eligible.
4049 processor_t processor
= current_processor();
4050 if (processor
->current_pri
< BASEPRI_RTQUEUES
&& thread
->sched_pri
< BASEPRI_RTQUEUES
&&
4051 (thread
->bound_processor
== PROCESSOR_NULL
|| thread
->bound_processor
== processor
)) {
4053 if (thread_run_queue_remove(thread
))
4054 pulled_thread
= thread
;
4057 thread_unlock(thread
);
4059 return pulled_thread
;
/*
 *	thread_run_queue_remove:
 *
 *	Remove a thread from its current run queue and
 *	return TRUE if successful.
 *
 *	Thread must be locked.
 *
 *	If thread->runq is PROCESSOR_NULL, the thread will not re-enter the
 *	run queues because the caller locked the thread.  Otherwise
 *	the thread is on a run queue, but could be chosen for dispatch
 *	and removed by another processor under a different lock, which
 *	will set thread->runq to PROCESSOR_NULL.
 *
 *	Hence the thread select path must not rely on anything that could
 *	be changed under the thread lock after calling this function,
 *	most importantly thread->sched_pri.
 */
thread_run_queue_remove(

	boolean_t removed = FALSE;
	/* Snapshot of the enqueuing processor; may be racing toward NULL. */
	processor_t processor = thread->runq;

	if ((thread->state & (TH_RUN|TH_WAIT)) == TH_WAIT) {
		/* Thread isn't runnable */
		assert(thread->runq == PROCESSOR_NULL);

	if (processor == PROCESSOR_NULL) {
		/*
		 * The thread is either not on the runq,
		 * or is in the midst of being removed from the runq.
		 *
		 * runq is set to NULL under the pset lock, not the thread
		 * lock, so the thread may still be in the process of being dequeued
		 * from the runq. It will wait in invoke for the thread lock to be
		 * dropped.
		 */

	if (thread->sched_pri < BASEPRI_RTQUEUES) {
		/* Timeshare/fixed thread: delegate to the policy's queue-remove. */
		return SCHED(processor_queue_remove)(processor, thread);

	/* Realtime path: re-check runq now that the RT lock is held. */
	if (thread->runq != PROCESSOR_NULL) {
		/*
		 *	Thread is on the RT run queue and we have a lock on
		 *	that run queue.
		 */
		assert(thread->runq == THREAD_ON_RT_RUNQ);

		remqueue(&thread->runq_links);
		SCHED_STATS_RUNQ_CHANGE(&rt_runq.runq_stats, rt_runq.count);

		thread->runq = PROCESSOR_NULL;
4136 * Put the thread back where it goes after a thread_run_queue_remove
4138 * Thread must have been removed under the same thread lock hold
4140 * thread locked, at splsched
4143 thread_run_queue_reinsert(thread_t thread
, integer_t options
)
4145 assert(thread
->runq
== PROCESSOR_NULL
);
4147 assert(thread
->state
& (TH_RUN
));
4148 thread_setrun(thread
, options
);
4153 sys_override_cpu_throttle(int flag
)
4155 if (flag
== CPU_THROTTLE_ENABLE
)
4156 cpu_throttle_enabled
= 1;
4157 if (flag
== CPU_THROTTLE_DISABLE
)
4158 cpu_throttle_enabled
= 0;
/*
 * Classify a thread's urgency for perf-control consumers.
 * Returns one of THREAD_URGENCY_{NONE,REAL_TIME,BACKGROUND,NORMAL} and
 * fills *arg1/*arg2 with class-specific parameters.
 * NOTE(review): some assignment lines in the NONE branch appear to have
 * been dropped by extraction.
 */
thread_get_urgency(thread_t thread, uint64_t *arg1, uint64_t *arg2)

	if (thread == NULL || (thread->state & TH_IDLE)) {
		/* No thread, or the idle thread: nothing urgent to report. */

		return (THREAD_URGENCY_NONE);
	} else if (thread->sched_mode == TH_MODE_REALTIME) {
		/* Realtime: report the thread's period and deadline. */
		*arg1 = thread->realtime.period;
		*arg2 = thread->realtime.deadline;

		return (THREAD_URGENCY_REAL_TIME);
	} else if (cpu_throttle_enabled &&
	    ((thread->sched_pri <= MAXPRI_THROTTLE) && (thread->base_pri <= MAXPRI_THROTTLE))) {
		/*
		 * Background urgency applied when thread priority is MAXPRI_THROTTLE or lower and thread is not promoted
		 */
		*arg1 = thread->sched_pri;
		*arg2 = thread->base_pri;

		return (THREAD_URGENCY_BACKGROUND);

		/* For otherwise unclassified threads, report throughput QoS
		 * parameters
		 */
		*arg1 = proc_get_effective_thread_policy(thread, TASK_POLICY_THROUGH_QOS);
		*arg2 = proc_get_effective_task_policy(thread->task, TASK_POLICY_THROUGH_QOS);

		return (THREAD_URGENCY_NORMAL);
/*
 *	This is the processor idle loop, which just looks for other threads
 *	to execute.  Processor idle threads invoke this without supplying a
 *	current thread to idle without an asserted wait state.
 *
 *	Returns a the next thread to execute if dispatched directly.
 *
 *	NOTE(review): the extraction has dropped the signature, the idle loop
 *	braces, pset lock/unlock lines and several control-flow lines; the
 *	comments below annotate only what is visible.
 */

#define IDLE_KERNEL_DEBUG_CONSTANT(...) KERNEL_DEBUG_CONSTANT(__VA_ARGS__)
#define IDLE_KERNEL_DEBUG_CONSTANT(...) do { } while(0)

	processor_t			processor)

	processor_set_t pset = processor->processor_set;
	thread_t new_thread;

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_START,
	    (uintptr_t)thread_tid(thread), 0, 0, 0, 0);

	SCHED_STATS_CPU_IDLE_START(processor);

	/* Account the time from here on to the idle-state timer. */
	timer_switch(&PROCESSOR_DATA(processor, system_state),
	    mach_absolute_time(), &PROCESSOR_DATA(processor, idle_state));
	PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, idle_state);

	/* Idle-spin exit conditions (checked each loop iteration): */
	if (processor->state != PROCESSOR_IDLE) /* unsafe, but worst case we loop around once */
	/* A remote CPU has signalled us. */
	if (pset->pending_AST_cpu_mask & (1ULL << processor->cpu_id))
	if (processor->is_recommended) {
	/* A thread is bound to this processor. */
	if (SCHED(processor_bound_count)(processor))

#if CONFIG_SCHED_IDLE_IN_PLACE
	if (thread != THREAD_NULL) {
		/* Did idle-in-place thread wake up */
		if ((thread->state & (TH_WAIT|TH_SUSP)) != TH_WAIT || thread->wake_active)

	IDLE_KERNEL_DEBUG_CONSTANT(
	    MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq.count, SCHED(processor_runq_count)(processor), -1, 0);

	/* Bracket the platform low-power idle state. */
	machine_track_platform_idle(TRUE);
	machine_track_platform_idle(FALSE);

	IDLE_KERNEL_DEBUG_CONSTANT(
	    MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq.count, SCHED(processor_runq_count)(processor), -2, 0);

	if (!SCHED(processor_queue_empty)(processor)) {
		/* Secondary SMT processors respond to directed wakeups
		 * exclusively. Some platforms induce 'spurious' SMT wakeups.
		 */
		if (processor->processor_primary == processor)

	/* Leaving idle: switch accounting back to the system-state timer. */
	timer_switch(&PROCESSOR_DATA(processor, idle_state),
	    mach_absolute_time(), &PROCESSOR_DATA(processor, system_state));
	PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, system_state);

	/* If we were sent a remote AST and came out of idle, acknowledge it here with pset lock held */
	pset->pending_AST_cpu_mask &= ~(1ULL << processor->cpu_id);
#if defined(CONFIG_SCHED_DEFERRED_AST)
	pset->pending_deferred_AST_cpu_mask &= ~(1ULL << processor->cpu_id);

	state = processor->state;
	if (state == PROCESSOR_DISPATCHING) {
		/*
		 *	Commmon case -- cpu dispatched.
		 */
		new_thread = processor->next_thread;
		processor->next_thread = THREAD_NULL;
		processor->state = PROCESSOR_RUNNING;

		if ((new_thread != THREAD_NULL) && (SCHED(processor_queue_has_priority)(processor, new_thread->sched_pri, FALSE) ||
		    (rt_runq.count > 0)) ) {
			/* Something higher priority has popped up on the runqueue - redispatch this thread elsewhere */
			processor->current_pri = IDLEPRI;
			processor->current_thmode = TH_MODE_FIXED;
			processor->current_sfi_class = SFI_CLASS_KERNEL;
			processor->deadline = UINT64_MAX;

			thread_lock(new_thread);
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REDISPATCH), (uintptr_t)thread_tid(new_thread), new_thread->sched_pri, rt_runq.count, 0, 0);
			thread_setrun(new_thread, SCHED_HEADQ);
			thread_unlock(new_thread);

			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			    MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END,
			    (uintptr_t)thread_tid(thread), state, 0, 0, 0);

			return (THREAD_NULL);

		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END,
		    (uintptr_t)thread_tid(thread), state, (uintptr_t)thread_tid(new_thread), 0, 0);

		/* Hand the dispatched thread straight back to the caller to run. */
		return (new_thread);

	} else if (state == PROCESSOR_IDLE) {
		/* Woke without a dispatch: rejoin the active set and run the idle thread. */
		re_queue_tail(&pset->active_queue, &processor->processor_queue);

		processor->state = PROCESSOR_RUNNING;
		processor->current_pri = IDLEPRI;
		processor->current_thmode = TH_MODE_FIXED;
		processor->current_sfi_class = SFI_CLASS_KERNEL;
		processor->deadline = UINT64_MAX;

	} else if (state == PROCESSOR_SHUTDOWN) {
		/*
		 *	Going off-line.  Force a
		 *	reschedule.
		 */
		if ((new_thread = processor->next_thread) != THREAD_NULL) {
			/* Push the pending dispatch back into the system before shutdown. */
			processor->next_thread = THREAD_NULL;
			processor->current_pri = IDLEPRI;
			processor->current_thmode = TH_MODE_FIXED;
			processor->current_sfi_class = SFI_CLASS_KERNEL;
			processor->deadline = UINT64_MAX;

			thread_lock(new_thread);
			thread_setrun(new_thread, SCHED_HEADQ);
			thread_unlock(new_thread);

			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			    MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END,
			    (uintptr_t)thread_tid(thread), state, 0, 0, 0);

			return (THREAD_NULL);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END,
	    (uintptr_t)thread_tid(thread), state, 0, 0, 0);

	return (THREAD_NULL);
/*
 *	Each processor has a dedicated thread which
 *	executes the idle loop when there is no suitable
 *	previous context.
 *
 *	NOTE(review): the function signature appears elided by extraction.
 */
	processor_t		processor = current_processor();
	thread_t		new_thread;

	/* Run the idle loop; it may hand us a thread to switch to directly. */
	new_thread = processor_idle(THREAD_NULL, processor);
	if (new_thread != THREAD_NULL) {
		/* Direct handoff: run the dispatched thread, continuing here when idle again. */
		thread_run(processor->idle_thread, (thread_continue_t)idle_thread, NULL, new_thread);

	/* Otherwise block; the scheduler will pick the next thread. */
	thread_block((thread_continue_t)idle_thread);
	/*
	 * idle_thread_create: create and initialize the dedicated idle thread
	 * for a processor.  NOTE(review): the function name line, spl
	 * bracketing and the error-return after kernel_thread_create appear
	 * elided by extraction.
	 */
	processor_t		processor)

	kern_return_t	result;

	result = kernel_thread_create((thread_continue_t)idle_thread, NULL, MAXPRI_KERNEL, &thread);
	if (result != KERN_SUCCESS)

	thread_lock(thread);
	/* Pin the idle thread to its processor and mark it as the idle thread. */
	thread->bound_processor = processor;
	processor->idle_thread = thread;
	/* Idle threads run at the lowest priority. */
	thread->sched_pri = thread->base_pri = IDLEPRI;
	thread->state = (TH_RUN | TH_IDLE);
	thread->options |= TH_OPT_IDLE_THREAD;
	thread_unlock(thread);

	/* Drop the creation reference; the processor now owns the thread. */
	thread_deallocate(thread);

	return (KERN_SUCCESS);
/*
 *	sched_startup:
 *
 *	Kicks off scheduler services.
 *
 *	Called at splsched.
 */
	kern_return_t	result;

	simple_lock_init(&sched_vm_group_list_lock, 0);

	/* Spawn the scheduler maintenance thread at top kernel priority. */
	result = kernel_thread_start_priority((thread_continue_t)sched_init_thread,
	    (void *)SCHED(maintenance_continuation), MAXPRI_KERNEL, &thread);
	if (result != KERN_SUCCESS)
		panic("sched_startup");

	thread_deallocate(thread);

	assert_thread_magic(thread);

	/*
	 * Yield to the sched_init_thread once, to
	 * initialize our own thread after being switched
	 * back to.
	 *
	 * The current thread is the only other thread
	 * active at this point.
	 */
	thread_block(THREAD_CONTINUE_NULL);
#if defined(CONFIG_SCHED_TIMESHARE_CORE)

/* Next absolute time at which the maintenance routine should run (CAS-updated). */
static volatile uint64_t		sched_maintenance_deadline;
/* Absolute time of the last maintenance pass; 0 until the first pass. */
static uint64_t				sched_tick_last_abstime;
/* Number of sched-tick intervals elapsed since the last pass. */
static uint64_t				sched_tick_delta;
/* High-water mark of sched_tick_delta, exported for diagnostics. */
uint64_t				sched_tick_max_delta;
/*
 *	sched_init_thread:
 *
 *	Perform periodic bookkeeping functions about ten
 *	times per second.
 */
sched_timeshare_maintenance_continue(void)

	uint64_t	sched_tick_ctime, late_time;

	/* Track the earliest still-waiting runnable thread per class for latency reporting. */
	struct sched_update_scan_context scan_context = {
		.earliest_bg_make_runnable_time = UINT64_MAX,
		.earliest_normal_make_runnable_time = UINT64_MAX,
		.earliest_rt_make_runnable_time = UINT64_MAX

	sched_tick_ctime = mach_absolute_time();

	if (__improbable(sched_tick_last_abstime == 0)) {
		/* First pass: establish the baseline and count a single tick. */
		sched_tick_last_abstime = sched_tick_ctime;
		sched_tick_delta = 1;

		late_time = sched_tick_ctime - sched_tick_last_abstime;
		sched_tick_delta = late_time / sched_tick_interval;
		/* Ensure a delta of 1, since the interval could be slightly
		 * smaller than the sched_tick_interval due to dispatch
		 * latencies.
		 */
		sched_tick_delta = MAX(sched_tick_delta, 1);

		/* In the event interrupt latencies or platform
		 * idle events that advanced the timebase resulted
		 * in periods where no threads were dispatched,
		 * cap the maximum "tick delta" at SCHED_TICK_MAX_DELTA
		 * iterations.
		 */
		sched_tick_delta = MIN(sched_tick_delta, SCHED_TICK_MAX_DELTA);

		sched_tick_last_abstime = sched_tick_ctime;
		sched_tick_max_delta = MAX(sched_tick_delta, sched_tick_max_delta);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE)|DBG_FUNC_START,
	    sched_tick_delta, late_time, 0, 0, 0);

	/* Add a number of pseudo-ticks corresponding to the elapsed interval
	 * This could be greater than 1 if substantial intervals where
	 * all processors are idle occur, which rarely occurs in practice.
	 */
	sched_tick += sched_tick_delta;

	/*
	 *  Compute various averages.
	 */
	compute_averages(sched_tick_delta);

	/*
	 *  Scan the run queues for threads which
	 *  may need to be updated, and find the earliest runnable thread on the runqueue
	 *  to report its latency.
	 */
	SCHED(thread_update_scan)(&scan_context);

	rt_runq_scan(&scan_context);

	uint64_t ctime = mach_absolute_time();

	/* Convert "earliest made-runnable" timestamps into max observed latencies. */
	uint64_t bg_max_latency = (ctime > scan_context.earliest_bg_make_runnable_time) ?
	    ctime - scan_context.earliest_bg_make_runnable_time : 0;

	uint64_t default_max_latency = (ctime > scan_context.earliest_normal_make_runnable_time) ?
	    ctime - scan_context.earliest_normal_make_runnable_time : 0;

	uint64_t realtime_max_latency = (ctime > scan_context.earliest_rt_make_runnable_time) ?
	    ctime - scan_context.earliest_rt_make_runnable_time : 0;

	machine_max_runnable_latency(bg_max_latency, default_max_latency, realtime_max_latency);

	/*
	 * Check to see if the special sched VM group needs attention.
	 */
	sched_vm_group_maintenance();

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE) | DBG_FUNC_END,
	    sched_pri_shifts[TH_BUCKET_SHARE_FG], sched_pri_shifts[TH_BUCKET_SHARE_BG],
	    sched_pri_shifts[TH_BUCKET_SHARE_UT], 0, 0);

	/* Sleep until sched_timeshare_consider_maintenance() wakes us again. */
	assert_wait((event_t)sched_timeshare_maintenance_continue, THREAD_UNINT);
	thread_block((thread_continue_t)sched_timeshare_maintenance_continue);
/* Count of maintenance-thread wakeups issued from the fast path below. */
static uint64_t sched_maintenance_wakeups;

/*
 * Determine if the set of routines formerly driven by a maintenance timer
 * must be invoked, based on a deadline comparison. Signals the scheduler
 * maintenance thread on deadline expiration. Must be invoked at an interval
 * lower than the "sched_tick_interval", currently accomplished by
 * invocation via the quantum expiration timer and at context switch time.
 * Performance matters: this routine reuses a timestamp approximating the
 * current absolute time received from the caller, and should perform
 * no more than a comparison against the deadline in the common case.
 */
sched_timeshare_consider_maintenance(uint64_t ctime) {
	uint64_t ndeadline, deadline = sched_maintenance_deadline;

	if (__improbable(ctime >= deadline)) {
		/* The maintenance thread must never wake itself. */
		if (__improbable(current_thread() == sched_maintenance_thread))

		ndeadline = ctime + sched_tick_interval;

		/* CAS ensures exactly one caller per interval performs the wakeup. */
		if (__probable(__sync_bool_compare_and_swap(&sched_maintenance_deadline, deadline, ndeadline))) {
			thread_wakeup((event_t)sched_timeshare_maintenance_continue);
			sched_maintenance_wakeups++;

#endif /* CONFIG_SCHED_TIMESHARE_CORE */
/*
 * First body of the maintenance thread: block once so sched_startup() can
 * resume, name the thread, record it, then enter the supplied continuation.
 * NOTE(review): the final call into continuation() appears elided by
 * extraction.
 */
sched_init_thread(void (*continuation)(void))

	thread_block(THREAD_CONTINUE_NULL);

	thread_t thread = current_thread();

	thread_set_thread_name(thread, "sched_maintenance_thread");

	/* Published so consider_maintenance() can avoid waking itself. */
	sched_maintenance_thread = thread;
#if defined(CONFIG_SCHED_TIMESHARE_CORE)

/*
 *	thread_update_scan / runq_scan:
 *
 *	Scan the run queues to account for
 *	timesharing threads which need to be updated.
 *
 *	Scanner runs in two passes.  Pass one squirrels likely
 *	threads away in an array, pass two does the update.
 *
 *	This is necessary because the run queue is locked for
 *	the candidate scan, but	the thread is locked for the update.
 *
 *	Array should be sized to make forward progress, without
 *	disabling preemption for long periods.
 */

#define	THREAD_UPDATE_SIZE		128

/* Pass-one staging area: candidate threads, each holding a reference. */
static thread_t		thread_update_array[THREAD_UPDATE_SIZE];
static uint32_t		thread_update_count = 0;
/* Returns TRUE if thread was added, FALSE if thread_update_array is full */
/*
 * NOTE(review): the two return statements appear elided by extraction;
 * visible logic rejects when full, otherwise stages the thread and takes
 * a reference so it survives until pass two.
 */
thread_update_add_thread(thread_t thread)

	if (thread_update_count == THREAD_UPDATE_SIZE)

	thread_update_array[thread_update_count++] = thread;
	/* Hold a reference; dropped in thread_update_process_threads(). */
	thread_reference_internal(thread);
/*
 * Pass two of the runq scan: for each staged thread, recompute its
 * timeshare priority (if still runnable and stale), then drop the
 * reference taken in thread_update_add_thread().
 */
thread_update_process_threads(void)

	assert(thread_update_count <= THREAD_UPDATE_SIZE);

	for (uint32_t i = 0 ; i < thread_update_count ; i++) {
		thread_t thread = thread_update_array[i];
		assert_thread_magic(thread);
		/* Clear the slot so no stale pointer survives this pass. */
		thread_update_array[i] = THREAD_NULL;

		spl_t s = splsched();
		thread_lock(thread);
		/* Only update runnable threads whose priority is stale this tick. */
		if (!(thread->state & (TH_WAIT)) && thread->sched_stamp != sched_tick) {
			SCHED(update_priority)(thread);
		thread_unlock(thread);

		/* Drop the scan's reference. */
		thread_deallocate(thread);

	thread_update_count = 0;
/*
 *	Scan a runq for candidate threads.
 *
 *	Returns TRUE if retry is needed.
 *
 *	NOTE(review): the signature, loop bounds and early-exit lines appear
 *	elided by extraction; visible logic walks each non-empty priority
 *	queue, stages stale timeshare threads, and records the earliest
 *	made-runnable times for latency reporting.
 */
	sched_update_scan_context_t scan_context)

	int count = runq->count;

	/* Iterate only non-empty priority levels via the runq bitmap. */
	for (queue_index = bitmap_first(runq->bitmap, NRQS);
	    queue_index = bitmap_next(runq->bitmap, queue_index)) {

		queue_t	queue = &runq->queues[queue_index];

		qe_foreach_element(thread, queue, runq_links) {
			assert_thread_magic(thread);

			/* Stage timeshare threads whose priority is stale this tick. */
			if (thread->sched_stamp != sched_tick &&
			    thread->sched_mode == TH_MODE_TIMESHARE) {
				/* Staging array full: caller must retry after pass two. */
				if (thread_update_add_thread(thread) == FALSE)

			if (cpu_throttle_enabled && ((thread->sched_pri <= MAXPRI_THROTTLE) && (thread->base_pri <= MAXPRI_THROTTLE))) {
				/* Throttled (background) thread: track its wait start. */
				if (thread->last_made_runnable_time < scan_context->earliest_bg_make_runnable_time) {
					scan_context->earliest_bg_make_runnable_time = thread->last_made_runnable_time;

				if (thread->last_made_runnable_time < scan_context->earliest_normal_make_runnable_time) {
					scan_context->earliest_normal_make_runnable_time = thread->last_made_runnable_time;

#endif /* CONFIG_SCHED_TIMESHARE_CORE */
4715 thread_eager_preemption(thread_t thread
)
4717 return ((thread
->sched_flags
& TH_SFLAG_EAGERPREEMPT
) != 0);
/*
 * Mark a thread as requesting eager preemption, and make the request
 * take effect immediately: if it is the current thread, run a context
 * switch check now; if it is running remotely, poke that processor.
 * NOTE(review): spl bracketing and the cause_ast_check/ast_on lines in
 * the remote branch appear elided by extraction.
 */
thread_set_eager_preempt(thread_t thread)

	ast_t ast = AST_NONE;

	p = current_processor();

	thread_lock(thread);
	thread->sched_flags |= TH_SFLAG_EAGERPREEMPT;

	if (thread == current_thread()) {
		/* Check whether the flag creates an immediate preemption point. */
		ast = csw_check(p, AST_NONE);
		thread_unlock(thread);
		if (ast != AST_NONE) {
			(void) thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast);

		/* Thread may be running elsewhere: find its last processor. */
		p = thread->last_processor;

		if (p != PROCESSOR_NULL && p->state == PROCESSOR_RUNNING &&
		    p->active_thread == thread) {

		thread_unlock(thread);
/*
 * Clear a thread's eager-preemption request.
 * NOTE(review): the spl bracketing (splsched/splx) appears elided by
 * extraction; the flag is cleared under the thread lock.
 */
thread_clear_eager_preempt(thread_t thread)

	thread_lock(thread);

	thread->sched_flags &= ~TH_SFLAG_EAGERPREEMPT;

	thread_unlock(thread);
/*
 *	Scheduling statistics.
 *
 * Record a context switch in the processor's statistics: counts realtime
 * switches (by incoming priority) and preemptions (including whether a
 * realtime thread preempted or was preempted).
 * NOTE(review): some lines (e.g. csw_count increment, to_realtime set)
 * appear elided by extraction.
 */
sched_stats_handle_csw(processor_t processor, int reasons, int selfpri, int otherpri)

	struct processor_sched_statistics *stats;
	boolean_t to_realtime = FALSE;

	stats = &processor->processor_data.sched_stats;

	/* Incoming thread runs at realtime priority. */
	if (otherpri >= BASEPRI_REALTIME) {
		stats->rt_sched_count++;

	if ((reasons & AST_PREEMPT) != 0) {
		stats->preempt_count++;

		/* The outgoing (preempted) thread was realtime. */
		if (selfpri >= BASEPRI_REALTIME) {
			stats->preempted_rt_count++;

			/* Preempted by a realtime thread. */
			stats->preempted_by_rt_count++;
4800 sched_stats_handle_runq_change(struct runq_stats
*stats
, int old_count
)
4802 uint64_t timestamp
= mach_absolute_time();
4804 stats
->count_sum
+= (timestamp
- stats
->last_change_timestamp
) * old_count
;
4805 stats
->last_change_timestamp
= timestamp
;
/*
 *	For calls from assembly code: plain thread_wakeup() entry point that
 *	forwards with the default THREAD_AWAKENED result.
 *	NOTE(review): the wrapper's signature lines appear elided by
 *	extraction; only the forwarding call is visible.
 */
#undef thread_wakeup
	thread_wakeup_with_result(x, THREAD_AWAKENED);
4824 preemption_enabled(void)
4826 return (get_preemption_level() == 0 && ml_get_interrupts_enabled());
4830 sched_timer_deadline_tracking_init(void) {
4831 nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT
, &timer_deadline_tracking_bin_1
);
4832 nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT
, &timer_deadline_tracking_bin_2
);
/*
 * Validate and forward a work-interval notification for the current
 * thread to the platform layer, augmented with the thread's urgency.
 * Returns KERN_INVALID_ARGUMENT for a zero or mismatched interval id.
 * NOTE(review): local declarations and spl bracketing around the
 * thread_lock appear elided by extraction.
 */
sched_work_interval_notify(thread_t thread, uint64_t work_interval_id, uint64_t start, uint64_t finish, uint64_t deadline, uint64_t next_start, uint32_t flags)

	uint64_t urgency_param1, urgency_param2;

	if (work_interval_id == 0) {
		return (KERN_INVALID_ARGUMENT);

	/* Only the thread itself may report on its work interval. */
	assert(thread == current_thread());

	thread_mtx_lock(thread);
	if (thread->work_interval_id != work_interval_id) {
		thread_mtx_unlock(thread);
		return (KERN_INVALID_ARGUMENT);
	thread_mtx_unlock(thread);

	/* Sample urgency under the thread lock for a consistent snapshot. */
	thread_lock(thread);
	urgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
	thread_unlock(thread);

	machine_work_interval_notify(thread, work_interval_id, start, finish, deadline, next_start, urgency, flags);
	return (KERN_SUCCESS);
4866 void thread_set_options(uint32_t thopt
) {
4868 thread_t t
= current_thread();
4873 t
->options
|= thopt
;