/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Priority related scheduler bits.
 */

#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <kern/host.h>
#include <kern/mach_param.h>
#include <kern/sched.h>
#include <sys/kdebug.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/ledger.h>
#include <machine/machparam.h>
#include <kern/machine.h>

#ifdef CONFIG_MACH_APPROXIMATE_TIME
#include <machine/commpage.h>	/* for commpage_update_mach_approximate_time */
#endif

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

static void sched_update_thread_bucket(thread_t thread);

/*
 *	thread_quantum_expire:
 *
 *	Recalculate the quantum and priority for a thread.
 */
void
thread_quantum_expire(
	timer_call_param_t	p0,
	timer_call_param_t	p1)
{
	processor_t	processor = p0;
	thread_t	thread = p1;
	ast_t		preempt;
	uint64_t	ctime;
	int		urgency;
	uint64_t	ignore1, ignore2;

	assert(processor == current_processor());
	assert(thread == current_thread());

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_QUANTUM_EXPIRED) | DBG_FUNC_START, 0, 0, 0, 0, 0);

	SCHED_STATS_QUANTUM_TIMER_EXPIRATION(processor);

	/*
	 * We bill CPU time to both the individual thread and its task.
	 *
	 * Because this balance adjustment could potentially attempt to wake this
	 * very thread, we must credit the ledger before taking the thread lock.
	 * The ledger pointers are only manipulated by the thread itself at the ast
	 * boundary.
	 *
	 * TODO: This fails to account for the time between when the timer was
	 * armed and when it fired.  It should be based on the system_timer and
	 * running a timer_update operation here.
	 */
	ledger_credit(thread->t_ledger, task_ledgers.cpu_time, thread->quantum_remaining);
	ledger_credit(thread->t_threadledger, thread_ledgers.cpu_time, thread->quantum_remaining);
	if (thread->t_bankledger) {
		ledger_credit(thread->t_bankledger, bank_ledgers.cpu_time,
		    (thread->quantum_remaining - thread->t_deduct_bank_ledger_time));
	}
	thread->t_deduct_bank_ledger_time = 0;

	ctime = mach_absolute_time();

#ifdef CONFIG_MACH_APPROXIMATE_TIME
	commpage_update_mach_approximate_time(ctime);
#endif

#if MONOTONIC
	mt_sched_update(thread);
#endif /* MONOTONIC */

	thread_lock(thread);

	/*
	 * We've run up until our quantum expiration, and will (potentially)
	 * continue without re-entering the scheduler, so update this now.
	 */
	processor->last_dispatch = ctime;
	thread->last_run_time = ctime;

	/*
	 *	Check for fail-safe trip.
	 */
	if ((thread->sched_mode == TH_MODE_REALTIME || thread->sched_mode == TH_MODE_FIXED) &&
	    !(thread->sched_flags & TH_SFLAG_PROMOTED) &&
	    !(thread->sched_flags & TH_SFLAG_PROMOTE_REASON_MASK) &&
	    !(thread->options & TH_OPT_SYSTEM_CRITICAL)) {
		uint64_t new_computation;

		new_computation = ctime - thread->computation_epoch;
		new_computation += thread->computation_metered;
		if (new_computation > max_unsafe_computation) {
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAILSAFE) | DBG_FUNC_NONE,
			    (uintptr_t)thread->sched_pri, (uintptr_t)thread->sched_mode, 0, 0, 0);

			thread->safe_release = ctime + sched_safe_duration;

			sched_thread_mode_demote(thread, TH_SFLAG_FAILSAFE);
		}
	}

	/*
	 *	Recompute scheduled priority if appropriate.
	 */
	if (SCHED(can_update_priority)(thread))
		SCHED(update_priority)(thread);
	else
		SCHED(lightweight_update_priority)(thread);

	if (thread->sched_mode != TH_MODE_REALTIME)
		SCHED(quantum_expire)(thread);

	processor_state_update_from_thread(processor, thread);

	/*
	 *	This quantum is up, give this thread another.
	 */
	processor->first_timeslice = FALSE;

	thread_quantum_init(thread);

	/* Reload precise timing global policy to thread-local policy */
	thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);

	/*
	 * Since non-precise user/kernel time doesn't update the state/thread timer
	 * during privilege transitions, synthesize an event now.
	 */
	if (!thread->precise_user_kernel_time) {
		timer_update(PROCESSOR_DATA(processor, current_state), ctime);
		timer_update(PROCESSOR_DATA(processor, thread_timer), ctime);
		timer_update(&thread->runnable_timer, ctime);
	}

	processor->quantum_end = ctime + thread->quantum_remaining;

	/*
	 * Context switch check
	 *
	 * non-urgent flags don't affect kernel threads, so upgrade to urgent
	 * to ensure that rebalancing and non-recommendation kick in quickly.
	 */

	ast_t check_reason = AST_QUANTUM;
	if (thread->task == kernel_task)
		check_reason |= AST_URGENT;

	if ((preempt = csw_check(processor, check_reason)) != AST_NONE)
		ast_on(preempt);

	/*
	 * AST_KEVENT does not send an IPI when setting the AST,
	 * to avoid waiting for the next context switch to propagate the AST,
	 * the AST is propagated here at quantum expiration.
	 */
	ast_propagate(thread);

	thread_unlock(thread);

	timer_call_quantum_timer_enter(&processor->quantum_timer, thread,
	    processor->quantum_end, ctime);

	/* Tell platform layer that we are still running this thread */
	urgency = thread_get_urgency(thread, &ignore1, &ignore2);
	machine_thread_going_on_core(thread, urgency, 0, 0, ctime);
	machine_switch_perfcontrol_state_update(QUANTUM_EXPIRY, ctime, 0, thread);

#if defined(CONFIG_SCHED_TIMESHARE_CORE)
	sched_timeshare_consider_maintenance(ctime);
#endif /* CONFIG_SCHED_TIMESHARE_CORE */

#if __arm__ || __arm64__
	if (thread->sched_mode == TH_MODE_REALTIME)
		sched_consider_recommended_cores(ctime, thread);
#endif /* __arm__ || __arm64__ */

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_QUANTUM_EXPIRED) | DBG_FUNC_END, preempt, 0, 0, 0, 0);
}
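
/*
 * Illustrative note (not from the original source): the quantum re-arm above
 * is plain absolute-time arithmetic.  Assuming, for example, a 10 ms quantum
 * and a 24 MHz Mach timebase (common on Apple ARM SoCs), quantum_remaining is
 * roughly 240,000 absolute-time units, so processor->quantum_end = ctime +
 * ~240,000 and timer_call_quantum_timer_enter() programs the quantum timer to
 * fire at that deadline.
 */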

/*
 *	sched_set_thread_base_priority:
 *
 *	Set the base priority of the thread
 *	and reset its scheduled priority.
 *
 *	This is the only path to change base_pri.
 *
 *	Called with the thread locked.
 */
void
sched_set_thread_base_priority(thread_t thread, int priority)
{
	assert(priority >= MINPRI);
	uint64_t ctime = 0;

	if (thread->sched_mode == TH_MODE_REALTIME)
		assert(priority <= BASEPRI_RTQUEUES);
	else
		assert(priority < BASEPRI_RTQUEUES);

	int old_base_pri = thread->base_pri;
	thread->base_pri = priority;

	if ((thread->state & TH_RUN) == TH_RUN) {
		assert(thread->last_made_runnable_time != THREAD_NOT_RUNNABLE);
		ctime = mach_approximate_time();
		thread->last_basepri_change_time = ctime;
	} else {
		assert(thread->last_basepri_change_time == THREAD_NOT_RUNNABLE);
		assert(thread->last_made_runnable_time == THREAD_NOT_RUNNABLE);
	}

	/*
	 * Currently the perfcontrol_attr depends on the base pri of the
	 * thread. Therefore, we use this function as the hook for the
	 * perfcontrol callout.
	 */
	if (thread == current_thread() && old_base_pri != priority) {
		ctime = mach_approximate_time();
		machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE,
		    ctime, PERFCONTROL_CALLOUT_WAKE_UNSAFE, thread);
	}
	sched_update_thread_bucket(thread);

	thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
}

/*
 *	thread_recompute_sched_pri:
 *
 *	Reset the scheduled priority of the thread
 *	according to its base priority if the
 *	thread has not been promoted or depressed.
 *
 *	This is the only way to push base_pri changes into sched_pri,
 *	or to recalculate the appropriate sched_pri after changing
 *	a promotion or depression.
 *
 *	Called at splsched with the thread locked.
 *
 *	TODO: Add an 'update urgency' flag to avoid urgency callouts on every rwlock operation
 */
void
thread_recompute_sched_pri(thread_t thread, set_sched_pri_options_t options)
{
	uint32_t     sched_flags = thread->sched_flags;
	sched_mode_t sched_mode  = thread->sched_mode;

	int priority = thread->base_pri;

	if (sched_mode == TH_MODE_TIMESHARE)
		priority = SCHED(compute_timeshare_priority)(thread);

	if (sched_flags & TH_SFLAG_DEPRESS) {
		/* thread_yield_internal overrides kernel mutex promotion */
		priority = DEPRESSPRI;
	} else {
		/* poll-depress is overridden by mutex promotion and promote-reasons */
		if ((sched_flags & TH_SFLAG_POLLDEPRESS)) {
			priority = DEPRESSPRI;
		}

		if (sched_flags & TH_SFLAG_PROMOTED) {
			priority = MAX(priority, thread->promotion_priority);

			if (sched_mode != TH_MODE_REALTIME)
				priority = MIN(priority, MAXPRI_PROMOTE);
		}

		if (sched_flags & TH_SFLAG_PROMOTE_REASON_MASK) {
			if (sched_flags & TH_SFLAG_RW_PROMOTED)
				priority = MAX(priority, MINPRI_RWLOCK);

			if (sched_flags & TH_SFLAG_WAITQ_PROMOTED)
				priority = MAX(priority, MINPRI_WAITQ);

			if (sched_flags & TH_SFLAG_EXEC_PROMOTED)
				priority = MAX(priority, MINPRI_EXEC);
		}
	}

	set_sched_pri(thread, priority, options);
}
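
/*
 * Worked example (illustrative only, not from the original source): a
 * TH_MODE_TIMESHARE thread with base_pri 31 whose usage has decayed its
 * computed timeshare priority to 24, and which currently holds an rwlock
 * promotion (TH_SFLAG_RW_PROMOTED), ends up with
 * sched_pri = MAX(24, MINPRI_RWLOCK): the rwlock floor only matters when it
 * is higher than the decayed timeshare value.
 */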

void
sched_default_quantum_expire(thread_t thread __unused)
{
	/*
	 * No special behavior when a timeshare, fixed, or realtime thread
	 * uses up its entire quantum
	 */
}

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

/*
 *	lightweight_update_priority:
 *
 *	Update the scheduled priority for
 *	a timesharing thread.
 *
 *	Only for use on the current thread.
 *
 *	Called with the thread locked.
 */
void
lightweight_update_priority(thread_t thread)
{
	assert(thread->runq == PROCESSOR_NULL);
	assert(thread == current_thread());

	if (thread->sched_mode == TH_MODE_TIMESHARE) {
		int priority;
		uint32_t delta;

		thread_timer_delta(thread, delta);

		/*
		 *	Accumulate timesharing usage only
		 *	during contention for processor
		 *	resources.
		 */
		if (thread->pri_shift < INT8_MAX)
			thread->sched_usage += delta;

		thread->cpu_delta += delta;

		priority = sched_compute_timeshare_priority(thread);

		if (priority != thread->sched_pri)
			thread_recompute_sched_pri(thread, SETPRI_LAZY);
	}
}

/*
 *	Define shifts for simulating (5/8) ** n
 *
 *	Shift structures for holding update shifts.  Actual computation
 *	is  usage = (usage >> shift1) +/- (usage >> abs(shift2))  where the
 *	+/- is determined by the sign of shift 2.
 */
struct shift_data {
	int	shift1;
	int	shift2;
};

#define SCHED_DECAY_TICKS	32
static struct shift_data	sched_decay_shifts[SCHED_DECAY_TICKS] = {
	{1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7},
	{5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13},
	{11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18},
	{16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}
};
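
/*
 * Illustrative note (not part of the original source): each {shift1, shift2}
 * pair approximates one extra factor of 5/8 using only shifts.  For n = 1,
 * {1,3} gives usage/2 + usage/8 = 0.625 * usage, exactly 5/8.  For n = 2,
 * {1,-3} gives usage/2 - usage/8 = 0.375 * usage, close to (5/8)^2 = 0.390625.
 * Larger indices continue the pattern, so a thread that has gone n ticks
 * without an update can have its usage aged by roughly (5/8)^n in one step.
 */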

/*
 *	sched_compute_timeshare_priority:
 *
 *	Calculate the timesharing priority based upon usage and load.
 */
extern int sched_pri_decay_band_limit;

#ifdef CONFIG_EMBEDDED

int
sched_compute_timeshare_priority(thread_t thread)
{
	int decay_amount = (thread->sched_usage >> thread->pri_shift);
	int decay_limit = sched_pri_decay_band_limit;

	if (thread->base_pri > BASEPRI_FOREGROUND) {
		decay_limit += (thread->base_pri - BASEPRI_FOREGROUND);
	}

	if (decay_amount > decay_limit) {
		decay_amount = decay_limit;
	}

	/* start with base priority */
	int priority = thread->base_pri - decay_amount;

	if (priority < MAXPRI_THROTTLE) {
		if (thread->task->max_priority > MAXPRI_THROTTLE) {
			priority = MAXPRI_THROTTLE;
		} else if (priority < MINPRI_USER) {
			priority = MINPRI_USER;
		}
	} else if (priority > MAXPRI_KERNEL) {
		priority = MAXPRI_KERNEL;
	}

	return priority;
}

#else /* CONFIG_EMBEDDED */

int
sched_compute_timeshare_priority(thread_t thread)
{
	/* start with base priority */
	int priority = thread->base_pri - (thread->sched_usage >> thread->pri_shift);

	if (priority < MINPRI_USER)
		priority = MINPRI_USER;
	else if (priority > MAXPRI_KERNEL)
		priority = MAXPRI_KERNEL;

	return priority;
}

#endif /* CONFIG_EMBEDDED */
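
/*
 * Worked example (illustrative numbers, not from the original source): on the
 * non-embedded path, a timeshare thread with base_pri 31, sched_usage of
 * 0x40000 and pri_shift 16 computes 31 - (0x40000 >> 16) = 31 - 4 = 27, which
 * is then clamped into [MINPRI_USER, MAXPRI_KERNEL].  A larger pri_shift
 * (less contention) decays the priority less for the same accumulated usage.
 */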

/*
 *	can_update_priority
 *
 *	Make sure we don't do re-dispatches more frequently than a scheduler tick.
 *
 *	Called with the thread locked.
 */
boolean_t
can_update_priority(thread_t thread)
{
	if (sched_tick == thread->sched_stamp)
		return (FALSE);
	else
		return (TRUE);
}

/*
 *	update_priority
 *
 *	Perform housekeeping operations driven by scheduler tick.
 *
 *	Called with the thread locked.
 */
void
update_priority(thread_t thread)
{
	uint32_t ticks, delta;

	ticks = sched_tick - thread->sched_stamp;
	assert(ticks != 0);

	thread->sched_stamp += ticks;

	/* If requested, accelerate aging of sched_usage */
	if (sched_decay_usage_age_factor > 1)
		ticks *= sched_decay_usage_age_factor;

	/*
	 *	Gather cpu usage data.
	 */
	thread_timer_delta(thread, delta);
	if (ticks < SCHED_DECAY_TICKS) {
		/*
		 * Accumulate timesharing usage only during contention for processor
		 * resources. Use the pri_shift from the previous tick window to
		 * determine if the system was in a contended state.
		 */
		if (thread->pri_shift < INT8_MAX)
			thread->sched_usage += delta;

		thread->cpu_usage += delta + thread->cpu_delta;
		thread->cpu_delta = 0;

		struct shift_data *shiftp = &sched_decay_shifts[ticks];

		if (shiftp->shift2 > 0) {
			thread->cpu_usage =   (thread->cpu_usage >> shiftp->shift1) +
			                      (thread->cpu_usage >> shiftp->shift2);
			thread->sched_usage = (thread->sched_usage >> shiftp->shift1) +
			                      (thread->sched_usage >> shiftp->shift2);
		} else {
			thread->cpu_usage =   (thread->cpu_usage >> shiftp->shift1) -
			                      (thread->cpu_usage >> -(shiftp->shift2));
			thread->sched_usage = (thread->sched_usage >> shiftp->shift1) -
			                      (thread->sched_usage >> -(shiftp->shift2));
		}
	} else {
		thread->cpu_usage = thread->cpu_delta = 0;
		thread->sched_usage = 0;
	}

	/*
	 *	Check for fail-safe release.
	 */
	if ((thread->sched_flags & TH_SFLAG_FAILSAFE) &&
	    mach_absolute_time() >= thread->safe_release) {
		sched_thread_mode_undemote(thread, TH_SFLAG_FAILSAFE);
	}

	/*
	 * Now that the thread's CPU usage has been accumulated and aged
	 * based on contention of the previous tick window, update the
	 * pri_shift of the thread to match the current global load/shift
	 * values. The updated pri_shift would be used to calculate the
	 * new priority of the thread.
	 */
	thread->pri_shift = sched_pri_shifts[thread->th_sched_bucket];

	/* Recompute scheduled priority if appropriate. */
	if (thread->sched_mode == TH_MODE_TIMESHARE)
		thread_recompute_sched_pri(thread, SETPRI_LAZY);
}
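
/*
 * Illustrative note (not from the original source): pri_shift doubles as a
 * contention indicator.  The global sched_pri_shifts[] entry for a bucket is
 * set to INT8_MAX when that bucket had no runnable competition, so the
 * "pri_shift < INT8_MAX" tests above skip usage accumulation entirely when
 * the thread's bucket was uncontended, and the decay term applied by
 * sched_compute_timeshare_priority() stays near zero.
 */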

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

/*
 * TH_BUCKET_RUN is a count of *all* runnable non-idle threads.
 * Each other bucket is a count of the runnable non-idle threads
 * with that property.
 */
volatile uint32_t	sched_run_buckets[TH_BUCKET_MAX];

static void
sched_incr_bucket(sched_bucket_t bucket)
{
	assert(bucket >= TH_BUCKET_FIXPRI &&
	    bucket <= TH_BUCKET_SHARE_BG);

	hw_atomic_add(&sched_run_buckets[bucket], 1);
}

static void
sched_decr_bucket(sched_bucket_t bucket)
{
	assert(bucket >= TH_BUCKET_FIXPRI &&
	    bucket <= TH_BUCKET_SHARE_BG);

	assert(sched_run_buckets[bucket] > 0);

	hw_atomic_sub(&sched_run_buckets[bucket], 1);
}

/* TH_RUN & !TH_IDLE controls whether a thread has a run count */

uint32_t
sched_run_incr(thread_t thread)
{
	assert((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN);

	uint32_t new_count = hw_atomic_add(&sched_run_buckets[TH_BUCKET_RUN], 1);

	sched_incr_bucket(thread->th_sched_bucket);

	return new_count;
}

uint32_t
sched_run_decr(thread_t thread)
{
	assert((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN);

	sched_decr_bucket(thread->th_sched_bucket);

	uint32_t new_count = hw_atomic_sub(&sched_run_buckets[TH_BUCKET_RUN], 1);

	return new_count;
}

static void
sched_update_thread_bucket(thread_t thread)
{
	sched_bucket_t old_bucket = thread->th_sched_bucket;
	sched_bucket_t new_bucket = TH_BUCKET_RUN;

	switch (thread->sched_mode) {
	case TH_MODE_FIXED:
	case TH_MODE_REALTIME:
		new_bucket = TH_BUCKET_FIXPRI;
		break;

	case TH_MODE_TIMESHARE:
		if (thread->base_pri > BASEPRI_DEFAULT)
			new_bucket = TH_BUCKET_SHARE_FG;
		else if (thread->base_pri > BASEPRI_UTILITY)
			new_bucket = TH_BUCKET_SHARE_DF;
		else if (thread->base_pri > MAXPRI_THROTTLE)
			new_bucket = TH_BUCKET_SHARE_UT;
		else
			new_bucket = TH_BUCKET_SHARE_BG;
		break;

	default:
		panic("unexpected mode: %d", thread->sched_mode);
		break;
	}

	if (old_bucket != new_bucket) {
		thread->th_sched_bucket = new_bucket;
		thread->pri_shift = sched_pri_shifts[new_bucket];

		if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) {
			sched_decr_bucket(old_bucket);
			sched_incr_bucket(new_bucket);
		}
	}
}
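
/*
 * Illustrative mapping (not from the original source; the exact thresholds
 * live in the scheduler headers): with the usual values BASEPRI_DEFAULT = 31,
 * BASEPRI_UTILITY = 20 and MAXPRI_THROTTLE = 4, a timeshare thread lands in
 * TH_BUCKET_SHARE_FG above priority 31, TH_BUCKET_SHARE_DF for 21..31,
 * TH_BUCKET_SHARE_UT for 5..20, and TH_BUCKET_SHARE_BG at or below 4.
 * Fixed and realtime threads always use TH_BUCKET_FIXPRI.
 */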

/*
 * Set the thread's true scheduling mode
 * Called with thread mutex and thread locked
 * The thread has already been removed from the runqueue.
 *
 * (saved_mode is handled before this point)
 */
static void
sched_set_thread_mode(thread_t thread, sched_mode_t new_mode)
{
	assert(thread->runq == PROCESSOR_NULL);

	switch (new_mode) {
	case TH_MODE_FIXED:
	case TH_MODE_REALTIME:
	case TH_MODE_TIMESHARE:
		break;

	default:
		panic("unexpected mode: %d", new_mode);
		break;
	}

	thread->sched_mode = new_mode;

	sched_update_thread_bucket(thread);
}

/*
 * Demote the true scheduler mode to timeshare (called with the thread locked)
 */
void
sched_thread_mode_demote(thread_t thread, uint32_t reason)
{
	assert(reason & TH_SFLAG_DEMOTED_MASK);
	assert((thread->sched_flags & reason) != reason);

	if (thread->policy_reset)
		return;

	if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
		/* Another demotion reason is already active */
		thread->sched_flags |= reason;
		return;
	}

	assert(thread->saved_mode == TH_MODE_NONE);

	boolean_t removed = thread_run_queue_remove(thread);

	thread->sched_flags |= reason;

	thread->saved_mode = thread->sched_mode;

	sched_set_thread_mode(thread, TH_MODE_TIMESHARE);

	thread_recompute_priority(thread);

	if (removed)
		thread_run_queue_reinsert(thread, SCHED_TAILQ);
}

/*
 * Un-demote the true scheduler mode back to the saved mode (called with the thread locked)
 */
void
sched_thread_mode_undemote(thread_t thread, uint32_t reason)
{
	assert(reason & TH_SFLAG_DEMOTED_MASK);
	assert((thread->sched_flags & reason) == reason);
	assert(thread->saved_mode != TH_MODE_NONE);
	assert(thread->sched_mode == TH_MODE_TIMESHARE);
	assert(thread->policy_reset == 0);

	thread->sched_flags &= ~reason;

	if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
		/* Another demotion reason is still active */
		return;
	}

	boolean_t removed = thread_run_queue_remove(thread);

	sched_set_thread_mode(thread, thread->saved_mode);

	thread->saved_mode = TH_MODE_NONE;

	thread_recompute_priority(thread);

	if (removed)
		thread_run_queue_reinsert(thread, SCHED_TAILQ);
}

/*
 * Promote thread to a specific priority
 *
 * Promotion must not last past syscall boundary
 * Clients must always pair promote and unpromote 1:1
 *
 * Called at splsched with thread locked
 */
void
sched_thread_promote_to_pri(thread_t thread,
                            int      priority,
                            __kdebug_only uintptr_t trace_obj /* already unslid */)
{
	assert((thread->sched_flags & TH_SFLAG_PROMOTED) != TH_SFLAG_PROMOTED);
	assert(thread->promotion_priority == 0);
	assert(priority <= MAXPRI_PROMOTE);
	assert(priority > 0);

	KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PROMOTED),
	    thread_tid(thread), trace_obj, priority);

	thread->sched_flags |= TH_SFLAG_PROMOTED;
	thread->promotion_priority = priority;

	thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
}
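
/*
 * Usage sketch (illustrative only, not a call site from the original source):
 * a subsystem that needs a temporary priority floor around a critical section
 * would pair the calls 1:1 within one syscall, e.g.
 *
 *	spl_t s = splsched();
 *	thread_lock(thread);
 *	sched_thread_promote_to_pri(thread, floor_pri, trace_obj);
 *	thread_unlock(thread);
 *	splx(s);
 *	... critical section ...
 *	s = splsched();
 *	thread_lock(thread);
 *	sched_thread_unpromote(thread, trace_obj);
 *	thread_unlock(thread);
 *	splx(s);
 *
 * where floor_pri and trace_obj are placeholders supplied by the caller.
 */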

/*
 * Update a pre-existing priority promotion to have a higher priority floor
 * Priority can only go up from the previous value
 * Update must occur while a promotion is active
 *
 * Called at splsched with thread locked
 */
void
sched_thread_update_promotion_to_pri(thread_t thread,
                                     int      priority,
                                     __kdebug_only uintptr_t trace_obj /* already unslid */)
{
	assert(thread->promotions > 0);
	assert((thread->sched_flags & TH_SFLAG_PROMOTED) == TH_SFLAG_PROMOTED);
	assert(thread->promotion_priority > 0);
	assert(priority <= MAXPRI_PROMOTE);

	if (thread->promotion_priority < priority) {
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PROMOTED_UPDATE),
		    thread_tid(thread), trace_obj, priority);

		thread->promotion_priority = priority;
		thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
	}
}

/*
 * End a priority promotion
 * Demotes a thread back to its expected priority without the promotion in place
 *
 * Called at splsched with thread locked
 */
void
sched_thread_unpromote(thread_t thread,
                       __kdebug_only uintptr_t trace_obj /* already unslid */)
{
	assert((thread->sched_flags & TH_SFLAG_PROMOTED) == TH_SFLAG_PROMOTED);
	assert(thread->promotion_priority > 0);

	KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_UNPROMOTED),
	    thread_tid(thread), trace_obj, 0);

	thread->sched_flags &= ~TH_SFLAG_PROMOTED;
	thread->promotion_priority = 0;

	thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
}

/* called with thread locked */
void
assert_promotions_invariant(thread_t thread)
{
	if (thread->promotions > 0)
		assert((thread->sched_flags & TH_SFLAG_PROMOTED) == TH_SFLAG_PROMOTED);

	if (thread->promotions == 0)
		assert((thread->sched_flags & TH_SFLAG_PROMOTED) != TH_SFLAG_PROMOTED);
}

/*
 * Promote thread to have a sched pri floor for a specific reason
 *
 * Promotion must not last past syscall boundary
 * Clients must always pair promote and demote 1:1,
 * Handling nesting of the same promote reason is the client's responsibility
 *
 * Called at splsched with thread locked
 */
void
sched_thread_promote_reason(thread_t thread,
                            uint32_t reason,
                            __kdebug_only uintptr_t trace_obj /* already unslid */)
{
	assert(reason & TH_SFLAG_PROMOTE_REASON_MASK);
	assert((thread->sched_flags & reason) != reason);

	switch (reason) {
	case TH_SFLAG_RW_PROMOTED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_PROMOTE),
		    thread_tid(thread), thread->sched_pri,
		    thread->base_pri, trace_obj);
		break;
	case TH_SFLAG_WAITQ_PROMOTED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAITQ_PROMOTE),
		    thread_tid(thread), thread->sched_pri,
		    thread->base_pri, trace_obj);
		break;
	case TH_SFLAG_EXEC_PROMOTED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_EXEC_PROMOTE),
		    thread_tid(thread), thread->sched_pri,
		    thread->base_pri, trace_obj);
		break;
	}

	thread->sched_flags |= reason;

	thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
}

/*
 * End a specific promotion reason
 * Demotes a thread back to its expected priority without the promotion in place
 *
 * Called at splsched with thread locked
 */
void
sched_thread_unpromote_reason(thread_t thread,
                              uint32_t reason,
                              __kdebug_only uintptr_t trace_obj /* already unslid */)
{
	assert(reason & TH_SFLAG_PROMOTE_REASON_MASK);
	assert((thread->sched_flags & reason) == reason);

	switch (reason) {
	case TH_SFLAG_RW_PROMOTED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_DEMOTE),
		    thread_tid(thread), thread->sched_pri,
		    thread->base_pri, trace_obj);
		break;
	case TH_SFLAG_WAITQ_PROMOTED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAITQ_DEMOTE),
		    thread_tid(thread), thread->sched_pri,
		    thread->base_pri, trace_obj);
		break;
	case TH_SFLAG_EXEC_PROMOTED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_EXEC_DEMOTE),
		    thread_tid(thread), thread->sched_pri,
		    thread->base_pri, trace_obj);
		break;
	}

	thread->sched_flags &= ~reason;

	thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
}