#include <kern/processor.h>
#include <kern/ledger.h>
#include <machine/machparam.h>
+#include <kern/machine.h>
#ifdef CONFIG_MACH_APPROXIMATE_TIME
#include <machine/commpage.h> /* for commpage_update_mach_approximate_time */
#endif
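+
+/* Forward declaration: defined below alongside the per-bucket run-count accounting */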
+static void sched_update_thread_bucket(thread_t thread);
+
/*
* thread_quantum_expire:
*
* Recalculate the quantum and priority for a thread.
*
* Called at splsched.
*/
thread_t thread = p1;
ast_t preempt;
uint64_t ctime;
+ int urgency;
+ uint64_t ignore1, ignore2;
assert(processor == current_processor());
+ assert(thread == current_thread());
+
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_QUANTUM_EXPIRED) | DBG_FUNC_START, 0, 0, 0, 0, 0);
SCHED_STATS_QUANTUM_TIMER_EXPIRATION(processor);
/*
* Because this balance adjustment could potentially attempt to wake this very
* thread, we must credit the ledger before taking the thread lock. The ledger
* pointers are only manipulated by the thread itself at the ast boundary.
+ *
+ * TODO: This fails to account for the time between when the timer was armed and when it fired.
+ * It should be based on the system_timer and running a thread_timer_event operation here.
*/
ledger_credit(thread->t_ledger, task_ledgers.cpu_time, thread->quantum_remaining);
ledger_credit(thread->t_threadledger, thread_ledgers.cpu_time, thread->quantum_remaining);
ctime = mach_absolute_time();
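+ /* Publish the fresh timestamp to the commpage before the thread lock is taken */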
+#ifdef CONFIG_MACH_APPROXIMATE_TIME
+ commpage_update_mach_approximate_time(ctime);
+#endif
+
thread_lock(thread);
/*
* We've run up until our quantum expiration, and will (potentially)
* continue without re-entering the scheduler, so update this now.
*/
+ processor->last_dispatch = ctime;
thread->last_run_time = ctime;
-#ifdef CONFIG_MACH_APPROXIMATE_TIME
- commpage_update_mach_approximate_time(ctime);
-#endif
/*
* Check for fail-safe trip.
*/
else
SCHED(lightweight_update_priority)(thread);
- SCHED(quantum_expire)(thread);
-
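+ /* The policy's quantum_expire hook is now skipped for realtime threads */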
+ if (thread->sched_mode != TH_MODE_REALTIME)
+ SCHED(quantum_expire)(thread);
+
processor->current_pri = thread->sched_pri;
processor->current_thmode = thread->sched_mode;
+ /* Tell platform layer that we are still running this thread */
+ urgency = thread_get_urgency(thread, &ignore1, &ignore2);
+ machine_thread_going_on_core(thread, urgency, 0);
+
/*
* This quantum is up, give this thread another.
*/
- if (first_timeslice(processor))
- processor->timeslice--;
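+ /* first_timeslice is now a boolean flag rather than a countdown */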
+ processor->first_timeslice = FALSE;
thread_quantum_init(thread);
}
processor->quantum_end = ctime + thread->quantum_remaining;
- timer_call_enter1(&processor->quantum_timer, thread,
- processor->quantum_end, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
/*
* Context switch check.
*/
thread_unlock(thread);
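+ /* The quantum timer is re-armed only after the thread lock is dropped */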
+ timer_call_enter1(&processor->quantum_timer, thread,
+ processor->quantum_end, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
+
#if defined(CONFIG_SCHED_TIMESHARE_CORE)
- sched_traditional_consider_maintenance(ctime);
-#endif /* CONFIG_SCHED_TIMESHARE_CORE */
+ sched_timeshare_consider_maintenance(ctime);
+#endif /* CONFIG_SCHED_TIMESHARE_CORE */
+
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_QUANTUM_EXPIRED) | DBG_FUNC_END, preempt, 0, 0, 0, 0);
}
/*
* Set the base priority of the thread
* and reset its scheduled priority.
*
+ * This is the only path to change base_pri.
+ *
* Called with the thread locked.
*/
void
sched_set_thread_base_priority(thread_t thread, int priority)
{
- thread->priority = priority;
- SCHED(compute_priority)(thread, FALSE);
+ assert(priority >= MINPRI);
+
+ if (thread->sched_mode == TH_MODE_REALTIME)
+ assert(priority <= BASEPRI_RTQUEUES);
+ else
+ assert(priority < BASEPRI_RTQUEUES);
+
+ thread->base_pri = priority;
+
+ sched_update_thread_bucket(thread);
+
+ thread_recompute_sched_pri(thread, FALSE);
}
+/*
+ * thread_recompute_sched_pri:
+ *
+ * Reset the scheduled priority of the thread
+ * according to its base priority if the
+ * thread has not been promoted or depressed.
+ *
+ * This is the standard way to push base_pri changes into sched_pri,
+ * or to recalculate the appropriate sched_pri after clearing
+ * a promotion or depression.
+ *
+ * Called at splsched with the thread locked.
+ */
+void
+thread_recompute_sched_pri(
+ thread_t thread,
+ boolean_t override_depress)
+{
+ int priority;
+
+ if (thread->sched_mode == TH_MODE_TIMESHARE)
+ priority = SCHED(compute_timeshare_priority)(thread);
+ else
+ priority = thread->base_pri;
-#if defined(CONFIG_SCHED_TIMESHARE_CORE)
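+ /*
+ * Keep a promoted thread's elevated sched_pri unless the computed
+ * priority is higher, and leave a depressed thread alone unless the
+ * caller overrides the depression.
+ */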
+ if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) || (priority > thread->sched_pri)) &&
+ (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) || override_depress)) {
+ set_sched_pri(thread, priority);
+ }
+}
void
-sched_traditional_quantum_expire(thread_t thread __unused)
+sched_default_quantum_expire(thread_t thread __unused)
{
- /*
- * No special behavior when a timeshare, fixed, or realtime thread
- * uses up its entire quantum
- */
+ /*
+ * No special behavior when a timeshare, fixed, or realtime thread
+ * uses up its entire quantum
+ */
}
+#if defined(CONFIG_SCHED_TIMESHARE_CORE)
+
+/*
+ * lightweight_update_priority:
+ *
+ * Update the scheduled priority for
+ * a timesharing thread.
+ *
+ * Only for use on the current thread.
+ *
+ * Called with the thread locked.
+ */
void
lightweight_update_priority(thread_t thread)
{
+ assert(thread->runq == PROCESSOR_NULL);
+ assert(thread == current_thread());
+
if (thread->sched_mode == TH_MODE_TIMESHARE) {
- register uint32_t delta;
-
+ int priority;
+ uint32_t delta;
+
thread_timer_delta(thread, delta);
-
+
/*
* Accumulate timesharing usage only
* during contention for processor
* resources.
*/
if (thread->pri_shift < INT8_MAX)
thread->sched_usage += delta;
-
+
thread->cpu_delta += delta;
-
+
+ priority = sched_compute_timeshare_priority(thread);
+
/*
- * Adjust the scheduled priority if
- * the thread has not been promoted
- * and is not depressed.
+ * Adjust the scheduled priority like thread_recompute_sched_pri,
+ * except with the benefit of knowing the thread is on this core.
*/
- if ( !(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) &&
- !(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) )
- compute_my_priority(thread);
- }
+ if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) || (priority > thread->sched_pri)) &&
+ (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)) &&
+ priority != thread->sched_pri) {
+
+ thread->sched_pri = priority;
+
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
+ (uintptr_t)thread_tid(thread),
+ thread->base_pri,
+ thread->sched_pri,
+ 0, /* eventually, 'reason' */
+ 0);
+ }
+ }
}
/*
* Define shifts for simulating (5/8) ** n
*
* Shift structures for holding update shifts. Actual computation
* is usage = (usage >> shift1) +/- (usage >> abs(shift2)) where the
* +/- is determined by the sign of shift2.
*/
};
/*
- * do_priority_computation:
+ * sched_compute_timeshare_priority:
*
* Calculate the timesharing priority based upon usage and load.
*/
extern int sched_pri_decay_band_limit;
-static int do_priority_computation(thread_t th) {
- register int priority = th->priority /* start with base priority */
- - (th->sched_usage >> th->pri_shift);
- if (priority < MINPRI_USER)
- priority = MINPRI_USER;
- else
- if (priority > MAXPRI_KERNEL)
- priority = MAXPRI_KERNEL;
-
- return priority;
-}
-
-
-/*
- * compute_priority:
- *
- * Reset the scheduled priority of the thread
- * according to its base priority if the
- * thread has not been promoted or depressed.
- *
- * Called with the thread locked.
- */
-void
-compute_priority(
- register thread_t thread,
- boolean_t override_depress)
+int
+sched_compute_timeshare_priority(thread_t thread)
{
- register int priority;
+ /* start with base priority */
+ int priority = thread->base_pri - (thread->sched_usage >> thread->pri_shift);
- if (thread->sched_mode == TH_MODE_TIMESHARE)
- priority = do_priority_computation(thread);
- else
- priority = thread->priority;
+ if (priority < MINPRI_USER)
+ priority = MINPRI_USER;
+ else if (priority > MAXPRI_KERNEL)
+ priority = MAXPRI_KERNEL;
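+
+ /*
+ * Illustrative (our values, not from the source): base_pri 31 with
+ * (sched_usage >> pri_shift) == 6 yields sched_pri 25, within the
+ * [MINPRI_USER, MAXPRI_KERNEL] clamp.
+ */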
- if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) || (priority > thread->sched_pri)) &&
- (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) || override_depress)) {
- set_sched_pri(thread, priority);
- }
+ return priority;
}
-/*
- * compute_my_priority:
- *
- * Reset the scheduled priority for
- * a timesharing thread.
- *
- * Only for use on the current thread
- * if timesharing and not depressed.
- *
- * Called with the thread locked.
- */
-void
-compute_my_priority(
- register thread_t thread)
-{
- register int priority;
-
- priority = do_priority_computation(thread);
- assert(thread->runq == PROCESSOR_NULL);
-
- if (priority != thread->sched_pri) {
- KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_DECAY_PRIORITY)|DBG_FUNC_NONE,
- (uintptr_t)thread_tid(thread),
- thread->priority,
- thread->sched_pri,
- priority,
- 0);
- }
- thread->sched_pri = priority;
-}
/*
* can_update_priority
*/
void
update_priority(
- register thread_t thread)
+ thread_t thread)
{
- register unsigned ticks;
- register uint32_t delta;
+ uint32_t ticks, delta;
ticks = sched_tick - thread->sched_stamp;
assert(ticks != 0);
+
thread->sched_stamp += ticks;
- if (sched_use_combined_fgbg_decay)
- thread->pri_shift = sched_combined_fgbg_pri_shift;
- else if (thread->sched_flags & TH_SFLAG_THROTTLED)
- thread->pri_shift = sched_background_pri_shift;
- else
- thread->pri_shift = sched_pri_shift;
+
+ thread->pri_shift = sched_pri_shifts[thread->th_sched_bucket];
/* If requested, accelerate aging of sched_usage */
if (sched_decay_usage_age_factor > 1)
/*
* Gather cpu usage data.
*/
thread_timer_delta(thread, delta);
if (ticks < SCHED_DECAY_TICKS) {
- register struct shift_data *shiftp;
-
/*
* Accumulate timesharing usage only
* during contention for processor
* resources.
*/
thread->cpu_usage += delta + thread->cpu_delta;
thread->cpu_delta = 0;
- shiftp = &sched_decay_shifts[ticks];
+ struct shift_data *shiftp = &sched_decay_shifts[ticks];
+
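+ /*
+ * Apply (5/8) ** ticks of decay using the precomputed shift pairs:
+ * a positive shift2 adds the second term, a negative one subtracts it.
+ */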
if (shiftp->shift2 > 0) {
- thread->cpu_usage =
- (thread->cpu_usage >> shiftp->shift1) +
- (thread->cpu_usage >> shiftp->shift2);
- thread->sched_usage =
- (thread->sched_usage >> shiftp->shift1) +
- (thread->sched_usage >> shiftp->shift2);
- }
- else {
- thread->cpu_usage =
- (thread->cpu_usage >> shiftp->shift1) -
- (thread->cpu_usage >> -(shiftp->shift2));
- thread->sched_usage =
- (thread->sched_usage >> shiftp->shift1) -
- (thread->sched_usage >> -(shiftp->shift2));
+ thread->cpu_usage = (thread->cpu_usage >> shiftp->shift1) +
+ (thread->cpu_usage >> shiftp->shift2);
+ thread->sched_usage = (thread->sched_usage >> shiftp->shift1) +
+ (thread->sched_usage >> shiftp->shift2);
+ } else {
+ thread->cpu_usage = (thread->cpu_usage >> shiftp->shift1) -
+ (thread->cpu_usage >> -(shiftp->shift2));
+ thread->sched_usage = (thread->sched_usage >> shiftp->shift1) -
+ (thread->sched_usage >> -(shiftp->shift2));
}
- }
- else {
+ } else {
thread->cpu_usage = thread->cpu_delta = 0;
thread->sched_usage = 0;
}
sched_thread_mode_undemote(thread, TH_SFLAG_FAILSAFE);
}
-
/*
* Recompute scheduled priority if appropriate.
*/
- if ( (thread->sched_mode == TH_MODE_TIMESHARE) &&
- !(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) &&
- !(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) ) {
- register int new_pri;
-
- new_pri = do_priority_computation(thread);
- if (new_pri != thread->sched_pri) {
- boolean_t removed = thread_run_queue_remove(thread);
-
-#if 0
- if (sched_use_combined_fgbg_decay && ((thread)->task->max_priority > MAXPRI_THROTTLE) && (new_pri == MAXPRI_THROTTLE)) {
- /* with the alternate (new) algorithm, would we have decayed this far? */
- int alt_pri = thread->priority - (thread->sched_usage >> sched_pri_shift);
- if ((alt_pri > new_pri) && (sched_background_count > 0)) {
- printf("thread %p would have decayed to only %d instead of %d\n", thread, alt_pri, new_pri);
- }
- }
-#endif
+ if (thread->sched_mode == TH_MODE_TIMESHARE) {
+ int priority = sched_compute_timeshare_priority(thread);
+
+ /*
+ * Adjust the scheduled priority like thread_recompute_sched_pri,
+ * except without setting an AST.
+ */
+ if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) || (priority > thread->sched_pri)) &&
+ (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)) &&
+ priority != thread->sched_pri) {
- KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_DECAY_PRIORITY)|DBG_FUNC_NONE,
- (uintptr_t)thread_tid(thread),
- thread->priority,
- thread->sched_pri,
- new_pri,
- 0);
- thread->sched_pri = new_pri;
+ boolean_t removed = thread_run_queue_remove(thread);
+
+ thread->sched_pri = priority;
+
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
+ (uintptr_t)thread_tid(thread),
+ thread->base_pri,
+ thread->sched_pri,
+ 0, /* eventually, 'reason' */
+ 0);
if (removed)
- thread_setrun(thread, SCHED_TAILQ);
+ thread_run_queue_reinsert(thread, SCHED_TAILQ);
}
}
-
+
return;
}
#endif /* CONFIG_SCHED_TIMESHARE_CORE */
-#if MACH_ASSERT
-/* sched_mode == TH_MODE_TIMESHARE controls whether a thread has a timeshare count when it has a run count */
-void sched_share_incr(thread_t thread) {
- assert((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN);
- assert(thread->sched_mode == TH_MODE_TIMESHARE);
- assert(thread->SHARE_COUNT == 0);
- thread->SHARE_COUNT++;
- (void)hw_atomic_add(&sched_share_count, 1);
+/*
+ * TH_BUCKET_RUN is a count of *all* runnable non-idle threads.
+ * Each other bucket is a count of the runnable non-idle threads
+ * with that property.
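+ *
+ * For example, a runnable timeshare thread with base_pri above
+ * BASEPRI_UTILITY is counted in both TH_BUCKET_RUN and
+ * TH_BUCKET_SHARE_FG (see sched_update_thread_bucket below).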
+ */
+volatile uint32_t sched_run_buckets[TH_BUCKET_MAX];
+
+static void
+sched_incr_bucket(sched_bucket_t bucket)
+{
+ assert(bucket >= TH_BUCKET_FIXPRI &&
+ bucket <= TH_BUCKET_SHARE_BG);
+
+ hw_atomic_add(&sched_run_buckets[bucket], 1);
}
-void sched_share_decr(thread_t thread) {
- assert((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN || thread->sched_mode != TH_MODE_TIMESHARE);
- assert(thread->SHARE_COUNT == 1);
- (void)hw_atomic_sub(&sched_share_count, 1);
- thread->SHARE_COUNT--;
+static void
+sched_decr_bucket(sched_bucket_t bucket)
+{
+ assert(bucket >= TH_BUCKET_FIXPRI &&
+ bucket <= TH_BUCKET_SHARE_BG);
+
+ assert(sched_run_buckets[bucket] > 0);
+
+ hw_atomic_sub(&sched_run_buckets[bucket], 1);
}
-/* TH_SFLAG_THROTTLED controls whether a thread has a background count when it has a run count and a share count */
+/* TH_RUN & !TH_IDLE controls whether a thread has a run count */
-void sched_background_incr(thread_t thread) {
+uint32_t
+sched_run_incr(thread_t thread)
+{
assert((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN);
- assert(thread->sched_mode == TH_MODE_TIMESHARE);
- assert((thread->sched_flags & TH_SFLAG_THROTTLED) == TH_SFLAG_THROTTLED);
- assert(thread->BG_COUNT == 0);
- thread->BG_COUNT++;
- int val = hw_atomic_add(&sched_background_count, 1);
- assert(val >= 0);
+ uint32_t new_count = hw_atomic_add(&sched_run_buckets[TH_BUCKET_RUN], 1);
- /* Always do the background change while holding a share count */
- assert(thread->SHARE_COUNT == 1);
-}
+ sched_incr_bucket(thread->th_sched_bucket);
-void sched_background_decr(thread_t thread) {
- if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN && thread->sched_mode == TH_MODE_TIMESHARE)
- assert((thread->sched_flags & TH_SFLAG_THROTTLED) != TH_SFLAG_THROTTLED);
- assert(thread->BG_COUNT == 1);
- int val = hw_atomic_sub(&sched_background_count, 1);
- thread->BG_COUNT--;
- assert(val >= 0);
- assert(thread->BG_COUNT == 0);
-
- /* Always do the background change while holding a share count */
- assert(thread->SHARE_COUNT == 1);
+ return new_count;
}
+uint32_t
+sched_run_decr(thread_t thread)
+{
+ assert((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN);
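+
+ /* The run count drops only once the thread is no longer runnable (or has gone idle) */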
-void
-assert_thread_sched_count(thread_t thread) {
- /* Only 0 or 1 are acceptable values */
- assert(thread->BG_COUNT == 0 || thread->BG_COUNT == 1);
- assert(thread->SHARE_COUNT == 0 || thread->SHARE_COUNT == 1);
-
- /* BG is only allowed when you already have a share count */
- if (thread->BG_COUNT == 1)
- assert(thread->SHARE_COUNT == 1);
- if (thread->SHARE_COUNT == 0)
- assert(thread->BG_COUNT == 0);
-
- if ((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN ||
- (thread->sched_mode != TH_MODE_TIMESHARE))
- assert(thread->SHARE_COUNT == 0);
-
- if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN &&
- (thread->sched_mode == TH_MODE_TIMESHARE))
- assert(thread->SHARE_COUNT == 1);
-
- if ((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN ||
- (thread->sched_mode != TH_MODE_TIMESHARE) ||
- !(thread->sched_flags & TH_SFLAG_THROTTLED))
- assert(thread->BG_COUNT == 0);
-
- if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN &&
- (thread->sched_mode == TH_MODE_TIMESHARE) &&
- (thread->sched_flags & TH_SFLAG_THROTTLED))
- assert(thread->BG_COUNT == 1);
+ sched_decr_bucket(thread->th_sched_bucket);
+
+ uint32_t new_count = hw_atomic_sub(&sched_run_buckets[TH_BUCKET_RUN], 1);
+
+ return new_count;
}
-#endif /* MACH_ASSERT */
+static void
+sched_update_thread_bucket(thread_t thread)
+{
+ sched_bucket_t old_bucket = thread->th_sched_bucket;
+ sched_bucket_t new_bucket = TH_BUCKET_RUN;
+
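+ /*
+ * Fixed and realtime threads share the fixed-priority bucket;
+ * timeshare threads are binned into foreground, utility, and
+ * background bands by base_pri.
+ */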
+ switch (thread->sched_mode) {
+ case TH_MODE_FIXED:
+ case TH_MODE_REALTIME:
+ new_bucket = TH_BUCKET_FIXPRI;
+ break;
+
+ case TH_MODE_TIMESHARE:
+ if (thread->base_pri > BASEPRI_UTILITY)
+ new_bucket = TH_BUCKET_SHARE_FG;
+ else if (thread->base_pri > MAXPRI_THROTTLE)
+ new_bucket = TH_BUCKET_SHARE_UT;
+ else
+ new_bucket = TH_BUCKET_SHARE_BG;
+ break;
+
+ default:
+ panic("unexpected mode: %d", thread->sched_mode);
+ break;
+ }
+
+ if (old_bucket != new_bucket) {
+ thread->th_sched_bucket = new_bucket;
+ thread->pri_shift = sched_pri_shifts[new_bucket];
+
+ if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) {
+ sched_decr_bucket(old_bucket);
+ sched_incr_bucket(new_bucket);
+ }
+ }
+}
/*
* Set the thread's true scheduling mode
* Called with thread mutex and thread locked
*/
void
sched_set_thread_mode(thread_t thread, sched_mode_t new_mode)
{
- assert_thread_sched_count(thread);
-
- sched_mode_t old_mode = thread->sched_mode;
-
- thread->sched_mode = new_mode;
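+ /* Mode (and bucket) changes require the thread to be off its run queue */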
+ assert(thread->runq == PROCESSOR_NULL);
switch (new_mode) {
- case TH_MODE_FIXED:
- case TH_MODE_REALTIME:
- if (old_mode == TH_MODE_TIMESHARE) {
- if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) {
- if (thread->sched_flags & TH_SFLAG_THROTTLED)
- sched_background_decr(thread);
-
- sched_share_decr(thread);
- }
- }
- break;
-
- case TH_MODE_TIMESHARE:
- if (old_mode != TH_MODE_TIMESHARE) {
- if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) {
- sched_share_incr(thread);
-
- if (thread->sched_flags & TH_SFLAG_THROTTLED)
- sched_background_incr(thread);
- }
- }
- break;
-
- default:
- panic("unexpected mode: %d", new_mode);
- break;
+ case TH_MODE_FIXED:
+ case TH_MODE_REALTIME:
+ case TH_MODE_TIMESHARE:
+ break;
+
+ default:
+ panic("unexpected mode: %d", new_mode);
+ break;
}
- assert_thread_sched_count(thread);
+ thread->sched_mode = new_mode;
+
+ sched_update_thread_bucket(thread);
}
/*
* Demote the true scheduler mode to timeshare (called with the thread locked)
*/
{
assert(reason & TH_SFLAG_DEMOTED_MASK);
assert((thread->sched_flags & reason) != reason);
- assert_thread_sched_count(thread);
if (thread->policy_reset)
return;
boolean_t removed = thread_run_queue_remove(thread);
- if (thread->sched_mode == TH_MODE_REALTIME)
- thread->priority = DEPRESSPRI;
-
thread->sched_flags |= reason;
thread->saved_mode = thread->sched_mode;
sched_set_thread_mode(thread, TH_MODE_TIMESHARE);
- if (removed)
- thread_setrun(thread, SCHED_TAILQ);
+ thread_recompute_priority(thread);
- assert_thread_sched_count(thread);
+ if (removed)
+ thread_run_queue_reinsert(thread, SCHED_TAILQ);
}
/*
* Un-demote the true scheduler mode back to the saved mode (called with the thread locked)
*/
assert(thread->sched_mode == TH_MODE_TIMESHARE);
assert(thread->policy_reset == 0);
- assert_thread_sched_count(thread);
-
thread->sched_flags &= ~reason;
if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
thread->saved_mode = TH_MODE_NONE;
- if (thread->sched_mode == TH_MODE_REALTIME) {
- thread->priority = BASEPRI_RTQUEUES;
- }
-
- SCHED(compute_priority)(thread, FALSE);
+ thread_recompute_priority(thread);
if (removed)
- thread_setrun(thread, SCHED_TAILQ);
+ thread_run_queue_reinsert(thread, SCHED_TAILQ);
}
-/*
- * Set the thread to be categorized as 'background'
- * Called with thread mutex and thread lock held
- *
- * TODO: Eventually, 'background' should be a true sched_mode.
- */
-void
-sched_set_thread_throttled(thread_t thread, boolean_t wants_throttle)
-{
- if (thread->policy_reset)
- return;
-
- assert(((thread->sched_flags & TH_SFLAG_THROTTLED) ? TRUE : FALSE) != wants_throttle);
-
- assert_thread_sched_count(thread);
-
- /*
- * When backgrounding a thread, iOS has the semantic that
- * realtime and fixed priority threads should be demoted
- * to timeshare background threads.
- *
- * On OSX, realtime and fixed priority threads don't lose their mode.
- */
-
- if (wants_throttle) {
- thread->sched_flags |= TH_SFLAG_THROTTLED;
- if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN && thread->sched_mode == TH_MODE_TIMESHARE) {
- sched_background_incr(thread);
- }
-
- assert_thread_sched_count(thread);
-
- } else {
- thread->sched_flags &= ~TH_SFLAG_THROTTLED;
- if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN && thread->sched_mode == TH_MODE_TIMESHARE) {
- sched_background_decr(thread);
- }
-
- assert_thread_sched_count(thread);
-
- }
-
- assert_thread_sched_count(thread);
-}