X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/ebb1b9f42b62218f29061826217bb0f71cd375a6..743345f9a4b36f7e2f9ba37691e70c50baecb56e:/osfmk/kern/priority.c?ds=sidebyside

diff --git a/osfmk/kern/priority.c b/osfmk/kern/priority.c
index 3273a4f6c..f4f5b1cc8 100644
--- a/osfmk/kern/priority.c
+++ b/osfmk/kern/priority.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
@@ -56,11 +56,11 @@
 /*
  */
 /*
- *	File:	clock_prim.c
+ *	File:	priority.c
  *	Author:	Avadis Tevanian, Jr.
  *	Date:	1986
  *
- *	Clock primitives.
+ *	Priority-related scheduler bits.
  */
 
 #include <mach/boolean.h>
@@ -73,7 +73,15 @@
 #include <kern/spl.h>
 #include <kern/thread.h>
 #include <kern/processor.h>
+#include <kern/ledger.h>
 #include <machine/machparam.h>
+#include <kern/machine.h>
+
+#ifdef CONFIG_MACH_APPROXIMATE_TIME
+#include <machine/commpage.h>  /* for commpage_update_mach_approximate_time */
+#endif
+
+static void sched_update_thread_bucket(thread_t thread);
 
 /*
  *	thread_quantum_expire:
@@ -91,51 +99,72 @@ thread_quantum_expire(
 	processor_t			processor = p0;
 	thread_t			thread = p1;
 	ast_t				preempt;
+	uint64_t			ctime;
+	int					urgency;
+	uint64_t			ignore1, ignore2;
+
+	assert(processor == current_processor());
+	assert(thread == current_thread());
+
+	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_QUANTUM_EXPIRED) | DBG_FUNC_START, 0, 0, 0, 0, 0);
 
 	SCHED_STATS_QUANTUM_TIMER_EXPIRATION(processor);
 
+	/*
+	 * We bill CPU time to both the individual thread and its task.
+	 *
+	 * Because this balance adjustment could potentially attempt to wake this very
+	 * thread, we must credit the ledger before taking the thread lock. The ledger
+	 * pointers are only manipulated by the thread itself at the ast boundary.
+	 *
+	 * TODO: This fails to account for the time between when the timer was armed and when it fired.
+	 * It should be based on the system_timer and running a thread_timer_event operation here.
+	 */
+	ledger_credit(thread->t_ledger, task_ledgers.cpu_time, thread->quantum_remaining);
+	ledger_credit(thread->t_threadledger, thread_ledgers.cpu_time, thread->quantum_remaining);
+#ifdef CONFIG_BANK
+	if (thread->t_bankledger) {
+		ledger_credit(thread->t_bankledger, bank_ledgers.cpu_time,
+				(thread->quantum_remaining - thread->t_deduct_bank_ledger_time));
+	}
+	thread->t_deduct_bank_ledger_time = 0;
+#endif
+
+	ctime = mach_absolute_time();
+
+#ifdef CONFIG_MACH_APPROXIMATE_TIME
+	commpage_update_mach_approximate_time(ctime);
+#endif
+
 	thread_lock(thread);
 
 	/*
 	 * We've run up until our quantum expiration, and will (potentially)
 	 * continue without re-entering the scheduler, so update this now.
 	 */
-	thread->last_run_time = processor->quantum_end;
-	
+	processor->last_dispatch = ctime;
+	thread->last_run_time = ctime;
+
 	/*
 	 *	Check for fail-safe trip.
 	 */
-	if ((thread->sched_mode == TH_MODE_REALTIME || thread->sched_mode == TH_MODE_FIXED) && 
-	    !(thread->sched_flags & TH_SFLAG_PROMOTED) &&
-	    !(thread->options & TH_OPT_SYSTEM_CRITICAL)) {
-		uint64_t new_computation;
-
-		new_computation = processor->quantum_end - thread->computation_epoch;
-		new_computation += thread->computation_metered;
-		if (new_computation > max_unsafe_computation) {
-
+	if ((thread->sched_mode == TH_MODE_REALTIME || thread->sched_mode == TH_MODE_FIXED) &&
+	    !(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) &&
+	    !(thread->options & TH_OPT_SYSTEM_CRITICAL)) {
+		uint64_t new_computation;
+
+		new_computation = ctime - thread->computation_epoch;
+		new_computation += thread->computation_metered;
+		if (new_computation > max_unsafe_computation) {
 			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAILSAFE)|DBG_FUNC_NONE,
 					(uintptr_t)thread->sched_pri, (uintptr_t)thread->sched_mode, 0, 0, 0);
 
-			if (thread->sched_mode == TH_MODE_REALTIME) {
-				thread->priority = DEPRESSPRI;
-			}
-			
-			thread->saved_mode = thread->sched_mode;
-
-			if (SCHED(supports_timeshare_mode)) {
-				sched_share_incr();
-				thread->sched_mode = TH_MODE_TIMESHARE;
-			} else {
-				/* XXX handle fixed->fixed case */
-				thread->sched_mode = TH_MODE_FIXED;
-			}
-
-			thread->safe_release = processor->quantum_end + sched_safe_duration;
-			thread->sched_flags |= TH_SFLAG_FAILSAFE;
+			thread->safe_release = ctime + sched_safe_duration;
+
+			sched_thread_mode_demote(thread, TH_SFLAG_FAILSAFE);
 		}
 	}
-		
+
 	/*
 	 *	Recompute scheduled priority if appropriate.
 	 */
@@ -144,62 +173,151 @@ thread_quantum_expire(
 	else
 		SCHED(lightweight_update_priority)(thread);
 
-	SCHED(quantum_expire)(thread);
-	
+	if (thread->sched_mode != TH_MODE_REALTIME)
+		SCHED(quantum_expire)(thread);
+
 	processor->current_pri = thread->sched_pri;
 	processor->current_thmode = thread->sched_mode;
 
+	/* Tell platform layer that we are still running this thread */
+	urgency = thread_get_urgency(thread, &ignore1, &ignore2);
+	machine_thread_going_on_core(thread, urgency, 0);
+
 	/*
 	 *	This quantum is up, give this thread another.
 	 */
-	if (first_timeslice(processor))
-		processor->timeslice--;
+	processor->first_timeslice = FALSE;
 
 	thread_quantum_init(thread);
-	thread->last_quantum_refill_time = processor->quantum_end;
 
-	processor->quantum_end += thread->current_quantum;
-	timer_call_enter1(&processor->quantum_timer, thread,
-	    processor->quantum_end, TIMER_CALL_CRITICAL);
+	/* Reload precise timing global policy to thread-local policy */
+	thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);
+
+	/*
+	 * Since non-precise user/kernel time doesn't update the state/thread timer
+	 * during privilege transitions, synthesize an event now.
+	 */
+	if (!thread->precise_user_kernel_time) {
+		timer_switch(PROCESSOR_DATA(processor, current_state),
+					 ctime,
+					 PROCESSOR_DATA(processor, current_state));
+		timer_switch(PROCESSOR_DATA(processor, thread_timer),
+					 ctime,
+					 PROCESSOR_DATA(processor, thread_timer));
+	}
+
+	processor->quantum_end = ctime + thread->quantum_remaining;
 
 	/*
 	 *	Context switch check.
 	 */
-	if ((preempt = csw_check(processor)) != AST_NONE)
+	if ((preempt = csw_check(processor, AST_QUANTUM)) != AST_NONE)
 		ast_on(preempt);
-	else {
-		processor_set_t		pset = processor->processor_set;
 
-		pset_lock(pset);
+	thread_unlock(thread);
 
-		pset_pri_hint(pset, processor, processor->current_pri);
-		pset_count_hint(pset, processor, SCHED(processor_runq_count)(processor));
+	timer_call_enter1(&processor->quantum_timer, thread,
+	    processor->quantum_end, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
 
-		pset_unlock(pset);
-	}
+#if defined(CONFIG_SCHED_TIMESHARE_CORE)
+	sched_timeshare_consider_maintenance(ctime);
+#endif /* CONFIG_SCHED_TIMESHARE_CORE */
 
-	thread_unlock(thread);
+
+	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_QUANTUM_EXPIRED) | DBG_FUNC_END, preempt, 0, 0, 0, 0);
 }
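
    [Editor's note] The fail-safe check above demotes a realtime or fixed-priority thread that has run for
    longer than max_unsafe_computation without blocking, via sched_thread_mode_demote(thread, TH_SFLAG_FAILSAFE).
    A minimal userland sketch of that arithmetic, assuming made-up abstime values (the field names mirror the
    hunk; the constant and numbers are illustrative only):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in; the kernel derives max_unsafe_computation elsewhere. */
    static const uint64_t max_unsafe_computation = 100000000; /* abstime units */

    /* Returns true when the thread should take the fail-safe demotion. */
    static bool failsafe_tripped(uint64_t ctime,
                                 uint64_t computation_epoch,
                                 uint64_t computation_metered)
    {
        uint64_t new_computation = ctime - computation_epoch; /* time on core this stint */
        new_computation += computation_metered;               /* plus earlier unblocked time */
        return new_computation > max_unsafe_computation;
    }

    int main(void)
    {
        /* Went on core at t=5, quantum expires at t=120000000, with
         * 10000000 units already accumulated without blocking. */
        printf("%s\n", failsafe_tripped(120000000, 5, 10000000) ? "demote" : "ok");
        return 0;
    }
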
 
-#if defined(CONFIG_SCHED_TRADITIONAL)
+/*
+ *	sched_set_thread_base_priority:
+ *
+ *	Set the base priority of the thread
+ *	and reset its scheduled priority.
+ *
+ *	This is the only path to change base_pri.
+ *
+ *	Called with the thread locked.
+ */
+void
+sched_set_thread_base_priority(thread_t thread, int priority)
+{
+	assert(priority >= MINPRI);
+
+	if (thread->sched_mode == TH_MODE_REALTIME)
+		assert(priority <= BASEPRI_RTQUEUES);
+	else
+		assert(priority < BASEPRI_RTQUEUES);
+
+	thread->base_pri = priority;
+
+	sched_update_thread_bucket(thread);
+
+	thread_recompute_sched_pri(thread, FALSE);
+}
+
+/*
+ *	thread_recompute_sched_pri:
+ *
+ *	Reset the scheduled priority of the thread
+ *	according to its base priority if the
+ *	thread has not been promoted or depressed.
+ *
+ *	This is the standard way to push base_pri changes into sched_pri,
+ *	or to recalculate the appropriate sched_pri after clearing
+ *	a promotion or depression.
+ *
+ *	Called at splsched with the thread locked.
+ */
+void
+thread_recompute_sched_pri(
+                           thread_t thread,
+                           boolean_t override_depress)
+{
+	int priority;
+
+	if (thread->sched_mode == TH_MODE_TIMESHARE)
+		priority = SCHED(compute_timeshare_priority)(thread);
+	else
+		priority = thread->base_pri;
+
+	if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK)  || (priority > thread->sched_pri)) &&
+	    (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) || override_depress)) {
+		set_sched_pri(thread, priority);
+	}
+}
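
    [Editor's note] thread_recompute_sched_pri applies the freshly computed priority only when doing so would
    not undo a promotion or an (un-overridden) depression. A small sketch of that gate, using stand-in booleans
    for the TH_SFLAG_PROMOTED_MASK / TH_SFLAG_DEPRESSED_MASK tests (semantics as in the hunk above; everything
    else is illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    /* Decide whether a newly computed priority may replace sched_pri. */
    static bool may_apply(int new_pri, int sched_pri,
                          bool promoted, bool depressed, bool override_depress)
    {
        /* A promoted thread only moves up; a depressed thread only moves
         * when the caller explicitly overrides the depression. */
        return (!promoted || new_pri > sched_pri) &&
               (!depressed || override_depress);
    }

    int main(void)
    {
        printf("%d\n", may_apply(31, 40, true,  false, false)); /* 0: would undo promotion */
        printf("%d\n", may_apply(50, 40, true,  false, false)); /* 1: raises a promoted thread */
        printf("%d\n", may_apply(31, 10, false, true,  false)); /* 0: still depressed */
        printf("%d\n", may_apply(31, 10, false, true,  true));  /* 1: depression overridden */
        return 0;
    }
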
 
 void
-sched_traditional_quantum_expire(thread_t	thread __unused)
+sched_default_quantum_expire(thread_t thread __unused)
 {
-	/*
-	 * No special behavior when a timeshare, fixed, or realtime thread
-	 * uses up its entire quantum
-	 */
+	/*
+	 * No special behavior when a timeshare, fixed, or realtime thread
+	 * uses up its entire quantum
+	 */
 }
 
+#if defined(CONFIG_SCHED_TIMESHARE_CORE)
+
+/*
+ *	lightweight_update_priority:
+ *
+ *	Update the scheduled priority for
+ *	a timesharing thread.
+ *
+ *	Only for use on the current thread.
+ *
+ *	Called with the thread locked.
+ */
 void
 lightweight_update_priority(thread_t thread)
 {
+	assert(thread->runq == PROCESSOR_NULL);
+	assert(thread == current_thread());
+
 	if (thread->sched_mode == TH_MODE_TIMESHARE) {
-		register uint32_t	delta;
-		
+		int priority;
+		uint32_t delta;
+
 		thread_timer_delta(thread, delta);
-		
+
 		/*
 		 *	Accumulate timesharing usage only
 		 *	during contention for processor
@@ -207,18 +325,29 @@ lightweight_update_priority(thread_t thread)
 		 */
 		if (thread->pri_shift < INT8_MAX)
 			thread->sched_usage += delta;
-		
+
 		thread->cpu_delta += delta;
-		
+
+		priority = sched_compute_timeshare_priority(thread);
+
 		/*
-		 * Adjust the scheduled priority if
-		 * the thread has not been promoted
-		 * and is not depressed.
+		 * Adjust the scheduled priority like thread_recompute_sched_pri,
+		 * except with the benefit of knowing the thread is on this core.
 		 */
-		if (	!(thread->sched_flags & TH_SFLAG_PROMOTED)	&&
-			!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)		)
-			compute_my_priority(thread);
-	}	
+		if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK)  || (priority > thread->sched_pri)) &&
+		    (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)) &&
+		    priority != thread->sched_pri) {
+
+			thread->sched_pri = priority;
+
+			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
+			                      (uintptr_t)thread_tid(thread),
+			                      thread->base_pri,
+			                      thread->sched_pri,
+			                      0, /* eventually, 'reason' */
+			                      0);
+		}
+	}
 }
 
 /*
@@ -242,112 +371,27 @@ static struct shift_data	sched_decay_shifts[SCHED_DECAY_TICKS] = {
 };
 
 /*
- *	do_priority_computation:
+ *	sched_compute_timeshare_priority:
  *
  *	Calculate the timesharing priority based upon usage and load.
  */
-#ifdef CONFIG_EMBEDDED
-
-#define do_priority_computation(thread, pri)							\
-	MACRO_BEGIN															\
-	(pri) = (thread)->priority		/* start with base priority */		\
-	    - ((thread)->sched_usage >> (thread)->pri_shift);				\
-	if ((pri) < MAXPRI_THROTTLE) {										\
-		if ((thread)->task->max_priority > MAXPRI_THROTTLE)				\
-			(pri) = MAXPRI_THROTTLE;									\
-		else															\
-			if ((pri) < MINPRI_USER)									\
-				(pri) = MINPRI_USER;									\
-	} else																\
-	if ((pri) > MAXPRI_KERNEL)											\
-		(pri) = MAXPRI_KERNEL;											\
-	MACRO_END
-
-#else
-
-#define do_priority_computation(thread, pri)							\
-	MACRO_BEGIN															\
-	(pri) = (thread)->priority		/* start with base priority */		\
-	    - ((thread)->sched_usage >> (thread)->pri_shift);				\
-	if ((pri) < MINPRI_USER)											\
-		(pri) = MINPRI_USER;											\
-	else																\
-	if ((pri) > MAXPRI_KERNEL)											\
-		(pri) = MAXPRI_KERNEL;											\
-	MACRO_END
-
-#endif /* defined(CONFIG_SCHED_TRADITIONAL) */
+extern int sched_pri_decay_band_limit;
 
-#endif
 
-/*
- *	set_priority:
- *
- *	Set the base priority of the thread
- *	and reset its scheduled priority.
- *
- *	Called with the thread locked.
- */
-void
-set_priority(
-	register thread_t	thread,
-	register int		priority)
+int
+sched_compute_timeshare_priority(thread_t thread)
 {
-	thread->priority = priority;
-	SCHED(compute_priority)(thread, FALSE);
-}
+	/* start with base priority */
+	int priority = thread->base_pri - (thread->sched_usage >> thread->pri_shift);
 
-#if defined(CONFIG_SCHED_TRADITIONAL)
+	if (priority < MINPRI_USER)
+		priority = MINPRI_USER;
+	else if (priority > MAXPRI_KERNEL)
+		priority = MAXPRI_KERNEL;
 
-/*
- *	compute_priority:
- *
- *	Reset the scheduled priority of the thread
- *	according to its base priority if the
- *	thread has not been promoted or depressed.
- *
- *	Called with the thread locked.
- */
-void
-compute_priority(
-	register thread_t	thread,
-	boolean_t			override_depress)
-{
-	register int		priority;
-
-	if (	!(thread->sched_flags & TH_SFLAG_PROMOTED)			&&
-			(!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)	||
-				 override_depress							)		) {
-		if (thread->sched_mode == TH_MODE_TIMESHARE)
-			do_priority_computation(thread, priority);
-		else
-			priority = thread->priority;
-
-		set_sched_pri(thread, priority);
-	}
+	return priority;
 }
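
    [Editor's note] sched_compute_timeshare_priority is the entire timeshare formula: subtract the load-scaled
    usage from the base priority and clamp to the user/kernel band. A worked sketch, with local copies of the
    band limits as they appear in this era's osfmk/kern/sched.h (treat the numbers as illustrative; pri_shift
    comes from the per-bucket load calculation elsewhere in the scheduler):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative copies of the band limits from osfmk/kern/sched.h. */
    #define MINPRI_USER      0
    #define MAXPRI_KERNEL   95
    #define BASEPRI_DEFAULT 31

    static int compute_timeshare_priority(int base_pri, uint32_t sched_usage, uint32_t pri_shift)
    {
        int priority = base_pri - (int)(sched_usage >> pri_shift);

        if (priority < MINPRI_USER)
            priority = MINPRI_USER;
        else if (priority > MAXPRI_KERNEL)
            priority = MAXPRI_KERNEL;

        return priority;
    }

    int main(void)
    {
        /* Heavier recent usage, or a smaller pri_shift (busier bucket), costs more priority. */
        printf("%d\n", compute_timeshare_priority(BASEPRI_DEFAULT, 1 << 20, 18)); /* 31 - 4  = 27 */
        printf("%d\n", compute_timeshare_priority(BASEPRI_DEFAULT, 1 << 20, 14)); /* 31 - 64, clamped to 0 */
        return 0;
    }
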
 
-/*
- *	compute_my_priority:
- *
- *	Reset the scheduled priority for
- *	a timesharing thread.
- *
- *	Only for use on the current thread
- *	if timesharing and not depressed.
- *
- *	Called with the thread locked.
- */
-void
-compute_my_priority(
-	register thread_t	thread)
-{
-	register int		priority;
-
-	do_priority_computation(thread, priority);
-	assert(thread->runq == PROCESSOR_NULL);
-	thread->sched_pri = priority;
-}
 
 /*
  *	can_update_priority
@@ -375,23 +419,26 @@ can_update_priority(
  */
 void
 update_priority(
-	register thread_t	thread)
+	thread_t	thread)
 {
-	register unsigned	ticks;
-	register uint32_t	delta;
+	uint32_t ticks, delta;
 
 	ticks = sched_tick - thread->sched_stamp;
 	assert(ticks != 0);
+
 	thread->sched_stamp += ticks;
-	thread->pri_shift = sched_pri_shift;
+
+	thread->pri_shift = sched_pri_shifts[thread->th_sched_bucket];
+
+	/* If requested, accelerate aging of sched_usage */
+	if (sched_decay_usage_age_factor > 1)
+		ticks *= sched_decay_usage_age_factor;
 
 	/*
 	 *	Gather cpu usage data.
 	 */
 	thread_timer_delta(thread, delta);
 	if (ticks < SCHED_DECAY_TICKS) {
-		register struct shift_data	*shiftp;
-
 		/*
 		 *	Accumulate timesharing usage only
 		 *	during contention for processor
@@ -403,25 +450,20 @@ update_priority(
 		thread->cpu_usage += delta + thread->cpu_delta;
 		thread->cpu_delta = 0;
 
-		shiftp = &sched_decay_shifts[ticks];
+		struct shift_data *shiftp = &sched_decay_shifts[ticks];
+
 		if (shiftp->shift2 > 0) {
-		    thread->cpu_usage =
-						(thread->cpu_usage >> shiftp->shift1) +
-						(thread->cpu_usage >> shiftp->shift2);
-		    thread->sched_usage =
-						(thread->sched_usage >> shiftp->shift1) +
-						(thread->sched_usage >> shiftp->shift2);
-		}
-		else {
-		    thread->cpu_usage =
-						(thread->cpu_usage >> shiftp->shift1) -
-						(thread->cpu_usage >> -(shiftp->shift2));
-		    thread->sched_usage =
-						(thread->sched_usage >> shiftp->shift1) -
-						(thread->sched_usage >> -(shiftp->shift2));
+			thread->cpu_usage =   (thread->cpu_usage >> shiftp->shift1) +
+			                      (thread->cpu_usage >> shiftp->shift2);
+			thread->sched_usage = (thread->sched_usage >> shiftp->shift1) +
+			                      (thread->sched_usage >> shiftp->shift2);
+		} else {
+			thread->cpu_usage =   (thread->cpu_usage >>   shiftp->shift1) -
+			                      (thread->cpu_usage >> -(shiftp->shift2));
+			thread->sched_usage = (thread->sched_usage >>   shiftp->shift1) -
+			                      (thread->sched_usage >> -(shiftp->shift2));
 		}
-	}
-	else {
+	} else {
 		thread->cpu_usage = thread->cpu_delta = 0;
 		thread->sched_usage = 0;
 	}
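
    [Editor's note] The shift1/shift2 pairs in sched_decay_shifts let update_priority scale usage without a
    multiply: a positive shift2 adds a second right shift, a negative one subtracts it, which in the traditional
    Mach scheduler approximates decaying usage by (5/8)^ticks. A quick check of that approximation with
    hypothetical table entries for one and two ticks (the authoritative table sits just above this function in
    the full source):

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    struct shift_data { int shift1; int shift2; };

    /* Apply one table entry the same way update_priority does. */
    static uint32_t decay(uint32_t usage, struct shift_data s)
    {
        if (s.shift2 > 0)
            return (usage >> s.shift1) + (usage >> s.shift2);
        else
            return (usage >> s.shift1) - (usage >> -s.shift2);
    }

    int main(void)
    {
        struct shift_data one = {1, 3};   /* 1/2 + 1/8 = 0.625          */
        struct shift_data two = {1, -3};  /* 1/2 - 1/8 = 0.375 ~ 0.3906 */

        uint32_t usage = 1 << 20;
        printf("1 tick : %u vs %.0f\n", decay(usage, one), usage * pow(5.0 / 8.0, 1));
        printf("2 ticks: %u vs %.0f\n", decay(usage, two), usage * pow(5.0 / 8.0, 2));
        return 0;
    }
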
@@ -429,45 +471,228 @@ update_priority(
 	/*
 	 *	Check for fail-safe release.
 	 */
-	if (	(thread->sched_flags & TH_SFLAG_FAILSAFE)		&&
-			mach_absolute_time() >= thread->safe_release		) {
-		if (thread->saved_mode != TH_MODE_TIMESHARE) {
-			if (thread->saved_mode == TH_MODE_REALTIME) {
-				thread->priority = BASEPRI_RTQUEUES;
-			}
-
-			thread->sched_mode = thread->saved_mode;
-			thread->saved_mode = TH_MODE_NONE;
-
-			if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
-				sched_share_decr();
-
-			if (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK))
-				set_sched_pri(thread, thread->priority);
-		}
-
-		thread->sched_flags &= ~TH_SFLAG_FAILSAFE;
+	if ((thread->sched_flags & TH_SFLAG_FAILSAFE) &&
+	    mach_absolute_time() >= thread->safe_release) {
+		sched_thread_mode_undemote(thread, TH_SFLAG_FAILSAFE);
 	}
 
 	/*
 	 *	Recompute scheduled priority if appropriate.
 	 */
-	if (	(thread->sched_mode == TH_MODE_TIMESHARE)	&&
-			!(thread->sched_flags & TH_SFLAG_PROMOTED)	&&
-			!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)		) {
-		register int		new_pri;
+	if (thread->sched_mode == TH_MODE_TIMESHARE) {
+		int priority = sched_compute_timeshare_priority(thread);
+
+		/*
+		 * Adjust the scheduled priority like thread_recompute_sched_pri,
+		 * except without setting an AST.
+		 */
+		if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK)  || (priority > thread->sched_pri)) &&
+		    (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)) &&
+		    priority != thread->sched_pri) {
+
+			boolean_t removed = thread_run_queue_remove(thread);
+
+			thread->sched_pri = priority;
 
-		do_priority_computation(thread, new_pri);
-		if (new_pri != thread->sched_pri) {
-			boolean_t		removed = thread_run_queue_remove(thread);
+			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
+			                      (uintptr_t)thread_tid(thread),
+			                      thread->base_pri,
+			                      thread->sched_pri,
+			                      0, /* eventually, 'reason' */
+			                      0);
 
-			thread->sched_pri = new_pri;
 			if (removed)
-				thread_setrun(thread, SCHED_TAILQ);
+				thread_run_queue_reinsert(thread, SCHED_TAILQ);
 		}
 	}
-	
+
 	return;
 }
 
-#endif /* CONFIG_SCHED_TRADITIONAL */
+#endif /* CONFIG_SCHED_TIMESHARE_CORE */
+
+
+/*
+ * TH_BUCKET_RUN is a count of *all* runnable non-idle threads.
+ * Each other bucket is a count of the runnable non-idle threads
+ * with that property.
+ */
+volatile uint32_t       sched_run_buckets[TH_BUCKET_MAX];
+
+static void
+sched_incr_bucket(sched_bucket_t bucket)
+{
+	assert(bucket >= TH_BUCKET_FIXPRI &&
+	       bucket <= TH_BUCKET_SHARE_BG);
+
+	hw_atomic_add(&sched_run_buckets[bucket], 1);
+}
+
+static void
+sched_decr_bucket(sched_bucket_t bucket)
+{
+	assert(bucket >= TH_BUCKET_FIXPRI &&
+	       bucket <= TH_BUCKET_SHARE_BG);
+
+	assert(sched_run_buckets[bucket] > 0);
+
+	hw_atomic_sub(&sched_run_buckets[bucket], 1);
+}
+
+/* TH_RUN & !TH_IDLE controls whether a thread has a run count */
+
+uint32_t
+sched_run_incr(thread_t thread)
+{
+	assert((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN);
+
+	uint32_t new_count = hw_atomic_add(&sched_run_buckets[TH_BUCKET_RUN], 1);
+
+	sched_incr_bucket(thread->th_sched_bucket);
+
+	return new_count;
+}
+
+uint32_t
+sched_run_decr(thread_t thread)
+{
+	assert((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN);
+
+	sched_decr_bucket(thread->th_sched_bucket);
+
+	uint32_t new_count = hw_atomic_sub(&sched_run_buckets[TH_BUCKET_RUN], 1);
+
+	return new_count;
+}
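
    [Editor's note] sched_run_incr/sched_run_decr keep TH_BUCKET_RUN tracking every runnable, non-idle thread
    while sched_incr_bucket/sched_decr_bucket track the thread's class, so at quiescence the global count equals
    the sum of the class counts. A tiny single-threaded model of that invariant (hw_atomic_add/hw_atomic_sub are
    replaced with plain arithmetic; the BUCKET_* names are stand-ins for TH_BUCKET_*):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    enum { BUCKET_RUN, BUCKET_FIXPRI, BUCKET_SHARE_FG, BUCKET_SHARE_UT, BUCKET_SHARE_BG, BUCKET_MAX };

    static uint32_t run_buckets[BUCKET_MAX];

    static void run_incr(int bucket) { run_buckets[BUCKET_RUN]++; run_buckets[bucket]++; }
    static void run_decr(int bucket) { run_buckets[bucket]--;     run_buckets[BUCKET_RUN]--; }

    int main(void)
    {
        run_incr(BUCKET_SHARE_FG);
        run_incr(BUCKET_SHARE_FG);
        run_incr(BUCKET_FIXPRI);
        run_decr(BUCKET_SHARE_FG);

        uint32_t sum = run_buckets[BUCKET_FIXPRI] + run_buckets[BUCKET_SHARE_FG] +
                       run_buckets[BUCKET_SHARE_UT] + run_buckets[BUCKET_SHARE_BG];
        assert(run_buckets[BUCKET_RUN] == sum);   /* 2 == 1 + 1 + 0 + 0 */
        printf("runnable=%u\n", run_buckets[BUCKET_RUN]);
        return 0;
    }
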
+
+static void
+sched_update_thread_bucket(thread_t thread)
+{
+	sched_bucket_t old_bucket = thread->th_sched_bucket;
+	sched_bucket_t new_bucket = TH_BUCKET_RUN;
+
+	switch (thread->sched_mode) {
+	case TH_MODE_FIXED:
+	case TH_MODE_REALTIME:
+		new_bucket = TH_BUCKET_FIXPRI;
+		break;
+
+	case TH_MODE_TIMESHARE:
+		if (thread->base_pri > BASEPRI_UTILITY)
+			new_bucket = TH_BUCKET_SHARE_FG;
+		else if (thread->base_pri > MAXPRI_THROTTLE)
+			new_bucket = TH_BUCKET_SHARE_UT;
+		else
+			new_bucket = TH_BUCKET_SHARE_BG;
+		break;
+
+	default:
+		panic("unexpected mode: %d", thread->sched_mode);
+		break;
+	}
+
+	if (old_bucket != new_bucket) {
+		thread->th_sched_bucket = new_bucket;
+		thread->pri_shift = sched_pri_shifts[new_bucket];
+
+		if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) {
+			sched_decr_bucket(old_bucket);
+			sched_incr_bucket(new_bucket);
+		}
+	}
+}
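
    [Editor's note] sched_update_thread_bucket classifies a thread once and then uses the bucket both for the
    run counts above and to choose which load-derived pri_shift the thread decays against. A sketch of the
    classification, with local copies of BASEPRI_UTILITY and MAXPRI_THROTTLE as in this era's sched.h (20 and 4;
    treat the concrete values as illustrative):

    #include <stdio.h>

    /* Illustrative copies of the thresholds from osfmk/kern/sched.h. */
    #define BASEPRI_UTILITY 20
    #define MAXPRI_THROTTLE  4

    enum bucket { FIXPRI, SHARE_FG, SHARE_UT, SHARE_BG };

    /* Timeshare threads are split by base priority; fixed and realtime
     * threads all land in the fixed-priority bucket. */
    static enum bucket classify(int timeshare, int base_pri)
    {
        if (!timeshare)
            return FIXPRI;
        if (base_pri > BASEPRI_UTILITY)
            return SHARE_FG;
        if (base_pri > MAXPRI_THROTTLE)
            return SHARE_UT;
        return SHARE_BG;
    }

    int main(void)
    {
        printf("%d %d %d %d\n",
               classify(0, 47),   /* FIXPRI   */
               classify(1, 31),   /* SHARE_FG */
               classify(1, 20),   /* SHARE_UT */
               classify(1, 4));   /* SHARE_BG */
        return 0;
    }
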
+
+/*
+ * Set the thread's true scheduling mode
+ * Called with thread mutex and thread locked
+ * The thread has already been removed from the runqueue.
+ *
+ * (saved_mode is handled before this point)
+ */
+void
+sched_set_thread_mode(thread_t thread, sched_mode_t new_mode)
+{
+	assert(thread->runq == PROCESSOR_NULL);
+
+	switch (new_mode) {
+	case TH_MODE_FIXED:
+	case TH_MODE_REALTIME:
+	case TH_MODE_TIMESHARE:
+		break;
+
+	default:
+		panic("unexpected mode: %d", new_mode);
+		break;
+	}
+
+	thread->sched_mode = new_mode;
+
+	sched_update_thread_bucket(thread);
+}
+
+/*
+ * Demote the true scheduler mode to timeshare (called with the thread locked)
+ */
+void
+sched_thread_mode_demote(thread_t thread, uint32_t reason)
+{
+	assert(reason & TH_SFLAG_DEMOTED_MASK);
+	assert((thread->sched_flags & reason) != reason);
+
+	if (thread->policy_reset)
+		return;
+
+	if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
+		/* Another demotion reason is already active */
+		thread->sched_flags |= reason;
+		return;
+	}
+
+	assert(thread->saved_mode == TH_MODE_NONE);
+
+	boolean_t removed = thread_run_queue_remove(thread);
+
+	thread->sched_flags |= reason;
+
+	thread->saved_mode = thread->sched_mode;
+
+	sched_set_thread_mode(thread, TH_MODE_TIMESHARE);
+
+	thread_recompute_priority(thread);
+
+	if (removed)
+		thread_run_queue_reinsert(thread, SCHED_TAILQ);
+}
+
+/*
+ * Un-demote the true scheduler mode back to the saved mode (called with the thread locked)
+ */
+void
+sched_thread_mode_undemote(thread_t thread, uint32_t reason)
+{
+	assert(reason & TH_SFLAG_DEMOTED_MASK);
+	assert((thread->sched_flags & reason) == reason);
+	assert(thread->saved_mode != TH_MODE_NONE);
+	assert(thread->sched_mode == TH_MODE_TIMESHARE);
+	assert(thread->policy_reset == 0);
+
+	thread->sched_flags &= ~reason;
+
+	if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
+		/* Another demotion reason is still active */
+		return;
+	}
+
+	boolean_t removed = thread_run_queue_remove(thread);
+
+	sched_set_thread_mode(thread, thread->saved_mode);
+
+	thread->saved_mode = TH_MODE_NONE;
+
+	thread_recompute_priority(thread);
+
+	if (removed)
+		thread_run_queue_reinsert(thread, SCHED_TAILQ);
+}
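
    [Editor's note] sched_thread_mode_demote and sched_thread_mode_undemote are effectively reference-counted by
    reason bits: the first active reason saves the mode and forces timeshare, additional reasons only set their
    bit, and the saved mode returns only once every reason is cleared. A compact model of that flag discipline
    (the REASON_* masks are stand-ins for bits inside TH_SFLAG_DEMOTED_MASK):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    enum mode { MODE_NONE, MODE_TIMESHARE, MODE_REALTIME };

    #define REASON_FAILSAFE 0x1u  /* stand-in for TH_SFLAG_FAILSAFE       */
    #define REASON_THROTTLE 0x2u  /* stand-in for another demotion reason */
    #define DEMOTED_MASK    (REASON_FAILSAFE | REASON_THROTTLE)

    struct th { uint32_t flags; enum mode mode, saved; };

    static void demote(struct th *t, uint32_t reason)
    {
        if (t->flags & DEMOTED_MASK) { t->flags |= reason; return; } /* already demoted */
        t->flags |= reason;
        t->saved = t->mode;
        t->mode = MODE_TIMESHARE;
    }

    static void undemote(struct th *t, uint32_t reason)
    {
        t->flags &= ~reason;
        if (t->flags & DEMOTED_MASK)
            return;                               /* another reason still active */
        t->mode = t->saved;
        t->saved = MODE_NONE;
    }

    int main(void)
    {
        struct th t = { 0, MODE_REALTIME, MODE_NONE };
        demote(&t, REASON_FAILSAFE);
        demote(&t, REASON_THROTTLE);
        undemote(&t, REASON_FAILSAFE);
        assert(t.mode == MODE_TIMESHARE);         /* still demoted by the throttle */
        undemote(&t, REASON_THROTTLE);
        assert(t.mode == MODE_REALTIME);          /* original mode restored */
        printf("ok\n");
        return 0;
    }
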
+
+