diff --git a/osfmk/kern/priority.c b/osfmk/kern/priority.c
index f50696079972d9488990e1da585d074f740a71c5..40eb17242b70832b44113719822e03184b7b652d 100644
--- a/osfmk/kern/priority.c
+++ b/osfmk/kern/priority.c
 #include <machine/commpage.h>  /* for commpage_update_mach_approximate_time */
 #endif
 
+#if MONOTONIC
+#include <kern/monotonic.h>
+#endif /* MONOTONIC */
+
+static void sched_update_thread_bucket(thread_t thread);
+
 /*
  *     thread_quantum_expire:
  *
@@ -111,19 +117,22 @@ thread_quantum_expire(
        /*
         * We bill CPU time to both the individual thread and its task.
         *
-        * Because this balance adjustment could potentially attempt to wake this very
-        * thread, we must credit the ledger before taking the thread lock. The ledger
-        * pointers are only manipulated by the thread itself at the ast boundary.
+        * Because this balance adjustment could potentially attempt to wake this
+        * very thread, we must credit the ledger before taking the thread lock.
+        * The ledger pointers are only manipulated by the thread itself at the ast
+        * boundary.
+        *
+        * TODO: This fails to account for the time between when the timer was
+        * armed and when it fired.  It should be based on the system_timer and
+        * running a timer_update operation here.
         */
        ledger_credit(thread->t_ledger, task_ledgers.cpu_time, thread->quantum_remaining);
        ledger_credit(thread->t_threadledger, thread_ledgers.cpu_time, thread->quantum_remaining);
-#ifdef CONFIG_BANK
        if (thread->t_bankledger) {
                ledger_credit(thread->t_bankledger, bank_ledgers.cpu_time,
                                (thread->quantum_remaining - thread->t_deduct_bank_ledger_time));
        }
        thread->t_deduct_bank_ledger_time = 0;
-#endif
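A worked illustration of the crediting above (the figures are hypothetical): the thread and task cpu_time ledgers are billed the full remaining quantum, while the bank ledger only receives the portion not already deducted during the quantum.

        /*
         * Hypothetical values: quantum_remaining = 10 ms,
         * t_deduct_bank_ledger_time = 2 ms.
         * The thread and task ledgers are each credited 10 ms; the bank
         * ledger is credited 10 - 2 = 8 ms, then the deduction accumulator
         * resets to 0.
         */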
 
        ctime = mach_absolute_time();
 
@@ -131,6 +140,10 @@ thread_quantum_expire(
        commpage_update_mach_approximate_time(ctime);
 #endif
 
+#if MONOTONIC
+       mt_sched_update(thread);
+#endif /* MONOTONIC */
+
        thread_lock(thread);
 
        /*
@@ -143,14 +156,15 @@ thread_quantum_expire(
        /*
         *      Check for fail-safe trip.
         */
-       if ((thread->sched_mode == TH_MODE_REALTIME || thread->sched_mode == TH_MODE_FIXED) && 
-           !(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) &&
-           !(thread->options & TH_OPT_SYSTEM_CRITICAL)) {
-               uint64_t new_computation;
-  
-               new_computation = ctime - thread->computation_epoch;
-               new_computation += thread->computation_metered;
-               if (new_computation > max_unsafe_computation) {
+       if ((thread->sched_mode == TH_MODE_REALTIME || thread->sched_mode == TH_MODE_FIXED) &&
+           !(thread->sched_flags & TH_SFLAG_PROMOTED) &&
+           !(thread->sched_flags & TH_SFLAG_PROMOTE_REASON_MASK) &&
+           !(thread->options & TH_OPT_SYSTEM_CRITICAL)) {
+               uint64_t new_computation;
+
+               new_computation = ctime - thread->computation_epoch;
+               new_computation += thread->computation_metered;
+               if (new_computation > max_unsafe_computation) {
                        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAILSAFE)|DBG_FUNC_NONE,
                                        (uintptr_t)thread->sched_pri, (uintptr_t)thread->sched_mode, 0, 0, 0);
 
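A minimal standalone sketch of the fail-safe budget check above, assuming <stdint.h> types and a hypothetical helper name (not kernel code from this change):

#include <stdint.h>

/* Nonzero when the thread's metered computation exceeds the unsafe budget. */
static int
example_failsafe_would_trip(uint64_t now, uint64_t computation_epoch,
    uint64_t computation_metered, uint64_t max_unsafe_computation)
{
        /* on-core time since the epoch, plus previously metered computation */
        uint64_t new_computation = (now - computation_epoch) + computation_metered;

        return new_computation > max_unsafe_computation;
}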
@@ -171,12 +185,7 @@ thread_quantum_expire(
        if (thread->sched_mode != TH_MODE_REALTIME)
                SCHED(quantum_expire)(thread);
 
-       processor->current_pri = thread->sched_pri;
-       processor->current_thmode = thread->sched_mode;
-
-       /* Tell platform layer that we are still running this thread */
-       urgency = thread_get_urgency(thread, &ignore1, &ignore2);
-       machine_thread_going_on_core(thread, urgency, 0);
+       processor_state_update_from_thread(processor, thread);
 
        /*
         *      This quantum is up, give this thread another.
@@ -193,31 +202,54 @@ thread_quantum_expire(
         * during privilege transitions, synthesize an event now.
         */
        if (!thread->precise_user_kernel_time) {
-               timer_switch(PROCESSOR_DATA(processor, current_state),
-                                        ctime,
-                                        PROCESSOR_DATA(processor, current_state));
-               timer_switch(PROCESSOR_DATA(processor, thread_timer),
-                                        ctime,
-                                        PROCESSOR_DATA(processor, thread_timer));
+               timer_update(PROCESSOR_DATA(processor, current_state), ctime);
+               timer_update(PROCESSOR_DATA(processor, thread_timer), ctime);
+               timer_update(&thread->runnable_timer, ctime);
        }
 
+
        processor->quantum_end = ctime + thread->quantum_remaining;
 
        /*
-        *      Context switch check.
+        * Context switch check
+        *
+        * non-urgent flags don't affect kernel threads, so upgrade to urgent
+        * to ensure that rebalancing and non-recommendation kick in quickly.
         */
-       if ((preempt = csw_check(processor, AST_QUANTUM)) != AST_NONE)
+
+       ast_t check_reason = AST_QUANTUM;
+       if (thread->task == kernel_task)
+               check_reason |= AST_URGENT;
+
+       if ((preempt = csw_check(processor, check_reason)) != AST_NONE)
                ast_on(preempt);
 
+       /*
+        * AST_KEVENT does not send an IPI when setting the AST;
+        * to avoid waiting for the next context switch to propagate it,
+        * the AST is propagated here at quantum expiration.
+        */
+       ast_propagate(thread);
+
        thread_unlock(thread);
 
-       timer_call_enter1(&processor->quantum_timer, thread,
-           processor->quantum_end, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
+       timer_call_quantum_timer_enter(&processor->quantum_timer, thread,
+               processor->quantum_end, ctime);
+
+       /* Tell platform layer that we are still running this thread */
+       urgency = thread_get_urgency(thread, &ignore1, &ignore2);
+       machine_thread_going_on_core(thread, urgency, 0, 0, ctime);
+       machine_switch_perfcontrol_state_update(QUANTUM_EXPIRY, ctime,
+               0, thread);
 
 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
        sched_timeshare_consider_maintenance(ctime);
 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
 
+#if __arm__ || __arm64__
+       if (thread->sched_mode == TH_MODE_REALTIME)
+               sched_consider_recommended_cores(ctime, thread);
+#endif /* __arm__ || __arm64__ */
 
        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_QUANTUM_EXPIRED) | DBG_FUNC_END, preempt, 0, 0, 0, 0);
 }
@@ -235,17 +267,41 @@ thread_quantum_expire(
 void
 sched_set_thread_base_priority(thread_t thread, int priority)
 {
-       int old_priority = thread->base_pri;
+       assert(priority >= MINPRI);
+       uint64_t ctime = 0;
+
+       if (thread->sched_mode == TH_MODE_REALTIME)
+               assert(priority <= BASEPRI_RTQUEUES);
+       else
+               assert(priority < BASEPRI_RTQUEUES);
+
+       int old_base_pri = thread->base_pri;
        thread->base_pri = priority;
 
-       /* A thread is 'throttled' when its base priority is at or below MAXPRI_THROTTLE */
-       if ((priority > MAXPRI_THROTTLE) && (old_priority <= MAXPRI_THROTTLE)) {
-               sched_set_thread_throttled(thread, FALSE);
-       } else if ((priority <= MAXPRI_THROTTLE) && (old_priority > MAXPRI_THROTTLE)) {
-               sched_set_thread_throttled(thread, TRUE);
+       if ((thread->state & TH_RUN) == TH_RUN) {
+               assert(thread->last_made_runnable_time != THREAD_NOT_RUNNABLE);
+               ctime = mach_approximate_time();
+               thread->last_basepri_change_time = ctime;
+       } else {
+               assert(thread->last_basepri_change_time == THREAD_NOT_RUNNABLE);
+               assert(thread->last_made_runnable_time == THREAD_NOT_RUNNABLE);
+       }
+
+       /* 
+        * Currently the perfcontrol_attr depends on the base pri of the 
+        * thread. Therefore, we use this function as the hook for the 
+        * perfcontrol callout. 
+        */
+       if (thread == current_thread() && old_base_pri != priority) {
+               if (!ctime) {
+                   ctime = mach_approximate_time();
+               }
+               machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE,
+                       ctime, PERFCONTROL_CALLOUT_WAKE_UNSAFE, thread);
        }
+       sched_update_thread_bucket(thread);
 
-       thread_recompute_sched_pri(thread, FALSE);
+       thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
 }
 
 /*
@@ -255,28 +311,54 @@ sched_set_thread_base_priority(thread_t thread, int priority)
  *     according to its base priority if the
  *     thread has not been promoted or depressed.
  *
- *     This is the standard way to push base_pri changes into sched_pri,
- *     or to recalculate the appropriate sched_pri after clearing
+ *     This is the only way to push base_pri changes into sched_pri,
+ *     or to recalculate the appropriate sched_pri after changing
  *     a promotion or depression.
  *
  *     Called at splsched with the thread locked.
+ *
+ *     TODO: Add an 'update urgency' flag to avoid urgency callouts on every rwlock operation
  */
 void
-thread_recompute_sched_pri(
-                           thread_t thread,
-                           boolean_t override_depress)
+thread_recompute_sched_pri(thread_t thread, set_sched_pri_options_t options)
 {
-       int priority;
+       uint32_t     sched_flags = thread->sched_flags;
+       sched_mode_t sched_mode  = thread->sched_mode;
 
-       if (thread->sched_mode == TH_MODE_TIMESHARE)
+       int priority = thread->base_pri;
+
+       if (sched_mode == TH_MODE_TIMESHARE)
                priority = SCHED(compute_timeshare_priority)(thread);
-       else
-               priority = thread->base_pri;
 
-       if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK)  || (priority > thread->sched_pri)) &&
-           (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) || override_depress)) {
-               set_sched_pri(thread, priority);
+       if (sched_flags & TH_SFLAG_DEPRESS) {
+               /* thread_yield_internal overrides kernel mutex promotion */
+               priority = DEPRESSPRI;
+       } else {
+               /* poll-depress is overridden by mutex promotion and promote-reasons */
+               if ((sched_flags & TH_SFLAG_POLLDEPRESS)) {
+                       priority = DEPRESSPRI;
+               }
+
+               if (sched_flags & TH_SFLAG_PROMOTED) {
+                       priority = MAX(priority, thread->promotion_priority);
+
+                       if (sched_mode != TH_MODE_REALTIME)
+                               priority = MIN(priority, MAXPRI_PROMOTE);
+               }
+
+               if (sched_flags & TH_SFLAG_PROMOTE_REASON_MASK) {
+                       if (sched_flags & TH_SFLAG_RW_PROMOTED)
+                               priority = MAX(priority, MINPRI_RWLOCK);
+
+                       if (sched_flags & TH_SFLAG_WAITQ_PROMOTED)
+                               priority = MAX(priority, MINPRI_WAITQ);
+
+                       if (sched_flags & TH_SFLAG_EXEC_PROMOTED)
+                               priority = MAX(priority, MINPRI_EXEC);
+               }
        }
+
+       set_sched_pri(thread, priority, options);
 }
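A hedged model of how the branches above compose, with placeholder constants and with poll-depression and the realtime cap exemption omitted:

#define EX_DEPRESSPRI      0    /* placeholder for DEPRESSPRI */
#define EX_MAXPRI_PROMOTE 95    /* placeholder for MAXPRI_PROMOTE */
#define EX_MINPRI_RWLOCK  80    /* placeholder for MINPRI_RWLOCK */

static int ex_max(int a, int b) { return a > b ? a : b; }
static int ex_min(int a, int b) { return a < b ? a : b; }

static int
example_recompute_sched_pri(int base_pri, int promotion_priority,
    int depressed, int promoted, int rw_promoted)
{
        int priority = base_pri;        /* timeshare decay not modeled here */

        if (depressed)
                return EX_DEPRESSPRI;   /* explicit depression wins over promotions */

        if (promoted)                   /* mutex promotion: floor, then cap */
                priority = ex_min(ex_max(priority, promotion_priority), EX_MAXPRI_PROMOTE);

        if (rw_promoted)                /* promote-reason floors apply via MAX */
                priority = ex_max(priority, EX_MINPRI_RWLOCK);

        return priority;
}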
 
 void
@@ -324,23 +406,8 @@ lightweight_update_priority(thread_t thread)
 
                priority = sched_compute_timeshare_priority(thread);
 
-               /*
-                * Adjust the scheduled priority like thread_recompute_sched_pri,
-                * except with the benefit of knowing the thread is on this core.
-                */
-               if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK)  || (priority > thread->sched_pri)) &&
-                   (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)) &&
-                   priority != thread->sched_pri) {
-
-                       thread->sched_pri = priority;
-
-                       KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
-                                             (uintptr_t)thread_tid(thread),
-                                             thread->base_pri,
-                                             thread->sched_pri,
-                                             0, /* eventually, 'reason' */
-                                             0);
-               }
+               if (priority != thread->sched_pri)
+                       thread_recompute_sched_pri(thread, SETPRI_LAZY);
        }
 }
 
@@ -371,6 +438,39 @@ static struct shift_data   sched_decay_shifts[SCHED_DECAY_TICKS] = {
  */
 extern int sched_pri_decay_band_limit;
 
+#ifdef CONFIG_EMBEDDED
+
+int
+sched_compute_timeshare_priority(thread_t thread)
+{
+       int decay_amount = (thread->sched_usage >> thread->pri_shift);
+       int decay_limit = sched_pri_decay_band_limit;
+
+       if (thread->base_pri > BASEPRI_FOREGROUND) {
+               decay_limit += (thread->base_pri - BASEPRI_FOREGROUND);
+       }
+
+       if (decay_amount > decay_limit) {
+               decay_amount = decay_limit;
+       }
+
+       /* start with base priority */
+       int priority = thread->base_pri - decay_amount;
+
+       if (priority < MAXPRI_THROTTLE) {
+               if (thread->task->max_priority > MAXPRI_THROTTLE) {
+                       priority = MAXPRI_THROTTLE;
+               } else if (priority < MINPRI_USER) {
+                       priority = MINPRI_USER;
+               }
+       } else if (priority > MAXPRI_KERNEL) {
+               priority = MAXPRI_KERNEL;
+       }
+
+       return priority;
+}
+
+#else /* CONFIG_EMBEDDED */
 
 int
 sched_compute_timeshare_priority(thread_t thread)
@@ -386,6 +486,7 @@ sched_compute_timeshare_priority(thread_t thread)
        return priority;
 }
 
+#endif /* CONFIG_EMBEDDED */
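A worked example of the CONFIG_EMBEDDED decay band above, using assumed values (base_pri = 47 = BASEPRI_FOREGROUND, sched_pri_decay_band_limit = 10):

        /*
         * decay_amount = sched_usage >> pri_shift = 25 (say)
         * decay_limit  = 10                (base_pri is not above foreground)
         * decay_amount = min(25, 10) = 10
         * priority     = 47 - 10 = 37
         *
         * A heavily used foreground thread therefore decays only to the
         * bottom of its band rather than all the way into the throttled range.
         */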
 
 /*
  *     can_update_priority
@@ -413,20 +514,14 @@ can_update_priority(
  */
 void
 update_priority(
-       register thread_t       thread)
+       thread_t        thread)
 {
-       register unsigned       ticks;
-       register uint32_t       delta;
+       uint32_t ticks, delta;
 
        ticks = sched_tick - thread->sched_stamp;
        assert(ticks != 0);
+
        thread->sched_stamp += ticks;
-       if (sched_use_combined_fgbg_decay)
-               thread->pri_shift = sched_combined_fgbg_pri_shift;
-       else if (thread->sched_flags & TH_SFLAG_THROTTLED)
-               thread->pri_shift = sched_background_pri_shift;
-       else
-               thread->pri_shift = sched_pri_shift;
 
        /* If requested, accelerate aging of sched_usage */
        if (sched_decay_usage_age_factor > 1)
@@ -437,12 +532,10 @@ update_priority(
         */
        thread_timer_delta(thread, delta);
        if (ticks < SCHED_DECAY_TICKS) {
-               register struct shift_data      *shiftp;
-
                /*
-                *      Accumulate timesharing usage only
-                *      during contention for processor
-                *      resources.
+                *      Accumulate timesharing usage only during contention for processor
+                *      resources. Use the pri_shift from the previous tick window to 
+                *      determine if the system was in a contended state.
                 */
                if (thread->pri_shift < INT8_MAX)
                        thread->sched_usage += delta;
@@ -450,25 +543,20 @@ update_priority(
                thread->cpu_usage += delta + thread->cpu_delta;
                thread->cpu_delta = 0;
 
-               shiftp = &sched_decay_shifts[ticks];
+               struct shift_data *shiftp = &sched_decay_shifts[ticks];
+
                if (shiftp->shift2 > 0) {
-                   thread->cpu_usage =
-                                               (thread->cpu_usage >> shiftp->shift1) +
-                                               (thread->cpu_usage >> shiftp->shift2);
-                   thread->sched_usage =
-                                               (thread->sched_usage >> shiftp->shift1) +
-                                               (thread->sched_usage >> shiftp->shift2);
-               }
-               else {
-                   thread->cpu_usage =
-                                               (thread->cpu_usage >> shiftp->shift1) -
-                                               (thread->cpu_usage >> -(shiftp->shift2));
-                   thread->sched_usage =
-                                               (thread->sched_usage >> shiftp->shift1) -
-                                               (thread->sched_usage >> -(shiftp->shift2));
+                       thread->cpu_usage =   (thread->cpu_usage >> shiftp->shift1) +
+                                             (thread->cpu_usage >> shiftp->shift2);
+                       thread->sched_usage = (thread->sched_usage >> shiftp->shift1) +
+                                             (thread->sched_usage >> shiftp->shift2);
+               } else {
+                       thread->cpu_usage =   (thread->cpu_usage >>   shiftp->shift1) -
+                                             (thread->cpu_usage >> -(shiftp->shift2));
+                       thread->sched_usage = (thread->sched_usage >>   shiftp->shift1) -
+                                             (thread->sched_usage >> -(shiftp->shift2));
                }
-       }
-       else {
+       } else {
                thread->cpu_usage = thread->cpu_delta = 0;
                thread->sched_usage = 0;
        }
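A standalone sketch of how one shift pair scales usage without a multiply; the (1, 3) and (1, -3) pairs are examples, not necessarily the table's actual entries:

#include <stdint.h>

static uint32_t
example_apply_shift_pair(uint32_t usage, int shift1, int shift2)
{
        if (shift2 > 0)         /* e.g. (1, 3):  u/2 + u/8 = 5u/8 */
                return (usage >> shift1) + (usage >> shift2);
        else                    /* e.g. (1, -3): u/2 - u/8 = 3u/8 */
                return (usage >> shift1) - (usage >> -shift2);
}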
@@ -482,120 +570,113 @@ update_priority(
        }
 
        /*
-        *      Recompute scheduled priority if appropriate.
+        * Now that the thread's CPU usage has been accumulated and aged
+        * based on contention of the previous tick window, update the
+        * pri_shift of the thread to match the current global load/shift
+        * values. The updated pri_shift would be used to calculate the
+        * new priority of the thread.
         */
-       if (thread->sched_mode == TH_MODE_TIMESHARE) {
-               int priority = sched_compute_timeshare_priority(thread);
+       thread->pri_shift = sched_pri_shifts[thread->th_sched_bucket];
 
-               /*
-                * Adjust the scheduled priority like thread_recompute_sched_pri,
-                * except without setting an AST.
-                */
-               if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK)  || (priority > thread->sched_pri)) &&
-                   (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)) &&
-                   priority != thread->sched_pri) {
+       /* Recompute scheduled priority if appropriate. */
+       if (thread->sched_mode == TH_MODE_TIMESHARE)
+               thread_recompute_sched_pri(thread, SETPRI_LAZY);
+}
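The effect of the per-bucket pri_shift can be seen with illustrative numbers (assumed, not measured):

        /*
         * The timeshare penalty is roughly (sched_usage >> pri_shift), as in
         * the decay computation above, so a larger shift means a smaller
         * penalty.  With sched_usage = 1 << 20:
         *
         *   pri_shift = 18 (bucket under heavy load) -> penalty = 4
         *   pri_shift = 20 (lighter load)            -> penalty = 1
         *   pri_shift = INT8_MAX marks an uncontended bucket; usage is not
         *   accumulated at all (see the guard earlier in update_priority).
         *
         * Looking the shift up per bucket lets each bucket's load decay its
         * own threads independently.
         */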
 
-                       boolean_t removed = thread_run_queue_remove(thread);
+#endif /* CONFIG_SCHED_TIMESHARE_CORE */
 
-                       thread->sched_pri = priority;
 
-                       KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
-                                             (uintptr_t)thread_tid(thread),
-                                             thread->base_pri,
-                                             thread->sched_pri,
-                                             0, /* eventually, 'reason' */
-                                             0);
+/*
+ * TH_BUCKET_RUN is a count of *all* runnable non-idle threads.
+ * Each other bucket is a count of the runnable non-idle threads
+ * with that property.
+ */
+volatile uint32_t       sched_run_buckets[TH_BUCKET_MAX];
 
-                       if (removed)
-                               thread_run_queue_reinsert(thread, SCHED_TAILQ);
-               }
-       }
+static void
+sched_incr_bucket(sched_bucket_t bucket)
+{
+       assert(bucket >= TH_BUCKET_FIXPRI &&
+              bucket <= TH_BUCKET_SHARE_BG);
 
-       return;
+       hw_atomic_add(&sched_run_buckets[bucket], 1);
 }
 
-#endif /* CONFIG_SCHED_TIMESHARE_CORE */
-
-#if MACH_ASSERT
-/* sched_mode == TH_MODE_TIMESHARE controls whether a thread has a timeshare count when it has a run count */
+static void
+sched_decr_bucket(sched_bucket_t bucket)
+{
+       assert(bucket >= TH_BUCKET_FIXPRI &&
+              bucket <= TH_BUCKET_SHARE_BG);
 
-void sched_share_incr(thread_t thread) {
-       assert((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN);
-       assert(thread->sched_mode == TH_MODE_TIMESHARE);
-       assert(thread->SHARE_COUNT == 0);
-       thread->SHARE_COUNT++;
-       (void)hw_atomic_add(&sched_share_count, 1);
-}
+       assert(sched_run_buckets[bucket] > 0);
 
-void sched_share_decr(thread_t thread) {
-       assert((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN || thread->sched_mode != TH_MODE_TIMESHARE);
-       assert(thread->SHARE_COUNT == 1);
-       (void)hw_atomic_sub(&sched_share_count, 1);
-       thread->SHARE_COUNT--;
+       hw_atomic_sub(&sched_run_buckets[bucket], 1);
 }
 
-/* TH_SFLAG_THROTTLED controls whether a thread has a background count when it has a run count and a share count */
+/* TH_RUN & !TH_IDLE controls whether a thread has a run count */
 
-void sched_background_incr(thread_t thread) {
+uint32_t
+sched_run_incr(thread_t thread)
+{
        assert((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN);
-       assert(thread->sched_mode == TH_MODE_TIMESHARE);
-       assert((thread->sched_flags & TH_SFLAG_THROTTLED) == TH_SFLAG_THROTTLED);
 
-       assert(thread->BG_COUNT == 0);
-       thread->BG_COUNT++;
-       int val = hw_atomic_add(&sched_background_count, 1);
-       assert(val >= 0);
+       uint32_t new_count = hw_atomic_add(&sched_run_buckets[TH_BUCKET_RUN], 1);
 
-       /* Always do the background change while holding a share count */
-       assert(thread->SHARE_COUNT == 1);
-}
+       sched_incr_bucket(thread->th_sched_bucket);
 
-void sched_background_decr(thread_t thread) {
-       if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN && thread->sched_mode == TH_MODE_TIMESHARE)
-               assert((thread->sched_flags & TH_SFLAG_THROTTLED) != TH_SFLAG_THROTTLED);
-       assert(thread->BG_COUNT == 1);
-       int val = hw_atomic_sub(&sched_background_count, 1);
-       thread->BG_COUNT--;
-       assert(val >= 0);
-       assert(thread->BG_COUNT == 0);
-
-       /* Always do the background change while holding a share count */
-       assert(thread->SHARE_COUNT == 1);
+       return new_count;
 }
 
+uint32_t
+sched_run_decr(thread_t thread)
+{
+       assert((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN);
 
-void
-assert_thread_sched_count(thread_t thread) {
-       /* Only 0 or 1 are acceptable values */
-       assert(thread->BG_COUNT    == 0 || thread->BG_COUNT    == 1);
-       assert(thread->SHARE_COUNT == 0 || thread->SHARE_COUNT == 1);
-
-       /* BG is only allowed when you already have a share count */
-       if (thread->BG_COUNT == 1)
-               assert(thread->SHARE_COUNT == 1);
-       if (thread->SHARE_COUNT == 0)
-               assert(thread->BG_COUNT == 0);
-
-       if ((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN ||
-           (thread->sched_mode != TH_MODE_TIMESHARE))
-               assert(thread->SHARE_COUNT == 0);
-
-       if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN &&
-           (thread->sched_mode == TH_MODE_TIMESHARE))
-               assert(thread->SHARE_COUNT == 1);
-
-       if ((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN ||
-           (thread->sched_mode != TH_MODE_TIMESHARE)    ||
-           !(thread->sched_flags & TH_SFLAG_THROTTLED))
-               assert(thread->BG_COUNT == 0);
-
-       if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN &&
-           (thread->sched_mode == TH_MODE_TIMESHARE)    &&
-           (thread->sched_flags & TH_SFLAG_THROTTLED))
-               assert(thread->BG_COUNT == 1);
+       sched_decr_bucket(thread->th_sched_bucket);
+
+       uint32_t new_count = hw_atomic_sub(&sched_run_buckets[TH_BUCKET_RUN], 1);
+
+       return new_count;
 }
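A brief illustration of how the global and per-bucket counts move together (the bucket chosen is an example):

        /*
         * Making a default-priority timeshare thread runnable:
         *   sched_run_incr(thread)
         *     -> sched_run_buckets[TH_BUCKET_RUN]      += 1  (all runnable threads)
         *     -> sched_run_buckets[TH_BUCKET_SHARE_DF] += 1  (its own bucket)
         * The matching sched_run_decr() reverses both, so TH_BUCKET_RUN
         * tracks the sum of the individual buckets.
         */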
 
-#endif /* MACH_ASSERT */
+static void
+sched_update_thread_bucket(thread_t thread)
+{
+       sched_bucket_t old_bucket = thread->th_sched_bucket;
+       sched_bucket_t new_bucket = TH_BUCKET_RUN;
+
+       switch (thread->sched_mode) {
+       case TH_MODE_FIXED:
+       case TH_MODE_REALTIME:
+               new_bucket = TH_BUCKET_FIXPRI;
+               break;
+
+       case TH_MODE_TIMESHARE:
+               if (thread->base_pri > BASEPRI_DEFAULT)
+                       new_bucket = TH_BUCKET_SHARE_FG;
+               else if (thread->base_pri > BASEPRI_UTILITY)
+                       new_bucket = TH_BUCKET_SHARE_DF;
+               else if (thread->base_pri > MAXPRI_THROTTLE)
+                       new_bucket = TH_BUCKET_SHARE_UT;
+               else
+                       new_bucket = TH_BUCKET_SHARE_BG;
+               break;
+
+       default:
+               panic("unexpected mode: %d", thread->sched_mode);
+               break;
+       }
+
+       if (old_bucket != new_bucket) {
+               thread->th_sched_bucket = new_bucket;
+               thread->pri_shift = sched_pri_shifts[new_bucket];
+
+               if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) {
+                       sched_decr_bucket(old_bucket);
+                       sched_incr_bucket(new_bucket);
+               }
+       }
+}
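Worked examples of the mapping above, assuming the usual sched.h thresholds (BASEPRI_DEFAULT = 31, BASEPRI_UTILITY = 20, MAXPRI_THROTTLE = 4):

        /*
         *   TH_MODE_FIXED / TH_MODE_REALTIME            -> TH_BUCKET_FIXPRI
         *   TH_MODE_TIMESHARE, base_pri 37              -> TH_BUCKET_SHARE_FG
         *   TH_MODE_TIMESHARE, base_pri 31 (default)    -> TH_BUCKET_SHARE_DF
         *   TH_MODE_TIMESHARE, base_pri 20 (utility)    -> TH_BUCKET_SHARE_UT
         *   TH_MODE_TIMESHARE, base_pri  4 (background) -> TH_BUCKET_SHARE_BG
         */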
 
 /*
  * Set the thread's true scheduling mode
@@ -607,43 +688,22 @@ assert_thread_sched_count(thread_t thread) {
 void
 sched_set_thread_mode(thread_t thread, sched_mode_t new_mode)
 {
-       assert_thread_sched_count(thread);
        assert(thread->runq == PROCESSOR_NULL);
 
-       sched_mode_t old_mode = thread->sched_mode;
-
-       thread->sched_mode = new_mode;
-
        switch (new_mode) {
-               case TH_MODE_FIXED:
-               case TH_MODE_REALTIME:
-                       if (old_mode == TH_MODE_TIMESHARE) {
-                               if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) {
-                                       if (thread->sched_flags & TH_SFLAG_THROTTLED)
-                                               sched_background_decr(thread);
-
-                                       sched_share_decr(thread);
-                               }
-                       }
-                       break;
-
-               case TH_MODE_TIMESHARE:
-                       if (old_mode != TH_MODE_TIMESHARE) {
-                               if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) {
-                                       sched_share_incr(thread);
-
-                                       if (thread->sched_flags & TH_SFLAG_THROTTLED)
-                                               sched_background_incr(thread);
-                               }
-                       }
-                       break;
-
-               default:
-                       panic("unexpected mode: %d", new_mode);
-                       break;
+       case TH_MODE_FIXED:
+       case TH_MODE_REALTIME:
+       case TH_MODE_TIMESHARE:
+               break;
+
+       default:
+               panic("unexpected mode: %d", new_mode);
+               break;
        }
 
-       assert_thread_sched_count(thread);
+       thread->sched_mode = new_mode;
+
+       sched_update_thread_bucket(thread);
 }
 
 /*
@@ -654,7 +714,6 @@ sched_thread_mode_demote(thread_t thread, uint32_t reason)
 {
        assert(reason & TH_SFLAG_DEMOTED_MASK);
        assert((thread->sched_flags & reason) != reason);
-       assert_thread_sched_count(thread);
 
        if (thread->policy_reset)
                return;
@@ -679,8 +738,6 @@ sched_thread_mode_demote(thread_t thread, uint32_t reason)
 
        if (removed)
                thread_run_queue_reinsert(thread, SCHED_TAILQ);
-
-       assert_thread_sched_count(thread);
 }
 
 /*
@@ -695,8 +752,6 @@ sched_thread_mode_undemote(thread_t thread, uint32_t reason)
        assert(thread->sched_mode == TH_MODE_TIMESHARE);
        assert(thread->policy_reset == 0);
 
-       assert_thread_sched_count(thread);
-
        thread->sched_flags &= ~reason;
 
        if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
@@ -717,33 +772,167 @@ sched_thread_mode_undemote(thread_t thread, uint32_t reason)
 }
 
 /*
- * Set the thread to be categorized as 'background'
- * Called with thread mutex and thread lock held
+ * Promote thread to a specific priority
+ *
+ * Promotion must not last past syscall boundary
+ * Clients must always pair promote and unpromote 1:1
  *
- * TODO: Eventually, 'background' should be a true sched_mode.
+ * Called at splsched with thread locked
  */
 void
-sched_set_thread_throttled(thread_t thread, boolean_t wants_throttle)
+sched_thread_promote_to_pri(thread_t    thread,
+                            int         priority,
+              __kdebug_only uintptr_t   trace_obj /* already unslid */)
 {
-       if (thread->policy_reset)
-               return;
+       assert((thread->sched_flags & TH_SFLAG_PROMOTED) != TH_SFLAG_PROMOTED);
+       assert(thread->promotion_priority == 0);
+       assert(priority <= MAXPRI_PROMOTE);
+       assert(priority > 0);
 
-       assert(((thread->sched_flags & TH_SFLAG_THROTTLED) ? TRUE : FALSE) != wants_throttle);
+       KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PROMOTED),
+            thread_tid(thread), trace_obj, priority);
 
-       assert_thread_sched_count(thread);
+       thread->sched_flags |= TH_SFLAG_PROMOTED;
+       thread->promotion_priority = priority;
 
-       if (wants_throttle) {
-               thread->sched_flags |= TH_SFLAG_THROTTLED;
-               if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN && thread->sched_mode == TH_MODE_TIMESHARE) {
-                       sched_background_incr(thread);
-               }
-       } else {
-               thread->sched_flags &= ~TH_SFLAG_THROTTLED;
-               if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN && thread->sched_mode == TH_MODE_TIMESHARE) {
-                       sched_background_decr(thread);
-               }
+       thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
+}
+
+
+/*
+ * Update a pre-existing priority promotion to have a higher priority floor
+ * Priority can only go up from the previous value
+ * Update must occur while a promotion is active
+ *
+ * Called at splsched with thread locked
+ */
+void
+sched_thread_update_promotion_to_pri(thread_t   thread,
+                                     int        priority,
+                       __kdebug_only uintptr_t  trace_obj /* already unslid */)
+{
+       assert(thread->promotions > 0);
+       assert((thread->sched_flags & TH_SFLAG_PROMOTED) == TH_SFLAG_PROMOTED);
+       assert(thread->promotion_priority > 0);
+       assert(priority <= MAXPRI_PROMOTE);
+
+       if (thread->promotion_priority < priority) {
+               KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PROMOTED_UPDATE),
+                    thread_tid(thread), trace_obj, priority);
+
+               thread->promotion_priority = priority;
+               thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
        }
+}
+
+/*
+ * End a priority promotion
+ * Demotes a thread back to its expected priority without the promotion in place
+ *
+ * Called at splsched with thread locked
+ */
+void
+sched_thread_unpromote(thread_t     thread,
+         __kdebug_only uintptr_t    trace_obj /* already unslid */)
+{
+       assert((thread->sched_flags & TH_SFLAG_PROMOTED) == TH_SFLAG_PROMOTED);
+       assert(thread->promotion_priority > 0);
+
+       KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_UNPROMOTED),
+            thread_tid(thread), trace_obj, 0);
+
+       thread->sched_flags &= ~TH_SFLAG_PROMOTED;
+       thread->promotion_priority = 0;
+
+       thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
+}
+
+/* called with thread locked */
+void
+assert_promotions_invariant(thread_t thread)
+{
+       if (thread->promotions > 0)
+               assert((thread->sched_flags & TH_SFLAG_PROMOTED) == TH_SFLAG_PROMOTED);
+
+       if (thread->promotions == 0)
+               assert((thread->sched_flags & TH_SFLAG_PROMOTED) != TH_SFLAG_PROMOTED);
+}
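A hedged usage sketch of the promote/unpromote pairing; the caller, the locking shape, and the 'ceiling'/'obj' parameters are assumptions, not code from this change:

static void
example_priority_ceiling(thread_t holder, int ceiling, uintptr_t obj)
{
        spl_t s = splsched();
        thread_lock(holder);
        /* floor the holder's sched_pri at 'ceiling' while it owns the resource */
        sched_thread_promote_to_pri(holder, ceiling, obj);
        thread_unlock(holder);
        splx(s);

        /* ... resource held; the promotion must end before the syscall returns ... */

        s = splsched();
        thread_lock(holder);
        sched_thread_unpromote(holder, obj);    /* paired 1:1 with the promote */
        thread_unlock(holder);
        splx(s);
}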
+
+/*
+ * Promote thread to have a sched pri floor for a specific reason
+ *
+ * Promotion must not last past syscall boundary
+ * Clients must always pair promote and demote 1:1,
+ * Handling nesting of the same promote reason is the client's responsibility
+ *
+ * Called at splsched with thread locked
+ */
+void
+sched_thread_promote_reason(thread_t    thread,
+                            uint32_t    reason,
+              __kdebug_only uintptr_t   trace_obj /* already unslid */)
+{
+       assert(reason & TH_SFLAG_PROMOTE_REASON_MASK);
+       assert((thread->sched_flags & reason) != reason);
 
-       assert_thread_sched_count(thread);
+       switch (reason) {
+       case TH_SFLAG_RW_PROMOTED:
+               KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_PROMOTE),
+                    thread_tid(thread), thread->sched_pri,
+                    thread->base_pri, trace_obj);
+               break;
+       case TH_SFLAG_WAITQ_PROMOTED:
+               KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAITQ_PROMOTE),
+                    thread_tid(thread), thread->sched_pri,
+                    thread->base_pri, trace_obj);
+               break;
+       case TH_SFLAG_EXEC_PROMOTED:
+               KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_EXEC_PROMOTE),
+                    thread_tid(thread), thread->sched_pri,
+                    thread->base_pri, trace_obj);
+               break;
+       }
+
+       thread->sched_flags |= reason;
+
+       thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
 }
 
+/*
+ * End a specific promotion reason
+ * Demotes a thread back to its expected priority without the promotion in place
+ *
+ * Called at splsched with thread locked
+ */
+void
+sched_thread_unpromote_reason(thread_t  thread,
+                              uint32_t  reason,
+                __kdebug_only uintptr_t trace_obj /* already unslid */)
+{
+       assert(reason & TH_SFLAG_PROMOTE_REASON_MASK);
+       assert((thread->sched_flags & reason) == reason);
+
+       switch (reason) {
+       case TH_SFLAG_RW_PROMOTED:
+               KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_DEMOTE),
+                    thread_tid(thread), thread->sched_pri,
+                    thread->base_pri, trace_obj);
+               break;
+       case TH_SFLAG_WAITQ_PROMOTED:
+               KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAITQ_DEMOTE),
+                    thread_tid(thread), thread->sched_pri,
+                    thread->base_pri, trace_obj);
+               break;
+       case TH_SFLAG_EXEC_PROMOTED:
+               KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_EXEC_DEMOTE),
+                    thread_tid(thread), thread->sched_pri,
+                    thread->base_pri, trace_obj);
+               break;
+       }
+
+       thread->sched_flags &= ~reason;
+
+       thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
+}
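A similar hedged sketch for a promote reason; the nesting counter and its handling are hypothetical, since the scheduler leaves that bookkeeping to the client:

static void
example_rw_promotion(thread_t thread, uintptr_t lock_obj, int *rw_locks_held)
{
        /* at splsched with the thread locked, as the headers above require */

        if ((*rw_locks_held)++ == 0)    /* first rwlock held: set the floor */
                sched_thread_promote_reason(thread, TH_SFLAG_RW_PROMOTED, lock_obj);

        /* ... */

        if (--(*rw_locks_held) == 0)    /* last rwlock dropped: clear it */
                sched_thread_unpromote_reason(thread, TH_SFLAG_RW_PROMOTED, lock_obj);
}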
+
+