+#endif /* CONFIG_SCHED_TIMESHARE_CORE */
+
+
+/*
+ * TH_BUCKET_RUN is a count of *all* runnable non-idle threads.
+ * Each other bucket counts only the runnable non-idle threads
+ * currently assigned to that bucket.
+ */
+volatile uint32_t sched_run_buckets[TH_BUCKET_MAX];
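+
+/*
+ * The counters can be snapshotted without a lock, since every slot is
+ * updated with an atomic operation.  A minimal sketch of a hypothetical
+ * reader (the real consumers live elsewhere in the scheduler, e.g. the
+ * load-average computation):
+ *
+ *     uint32_t total = sched_run_buckets[TH_BUCKET_RUN];
+ *     uint32_t fg    = sched_run_buckets[TH_BUCKET_SHARE_FG];
+ *     uint32_t other = total - fg;  // approximate across racing updates
+ */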
+
+static void
+sched_incr_bucket(sched_bucket_t bucket)
+{
+ assert(bucket >= TH_BUCKET_FIXPRI &&
+ bucket <= TH_BUCKET_SHARE_BG);
+
+ hw_atomic_add(&sched_run_buckets[bucket], 1);
+}
+
+static void
+sched_decr_bucket(sched_bucket_t bucket)
+{
+ assert(bucket >= TH_BUCKET_FIXPRI &&
+ bucket <= TH_BUCKET_SHARE_BG);
+
+ assert(sched_run_buckets[bucket] > 0);
+
+ hw_atomic_sub(&sched_run_buckets[bucket], 1);
+}
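+
+/*
+ * Note that the asserts above accept every per-class bucket but exclude
+ * TH_BUCKET_RUN itself; that total is maintained directly by
+ * sched_run_incr() and sched_run_decr() below.
+ */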
+
+/* TH_RUN & !TH_IDLE controls whether a thread has a run count */
+
+uint32_t
+sched_run_incr(thread_t thread)
+{
+ assert((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN);
+
+ uint32_t new_count = hw_atomic_add(&sched_run_buckets[TH_BUCKET_RUN], 1);
+
+ sched_incr_bucket(thread->th_sched_bucket);
+
+ return new_count;
+}
+
+uint32_t
+sched_run_decr(thread_t thread)
+{
+ assert((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN);
+
+ sched_decr_bucket(thread->th_sched_bucket);
+
+ uint32_t new_count = hw_atomic_sub(&sched_run_buckets[TH_BUCKET_RUN], 1);
+
+ return new_count;
+}
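+
+/*
+ * The ordering of the two atomic updates is deliberate: incr bumps the
+ * TH_BUCKET_RUN total before the per-bucket count, and decr reverses
+ * that, so (assuming the updates become visible in order) a reader never
+ * observes a per-bucket count larger than the total.  A hypothetical
+ * debug check that would rely on this invariant:
+ *
+ *     assert(sched_run_buckets[TH_BUCKET_RUN] >=
+ *            sched_run_buckets[TH_BUCKET_SHARE_FG]);
+ */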
+
+static void
+sched_update_thread_bucket(thread_t thread)
+{
+ sched_bucket_t old_bucket = thread->th_sched_bucket;
+ sched_bucket_t new_bucket = TH_BUCKET_RUN;
+
+ switch (thread->sched_mode) {
+ case TH_MODE_FIXED:
+ case TH_MODE_REALTIME:
+ new_bucket = TH_BUCKET_FIXPRI;
+ break;
+
+ case TH_MODE_TIMESHARE:
+ if (thread->base_pri > BASEPRI_UTILITY)
+ new_bucket = TH_BUCKET_SHARE_FG;
+ else if (thread->base_pri > MAXPRI_THROTTLE)
+ new_bucket = TH_BUCKET_SHARE_UT;
+ else
+ new_bucket = TH_BUCKET_SHARE_BG;
+ break;
+
+ default:
+ panic("unexpected mode: %d", thread->sched_mode);
+ break;
+ }
+
+ if (old_bucket != new_bucket) {
+ thread->th_sched_bucket = new_bucket;
+ thread->pri_shift = sched_pri_shifts[new_bucket];
+
+ if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) {
+ sched_decr_bucket(old_bucket);
+ sched_incr_bucket(new_bucket);
+ }
+ }
+}
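+
+/*
+ * For example, with the cutoffs above: a TH_MODE_TIMESHARE thread at
+ * BASEPRI_DEFAULT classifies as TH_BUCKET_SHARE_FG, one at exactly
+ * BASEPRI_UTILITY falls through to TH_BUCKET_SHARE_UT, and one at or
+ * below MAXPRI_THROTTLE lands in TH_BUCKET_SHARE_BG, while fixed and
+ * realtime threads always count in TH_BUCKET_FIXPRI regardless of
+ * priority.
+ */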
+
+/*
+ * Set the thread's true scheduling mode.
+ * Called with the thread mutex and thread locked.
+ * The thread has already been removed from the runqueue.
+ *
+ * (saved_mode is handled before this point)
+ */
+void
+sched_set_thread_mode(thread_t thread, sched_mode_t new_mode)
+{
+ assert(thread->runq == PROCESSOR_NULL);
+
+ switch (new_mode) {
+ case TH_MODE_FIXED:
+ case TH_MODE_REALTIME:
+ case TH_MODE_TIMESHARE:
+ break;
+
+ default:
+ panic("unexpected mode: %d", new_mode);
+ break;
+ }
+
+ thread->sched_mode = new_mode;
+
+ sched_update_thread_bucket(thread);
+}
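+
+/*
+ * A sketch of the expected call pattern, assuming a hypothetical caller
+ * that already holds the thread mutex:
+ *
+ *     thread_lock(thread);
+ *     boolean_t removed = thread_run_queue_remove(thread);
+ *     sched_set_thread_mode(thread, TH_MODE_FIXED);
+ *     thread_recompute_priority(thread);
+ *     if (removed)
+ *             thread_run_queue_reinsert(thread, SCHED_TAILQ);
+ *     thread_unlock(thread);
+ */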
+
+/*
+ * Demote the true scheduler mode to timeshare (called with the thread locked)
+ */
+void
+sched_thread_mode_demote(thread_t thread, uint32_t reason)
+{
+ assert(reason & TH_SFLAG_DEMOTED_MASK);
+ assert((thread->sched_flags & reason) != reason);
+
+ if (thread->policy_reset)
+ return;
+
+ if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
+ /* Another demotion reason is already active */
+ thread->sched_flags |= reason;
+ return;
+ }
+
+ assert(thread->saved_mode == TH_MODE_NONE);
+
+ boolean_t removed = thread_run_queue_remove(thread);
+
+ thread->sched_flags |= reason;
+
+ thread->saved_mode = thread->sched_mode;
+
+ sched_set_thread_mode(thread, TH_MODE_TIMESHARE);
+
+ thread_recompute_priority(thread);
+
+ if (removed)
+ thread_run_queue_reinsert(thread, SCHED_TAILQ);
+}
+
+/*
+ * Un-demote the true scheduler mode back to the saved mode (called with the thread locked)
+ */
+void
+sched_thread_mode_undemote(thread_t thread, uint32_t reason)
+{
+ assert(reason & TH_SFLAG_DEMOTED_MASK);
+ assert((thread->sched_flags & reason) == reason);
+ assert(thread->saved_mode != TH_MODE_NONE);
+ assert(thread->sched_mode == TH_MODE_TIMESHARE);
+ assert(thread->policy_reset == 0);
+
+ thread->sched_flags &= ~reason;
+
+ if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
+ /* Another demotion reason is still active */
+ return;
+ }
+
+ boolean_t removed = thread_run_queue_remove(thread);
+
+ sched_set_thread_mode(thread, thread->saved_mode);
+
+ thread->saved_mode = TH_MODE_NONE;
+
+ thread_recompute_priority(thread);
+
+ if (removed)
+ thread_run_queue_reinsert(thread, SCHED_TAILQ);
+}
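+
+/*
+ * Demotion reasons behave like a refcount over the bits in
+ * TH_SFLAG_DEMOTED_MASK: the first reason saves the mode and demotes,
+ * later reasons only OR in their flag, and the saved mode is restored
+ * when the last reason is cleared.  A sketch with two hypothetical
+ * overlapping reasons REASON_A and REASON_B (both within
+ * TH_SFLAG_DEMOTED_MASK):
+ *
+ *     sched_thread_mode_demote(thread, REASON_A);    // saves mode, demotes
+ *     sched_thread_mode_demote(thread, REASON_B);    // flag only
+ *     sched_thread_mode_undemote(thread, REASON_A);  // still demoted
+ *     sched_thread_mode_undemote(thread, REASON_B);  // restores saved mode
+ */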
+
+