+ sched_clutch_cpu_usage_update(thread, delta);
+#endif /* CONFIG_SCHED_CLUTCH */
+
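+ /*
+ * Age the accumulated usage using the per-tick decay table. Each
+ * sched_decay_shifts[] entry encodes a decay factor as a pair of
+ * shifts: new usage = (usage >> shift1) +/- (usage >> abs(shift2)),
+ * with the sign selected by shift2, as applied below.
+ */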
+ const struct shift_data *shiftp = &sched_decay_shifts[ticks];
+
+ if (shiftp->shift2 > 0) {
+ thread->cpu_usage = (thread->cpu_usage >> shiftp->shift1) +
+ (thread->cpu_usage >> shiftp->shift2);
+ thread->sched_usage = (thread->sched_usage >> shiftp->shift1) +
+ (thread->sched_usage >> shiftp->shift2);
+ } else {
+ thread->cpu_usage = (thread->cpu_usage >> shiftp->shift1) -
+ (thread->cpu_usage >> -(shiftp->shift2));
+ thread->sched_usage = (thread->sched_usage >> shiftp->shift1) -
+ (thread->sched_usage >> -(shiftp->shift2));
+ }
+ } else {
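+ /*
+ * Enough ticks have elapsed that the accumulated usage is treated
+ * as fully decayed; simply reset it.
+ */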
+ thread->cpu_usage = thread->cpu_delta = 0;
+ thread->sched_usage = 0;
+ }
+
+ /*
+ * Check for fail-safe release: if the thread was demoted by the CPU
+ * usage fail-safe and its release time has passed, restore its
+ * original scheduling mode.
+ */
+ if ((thread->sched_flags & TH_SFLAG_FAILSAFE) &&
+ mach_absolute_time() >= thread->safe_release) {
+ sched_thread_mode_undemote(thread, TH_SFLAG_FAILSAFE);
+ }
+
+ /*
+ * Now that the thread's CPU usage has been accumulated and aged
+ * based on contention of the previous tick window, update the
+ * pri_shift of the thread to match the current global load/shift
+ * values. The updated pri_shift is then used to calculate the
+ * thread's new priority.
+ */
+#if CONFIG_SCHED_CLUTCH
+ thread->pri_shift = sched_clutch_thread_pri_shift(thread, thread->th_sched_bucket);
+#else /* CONFIG_SCHED_CLUTCH */
+ thread->pri_shift = sched_pri_shifts[thread->th_sched_bucket];
+#endif /* CONFIG_SCHED_CLUTCH */
+
+ /* Recompute scheduled priority if appropriate. */
+ if (thread->sched_mode == TH_MODE_TIMESHARE) {
+ thread_recompute_sched_pri(thread, SETPRI_LAZY);
+ }
+}
+
+#endif /* CONFIG_SCHED_TIMESHARE_CORE */
+
+
+/*
+ * TH_BUCKET_RUN is a count of *all* runnable non-idle threads.
+ * Each of the other buckets counts the runnable non-idle threads
+ * currently in that bucket. All updates to these counts should be
+ * performed with os_atomic_* operations.
+ *
+ * For the clutch scheduler, this global bucket is used only for
+ * keeping the total global run count.
+ */
+uint32_t sched_run_buckets[TH_BUCKET_MAX];
+
+static void
+sched_incr_bucket(sched_bucket_t bucket)
+{
+ assert(bucket >= TH_BUCKET_FIXPRI &&
+ bucket <= TH_BUCKET_SHARE_BG);
+
+ os_atomic_inc(&sched_run_buckets[bucket], relaxed);
+}
+
+static void
+sched_decr_bucket(sched_bucket_t bucket)
+{
+ assert(bucket >= TH_BUCKET_FIXPRI &&
+ bucket <= TH_BUCKET_SHARE_BG);
+
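+ /* Catch underflow: the bucket count must be non-zero before decrementing. */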
+ assert(os_atomic_load(&sched_run_buckets[bucket], relaxed) > 0);
+
+ os_atomic_dec(&sched_run_buckets[bucket], relaxed);
+}
+
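+/*
+ * Account a newly runnable (non-idle) thread: bump the global run count
+ * and the count for the thread's bucket. Returns the new global count.
+ */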
+uint32_t
+sched_run_incr(thread_t thread)
+{
+ assert((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN);
+
+ uint32_t new_count = os_atomic_inc(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
+
+ sched_incr_bucket(thread->th_sched_bucket);
+
+ return new_count;
+}
+
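+/*
+ * Account a thread that is no longer runnable: drop the count for its
+ * bucket and the global run count. Returns the new global count.
+ */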
+uint32_t
+sched_run_decr(thread_t thread)
+{
+ assert((thread->state & (TH_RUN | TH_IDLE)) != TH_RUN);
+
+ sched_decr_bucket(thread->th_sched_bucket);
+
+ uint32_t new_count = os_atomic_dec(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
+
+ return new_count;
+}
+
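+/*
+ * Recompute which run bucket the thread belongs to, based on its
+ * scheduling mode and base-priority band, and migrate its run count
+ * if the bucket changes while the thread is runnable.
+ */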
+void
+sched_update_thread_bucket(thread_t thread)
+{
+ sched_bucket_t old_bucket = thread->th_sched_bucket;
+ sched_bucket_t new_bucket = TH_BUCKET_RUN;
+
+ switch (thread->sched_mode) {
+ case TH_MODE_FIXED:
+ case TH_MODE_REALTIME:
+ new_bucket = TH_BUCKET_FIXPRI;
+ break;
+
+ case TH_MODE_TIMESHARE:
+ if (thread->base_pri > BASEPRI_DEFAULT) {
+ new_bucket = TH_BUCKET_SHARE_FG;
+ } else if (thread->base_pri > BASEPRI_UTILITY) {
+ new_bucket = TH_BUCKET_SHARE_DF;
+ } else if (thread->base_pri > MAXPRI_THROTTLE) {
+ new_bucket = TH_BUCKET_SHARE_UT;
+ } else {
+ new_bucket = TH_BUCKET_SHARE_BG;
+ }
+ break;
+
+ default:
+ panic("unexpected mode: %d", thread->sched_mode);
+ break;
+ }
+
+ if (old_bucket != new_bucket) {
+ thread->th_sched_bucket = new_bucket;
+ thread->pri_shift = sched_pri_shifts[new_bucket];
+
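+ /*
+ * If the thread is currently counted as runnable, move its run
+ * count from the old bucket to the new one.
+ */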
+ if ((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN) {
+ sched_decr_bucket(old_bucket);
+ sched_incr_bucket(new_bucket);
+ }
+ }
+}
+
+/*
+ * Set the thread's true scheduling mode.
+ * Called with the thread mutex and the thread locked.
+ * The thread has already been removed from the runqueue.
+ *
+ * (saved_mode is handled before this point)
+ */
+void
+sched_set_thread_mode(thread_t thread, sched_mode_t new_mode)
+{
+ assert(thread->runq == PROCESSOR_NULL);
+
+ switch (new_mode) {
+ case TH_MODE_FIXED:
+ case TH_MODE_REALTIME:
+ case TH_MODE_TIMESHARE:
+ break;
+
+ default:
+ panic("unexpected mode: %d", new_mode);
+ break;
+ }
+
+ thread->sched_mode = new_mode;
+
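+ /* Re-derive the thread's run bucket for the new mode. */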
+ SCHED(update_thread_bucket)(thread);
+}
+
+/*
+ * Demote the thread's true scheduling mode to timeshare (called with the thread locked).
+ */
+void
+sched_thread_mode_demote(thread_t thread, uint32_t reason)
+{
+ assert(reason & TH_SFLAG_DEMOTED_MASK);
+ assert((thread->sched_flags & reason) != reason);
+
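+ /* Threads whose policy has been reset (e.g. during termination) are exempt from demotion. */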
+ if (thread->policy_reset) {
+ return;
+ }
+
+ if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
+ /* Another demotion reason is already active */
+ thread->sched_flags |= reason;
+ return;
+ }
+
+ assert(thread->saved_mode == TH_MODE_NONE);
+
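+ /* Pull the thread from its run queue (if queued) across the mode change; it is reinserted below. */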
+ boolean_t removed = thread_run_queue_remove(thread);
+
+ thread->sched_flags |= reason;
+
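+ /* Remember the original mode so sched_thread_mode_undemote() can restore it. */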
+ thread->saved_mode = thread->sched_mode;
+
+ sched_set_thread_mode(thread, TH_MODE_TIMESHARE);
+
+ thread_recompute_priority(thread);
+
+ if (removed) {
+ thread_run_queue_reinsert(thread, SCHED_TAILQ);