+ /*
+ * Check for fail-safe release: if the thread was demoted by the
+ * scheduler fail-safe and its release time has passed, restore
+ * its original scheduling mode.
+ */
+ if ((thread->sched_flags & TH_SFLAG_FAILSAFE) &&
+ mach_absolute_time() >= thread->safe_release) {
+ sched_thread_mode_undemote(thread, TH_SFLAG_FAILSAFE);
+ }
+
+ /*
+ * Now that the thread's CPU usage has been accumulated and aged
+ * based on contention of the previous tick window, update the
+ * pri_shift of the thread to match the current global load/shift
+ * values. The updated pri_shift is then used to calculate the
+ * thread's new priority.
+ */
+#if CONFIG_SCHED_CLUTCH
+ thread->pri_shift = sched_clutch_thread_pri_shift(thread, thread->th_sched_bucket);
+#else /* CONFIG_SCHED_CLUTCH */
+ thread->pri_shift = sched_pri_shifts[thread->th_sched_bucket];
+#endif /* CONFIG_SCHED_CLUTCH */
+
+ /* Recompute scheduled priority if appropriate. */
+ if (thread->sched_mode == TH_MODE_TIMESHARE) {
+ thread_recompute_sched_pri(thread, SETPRI_LAZY);
+ }
+}
+
+#endif /* CONFIG_SCHED_TIMESHARE_CORE */
+
+
+/*
+ * TH_BUCKET_RUN is a count of *all* runnable non-idle threads.
+ * Each other bucket is a count of the runnable non-idle threads
+ * with that property. All updates to these counts should be
+ * performed with os_atomic_* operations.
+ *
+ * For the clutch scheduler, this global bucket is used only for
+ * keeping the total global run count.
+ */
+uint32_t sched_run_buckets[TH_BUCKET_MAX];
+
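+/* Atomically increment the runnable-thread count for the given bucket. */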
+static void
+sched_incr_bucket(sched_bucket_t bucket)
+{
+ assert(bucket >= TH_BUCKET_FIXPRI &&
+ bucket <= TH_BUCKET_SHARE_BG);
+
+ os_atomic_inc(&sched_run_buckets[bucket], relaxed);
+}
+
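+/*
+ * Atomically decrement the runnable-thread count for the given bucket;
+ * the count must be non-zero on entry.
+ */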
+static void
+sched_decr_bucket(sched_bucket_t bucket)
+{
+ assert(bucket >= TH_BUCKET_FIXPRI &&
+ bucket <= TH_BUCKET_SHARE_BG);
+
+ assert(os_atomic_load(&sched_run_buckets[bucket], relaxed) > 0);
+
+ os_atomic_dec(&sched_run_buckets[bucket], relaxed);
+}
+
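+/*
+ * Weighted variant of sched_incr_bucket(): add run_weight to the
+ * bucket's runnable count. Used by the SMT-aware accounting below.
+ */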
+static void
+sched_add_bucket(sched_bucket_t bucket, uint8_t run_weight)
+{
+ assert(bucket >= TH_BUCKET_FIXPRI &&
+ bucket <= TH_BUCKET_SHARE_BG);
+
+ os_atomic_add(&sched_run_buckets[bucket], run_weight, relaxed);
+}
+
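+/*
+ * Weighted variant of sched_decr_bucket(): subtract run_weight from the
+ * bucket's runnable count, which must be non-zero on entry.
+ */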
+static void
+sched_sub_bucket(sched_bucket_t bucket, uint8_t run_weight)
+{
+ assert(bucket >= TH_BUCKET_FIXPRI &&
+ bucket <= TH_BUCKET_SHARE_BG);
+
+ assert(os_atomic_load(&sched_run_buckets[bucket], relaxed) > 0);
+
+ os_atomic_sub(&sched_run_buckets[bucket], run_weight, relaxed);
+}
+
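+/*
+ * Account for a thread becoming runnable: bump the global TH_BUCKET_RUN
+ * count as well as the thread's per-bucket count, and return the new
+ * global run count.
+ */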
+uint32_t
+sched_run_incr(thread_t thread)
+{
+ assert((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN);
+
+ uint32_t new_count = os_atomic_inc(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
+
+ sched_incr_bucket(thread->th_sched_bucket);
+
+ return new_count;
+}
+
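+/*
+ * Account for a thread leaving the run state: drop the thread's
+ * per-bucket count and the global TH_BUCKET_RUN count, and return the
+ * new global run count.
+ */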
+uint32_t
+sched_run_decr(thread_t thread)
+{
+ assert((thread->state & (TH_RUN | TH_IDLE)) != TH_RUN);
+
+ sched_decr_bucket(thread->th_sched_bucket);
+
+ uint32_t new_count = os_atomic_dec(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
+
+ return new_count;
+}
+
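+/*
+ * SMT-aware variant of sched_run_incr(): a thread that may not share a
+ * core with another thread is counted with weight 2 while SMT
+ * timesharing is enabled. The chosen weight is saved in
+ * sched_saved_run_weight so the matching decrement removes the same
+ * amount.
+ */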
+uint32_t
+sched_smt_run_incr(thread_t thread)
+{
+ assert((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN);
+
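+ /* No-SMT threads count double while SMT timesharing is enabled. */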
+ uint8_t run_weight = (thread_no_smt(thread) && smt_timeshare_enabled) ? 2 : 1;
+ thread->sched_saved_run_weight = run_weight;
+
+ uint32_t new_count = os_atomic_add(&sched_run_buckets[TH_BUCKET_RUN], run_weight, relaxed);
+
+ sched_add_bucket(thread->th_sched_bucket, run_weight);
+
+ return new_count;
+}
+
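+/*
+ * SMT-aware variant of sched_run_decr(): remove the run weight recorded
+ * by sched_smt_run_incr() from both the per-bucket and global counts.
+ */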
+uint32_t
+sched_smt_run_decr(thread_t thread)
+{
+ assert((thread->state & (TH_RUN | TH_IDLE)) != TH_RUN);
+
+ uint8_t run_weight = thread->sched_saved_run_weight;
+
+ sched_sub_bucket(thread->th_sched_bucket, run_weight);
+
+ uint32_t new_count = os_atomic_sub(&sched_run_buckets[TH_BUCKET_RUN], run_weight, relaxed);
+
+ return new_count;
+}
+
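+/*
+ * Re-derive the thread's scheduling bucket from its scheduling mode and
+ * base priority: fixed and realtime threads land in TH_BUCKET_FIXPRI,
+ * while timeshare threads are binned into the FG/DF/UT/BG buckets by
+ * base-priority band. If the bucket changes while the thread is
+ * runnable, the run counts are migrated to the new bucket.
+ */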
+void
+sched_update_thread_bucket(thread_t thread)
+{
+ sched_bucket_t old_bucket = thread->th_sched_bucket;
+ sched_bucket_t new_bucket = TH_BUCKET_RUN;
+
+ switch (thread->sched_mode) {
+ case TH_MODE_FIXED:
+ case TH_MODE_REALTIME:
+ new_bucket = TH_BUCKET_FIXPRI;
+ break;
+
+ case TH_MODE_TIMESHARE:
+ if (thread->base_pri > BASEPRI_DEFAULT) {
+ new_bucket = TH_BUCKET_SHARE_FG;
+ } else if (thread->base_pri > BASEPRI_UTILITY) {
+ new_bucket = TH_BUCKET_SHARE_DF;
+ } else if (thread->base_pri > MAXPRI_THROTTLE) {
+ new_bucket = TH_BUCKET_SHARE_UT;
+ } else {
+ new_bucket = TH_BUCKET_SHARE_BG;
+ }
+ break;
+
+ default:
+ panic("unexpected mode: %d", thread->sched_mode);
+ break;
+ }
+
+ if (old_bucket != new_bucket) {
+ thread->th_sched_bucket = new_bucket;
+ thread->pri_shift = sched_pri_shifts[new_bucket];
+
+ if ((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN) {
+ sched_decr_bucket(old_bucket);
+ sched_incr_bucket(new_bucket);
+ }
+ }
+}