+ assert(thread == current_thread());
+
+ if (thread->sched_mode == TH_MODE_TIMESHARE) {
+ int priority;
+ uint32_t delta;
+
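+	/*
+	 * thread_timer_delta() is a macro: it stores the user and
+	 * system CPU time consumed since the last sample into
+	 * delta (which is why delta needs no & here).
+	 */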
+ thread_timer_delta(thread, delta);
+
+ /*
+ * Accumulate timesharing usage only
+ * during contention for processor
+ * resources.
+ */
+ if (thread->pri_shift < INT8_MAX) {
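+			/*
+			 * Threads that refuse to share a core (thread_no_smt())
+			 * are charged an extra smt_sched_bonus_16ths sixteenths
+			 * of delta on top of delta itself; e.g. a bonus of 8
+			 * sixteenths charges 1.5x the measured CPU time.
+			 */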
+ if (thread_no_smt(thread) && smt_timeshare_enabled) {
+ thread->sched_usage += (delta + ((delta * smt_sched_bonus_16ths) >> 4));
+ } else {
+ thread->sched_usage += delta;
+ }
+ }
+
+ thread->cpu_delta += delta;
+
+#if CONFIG_SCHED_CLUTCH
+ /*
+ * Update the CPU usage for the thread group to which the thread belongs.
+ * The implementation assumes that the thread ran for the entire delta
+ * as part of the same thread group.
+ */
+ sched_clutch_cpu_usage_update(thread, delta);
+#endif /* CONFIG_SCHED_CLUTCH */
+
+ priority = sched_compute_timeshare_priority(thread);
+
+ if (priority != thread->sched_pri) {
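+			/* lazy update: SETPRI_LAZY avoids eagerly forcing a preemption check */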
+ thread_recompute_sched_pri(thread, SETPRI_LAZY);
+ }
+ }
+}
+
+/*
+ * Define shifts for simulating (5/8) ** n
+ *
+ * Each entry holds the pair of shifts used for one tick's worth of
+ * decay. The actual computation is
+ * usage = (usage >> shift1) +/- (usage >> abs(shift2)), where the
+ * +/- is determined by the sign of shift2.
+ */
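+/*
+ * For example, entry 1 ({ .shift1 = 1, .shift2 = 3 }) computes
+ * usage/2 + usage/8 = 5*usage/8 exactly, while entry 2
+ * ({ .shift1 = 1, .shift2 = -3 }) computes usage/2 - usage/8 =
+ * 3*usage/8, approximating (5/8) ** 2 = 25/64.
+ */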
+
+const struct shift_data sched_decay_shifts[SCHED_DECAY_TICKS] = {
+ { .shift1 = 1, .shift2 = 1 },
+ { .shift1 = 1, .shift2 = 3 },
+ { .shift1 = 1, .shift2 = -3 },
+ { .shift1 = 2, .shift2 = -7 },
+ { .shift1 = 3, .shift2 = 5 },
+ { .shift1 = 3, .shift2 = -5 },
+ { .shift1 = 4, .shift2 = -8 },
+ { .shift1 = 5, .shift2 = 7 },
+ { .shift1 = 5, .shift2 = -7 },
+ { .shift1 = 6, .shift2 = -10 },
+ { .shift1 = 7, .shift2 = 10 },
+ { .shift1 = 7, .shift2 = -9 },
+ { .shift1 = 8, .shift2 = -11 },
+ { .shift1 = 9, .shift2 = 12 },
+ { .shift1 = 9, .shift2 = -11 },
+ { .shift1 = 10, .shift2 = -13 },
+ { .shift1 = 11, .shift2 = 14 },
+ { .shift1 = 11, .shift2 = -13 },
+ { .shift1 = 12, .shift2 = -15 },
+ { .shift1 = 13, .shift2 = 17 },
+ { .shift1 = 13, .shift2 = -15 },
+ { .shift1 = 14, .shift2 = -17 },
+ { .shift1 = 15, .shift2 = 19 },
+ { .shift1 = 16, .shift2 = 18 },
+ { .shift1 = 16, .shift2 = -19 },
+ { .shift1 = 17, .shift2 = 22 },
+ { .shift1 = 18, .shift2 = 20 },
+ { .shift1 = 18, .shift2 = -20 },
+ { .shift1 = 19, .shift2 = 26 },
+ { .shift1 = 20, .shift2 = 22 },
+ { .shift1 = 20, .shift2 = -22 },
+ { .shift1 = 21, .shift2 = -27 }
+};
+
+/*
+ * sched_compute_timeshare_priority:
+ *
+ * Calculate the timesharing priority based upon usage and load.
+ */
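+/*
+ * A pri_shift of INT8_MAX is the sentinel for "no contention": such a
+ * thread neither accumulates sched_usage (see above) nor has its
+ * priority decayed, so it runs at base_pri.
+ */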
+extern int sched_pri_decay_band_limit;
+
+
+/* Only use the decay floor logic on non-macOS and non-clutch schedulers */
+#if !defined(XNU_TARGET_OS_OSX) && !CONFIG_SCHED_CLUTCH
+
+int
+sched_compute_timeshare_priority(thread_t thread)
+{
+ int decay_amount;
+ int decay_limit = sched_pri_decay_band_limit;
+
+ if (thread->base_pri > BASEPRI_FOREGROUND) {
+ decay_limit += (thread->base_pri - BASEPRI_FOREGROUND);
+ }
+
+ if (thread->pri_shift == INT8_MAX) {
+ decay_amount = 0;
+ } else {
+ decay_amount = (thread->sched_usage >> thread->pri_shift);
+ }
+
+ if (decay_amount > decay_limit) {
+ decay_amount = decay_limit;
+ }
+
+ /* start with base priority */
+ int priority = thread->base_pri - decay_amount;
+
+ if (priority < MAXPRI_THROTTLE) {
+ if (thread->task->max_priority > MAXPRI_THROTTLE) {
+ priority = MAXPRI_THROTTLE;
+ } else if (priority < MINPRI_USER) {
+ priority = MINPRI_USER;
+ }
+ } else if (priority > MAXPRI_KERNEL) {
+ priority = MAXPRI_KERNEL;
+ }
+
+ return priority;
+}
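+
+/*
+ * Worked example (illustrative values): with a decay band limit of 10,
+ * a thread at base_pri 31 whose usage term (sched_usage >> pri_shift)
+ * is 15 has decay_amount clamped to 10 and runs at priority 21 instead
+ * of 16, bounding how far contention can depress the thread.
+ */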
+
+#else /* !defined(XNU_TARGET_OS_OSX) && !CONFIG_SCHED_CLUTCH */
+
+int
+sched_compute_timeshare_priority(thread_t thread)
+{
+ /* start with base priority */
+ int priority = thread->base_pri;
+
+ if (thread->pri_shift != INT8_MAX) {
+ priority -= (thread->sched_usage >> thread->pri_shift);
+ }
+
+ if (priority < MINPRI_USER) {
+ priority = MINPRI_USER;
+ } else if (priority > MAXPRI_KERNEL) {
+ priority = MAXPRI_KERNEL;
+ }
+
+ return priority;
+}
+
+#endif /* !defined(XNU_TARGET_OS_OSX) && !CONFIG_SCHED_CLUTCH */
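+
+/*
+ * By contrast, the variant above (macOS or clutch schedulers) applies
+ * no decay floor: e.g. base_pri 31 with a usage term of 40 yields -9,
+ * which is clamped up to MINPRI_USER.
+ */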
+
+/*
+ * can_update_priority
+ *
+ * Make sure we don't do re-dispatches more frequently than a scheduler tick.
+ *
+ * Called with the thread locked.
+ */
+boolean_t
+can_update_priority(
+ thread_t thread)
+{
+ if (sched_tick == thread->sched_stamp) {
+ return FALSE;
+ } else {
+ return TRUE;
+ }