+
+/* TH_SFLAG_THROTTLED controls whether a thread contributes to sched_background_count while it holds a run count and a share count */
+
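+/*
+ * Take a background reference for this thread: bump its BG count and the
+ * global sched_background_count. Only legal for a running, throttled
+ * timeshare thread that already holds a share count.
+ */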
+void sched_background_incr(thread_t thread) {
+	assert((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN);
+	assert(thread->sched_mode == TH_MODE_TIMESHARE);
+	assert((thread->sched_flags & TH_SFLAG_THROTTLED) == TH_SFLAG_THROTTLED);
+
+	assert(thread->BG_COUNT == 0);
+	thread->BG_COUNT++;
+	int val = hw_atomic_add(&sched_background_count, 1);
+	assert(val >= 0);
+
+	/* Always do the background change while holding a share count */
+	assert(thread->SHARE_COUNT == 1);
+}
+
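+/*
+ * Drop the thread's background reference: decrement its BG count and the
+ * global sched_background_count. Called once the thread no longer counts
+ * as a running, throttled timeshare thread, but while it still holds its
+ * share count.
+ */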
+void sched_background_decr(thread_t thread) {
+	if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN && thread->sched_mode == TH_MODE_TIMESHARE)
+		assert((thread->sched_flags & TH_SFLAG_THROTTLED) != TH_SFLAG_THROTTLED);
+	assert(thread->BG_COUNT == 1);
+	int val = hw_atomic_sub(&sched_background_count, 1);
+	thread->BG_COUNT--;
+	assert(val >= 0);
+	assert(thread->BG_COUNT == 0);
+
+	/* Always do the background change while holding a share count */
+	assert(thread->SHARE_COUNT == 1);
+}
+
+
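+/*
+ * Check the invariants tying SHARE_COUNT and BG_COUNT to the thread's
+ * run state, scheduling mode, and throttle flag: a running timeshare
+ * thread holds exactly one share count, plus one background count iff
+ * it is marked TH_SFLAG_THROTTLED.
+ */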
+void
+assert_thread_sched_count(thread_t thread) {
+	/* Only 0 or 1 are acceptable values */
+	assert(thread->BG_COUNT == 0 || thread->BG_COUNT == 1);
+	assert(thread->SHARE_COUNT == 0 || thread->SHARE_COUNT == 1);
+
+	/* BG is only allowed when you already have a share count */
+	if (thread->BG_COUNT == 1)
+		assert(thread->SHARE_COUNT == 1);
+	if (thread->SHARE_COUNT == 0)
+		assert(thread->BG_COUNT == 0);
+
+	if ((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN ||
+	    (thread->sched_mode != TH_MODE_TIMESHARE))
+		assert(thread->SHARE_COUNT == 0);
+
+	if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN &&
+	    (thread->sched_mode == TH_MODE_TIMESHARE))
+		assert(thread->SHARE_COUNT == 1);
+
+	if ((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN ||
+	    (thread->sched_mode != TH_MODE_TIMESHARE) ||
+	    !(thread->sched_flags & TH_SFLAG_THROTTLED))
+		assert(thread->BG_COUNT == 0);
+
+	if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN &&
+	    (thread->sched_mode == TH_MODE_TIMESHARE) &&
+	    (thread->sched_flags & TH_SFLAG_THROTTLED))
+		assert(thread->BG_COUNT == 1);
+}
+
+#endif /* MACH_ASSERT */
+
+/*
+ * Set the thread's true scheduling mode
+ * Called with thread mutex and thread locked
+ * The thread has already been removed from the runqueue.
+ *
+ * (saved_mode is handled before this point)
+ */
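+/*
+ * If the thread is currently running (has a run count), switching into or
+ * out of TH_MODE_TIMESHARE also transfers its share count (and background
+ * count, if throttled) so the global counters stay consistent.
+ */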
+void
+sched_set_thread_mode(thread_t thread, sched_mode_t new_mode)
+{
+	assert_thread_sched_count(thread);
+	assert(thread->runq == PROCESSOR_NULL);
+
+	sched_mode_t old_mode = thread->sched_mode;
+
+	thread->sched_mode = new_mode;
+
+	switch (new_mode) {
+		case TH_MODE_FIXED:
+		case TH_MODE_REALTIME:
+			if (old_mode == TH_MODE_TIMESHARE) {
+				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) {
+					/* Drop the background count before the share count it depends on */
+					if (thread->sched_flags & TH_SFLAG_THROTTLED)
+						sched_background_decr(thread);
+
+					sched_share_decr(thread);
+				}
+			}
+			break;
+
+		case TH_MODE_TIMESHARE:
+			if (old_mode != TH_MODE_TIMESHARE) {
+				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) {
+					/* Take the share count before the background count that depends on it */
+					sched_share_incr(thread);
+
+					if (thread->sched_flags & TH_SFLAG_THROTTLED)
+						sched_background_incr(thread);
+				}
+			}
+			break;
+
+		default:
+			panic("unexpected mode: %d", new_mode);
+			break;
+	}
+
+	assert_thread_sched_count(thread);
+}
+
+/*
+ * Demote the true scheduler mode to timeshare (called with the thread locked)
+ */
+void
+sched_thread_mode_demote(thread_t thread, uint32_t reason)
+{
+	assert(reason & TH_SFLAG_DEMOTED_MASK);
+	assert((thread->sched_flags & reason) != reason);
+	assert_thread_sched_count(thread);
+
+	if (thread->policy_reset)
+		return;
+
+	if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
+		/* Another demotion reason is already active */
+		thread->sched_flags |= reason;
+		return;
+	}
+
+	assert(thread->saved_mode == TH_MODE_NONE);
+
+	boolean_t removed = thread_run_queue_remove(thread);
+
+	thread->sched_flags |= reason;
+
+	thread->saved_mode = thread->sched_mode;
+
+	sched_set_thread_mode(thread, TH_MODE_TIMESHARE);
+
+	thread_recompute_priority(thread);
+
+	if (removed)
+		thread_run_queue_reinsert(thread, SCHED_TAILQ);
+
+	assert_thread_sched_count(thread);
+}
+
+/*
+ * Un-demote the true scheduler mode back to the saved mode (called with the thread locked)
+ */
+void
+sched_thread_mode_undemote(thread_t thread, uint32_t reason)
+{
+	assert(reason & TH_SFLAG_DEMOTED_MASK);
+	assert((thread->sched_flags & reason) == reason);
+	assert(thread->saved_mode != TH_MODE_NONE);
+	assert(thread->sched_mode == TH_MODE_TIMESHARE);
+	assert(thread->policy_reset == 0);
+
+	assert_thread_sched_count(thread);
+
+	thread->sched_flags &= ~reason;
+
+	if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
+		/* Another demotion reason is still active */
+		return;
+	}
+
+	boolean_t removed = thread_run_queue_remove(thread);
+
+	sched_set_thread_mode(thread, thread->saved_mode);
+
+	thread->saved_mode = TH_MODE_NONE;
+
+	thread_recompute_priority(thread);
+
+	if (removed)
+		thread_run_queue_reinsert(thread, SCHED_TAILQ);
+}
+
+/*
+ * Set the thread to be categorized as 'background'
+ * Called with thread mutex and thread lock held
+ *
+ * TODO: Eventually, 'background' should be a true sched_mode.
+ */
+void
+sched_set_thread_throttled(thread_t thread, boolean_t wants_throttle)
+{
+	if (thread->policy_reset)
+		return;
+
+	assert(((thread->sched_flags & TH_SFLAG_THROTTLED) ? TRUE : FALSE) != wants_throttle);
+
+	assert_thread_sched_count(thread);
+
+	/*
+	 * When backgrounding a thread, iOS has the semantic that
+	 * realtime and fixed priority threads should be demoted
+	 * to timeshare background threads.
+	 *
+	 * On OSX, realtime and fixed priority threads don't lose their mode.
+	 */
+
+	if (wants_throttle) {
+		thread->sched_flags |= TH_SFLAG_THROTTLED;
+		if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN && thread->sched_mode == TH_MODE_TIMESHARE) {
+			sched_background_incr(thread);
+		}
+
+		assert_thread_sched_count(thread);
+
+	} else {
+		thread->sched_flags &= ~TH_SFLAG_THROTTLED;
+		if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN && thread->sched_mode == TH_MODE_TIMESHARE) {
+			sched_background_decr(thread);
+		}
+
+		assert_thread_sched_count(thread);
+
+	}
+
+	assert_thread_sched_count(thread);
+}
+