+ processor->deadline = UINT64_MAX;
+ pset_unlock(pset);
+
+ return (new_thread);
+ }
+
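+ /* Not committed to a realtime thread; clear the processor's deadline. */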
+ processor->deadline = UINT64_MAX;
+
+ /*
+ * No runnable threads, attempt to steal
+ * from other processors.
+ */
+ new_thread = SCHED(steal_thread)(pset);
+ if (new_thread != THREAD_NULL) {
+ return (new_thread);
+ }
+
+ /*
+ * If other threads have become runnable in the
+ * meantime, loop around and pick one up.
+ */
+ if (!SCHED(processor_queue_empty)(processor) || rt_runq.count > 0 || SCHED(fairshare_runq_count)() > 0)
+ continue;
+
+ pset_lock(pset);
+
+ idle:
+ /*
+ * Nothing is runnable, so set this processor idle if it
+ * was running.
+ */
+ if (processor->state == PROCESSOR_RUNNING) {
+ remqueue((queue_entry_t)processor);
+ processor->state = PROCESSOR_IDLE;
+
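+ /*
+ * Idle primaries and idle SMT secondaries are kept on
+ * separate queues so that new work can prefer a primary
+ * processor.
+ */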
+ if (processor->processor_primary == processor) {
+ enqueue_head(&pset->idle_queue, (queue_entry_t)processor);
+ }
+ else {
+ enqueue_head(&pset->idle_secondary_queue, (queue_entry_t)processor);
+ }
+ }
+
+ /* Invoked with pset locked, returns with pset unlocked */
+ sched_SMT_balance(processor, pset);
+
+#if CONFIG_SCHED_IDLE_IN_PLACE
+ /*
+ * Choose idle thread if fast idle is not possible.
+ */
+ if (processor->processor_primary != processor)
+ return (processor->idle_thread);
+
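+ /*
+ * In-place idling borrows the blocked thread's context, so
+ * skip it if the thread is idle/terminating/suspended, is not
+ * actually waiting, has a pending wakeup, or is realtime.
+ */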
+ if ((thread->state & (TH_IDLE|TH_TERMINATE|TH_SUSP)) || !(thread->state & TH_WAIT) || thread->wake_active || thread->sched_pri >= BASEPRI_RTQUEUES)
+ return (processor->idle_thread);
+
+ /*
+ * Perform idling activities directly without a
+ * context switch. Return the dispatched thread,
+ * else loop again to check for a runnable thread.
+ */
+ new_thread = thread_select_idle(thread, processor);
+
+#else /* !CONFIG_SCHED_IDLE_IN_PLACE */
+
+ /*
+ * Do a full context switch to idle so that the current
+ * thread can start running on another processor without
+ * waiting for the fast-idled processor to wake up.
+ */
+ return (processor->idle_thread);
+
+#endif /* !CONFIG_SCHED_IDLE_IN_PLACE */
+
+ } while (new_thread == THREAD_NULL);
+
+ return (new_thread);
+}
+
+#if CONFIG_SCHED_IDLE_IN_PLACE
+/*
+ * thread_select_idle:
+ *
+ * Idle the processor using the current thread context.
+ *
+ * Called with the thread locked; the lock is dropped and reacquired internally.
+ */
+static thread_t
+thread_select_idle(
+ thread_t thread,
+ processor_t processor)
+{
+ thread_t new_thread;
+ uint64_t arg1, arg2;
+ int urgency;
+
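+ /*
+ * Take this thread out of the scheduler's run/share/background
+ * load counts while it idles in place; the matching increments
+ * are performed below before returning.
+ */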
+ if (thread->sched_mode == TH_MODE_TIMESHARE) {
+ if (thread->sched_flags & TH_SFLAG_THROTTLED)
+ sched_background_decr(thread);
+
+ sched_share_decr(thread);
+ }
+ sched_run_decr(thread);
+
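+ /*
+ * Mark the thread as the idle context and advertise idle
+ * priority on the processor so preemption checks treat this
+ * CPU as idle.
+ */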
+ thread->state |= TH_IDLE;
+ processor->current_pri = IDLEPRI;
+ processor->current_thmode = TH_MODE_NONE;
+ processor->current_sfi_class = SFI_CLASS_KERNEL;
+
+ /* Reload precise timing global policy to thread-local policy */
+ thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);
+
+ thread_unlock(thread);
+
+ /*
+ * Switch execution timing to processor idle thread.
+ */
+ processor->last_dispatch = mach_absolute_time();
+
+#ifdef CONFIG_MACH_APPROXIMATE_TIME
+ commpage_update_mach_approximate_time(processor->last_dispatch);
+#endif
+
+ thread->last_run_time = processor->last_dispatch;
+ thread_timer_event(processor->last_dispatch, &processor->idle_thread->system_timer);
+ PROCESSOR_DATA(processor, kernel_timer) = &processor->idle_thread->system_timer;
+
+ /*
+ * Cancel the quantum timer while idling.
+ */
+ timer_call_cancel(&processor->quantum_timer);
+ processor->timeslice = 0;
+
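+ /*
+ * Notify the thread's sched_call hook (e.g. the workqueue
+ * subsystem) that this thread is blocking.
+ */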
+ (*thread->sched_call)(SCHED_CALL_BLOCK, thread);
+
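+ /* Advertise THREAD_URGENCY_NONE now that this CPU is going idle. */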
+ thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0, NULL);
+
+ /*
+ * Enable interrupts and perform idling activities. No
+ * preemption due to TH_IDLE being set.
+ */
+ spllo(); new_thread = processor_idle(thread, processor);
+
+ /*
+ * Return at splsched.
+ */
+ (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
+
+ thread_lock(thread);
+
+ /*
+ * If awakened, switch to thread timer and start a new quantum.
+ * Otherwise skip; we will context switch to another thread or return here.
+ */
+ if (!(thread->state & TH_WAIT)) {
+ processor->last_dispatch = mach_absolute_time();
+ thread_timer_event(processor->last_dispatch, &thread->system_timer);
+ PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
+
+ thread_quantum_init(thread);
+ processor->quantum_end = processor->last_dispatch + thread->quantum_remaining;
+ timer_call_enter1(&processor->quantum_timer, thread, processor->quantum_end, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
+ processor->timeslice = 1;
+
+ thread->computation_epoch = processor->last_dispatch;
+ }
+
+ thread->state &= ~TH_IDLE;
+
+ /*
+ * If we idled in place, simulate a context switch back
+ * to the original priority of the thread so that the
+ * platform layer cannot distinguish this from a true
+ * switch to the idle thread.
+ */
+
+ urgency = thread_get_urgency(thread, &arg1, &arg2);
+
+ thread_tell_urgency(urgency, arg1, arg2, new_thread);
+
+ sched_run_incr(thread);
+ if (thread->sched_mode == TH_MODE_TIMESHARE) {
+ sched_share_incr(thread);
+
+ if (thread->sched_flags & TH_SFLAG_THROTTLED)
+ sched_background_incr(thread);
+ }
+
+ return (new_thread);
+}
+#endif /* CONFIG_SCHED_IDLE_IN_PLACE */
+
+#if defined(CONFIG_SCHED_TRADITIONAL)
+static thread_t
+sched_traditional_choose_thread(
+ processor_t processor,
+ int priority,
+ __unused ast_t reason)
+{
+ thread_t thread;
+
+ thread = choose_thread_from_runq(processor, runq_for_processor(processor), priority);
+ if (thread != THREAD_NULL) {
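+ /* Drop the run queue's bound-thread count if the chosen thread was bound. */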
+ runq_consider_decr_bound_count(processor, thread);
+ }
+
+ return thread;
+}
+
+#endif /* defined(CONFIG_SCHED_TRADITIONAL) */
+
+#if defined(CONFIG_SCHED_TRADITIONAL)
+
+/*
+ * choose_thread_from_runq:
+ *
+ * Locate a thread to execute from the processor run queue
+ * and return it. Only choose a thread with priority greater
+ * than or equal to the supplied minimum.
+ *
+ * Associated pset must be locked. Returns THREAD_NULL
+ * on failure.
+ */
+thread_t
+choose_thread_from_runq(
+ processor_t processor,
+ run_queue_t rq,
+ int priority)
+{
+ queue_t queue = rq->queues + rq->highq;
+ int pri = rq->highq, count = rq->count;
+ thread_t thread;
+
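+ /*
+ * Scan priority levels from highq downward, stopping below the
+ * caller's minimum priority; at each level take the first thread
+ * that is unbound or bound to this processor.
+ */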
+ while (count > 0 && pri >= priority) {
+ thread = (thread_t)queue_first(queue);
+ while (!queue_end(queue, (queue_entry_t)thread)) {
+ if (thread->bound_processor == PROCESSOR_NULL ||
+ thread->bound_processor == processor) {
+ remqueue((queue_entry_t)thread);
+
+ thread->runq = PROCESSOR_NULL;
+ SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
+ rq->count--;
+ if (SCHED(priority_is_urgent)(pri)) {
+ rq->urgency--; assert(rq->urgency >= 0);
+ }
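+ /* If this level is now empty, clear its bitmap bit and recompute highq. */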
+ if (queue_empty(queue)) {
+ if (pri != IDLEPRI)
+ clrbit(MAXPRI - pri, rq->bitmap);
+ rq->highq = MAXPRI - ffsbit(rq->bitmap);
+ }
+
+ return (thread);
+ }
+ count--;
+
+ thread = (thread_t)queue_next((queue_entry_t)thread);
+ }
+
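+ /* No eligible thread at this level; move down one priority level. */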
+ queue--; pri--;
+ }
+
+ return (THREAD_NULL);
+}
+
+#endif /* defined(CONFIG_SCHED_TRADITIONAL) */
+
+/*
+ * Perform a context switch and start executing the new thread.
+ *
+ * Returns FALSE on failure, and the thread is re-dispatched.
+ *
+ * Called at splsched.
+ */
+
+/*
+ * thread_invoke
+ *
+ * "self" is what is currently running on the processor,
+ * "thread" is the new thread to context switch to
+ * (which may be the same thread in some cases)
+ */
+static boolean_t
+thread_invoke(
+ thread_t self,
+ thread_t thread,
+ ast_t reason)
+{
+ thread_continue_t continuation = self->continuation;
+ void *parameter = self->parameter;
+ processor_t processor;
+ uint64_t ctime = mach_absolute_time();
+
+#ifdef CONFIG_MACH_APPROXIMATE_TIME
+ commpage_update_mach_approximate_time(ctime);
+#endif
+
+ if (__improbable(get_preemption_level() != 0)) {
+ int pl = get_preemption_level();
+ panic("thread_invoke: preemption_level %d, possible cause: %s",
+ pl, (pl < 0 ? "unlocking an unlocked mutex or spinlock" :
+ "blocking while holding a spinlock, or within interrupt context"));
+ }
+
+ assert(self == current_thread());
+ assert(self->runq == PROCESSOR_NULL);
+
+#if defined(CONFIG_SCHED_TIMESHARE_CORE)
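+ /* Give the timeshare scheduler a chance to run its periodic maintenance. */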
+ sched_traditional_consider_maintenance(ctime);
+#endif /* CONFIG_SCHED_TIMESHARE_CORE */
+
+ /*
+ * Mark thread interruptible.
+ */
+ thread_lock(thread);
+ thread->state &= ~TH_UNINT;
+
+ assert(thread_runnable(thread));
+ assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == current_processor());
+ assert(thread->runq == PROCESSOR_NULL);
+
+ /* Reload precise timing global policy to thread-local policy */
+ thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);
+
+ /* Update SFI class based on other factors */
+ thread->sfi_class = sfi_thread_classify(thread);
+
+ /*
+ * Allow time constraint threads to hang onto
+ * a stack.
+ */
+ if ((self->sched_mode == TH_MODE_REALTIME) && !self->reserved_stack)
+ self->reserved_stack = self->kernel_stack;