+ if (realtime_queue_insert(thread)) {
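+ /*
+ * The realtime enqueue indicates a preemption: post the
+ * AST locally, wake the processor if it is idle or
+ * dispatching, or send a remote AST check otherwise.
+ */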
+ int prstate = processor->state;
+ if (processor == current_processor())
+ ast_on(AST_PREEMPT | AST_URGENT);
+ else if ((prstate == PROCESSOR_DISPATCHING) || (prstate == PROCESSOR_IDLE))
+ machine_signal_idle(processor);
+ else
+ cause_ast_check(processor);
+ }
+
+ pset_unlock(pset);
+}
+
+#if defined(CONFIG_SCHED_TRADITIONAL)
+
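+/*
+ * priority_is_urgent:
+ *
+ * Indicate whether threads at the given priority trigger an
+ * urgent preemption, per the sched_preempt_pri bitmap.
+ */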
+static boolean_t
+priority_is_urgent(int priority)
+{
+ return testbit(priority, sched_preempt_pri) ? TRUE : FALSE;
+}
+
+/*
+ * processor_enqueue:
+ *
+ * Enqueue thread on a processor run queue. Thread must be locked,
+ * and not already be on a run queue.
+ *
+ * Returns TRUE if a preemption is indicated based on the state
+ * of the run queue.
+ *
+ * The run queue must be locked (see thread_run_queue_remove()
+ * for more info).
+ */
+static boolean_t
+processor_enqueue(
+ processor_t processor,
+ thread_t thread,
+ integer_t options)
+{
+ run_queue_t rq = runq_for_processor(processor);
+ boolean_t result;
+
+ result = run_queue_enqueue(rq, thread, options);
+ thread->runq = processor;
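+ /* Maintain the bound-thread count consulted by the pset-runqueue idle check. */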
+ runq_consider_incr_bound_count(processor, thread);
+
+ return (result);
+}
+
+#endif /* CONFIG_SCHED_TRADITIONAL */
+
+/*
+ * processor_setrun:
+ *
+ * Dispatch a thread for execution on a
+ * processor.
+ *
+ * Thread must be locked. Associated pset must
+ * be locked, and is returned unlocked.
+ */
+static void
+processor_setrun(
+ processor_t processor,
+ thread_t thread,
+ integer_t options)
+{
+ processor_set_t pset = processor->processor_set;
+ ast_t preempt;
+
+ thread->chosen_processor = processor;
+
+ /*
+ * Dispatch directly onto idle processor.
+ */
+ if ( (SCHED(direct_dispatch_to_idle_processors) ||
+ thread->bound_processor == processor)
+ && processor->state == PROCESSOR_IDLE) {
+ remqueue((queue_entry_t)processor);
+ enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
+
+ processor->next_thread = thread;
+ processor->deadline = UINT64_MAX;
+ processor->state = PROCESSOR_DISPATCHING;
+ pset_unlock(pset);
+
+ if (processor != current_processor())
+ machine_signal_idle(processor);
+ return;
+ }
+
+ /*
+ * Set preemption mode.
+ *
+ * Preempt urgently when the incoming thread's priority is
+ * flagged urgent and exceeds the running priority, or when
+ * the active thread has requested eager preemption; do not
+ * preempt for a timeshare thread currently below its base
+ * priority; otherwise honor the caller's SCHED_PREEMPT option.
+ */
+ if (SCHED(priority_is_urgent)(thread->sched_pri) && thread->sched_pri > processor->current_pri)
+ preempt = (AST_PREEMPT | AST_URGENT);
+ else if (processor->active_thread && thread_eager_preemption(processor->active_thread))
+ preempt = (AST_PREEMPT | AST_URGENT);
+ else
+ if ((thread->sched_mode == TH_MODE_TIMESHARE) && thread->sched_pri < thread->priority)
+ preempt = AST_NONE;
+ else
+ preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;
+
+ if (!SCHED(processor_enqueue)(processor, thread, options))
+ preempt = AST_NONE;
+
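+ /*
+ * Notify the target: re-check and post the AST on the local
+ * processor, wake an idle or dispatching remote processor,
+ * or send a remote AST check to a running or shutting-down
+ * one. Even with no preemption indicated, idle and
+ * shutting-down processors are still signaled when
+ * appropriate so the new thread is noticed.
+ */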
+ if (preempt != AST_NONE) {
+ if (processor == current_processor()) {
+ if (csw_check(processor) != AST_NONE)
+ ast_on(preempt);
+ }
+ else
+ if ( processor->state == PROCESSOR_IDLE || processor->state == PROCESSOR_DISPATCHING) {
+ machine_signal_idle(processor);
+ }
+ else
+ if ( (processor->state == PROCESSOR_RUNNING ||
+ processor->state == PROCESSOR_SHUTDOWN) &&
+ (thread->sched_pri >= processor->current_pri ||
+ processor->current_thmode == TH_MODE_FAIRSHARE)) {
+ cause_ast_check(processor);
+ }
+ }
+ else
+ if ( processor->state == PROCESSOR_SHUTDOWN &&
+ thread->sched_pri >= processor->current_pri ) {
+ cause_ast_check(processor);
+ }
+ else
+ if ( processor->state == PROCESSOR_IDLE &&
+ processor != current_processor() ) {
+ machine_signal_idle(processor);
+ }
+
+ pset_unlock(pset);
+}
+
+#if defined(CONFIG_SCHED_TRADITIONAL)
+
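+/*
+ * processor_queue_empty:
+ *
+ * Indicate whether the processor's run queue currently
+ * holds no runnable threads.
+ */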
+static boolean_t
+processor_queue_empty(processor_t processor)
+{
+ return runq_for_processor(processor)->count == 0;
+}
+
+static boolean_t
+sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor)
+{
+ processor_set_t pset = processor->processor_set;
+ int count = runq_for_processor(processor)->count;
+
+ /*
+ * The pset runq contains the count of all runnable threads
+ * for all processors in the pset. However, for threads that
+ * are bound to another processor, the current "processor"
+ * is not eligible to execute the thread. So we only
+ * include bound threads that are bound to the current
+ * "processor". This allows the processor to idle when the
+ * count of eligible threads drops to 0, even if there's
+ * a runnable thread bound to a different processor in the
+ * shared runq.
+ */
+
+ count -= pset->pset_runq_bound_count;
+ count += processor->runq_bound_count;
+
+ return count == 0;
+}
+
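+/*
+ * processor_csw_check:
+ *
+ * Check whether a queued thread should preempt the
+ * processor's active thread, returning AST_PREEMPT
+ * (with AST_URGENT when the queue holds urgent threads
+ * or the active thread requested eager preemption), or
+ * AST_NONE otherwise.
+ */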
+static ast_t
+processor_csw_check(processor_t processor)
+{
+ run_queue_t runq;
+
+ assert(processor->active_thread != NULL);
+
+ runq = runq_for_processor(processor);
+ if (runq->highq > processor->current_pri) {
+ if (runq->urgency > 0)
+ return (AST_PREEMPT | AST_URGENT);
+
+ if (processor->active_thread && thread_eager_preemption(processor->active_thread))
+ return (AST_PREEMPT | AST_URGENT);
+
+ return AST_PREEMPT;
+ }
+
+ return AST_NONE;
+}
+
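+/*
+ * processor_queue_has_priority:
+ *
+ * Indicate whether the processor's run queue holds a thread
+ * at or above (gte) or strictly above the given priority.
+ */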
+static boolean_t
+processor_queue_has_priority(processor_t processor,
+ int priority,
+ boolean_t gte)
+{
+ if (gte)
+ return runq_for_processor(processor)->highq >= priority;
+ else
+ return runq_for_processor(processor)->highq > priority;
+}
+
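+/*
+ * should_current_thread_rechoose_processor:
+ *
+ * Indicate whether the current thread should select a
+ * processor again: a non-realtime thread running on a
+ * non-primary processor of a processor meta group is
+ * asked to rechoose.
+ */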
+static boolean_t
+should_current_thread_rechoose_processor(processor_t processor)
+{
+ return (processor->current_pri < BASEPRI_RTQUEUES
+ && processor->processor_meta != PROCESSOR_META_NULL
+ && processor->processor_meta->primary != processor);
+}
+
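+/*
+ * Return the count of runnable threads on the processor's
+ * run queue.
+ */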
+static int
+sched_traditional_processor_runq_count(processor_t processor)
+{
+ return runq_for_processor(processor)->count;
+}
+
+static uint64_t
+sched_traditional_processor_runq_stats_count_sum(processor_t processor)
+{
+ return runq_for_processor(processor)->runq_stats.count_sum;
+}
+
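+/*
+ * With a shared pset run queue, report the run queue
+ * statistics only for the lowest-numbered CPU in the set
+ * so the shared counts are not attributed more than once.
+ */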
+static uint64_t
+sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor)
+{
+ if (processor->cpu_id == processor->processor_set->cpu_set_low)
+ return runq_for_processor(processor)->runq_stats.count_sum;
+ else
+ return 0ULL;
+}
+
+#endif /* CONFIG_SCHED_TRADITIONAL */
+
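+/*
+ * next_pset(p) advances to the next pset in the node's list,
+ * wrapping around to the first pset when the end is reached.
+ */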
+#define next_pset(p) (((p)->pset_list != PROCESSOR_SET_NULL)? (p)->pset_list: (p)->node->psets)
+
+/*
+ * choose_next_pset:
+ *
+ * Return the next sibling pset containing
+ * available processors.
+ *
+ * Returns the original pset if none other is
+ * suitable.
+ */
+static processor_set_t
+choose_next_pset(
+ processor_set_t pset)
+{
+ processor_set_t nset = pset;
+
+ do {
+ nset = next_pset(nset);
+ } while (nset->online_processor_count < 1 && nset != pset);
+
+ return (nset);
+}
+
+/*
+ * choose_processor:
+ *
+ * Choose a processor for the thread, beginning at
+ * the pset. Accepts an optional processor hint in
+ * the pset.
+ *
+ * Returns a processor, possibly from a different pset.
+ *
+ * The thread must be locked. The pset must be locked,
+ * and the resulting pset is locked on return.
+ */
+processor_t
+choose_processor(
+ processor_set_t pset,
+ processor_t processor,
+ thread_t thread)
+{
+ processor_set_t nset, cset = pset;
+ processor_meta_t pmeta = PROCESSOR_META_NULL;
+ processor_t mprocessor;
+
+ /*
+ * Prefer the hinted processor, when appropriate.
+ */
+
+ if (processor != PROCESSOR_NULL) {
+ if (processor->processor_meta != PROCESSOR_META_NULL)
+ processor = processor->processor_meta->primary;
+ }
+
+ mprocessor = machine_choose_processor(pset, processor);
+ if (mprocessor != PROCESSOR_NULL)
+ processor = mprocessor;
+
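+ /*
+ * Discard the hint if it belongs to another pset or is not
+ * available; take it immediately if it is idle, or if the
+ * incoming thread is realtime and the hinted processor is
+ * running a non-realtime thread.
+ */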
+ if (processor != PROCESSOR_NULL) {
+ if (processor->processor_set != pset ||
+ processor->state == PROCESSOR_INACTIVE ||
+ processor->state == PROCESSOR_SHUTDOWN ||
+ processor->state == PROCESSOR_OFF_LINE)
+ processor = PROCESSOR_NULL;
+ else
+ if (processor->state == PROCESSOR_IDLE ||
+ ((thread->sched_pri >= BASEPRI_RTQUEUES) &&
+ (processor->current_pri < BASEPRI_RTQUEUES)))
+ return (processor);
+ }
+
+ /*
+ * Iterate through the processor sets to locate
+ * an appropriate processor.
+ */
+ do {
+ /*
+ * Choose an idle processor.