.direct_dispatch_to_idle_processors = TRUE,
.multiple_psets_enabled = TRUE,
.sched_groups_enabled = FALSE,
+ .avoid_processor_enabled = FALSE,
+ .thread_avoid_processor = NULL,
+ .processor_balance = sched_SMT_balance,
+
+ .rt_runq = sched_rtglobal_runq,
+ .rt_init = sched_rtglobal_init,
+ .rt_queue_shutdown = sched_rtglobal_queue_shutdown,
+ .rt_runq_scan = sched_rtglobal_runq_scan,
+ .rt_runq_count_sum = sched_rtglobal_runq_count_sum,
+
+ .qos_max_parallelism = sched_qos_max_parallelism,
+ .check_spill = sched_check_spill,
+ .ipi_policy = sched_ipi_policy,
+ .thread_should_yield = sched_thread_should_yield,
};
const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch = {
.direct_dispatch_to_idle_processors = FALSE,
.multiple_psets_enabled = TRUE,
.sched_groups_enabled = FALSE,
+ .avoid_processor_enabled = FALSE,
+ .thread_avoid_processor = NULL,
+ .processor_balance = sched_SMT_balance,
+
+ .rt_runq = sched_rtglobal_runq,
+ .rt_init = sched_rtglobal_init,
+ .rt_queue_shutdown = sched_rtglobal_queue_shutdown,
+ .rt_runq_scan = sched_rtglobal_runq_scan,
+ .rt_runq_count_sum = sched_rtglobal_runq_count_sum,
+
+ .qos_max_parallelism = sched_qos_max_parallelism,
+ .check_spill = sched_check_spill,
+ .ipi_policy = sched_ipi_policy,
+ .thread_should_yield = sched_thread_should_yield,
};
static void
int priority,
boolean_t gte)
{
- run_queue_t runq = runq_for_processor(processor);
-
- if (runq->count == 0)
- return FALSE;
- else if (gte)
+ if (gte)
return runq_for_processor(processor)->highq >= priority;
else
return runq_for_processor(processor)->highq > priority;
thread_t thread;
do {
- processor = (processor_t)(uintptr_t)queue_first(&cset->active_queue);
- while (!queue_end(&cset->active_queue, (queue_entry_t)processor)) {
+ uint64_t active_map = (cset->cpu_state_map[PROCESSOR_RUNNING] |
+ cset->cpu_state_map[PROCESSOR_DISPATCHING]);
+ for (int cpuid = lsb_first(active_map); cpuid >= 0; cpuid = lsb_next(active_map, cpuid)) {
+ processor = processor_array[cpuid];
if (runq_for_processor(processor)->count > 0) {
thread = sched_traditional_steal_processor_thread(processor);
if (thread != THREAD_NULL) {
- remqueue((queue_entry_t)processor);
- enqueue_tail(&cset->active_queue, (queue_entry_t)processor);
-
pset_unlock(cset);
return (thread);
}
}
-
- processor = (processor_t)(uintptr_t)queue_next((queue_entry_t)processor);
}
nset = next_pset(cset);