#include <kern/sched_prim.h>
static boolean_t
-sched_traditional_use_pset_runqueue = FALSE;
+ sched_traditional_use_pset_runqueue = FALSE;
static void
sched_traditional_init(void);
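+/*
+ * steal_thread_enabled is now a per-pset predicate in the dispatch table
+ * rather than a compile-time boolean.
+ */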
+static bool
+sched_traditional_steal_thread_enabled(processor_set_t pset);
+
static thread_t
sched_traditional_steal_thread(processor_set_t pset);
static void
sched_traditional_processor_queue_shutdown(processor_t processor);
static boolean_t
-sched_traditional_processor_enqueue(processor_t processor, thread_t thread, integer_t options);
+sched_traditional_processor_enqueue(processor_t processor, thread_t thread,
+ sched_options_t options);
static boolean_t
sched_traditional_processor_queue_remove(processor_t processor, thread_t thread);
.pset_init = sched_traditional_pset_init,
.maintenance_continuation = sched_timeshare_maintenance_continue,
.choose_thread = sched_traditional_choose_thread,
- .steal_thread_enabled = TRUE,
+ .steal_thread_enabled = sched_traditional_steal_thread_enabled,
.steal_thread = sched_traditional_steal_thread,
.compute_timeshare_priority = sched_compute_timeshare_priority,
.choose_processor = choose_processor,
.processor_runq_stats_count_sum = sched_traditional_processor_runq_stats_count_sum,
.processor_bound_count = sched_traditional_processor_bound_count,
.thread_update_scan = sched_traditional_thread_update_scan,
- .direct_dispatch_to_idle_processors = TRUE,
.multiple_psets_enabled = TRUE,
.sched_groups_enabled = FALSE,
+ .avoid_processor_enabled = FALSE,
+ .thread_avoid_processor = NULL,
+ .processor_balance = sched_SMT_balance,
+
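+ /* Realtime threads are kept on the single global RT run queue */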
+ .rt_runq = sched_rtglobal_runq,
+ .rt_init = sched_rtglobal_init,
+ .rt_queue_shutdown = sched_rtglobal_queue_shutdown,
+ .rt_runq_scan = sched_rtglobal_runq_scan,
+ .rt_runq_count_sum = sched_rtglobal_runq_count_sum,
+
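+ /* Generic sched_* implementations shared across scheduler policies */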
+ .qos_max_parallelism = sched_qos_max_parallelism,
+ .check_spill = sched_check_spill,
+ .ipi_policy = sched_ipi_policy,
+ .thread_should_yield = sched_thread_should_yield,
+ .run_count_incr = sched_run_incr,
+ .run_count_decr = sched_run_decr,
+ .update_thread_bucket = sched_update_thread_bucket,
+ .pset_made_schedulable = sched_pset_made_schedulable,
};
const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch = {
.pset_init = sched_traditional_pset_init,
.maintenance_continuation = sched_timeshare_maintenance_continue,
.choose_thread = sched_traditional_choose_thread,
- .steal_thread_enabled = TRUE,
+ .steal_thread_enabled = sched_steal_thread_enabled,
.steal_thread = sched_traditional_steal_thread,
.compute_timeshare_priority = sched_compute_timeshare_priority,
.choose_processor = choose_processor,
.processor_runq_stats_count_sum = sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum,
.processor_bound_count = sched_traditional_processor_bound_count,
.thread_update_scan = sched_traditional_thread_update_scan,
- .direct_dispatch_to_idle_processors = FALSE,
.multiple_psets_enabled = TRUE,
.sched_groups_enabled = FALSE,
+ .avoid_processor_enabled = FALSE,
+ .thread_avoid_processor = NULL,
+ .processor_balance = sched_SMT_balance,
+
+ .rt_runq = sched_rtglobal_runq,
+ .rt_init = sched_rtglobal_init,
+ .rt_queue_shutdown = sched_rtglobal_queue_shutdown,
+ .rt_runq_scan = sched_rtglobal_runq_scan,
+ .rt_runq_count_sum = sched_rtglobal_runq_count_sum,
+
+ .qos_max_parallelism = sched_qos_max_parallelism,
+ .check_spill = sched_check_spill,
+ .ipi_policy = sched_ipi_policy,
+ .thread_should_yield = sched_thread_should_yield,
+ .run_count_incr = sched_run_incr,
+ .run_count_decr = sched_run_decr,
+ .update_thread_bucket = sched_update_thread_bucket,
+ .pset_made_schedulable = sched_pset_made_schedulable,
};
static void
}
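+/*
+ * Return the run queue this processor services: the shared per-pset queue
+ * when pset run queues are in use, otherwise the processor's own queue.
+ */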
__attribute__((always_inline))
-static inline run_queue_t runq_for_processor(processor_t processor)
+static inline run_queue_t
+runq_for_processor(processor_t processor)
{
- if (sched_traditional_use_pset_runqueue)
+ if (sched_traditional_use_pset_runqueue) {
return &processor->processor_set->pset_runq;
- else
+ } else {
return &processor->runq;
+ }
}
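+/* Account for a processor-bound thread entering the queue this processor services */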
__attribute__((always_inline))
-static inline void runq_consider_incr_bound_count(processor_t processor,
- thread_t thread)
+static inline void
+runq_consider_incr_bound_count(processor_t processor,
+ thread_t thread)
{
- if (thread->bound_processor == PROCESSOR_NULL)
+ if (thread->bound_processor == PROCESSOR_NULL) {
return;
+ }
assert(thread->bound_processor == processor);
- if (sched_traditional_use_pset_runqueue)
+ if (sched_traditional_use_pset_runqueue) {
processor->processor_set->pset_runq_bound_count++;
+ }
processor->runq_bound_count++;
}
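+/* Account for a processor-bound thread leaving the queue this processor services */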
__attribute__((always_inline))
-static inline void runq_consider_decr_bound_count(processor_t processor,
- thread_t thread)
+static inline void
+runq_consider_decr_bound_count(processor_t processor,
+ thread_t thread)
{
- if (thread->bound_processor == PROCESSOR_NULL)
+ if (thread->bound_processor == PROCESSOR_NULL) {
return;
+ }
assert(thread->bound_processor == processor);
- if (sched_traditional_use_pset_runqueue)
+ if (sched_traditional_use_pset_runqueue) {
processor->processor_set->pset_runq_bound_count--;
+ }
processor->runq_bound_count--;
}
static thread_t
sched_traditional_choose_thread(
- processor_t processor,
- int priority,
- __unused ast_t reason)
+ processor_t processor,
+ int priority,
+ __unused ast_t reason)
{
thread_t thread;
*/
static thread_t
sched_traditional_choose_thread_from_runq(
- processor_t processor,
- run_queue_t rq,
- int priority)
+ processor_t processor,
+ run_queue_t rq,
+ int priority)
{
- queue_t queue = rq->queues + rq->highq;
+ circle_queue_t queue = rq->queues + rq->highq;
int pri = rq->highq;
int count = rq->count;
thread_t thread;
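+ /* Scan levels from rq->highq down to 'priority' for an eligible thread */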
while (count > 0 && pri >= priority) {
- thread = (thread_t)(uintptr_t)queue_first(queue);
- while (!queue_end(queue, (queue_entry_t)thread)) {
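+ /* Safe iteration: the current element may be dequeued inside the loop */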
+ cqe_foreach_element_safe(thread, queue, runq_links) {
if (thread->bound_processor == PROCESSOR_NULL ||
thread->bound_processor == processor) {
- remqueue((queue_entry_t)thread);
+ circle_dequeue(queue, &thread->runq_links);
thread->runq = PROCESSOR_NULL;
SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
if (SCHED(priority_is_urgent)(pri)) {
rq->urgency--; assert(rq->urgency >= 0);
}
- if (queue_empty(queue)) {
- if (pri != IDLEPRI)
- clrbit(MAXPRI - pri, rq->bitmap);
- rq->highq = MAXPRI - ffsbit(rq->bitmap);
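+ /* Level drained: clear its bitmap bit and recompute the highest level */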
+ if (circle_queue_empty(queue)) {
+ bitmap_clear(rq->bitmap, pri);
+ rq->highq = bitmap_first(rq->bitmap, NRQS);
}
-
- return (thread);
+ return thread;
}
count--;
-
- thread = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);
}
queue--; pri--;
}
- return (THREAD_NULL);
+ return THREAD_NULL;
}
static sched_mode_t
sched_traditional_initial_thread_sched_mode(task_t parent_task)
{
- if (parent_task == kernel_task)
+ if (parent_task == kernel_task) {
return TH_MODE_FIXED;
- else
+ } else {
return TH_MODE_TIMESHARE;
+ }
}
/*
*/
static boolean_t
sched_traditional_processor_enqueue(processor_t processor,
- thread_t thread,
- integer_t options)
+ thread_t thread,
+ sched_options_t options)
{
run_queue_t rq = runq_for_processor(processor);
boolean_t result;
thread->runq = processor;
runq_consider_incr_bound_count(processor, thread);
- return (result);
+ return result;
}
static boolean_t
}
if (has_higher) {
- if (runq->urgency > 0)
- return (AST_PREEMPT | AST_URGENT);
+ if (runq->urgency > 0) {
+ return AST_PREEMPT | AST_URGENT;
+ }
return AST_PREEMPT;
}
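+/*
+ * Report whether the serviced run queue holds a thread at 'priority'
+ * (gte) or strictly above it.
+ */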
static boolean_t
sched_traditional_processor_queue_has_priority(processor_t processor,
- int priority,
- boolean_t gte)
+ int priority,
+ boolean_t gte)
{
- if (gte)
+ if (gte) {
return runq_for_processor(processor)->highq >= priority;
- else
+ } else {
return runq_for_processor(processor)->highq > priority;
+ }
}
static int
static uint64_t
sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor)
{
- if (processor->cpu_id == processor->processor_set->cpu_set_low)
+ if (processor->cpu_id == processor->processor_set->cpu_set_low) {
return runq_for_processor(processor)->runq_stats.count_sum;
- else
+ } else {
return 0ULL;
+ }
}
static void
sched_traditional_processor_queue_shutdown(processor_t processor)
{
processor_set_t pset = processor->processor_set;
run_queue_t rq = runq_for_processor(processor);
- queue_t queue = rq->queues + rq->highq;
+ circle_queue_t queue = rq->queues + rq->highq;
int pri = rq->highq;
int count = rq->count;
- thread_t next, thread;
- queue_head_t tqueue;
+ thread_t thread;
+ circle_queue_head_t tqueue;
- queue_init(&tqueue);
+ circle_queue_init(&tqueue);
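+ /* Move unbound threads to a local queue; bound threads must stay on this processor */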
while (count > 0) {
- thread = (thread_t)(uintptr_t)queue_first(queue);
- while (!queue_end(queue, (queue_entry_t)thread)) {
- next = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);
-
+ cqe_foreach_element_safe(thread, queue, runq_links) {
if (thread->bound_processor == PROCESSOR_NULL) {
- remqueue((queue_entry_t)thread);
+ circle_dequeue(queue, &thread->runq_links);
thread->runq = PROCESSOR_NULL;
SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
if (SCHED(priority_is_urgent)(pri)) {
rq->urgency--; assert(rq->urgency >= 0);
}
- if (queue_empty(queue)) {
- if (pri != IDLEPRI)
- clrbit(MAXPRI - pri, rq->bitmap);
- rq->highq = MAXPRI - ffsbit(rq->bitmap);
+ if (circle_queue_empty(queue)) {
+ bitmap_clear(rq->bitmap, pri);
+ rq->highq = bitmap_first(rq->bitmap, NRQS);
}
- enqueue_tail(&tqueue, (queue_entry_t)thread);
+ circle_enqueue_tail(&tqueue, &thread->runq_links);
}
count--;
-
- thread = next;
}
queue--; pri--;
pset_unlock(pset);
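+ /* With the pset lock dropped, re-dispatch each drained thread */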
- while ((thread = (thread_t)(uintptr_t)dequeue_head(&tqueue)) != THREAD_NULL) {
+ while ((thread = cqe_dequeue_head(&tqueue, struct thread, runq_links)) != THREAD_NULL) {
thread_lock(thread);
thread_setrun(thread, SCHED_TAILQ);
#if 0
static void
run_queue_check(
- run_queue_t rq,
- thread_t thread)
+ run_queue_t rq,
+ thread_t thread)
{
queue_t q;
queue_entry_t qe;
- if (rq != thread->runq)
+ if (rq != thread->runq) {
panic("run_queue_check: thread runq");
+ }
- if (thread->sched_pri > MAXPRI || thread->sched_pri < MINPRI)
+ if (thread->sched_pri > MAXPRI || thread->sched_pri < MINPRI) {
panic("run_queue_check: thread sched_pri");
+ }
q = &rq->queues[thread->sched_pri];
qe = queue_first(q);
while (!queue_end(q, qe)) {
- if (qe == (queue_entry_t)thread)
+ if (qe == (queue_entry_t)thread) {
return;
+ }
qe = queue_next(qe);
}
*/
static boolean_t
sched_traditional_processor_queue_remove(processor_t processor,
- thread_t thread)
+ thread_t thread)
{
processor_set_t pset;
run_queue_t rq;
*/
runq_consider_decr_bound_count(processor, thread);
run_queue_remove(rq, thread);
- }
- else {
+ } else {
/*
* The thread left the run queue before we could
* lock the run queue.
pset_unlock(pset);
- return (processor != PROCESSOR_NULL);
+ return processor != PROCESSOR_NULL;
}
/*
sched_traditional_steal_processor_thread(processor_t processor)
{
run_queue_t rq = runq_for_processor(processor);
- queue_t queue = rq->queues + rq->highq;
+ circle_queue_t queue = rq->queues + rq->highq;
int pri = rq->highq;
int count = rq->count;
thread_t thread;
while (count > 0) {
- thread = (thread_t)(uintptr_t)queue_first(queue);
- while (!queue_end(queue, (queue_entry_t)thread)) {
+ cqe_foreach_element_safe(thread, queue, runq_links) {
if (thread->bound_processor == PROCESSOR_NULL) {
- remqueue((queue_entry_t)thread);
+ circle_dequeue(queue, &thread->runq_links);
thread->runq = PROCESSOR_NULL;
SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
if (SCHED(priority_is_urgent)(pri)) {
rq->urgency--; assert(rq->urgency >= 0);
}
- if (queue_empty(queue)) {
- if (pri != IDLEPRI)
- clrbit(MAXPRI - pri, rq->bitmap);
- rq->highq = MAXPRI - ffsbit(rq->bitmap);
+ if (circle_queue_empty(queue)) {
+ bitmap_clear(rq->bitmap, pri);
+ rq->highq = bitmap_first(rq->bitmap, NRQS);
}
- return (thread);
+ return thread;
}
count--;
-
- thread = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);
}
queue--; pri--;
}
- return (THREAD_NULL);
+ return THREAD_NULL;
+}
+
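+/* The traditional policy always permits stealing from sibling processors */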
+static bool
+sched_traditional_steal_thread_enabled(processor_set_t pset)
+{
+ (void)pset;
+ return true;
}
/*
thread_t thread;
do {
- processor = (processor_t)(uintptr_t)queue_first(&cset->active_queue);
- while (!queue_end(&cset->active_queue, (queue_entry_t)processor)) {
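+ /*
+ * Walk the pset's running/dispatching CPUs via the state bitmaps,
+ * replacing the old active_queue walk.
+ */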
+ uint64_t active_map = (pset->cpu_state_map[PROCESSOR_RUNNING] |
+ pset->cpu_state_map[PROCESSOR_DISPATCHING]);
+ for (int cpuid = lsb_first(active_map); cpuid >= 0; cpuid = lsb_next(active_map, cpuid)) {
+ processor = processor_array[cpuid];
if (runq_for_processor(processor)->count > 0) {
thread = sched_traditional_steal_processor_thread(processor);
if (thread != THREAD_NULL) {
- remqueue((queue_entry_t)processor);
- enqueue_tail(&cset->active_queue, (queue_entry_t)processor);
-
pset_unlock(cset);
- return (thread);
+ return thread;
}
}
-
- processor = (processor_t)(uintptr_t)queue_next((queue_entry_t)processor);
}
nset = next_pset(cset);
pset_unlock(cset);
- return (THREAD_NULL);
+ return THREAD_NULL;
}
static void
pset_unlock(pset);
splx(s);
- if (restart_needed)
+ if (restart_needed) {
break;
+ }
thread = processor->idle_thread;
if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
thread_update_process_threads();
} while (restart_needed);
}
-