X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/3903760236c30e3b5ace7a4eefac3a269d68957c..cb3231590a3c94ab4375e2228bd5e86b0cf1ad7e:/osfmk/kern/sched_traditional.c

diff --git a/osfmk/kern/sched_traditional.c b/osfmk/kern/sched_traditional.c
index 80f950feb..3297904d0 100644
--- a/osfmk/kern/sched_traditional.c
+++ b/osfmk/kern/sched_traditional.c
@@ -60,11 +60,14 @@
 #include <kern/sched_prim.h>
 
 static boolean_t
-sched_traditional_use_pset_runqueue = FALSE;
+	sched_traditional_use_pset_runqueue = FALSE;
 
 static void
 sched_traditional_init(void);
 
+static bool
+sched_traditional_steal_thread_enabled(processor_set_t pset);
+
 static thread_t
 sched_traditional_steal_thread(processor_set_t pset);
 
@@ -78,7 +81,8 @@ static void
 sched_traditional_processor_queue_shutdown(processor_t processor);
 
 static boolean_t
-sched_traditional_processor_enqueue(processor_t processor, thread_t thread, integer_t options);
+sched_traditional_processor_enqueue(processor_t processor, thread_t thread,
+    sched_options_t options);
 
 static boolean_t
 sched_traditional_processor_queue_remove(processor_t processor, thread_t thread);
@@ -136,7 +140,7 @@ const struct sched_dispatch_table sched_traditional_dispatch = {
 	.pset_init                                      = sched_traditional_pset_init,
 	.maintenance_continuation                       = sched_timeshare_maintenance_continue,
 	.choose_thread                                  = sched_traditional_choose_thread,
-	.steal_thread_enabled                           = TRUE,
+	.steal_thread_enabled                           = sched_traditional_steal_thread_enabled,
 	.steal_thread                                   = sched_traditional_steal_thread,
 	.compute_timeshare_priority                     = sched_compute_timeshare_priority,
 	.choose_processor                               = choose_processor,
@@ -157,9 +161,26 @@ const struct sched_dispatch_table sched_traditional_dispatch = {
 	.processor_runq_stats_count_sum                 = sched_traditional_processor_runq_stats_count_sum,
 	.processor_bound_count                          = sched_traditional_processor_bound_count,
 	.thread_update_scan                             = sched_traditional_thread_update_scan,
-	.direct_dispatch_to_idle_processors             = TRUE,
 	.multiple_psets_enabled                         = TRUE,
 	.sched_groups_enabled                           = FALSE,
+	.avoid_processor_enabled                        = FALSE,
+	.thread_avoid_processor                         = NULL,
+	.processor_balance                              = sched_SMT_balance,
+
+	.rt_runq                                        = sched_rtglobal_runq,
+	.rt_init                                        = sched_rtglobal_init,
+	.rt_queue_shutdown                              = sched_rtglobal_queue_shutdown,
+	.rt_runq_scan                                   = sched_rtglobal_runq_scan,
+	.rt_runq_count_sum                              = sched_rtglobal_runq_count_sum,
+
+	.qos_max_parallelism                            = sched_qos_max_parallelism,
+	.check_spill                                    = sched_check_spill,
+	.ipi_policy                                     = sched_ipi_policy,
+	.thread_should_yield                            = sched_thread_should_yield,
+	.run_count_incr                                 = sched_run_incr,
+	.run_count_decr                                 = sched_run_decr,
+	.update_thread_bucket                           = sched_update_thread_bucket,
+	.pset_made_schedulable                          = sched_pset_made_schedulable,
 };
 
 const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch = {
@@ -170,7 +191,7 @@ const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch
 	.pset_init                                      = sched_traditional_pset_init,
 	.maintenance_continuation                       = sched_timeshare_maintenance_continue,
 	.choose_thread                                  = sched_traditional_choose_thread,
-	.steal_thread_enabled                           = TRUE,
+	.steal_thread_enabled                           = sched_steal_thread_enabled,
 	.steal_thread                                   = sched_traditional_steal_thread,
 	.compute_timeshare_priority                     = sched_compute_timeshare_priority,
 	.choose_processor                               = choose_processor,
@@ -191,9 +212,26 @@ const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch
 	.processor_runq_stats_count_sum                 = sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum,
 	.processor_bound_count                          = sched_traditional_processor_bound_count,
 	.thread_update_scan                             = sched_traditional_thread_update_scan,
-	.direct_dispatch_to_idle_processors             = FALSE,
 	.multiple_psets_enabled                         = TRUE,
 	.sched_groups_enabled                           = FALSE,
+	.avoid_processor_enabled                        = FALSE,
+	.thread_avoid_processor                         = NULL,
+	.processor_balance                              = sched_SMT_balance,
+
+	.rt_runq                                        = sched_rtglobal_runq,
+	.rt_init                                        = sched_rtglobal_init,
+	.rt_queue_shutdown                              = sched_rtglobal_queue_shutdown,
+	.rt_runq_scan                                   = sched_rtglobal_runq_scan,
+	.rt_runq_count_sum                              = sched_rtglobal_runq_count_sum,
+
+	.qos_max_parallelism                            = sched_qos_max_parallelism,
+	.check_spill                                    = sched_check_spill,
+	.ipi_policy                                     = sched_ipi_policy,
+	.thread_should_yield                            = sched_thread_should_yield,
+	.run_count_incr                                 = sched_run_incr,
+	.run_count_decr                                 = sched_run_decr,
+	.update_thread_bucket                           = sched_update_thread_bucket,
+	.pset_made_schedulable                          = sched_pset_made_schedulable,
 };
 
 static void
@@ -228,49 +266,57 @@ sched_traditional_pset_init(processor_set_t pset)
 }
 
 __attribute__((always_inline))
-static inline run_queue_t runq_for_processor(processor_t processor)
+static inline run_queue_t
+runq_for_processor(processor_t processor)
 {
-	if (sched_traditional_use_pset_runqueue)
+	if (sched_traditional_use_pset_runqueue) {
 		return &processor->processor_set->pset_runq;
-	else
+	} else {
 		return &processor->runq;
+	}
 }
 
 __attribute__((always_inline))
-static inline void runq_consider_incr_bound_count(processor_t processor,
-                                                  thread_t thread)
+static inline void
+runq_consider_incr_bound_count(processor_t processor,
+    thread_t thread)
 {
-	if (thread->bound_processor == PROCESSOR_NULL)
+	if (thread->bound_processor == PROCESSOR_NULL) {
 		return;
+	}
 
 	assert(thread->bound_processor == processor);
 
-	if (sched_traditional_use_pset_runqueue)
+	if (sched_traditional_use_pset_runqueue) {
 		processor->processor_set->pset_runq_bound_count++;
+	}
 
 	processor->runq_bound_count++;
 }
 
 __attribute__((always_inline))
-static inline void runq_consider_decr_bound_count(processor_t processor,
-                                                  thread_t thread)
+static inline void
+runq_consider_decr_bound_count(processor_t processor,
+    thread_t thread)
 {
-	if (thread->bound_processor == PROCESSOR_NULL)
+	if (thread->bound_processor == PROCESSOR_NULL) {
 		return;
+	}
 
 	assert(thread->bound_processor == processor);
 
-	if (sched_traditional_use_pset_runqueue)
+	if (sched_traditional_use_pset_runqueue) {
 		processor->processor_set->pset_runq_bound_count--;
+	}
 
 	processor->runq_bound_count--;
 }
 
 static thread_t
 sched_traditional_choose_thread(
-                                processor_t     processor,
-                                int             priority,
-                                __unused ast_t  reason)
+	processor_t     processor,
+	int             priority,
+	__unused ast_t  reason)
 {
 	thread_t thread;
@@ -294,21 +340,20 @@ sched_traditional_choose_thread(
  */
 static thread_t
 sched_traditional_choose_thread_from_runq(
-                                          processor_t     processor,
-                                          run_queue_t     rq,
-                                          int             priority)
+	processor_t     processor,
+	run_queue_t     rq,
+	int             priority)
 {
-	queue_t         queue = rq->queues + rq->highq;
+	circle_queue_t  queue = rq->queues + rq->highq;
 	int             pri   = rq->highq;
 	int             count = rq->count;
 	thread_t        thread;
 
 	while (count > 0 && pri >= priority) {
-		thread = (thread_t)(uintptr_t)queue_first(queue);
-		while (!queue_end(queue, (queue_entry_t)thread)) {
+		cqe_foreach_element_safe(thread, queue, runq_links) {
 			if (thread->bound_processor == PROCESSOR_NULL ||
 			    thread->bound_processor == processor) {
-				remqueue((queue_entry_t)thread);
+				circle_dequeue(queue, &thread->runq_links);
 
 				thread->runq = PROCESSOR_NULL;
 				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
@@ -316,31 +361,29 @@ sched_traditional_choose_thread_from_runq(
 				if (SCHED(priority_is_urgent)(pri)) {
 					rq->urgency--; assert(rq->urgency >= 0);
 				}
-				if (queue_empty(queue)) {
+				if (circle_queue_empty(queue)) {
 					bitmap_clear(rq->bitmap, pri);
 					rq->highq = bitmap_first(rq->bitmap, NRQS);
 				}
-
-				return (thread);
+				return thread;
 			}
 			count--;
-
-			thread = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);
 		}
 
 		queue--; pri--;
 	}
 
-	return (THREAD_NULL);
+	return THREAD_NULL;
 }
 
 static sched_mode_t
 sched_traditional_initial_thread_sched_mode(task_t parent_task)
 {
-	if (parent_task == kernel_task)
+	if (parent_task == kernel_task) {
 		return TH_MODE_FIXED;
-	else
+	} else {
 		return TH_MODE_TIMESHARE;
+	}
 }
 
 /*
@@ -357,8 +400,8 @@ sched_traditional_initial_thread_sched_mode(task_t parent_task)
  */
 static boolean_t
 sched_traditional_processor_enqueue(processor_t processor,
-                                    thread_t thread,
-                                    integer_t options)
+    thread_t thread,
+    sched_options_t options)
 {
 	run_queue_t rq = runq_for_processor(processor);
 	boolean_t result;
@@ -367,7 +410,7 @@ sched_traditional_processor_enqueue(processor_t processor,
 	thread->runq = processor;
 	runq_consider_incr_bound_count(processor, thread);
 
-	return (result);
+	return result;
 }
 
 static boolean_t
@@ -417,8 +460,9 @@ sched_traditional_processor_csw_check(processor_t processor)
 	}
 
 	if (has_higher) {
-		if (runq->urgency > 0)
-			return (AST_PREEMPT | AST_URGENT);
+		if (runq->urgency > 0) {
+			return AST_PREEMPT | AST_URGENT;
+		}
 
 		return AST_PREEMPT;
 	}
@@ -428,17 +472,14 @@
 static boolean_t
 sched_traditional_processor_queue_has_priority(processor_t processor,
-                                               int priority,
-                                               boolean_t gte)
+    int priority,
+    boolean_t gte)
 {
-	run_queue_t runq = runq_for_processor(processor);
-
-	if (runq->count == 0)
-		return FALSE;
-	else if (gte)
+	if (gte) {
 		return runq_for_processor(processor)->highq >= priority;
-	else
+	} else {
 		return runq_for_processor(processor)->highq > priority;
+	}
 }
 
 static int
@@ -456,10 +497,11 @@ sched_traditional_processor_runq_stats_count_sum(processor_t processor)
 static uint64_t
 sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor)
 {
-	if (processor->cpu_id == processor->processor_set->cpu_set_low)
+	if (processor->cpu_id == processor->processor_set->cpu_set_low) {
 		return runq_for_processor(processor)->runq_stats.count_sum;
-	else
+	} else {
 		return 0ULL;
+	}
 }
 
 static int
@@ -482,21 +524,18 @@ sched_traditional_processor_queue_shutdown(processor_t processor)
 {
 	processor_set_t pset  = processor->processor_set;
 	run_queue_t     rq    = runq_for_processor(processor);
-	queue_t         queue = rq->queues + rq->highq;
+	circle_queue_t  queue = rq->queues + rq->highq;
 	int             pri   = rq->highq;
 	int             count = rq->count;
-	thread_t        next, thread;
-	queue_head_t    tqueue;
+	thread_t        thread;
+	circle_queue_head_t tqueue;
 
-	queue_init(&tqueue);
+	circle_queue_init(&tqueue);
 
 	while (count > 0) {
-		thread = (thread_t)(uintptr_t)queue_first(queue);
-		while (!queue_end(queue, (queue_entry_t)thread)) {
-			next = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);
-
+		cqe_foreach_element_safe(thread, queue, runq_links) {
 			if (thread->bound_processor == PROCESSOR_NULL) {
-				remqueue((queue_entry_t)thread);
+				circle_dequeue(queue, &thread->runq_links);
 
 				thread->runq = PROCESSOR_NULL;
 				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
@@ -505,16 +544,14 @@ sched_traditional_processor_queue_shutdown(processor_t processor)
 				if (SCHED(priority_is_urgent)(pri)) {
 					rq->urgency--; assert(rq->urgency >= 0);
 				}
-				if (queue_empty(queue)) {
+				if (circle_queue_empty(queue)) {
 					bitmap_clear(rq->bitmap, pri);
 					rq->highq = bitmap_first(rq->bitmap, NRQS);
 				}
 
-				enqueue_tail(&tqueue, (queue_entry_t)thread);
+				circle_enqueue_tail(&tqueue, &thread->runq_links);
 			}
 			count--;
-
-			thread = next;
 		}
 		queue--; pri--;
 	}
@@ -522,7 +559,7 @@ sched_traditional_processor_queue_shutdown(processor_t processor)
 
 	pset_unlock(pset);
 
-	while ((thread = (thread_t)(uintptr_t)dequeue_head(&tqueue)) != THREAD_NULL) {
+	while ((thread = cqe_dequeue_head(&tqueue, struct thread, runq_links)) != THREAD_NULL) {
 		thread_lock(thread);
 
 		thread_setrun(thread, SCHED_TAILQ);
@@ -534,23 +571,26 @@ sched_traditional_processor_queue_shutdown(processor_t processor)
 #if 0
 static void
 run_queue_check(
-                run_queue_t rq,
-                thread_t    thread)
+	run_queue_t rq,
+	thread_t    thread)
 {
 	queue_t q;
 	queue_entry_t qe;
 
-	if (rq != thread->runq)
+	if (rq != thread->runq) {
 		panic("run_queue_check: thread runq");
+	}
 
-	if (thread->sched_pri > MAXPRI || thread->sched_pri < MINPRI)
+	if (thread->sched_pri > MAXPRI || thread->sched_pri < MINPRI) {
 		panic("run_queue_check: thread sched_pri");
+	}
 
 	q = &rq->queues[thread->sched_pri];
 	qe = queue_first(q);
 	while (!queue_end(q, qe)) {
-		if (qe == (queue_entry_t)thread)
+		if (qe == (queue_entry_t)thread) {
 			return;
+		}
 
 		qe = queue_next(qe);
 	}
@@ -566,7 +606,7 @@
  */
 static boolean_t
 sched_traditional_processor_queue_remove(processor_t processor,
-                                         thread_t thread)
+    thread_t thread)
 {
 	processor_set_t pset;
 	run_queue_t     rq;
@@ -583,8 +623,7 @@ sched_traditional_processor_queue_remove(processor_t processor,
 		 */
 		runq_consider_decr_bound_count(processor, thread);
 		run_queue_remove(rq, thread);
-	}
-	else {
+	} else {
 		/*
 		 * The thread left the run queue before we could
 		 * lock the run queue.
@@ -595,7 +634,7 @@ sched_traditional_processor_queue_remove(processor_t processor,
 
 	pset_unlock(pset);
 
-	return (processor != PROCESSOR_NULL);
+	return processor != PROCESSOR_NULL;
 }
 
 /*
@@ -611,16 +650,15 @@ static thread_t
 sched_traditional_steal_processor_thread(processor_t processor)
 {
 	run_queue_t     rq    = runq_for_processor(processor);
-	queue_t         queue = rq->queues + rq->highq;
+	circle_queue_t  queue = rq->queues + rq->highq;
 	int             pri   = rq->highq;
 	int             count = rq->count;
 	thread_t        thread;
 
 	while (count > 0) {
-		thread = (thread_t)(uintptr_t)queue_first(queue);
-		while (!queue_end(queue, (queue_entry_t)thread)) {
+		cqe_foreach_element_safe(thread, queue, runq_links) {
 			if (thread->bound_processor == PROCESSOR_NULL) {
-				remqueue((queue_entry_t)thread);
+				circle_dequeue(queue, &thread->runq_links);
 
 				thread->runq = PROCESSOR_NULL;
 				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
@@ -629,22 +667,27 @@ sched_traditional_steal_processor_thread(processor_t processor)
 				if (SCHED(priority_is_urgent)(pri)) {
 					rq->urgency--; assert(rq->urgency >= 0);
 				}
-				if (queue_empty(queue)) {
+				if (circle_queue_empty(queue)) {
 					bitmap_clear(rq->bitmap, pri);
 					rq->highq = bitmap_first(rq->bitmap, NRQS);
 				}
 
-				return (thread);
+				return thread;
 			}
 			count--;
-
-			thread = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);
 		}
 
 		queue--; pri--;
 	}
 
-	return (THREAD_NULL);
+	return THREAD_NULL;
+}
+
+static bool
+sched_traditional_steal_thread_enabled(processor_set_t pset)
+{
+	(void)pset;
+	return true;
 }
 
 /*
@@ -665,21 +708,18 @@ sched_traditional_steal_thread(processor_set_t pset)
 	thread_t        thread;
 
 	do {
-		processor = (processor_t)(uintptr_t)queue_first(&cset->active_queue);
-		while (!queue_end(&cset->active_queue, (queue_entry_t)processor)) {
+		uint64_t active_map = (pset->cpu_state_map[PROCESSOR_RUNNING] |
+		    pset->cpu_state_map[PROCESSOR_DISPATCHING]);
+		for (int cpuid = lsb_first(active_map); cpuid >= 0; cpuid = lsb_next(active_map, cpuid)) {
+			processor = processor_array[cpuid];
 			if (runq_for_processor(processor)->count > 0) {
 				thread = sched_traditional_steal_processor_thread(processor);
 				if (thread != THREAD_NULL) {
-					remqueue((queue_entry_t)processor);
-					enqueue_tail(&cset->active_queue, (queue_entry_t)processor);
-
 					pset_unlock(cset);
 
-					return (thread);
+					return thread;
 				}
 			}
-
-			processor = (processor_t)(uintptr_t)queue_next((queue_entry_t)processor);
 		}
 
 		nset = next_pset(cset);
@@ -694,7 +734,7 @@ sched_traditional_steal_thread(processor_set_t pset)
 
 	pset_unlock(cset);
 
-	return (THREAD_NULL);
+	return THREAD_NULL;
 }
 
 static void
@@ -722,8 +762,9 @@ sched_traditional_thread_update_scan(sched_update_scan_context_t scan_context)
 			pset_unlock(pset);
 			splx(s);
 
-			if (restart_needed)
+			if (restart_needed) {
 				break;
+			}
 
 			thread = processor->idle_thread;
 			if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
@@ -738,4 +779,3 @@ sched_traditional_thread_update_scan(sched_update_scan_context_t scan_context)
 		thread_update_process_threads();
 	} while (restart_needed);
 }
-
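
One pattern worth illustrating from the dispatch-table hunks: .steal_thread_enabled changes from the constant TRUE to a callback that receives the processor_set_t, so each scheduler flavor can decide per processor set at runtime (sched_traditional_steal_thread_enabled above simply returns true). A minimal user-space sketch of the same idea follows; every type and name in it is an illustrative stand-in, not the XNU definition:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for processor_set_t and the dispatch table. */
struct pset {
	int online_cpus;
};

struct sched_table {
	/* Was: a static yes/no flag.                                     */
	/* Now: a predicate, so the answer can depend on the pset state.  */
	bool (*steal_thread_enabled)(struct pset *pset);
};

static bool
always_steal(struct pset *pset)
{
	(void)pset;   /* mirrors sched_traditional_steal_thread_enabled */
	return true;
}

static bool
steal_if_multicore(struct pset *pset)
{
	return pset->online_cpus > 1;   /* a per-pset policy becomes possible */
}

int
main(void)
{
	struct pset uni = { .online_cpus = 1 };
	struct sched_table a = { .steal_thread_enabled = always_steal };
	struct sched_table b = { .steal_thread_enabled = steal_if_multicore };

	printf("%d %d\n", a.steal_thread_enabled(&uni), b.steal_thread_enabled(&uni));
	return 0;
}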
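A second recurring change: every open-coded queue_first()/queue_end()/queue_next() walk, which had to capture the next element by hand (thread = next) before unlinking, becomes cqe_foreach_element_safe() over a circle_queue_t paired with circle_dequeue(). The point of the _safe form is that the current element may be removed mid-iteration. Below is a self-contained sketch of that idiom; the toy qlink list and Q_FOREACH_SAFE macro are written here for illustration and stand in for the real kern/circle_queue.h machinery:

#include <stddef.h>
#include <stdio.h>

/* A tiny circular doubly-linked list, illustrative only. */
struct qlink {
	struct qlink *next, *prev;
};

static void
q_init(struct qlink *head)
{
	head->next = head->prev = head;
}

static void
q_insert_tail(struct qlink *head, struct qlink *e)
{
	e->prev = head->prev;
	e->next = head;
	head->prev->next = e;
	head->prev = e;
}

static void
q_remove(struct qlink *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	e->next = e->prev = NULL;
}

/*
 * The "safe" idiom: capture ->next before the body runs, so the body
 * may q_remove(pos) without derailing the walk -- the same guarantee
 * cqe_foreach_element_safe() gives the run-queue loops in the diff.
 */
#define Q_FOREACH_SAFE(pos, tmp, head)                          \
	for ((pos) = (head)->next, (tmp) = (pos)->next;         \
	    (pos) != (head);                                    \
	    (pos) = (tmp), (tmp) = (pos)->next)

struct item {
	struct qlink link;
	int pri;
};

int
main(void)
{
	struct qlink runq;
	struct item items[4];
	struct qlink *pos, *tmp;

	q_init(&runq);
	for (int i = 0; i < 4; i++) {
		items[i].pri = i;
		q_insert_tail(&runq, &items[i].link);
	}

	/* Dequeue the even-priority items while iterating: still safe. */
	Q_FOREACH_SAFE(pos, tmp, &runq) {
		struct item *it = (struct item *)((char *)pos - offsetof(struct item, link));
		if (it->pri % 2 == 0) {
			q_remove(pos);
		}
	}

	Q_FOREACH_SAFE(pos, tmp, &runq) {
		struct item *it = (struct item *)((char *)pos - offsetof(struct item, link));
		printf("remaining pri %d\n", it->pri);
	}
	return 0;
}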
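Finally, sched_traditional_steal_thread() stops walking a linked active_queue of processors and instead ORs the pset->cpu_state_map[] words for the RUNNING and DISPATCHING states into one bitmap, visiting each set bit with lsb_first()/lsb_next() and indexing processor_array[] by CPU id. A freestanding sketch of that bit-scan walk follows; lsb_first()/lsb_next() are reimplemented here with compiler builtins on the assumption that they reduce to find-first-set, and the state-map contents are mocked:

#include <stdint.h>
#include <stdio.h>

/* Find-first-set helpers in the spirit of XNU's lsb_first()/lsb_next(). */
static int
lsb_first(uint64_t bitmap)
{
	return bitmap ? __builtin_ctzll(bitmap) : -1;
}

static int
lsb_next(uint64_t bitmap, int prev)
{
	/* clear bit `prev` and everything below it, then first-set again */
	uint64_t remaining = bitmap & (~1ULL << prev);
	return remaining ? __builtin_ctzll(remaining) : -1;
}

enum { PROCESSOR_RUNNING, PROCESSOR_DISPATCHING, PROCESSOR_STATES };

int
main(void)
{
	/* mocked per-state CPU bitmaps: one bit per cpuid, as in the diff */
	uint64_t cpu_state_map[PROCESSOR_STATES] = {
		[PROCESSOR_RUNNING]     = 0x0000000000000015ULL, /* cpus 0, 2, 4 */
		[PROCESSOR_DISPATCHING] = 0x0000000000000040ULL, /* cpu 6        */
	};

	uint64_t active_map = cpu_state_map[PROCESSOR_RUNNING] |
	    cpu_state_map[PROCESSOR_DISPATCHING];

	/* visits each active cpu in ascending id order: 0, 2, 4, 6 */
	for (int cpuid = lsb_first(active_map); cpuid >= 0;
	    cpuid = lsb_next(active_map, cpuid)) {
		printf("would examine processor_array[%d]\n", cpuid);
	}
	return 0;
}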