X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/fe8ab488e9161c46dd9885d58fc52996dc0249ff..813fb2f63a553c957e917ede5f119b021d6ce391:/osfmk/kern/sched_dualq.c

diff --git a/osfmk/kern/sched_dualq.c b/osfmk/kern/sched_dualq.c
index 628ee743e..48ff5a038 100644
--- a/osfmk/kern/sched_dualq.c
+++ b/osfmk/kern/sched_dualq.c
@@ -53,7 +53,7 @@ static thread_t
 sched_dualq_steal_thread(processor_set_t pset);
 
 static void
-sched_dualq_thread_update_scan(void);
+sched_dualq_thread_update_scan(sched_update_scan_context_t scan_context);
 
 static boolean_t
 sched_dualq_processor_enqueue(processor_t processor, thread_t thread, integer_t options);
@@ -94,18 +94,17 @@ sched_dualq_processor_queue_shutdown(processor_t processor);
 static sched_mode_t
 sched_dualq_initial_thread_sched_mode(task_t parent_task);
 
-static boolean_t
-sched_dualq_should_current_thread_rechoose_processor(processor_t processor);
-
 const struct sched_dispatch_table sched_dualq_dispatch = {
+	.sched_name = "dualq",
 	.init = sched_dualq_init,
-	.timebase_init = sched_traditional_timebase_init,
+	.timebase_init = sched_timeshare_timebase_init,
 	.processor_init = sched_dualq_processor_init,
 	.pset_init = sched_dualq_pset_init,
-	.maintenance_continuation = sched_traditional_maintenance_continue,
+	.maintenance_continuation = sched_timeshare_maintenance_continue,
 	.choose_thread = sched_dualq_choose_thread,
+	.steal_thread_enabled = TRUE,
 	.steal_thread = sched_dualq_steal_thread,
-	.compute_priority = compute_priority,
+	.compute_timeshare_priority = sched_compute_timeshare_priority,
 	.choose_processor = choose_processor,
 	.processor_enqueue = sched_dualq_processor_enqueue,
 	.processor_queue_shutdown = sched_dualq_processor_queue_shutdown,
@@ -114,24 +113,19 @@ const struct sched_dispatch_table sched_dualq_dispatch = {
 	.priority_is_urgent = priority_is_urgent,
 	.processor_csw_check = sched_dualq_processor_csw_check,
 	.processor_queue_has_priority = sched_dualq_processor_queue_has_priority,
-	.initial_quantum_size = sched_traditional_initial_quantum_size,
+	.initial_quantum_size = sched_timeshare_initial_quantum_size,
 	.initial_thread_sched_mode = sched_dualq_initial_thread_sched_mode,
 	.can_update_priority = can_update_priority,
 	.update_priority = update_priority,
 	.lightweight_update_priority = lightweight_update_priority,
-	.quantum_expire = sched_traditional_quantum_expire,
-	.should_current_thread_rechoose_processor = sched_dualq_should_current_thread_rechoose_processor,
+	.quantum_expire = sched_default_quantum_expire,
 	.processor_runq_count = sched_dualq_runq_count,
 	.processor_runq_stats_count_sum = sched_dualq_runq_stats_count_sum,
-	.fairshare_init = sched_traditional_fairshare_init,
-	.fairshare_runq_count = sched_traditional_fairshare_runq_count,
-	.fairshare_runq_stats_count_sum = sched_traditional_fairshare_runq_stats_count_sum,
-	.fairshare_enqueue = sched_traditional_fairshare_enqueue,
-	.fairshare_dequeue = sched_traditional_fairshare_dequeue,
-	.fairshare_queue_remove = sched_traditional_fairshare_queue_remove,
 	.processor_bound_count = sched_dualq_processor_bound_count,
 	.thread_update_scan = sched_dualq_thread_update_scan,
 	.direct_dispatch_to_idle_processors = FALSE,
+	.multiple_psets_enabled = TRUE,
+	.sched_groups_enabled = FALSE,
 };
 
 __attribute__((always_inline))
@@ -181,7 +175,7 @@ sched_dualq_pset_init(processor_set_t pset)
 static void
 sched_dualq_init(void)
 {
-	sched_traditional_init();
+	sched_timeshare_init();
 }
 
 static thread_t
@@ -250,7 +244,7 @@ sched_dualq_processor_csw_check(processor_t processor)
 
 	pri = MAX(main_runq->highq, bound_runq->highq);
 
-	if (first_timeslice(processor)) {
+	if (processor->first_timeslice) {
 		has_higher = (pri > processor->current_pri);
 	} else {
 		has_higher = (pri >= processor->current_pri);
@@ -263,9 +257,6 @@ sched_dualq_processor_csw_check(processor_t processor)
 		if (bound_runq->urgency > 0)
 			return (AST_PREEMPT | AST_URGENT);
 
-		if (processor->active_thread && thread_eager_preemption(processor->active_thread))
-			return (AST_PREEMPT | AST_URGENT);
-
 		return AST_PREEMPT;
 	}
 
@@ -277,7 +268,13 @@ sched_dualq_processor_queue_has_priority(processor_t processor,
                                          int priority,
                                          boolean_t gte)
 {
-	int qpri = MAX(dualq_main_runq(processor)->highq, dualq_bound_runq(processor)->highq);
+	run_queue_t main_runq = dualq_main_runq(processor);
+	run_queue_t bound_runq = dualq_bound_runq(processor);
+
+	if (main_runq->count == 0 && bound_runq->count == 0)
+		return FALSE;
+
+	int qpri = MAX(main_runq->highq, bound_runq->highq);
 
 	if (gte)
 		return qpri >= priority;
@@ -285,12 +282,6 @@ sched_dualq_processor_queue_has_priority(processor_t processor,
 		return qpri > priority;
 }
 
-static boolean_t
-sched_dualq_should_current_thread_rechoose_processor(processor_t processor)
-{
-	return (processor->current_pri < BASEPRI_RTQUEUES && processor->processor_primary != processor);
-}
-
 static int
 sched_dualq_runq_count(processor_t processor)
 {
@@ -331,12 +322,15 @@ sched_dualq_processor_queue_shutdown(processor_t processor)
 
 	while (rq->count > 0) {
 		thread = run_queue_dequeue(rq, SCHED_HEADQ);
-		enqueue_tail(&tqueue, (queue_entry_t)thread);
+		enqueue_tail(&tqueue, &thread->runq_links);
 	}
 
 	pset_unlock(pset);
 
-	while ((thread = (thread_t)(void*)dequeue_head(&tqueue)) != THREAD_NULL) {
+	qe_foreach_element_safe(thread, &tqueue, runq_links) {
+
+		remqueue(&thread->runq_links);
+
 		thread_lock(thread);
 
 		thread_setrun(thread, SCHED_TAILQ);
@@ -407,7 +401,7 @@ sched_dualq_steal_thread(processor_set_t pset)
 }
 
 static void
-sched_dualq_thread_update_scan(void)
+sched_dualq_thread_update_scan(sched_update_scan_context_t scan_context)
 {
 	boolean_t restart_needed = FALSE;
 	processor_t processor = processor_list;
@@ -427,7 +421,7 @@ sched_dualq_thread_update_scan(void)
 		s = splsched();
 		pset_lock(pset);
 
-		restart_needed = runq_scan(dualq_bound_runq(processor));
+		restart_needed = runq_scan(dualq_bound_runq(processor), scan_context);
 
 		pset_unlock(pset);
 		splx(s);
@@ -456,7 +450,7 @@ sched_dualq_thread_update_scan(void)
 		s = splsched();
 		pset_lock(pset);
 
-		restart_needed = runq_scan(&pset->pset_runq);
+		restart_needed = runq_scan(&pset->pset_runq, scan_context);
 
 		pset_unlock(pset);
 		splx(s);
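For context on the preemption-check hunks at -250,7 and -263,9 above, here is a minimal stand-alone C sketch of the decision they touch. The struct layouts, the csw_check_model name and the AST_* values are simplified stand-ins invented for illustration, not the XNU definitions; the sketch only models how the higher of the two run-queue top priorities is compared against the running thread's priority (strictly greater while it is still in its first timeslice, greater-or-equal afterwards) and how queue urgency escalates the preemption to urgent.

/*
 * Stand-alone model of the dual-queue preemption check; the types and
 * AST_* values below are simplified stand-ins, not the XNU kernel types.
 */
#include <stdbool.h>
#include <stdio.h>

#define AST_NONE    0x00
#define AST_PREEMPT 0x01
#define AST_URGENT  0x02

struct run_queue { int highq; int urgency; };   /* top priority + count of urgent threads */
struct processor { int current_pri; bool first_timeslice; };

static int
csw_check_model(const struct processor *p,
                const struct run_queue *main_rq,
                const struct run_queue *bound_rq)
{
	/* Consider the better of the shared (main) and processor-bound queues. */
	int pri = main_rq->highq > bound_rq->highq ? main_rq->highq : bound_rq->highq;
	bool has_higher;

	/* A thread still in its first quantum is preempted only by strictly higher priority. */
	if (p->first_timeslice)
		has_higher = (pri > p->current_pri);
	else
		has_higher = (pri >= p->current_pri);

	if (has_higher) {
		/* An urgent waiter in either queue upgrades the preemption to urgent. */
		if (main_rq->urgency > 0 || bound_rq->urgency > 0)
			return AST_PREEMPT | AST_URGENT;
		return AST_PREEMPT;
	}
	return AST_NONE;
}

int
main(void)
{
	struct run_queue main_rq  = { .highq = 31, .urgency = 0 };
	struct run_queue bound_rq = { .highq = 80, .urgency = 1 };
	struct processor proc     = { .current_pri = 47, .first_timeslice = true };

	printf("ast = 0x%x\n", csw_check_model(&proc, &main_rq, &bound_rq));
	return 0;
}

With the sample values the program prints ast = 0x3, since the bound queue holds an urgent thread above the running thread's priority, mirroring the (AST_PREEMPT | AST_URGENT) paths in the diff.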