#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
-#include <kern/lock.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
-#include <kern/wait_queue.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
static thread_t
sched_grrr_choose_thread(processor_t processor,
- int priority);
+ int priority,
+ ast_t reason);
static thread_t
sched_grrr_steal_thread(processor_set_t pset);
-static void
-sched_grrr_compute_priority(thread_t thread,
- boolean_t override_depress);
+static int
+sched_grrr_compute_priority(thread_t thread);
static processor_t
sched_grrr_choose_processor( processor_set_t pset,
                             processor_t processor,
                             thread_t thread);
static sched_mode_t
sched_grrr_initial_thread_sched_mode(task_t parent_task);
-static boolean_t
-sched_grrr_supports_timeshare_mode(void);
-
static boolean_t
sched_grrr_can_update_priority(thread_t thread);
static void
sched_grrr_lightweight_update_priority(thread_t thread);
-static void
-sched_grrr_quantum_expire(thread_t thread);
-
-static boolean_t
-sched_grrr_should_current_thread_rechoose_processor(processor_t processor);
-
static int
sched_grrr_processor_runq_count(processor_t processor);
static uint64_t
sched_grrr_processor_runq_stats_count_sum(processor_t processor);
+static int
+sched_grrr_processor_bound_count(processor_t processor);
+
+static void
+sched_grrr_thread_update_scan(sched_update_scan_context_t scan_context);
+
const struct sched_dispatch_table sched_grrr_dispatch = {
- sched_grrr_init,
- sched_grrr_timebase_init,
- sched_grrr_processor_init,
- sched_grrr_pset_init,
- sched_grrr_maintenance_continuation,
- sched_grrr_choose_thread,
- sched_grrr_steal_thread,
- sched_grrr_compute_priority,
- sched_grrr_choose_processor,
- sched_grrr_processor_enqueue,
- sched_grrr_processor_queue_shutdown,
- sched_grrr_processor_queue_remove,
- sched_grrr_processor_queue_empty,
- sched_grrr_priority_is_urgent,
- sched_grrr_processor_csw_check,
- sched_grrr_processor_queue_has_priority,
- sched_grrr_initial_quantum_size,
- sched_grrr_initial_thread_sched_mode,
- sched_grrr_supports_timeshare_mode,
- sched_grrr_can_update_priority,
- sched_grrr_update_priority,
- sched_grrr_lightweight_update_priority,
- sched_grrr_quantum_expire,
- sched_grrr_should_current_thread_rechoose_processor,
- sched_grrr_processor_runq_count,
- sched_grrr_processor_runq_stats_count_sum,
- sched_grrr_fairshare_init,
- sched_grrr_fairshare_runq_count,
- sched_grrr_fairshare_runq_stats_count_sum,
- sched_grrr_fairshare_enqueue,
- sched_grrr_fairshare_dequeue,
- sched_grrr_fairshare_queue_remove,
- TRUE /* direct_dispatch_to_idle_processors */
+ .sched_name = "grrr",
+ .init = sched_grrr_init,
+ .timebase_init = sched_grrr_timebase_init,
+ .processor_init = sched_grrr_processor_init,
+ .pset_init = sched_grrr_pset_init,
+ .maintenance_continuation = sched_grrr_maintenance_continuation,
+ .choose_thread = sched_grrr_choose_thread,
+ .steal_thread_enabled = FALSE,
+ .steal_thread = sched_grrr_steal_thread,
+ .compute_timeshare_priority = sched_grrr_compute_priority,
+ .choose_processor = sched_grrr_choose_processor,
+ .processor_enqueue = sched_grrr_processor_enqueue,
+ .processor_queue_shutdown = sched_grrr_processor_queue_shutdown,
+ .processor_queue_remove = sched_grrr_processor_queue_remove,
+ .processor_queue_empty = sched_grrr_processor_queue_empty,
+ .priority_is_urgent = sched_grrr_priority_is_urgent,
+ .processor_csw_check = sched_grrr_processor_csw_check,
+ .processor_queue_has_priority = sched_grrr_processor_queue_has_priority,
+ .initial_quantum_size = sched_grrr_initial_quantum_size,
+ .initial_thread_sched_mode = sched_grrr_initial_thread_sched_mode,
+ .can_update_priority = sched_grrr_can_update_priority,
+ .update_priority = sched_grrr_update_priority,
+ .lightweight_update_priority = sched_grrr_lightweight_update_priority,
+ .quantum_expire = sched_default_quantum_expire,
+ .processor_runq_count = sched_grrr_processor_runq_count,
+ .processor_runq_stats_count_sum = sched_grrr_processor_runq_stats_count_sum,
+ .processor_bound_count = sched_grrr_processor_bound_count,
+ .thread_update_scan = sched_grrr_thread_update_scan,
+ .direct_dispatch_to_idle_processors = TRUE,
+ .multiple_psets_enabled = TRUE,
+ .sched_groups_enabled = FALSE,
};
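/*
 * For reference: these entry points are reached through xnu's SCHED()
 * dispatch macro rather than called directly. A minimal sketch,
 * assuming the usual definition:
 *
 *     #define SCHED(f) (sched_current_dispatch->f)
 *
 *     thread = SCHED(choose_thread)(processor, MINPRI, AST_NONE);
 *
 * so each initializer above must match the prototype of its slot in
 * struct sched_dispatch_table.
 */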
extern int max_unsafe_quanta;
/*
* Compute various averages.
*/
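/*
 * compute_averages() now takes a count of elapsed scheduler-tick
 * periods (assumed semantics; see its definition in sched_average.c),
 * letting the load averages catch up after stalls. This path runs once
 * per period, hence the constant 1 below.
 */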
- compute_averages();
+ compute_averages(1);
if (sched_grrr_tick_deadline == 0)
sched_grrr_tick_deadline = abstime;
static thread_t
sched_grrr_choose_thread(processor_t processor,
- int priority __unused)
+ int priority __unused,
+ ast_t reason __unused)
{
grrr_run_queue_t rq = &processor->grrr_runq;
}
-static void
-sched_grrr_compute_priority(thread_t thread,
- boolean_t override_depress __unused)
+static int
+sched_grrr_compute_priority(thread_t thread)
{
- set_sched_pri(thread, thread->priority);
+ return thread->base_pri;
}
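/*
 * Note the interface change: the old hook applied the result itself
 * via set_sched_pri(), while compute_timeshare_priority is a pure
 * function whose caller (thread_recompute_sched_pri() and friends,
 * roughly) applies the returned value. GRRR does no timeshare decay,
 * so it simply reports the thread's base priority.
 */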
static processor_t
queue_init(&tqueue);
queue_init(&bqueue);
- while ((thread = sched_grrr_choose_thread(processor, IDLEPRI)) != THREAD_NULL) {
+ while ((thread = sched_grrr_choose_thread(processor, IDLEPRI, AST_NONE)) != THREAD_NULL) {
if (thread->bound_processor == PROCESSOR_NULL) {
enqueue_tail(&tqueue, (queue_entry_t)thread);
} else {
}
}
- while ((thread = (thread_t)dequeue_head(&bqueue)) != THREAD_NULL) {
+ while ((thread = (thread_t)(void *)dequeue_head(&bqueue)) != THREAD_NULL) {
sched_grrr_processor_enqueue(processor, thread, SCHED_TAILQ);
}
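/*
 * The (thread_t)(void *) double casts above and below only exist to
 * convert the queue_entry_t returned by dequeue_head() up to thread_t
 * without tripping cast-alignment warnings.
 */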
pset_unlock(pset);
- while ((thread = (thread_t)dequeue_head(&tqueue)) != THREAD_NULL) {
+ while ((thread = (thread_t)(void *)dequeue_head(&tqueue)) != THREAD_NULL) {
thread_lock(thread);
thread_setrun(thread, SCHED_TAILQ);
return TH_MODE_TIMESHARE;
}
-static boolean_t
-sched_grrr_supports_timeshare_mode(void)
-{
- return TRUE;
-}
-
static boolean_t
sched_grrr_can_update_priority(thread_t thread __unused)
{
return;
}
-static void
-sched_grrr_quantum_expire(
- thread_t thread __unused)
+static int
+sched_grrr_processor_runq_count(processor_t processor)
{
+ return processor->grrr_runq.count;
}
-
-static boolean_t
-sched_grrr_should_current_thread_rechoose_processor(processor_t processor __unused)
+static uint64_t
+sched_grrr_processor_runq_stats_count_sum(processor_t processor)
{
- return (TRUE);
+ return processor->grrr_runq.runq_stats.count_sum;
}
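/*
 * GRRR keeps no per-processor count of bound threads, so the new
 * processor_bound_count callback below simply reports zero.
 */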
static int
-sched_grrr_processor_runq_count(processor_t processor)
+sched_grrr_processor_bound_count(__unused processor_t processor)
{
- return processor->grrr_runq.count;
+ return 0;
}
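/*
 * GRRR has no timeshare priority decay, so the new thread_update_scan
 * callback below is an intentionally empty stub.
 */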
-static uint64_t
-sched_grrr_processor_runq_stats_count_sum(processor_t processor)
+static void
+sched_grrr_thread_update_scan(__unused sched_update_scan_context_t scan_context)
{
- return processor->grrr_runq.runq_stats.count_sum;
+
}
#endif /* defined(CONFIG_SCHED_GRRR) */
thread = group->current_client;
if (thread == THREAD_NULL) {
- thread = (thread_t)queue_first(&group->clients);
+ thread = (thread_t)(void *)queue_first(&group->clients);
}
if (1 /* deficit */) {
- group->current_client = (thread_t)queue_next((queue_entry_t)thread);
+ group->current_client = (thread_t)(void *)queue_next((queue_entry_t)thread);
if (queue_end(&group->clients, (queue_entry_t)group->current_client)) {
- group->current_client = (thread_t)queue_first(&group->clients);
+ group->current_client = (thread_t)(void *)queue_first(&group->clients);
}
thread = group->current_client;
}
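/*
 * Rotation sketch: current_client advances one client per pass and
 * wraps back to queue_first() when it walks off the end of the list,
 * giving round-robin selection within the group; the always-true
 * guard above reads as a placeholder for a future deficit check.
 */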
#endif /* defined(CONFIG_SCHED_GRRR_CORE) */
-
-#if defined(CONFIG_SCHED_GRRR) || defined(CONFIG_SCHED_FIXEDPRIORITY)
-
-static struct grrr_run_queue fs_grrr_runq;
-#define FS_GRRR_RUNQ ((processor_t)-2)
-decl_simple_lock_data(static,fs_grrr_lock);
-
-void
-sched_grrr_fairshare_init(void)
-{
- grrr_priority_mapping_init();
-
- simple_lock_init(&fs_grrr_lock, 0);
- grrr_runqueue_init(&fs_grrr_runq);
-}
-
-
-int
-sched_grrr_fairshare_runq_count(void)
-{
- return fs_grrr_runq.count;
-}
-
-uint64_t
-sched_grrr_fairshare_runq_stats_count_sum(void)
-{
- return fs_grrr_runq.runq_stats.count_sum;
-}
-
-void
-sched_grrr_fairshare_enqueue(thread_t thread)
-{
- simple_lock(&fs_grrr_lock);
-
- (void)grrr_enqueue(&fs_grrr_runq, thread);
-
- thread->runq = FS_GRRR_RUNQ;
-
- simple_unlock(&fs_grrr_lock);
-}
-
-thread_t sched_grrr_fairshare_dequeue(void)
-{
- thread_t thread;
-
- simple_lock(&fs_grrr_lock);
- if (fs_grrr_runq.count > 0) {
- thread = grrr_select(&fs_grrr_runq);
-
- simple_unlock(&fs_grrr_lock);
-
- return (thread);
- }
- simple_unlock(&fs_grrr_lock);
-
- return THREAD_NULL;
-}
-
-boolean_t sched_grrr_fairshare_queue_remove(thread_t thread)
-{
-
- simple_lock(&fs_grrr_lock);
-
- if (FS_GRRR_RUNQ == thread->runq) {
- grrr_remove(&fs_grrr_runq, thread);
-
- simple_unlock(&fs_grrr_lock);
- return (TRUE);
- }
- else {
- /*
- * The thread left the run queue before we could
- * lock the run queue.
- */
- assert(thread->runq == PROCESSOR_NULL);
- simple_unlock(&fs_grrr_lock);
- return (FALSE);
- }
-}
-
-#endif /* defined(CONFIG_SCHED_GRRR) || defined(CONFIG_SCHED_FIXEDPRIORITY) */
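/*
 * The global fairshare run queue and its accessors go away wholesale
 * here; the dispatch table above no longer carries fairshare slots, so
 * this block has no remaining callers.
 */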