diff --git a/osfmk/kern/sched_grrr.c b/osfmk/kern/sched_grrr.c
index 5774cc7b195d50dad76a8041341f7e41360a1ce2..8b70499e8f10422b384f164a2ccc5a61387def37 100644
--- a/osfmk/kern/sched_grrr.c
+++ b/osfmk/kern/sched_grrr.c
@@ -52,7 +52,6 @@
 #include <kern/syscall_subr.h>
 #include <kern/task.h>
 #include <kern/thread.h>
-#include <kern/wait_queue.h>
 
 #include <vm/pmap.h>
 #include <vm/vm_kern.h>
@@ -127,9 +126,8 @@ sched_grrr_choose_thread(processor_t                processor,
 static thread_t
 sched_grrr_steal_thread(processor_set_t                pset);
 
-static void
-sched_grrr_compute_priority(thread_t   thread,
-                                                        boolean_t                      override_depress);
+static int
+sched_grrr_compute_priority(thread_t thread);
 
 static processor_t
 sched_grrr_choose_processor(   processor_set_t         pset,
@@ -180,12 +178,6 @@ sched_grrr_update_priority(thread_t        thread);
 static void
 sched_grrr_lightweight_update_priority(thread_t        thread);
 
-static void
-sched_grrr_quantum_expire(thread_t     thread);
-
-static boolean_t
-sched_grrr_should_current_thread_rechoose_processor(processor_t                        processor);
-
 static int
 sched_grrr_processor_runq_count(processor_t    processor);
 
@@ -196,17 +188,19 @@ static int
 sched_grrr_processor_bound_count(processor_t   processor);
 
 static void
-sched_grrr_thread_update_scan(void);
+sched_grrr_thread_update_scan(sched_update_scan_context_t scan_context);
 
 const struct sched_dispatch_table sched_grrr_dispatch = {
+       .sched_name                                     = "grrr",
        .init                                           = sched_grrr_init,
        .timebase_init                                  = sched_grrr_timebase_init,
        .processor_init                                 = sched_grrr_processor_init,
        .pset_init                                      = sched_grrr_pset_init,
        .maintenance_continuation                       = sched_grrr_maintenance_continuation,
        .choose_thread                                  = sched_grrr_choose_thread,
+       .steal_thread_enabled                           = FALSE,
        .steal_thread                                   = sched_grrr_steal_thread,
-       .compute_priority                               = sched_grrr_compute_priority,
+       .compute_timeshare_priority                     = sched_grrr_compute_priority,
        .choose_processor                               = sched_grrr_choose_processor,
        .processor_enqueue                              = sched_grrr_processor_enqueue,
        .processor_queue_shutdown                       = sched_grrr_processor_queue_shutdown,
@@ -220,19 +214,14 @@ const struct sched_dispatch_table sched_grrr_dispatch = {
        .can_update_priority                            = sched_grrr_can_update_priority,
        .update_priority                                = sched_grrr_update_priority,
        .lightweight_update_priority                    = sched_grrr_lightweight_update_priority,
-       .quantum_expire                                 = sched_grrr_quantum_expire,
-       .should_current_thread_rechoose_processor       = sched_grrr_should_current_thread_rechoose_processor,
+       .quantum_expire                                 = sched_default_quantum_expire,
        .processor_runq_count                           = sched_grrr_processor_runq_count,
        .processor_runq_stats_count_sum                 = sched_grrr_processor_runq_stats_count_sum,
-       .fairshare_init                                 = sched_grrr_fairshare_init,
-       .fairshare_runq_count                           = sched_grrr_fairshare_runq_count,
-       .fairshare_runq_stats_count_sum                 = sched_grrr_fairshare_runq_stats_count_sum,
-       .fairshare_enqueue                              = sched_grrr_fairshare_enqueue,
-       .fairshare_dequeue                              = sched_grrr_fairshare_dequeue,
-       .fairshare_queue_remove                         = sched_grrr_fairshare_queue_remove,
        .processor_bound_count                          = sched_grrr_processor_bound_count,
        .thread_update_scan                             = sched_grrr_thread_update_scan,
        .direct_dispatch_to_idle_processors             = TRUE,
+       .multiple_psets_enabled                         = TRUE,
+       .sched_groups_enabled                           = FALSE,
 };
 
 extern int     max_unsafe_quanta;
@@ -328,11 +317,10 @@ sched_grrr_steal_thread(processor_set_t           pset)
        
 }
 
-static void
-sched_grrr_compute_priority(thread_t   thread,
-                                                        boolean_t                      override_depress __unused)
+static int
+sched_grrr_compute_priority(thread_t thread)
 {
-       set_sched_pri(thread, thread->priority);
+       return thread->base_pri;
 }
 
 static processor_t
@@ -516,19 +504,6 @@ sched_grrr_lightweight_update_priority(thread_t    thread __unused)
        return;
 }
 
-static void
-sched_grrr_quantum_expire(
-                                                 thread_t      thread __unused)
-{
-}
-
-
-static boolean_t
-sched_grrr_should_current_thread_rechoose_processor(processor_t                        processor __unused)
-{
-       return (TRUE);
-}
-
 static int
 sched_grrr_processor_runq_count(processor_t    processor)
 {
@@ -548,7 +523,7 @@ sched_grrr_processor_bound_count(__unused processor_t       processor)
 }
 
 static void
-sched_grrr_thread_update_scan(void)
+sched_grrr_thread_update_scan(__unused sched_update_scan_context_t scan_context)
 {
 
 }
@@ -883,84 +858,3 @@ grrr_sorted_list_insert_group(grrr_run_queue_t rq,
 }
 
 #endif /* defined(CONFIG_SCHED_GRRR_CORE) */
-
-#if defined(CONFIG_SCHED_GRRR)
-
-static struct grrr_run_queue   fs_grrr_runq;
-#define FS_GRRR_RUNQ           ((processor_t)-2)
-decl_simple_lock_data(static,fs_grrr_lock);
-
-void
-sched_grrr_fairshare_init(void)
-{
-       grrr_priority_mapping_init();
-       
-       simple_lock_init(&fs_grrr_lock, 0);
-       grrr_runqueue_init(&fs_grrr_runq);
-}
-
-
-int
-sched_grrr_fairshare_runq_count(void)
-{
-       return fs_grrr_runq.count;
-}
-
-uint64_t
-sched_grrr_fairshare_runq_stats_count_sum(void)
-{
-       return fs_grrr_runq.runq_stats.count_sum;
-}
-
-void
-sched_grrr_fairshare_enqueue(thread_t thread)
-{
-       simple_lock(&fs_grrr_lock);
-       
-       (void)grrr_enqueue(&fs_grrr_runq, thread);
-
-       thread->runq = FS_GRRR_RUNQ;
-
-       simple_unlock(&fs_grrr_lock);   
-}
-
-thread_t       sched_grrr_fairshare_dequeue(void)
-{
-       thread_t thread;
-       
-       simple_lock(&fs_grrr_lock);
-       if (fs_grrr_runq.count > 0) {
-               thread = grrr_select(&fs_grrr_runq);
-               
-               simple_unlock(&fs_grrr_lock);
-               
-               return (thread);
-       }
-       simple_unlock(&fs_grrr_lock);           
-       
-       return THREAD_NULL;
-}
-
-boolean_t      sched_grrr_fairshare_queue_remove(thread_t thread)
-{
-       
-       simple_lock(&fs_grrr_lock);
-       
-       if (FS_GRRR_RUNQ == thread->runq) {
-               grrr_remove(&fs_grrr_runq, thread);
-               
-               simple_unlock(&fs_grrr_lock);
-               return (TRUE);
-       }
-       else {
-               /*
-                *      The thread left the run queue before we could
-                *      lock the run queue.
-                */
-               assert(thread->runq == PROCESSOR_NULL);
-               simple_unlock(&fs_grrr_lock);
-               return (FALSE);
-       }       
-}
-
-#endif /* defined(CONFIG_SCHED_GRRR) */
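
Note on the dispatch-table change above: the old compute_priority hook applied the priority itself via set_sched_pri(), while the new compute_timeshare_priority hook simply returns thread->base_pri and leaves the write to the caller. The standalone sketch below illustrates that calling pattern only; the demo_* types and the apply_priority() helper are hypothetical stand-ins, not the actual xnu call sites.

/*
 * Minimal sketch (not xnu source): a scheduler core consuming a dispatch
 * table whose compute_timeshare_priority hook returns the new priority
 * instead of setting it, mirroring the GRRR change in the diff above.
 */
#include <stdio.h>

struct demo_thread {
        int base_pri;   /* base priority, as read by the GRRR hook */
        int sched_pri;  /* effective scheduler priority */
};

struct demo_dispatch_table {
        int (*compute_timeshare_priority)(struct demo_thread *thread);
};

/* Mirrors the new sched_grrr_compute_priority(): report base_pri unchanged. */
static int
demo_grrr_compute_priority(struct demo_thread *thread)
{
        return thread->base_pri;
}

static const struct demo_dispatch_table demo_dispatch = {
        .compute_timeshare_priority = demo_grrr_compute_priority,
};

/* Hypothetical caller: the core, not the policy hook, writes the result back. */
static void
demo_apply_priority(const struct demo_dispatch_table *sched, struct demo_thread *thread)
{
        thread->sched_pri = sched->compute_timeshare_priority(thread);
}

int
main(void)
{
        struct demo_thread t = { .base_pri = 31, .sched_pri = 0 };

        demo_apply_priority(&demo_dispatch, &t);
        printf("sched_pri = %d\n", t.sched_pri);        /* prints 31 */
        return 0;
}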