+#endif /* CONFIG_SCHED_TIMESHARE_CORE */
+
+
+#if defined(CONFIG_SCHED_TRADITIONAL)
+
+const struct sched_dispatch_table sched_traditional_dispatch = {
+ .init = sched_traditional_init,
+ .timebase_init = sched_traditional_timebase_init,
+ .processor_init = sched_traditional_processor_init,
+ .pset_init = sched_traditional_pset_init,
+ .maintenance_continuation = sched_traditional_maintenance_continue,
+ .choose_thread = sched_traditional_choose_thread,
+ .steal_thread = steal_thread,
+ .compute_priority = compute_priority,
+ .choose_processor = choose_processor,
+ .processor_enqueue = processor_enqueue,
+ .processor_queue_shutdown = processor_queue_shutdown,
+ .processor_queue_remove = processor_queue_remove,
+ .processor_queue_empty = processor_queue_empty,
+ .priority_is_urgent = priority_is_urgent,
+ .processor_csw_check = processor_csw_check,
+ .processor_queue_has_priority = processor_queue_has_priority,
+ .initial_quantum_size = sched_traditional_initial_quantum_size,
+ .initial_thread_sched_mode = sched_traditional_initial_thread_sched_mode,
+ .can_update_priority = can_update_priority,
+ .update_priority = update_priority,
+ .lightweight_update_priority = lightweight_update_priority,
+ .quantum_expire = sched_traditional_quantum_expire,
+ .should_current_thread_rechoose_processor = should_current_thread_rechoose_processor,
+ .processor_runq_count = sched_traditional_processor_runq_count,
+ .processor_runq_stats_count_sum = sched_traditional_processor_runq_stats_count_sum,
+ .fairshare_init = sched_traditional_fairshare_init,
+ .fairshare_runq_count = sched_traditional_fairshare_runq_count,
+ .fairshare_runq_stats_count_sum = sched_traditional_fairshare_runq_stats_count_sum,
+ .fairshare_enqueue = sched_traditional_fairshare_enqueue,
+ .fairshare_dequeue = sched_traditional_fairshare_dequeue,
+ .fairshare_queue_remove = sched_traditional_fairshare_queue_remove,
+ .processor_bound_count = sched_traditional_processor_bound_count,
+ .thread_update_scan = thread_update_scan,
+ .direct_dispatch_to_idle_processors = TRUE,
+};
+
+const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch = {
+ .init = sched_traditional_with_pset_runqueue_init,
+ .timebase_init = sched_traditional_timebase_init,
+ .processor_init = sched_traditional_processor_init,
+ .pset_init = sched_traditional_pset_init,
+ .maintenance_continuation = sched_traditional_maintenance_continue,
+ .choose_thread = sched_traditional_choose_thread,
+ .steal_thread = steal_thread,
+ .compute_priority = compute_priority,
+ .choose_processor = choose_processor,
+ .processor_enqueue = processor_enqueue,
+ .processor_queue_shutdown = processor_queue_shutdown,
+ .processor_queue_remove = processor_queue_remove,
+ .processor_queue_empty = sched_traditional_with_pset_runqueue_processor_queue_empty,
+ .priority_is_urgent = priority_is_urgent,
+ .processor_csw_check = processor_csw_check,
+ .processor_queue_has_priority = processor_queue_has_priority,
+ .initial_quantum_size = sched_traditional_initial_quantum_size,
+ .initial_thread_sched_mode = sched_traditional_initial_thread_sched_mode,
+ .can_update_priority = can_update_priority,
+ .update_priority = update_priority,
+ .lightweight_update_priority = lightweight_update_priority,
+ .quantum_expire = sched_traditional_quantum_expire,
+ .should_current_thread_rechoose_processor = should_current_thread_rechoose_processor,
+ .processor_runq_count = sched_traditional_processor_runq_count,
+ .processor_runq_stats_count_sum = sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum,
+ .fairshare_init = sched_traditional_fairshare_init,
+ .fairshare_runq_count = sched_traditional_fairshare_runq_count,
+ .fairshare_runq_stats_count_sum = sched_traditional_fairshare_runq_stats_count_sum,
+ .fairshare_enqueue = sched_traditional_fairshare_enqueue,
+ .fairshare_dequeue = sched_traditional_fairshare_dequeue,
+ .fairshare_queue_remove = sched_traditional_fairshare_queue_remove,
+ .processor_bound_count = sched_traditional_processor_bound_count,
+ .thread_update_scan = thread_update_scan,
+ .direct_dispatch_to_idle_processors = FALSE,
+};
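
The pset-runqueue variant shares every hook with the base table except four: .init, .processor_queue_empty, .processor_runq_stats_count_sum, and .direct_dispatch_to_idle_processors. With runnable threads kept on a shared per-pset run queue, a newly runnable thread lands on that queue rather than being handed straight to an idle processor, hence the FALSE in the final slot.

The rest of the kernel never calls these hooks directly; every policy decision indirects through the active table via the SCHED() macro declared alongside it in osfmk/kern/sched_prim.h. A minimal sketch of the pattern — the macro matches the header, but the caller below is illustrative, not kernel code:

    /* All policy decisions route through the table selected at boot. */
    #define SCHED(f) (sched_current_dispatch->f)

    /* Illustrative caller: ask the active policy for a thread's initial quantum. */
    static uint32_t
    example_quantum_for(thread_t thread)
    {
        return SCHED(initial_quantum_size)(thread);
    }
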
+
+#endif /* CONFIG_SCHED_TRADITIONAL */
+
+const struct sched_dispatch_table *sched_current_dispatch = NULL;
+
+/*
+ * Statically allocate a buffer to hold the longest possible
+ * scheduler description string, as currently implemented.
+ * A corresponding definition in bsd/kern/kern_sysctl.c exports
+ * this string to userspace via sysctl(3). If either version
+ * changes, update the other.
+ *
+ * Note that in addition to being an upper bound on the strings
+ * in the kernel, it's also an exact parameter to PE_get_default(),
+ * which interrogates the device tree on some platforms. That
+ * API requires the caller know the exact size of the device tree
+ * property, so we need both a legacy size (32) and the current size
+ * (48) to deal with old and new device trees. The device tree property
+ * is similarly padded to a fixed size so that the same kernel image
+ * can run on multiple devices with different schedulers configured
+ * in the device tree.
+ */
+#define SCHED_STRING_MAX_LENGTH (48)
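
Because PE_get_default() must be called with the exact property length, boot code has to probe at both sizes. A hedged sketch of that dual-size lookup, assuming a property named "kern.sched" and a hypothetical helper; only PE_get_default() and the two lengths (32 and 48) come from the comment above:

    #define SCHED_STRING_LEGACY_LENGTH (32)  /* size used by older device trees */

    /* Hypothetical helper: try the current length, then fall back to the legacy one. */
    static boolean_t
    sched_string_from_device_tree(char buf[SCHED_STRING_MAX_LENGTH])
    {
        if (PE_get_default("kern.sched", buf, SCHED_STRING_MAX_LENGTH))
            return TRUE;
        if (PE_get_default("kern.sched", buf, SCHED_STRING_LEGACY_LENGTH))
            return TRUE;
        buf[0] = '\0';  /* no override; caller keeps the compile-time default */
        return FALSE;
    }
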
+
+char sched_string[SCHED_STRING_MAX_LENGTH];
+static enum sched_enum _sched_enum __attribute__((used)) = sched_enum_unknown;
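
At boot, the configured name is resolved to a dispatch table, and all three pieces of state are set together: the dispatch pointer, the enum tag (presumably kept visible to external tools via __attribute__((used))), and the human-readable name. A hedged sketch of the selection step, assuming sched_arg holds the configured name and that enum tags exist for both policies — illustrative, not a transcription of sched_init():

    /* Illustrative: bind the chosen policy, its enum tag, and its name together. */
    if (strcmp(sched_arg, "traditional") == 0) {
        sched_current_dispatch = &sched_traditional_dispatch;
        _sched_enum = sched_enum_traditional;
    } else if (strcmp(sched_arg, "traditional_with_pset_runqueue") == 0) {
        sched_current_dispatch = &sched_traditional_with_pset_runqueue_dispatch;
        _sched_enum = sched_enum_traditional_with_pset_runqueue;
    } else {
        panic("Unrecognized scheduler algorithm: %s", sched_arg);
    }
    strlcpy(sched_string, sched_arg, sizeof(sched_string));
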
+
+/* Global flag indicating whether the Background Stepper Context (CPU throttling of background work) is enabled */
+static int cpu_throttle_enabled = 1;
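
Defaulting the flag to enabled and allowing an override at boot is the usual pattern for knobs like this. A hedged sketch using PE_parse_boot_argn(); the boot-arg name "cpu_throttle" and the init hook are assumptions, not taken from the patch:

    /* Hypothetical init hook: let a boot-arg override the built-in default. */
    static void
    cpu_throttle_init(void)
    {
        uint32_t enable;

        if (PE_parse_boot_argn("cpu_throttle", &enable, sizeof(enable)))
            cpu_throttle_enabled = (enable != 0);
    }
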