+/*
+ * For DEV & REL kernels, use a static dispatch table instead of
+ * using the indirect function table.
+ */
+extern const struct sched_dispatch_table sched_multiq_dispatch;
+#define SCHED(f) (sched_multiq_dispatch.f)
+
+#endif /* DEBUG */
+
+/*
+ * Dispatch table describing one scheduler policy: init hooks, run-queue
+ * operations, priority computations, and feature flags. Kernel code
+ * reaches the active policy's entry points through the SCHED() macro
+ * (statically bound on DEV/REL kernels, indirect otherwise).
+ */
+struct sched_dispatch_table {
+ const char *sched_name;
+ void (*init)(void); /* Init global state */
+ void (*timebase_init)(void); /* Timebase-dependent initialization */
+ void (*processor_init)(processor_t processor); /* Per-processor scheduler init */
+ void (*pset_init)(processor_set_t pset); /* Per-processor set scheduler init */
+
+ void (*maintenance_continuation)(void); /* Function called regularly */
+
+ /*
+ * Choose a thread of greater or equal priority from the per-processor
+ * runqueue for timeshare/fixed threads
+ */
+ thread_t (*choose_thread)(
+ processor_t processor,
+ int priority,
+ ast_t reason);
+
+ /* True if scheduler supports stealing threads */
+ boolean_t steal_thread_enabled;
+
+ /*
+ * Steal a thread from another processor in the pset so that it can run
+ * immediately
+ */
+ thread_t (*steal_thread)(
+ processor_set_t pset);
+
+ /*
+ * Compute priority for a timeshare thread based on base priority.
+ */
+ int (*compute_timeshare_priority)(thread_t thread);
+
+ /*
+ * Pick the best processor for a thread (any kind of thread) to run on.
+ */
+ processor_t (*choose_processor)(
+ processor_set_t pset,
+ processor_t processor,
+ thread_t thread);
+ /*
+ * Enqueue a timeshare or fixed priority thread onto the per-processor
+ * runqueue
+ */
+ boolean_t (*processor_enqueue)(
+ processor_t processor,
+ thread_t thread,
+ integer_t options);
+
+ /* Migrate threads away in preparation for processor shutdown */
+ void (*processor_queue_shutdown)(
+ processor_t processor);
+
+ /* Remove the specific thread from the per-processor runqueue */
+ boolean_t (*processor_queue_remove)(
+ processor_t processor,
+ thread_t thread);
+
+ /*
+ * Does the per-processor runqueue have any timeshare or fixed priority
+ * threads on it? Called without pset lock held, so should
+ * not assume immutability while executing.
+ */
+ boolean_t (*processor_queue_empty)(processor_t processor);
+
+ /*
+ * Would this priority trigger an urgent preemption if it's sitting
+ * on the per-processor runqueue?
+ */
+ boolean_t (*priority_is_urgent)(int priority);
+
+ /*
+ * Does the per-processor runqueue contain runnable threads that
+ * should cause the currently-running thread to be preempted?
+ */
+ ast_t (*processor_csw_check)(processor_t processor);
+
+ /*
+ * Does the per-processor runqueue contain a runnable thread
+ * of > or >= priority, as a preflight for choose_thread() or other
+ * thread selection
+ */
+ boolean_t (*processor_queue_has_priority)(processor_t processor,
+ int priority,
+ boolean_t gte);
+
+ /* Quantum size for the specified non-realtime thread. */
+ uint32_t (*initial_quantum_size)(thread_t thread);
+
+ /* Scheduler mode for a new thread */
+ sched_mode_t (*initial_thread_sched_mode)(task_t parent_task);
+
+ /*
+ * Is it safe to call update_priority, which may change a thread's
+ * runqueue or other state. This can be used to throttle changes
+ * to dynamic priority.
+ */
+ boolean_t (*can_update_priority)(thread_t thread);
+
+ /*
+ * Update both scheduled priority and other persistent state.
+ * Side effects may include migration to another processor's runqueue.
+ */
+ void (*update_priority)(thread_t thread);
+
+ /* Lower overhead update to scheduled priority and state. */
+ void (*lightweight_update_priority)(thread_t thread);
+
+ /* Callback for non-realtime threads when the quantum timer fires */
+ void (*quantum_expire)(thread_t thread);
+
+ /*
+ * Runnable threads on per-processor runqueue. Should only
+ * be used for relative comparisons of load between processors.
+ */
+ int (*processor_runq_count)(processor_t processor);
+
+ /* Aggregate runcount statistics for per-processor runqueue */
+ uint64_t (*processor_runq_stats_count_sum)(processor_t processor);
+
+ /*
+ * NOTE(review): despite the "count" in the name this returns
+ * boolean_t — presumably "are any bound threads present on this
+ * processor"; confirm against the policy implementations.
+ */
+ boolean_t (*processor_bound_count)(processor_t processor);
+
+ /* Hook for the periodic thread update scan; state rides in scan_context */
+ void (*thread_update_scan)(sched_update_scan_context_t scan_context);
+
+ /*
+ * Use processor->next_thread to pin a thread to an idle
+ * processor. If FALSE, threads are enqueued and can
+ * be stolen by other processors.
+ */
+ boolean_t direct_dispatch_to_idle_processors;
+
+ /* Supports more than one pset */
+ boolean_t multiple_psets_enabled;
+ /* Supports scheduler groups */
+ boolean_t sched_groups_enabled;
+
+ /* Supports avoid-processor */
+ boolean_t avoid_processor_enabled;
+
+ /* Returns true if this processor should avoid running this thread. */
+ bool (*thread_avoid_processor)(processor_t processor, thread_t thread);
+
+ /*
+ * Invoked when a processor is about to choose the idle thread
+ * Used to send IPIs to a processor which would be preferred to be idle instead.
+ * Called with pset lock held, returns pset lock unlocked.
+ */
+ void (*processor_balance)(processor_t processor, processor_set_t pset);
+ /* Realtime (rt) run-queue hooks, keyed per pset */
+ rt_queue_t (*rt_runq)(processor_set_t pset);
+ void (*rt_init)(processor_set_t pset);
+ void (*rt_queue_shutdown)(processor_t processor);
+ void (*rt_runq_scan)(sched_update_scan_context_t scan_context);
+ int64_t (*rt_runq_count_sum)(void);
+
+ /* Recommended parallelism for a QoS class; interpretation of options not shown here */
+ uint32_t (*qos_max_parallelism)(int qos, uint64_t options);
+ void (*check_spill)(processor_set_t pset, thread_t thread);
+ /* Select the IPI type to deliver to dst for the given scheduling event */
+ sched_ipi_type_t (*ipi_policy)(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event);
+ /* Should the thread currently running on this processor yield? */
+ bool (*thread_should_yield)(processor_t processor, thread_t thread);
+};
+
+/*
+ * Concrete dispatch tables, one per scheduler policy that can be
+ * compiled into this kernel configuration (each guarded by its
+ * CONFIG_SCHED_* option).
+ */
+#if defined(CONFIG_SCHED_TRADITIONAL)
+extern const struct sched_dispatch_table sched_traditional_dispatch;
+extern const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch;
+#endif
+
+#if defined(CONFIG_SCHED_MULTIQ)
+extern const struct sched_dispatch_table sched_multiq_dispatch;
+extern const struct sched_dispatch_table sched_dualq_dispatch;
+#endif
+
+#if defined(CONFIG_SCHED_PROTO)
+extern const struct sched_dispatch_table sched_proto_dispatch;
+#endif
+
+#if defined(CONFIG_SCHED_GRRR)
+extern const struct sched_dispatch_table sched_grrr_dispatch;
+#endif