#include <kern/kern_types.h>
#include <kern/thread.h>
#include <sys/cdefs.h>
+#include <kern/block_hint.h>
#ifdef MACH_KERNEL_PRIVATE
extern void sched_timebase_init(void);
+extern void pset_rt_init(processor_set_t pset);
+
+extern void sched_rtglobal_init(processor_set_t pset);
+
+extern rt_queue_t sched_rtglobal_runq(processor_set_t pset);
+
+extern void sched_rtglobal_queue_shutdown(processor_t processor);
+
+extern int64_t sched_rtglobal_runq_count_sum(void);
+
+extern void sched_check_spill(processor_set_t pset, thread_t thread);
+
+extern bool sched_thread_should_yield(processor_t processor, thread_t thread);
+
/* Force a preemption point for a thread and wait for it to stop running */
extern boolean_t thread_stop(
thread_t thread,
thread_t thread,
int priority);
-/* Set the thread to be categorized as 'background' */
-extern void sched_set_thread_throttled(thread_t thread,
- boolean_t wants_throttle);
-
/* Set the thread's true scheduling mode */
extern void sched_set_thread_mode(thread_t thread,
sched_mode_t mode);
thread_t thread,
integer_t options);
-#define SCHED_TAILQ 1
-#define SCHED_HEADQ 2
-#define SCHED_PREEMPT 4
-
-extern uintptr_t sched_thread_on_rt_queue;
-#define THREAD_ON_RT_RUNQ ((processor_t)(uintptr_t)&sched_thread_on_rt_queue)
+typedef enum {
+ SCHED_NONE = 0x0,
+ SCHED_TAILQ = 0x1,
+ SCHED_HEADQ = 0x2,
+ SCHED_PREEMPT = 0x4,
+ SCHED_REBALANCE = 0x8,
+} sched_options_t;
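+
+/*
+ * Example (illustrative only): the values above are bit flags and may be
+ * OR-ed together when a thread is enqueued, e.g. via thread_setrun():
+ *
+ *     thread_setrun(thread, SCHED_TAILQ | SCHED_PREEMPT);
+ */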
extern processor_set_t task_choose_pset(
task_t task);
processor_t processor,
thread_t thread);
+extern void sched_SMT_balance(
+ processor_t processor,
+ processor_set_t pset);
extern void thread_quantum_init(
thread_t thread);
};
typedef struct sched_update_scan_context *sched_update_scan_context_t;
+extern void sched_rtglobal_runq_scan(sched_update_scan_context_t scan_context);
+
+/*
+ * Enum defining the scheduling events which need IPIs. The IPI policy
+ * engine decides what kind of IPI to use based on the destination
+ * processor's state, the thread, and one of the following events.
+ */
+typedef enum {
+ SCHED_IPI_EVENT_BOUND_THR = 0x1,
+ SCHED_IPI_EVENT_PREEMPT = 0x2,
+ SCHED_IPI_EVENT_SMT_REBAL = 0x3,
+ SCHED_IPI_EVENT_SPILL = 0x4,
+ SCHED_IPI_EVENT_REBALANCE = 0x5,
+} sched_ipi_event_t;
+
+
+/* Enum to define various IPI types used by the scheduler */
+typedef enum {
+ SCHED_IPI_NONE = 0x0,
+ SCHED_IPI_IMMEDIATE = 0x1,
+ SCHED_IPI_IDLE = 0x2,
+ SCHED_IPI_DEFERRED = 0x3,
+} sched_ipi_type_t;
+
+/* The IPI policy engine behaves in the following manner:
+ * - All scheduler events which need an IPI invoke sched_ipi_action() with
+ * the appropriate destination processor, thread and event.
+ * - sched_ipi_action() performs basic checks, invokes the scheduler specific
+ * ipi_policy routine and sets pending_AST bits based on the result.
+ * - Once the pset lock is dropped, the scheduler invokes sched_ipi_perform()
+ * routine which actually sends the appropriate IPI to the destination core.
+ */
+extern sched_ipi_type_t sched_ipi_action(processor_t dst, thread_t thread,
+ boolean_t dst_idle, sched_ipi_event_t event);
+extern void sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi);
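+
+/*
+ * Example (illustrative sketch; choose_processor_for() and the locals are
+ * hypothetical): the two-phase flow described above looks roughly like:
+ *
+ *     pset_lock(pset);
+ *     processor_t dst = choose_processor_for(pset, thread);
+ *     sched_ipi_type_t ipi = sched_ipi_action(dst, thread,
+ *         (dst->state == PROCESSOR_IDLE), SCHED_IPI_EVENT_PREEMPT);
+ *     pset_unlock(pset);
+ *     sched_ipi_perform(dst, ipi);
+ */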
+
+/* sched_ipi_policy() is the global default IPI policy for all schedulers */
+extern sched_ipi_type_t sched_ipi_policy(processor_t dst, thread_t thread,
+ boolean_t dst_idle, sched_ipi_event_t event);
+
+/* sched_ipi_deferred_policy() is the global default deferred IPI policy for all schedulers */
+extern sched_ipi_type_t sched_ipi_deferred_policy(processor_set_t pset,
+ processor_t dst, sched_ipi_event_t event);
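+
+/*
+ * Example (illustrative sketch; "sched_example_ipi_policy" is a hypothetical
+ * name): a scheduler with no special IPI requirements can point its
+ * ipi_policy callback at a thin wrapper around the global default:
+ *
+ *     static sched_ipi_type_t
+ *     sched_example_ipi_policy(processor_t dst, thread_t thread,
+ *                              boolean_t dst_idle, sched_ipi_event_t event)
+ *     {
+ *         return sched_ipi_policy(dst, thread, dst_idle, event);
+ *     }
+ */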
+
#if defined(CONFIG_SCHED_TIMESHARE_CORE)
extern boolean_t thread_update_add_thread(thread_t thread);
#endif /* CONFIG_SCHED_TIMESHARE_CORE */
-extern void rt_runq_scan(sched_update_scan_context_t scan_context);
-
/* Remove thread from its run queue */
extern boolean_t thread_run_queue_remove(thread_t thread);
thread_t thread_run_queue_remove_for_handoff(thread_t thread);
int old_count);
+#if DEBUG
#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri) \
do { \
} \
} while (0)
+#else /* DEBUG */
+
+#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri) do { }while(0)
+#define SCHED_STATS_RUNQ_CHANGE(stats, old_count) do { }while(0)
+
+#endif /* DEBUG */
+
extern uint32_t sched_debug_flags;
#define SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS 0x00000001
#define SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS 0x00000002
extern void active_rt_threads(
boolean_t active);
+/* Returns the perfcontrol attribute for the thread */
+extern perfcontrol_class_t thread_get_perfcontrol_class(
+ thread_t thread);
+
+#define PSET_LOAD_NUMERATOR_SHIFT 16
+#define PSET_LOAD_FRACTIONAL_SHIFT 4
+
+extern int sched_get_pset_load_average(processor_set_t pset);
+extern void sched_update_pset_load_average(processor_set_t pset);
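+
+/*
+ * Example (illustrative; assumes the load average is maintained in fixed
+ * point using the shifts above): a value returned by
+ * sched_get_pset_load_average() carries PSET_LOAD_FRACTIONAL_SHIFT fraction
+ * bits, so a reading of 24 would correspond to a load of
+ * 24 / (1 << PSET_LOAD_FRACTIONAL_SHIFT) = 1.5.
+ */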
+
+/* Generic routine for Non-AMP schedulers to calculate parallelism */
+extern uint32_t sched_qos_max_parallelism(int qos, uint64_t options);
+
#endif /* MACH_KERNEL_PRIVATE */
__BEGIN_DECLS
#ifdef XNU_KERNEL_PRIVATE
-extern boolean_t assert_wait_possible(void);
-
/* Toggles a global override to turn off CPU Throttling */
#define CPU_THROTTLE_DISABLE 0
#define CPU_THROTTLE_ENABLE 1
wait_result_t result);
/* Start thread running */
-extern void thread_bootstrap_return(void);
+extern void thread_bootstrap_return(void) __attribute__((noreturn));
/* Return from exception (BSD-visible interface) */
extern void thread_exception_return(void) __dead2;
/* String declaring the name of the current scheduler */
extern char sched_string[SCHED_STRING_MAX_LENGTH];
-extern kern_return_t sched_work_interval_notify(thread_t thread, uint64_t work_interval_id, uint64_t start, uint64_t finish, uint64_t deadline, uint64_t next_start, uint32_t flags);
+extern thread_t port_name_to_thread_for_ulock(mach_port_name_t thread_name);
+
+/* Attempt to context switch to a specific runnable thread */
+extern wait_result_t thread_handoff(thread_t thread);
+
+extern struct waitq *assert_wait_queue(event_t event);
+
+extern kern_return_t thread_wakeup_one_with_pri(event_t event, int priority);
+
+extern thread_t thread_wakeup_identify(event_t event, int priority);
#endif /* XNU_KERNEL_PRIVATE */
+#ifdef KERNEL_PRIVATE
+/* Set pending block hint for a particular object before we go into a wait state */
+extern void thread_set_pending_block_hint(
+ thread_t thread,
+ block_hint_t block_hint);
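+
+/*
+ * Example (illustrative sketch): a subsystem typically records the hint on
+ * the current thread just before asserting a wait, e.g. for a semaphore wait
+ * (assuming kThreadWaitSemaphore from <kern/block_hint.h>):
+ *
+ *     thread_set_pending_block_hint(current_thread(), kThreadWaitSemaphore);
+ *     assert_wait((event_t)semaphore, THREAD_ABORTSAFE);
+ */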
+
+#define QOS_PARALLELISM_COUNT_LOGICAL 0x1
+#define QOS_PARALLELISM_REALTIME 0x2
+extern uint32_t qos_max_parallelism(int qos, uint64_t options);
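+
+/*
+ * Example (illustrative; THREAD_QOS_USER_INITIATED is assumed to be visible
+ * to the caller): asking how many logical CPUs a QoS tier may reasonably use:
+ *
+ *     uint32_t width = qos_max_parallelism(THREAD_QOS_USER_INITIATED,
+ *                                          QOS_PARALLELISM_COUNT_LOGICAL);
+ */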
+
+#endif /* KERNEL_PRIVATE */
+
/* Context switch */
extern wait_result_t thread_block(
thread_continue_t continuation);
boolean_t one_thread,
wait_result_t result);
-extern kern_return_t thread_wakeup_prim_internal(
- event_t event,
- boolean_t one_thread,
- wait_result_t result,
- int priority);
-
-
#define thread_wakeup(x) \
thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
#define thread_wakeup_with_result(x, z) \
#define thread_wakeup_one(x) \
thread_wakeup_prim((x), TRUE, THREAD_AWAKENED)
-#ifdef MACH_KERNEL_PRIVATE
-#define thread_wakeup_one_with_pri(x, pri) \
- thread_wakeup_prim_internal((x), TRUE, THREAD_AWAKENED, pri)
-#endif
+/* Wake up the specified thread if it is waiting on this event */
+extern kern_return_t thread_wakeup_thread(event_t event, thread_t thread);
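+
+/*
+ * Example (illustrative sketch; "obj" and "waiter_thread" are hypothetical):
+ * the targeted wakeup pairs with the usual assert_wait()/thread_block()
+ * pattern, but only unblocks the named thread:
+ *
+ * Waiter:
+ *     assert_wait((event_t)obj, THREAD_UNINT);
+ *     thread_block(THREAD_CONTINUE_NULL);
+ *
+ * Waker:
+ *     thread_wakeup_thread((event_t)obj, waiter_thread);
+ */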
-extern boolean_t preemption_enabled(void);
+extern boolean_t preemption_enabled(void);
#ifdef MACH_KERNEL_PRIVATE
#error Enable at least one scheduler algorithm in osfmk/conf/MASTER.XXX
#endif
+#if DEBUG
#define SCHED(f) (sched_current_dispatch->f)
+#else /* DEBUG */
+
+/*
+ * For DEV & REL kernels, use a static dispatch table instead of
+ * the indirect function table.
+ */
+extern const struct sched_dispatch_table sched_multiq_dispatch;
+#define SCHED(f) (sched_multiq_dispatch.f)
+
+#endif /* DEBUG */
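+
+/*
+ * Example (illustrative): call sites always go through SCHED(), so the same
+ * code works with either dispatch mode, e.g.:
+ *
+ *     if (SCHED(thread_should_yield)(processor, thread)) {
+ *             ...
+ *     }
+ */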
struct sched_dispatch_table {
const char *sched_name;
boolean_t multiple_psets_enabled;
/* Supports scheduler groups */
boolean_t sched_groups_enabled;
+
+ /* Supports avoid-processor */
+ boolean_t avoid_processor_enabled;
+
+ /* Returns true if this processor should avoid running this thread. */
+ bool (*thread_avoid_processor)(processor_t processor, thread_t thread);
+
+ /*
+	 * Invoked when a processor is about to choose the idle thread.
+	 * Used to send IPIs to a processor which would preferably be idle instead.
+	 * Called with the pset lock held; returns with the pset lock unlocked.
+ */
+ void (*processor_balance)(processor_t processor, processor_set_t pset);
+ rt_queue_t (*rt_runq)(processor_set_t pset);
+ void (*rt_init)(processor_set_t pset);
+ void (*rt_queue_shutdown)(processor_t processor);
+ void (*rt_runq_scan)(sched_update_scan_context_t scan_context);
+ int64_t (*rt_runq_count_sum)(void);
+
+ uint32_t (*qos_max_parallelism)(int qos, uint64_t options);
+ void (*check_spill)(processor_set_t pset, thread_t thread);
+ sched_ipi_type_t (*ipi_policy)(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event);
+ bool (*thread_should_yield)(processor_t processor, thread_t thread);
};
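+
+/*
+ * Example (illustrative sketch; "sched_example_dispatch" is hypothetical): a
+ * scheduler with a single global realtime queue and no avoid-processor
+ * support could wire the new hooks to the generic helpers declared above,
+ * roughly:
+ *
+ *     const struct sched_dispatch_table sched_example_dispatch = {
+ *         .sched_name              = "example",
+ *         .avoid_processor_enabled = FALSE,
+ *         .processor_balance       = sched_SMT_balance,
+ *         .rt_init                 = sched_rtglobal_init,
+ *         .rt_runq                 = sched_rtglobal_runq,
+ *         .rt_queue_shutdown       = sched_rtglobal_queue_shutdown,
+ *         .rt_runq_scan            = sched_rtglobal_runq_scan,
+ *         .rt_runq_count_sum       = sched_rtglobal_runq_count_sum,
+ *         .qos_max_parallelism     = sched_qos_max_parallelism,
+ *         .check_spill             = sched_check_spill,
+ *         .ipi_policy              = sched_ipi_policy,
+ *         .thread_should_yield     = sched_thread_should_yield,
+ *         ... remaining fields elided ...
+ *     };
+ */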
#if defined(CONFIG_SCHED_TRADITIONAL)