/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
*/

#ifndef _KERN_SCHED_PRIM_H_
#define _KERN_SCHED_PRIM_H_

#include <kern/clock.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
-#include <kern/lock.h>
-#include <kern/time_out.h> /*** ??? temp - remove me soon ***/
-#include <kern/cpu_data.h>
-
-#include <sys/appleapiopts.h>
-
-#ifdef __APPLE_API_PRIVATE
+#include <sys/cdefs.h>
+#include <kern/block_hint.h>
#ifdef MACH_KERNEL_PRIVATE
-#include <mach_ldebug.h>
-/*
- * Exported interface to sched_prim.c.
- * A few of these functions are actually defined in
- * ipc_sched.c, for historical reasons.
- */
+#include <mach/branch_predicates.h>
-/* Initialize scheduler module */
+/* Initialization */
extern void sched_init(void);
-/*
- * Set up thread timeout element(s) when thread is created.
- */
-extern void thread_timer_setup(
- thread_t thread);
-
-extern void thread_timer_terminate(void);
+extern void sched_startup(void);
-#define thread_bind_locked(thread, processor) \
- (thread)->bound_processor = (processor)
+extern void sched_timebase_init(void);
-/*
- * Stop a thread and wait for it to stop running.
- */
+/* Force a preemption point for a thread and wait for it to stop running */
extern boolean_t thread_stop(
- thread_t thread);
+ thread_t thread,
+ boolean_t until_not_runnable);
-/*
- * Wait for a thread to stop running.
- */
-extern boolean_t thread_wait(
+/* Release a previous stop request */
+extern void thread_unstop(
thread_t thread);
-/* Select a thread to run on a particular processor */
-extern thread_t thread_select(
- processor_t myprocessor);
-
-extern kern_return_t thread_go_locked(
- thread_t thread,
- wait_result_t result);
+/* Wait for a thread to stop running */
+extern void thread_wait(
+ thread_t thread,
+ boolean_t until_not_runnable);
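+
+/*
+ * Illustrative pairing (sketch, not part of the original interface):
+ * a caller that must observe a quiescent thread brackets the access
+ * with a stop/unstop pair, e.g.
+ *
+ *    if (thread_stop(thread, TRUE)) {
+ *        (operate on the stopped thread)
+ *        thread_unstop(thread);
+ *    }
+ *
+ * thread_wait() only waits for the thread to stop running; it does
+ * not keep it stopped.
+ */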
-/* Stop old thread and run new thread */
-extern boolean_t thread_invoke(
- thread_t old_thread,
- thread_t new_thread,
- int reason,
- thread_continue_t continuation);
+/* Unblock thread on wake up */
+extern boolean_t thread_unblock(
+ thread_t thread,
+ wait_result_t wresult);
-/* Called when current thread is given new stack */
-extern void thread_continue(
- thread_t old_thread);
+/* Unblock and dispatch thread */
+extern kern_return_t thread_go(
+ thread_t thread,
+ wait_result_t wresult);
-/* Switch directly to a particular thread */
-extern int thread_run(
+/* Handle threads at context switch */
+extern void thread_dispatch(
thread_t old_thread,
- thread_continue_t continuation,
thread_t new_thread);
-/* Dispatch a thread not on a run queue */
-extern void thread_dispatch(
- thread_t thread);
+/* Switch directly to a particular thread */
+extern int thread_run(
+ thread_t self,
+ thread_continue_t continuation,
+ void *parameter,
+ thread_t new_thread);
+
+/* Resume thread with new stack */
+extern void thread_continue(
+ thread_t old_thread);
/* Invoke continuation */
extern void call_continuation(
- thread_continue_t continuation);
+ thread_continue_t continuation,
+ void *parameter,
+ wait_result_t wresult);
/* Set the current scheduled priority */
extern void set_sched_pri(
int priority);
/* Set base priority of the specified thread */
-extern void set_priority(
+extern void sched_set_thread_base_priority(
thread_t thread,
int priority);
+/* Set the thread's true scheduling mode */
+extern void sched_set_thread_mode(thread_t thread,
+ sched_mode_t mode);
+/* Demote the true scheduler mode */
+extern void sched_thread_mode_demote(thread_t thread,
+ uint32_t reason);
+/* Un-demote the true scheduler mode */
+extern void sched_thread_mode_undemote(thread_t thread,
+ uint32_t reason);
+
+/* Re-evaluate base priority of thread (thread locked) */
+void thread_recompute_priority(thread_t thread);
+
+/* Re-evaluate base priority of thread (thread unlocked) */
+void thread_recompute_qos(thread_t thread);
+
/* Reset scheduled priority of thread */
-extern void compute_priority(
+extern void thread_recompute_sched_pri(
thread_t thread,
boolean_t override_depress);
-/* Adjust scheduled priority of thread during execution */
-extern void compute_my_priority(
- thread_t thread);
-
/* Periodic scheduler activity */
-extern void sched_tick_init(void);
+extern void sched_init_thread(void (*)(void));
-/*
- * Update thread to the current scheduler tick.
- */
-extern void update_priority(
+/* Perform sched_tick housekeeping activities */
+extern boolean_t can_update_priority(
thread_t thread);
-/* Idle thread loop */
-extern void idle_thread(void);
+extern void update_priority(
+ thread_t thread);
-/*
- * Machine-dependent code must define these functions.
- */
+extern void lightweight_update_priority(
+ thread_t thread);
-/* Start thread running */
-extern void thread_bootstrap_return(void);
+extern void sched_default_quantum_expire(thread_t thread);
-/* Return from exception */
-extern void thread_exception_return(void);
+/* Idle processor thread */
+extern void idle_thread(void);
+
+extern kern_return_t idle_thread_create(
+ processor_t processor);
/* Continuation return from syscall */
extern void thread_syscall_return(
kern_return_t ret);
-extern thread_t switch_context(
- thread_t old_thread,
- thread_continue_t continuation,
- thread_t new_thread);
+/* Context switch */
+extern wait_result_t thread_block_reason(
+ thread_continue_t continuation,
+ void *parameter,
+ ast_t reason);
+
+/* Reschedule thread for execution */
+extern void thread_setrun(
+ thread_t thread,
+ integer_t options);
-/* Attach stack to thread */
-extern void machine_kernel_stack_init(
- thread_t thread,
- void (*start_pos)(thread_t));
+#define SCHED_TAILQ 1
+#define SCHED_HEADQ 2
+#define SCHED_PREEMPT 4
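+
+/*
+ * Example (illustrative): a typical wakeup path makes the thread
+ * runnable at the tail of its runqueue and requests a preemption
+ * check:
+ *
+ *    thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
+ */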
-extern void load_context(
- thread_t thread);
+extern uintptr_t sched_thread_on_rt_queue;
+#define THREAD_ON_RT_RUNQ ((processor_t)(uintptr_t)&sched_thread_on_rt_queue)
-extern thread_act_t switch_act(
- thread_act_t act);
+extern processor_set_t task_choose_pset(
+ task_t task);
-extern void machine_switch_act(
- thread_t thread,
- thread_act_t old,
- thread_act_t new,
- int cpu);
+/* Bind the current thread to a particular processor */
+extern processor_t thread_bind(
+ processor_t processor);
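+
+/*
+ * Binding sketch (illustrative): thread_bind() returns the previous
+ * binding, and the thread migrates at its next context switch:
+ *
+ *    prev = thread_bind(processor);
+ *    thread_block(THREAD_CONTINUE_NULL);
+ *    (work that must run on 'processor')
+ *    thread_bind(prev);
+ */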
-/*
- * These functions are either defined in kern/thread.c
- * or are defined directly by machine-dependent code.
- */
+/* Choose the best processor to run a thread */
+extern processor_t choose_processor(
+ processor_set_t pset,
+ processor_t processor,
+ thread_t thread);
-/* Allocate an activation stack */
-extern vm_offset_t stack_alloc(thread_t thread, void (*start_pos)(thread_t));
-/* Free an activation stack */
-extern void stack_free(thread_t thread);
+extern void thread_quantum_init(
+ thread_t thread);
-/* Collect excess kernel stacks */
-extern void stack_collect(void);
+extern void run_queue_init(
+ run_queue_t runq);
-/* Block current thread, indicating reason */
-extern wait_result_t thread_block_reason(
- thread_continue_t continuation,
- ast_t reason);
+extern thread_t run_queue_dequeue(
+ run_queue_t runq,
+ integer_t options);
-/* Dispatch a thread for execution */
-extern void thread_setrun(
- thread_t thread,
- boolean_t tail);
+extern boolean_t run_queue_enqueue(
+ run_queue_t runq,
+ thread_t thread,
+ integer_t options);
-#define HEAD_Q 0 /* FALSE */
-#define TAIL_Q 1 /* TRUE */
+extern void run_queue_remove(
+ run_queue_t runq,
+ thread_t thread);
+
+struct sched_update_scan_context
+{
+ uint64_t earliest_bg_make_runnable_time;
+ uint64_t earliest_normal_make_runnable_time;
+ uint64_t earliest_rt_make_runnable_time;
+};
+typedef struct sched_update_scan_context *sched_update_scan_context_t;
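+
+/*
+ * Sketch (assumed usage, not from the original header): a maintenance
+ * scan zero-fills a context and lets the scanners record the earliest
+ * make-runnable timestamps they find:
+ *
+ *    struct sched_update_scan_context ctx = { 0, 0, 0 };
+ *    (void) runq_scan(runq, &ctx);
+ *    rt_runq_scan(&ctx);
+ */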
-/* Bind thread to a particular processor */
-extern void thread_bind(
- thread_t thread,
- processor_t processor);
+#if defined(CONFIG_SCHED_TIMESHARE_CORE)
+
+extern boolean_t thread_update_add_thread(thread_t thread);
+extern void thread_update_process_threads(void);
+extern boolean_t runq_scan(run_queue_t runq, sched_update_scan_context_t scan_context);
+
+extern void sched_timeshare_init(void);
+extern void sched_timeshare_timebase_init(void);
+extern void sched_timeshare_maintenance_continue(void);
+
+extern boolean_t priority_is_urgent(int priority);
+extern uint32_t sched_timeshare_initial_quantum_size(thread_t thread);
+
+extern int sched_compute_timeshare_priority(thread_t thread);
+
+#endif /* CONFIG_SCHED_TIMESHARE_CORE */
+
+extern void rt_runq_scan(sched_update_scan_context_t scan_context);
+
+/* Remove thread from its run queue */
+extern boolean_t thread_run_queue_remove(thread_t thread);
+thread_t thread_run_queue_remove_for_handoff(thread_t thread);
+
+/* Put a thread back in the run queue after being yanked */
+extern void thread_run_queue_reinsert(thread_t thread, integer_t options);
+
+extern void thread_timer_expire(
+ void *thread,
+ void *p1);
+
+extern boolean_t thread_eager_preemption(
+ thread_t thread);
+
+extern boolean_t sched_generic_direct_dispatch_to_idle_processors;
/* Set the maximum interrupt level for the thread */
__private_extern__ wait_interrupt_t thread_interrupt_level(
thread_t thread,
wait_interrupt_t interruptible);
-/* Sleep, unlocking and then relocking a usimple_lock in the process */
-__private_extern__ wait_result_t thread_sleep_fast_usimple_lock(
- event_t event,
- simple_lock_t lock,
- wait_interrupt_t interruptible);
-
/* Wake up locked thread directly, passing result */
__private_extern__ kern_return_t clear_wait_internal(
thread_t thread,
wait_result_t result);
+extern void sched_stats_handle_csw(
+ processor_t processor,
+ int reasons,
+ int selfpri,
+ int otherpri);
+
+extern void sched_stats_handle_runq_change(
+ struct runq_stats *stats,
+ int old_count);
+
+#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri) \
+do { \
+ if (__builtin_expect(sched_stats_active, 0)) { \
+ sched_stats_handle_csw((processor), \
+ (reasons), (selfpri), (otherpri)); \
+ } \
+} while (0)
+
+
+#define SCHED_STATS_RUNQ_CHANGE(stats, old_count) \
+do { \
+ if (__builtin_expect(sched_stats_active, 0)) { \
+ sched_stats_handle_runq_change((stats), \
+ (old_count)); \
+ } \
+} while (0)
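+
+/*
+ * Usage sketch (illustrative): both wrappers keep the common path to a
+ * single predicted-not-taken branch while sched_stats_active is clear:
+ *
+ *    SCHED_STATS_CSW(processor, reasons, self_pri, other_pri);
+ *    SCHED_STATS_RUNQ_CHANGE(&runq->runq_stats, old_count);
+ */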
+
+extern uint32_t sched_debug_flags;
+#define SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS 0x00000001
+#define SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS 0x00000002
+
+#define SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(...) do { \
+ if (__improbable(sched_debug_flags & SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS)) { \
+ KERNEL_DEBUG_CONSTANT(__VA_ARGS__); \
+ } \
+ } while(0)
+
+#define SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(...) do { \
+ if (__improbable(sched_debug_flags & SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS)) { \
+ KERNEL_DEBUG_CONSTANT(__VA_ARGS__); \
+ } \
+ } while(0)
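+
+/*
+ * Illustrative invocation (the trace code and arguments are
+ * assumptions, not taken from this header):
+ *
+ *    SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(
+ *        MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
+ *        arg1, arg2, arg3, arg4, 0);
+ *
+ * This reduces to one flag test unless platform tracepoints are enabled.
+ */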
+
+#define THREAD_URGENCY_NONE 0 /* indicates that there is no currently runnable thread */
+#define THREAD_URGENCY_BACKGROUND 1 /* indicates that the thread is marked as a "background" thread */
+#define THREAD_URGENCY_NORMAL 2 /* indicates that the thread is marked as a "normal" thread */
+#define THREAD_URGENCY_REAL_TIME 3 /* indicates that the thread is marked as a "real-time" or urgent thread */
+#define THREAD_URGENCY_MAX 4 /* Marker */
+/* Returns the "urgency" of a thread (provided by scheduler) */
+extern int thread_get_urgency(
+ thread_t thread,
+ uint64_t *rt_period,
+ uint64_t *rt_deadline);
+
+/* Tells the "urgency" of the just scheduled thread (provided by CPU PM) */
+extern void thread_tell_urgency(
+ int urgency,
+ uint64_t rt_period,
+ uint64_t rt_deadline,
+ uint64_t sched_latency,
+ thread_t nthread);
+
+/* Tells if there are "active" RT threads in the system (provided by CPU PM) */
+extern void active_rt_threads(
+ boolean_t active);
+
#endif /* MACH_KERNEL_PRIVATE */
+__BEGIN_DECLS
+
+#ifdef XNU_KERNEL_PRIVATE
+
+extern boolean_t assert_wait_possible(void);
+
+/* Toggles a global override to turn off CPU Throttling */
+#define CPU_THROTTLE_DISABLE 0
+#define CPU_THROTTLE_ENABLE 1
+extern void sys_override_cpu_throttle(int flag);
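+
+/*
+ * Example (illustrative): bracketing latency-sensitive work with the
+ * global throttle override:
+ *
+ *    sys_override_cpu_throttle(CPU_THROTTLE_DISABLE);
+ *    (latency-sensitive work)
+ *    sys_override_cpu_throttle(CPU_THROTTLE_ENABLE);
+ */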
+
/*
****************** Only exported until BSD stops using ********************
*/
-/*
- * Cancel a stop and unblock the thread if already stopped.
- */
-extern void thread_unstop(
- thread_t thread);
+extern void thread_vm_bind_group_add(void);
/* Wake up thread directly, passing result */
extern kern_return_t clear_wait(
thread_t thread,
wait_result_t result);
-#endif /* __APPLE_API_PRIVATE */
+/* Start thread running */
+extern void thread_bootstrap_return(void);
-/*
- * ********************* PUBLIC APIs ************************************
- */
+/* Return from exception (BSD-visible interface) */
+extern void thread_exception_return(void) __dead2;
-/* Set timer for current thread */
-extern void thread_set_timer(
- uint32_t interval,
- uint32_t scale_factor);
+#define SCHED_STRING_MAX_LENGTH (48)
+/* String declaring the name of the current scheduler */
+extern char sched_string[SCHED_STRING_MAX_LENGTH];
-extern void thread_set_timer_deadline(
- uint64_t deadline);
+extern kern_return_t sched_work_interval_notify(thread_t thread, uint64_t work_interval_id, uint64_t start, uint64_t finish, uint64_t deadline, uint64_t next_start, uint32_t flags);
-extern void thread_cancel_timer(void);
+extern thread_t port_name_to_thread_for_ulock(mach_port_name_t thread_name);
-/* Declare thread will wait on a particular event */
-extern wait_result_t assert_wait(
- event_t event,
- wait_interrupt_t interruptflag);
-
-/* Assert that the thread intends to wait for a timeout */
-extern wait_result_t assert_wait_timeout(
- natural_t msecs,
- wait_interrupt_t interruptflags);
-
-/* Sleep, unlocking and then relocking a usimple_lock in the process */
-extern wait_result_t thread_sleep_usimple_lock(
- event_t event,
- usimple_lock_t lock,
- wait_interrupt_t interruptible);
-
-/* Sleep, unlocking and then relocking a mutex in the process */
-extern wait_result_t thread_sleep_mutex(
- event_t event,
- mutex_t *mutex,
- wait_interrupt_t interruptible);
-
-/* Sleep with a deadline, unlocking and then relocking a mutex in the process */
-extern wait_result_t thread_sleep_mutex_deadline(
- event_t event,
- mutex_t *mutex,
- uint64_t deadline,
- wait_interrupt_t interruptible);
-
-/* Sleep, unlocking and then relocking a write lock in the process */
-extern wait_result_t thread_sleep_lock_write(
- event_t event,
- lock_t *lock,
- wait_interrupt_t interruptible);
-
-/* Sleep, hinting that a thread funnel may be involved in the process */
-extern wait_result_t thread_sleep_funnel(
- event_t event,
- wait_interrupt_t interruptible);
+/* Attempt to context switch to a specific runnable thread */
+extern wait_result_t thread_handoff(thread_t thread);
-/* Wake up thread (or threads) waiting on a particular event */
-extern kern_return_t thread_wakeup_prim(
- event_t event,
- boolean_t one_thread,
- wait_result_t result);
+extern struct waitq *assert_wait_queue(event_t event);
-#ifdef __APPLE_API_UNSTABLE
+extern kern_return_t thread_wakeup_one_with_pri(event_t event, int priority);
-/* Block current thread (Block reason) */
-extern wait_result_t thread_block(
- thread_continue_t continuation);
+extern thread_t thread_wakeup_identify(event_t event, int priority);
-#endif /* __APPLE_API_UNSTABLE */
+#endif /* XNU_KERNEL_PRIVATE */
-/*
- * Routines defined as macros
- */
+#ifdef KERNEL_PRIVATE
+/* Set pending block hint for a particular object before we go into a wait state */
+extern void thread_set_pending_block_hint(
+ thread_t thread,
+ block_hint_t block_hint);
+#endif /* KERNEL_PRIVATE */
+
+/* Context switch */
+extern wait_result_t thread_block(
+ thread_continue_t continuation);
+
+extern wait_result_t thread_block_parameter(
+ thread_continue_t continuation,
+ void *parameter);
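+
+/*
+ * Continuation sketch (illustrative; my_continue is hypothetical): a
+ * blocking path can give up its kernel stack by supplying a
+ * continuation, which receives the wait result when the thread resumes:
+ *
+ *    static void my_continue(void *param, wait_result_t wresult);
+ *
+ *    wresult = assert_wait(event, THREAD_UNINT);
+ *    if (wresult == THREAD_WAITING)
+ *        thread_block_parameter(my_continue, param);
+ *
+ * thread_block_parameter() does not return here once the continuation
+ * runs; my_continue() picks up instead.
+ */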
+
+/* Declare thread will wait on a particular event */
+extern wait_result_t assert_wait(
+ event_t event,
+ wait_interrupt_t interruptible);
+
+/* Assert that the thread intends to wait with a timeout */
+extern wait_result_t assert_wait_timeout(
+ event_t event,
+ wait_interrupt_t interruptible,
+ uint32_t interval,
+ uint32_t scale_factor);
+
+/* Assert that the thread intends to wait with an urgency, timeout and leeway */
+extern wait_result_t assert_wait_timeout_with_leeway(
+ event_t event,
+ wait_interrupt_t interruptible,
+ wait_timeout_urgency_t urgency,
+ uint32_t interval,
+ uint32_t leeway,
+ uint32_t scale_factor);
+
+extern wait_result_t assert_wait_deadline(
+ event_t event,
+ wait_interrupt_t interruptible,
+ uint64_t deadline);
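+
+/*
+ * Timed-wait sketch (illustrative; the 100 ms interval and scale factor
+ * are assumptions): wait for an event, giving up after the timeout:
+ *
+ *    wresult = assert_wait_timeout(event, THREAD_ABORTSAFE,
+ *        100, 1000 * NSEC_PER_USEC);
+ *    if (wresult == THREAD_WAITING)
+ *        wresult = thread_block(THREAD_CONTINUE_NULL);
+ *    if (wresult == THREAD_TIMED_OUT)
+ *        (handle the timeout)
+ */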
+
+/* Assert that the thread intends to wait with an urgency, deadline, and leeway */
+extern wait_result_t assert_wait_deadline_with_leeway(
+ event_t event,
+ wait_interrupt_t interruptible,
+ wait_timeout_urgency_t urgency,
+ uint64_t deadline,
+ uint64_t leeway);
+
+/* Wake up thread (or threads) waiting on a particular event */
+extern kern_return_t thread_wakeup_prim(
+ event_t event,
+ boolean_t one_thread,
+ wait_result_t result);
#define thread_wakeup(x) \
thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
#define thread_wakeup_one(x) \
thread_wakeup_prim((x), TRUE, THREAD_AWAKENED)
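+
+/*
+ * Canonical wait/wakeup sketch (illustrative; the flag and its locking
+ * are assumptions, not part of this interface). Both sides key on the
+ * same opaque event address:
+ *
+ * waiter:
+ *    wresult = assert_wait((event_t)&object->flag, THREAD_UNINT);
+ *    if (wresult == THREAD_WAITING)
+ *        wresult = thread_block(THREAD_CONTINUE_NULL);
+ *
+ * waker:
+ *    object->flag = TRUE;
+ *    thread_wakeup((event_t)&object->flag);
+ */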
-#if !defined(MACH_KERNEL_PRIVATE) && !defined(ABSOLUTETIME_SCALAR_TYPE)
+/* Wakeup the specified thread if it is waiting on this event */
+extern kern_return_t thread_wakeup_thread(event_t event, thread_t thread);
-#include <libkern/OSBase.h>
+extern boolean_t preemption_enabled(void);
-#define thread_set_timer_deadline(a) \
- thread_set_timer_deadline(__OSAbsoluteTime(a))
+#ifdef MACH_KERNEL_PRIVATE
+
+/*
+ * Scheduler algorithm indirection. If only one algorithm is
+ * enabled at compile-time, a direct function call is used.
+ * If more than one is enabled, calls are dispatched through
+ * a function pointer table.
+ */
+#if !defined(CONFIG_SCHED_TRADITIONAL) && !defined(CONFIG_SCHED_PROTO) && !defined(CONFIG_SCHED_GRRR) && !defined(CONFIG_SCHED_MULTIQ)
+#error Enable at least one scheduler algorithm in osfmk/conf/MASTER.XXX
#endif
+#define SCHED(f) (sched_current_dispatch->f)
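+
+/*
+ * Example (illustrative): callers reach the active algorithm through
+ * the dispatch macro:
+ *
+ *    SCHED(update_priority)(thread);
+ *    processor = SCHED(choose_processor)(pset, processor, thread);
+ */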
+
+struct sched_dispatch_table {
+ const char *sched_name;
+ void (*init)(void); /* Init global state */
+ void (*timebase_init)(void); /* Timebase-dependent initialization */
+ void (*processor_init)(processor_t processor); /* Per-processor scheduler init */
+ void (*pset_init)(processor_set_t pset); /* Per-processor set scheduler init */
+
+ void (*maintenance_continuation)(void); /* Function called regularly */
+
+ /*
+ * Choose a thread of greater or equal priority from the per-processor
+ * runqueue for timeshare/fixed threads
+ */
+ thread_t (*choose_thread)(
+ processor_t processor,
+ int priority,
+ ast_t reason);
+
+ /* True if scheduler supports stealing threads */
+ boolean_t steal_thread_enabled;
+
+ /*
+ * Steal a thread from another processor in the pset so that it can run
+ * immediately
+ */
+ thread_t (*steal_thread)(
+ processor_set_t pset);
+
+ /*
+ * Compute priority for a timeshare thread based on base priority.
+ */
+ int (*compute_timeshare_priority)(thread_t thread);
+
+ /*
+ * Pick the best processor for a thread (any kind of thread) to run on.
+ */
+ processor_t (*choose_processor)(
+ processor_set_t pset,
+ processor_t processor,
+ thread_t thread);
+ /*
+ * Enqueue a timeshare or fixed priority thread onto the per-processor
+ * runqueue
+ */
+ boolean_t (*processor_enqueue)(
+ processor_t processor,
+ thread_t thread,
+ integer_t options);
+
+ /* Migrate threads away in preparation for processor shutdown */
+ void (*processor_queue_shutdown)(
+ processor_t processor);
+
+ /* Remove the specific thread from the per-processor runqueue */
+ boolean_t (*processor_queue_remove)(
+ processor_t processor,
+ thread_t thread);
+
+ /*
+ * Does the per-processor runqueue have any timeshare or fixed priority
+ * threads on it? Called without the pset lock held, so the
+ * implementation should not assume the queue is immutable while executing.
+ */
+ boolean_t (*processor_queue_empty)(processor_t processor);
+
+ /*
+ * Would this priority trigger an urgent preemption if it's sitting
+ * on the per-processor runqueue?
+ */
+ boolean_t (*priority_is_urgent)(int priority);
+
+ /*
+ * Does the per-processor runqueue contain runnable threads that
+ * should cause the currently-running thread to be preempted?
+ */
+ ast_t (*processor_csw_check)(processor_t processor);
+
+ /*
+ * Does the per-processor runqueue contain a runnable thread
+ * of > or >= priority, as a preflight for choose_thread() or other
+ * thread selection.
+ */
+ boolean_t (*processor_queue_has_priority)(processor_t processor,
+ int priority,
+ boolean_t gte);
+
+ /* Quantum size for the specified non-realtime thread. */
+ uint32_t (*initial_quantum_size)(thread_t thread);
+
+ /* Scheduler mode for a new thread */
+ sched_mode_t (*initial_thread_sched_mode)(task_t parent_task);
+
+ /*
+ * Is it safe to call update_priority, which may change a thread's
+ * runqueue or other state. This can be used to throttle changes
+ * to dynamic priority.
+ */
+ boolean_t (*can_update_priority)(thread_t thread);
+
+ /*
+ * Update both scheduled priority and other persistent state.
+ * Side effects may include migration to another processor's runqueue.
+ */
+ void (*update_priority)(thread_t thread);
+
+ /* Lower overhead update to scheduled priority and state. */
+ void (*lightweight_update_priority)(thread_t thread);
+
+ /* Callback for non-realtime threads when the quantum timer fires */
+ void (*quantum_expire)(thread_t thread);
+
+ /*
+ * Runnable threads on per-processor runqueue. Should only
+ * be used for relative comparisons of load between processors.
+ */
+ int (*processor_runq_count)(processor_t processor);
+
+ /* Aggregate runcount statistics for per-processor runqueue */
+ uint64_t (*processor_runq_stats_count_sum)(processor_t processor);
+
+ boolean_t (*processor_bound_count)(processor_t processor);
+
+ void (*thread_update_scan)(sched_update_scan_context_t scan_context);
+
+ /*
+ * If TRUE, use processor->next_thread to pin a thread to an idle
+ * processor; if FALSE, threads are enqueued and can be stolen by
+ * other processors.
+ */
+ boolean_t direct_dispatch_to_idle_processors;
+
+ /* Supports more than one pset */
+ boolean_t multiple_psets_enabled;
+ /* Supports scheduler groups */
+ boolean_t sched_groups_enabled;
+};
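+
+/*
+ * Sketch (hypothetical algorithm; the "example" names are invented):
+ * each algorithm supplies one of these tables, and the selected table
+ * is installed as sched_current_dispatch during boot:
+ *
+ *    const struct sched_dispatch_table sched_example_dispatch = {
+ *        .sched_name    = "example",
+ *        .init          = sched_example_init,
+ *        .timebase_init = sched_example_timebase_init,
+ *        ...
+ *    };
+ */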
+
+#if defined(CONFIG_SCHED_TRADITIONAL)
+extern const struct sched_dispatch_table sched_traditional_dispatch;
+extern const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch;
+#endif
+
+#if defined(CONFIG_SCHED_MULTIQ)
+extern const struct sched_dispatch_table sched_multiq_dispatch;
+extern const struct sched_dispatch_table sched_dualq_dispatch;
+#endif
+
+#if defined(CONFIG_SCHED_PROTO)
+extern const struct sched_dispatch_table sched_proto_dispatch;
+#endif
+
+#if defined(CONFIG_SCHED_GRRR)
+extern const struct sched_dispatch_table sched_grrr_dispatch;
+#endif
+
+/*
+ * It is an error to invoke any scheduler-related code
+ * before this is set up
+ */
+extern const struct sched_dispatch_table *sched_current_dispatch;
+
+#endif /* MACH_KERNEL_PRIVATE */
+
+__END_DECLS
+
#endif /* _KERN_SCHED_PRIM_H_ */