/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Scheduling primitive definitions file
 */
#ifndef _KERN_SCHED_PRIM_H_
#define _KERN_SCHED_PRIM_H_

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/kern_return.h>
#include <kern/clock.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <sys/cdefs.h>
#include <kern/block_hint.h>

#ifdef MACH_KERNEL_PRIVATE

#include <kern/sched_urgency.h>
/* Initialization */
extern void sched_init(void);

extern void sched_startup(void);

extern void sched_timebase_init(void);

extern void pset_rt_init(processor_set_t pset);

extern void sched_rtglobal_init(processor_set_t pset);

extern rt_queue_t sched_rtglobal_runq(processor_set_t pset);

extern void sched_rtglobal_queue_shutdown(processor_t processor);

extern int64_t sched_rtglobal_runq_count_sum(void);

extern void sched_check_spill(processor_set_t pset, thread_t thread);

extern bool sched_thread_should_yield(processor_t processor, thread_t thread);

extern bool sched_steal_thread_DISABLED(processor_set_t pset);
extern bool sched_steal_thread_enabled(processor_set_t pset);
/* Force a preemption point for a thread and wait for it to stop running */
extern boolean_t thread_stop(
    thread_t  thread,
    boolean_t until_not_runnable);

/* Release a previous stop request */
extern void thread_unstop(
    thread_t thread);

/* Wait for a thread to stop running */
extern void thread_wait(
    thread_t  thread,
    boolean_t until_not_runnable);
/* Unblock thread on wake up */
extern boolean_t thread_unblock(
    thread_t      thread,
    wait_result_t wresult);

/* Unblock and dispatch thread */
extern kern_return_t thread_go(
    thread_t      thread,
    wait_result_t wresult);

/* Handle threads at context switch */
extern void thread_dispatch(
    thread_t old_thread,
    thread_t new_thread);
/* Switch directly to a particular thread */
extern int thread_run(
    thread_t          self,
    thread_continue_t continuation,
    void              *parameter,
    thread_t          new_thread);
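
/*
 * Illustration only (not part of this interface): a direct handoff from
 * the current thread to a runnable `new_thread`, resuming in the
 * hypothetical `my_continuation` when this thread next runs.
 *
 *	thread_run(current_thread(), my_continuation, NULL, new_thread);
 */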
/* Resume thread with new stack */
extern void thread_continue(
    thread_t old_thread);
/* Invoke continuation */
extern void call_continuation(
    thread_continue_t continuation,
    void              *parameter,
    wait_result_t     wresult,
    boolean_t         enable_interrupts);
/*
 * Flags that can be passed to set_sched_pri
 * to skip side effects
 */
typedef enum {
    SETPRI_DEFAULT = 0x0,
    SETPRI_LAZY    = 0x1,   /* Avoid setting AST flags or sending IPIs */
} set_sched_pri_options_t;
/* Set the current scheduled priority */
extern void set_sched_pri(
    thread_t                thread,
    int                     priority,
    set_sched_pri_options_t options);
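
/*
 * Illustration only: change the scheduled priority while deferring the
 * AST/IPI side effects, e.g. when the caller knows a reschedule is
 * already imminent. `new_pri` stands in for a computed priority.
 *
 *	set_sched_pri(thread, new_pri, SETPRI_LAZY);
 */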
/* Set base priority of the specified thread */
extern void sched_set_thread_base_priority(
    thread_t thread,
    int      priority);

/* Set the thread's true scheduling mode */
extern void sched_set_thread_mode(thread_t thread, sched_mode_t mode);

/* Demote the true scheduler mode */
extern void sched_thread_mode_demote(thread_t thread, uint32_t reason);

/* Un-demote the true scheduler mode */
extern void sched_thread_mode_undemote(thread_t thread, uint32_t reason);
extern void sched_thread_promote_to_pri(thread_t thread, int priority, uintptr_t trace_obj);
extern void sched_thread_update_promotion_to_pri(thread_t thread, int priority, uintptr_t trace_obj);
extern void sched_thread_unpromote(thread_t thread, uintptr_t trace_obj);

extern void assert_promotions_invariant(thread_t thread);

extern void sched_thread_promote_reason(thread_t thread, uint32_t reason, uintptr_t trace_obj);
extern void sched_thread_unpromote_reason(thread_t thread, uint32_t reason, uintptr_t trace_obj);
/* Re-evaluate base priority of thread (thread locked) */
void thread_recompute_priority(thread_t thread);

/* Re-evaluate scheduled priority of thread (thread locked) */
extern void thread_recompute_sched_pri(
    thread_t                thread,
    set_sched_pri_options_t options);
/* Periodic scheduler activity */
extern void sched_init_thread(void (*)(void));

/* Perform sched_tick housekeeping activities */
extern boolean_t can_update_priority(thread_t thread);

extern void update_priority(thread_t thread);

extern void lightweight_update_priority(thread_t thread);

extern void sched_default_quantum_expire(thread_t thread);
/* Idle processor thread */
extern void idle_thread(void);

extern kern_return_t idle_thread_create(
    processor_t processor);
/* Continuation return from syscall */
extern void thread_syscall_return(
    kern_return_t ret);

/* Context switch */
extern wait_result_t thread_block_reason(
    thread_continue_t continuation,
    void              *parameter,
    ast_t             reason);
/* Reschedule thread for execution */
extern void thread_setrun(
    thread_t  thread,
    integer_t options);

typedef enum {
    SCHED_NONE      = 0x0,
    SCHED_TAILQ     = 0x1,
    SCHED_HEADQ     = 0x2,
    SCHED_PREEMPT   = 0x4,
    SCHED_REBALANCE = 0x8,
} sched_options_t;
extern processor_set_t task_choose_pset(
    task_t task);

/* Bind the current thread to a particular processor */
extern processor_t thread_bind(
    processor_t processor);
/* Choose the best processor to run a thread */
extern processor_t choose_processor(
    processor_set_t pset,
    processor_t     processor,
    thread_t        thread);

extern void sched_SMT_balance(
    processor_t     processor,
    processor_set_t pset);
extern void thread_quantum_init(
    thread_t thread);

extern void run_queue_init(
    run_queue_t runq);

extern thread_t run_queue_dequeue(
    run_queue_t runq,
    integer_t   options);

extern boolean_t run_queue_enqueue(
    run_queue_t runq,
    thread_t    thread,
    integer_t   options);

extern void run_queue_remove(
    run_queue_t runq,
    thread_t    thread);
struct sched_update_scan_context {
    uint64_t earliest_bg_make_runnable_time;
    uint64_t earliest_normal_make_runnable_time;
    uint64_t earliest_rt_make_runnable_time;
};
typedef struct sched_update_scan_context *sched_update_scan_context_t;
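
/*
 * Illustration only: a scan typically starts from "nothing seen yet",
 * with each timestamp primed to UINT64_MAX so any runnable thread
 * observed during the scan can only lower it.
 *
 *	struct sched_update_scan_context scan_context = {
 *		.earliest_bg_make_runnable_time     = UINT64_MAX,
 *		.earliest_normal_make_runnable_time = UINT64_MAX,
 *		.earliest_rt_make_runnable_time     = UINT64_MAX,
 *	};
 *	sched_rtglobal_runq_scan(&scan_context);
 */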
extern void sched_rtglobal_runq_scan(sched_update_scan_context_t scan_context);
/*
 * Enum to define various events which need IPIs. The IPI policy
 * engine decides what kind of IPI to use based on destination
 * processor state, thread and one of the following scheduling events.
 */
typedef enum {
    SCHED_IPI_EVENT_BOUND_THR = 0x1,
    SCHED_IPI_EVENT_PREEMPT   = 0x2,
    SCHED_IPI_EVENT_SMT_REBAL = 0x3,
    SCHED_IPI_EVENT_SPILL     = 0x4,
    SCHED_IPI_EVENT_REBALANCE = 0x5,
} sched_ipi_event_t;
/* Enum to define various IPI types used by the scheduler */
typedef enum {
    SCHED_IPI_NONE      = 0x0,
    SCHED_IPI_IMMEDIATE = 0x1,
    SCHED_IPI_IDLE      = 0x2,
    SCHED_IPI_DEFERRED  = 0x3,
} sched_ipi_type_t;
/* The IPI policy engine behaves in the following manner:
 * - All scheduler events which need an IPI invoke sched_ipi_action() with
 *   the appropriate destination processor, thread and event.
 * - sched_ipi_action() performs basic checks, invokes the scheduler specific
 *   ipi_policy routine and sets pending_AST bits based on the result.
 * - Once the pset lock is dropped, the scheduler invokes sched_ipi_perform()
 *   routine which actually sends the appropriate IPI to the destination core.
 */
extern sched_ipi_type_t sched_ipi_action(processor_t dst, thread_t thread,
    boolean_t dst_idle, sched_ipi_event_t event);
extern void sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi);
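
/*
 * Illustration only: the two-phase flow described above, for a scheduler
 * that just made `thread` runnable on `dst`. The pset lock calls stand in
 * for whatever locking the caller already performs.
 *
 *	pset_lock(pset);
 *	sched_ipi_type_t ipi = sched_ipi_action(dst, thread, FALSE,
 *	    SCHED_IPI_EVENT_PREEMPT);
 *	pset_unlock(pset);
 *	sched_ipi_perform(dst, ipi);	(IPI sent only after the lock drops)
 */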
/* sched_ipi_policy() is the global default IPI policy for all schedulers */
extern sched_ipi_type_t sched_ipi_policy(processor_t dst, thread_t thread,
    boolean_t dst_idle, sched_ipi_event_t event);

/* sched_ipi_deferred_policy() is the global default deferred IPI policy for all schedulers */
extern sched_ipi_type_t sched_ipi_deferred_policy(processor_set_t pset,
    processor_t dst, sched_ipi_event_t event);
#if defined(CONFIG_SCHED_TIMESHARE_CORE)

extern boolean_t thread_update_add_thread(thread_t thread);
extern void thread_update_process_threads(void);
extern boolean_t runq_scan(run_queue_t runq, sched_update_scan_context_t scan_context);

extern void sched_timeshare_init(void);
extern void sched_timeshare_timebase_init(void);
extern void sched_timeshare_maintenance_continue(void);

extern boolean_t priority_is_urgent(int priority);
extern uint32_t sched_timeshare_initial_quantum_size(thread_t thread);

extern int sched_compute_timeshare_priority(thread_t thread);

#endif /* CONFIG_SCHED_TIMESHARE_CORE */
/* Remove thread from its run queue */
extern boolean_t thread_run_queue_remove(thread_t thread);
thread_t thread_run_queue_remove_for_handoff(thread_t thread);

/* Put a thread back in the run queue after being yanked */
extern void thread_run_queue_reinsert(thread_t thread, integer_t options);
extern void thread_timer_expire(
    void *thread,
    void *p1);

extern boolean_t thread_eager_preemption(
    thread_t thread);

extern boolean_t sched_generic_direct_dispatch_to_idle_processors;
/* Set the maximum interrupt level for the thread */
__private_extern__ wait_interrupt_t thread_interrupt_level(
    wait_interrupt_t interruptible);

__private_extern__ wait_result_t thread_mark_wait_locked(
    thread_t         thread,
    wait_interrupt_t interruptible);

/* Wake up locked thread directly, passing result */
__private_extern__ kern_return_t clear_wait_internal(
    thread_t      thread,
    wait_result_t result);
extern void sched_stats_handle_csw(
    processor_t processor,
    int reasons,
    int selfpri,
    int otherpri);

extern void sched_stats_handle_runq_change(
    struct runq_stats *stats,
    int old_count);
#if DEBUG

#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri)		\
do {									\
	if (__builtin_expect(sched_stats_active, 0)) {			\
		sched_stats_handle_csw((processor),			\
		    (reasons), (selfpri), (otherpri));			\
	}								\
} while (0)

#define SCHED_STATS_RUNQ_CHANGE(stats, old_count)			\
do {									\
	if (__builtin_expect(sched_stats_active, 0)) {			\
		sched_stats_handle_runq_change((stats),			\
		    (old_count));					\
	}								\
} while (0)

#else /* DEBUG */

#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri) do { } while (0)
#define SCHED_STATS_RUNQ_CHANGE(stats, old_count) do { } while (0)

#endif /* DEBUG */
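
/*
 * Illustration only: a context-switch path records stats only when
 * sched_stats_active is set, and the macro compiles to nothing on
 * non-DEBUG kernels.
 *
 *	SCHED_STATS_CSW(processor, reason, self->sched_pri, thread->sched_pri);
 */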
extern uint32_t sched_debug_flags;
#define SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS         0x00000001
#define SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS 0x00000002

#define SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(...) do {				\
	if (__improbable(sched_debug_flags & SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS)) {	\
		KERNEL_DEBUG_CONSTANT(__VA_ARGS__);					\
	}										\
} while (0)

#define SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(...) do {				\
	if (__improbable(sched_debug_flags & SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS)) {	\
		KERNEL_DEBUG_CONSTANT(__VA_ARGS__);						\
	}											\
} while (0)
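
/*
 * Illustration only: emitting a guarded platform tracepoint; the debug
 * code and arguments here are hypothetical.
 *
 *	SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(
 *	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
 *	    arg1, arg2, 0, 0, 0);
 */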
/* Tells if there are "active" RT threads in the system (provided by CPU PM) */
extern void active_rt_threads(
    boolean_t active);

/* Returns the perfcontrol attribute for the thread */
extern perfcontrol_class_t thread_get_perfcontrol_class(
    thread_t thread);

/* Generic routine for Non-AMP schedulers to calculate parallelism */
extern uint32_t sched_qos_max_parallelism(int qos, uint64_t options);
#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef XNU_KERNEL_PRIVATE
/* Toggles a global override to turn off CPU Throttling */
extern void sys_override_cpu_throttle(boolean_t enable_override);

/*
 ****************** Only exported until BSD stops using ********************
 */
extern void thread_vm_bind_group_add(void);
/* Wake up thread directly, passing result */
extern kern_return_t clear_wait(
    thread_t      thread,
    wait_result_t result);
/* Start thread running */
extern void thread_bootstrap_return(void) __attribute__((noreturn));

/* Return from exception (BSD-visible interface) */
extern void thread_exception_return(void) __dead2;

#define SCHED_STRING_MAX_LENGTH (48)
/* String declaring the name of the current scheduler */
extern char sched_string[SCHED_STRING_MAX_LENGTH];

extern thread_t port_name_to_thread_for_ulock(mach_port_name_t thread_name);
/* Attempt to context switch to a specific runnable thread */
extern wait_result_t thread_handoff_deallocate(thread_t thread);

__attribute__((nonnull(1, 2)))
extern void thread_handoff_parameter(thread_t thread,
    thread_continue_t continuation, void *parameter) __dead2;
extern struct waitq *assert_wait_queue(event_t event);

extern kern_return_t thread_wakeup_one_with_pri(event_t event, int priority);

extern thread_t thread_wakeup_identify(event_t event, int priority);

#endif /* XNU_KERNEL_PRIVATE */

#ifdef KERNEL_PRIVATE
/* Set pending block hint for a particular object before we go into a wait state */
extern void thread_set_pending_block_hint(
    thread_t     thread,
    block_hint_t block_hint);

#define QOS_PARALLELISM_COUNT_LOGICAL 0x1
#define QOS_PARALLELISM_REALTIME      0x2
extern uint32_t qos_max_parallelism(int qos, uint64_t options);
#endif /* KERNEL_PRIVATE */
#if XNU_KERNEL_PRIVATE
extern void thread_yield_with_continuation(
    thread_continue_t continuation,
    void              *parameter) __dead2;
#endif
/* Block the current thread */
extern wait_result_t thread_block(
    thread_continue_t continuation);

extern wait_result_t thread_block_parameter(
    thread_continue_t continuation,
    void              *parameter);
/* Declare thread will wait on a particular event */
extern wait_result_t assert_wait(
    event_t          event,
    wait_interrupt_t interruptible);
/* Assert that the thread intends to wait with a timeout */
extern wait_result_t assert_wait_timeout(
    event_t          event,
    wait_interrupt_t interruptible,
    uint32_t         interval,
    uint32_t         scale_factor);
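
/*
 * Illustration only: wait on a hypothetical `my_event` for up to 10 ms,
 * expressed as interval (10) times scale_factor (NSEC_PER_MSEC).
 *
 *	assert_wait_timeout((event_t)&my_event, THREAD_INTERRUPTIBLE,
 *	    10, NSEC_PER_MSEC);
 *	wait_result_t wres = thread_block(THREAD_CONTINUE_NULL);
 */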
/* Assert that the thread intends to wait with an urgency, timeout and leeway */
extern wait_result_t assert_wait_timeout_with_leeway(
    event_t                event,
    wait_interrupt_t       interruptible,
    wait_timeout_urgency_t urgency,
    uint32_t               interval,
    uint32_t               leeway,
    uint32_t               scale_factor);
extern wait_result_t assert_wait_deadline(
    event_t          event,
    wait_interrupt_t interruptible,
    uint64_t         deadline);
/* Assert that the thread intends to wait with an urgency, deadline, and leeway */
extern wait_result_t assert_wait_deadline_with_leeway(
    event_t                event,
    wait_interrupt_t       interruptible,
    wait_timeout_urgency_t urgency,
    uint64_t               deadline,
    uint64_t               leeway);
/* Wake up thread (or threads) waiting on a particular event */
extern kern_return_t thread_wakeup_prim(
    event_t       event,
    boolean_t     one_thread,
    wait_result_t result);
#define thread_wakeup(x)			\
	thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
#define thread_wakeup_with_result(x, z)		\
	thread_wakeup_prim((x), FALSE, (z))
#define thread_wakeup_one(x)			\
	thread_wakeup_prim((x), TRUE, THREAD_AWAKENED)
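
/*
 * Illustration only: the classic wait/wakeup pairing built from these
 * primitives, keyed on a hypothetical `my_event` object.
 *
 *	// Waiter: declare the wait, then block.
 *	wait_result_t wres = assert_wait((event_t)&my_event, THREAD_UNINT);
 *	if (wres == THREAD_WAITING) {
 *		wres = thread_block(THREAD_CONTINUE_NULL);
 *	}
 *
 *	// Waker: wake exactly one thread waiting on the event.
 *	thread_wakeup_one((event_t)&my_event);
 */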
/* Wakeup the specified thread if it is waiting on this event */
extern kern_return_t thread_wakeup_thread(event_t event, thread_t thread);

extern boolean_t preemption_enabled(void);
#ifdef MACH_KERNEL_PRIVATE

/*
 * Scheduler algorithm indirection. If only one algorithm is
 * enabled at compile-time, a direct function call is used.
 * If more than one is enabled, calls are dispatched through
 * a function pointer table.
 */

#if !defined(CONFIG_SCHED_TRADITIONAL) && !defined(CONFIG_SCHED_PROTO) && !defined(CONFIG_SCHED_GRRR) && !defined(CONFIG_SCHED_MULTIQ)
#error Enable at least one scheduler algorithm in osfmk/conf/MASTER.XXX
#endif
#if DEBUG
#define SCHED(f) (sched_current_dispatch->f)
#else /* DEBUG */

/*
 * For DEV & REL kernels, use a static dispatch table instead of
 * using the indirect function table.
 */
extern const struct sched_dispatch_table sched_dualq_dispatch;
#define SCHED(f) (sched_dualq_dispatch.f)

#endif /* DEBUG */
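
/*
 * Illustration only: entry points are invoked through SCHED() so call
 * sites are identical under either dispatch mode, e.g.
 *
 *	int count = SCHED(processor_runq_count)(processor);
 */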
struct sched_dispatch_table {
    const char *sched_name;
    void (*init)(void);                            /* Init global state */
    void (*timebase_init)(void);                   /* Timebase-dependent initialization */
    void (*processor_init)(processor_t processor); /* Per-processor scheduler init */
    void (*pset_init)(processor_set_t pset);       /* Per-processor set scheduler init */

    void (*maintenance_continuation)(void);        /* Function called regularly */
    /*
     * Choose a thread of greater or equal priority from the per-processor
     * runqueue for timeshare/fixed threads
     */
    thread_t (*choose_thread)(
        processor_t processor,
        int         priority,
        ast_t       reason);
    /* True if scheduler supports stealing threads for this pset */
    bool (*steal_thread_enabled)(processor_set_t pset);
    /*
     * Steal a thread from another processor in the pset so that it can run
     * immediately
     */
    thread_t (*steal_thread)(
        processor_set_t pset);
    /*
     * Compute priority for a timeshare thread based on base priority.
     */
    int (*compute_timeshare_priority)(thread_t thread);
    /*
     * Pick the best processor for a thread (any kind of thread) to run on.
     */
    processor_t (*choose_processor)(
        processor_set_t pset,
        processor_t     processor,
        thread_t        thread);
    /*
     * Enqueue a timeshare or fixed priority thread onto the per-processor
     * runqueue
     */
    boolean_t (*processor_enqueue)(
        processor_t processor,
        thread_t    thread,
        integer_t   options);
    /* Migrate threads away in preparation for processor shutdown */
    void (*processor_queue_shutdown)(
        processor_t processor);

    /* Remove the specific thread from the per-processor runqueue */
    boolean_t (*processor_queue_remove)(
        processor_t processor,
        thread_t    thread);
    /*
     * Does the per-processor runqueue have any timeshare or fixed priority
     * threads on it? Called without pset lock held, so should
     * not assume immutability while executing.
     */
    boolean_t (*processor_queue_empty)(processor_t processor);

    /*
     * Would this priority trigger an urgent preemption if it's sitting
     * on the per-processor runqueue?
     */
    boolean_t (*priority_is_urgent)(int priority);

    /*
     * Does the per-processor runqueue contain runnable threads that
     * should cause the currently-running thread to be preempted?
     */
    ast_t (*processor_csw_check)(processor_t processor);
    /*
     * Does the per-processor runqueue contain a runnable thread
     * of > or >= priority, as a preflight for choose_thread() or other
     * thread selection
     */
    boolean_t (*processor_queue_has_priority)(processor_t processor,
        int       priority,
        boolean_t gte);
    /* Quantum size for the specified non-realtime thread. */
    uint32_t (*initial_quantum_size)(thread_t thread);

    /* Scheduler mode for a new thread */
    sched_mode_t (*initial_thread_sched_mode)(task_t parent_task);

    /*
     * Is it safe to call update_priority, which may change a thread's
     * runqueue or other state. This can be used to throttle changes
     * to dynamic priority.
     */
    boolean_t (*can_update_priority)(thread_t thread);
    /*
     * Update both scheduled priority and other persistent state.
     * Side effects may include migration to another processor's runqueue.
     */
    void (*update_priority)(thread_t thread);
    /* Lower overhead update to scheduled priority and state. */
    void (*lightweight_update_priority)(thread_t thread);

    /* Callback for non-realtime threads when the quantum timer fires */
    void (*quantum_expire)(thread_t thread);

    /*
     * Runnable threads on per-processor runqueue. Should only
     * be used for relative comparisons of load between processors.
     */
    int (*processor_runq_count)(processor_t processor);

    /* Aggregate runcount statistics for per-processor runqueue */
    uint64_t (*processor_runq_stats_count_sum)(processor_t processor);

    boolean_t (*processor_bound_count)(processor_t processor);

    void (*thread_update_scan)(sched_update_scan_context_t scan_context);
    /*
     * Use processor->next_thread to pin a thread to an idle
     * processor. If FALSE, threads are enqueued and can
     * be stolen by other processors.
     */
    boolean_t direct_dispatch_to_idle_processors;

    /* Supports more than one pset */
    boolean_t multiple_psets_enabled;
    /* Supports scheduler groups */
    boolean_t sched_groups_enabled;

    /* Supports avoid-processor */
    boolean_t avoid_processor_enabled;

    /* Returns true if this processor should avoid running this thread. */
    bool (*thread_avoid_processor)(processor_t processor, thread_t thread);

    /*
     * Invoked when a processor is about to choose the idle thread
     * Used to send IPIs to a processor which would be preferred to be idle instead.
     * Called with pset lock held, returns pset lock unlocked.
     */
    void (*processor_balance)(processor_t processor, processor_set_t pset);
    rt_queue_t (*rt_runq)(processor_set_t pset);
    void (*rt_init)(processor_set_t pset);
    void (*rt_queue_shutdown)(processor_t processor);
    void (*rt_runq_scan)(sched_update_scan_context_t scan_context);
    int64_t (*rt_runq_count_sum)(void);

    uint32_t (*qos_max_parallelism)(int qos, uint64_t options);
    void (*check_spill)(processor_set_t pset, thread_t thread);
    sched_ipi_type_t (*ipi_policy)(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event);
    bool (*thread_should_yield)(processor_t processor, thread_t thread);
};
#if defined(CONFIG_SCHED_TRADITIONAL)
extern const struct sched_dispatch_table sched_traditional_dispatch;
extern const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch;
#endif

#if defined(CONFIG_SCHED_MULTIQ)
extern const struct sched_dispatch_table sched_multiq_dispatch;
extern const struct sched_dispatch_table sched_dualq_dispatch;
#endif

#if defined(CONFIG_SCHED_PROTO)
extern const struct sched_dispatch_table sched_proto_dispatch;
#endif

#if defined(CONFIG_SCHED_GRRR)
extern const struct sched_dispatch_table sched_grrr_dispatch;
#endif
/*
 * It is an error to invoke any scheduler-related code
 * before this is set up
 */
extern const struct sched_dispatch_table *sched_current_dispatch;

#endif /* MACH_KERNEL_PRIVATE */

__END_DECLS

#endif /* _KERN_SCHED_PRIM_H_ */