/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	sched_prim.h
 *	Author:	David Golub
 *
 *	Scheduling primitive definitions file
 *
 */

#ifndef _KERN_SCHED_PRIM_H_
#define _KERN_SCHED_PRIM_H_

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/kern_return.h>
#include <kern/clock.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <sys/cdefs.h>
#include <kern/block_hint.h>

#ifdef MACH_KERNEL_PRIVATE

#include <kern/sched_urgency.h>
#include <kern/thread_group.h>

/* Initialization */
extern void sched_init(void);

extern void sched_startup(void);

extern void sched_timebase_init(void);

extern void pset_rt_init(processor_set_t pset);

extern void sched_rtglobal_init(processor_set_t pset);

extern rt_queue_t sched_rtglobal_runq(processor_set_t pset);

extern void sched_rtglobal_queue_shutdown(processor_t processor);

extern int64_t sched_rtglobal_runq_count_sum(void);

extern void sched_check_spill(processor_set_t pset, thread_t thread);

extern bool sched_thread_should_yield(processor_t processor, thread_t thread);

extern bool sched_steal_thread_DISABLED(processor_set_t pset);
extern bool sched_steal_thread_enabled(processor_set_t pset);

/* Force a preemption point for a thread and wait for it to stop running */
extern boolean_t thread_stop(
	thread_t thread,
	boolean_t until_not_runnable);

/* Release a previous stop request */
extern void thread_unstop(
	thread_t thread);

/* Wait for a thread to stop running */
extern void thread_wait(
	thread_t thread,
	boolean_t until_not_runnable);

/* Unblock thread on wake up */
extern boolean_t thread_unblock(
	thread_t thread,
	wait_result_t wresult);

/* Unblock and dispatch thread */
extern kern_return_t thread_go(
	thread_t thread,
	wait_result_t wresult);

/* Handle threads at context switch */
extern void thread_dispatch(
	thread_t old_thread,
	thread_t new_thread);

/* Switch directly to a particular thread */
extern int thread_run(
	thread_t self,
	thread_continue_t continuation,
	void *parameter,
	thread_t new_thread);

/* Resume thread with new stack */
extern __dead2 void thread_continue(thread_t old_thread);

/* Invoke continuation */
extern __dead2 void call_continuation(
	thread_continue_t continuation,
	void *parameter,
	wait_result_t wresult,
	boolean_t enable_interrupts);

/*
 * Flags that can be passed to set_sched_pri
 * to skip side effects
 */
__options_decl(set_sched_pri_options_t, uint32_t, {
	SETPRI_DEFAULT = 0x0,
	SETPRI_LAZY = 0x1, /* Avoid setting AST flags or sending IPIs */
});

/* Set the current scheduled priority */
extern void set_sched_pri(
	thread_t thread,
	int priority,
	set_sched_pri_options_t options);
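
/*
 * Illustrative sketch (not part of this interface): applying a new
 * scheduled priority with SETPRI_LAZY so that no AST flags or IPIs are
 * generated when immediate preemption is not needed. The locking shown
 * and the new_pri variable are assumptions for the example only.
 *
 *	thread_lock(thread);
 *	if (new_pri != thread->sched_pri) {
 *		set_sched_pri(thread, new_pri, SETPRI_LAZY);
 *	}
 *	thread_unlock(thread);
 */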

/* Set base priority of the specified thread */
extern void sched_set_thread_base_priority(
	thread_t thread,
	int priority);

/* Set absolute base priority of the specified thread */
extern void sched_set_kernel_thread_priority(
	thread_t thread,
	int priority);


/* Set the thread's true scheduling mode */
extern void sched_set_thread_mode(thread_t thread,
	sched_mode_t mode);
/* Demote the true scheduler mode */
extern void sched_thread_mode_demote(thread_t thread,
	uint32_t reason);
/* Un-demote the true scheduler mode */
extern void sched_thread_mode_undemote(thread_t thread,
	uint32_t reason);

extern void sched_thread_promote_reason(thread_t thread, uint32_t reason, uintptr_t trace_obj);
extern void sched_thread_unpromote_reason(thread_t thread, uint32_t reason, uintptr_t trace_obj);

/* Re-evaluate base priority of thread (thread locked) */
void thread_recompute_priority(thread_t thread);

/* Re-evaluate scheduled priority of thread (thread locked) */
extern void thread_recompute_sched_pri(
	thread_t thread,
	set_sched_pri_options_t options);

/* Periodic scheduler activity */
extern void sched_init_thread(void (*)(void));

/* Perform sched_tick housekeeping activities */
extern boolean_t can_update_priority(
	thread_t thread);

extern void update_priority(
	thread_t thread);

extern void lightweight_update_priority(
	thread_t thread);

extern void sched_default_quantum_expire(thread_t thread);

/* Idle processor thread continuation */
extern void idle_thread(
	void* parameter,
	wait_result_t result);

extern kern_return_t idle_thread_create(
	processor_t processor);

/* Continuation return from syscall */
extern void thread_syscall_return(
	kern_return_t ret);

/* Context switch */
extern wait_result_t thread_block_reason(
	thread_continue_t continuation,
	void *parameter,
	ast_t reason);

__options_decl(sched_options_t, uint32_t, {
	SCHED_NONE      = 0x0,
	SCHED_TAILQ     = 0x1,
	SCHED_HEADQ     = 0x2,
	SCHED_PREEMPT   = 0x4,
	SCHED_REBALANCE = 0x8,
});

/* Reschedule thread for execution */
extern void thread_setrun(
	thread_t thread,
	sched_options_t options);
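
/*
 * Illustrative sketch (not part of this interface): making a runnable
 * thread eligible for execution again. The flag combination is an
 * assumption for the example; SCHED_TAILQ/SCHED_HEADQ select runqueue
 * placement and SCHED_PREEMPT requests a preemption check on the
 * chosen processor.
 *
 *	// thread must already be locked and in a runnable state here
 *	thread_setrun(thread, SCHED_TAILQ | SCHED_PREEMPT);
 */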

extern processor_set_t task_choose_pset(
	task_t task);

/* Bind the current thread to a particular processor */
extern processor_t thread_bind(
	processor_t processor);

/* Choose the best processor to run a thread */
extern processor_t choose_processor(
	processor_set_t pset,
	processor_t processor,
	thread_t thread);

extern void sched_SMT_balance(
	processor_t processor,
	processor_set_t pset);

extern void thread_quantum_init(
	thread_t thread);

extern void run_queue_init(
	run_queue_t runq);

extern thread_t run_queue_dequeue(
	run_queue_t runq,
	sched_options_t options);

extern boolean_t run_queue_enqueue(
	run_queue_t runq,
	thread_t thread,
	sched_options_t options);

extern void run_queue_remove(
	run_queue_t runq,
	thread_t thread);

extern thread_t run_queue_peek(
	run_queue_t runq);

struct sched_update_scan_context {
	uint64_t earliest_bg_make_runnable_time;
	uint64_t earliest_normal_make_runnable_time;
	uint64_t earliest_rt_make_runnable_time;
};
typedef struct sched_update_scan_context *sched_update_scan_context_t;

extern void sched_rtglobal_runq_scan(sched_update_scan_context_t scan_context);

extern void sched_pset_made_schedulable(
	processor_t processor,
	processor_set_t pset,
	boolean_t drop_lock);

/*
 * Enum to define various events which need IPIs. The IPI policy
 * engine decides what kind of IPI to use based on destination
 * processor state, thread and one of the following scheduling events.
 */
typedef enum {
	SCHED_IPI_EVENT_BOUND_THR = 0x1,
	SCHED_IPI_EVENT_PREEMPT   = 0x2,
	SCHED_IPI_EVENT_SMT_REBAL = 0x3,
	SCHED_IPI_EVENT_SPILL     = 0x4,
	SCHED_IPI_EVENT_REBALANCE = 0x5,
} sched_ipi_event_t;


/* Enum to define various IPI types used by the scheduler */
typedef enum {
	SCHED_IPI_NONE      = 0x0,
	SCHED_IPI_IMMEDIATE = 0x1,
	SCHED_IPI_IDLE      = 0x2,
	SCHED_IPI_DEFERRED  = 0x3,
} sched_ipi_type_t;

/* The IPI policy engine behaves in the following manner:
 * - All scheduler events which need an IPI invoke sched_ipi_action() with
 *   the appropriate destination processor, thread and event.
 * - sched_ipi_action() performs basic checks, invokes the scheduler specific
 *   ipi_policy routine and sets pending_AST bits based on the result.
 * - Once the pset lock is dropped, the scheduler invokes sched_ipi_perform()
 *   routine which actually sends the appropriate IPI to the destination core.
 */
extern sched_ipi_type_t sched_ipi_action(processor_t dst, thread_t thread,
	boolean_t dst_idle, sched_ipi_event_t event);
extern void sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi);
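
/*
 * Illustrative sketch (not part of this interface) of the two-phase flow
 * described above: decide on the IPI while the pset lock is held, then
 * actually send it only after the lock has been dropped. The pset_lock()/
 * pset_unlock() calls and local variables are assumptions for the example.
 *
 *	sched_ipi_type_t ipi_type;
 *
 *	pset_lock(pset);
 *	ipi_type = sched_ipi_action(processor, thread, FALSE, SCHED_IPI_EVENT_PREEMPT);
 *	pset_unlock(pset);
 *	sched_ipi_perform(processor, ipi_type);
 */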

/* sched_ipi_policy() is the global default IPI policy for all schedulers */
extern sched_ipi_type_t sched_ipi_policy(processor_t dst, thread_t thread,
	boolean_t dst_idle, sched_ipi_event_t event);

/* sched_ipi_deferred_policy() is the global default deferred IPI policy for all schedulers */
extern sched_ipi_type_t sched_ipi_deferred_policy(processor_set_t pset,
	processor_t dst, sched_ipi_event_t event);

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

extern boolean_t thread_update_add_thread(thread_t thread);
extern void thread_update_process_threads(void);
extern boolean_t runq_scan(run_queue_t runq, sched_update_scan_context_t scan_context);

extern void sched_timeshare_init(void);
extern void sched_timeshare_timebase_init(void);
extern void sched_timeshare_maintenance_continue(void);

extern boolean_t priority_is_urgent(int priority);
extern uint32_t sched_timeshare_initial_quantum_size(thread_t thread);

extern int sched_compute_timeshare_priority(thread_t thread);

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

/* Remove thread from its run queue */
extern boolean_t thread_run_queue_remove(thread_t thread);
thread_t thread_run_queue_remove_for_handoff(thread_t thread);

/* Put a thread back in the run queue after being yanked */
extern void thread_run_queue_reinsert(thread_t thread, sched_options_t options);

extern void thread_timer_expire(
	void *thread,
	void *p1);

extern boolean_t thread_eager_preemption(
	thread_t thread);

extern boolean_t sched_generic_direct_dispatch_to_idle_processors;

/* Set the maximum interrupt level for the thread */
__private_extern__ wait_interrupt_t thread_interrupt_level(
	wait_interrupt_t interruptible);

__private_extern__ wait_result_t thread_mark_wait_locked(
	thread_t thread,
	wait_interrupt_t interruptible);

/* Wake up locked thread directly, passing result */
__private_extern__ kern_return_t clear_wait_internal(
	thread_t thread,
	wait_result_t result);

extern void sched_stats_handle_csw(
	processor_t processor,
	int reasons,
	int selfpri,
	int otherpri);

extern void sched_stats_handle_runq_change(
	struct runq_stats *stats,
	int old_count);


#if DEBUG

#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri) \
do { \
	if (__builtin_expect(sched_stats_active, 0)) { \
		sched_stats_handle_csw((processor), \
		    (reasons), (selfpri), (otherpri)); \
	} \
} while (0)


#define SCHED_STATS_RUNQ_CHANGE(stats, old_count) \
do { \
	if (__builtin_expect(sched_stats_active, 0)) { \
		sched_stats_handle_runq_change((stats), \
		    (old_count)); \
	} \
} while (0)

#else /* DEBUG */

#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri) do { } while (0)
#define SCHED_STATS_RUNQ_CHANGE(stats, old_count) do { } while (0)

#endif /* DEBUG */

extern uint32_t sched_debug_flags;
#define SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS         0x00000001
#define SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS 0x00000002

#define SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(...) do { \
	if (__improbable(sched_debug_flags & SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS)) { \
		KERNEL_DEBUG_CONSTANT(__VA_ARGS__); \
	} \
} while (0)

#define SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(...) do { \
	if (__improbable(sched_debug_flags & SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS)) { \
		KERNEL_DEBUG_CONSTANT(__VA_ARGS__); \
	} \
} while (0)
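
/*
 * Illustrative sketch (not part of this interface): emitting a platform
 * scheduler tracepoint that is always compiled in but only fires when the
 * corresponding bit is set in sched_debug_flags. The debug code and
 * arguments below are hypothetical placeholders.
 *
 *	SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(
 *		MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
 *		thread_tid(thread), processor->cpu_id, 0, 0, 0);
 */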

/* Tells if there are "active" RT threads in the system (provided by CPU PM) */
extern void active_rt_threads(
	boolean_t active);

/* Returns the perfcontrol attribute for the thread */
extern perfcontrol_class_t thread_get_perfcontrol_class(
	thread_t thread);

/* Generic routine for Non-AMP schedulers to calculate parallelism */
extern uint32_t sched_qos_max_parallelism(int qos, uint64_t options);

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef XNU_KERNEL_PRIVATE

extern void thread_bind_cluster_type(char cluster_type);

/* Toggles a global override to turn off CPU Throttling */
extern void sys_override_cpu_throttle(boolean_t enable_override);

/*
 ****************** Only exported until BSD stops using ********************
 */

extern void thread_vm_bind_group_add(void);

/* Wake up thread directly, passing result */
extern kern_return_t clear_wait(
	thread_t thread,
	wait_result_t result);

/* Start thread running */
extern void thread_bootstrap_return(void) __attribute__((noreturn));

/* Return from exception (BSD-visible interface) */
extern void thread_exception_return(void) __dead2;

#define SCHED_STRING_MAX_LENGTH (48)
/* String declaring the name of the current scheduler */
extern char sched_string[SCHED_STRING_MAX_LENGTH];

/* Attempt to context switch to a specific runnable thread */
extern wait_result_t thread_handoff_deallocate(thread_t thread);

__attribute__((nonnull(1, 2)))
extern void thread_handoff_parameter(thread_t thread,
	thread_continue_t continuation, void *parameter) __dead2;

extern struct waitq *assert_wait_queue(event_t event);

extern kern_return_t thread_wakeup_one_with_pri(event_t event, int priority);

extern thread_t thread_wakeup_identify(event_t event, int priority);

#endif /* XNU_KERNEL_PRIVATE */

#ifdef KERNEL_PRIVATE
/* Set pending block hint for a particular object before we go into a wait state */
extern void thread_set_pending_block_hint(
	thread_t thread,
	block_hint_t block_hint);

#define QOS_PARALLELISM_COUNT_LOGICAL 0x1
#define QOS_PARALLELISM_REALTIME      0x2
extern uint32_t qos_max_parallelism(int qos, uint64_t options);
#endif /* KERNEL_PRIVATE */

#if XNU_KERNEL_PRIVATE
extern void thread_yield_with_continuation(
	thread_continue_t continuation,
	void *parameter) __dead2;
#endif

/* Context switch */
extern wait_result_t thread_block(
	thread_continue_t continuation);

extern wait_result_t thread_block_parameter(
	thread_continue_t continuation,
	void *parameter);

/* Declare thread will wait on a particular event */
extern wait_result_t assert_wait(
	event_t event,
	wait_interrupt_t interruptible);

/* Assert that the thread intends to wait with a timeout */
extern wait_result_t assert_wait_timeout(
	event_t event,
	wait_interrupt_t interruptible,
	uint32_t interval,
	uint32_t scale_factor);

/* Assert that the thread intends to wait with an urgency, timeout and leeway */
extern wait_result_t assert_wait_timeout_with_leeway(
	event_t event,
	wait_interrupt_t interruptible,
	wait_timeout_urgency_t urgency,
	uint32_t interval,
	uint32_t leeway,
	uint32_t scale_factor);

extern wait_result_t assert_wait_deadline(
	event_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline);

/* Assert that the thread intends to wait with an urgency, deadline, and leeway */
extern wait_result_t assert_wait_deadline_with_leeway(
	event_t event,
	wait_interrupt_t interruptible,
	wait_timeout_urgency_t urgency,
	uint64_t deadline,
	uint64_t leeway);

/* Wake up thread (or threads) waiting on a particular event */
extern kern_return_t thread_wakeup_prim(
	event_t event,
	boolean_t one_thread,
	wait_result_t result);

#define thread_wakeup(x) \
	thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
#define thread_wakeup_with_result(x, z) \
	thread_wakeup_prim((x), FALSE, (z))
#define thread_wakeup_one(x) \
	thread_wakeup_prim((x), TRUE, THREAD_AWAKENED)
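
/*
 * Illustrative sketch (not part of this interface): the classic
 * assert_wait / thread_block / thread_wakeup pattern. The event is simply
 * the address of the state being waited on; `my_flag` and `my_lock` are
 * hypothetical stand-ins for whatever state and lock the caller uses.
 * Because assert_wait() registers the thread on the wait queue before the
 * lock is dropped, a wakeup issued between the unlock and thread_block()
 * is not lost.
 *
 *	// Waiter
 *	lck_mtx_lock(&my_lock);
 *	while (!my_flag) {
 *		assert_wait((event_t)&my_flag, THREAD_UNINT);
 *		lck_mtx_unlock(&my_lock);
 *		thread_block(THREAD_CONTINUE_NULL);
 *		lck_mtx_lock(&my_lock);
 *	}
 *	lck_mtx_unlock(&my_lock);
 *
 *	// Waker
 *	lck_mtx_lock(&my_lock);
 *	my_flag = TRUE;
 *	thread_wakeup((event_t)&my_flag);
 *	lck_mtx_unlock(&my_lock);
 */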

/* Wakeup the specified thread if it is waiting on this event */
extern kern_return_t thread_wakeup_thread(event_t event, thread_t thread);

extern boolean_t preemption_enabled(void);

#ifdef MACH_KERNEL_PRIVATE

/*
 * Scheduler algorithm indirection. If only one algorithm is
 * enabled at compile-time, a direct function call is used.
 * If more than one is enabled, calls are dispatched through
 * a function pointer table.
 */

#if !defined(CONFIG_SCHED_TRADITIONAL) && !defined(CONFIG_SCHED_PROTO) && !defined(CONFIG_SCHED_GRRR) && !defined(CONFIG_SCHED_MULTIQ) && !defined(CONFIG_SCHED_CLUTCH)
#error Enable at least one scheduler algorithm in osfmk/conf/MASTER.XXX
#endif


#if CONFIG_SCHED_CLUTCH
extern const struct sched_dispatch_table sched_clutch_dispatch;
#define SCHED(f) (sched_clutch_dispatch.f)
#else /* CONFIG_SCHED_CLUTCH */
extern const struct sched_dispatch_table sched_dualq_dispatch;
#define SCHED(f) (sched_dualq_dispatch.f)
#endif /* CONFIG_SCHED_CLUTCH */
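
/*
 * Illustrative sketch (not part of this interface): callers in the
 * scheduler core go through the SCHED() macro so the same code works with
 * whichever dispatch table was selected above. The local variables are
 * assumed to be in scope in the caller.
 *
 *	processor = SCHED(choose_processor)(pset, processor, thread);
 *	urgent = SCHED(priority_is_urgent)(thread->sched_pri);
 */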


struct sched_dispatch_table {
	const char *sched_name;
	void (*init)(void);                            /* Init global state */
	void (*timebase_init)(void);                   /* Timebase-dependent initialization */
	void (*processor_init)(processor_t processor); /* Per-processor scheduler init */
	void (*pset_init)(processor_set_t pset);       /* Per-processor set scheduler init */

	void (*maintenance_continuation)(void);        /* Function called regularly */

	/*
	 * Choose a thread of greater or equal priority from the per-processor
	 * runqueue for timeshare/fixed threads
	 */
	thread_t (*choose_thread)(
		processor_t processor,
		int priority,
		ast_t reason);

	/* True if scheduler supports stealing threads for this pset */
	bool (*steal_thread_enabled)(processor_set_t pset);

	/*
	 * Steal a thread from another processor in the pset so that it can run
	 * immediately
	 */
	thread_t (*steal_thread)(
		processor_set_t pset);

	/*
	 * Compute priority for a timeshare thread based on base priority.
	 */
	int (*compute_timeshare_priority)(thread_t thread);

	/*
	 * Pick the best processor for a thread (any kind of thread) to run on.
	 */
	processor_t (*choose_processor)(
		processor_set_t pset,
		processor_t processor,
		thread_t thread);
	/*
	 * Enqueue a timeshare or fixed priority thread onto the per-processor
	 * runqueue
	 */
	boolean_t (*processor_enqueue)(
		processor_t processor,
		thread_t thread,
		sched_options_t options);

	/* Migrate threads away in preparation for processor shutdown */
	void (*processor_queue_shutdown)(
		processor_t processor);

	/* Remove the specific thread from the per-processor runqueue */
	boolean_t (*processor_queue_remove)(
		processor_t processor,
		thread_t thread);

	/*
	 * Does the per-processor runqueue have any timeshare or fixed priority
	 * threads on it? Called without pset lock held, so should
	 * not assume immutability while executing.
	 */
	boolean_t (*processor_queue_empty)(processor_t processor);

	/*
	 * Would this priority trigger an urgent preemption if it's sitting
	 * on the per-processor runqueue?
	 */
	boolean_t (*priority_is_urgent)(int priority);

	/*
	 * Does the per-processor runqueue contain runnable threads that
	 * should cause the currently-running thread to be preempted?
	 */
	ast_t (*processor_csw_check)(processor_t processor);

	/*
	 * Does the per-processor runqueue contain a runnable thread
	 * of > or >= priority, as a preflight for choose_thread() or other
	 * thread selection
	 */
	boolean_t (*processor_queue_has_priority)(processor_t processor,
		int priority,
		boolean_t gte);

	/* Quantum size for the specified non-realtime thread. */
	uint32_t (*initial_quantum_size)(thread_t thread);

	/* Scheduler mode for a new thread */
	sched_mode_t (*initial_thread_sched_mode)(task_t parent_task);

	/*
	 * Is it safe to call update_priority, which may change a thread's
	 * runqueue or other state. This can be used to throttle changes
	 * to dynamic priority.
	 */
	boolean_t (*can_update_priority)(thread_t thread);

	/*
	 * Update both scheduled priority and other persistent state.
	 * Side effects may include migration to another processor's runqueue.
	 */
	void (*update_priority)(thread_t thread);

	/* Lower overhead update to scheduled priority and state. */
	void (*lightweight_update_priority)(thread_t thread);

	/* Callback for non-realtime threads when the quantum timer fires */
	void (*quantum_expire)(thread_t thread);

	/*
	 * Runnable threads on per-processor runqueue. Should only
	 * be used for relative comparisons of load between processors.
	 */
	int (*processor_runq_count)(processor_t processor);

	/* Aggregate runcount statistics for per-processor runqueue */
	uint64_t (*processor_runq_stats_count_sum)(processor_t processor);

	boolean_t (*processor_bound_count)(processor_t processor);

	void (*thread_update_scan)(sched_update_scan_context_t scan_context);

	/* Supports more than one pset */
	boolean_t multiple_psets_enabled;
	/* Supports scheduler groups */
	boolean_t sched_groups_enabled;

	/* Supports avoid-processor */
	boolean_t avoid_processor_enabled;

	/* Returns true if this processor should avoid running this thread. */
	bool (*thread_avoid_processor)(processor_t processor, thread_t thread);

	/*
	 * Invoked when a processor is about to choose the idle thread
	 * Used to send IPIs to a processor which would be preferred to be idle instead.
	 * Called with pset lock held, returns pset lock unlocked.
	 */
	void (*processor_balance)(processor_t processor, processor_set_t pset);
	rt_queue_t (*rt_runq)(processor_set_t pset);
	void (*rt_init)(processor_set_t pset);
	void (*rt_queue_shutdown)(processor_t processor);
	void (*rt_runq_scan)(sched_update_scan_context_t scan_context);
	int64_t (*rt_runq_count_sum)(void);

	uint32_t (*qos_max_parallelism)(int qos, uint64_t options);
	void (*check_spill)(processor_set_t pset, thread_t thread);
	sched_ipi_type_t (*ipi_policy)(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event);
	bool (*thread_should_yield)(processor_t processor, thread_t thread);

	/* Routine to update run counts */
	uint32_t (*run_count_incr)(thread_t thread);
	uint32_t (*run_count_decr)(thread_t thread);

	/* Routine to update scheduling bucket for a thread */
	void (*update_thread_bucket)(thread_t thread);

	/* Routine to inform the scheduler when a new pset becomes schedulable */
	void (*pset_made_schedulable)(processor_t processor, processor_set_t pset, boolean_t drop_lock);
};
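
/*
 * Illustrative sketch (not part of this interface): each scheduler
 * implementation provides one of these tables. A hypothetical "example"
 * scheduler might fill in a subset like this; the sched_example_* names
 * are placeholders, while the real tables live in the sched_*.c files
 * and are declared below.
 *
 *	const struct sched_dispatch_table sched_example_dispatch = {
 *		.sched_name                  = "example",
 *		.init                        = sched_example_init,
 *		.timebase_init               = sched_example_timebase_init,
 *		.choose_thread               = sched_example_choose_thread,
 *		.choose_processor            = sched_example_choose_processor,
 *		.processor_enqueue           = sched_example_enqueue,
 *		.priority_is_urgent          = priority_is_urgent,
 *		.initial_quantum_size        = sched_timeshare_initial_quantum_size,
 *		.compute_timeshare_priority  = sched_compute_timeshare_priority,
 *		.multiple_psets_enabled      = TRUE,
 *	};
 */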

#if defined(CONFIG_SCHED_TRADITIONAL)
extern const struct sched_dispatch_table sched_traditional_dispatch;
extern const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch;
#endif

#if defined(CONFIG_SCHED_MULTIQ)
extern const struct sched_dispatch_table sched_multiq_dispatch;
extern const struct sched_dispatch_table sched_dualq_dispatch;
#endif

#if defined(CONFIG_SCHED_PROTO)
extern const struct sched_dispatch_table sched_proto_dispatch;
#endif

#if defined(CONFIG_SCHED_GRRR)
extern const struct sched_dispatch_table sched_grrr_dispatch;
#endif

#if defined(CONFIG_SCHED_CLUTCH)
extern const struct sched_dispatch_table sched_clutch_dispatch;
#endif

#endif /* MACH_KERNEL_PRIVATE */

__END_DECLS

#endif /* _KERN_SCHED_PRIM_H_ */