/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	sched_prim.h
 *	Author:	David Golub
 *
 *	Scheduling primitive definitions file
 *
 */

#ifndef _KERN_SCHED_PRIM_H_
#define _KERN_SCHED_PRIM_H_

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/kern_return.h>
#include <kern/clock.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <sys/cdefs.h>
#include <kern/block_hint.h>

#ifdef MACH_KERNEL_PRIVATE

#include <mach/branch_predicates.h>

/* Initialization */
extern void sched_init(void);

extern void sched_startup(void);

extern void sched_timebase_init(void);

extern void pset_rt_init(processor_set_t pset);

extern void sched_rtglobal_init(processor_set_t pset);

extern rt_queue_t sched_rtglobal_runq(processor_set_t pset);

extern void sched_rtglobal_queue_shutdown(processor_t processor);

extern int64_t sched_rtglobal_runq_count_sum(void);

extern void sched_check_spill(processor_set_t pset, thread_t thread);

extern bool sched_thread_should_yield(processor_t processor, thread_t thread);

/* Force a preemption point for a thread and wait for it to stop running */
extern boolean_t thread_stop(
        thread_t thread,
        boolean_t until_not_runnable);

/* Release a previous stop request */
extern void thread_unstop(
        thread_t thread);

/* Wait for a thread to stop running */
extern void thread_wait(
        thread_t thread,
        boolean_t until_not_runnable);

/* Unblock thread on wake up */
extern boolean_t thread_unblock(
        thread_t thread,
        wait_result_t wresult);

/* Unblock and dispatch thread */
extern kern_return_t thread_go(
        thread_t thread,
        wait_result_t wresult);

/* Handle threads at context switch */
extern void thread_dispatch(
        thread_t old_thread,
        thread_t new_thread);

/* Switch directly to a particular thread */
extern int thread_run(
        thread_t self,
        thread_continue_t continuation,
        void *parameter,
        thread_t new_thread);

/* Resume thread with new stack */
extern void thread_continue(
        thread_t old_thread);

/* Invoke continuation */
extern void call_continuation(
        thread_continue_t continuation,
        void *parameter,
        wait_result_t wresult);

/* Set the current scheduled priority */
extern void set_sched_pri(
        thread_t thread,
        int priority);

/* Set base priority of the specified thread */
extern void sched_set_thread_base_priority(
        thread_t thread,
        int priority);

/* Set the thread's true scheduling mode */
extern void sched_set_thread_mode(thread_t thread,
        sched_mode_t mode);
/* Demote the true scheduler mode */
extern void sched_thread_mode_demote(thread_t thread,
        uint32_t reason);
/* Un-demote the true scheduler mode */
extern void sched_thread_mode_undemote(thread_t thread,
        uint32_t reason);

/* Re-evaluate base priority of thread (thread locked) */
void thread_recompute_priority(thread_t thread);

/* Re-evaluate base priority of thread (thread unlocked) */
void thread_recompute_qos(thread_t thread);

/* Reset scheduled priority of thread */
extern void thread_recompute_sched_pri(
        thread_t thread,
        boolean_t override_depress);

/* Periodic scheduler activity */
extern void sched_init_thread(void (*)(void));

/* Perform sched_tick housekeeping activities */
extern boolean_t can_update_priority(
        thread_t thread);

extern void update_priority(
        thread_t thread);

extern void lightweight_update_priority(
        thread_t thread);

extern void sched_default_quantum_expire(thread_t thread);

/* Idle processor thread */
extern void idle_thread(void);

extern kern_return_t idle_thread_create(
        processor_t processor);

/* Continuation return from syscall */
extern void thread_syscall_return(
        kern_return_t ret);

/* Context switch */
extern wait_result_t thread_block_reason(
        thread_continue_t continuation,
        void *parameter,
        ast_t reason);

/* Reschedule thread for execution */
extern void thread_setrun(
        thread_t thread,
        integer_t options);

typedef enum {
        SCHED_NONE      = 0x0,
        SCHED_TAILQ     = 0x1,
        SCHED_HEADQ     = 0x2,
        SCHED_PREEMPT   = 0x4,
        SCHED_REBALANCE = 0x8,
} sched_options_t;
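/*
 * Illustrative only (not part of the original header): the options above
 * form a bitmask, so a caller that wants a thread enqueued at the tail of
 * a runqueue with a preemption check performed might invoke:
 *
 *      thread_setrun(thread, SCHED_TAILQ | SCHED_PREEMPT);
 */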

extern processor_set_t task_choose_pset(
        task_t task);

/* Bind the current thread to a particular processor */
extern processor_t thread_bind(
        processor_t processor);

/* Choose the best processor to run a thread */
extern processor_t choose_processor(
        processor_set_t pset,
        processor_t processor,
        thread_t thread);

extern void sched_SMT_balance(
        processor_t processor,
        processor_set_t pset);

extern void thread_quantum_init(
        thread_t thread);

extern void run_queue_init(
        run_queue_t runq);

extern thread_t run_queue_dequeue(
        run_queue_t runq,
        integer_t options);

extern boolean_t run_queue_enqueue(
        run_queue_t runq,
        thread_t thread,
        integer_t options);

extern void run_queue_remove(
        run_queue_t runq,
        thread_t thread);

struct sched_update_scan_context
{
        uint64_t earliest_bg_make_runnable_time;
        uint64_t earliest_normal_make_runnable_time;
        uint64_t earliest_rt_make_runnable_time;
};
typedef struct sched_update_scan_context *sched_update_scan_context_t;

extern void sched_rtglobal_runq_scan(sched_update_scan_context_t scan_context);

/*
 * Enum to define various events which need IPIs. The IPI policy
 * engine decides what kind of IPI to use based on destination
 * processor state, thread and one of the following scheduling events.
 */
typedef enum {
        SCHED_IPI_EVENT_BOUND_THR = 0x1,
        SCHED_IPI_EVENT_PREEMPT   = 0x2,
        SCHED_IPI_EVENT_SMT_REBAL = 0x3,
        SCHED_IPI_EVENT_SPILL     = 0x4,
        SCHED_IPI_EVENT_REBALANCE = 0x5,
} sched_ipi_event_t;


/* Enum to define various IPI types used by the scheduler */
typedef enum {
        SCHED_IPI_NONE      = 0x0,
        SCHED_IPI_IMMEDIATE = 0x1,
        SCHED_IPI_IDLE      = 0x2,
        SCHED_IPI_DEFERRED  = 0x3,
} sched_ipi_type_t;

/* The IPI policy engine behaves in the following manner:
 * - All scheduler events which need an IPI invoke sched_ipi_action() with
 *   the appropriate destination processor, thread and event.
 * - sched_ipi_action() performs basic checks, invokes the scheduler specific
 *   ipi_policy routine and sets pending_AST bits based on the result.
 * - Once the pset lock is dropped, the scheduler invokes the sched_ipi_perform()
 *   routine which actually sends the appropriate IPI to the destination core.
 */
extern sched_ipi_type_t sched_ipi_action(processor_t dst, thread_t thread,
        boolean_t dst_idle, sched_ipi_event_t event);
extern void sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi);

/* sched_ipi_policy() is the global default IPI policy for all schedulers */
extern sched_ipi_type_t sched_ipi_policy(processor_t dst, thread_t thread,
        boolean_t dst_idle, sched_ipi_event_t event);

/* sched_ipi_deferred_policy() is the global default deferred IPI policy for all schedulers */
extern sched_ipi_type_t sched_ipi_deferred_policy(processor_set_t pset,
        processor_t dst, sched_ipi_event_t event);

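/*
 * Illustrative only (not part of the original header): following the flow
 * described above, a scheduler event handler holding the pset lock might
 * look roughly like this sketch:
 *
 *      sched_ipi_type_t ipi_type;
 *
 *      pset_lock(pset);
 *      ipi_type = sched_ipi_action(processor, thread, FALSE, SCHED_IPI_EVENT_PREEMPT);
 *      pset_unlock(pset);
 *      sched_ipi_perform(processor, ipi_type);
 */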
#if defined(CONFIG_SCHED_TIMESHARE_CORE)

extern boolean_t thread_update_add_thread(thread_t thread);
extern void thread_update_process_threads(void);
extern boolean_t runq_scan(run_queue_t runq, sched_update_scan_context_t scan_context);

extern void sched_timeshare_init(void);
extern void sched_timeshare_timebase_init(void);
extern void sched_timeshare_maintenance_continue(void);

extern boolean_t priority_is_urgent(int priority);
extern uint32_t sched_timeshare_initial_quantum_size(thread_t thread);

extern int sched_compute_timeshare_priority(thread_t thread);

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

/* Remove thread from its run queue */
extern boolean_t thread_run_queue_remove(thread_t thread);
thread_t thread_run_queue_remove_for_handoff(thread_t thread);

/* Put a thread back in the run queue after being yanked */
extern void thread_run_queue_reinsert(thread_t thread, integer_t options);

extern void thread_timer_expire(
        void *thread,
        void *p1);

extern boolean_t thread_eager_preemption(
        thread_t thread);

extern boolean_t sched_generic_direct_dispatch_to_idle_processors;

/* Set the maximum interrupt level for the thread */
__private_extern__ wait_interrupt_t thread_interrupt_level(
        wait_interrupt_t interruptible);

__private_extern__ wait_result_t thread_mark_wait_locked(
        thread_t thread,
        wait_interrupt_t interruptible);

/* Wake up locked thread directly, passing result */
__private_extern__ kern_return_t clear_wait_internal(
        thread_t thread,
        wait_result_t result);

extern void sched_stats_handle_csw(
        processor_t processor,
        int reasons,
        int selfpri,
        int otherpri);

extern void sched_stats_handle_runq_change(
        struct runq_stats *stats,
        int old_count);


#if DEBUG

#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri)         \
do {                                                                    \
        if (__builtin_expect(sched_stats_active, 0)) {                  \
                sched_stats_handle_csw((processor),                     \
                                (reasons), (selfpri), (otherpri));      \
        }                                                               \
} while (0)


#define SCHED_STATS_RUNQ_CHANGE(stats, old_count)                       \
do {                                                                    \
        if (__builtin_expect(sched_stats_active, 0)) {                  \
                sched_stats_handle_runq_change((stats),                 \
                                (old_count));                           \
        }                                                               \
} while (0)

#else /* DEBUG */

#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri) do { } while (0)
#define SCHED_STATS_RUNQ_CHANGE(stats, old_count) do { } while (0)

#endif /* DEBUG */

extern uint32_t sched_debug_flags;
#define SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS           0x00000001
#define SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS   0x00000002

#define SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(...) do {                                    \
        if (__improbable(sched_debug_flags & SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS)) {          \
                KERNEL_DEBUG_CONSTANT(__VA_ARGS__);                                             \
        }                                                                                       \
} while (0)

#define SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(...) do {                            \
        if (__improbable(sched_debug_flags & SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS)) {  \
                KERNEL_DEBUG_CONSTANT(__VA_ARGS__);                                             \
        }                                                                                       \
} while (0)
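/*
 * Illustrative only (not part of the original header): a hypothetical
 * platform tracepoint guarded by sched_debug_flags, assuming the usual
 * kdebug helpers from <sys/kdebug.h>, might be emitted as:
 *
 *      SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(
 *              MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
 *              (uintptr_t)thread_tid(thread), processor->cpu_id, 0, 0, 0);
 */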

#define THREAD_URGENCY_NONE             0       /* indicates that there is no currently runnable thread */
#define THREAD_URGENCY_BACKGROUND       1       /* indicates that the thread is marked as a "background" thread */
#define THREAD_URGENCY_NORMAL           2       /* indicates that the thread is marked as a "normal" thread */
#define THREAD_URGENCY_REAL_TIME        3       /* indicates that the thread is marked as a "real-time" or urgent thread */
#define THREAD_URGENCY_MAX              4       /* Marker */
/* Returns the "urgency" of a thread (provided by scheduler) */
extern int thread_get_urgency(
        thread_t thread,
        uint64_t *rt_period,
        uint64_t *rt_deadline);

/* Tells the "urgency" of the just scheduled thread (provided by CPU PM) */
extern void thread_tell_urgency(
        int urgency,
        uint64_t rt_period,
        uint64_t rt_deadline,
        uint64_t sched_latency,
        thread_t nthread);

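/*
 * Illustrative only (not part of the original header): a hedged sketch of
 * how the urgency interfaces fit together around a context switch, where
 * the scheduler queries the urgency of the thread going on-core (nthread,
 * a hypothetical local) and forwards it to CPU power management:
 *
 *      uint64_t rt_period, rt_deadline;
 *      int urgency = thread_get_urgency(nthread, &rt_period, &rt_deadline);
 *      thread_tell_urgency(urgency, rt_period, rt_deadline, 0, nthread);
 */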
/* Tells if there are "active" RT threads in the system (provided by CPU PM) */
extern void active_rt_threads(
        boolean_t active);

/* Returns the perfcontrol attribute for the thread */
extern perfcontrol_class_t thread_get_perfcontrol_class(
        thread_t thread);

#define PSET_LOAD_NUMERATOR_SHIFT       16
#define PSET_LOAD_FRACTIONAL_SHIFT      4

extern int sched_get_pset_load_average(processor_set_t pset);
extern void sched_update_pset_load_average(processor_set_t pset);

/* Generic routine for Non-AMP schedulers to calculate parallelism */
extern uint32_t sched_qos_max_parallelism(int qos, uint64_t options);

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef XNU_KERNEL_PRIVATE

/* Toggles a global override to turn off CPU Throttling */
#define CPU_THROTTLE_DISABLE    0
#define CPU_THROTTLE_ENABLE     1
extern void sys_override_cpu_throttle(int flag);

/*
 ****************** Only exported until BSD stops using ********************
 */

extern void thread_vm_bind_group_add(void);

/* Wake up thread directly, passing result */
extern kern_return_t clear_wait(
        thread_t thread,
        wait_result_t result);

/* Start thread running */
extern void thread_bootstrap_return(void);

/* Return from exception (BSD-visible interface) */
extern void thread_exception_return(void) __dead2;

#define SCHED_STRING_MAX_LENGTH (48)
/* String declaring the name of the current scheduler */
extern char sched_string[SCHED_STRING_MAX_LENGTH];

extern thread_t port_name_to_thread_for_ulock(mach_port_name_t thread_name);

/* Attempt to context switch to a specific runnable thread */
extern wait_result_t thread_handoff(thread_t thread);

extern struct waitq *assert_wait_queue(event_t event);

extern kern_return_t thread_wakeup_one_with_pri(event_t event, int priority);

extern thread_t thread_wakeup_identify(event_t event, int priority);

#endif /* XNU_KERNEL_PRIVATE */

#ifdef KERNEL_PRIVATE
/* Set pending block hint for a particular object before we go into a wait state */
extern void thread_set_pending_block_hint(
        thread_t thread,
        block_hint_t block_hint);

#define QOS_PARALLELISM_COUNT_LOGICAL   0x1
#define QOS_PARALLELISM_REALTIME        0x2
extern uint32_t qos_max_parallelism(int qos, uint64_t options);

#endif /* KERNEL_PRIVATE */

/* Context switch */
extern wait_result_t thread_block(
        thread_continue_t continuation);

extern wait_result_t thread_block_parameter(
        thread_continue_t continuation,
        void *parameter);

/* Declare thread will wait on a particular event */
extern wait_result_t assert_wait(
        event_t event,
        wait_interrupt_t interruptible);

/* Assert that the thread intends to wait with a timeout */
extern wait_result_t assert_wait_timeout(
        event_t event,
        wait_interrupt_t interruptible,
        uint32_t interval,
        uint32_t scale_factor);

/* Assert that the thread intends to wait with an urgency, timeout and leeway */
extern wait_result_t assert_wait_timeout_with_leeway(
        event_t event,
        wait_interrupt_t interruptible,
        wait_timeout_urgency_t urgency,
        uint32_t interval,
        uint32_t leeway,
        uint32_t scale_factor);

extern wait_result_t assert_wait_deadline(
        event_t event,
        wait_interrupt_t interruptible,
        uint64_t deadline);

/* Assert that the thread intends to wait with an urgency, deadline, and leeway */
extern wait_result_t assert_wait_deadline_with_leeway(
        event_t event,
        wait_interrupt_t interruptible,
        wait_timeout_urgency_t urgency,
        uint64_t deadline,
        uint64_t leeway);

/* Wake up thread (or threads) waiting on a particular event */
extern kern_return_t thread_wakeup_prim(
        event_t event,
        boolean_t one_thread,
        wait_result_t result);

#define thread_wakeup(x)                                        \
                        thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
#define thread_wakeup_with_result(x, z)                         \
                        thread_wakeup_prim((x), FALSE, (z))
#define thread_wakeup_one(x)                                    \
                        thread_wakeup_prim((x), TRUE, THREAD_AWAKENED)

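/*
 * Illustrative only (not part of the original header): the usual
 * wait/wakeup protocol built from these primitives, as a hedged sketch.
 *
 *      Waiting side:
 *              wait_result_t wres = assert_wait((event_t)&obj->flag, THREAD_UNINT);
 *              if (wres == THREAD_WAITING) {
 *                      // drop any lock protecting obj before blocking
 *                      wres = thread_block(THREAD_CONTINUE_NULL);
 *              }
 *
 *      Waking side:
 *              obj->flag = TRUE;
 *              thread_wakeup((event_t)&obj->flag);
 */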
/* Wakeup the specified thread if it is waiting on this event */
extern kern_return_t thread_wakeup_thread(event_t event, thread_t thread);

extern boolean_t preemption_enabled(void);

#ifdef MACH_KERNEL_PRIVATE

/*
 * Scheduler algorithm indirection. If only one algorithm is
 * enabled at compile-time, a direct function call is used.
 * If more than one is enabled, calls are dispatched through
 * a function pointer table.
 */

#if !defined(CONFIG_SCHED_TRADITIONAL) && !defined(CONFIG_SCHED_PROTO) && !defined(CONFIG_SCHED_GRRR) && !defined(CONFIG_SCHED_MULTIQ)
#error Enable at least one scheduler algorithm in osfmk/conf/MASTER.XXX
#endif

#if DEBUG
#define SCHED(f) (sched_current_dispatch->f)
#else /* DEBUG */

/*
 * For DEV & REL kernels, use a static dispatch table instead of
 * using the indirect function table.
 */
extern const struct sched_dispatch_table sched_multiq_dispatch;
#define SCHED(f) (sched_multiq_dispatch.f)

#endif /* DEBUG */

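/*
 * Illustrative only (not part of the original header): callers go through
 * the SCHED() macro rather than naming a scheduler policy directly, e.g.
 *
 *      thread_t next = SCHED(choose_thread)(processor, MINPRI, AST_NONE);
 *
 * which resolves either to an indirect call through sched_current_dispatch
 * or to a direct call into the statically selected dispatch table.
 */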
struct sched_dispatch_table {
        const char *sched_name;
        void (*init)(void);                             /* Init global state */
        void (*timebase_init)(void);                    /* Timebase-dependent initialization */
        void (*processor_init)(processor_t processor);  /* Per-processor scheduler init */
        void (*pset_init)(processor_set_t pset);        /* Per-processor set scheduler init */

        void (*maintenance_continuation)(void);         /* Function called regularly */

        /*
         * Choose a thread of greater or equal priority from the per-processor
         * runqueue for timeshare/fixed threads
         */
        thread_t (*choose_thread)(
                processor_t processor,
                int priority,
                ast_t reason);

        /* True if scheduler supports stealing threads */
        boolean_t steal_thread_enabled;

        /*
         * Steal a thread from another processor in the pset so that it can run
         * immediately
         */
        thread_t (*steal_thread)(
                processor_set_t pset);

        /*
         * Compute priority for a timeshare thread based on base priority.
         */
        int (*compute_timeshare_priority)(thread_t thread);

        /*
         * Pick the best processor for a thread (any kind of thread) to run on.
         */
        processor_t (*choose_processor)(
                processor_set_t pset,
                processor_t processor,
                thread_t thread);
        /*
         * Enqueue a timeshare or fixed priority thread onto the per-processor
         * runqueue
         */
        boolean_t (*processor_enqueue)(
                processor_t processor,
                thread_t thread,
                integer_t options);

        /* Migrate threads away in preparation for processor shutdown */
        void (*processor_queue_shutdown)(
                processor_t processor);

        /* Remove the specific thread from the per-processor runqueue */
        boolean_t (*processor_queue_remove)(
                processor_t processor,
                thread_t thread);

        /*
         * Does the per-processor runqueue have any timeshare or fixed priority
         * threads on it? Called without pset lock held, so should
         * not assume immutability while executing.
         */
        boolean_t (*processor_queue_empty)(processor_t processor);

        /*
         * Would this priority trigger an urgent preemption if it's sitting
         * on the per-processor runqueue?
         */
        boolean_t (*priority_is_urgent)(int priority);

        /*
         * Does the per-processor runqueue contain runnable threads that
         * should cause the currently-running thread to be preempted?
         */
        ast_t (*processor_csw_check)(processor_t processor);

        /*
         * Does the per-processor runqueue contain a runnable thread
         * of > or >= priority, as a preflight for choose_thread() or other
         * thread selection
         */
        boolean_t (*processor_queue_has_priority)(processor_t processor,
                int priority,
                boolean_t gte);

        /* Quantum size for the specified non-realtime thread. */
        uint32_t (*initial_quantum_size)(thread_t thread);

        /* Scheduler mode for a new thread */
        sched_mode_t (*initial_thread_sched_mode)(task_t parent_task);

        /*
         * Is it safe to call update_priority, which may change a thread's
         * runqueue or other state? This can be used to throttle changes
         * to dynamic priority.
         */
        boolean_t (*can_update_priority)(thread_t thread);

        /*
         * Update both scheduled priority and other persistent state.
         * Side effects may include migration to another processor's runqueue.
         */
        void (*update_priority)(thread_t thread);

        /* Lower overhead update to scheduled priority and state. */
        void (*lightweight_update_priority)(thread_t thread);

        /* Callback for non-realtime threads when the quantum timer fires */
        void (*quantum_expire)(thread_t thread);

        /*
         * Runnable threads on per-processor runqueue. Should only
         * be used for relative comparisons of load between processors.
         */
        int (*processor_runq_count)(processor_t processor);

        /* Aggregate runcount statistics for per-processor runqueue */
        uint64_t (*processor_runq_stats_count_sum)(processor_t processor);

        boolean_t (*processor_bound_count)(processor_t processor);

        void (*thread_update_scan)(sched_update_scan_context_t scan_context);

        /*
         * Use processor->next_thread to pin a thread to an idle
         * processor. If FALSE, threads are enqueued and can
         * be stolen by other processors.
         */
        boolean_t direct_dispatch_to_idle_processors;

        /* Supports more than one pset */
        boolean_t multiple_psets_enabled;
        /* Supports scheduler groups */
        boolean_t sched_groups_enabled;

        /* Supports avoid-processor */
        boolean_t avoid_processor_enabled;

        /* Returns true if this processor should avoid running this thread. */
        bool (*thread_avoid_processor)(processor_t processor, thread_t thread);

        /*
         * Invoked when a processor is about to choose the idle thread
         * Used to send IPIs to a processor which would be preferred to be idle instead.
         * Called with pset lock held, returns pset lock unlocked.
         */
        void (*processor_balance)(processor_t processor, processor_set_t pset);
        rt_queue_t (*rt_runq)(processor_set_t pset);
        void (*rt_init)(processor_set_t pset);
        void (*rt_queue_shutdown)(processor_t processor);
        void (*rt_runq_scan)(sched_update_scan_context_t scan_context);
        int64_t (*rt_runq_count_sum)(void);

        uint32_t (*qos_max_parallelism)(int qos, uint64_t options);
        void (*check_spill)(processor_set_t pset, thread_t thread);
        sched_ipi_type_t (*ipi_policy)(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event);
        bool (*thread_should_yield)(processor_t processor, thread_t thread);
};

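/*
 * Illustrative only (not part of the original header): a scheduler module
 * advertises its policy by providing one of these tables filled in with its
 * entry points; the sched_example_* names below are hypothetical.
 *
 *      const struct sched_dispatch_table sched_example_dispatch = {
 *              .sched_name                 = "example",
 *              .init                       = sched_example_init,
 *              .timebase_init              = sched_example_timebase_init,
 *              .choose_thread              = sched_example_choose_thread,
 *              .steal_thread_enabled       = FALSE,
 *              .compute_timeshare_priority = sched_example_compute_priority,
 *              ...
 *      };
 */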
#if defined(CONFIG_SCHED_TRADITIONAL)
extern const struct sched_dispatch_table sched_traditional_dispatch;
extern const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch;
#endif

#if defined(CONFIG_SCHED_MULTIQ)
extern const struct sched_dispatch_table sched_multiq_dispatch;
extern const struct sched_dispatch_table sched_dualq_dispatch;
#endif

#if defined(CONFIG_SCHED_PROTO)
extern const struct sched_dispatch_table sched_proto_dispatch;
#endif

#if defined(CONFIG_SCHED_GRRR)
extern const struct sched_dispatch_table sched_grrr_dispatch;
#endif

/*
 * It is an error to invoke any scheduler-related code
 * before this is set up
 */
extern const struct sched_dispatch_table *sched_current_dispatch;

#endif /* MACH_KERNEL_PRIVATE */

__END_DECLS

#endif /* _KERN_SCHED_PRIM_H_ */