/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * File: sched_prim.h
 * Author: David Golub
 *
 * Scheduling primitive definitions file
 *
 */

#ifndef _KERN_SCHED_PRIM_H_
#define _KERN_SCHED_PRIM_H_

#include <sys/cdefs.h>
#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/kern_return.h>
#include <kern/clock.h>
#include <kern/kern_types.h>
#include <kern/percpu.h>
#include <kern/thread.h>
#include <kern/block_hint.h>

extern int thread_get_current_cpuid(void);

#ifdef MACH_KERNEL_PRIVATE

#include <kern/sched_urgency.h>
#include <kern/thread_group.h>
#include <kern/waitq.h>

/* Initialization */
extern void sched_init(void);

extern void sched_startup(void);

extern void sched_timebase_init(void);

extern void pset_rt_init(processor_set_t pset);

extern void sched_rtlocal_init(processor_set_t pset);

extern rt_queue_t sched_rtlocal_runq(processor_set_t pset);

extern void sched_rtlocal_queue_shutdown(processor_t processor);

extern int64_t sched_rtlocal_runq_count_sum(void);

extern void sched_check_spill(processor_set_t pset, thread_t thread);

extern bool sched_thread_should_yield(processor_t processor, thread_t thread);

extern bool sched_steal_thread_DISABLED(processor_set_t pset);
extern bool sched_steal_thread_enabled(processor_set_t pset);

/* Force a preemption point for a thread and wait for it to stop running */
extern boolean_t thread_stop(
    thread_t thread,
    boolean_t until_not_runnable);

/* Release a previous stop request */
extern void thread_unstop(
    thread_t thread);

/* Wait for a thread to stop running */
extern void thread_wait(
    thread_t thread,
    boolean_t until_not_runnable);

/* Unblock thread on wake up */
extern boolean_t thread_unblock(
    thread_t thread,
    wait_result_t wresult);

/* Unblock and dispatch thread */
extern kern_return_t thread_go(
    thread_t thread,
    wait_result_t wresult,
    waitq_options_t option);

/* Check if direct handoff is allowed */
extern boolean_t
thread_allowed_for_handoff(
    thread_t thread);

/* Handle threads at context switch */
extern void thread_dispatch(
    thread_t old_thread,
    thread_t new_thread);

/* Switch directly to a particular thread */
extern int thread_run(
    thread_t self,
    thread_continue_t continuation,
    void *parameter,
    thread_t new_thread);

/* Resume thread with new stack */
extern __dead2 void thread_continue(thread_t old_thread);

/* Invoke continuation */
extern __dead2 void call_continuation(
    thread_continue_t continuation,
    void *parameter,
    wait_result_t wresult,
    boolean_t enable_interrupts);

/*
 * Flags that can be passed to set_sched_pri
 * to skip side effects
 */
__options_decl(set_sched_pri_options_t, uint32_t, {
    SETPRI_DEFAULT = 0x0,
    SETPRI_LAZY = 0x1, /* Avoid setting AST flags or sending IPIs */
});

/* Set the current scheduled priority */
extern void set_sched_pri(
    thread_t thread,
    int16_t priority,
    set_sched_pri_options_t options);
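
/*
 * Illustrative sketch (not part of this interface; `new_pri` is a
 * hypothetical value computed by the caller): a path that holds the thread
 * lock and wants to update the scheduled priority without setting AST flags
 * or sending IPIs can pass SETPRI_LAZY:
 *
 *     set_sched_pri(thread, new_pri, SETPRI_LAZY);
 *
 * SETPRI_DEFAULT performs the full update, including the AST/IPI side effects.
 */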

/* Set base priority of the specified thread */
extern void sched_set_thread_base_priority(
    thread_t thread,
    int priority);

/* Set absolute base priority of the specified thread */
extern void sched_set_kernel_thread_priority(
    thread_t thread,
    int priority);


/* Set the thread's true scheduling mode */
extern void sched_set_thread_mode(thread_t thread,
    sched_mode_t mode);
/* Demote the true scheduler mode */
extern void sched_thread_mode_demote(thread_t thread,
    uint32_t reason);
/* Un-demote the true scheduler mode */
extern void sched_thread_mode_undemote(thread_t thread,
    uint32_t reason);

extern void sched_thread_promote_reason(thread_t thread, uint32_t reason, uintptr_t trace_obj);
extern void sched_thread_unpromote_reason(thread_t thread, uint32_t reason, uintptr_t trace_obj);

/* Re-evaluate base priority of thread (thread locked) */
void thread_recompute_priority(thread_t thread);

/* Re-evaluate scheduled priority of thread (thread locked) */
extern void thread_recompute_sched_pri(
    thread_t thread,
    set_sched_pri_options_t options);

/* Periodic scheduler activity */
extern void sched_init_thread(void);

/* Perform sched_tick housekeeping activities */
extern boolean_t can_update_priority(
    thread_t thread);

extern void update_priority(
    thread_t thread);

extern void lightweight_update_priority(
    thread_t thread);

extern void sched_default_quantum_expire(thread_t thread);

/* Idle processor thread continuation */
extern void idle_thread(
    void* parameter,
    wait_result_t result);

extern kern_return_t idle_thread_create(
    processor_t processor);

/* Continuation return from syscall */
extern void thread_syscall_return(
    kern_return_t ret);

/* Context switch */
extern wait_result_t thread_block_reason(
    thread_continue_t continuation,
    void *parameter,
    ast_t reason);

__options_decl(sched_options_t, uint32_t, {
    SCHED_NONE = 0x0,
    SCHED_TAILQ = 0x1,
    SCHED_HEADQ = 0x2,
    SCHED_PREEMPT = 0x4,
    SCHED_REBALANCE = 0x8,
});

/* Reschedule thread for execution */
extern void thread_setrun(
    thread_t thread,
    sched_options_t options);
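
/*
 * Illustrative sketch (assumption, not taken from this header): a wakeup
 * path that wants the thread enqueued at the tail of a runqueue together
 * with an immediate preemption check can combine options:
 *
 *     thread_setrun(thread, SCHED_TAILQ | SCHED_PREEMPT);
 */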

extern processor_set_t task_choose_pset(
    task_t task);

/* Bind the current thread to a particular processor */
extern processor_t thread_bind(
    processor_t processor);

extern bool pset_has_stealable_threads(
    processor_set_t pset);

extern processor_set_t choose_starting_pset(
    pset_node_t node,
    thread_t thread,
    processor_t *processor_hint);

extern pset_node_t sched_choose_node(
    thread_t thread);

/* Choose the best processor to run a thread */
extern processor_t choose_processor(
    processor_set_t pset,
    processor_t processor,
    thread_t thread);

extern void sched_SMT_balance(
    processor_t processor,
    processor_set_t pset);

extern void thread_quantum_init(
    thread_t thread);

extern void run_queue_init(
    run_queue_t runq);

extern thread_t run_queue_dequeue(
    run_queue_t runq,
    sched_options_t options);

extern boolean_t run_queue_enqueue(
    run_queue_t runq,
    thread_t thread,
    sched_options_t options);

extern void run_queue_remove(
    run_queue_t runq,
    thread_t thread);

extern thread_t run_queue_peek(
    run_queue_t runq);

struct sched_update_scan_context {
    uint64_t earliest_bg_make_runnable_time;
    uint64_t earliest_normal_make_runnable_time;
    uint64_t earliest_rt_make_runnable_time;
    uint64_t sched_tick_last_abstime;
};
typedef struct sched_update_scan_context *sched_update_scan_context_t;

extern void sched_rtlocal_runq_scan(sched_update_scan_context_t scan_context);

extern void sched_pset_made_schedulable(
    processor_t processor,
    processor_set_t pset,
    boolean_t drop_lock);

/*
 * Enum to define various events which need IPIs. The IPI policy
 * engine decides what kind of IPI to use based on destination
 * processor state, thread and one of the following scheduling events.
 */
typedef enum {
    SCHED_IPI_EVENT_BOUND_THR = 0x1,
    SCHED_IPI_EVENT_PREEMPT = 0x2,
    SCHED_IPI_EVENT_SMT_REBAL = 0x3,
    SCHED_IPI_EVENT_SPILL = 0x4,
    SCHED_IPI_EVENT_REBALANCE = 0x5,
} sched_ipi_event_t;


/* Enum to define various IPI types used by the scheduler */
typedef enum {
    SCHED_IPI_NONE = 0x0,
    SCHED_IPI_IMMEDIATE = 0x1,
    SCHED_IPI_IDLE = 0x2,
    SCHED_IPI_DEFERRED = 0x3,
} sched_ipi_type_t;

/* The IPI policy engine behaves in the following manner:
 * - All scheduler events which need an IPI invoke sched_ipi_action() with
 *   the appropriate destination processor, thread and event.
 * - sched_ipi_action() performs basic checks, invokes the scheduler specific
 *   ipi_policy routine and sets pending_AST bits based on the result.
 * - Once the pset lock is dropped, the scheduler invokes sched_ipi_perform()
 *   routine which actually sends the appropriate IPI to the destination core.
 */
extern sched_ipi_type_t sched_ipi_action(processor_t dst, thread_t thread,
    boolean_t dst_idle, sched_ipi_event_t event);
extern void sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi);
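
/*
 * Illustrative sketch of the flow described above (variable names are
 * hypothetical): the IPI is classified while the pset lock is held and
 * only sent after the lock has been dropped:
 *
 *     sched_ipi_type_t ipi = sched_ipi_action(processor, thread,
 *         FALSE, SCHED_IPI_EVENT_PREEMPT);
 *     pset_unlock(pset);
 *     sched_ipi_perform(processor, ipi);
 */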

/* sched_ipi_policy() is the global default IPI policy for all schedulers */
extern sched_ipi_type_t sched_ipi_policy(processor_t dst, thread_t thread,
    boolean_t dst_idle, sched_ipi_event_t event);

/* sched_ipi_deferred_policy() is the global default deferred IPI policy for all schedulers */
extern sched_ipi_type_t sched_ipi_deferred_policy(processor_set_t pset,
    processor_t dst, sched_ipi_event_t event);

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

extern boolean_t thread_update_add_thread(thread_t thread);
extern void thread_update_process_threads(void);
extern boolean_t runq_scan(run_queue_t runq, sched_update_scan_context_t scan_context);

#if CONFIG_SCHED_CLUTCH
extern boolean_t sched_clutch_timeshare_scan(queue_t thread_queue, uint16_t count, sched_update_scan_context_t scan_context);
#endif /* CONFIG_SCHED_CLUTCH */

extern void sched_timeshare_init(void);
extern void sched_timeshare_timebase_init(void);
extern void sched_timeshare_maintenance_continue(void);

extern boolean_t priority_is_urgent(int priority);
extern uint32_t sched_timeshare_initial_quantum_size(thread_t thread);

extern int sched_compute_timeshare_priority(thread_t thread);

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

/* Remove thread from its run queue */
extern boolean_t thread_run_queue_remove(thread_t thread);
thread_t thread_run_queue_remove_for_handoff(thread_t thread);

/* Put a thread back in the run queue after being yanked */
extern void thread_run_queue_reinsert(thread_t thread, sched_options_t options);

extern void thread_timer_expire(
    void *thread,
    void *p1);

extern boolean_t thread_eager_preemption(
    thread_t thread);

extern boolean_t sched_generic_direct_dispatch_to_idle_processors;

/* Set the maximum interrupt level for the thread */
__private_extern__ wait_interrupt_t thread_interrupt_level(
    wait_interrupt_t interruptible);

__private_extern__ wait_result_t thread_mark_wait_locked(
    thread_t thread,
    wait_interrupt_t interruptible);

/* Wake up locked thread directly, passing result */
__private_extern__ kern_return_t clear_wait_internal(
    thread_t thread,
    wait_result_t result);

struct sched_statistics {
    uint32_t csw_count;
    uint32_t preempt_count;
    uint32_t preempted_rt_count;
    uint32_t preempted_by_rt_count;
    uint32_t rt_sched_count;
    uint32_t interrupt_count;
    uint32_t ipi_count;
    uint32_t timer_pop_count;
    uint32_t idle_transitions;
    uint32_t quantum_timer_expirations;
};
PERCPU_DECL(struct sched_statistics, sched_stats);
extern bool sched_stats_active;

extern void sched_stats_handle_csw(
    processor_t processor,
    int reasons,
    int selfpri,
    int otherpri);

extern void sched_stats_handle_runq_change(
    struct runq_stats *stats,
    int old_count);

#define SCHED_STATS_INC(field) \
MACRO_BEGIN \
	if (__improbable(sched_stats_active)) { \
		PERCPU_GET(sched_stats)->field++; \
	} \
MACRO_END
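
/*
 * Example (illustrative): a context-switch path can bump its per-CPU
 * counter with the macro above; the increment only happens when
 * sched_stats_active is set:
 *
 *     SCHED_STATS_INC(csw_count);
 */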

#if DEBUG

#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri) \
MACRO_BEGIN \
	if (__improbable(sched_stats_active)) { \
		sched_stats_handle_csw((processor), \
		    (reasons), (selfpri), (otherpri)); \
	} \
MACRO_END


#define SCHED_STATS_RUNQ_CHANGE(stats, old_count) \
MACRO_BEGIN \
	if (__improbable(sched_stats_active)) { \
		sched_stats_handle_runq_change((stats), (old_count)); \
	} \
MACRO_END

#else /* DEBUG */

#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri) do { } while (0)
#define SCHED_STATS_RUNQ_CHANGE(stats, old_count) do { } while (0)

#endif /* DEBUG */

extern uint32_t sched_debug_flags;
#define SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS 0x00000001
#define SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS 0x00000002

#define SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(...) \
MACRO_BEGIN \
	if (__improbable(sched_debug_flags & \
	    SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS)) { \
		KERNEL_DEBUG_CONSTANT(__VA_ARGS__); \
	} \
MACRO_END

#define SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(...) \
MACRO_BEGIN \
	if (__improbable(sched_debug_flags & \
	    SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS)) { \
		KERNEL_DEBUG_CONSTANT(__VA_ARGS__); \
	} \
MACRO_END
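
/*
 * Example (illustrative; the trace code and arguments shown are
 * placeholders): a platform tracepoint guarded by sched_debug_flags can be
 * emitted as
 *
 *     SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(
 *         MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
 *         arg1, arg2, arg3, arg4, 0);
 */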

/* Tells if there are "active" RT threads in the system (provided by CPU PM) */
extern void active_rt_threads(
    boolean_t active);

/* Returns the perfcontrol attribute for the thread */
extern perfcontrol_class_t thread_get_perfcontrol_class(
    thread_t thread);

/* Generic routine for Non-AMP schedulers to calculate parallelism */
extern uint32_t sched_qos_max_parallelism(int qos, uint64_t options);

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef XNU_KERNEL_PRIVATE

extern void thread_bind_cluster_type(thread_t, char cluster_type, bool soft_bind);

extern int sched_get_rt_n_backup_processors(void);
extern void sched_set_rt_n_backup_processors(int n);

/* Toggles a global override to turn off CPU Throttling */
extern void sys_override_cpu_throttle(boolean_t enable_override);

/*
 ****************** Only exported until BSD stops using ********************
 */

extern void thread_vm_bind_group_add(void);

/* Wake up thread directly, passing result */
extern kern_return_t clear_wait(
    thread_t thread,
    wait_result_t result);

/* Start thread running */
extern void thread_bootstrap_return(void) __attribute__((noreturn));

/* Return from exception (BSD-visible interface) */
extern void thread_exception_return(void) __dead2;

#define SCHED_STRING_MAX_LENGTH (48)
/* String declaring the name of the current scheduler */
extern char sched_string[SCHED_STRING_MAX_LENGTH];

__options_decl(thread_handoff_option_t, uint32_t, {
    THREAD_HANDOFF_NONE = 0,
    THREAD_HANDOFF_SETRUN_NEEDED = 0x1,
});

/* Remove thread from its run queue */
thread_t thread_prepare_for_handoff(thread_t thread, thread_handoff_option_t option);

/* Attempt to context switch to a specific runnable thread */
extern wait_result_t thread_handoff_deallocate(thread_t thread, thread_handoff_option_t option);

__attribute__((nonnull(1, 2)))
extern void thread_handoff_parameter(thread_t thread,
    thread_continue_t continuation, void *parameter, thread_handoff_option_t) __dead2;

extern struct waitq *assert_wait_queue(event_t event);

extern kern_return_t thread_wakeup_one_with_pri(event_t event, int priority);

extern thread_t thread_wakeup_identify(event_t event, int priority);

#endif /* XNU_KERNEL_PRIVATE */

#ifdef KERNEL_PRIVATE
/* Set pending block hint for a particular object before we go into a wait state */
extern void thread_set_pending_block_hint(
    thread_t thread,
    block_hint_t block_hint);

#define QOS_PARALLELISM_COUNT_LOGICAL 0x1
#define QOS_PARALLELISM_REALTIME 0x2
extern uint32_t qos_max_parallelism(int qos, uint64_t options);
#endif /* KERNEL_PRIVATE */

#if XNU_KERNEL_PRIVATE
extern void thread_yield_with_continuation(
    thread_continue_t continuation,
    void *parameter) __dead2;
#endif

/* Context switch */
extern wait_result_t thread_block(
    thread_continue_t continuation);

extern wait_result_t thread_block_parameter(
    thread_continue_t continuation,
    void *parameter);
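
/*
 * Illustrative sketch of continuation-style blocking (the continuation
 * function and state names are hypothetical): a thread that can give up
 * its kernel stack while waiting supplies a continuation, which is invoked
 * on a fresh stack with the wait result once the thread is woken:
 *
 *     assert_wait(event, THREAD_UNINT);
 *     thread_block_parameter(my_continue, my_state);
 *
 * Control does not return to the blocking call site in that case;
 * my_continue(my_state, wresult) runs instead.
 */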

/* Declare thread will wait on a particular event */
extern wait_result_t assert_wait(
    event_t event,
    wait_interrupt_t interruptible);

/* Assert that the thread intends to wait with a timeout */
extern wait_result_t assert_wait_timeout(
    event_t event,
    wait_interrupt_t interruptible,
    uint32_t interval,
    uint32_t scale_factor);

/* Assert that the thread intends to wait with an urgency, timeout and leeway */
extern wait_result_t assert_wait_timeout_with_leeway(
    event_t event,
    wait_interrupt_t interruptible,
    wait_timeout_urgency_t urgency,
    uint32_t interval,
    uint32_t leeway,
    uint32_t scale_factor);

extern wait_result_t assert_wait_deadline(
    event_t event,
    wait_interrupt_t interruptible,
    uint64_t deadline);

/* Assert that the thread intends to wait with an urgency, deadline, and leeway */
extern wait_result_t assert_wait_deadline_with_leeway(
    event_t event,
    wait_interrupt_t interruptible,
    wait_timeout_urgency_t urgency,
    uint64_t deadline,
    uint64_t leeway);

/* Wake up thread (or threads) waiting on a particular event */
extern kern_return_t thread_wakeup_prim(
    event_t event,
    boolean_t one_thread,
    wait_result_t result);

#define thread_wakeup(x) \
	thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
#define thread_wakeup_with_result(x, z) \
	thread_wakeup_prim((x), FALSE, (z))
#define thread_wakeup_one(x) \
	thread_wakeup_prim((x), TRUE, THREAD_AWAKENED)
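
/*
 * Typical wait/wakeup pattern built from these primitives (illustrative
 * sketch; `object` is a hypothetical data structure and the caller is
 * assumed to drop its locks before blocking):
 *
 *     assert_wait((event_t)&object->state, THREAD_UNINT);
 *     ... release object lock ...
 *     wait_result_t wr = thread_block(THREAD_CONTINUE_NULL);
 *
 * and, on the side that satisfies the condition:
 *
 *     thread_wakeup((event_t)&object->state);
 */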

/* Wakeup the specified thread if it is waiting on this event */
extern kern_return_t thread_wakeup_thread(event_t event, thread_t thread);

extern boolean_t preemption_enabled(void);

#ifdef MACH_KERNEL_PRIVATE

/*
 * Scheduler algorithm indirection. If only one algorithm is
 * enabled at compile-time, a direct function call is used.
 * If more than one is enabled, calls are dispatched through
 * a function pointer table.
 */

#if !defined(CONFIG_SCHED_TRADITIONAL) && !defined(CONFIG_SCHED_PROTO) && !defined(CONFIG_SCHED_GRRR) && !defined(CONFIG_SCHED_MULTIQ) && !defined(CONFIG_SCHED_CLUTCH) && !defined(CONFIG_SCHED_EDGE)
#error Enable at least one scheduler algorithm in osfmk/conf/MASTER.XXX
#endif

#if __AMP__

#if CONFIG_SCHED_EDGE
extern const struct sched_dispatch_table sched_edge_dispatch;
#define SCHED(f) (sched_edge_dispatch.f)
#else /* CONFIG_SCHED_EDGE */
extern const struct sched_dispatch_table sched_amp_dispatch;
#define SCHED(f) (sched_amp_dispatch.f)
#endif /* CONFIG_SCHED_EDGE */

#else /* __AMP__ */

#if CONFIG_SCHED_CLUTCH
extern const struct sched_dispatch_table sched_clutch_dispatch;
#define SCHED(f) (sched_clutch_dispatch.f)
#else /* CONFIG_SCHED_CLUTCH */
extern const struct sched_dispatch_table sched_dualq_dispatch;
#define SCHED(f) (sched_dualq_dispatch.f)
#endif /* CONFIG_SCHED_CLUTCH */

#endif /* __AMP__ */
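
/*
 * Illustrative usage of the indirection macro (sketch): callers invoke the
 * active scheduler's implementation through SCHED(), for example
 *
 *     processor = SCHED(choose_processor)(pset, processor, thread);
 *     SCHED(processor_enqueue)(processor, thread, options);
 */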

struct sched_dispatch_table {
    const char *sched_name;
    void (*init)(void); /* Init global state */
    void (*timebase_init)(void); /* Timebase-dependent initialization */
    void (*processor_init)(processor_t processor); /* Per-processor scheduler init */
    void (*pset_init)(processor_set_t pset); /* Per-processor set scheduler init */

    void (*maintenance_continuation)(void); /* Function called regularly */

    /*
     * Choose a thread of greater or equal priority from the per-processor
     * runqueue for timeshare/fixed threads
     */
    thread_t (*choose_thread)(
        processor_t processor,
        int priority,
        ast_t reason);

    /* True if scheduler supports stealing threads for this pset */
    bool (*steal_thread_enabled)(processor_set_t pset);

    /*
     * Steal a thread from another processor in the pset so that it can run
     * immediately
     */
    thread_t (*steal_thread)(
        processor_set_t pset);

    /*
     * Compute priority for a timeshare thread based on base priority.
     */
    int (*compute_timeshare_priority)(thread_t thread);

    /*
     * Pick the best node for a thread to run on.
     */
    pset_node_t (*choose_node)(
        thread_t thread);

    /*
     * Pick the best processor for a thread (any kind of thread) to run on.
     */
    processor_t (*choose_processor)(
        processor_set_t pset,
        processor_t processor,
        thread_t thread);
    /*
     * Enqueue a timeshare or fixed priority thread onto the per-processor
     * runqueue
     */
    boolean_t (*processor_enqueue)(
        processor_t processor,
        thread_t thread,
        sched_options_t options);

    /* Migrate threads away in preparation for processor shutdown */
    void (*processor_queue_shutdown)(
        processor_t processor);

    /* Remove the specific thread from the per-processor runqueue */
    boolean_t (*processor_queue_remove)(
        processor_t processor,
        thread_t thread);

    /*
     * Does the per-processor runqueue have any timeshare or fixed priority
     * threads on it? Called without pset lock held, so should
     * not assume immutability while executing.
     */
    boolean_t (*processor_queue_empty)(processor_t processor);

    /*
     * Would this priority trigger an urgent preemption if it's sitting
     * on the per-processor runqueue?
     */
    boolean_t (*priority_is_urgent)(int priority);

    /*
     * Does the per-processor runqueue contain runnable threads that
     * should cause the currently-running thread to be preempted?
     */
    ast_t (*processor_csw_check)(processor_t processor);

    /*
     * Does the per-processor runqueue contain a runnable thread
     * of > or >= priority, as a preflight for choose_thread() or other
     * thread selection
     */
    boolean_t (*processor_queue_has_priority)(processor_t processor,
        int priority,
        boolean_t gte);

    /* Quantum size for the specified non-realtime thread. */
    uint32_t (*initial_quantum_size)(thread_t thread);

    /* Scheduler mode for a new thread */
    sched_mode_t (*initial_thread_sched_mode)(task_t parent_task);

    /*
     * Is it safe to call update_priority, which may change a thread's
     * runqueue or other state. This can be used to throttle changes
     * to dynamic priority.
     */
    boolean_t (*can_update_priority)(thread_t thread);

    /*
     * Update both scheduled priority and other persistent state.
     * Side effects may include migration to another processor's runqueue.
     */
    void (*update_priority)(thread_t thread);

    /* Lower overhead update to scheduled priority and state. */
    void (*lightweight_update_priority)(thread_t thread);

    /* Callback for non-realtime threads when the quantum timer fires */
    void (*quantum_expire)(thread_t thread);

    /*
     * Runnable threads on per-processor runqueue. Should only
     * be used for relative comparisons of load between processors.
     */
    int (*processor_runq_count)(processor_t processor);

    /* Aggregate runcount statistics for per-processor runqueue */
    uint64_t (*processor_runq_stats_count_sum)(processor_t processor);

    boolean_t (*processor_bound_count)(processor_t processor);

    void (*thread_update_scan)(sched_update_scan_context_t scan_context);

    /* Supports more than one pset */
    boolean_t multiple_psets_enabled;
    /* Supports scheduler groups */
    boolean_t sched_groups_enabled;

    /* Supports avoid-processor */
    boolean_t avoid_processor_enabled;

    /* Returns true if this processor should avoid running this thread. */
    bool (*thread_avoid_processor)(processor_t processor, thread_t thread);

    /*
     * Invoked when a processor is about to choose the idle thread
     * Used to send IPIs to a processor which would be preferred to be idle instead.
     * Called with pset lock held, returns pset lock unlocked.
     */
    void (*processor_balance)(processor_t processor, processor_set_t pset);
    rt_queue_t (*rt_runq)(processor_set_t pset);
    void (*rt_init)(processor_set_t pset);
    void (*rt_queue_shutdown)(processor_t processor);
    void (*rt_runq_scan)(sched_update_scan_context_t scan_context);
    int64_t (*rt_runq_count_sum)(void);

    uint32_t (*qos_max_parallelism)(int qos, uint64_t options);
    void (*check_spill)(processor_set_t pset, thread_t thread);
    sched_ipi_type_t (*ipi_policy)(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event);
    bool (*thread_should_yield)(processor_t processor, thread_t thread);

    /* Routine to update run counts */
    uint32_t (*run_count_incr)(thread_t thread);
    uint32_t (*run_count_decr)(thread_t thread);

    /* Routine to update scheduling bucket for a thread */
    void (*update_thread_bucket)(thread_t thread);

    /* Routine to inform the scheduler when a new pset becomes schedulable */
    void (*pset_made_schedulable)(processor_t processor, processor_set_t pset, boolean_t drop_lock);
#if CONFIG_THREAD_GROUPS
    /* Routine to inform the scheduler when CLPC changes a thread group recommendation */
    void (*thread_group_recommendation_change)(struct thread_group *tg, cluster_type_t new_recommendation);
#endif
};

#if defined(CONFIG_SCHED_TRADITIONAL)
extern const struct sched_dispatch_table sched_traditional_dispatch;
extern const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch;
#endif

#if defined(CONFIG_SCHED_MULTIQ)
extern const struct sched_dispatch_table sched_multiq_dispatch;
extern const struct sched_dispatch_table sched_dualq_dispatch;
#if __AMP__
extern const struct sched_dispatch_table sched_amp_dispatch;
#endif
#endif

#if defined(CONFIG_SCHED_PROTO)
extern const struct sched_dispatch_table sched_proto_dispatch;
#endif

#if defined(CONFIG_SCHED_GRRR)
extern const struct sched_dispatch_table sched_grrr_dispatch;
#endif

#if defined(CONFIG_SCHED_CLUTCH)
extern const struct sched_dispatch_table sched_clutch_dispatch;
#endif

#if defined(CONFIG_SCHED_EDGE)
extern const struct sched_dispatch_table sched_edge_dispatch;
#endif


#endif /* MACH_KERNEL_PRIVATE */

__END_DECLS

#endif /* _KERN_SCHED_PRIM_H_ */