/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	sched_prim.h
 *	Author:	David Golub
 *
 *	Scheduling primitive definitions file
 *
 */

#ifndef _KERN_SCHED_PRIM_H_
#define _KERN_SCHED_PRIM_H_

#include <sys/cdefs.h>
#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/kern_return.h>
#include <kern/clock.h>
#include <kern/kern_types.h>
#include <kern/percpu.h>
#include <kern/thread.h>
#include <kern/block_hint.h>

extern int thread_get_current_cpuid(void);

#ifdef MACH_KERNEL_PRIVATE

#include <kern/sched_urgency.h>
#include <kern/thread_group.h>
#include <kern/waitq.h>

/* Initialization */
extern void sched_init(void);

extern void sched_startup(void);

extern void sched_timebase_init(void);

extern void pset_rt_init(processor_set_t pset);

extern void sched_rtlocal_init(processor_set_t pset);

extern rt_queue_t sched_rtlocal_runq(processor_set_t pset);

extern void sched_rtlocal_queue_shutdown(processor_t processor);

extern int64_t sched_rtlocal_runq_count_sum(void);

extern void sched_check_spill(processor_set_t pset, thread_t thread);

extern bool sched_thread_should_yield(processor_t processor, thread_t thread);

extern bool sched_steal_thread_DISABLED(processor_set_t pset);
extern bool sched_steal_thread_enabled(processor_set_t pset);

/* Force a preemption point for a thread and wait for it to stop running */
extern boolean_t thread_stop(
	thread_t thread,
	boolean_t until_not_runnable);

/* Release a previous stop request */
extern void thread_unstop(
	thread_t thread);

/* Wait for a thread to stop running */
extern void thread_wait(
	thread_t thread,
	boolean_t until_not_runnable);

/* Unblock thread on wake up */
extern boolean_t thread_unblock(
	thread_t thread,
	wait_result_t wresult);

/* Unblock and dispatch thread */
extern kern_return_t thread_go(
	thread_t thread,
	wait_result_t wresult,
	waitq_options_t option);

/* Check if direct handoff is allowed */
extern boolean_t
thread_allowed_for_handoff(
	thread_t thread);

/* Handle threads at context switch */
extern void thread_dispatch(
	thread_t old_thread,
	thread_t new_thread);

/* Switch directly to a particular thread */
extern int thread_run(
	thread_t self,
	thread_continue_t continuation,
	void *parameter,
	thread_t new_thread);

/* Resume thread with new stack */
extern __dead2 void thread_continue(thread_t old_thread);

/* Invoke continuation */
extern __dead2 void call_continuation(
	thread_continue_t continuation,
	void *parameter,
	wait_result_t wresult,
	boolean_t enable_interrupts);

/*
 * Flags that can be passed to set_sched_pri
 * to skip side effects
 */
__options_decl(set_sched_pri_options_t, uint32_t, {
	SETPRI_DEFAULT = 0x0,
	SETPRI_LAZY    = 0x1, /* Avoid setting AST flags or sending IPIs */
});

/* Set the current scheduled priority */
extern void set_sched_pri(
	thread_t thread,
	int16_t priority,
	set_sched_pri_options_t options);
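
/*
 * Illustrative sketch (not part of this interface): a caller that will force
 * a reschedule itself can pass SETPRI_LAZY to skip the AST/IPI side effects.
 * The thread_lock()/thread_unlock() discipline and the priority value shown
 * here are assumptions for illustration only.
 *
 *	thread_lock(thread);
 *	set_sched_pri(thread, BASEPRI_DEFAULT, SETPRI_LAZY);
 *	thread_unlock(thread);
 */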

/* Set base priority of the specified thread */
extern void sched_set_thread_base_priority(
	thread_t thread,
	int priority);

/* Set absolute base priority of the specified thread */
extern void sched_set_kernel_thread_priority(
	thread_t thread,
	int priority);


/* Set the thread's true scheduling mode */
extern void sched_set_thread_mode(thread_t thread,
    sched_mode_t mode);
/* Demote the true scheduler mode */
extern void sched_thread_mode_demote(thread_t thread,
    uint32_t reason);
/* Un-demote the true scheduler mode */
extern void sched_thread_mode_undemote(thread_t thread,
    uint32_t reason);

extern void sched_thread_promote_reason(thread_t thread, uint32_t reason, uintptr_t trace_obj);
extern void sched_thread_unpromote_reason(thread_t thread, uint32_t reason, uintptr_t trace_obj);

/* Re-evaluate base priority of thread (thread locked) */
void thread_recompute_priority(thread_t thread);

/* Re-evaluate scheduled priority of thread (thread locked) */
extern void thread_recompute_sched_pri(
	thread_t thread,
	set_sched_pri_options_t options);

/* Periodic scheduler activity */
extern void sched_init_thread(void);

/* Perform sched_tick housekeeping activities */
extern boolean_t can_update_priority(
	thread_t thread);

extern void update_priority(
	thread_t thread);

extern void lightweight_update_priority(
	thread_t thread);

extern void sched_default_quantum_expire(thread_t thread);

/* Idle processor thread continuation */
extern void idle_thread(
	void* parameter,
	wait_result_t result);

extern kern_return_t idle_thread_create(
	processor_t processor);

/* Continuation return from syscall */
extern void thread_syscall_return(
	kern_return_t ret);

/* Context switch */
extern wait_result_t thread_block_reason(
	thread_continue_t continuation,
	void *parameter,
	ast_t reason);

__options_decl(sched_options_t, uint32_t, {
	SCHED_NONE      = 0x0,
	SCHED_TAILQ     = 0x1,
	SCHED_HEADQ     = 0x2,
	SCHED_PREEMPT   = 0x4,
	SCHED_REBALANCE = 0x8,
});

/* Reschedule thread for execution */
extern void thread_setrun(
	thread_t thread,
	sched_options_t options);

extern processor_set_t task_choose_pset(
	task_t task);

/* Bind the current thread to a particular processor */
extern processor_t thread_bind(
	processor_t processor);

extern bool pset_has_stealable_threads(
	processor_set_t pset);

extern processor_set_t choose_starting_pset(
	pset_node_t node,
	thread_t thread,
	processor_t *processor_hint);

extern pset_node_t sched_choose_node(
	thread_t thread);

/* Choose the best processor to run a thread */
extern processor_t choose_processor(
	processor_set_t pset,
	processor_t processor,
	thread_t thread);

extern void sched_SMT_balance(
	processor_t processor,
	processor_set_t pset);

extern void thread_quantum_init(
	thread_t thread);

extern void run_queue_init(
	run_queue_t runq);

extern thread_t run_queue_dequeue(
	run_queue_t runq,
	sched_options_t options);

extern boolean_t run_queue_enqueue(
	run_queue_t runq,
	thread_t thread,
	sched_options_t options);

extern void run_queue_remove(
	run_queue_t runq,
	thread_t thread);

extern thread_t run_queue_peek(
	run_queue_t runq);

struct sched_update_scan_context {
	uint64_t earliest_bg_make_runnable_time;
	uint64_t earliest_normal_make_runnable_time;
	uint64_t earliest_rt_make_runnable_time;
	uint64_t sched_tick_last_abstime;
};
typedef struct sched_update_scan_context *sched_update_scan_context_t;

extern void sched_rtlocal_runq_scan(sched_update_scan_context_t scan_context);

extern void sched_pset_made_schedulable(
	processor_t processor,
	processor_set_t pset,
	boolean_t drop_lock);

/*
 * Enum to define various events which need IPIs. The IPI policy
 * engine decides what kind of IPI to use based on destination
 * processor state, thread and one of the following scheduling events.
 */
typedef enum {
	SCHED_IPI_EVENT_BOUND_THR = 0x1,
	SCHED_IPI_EVENT_PREEMPT   = 0x2,
	SCHED_IPI_EVENT_SMT_REBAL = 0x3,
	SCHED_IPI_EVENT_SPILL     = 0x4,
	SCHED_IPI_EVENT_REBALANCE = 0x5,
} sched_ipi_event_t;


/* Enum to define various IPI types used by the scheduler */
typedef enum {
	SCHED_IPI_NONE      = 0x0,
	SCHED_IPI_IMMEDIATE = 0x1,
	SCHED_IPI_IDLE      = 0x2,
	SCHED_IPI_DEFERRED  = 0x3,
} sched_ipi_type_t;

/* The IPI policy engine behaves in the following manner:
 * - All scheduler events which need an IPI invoke sched_ipi_action() with
 *   the appropriate destination processor, thread and event.
 * - sched_ipi_action() performs basic checks, invokes the scheduler specific
 *   ipi_policy routine and sets pending_AST bits based on the result.
 * - Once the pset lock is dropped, the scheduler invokes sched_ipi_perform()
 *   routine which actually sends the appropriate IPI to the destination core.
 */
extern sched_ipi_type_t sched_ipi_action(processor_t dst, thread_t thread,
    boolean_t dst_idle, sched_ipi_event_t event);
extern void sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi);

/* sched_ipi_policy() is the global default IPI policy for all schedulers */
extern sched_ipi_type_t sched_ipi_policy(processor_t dst, thread_t thread,
    boolean_t dst_idle, sched_ipi_event_t event);

/* sched_ipi_deferred_policy() is the global default deferred IPI policy for all schedulers */
extern sched_ipi_type_t sched_ipi_deferred_policy(processor_set_t pset,
    processor_t dst, sched_ipi_event_t event);
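
/*
 * Illustrative sketch (not part of this interface): a caller typically runs
 * the policy via sched_ipi_action() while the pset lock is held, then sends
 * the chosen IPI with sched_ipi_perform() after dropping the lock.  The
 * pset_lock()/pset_unlock() helpers and the pset/processor/thread variables
 * below are assumptions for illustration only.
 *
 *	sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
 *
 *	pset_lock(pset);
 *	ipi_type = sched_ipi_action(processor, thread, FALSE,
 *	    SCHED_IPI_EVENT_PREEMPT);
 *	pset_unlock(pset);
 *	sched_ipi_perform(processor, ipi_type);
 */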

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

extern boolean_t thread_update_add_thread(thread_t thread);
extern void thread_update_process_threads(void);
extern boolean_t runq_scan(run_queue_t runq, sched_update_scan_context_t scan_context);

#if CONFIG_SCHED_CLUTCH
extern boolean_t sched_clutch_timeshare_scan(queue_t thread_queue, uint16_t count, sched_update_scan_context_t scan_context);
#endif /* CONFIG_SCHED_CLUTCH */

extern void sched_timeshare_init(void);
extern void sched_timeshare_timebase_init(void);
extern void sched_timeshare_maintenance_continue(void);

extern boolean_t priority_is_urgent(int priority);
extern uint32_t sched_timeshare_initial_quantum_size(thread_t thread);

extern int sched_compute_timeshare_priority(thread_t thread);

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

/* Remove thread from its run queue */
extern boolean_t thread_run_queue_remove(thread_t thread);
thread_t thread_run_queue_remove_for_handoff(thread_t thread);

/* Put a thread back in the run queue after being yanked */
extern void thread_run_queue_reinsert(thread_t thread, sched_options_t options);

extern void thread_timer_expire(
	void *thread,
	void *p1);

extern bool thread_is_eager_preempt(thread_t thread);

extern boolean_t sched_generic_direct_dispatch_to_idle_processors;

/* Set the maximum interrupt level for the thread */
__private_extern__ wait_interrupt_t thread_interrupt_level(
	wait_interrupt_t interruptible);

__private_extern__ wait_result_t thread_mark_wait_locked(
	thread_t thread,
	wait_interrupt_t interruptible);

/* Wake up locked thread directly, passing result */
__private_extern__ kern_return_t clear_wait_internal(
	thread_t thread,
	wait_result_t result);

struct sched_statistics {
	uint32_t csw_count;
	uint32_t preempt_count;
	uint32_t preempted_rt_count;
	uint32_t preempted_by_rt_count;
	uint32_t rt_sched_count;
	uint32_t interrupt_count;
	uint32_t ipi_count;
	uint32_t timer_pop_count;
	uint32_t idle_transitions;
	uint32_t quantum_timer_expirations;
};
PERCPU_DECL(struct sched_statistics, sched_stats);
extern bool sched_stats_active;

extern void sched_stats_handle_csw(
	processor_t processor,
	int reasons,
	int selfpri,
	int otherpri);

extern void sched_stats_handle_runq_change(
	struct runq_stats *stats,
	int old_count);

#define SCHED_STATS_INC(field)                          \
MACRO_BEGIN                                             \
	if (__improbable(sched_stats_active)) {         \
		PERCPU_GET(sched_stats)->field++;       \
	}                                               \
MACRO_END
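
/*
 * Illustrative sketch: each counter bump costs only a predicted-untaken
 * branch while sched_stats_active is false.  The `was_preempted` flag below
 * is hypothetical.
 *
 *	SCHED_STATS_INC(csw_count);
 *	if (was_preempted) {
 *		SCHED_STATS_INC(preempt_count);
 *	}
 */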

#if DEBUG

#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri)         \
MACRO_BEGIN                                                             \
	if (__improbable(sched_stats_active)) {                         \
		sched_stats_handle_csw((processor),                     \
		    (reasons), (selfpri), (otherpri));                  \
	}                                                               \
MACRO_END


#define SCHED_STATS_RUNQ_CHANGE(stats, old_count)                       \
MACRO_BEGIN                                                             \
	if (__improbable(sched_stats_active)) {                         \
		sched_stats_handle_runq_change((stats), (old_count));   \
	}                                                               \
MACRO_END

#else /* DEBUG */

#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri) do { } while (0)
#define SCHED_STATS_RUNQ_CHANGE(stats, old_count) do { } while (0)

#endif /* DEBUG */

extern uint32_t sched_debug_flags;
#define SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS           0x00000001
#define SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS   0x00000002

#define SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(...)         \
MACRO_BEGIN                                                     \
	if (__improbable(sched_debug_flags &                    \
	    SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS)) {           \
		KERNEL_DEBUG_CONSTANT(__VA_ARGS__);             \
	}                                                       \
MACRO_END

#define SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(...)         \
MACRO_BEGIN                                                             \
	if (__improbable(sched_debug_flags &                            \
	    SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS)) {           \
		KERNEL_DEBUG_CONSTANT(__VA_ARGS__);                     \
	}                                                               \
MACRO_END
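
/*
 * Illustrative sketch: the wrappers only evaluate KERNEL_DEBUG_CONSTANT()
 * when the matching sched_debug_flags bit is set.  The trace code and
 * arguments below are placeholders, not real tracepoints.
 *
 *	SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(
 *	    MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE,
 *	    thread_tid(thread), 0, 0, 0, 0);
 */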

/* Tells if there are "active" RT threads in the system (provided by CPU PM) */
extern void active_rt_threads(
	boolean_t active);

/* Returns the perfcontrol attribute for the thread */
extern perfcontrol_class_t thread_get_perfcontrol_class(
	thread_t thread);

/* Generic routine for Non-AMP schedulers to calculate parallelism */
extern uint32_t sched_qos_max_parallelism(int qos, uint64_t options);

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef XNU_KERNEL_PRIVATE

extern void thread_bind_cluster_type(thread_t, char cluster_type, bool soft_bind);

extern int sched_get_rt_n_backup_processors(void);
extern void sched_set_rt_n_backup_processors(int n);

/* Toggles a global override to turn off CPU Throttling */
extern void sys_override_cpu_throttle(boolean_t enable_override);

/*
 ****************** Only exported until BSD stops using ********************
 */

extern void thread_vm_bind_group_add(void);

/* Wake up thread directly, passing result */
extern kern_return_t clear_wait(
	thread_t thread,
	wait_result_t result);

/* Start thread running */
extern void thread_bootstrap_return(void) __attribute__((noreturn));

/* Return from exception (BSD-visible interface) */
extern void thread_exception_return(void) __dead2;

#define SCHED_STRING_MAX_LENGTH (48)
/* String declaring the name of the current scheduler */
extern char sched_string[SCHED_STRING_MAX_LENGTH];

__options_decl(thread_handoff_option_t, uint32_t, {
	THREAD_HANDOFF_NONE          = 0,
	THREAD_HANDOFF_SETRUN_NEEDED = 0x1,
});

/* Remove thread from its run queue */
thread_t thread_prepare_for_handoff(thread_t thread, thread_handoff_option_t option);

/* Attempt to context switch to a specific runnable thread */
extern wait_result_t thread_handoff_deallocate(thread_t thread, thread_handoff_option_t option);

__attribute__((nonnull(1, 2)))
extern void thread_handoff_parameter(thread_t thread,
    thread_continue_t continuation, void *parameter, thread_handoff_option_t) __dead2;
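
/*
 * Illustrative sketch: handing off directly to a runnable thread with a
 * continuation.  `my_continuation` and `my_param` are hypothetical, and the
 * reference/locking requirements of the call are not shown.
 *
 *	thread_handoff_parameter(new_thread, my_continuation, my_param,
 *	    THREAD_HANDOFF_NONE);
 *	(not reached: thread_handoff_parameter() does not return)
 */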

extern struct waitq *assert_wait_queue(event_t event);

extern kern_return_t thread_wakeup_one_with_pri(event_t event, int priority);

extern thread_t thread_wakeup_identify(event_t event, int priority);

#endif /* XNU_KERNEL_PRIVATE */

#ifdef KERNEL_PRIVATE
/* Set pending block hint for a particular object before we go into a wait state */
extern void thread_set_pending_block_hint(
	thread_t thread,
	block_hint_t block_hint);

#define QOS_PARALLELISM_COUNT_LOGICAL   0x1
#define QOS_PARALLELISM_REALTIME        0x2
extern uint32_t qos_max_parallelism(int qos, uint64_t options);
#endif /* KERNEL_PRIVATE */

#if XNU_KERNEL_PRIVATE
extern void thread_yield_with_continuation(
	thread_continue_t continuation,
	void *parameter) __dead2;
#endif

/* Context switch */
extern wait_result_t thread_block(
	thread_continue_t continuation);

extern wait_result_t thread_block_parameter(
	thread_continue_t continuation,
	void *parameter);

/* Declare thread will wait on a particular event */
extern wait_result_t assert_wait(
	event_t event,
	wait_interrupt_t interruptible);

/* Assert that the thread intends to wait with a timeout */
extern wait_result_t assert_wait_timeout(
	event_t event,
	wait_interrupt_t interruptible,
	uint32_t interval,
	uint32_t scale_factor);

/* Assert that the thread intends to wait with an urgency, timeout and leeway */
extern wait_result_t assert_wait_timeout_with_leeway(
	event_t event,
	wait_interrupt_t interruptible,
	wait_timeout_urgency_t urgency,
	uint32_t interval,
	uint32_t leeway,
	uint32_t scale_factor);

extern wait_result_t assert_wait_deadline(
	event_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline);

/* Assert that the thread intends to wait with an urgency, deadline, and leeway */
extern wait_result_t assert_wait_deadline_with_leeway(
	event_t event,
	wait_interrupt_t interruptible,
	wait_timeout_urgency_t urgency,
	uint64_t deadline,
	uint64_t leeway);

/* Wake up thread (or threads) waiting on a particular event */
extern kern_return_t thread_wakeup_prim(
	event_t event,
	boolean_t one_thread,
	wait_result_t result);

#define thread_wakeup(x)                        \
	thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
#define thread_wakeup_with_result(x, z)         \
	thread_wakeup_prim((x), FALSE, (z))
#define thread_wakeup_one(x)                    \
	thread_wakeup_prim((x), TRUE, THREAD_AWAKENED)
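
/*
 * Illustrative sketch of the classic wait/wakeup pattern built from the
 * primitives above; `my_event` and the surrounding synchronization are
 * hypothetical.
 *
 *	Waiter:
 *		wait_result_t wr = assert_wait((event_t)&my_event, THREAD_UNINT);
 *		if (wr == THREAD_WAITING) {
 *			wr = thread_block(THREAD_CONTINUE_NULL);
 *		}
 *
 *	Waker:
 *		thread_wakeup((event_t)&my_event);
 */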

/* Wakeup the specified thread if it is waiting on this event */
extern kern_return_t thread_wakeup_thread(event_t event, thread_t thread);

extern boolean_t preemption_enabled(void);

#ifdef MACH_KERNEL_PRIVATE

/*
 * Scheduler algorithm indirection. If only one algorithm is
 * enabled at compile-time, a direct function call is used.
 * If more than one is enabled, calls are dispatched through
 * a function pointer table.
 */

#if !defined(CONFIG_SCHED_TRADITIONAL) && !defined(CONFIG_SCHED_PROTO) && !defined(CONFIG_SCHED_GRRR) && !defined(CONFIG_SCHED_MULTIQ) && !defined(CONFIG_SCHED_CLUTCH) && !defined(CONFIG_SCHED_EDGE)
#error Enable at least one scheduler algorithm in osfmk/conf/MASTER.XXX
#endif

#if __AMP__

#if CONFIG_SCHED_EDGE
extern const struct sched_dispatch_table sched_edge_dispatch;
#define SCHED(f) (sched_edge_dispatch.f)
#else /* CONFIG_SCHED_EDGE */
extern const struct sched_dispatch_table sched_amp_dispatch;
#define SCHED(f) (sched_amp_dispatch.f)
#endif /* CONFIG_SCHED_EDGE */

#else /* __AMP__ */

#if CONFIG_SCHED_CLUTCH
extern const struct sched_dispatch_table sched_clutch_dispatch;
#define SCHED(f) (sched_clutch_dispatch.f)
#else /* CONFIG_SCHED_CLUTCH */
extern const struct sched_dispatch_table sched_dualq_dispatch;
#define SCHED(f) (sched_dualq_dispatch.f)
#endif /* CONFIG_SCHED_CLUTCH */

#endif /* __AMP__ */
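
/*
 * Illustrative sketch: callers dispatch through the selected algorithm with
 * the SCHED() macro, so one call site serves every enabled scheduler.  The
 * entry points are declared in the dispatch table below; the pset, processor
 * and thread variables are assumed to be in scope.
 *
 *	processor_t chosen = SCHED(choose_processor)(pset, processor, thread);
 *	SCHED(processor_enqueue)(chosen, thread, SCHED_TAILQ);
 */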

struct sched_dispatch_table {
	const char *sched_name;
	void (*init)(void);                             /* Init global state */
	void (*timebase_init)(void);                    /* Timebase-dependent initialization */
	void (*processor_init)(processor_t processor);  /* Per-processor scheduler init */
	void (*pset_init)(processor_set_t pset);        /* Per-processor set scheduler init */

	void (*maintenance_continuation)(void);         /* Function called regularly */

	/*
	 * Choose a thread of greater or equal priority from the per-processor
	 * runqueue for timeshare/fixed threads
	 */
	thread_t (*choose_thread)(
		processor_t processor,
		int priority,
		ast_t reason);

	/* True if scheduler supports stealing threads for this pset */
	bool (*steal_thread_enabled)(processor_set_t pset);

	/*
	 * Steal a thread from another processor in the pset so that it can run
	 * immediately
	 */
	thread_t (*steal_thread)(
		processor_set_t pset);

	/*
	 * Compute priority for a timeshare thread based on base priority.
	 */
	int (*compute_timeshare_priority)(thread_t thread);

	/*
	 * Pick the best node for a thread to run on.
	 */
	pset_node_t (*choose_node)(
		thread_t thread);

	/*
	 * Pick the best processor for a thread (any kind of thread) to run on.
	 */
	processor_t (*choose_processor)(
		processor_set_t pset,
		processor_t processor,
		thread_t thread);
	/*
	 * Enqueue a timeshare or fixed priority thread onto the per-processor
	 * runqueue
	 */
	boolean_t (*processor_enqueue)(
		processor_t processor,
		thread_t thread,
		sched_options_t options);

	/* Migrate threads away in preparation for processor shutdown */
	void (*processor_queue_shutdown)(
		processor_t processor);

	/* Remove the specific thread from the per-processor runqueue */
	boolean_t (*processor_queue_remove)(
		processor_t processor,
		thread_t thread);

	/*
	 * Does the per-processor runqueue have any timeshare or fixed priority
	 * threads on it? Called without pset lock held, so should
	 * not assume immutability while executing.
	 */
	boolean_t (*processor_queue_empty)(processor_t processor);

	/*
	 * Would this priority trigger an urgent preemption if it's sitting
	 * on the per-processor runqueue?
	 */
	boolean_t (*priority_is_urgent)(int priority);

	/*
	 * Does the per-processor runqueue contain runnable threads that
	 * should cause the currently-running thread to be preempted?
	 */
	ast_t (*processor_csw_check)(processor_t processor);

	/*
	 * Does the per-processor runqueue contain a runnable thread
	 * of > or >= priority, as a preflight for choose_thread() or other
	 * thread selection
	 */
	boolean_t (*processor_queue_has_priority)(processor_t processor,
	    int priority,
	    boolean_t gte);

	/* Quantum size for the specified non-realtime thread. */
	uint32_t (*initial_quantum_size)(thread_t thread);

	/* Scheduler mode for a new thread */
	sched_mode_t (*initial_thread_sched_mode)(task_t parent_task);

	/*
	 * Is it safe to call update_priority, which may change a thread's
	 * runqueue or other state. This can be used to throttle changes
	 * to dynamic priority.
	 */
	boolean_t (*can_update_priority)(thread_t thread);

	/*
	 * Update both scheduled priority and other persistent state.
	 * Side effects may include migration to another processor's runqueue.
	 */
	void (*update_priority)(thread_t thread);

	/* Lower overhead update to scheduled priority and state. */
	void (*lightweight_update_priority)(thread_t thread);

	/* Callback for non-realtime threads when the quantum timer fires */
	void (*quantum_expire)(thread_t thread);

	/*
	 * Runnable threads on per-processor runqueue. Should only
	 * be used for relative comparisons of load between processors.
	 */
	int (*processor_runq_count)(processor_t processor);

	/* Aggregate runcount statistics for per-processor runqueue */
	uint64_t (*processor_runq_stats_count_sum)(processor_t processor);

	boolean_t (*processor_bound_count)(processor_t processor);

	void (*thread_update_scan)(sched_update_scan_context_t scan_context);

	/* Supports more than one pset */
	boolean_t multiple_psets_enabled;
	/* Supports scheduler groups */
	boolean_t sched_groups_enabled;

	/* Supports avoid-processor */
	boolean_t avoid_processor_enabled;

	/* Returns true if this processor should avoid running this thread. */
	bool (*thread_avoid_processor)(processor_t processor, thread_t thread);

	/*
	 * Invoked when a processor is about to choose the idle thread
	 * Used to send IPIs to a processor which would be preferred to be idle instead.
	 * Called with pset lock held, returns pset lock unlocked.
	 */
	void (*processor_balance)(processor_t processor, processor_set_t pset);
	rt_queue_t (*rt_runq)(processor_set_t pset);
	void (*rt_init)(processor_set_t pset);
	void (*rt_queue_shutdown)(processor_t processor);
	void (*rt_runq_scan)(sched_update_scan_context_t scan_context);
	int64_t (*rt_runq_count_sum)(void);

	uint32_t (*qos_max_parallelism)(int qos, uint64_t options);
	void (*check_spill)(processor_set_t pset, thread_t thread);
	sched_ipi_type_t (*ipi_policy)(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event);
	bool (*thread_should_yield)(processor_t processor, thread_t thread);

	/* Routine to update run counts */
	uint32_t (*run_count_incr)(thread_t thread);
	uint32_t (*run_count_decr)(thread_t thread);

	/* Routine to update scheduling bucket for a thread */
	void (*update_thread_bucket)(thread_t thread);

	/* Routine to inform the scheduler when a new pset becomes schedulable */
	void (*pset_made_schedulable)(processor_t processor, processor_set_t pset, boolean_t drop_lock);
#if CONFIG_THREAD_GROUPS
	/* Routine to inform the scheduler when CLPC changes a thread group recommendation */
	void (*thread_group_recommendation_change)(struct thread_group *tg, cluster_type_t new_recommendation);
#endif
};

#if defined(CONFIG_SCHED_TRADITIONAL)
extern const struct sched_dispatch_table sched_traditional_dispatch;
extern const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch;
#endif

#if defined(CONFIG_SCHED_MULTIQ)
extern const struct sched_dispatch_table sched_multiq_dispatch;
extern const struct sched_dispatch_table sched_dualq_dispatch;
#if __AMP__
extern const struct sched_dispatch_table sched_amp_dispatch;
#endif
#endif

#if defined(CONFIG_SCHED_PROTO)
extern const struct sched_dispatch_table sched_proto_dispatch;
#endif

#if defined(CONFIG_SCHED_GRRR)
extern const struct sched_dispatch_table sched_grrr_dispatch;
#endif

#if defined(CONFIG_SCHED_CLUTCH)
extern const struct sched_dispatch_table sched_clutch_dispatch;
#endif

#if defined(CONFIG_SCHED_EDGE)
extern const struct sched_dispatch_table sched_edge_dispatch;
#endif


#endif /* MACH_KERNEL_PRIVATE */

__END_DECLS

#endif /* _KERN_SCHED_PRIM_H_ */