/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	sched_prim.h
 *	Author:	David Golub
 *
 *	Scheduling primitive definitions file
 *
 */

#ifndef _KERN_SCHED_PRIM_H_
#define _KERN_SCHED_PRIM_H_

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/kern_return.h>
#include <kern/clock.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <sys/cdefs.h>
#include <kern/block_hint.h>

#ifdef MACH_KERNEL_PRIVATE

#include <kern/sched_urgency.h>
#include <kern/thread_group.h>

/* Initialization */
extern void sched_init(void);

extern void sched_startup(void);

extern void sched_timebase_init(void);

extern void pset_rt_init(processor_set_t pset);

extern void sched_rtglobal_init(processor_set_t pset);

extern rt_queue_t sched_rtglobal_runq(processor_set_t pset);

extern void sched_rtglobal_queue_shutdown(processor_t processor);

extern int64_t sched_rtglobal_runq_count_sum(void);

extern void sched_check_spill(processor_set_t pset, thread_t thread);

extern bool sched_thread_should_yield(processor_t processor, thread_t thread);

extern bool sched_steal_thread_DISABLED(processor_set_t pset);
extern bool sched_steal_thread_enabled(processor_set_t pset);

/* Force a preemption point for a thread and wait for it to stop running */
extern boolean_t thread_stop(
	thread_t thread,
	boolean_t until_not_runnable);

/* Release a previous stop request */
extern void thread_unstop(
	thread_t thread);

/* Wait for a thread to stop running */
extern void thread_wait(
	thread_t thread,
	boolean_t until_not_runnable);

/* Unblock thread on wake up */
extern boolean_t thread_unblock(
	thread_t thread,
	wait_result_t wresult);

/* Unblock and dispatch thread */
extern kern_return_t thread_go(
	thread_t thread,
	wait_result_t wresult);

/* Handle threads at context switch */
extern void thread_dispatch(
	thread_t old_thread,
	thread_t new_thread);

/* Switch directly to a particular thread */
extern int thread_run(
	thread_t self,
	thread_continue_t continuation,
	void *parameter,
	thread_t new_thread);

/* Resume thread with new stack */
extern __dead2 void thread_continue(thread_t old_thread);

/* Invoke continuation */
extern __dead2 void call_continuation(
	thread_continue_t continuation,
	void *parameter,
	wait_result_t wresult,
	boolean_t enable_interrupts);

/*
 * Flags that can be passed to set_sched_pri
 * to skip side effects
 */
__options_decl(set_sched_pri_options_t, uint32_t, {
	SETPRI_DEFAULT = 0x0,
	SETPRI_LAZY    = 0x1, /* Avoid setting AST flags or sending IPIs */
});

/* Set the current scheduled priority */
extern void set_sched_pri(
	thread_t thread,
	int priority,
	set_sched_pri_options_t options);
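
/*
 * Illustrative only (the `thread` and `priority` variables stand for any
 * caller-held values): a caller that just wants to record a new scheduled
 * priority without the usual side effects can pass the lazy option,
 *
 *	set_sched_pri(thread, priority, SETPRI_LAZY);
 *
 * whereas SETPRI_DEFAULT retains the normal behaviour of setting AST flags
 * or sending IPIs when the change warrants it.
 */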

/* Set base priority of the specified thread */
extern void sched_set_thread_base_priority(
	thread_t thread,
	int priority);

/* Set absolute base priority of the specified thread */
extern void sched_set_kernel_thread_priority(
	thread_t thread,
	int priority);


/* Set the thread's true scheduling mode */
extern void sched_set_thread_mode(thread_t thread,
    sched_mode_t mode);
/* Demote the true scheduler mode */
extern void sched_thread_mode_demote(thread_t thread,
    uint32_t reason);
/* Un-demote the true scheduler mode */
extern void sched_thread_mode_undemote(thread_t thread,
    uint32_t reason);

extern void sched_thread_promote_reason(thread_t thread, uint32_t reason, uintptr_t trace_obj);
extern void sched_thread_unpromote_reason(thread_t thread, uint32_t reason, uintptr_t trace_obj);

/* Re-evaluate base priority of thread (thread locked) */
void thread_recompute_priority(thread_t thread);

/* Re-evaluate scheduled priority of thread (thread locked) */
extern void thread_recompute_sched_pri(
	thread_t thread,
	set_sched_pri_options_t options);

/* Periodic scheduler activity */
extern void sched_init_thread(void (*)(void));

/* Perform sched_tick housekeeping activities */
extern boolean_t can_update_priority(
	thread_t thread);

extern void update_priority(
	thread_t thread);

extern void lightweight_update_priority(
	thread_t thread);

extern void sched_default_quantum_expire(thread_t thread);

/* Idle processor thread continuation */
extern void idle_thread(
	void *parameter,
	wait_result_t result);

extern kern_return_t idle_thread_create(
	processor_t processor);

/* Continuation return from syscall */
extern void thread_syscall_return(
	kern_return_t ret);

/* Context switch */
extern wait_result_t thread_block_reason(
	thread_continue_t continuation,
	void *parameter,
	ast_t reason);

__options_decl(sched_options_t, uint32_t, {
	SCHED_NONE      = 0x0,
	SCHED_TAILQ     = 0x1,
	SCHED_HEADQ     = 0x2,
	SCHED_PREEMPT   = 0x4,
	SCHED_REBALANCE = 0x8,
});

/* Reschedule thread for execution */
extern void thread_setrun(
	thread_t thread,
	sched_options_t options);
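
/*
 * Illustrative only: callers typically OR the sched_options_t flags
 * together when making a thread runnable again (`thread` stands for any
 * runnable thread_t; the surrounding locking is elided). For example,
 *
 *	thread_setrun(thread, SCHED_TAILQ | SCHED_PREEMPT);
 *
 * enqueues the thread at the tail of its runqueue and requests a
 * preemption check on the chosen processor.
 */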

extern processor_set_t task_choose_pset(
	task_t task);

/* Bind the current thread to a particular processor */
extern processor_t thread_bind(
	processor_t processor);

/* Choose the best processor to run a thread */
extern processor_t choose_processor(
	processor_set_t pset,
	processor_t processor,
	thread_t thread);

extern void sched_SMT_balance(
	processor_t processor,
	processor_set_t pset);

extern void thread_quantum_init(
	thread_t thread);

extern void run_queue_init(
	run_queue_t runq);

extern thread_t run_queue_dequeue(
	run_queue_t runq,
	sched_options_t options);

extern boolean_t run_queue_enqueue(
	run_queue_t runq,
	thread_t thread,
	sched_options_t options);

extern void run_queue_remove(
	run_queue_t runq,
	thread_t thread);

extern thread_t run_queue_peek(
	run_queue_t runq);

struct sched_update_scan_context {
	uint64_t earliest_bg_make_runnable_time;
	uint64_t earliest_normal_make_runnable_time;
	uint64_t earliest_rt_make_runnable_time;
};
typedef struct sched_update_scan_context *sched_update_scan_context_t;

extern void sched_rtglobal_runq_scan(sched_update_scan_context_t scan_context);

extern void sched_pset_made_schedulable(
	processor_t processor,
	processor_set_t pset,
	boolean_t drop_lock);

/*
 * Enum to define various events which need IPIs. The IPI policy
 * engine decides what kind of IPI to use based on destination
 * processor state, thread and one of the following scheduling events.
 */
typedef enum {
	SCHED_IPI_EVENT_BOUND_THR = 0x1,
	SCHED_IPI_EVENT_PREEMPT   = 0x2,
	SCHED_IPI_EVENT_SMT_REBAL = 0x3,
	SCHED_IPI_EVENT_SPILL     = 0x4,
	SCHED_IPI_EVENT_REBALANCE = 0x5,
} sched_ipi_event_t;


/* Enum to define various IPI types used by the scheduler */
typedef enum {
	SCHED_IPI_NONE      = 0x0,
	SCHED_IPI_IMMEDIATE = 0x1,
	SCHED_IPI_IDLE      = 0x2,
	SCHED_IPI_DEFERRED  = 0x3,
} sched_ipi_type_t;

/* The IPI policy engine behaves in the following manner:
 * - All scheduler events which need an IPI invoke sched_ipi_action() with
 *   the appropriate destination processor, thread and event.
 * - sched_ipi_action() performs basic checks, invokes the scheduler specific
 *   ipi_policy routine and sets pending_AST bits based on the result.
 * - Once the pset lock is dropped, the scheduler invokes sched_ipi_perform()
 *   routine which actually sends the appropriate IPI to the destination core.
 */
extern sched_ipi_type_t sched_ipi_action(processor_t dst, thread_t thread,
    boolean_t dst_idle, sched_ipi_event_t event);
extern void sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi);

/* sched_ipi_policy() is the global default IPI policy for all schedulers */
extern sched_ipi_type_t sched_ipi_policy(processor_t dst, thread_t thread,
    boolean_t dst_idle, sched_ipi_event_t event);

/* sched_ipi_deferred_policy() is the global default deferred IPI policy for all schedulers */
extern sched_ipi_type_t sched_ipi_deferred_policy(processor_set_t pset,
    processor_t dst, sched_ipi_event_t event);
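
/*
 * A minimal, hedged sketch of the sequence described above (the local
 * variable names are hypothetical; error handling and the precise locking
 * discipline of a real scheduler are omitted). A scheduler that has just
 * made `thread` runnable on `processor` and wants a preemption IPI would
 * do roughly:
 *
 *	sched_ipi_type_t ipi = sched_ipi_action(processor, thread,
 *	    FALSE, SCHED_IPI_EVENT_PREEMPT);
 *	pset_unlock(pset);                  // policy ran under the pset lock
 *	sched_ipi_perform(processor, ipi);  // IPI is sent only after the unlock
 */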

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

extern boolean_t thread_update_add_thread(thread_t thread);
extern void thread_update_process_threads(void);
extern boolean_t runq_scan(run_queue_t runq, sched_update_scan_context_t scan_context);

extern void sched_timeshare_init(void);
extern void sched_timeshare_timebase_init(void);
extern void sched_timeshare_maintenance_continue(void);

extern boolean_t priority_is_urgent(int priority);
extern uint32_t sched_timeshare_initial_quantum_size(thread_t thread);

extern int sched_compute_timeshare_priority(thread_t thread);

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

/* Remove thread from its run queue */
extern boolean_t thread_run_queue_remove(thread_t thread);
thread_t thread_run_queue_remove_for_handoff(thread_t thread);

/* Put a thread back in the run queue after being yanked */
extern void thread_run_queue_reinsert(thread_t thread, sched_options_t options);

extern void thread_timer_expire(
	void *thread,
	void *p1);

extern boolean_t thread_eager_preemption(
	thread_t thread);

extern boolean_t sched_generic_direct_dispatch_to_idle_processors;

/* Set the maximum interrupt level for the thread */
__private_extern__ wait_interrupt_t thread_interrupt_level(
	wait_interrupt_t interruptible);

__private_extern__ wait_result_t thread_mark_wait_locked(
	thread_t thread,
	wait_interrupt_t interruptible);

/* Wake up locked thread directly, passing result */
__private_extern__ kern_return_t clear_wait_internal(
	thread_t thread,
	wait_result_t result);

extern void sched_stats_handle_csw(
	processor_t processor,
	int reasons,
	int selfpri,
	int otherpri);

extern void sched_stats_handle_runq_change(
	struct runq_stats *stats,
	int old_count);


#if DEBUG

#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri) \
do { \
	if (__builtin_expect(sched_stats_active, 0)) { \
		sched_stats_handle_csw((processor), \
		    (reasons), (selfpri), (otherpri)); \
	} \
} while (0)


#define SCHED_STATS_RUNQ_CHANGE(stats, old_count) \
do { \
	if (__builtin_expect(sched_stats_active, 0)) { \
		sched_stats_handle_runq_change((stats), \
		    (old_count)); \
	} \
} while (0)

#else /* DEBUG */

#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri) do { } while (0)
#define SCHED_STATS_RUNQ_CHANGE(stats, old_count) do { } while (0)

#endif /* DEBUG */

extern uint32_t sched_debug_flags;
#define SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS           0x00000001
#define SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS   0x00000002

#define SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(...) do { \
	if (__improbable(sched_debug_flags & SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS)) { \
		KERNEL_DEBUG_CONSTANT(__VA_ARGS__); \
	} \
} while (0)

#define SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(...) do { \
	if (__improbable(sched_debug_flags & SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS)) { \
		KERNEL_DEBUG_CONSTANT(__VA_ARGS__); \
	} \
} while (0)

/* Tells if there are "active" RT threads in the system (provided by CPU PM) */
extern void active_rt_threads(
	boolean_t active);

/* Returns the perfcontrol attribute for the thread */
extern perfcontrol_class_t thread_get_perfcontrol_class(
	thread_t thread);

/* Generic routine for Non-AMP schedulers to calculate parallelism */
extern uint32_t sched_qos_max_parallelism(int qos, uint64_t options);

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef XNU_KERNEL_PRIVATE

extern void thread_bind_cluster_type(char cluster_type);

/* Toggles a global override to turn off CPU Throttling */
extern void sys_override_cpu_throttle(boolean_t enable_override);

/*
 ****************** Only exported until BSD stops using ********************
 */

extern void thread_vm_bind_group_add(void);

/* Wake up thread directly, passing result */
extern kern_return_t clear_wait(
	thread_t thread,
	wait_result_t result);

/* Start thread running */
extern void thread_bootstrap_return(void) __attribute__((noreturn));

/* Return from exception (BSD-visible interface) */
extern void thread_exception_return(void) __dead2;

#define SCHED_STRING_MAX_LENGTH (48)
/* String declaring the name of the current scheduler */
extern char sched_string[SCHED_STRING_MAX_LENGTH];

/* Attempt to context switch to a specific runnable thread */
extern wait_result_t thread_handoff_deallocate(thread_t thread);

__attribute__((nonnull(1, 2)))
extern void thread_handoff_parameter(thread_t thread,
    thread_continue_t continuation, void *parameter) __dead2;

extern struct waitq *assert_wait_queue(event_t event);

extern kern_return_t thread_wakeup_one_with_pri(event_t event, int priority);

extern thread_t thread_wakeup_identify(event_t event, int priority);

#endif /* XNU_KERNEL_PRIVATE */

#ifdef KERNEL_PRIVATE
/* Set pending block hint for a particular object before we go into a wait state */
extern void thread_set_pending_block_hint(
	thread_t thread,
	block_hint_t block_hint);

#define QOS_PARALLELISM_COUNT_LOGICAL   0x1
#define QOS_PARALLELISM_REALTIME        0x2
extern uint32_t qos_max_parallelism(int qos, uint64_t options);
#endif /* KERNEL_PRIVATE */

#if XNU_KERNEL_PRIVATE
extern void thread_yield_with_continuation(
	thread_continue_t continuation,
	void *parameter) __dead2;
#endif

/* Context switch */
extern wait_result_t thread_block(
	thread_continue_t continuation);

extern wait_result_t thread_block_parameter(
	thread_continue_t continuation,
	void *parameter);
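
/*
 * Continuation-style blocking, sketched for illustration only (the
 * continuation function, `event` and `parameter` are hypothetical).
 * A thread that does not need its kernel stack preserved across the block
 * point passes a continuation, which runs on a fresh stack when the thread
 * is next dispatched:
 *
 *	static void
 *	my_wait_done(void *parameter, wait_result_t wresult)
 *	{
 *		// inspect wresult, then finish the operation
 *	}
 *
 *	assert_wait(event, THREAD_UNINT);
 *	thread_block_parameter(my_wait_done, parameter);
 *	// not reached: the block call does not return to the caller
 *	// when a continuation is supplied
 */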

/* Declare thread will wait on a particular event */
extern wait_result_t assert_wait(
	event_t event,
	wait_interrupt_t interruptible);

/* Assert that the thread intends to wait with a timeout */
extern wait_result_t assert_wait_timeout(
	event_t event,
	wait_interrupt_t interruptible,
	uint32_t interval,
	uint32_t scale_factor);

/* Assert that the thread intends to wait with an urgency, timeout and leeway */
extern wait_result_t assert_wait_timeout_with_leeway(
	event_t event,
	wait_interrupt_t interruptible,
	wait_timeout_urgency_t urgency,
	uint32_t interval,
	uint32_t leeway,
	uint32_t scale_factor);

extern wait_result_t assert_wait_deadline(
	event_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline);

/* Assert that the thread intends to wait with an urgency, deadline, and leeway */
extern wait_result_t assert_wait_deadline_with_leeway(
	event_t event,
	wait_interrupt_t interruptible,
	wait_timeout_urgency_t urgency,
	uint64_t deadline,
	uint64_t leeway);

/* Wake up thread (or threads) waiting on a particular event */
extern kern_return_t thread_wakeup_prim(
	event_t event,
	boolean_t one_thread,
	wait_result_t result);

#define thread_wakeup(x) \
	thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
#define thread_wakeup_with_result(x, z) \
	thread_wakeup_prim((x), FALSE, (z))
#define thread_wakeup_one(x) \
	thread_wakeup_prim((x), TRUE, THREAD_AWAKENED)
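
/*
 * The usual shape of a wait is the two-step protocol: assert the wait,
 * then block. A hedged sketch (the condition re-check and locking are
 * simplified; `object` is any stable kernel address used as the event):
 *
 *	wait_result_t wres = assert_wait((event_t)object, THREAD_UNINT);
 *	if (wres == THREAD_WAITING) {
 *		wres = thread_block(THREAD_CONTINUE_NULL);
 *	}
 *
 * and on the side that satisfies the condition:
 *
 *	thread_wakeup((event_t)object);
 */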

/* Wakeup the specified thread if it is waiting on this event */
extern kern_return_t thread_wakeup_thread(event_t event, thread_t thread);

extern boolean_t preemption_enabled(void);

#ifdef MACH_KERNEL_PRIVATE

/*
 * Scheduler algorithm indirection. If only one algorithm is
 * enabled at compile-time, a direct function call is used.
 * If more than one is enabled, calls are dispatched through
 * a function pointer table.
 */

#if !defined(CONFIG_SCHED_TRADITIONAL) && !defined(CONFIG_SCHED_PROTO) && !defined(CONFIG_SCHED_GRRR) && !defined(CONFIG_SCHED_MULTIQ) && !defined(CONFIG_SCHED_CLUTCH)
#error Enable at least one scheduler algorithm in osfmk/conf/MASTER.XXX
#endif

#if __AMP__
extern const struct sched_dispatch_table sched_amp_dispatch;
#define SCHED(f) (sched_amp_dispatch.f)

#else /* __AMP__ */

#if CONFIG_SCHED_CLUTCH
extern const struct sched_dispatch_table sched_clutch_dispatch;
#define SCHED(f) (sched_clutch_dispatch.f)
#else /* CONFIG_SCHED_CLUTCH */
extern const struct sched_dispatch_table sched_dualq_dispatch;
#define SCHED(f) (sched_dualq_dispatch.f)
#endif /* CONFIG_SCHED_CLUTCH */

#endif /* __AMP__ */
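
/*
 * Illustrative only: scheduler core code invokes the selected algorithm's
 * entry points through the SCHED() macro, which expands to a field of the
 * dispatch table chosen above. For example, a call of the form
 *
 *	thread_t next = SCHED(choose_thread)(processor, MINPRI, AST_NONE);
 *
 * resolves to sched_dualq_dispatch.choose_thread (or the clutch/AMP table's
 * entry) depending on the configuration in effect.
 */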

struct sched_dispatch_table {
	const char *sched_name;
	void (*init)(void);                             /* Init global state */
	void (*timebase_init)(void);                    /* Timebase-dependent initialization */
	void (*processor_init)(processor_t processor);  /* Per-processor scheduler init */
	void (*pset_init)(processor_set_t pset);        /* Per-processor set scheduler init */

	void (*maintenance_continuation)(void);         /* Function called regularly */

	/*
	 * Choose a thread of greater or equal priority from the per-processor
	 * runqueue for timeshare/fixed threads
	 */
	thread_t (*choose_thread)(
		processor_t processor,
		int priority,
		ast_t reason);

	/* True if scheduler supports stealing threads for this pset */
	bool (*steal_thread_enabled)(processor_set_t pset);

	/*
	 * Steal a thread from another processor in the pset so that it can run
	 * immediately
	 */
	thread_t (*steal_thread)(
		processor_set_t pset);

	/*
	 * Compute priority for a timeshare thread based on base priority.
	 */
	int (*compute_timeshare_priority)(thread_t thread);

	/*
	 * Pick the best processor for a thread (any kind of thread) to run on.
	 */
	processor_t (*choose_processor)(
		processor_set_t pset,
		processor_t processor,
		thread_t thread);
	/*
	 * Enqueue a timeshare or fixed priority thread onto the per-processor
	 * runqueue
	 */
	boolean_t (*processor_enqueue)(
		processor_t processor,
		thread_t thread,
		sched_options_t options);

	/* Migrate threads away in preparation for processor shutdown */
	void (*processor_queue_shutdown)(
		processor_t processor);

	/* Remove the specific thread from the per-processor runqueue */
	boolean_t (*processor_queue_remove)(
		processor_t processor,
		thread_t thread);

	/*
	 * Does the per-processor runqueue have any timeshare or fixed priority
	 * threads on it? Called without pset lock held, so should
	 * not assume immutability while executing.
	 */
	boolean_t (*processor_queue_empty)(processor_t processor);

	/*
	 * Would this priority trigger an urgent preemption if it's sitting
	 * on the per-processor runqueue?
	 */
	boolean_t (*priority_is_urgent)(int priority);

	/*
	 * Does the per-processor runqueue contain runnable threads that
	 * should cause the currently-running thread to be preempted?
	 */
	ast_t (*processor_csw_check)(processor_t processor);

	/*
	 * Does the per-processor runqueue contain a runnable thread
	 * of > or >= priority, as a preflight for choose_thread() or other
	 * thread selection
	 */
	boolean_t (*processor_queue_has_priority)(processor_t processor,
	    int priority,
	    boolean_t gte);

	/* Quantum size for the specified non-realtime thread. */
	uint32_t (*initial_quantum_size)(thread_t thread);

	/* Scheduler mode for a new thread */
	sched_mode_t (*initial_thread_sched_mode)(task_t parent_task);

	/*
	 * Is it safe to call update_priority, which may change a thread's
	 * runqueue or other state. This can be used to throttle changes
	 * to dynamic priority.
	 */
	boolean_t (*can_update_priority)(thread_t thread);

	/*
	 * Update both scheduled priority and other persistent state.
	 * Side effects may include migration to another processor's runqueue.
	 */
	void (*update_priority)(thread_t thread);

	/* Lower overhead update to scheduled priority and state. */
	void (*lightweight_update_priority)(thread_t thread);

	/* Callback for non-realtime threads when the quantum timer fires */
	void (*quantum_expire)(thread_t thread);

	/*
	 * Runnable threads on per-processor runqueue. Should only
	 * be used for relative comparisons of load between processors.
	 */
	int (*processor_runq_count)(processor_t processor);

	/* Aggregate runcount statistics for per-processor runqueue */
	uint64_t (*processor_runq_stats_count_sum)(processor_t processor);

	boolean_t (*processor_bound_count)(processor_t processor);

	void (*thread_update_scan)(sched_update_scan_context_t scan_context);

	/* Supports more than one pset */
	boolean_t multiple_psets_enabled;
	/* Supports scheduler groups */
	boolean_t sched_groups_enabled;

	/* Supports avoid-processor */
	boolean_t avoid_processor_enabled;

	/* Returns true if this processor should avoid running this thread. */
	bool (*thread_avoid_processor)(processor_t processor, thread_t thread);

	/*
	 * Invoked when a processor is about to choose the idle thread.
	 * Used to send IPIs to a processor which would be preferred to be idle instead.
	 * Called with pset lock held, returns pset lock unlocked.
	 */
	void (*processor_balance)(processor_t processor, processor_set_t pset);
	rt_queue_t (*rt_runq)(processor_set_t pset);
	void (*rt_init)(processor_set_t pset);
	void (*rt_queue_shutdown)(processor_t processor);
	void (*rt_runq_scan)(sched_update_scan_context_t scan_context);
	int64_t (*rt_runq_count_sum)(void);

	uint32_t (*qos_max_parallelism)(int qos, uint64_t options);
	void (*check_spill)(processor_set_t pset, thread_t thread);
	sched_ipi_type_t (*ipi_policy)(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event);
	bool (*thread_should_yield)(processor_t processor, thread_t thread);

	/* Routine to update run counts */
	uint32_t (*run_count_incr)(thread_t thread);
	uint32_t (*run_count_decr)(thread_t thread);

	/* Routine to update scheduling bucket for a thread */
	void (*update_thread_bucket)(thread_t thread);

	/* Routine to inform the scheduler when a new pset becomes schedulable */
	void (*pset_made_schedulable)(processor_t processor, processor_set_t pset, boolean_t drop_lock);
};

#if defined(CONFIG_SCHED_TRADITIONAL)
extern const struct sched_dispatch_table sched_traditional_dispatch;
extern const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch;
#endif

#if defined(CONFIG_SCHED_MULTIQ)
extern const struct sched_dispatch_table sched_multiq_dispatch;
extern const struct sched_dispatch_table sched_dualq_dispatch;
#if __AMP__
extern const struct sched_dispatch_table sched_amp_dispatch;
#endif
#endif

#if defined(CONFIG_SCHED_PROTO)
extern const struct sched_dispatch_table sched_proto_dispatch;
#endif

#if defined(CONFIG_SCHED_GRRR)
extern const struct sched_dispatch_table sched_grrr_dispatch;
#endif

#if defined(CONFIG_SCHED_CLUTCH)
extern const struct sched_dispatch_table sched_clutch_dispatch;
#endif

#endif /* MACH_KERNEL_PRIVATE */

__END_DECLS

#endif /* _KERN_SCHED_PRIM_H_ */