/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	sched_prim.h
 *	Author:	David Golub
 *
 *	Scheduling primitive definitions file
 *
 */

#ifndef _KERN_SCHED_PRIM_H_
#define _KERN_SCHED_PRIM_H_

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/kern_return.h>
#include <kern/clock.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <sys/cdefs.h>
#include <kern/block_hint.h>

#ifdef MACH_KERNEL_PRIVATE

/* Initialization */
extern void sched_init(void);

extern void sched_startup(void);

extern void sched_timebase_init(void);

extern void pset_rt_init(processor_set_t pset);

extern void sched_rtglobal_init(processor_set_t pset);

extern rt_queue_t sched_rtglobal_runq(processor_set_t pset);

extern void sched_rtglobal_queue_shutdown(processor_t processor);

extern int64_t sched_rtglobal_runq_count_sum(void);

extern void sched_check_spill(processor_set_t pset, thread_t thread);

extern bool sched_thread_should_yield(processor_t processor, thread_t thread);

/* Force a preemption point for a thread and wait for it to stop running */
extern boolean_t thread_stop(
	thread_t	thread,
	boolean_t	until_not_runnable);

/* Release a previous stop request */
extern void thread_unstop(
	thread_t	thread);

/* Wait for a thread to stop running */
extern void thread_wait(
	thread_t	thread,
	boolean_t	until_not_runnable);
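
/*
 * Illustrative pairing (hypothetical caller, not part of this header):
 * a successful thread_stop() must be balanced by thread_unstop() once
 * the caller has finished examining the stopped thread.
 *
 *	if (thread_stop(thread, TRUE)) {
 *		// thread is not running; inspect or modify it safely
 *		thread_unstop(thread);
 *	}
 */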

/* Unblock thread on wake up */
extern boolean_t thread_unblock(
	thread_t	thread,
	wait_result_t	wresult);

/* Unblock and dispatch thread */
extern kern_return_t thread_go(
	thread_t	thread,
	wait_result_t	wresult);

/* Handle threads at context switch */
extern void thread_dispatch(
	thread_t	old_thread,
	thread_t	new_thread);

/* Switch directly to a particular thread */
extern int thread_run(
	thread_t		self,
	thread_continue_t	continuation,
	void			*parameter,
	thread_t		new_thread);

/* Resume thread with new stack */
extern void thread_continue(
	thread_t	old_thread);

/* Invoke continuation */
extern void call_continuation(
	thread_continue_t	continuation,
	void			*parameter,
	wait_result_t		wresult,
	boolean_t		enable_interrupts);

/*
 * Flags that can be passed to set_sched_pri
 * to skip side effects
 */
typedef enum {
	SETPRI_DEFAULT	= 0x0,
	SETPRI_LAZY	= 0x1,	/* Avoid setting AST flags or sending IPIs */
} set_sched_pri_options_t;

/* Set the current scheduled priority */
extern void set_sched_pri(
	thread_t		thread,
	int			priority,
	set_sched_pri_options_t	options);
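
/*
 * Example (illustrative): a caller that wants to update the scheduled
 * priority without waking or interrupting other cores can pass SETPRI_LAZY:
 *
 *	set_sched_pri(thread, priority, SETPRI_LAZY);
 */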

/* Set base priority of the specified thread */
extern void sched_set_thread_base_priority(
	thread_t	thread,
	int		priority);

/* Set the thread's true scheduling mode */
extern void sched_set_thread_mode(thread_t thread,
	sched_mode_t mode);
/* Demote the true scheduler mode */
extern void sched_thread_mode_demote(thread_t thread,
	uint32_t reason);
/* Un-demote the true scheduler mode */
extern void sched_thread_mode_undemote(thread_t thread,
	uint32_t reason);

extern void sched_thread_promote_to_pri(thread_t thread, int priority, uintptr_t trace_obj);
extern void sched_thread_update_promotion_to_pri(thread_t thread, int priority, uintptr_t trace_obj);
extern void sched_thread_unpromote(thread_t thread, uintptr_t trace_obj);

extern void assert_promotions_invariant(thread_t thread);

extern void sched_thread_promote_reason(thread_t thread, uint32_t reason, uintptr_t trace_obj);
extern void sched_thread_unpromote_reason(thread_t thread, uint32_t reason, uintptr_t trace_obj);

/* Re-evaluate base priority of thread (thread locked) */
void thread_recompute_priority(thread_t thread);

/* Re-evaluate scheduled priority of thread (thread locked) */
extern void thread_recompute_sched_pri(
	thread_t		thread,
	set_sched_pri_options_t	options);

/* Periodic scheduler activity */
extern void sched_init_thread(void (*)(void));

/* Perform sched_tick housekeeping activities */
extern boolean_t can_update_priority(
	thread_t	thread);

extern void update_priority(
	thread_t	thread);

extern void lightweight_update_priority(
	thread_t	thread);

extern void sched_default_quantum_expire(thread_t thread);

/* Idle processor thread */
extern void idle_thread(void);

extern kern_return_t idle_thread_create(
	processor_t	processor);

/* Continuation return from syscall */
extern void thread_syscall_return(
	kern_return_t	ret);

/* Context switch */
extern wait_result_t thread_block_reason(
	thread_continue_t	continuation,
	void			*parameter,
	ast_t			reason);

/* Reschedule thread for execution */
extern void thread_setrun(
	thread_t	thread,
	integer_t	options);

typedef enum {
	SCHED_NONE	= 0x0,
	SCHED_TAILQ	= 0x1,
	SCHED_HEADQ	= 0x2,
	SCHED_PREEMPT	= 0x4,
	SCHED_REBALANCE	= 0x8,
} sched_options_t;
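
/*
 * Illustrative use (hypothetical caller): thread_setrun() options combine
 * as a bitmask, e.g. enqueue at the tail and check for preemption:
 *
 *	thread_setrun(thread, SCHED_TAILQ | SCHED_PREEMPT);
 */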

extern processor_set_t task_choose_pset(
	task_t		task);

/* Bind the current thread to a particular processor */
extern processor_t thread_bind(
	processor_t	processor);

/* Choose the best processor to run a thread */
extern processor_t choose_processor(
	processor_set_t	pset,
	processor_t	processor,
	thread_t	thread);

extern void sched_SMT_balance(
	processor_t	processor,
	processor_set_t	pset);

extern void thread_quantum_init(
	thread_t	thread);

extern void run_queue_init(
	run_queue_t	runq);

extern thread_t run_queue_dequeue(
	run_queue_t	runq,
	integer_t	options);

extern boolean_t run_queue_enqueue(
	run_queue_t	runq,
	thread_t	thread,
	integer_t	options);

extern void run_queue_remove(
	run_queue_t	runq,
	thread_t	thread);

struct sched_update_scan_context {
	uint64_t	earliest_bg_make_runnable_time;
	uint64_t	earliest_normal_make_runnable_time;
	uint64_t	earliest_rt_make_runnable_time;
};
typedef struct sched_update_scan_context *sched_update_scan_context_t;

extern void sched_rtglobal_runq_scan(sched_update_scan_context_t scan_context);

/*
 * Enum defining the various events that need IPIs. The IPI policy
 * engine decides what kind of IPI to use based on the destination
 * processor's state, the thread, and one of the following scheduling events.
 */
typedef enum {
	SCHED_IPI_EVENT_BOUND_THR	= 0x1,
	SCHED_IPI_EVENT_PREEMPT		= 0x2,
	SCHED_IPI_EVENT_SMT_REBAL	= 0x3,
	SCHED_IPI_EVENT_SPILL		= 0x4,
	SCHED_IPI_EVENT_REBALANCE	= 0x5,
} sched_ipi_event_t;

/* Enum defining the various IPI types used by the scheduler */
typedef enum {
	SCHED_IPI_NONE		= 0x0,
	SCHED_IPI_IMMEDIATE	= 0x1,
	SCHED_IPI_IDLE		= 0x2,
	SCHED_IPI_DEFERRED	= 0x3,
} sched_ipi_type_t;

/* The IPI policy engine behaves in the following manner:
 * - All scheduler events which need an IPI invoke sched_ipi_action() with
 *   the appropriate destination processor, thread and event.
 * - sched_ipi_action() performs basic checks, invokes the scheduler-specific
 *   ipi_policy routine and sets pending_AST bits based on the result.
 * - Once the pset lock is dropped, the scheduler invokes the sched_ipi_perform()
 *   routine, which actually sends the appropriate IPI to the destination core.
 */
extern sched_ipi_type_t sched_ipi_action(processor_t dst, thread_t thread,
	boolean_t dst_idle, sched_ipi_event_t event);
extern void sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi);

/* sched_ipi_policy() is the global default IPI policy for all schedulers */
extern sched_ipi_type_t sched_ipi_policy(processor_t dst, thread_t thread,
	boolean_t dst_idle, sched_ipi_event_t event);

/* sched_ipi_deferred_policy() is the global default deferred IPI policy for all schedulers */
extern sched_ipi_type_t sched_ipi_deferred_policy(processor_set_t pset,
	processor_t dst, sched_ipi_event_t event);

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

extern boolean_t thread_update_add_thread(thread_t thread);
extern void thread_update_process_threads(void);
extern boolean_t runq_scan(run_queue_t runq, sched_update_scan_context_t scan_context);

extern void sched_timeshare_init(void);
extern void sched_timeshare_timebase_init(void);
extern void sched_timeshare_maintenance_continue(void);

extern boolean_t priority_is_urgent(int priority);
extern uint32_t sched_timeshare_initial_quantum_size(thread_t thread);

extern int sched_compute_timeshare_priority(thread_t thread);

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

/* Remove thread from its run queue */
extern boolean_t thread_run_queue_remove(thread_t thread);
thread_t thread_run_queue_remove_for_handoff(thread_t thread);

/* Put a thread back in the run queue after being yanked */
extern void thread_run_queue_reinsert(thread_t thread, integer_t options);

extern void thread_timer_expire(
	void	*thread,
	void	*p1);

extern boolean_t thread_eager_preemption(
	thread_t	thread);

extern boolean_t sched_generic_direct_dispatch_to_idle_processors;

/* Set the maximum interrupt level for the thread */
__private_extern__ wait_interrupt_t thread_interrupt_level(
	wait_interrupt_t interruptible);

__private_extern__ wait_result_t thread_mark_wait_locked(
	thread_t	 thread,
	wait_interrupt_t interruptible);

/* Wake up locked thread directly, passing result */
__private_extern__ kern_return_t clear_wait_internal(
	thread_t	thread,
	wait_result_t	result);

extern void sched_stats_handle_csw(
	processor_t	processor,
	int		reasons,
	int		selfpri,
	int		otherpri);

extern void sched_stats_handle_runq_change(
	struct runq_stats	*stats,
	int			old_count);

#if DEBUG

#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri)		\
do {									\
	if (__builtin_expect(sched_stats_active, 0)) {			\
		sched_stats_handle_csw((processor),			\
			(reasons), (selfpri), (otherpri));		\
	}								\
} while (0)

#define SCHED_STATS_RUNQ_CHANGE(stats, old_count)			\
do {									\
	if (__builtin_expect(sched_stats_active, 0)) {			\
		sched_stats_handle_runq_change((stats),			\
			(old_count));					\
	}								\
} while (0)

#else /* DEBUG */

#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri) do { } while (0)
#define SCHED_STATS_RUNQ_CHANGE(stats, old_count) do { } while (0)

#endif /* DEBUG */

extern uint32_t sched_debug_flags;
#define SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS		0x00000001
#define SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS	0x00000002

#define SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(...) do {					\
	if (__improbable(sched_debug_flags & SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS)) {		\
		KERNEL_DEBUG_CONSTANT(__VA_ARGS__);						\
	}											\
} while (0)

#define SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(...) do {				\
	if (__improbable(sched_debug_flags & SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS)) {	\
		KERNEL_DEBUG_CONSTANT(__VA_ARGS__);						\
	}											\
} while (0)

#define THREAD_URGENCY_NONE		0	/* indicates that there is no currently runnable thread */
#define THREAD_URGENCY_BACKGROUND	1	/* indicates that the thread is marked as a "background" thread */
#define THREAD_URGENCY_NORMAL		2	/* indicates that the thread is marked as a "normal" thread */
#define THREAD_URGENCY_REAL_TIME	3	/* indicates that the thread is marked as a "real-time" or urgent thread */
#define THREAD_URGENCY_MAX		4	/* marker */

/* Returns the "urgency" of a thread (provided by the scheduler) */
extern int thread_get_urgency(
	thread_t	thread,
	uint64_t	*rt_period,
	uint64_t	*rt_deadline);

/* Tells the "urgency" of the just-scheduled thread (provided by CPU PM) */
extern void thread_tell_urgency(
	int		urgency,
	uint64_t	rt_period,
	uint64_t	rt_deadline,
	uint64_t	sched_latency,
	thread_t	nthread);

/* Tells whether there are "active" RT threads in the system (provided by CPU PM) */
extern void active_rt_threads(
	boolean_t	active);

/* Returns the perfcontrol attribute for the thread */
extern perfcontrol_class_t thread_get_perfcontrol_class(
	thread_t	thread);

/* Generic routine for non-AMP schedulers to calculate parallelism */
extern uint32_t sched_qos_max_parallelism(int qos, uint64_t options);

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef XNU_KERNEL_PRIVATE

/* Toggles a global override to turn off CPU Throttling */
extern void sys_override_cpu_throttle(boolean_t enable_override);

/*
 ****************** Only exported until BSD stops using ********************
 */

extern void thread_vm_bind_group_add(void);

/* Wake up thread directly, passing result */
extern kern_return_t clear_wait(
	thread_t	thread,
	wait_result_t	result);

/* Start thread running */
extern void thread_bootstrap_return(void) __attribute__((noreturn));

/* Return from exception (BSD-visible interface) */
extern void thread_exception_return(void) __dead2;

#define SCHED_STRING_MAX_LENGTH (48)
/* String declaring the name of the current scheduler */
extern char sched_string[SCHED_STRING_MAX_LENGTH];

extern thread_t port_name_to_thread_for_ulock(mach_port_name_t thread_name);

/* Attempt to context switch to a specific runnable thread */
extern wait_result_t thread_handoff_deallocate(thread_t thread);

__attribute__((nonnull(1, 2)))
extern void thread_handoff_parameter(thread_t thread,
	thread_continue_t continuation, void *parameter) __dead2;

extern struct waitq *assert_wait_queue(event_t event);

extern kern_return_t thread_wakeup_one_with_pri(event_t event, int priority);

extern thread_t thread_wakeup_identify(event_t event, int priority);

#endif /* XNU_KERNEL_PRIVATE */

#ifdef KERNEL_PRIVATE
/* Set pending block hint for a particular object before we go into a wait state */
extern void thread_set_pending_block_hint(
	thread_t	thread,
	block_hint_t	block_hint);

#define QOS_PARALLELISM_COUNT_LOGICAL	0x1
#define QOS_PARALLELISM_REALTIME	0x2
extern uint32_t qos_max_parallelism(int qos, uint64_t options);
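
/*
 * Example (illustrative): query the recommended parallel width for a QoS
 * tier; THREAD_QOS_USER_INITIATED is assumed from <mach/thread_policy.h>.
 *
 *	uint32_t width = qos_max_parallelism(THREAD_QOS_USER_INITIATED,
 *	    QOS_PARALLELISM_COUNT_LOGICAL);
 */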
#endif /* KERNEL_PRIVATE */

#if XNU_KERNEL_PRIVATE
extern void thread_yield_with_continuation(
	thread_continue_t	continuation,
	void			*parameter) __dead2;
#endif

/* Context switch */
extern wait_result_t thread_block(
	thread_continue_t	continuation);

extern wait_result_t thread_block_parameter(
	thread_continue_t	continuation,
	void			*parameter);
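
/*
 * Continuation-style blocking (illustrative sketch; my_continuation is a
 * hypothetical function): when a continuation is supplied, the kernel stack
 * may be discarded while blocked and the continuation is invoked on a fresh
 * stack at wakeup, so thread_block_parameter() never returns to the caller.
 *
 *	static void my_continuation(void *parameter, wait_result_t wresult);
 *
 *	assert_wait(event, THREAD_UNINT);
 *	thread_block_parameter(my_continuation, parameter);
 *	// not reached
 */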

/* Declare thread will wait on a particular event */
extern wait_result_t assert_wait(
	event_t		event,
	wait_interrupt_t interruptible);

/* Assert that the thread intends to wait with a timeout */
extern wait_result_t assert_wait_timeout(
	event_t		event,
	wait_interrupt_t interruptible,
	uint32_t	interval,
	uint32_t	scale_factor);

/* Assert that the thread intends to wait with an urgency, timeout and leeway */
extern wait_result_t assert_wait_timeout_with_leeway(
	event_t			event,
	wait_interrupt_t	interruptible,
	wait_timeout_urgency_t	urgency,
	uint32_t		interval,
	uint32_t		leeway,
	uint32_t		scale_factor);

extern wait_result_t assert_wait_deadline(
	event_t		event,
	wait_interrupt_t interruptible,
	uint64_t	deadline);

/* Assert that the thread intends to wait with an urgency, deadline, and leeway */
extern wait_result_t assert_wait_deadline_with_leeway(
	event_t			event,
	wait_interrupt_t	interruptible,
	wait_timeout_urgency_t	urgency,
	uint64_t		deadline,
	uint64_t		leeway);

/* Wake up thread (or threads) waiting on a particular event */
extern kern_return_t thread_wakeup_prim(
	event_t		event,
	boolean_t	one_thread,
	wait_result_t	result);

#define thread_wakeup(x)				\
		thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
#define thread_wakeup_with_result(x, z)			\
		thread_wakeup_prim((x), FALSE, (z))
#define thread_wakeup_one(x)				\
		thread_wakeup_prim((x), TRUE, THREAD_AWAKENED)
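
/*
 * Typical wait/wakeup pairing (illustrative sketch; `object` and its `state`
 * field are hypothetical): the waiter asserts the wait, drops its lock, and
 * blocks; the waker wakes all threads waiting on the same event address.
 *
 * Waiter:
 *	assert_wait((event_t)&object->state, THREAD_UNINT);
 *	unlock(object);
 *	wait_result_t wresult = thread_block(THREAD_CONTINUE_NULL);
 *
 * Waker:
 *	thread_wakeup((event_t)&object->state);
 */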

/* Wakeup the specified thread if it is waiting on this event */
extern kern_return_t thread_wakeup_thread(event_t event, thread_t thread);

extern boolean_t preemption_enabled(void);

#ifdef MACH_KERNEL_PRIVATE

/*
 * Scheduler algorithm indirection. If only one algorithm is
 * enabled at compile-time, a direct function call is used.
 * If more than one is enabled, calls are dispatched through
 * a function pointer table.
 */

#if !defined(CONFIG_SCHED_TRADITIONAL) && !defined(CONFIG_SCHED_PROTO) && !defined(CONFIG_SCHED_GRRR) && !defined(CONFIG_SCHED_MULTIQ)
#error Enable at least one scheduler algorithm in osfmk/conf/MASTER.XXX
#endif

#if DEBUG
#define SCHED(f) (sched_current_dispatch->f)
#else /* DEBUG */

/*
 * For DEV & REL kernels, use a static dispatch table instead of
 * the indirect function table.
 */
extern const struct sched_dispatch_table sched_dualq_dispatch;
#define SCHED(f) (sched_dualq_dispatch.f)

#endif /* DEBUG */
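
/*
 * Illustrative use of the indirection (hypothetical call sites): policy
 * entry points are reached through SCHED() rather than by naming a
 * dispatch table directly, e.g.:
 *
 *	thread_t thread = SCHED(choose_thread)(processor, MINPRI, AST_NONE);
 *	uint32_t quantum = SCHED(initial_quantum_size)(thread);
 */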

struct sched_dispatch_table {
	const char *sched_name;
	void	(*init)(void);				/* Init global state */
	void	(*timebase_init)(void);			/* Timebase-dependent initialization */
	void	(*processor_init)(processor_t processor);	/* Per-processor scheduler init */
	void	(*pset_init)(processor_set_t pset);	/* Per-processor set scheduler init */

	void	(*maintenance_continuation)(void);	/* Function called regularly */

	/*
	 * Choose a thread of greater or equal priority from the per-processor
	 * runqueue for timeshare/fixed threads
	 */
	thread_t (*choose_thread)(
		processor_t	processor,
		int		priority,
		ast_t		reason);

	/* True if the scheduler supports stealing threads */
	boolean_t steal_thread_enabled;

	/*
	 * Steal a thread from another processor in the pset so that it can run
	 * immediately
	 */
	thread_t (*steal_thread)(
		processor_set_t	pset);

	/*
	 * Compute priority for a timeshare thread based on base priority.
	 */
	int (*compute_timeshare_priority)(thread_t thread);

	/*
	 * Pick the best processor for a thread (any kind of thread) to run on.
	 */
	processor_t (*choose_processor)(
		processor_set_t	pset,
		processor_t	processor,
		thread_t	thread);
	/*
	 * Enqueue a timeshare or fixed priority thread onto the per-processor
	 * runqueue
	 */
	boolean_t (*processor_enqueue)(
		processor_t	processor,
		thread_t	thread,
		integer_t	options);

	/* Migrate threads away in preparation for processor shutdown */
	void (*processor_queue_shutdown)(
		processor_t	processor);

	/* Remove the specific thread from the per-processor runqueue */
	boolean_t (*processor_queue_remove)(
		processor_t	processor,
		thread_t	thread);

	/*
	 * Does the per-processor runqueue have any timeshare or fixed priority
	 * threads on it? Called without the pset lock held, so it should
	 * not assume immutability while executing.
	 */
	boolean_t (*processor_queue_empty)(processor_t processor);

	/*
	 * Would this priority trigger an urgent preemption if it's sitting
	 * on the per-processor runqueue?
	 */
	boolean_t (*priority_is_urgent)(int priority);

	/*
	 * Does the per-processor runqueue contain runnable threads that
	 * should cause the currently-running thread to be preempted?
	 */
	ast_t (*processor_csw_check)(processor_t processor);

	/*
	 * Does the per-processor runqueue contain a runnable thread
	 * of > or >= priority, as a preflight for choose_thread() or other
	 * thread selection?
	 */
	boolean_t (*processor_queue_has_priority)(processor_t processor,
		int		priority,
		boolean_t	gte);

	/* Quantum size for the specified non-realtime thread. */
	uint32_t (*initial_quantum_size)(thread_t thread);

	/* Scheduler mode for a new thread */
	sched_mode_t (*initial_thread_sched_mode)(task_t parent_task);

	/*
	 * Is it safe to call update_priority, which may change a thread's
	 * runqueue or other state? This can be used to throttle changes
	 * to dynamic priority.
	 */
	boolean_t (*can_update_priority)(thread_t thread);

	/*
	 * Update both scheduled priority and other persistent state.
	 * Side effects may include migration to another processor's runqueue.
	 */
	void (*update_priority)(thread_t thread);

	/* Lower-overhead update to scheduled priority and state. */
	void (*lightweight_update_priority)(thread_t thread);

	/* Callback for non-realtime threads when the quantum timer fires */
	void (*quantum_expire)(thread_t thread);

	/*
	 * Runnable threads on per-processor runqueue. Should only
	 * be used for relative comparisons of load between processors.
	 */
	int (*processor_runq_count)(processor_t processor);

	/* Aggregate runcount statistics for per-processor runqueue */
	uint64_t (*processor_runq_stats_count_sum)(processor_t processor);

	boolean_t (*processor_bound_count)(processor_t processor);

	void (*thread_update_scan)(sched_update_scan_context_t scan_context);

	/*
	 * Use processor->next_thread to pin a thread to an idle
	 * processor. If FALSE, threads are enqueued and can
	 * be stolen by other processors.
	 */
	boolean_t direct_dispatch_to_idle_processors;

	/* Supports more than one pset */
	boolean_t multiple_psets_enabled;
	/* Supports scheduler groups */
	boolean_t sched_groups_enabled;

	/* Supports avoid-processor */
	boolean_t avoid_processor_enabled;

	/* Returns true if this processor should avoid running this thread. */
	bool (*thread_avoid_processor)(processor_t processor, thread_t thread);

	/*
	 * Invoked when a processor is about to choose the idle thread.
	 * Used to send IPIs to a processor which would be preferred to be idle instead.
	 * Called with the pset lock held; returns with the pset lock unlocked.
	 */
	void (*processor_balance)(processor_t processor, processor_set_t pset);
	rt_queue_t (*rt_runq)(processor_set_t pset);
	void (*rt_init)(processor_set_t pset);
	void (*rt_queue_shutdown)(processor_t processor);
	void (*rt_runq_scan)(sched_update_scan_context_t scan_context);
	int64_t (*rt_runq_count_sum)(void);

	uint32_t (*qos_max_parallelism)(int qos, uint64_t options);
	void (*check_spill)(processor_set_t pset, thread_t thread);
	sched_ipi_type_t (*ipi_policy)(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event);
	bool (*thread_should_yield)(processor_t processor, thread_t thread);
};

#if defined(CONFIG_SCHED_TRADITIONAL)
extern const struct sched_dispatch_table sched_traditional_dispatch;
extern const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch;
#endif

#if defined(CONFIG_SCHED_MULTIQ)
extern const struct sched_dispatch_table sched_multiq_dispatch;
extern const struct sched_dispatch_table sched_dualq_dispatch;
#endif

#if defined(CONFIG_SCHED_PROTO)
extern const struct sched_dispatch_table sched_proto_dispatch;
#endif

#if defined(CONFIG_SCHED_GRRR)
extern const struct sched_dispatch_table sched_grrr_dispatch;
#endif

/*
 * It is an error to invoke any scheduler-related code
 * before this is set up.
 */
extern const struct sched_dispatch_table *sched_current_dispatch;

#endif /* MACH_KERNEL_PRIVATE */

__END_DECLS

#endif /* _KERN_SCHED_PRIM_H_ */