/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	sched_prim.h
 *	Author:	David Golub
 *
 *	Scheduling primitive definitions file
 *
 */

#ifndef _KERN_SCHED_PRIM_H_
#define _KERN_SCHED_PRIM_H_

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/kern_return.h>
#include <kern/clock.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <sys/cdefs.h>
#include <kern/block_hint.h>

#ifdef MACH_KERNEL_PRIVATE

#include <kern/sched_urgency.h>
#include <kern/thread_group.h>

/* Initialization */
extern void sched_init(void);

extern void sched_startup(void);

extern void sched_timebase_init(void);

extern void pset_rt_init(processor_set_t pset);

extern void sched_rtglobal_init(processor_set_t pset);

extern rt_queue_t sched_rtglobal_runq(processor_set_t pset);

extern void sched_rtglobal_queue_shutdown(processor_t processor);

extern int64_t sched_rtglobal_runq_count_sum(void);

extern void sched_check_spill(processor_set_t pset, thread_t thread);

extern bool sched_thread_should_yield(processor_t processor, thread_t thread);

extern bool sched_steal_thread_DISABLED(processor_set_t pset);
extern bool sched_steal_thread_enabled(processor_set_t pset);

/* Force a preemption point for a thread and wait for it to stop running */
extern boolean_t thread_stop(
	thread_t   thread,
	boolean_t  until_not_runnable);

/* Release a previous stop request */
extern void thread_unstop(
	thread_t   thread);

/* Wait for a thread to stop running */
extern void thread_wait(
	thread_t   thread,
	boolean_t  until_not_runnable);
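/*
 * Illustrative usage sketch (not part of the original header): a caller
 * that needs a thread off-core pairs thread_stop() with thread_unstop().
 * inspect_thread_state() is a hypothetical helper.
 *
 *	if (thread_stop(thread, TRUE)) {
 *		inspect_thread_state(thread);
 *		thread_unstop(thread);
 *	}
 */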
/* Unblock thread on wake up */
extern boolean_t thread_unblock(
	thread_t       thread,
	wait_result_t  wresult);

/* Unblock and dispatch thread */
extern kern_return_t thread_go(
	thread_t       thread,
	wait_result_t  wresult);

/* Handle threads at context switch */
extern void thread_dispatch(
	thread_t       old_thread,
	thread_t       new_thread);

/* Switch directly to a particular thread */
extern int thread_run(
	thread_t           self,
	thread_continue_t  continuation,
	void               *parameter,
	thread_t           new_thread);

/* Resume thread with new stack */
extern __dead2 void thread_continue(thread_t old_thread);

/* Invoke continuation */
extern __dead2 void call_continuation(
	thread_continue_t  continuation,
	void               *parameter,
	wait_result_t      wresult,
	boolean_t          enable_interrupts);

/*
 * Flags that can be passed to set_sched_pri
 * to skip side effects
 */
__options_decl(set_sched_pri_options_t, uint32_t, {
	SETPRI_DEFAULT = 0x0,
	SETPRI_LAZY    = 0x1, /* Avoid setting AST flags or sending IPIs */
});

/* Set the current scheduled priority */
extern void set_sched_pri(
	thread_t                 thread,
	int                      priority,
	set_sched_pri_options_t  options);
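/*
 * Sketch (illustrative; assumes the thread lock is held as the scheduler
 * internals require): update the scheduled priority while skipping AST/IPI
 * side effects. BASEPRI_DEFAULT comes from kern/sched.h.
 *
 *	set_sched_pri(thread, BASEPRI_DEFAULT, SETPRI_LAZY);
 */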
/* Set base priority of the specified thread */
extern void sched_set_thread_base_priority(
	thread_t  thread,
	int       priority);

/* Set absolute base priority of the specified thread */
extern void sched_set_kernel_thread_priority(
	thread_t  thread,
	int       priority);

/* Set the thread's true scheduling mode */
extern void sched_set_thread_mode(thread_t thread,
    sched_mode_t mode);
/* Demote the true scheduler mode */
extern void sched_thread_mode_demote(thread_t thread,
    uint32_t reason);
/* Un-demote the true scheduler mode */
extern void sched_thread_mode_undemote(thread_t thread,
    uint32_t reason);

extern void sched_thread_promote_reason(thread_t thread, uint32_t reason, uintptr_t trace_obj);
extern void sched_thread_unpromote_reason(thread_t thread, uint32_t reason, uintptr_t trace_obj);

/* Re-evaluate base priority of thread (thread locked) */
void thread_recompute_priority(thread_t thread);

/* Re-evaluate scheduled priority of thread (thread locked) */
extern void thread_recompute_sched_pri(
	thread_t                 thread,
	set_sched_pri_options_t  options);

/* Periodic scheduler activity */
extern void sched_init_thread(void (*)(void));

/* Perform sched_tick housekeeping activities */
extern boolean_t can_update_priority(
	thread_t  thread);

extern void update_priority(
	thread_t  thread);

extern void lightweight_update_priority(
	thread_t  thread);

extern void sched_default_quantum_expire(thread_t thread);

/* Idle processor thread continuation */
extern void idle_thread(
	void           *parameter,
	wait_result_t  result);

extern kern_return_t idle_thread_create(
	processor_t  processor);

/* Continuation return from syscall */
extern void thread_syscall_return(
	kern_return_t  ret);

/* Context switch */
extern wait_result_t thread_block_reason(
	thread_continue_t  continuation,
	void               *parameter,
	ast_t              reason);

__options_decl(sched_options_t, uint32_t, {
	SCHED_NONE      = 0x0,
	SCHED_TAILQ     = 0x1,
	SCHED_HEADQ     = 0x2,
	SCHED_PREEMPT   = 0x4,
	SCHED_REBALANCE = 0x8,
});

/* Reschedule thread for execution */
extern void thread_setrun(
	thread_t         thread,
	sched_options_t  options);
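/*
 * Sketch (illustrative): make a locked, runnable thread eligible to run at
 * the tail of a runqueue and request a preemption check on the target CPU.
 *
 *	thread_setrun(thread, SCHED_TAILQ | SCHED_PREEMPT);
 */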
extern processor_set_t task_choose_pset(
	task_t  task);

/* Bind the current thread to a particular processor */
extern processor_t thread_bind(
	processor_t  processor);

/* Choose the best processor to run a thread */
extern processor_t choose_processor(
	processor_set_t  pset,
	processor_t      processor,
	thread_t         thread);

extern void sched_SMT_balance(
	processor_t      processor,
	processor_set_t  pset);

extern void thread_quantum_init(
	thread_t  thread);

extern void run_queue_init(
	run_queue_t  runq);

extern thread_t run_queue_dequeue(
	run_queue_t      runq,
	sched_options_t  options);

extern boolean_t run_queue_enqueue(
	run_queue_t      runq,
	thread_t         thread,
	sched_options_t  options);

extern void run_queue_remove(
	run_queue_t  runq,
	thread_t     thread);

extern thread_t run_queue_peek(
	run_queue_t  runq);
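/*
 * Run-queue usage sketch (illustrative; `rq` is a hypothetical queue and
 * the caller is assumed to hold the lock protecting it):
 *
 *	run_queue_init(rq);
 *	run_queue_enqueue(rq, thread, SCHED_TAILQ);
 *	thread = run_queue_dequeue(rq, SCHED_HEADQ);
 */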
struct sched_update_scan_context {
	uint64_t  earliest_bg_make_runnable_time;
	uint64_t  earliest_normal_make_runnable_time;
	uint64_t  earliest_rt_make_runnable_time;
};
typedef struct sched_update_scan_context *sched_update_scan_context_t;

extern void sched_rtglobal_runq_scan(sched_update_scan_context_t scan_context);

extern void sched_pset_made_schedulable(
	processor_t      processor,
	processor_set_t  pset,
	boolean_t        drop_lock);

/*
 * Enum to define various events which need IPIs. The IPI policy
 * engine decides what kind of IPI to use based on destination
 * processor state, thread and one of the following scheduling events.
 */
typedef enum {
	SCHED_IPI_EVENT_BOUND_THR = 0x1,
	SCHED_IPI_EVENT_PREEMPT   = 0x2,
	SCHED_IPI_EVENT_SMT_REBAL = 0x3,
	SCHED_IPI_EVENT_SPILL     = 0x4,
	SCHED_IPI_EVENT_REBALANCE = 0x5,
} sched_ipi_event_t;

/* Enum to define various IPI types used by the scheduler */
typedef enum {
	SCHED_IPI_NONE      = 0x0,
	SCHED_IPI_IMMEDIATE = 0x1,
	SCHED_IPI_IDLE      = 0x2,
	SCHED_IPI_DEFERRED  = 0x3,
} sched_ipi_type_t;

/* The IPI policy engine behaves in the following manner:
 * - All scheduler events which need an IPI invoke sched_ipi_action() with
 *   the appropriate destination processor, thread and event.
 * - sched_ipi_action() performs basic checks, invokes the scheduler-specific
 *   ipi_policy routine and sets pending_AST bits based on the result.
 * - Once the pset lock is dropped, the scheduler invokes the sched_ipi_perform()
 *   routine, which actually sends the appropriate IPI to the destination core.
 */
extern sched_ipi_type_t sched_ipi_action(processor_t dst, thread_t thread,
    boolean_t dst_idle, sched_ipi_event_t event);
extern void sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi);

/* sched_ipi_policy() is the global default IPI policy for all schedulers */
extern sched_ipi_type_t sched_ipi_policy(processor_t dst, thread_t thread,
    boolean_t dst_idle, sched_ipi_event_t event);

/* sched_ipi_deferred_policy() is the global default deferred IPI policy for all schedulers */
extern sched_ipi_type_t sched_ipi_deferred_policy(processor_set_t pset,
    processor_t dst, sched_ipi_event_t event);
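/*
 * Two-phase IPI sketch (illustrative caller): decide on the IPI while the
 * pset lock is held, then send it after the lock is dropped.
 *
 *	sched_ipi_type_t ipi = sched_ipi_action(processor, thread,
 *	    FALSE, SCHED_IPI_EVENT_PREEMPT);
 *	pset_unlock(pset);
 *	sched_ipi_perform(processor, ipi);
 */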
#if defined(CONFIG_SCHED_TIMESHARE_CORE)

extern boolean_t thread_update_add_thread(thread_t thread);
extern void thread_update_process_threads(void);
extern boolean_t runq_scan(run_queue_t runq, sched_update_scan_context_t scan_context);

extern void sched_timeshare_init(void);
extern void sched_timeshare_timebase_init(void);
extern void sched_timeshare_maintenance_continue(void);

extern boolean_t priority_is_urgent(int priority);
extern uint32_t sched_timeshare_initial_quantum_size(thread_t thread);

extern int sched_compute_timeshare_priority(thread_t thread);

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

/* Remove thread from its run queue */
extern boolean_t thread_run_queue_remove(thread_t thread);
thread_t thread_run_queue_remove_for_handoff(thread_t thread);

/* Put a thread back in the run queue after being yanked */
extern void thread_run_queue_reinsert(thread_t thread, sched_options_t options);

extern void thread_timer_expire(
	void  *thread,
	void  *p1);

extern boolean_t thread_eager_preemption(
	thread_t  thread);

extern boolean_t sched_generic_direct_dispatch_to_idle_processors;

/* Set the maximum interrupt level for the thread */
__private_extern__ wait_interrupt_t thread_interrupt_level(
	wait_interrupt_t  interruptible);

__private_extern__ wait_result_t thread_mark_wait_locked(
	thread_t          thread,
	wait_interrupt_t  interruptible);

/* Wake up locked thread directly, passing result */
__private_extern__ kern_return_t clear_wait_internal(
	thread_t       thread,
	wait_result_t  result);

extern void sched_stats_handle_csw(
	processor_t  processor,
	int          reasons,
	int          selfpri,
	int          otherpri);

extern void sched_stats_handle_runq_change(
	struct runq_stats  *stats,
	int                old_count);

#if DEBUG

#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri)  \
do {                                                            \
	if (__builtin_expect(sched_stats_active, 0)) {          \
	        sched_stats_handle_csw((processor),             \
	            (reasons), (selfpri), (otherpri));          \
	}                                                       \
} while (0)

#define SCHED_STATS_RUNQ_CHANGE(stats, old_count)               \
do {                                                            \
	if (__builtin_expect(sched_stats_active, 0)) {          \
	        sched_stats_handle_runq_change((stats),         \
	            (old_count));                               \
	}                                                       \
} while (0)

#else /* DEBUG */

#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri) do { } while (0)
#define SCHED_STATS_RUNQ_CHANGE(stats, old_count) do { } while (0)

#endif /* DEBUG */
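/*
 * Sketch of a stats hook at a hypothetical enqueue site; the macro
 * compiles to nothing unless DEBUG is set and sched_stats_active is true.
 *
 *	SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
 *	rq->count++;
 */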
extern uint32_t sched_debug_flags;
#define SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS           0x00000001
#define SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS   0x00000002

#define SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(...) do {                            \
	if (__improbable(sched_debug_flags & SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS)) {  \
	        KERNEL_DEBUG_CONSTANT(__VA_ARGS__);                                     \
	}                                                                               \
} while (0)

#define SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(...) do {                            \
	if (__improbable(sched_debug_flags & SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS)) {  \
	        KERNEL_DEBUG_CONSTANT(__VA_ARGS__);                                             \
	}                                                                                       \
} while (0)
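/*
 * Sketch (illustrative trace codes): emit a platform scheduler tracepoint
 * only when the corresponding debug flag is enabled at runtime.
 *
 *	SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(
 *	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
 *	    0, 0, 0, 0, 0);
 */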
/* Tells if there are "active" RT threads in the system (provided by CPU PM) */
extern void active_rt_threads(
	boolean_t  active);

/* Returns the perfcontrol attribute for the thread */
extern perfcontrol_class_t thread_get_perfcontrol_class(
	thread_t  thread);

/* Generic routine for Non-AMP schedulers to calculate parallelism */
extern uint32_t sched_qos_max_parallelism(int qos, uint64_t options);

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef XNU_KERNEL_PRIVATE

extern void thread_bind_cluster_type(char cluster_type);

/* Toggles a global override to turn off CPU Throttling */
extern void sys_override_cpu_throttle(boolean_t enable_override);

/*
 ****************** Only exported until BSD stops using ********************
 */

extern void thread_vm_bind_group_add(void);

/* Wake up thread directly, passing result */
extern kern_return_t clear_wait(
	thread_t       thread,
	wait_result_t  result);

/* Start thread running */
extern void thread_bootstrap_return(void) __attribute__((noreturn));

/* Return from exception (BSD-visible interface) */
extern void thread_exception_return(void) __dead2;

#define SCHED_STRING_MAX_LENGTH (48)
/* String declaring the name of the current scheduler */
extern char sched_string[SCHED_STRING_MAX_LENGTH];

/* Attempt to context switch to a specific runnable thread */
extern wait_result_t thread_handoff_deallocate(thread_t thread);

__attribute__((nonnull(1, 2)))
extern void thread_handoff_parameter(thread_t thread,
    thread_continue_t continuation, void *parameter) __dead2;
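/*
 * Handoff sketch (illustrative; assumes `target` is runnable and the
 * caller holds a thread reference that thread_handoff_deallocate()
 * consumes):
 *
 *	wait_result_t wres = thread_handoff_deallocate(target);
 */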
extern struct waitq *assert_wait_queue(event_t event);

extern kern_return_t thread_wakeup_one_with_pri(event_t event, int priority);

extern thread_t thread_wakeup_identify(event_t event, int priority);

#endif /* XNU_KERNEL_PRIVATE */

#ifdef KERNEL_PRIVATE
/* Set pending block hint for a particular object before we go into a wait state */
extern void thread_set_pending_block_hint(
	thread_t      thread,
	block_hint_t  block_hint);

#define QOS_PARALLELISM_COUNT_LOGICAL   0x1
#define QOS_PARALLELISM_REALTIME        0x2
extern uint32_t qos_max_parallelism(int qos, uint64_t options);
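/*
 * Sketch (illustrative): query the recommended parallelism for a QoS
 * class, counting logical CPUs. THREAD_QOS_USER_INITIATED comes from
 * mach/thread_policy.h.
 *
 *	uint32_t width = qos_max_parallelism(THREAD_QOS_USER_INITIATED,
 *	    QOS_PARALLELISM_COUNT_LOGICAL);
 */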
#endif /* KERNEL_PRIVATE */

#if XNU_KERNEL_PRIVATE
extern void thread_yield_with_continuation(
	thread_continue_t  continuation,
	void               *parameter) __dead2;
#endif

/* Context switch */
extern wait_result_t thread_block(
	thread_continue_t  continuation);

extern wait_result_t thread_block_parameter(
	thread_continue_t  continuation,
	void               *parameter);
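/*
 * Continuation sketch (hypothetical names): a thread that blocks with a
 * continuation gives up its kernel stack and resumes in the continuation
 * instead of returning from thread_block_parameter().
 *
 *	static void
 *	my_continue(void *param, wait_result_t wres)
 *	{
 *		...
 *		thread_syscall_return(KERN_SUCCESS);
 *	}
 *
 *	assert_wait((event_t)&my_event, THREAD_UNINT);
 *	thread_block_parameter(my_continue, param); (does not return)
 */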
/* Declare thread will wait on a particular event */
extern wait_result_t assert_wait(
	event_t           event,
	wait_interrupt_t  interruptible);

/* Assert that the thread intends to wait with a timeout */
extern wait_result_t assert_wait_timeout(
	event_t           event,
	wait_interrupt_t  interruptible,
	uint32_t          interval,
	uint32_t          scale_factor);

/* Assert that the thread intends to wait with an urgency, timeout and leeway */
extern wait_result_t assert_wait_timeout_with_leeway(
	event_t                 event,
	wait_interrupt_t        interruptible,
	wait_timeout_urgency_t  urgency,
	uint32_t                interval,
	uint32_t                leeway,
	uint32_t                scale_factor);

extern wait_result_t assert_wait_deadline(
	event_t           event,
	wait_interrupt_t  interruptible,
	uint64_t          deadline);

/* Assert that the thread intends to wait with an urgency, deadline, and leeway */
extern wait_result_t assert_wait_deadline_with_leeway(
	event_t                 event,
	wait_interrupt_t        interruptible,
	wait_timeout_urgency_t  urgency,
	uint64_t                deadline,
	uint64_t                leeway);

/* Wake up thread (or threads) waiting on a particular event */
extern kern_return_t thread_wakeup_prim(
	event_t        event,
	boolean_t      one_thread,
	wait_result_t  result);

#define thread_wakeup(x)                 \
	thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
#define thread_wakeup_with_result(x, z)  \
	thread_wakeup_prim((x), FALSE, (z))
#define thread_wakeup_one(x)             \
	thread_wakeup_prim((x), TRUE, THREAD_AWAKENED)
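/*
 * Classic wait/wakeup sketch (illustrative; `my_event` is any unique
 * kernel address used as a wait channel):
 *
 *	wait_result_t wres = assert_wait((event_t)&my_event, THREAD_UNINT);
 *	if (wres == THREAD_WAITING) {
 *		wres = thread_block(THREAD_CONTINUE_NULL);
 *	}
 *
 * and the waking side runs:
 *
 *	thread_wakeup((event_t)&my_event);
 */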
/* Wakeup the specified thread if it is waiting on this event */
extern kern_return_t thread_wakeup_thread(event_t event, thread_t thread);

extern boolean_t preemption_enabled(void);

#ifdef MACH_KERNEL_PRIVATE

/*
 * Scheduler algorithm indirection. If only one algorithm is
 * enabled at compile-time, a direct function call is used.
 * If more than one is enabled, calls are dispatched through
 * a function pointer table.
 */

#if !defined(CONFIG_SCHED_TRADITIONAL) && !defined(CONFIG_SCHED_PROTO) && !defined(CONFIG_SCHED_GRRR) && !defined(CONFIG_SCHED_MULTIQ) && !defined(CONFIG_SCHED_CLUTCH)
#error Enable at least one scheduler algorithm in osfmk/conf/MASTER.XXX
#endif

#if __AMP__
extern const struct sched_dispatch_table sched_amp_dispatch;
#define SCHED(f) (sched_amp_dispatch.f)

#else /* __AMP__ */

#if CONFIG_SCHED_CLUTCH
extern const struct sched_dispatch_table sched_clutch_dispatch;
#define SCHED(f) (sched_clutch_dispatch.f)
#else /* CONFIG_SCHED_CLUTCH */
extern const struct sched_dispatch_table sched_dualq_dispatch;
#define SCHED(f) (sched_dualq_dispatch.f)
#endif /* CONFIG_SCHED_CLUTCH */

#endif /* __AMP__ */
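/*
 * Sketch of the SCHED() indirection (illustrative call sites; the real
 * ones live in osfmk/kern/sched_prim.c):
 *
 *	processor = SCHED(choose_processor)(pset, processor, thread);
 *	if (SCHED(priority_is_urgent)(thread->sched_pri)) {
 *		...
 *	}
 */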
struct sched_dispatch_table {
	const char *sched_name;
	void (*init)(void);                             /* Init global state */
	void (*timebase_init)(void);                    /* Timebase-dependent initialization */
	void (*processor_init)(processor_t processor);  /* Per-processor scheduler init */
	void (*pset_init)(processor_set_t pset);        /* Per-processor set scheduler init */

	void (*maintenance_continuation)(void);         /* Function called regularly */

	/*
	 * Choose a thread of greater or equal priority from the per-processor
	 * runqueue for timeshare/fixed threads
	 */
	thread_t (*choose_thread)(
		processor_t  processor,
		int          priority,
		ast_t        reason);

	/* True if scheduler supports stealing threads for this pset */
	bool (*steal_thread_enabled)(processor_set_t pset);

	/*
	 * Steal a thread from another processor in the pset so that it can run
	 * immediately
	 */
	thread_t (*steal_thread)(
		processor_set_t  pset);

	/*
	 * Compute priority for a timeshare thread based on base priority.
	 */
	int (*compute_timeshare_priority)(thread_t thread);

	/*
	 * Pick the best processor for a thread (any kind of thread) to run on.
	 */
	processor_t (*choose_processor)(
		processor_set_t  pset,
		processor_t      processor,
		thread_t         thread);

	/*
	 * Enqueue a timeshare or fixed priority thread onto the per-processor
	 * runqueue
	 */
	boolean_t (*processor_enqueue)(
		processor_t      processor,
		thread_t         thread,
		sched_options_t  options);

	/* Migrate threads away in preparation for processor shutdown */
	void (*processor_queue_shutdown)(
		processor_t  processor);

	/* Remove the specific thread from the per-processor runqueue */
	boolean_t (*processor_queue_remove)(
		processor_t  processor,
		thread_t     thread);

	/*
	 * Does the per-processor runqueue have any timeshare or fixed priority
	 * threads on it? Called without the pset lock held, so it should
	 * not assume immutability while executing.
	 */
	boolean_t (*processor_queue_empty)(processor_t processor);

	/*
	 * Would this priority trigger an urgent preemption if it's sitting
	 * on the per-processor runqueue?
	 */
	boolean_t (*priority_is_urgent)(int priority);

	/*
	 * Does the per-processor runqueue contain runnable threads that
	 * should cause the currently-running thread to be preempted?
	 */
	ast_t (*processor_csw_check)(processor_t processor);

	/*
	 * Does the per-processor runqueue contain a runnable thread
	 * of > or >= priority, as a preflight for choose_thread() or other
	 * thread selection.
	 */
	boolean_t (*processor_queue_has_priority)(processor_t processor,
	    int       priority,
	    boolean_t gte);

	/* Quantum size for the specified non-realtime thread. */
	uint32_t (*initial_quantum_size)(thread_t thread);

	/* Scheduler mode for a new thread */
	sched_mode_t (*initial_thread_sched_mode)(task_t parent_task);

	/*
	 * Is it safe to call update_priority, which may change a thread's
	 * runqueue or other state. This can be used to throttle changes
	 * to dynamic priority.
	 */
	boolean_t (*can_update_priority)(thread_t thread);

	/*
	 * Update both scheduled priority and other persistent state.
	 * Side effects may include migration to another processor's runqueue.
	 */
	void (*update_priority)(thread_t thread);

	/* Lower overhead update to scheduled priority and state. */
	void (*lightweight_update_priority)(thread_t thread);

	/* Callback for non-realtime threads when the quantum timer fires */
	void (*quantum_expire)(thread_t thread);

	/*
	 * Runnable threads on per-processor runqueue. Should only
	 * be used for relative comparisons of load between processors.
	 */
	int (*processor_runq_count)(processor_t processor);

	/* Aggregate runcount statistics for per-processor runqueue */
	uint64_t (*processor_runq_stats_count_sum)(processor_t processor);

	boolean_t (*processor_bound_count)(processor_t processor);

	void (*thread_update_scan)(sched_update_scan_context_t scan_context);

	/* Supports more than one pset */
	boolean_t multiple_psets_enabled;
	/* Supports scheduler groups */
	boolean_t sched_groups_enabled;

	/* Supports avoid-processor */
	boolean_t avoid_processor_enabled;

	/* Returns true if this processor should avoid running this thread. */
	bool (*thread_avoid_processor)(processor_t processor, thread_t thread);

	/*
	 * Invoked when a processor is about to choose the idle thread.
	 * Used to send IPIs to a processor which would be preferred to be idle instead.
	 * Called with pset lock held, returns with pset lock unlocked.
	 */
	void (*processor_balance)(processor_t processor, processor_set_t pset);
	rt_queue_t (*rt_runq)(processor_set_t pset);
	void (*rt_init)(processor_set_t pset);
	void (*rt_queue_shutdown)(processor_t processor);
	void (*rt_runq_scan)(sched_update_scan_context_t scan_context);
	int64_t (*rt_runq_count_sum)(void);

	uint32_t (*qos_max_parallelism)(int qos, uint64_t options);
	void (*check_spill)(processor_set_t pset, thread_t thread);
	sched_ipi_type_t (*ipi_policy)(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event);
	bool (*thread_should_yield)(processor_t processor, thread_t thread);

	/* Routines to update run counts */
	uint32_t (*run_count_incr)(thread_t thread);
	uint32_t (*run_count_decr)(thread_t thread);

	/* Routine to update scheduling bucket for a thread */
	void (*update_thread_bucket)(thread_t thread);

	/* Routine to inform the scheduler when a new pset becomes schedulable */
	void (*pset_made_schedulable)(processor_t processor, processor_set_t pset, boolean_t drop_lock);
};
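/*
 * Sketch of how a scheduler variant plugs in (abbreviated; a real table
 * initializes every member, as the dispatch-table definitions such as
 * sched_dualq.c do):
 *
 *	const struct sched_dispatch_table my_sched_dispatch = {
 *		.sched_name    = "my_sched",
 *		.init          = my_sched_init,
 *		.choose_thread = my_sched_choose_thread,
 *		...
 *	};
 */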
#if defined(CONFIG_SCHED_TRADITIONAL)
extern const struct sched_dispatch_table sched_traditional_dispatch;
extern const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch;
#endif

#if defined(CONFIG_SCHED_MULTIQ)
extern const struct sched_dispatch_table sched_multiq_dispatch;
extern const struct sched_dispatch_table sched_dualq_dispatch;
#if __AMP__
extern const struct sched_dispatch_table sched_amp_dispatch;
#endif
#endif

#if defined(CONFIG_SCHED_PROTO)
extern const struct sched_dispatch_table sched_proto_dispatch;
#endif

#if defined(CONFIG_SCHED_GRRR)
extern const struct sched_dispatch_table sched_grrr_dispatch;
#endif

#if defined(CONFIG_SCHED_CLUTCH)
extern const struct sched_dispatch_table sched_clutch_dispatch;
#endif

#endif /* MACH_KERNEL_PRIVATE */

__END_DECLS

#endif /* _KERN_SCHED_PRIM_H_ */