/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	sched_prim.h
 *	Author:	David Golub
 *
 *	Scheduling primitive definitions file
 *
 */

#ifndef _KERN_SCHED_PRIM_H_
#define _KERN_SCHED_PRIM_H_

#include <sys/cdefs.h>
#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/kern_return.h>
#include <kern/clock.h>
#include <kern/kern_types.h>
#include <kern/percpu.h>
#include <kern/thread.h>
#include <kern/block_hint.h>

extern int thread_get_current_cpuid(void);

#ifdef MACH_KERNEL_PRIVATE

#include <kern/sched_urgency.h>
#include <kern/thread_group.h>
#include <kern/waitq.h>

/* Initialization */
extern void sched_init(void);

extern void sched_startup(void);

extern void sched_timebase_init(void);

extern void pset_rt_init(processor_set_t pset);

extern void sched_rtlocal_init(processor_set_t pset);

extern rt_queue_t sched_rtlocal_runq(processor_set_t pset);

extern void sched_rtlocal_queue_shutdown(processor_t processor);

extern int64_t sched_rtlocal_runq_count_sum(void);

extern void sched_check_spill(processor_set_t pset, thread_t thread);

extern bool sched_thread_should_yield(processor_t processor, thread_t thread);

extern bool sched_steal_thread_DISABLED(processor_set_t pset);
extern bool sched_steal_thread_enabled(processor_set_t pset);

/* Force a preemption point for a thread and wait for it to stop running */
extern boolean_t thread_stop(
	thread_t  thread,
	boolean_t until_not_runnable);

/* Release a previous stop request */
extern void thread_unstop(
	thread_t thread);

/* Wait for a thread to stop running */
extern void thread_wait(
	thread_t  thread,
	boolean_t until_not_runnable);

/* Unblock thread on wake up */
extern boolean_t thread_unblock(
	thread_t      thread,
	wait_result_t wresult);

/* Unblock and dispatch thread */
extern kern_return_t thread_go(
	thread_t        thread,
	wait_result_t   wresult,
	waitq_options_t option);

/* Check if direct handoff is allowed */
extern boolean_t
thread_allowed_for_handoff(
	thread_t thread);

/* Handle threads at context switch */
extern void thread_dispatch(
	thread_t old_thread,
	thread_t new_thread);

/* Switch directly to a particular thread */
extern int thread_run(
	thread_t          self,
	thread_continue_t continuation,
	void              *parameter,
	thread_t          new_thread);

/* Resume thread with new stack */
extern __dead2 void thread_continue(thread_t old_thread);

/* Invoke continuation */
extern __dead2 void call_continuation(
	thread_continue_t continuation,
	void              *parameter,
	wait_result_t     wresult,
	boolean_t         enable_interrupts);

/*
 * Flags that can be passed to set_sched_pri
 * to skip side effects
 */
__options_decl(set_sched_pri_options_t, uint32_t, {
	SETPRI_DEFAULT = 0x0,
	SETPRI_LAZY    = 0x1,   /* Avoid setting AST flags or sending IPIs */
});

/* Set the current scheduled priority */
extern void set_sched_pri(
	thread_t                thread,
	int16_t                 priority,
	set_sched_pri_options_t options);

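/*
 * Illustrative sketch (not part of the original header): SETPRI_LAZY lets a
 * caller update the scheduled priority without immediately setting AST flags
 * or IPI-ing other cores, e.g. when the change cannot cause a preemption.
 * Hypothetical call site, assuming the thread is locked as set_sched_pri
 * requires:
 *
 *	set_sched_pri(thread, DEPRESSPRI, SETPRI_LAZY);
 */
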
/* Set base priority of the specified thread */
extern void sched_set_thread_base_priority(
	thread_t thread,
	int      priority);

/* Set absolute base priority of the specified thread */
extern void sched_set_kernel_thread_priority(
	thread_t thread,
	int      priority);


/* Set the thread's true scheduling mode */
extern void sched_set_thread_mode(thread_t thread,
    sched_mode_t mode);
/* Demote the true scheduler mode */
extern void sched_thread_mode_demote(thread_t thread,
    uint32_t reason);
/* Un-demote the true scheduler mode */
extern void sched_thread_mode_undemote(thread_t thread,
    uint32_t reason);

extern void sched_thread_promote_reason(thread_t thread, uint32_t reason, uintptr_t trace_obj);
extern void sched_thread_unpromote_reason(thread_t thread, uint32_t reason, uintptr_t trace_obj);

/* Re-evaluate base priority of thread (thread locked) */
void thread_recompute_priority(thread_t thread);

/* Re-evaluate scheduled priority of thread (thread locked) */
extern void thread_recompute_sched_pri(
	thread_t                thread,
	set_sched_pri_options_t options);

/* Periodic scheduler activity */
extern void sched_init_thread(void);

/* Perform sched_tick housekeeping activities */
extern boolean_t can_update_priority(
	thread_t thread);

extern void update_priority(
	thread_t thread);

extern void lightweight_update_priority(
	thread_t thread);

extern void sched_default_quantum_expire(thread_t thread);

/* Idle processor thread continuation */
extern void idle_thread(
	void          *parameter,
	wait_result_t result);

extern kern_return_t idle_thread_create(
	processor_t processor);

/* Continuation return from syscall */
extern void thread_syscall_return(
	kern_return_t ret);

/* Context switch */
extern wait_result_t thread_block_reason(
	thread_continue_t continuation,
	void              *parameter,
	ast_t             reason);

__options_decl(sched_options_t, uint32_t, {
	SCHED_NONE      = 0x0,
	SCHED_TAILQ     = 0x1,
	SCHED_HEADQ     = 0x2,
	SCHED_PREEMPT   = 0x4,
	SCHED_REBALANCE = 0x8,
});

/* Reschedule thread for execution */
extern void thread_setrun(
	thread_t        thread,
	sched_options_t options);

extern processor_set_t task_choose_pset(
	task_t task);

/* Bind the current thread to a particular processor */
extern processor_t thread_bind(
	processor_t processor);

extern bool pset_has_stealable_threads(
	processor_set_t pset);

extern processor_set_t choose_starting_pset(
	pset_node_t node,
	thread_t    thread,
	processor_t *processor_hint);

extern pset_node_t sched_choose_node(
	thread_t thread);

/* Choose the best processor to run a thread */
extern processor_t choose_processor(
	processor_set_t pset,
	processor_t     processor,
	thread_t        thread);

extern void sched_SMT_balance(
	processor_t     processor,
	processor_set_t pset);

extern void thread_quantum_init(
	thread_t thread);

extern void run_queue_init(
	run_queue_t runq);

extern thread_t run_queue_dequeue(
	run_queue_t     runq,
	sched_options_t options);

extern boolean_t run_queue_enqueue(
	run_queue_t     runq,
	thread_t        thread,
	sched_options_t options);

extern void run_queue_remove(
	run_queue_t runq,
	thread_t    thread);

extern thread_t run_queue_peek(
	run_queue_t runq);

struct sched_update_scan_context {
	uint64_t earliest_bg_make_runnable_time;
	uint64_t earliest_normal_make_runnable_time;
	uint64_t earliest_rt_make_runnable_time;
	uint64_t sched_tick_last_abstime;
};
typedef struct sched_update_scan_context *sched_update_scan_context_t;

extern void sched_rtlocal_runq_scan(sched_update_scan_context_t scan_context);

extern void sched_pset_made_schedulable(
	processor_t     processor,
	processor_set_t pset,
	boolean_t       drop_lock);

/*
 * Enum to define various events which need IPIs. The IPI policy
 * engine decides what kind of IPI to use based on destination
 * processor state, thread and one of the following scheduling events.
 */
typedef enum {
	SCHED_IPI_EVENT_BOUND_THR = 0x1,
	SCHED_IPI_EVENT_PREEMPT   = 0x2,
	SCHED_IPI_EVENT_SMT_REBAL = 0x3,
	SCHED_IPI_EVENT_SPILL     = 0x4,
	SCHED_IPI_EVENT_REBALANCE = 0x5,
} sched_ipi_event_t;


/* Enum to define various IPI types used by the scheduler */
typedef enum {
	SCHED_IPI_NONE      = 0x0,
	SCHED_IPI_IMMEDIATE = 0x1,
	SCHED_IPI_IDLE      = 0x2,
	SCHED_IPI_DEFERRED  = 0x3,
} sched_ipi_type_t;

/* The IPI policy engine behaves in the following manner:
 * - All scheduler events which need an IPI invoke sched_ipi_action() with
 *   the appropriate destination processor, thread and event.
 * - sched_ipi_action() performs basic checks, invokes the scheduler-specific
 *   ipi_policy routine and sets pending_AST bits based on the result.
 * - Once the pset lock is dropped, the scheduler invokes the sched_ipi_perform()
 *   routine, which actually sends the appropriate IPI to the destination core.
 */
extern sched_ipi_type_t sched_ipi_action(processor_t dst, thread_t thread,
    boolean_t dst_idle, sched_ipi_event_t event);
extern void sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi);

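/*
 * Illustrative sketch (not part of the original header) of the two-phase
 * pattern described above: decide on the IPI while the pset lock is held,
 * then actually send it after the lock is dropped. The locking calls are
 * hypothetical surrounding context:
 *
 *	sched_ipi_type_t ipi;
 *
 *	pset_lock(pset);
 *	ipi = sched_ipi_action(processor, thread, FALSE, SCHED_IPI_EVENT_PREEMPT);
 *	pset_unlock(pset);
 *	sched_ipi_perform(processor, ipi);
 */
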
/* sched_ipi_policy() is the global default IPI policy for all schedulers */
extern sched_ipi_type_t sched_ipi_policy(processor_t dst, thread_t thread,
    boolean_t dst_idle, sched_ipi_event_t event);

/* sched_ipi_deferred_policy() is the global default deferred IPI policy for all schedulers */
extern sched_ipi_type_t sched_ipi_deferred_policy(processor_set_t pset,
    processor_t dst, sched_ipi_event_t event);

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

extern boolean_t thread_update_add_thread(thread_t thread);
extern void thread_update_process_threads(void);
extern boolean_t runq_scan(run_queue_t runq, sched_update_scan_context_t scan_context);

#if CONFIG_SCHED_CLUTCH
extern boolean_t sched_clutch_timeshare_scan(queue_t thread_queue, uint16_t count, sched_update_scan_context_t scan_context);
#endif /* CONFIG_SCHED_CLUTCH */

extern void sched_timeshare_init(void);
extern void sched_timeshare_timebase_init(void);
extern void sched_timeshare_maintenance_continue(void);

extern boolean_t priority_is_urgent(int priority);
extern uint32_t sched_timeshare_initial_quantum_size(thread_t thread);

extern int sched_compute_timeshare_priority(thread_t thread);

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

/* Remove thread from its run queue */
extern boolean_t thread_run_queue_remove(thread_t thread);
thread_t thread_run_queue_remove_for_handoff(thread_t thread);

/* Put a thread back in the run queue after being yanked */
extern void thread_run_queue_reinsert(thread_t thread, sched_options_t options);

extern void thread_timer_expire(
	void *thread,
	void *p1);

extern bool thread_is_eager_preempt(thread_t thread);

extern boolean_t sched_generic_direct_dispatch_to_idle_processors;

/* Set the maximum interrupt level for the thread */
__private_extern__ wait_interrupt_t thread_interrupt_level(
	wait_interrupt_t interruptible);

__private_extern__ wait_result_t thread_mark_wait_locked(
	thread_t         thread,
	wait_interrupt_t interruptible);

/* Wake up locked thread directly, passing result */
__private_extern__ kern_return_t clear_wait_internal(
	thread_t      thread,
	wait_result_t result);

struct sched_statistics {
	uint32_t csw_count;
	uint32_t preempt_count;
	uint32_t preempted_rt_count;
	uint32_t preempted_by_rt_count;
	uint32_t rt_sched_count;
	uint32_t interrupt_count;
	uint32_t ipi_count;
	uint32_t timer_pop_count;
	uint32_t idle_transitions;
	uint32_t quantum_timer_expirations;
};
PERCPU_DECL(struct sched_statistics, sched_stats);
extern bool sched_stats_active;

extern void sched_stats_handle_csw(
	processor_t processor,
	int         reasons,
	int         selfpri,
	int         otherpri);

extern void sched_stats_handle_runq_change(
	struct runq_stats *stats,
	int               old_count);

#define SCHED_STATS_INC(field)                          \
MACRO_BEGIN                                             \
	if (__improbable(sched_stats_active)) {         \
	        PERCPU_GET(sched_stats)->field++;       \
	}                                               \
MACRO_END

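/*
 * Illustrative sketch (not part of the original header): SCHED_STATS_INC
 * bumps a per-CPU counter only when sched_stats_active is set, so the
 * common case costs a single predicted-untaken branch. Hypothetical call
 * site:
 *
 *	SCHED_STATS_INC(csw_count);     // count one context switch on this CPU
 */
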
#if DEBUG

#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri)         \
MACRO_BEGIN                                                            \
	if (__improbable(sched_stats_active)) {                        \
	        sched_stats_handle_csw((processor),                    \
	        (reasons), (selfpri), (otherpri));                     \
	}                                                              \
MACRO_END


#define SCHED_STATS_RUNQ_CHANGE(stats, old_count)                      \
MACRO_BEGIN                                                            \
	if (__improbable(sched_stats_active)) {                        \
	        sched_stats_handle_runq_change((stats), (old_count));  \
	}                                                              \
MACRO_END

#else /* DEBUG */

#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri) do { } while (0)
#define SCHED_STATS_RUNQ_CHANGE(stats, old_count) do { } while (0)

#endif /* DEBUG */

extern uint32_t sched_debug_flags;
#define SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS         0x00000001
#define SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS 0x00000002

#define SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(...)         \
MACRO_BEGIN                                                     \
	if (__improbable(sched_debug_flags &                    \
	    SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS)) {           \
	        KERNEL_DEBUG_CONSTANT(__VA_ARGS__);             \
	}                                                       \
MACRO_END

#define SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(...)        \
MACRO_BEGIN                                                            \
	if (__improbable(sched_debug_flags &                           \
	    SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS)) {          \
	        KERNEL_DEBUG_CONSTANT(__VA_ARGS__);                    \
	}                                                              \
MACRO_END
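
/*
 * Illustrative sketch (not part of the original header): these wrappers gate
 * KERNEL_DEBUG_CONSTANT on a runtime flag in sched_debug_flags, so the
 * tracepoints can be enabled without recompiling. Hypothetical call site;
 * the debug code shown is an assumption for illustration:
 *
 *	SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(
 *	        MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
 *	        0, 0, 0, 0, 0);
 */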

/* Tells if there are "active" RT threads in the system (provided by CPU PM) */
extern void active_rt_threads(
	boolean_t active);

/* Returns the perfcontrol attribute for the thread */
extern perfcontrol_class_t thread_get_perfcontrol_class(
	thread_t thread);

/* Generic routine for Non-AMP schedulers to calculate parallelism */
extern uint32_t sched_qos_max_parallelism(int qos, uint64_t options);

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef XNU_KERNEL_PRIVATE

extern void thread_bind_cluster_type(thread_t, char cluster_type, bool soft_bind);

extern int sched_get_rt_n_backup_processors(void);
extern void sched_set_rt_n_backup_processors(int n);

/* Toggles a global override to turn off CPU Throttling */
extern void sys_override_cpu_throttle(boolean_t enable_override);

/*
 ****************** Only exported until BSD stops using ********************
 */

extern void thread_vm_bind_group_add(void);

/* Wake up thread directly, passing result */
extern kern_return_t clear_wait(
	thread_t      thread,
	wait_result_t result);

/* Start thread running */
extern void thread_bootstrap_return(void) __attribute__((noreturn));

/* Return from exception (BSD-visible interface) */
extern void thread_exception_return(void) __dead2;

#define SCHED_STRING_MAX_LENGTH (48)
/* String declaring the name of the current scheduler */
extern char sched_string[SCHED_STRING_MAX_LENGTH];

__options_decl(thread_handoff_option_t, uint32_t, {
	THREAD_HANDOFF_NONE          = 0,
	THREAD_HANDOFF_SETRUN_NEEDED = 0x1,
});

/* Remove thread from its run queue */
thread_t thread_prepare_for_handoff(thread_t thread, thread_handoff_option_t option);

/* Attempt to context switch to a specific runnable thread */
extern wait_result_t thread_handoff_deallocate(thread_t thread, thread_handoff_option_t option);

__attribute__((nonnull(1, 2)))
extern void thread_handoff_parameter(thread_t thread,
    thread_continue_t continuation, void *parameter, thread_handoff_option_t) __dead2;

extern struct waitq *assert_wait_queue(event_t event);

extern kern_return_t thread_wakeup_one_with_pri(event_t event, int priority);

extern thread_t thread_wakeup_identify(event_t event, int priority);

#endif /* XNU_KERNEL_PRIVATE */

#ifdef KERNEL_PRIVATE
/* Set pending block hint for a particular object before we go into a wait state */
extern void thread_set_pending_block_hint(
	thread_t     thread,
	block_hint_t block_hint);

#define QOS_PARALLELISM_COUNT_LOGICAL 0x1
#define QOS_PARALLELISM_REALTIME      0x2
extern uint32_t qos_max_parallelism(int qos, uint64_t options);
#endif /* KERNEL_PRIVATE */

#if XNU_KERNEL_PRIVATE
extern void thread_yield_with_continuation(
	thread_continue_t continuation,
	void              *parameter) __dead2;
#endif

/* Context switch */
extern wait_result_t thread_block(
	thread_continue_t continuation);

extern wait_result_t thread_block_parameter(
	thread_continue_t continuation,
	void              *parameter);

/* Declare thread will wait on a particular event */
extern wait_result_t assert_wait(
	event_t          event,
	wait_interrupt_t interruptible);

/* Assert that the thread intends to wait with a timeout */
extern wait_result_t assert_wait_timeout(
	event_t          event,
	wait_interrupt_t interruptible,
	uint32_t         interval,
	uint32_t         scale_factor);

/* Assert that the thread intends to wait with an urgency, timeout and leeway */
extern wait_result_t assert_wait_timeout_with_leeway(
	event_t                event,
	wait_interrupt_t       interruptible,
	wait_timeout_urgency_t urgency,
	uint32_t               interval,
	uint32_t               leeway,
	uint32_t               scale_factor);

extern wait_result_t assert_wait_deadline(
	event_t          event,
	wait_interrupt_t interruptible,
	uint64_t         deadline);

/* Assert that the thread intends to wait with an urgency, deadline, and leeway */
extern wait_result_t assert_wait_deadline_with_leeway(
	event_t                event,
	wait_interrupt_t       interruptible,
	wait_timeout_urgency_t urgency,
	uint64_t               deadline,
	uint64_t               leeway);

/* Wake up thread (or threads) waiting on a particular event */
extern kern_return_t thread_wakeup_prim(
	event_t       event,
	boolean_t     one_thread,
	wait_result_t result);

#define thread_wakeup(x)                        \
	thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
#define thread_wakeup_with_result(x, z)         \
	thread_wakeup_prim((x), FALSE, (z))
#define thread_wakeup_one(x)                    \
	thread_wakeup_prim((x), TRUE, THREAD_AWAKENED)

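/*
 * Illustrative sketch (not part of the original header): the canonical
 * wait/wakeup pattern built from these primitives. "my_event" is a
 * hypothetical wait channel; any stable kernel address serves as an event_t.
 *
 *	wait_result_t wres;
 *
 *	wres = assert_wait((event_t)&my_event, THREAD_UNINT);
 *	if (wres == THREAD_WAITING) {
 *	        wres = thread_block(THREAD_CONTINUE_NULL);  // actually sleep
 *	}
 *
 *	// ... elsewhere, once the condition is satisfied:
 *	thread_wakeup((event_t)&my_event);                  // wake all waiters
 */
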
/* Wakeup the specified thread if it is waiting on this event */
extern kern_return_t thread_wakeup_thread(event_t event, thread_t thread);

extern boolean_t preemption_enabled(void);

#ifdef MACH_KERNEL_PRIVATE

/*
 * Scheduler algorithm indirection. If only one algorithm is
 * enabled at compile-time, a direct function call is used.
 * If more than one is enabled, calls are dispatched through
 * a function pointer table.
 */

#if !defined(CONFIG_SCHED_TRADITIONAL) && !defined(CONFIG_SCHED_PROTO) && !defined(CONFIG_SCHED_GRRR) && !defined(CONFIG_SCHED_MULTIQ) && !defined(CONFIG_SCHED_CLUTCH) && !defined(CONFIG_SCHED_EDGE)
#error Enable at least one scheduler algorithm in osfmk/conf/MASTER.XXX
#endif

#if __AMP__

#if CONFIG_SCHED_EDGE
extern const struct sched_dispatch_table sched_edge_dispatch;
#define SCHED(f) (sched_edge_dispatch.f)
#else /* CONFIG_SCHED_EDGE */
extern const struct sched_dispatch_table sched_amp_dispatch;
#define SCHED(f) (sched_amp_dispatch.f)
#endif /* CONFIG_SCHED_EDGE */

#else /* __AMP__ */

#if CONFIG_SCHED_CLUTCH
extern const struct sched_dispatch_table sched_clutch_dispatch;
#define SCHED(f) (sched_clutch_dispatch.f)
#else /* CONFIG_SCHED_CLUTCH */
extern const struct sched_dispatch_table sched_dualq_dispatch;
#define SCHED(f) (sched_dualq_dispatch.f)
#endif /* CONFIG_SCHED_CLUTCH */

#endif /* __AMP__ */
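
/*
 * Illustrative sketch (not part of the original header): SCHED(f) resolves
 * to a member of the dispatch table selected above, so callers are written
 * once against the indirection. Hypothetical call, mirroring how the
 * scheduler picks a processor for a thread:
 *
 *	processor = SCHED(choose_processor)(pset, processor, thread);
 */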

struct sched_dispatch_table {
	const char *sched_name;
	void (*init)(void);                             /* Init global state */
	void (*timebase_init)(void);                    /* Timebase-dependent initialization */
	void (*processor_init)(processor_t processor);  /* Per-processor scheduler init */
	void (*pset_init)(processor_set_t pset);        /* Per-processor set scheduler init */

	void (*maintenance_continuation)(void);         /* Function called regularly */

	/*
	 * Choose a thread of greater or equal priority from the per-processor
	 * runqueue for timeshare/fixed threads
	 */
	thread_t (*choose_thread)(
		processor_t processor,
		int         priority,
		ast_t       reason);

	/* True if scheduler supports stealing threads for this pset */
	bool (*steal_thread_enabled)(processor_set_t pset);

	/*
	 * Steal a thread from another processor in the pset so that it can run
	 * immediately
	 */
	thread_t (*steal_thread)(
		processor_set_t pset);

	/*
	 * Compute priority for a timeshare thread based on base priority.
	 */
	int (*compute_timeshare_priority)(thread_t thread);

	/*
	 * Pick the best node for a thread to run on.
	 */
	pset_node_t (*choose_node)(
		thread_t thread);

	/*
	 * Pick the best processor for a thread (any kind of thread) to run on.
	 */
	processor_t (*choose_processor)(
		processor_set_t pset,
		processor_t     processor,
		thread_t        thread);
	/*
	 * Enqueue a timeshare or fixed priority thread onto the per-processor
	 * runqueue
	 */
	boolean_t (*processor_enqueue)(
		processor_t     processor,
		thread_t        thread,
		sched_options_t options);

	/* Migrate threads away in preparation for processor shutdown */
	void (*processor_queue_shutdown)(
		processor_t processor);

	/* Remove the specific thread from the per-processor runqueue */
	boolean_t (*processor_queue_remove)(
		processor_t processor,
		thread_t    thread);

	/*
	 * Does the per-processor runqueue have any timeshare or fixed priority
	 * threads on it? Called without pset lock held, so should
	 * not assume immutability while executing.
	 */
	boolean_t (*processor_queue_empty)(processor_t processor);

	/*
	 * Would this priority trigger an urgent preemption if it's sitting
	 * on the per-processor runqueue?
	 */
	boolean_t (*priority_is_urgent)(int priority);

	/*
	 * Does the per-processor runqueue contain runnable threads that
	 * should cause the currently-running thread to be preempted?
	 */
	ast_t (*processor_csw_check)(processor_t processor);

	/*
	 * Does the per-processor runqueue contain a runnable thread
	 * of > or >= priority, as a preflight for choose_thread() or other
	 * thread selection?
	 */
	boolean_t (*processor_queue_has_priority)(processor_t processor,
	    int       priority,
	    boolean_t gte);

	/* Quantum size for the specified non-realtime thread. */
	uint32_t (*initial_quantum_size)(thread_t thread);

	/* Scheduler mode for a new thread */
	sched_mode_t (*initial_thread_sched_mode)(task_t parent_task);

	/*
	 * Is it safe to call update_priority, which may change a thread's
	 * runqueue or other state? This can be used to throttle changes
	 * to dynamic priority.
	 */
	boolean_t (*can_update_priority)(thread_t thread);

	/*
	 * Update both scheduled priority and other persistent state.
	 * Side effects may include migration to another processor's runqueue.
	 */
	void (*update_priority)(thread_t thread);

	/* Lower overhead update to scheduled priority and state. */
	void (*lightweight_update_priority)(thread_t thread);

	/* Callback for non-realtime threads when the quantum timer fires */
	void (*quantum_expire)(thread_t thread);

	/*
	 * Runnable threads on per-processor runqueue. Should only
	 * be used for relative comparisons of load between processors.
	 */
	int (*processor_runq_count)(processor_t processor);

	/* Aggregate runcount statistics for per-processor runqueue */
	uint64_t (*processor_runq_stats_count_sum)(processor_t processor);

	boolean_t (*processor_bound_count)(processor_t processor);

	void (*thread_update_scan)(sched_update_scan_context_t scan_context);

	/* Supports more than one pset */
	boolean_t multiple_psets_enabled;
	/* Supports scheduler groups */
	boolean_t sched_groups_enabled;

	/* Supports avoid-processor */
	boolean_t avoid_processor_enabled;

	/* Returns true if this processor should avoid running this thread. */
	bool (*thread_avoid_processor)(processor_t processor, thread_t thread);

	/*
	 * Invoked when a processor is about to choose the idle thread.
	 * Used to send IPIs to a processor which would be preferred to be idle instead.
	 * Called with pset lock held, returns with pset lock unlocked.
	 */
	void (*processor_balance)(processor_t processor, processor_set_t pset);
	rt_queue_t (*rt_runq)(processor_set_t pset);
	void (*rt_init)(processor_set_t pset);
	void (*rt_queue_shutdown)(processor_t processor);
	void (*rt_runq_scan)(sched_update_scan_context_t scan_context);
	int64_t (*rt_runq_count_sum)(void);

	uint32_t (*qos_max_parallelism)(int qos, uint64_t options);
	void (*check_spill)(processor_set_t pset, thread_t thread);
	sched_ipi_type_t (*ipi_policy)(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event);
	bool (*thread_should_yield)(processor_t processor, thread_t thread);

	/* Routines to update run counts */
	uint32_t (*run_count_incr)(thread_t thread);
	uint32_t (*run_count_decr)(thread_t thread);

	/* Routine to update scheduling bucket for a thread */
	void (*update_thread_bucket)(thread_t thread);

	/* Routine to inform the scheduler when a new pset becomes schedulable */
	void (*pset_made_schedulable)(processor_t processor, processor_set_t pset, boolean_t drop_lock);
#if CONFIG_THREAD_GROUPS
	/* Routine to inform the scheduler when CLPC changes a thread group recommendation */
	void (*thread_group_recommendation_change)(struct thread_group *tg, cluster_type_t new_recommendation);
#endif
};

#if defined(CONFIG_SCHED_TRADITIONAL)
extern const struct sched_dispatch_table sched_traditional_dispatch;
extern const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch;
#endif

#if defined(CONFIG_SCHED_MULTIQ)
extern const struct sched_dispatch_table sched_multiq_dispatch;
extern const struct sched_dispatch_table sched_dualq_dispatch;
#if __AMP__
extern const struct sched_dispatch_table sched_amp_dispatch;
#endif
#endif

#if defined(CONFIG_SCHED_PROTO)
extern const struct sched_dispatch_table sched_proto_dispatch;
#endif

#if defined(CONFIG_SCHED_GRRR)
extern const struct sched_dispatch_table sched_grrr_dispatch;
#endif

#if defined(CONFIG_SCHED_CLUTCH)
extern const struct sched_dispatch_table sched_clutch_dispatch;
#endif

#if defined(CONFIG_SCHED_EDGE)
extern const struct sched_dispatch_table sched_edge_dispatch;
#endif


#endif /* MACH_KERNEL_PRIVATE */

__END_DECLS

#endif /* _KERN_SCHED_PRIM_H_ */