/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	sched_prim.h
 *	Author:	David Golub
 *
 *	Scheduling primitive definitions file
 *
 */

#ifndef _KERN_SCHED_PRIM_H_
#define _KERN_SCHED_PRIM_H_

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/kern_return.h>
#include <kern/clock.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <sys/cdefs.h>

#ifdef MACH_KERNEL_PRIVATE

/* Initialization */
extern void             sched_init(void);

extern void             sched_startup(void);

extern void             sched_timebase_init(void);

/* Force a preemption point for a thread and wait for it to stop running */
extern boolean_t        thread_stop(
                                thread_t        thread,
                                boolean_t       until_not_runnable);

/* Release a previous stop request */
extern void             thread_unstop(
                                thread_t        thread);

/* Wait for a thread to stop running */
extern void             thread_wait(
                                thread_t        thread,
                                boolean_t       until_not_runnable);

/* Unblock thread on wake up */
extern boolean_t        thread_unblock(
                                thread_t        thread,
                                wait_result_t   wresult);

/* Unblock and dispatch thread */
extern kern_return_t    thread_go(
                                thread_t        thread,
                                wait_result_t   wresult);

/* Handle threads at context switch */
extern void             thread_dispatch(
                                thread_t        old_thread,
                                thread_t        new_thread);

/* Switch directly to a particular thread */
extern int              thread_run(
                                thread_t                self,
                                thread_continue_t       continuation,
                                void                    *parameter,
                                thread_t                new_thread);

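/*
 * Example: a directed handoff with thread_run() (a sketch; `example_handoff`
 * and `example_resume` are hypothetical).  The caller hands the remainder of
 * its quantum directly to `new_thread` and, because a continuation is
 * supplied, resumes in `example_resume` on a fresh stack when next run.
 */
#if 0   /* illustrative example, not compiled */
static void
example_resume(void *parameter, wait_result_t wresult)
{
        /* Continue here when this thread is next scheduled */
}

static void
example_handoff(thread_t new_thread)
{
        thread_run(current_thread(), example_resume, NULL, new_thread);
        /* NOTREACHED: control resumes in example_resume */
}
#endif
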
/* Resume thread with new stack */
extern void             thread_continue(
                                thread_t        old_thread);

/* Invoke continuation */
extern void             call_continuation(
                                thread_continue_t       continuation,
                                void                    *parameter,
                                wait_result_t           wresult);

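/*
 * Example: continuation-based blocking (a sketch; `example_continue` and
 * `example_sleep` are hypothetical).  Passing a continuation to
 * thread_block_parameter() lets the kernel discard this thread's stack
 * while it sleeps; the wait result is delivered to the continuation.
 */
#if 0   /* illustrative example, not compiled */
static void
example_continue(void *parameter, wait_result_t wresult)
{
        /* Runs on a fresh stack once the thread is woken and dispatched */
        if (wresult == THREAD_AWAKENED) {
                /* ... complete the operation described by `parameter` ... */
        }
        thread_syscall_return(KERN_SUCCESS);
        /* NOTREACHED */
}

static void
example_sleep(void *object)
{
        assert_wait((event_t)object, THREAD_UNINT);
        thread_block_parameter(example_continue, object);
        /* NOTREACHED: control resumes in example_continue */
}
#endif
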
/* Set the current scheduled priority */
extern void             set_sched_pri(
                                thread_t        thread,
                                int             priority);

/* Set base priority of the specified thread */
extern void             set_priority(
                                thread_t        thread,
                                int             priority);

/* Reset scheduled priority of thread */
extern void             compute_priority(
                                thread_t        thread,
                                boolean_t       override_depress);

/* Adjust scheduled priority of thread during execution */
extern void             compute_my_priority(
                                thread_t        thread);

/* Periodic scheduler activity */
extern void             sched_init_thread(void (*)(void));

/* Perform sched_tick housekeeping activities */
extern boolean_t        can_update_priority(
                                thread_t        thread);

extern void             update_priority(
                                thread_t        thread);

extern void             lightweight_update_priority(
                                thread_t        thread);

extern void             sched_traditional_quantum_expire(thread_t thread);

/* Idle processor thread */
extern void             idle_thread(void);

extern kern_return_t    idle_thread_create(
                                processor_t     processor);

/* Continuation return from syscall */
extern void             thread_syscall_return(
                                kern_return_t   ret);

/* Context switch */
extern wait_result_t    thread_block_reason(
                                thread_continue_t       continuation,
                                void                    *parameter,
                                ast_t                   reason);

/* Reschedule thread for execution */
extern void             thread_setrun(
                                thread_t        thread,
                                integer_t       options);

#define SCHED_TAILQ     1
#define SCHED_HEADQ     2
#define SCHED_PREEMPT   4

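/*
 * Example: making a thread runnable with thread_setrun() (a sketch;
 * `example_make_runnable` is hypothetical, and the thread is assumed to be
 * locked by the caller).  The options combine a queue position with an
 * optional preemption check.
 */
#if 0   /* illustrative example, not compiled */
static void
example_make_runnable(thread_t thread)
{
        /* Enqueue at the tail and check whether preemption is warranted */
        thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
}
#endif
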
extern processor_set_t  task_choose_pset(
                                task_t          task);

/* Bind the current thread to a particular processor */
extern processor_t      thread_bind(
                                processor_t     processor);

/* Choose the best processor to run a thread */
extern processor_t      choose_processor(
                                processor_set_t pset,
                                processor_t     processor,
                                thread_t        thread);

/* Choose a thread from a processor's priority-based runq */
extern thread_t         choose_thread(
                                processor_t     processor,
                                run_queue_t     runq,
                                int             priority);

extern void             thread_quantum_init(
                                thread_t        thread);

extern void             run_queue_init(
                                run_queue_t     runq);

extern thread_t         run_queue_dequeue(
                                run_queue_t     runq,
                                integer_t       options);

extern boolean_t        run_queue_enqueue(
                                run_queue_t     runq,
                                thread_t        thread,
                                integer_t       options);

extern void             run_queue_remove(
                                run_queue_t     runq,
                                thread_t        thread);

/* Remove thread from its run queue */
extern boolean_t        thread_run_queue_remove(
                                thread_t        thread);

extern void             thread_timer_expire(
                                void            *thread,
                                void            *p1);

extern boolean_t        thread_eager_preemption(
                                thread_t        thread);

/* Fair Share routines */
#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_PROTO) || defined(CONFIG_SCHED_FIXEDPRIORITY)
void            sched_traditional_fairshare_init(void);

int             sched_traditional_fairshare_runq_count(void);

uint64_t        sched_traditional_fairshare_runq_stats_count_sum(void);

void            sched_traditional_fairshare_enqueue(thread_t thread);

thread_t        sched_traditional_fairshare_dequeue(void);

boolean_t       sched_traditional_fairshare_queue_remove(thread_t thread);
#endif

#if defined(CONFIG_SCHED_GRRR) || defined(CONFIG_SCHED_FIXEDPRIORITY)
void            sched_grrr_fairshare_init(void);

int             sched_grrr_fairshare_runq_count(void);

uint64_t        sched_grrr_fairshare_runq_stats_count_sum(void);

void            sched_grrr_fairshare_enqueue(thread_t thread);

thread_t        sched_grrr_fairshare_dequeue(void);

boolean_t       sched_grrr_fairshare_queue_remove(thread_t thread);
#endif

extern boolean_t        sched_generic_direct_dispatch_to_idle_processors;

/* Set the maximum interrupt level for the thread */
__private_extern__ wait_interrupt_t     thread_interrupt_level(
                                                wait_interrupt_t interruptible);

__private_extern__ wait_result_t        thread_mark_wait_locked(
                                                thread_t        thread,
                                                wait_interrupt_t interruptible);

/* Wake up locked thread directly, passing result */
__private_extern__ kern_return_t        clear_wait_internal(
                                                thread_t        thread,
                                                wait_result_t   result);

extern void             sched_stats_handle_csw(
                                processor_t     processor,
                                int             reasons,
                                int             selfpri,
                                int             otherpri);

extern void             sched_stats_handle_runq_change(
                                struct runq_stats *stats,
                                int             old_count);

#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri)         \
do {                                                                   \
        if (__builtin_expect(sched_stats_active, 0)) {                 \
                sched_stats_handle_csw((processor),                    \
                                (reasons), (selfpri), (otherpri));     \
        }                                                              \
} while (0)

#define SCHED_STATS_RUNQ_CHANGE(stats, old_count)                      \
do {                                                                   \
        if (__builtin_expect(sched_stats_active, 0)) {                 \
                sched_stats_handle_runq_change((stats),                \
                                                (old_count));          \
        }                                                              \
} while (0)

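/*
 * Example: recording a runqueue length change (a sketch;
 * `example_enqueue_with_stats` is hypothetical, and the `runq_stats` and
 * `count` fields are assumed from struct run_queue).  Both macros expand
 * to nothing unless `sched_stats_active` is set.
 */
#if 0   /* illustrative example, not compiled */
static void
example_enqueue_with_stats(run_queue_t runq, thread_t thread)
{
        /* Record the old length before the enqueue changes it */
        SCHED_STATS_RUNQ_CHANGE(&runq->runq_stats, runq->count);
        run_queue_enqueue(runq, thread, SCHED_TAILQ);
}
#endif
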
#define THREAD_URGENCY_NONE         0   /* no currently runnable thread */
#define THREAD_URGENCY_BACKGROUND   1   /* thread marked as a "background" thread */
#define THREAD_URGENCY_NORMAL       2   /* thread marked as a "normal" thread */
#define THREAD_URGENCY_REAL_TIME    3   /* thread marked as a "real-time" or urgent thread */
#define THREAD_URGENCY_MAX          4   /* marker */
/* Returns the "urgency" of a thread (provided by scheduler) */
extern int              thread_get_urgency(
                                thread_t        thread,
                                uint64_t        *rt_period,
                                uint64_t        *rt_deadline);

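/*
 * Example: querying a thread's urgency and reporting it to CPU power
 * management (a sketch; `example_report_urgency` is hypothetical, and
 * thread_tell_urgency() is declared just below).
 */
#if 0   /* illustrative example, not compiled */
static void
example_report_urgency(thread_t thread)
{
        uint64_t        rt_period, rt_deadline;
        int             urgency;

        urgency = thread_get_urgency(thread, &rt_period, &rt_deadline);
        thread_tell_urgency(urgency, rt_period, rt_deadline, thread);
}
#endif
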
/* Tells the "urgency" of the just scheduled thread (provided by CPU PM) */
extern void             thread_tell_urgency(
                                int             urgency,
                                uint64_t        rt_period,
                                uint64_t        rt_deadline,
                                thread_t        nthread);

/* Tells if there are "active" RT threads in the system (provided by CPU PM) */
extern void             active_rt_threads(
                                boolean_t       active);

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef XNU_KERNEL_PRIVATE

extern boolean_t        assert_wait_possible(void);

/* Toggles a global override to turn off CPU Throttling */
#define CPU_THROTTLE_DISABLE    0
#define CPU_THROTTLE_ENABLE     1
extern void             sys_override_cpu_throttle(int flag);

/*
 ****************** Only exported until BSD stops using ********************
 */

/* Wake up thread directly, passing result */
extern kern_return_t    clear_wait(
                                thread_t        thread,
                                wait_result_t   result);

/* Start thread running */
extern void             thread_bootstrap_return(void);

/* Return from exception (BSD-visible interface) */
extern void             thread_exception_return(void) __dead2;

#endif /* XNU_KERNEL_PRIVATE */

/* Context switch */
extern wait_result_t    thread_block(
                                thread_continue_t       continuation);

extern wait_result_t    thread_block_parameter(
                                thread_continue_t       continuation,
                                void                    *parameter);

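/*
 * Example: the canonical assert_wait()/thread_block() pattern (a sketch;
 * `example_object` and its `state` field are hypothetical).  assert_wait()
 * is declared below; the event is simply a kernel address both the waiter
 * and the waker agree on.
 */
#if 0   /* illustrative example, not compiled */
static wait_result_t
example_wait(struct example_object *object)
{
        wait_result_t wresult;

        wresult = assert_wait((event_t)&object->state, THREAD_UNINT);
        if (wresult == THREAD_WAITING)
                wresult = thread_block(THREAD_CONTINUE_NULL);
        return (wresult);       /* THREAD_AWAKENED after a matching wakeup */
}
#endif
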
/* Declare thread will wait on a particular event */
extern wait_result_t    assert_wait(
                                event_t         event,
                                wait_interrupt_t interruptible);

/* Assert that the thread intends to wait with a timeout */
extern wait_result_t    assert_wait_timeout(
                                event_t         event,
                                wait_interrupt_t interruptible,
                                uint32_t        interval,
                                uint32_t        scale_factor);

/* Assert that the thread intends to wait with an urgency, timeout and leeway */
extern wait_result_t    assert_wait_timeout_with_leeway(
                                event_t         event,
                                wait_interrupt_t interruptible,
                                wait_timeout_urgency_t urgency,
                                uint32_t        interval,
                                uint32_t        leeway,
                                uint32_t        scale_factor);

extern wait_result_t    assert_wait_deadline(
                                event_t         event,
                                wait_interrupt_t interruptible,
                                uint64_t        deadline);

/* Assert that the thread intends to wait with an urgency, deadline, and leeway */
extern wait_result_t    assert_wait_deadline_with_leeway(
                                event_t         event,
                                wait_interrupt_t interruptible,
                                wait_timeout_urgency_t urgency,
                                uint64_t        deadline,
                                uint64_t        leeway);

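/*
 * Example: a bounded wait (a sketch; `example_object` is hypothetical).
 * The interval is expressed in units of `scale_factor` nanoseconds, so
 * 100 ticks at 1000 * NSEC_PER_USEC is roughly a 100ms timeout.
 */
#if 0   /* illustrative example, not compiled */
static boolean_t
example_wait_with_timeout(struct example_object *object)
{
        wait_result_t wresult;

        wresult = assert_wait_timeout((event_t)&object->state,
            THREAD_INTERRUPTIBLE, 100, 1000 * NSEC_PER_USEC);
        if (wresult == THREAD_WAITING)
                wresult = thread_block(THREAD_CONTINUE_NULL);
        return (wresult != THREAD_TIMED_OUT);
}
#endif
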
/* Wake up thread (or threads) waiting on a particular event */
extern kern_return_t    thread_wakeup_prim(
                                event_t         event,
                                boolean_t       one_thread,
                                wait_result_t   result);

extern kern_return_t    thread_wakeup_prim_internal(
                                event_t         event,
                                boolean_t       one_thread,
                                wait_result_t   result,
                                int             priority);

#define thread_wakeup(x)                                        \
                thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
#define thread_wakeup_with_result(x, z)                         \
                thread_wakeup_prim((x), FALSE, (z))
#define thread_wakeup_one(x)                                    \
                thread_wakeup_prim((x), TRUE, THREAD_AWAKENED)

#ifdef MACH_KERNEL_PRIVATE
#define thread_wakeup_one_with_pri(x, pri)                      \
                thread_wakeup_prim_internal((x), TRUE, THREAD_AWAKENED, pri)
#endif

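/*
 * Example: the wakeup side of the wait pattern (a sketch; `example_object`
 * and its fields are hypothetical).  State should be published before the
 * wakeup so a racing waiter re-checks it under its own locking.
 */
#if 0   /* illustrative example, not compiled */
static void
example_finish(struct example_object *object)
{
        object->done = TRUE;                            /* publish state first */
        thread_wakeup((event_t)&object->state);         /* wake all waiters */

        /*
         * Variants: thread_wakeup_one() wakes a single waiter;
         * thread_wakeup_with_result() passes an explicit wait result.
         */
}
#endif
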
extern boolean_t        preemption_enabled(void);

#ifdef MACH_KERNEL_PRIVATE

/*
 * Scheduler algorithm indirection. If only one algorithm is
 * enabled at compile-time, a direct function call is used.
 * If more than one is enabled, calls are dispatched through
 * a function pointer table.
 */

#if !defined(CONFIG_SCHED_TRADITIONAL) && !defined(CONFIG_SCHED_PROTO) && !defined(CONFIG_SCHED_GRRR) && !defined(CONFIG_SCHED_FIXEDPRIORITY)
#error Enable at least one scheduler algorithm in osfmk/conf/MASTER.XXX
#endif

#define SCHED(f) (sched_current_dispatch->f)

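/*
 * Example: call sites never name an algorithm directly; they dispatch
 * through SCHED() (a sketch; `example_choose` is hypothetical).
 */
#if 0   /* illustrative example, not compiled */
static processor_t
example_choose(processor_set_t pset, processor_t processor, thread_t thread)
{
        /* Expands to sched_current_dispatch->choose_processor(...) */
        return SCHED(choose_processor)(pset, processor, thread);
}
#endif
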
struct sched_dispatch_table {
        void            (*init)(void);                          /* Init global state */
        void            (*timebase_init)(void);                 /* Timebase-dependent initialization */
        void            (*processor_init)(processor_t processor);      /* Per-processor scheduler init */
        void            (*pset_init)(processor_set_t pset);     /* Per-processor set scheduler init */

        void            (*maintenance_continuation)(void);      /* Function called regularly */

        /*
         * Choose a thread of greater or equal priority from the per-processor
         * runqueue for timeshare/fixed threads
         */
        thread_t        (*choose_thread)(
                                processor_t     processor,
                                int             priority);

        /*
         * Steal a thread from another processor in the pset so that it can run
         * immediately
         */
        thread_t        (*steal_thread)(
                                processor_set_t pset);

        /*
         * Recalculate sched_pri based on base priority, past running time,
         * and scheduling class.
         */
        void            (*compute_priority)(
                                thread_t        thread,
                                boolean_t       override_depress);

        /*
         * Pick the best processor for a thread (any kind of thread) to run on.
         */
        processor_t     (*choose_processor)(
                                processor_set_t pset,
                                processor_t     processor,
                                thread_t        thread);
        /*
         * Enqueue a timeshare or fixed priority thread onto the per-processor
         * runqueue
         */
        boolean_t       (*processor_enqueue)(
                                processor_t     processor,
                                thread_t        thread,
                                integer_t       options);

        /* Migrate threads away in preparation for processor shutdown */
        void            (*processor_queue_shutdown)(
                                processor_t     processor);

        /* Remove the specific thread from the per-processor runqueue */
        boolean_t       (*processor_queue_remove)(
                                processor_t     processor,
                                thread_t        thread);

        /*
         * Does the per-processor runqueue have any timeshare or fixed priority
         * threads on it? Called without the pset lock held, so it should
         * not assume immutability while executing.
         */
        boolean_t       (*processor_queue_empty)(processor_t processor);

        /*
         * Would this priority trigger an urgent preemption if it were sitting
         * on the per-processor runqueue?
         */
        boolean_t       (*priority_is_urgent)(int priority);

        /*
         * Does the per-processor runqueue contain runnable threads that
         * should cause the currently-running thread to be preempted?
         */
        ast_t           (*processor_csw_check)(processor_t processor);

        /*
         * Does the per-processor runqueue contain a runnable thread
         * of > or >= priority, as a preflight for choose_thread() or other
         * thread selection?
         */
        boolean_t       (*processor_queue_has_priority)(
                                processor_t     processor,
                                int             priority,
                                boolean_t       gte);

        /* Quantum size for the specified non-realtime thread. */
        uint32_t        (*initial_quantum_size)(thread_t thread);

        /* Scheduler mode for a new thread */
        sched_mode_t    (*initial_thread_sched_mode)(task_t parent_task);

        /* Scheduler algorithm supports timeshare (decay) mode */
        boolean_t       (*supports_timeshare_mode)(void);

        /*
         * Is it safe to call update_priority, which may change a thread's
         * runqueue or other state?  This can be used to throttle changes
         * to dynamic priority.
         */
        boolean_t       (*can_update_priority)(thread_t thread);

        /*
         * Update both scheduled priority and other persistent state.
         * Side effects may include migration to another processor's runqueue.
         */
        void            (*update_priority)(thread_t thread);

        /* Lower overhead update to scheduled priority and state. */
        void            (*lightweight_update_priority)(thread_t thread);

        /* Callback for non-realtime threads when the quantum timer fires */
        void            (*quantum_expire)(thread_t thread);

        /*
         * Even though we could continue executing on this processor, does the
         * topology (SMT, for instance) indicate that a better processor could be
         * chosen?
         */
        boolean_t       (*should_current_thread_rechoose_processor)(processor_t processor);

        /*
         * Runnable threads on per-processor runqueue. Should only
         * be used for relative comparisons of load between processors.
         */
        int             (*processor_runq_count)(processor_t processor);

        /* Aggregate runcount statistics for per-processor runqueue */
        uint64_t        (*processor_runq_stats_count_sum)(processor_t processor);

        /* Initialize structures to track demoted fairshare threads */
        void            (*fairshare_init)(void);

        /* Number of runnable fairshare threads */
        int             (*fairshare_runq_count)(void);

        /* Aggregate runcount statistics for fairshare runqueue */
        uint64_t        (*fairshare_runq_stats_count_sum)(void);

        void            (*fairshare_enqueue)(thread_t thread);

        thread_t        (*fairshare_dequeue)(void);

        boolean_t       (*fairshare_queue_remove)(thread_t thread);

        /*
         * Use processor->next_thread to pin a thread to an idle
         * processor. If FALSE, threads are enqueued and can
         * be stolen by other processors.
         */
        boolean_t       direct_dispatch_to_idle_processors;
};

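/*
 * Example: the shape of the table an algorithm exports (a sketch; the
 * `sched_example_*` symbols are hypothetical, while the field names match
 * the struct above).
 */
#if 0   /* illustrative example, not compiled */
static const struct sched_dispatch_table sched_example_dispatch = {
        .init                   = sched_example_init,
        .timebase_init          = sched_example_timebase_init,
        .choose_thread          = sched_example_choose_thread,
        .steal_thread           = sched_example_steal_thread,
        /* ... remaining function pointers ... */
        .direct_dispatch_to_idle_processors = FALSE,
};
#endif
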
#if defined(CONFIG_SCHED_TRADITIONAL)
#define kSchedTraditionalString "traditional"
#define kSchedTraditionalWithPsetRunqueueString "traditional_with_pset_runqueue"
extern const struct sched_dispatch_table sched_traditional_dispatch;
extern const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch;
#endif

#if defined(CONFIG_SCHED_PROTO)
#define kSchedProtoString "proto"
extern const struct sched_dispatch_table sched_proto_dispatch;
#endif

#if defined(CONFIG_SCHED_GRRR)
#define kSchedGRRRString "grrr"
extern const struct sched_dispatch_table sched_grrr_dispatch;
#endif

#if defined(CONFIG_SCHED_FIXEDPRIORITY)
#define kSchedFixedPriorityString "fixedpriority"
#define kSchedFixedPriorityWithPsetRunqueueString "fixedpriority_with_pset_runqueue"
extern const struct sched_dispatch_table sched_fixedpriority_dispatch;
extern const struct sched_dispatch_table sched_fixedpriority_with_pset_runqueue_dispatch;
#endif

/*
 * It is an error to invoke any scheduler-related code
 * before this is set up
 */
enum sched_enum {
        sched_enum_unknown = 0,
#if defined(CONFIG_SCHED_TRADITIONAL)
        sched_enum_traditional = 1,
        sched_enum_traditional_with_pset_runqueue = 2,
#endif
#if defined(CONFIG_SCHED_PROTO)
        sched_enum_proto = 3,
#endif
#if defined(CONFIG_SCHED_GRRR)
        sched_enum_grrr = 4,
#endif
#if defined(CONFIG_SCHED_FIXEDPRIORITY)
        sched_enum_fixedpriority = 5,
        sched_enum_fixedpriority_with_pset_runqueue = 6,
#endif
        sched_enum_max = 7
};

extern const struct sched_dispatch_table *sched_current_dispatch;

#endif /* MACH_KERNEL_PRIVATE */

__END_DECLS

#endif /* _KERN_SCHED_PRIM_H_ */