/*
 * Provenance: osfmk/kern/sched_prim.h from Apple xnu-1699.24.8.
 * NOTE: this copy was captured from a git-blame web view; the lines below
 * carry blame annotations (commit hashes and view line numbers) that must
 * be stripped before the header can be compiled.
 */
1c79356b 1/*
6d2010ae 2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58/*
59 * File: sched_prim.h
60 * Author: David Golub
61 *
62 * Scheduling primitive definitions file
63 *
64 */
65
66#ifndef _KERN_SCHED_PRIM_H_
67#define _KERN_SCHED_PRIM_H_
68
69#include <mach/boolean.h>
70#include <mach/machine/vm_types.h>
71#include <mach/kern_return.h>
72#include <kern/clock.h>
73#include <kern/kern_types.h>
74#include <kern/thread.h>
91447636 75#include <sys/cdefs.h>
9bccf70c
A
76
77#ifdef MACH_KERNEL_PRIVATE
1c79356b 78
91447636 79/* Initialization */
2d21ac55 80extern void sched_init(void) __attribute__((section("__TEXT, initcode")));
1c79356b 81
91447636 82extern void sched_startup(void);
1c79356b 83
91447636 84extern void sched_timebase_init(void);
1c79356b 85
91447636 86/* Force a preemption point for a thread and wait for it to stop running */
1c79356b
A
87extern boolean_t thread_stop(
88 thread_t thread);
89
91447636
A
90/* Release a previous stop request */
91extern void thread_unstop(
1c79356b
A
92 thread_t thread);
93
91447636
A
94/* Wait for a thread to stop running */
95extern void thread_wait(
96 thread_t thread);
1c79356b 97
91447636
A
98/* Unblock thread on wake up */
99extern boolean_t thread_unblock(
100 thread_t thread,
101 wait_result_t wresult);
1c79356b 102
91447636
A
103/* Unblock and dispatch thread */
104extern kern_return_t thread_go(
105 thread_t thread,
106 wait_result_t wresult);
1c79356b 107
2d21ac55 108/* Handle threads at context switch */
91447636 109extern void thread_dispatch(
2d21ac55
A
110 thread_t old_thread,
111 thread_t new_thread);
1c79356b 112
91447636
A
113/* Switch directly to a particular thread */
114extern int thread_run(
115 thread_t self,
116 thread_continue_t continuation,
117 void *parameter,
118 thread_t new_thread);
119
120/* Resume thread with new stack */
121extern void thread_continue(
122 thread_t old_thread);
123
1c79356b
A
124/* Invoke continuation */
125extern void call_continuation(
91447636
A
126 thread_continue_t continuation,
127 void *parameter,
128 wait_result_t wresult);
9bccf70c
A
129
130/* Set the current scheduled priority */
131extern void set_sched_pri(
132 thread_t thread,
133 int priority);
1c79356b 134
9bccf70c
A
135/* Set base priority of the specified thread */
136extern void set_priority(
137 thread_t thread,
138 int priority);
139
140/* Reset scheduled priority of thread */
1c79356b 141extern void compute_priority(
9bccf70c
A
142 thread_t thread,
143 boolean_t override_depress);
1c79356b 144
9bccf70c 145/* Adjust scheduled priority of thread during execution */
1c79356b 146extern void compute_my_priority(
9bccf70c 147 thread_t thread);
1c79356b
A
148
149/* Periodic scheduler activity */
6d2010ae 150extern void sched_init_thread(void (*)(void));
1c79356b 151
91447636 152/* Perform sched_tick housekeeping activities */
6d2010ae 153extern boolean_t can_update_priority(
9bccf70c 154 thread_t thread);
1c79356b 155
6d2010ae
A
156extern void update_priority(
157 thread_t thread);
158
159extern void lightweight_update_priority(
160 thread_t thread);
161
162extern void sched_traditional_quantum_expire(thread_t thread);
163
91447636 164/* Idle processor thread */
1c79356b
A
165extern void idle_thread(void);
166
91447636
A
167extern kern_return_t idle_thread_create(
168 processor_t processor);
1c79356b 169
1c79356b
A
170/* Continuation return from syscall */
171extern void thread_syscall_return(
172 kern_return_t ret);
173
91447636 174/* Context switch */
9bccf70c
A
175extern wait_result_t thread_block_reason(
176 thread_continue_t continuation,
91447636 177 void *parameter,
9bccf70c 178 ast_t reason);
1c79356b 179
91447636 180/* Reschedule thread for execution */
1c79356b 181extern void thread_setrun(
9bccf70c 182 thread_t thread,
55e303ae 183 integer_t options);
1c79356b 184
/* Option bits for thread_setrun() */
#define	SCHED_TAILQ	1
#define	SCHED_HEADQ	2
#define	SCHED_PREEMPT	4

b0d623f7
A
189extern processor_set_t task_choose_pset(
190 task_t task);
191
2d21ac55 192/* Bind the current thread to a particular processor */
55e303ae 193extern processor_t thread_bind(
55e303ae 194 processor_t processor);
1c79356b 195
6d2010ae
A
196/* Choose the best processor to run a thread */
197extern processor_t choose_processor(
198 processor_set_t pset,
199 processor_t processor,
200 thread_t thread);
201
202/* Choose a thread from a processor's priority-based runq */
203extern thread_t choose_thread(
204 processor_t processor,
205 run_queue_t runq,
206 int priority);
207
208
209extern void thread_quantum_init(
210 thread_t thread);
211
2d21ac55
A
212extern void run_queue_init(
213 run_queue_t runq);
214
6d2010ae
A
215extern thread_t run_queue_dequeue(
216 run_queue_t runq,
217 integer_t options);
218
219extern boolean_t run_queue_enqueue(
220 run_queue_t runq,
221 thread_t thread,
222 integer_t options);
223
224extern void run_queue_remove(
225 run_queue_t runq,
226 thread_t thread);
227
228/* Remove thread from its run queue */
229extern boolean_t thread_run_queue_remove(
230 thread_t thread);
231
91447636
A
232extern void thread_timer_expire(
233 void *thread,
234 void *p1);
235
6d2010ae
A
236extern boolean_t thread_eager_preemption(
237 thread_t thread);
238
239/* Fair Share routines */
240#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_PROTO) || defined(CONFIG_SCHED_FIXEDPRIORITY)
241void sched_traditional_fairshare_init(void);
242
243int sched_traditional_fairshare_runq_count(void);
244
245uint64_t sched_traditional_fairshare_runq_stats_count_sum(void);
246
247void sched_traditional_fairshare_enqueue(thread_t thread);
248
249thread_t sched_traditional_fairshare_dequeue(void);
250
251boolean_t sched_traditional_fairshare_queue_remove(thread_t thread);
252#endif
253
254#if defined(CONFIG_SCHED_GRRR) || defined(CONFIG_SCHED_FIXEDPRIORITY)
255void sched_grrr_fairshare_init(void);
256
257int sched_grrr_fairshare_runq_count(void);
258
259uint64_t sched_grrr_fairshare_runq_stats_count_sum(void);
260
261void sched_grrr_fairshare_enqueue(thread_t thread);
262
263thread_t sched_grrr_fairshare_dequeue(void);
264
265boolean_t sched_grrr_fairshare_queue_remove(thread_t thread);
266#endif
267
268extern boolean_t sched_generic_direct_dispatch_to_idle_processors;
269
9bccf70c
A
270/* Set the maximum interrupt level for the thread */
271__private_extern__ wait_interrupt_t thread_interrupt_level(
272 wait_interrupt_t interruptible);
273
274__private_extern__ wait_result_t thread_mark_wait_locked(
275 thread_t thread,
276 wait_interrupt_t interruptible);
277
9bccf70c
A
278/* Wake up locked thread directly, passing result */
279__private_extern__ kern_return_t clear_wait_internal(
280 thread_t thread,
281 wait_result_t result);
1c79356b 282
6d2010ae
A
283extern void sched_stats_handle_csw(
284 processor_t processor,
285 int reasons,
286 int selfpri,
287 int otherpri);
288
289extern void sched_stats_handle_runq_change(
290 struct runq_stats *stats,
291 int old_count);
292
293
294
/*
 * Record a context switch in the scheduler statistics.
 * Cheap no-op unless statistics collection is enabled
 * (sched_stats_active is expected to be FALSE, hence the
 * __builtin_expect hint).
 */
#define	SCHED_STATS_CSW(processor, reasons, selfpri, otherpri) 		\
do { 									\
	if (__builtin_expect(sched_stats_active, 0)) { 			\
		sched_stats_handle_csw((processor), 			\
				(reasons), (selfpri), (otherpri)); 	\
	}								\
} while (0)

/* Record a run-queue occupancy change, same gating as SCHED_STATS_CSW */
#define SCHED_STATS_RUNQ_CHANGE(stats, old_count)		\
do { 								\
	if (__builtin_expect(sched_stats_active, 0)) { 		\
		sched_stats_handle_runq_change((stats), 	\
				(old_count));			\
	}							\
} while (0)

#define	THREAD_URGENCY_NONE		0	/* indicates that there is no currently runnable */
#define	THREAD_URGENCY_BACKGROUND	1	/* indicates that the thread is marked as a "background" thread */
#define	THREAD_URGENCY_NORMAL		2	/* indicates that the thread is marked as a "normal" thread */
#define	THREAD_URGENCY_REAL_TIME	3	/* indicates that the thread is marked as a "real-time" or urgent thread */
#define	THREAD_URGENCY_MAX		4	/* Marker */
317/* Returns the "urgency" of the currently running thread (provided by scheduler) */
318extern int thread_get_urgency(
319 uint64_t *rt_period,
320 uint64_t *rt_deadline);
321
322/* Tells the "urgency" of the just scheduled thread (provided by CPU PM) */
323extern void thread_tell_urgency(
324 int urgency,
325 uint64_t rt_period,
326 uint64_t rt_deadline);
327
328/* Tells if there are "active" RT threads in the system (provided by CPU PM) */
329extern void active_rt_threads(
330 boolean_t active);
331
1c79356b
A
332#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef	XNU_KERNEL_PRIVATE

extern boolean_t	assert_wait_possible(void);

/*
 ****************** Only exported until BSD stops using ********************
 */

/* Wake up thread directly, passing result */
extern kern_return_t clear_wait(
				thread_t	thread,
				wait_result_t	result);

/* Start thread running */
extern void	thread_bootstrap_return(void);

/* Return from exception (BSD-visible interface) */
extern void	thread_exception_return(void) __dead2;

#endif	/* XNU_KERNEL_PRIVATE */
1c79356b 356
91447636
A
357/* Context switch */
358extern wait_result_t thread_block(
359 thread_continue_t continuation);
1c79356b 360
91447636
A
361extern wait_result_t thread_block_parameter(
362 thread_continue_t continuation,
363 void *parameter);
1c79356b 364
1c79356b 365/* Declare thread will wait on a particular event */
91447636
A
366extern wait_result_t assert_wait(
367 event_t event,
368 wait_interrupt_t interruptible);
1c79356b 369
91447636
A
370/* Assert that the thread intends to wait with a timeout */
371extern wait_result_t assert_wait_timeout(
372 event_t event,
373 wait_interrupt_t interruptible,
374 uint32_t interval,
375 uint32_t scale_factor);
1c79356b 376
91447636
A
377extern wait_result_t assert_wait_deadline(
378 event_t event,
379 wait_interrupt_t interruptible,
380 uint64_t deadline);
1c79356b 381
91447636
A
382/* Wake up thread (or threads) waiting on a particular event */
383extern kern_return_t thread_wakeup_prim(
384 event_t event,
385 boolean_t one_thread,
6d2010ae
A
386 wait_result_t result);
387
388#ifdef MACH_KERNEL_PRIVATE
389extern kern_return_t thread_wakeup_prim_internal(
390 event_t event,
391 boolean_t one_thread,
392 wait_result_t result,
393 int priority);
394#endif
/* Convenience wrappers over thread_wakeup_prim() */
#define thread_wakeup(x)					\
			thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
#define thread_wakeup_with_result(x, z)				\
			thread_wakeup_prim((x), FALSE, (z))
#define thread_wakeup_one(x)					\
			thread_wakeup_prim((x), TRUE, THREAD_AWAKENED)

#ifdef MACH_KERNEL_PRIVATE
#define thread_wakeup_one_with_pri(x, pri)			\
			thread_wakeup_prim_internal((x), TRUE, THREAD_AWAKENED, pri)
#endif

91447636
A
408extern boolean_t preemption_enabled(void);
409
410#ifdef KERNEL_PRIVATE
411
b0d623f7
A
412#ifndef __LP64__
413
91447636
A
414/*
415 * Obsolete interfaces.
416 */
417
418extern void thread_set_timer(
419 uint32_t interval,
420 uint32_t scale_factor);
421
422extern void thread_set_timer_deadline(
423 uint64_t deadline);
0b4e3aa0 424
91447636
A
425extern void thread_cancel_timer(void);
426
427#ifndef MACH_KERNEL_PRIVATE
428
429#ifndef ABSOLUTETIME_SCALAR_TYPE
0b4e3aa0
A
430
431#define thread_set_timer_deadline(a) \
432 thread_set_timer_deadline(__OSAbsoluteTime(a))
433
91447636
A
434#endif /* ABSOLUTETIME_SCALAR_TYPE */
435
436#endif /* MACH_KERNEL_PRIVATE */
437
b0d623f7
A
438#endif /* __LP64__ */
439
91447636
A
440#endif /* KERNEL_PRIVATE */
441
6d2010ae
A
442#ifdef MACH_KERNEL_PRIVATE
443
444/*
445 * Scheduler algorithm indirection. If only one algorithm is
446 * enabled at compile-time, a direction function call is used.
447 * If more than one is enabled, calls are dispatched through
448 * a function pointer table.
449 */
450
451#if !defined(CONFIG_SCHED_TRADITIONAL) && !defined(CONFIG_SCHED_PROTO) && !defined(CONFIG_SCHED_GRRR) && !defined(CONFIG_SCHED_FIXEDPRIORITY)
452#error Enable at least one scheduler algorithm in osfmk/conf/MASTER.XXX
453#endif
454
455#define SCHED(f) (sched_current_dispatch->f)
456
457struct sched_dispatch_table {
458 void (*init)(void); /* Init global state */
459 void (*timebase_init)(void); /* Timebase-dependent initialization */
460 void (*processor_init)(processor_t processor); /* Per-processor scheduler init */
461 void (*pset_init)(processor_set_t pset); /* Per-processor set scheduler init */
462
463 void (*maintenance_continuation)(void); /* Function called regularly */
464
465 /*
466 * Choose a thread of greater or equal priority from the per-processor
467 * runqueue for timeshare/fixed threads
468 */
469 thread_t (*choose_thread)(
470 processor_t processor,
471 int priority);
472
473 /*
474 * Steal a thread from another processor in the pset so that it can run
475 * immediately
476 */
477 thread_t (*steal_thread)(
478 processor_set_t pset);
479
480 /*
481 * Recalculate sched_pri based on base priority, past running time,
482 * and scheduling class.
483 */
484 void (*compute_priority)(
485 thread_t thread,
486 boolean_t override_depress);
487
488 /*
489 * Pick the best processor for a thread (any kind of thread) to run on.
490 */
491 processor_t (*choose_processor)(
492 processor_set_t pset,
493 processor_t processor,
494 thread_t thread);
495 /*
496 * Enqueue a timeshare or fixed priority thread onto the per-processor
497 * runqueue
498 */
499 boolean_t (*processor_enqueue)(
500 processor_t processor,
501 thread_t thread,
502 integer_t options);
503
504 /* Migrate threads away in preparation for processor shutdown */
505 void (*processor_queue_shutdown)(
506 processor_t processor);
507
508 /* Remove the specific thread from the per-processor runqueue */
509 boolean_t (*processor_queue_remove)(
510 processor_t processor,
511 thread_t thread);
512
513 /*
514 * Does the per-processor runqueue have any timeshare or fixed priority
515 * threads on it? Called without pset lock held, so should
516 * not assume immutability while executing.
517 */
518 boolean_t (*processor_queue_empty)(processor_t processor);
519
520 /*
521 * Would this priority trigger an urgent preemption if it's sitting
522 * on the per-processor runqueue?
523 */
524 boolean_t (*priority_is_urgent)(int priority);
525
526 /*
527 * Does the per-processor runqueue contain runnable threads that
528 * should cause the currently-running thread to be preempted?
529 */
530 ast_t (*processor_csw_check)(processor_t processor);
531
532 /*
533 * Does the per-processor runqueue contain a runnable thread
534 * of > or >= priority, as a preflight for choose_thread() or other
535 * thread selection
536 */
537 boolean_t (*processor_queue_has_priority)(processor_t processor,
538 int priority,
539 boolean_t gte);
540
541 /* Quantum size for the specified non-realtime thread. */
542 uint32_t (*initial_quantum_size)(thread_t thread);
543
544 /* Scheduler mode for a new thread */
545 sched_mode_t (*initial_thread_sched_mode)(task_t parent_task);
546
547 /* Scheduler algorithm supports timeshare (decay) mode */
548 boolean_t (*supports_timeshare_mode)(void);
549
550 /*
551 * Is it safe to call update_priority, which may change a thread's
552 * runqueue or other state. This can be used to throttle changes
553 * to dynamic priority.
554 */
555 boolean_t (*can_update_priority)(thread_t thread);
556
557 /*
558 * Update both scheduled priority and other persistent state.
559 * Side effects may including migration to another processor's runqueue.
560 */
561 void (*update_priority)(thread_t thread);
562
563 /* Lower overhead update to scheduled priority and state. */
564 void (*lightweight_update_priority)(thread_t thread);
565
566 /* Callback for non-realtime threads when the quantum timer fires */
567 void (*quantum_expire)(thread_t thread);
568
569 /*
570 * Even though we could continue executing on this processor, does the
571 * topology (SMT, for instance) indicate that a better processor could be
572 * chosen
573 */
574 boolean_t (*should_current_thread_rechoose_processor)(processor_t processor);
575
576 /*
577 * Runnable threads on per-processor runqueue. Should only
578 * be used for relative comparisons of load between processors.
579 */
580 int (*processor_runq_count)(processor_t processor);
581
582 /* Aggregate runcount statistics for per-processor runqueue */
583 uint64_t (*processor_runq_stats_count_sum)(processor_t processor);
584
585 /* Initialize structures to track demoted fairshare threads */
586 void (*fairshare_init)(void);
587
588 /* Number of runnable fairshare threads */
589 int (*fairshare_runq_count)(void);
590
591 /* Aggregate runcount statistics for fairshare runqueue */
592 uint64_t (*fairshare_runq_stats_count_sum)(void);
593
594 void (*fairshare_enqueue)(thread_t thread);
595
596 thread_t (*fairshare_dequeue)(void);
597
598 boolean_t (*fairshare_queue_remove)(thread_t thread);
599
600 /*
601 * Use processor->next_thread to pin a thread to an idle
602 * processor. If FALSE, threads are enqueued and can
603 * be stolen by other processors.
604 */
605 boolean_t direct_dispatch_to_idle_processors;
606};

#if defined(CONFIG_SCHED_TRADITIONAL)
#define kSchedTraditionalString "traditional"
#define kSchedTraditionalWithPsetRunqueueString "traditional_with_pset_runqueue"
extern const struct sched_dispatch_table sched_traditional_dispatch;
extern const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch;
#endif

#if defined(CONFIG_SCHED_PROTO)
#define kSchedProtoString "proto"
extern const struct sched_dispatch_table sched_proto_dispatch;
#endif

#if defined(CONFIG_SCHED_GRRR)
#define kSchedGRRRString "grrr"
extern const struct sched_dispatch_table sched_grrr_dispatch;
#endif

#if defined(CONFIG_SCHED_FIXEDPRIORITY)
#define kSchedFixedPriorityString "fixedpriority"
#define kSchedFixedPriorityWithPsetRunqueueString "fixedpriority_with_pset_runqueue"
extern const struct sched_dispatch_table sched_fixedpriority_dispatch;
extern const struct sched_dispatch_table sched_fixedpriority_with_pset_runqueue_dispatch;
#endif

/*
 * It is an error to invoke any scheduler-related code
 * before this is set up
 */
enum sched_enum {
	sched_enum_unknown = 0,
#if defined(CONFIG_SCHED_TRADITIONAL)
	sched_enum_traditional = 1,
	sched_enum_traditional_with_pset_runqueue = 2,
#endif
#if defined(CONFIG_SCHED_PROTO)
	sched_enum_proto = 3,
#endif
#if defined(CONFIG_SCHED_GRRR)
	sched_enum_grrr = 4,
#endif
#if defined(CONFIG_SCHED_FIXEDPRIORITY)
	sched_enum_fixedpriority = 5,
	sched_enum_fixedpriority_with_pset_runqueue = 6,
#endif
	sched_enum_max = 7
};

/* The active scheduler's dispatch table; set during sched_init() per analysis of SOURCE usage — TODO confirm initialization site */
extern const struct sched_dispatch_table *sched_current_dispatch;
656
657#endif /* MACH_KERNEL_PRIVATE */
658
91447636 659__END_DECLS
1c79356b
A
660
661#endif /* _KERN_SCHED_PRIM_H_ */