/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	sched_prim.h
 *	Author:	David Golub
 *
 *	Scheduling primitive definitions file
 *
 */

#ifndef	_KERN_SCHED_PRIM_H_
#define	_KERN_SCHED_PRIM_H_

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/kern_return.h>
#include <kern/clock.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <sys/cdefs.h>

#ifdef	MACH_KERNEL_PRIVATE

/* Initialization */
extern void		sched_init(void) __attribute__((section("__TEXT, initcode")));

extern void		sched_startup(void);

extern void		sched_timebase_init(void);

/* Force a preemption point for a thread and wait for it to stop running */
extern boolean_t	thread_stop(
				thread_t	thread);

/* Release a previous stop request */
extern void		thread_unstop(
				thread_t	thread);

/* Wait for a thread to stop running */
extern void		thread_wait(
				thread_t	thread,
				boolean_t	until_not_runnable);

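/*
 * Illustrative sketch (not part of this interface): forcing a thread to
 * a stop before examining it, then releasing it.  The "target" thread
 * and the examination step are hypothetical; real callers must follow
 * the locking conventions of the callers in osfmk/kern.
 *
 *	if (thread_stop(target)) {
 *		thread_wait(target, TRUE);	// ensure it is not runnable
 *		// ... examine target's state here (hypothetical) ...
 *		thread_unstop(target);		// allow it to run again
 *	}
 */
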
/* Unblock thread on wake up */
extern boolean_t	thread_unblock(
				thread_t	thread,
				wait_result_t	wresult);

/* Unblock and dispatch thread */
extern kern_return_t	thread_go(
				thread_t	thread,
				wait_result_t	wresult);

/* Handle threads at context switch */
extern void		thread_dispatch(
				thread_t	old_thread,
				thread_t	new_thread);

/* Switch directly to a particular thread */
extern int		thread_run(
				thread_t		self,
				thread_continue_t	continuation,
				void			*parameter,
				thread_t		new_thread);

/* Resume thread with new stack */
extern void		thread_continue(
				thread_t	old_thread);

/* Invoke continuation */
extern void		call_continuation(
				thread_continue_t	continuation,
				void			*parameter,
				wait_result_t		wresult);

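/*
 * Illustrative sketch (not part of this interface): the continuation
 * style lets a blocked thread give up its kernel stack; on wakeup it is
 * resumed at the continuation on a fresh stack rather than returning
 * from the block call.  my_event, my_continuation, and param are
 * hypothetical.  A continuation must not return; it ends by blocking
 * again or via a non-returning primitive such as thread_syscall_return().
 *
 *	static void
 *	my_continuation(void *param, wait_result_t wresult)
 *	{
 *		// resumed here on a fresh stack after the wakeup
 *		// ... finish the operation, then (e.g. on a syscall path):
 *		thread_syscall_return(KERN_SUCCESS);	// never returns
 *	}
 *
 *	assert_wait((event_t)&my_event, THREAD_UNINT);
 *	(void) thread_block_parameter(my_continuation, param);
 *	// not reached: the block call resumes in my_continuation instead
 */
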
/* Set the current scheduled priority */
extern void		set_sched_pri(
				thread_t	thread,
				int		priority);

/* Set base priority of the specified thread */
extern void		set_priority(
				thread_t	thread,
				int		priority);

/* Reset scheduled priority of thread */
extern void		compute_priority(
				thread_t	thread,
				boolean_t	override_depress);

/* Adjust scheduled priority of thread during execution */
extern void		compute_my_priority(
				thread_t	thread);

/* Periodic scheduler activity */
extern void		sched_init_thread(void (*)(void));

/* Perform sched_tick housekeeping activities */
extern boolean_t	can_update_priority(
				thread_t	thread);

extern void		update_priority(
				thread_t	thread);

extern void		lightweight_update_priority(
				thread_t	thread);

extern void		sched_traditional_quantum_expire(thread_t thread);

/* Idle processor thread */
extern void		idle_thread(void);

extern kern_return_t	idle_thread_create(
				processor_t	processor);

/* Continuation return from syscall */
extern void		thread_syscall_return(
				kern_return_t	ret);

/* Context switch */
extern wait_result_t	thread_block_reason(
				thread_continue_t	continuation,
				void			*parameter,
				ast_t			reason);

/* Reschedule thread for execution */
extern void		thread_setrun(
				thread_t	thread,
				integer_t	options);

#define SCHED_TAILQ	1
#define SCHED_HEADQ	2
#define SCHED_PREEMPT	4

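/*
 * Illustrative sketch: the SCHED_* options are OR'd together when making
 * a thread runnable, e.g. to queue it at the head of its runqueue and
 * check for preemption (the thread is assumed to be locked as
 * thread_setrun() requires):
 *
 *	thread_setrun(thread, SCHED_PREEMPT | SCHED_HEADQ);
 */
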
extern processor_set_t	task_choose_pset(
				task_t		task);

/* Bind the current thread to a particular processor */
extern processor_t	thread_bind(
				processor_t	processor);

/* Choose the best processor to run a thread */
extern processor_t	choose_processor(
				processor_set_t	pset,
				processor_t	processor,
				thread_t	thread);

/* Choose a thread from a processor's priority-based runq */
extern thread_t		choose_thread(
				processor_t	processor,
				run_queue_t	runq,
				int		priority);

extern void		thread_quantum_init(
				thread_t	thread);

extern void		run_queue_init(
				run_queue_t	runq);

extern thread_t		run_queue_dequeue(
				run_queue_t	runq,
				integer_t	options);

extern boolean_t	run_queue_enqueue(
				run_queue_t	runq,
				thread_t	thread,
				integer_t	options);

extern void		run_queue_remove(
				run_queue_t	runq,
				thread_t	thread);

/* Remove thread from its run queue */
extern boolean_t	thread_run_queue_remove(
				thread_t	thread);

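/*
 * Illustrative sketch: a scheduler implementation that owns a runqueue
 * might pull the next thread for dispatch (locking elided; the options
 * are the SCHED_* values above):
 *
 *	thread_t next = run_queue_dequeue(runq, SCHED_HEADQ);
 *	if (next != THREAD_NULL) {
 *		// ... hand the thread to the processor ...
 *	}
 */
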
extern void		thread_timer_expire(
				void	*thread,
				void	*p1);

extern boolean_t	thread_eager_preemption(
				thread_t	thread);

/* Fair Share routines */
#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_PROTO) || defined(CONFIG_SCHED_FIXEDPRIORITY)
void		sched_traditional_fairshare_init(void);

int		sched_traditional_fairshare_runq_count(void);

uint64_t	sched_traditional_fairshare_runq_stats_count_sum(void);

void		sched_traditional_fairshare_enqueue(thread_t thread);

thread_t	sched_traditional_fairshare_dequeue(void);

boolean_t	sched_traditional_fairshare_queue_remove(thread_t thread);
#endif

#if defined(CONFIG_SCHED_GRRR) || defined(CONFIG_SCHED_FIXEDPRIORITY)
void		sched_grrr_fairshare_init(void);

int		sched_grrr_fairshare_runq_count(void);

uint64_t	sched_grrr_fairshare_runq_stats_count_sum(void);

void		sched_grrr_fairshare_enqueue(thread_t thread);

thread_t	sched_grrr_fairshare_dequeue(void);

boolean_t	sched_grrr_fairshare_queue_remove(thread_t thread);
#endif

extern boolean_t	sched_generic_direct_dispatch_to_idle_processors;

/* Set the maximum interrupt level for the thread */
__private_extern__ wait_interrupt_t	thread_interrupt_level(
				wait_interrupt_t	interruptible);

__private_extern__ wait_result_t	thread_mark_wait_locked(
				thread_t		thread,
				wait_interrupt_t	interruptible);

/* Wake up locked thread directly, passing result */
__private_extern__ kern_return_t	clear_wait_internal(
				thread_t	thread,
				wait_result_t	result);

extern void		sched_stats_handle_csw(
				processor_t	processor,
				int		reasons,
				int		selfpri,
				int		otherpri);

extern void		sched_stats_handle_runq_change(
				struct runq_stats	*stats,
				int			old_count);

#define	SCHED_STATS_CSW(processor, reasons, selfpri, otherpri)	\
do {								\
	if (__builtin_expect(sched_stats_active, 0)) {		\
		sched_stats_handle_csw((processor),		\
			(reasons), (selfpri), (otherpri));	\
	}							\
} while (0)

#define	SCHED_STATS_RUNQ_CHANGE(stats, old_count)		\
do {								\
	if (__builtin_expect(sched_stats_active, 0)) {		\
		sched_stats_handle_runq_change((stats),		\
			(old_count));				\
	}							\
} while (0)

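/*
 * Illustrative note: wrapping the handlers in __builtin_expect keeps
 * the disabled-statistics path cheap.  A hypothetical call site at
 * context switch might look like:
 *
 *	SCHED_STATS_CSW(processor, reason, self->sched_pri, thread->sched_pri);
 */
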
#define	THREAD_URGENCY_NONE		0	/* indicates that there is no currently runnable (non-idle) thread */
#define	THREAD_URGENCY_BACKGROUND	1	/* indicates that the thread is marked as a "background" thread */
#define	THREAD_URGENCY_NORMAL		2	/* indicates that the thread is marked as a "normal" thread */
#define	THREAD_URGENCY_REAL_TIME	3	/* indicates that the thread is marked as a "real-time" or urgent thread */
#define	THREAD_URGENCY_MAX		4	/* marker */

/* Returns the "urgency" of the currently running thread (provided by scheduler) */
extern int	thread_get_urgency(
				uint64_t	*rt_period,
				uint64_t	*rt_deadline);

/* Tells the "urgency" of the just scheduled thread (provided by CPU PM) */
extern void	thread_tell_urgency(
				int		urgency,
				uint64_t	rt_period,
				uint64_t	rt_deadline);

/* Tells if there are "active" RT threads in the system (provided by CPU PM) */
extern void	active_rt_threads(
				boolean_t	active);

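/*
 * Illustrative sketch: at dispatch the scheduler reports the new
 * thread's urgency and CPU power management consumes it; the period
 * and deadline are only meaningful for THREAD_URGENCY_REAL_TIME.
 * This pairing is a sketch, not the exact sequence in thread_dispatch().
 *
 *	uint64_t rt_period, rt_deadline;
 *	int urgency = thread_get_urgency(&rt_period, &rt_deadline);
 *	thread_tell_urgency(urgency, rt_period, rt_deadline);
 */
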
#endif	/* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef	XNU_KERNEL_PRIVATE

extern boolean_t	assert_wait_possible(void);

/*
 ****************** Only exported until BSD stops using ********************
 */

/* Wake up thread directly, passing result */
extern kern_return_t	clear_wait(
				thread_t	thread,
				wait_result_t	result);

/* Start thread running */
extern void		thread_bootstrap_return(void);

/* Return from exception (BSD-visible interface) */
extern void		thread_exception_return(void) __dead2;

#endif	/* XNU_KERNEL_PRIVATE */

/* Context switch */
extern wait_result_t	thread_block(
				thread_continue_t	continuation);

extern wait_result_t	thread_block_parameter(
				thread_continue_t	continuation,
				void			*parameter);

/* Declare thread will wait on a particular event */
extern wait_result_t	assert_wait(
				event_t			event,
				wait_interrupt_t	interruptible);

/* Assert that the thread intends to wait with a timeout */
extern wait_result_t	assert_wait_timeout(
				event_t			event,
				wait_interrupt_t	interruptible,
				uint32_t		interval,
				uint32_t		scale_factor);

extern wait_result_t	assert_wait_deadline(
				event_t			event,
				wait_interrupt_t	interruptible,
				uint64_t		deadline);

/* Wake up thread (or threads) waiting on a particular event */
extern kern_return_t	thread_wakeup_prim(
				event_t		event,
				boolean_t	one_thread,
				wait_result_t	result);

extern kern_return_t	thread_wakeup_prim_internal(
				event_t		event,
				boolean_t	one_thread,
				wait_result_t	result,
				int		priority);

#define thread_wakeup(x)				\
		thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
#define thread_wakeup_with_result(x, z)			\
		thread_wakeup_prim((x), FALSE, (z))
#define thread_wakeup_one(x)				\
		thread_wakeup_prim((x), TRUE, THREAD_AWAKENED)

#ifdef MACH_KERNEL_PRIVATE
#define thread_wakeup_one_with_pri(x, pri)		\
		thread_wakeup_prim_internal((x), TRUE, THREAD_AWAKENED, pri)
#endif
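
/*
 * Illustrative sketch (not part of this interface): the classic Mach
 * wait/wakeup pattern.  The waiter asserts a wait on an event address,
 * drops its lock, and blocks; the waker posts the same event.  my_obj,
 * its lock, and its "ready" flag are hypothetical.
 *
 *	// Waiter:
 *	lck_mtx_lock(&my_obj->lock);
 *	while (!my_obj->ready) {
 *		assert_wait((event_t)&my_obj->ready, THREAD_UNINT);
 *		lck_mtx_unlock(&my_obj->lock);
 *		thread_block(THREAD_CONTINUE_NULL);
 *		lck_mtx_lock(&my_obj->lock);
 *	}
 *	lck_mtx_unlock(&my_obj->lock);
 *
 *	// Waker:
 *	lck_mtx_lock(&my_obj->lock);
 *	my_obj->ready = TRUE;
 *	lck_mtx_unlock(&my_obj->lock);
 *	thread_wakeup((event_t)&my_obj->ready);
 */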

extern boolean_t	preemption_enabled(void);

#ifdef	KERNEL_PRIVATE

#ifndef	__LP64__

/*
 * Obsolete interfaces.
 */

extern void	thread_set_timer(
				uint32_t	interval,
				uint32_t	scale_factor);

extern void	thread_set_timer_deadline(
				uint64_t	deadline);

extern void	thread_cancel_timer(void);

#ifndef	MACH_KERNEL_PRIVATE

#ifndef	ABSOLUTETIME_SCALAR_TYPE

#define thread_set_timer_deadline(a)	\
	thread_set_timer_deadline(__OSAbsoluteTime(a))

#endif	/* ABSOLUTETIME_SCALAR_TYPE */

#endif	/* MACH_KERNEL_PRIVATE */

#endif	/* __LP64__ */

#endif	/* KERNEL_PRIVATE */

#ifdef MACH_KERNEL_PRIVATE

/*
 * Scheduler algorithm indirection.  If only one algorithm is
 * enabled at compile-time, a direct function call is used.
 * If more than one is enabled, calls are dispatched through
 * a function pointer table.
 */

#if !defined(CONFIG_SCHED_TRADITIONAL) && !defined(CONFIG_SCHED_PROTO) && !defined(CONFIG_SCHED_GRRR) && !defined(CONFIG_SCHED_FIXEDPRIORITY)
#error Enable at least one scheduler algorithm in osfmk/conf/MASTER.XXX
#endif

#define SCHED(f) (sched_current_dispatch->f)

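/*
 * For example, SCHED(choose_thread)(processor, pri) expands to
 * (sched_current_dispatch->choose_thread)(processor, pri), indirecting
 * through whichever dispatch table was selected at boot.
 */
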
struct sched_dispatch_table {
	void	(*init)(void);			/* Init global state */
	void	(*timebase_init)(void);		/* Timebase-dependent initialization */
	void	(*processor_init)(processor_t processor);	/* Per-processor scheduler init */
	void	(*pset_init)(processor_set_t pset);	/* Per-processor set scheduler init */

	void	(*maintenance_continuation)(void);	/* Function called regularly */

	/*
	 * Choose a thread of greater or equal priority from the per-processor
	 * runqueue for timeshare/fixed threads
	 */
	thread_t	(*choose_thread)(
				processor_t	processor,
				int		priority);

	/*
	 * Steal a thread from another processor in the pset so that it can run
	 * immediately
	 */
	thread_t	(*steal_thread)(
				processor_set_t	pset);

	/*
	 * Recalculate sched_pri based on base priority, past running time,
	 * and scheduling class.
	 */
	void		(*compute_priority)(
				thread_t	thread,
				boolean_t	override_depress);

	/*
	 * Pick the best processor for a thread (any kind of thread) to run on.
	 */
	processor_t	(*choose_processor)(
				processor_set_t	pset,
				processor_t	processor,
				thread_t	thread);

	/*
	 * Enqueue a timeshare or fixed priority thread onto the per-processor
	 * runqueue
	 */
	boolean_t	(*processor_enqueue)(
				processor_t	processor,
				thread_t	thread,
				integer_t	options);

	/* Migrate threads away in preparation for processor shutdown */
	void		(*processor_queue_shutdown)(
				processor_t	processor);

	/* Remove the specific thread from the per-processor runqueue */
	boolean_t	(*processor_queue_remove)(
				processor_t	processor,
				thread_t	thread);

	/*
	 * Does the per-processor runqueue have any timeshare or fixed priority
	 * threads on it?  Called without the pset lock held, so it should
	 * not assume immutability while executing.
	 */
	boolean_t	(*processor_queue_empty)(processor_t processor);

	/*
	 * Would this priority trigger an urgent preemption if it were sitting
	 * on the per-processor runqueue?
	 */
	boolean_t	(*priority_is_urgent)(int priority);

	/*
	 * Does the per-processor runqueue contain runnable threads that
	 * should cause the currently-running thread to be preempted?
	 */
	ast_t		(*processor_csw_check)(processor_t processor);

	/*
	 * Does the per-processor runqueue contain a runnable thread
	 * of > or >= priority, as a preflight for choose_thread() or other
	 * thread selection?
	 */
	boolean_t	(*processor_queue_has_priority)(processor_t processor,
				int		priority,
				boolean_t	gte);

	/* Quantum size for the specified non-realtime thread. */
	uint32_t	(*initial_quantum_size)(thread_t thread);

	/* Scheduler mode for a new thread */
	sched_mode_t	(*initial_thread_sched_mode)(task_t parent_task);

	/* Scheduler algorithm supports timeshare (decay) mode */
	boolean_t	(*supports_timeshare_mode)(void);

	/*
	 * Is it safe to call update_priority, which may change a thread's
	 * runqueue or other state?  This can be used to throttle changes
	 * to dynamic priority.
	 */
	boolean_t	(*can_update_priority)(thread_t thread);

	/*
	 * Update both scheduled priority and other persistent state.
	 * Side effects may include migration to another processor's runqueue.
	 */
	void		(*update_priority)(thread_t thread);

	/* Lower overhead update to scheduled priority and state. */
	void		(*lightweight_update_priority)(thread_t thread);

	/* Callback for non-realtime threads when the quantum timer fires */
	void		(*quantum_expire)(thread_t thread);

	/*
	 * Even though we could continue executing on this processor, does the
	 * topology (SMT, for instance) indicate that a better processor could be
	 * chosen?
	 */
	boolean_t	(*should_current_thread_rechoose_processor)(processor_t processor);

	/*
	 * Runnable threads on per-processor runqueue.  Should only
	 * be used for relative comparisons of load between processors.
	 */
	int		(*processor_runq_count)(processor_t processor);

	/* Aggregate runcount statistics for per-processor runqueue */
	uint64_t	(*processor_runq_stats_count_sum)(processor_t processor);

	/* Initialize structures to track demoted fairshare threads */
	void		(*fairshare_init)(void);

	/* Number of runnable fairshare threads */
	int		(*fairshare_runq_count)(void);

	/* Aggregate runcount statistics for fairshare runqueue */
	uint64_t	(*fairshare_runq_stats_count_sum)(void);

	void		(*fairshare_enqueue)(thread_t thread);

	thread_t	(*fairshare_dequeue)(void);

	boolean_t	(*fairshare_queue_remove)(thread_t thread);

	/*
	 * Use processor->next_thread to pin a thread to an idle
	 * processor.  If FALSE, threads are enqueued and can
	 * be stolen by other processors.
	 */
	boolean_t	direct_dispatch_to_idle_processors;
};

#if defined(CONFIG_SCHED_TRADITIONAL)
#define kSchedTraditionalString "traditional"
#define kSchedTraditionalWithPsetRunqueueString "traditional_with_pset_runqueue"
extern const struct sched_dispatch_table sched_traditional_dispatch;
extern const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch;
#endif

#if defined(CONFIG_SCHED_PROTO)
#define kSchedProtoString "proto"
extern const struct sched_dispatch_table sched_proto_dispatch;
#endif

#if defined(CONFIG_SCHED_GRRR)
#define kSchedGRRRString "grrr"
extern const struct sched_dispatch_table sched_grrr_dispatch;
#endif

#if defined(CONFIG_SCHED_FIXEDPRIORITY)
#define kSchedFixedPriorityString "fixedpriority"
#define kSchedFixedPriorityWithPsetRunqueueString "fixedpriority_with_pset_runqueue"
extern const struct sched_dispatch_table sched_fixedpriority_dispatch;
extern const struct sched_dispatch_table sched_fixedpriority_with_pset_runqueue_dispatch;
#endif

/*
 * It is an error to invoke any scheduler-related code
 * before this is set up.
 */
enum sched_enum {
	sched_enum_unknown = 0,
#if defined(CONFIG_SCHED_TRADITIONAL)
	sched_enum_traditional = 1,
	sched_enum_traditional_with_pset_runqueue = 2,
#endif
#if defined(CONFIG_SCHED_PROTO)
	sched_enum_proto = 3,
#endif
#if defined(CONFIG_SCHED_GRRR)
	sched_enum_grrr = 4,
#endif
#if defined(CONFIG_SCHED_FIXEDPRIORITY)
	sched_enum_fixedpriority = 5,
	sched_enum_fixedpriority_with_pset_runqueue = 6,
#endif
	sched_enum_max = 7
};

extern const struct sched_dispatch_table *sched_current_dispatch;

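/*
 * Illustrative sketch: boot-time selection points sched_current_dispatch
 * at one of the tables above before any scheduler entry point runs (the
 * real selection is made during sched_init(), e.g. matching a boot
 * argument against the kSched*String names):
 *
 *	#if defined(CONFIG_SCHED_TRADITIONAL)
 *	sched_current_dispatch = &sched_traditional_dispatch;
 *	#endif
 *	SCHED(init)();
 *	SCHED(timebase_init)();
 */
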
#endif	/* MACH_KERNEL_PRIVATE */

__END_DECLS

#endif	/* _KERN_SCHED_PRIM_H_ */