/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Scheduling primitive definitions file
 *
 */

#ifndef	_KERN_SCHED_PRIM_H_
#define	_KERN_SCHED_PRIM_H_

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/kern_return.h>
#include <kern/clock.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <sys/cdefs.h>
#ifdef	MACH_KERNEL_PRIVATE

/* Initialization */
extern void		sched_init(void) __attribute__((section("__TEXT, initcode")));

extern void		sched_startup(void);

extern void		sched_timebase_init(void);
/* Force a preemption point for a thread and wait for it to stop running */
extern boolean_t	thread_stop(
						thread_t	thread);

/* Release a previous stop request */
extern void			thread_unstop(
						thread_t	thread);

/* Wait for a thread to stop running */
extern void			thread_wait(
						thread_t	thread);
/* Unblock thread on wake up */
extern boolean_t	thread_unblock(
						thread_t		thread,
						wait_result_t	wresult);

/* Unblock and dispatch thread */
extern kern_return_t	thread_go(
							thread_t		thread,
							wait_result_t	wresult);

/* Handle threads at context switch */
extern void			thread_dispatch(
						thread_t		old_thread,
						thread_t		new_thread);

/* Switch directly to a particular thread */
extern int			thread_run(
						thread_t			self,
						thread_continue_t	continuation,
						void				*parameter,
						thread_t			new_thread);

/* Resume thread with new stack */
extern void			thread_continue(
						thread_t		old_thread);

/* Invoke continuation */
extern void			call_continuation(
						thread_continue_t	continuation,
						void				*parameter,
						wait_result_t		wresult);
/* Set the current scheduled priority */
extern void		set_sched_pri(
					thread_t		thread,
					int				priority);

/* Set base priority of the specified thread */
extern void		set_priority(
					thread_t		thread,
					integer_t		priority);

/* Reset scheduled priority of thread */
extern void		compute_priority(
					thread_t		thread,
					boolean_t		override_depress);

/* Adjust scheduled priority of thread during execution */
extern void		compute_my_priority(
					thread_t		thread);

/* Periodic scheduler activity */
extern void		sched_init_thread(void (*)(void));

/* Perform sched_tick housekeeping activities */
extern boolean_t	can_update_priority(
						thread_t		thread);

extern void		update_priority(
					thread_t		thread);

extern void		lightweight_update_priority(
					thread_t		thread);

extern void		sched_traditional_quantum_expire(thread_t thread);
/* Idle processor thread */
extern void		idle_thread(void);

extern kern_return_t	idle_thread_create(
							processor_t		processor);

/* Continuation return from syscall */
extern void		thread_syscall_return(
					kern_return_t	ret);

/* Context switch */
extern wait_result_t	thread_block_reason(
							thread_continue_t	continuation,
							void				*parameter,
							ast_t				reason);

/* Reschedule thread for execution */
extern void		thread_setrun(
					thread_t	thread,
					integer_t	options);

#define SCHED_TAILQ		1
#define SCHED_HEADQ		2
#define SCHED_PREEMPT	4
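
/*
 * Usage sketch: these flags form the options argument to thread_setrun().
 * For instance, a wakeup path that wants the thread queued at the head of
 * its runqueue, with a check for preempting the running thread, might call:
 *
 *	thread_setrun(thread, SCHED_PREEMPT | SCHED_HEADQ);
 */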
extern processor_set_t	task_choose_pset(
							task_t			task);

/* Bind the current thread to a particular processor */
extern processor_t		thread_bind(
							processor_t		processor);

/* Choose the best processor to run a thread */
extern processor_t	choose_processor(
						processor_set_t		pset,
						processor_t			processor,
						thread_t			thread);

/* Choose a thread from a processor's priority-based runq */
extern thread_t		choose_thread(
						processor_t		processor,
						run_queue_t		runq,
						int				priority);

extern void		thread_quantum_init(
					thread_t		thread);
extern void		run_queue_init(
					run_queue_t		runq);

extern thread_t	run_queue_dequeue(
					run_queue_t		runq,
					integer_t		options);

extern boolean_t	run_queue_enqueue(
						run_queue_t		runq,
						thread_t		thread,
						integer_t		options);

extern void		run_queue_remove(
					run_queue_t		runq,
					thread_t		thread);

/* Remove thread from its run queue */
extern boolean_t	thread_run_queue_remove(
						thread_t	thread);

extern void		thread_timer_expire(
					void	*thread,
					void	*p1);

extern boolean_t	thread_eager_preemption(
						thread_t	thread);
/* Fair Share routines */
#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_PROTO) || defined(CONFIG_SCHED_FIXEDPRIORITY)
void		sched_traditional_fairshare_init(void);

int			sched_traditional_fairshare_runq_count(void);

uint64_t	sched_traditional_fairshare_runq_stats_count_sum(void);

void		sched_traditional_fairshare_enqueue(thread_t thread);

thread_t	sched_traditional_fairshare_dequeue(void);

boolean_t	sched_traditional_fairshare_queue_remove(thread_t thread);
#endif

#if defined(CONFIG_SCHED_GRRR) || defined(CONFIG_SCHED_FIXEDPRIORITY)
void		sched_grrr_fairshare_init(void);

int			sched_grrr_fairshare_runq_count(void);

uint64_t	sched_grrr_fairshare_runq_stats_count_sum(void);

void		sched_grrr_fairshare_enqueue(thread_t thread);

thread_t	sched_grrr_fairshare_dequeue(void);

boolean_t	sched_grrr_fairshare_queue_remove(thread_t thread);
#endif

extern boolean_t	sched_generic_direct_dispatch_to_idle_processors;
/* Set the maximum interrupt level for the thread */
__private_extern__ wait_interrupt_t	thread_interrupt_level(
										wait_interrupt_t interruptible);

__private_extern__ wait_result_t	thread_mark_wait_locked(
										thread_t		 thread,
										wait_interrupt_t interruptible);

/* Wake up locked thread directly, passing result */
__private_extern__ kern_return_t	clear_wait_internal(
										thread_t		thread,
										wait_result_t	result);
extern void		sched_stats_handle_csw(
					processor_t processor,
					int reasons,
					int selfpri,
					int otherpri);

extern void		sched_stats_handle_runq_change(
					struct runq_stats *stats,
					int old_count);

#define	SCHED_STATS_CSW(processor, reasons, selfpri, otherpri)		\
do {									\
	if (__builtin_expect(sched_stats_active, 0)) {			\
		sched_stats_handle_csw((processor),			\
				(reasons), (selfpri), (otherpri));	\
	}								\
} while (0)

#define SCHED_STATS_RUNQ_CHANGE(stats, old_count)			\
do {									\
	if (__builtin_expect(sched_stats_active, 0)) {			\
		sched_stats_handle_runq_change((stats),			\
				(old_count));				\
	}								\
} while (0)
#define THREAD_URGENCY_NONE		0	/* indicates that there is no currently runnable thread */
#define THREAD_URGENCY_BACKGROUND	1	/* indicates that the thread is marked as a "background" thread */
#define THREAD_URGENCY_NORMAL		2	/* indicates that the thread is marked as a "normal" thread */
#define THREAD_URGENCY_REAL_TIME	3	/* indicates that the thread is marked as a "real-time" or urgent thread */
#define THREAD_URGENCY_MAX		4	/* Marker */
/* Returns the "urgency" of the currently running thread (provided by scheduler) */
extern int	thread_get_urgency(
				uint64_t	*rt_period,
				uint64_t	*rt_deadline);

/* Tells the "urgency" of the just scheduled thread (provided by CPU PM) */
extern void	thread_tell_urgency(
				int			urgency,
				uint64_t	rt_period,
				uint64_t	rt_deadline);

/* Tells if there are "active" RT threads in the system (provided by CPU PM) */
extern void	active_rt_threads(
				boolean_t	active);
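
/*
 * Usage sketch (hypothetical CPU power-management consumer): query the
 * urgency and real-time parameters of the running thread. The
 * rt_period/rt_deadline outputs presumably carry meaning only for
 * THREAD_URGENCY_REAL_TIME:
 *
 *	uint64_t rt_period, rt_deadline;
 *	int urgency = thread_get_urgency(&rt_period, &rt_deadline);
 *	if (urgency == THREAD_URGENCY_REAL_TIME) {
 *		// bias power management toward low-latency operation
 *	}
 */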
#endif	/* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef	XNU_KERNEL_PRIVATE

extern boolean_t		assert_wait_possible(void);
/*
 ****************** Only exported until BSD stops using ********************
 */

/* Wake up thread directly, passing result */
extern kern_return_t	clear_wait(
							thread_t		thread,
							wait_result_t	result);

/* Start thread running */
extern void		thread_bootstrap_return(void);

/* Return from exception (BSD-visible interface) */
extern void		thread_exception_return(void) __dead2;

#endif	/* XNU_KERNEL_PRIVATE */
extern wait_result_t	thread_block(
							thread_continue_t	continuation);

extern wait_result_t	thread_block_parameter(
							thread_continue_t	continuation,
							void				*parameter);
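
/*
 * Usage sketch: blocking with a continuation lets the thread give up its
 * kernel stack while it waits; when the thread resumes, control enters the
 * continuation function rather than returning from thread_block().
 * example_continue and param are hypothetical names for illustration:
 *
 *	static void example_continue(void *param, wait_result_t wr);
 *
 *	assert_wait((event_t)&example_event, THREAD_UNINT);
 *	thread_block_parameter(example_continue, param);
 *	// not reached if the thread actually blocked
 */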
/* Declare thread will wait on a particular event */
extern wait_result_t	assert_wait(
							event_t				event,
							wait_interrupt_t	interruptible);

/* Assert that the thread intends to wait with a timeout */
extern wait_result_t	assert_wait_timeout(
							event_t				event,
							wait_interrupt_t	interruptible,
							uint32_t			interval,
							uint32_t			scale_factor);

extern wait_result_t	assert_wait_deadline(
							event_t				event,
							wait_interrupt_t	interruptible,
							uint64_t			deadline);

/* Wake up thread (or threads) waiting on a particular event */
extern kern_return_t	thread_wakeup_prim(
							event_t				event,
							boolean_t			one_thread,
							wait_result_t		result);
#ifdef	MACH_KERNEL_PRIVATE
extern kern_return_t	thread_wakeup_prim_internal(
							event_t				event,
							boolean_t			one_thread,
							wait_result_t		result,
							int					priority);
#endif

#define thread_wakeup(x)					\
			thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
#define thread_wakeup_with_result(x, z)		\
			thread_wakeup_prim((x), FALSE, (z))
#define thread_wakeup_one(x)				\
			thread_wakeup_prim((x), TRUE, THREAD_AWAKENED)
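
/*
 * Usage sketch of the event wait/wakeup protocol these wrappers drive.
 * A waiter asserts a wait on an event address and then blocks; a waker
 * posts the same event. example_event is a hypothetical variable whose
 * address serves as the event:
 *
 *	// waiting side
 *	assert_wait((event_t)&example_event, THREAD_UNINT);
 *	wait_result_t wr = thread_block(THREAD_CONTINUE_NULL);
 *
 *	// waking side
 *	thread_wakeup((event_t)&example_event);
 */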
#ifdef	MACH_KERNEL_PRIVATE
#define thread_wakeup_one_with_pri(x, pri)				\
			thread_wakeup_prim_internal((x), TRUE, THREAD_AWAKENED, pri)
#endif

extern boolean_t		preemption_enabled(void);
#ifdef	KERNEL_PRIVATE

#ifndef	__LP64__

/*
 *	Obsolete interfaces.
 */

extern void		thread_set_timer(
					uint32_t		interval,
					uint32_t		scale_factor);

extern void		thread_set_timer_deadline(
					uint64_t		deadline);

extern void		thread_cancel_timer(void);

#ifndef	MACH_KERNEL_PRIVATE

#ifndef	ABSOLUTETIME_SCALAR_TYPE

#define thread_set_timer_deadline(a)	\
	thread_set_timer_deadline(__OSAbsoluteTime(a))

#endif	/* ABSOLUTETIME_SCALAR_TYPE */

#endif	/* MACH_KERNEL_PRIVATE */

#endif	/* __LP64__ */

#endif	/* KERNEL_PRIVATE */
#ifdef	MACH_KERNEL_PRIVATE

/*
 * Scheduler algorithm indirection. If only one algorithm is
 * enabled at compile-time, a direct function call is used.
 * If more than one is enabled, calls are dispatched through
 * a function pointer table.
 */

#if !defined(CONFIG_SCHED_TRADITIONAL) && !defined(CONFIG_SCHED_PROTO) && !defined(CONFIG_SCHED_GRRR) && !defined(CONFIG_SCHED_FIXEDPRIORITY)
#error Enable at least one scheduler algorithm in osfmk/conf/MASTER.XXX
#endif

#define SCHED(f) (sched_current_dispatch->f)
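
/*
 * Usage sketch: scheduler-independent code calls through the dispatch
 * table rather than naming an algorithm directly, e.g.
 *
 *	thread_t thread = SCHED(choose_thread)(processor, MINPRI);
 *
 * which expands to sched_current_dispatch->choose_thread(processor, MINPRI).
 * (MINPRI, the minimum scheduler priority, is shown purely for illustration.)
 */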
struct sched_dispatch_table {
	void	(*init)(void);				/* Init global state */
	void	(*timebase_init)(void);		/* Timebase-dependent initialization */
	void	(*processor_init)(processor_t processor);	/* Per-processor scheduler init */
	void	(*pset_init)(processor_set_t pset);			/* Per-processor set scheduler init */

	void	(*maintenance_continuation)(void);	/* Function called regularly */

	/*
	 * Choose a thread of greater or equal priority from the per-processor
	 * runqueue for timeshare/fixed threads
	 */
	thread_t	(*choose_thread)(
					processor_t		processor,
					int				priority);

	/*
	 * Steal a thread from another processor in the pset so that it can run
	 * immediately
	 */
	thread_t	(*steal_thread)(
					processor_set_t	pset);

	/*
	 * Recalculate sched_pri based on base priority, past running time,
	 * and scheduling class.
	 */
	void		(*compute_priority)(
					thread_t	thread,
					boolean_t	override_depress);

	/*
	 * Pick the best processor for a thread (any kind of thread) to run on.
	 */
	processor_t	(*choose_processor)(
					processor_set_t	pset,
					processor_t		processor,
					thread_t		thread);

	/*
	 * Enqueue a timeshare or fixed priority thread onto the per-processor
	 * runqueue
	 */
	boolean_t	(*processor_enqueue)(
					processor_t		processor,
					thread_t		thread,
					integer_t		options);

	/* Migrate threads away in preparation for processor shutdown */
	void		(*processor_queue_shutdown)(
					processor_t		processor);

	/* Remove the specific thread from the per-processor runqueue */
	boolean_t	(*processor_queue_remove)(
					processor_t		processor,
					thread_t		thread);

	/*
	 * Does the per-processor runqueue have any timeshare or fixed priority
	 * threads on it? Called without pset lock held, so should
	 * not assume immutability while executing.
	 */
	boolean_t	(*processor_queue_empty)(processor_t processor);

	/*
	 * Would this priority trigger an urgent preemption if it's sitting
	 * on the per-processor runqueue?
	 */
	boolean_t	(*priority_is_urgent)(int priority);

	/*
	 * Does the per-processor runqueue contain runnable threads that
	 * should cause the currently-running thread to be preempted?
	 */
	ast_t		(*processor_csw_check)(processor_t processor);

	/*
	 * Does the per-processor runqueue contain a runnable thread
	 * of > or >= priority, as a preflight for choose_thread() or other
	 * thread selection?
	 */
	boolean_t	(*processor_queue_has_priority)(
					processor_t		processor,
					int				priority,
					boolean_t		gte);

	/* Quantum size for the specified non-realtime thread. */
	uint32_t	(*initial_quantum_size)(thread_t thread);

	/* Scheduler mode for a new thread */
	sched_mode_t	(*initial_thread_sched_mode)(task_t parent_task);

	/* Scheduler algorithm supports timeshare (decay) mode */
	boolean_t	(*supports_timeshare_mode)(void);

	/*
	 * Is it safe to call update_priority, which may change a thread's
	 * runqueue or other state? This can be used to throttle changes
	 * to dynamic priority.
	 */
	boolean_t	(*can_update_priority)(thread_t thread);

	/*
	 * Update both scheduled priority and other persistent state.
	 * Side effects may include migration to another processor's runqueue.
	 */
	void		(*update_priority)(thread_t thread);

	/* Lower overhead update to scheduled priority and state. */
	void		(*lightweight_update_priority)(thread_t thread);

	/* Callback for non-realtime threads when the quantum timer fires */
	void		(*quantum_expire)(thread_t thread);

	/*
	 * Even though we could continue executing on this processor, does the
	 * topology (SMT, for instance) indicate that a better processor could be
	 * chosen?
	 */
	boolean_t	(*should_current_thread_rechoose_processor)(processor_t processor);

	/*
	 * Runnable threads on per-processor runqueue. Should only
	 * be used for relative comparisons of load between processors.
	 */
	int			(*processor_runq_count)(processor_t processor);

	/* Aggregate runcount statistics for per-processor runqueue */
	uint64_t	(*processor_runq_stats_count_sum)(processor_t processor);

	/* Initialize structures to track demoted fairshare threads */
	void		(*fairshare_init)(void);

	/* Number of runnable fairshare threads */
	int			(*fairshare_runq_count)(void);

	/* Aggregate runcount statistics for fairshare runqueue */
	uint64_t	(*fairshare_runq_stats_count_sum)(void);

	void		(*fairshare_enqueue)(thread_t thread);

	thread_t	(*fairshare_dequeue)(void);

	boolean_t	(*fairshare_queue_remove)(thread_t thread);

	/*
	 * Use processor->next_thread to pin a thread to an idle
	 * processor. If FALSE, threads are enqueued and can
	 * be stolen by other processors.
	 */
	boolean_t	direct_dispatch_to_idle_processors;
};
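
/*
 * Sketch of how a scheduler module might supply its table (hypothetical
 * names; a real table, like those declared below, initializes every
 * member):
 *
 *	const struct sched_dispatch_table sched_example_dispatch = {
 *		.init = sched_example_init,
 *		.timebase_init = sched_example_timebase_init,
 *		...
 *	};
 */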
#if defined(CONFIG_SCHED_TRADITIONAL)
#define kSchedTraditionalString "traditional"
#define kSchedTraditionalWithPsetRunqueueString "traditional_with_pset_runqueue"
extern const struct sched_dispatch_table sched_traditional_dispatch;
extern const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch;
#endif

#if defined(CONFIG_SCHED_PROTO)
#define kSchedProtoString "proto"
extern const struct sched_dispatch_table sched_proto_dispatch;
#endif

#if defined(CONFIG_SCHED_GRRR)
#define kSchedGRRRString "grrr"
extern const struct sched_dispatch_table sched_grrr_dispatch;
#endif

#if defined(CONFIG_SCHED_FIXEDPRIORITY)
#define kSchedFixedPriorityString "fixedpriority"
#define kSchedFixedPriorityWithPsetRunqueueString "fixedpriority_with_pset_runqueue"
extern const struct sched_dispatch_table sched_fixedpriority_dispatch;
extern const struct sched_dispatch_table sched_fixedpriority_with_pset_runqueue_dispatch;
#endif
/*
 * It is an error to invoke any scheduler-related code
 * before this is set up
 */
enum sched_enum {
	sched_enum_unknown = 0,
#if defined(CONFIG_SCHED_TRADITIONAL)
	sched_enum_traditional = 1,
	sched_enum_traditional_with_pset_runqueue = 2,
#endif
#if defined(CONFIG_SCHED_PROTO)
	sched_enum_proto = 3,
#endif
#if defined(CONFIG_SCHED_GRRR)
	sched_enum_grrr = 4,
#endif
#if defined(CONFIG_SCHED_FIXEDPRIORITY)
	sched_enum_fixedpriority = 5,
	sched_enum_fixedpriority_with_pset_runqueue = 6,
#endif
	sched_enum_max = 7
};

extern const struct sched_dispatch_table *sched_current_dispatch;
#endif	/* MACH_KERNEL_PRIVATE */

__END_DECLS

#endif	/* _KERN_SCHED_PRIM_H_ */