/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * Scheduling primitive definitions file
 */
#ifndef _KERN_SCHED_PRIM_H_
#define _KERN_SCHED_PRIM_H_

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/kern_return.h>
#include <kern/clock.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <sys/cdefs.h>
#ifdef MACH_KERNEL_PRIVATE

/* Initialization */
extern void     sched_init(void);

extern void     sched_startup(void);

extern void     sched_timebase_init(void);
/* Force a preemption point for a thread and wait for it to stop running */
extern boolean_t    thread_stop(
                        thread_t    thread,
                        boolean_t   until_not_runnable);

/* Release a previous stop request */
extern void         thread_unstop(
                        thread_t    thread);

/* Wait for a thread to stop running */
extern void         thread_wait(
                        thread_t    thread,
                        boolean_t   until_not_runnable);
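/*
 * Illustrative sketch (not part of this header): thread_stop() and
 * thread_unstop() are used as a bracket around code that must observe
 * or modify another thread while it is not running; locking and the
 * failure path are elided here.
 *
 *	if (thread_stop(thread, TRUE)) {
 *		// thread is off-processor; safe to examine its state
 *		thread_unstop(thread);
 *	}
 */
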
/* Unblock thread on wake up */
extern boolean_t    thread_unblock(
                        thread_t        thread,
                        wait_result_t   wresult);

/* Unblock and dispatch thread */
extern kern_return_t    thread_go(
                            thread_t        thread,
                            wait_result_t   wresult);

/* Handle threads at context switch */
extern void         thread_dispatch(
                        thread_t        old_thread,
                        thread_t        new_thread);
/* Switch directly to a particular thread */
extern int          thread_run(
                        thread_t            self,
                        thread_continue_t   continuation,
                        void                *parameter,
                        thread_t            new_thread);

/* Resume thread with new stack */
extern void         thread_continue(
                        thread_t        old_thread);

/* Invoke continuation */
extern void         call_continuation(
                        thread_continue_t   continuation,
                        void                *parameter,
                        wait_result_t       wresult);
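/*
 * Illustrative sketch (hypothetical names, not part of this header):
 * a continuation is a function the blocked thread restarts in on a
 * fresh kernel stack instead of resuming its old call chain, so any
 * needed state must be re-derived from 'parameter'.
 *
 *	static void
 *	example_continue(void *parameter, wait_result_t wresult)
 *	{
 *		// the pre-block stack frame is gone by the time this runs
 *		...
 *	}
 *
 *	thread_block_parameter(example_continue, object);
 */
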
/* Set the current scheduled priority */
extern void     set_sched_pri(
                    thread_t    thread,
                    int         priority);

/* Set base priority of the specified thread */
extern void     set_priority(
                    thread_t    thread,
                    integer_t   priority);

/* Reset scheduled priority of thread */
extern void     compute_priority(
                    thread_t    thread,
                    boolean_t   override_depress);

/* Adjust scheduled priority of thread during execution */
extern void     compute_my_priority(
                    thread_t    thread);
/* Periodic scheduler activity */
extern void     sched_init_thread(void (*)(void));

/* Perform sched_tick housekeeping activities */
extern boolean_t    can_update_priority(
                        thread_t    thread);

extern void     update_priority(
                    thread_t    thread);

extern void     lightweight_update_priority(
                    thread_t    thread);

extern void     sched_traditional_quantum_expire(thread_t thread);
/* Idle processor thread */
extern void     idle_thread(void);

extern kern_return_t    idle_thread_create(
                            processor_t     processor);
/* Continuation return from syscall */
extern void     thread_syscall_return(
                    kern_return_t   ret);

extern wait_result_t    thread_block_reason(
                            thread_continue_t   continuation,
                            void                *parameter,
                            ast_t               reason);

/* Reschedule thread for execution */
extern void     thread_setrun(
                    thread_t    thread,
                    integer_t   options);

#define SCHED_TAILQ     1
#define SCHED_HEADQ     2
#define SCHED_PREEMPT   4
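/*
 * Illustrative sketch (not part of this header): the SCHED_* values are
 * bitwise options for thread_setrun(), e.g. to enqueue at the head of a
 * runqueue and request a preemption check:
 *
 *	thread_setrun(thread, SCHED_PREEMPT | SCHED_HEADQ);
 */
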
extern processor_set_t  task_choose_pset(
                            task_t          task);

/* Bind the current thread to a particular processor */
extern processor_t      thread_bind(
                            processor_t     processor);
/* Choose the best processor to run a thread */
extern processor_t      choose_processor(
                            processor_set_t pset,
                            processor_t     processor,
                            thread_t        thread);

/* Choose a thread from a processor's priority-based runq */
extern thread_t         choose_thread(
                            processor_t     processor,
                            run_queue_t     runq,
                            int             priority);
extern void     thread_quantum_init(
                    thread_t    thread);

extern void     run_queue_init(
                    run_queue_t runq);

extern thread_t run_queue_dequeue(
                    run_queue_t runq,
                    integer_t   options);

extern boolean_t    run_queue_enqueue(
                        run_queue_t runq,
                        thread_t    thread,
                        integer_t   options);

extern void     run_queue_remove(
                    run_queue_t runq,
                    thread_t    thread);

/* Remove thread from its run queue */
extern boolean_t    thread_run_queue_remove(
                        thread_t    thread);

extern void     thread_timer_expire(
                    void    *thread,
                    void    *p1);

extern boolean_t    thread_eager_preemption(
                        thread_t    thread);
/* Fair Share routines */
#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_PROTO) || defined(CONFIG_SCHED_FIXEDPRIORITY)
void        sched_traditional_fairshare_init(void);

int         sched_traditional_fairshare_runq_count(void);

uint64_t    sched_traditional_fairshare_runq_stats_count_sum(void);

void        sched_traditional_fairshare_enqueue(thread_t thread);

thread_t    sched_traditional_fairshare_dequeue(void);

boolean_t   sched_traditional_fairshare_queue_remove(thread_t thread);
#endif
#if defined(CONFIG_SCHED_GRRR) || defined(CONFIG_SCHED_FIXEDPRIORITY)
void        sched_grrr_fairshare_init(void);

int         sched_grrr_fairshare_runq_count(void);

uint64_t    sched_grrr_fairshare_runq_stats_count_sum(void);

void        sched_grrr_fairshare_enqueue(thread_t thread);

thread_t    sched_grrr_fairshare_dequeue(void);

boolean_t   sched_grrr_fairshare_queue_remove(thread_t thread);
#endif

extern boolean_t    sched_generic_direct_dispatch_to_idle_processors;
/* Set the maximum interrupt level for the thread */
__private_extern__ wait_interrupt_t     thread_interrupt_level(
                                            wait_interrupt_t interruptible);

__private_extern__ wait_result_t        thread_mark_wait_locked(
                                            thread_t         thread,
                                            wait_interrupt_t interruptible);

/* Wake up locked thread directly, passing result */
__private_extern__ kern_return_t        clear_wait_internal(
                                            thread_t        thread,
                                            wait_result_t   result);
extern void     sched_stats_handle_csw(
                    processor_t processor,
                    int         reasons,
                    int         selfpri,
                    int         otherpri);

extern void     sched_stats_handle_runq_change(
                    struct runq_stats   *stats,
                    int                 old_count);

#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri)      \
do {                                                                \
	if (__builtin_expect(sched_stats_active, 0)) {              \
		sched_stats_handle_csw((processor),                 \
		    (reasons), (selfpri), (otherpri));              \
	}                                                           \
} while (0)

#define SCHED_STATS_RUNQ_CHANGE(stats, old_count)                   \
do {                                                                \
	if (__builtin_expect(sched_stats_active, 0)) {              \
		sched_stats_handle_runq_change((stats),             \
		    (old_count));                                   \
	}                                                           \
} while (0)
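/*
 * Illustrative sketch (not part of this header): runqueue code records
 * statistics through these macros so that the call is compiled out of
 * the hot path unless sched_stats_active is set; 'rq' is a hypothetical
 * run_queue_t.
 *
 *	SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
 *	rq->count++;
 */
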
#define THREAD_URGENCY_NONE         0   /* indicates that there is no currently runnable thread */
#define THREAD_URGENCY_BACKGROUND   1   /* indicates that the thread is marked as a "background" thread */
#define THREAD_URGENCY_NORMAL       2   /* indicates that the thread is marked as a "normal" thread */
#define THREAD_URGENCY_REAL_TIME    3   /* indicates that the thread is marked as a "real-time" or urgent thread */
#define THREAD_URGENCY_MAX          4   /* Marker */

/* Returns the "urgency" of a thread (provided by scheduler) */
extern int      thread_get_urgency(
                    thread_t    thread,
                    uint64_t    *rt_period,
                    uint64_t    *rt_deadline);

/* Tells the "urgency" of the just scheduled thread (provided by CPU PM) */
extern void     thread_tell_urgency(
                    int         urgency,
                    uint64_t    rt_period,
                    uint64_t    rt_deadline,
                    thread_t    nthread);

/* Tells if there are "active" RT threads in the system (provided by CPU PM) */
extern void     active_rt_threads(
                    boolean_t   active);
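/*
 * Illustrative sketch (not part of this header): CPU power management
 * can use the urgency of the incoming thread to pick a performance
 * state; pm_select_fastest_pstate() is hypothetical.
 *
 *	uint64_t rt_period, rt_deadline;
 *	int urgency = thread_get_urgency(thread, &rt_period, &rt_deadline);
 *	if (urgency == THREAD_URGENCY_REAL_TIME)
 *		pm_select_fastest_pstate();
 */
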
#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS
#ifdef XNU_KERNEL_PRIVATE

extern boolean_t    assert_wait_possible(void);

/* Toggles a global override to turn off CPU Throttling */
#define CPU_THROTTLE_DISABLE    0
#define CPU_THROTTLE_ENABLE     1
extern void     sys_override_cpu_throttle(int flag);
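/*
 * Illustrative sketch (not part of this header): turn CPU throttling
 * off globally, e.g. for the duration of a latency-critical operation,
 * and restore it afterwards.
 *
 *	sys_override_cpu_throttle(CPU_THROTTLE_DISABLE);
 *	// ... latency-critical work ...
 *	sys_override_cpu_throttle(CPU_THROTTLE_ENABLE);
 */
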
/*
 ****************** Only exported until BSD stops using ********************
 */

/* Wake up thread directly, passing result */
extern kern_return_t    clear_wait(
                            thread_t        thread,
                            wait_result_t   result);

/* Start thread running */
extern void     thread_bootstrap_return(void);

/* Return from exception (BSD-visible interface) */
extern void     thread_exception_return(void) __dead2;

#endif /* XNU_KERNEL_PRIVATE */
/* Context switch */
extern wait_result_t    thread_block(
                            thread_continue_t   continuation);

extern wait_result_t    thread_block_parameter(
                            thread_continue_t   continuation,
                            void                *parameter);
/* Declare thread will wait on a particular event */
extern wait_result_t    assert_wait(
                            event_t             event,
                            wait_interrupt_t    interruptible);
/* Assert that the thread intends to wait with a timeout */
extern wait_result_t    assert_wait_timeout(
                            event_t             event,
                            wait_interrupt_t    interruptible,
                            uint32_t            interval,
                            uint32_t            scale_factor);
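/*
 * Illustrative sketch (not part of this header): wait on an event for
 * at most 100 ms; scale_factor gives the units of 'interval' in
 * nanoseconds per unit (e.g. NSEC_PER_MSEC for milliseconds). 'object'
 * is hypothetical.
 *
 *	assert_wait_timeout((event_t)&object->state, THREAD_UNINT,
 *	    100, NSEC_PER_MSEC);
 *	wait_result_t wr = thread_block(THREAD_CONTINUE_NULL);
 */
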
/* Assert that the thread intends to wait with an urgency, timeout and leeway */
extern wait_result_t    assert_wait_timeout_with_leeway(
                            event_t                 event,
                            wait_interrupt_t        interruptible,
                            wait_timeout_urgency_t  urgency,
                            uint32_t                interval,
                            uint32_t                leeway,
                            uint32_t                scale_factor);
extern wait_result_t    assert_wait_deadline(
                            event_t             event,
                            wait_interrupt_t    interruptible,
                            uint64_t            deadline);

/* Assert that the thread intends to wait with an urgency, deadline, and leeway */
extern wait_result_t    assert_wait_deadline_with_leeway(
                            event_t                 event,
                            wait_interrupt_t        interruptible,
                            wait_timeout_urgency_t  urgency,
                            uint64_t                deadline,
                            uint64_t                leeway);
/* Wake up thread (or threads) waiting on a particular event */
extern kern_return_t    thread_wakeup_prim(
                            event_t         event,
                            boolean_t       one_thread,
                            wait_result_t   result);

extern kern_return_t    thread_wakeup_prim_internal(
                            event_t         event,
                            boolean_t       one_thread,
                            wait_result_t   result,
                            int             priority);
#define thread_wakeup(x)                    \
		thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
#define thread_wakeup_with_result(x, z)     \
		thread_wakeup_prim((x), FALSE, (z))
#define thread_wakeup_one(x)                \
		thread_wakeup_prim((x), TRUE, THREAD_AWAKENED)
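/*
 * Illustrative sketch (not part of this header): the canonical
 * wait/wakeup pattern pairs assert_wait() and thread_block() on the
 * waiter's side with thread_wakeup() on the waker's side; 'object' and
 * its lock are hypothetical.
 *
 *	// Waiter
 *	assert_wait((event_t)&object->state, THREAD_UNINT);
 *	object_unlock(object);
 *	wait_result_t wr = thread_block(THREAD_CONTINUE_NULL);
 *
 *	// Waker
 *	object->state = READY;
 *	thread_wakeup((event_t)&object->state);
 */
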
#ifdef MACH_KERNEL_PRIVATE
#define thread_wakeup_one_with_pri(x, pri)  \
		thread_wakeup_prim_internal((x), TRUE, THREAD_AWAKENED, pri)
#endif

extern boolean_t    preemption_enabled(void);
#ifdef MACH_KERNEL_PRIVATE

/*
 * Scheduler algorithm indirection. If only one algorithm is
 * enabled at compile-time, a direct function call is used.
 * If more than one is enabled, calls are dispatched through
 * a function pointer table.
 */

#if !defined(CONFIG_SCHED_TRADITIONAL) && !defined(CONFIG_SCHED_PROTO) && !defined(CONFIG_SCHED_GRRR) && !defined(CONFIG_SCHED_FIXEDPRIORITY)
#error Enable at least one scheduler algorithm in osfmk/conf/MASTER.XXX
#endif

#define SCHED(f) (sched_current_dispatch->f)
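/*
 * Illustrative sketch (not part of this header): scheduler-independent
 * code calls through SCHED() instead of naming an algorithm directly,
 * e.g. to pick the next thread of at least minimum priority:
 *
 *	thread_t next = SCHED(choose_thread)(processor, MINPRI);
 */
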
struct sched_dispatch_table {
	void        (*init)(void);              /* Init global state */
	void        (*timebase_init)(void);     /* Timebase-dependent initialization */
	void        (*processor_init)(processor_t processor);  /* Per-processor scheduler init */
	void        (*pset_init)(processor_set_t pset);        /* Per-processor set scheduler init */

	void        (*maintenance_continuation)(void);         /* Function called regularly */
	/*
	 * Choose a thread of greater or equal priority from the per-processor
	 * runqueue for timeshare/fixed threads
	 */
	thread_t    (*choose_thread)(
	                processor_t     processor,
	                int             priority);

	/*
	 * Steal a thread from another processor in the pset so that it can run
	 * immediately
	 */
	thread_t    (*steal_thread)(
	                processor_set_t pset);
	/*
	 * Recalculate sched_pri based on base priority, past running time,
	 * and scheduling class.
	 */
	void        (*compute_priority)(
	                thread_t    thread,
	                boolean_t   override_depress);

	/*
	 * Pick the best processor for a thread (any kind of thread) to run on.
	 */
	processor_t (*choose_processor)(
	                processor_set_t pset,
	                processor_t     processor,
	                thread_t        thread);
	/*
	 * Enqueue a timeshare or fixed priority thread onto the per-processor
	 * runqueue
	 */
	boolean_t   (*processor_enqueue)(
	                processor_t     processor,
	                thread_t        thread,
	                integer_t       options);

	/* Migrate threads away in preparation for processor shutdown */
	void        (*processor_queue_shutdown)(
	                processor_t     processor);

	/* Remove the specific thread from the per-processor runqueue */
	boolean_t   (*processor_queue_remove)(
	                processor_t     processor,
	                thread_t        thread);
	/*
	 * Does the per-processor runqueue have any timeshare or fixed priority
	 * threads on it? Called without pset lock held, so should
	 * not assume immutability while executing.
	 */
	boolean_t   (*processor_queue_empty)(processor_t processor);

	/*
	 * Would this priority trigger an urgent preemption if it's sitting
	 * on the per-processor runqueue?
	 */
	boolean_t   (*priority_is_urgent)(int priority);

	/*
	 * Does the per-processor runqueue contain runnable threads that
	 * should cause the currently-running thread to be preempted?
	 */
	ast_t       (*processor_csw_check)(processor_t processor);

	/*
	 * Does the per-processor runqueue contain a runnable thread
	 * of > or >= priority, as a preflight for choose_thread() or other
	 * thread selection
	 */
	boolean_t   (*processor_queue_has_priority)(
	                processor_t     processor,
	                int             priority,
	                boolean_t       gte);
	/* Quantum size for the specified non-realtime thread. */
	uint32_t    (*initial_quantum_size)(thread_t thread);

	/* Scheduler mode for a new thread */
	sched_mode_t (*initial_thread_sched_mode)(task_t parent_task);

	/* Scheduler algorithm supports timeshare (decay) mode */
	boolean_t   (*supports_timeshare_mode)(void);

	/*
	 * Is it safe to call update_priority, which may change a thread's
	 * runqueue or other state? This can be used to throttle changes
	 * to dynamic priority.
	 */
	boolean_t   (*can_update_priority)(thread_t thread);

	/*
	 * Update both scheduled priority and other persistent state.
	 * Side effects may include migration to another processor's runqueue.
	 */
	void        (*update_priority)(thread_t thread);

	/* Lower overhead update to scheduled priority and state. */
	void        (*lightweight_update_priority)(thread_t thread);
	/* Callback for non-realtime threads when the quantum timer fires */
	void        (*quantum_expire)(thread_t thread);

	/*
	 * Even though we could continue executing on this processor, does the
	 * topology (SMT, for instance) indicate that a better processor could be
	 * chosen?
	 */
	boolean_t   (*should_current_thread_rechoose_processor)(processor_t processor);

	/*
	 * Runnable threads on per-processor runqueue. Should only
	 * be used for relative comparisons of load between processors.
	 */
	int         (*processor_runq_count)(processor_t processor);

	/* Aggregate runcount statistics for per-processor runqueue */
	uint64_t    (*processor_runq_stats_count_sum)(processor_t processor);
	/* Initialize structures to track demoted fairshare threads */
	void        (*fairshare_init)(void);

	/* Number of runnable fairshare threads */
	int         (*fairshare_runq_count)(void);

	/* Aggregate runcount statistics for fairshare runqueue */
	uint64_t    (*fairshare_runq_stats_count_sum)(void);

	void        (*fairshare_enqueue)(thread_t thread);

	thread_t    (*fairshare_dequeue)(void);

	boolean_t   (*fairshare_queue_remove)(thread_t thread);

	/*
	 * Use processor->next_thread to pin a thread to an idle
	 * processor. If FALSE, threads are enqueued and can
	 * be stolen by other processors.
	 */
	boolean_t   direct_dispatch_to_idle_processors;
};
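/*
 * Illustrative sketch (hypothetical, not from xnu): a scheduler
 * algorithm provides its own dispatch table, e.g. with C99 designated
 * initializers:
 *
 *	const struct sched_dispatch_table sched_example_dispatch = {
 *		.init          = sched_example_init,
 *		.timebase_init = sched_example_timebase_init,
 *		// ... one entry per member above ...
 *		.direct_dispatch_to_idle_processors = TRUE,
 *	};
 */
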
#if defined(CONFIG_SCHED_TRADITIONAL)
#define kSchedTraditionalString "traditional"
#define kSchedTraditionalWithPsetRunqueueString "traditional_with_pset_runqueue"
extern const struct sched_dispatch_table sched_traditional_dispatch;
extern const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch;
#endif

#if defined(CONFIG_SCHED_PROTO)
#define kSchedProtoString "proto"
extern const struct sched_dispatch_table sched_proto_dispatch;
#endif

#if defined(CONFIG_SCHED_GRRR)
#define kSchedGRRRString "grrr"
extern const struct sched_dispatch_table sched_grrr_dispatch;
#endif

#if defined(CONFIG_SCHED_FIXEDPRIORITY)
#define kSchedFixedPriorityString "fixedpriority"
#define kSchedFixedPriorityWithPsetRunqueueString "fixedpriority_with_pset_runqueue"
extern const struct sched_dispatch_table sched_fixedpriority_dispatch;
extern const struct sched_dispatch_table sched_fixedpriority_with_pset_runqueue_dispatch;
#endif
/*
 * It is an error to invoke any scheduler-related code
 * before this is set up
 */
enum sched_enum {
	sched_enum_unknown = 0,
#if defined(CONFIG_SCHED_TRADITIONAL)
	sched_enum_traditional = 1,
	sched_enum_traditional_with_pset_runqueue = 2,
#endif
#if defined(CONFIG_SCHED_PROTO)
	sched_enum_proto = 3,
#endif
#if defined(CONFIG_SCHED_GRRR)
	sched_enum_grrr = 4,
#endif
#if defined(CONFIG_SCHED_FIXEDPRIORITY)
	sched_enum_fixedpriority = 5,
	sched_enum_fixedpriority_with_pset_runqueue = 6,
#endif
	sched_enum_max = 7
};

extern const struct sched_dispatch_table *sched_current_dispatch;
#endif /* MACH_KERNEL_PRIVATE */

__END_DECLS

#endif /* _KERN_SCHED_PRIM_H_ */