/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	sched_prim.h
 *	Author:	David Golub
 *
 *	Scheduling primitive definitions file
 *
 */

#ifndef	_KERN_SCHED_PRIM_H_
#define	_KERN_SCHED_PRIM_H_

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/kern_return.h>
#include <kern/clock.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <sys/cdefs.h>

#ifdef	MACH_KERNEL_PRIVATE

/* Initialization */
extern void		sched_init(void) __attribute__((section("__TEXT, initcode")));

extern void		sched_startup(void);

extern void		sched_timebase_init(void);

/* Force a preemption point for a thread and wait for it to stop running */
extern boolean_t	thread_stop(
				thread_t	thread);

/* Release a previous stop request */
extern void		thread_unstop(
				thread_t	thread);

/* Wait for a thread to stop running */
extern void		thread_wait(
				thread_t	thread);

/* Unblock thread on wake up */
extern boolean_t	thread_unblock(
				thread_t	thread,
				wait_result_t	wresult);

/* Unblock and dispatch thread */
extern kern_return_t	thread_go(
				thread_t	thread,
				wait_result_t	wresult);

/* Handle threads at context switch */
extern void		thread_dispatch(
				thread_t	old_thread,
				thread_t	new_thread);

/* Switch directly to a particular thread */
extern int		thread_run(
				thread_t		self,
				thread_continue_t	continuation,
				void			*parameter,
				thread_t		new_thread);

/* Resume thread with new stack */
extern void		thread_continue(
				thread_t	old_thread);

/* Invoke continuation */
extern void		call_continuation(
				thread_continue_t	continuation,
				void			*parameter,
				wait_result_t		wresult);

/* Set the current scheduled priority */
extern void		set_sched_pri(
				thread_t	thread,
				int		priority);

/* Set base priority of the specified thread */
extern void		set_priority(
				thread_t	thread,
				int		priority);

/* Reset scheduled priority of thread */
extern void		compute_priority(
				thread_t	thread,
				boolean_t	override_depress);

/* Adjust scheduled priority of thread during execution */
extern void		compute_my_priority(
				thread_t	thread);

/* Periodic scheduler activity */
extern void		sched_init_thread(void (*)(void));

/* Perform sched_tick housekeeping activities */
extern boolean_t	can_update_priority(
				thread_t	thread);

extern void		update_priority(
				thread_t	thread);

extern void		lightweight_update_priority(
				thread_t	thread);

extern void		sched_traditional_quantum_expire(thread_t thread);

/* Idle processor thread */
extern void		idle_thread(void);

extern kern_return_t	idle_thread_create(
				processor_t	processor);

/* Continuation return from syscall */
extern void		thread_syscall_return(
				kern_return_t	ret);

/* Context switch */
extern wait_result_t	thread_block_reason(
				thread_continue_t	continuation,
				void			*parameter,
				ast_t			reason);

/* Reschedule thread for execution */
extern void		thread_setrun(
				thread_t	thread,
				integer_t	options);

#define SCHED_TAILQ	1
#define SCHED_HEADQ	2
#define SCHED_PREEMPT	4
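
/*
 * Illustrative sketch (not part of the original header): thread_setrun()
 * options combine a queue position with an optional preemption check,
 * e.g. when making a thread runnable from a wakeup path:
 *
 *	thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
 */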

extern processor_set_t	task_choose_pset(
				task_t		task);

/* Bind the current thread to a particular processor */
extern processor_t	thread_bind(
				processor_t	processor);

/* Choose the best processor to run a thread */
extern processor_t	choose_processor(
				processor_set_t		pset,
				processor_t		processor,
				thread_t		thread);

/* Choose a thread from a processor's priority-based runq */
extern thread_t		choose_thread(
				processor_t	processor,
				run_queue_t	runq,
				int		priority);


extern void		thread_quantum_init(
				thread_t	thread);

extern void		run_queue_init(
				run_queue_t	runq);

extern thread_t		run_queue_dequeue(
				run_queue_t	runq,
				integer_t	options);

extern boolean_t	run_queue_enqueue(
				run_queue_t	runq,
				thread_t	thread,
				integer_t	options);

extern void		run_queue_remove(
				run_queue_t	runq,
				thread_t	thread);

/* Remove thread from its run queue */
extern boolean_t	thread_run_queue_remove(
				thread_t	thread);

extern void		thread_timer_expire(
				void	*thread,
				void	*p1);

extern boolean_t	thread_eager_preemption(
				thread_t	thread);

/* Fair Share routines */
#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_PROTO) || defined(CONFIG_SCHED_FIXEDPRIORITY)
void		sched_traditional_fairshare_init(void);

int		sched_traditional_fairshare_runq_count(void);

uint64_t	sched_traditional_fairshare_runq_stats_count_sum(void);

void		sched_traditional_fairshare_enqueue(thread_t thread);

thread_t	sched_traditional_fairshare_dequeue(void);

boolean_t	sched_traditional_fairshare_queue_remove(thread_t thread);
#endif

#if defined(CONFIG_SCHED_GRRR) || defined(CONFIG_SCHED_FIXEDPRIORITY)
void		sched_grrr_fairshare_init(void);

int		sched_grrr_fairshare_runq_count(void);

uint64_t	sched_grrr_fairshare_runq_stats_count_sum(void);

void		sched_grrr_fairshare_enqueue(thread_t thread);

thread_t	sched_grrr_fairshare_dequeue(void);

boolean_t	sched_grrr_fairshare_queue_remove(thread_t thread);
#endif

extern boolean_t	sched_generic_direct_dispatch_to_idle_processors;

/* Set the maximum interrupt level for the thread */
__private_extern__ wait_interrupt_t	thread_interrupt_level(
						wait_interrupt_t interruptible);

__private_extern__ wait_result_t	thread_mark_wait_locked(
						thread_t	 thread,
						wait_interrupt_t interruptible);

/* Wake up locked thread directly, passing result */
__private_extern__ kern_return_t	clear_wait_internal(
						thread_t	thread,
						wait_result_t	result);

extern void	sched_stats_handle_csw(
			processor_t	processor,
			int		reasons,
			int		selfpri,
			int		otherpri);

extern void	sched_stats_handle_runq_change(
			struct runq_stats	*stats,
			int			old_count);



#define	SCHED_STATS_CSW(processor, reasons, selfpri, otherpri)		\
do {									\
	if (__builtin_expect(sched_stats_active, 0)) {			\
		sched_stats_handle_csw((processor),			\
				(reasons), (selfpri), (otherpri));	\
	}								\
} while (0)


#define	SCHED_STATS_RUNQ_CHANGE(stats, old_count)			\
do {									\
	if (__builtin_expect(sched_stats_active, 0)) {			\
		sched_stats_handle_runq_change((stats),			\
				(old_count));				\
	}								\
} while (0)
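
/*
 * Illustrative sketch (not part of the original header): callers pass the
 * runqueue's stats structure and its pre-change count, so the handler only
 * does work when sched_stats_active is set:
 *
 *	SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
 */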

#define THREAD_URGENCY_NONE		0	/* indicates that there is no currently runnable thread */
#define THREAD_URGENCY_BACKGROUND	1	/* indicates that the thread is marked as a "background" thread */
#define THREAD_URGENCY_NORMAL		2	/* indicates that the thread is marked as a "normal" thread */
#define THREAD_URGENCY_REAL_TIME	3	/* indicates that the thread is marked as a "real-time" or urgent thread */
#define THREAD_URGENCY_MAX		4	/* Marker */
/* Returns the "urgency" of the currently running thread (provided by scheduler) */
extern int	thread_get_urgency(
			uint64_t	*rt_period,
			uint64_t	*rt_deadline);
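
/*
 * Illustrative sketch (not part of the original header): a CPU power
 * management client could query the running thread's urgency and its
 * real-time constraints before choosing an idle state:
 *
 *	uint64_t rt_period, rt_deadline;
 *	int urgency = thread_get_urgency(&rt_period, &rt_deadline);
 *	if (urgency == THREAD_URGENCY_REAL_TIME) {
 *		// avoid idle states too deep to meet rt_deadline
 *	}
 */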

/* Tells the "urgency" of the just scheduled thread (provided by CPU PM) */
extern void	thread_tell_urgency(
			int		urgency,
			uint64_t	rt_period,
			uint64_t	rt_deadline);

/* Tells if there are "active" RT threads in the system (provided by CPU PM) */
extern void	active_rt_threads(
			boolean_t	active);

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef	XNU_KERNEL_PRIVATE

extern boolean_t	assert_wait_possible(void);

/*
 ****************** Only exported until BSD stops using ********************
 */

/* Wake up thread directly, passing result */
extern kern_return_t	clear_wait(
				thread_t	thread,
				wait_result_t	result);

/* Start thread running */
extern void		thread_bootstrap_return(void);

/* Return from exception (BSD-visible interface) */
extern void		thread_exception_return(void) __dead2;

#endif	/* XNU_KERNEL_PRIVATE */

/* Context switch */
extern wait_result_t	thread_block(
				thread_continue_t	continuation);

extern wait_result_t	thread_block_parameter(
				thread_continue_t	continuation,
				void			*parameter);
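
/*
 * Illustrative sketch (not part of the original header): passing a
 * continuation lets the kernel discard the blocked thread's stack; on
 * wakeup the thread resumes in the continuation rather than returning
 * from thread_block(). "example_continue" is a hypothetical function:
 *
 *	static void
 *	example_continue(void *parameter, wait_result_t wresult)
 *	{
 *		// runs on a fresh stack once the wait is satisfied
 *	}
 *
 *	assert_wait(event, THREAD_UNINT);
 *	thread_block_parameter(example_continue, parameter);
 *	// not reached
 */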

/* Declare thread will wait on a particular event */
extern wait_result_t	assert_wait(
				event_t		event,
				wait_interrupt_t interruptible);

/* Assert that the thread intends to wait with a timeout */
extern wait_result_t	assert_wait_timeout(
				event_t		event,
				wait_interrupt_t interruptible,
				uint32_t	interval,
				uint32_t	scale_factor);

extern wait_result_t	assert_wait_deadline(
				event_t		event,
				wait_interrupt_t interruptible,
				uint64_t	deadline);
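
/*
 * Illustrative sketch (not part of the original header): the interval is
 * expressed in units of scale_factor (nanoseconds per unit), so a 100 ms
 * interruptible wait could be asserted as:
 *
 *	wait_result_t wr = assert_wait_timeout(event, THREAD_ABORTSAFE,
 *						100, NSEC_PER_MSEC);
 *	if (wr == THREAD_WAITING)
 *		wr = thread_block(THREAD_CONTINUE_NULL);
 */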

/* Wake up thread (or threads) waiting on a particular event */
extern kern_return_t	thread_wakeup_prim(
				event_t		event,
				boolean_t	one_thread,
				wait_result_t	result);

#ifdef MACH_KERNEL_PRIVATE
extern kern_return_t	thread_wakeup_prim_internal(
				event_t		event,
				boolean_t	one_thread,
				wait_result_t	result,
				int		priority);
#endif

#define thread_wakeup(x)					\
			thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
#define thread_wakeup_with_result(x, z)				\
			thread_wakeup_prim((x), FALSE, (z))
#define thread_wakeup_one(x)					\
			thread_wakeup_prim((x), TRUE, THREAD_AWAKENED)
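
/*
 * Illustrative sketch (not part of the original header): the classic
 * wait/wakeup round trip pairs assert_wait() and thread_block() on the
 * waiting side with thread_wakeup() on the signaling side, keyed by a
 * shared event address ("&flag" here is a hypothetical event):
 *
 *	// waiter
 *	assert_wait((event_t)&flag, THREAD_UNINT);
 *	thread_block(THREAD_CONTINUE_NULL);
 *
 *	// signaler: wake every thread waiting on &flag
 *	thread_wakeup((event_t)&flag);
 */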

#ifdef MACH_KERNEL_PRIVATE
#define thread_wakeup_one_with_pri(x, pri)			\
			thread_wakeup_prim_internal((x), TRUE, THREAD_AWAKENED, pri)
#endif

extern boolean_t	preemption_enabled(void);

#ifdef	KERNEL_PRIVATE

#ifndef	__LP64__

/*
 *	Obsolete interfaces.
 */

extern void		thread_set_timer(
				uint32_t	interval,
				uint32_t	scale_factor);

extern void		thread_set_timer_deadline(
				uint64_t	deadline);

extern void		thread_cancel_timer(void);

#ifndef	MACH_KERNEL_PRIVATE

#ifndef	ABSOLUTETIME_SCALAR_TYPE

#define thread_set_timer_deadline(a)	\
	thread_set_timer_deadline(__OSAbsoluteTime(a))

#endif	/* ABSOLUTETIME_SCALAR_TYPE */

#endif	/* MACH_KERNEL_PRIVATE */

#endif	/* __LP64__ */

#endif	/* KERNEL_PRIVATE */

#ifdef MACH_KERNEL_PRIVATE

/*
 * Scheduler algorithm indirection. If only one algorithm is
 * enabled at compile-time, a direct function call is used.
 * If more than one is enabled, calls are dispatched through
 * a function pointer table.
 */

#if !defined(CONFIG_SCHED_TRADITIONAL) && !defined(CONFIG_SCHED_PROTO) && !defined(CONFIG_SCHED_GRRR) && !defined(CONFIG_SCHED_FIXEDPRIORITY)
#error Enable at least one scheduler algorithm in osfmk/conf/MASTER.XXX
#endif

#define SCHED(f) (sched_current_dispatch->f)
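
/*
 * Illustrative sketch (not part of the original header): scheduler-
 * independent code calls through the active dispatch table via SCHED(),
 * e.g. to pick the next thread of at least MINPRI for a processor:
 *
 *	thread_t next = SCHED(choose_thread)(processor, MINPRI);
 */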

struct sched_dispatch_table {
	void		(*init)(void);		/* Init global state */
	void		(*timebase_init)(void);	/* Timebase-dependent initialization */
	void		(*processor_init)(processor_t processor);	/* Per-processor scheduler init */
	void		(*pset_init)(processor_set_t pset);	/* Per-processor set scheduler init */

	void		(*maintenance_continuation)(void);	/* Function called regularly */

	/*
	 * Choose a thread of greater or equal priority from the per-processor
	 * runqueue for timeshare/fixed threads
	 */
	thread_t	(*choose_thread)(
				processor_t	processor,
				int		priority);

	/*
	 * Steal a thread from another processor in the pset so that it can run
	 * immediately
	 */
	thread_t	(*steal_thread)(
				processor_set_t	pset);

	/*
	 * Recalculate sched_pri based on base priority, past running time,
	 * and scheduling class.
	 */
	void		(*compute_priority)(
				thread_t	thread,
				boolean_t	override_depress);

	/*
	 * Pick the best processor for a thread (any kind of thread) to run on.
	 */
	processor_t	(*choose_processor)(
				processor_set_t	pset,
				processor_t	processor,
				thread_t	thread);
	/*
	 * Enqueue a timeshare or fixed priority thread onto the per-processor
	 * runqueue
	 */
	boolean_t	(*processor_enqueue)(
				processor_t	processor,
				thread_t	thread,
				integer_t	options);

	/* Migrate threads away in preparation for processor shutdown */
	void		(*processor_queue_shutdown)(
				processor_t	processor);

	/* Remove the specific thread from the per-processor runqueue */
	boolean_t	(*processor_queue_remove)(
				processor_t	processor,
				thread_t	thread);

	/*
	 * Does the per-processor runqueue have any timeshare or fixed priority
	 * threads on it? Called without pset lock held, so should
	 * not assume immutability while executing.
	 */
	boolean_t	(*processor_queue_empty)(processor_t processor);

	/*
	 * Would this priority trigger an urgent preemption if it's sitting
	 * on the per-processor runqueue?
	 */
	boolean_t	(*priority_is_urgent)(int priority);

	/*
	 * Does the per-processor runqueue contain runnable threads that
	 * should cause the currently-running thread to be preempted?
	 */
	ast_t		(*processor_csw_check)(processor_t processor);

	/*
	 * Does the per-processor runqueue contain a runnable thread
	 * of > or >= priority, as a preflight for choose_thread() or other
	 * thread selection
	 */
	boolean_t	(*processor_queue_has_priority)(processor_t processor,
				int		priority,
				boolean_t	gte);

	/* Quantum size for the specified non-realtime thread. */
	uint32_t	(*initial_quantum_size)(thread_t thread);

	/* Scheduler mode for a new thread */
	sched_mode_t	(*initial_thread_sched_mode)(task_t parent_task);

	/* Scheduler algorithm supports timeshare (decay) mode */
	boolean_t	(*supports_timeshare_mode)(void);

	/*
	 * Is it safe to call update_priority, which may change a thread's
	 * runqueue or other state? This can be used to throttle changes
	 * to dynamic priority.
	 */
	boolean_t	(*can_update_priority)(thread_t thread);

	/*
	 * Update both scheduled priority and other persistent state.
	 * Side effects may include migration to another processor's runqueue.
	 */
	void		(*update_priority)(thread_t thread);

	/* Lower overhead update to scheduled priority and state. */
	void		(*lightweight_update_priority)(thread_t thread);

	/* Callback for non-realtime threads when the quantum timer fires */
	void		(*quantum_expire)(thread_t thread);

	/*
	 * Even though we could continue executing on this processor, does the
	 * topology (SMT, for instance) indicate that a better processor could be
	 * chosen?
	 */
	boolean_t	(*should_current_thread_rechoose_processor)(processor_t processor);

	/*
	 * Runnable threads on per-processor runqueue. Should only
	 * be used for relative comparisons of load between processors.
	 */
	int		(*processor_runq_count)(processor_t processor);

	/* Aggregate runcount statistics for per-processor runqueue */
	uint64_t	(*processor_runq_stats_count_sum)(processor_t processor);

	/* Initialize structures to track demoted fairshare threads */
	void		(*fairshare_init)(void);

	/* Number of runnable fairshare threads */
	int		(*fairshare_runq_count)(void);

	/* Aggregate runcount statistics for fairshare runqueue */
	uint64_t	(*fairshare_runq_stats_count_sum)(void);

	void		(*fairshare_enqueue)(thread_t thread);

	thread_t	(*fairshare_dequeue)(void);

	boolean_t	(*fairshare_queue_remove)(thread_t thread);

	/*
	 * Use processor->next_thread to pin a thread to an idle
	 * processor. If FALSE, threads are enqueued and can
	 * be stolen by other processors.
	 */
	boolean_t	direct_dispatch_to_idle_processors;
};
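
/*
 * Illustrative sketch (not part of the original header): a scheduler
 * module defines one of these tables and wires its entry points into it;
 * every name below is hypothetical:
 *
 *	static void example_sched_init(void);
 *	static void example_sched_timebase_init(void);
 *	// ... remaining entry points ...
 *
 *	const struct sched_dispatch_table sched_example_dispatch = {
 *		.init = example_sched_init,
 *		.timebase_init = example_sched_timebase_init,
 *		// ... remaining members ...
 *	};
 */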

#if defined(CONFIG_SCHED_TRADITIONAL)
#define kSchedTraditionalString "traditional"
#define kSchedTraditionalWithPsetRunqueueString "traditional_with_pset_runqueue"
extern const struct sched_dispatch_table sched_traditional_dispatch;
extern const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch;
#endif

#if defined(CONFIG_SCHED_PROTO)
#define kSchedProtoString "proto"
extern const struct sched_dispatch_table sched_proto_dispatch;
#endif

#if defined(CONFIG_SCHED_GRRR)
#define kSchedGRRRString "grrr"
extern const struct sched_dispatch_table sched_grrr_dispatch;
#endif

#if defined(CONFIG_SCHED_FIXEDPRIORITY)
#define kSchedFixedPriorityString "fixedpriority"
#define kSchedFixedPriorityWithPsetRunqueueString "fixedpriority_with_pset_runqueue"
extern const struct sched_dispatch_table sched_fixedpriority_dispatch;
extern const struct sched_dispatch_table sched_fixedpriority_with_pset_runqueue_dispatch;
#endif

/*
 * It is an error to invoke any scheduler-related code
 * before this is set up
 */
enum sched_enum {
	sched_enum_unknown = 0,
#if defined(CONFIG_SCHED_TRADITIONAL)
	sched_enum_traditional = 1,
	sched_enum_traditional_with_pset_runqueue = 2,
#endif
#if defined(CONFIG_SCHED_PROTO)
	sched_enum_proto = 3,
#endif
#if defined(CONFIG_SCHED_GRRR)
	sched_enum_grrr = 4,
#endif
#if defined(CONFIG_SCHED_FIXEDPRIORITY)
	sched_enum_fixedpriority = 5,
	sched_enum_fixedpriority_with_pset_runqueue = 6,
#endif
	sched_enum_max = 7
};

extern const struct sched_dispatch_table *sched_current_dispatch;

#endif	/* MACH_KERNEL_PRIVATE */

__END_DECLS

#endif	/* _KERN_SCHED_PRIM_H_ */