/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	sched_prim.h
 *	Author:	David Golub
 *
 *	Scheduling primitive definitions file
 *
 */

#ifndef	_KERN_SCHED_PRIM_H_
#define	_KERN_SCHED_PRIM_H_

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/kern_return.h>
#include <kern/clock.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <sys/cdefs.h>

#ifdef	MACH_KERNEL_PRIVATE

#include <mach/branch_predicates.h>

/* Initialization */
extern void sched_init(void);

extern void sched_startup(void);

extern void sched_timebase_init(void);

/* Force a preemption point for a thread and wait for it to stop running */
extern boolean_t thread_stop(
	thread_t thread,
	boolean_t until_not_runnable);

/* Release a previous stop request */
extern void thread_unstop(
	thread_t thread);

/* Wait for a thread to stop running */
extern void thread_wait(
	thread_t thread,
	boolean_t until_not_runnable);
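
/*
 * Illustrative sketch (not part of this interface): the typical
 * stop/inspect/unstop pattern.  A caller that must examine a thread
 * which is guaranteed not to be running pairs thread_stop() with
 * thread_unstop().  "examine_thread_state" is a hypothetical helper.
 *
 *	if (thread_stop(thread, FALSE)) {
 *		examine_thread_state(thread);	// thread cannot run here
 *		thread_unstop(thread);		// allow it to run again
 *	}
 */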

/* Unblock thread on wake up */
extern boolean_t thread_unblock(
	thread_t thread,
	wait_result_t wresult);

/* Unblock and dispatch thread */
extern kern_return_t thread_go(
	thread_t thread,
	wait_result_t wresult);

/* Handle threads at context switch */
extern void thread_dispatch(
	thread_t old_thread,
	thread_t new_thread);

/* Switch directly to a particular thread */
extern int thread_run(
	thread_t self,
	thread_continue_t continuation,
	void *parameter,
	thread_t new_thread);

/* Resume thread with new stack */
extern void thread_continue(
	thread_t old_thread);

/* Invoke continuation */
extern void call_continuation(
	thread_continue_t continuation,
	void *parameter,
	wait_result_t wresult);

/* Set the current scheduled priority */
extern void set_sched_pri(
	thread_t thread,
	int priority);

/* Set base priority of the specified thread */
extern void sched_set_thread_base_priority(
	thread_t thread,
	int priority);

/* Set the thread's true scheduling mode */
extern void sched_set_thread_mode(thread_t thread,
	sched_mode_t mode);
/* Demote the true scheduler mode */
extern void sched_thread_mode_demote(thread_t thread,
	uint32_t reason);
/* Un-demote the true scheduler mode */
extern void sched_thread_mode_undemote(thread_t thread,
	uint32_t reason);

/* Re-evaluate base priority of thread (thread locked) */
void thread_recompute_priority(thread_t thread);

/* Re-evaluate base priority of thread (thread unlocked) */
void thread_recompute_qos(thread_t thread);

/* Reset scheduled priority of thread */
extern void thread_recompute_sched_pri(
	thread_t thread,
	boolean_t override_depress);

/* Periodic scheduler activity */
extern void sched_init_thread(void (*)(void));

/* Perform sched_tick housekeeping activities */
extern boolean_t can_update_priority(
	thread_t thread);

extern void update_priority(
	thread_t thread);

extern void lightweight_update_priority(
	thread_t thread);

extern void sched_default_quantum_expire(thread_t thread);

/* Idle processor thread */
extern void idle_thread(void);

extern kern_return_t idle_thread_create(
	processor_t processor);

/* Continuation return from syscall */
extern void thread_syscall_return(
	kern_return_t ret);

/* Context switch */
extern wait_result_t thread_block_reason(
	thread_continue_t continuation,
	void *parameter,
	ast_t reason);

/* Reschedule thread for execution */
extern void thread_setrun(
	thread_t thread,
	integer_t options);

#define SCHED_TAILQ	1
#define SCHED_HEADQ	2
#define SCHED_PREEMPT	4

extern uintptr_t sched_thread_on_rt_queue;
#define THREAD_ON_RT_RUNQ ((processor_t)(uintptr_t)&sched_thread_on_rt_queue)

extern processor_set_t task_choose_pset(
	task_t task);

/* Bind the current thread to a particular processor */
extern processor_t thread_bind(
	processor_t processor);

/* Choose the best processor to run a thread */
extern processor_t choose_processor(
	processor_set_t pset,
	processor_t processor,
	thread_t thread);


extern void thread_quantum_init(
	thread_t thread);

extern void run_queue_init(
	run_queue_t runq);

extern thread_t run_queue_dequeue(
	run_queue_t runq,
	integer_t options);

extern boolean_t run_queue_enqueue(
	run_queue_t runq,
	thread_t thread,
	integer_t options);

extern void run_queue_remove(
	run_queue_t runq,
	thread_t thread);

struct sched_update_scan_context
{
	uint64_t	earliest_bg_make_runnable_time;
	uint64_t	earliest_normal_make_runnable_time;
	uint64_t	earliest_rt_make_runnable_time;
};
typedef struct sched_update_scan_context *sched_update_scan_context_t;
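
/*
 * Illustrative sketch: a scan context is typically seeded with
 * UINT64_MAX sentinels, which runq_scan()/rt_runq_scan() lower as
 * they encounter older make-runnable timestamps.
 *
 *	struct sched_update_scan_context scan_context = {
 *		.earliest_bg_make_runnable_time = UINT64_MAX,
 *		.earliest_normal_make_runnable_time = UINT64_MAX,
 *		.earliest_rt_make_runnable_time = UINT64_MAX
 *	};
 */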

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

extern boolean_t thread_update_add_thread(thread_t thread);
extern void thread_update_process_threads(void);
extern boolean_t runq_scan(run_queue_t runq, sched_update_scan_context_t scan_context);

extern void sched_timeshare_init(void);
extern void sched_timeshare_timebase_init(void);
extern void sched_timeshare_maintenance_continue(void);

extern boolean_t priority_is_urgent(int priority);
extern uint32_t sched_timeshare_initial_quantum_size(thread_t thread);

extern int sched_compute_timeshare_priority(thread_t thread);

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

extern void rt_runq_scan(sched_update_scan_context_t scan_context);

/* Remove thread from its run queue */
extern boolean_t thread_run_queue_remove(thread_t thread);
thread_t thread_run_queue_remove_for_handoff(thread_t thread);

/* Put a thread back in the run queue after being yanked */
extern void thread_run_queue_reinsert(thread_t thread, integer_t options);

extern void thread_timer_expire(
	void *thread,
	void *p1);

extern boolean_t thread_eager_preemption(
	thread_t thread);

extern boolean_t sched_generic_direct_dispatch_to_idle_processors;

/* Set the maximum interrupt level for the thread */
__private_extern__ wait_interrupt_t thread_interrupt_level(
	wait_interrupt_t interruptible);

__private_extern__ wait_result_t thread_mark_wait_locked(
	thread_t thread,
	wait_interrupt_t interruptible);

/* Wake up locked thread directly, passing result */
__private_extern__ kern_return_t clear_wait_internal(
	thread_t thread,
	wait_result_t result);

extern void sched_stats_handle_csw(
	processor_t processor,
	int reasons,
	int selfpri,
	int otherpri);

extern void sched_stats_handle_runq_change(
	struct runq_stats *stats,
	int old_count);

#define	SCHED_STATS_CSW(processor, reasons, selfpri, otherpri)	\
do {								\
	if (__builtin_expect(sched_stats_active, 0)) {		\
		sched_stats_handle_csw((processor),		\
		    (reasons), (selfpri), (otherpri));		\
	}							\
} while (0)

#define SCHED_STATS_RUNQ_CHANGE(stats, old_count)		\
do {								\
	if (__builtin_expect(sched_stats_active, 0)) {		\
		sched_stats_handle_runq_change((stats),		\
		    (old_count));				\
	}							\
} while (0)
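
/*
 * Illustrative sketch: run-queue implementations typically record the
 * previous count whenever a thread is added or removed, e.g.
 *
 *	SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
 *	rq->count++;
 */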

extern uint32_t sched_debug_flags;
#define SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS		0x00000001
#define SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS	0x00000002

#define SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(...) do {	\
		if (__improbable(sched_debug_flags & SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS)) { \
			KERNEL_DEBUG_CONSTANT(__VA_ARGS__);	\
		}						\
	} while(0)

#define SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(...) do {	\
		if (__improbable(sched_debug_flags & SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS)) { \
			KERNEL_DEBUG_CONSTANT(__VA_ARGS__);	\
		}						\
	} while(0)

#define THREAD_URGENCY_NONE		0	/* indicates that there is no currently runnable thread */
#define THREAD_URGENCY_BACKGROUND	1	/* indicates that the thread is marked as a "background" thread */
#define THREAD_URGENCY_NORMAL		2	/* indicates that the thread is marked as a "normal" thread */
#define THREAD_URGENCY_REAL_TIME	3	/* indicates that the thread is marked as a "real-time" or urgent thread */
#define THREAD_URGENCY_MAX		4	/* Marker */
/* Returns the "urgency" of a thread (provided by scheduler) */
extern int thread_get_urgency(
	thread_t thread,
	uint64_t *rt_period,
	uint64_t *rt_deadline);

/* Tells the "urgency" of the just scheduled thread (provided by CPU PM) */
extern void thread_tell_urgency(
	int urgency,
	uint64_t rt_period,
	uint64_t rt_deadline,
	uint64_t sched_latency,
	thread_t nthread);

/* Tells if there are "active" RT threads in the system (provided by CPU PM) */
extern void active_rt_threads(
	boolean_t active);

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef	XNU_KERNEL_PRIVATE

extern boolean_t assert_wait_possible(void);

/* Toggles a global override to turn off CPU Throttling */
#define CPU_THROTTLE_DISABLE	0
#define CPU_THROTTLE_ENABLE	1
extern void sys_override_cpu_throttle(int flag);

/*
 ****************** Only exported until BSD stops using ********************
 */

extern void thread_vm_bind_group_add(void);

/* Wake up thread directly, passing result */
extern kern_return_t clear_wait(
	thread_t thread,
	wait_result_t result);

/* Start thread running */
extern void thread_bootstrap_return(void);

/* Return from exception (BSD-visible interface) */
extern void thread_exception_return(void) __dead2;

#define SCHED_STRING_MAX_LENGTH (48)
/* String declaring the name of the current scheduler */
extern char sched_string[SCHED_STRING_MAX_LENGTH];

extern kern_return_t sched_work_interval_notify(thread_t thread, uint64_t work_interval_id, uint64_t start, uint64_t finish, uint64_t deadline, uint64_t next_start, uint32_t flags);

extern thread_t port_name_to_thread_for_ulock(mach_port_name_t thread_name);

/* Attempt to context switch to a specific runnable thread */
extern wait_result_t thread_handoff(thread_t thread);

extern struct waitq *assert_wait_queue(event_t event);

extern kern_return_t thread_wakeup_one_with_pri(event_t event, int priority);

extern thread_t thread_wakeup_identify(event_t event, int priority);

#endif /* XNU_KERNEL_PRIVATE */

/* Context switch */
extern wait_result_t thread_block(
	thread_continue_t continuation);

extern wait_result_t thread_block_parameter(
	thread_continue_t continuation,
	void *parameter);
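
/*
 * Illustrative sketch: blocking with a continuation.  The kernel stack
 * may be discarded while the thread is blocked, so the continuation is
 * invoked on a fresh stack at wakeup and must never return.
 * "example_continue" and "example_event" are hypothetical.
 *
 *	static void
 *	example_continue(void *parameter, wait_result_t wresult)
 *	{
 *		// resumes here after wakeup; finish the operation,
 *		// then e.g. thread_syscall_return(KERN_SUCCESS)
 *	}
 *
 *	assert_wait((event_t)&example_event, THREAD_UNINT);
 *	thread_block_parameter(example_continue, parameter);
 *	// not reached
 */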

/* Declare thread will wait on a particular event */
extern wait_result_t assert_wait(
	event_t event,
	wait_interrupt_t interruptible);

/* Assert that the thread intends to wait with a timeout */
extern wait_result_t assert_wait_timeout(
	event_t event,
	wait_interrupt_t interruptible,
	uint32_t interval,
	uint32_t scale_factor);
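
/*
 * Illustrative sketch: waiting with a timeout.  The scale factor
 * converts the interval to nanoseconds (e.g. NSEC_PER_MSEC for a
 * millisecond-denominated interval).  "example_event" is hypothetical.
 *
 *	wait_result_t wres;
 *
 *	wres = assert_wait_timeout((event_t)&example_event,
 *	    THREAD_INTERRUPTIBLE, 100, NSEC_PER_MSEC);	// ~100 ms
 *	if (wres == THREAD_WAITING)
 *		wres = thread_block(THREAD_CONTINUE_NULL);
 *	if (wres == THREAD_TIMED_OUT)
 *		;	// no wakeup arrived in time
 */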

/* Assert that the thread intends to wait with an urgency, timeout and leeway */
extern wait_result_t assert_wait_timeout_with_leeway(
	event_t event,
	wait_interrupt_t interruptible,
	wait_timeout_urgency_t urgency,
	uint32_t interval,
	uint32_t leeway,
	uint32_t scale_factor);

extern wait_result_t assert_wait_deadline(
	event_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline);

/* Assert that the thread intends to wait with an urgency, deadline, and leeway */
extern wait_result_t assert_wait_deadline_with_leeway(
	event_t event,
	wait_interrupt_t interruptible,
	wait_timeout_urgency_t urgency,
	uint64_t deadline,
	uint64_t leeway);

/* Wake up thread (or threads) waiting on a particular event */
extern kern_return_t thread_wakeup_prim(
	event_t event,
	boolean_t one_thread,
	wait_result_t result);

#define thread_wakeup(x)				\
	thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
#define thread_wakeup_with_result(x, z)			\
	thread_wakeup_prim((x), FALSE, (z))
#define thread_wakeup_one(x)				\
	thread_wakeup_prim((x), TRUE, THREAD_AWAKENED)
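
/*
 * Illustrative sketch: the basic wait/wakeup handshake.  One thread
 * asserts a wait on an event and blocks; another thread posts the
 * event.  "example_flag" is a hypothetical condition protected by a
 * lock not shown here.
 *
 *	// waiter
 *	wres = assert_wait((event_t)&example_flag, THREAD_UNINT);
 *	if (wres == THREAD_WAITING)
 *		wres = thread_block(THREAD_CONTINUE_NULL);
 *
 *	// waker
 *	example_flag = TRUE;
 *	thread_wakeup((event_t)&example_flag);
 */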

/* Wake up the specified thread if it is waiting on this event */
extern kern_return_t thread_wakeup_thread(event_t event, thread_t thread);

extern boolean_t preemption_enabled(void);

#ifdef MACH_KERNEL_PRIVATE

/*
 * Scheduler algorithm indirection.  If only one algorithm is
 * enabled at compile-time, a direct function call is used.
 * If more than one is enabled, calls are dispatched through
 * a function pointer table.
 */

#if !defined(CONFIG_SCHED_TRADITIONAL) && !defined(CONFIG_SCHED_PROTO) && !defined(CONFIG_SCHED_GRRR) && !defined(CONFIG_SCHED_MULTIQ)
#error Enable at least one scheduler algorithm in osfmk/conf/MASTER.XXX
#endif

#define SCHED(f) (sched_current_dispatch->f)
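
/*
 * Illustrative sketch: callers reach the active algorithm through the
 * SCHED() macro rather than naming a dispatch table directly, e.g.
 *
 *	processor = SCHED(choose_processor)(pset, processor, thread);
 *	quantum = SCHED(initial_quantum_size)(thread);
 */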

struct sched_dispatch_table {
	const char *sched_name;
	void (*init)(void);		/* Init global state */
	void (*timebase_init)(void);	/* Timebase-dependent initialization */
	void (*processor_init)(processor_t processor);	/* Per-processor scheduler init */
	void (*pset_init)(processor_set_t pset);	/* Per-processor set scheduler init */

	void (*maintenance_continuation)(void);	/* Function called regularly */

	/*
	 * Choose a thread of greater or equal priority from the per-processor
	 * runqueue for timeshare/fixed threads
	 */
	thread_t (*choose_thread)(
		processor_t processor,
		int priority,
		ast_t reason);

	/* True if scheduler supports stealing threads */
	boolean_t steal_thread_enabled;

	/*
	 * Steal a thread from another processor in the pset so that it can run
	 * immediately
	 */
	thread_t (*steal_thread)(
		processor_set_t pset);

	/*
	 * Compute priority for a timeshare thread based on base priority.
	 */
	int (*compute_timeshare_priority)(thread_t thread);

	/*
	 * Pick the best processor for a thread (any kind of thread) to run on.
	 */
	processor_t (*choose_processor)(
		processor_set_t pset,
		processor_t processor,
		thread_t thread);
	/*
	 * Enqueue a timeshare or fixed priority thread onto the per-processor
	 * runqueue
	 */
	boolean_t (*processor_enqueue)(
		processor_t processor,
		thread_t thread,
		integer_t options);

	/* Migrate threads away in preparation for processor shutdown */
	void (*processor_queue_shutdown)(
		processor_t processor);

	/* Remove the specific thread from the per-processor runqueue */
	boolean_t (*processor_queue_remove)(
		processor_t processor,
		thread_t thread);

	/*
	 * Does the per-processor runqueue have any timeshare or fixed priority
	 * threads on it? Called without pset lock held, so should
	 * not assume immutability while executing.
	 */
	boolean_t (*processor_queue_empty)(processor_t processor);

	/*
	 * Would this priority trigger an urgent preemption if it's sitting
	 * on the per-processor runqueue?
	 */
	boolean_t (*priority_is_urgent)(int priority);

	/*
	 * Does the per-processor runqueue contain runnable threads that
	 * should cause the currently-running thread to be preempted?
	 */
	ast_t (*processor_csw_check)(processor_t processor);

	/*
	 * Does the per-processor runqueue contain a runnable thread
	 * of > or >= priority, as a preflight for choose_thread() or other
	 * thread selection
	 */
	boolean_t (*processor_queue_has_priority)(processor_t processor,
		int priority,
		boolean_t gte);

	/* Quantum size for the specified non-realtime thread. */
	uint32_t (*initial_quantum_size)(thread_t thread);

	/* Scheduler mode for a new thread */
	sched_mode_t (*initial_thread_sched_mode)(task_t parent_task);

	/*
	 * Is it safe to call update_priority, which may change a thread's
	 * runqueue or other state.  This can be used to throttle changes
	 * to dynamic priority.
	 */
	boolean_t (*can_update_priority)(thread_t thread);

	/*
	 * Update both scheduled priority and other persistent state.
	 * Side effects may include migration to another processor's runqueue.
	 */
	void (*update_priority)(thread_t thread);

	/* Lower overhead update to scheduled priority and state. */
	void (*lightweight_update_priority)(thread_t thread);

	/* Callback for non-realtime threads when the quantum timer fires */
	void (*quantum_expire)(thread_t thread);

	/*
	 * Runnable threads on per-processor runqueue.  Should only
	 * be used for relative comparisons of load between processors.
	 */
	int (*processor_runq_count)(processor_t processor);

	/* Aggregate runcount statistics for per-processor runqueue */
	uint64_t (*processor_runq_stats_count_sum)(processor_t processor);

	boolean_t (*processor_bound_count)(processor_t processor);

	void (*thread_update_scan)(sched_update_scan_context_t scan_context);

	/*
	 * Use processor->next_thread to pin a thread to an idle
	 * processor.  If FALSE, threads are enqueued and can
	 * be stolen by other processors.
	 */
	boolean_t direct_dispatch_to_idle_processors;

	/* Supports more than one pset */
	boolean_t multiple_psets_enabled;
	/* Supports scheduler groups */
	boolean_t sched_groups_enabled;
};
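
/*
 * Illustrative sketch (hypothetical scheduler, not part of xnu): an
 * algorithm registers itself by populating a dispatch table with its
 * entry points; the remaining hooks are filled in the same fashion.
 *
 *	const struct sched_dispatch_table sched_example_dispatch = {
 *		.sched_name		= "example",
 *		.init			= sched_example_init,
 *		.timebase_init		= sched_example_timebase_init,
 *		.choose_thread		= sched_example_choose_thread,
 *		.steal_thread_enabled	= FALSE,
 *		...			// etc. for the other hooks
 *	};
 */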

#if defined(CONFIG_SCHED_TRADITIONAL)
extern const struct sched_dispatch_table sched_traditional_dispatch;
extern const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch;
#endif

#if defined(CONFIG_SCHED_MULTIQ)
extern const struct sched_dispatch_table sched_multiq_dispatch;
extern const struct sched_dispatch_table sched_dualq_dispatch;
#endif

#if defined(CONFIG_SCHED_PROTO)
extern const struct sched_dispatch_table sched_proto_dispatch;
#endif

#if defined(CONFIG_SCHED_GRRR)
extern const struct sched_dispatch_table sched_grrr_dispatch;
#endif

/*
 * It is an error to invoke any scheduler-related code
 * before this is set up
 */
extern const struct sched_dispatch_table *sched_current_dispatch;

#endif /* MACH_KERNEL_PRIVATE */

__END_DECLS

#endif	/* _KERN_SCHED_PRIM_H_ */