]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/sched.h
1a46180a9d28946b74f7bd4d03d985664abbcbc5
[apple/xnu.git] / osfmk / kern / sched.h
1 /*
2 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: sched.h
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1985
62 *
63 * Header file for scheduler.
64 *
65 */
66
67 #ifndef _KERN_SCHED_H_
68 #define _KERN_SCHED_H_
69
70 #include <mach/policy.h>
71 #include <kern/kern_types.h>
72 #include <kern/queue.h>
73 #include <kern/macro_help.h>
74 #include <kern/timer_call.h>
75 #include <kern/ast.h>
76
#define NRQS		128				/* 128 levels per run queue */
#define NRQBM		(NRQS / 32)		/* number of 32-bit words per run-queue bitmap */

#define MAXPRI		(NRQS-1)		/* highest priority (127) */
#define MINPRI		IDLEPRI			/* lowest legal priority schedulable */
#define IDLEPRI		0				/* idle thread priority */
83
84 /*
85 * High-level priority assignments
86 *
87 *************************************************************************
88 * 127 Reserved (real-time)
89 * A
90 * +
91 * (32 levels)
92 * +
93 * V
94 * 96 Reserved (real-time)
95 * 95 Kernel mode only
96 * A
97 * +
98 * (16 levels)
99 * +
100 * V
101 * 80 Kernel mode only
102 * 79 System high priority
103 * A
104 * +
105 * (16 levels)
106 * +
107 * V
108 * 64 System high priority
109 * 63 Elevated priorities
110 * A
111 * +
112 * (12 levels)
113 * +
114 * V
115 * 52 Elevated priorities
116 * 51 Elevated priorities (incl. BSD +nice)
117 * A
118 * +
119 * (20 levels)
120 * +
121 * V
122 * 32 Elevated priorities (incl. BSD +nice)
123 * 31 Default (default base for threads)
124 * 30 Lowered priorities (incl. BSD -nice)
125 * A
126 * +
127 * (20 levels)
128 * +
129 * V
130 * 11 Lowered priorities (incl. BSD -nice)
131 * 10 Lowered priorities (aged pri's)
132 * A
133 * +
134 * (11 levels)
135 * +
136 * V
137 * 0 Lowered priorities (aged pri's / idle)
138 *************************************************************************
139 */
140
/*
 * Priority band boundaries.  Values in trailing comments are the results
 * for NRQS == 128; all bands are derived from NRQS so the layout scales
 * with the run-queue depth.  (BASEPRI_RTQUEUES is defined before
 * BASEPRI_REALTIME; this is fine for object-like macros, which are
 * expanded at use, not at definition.)
 */
#define BASEPRI_RTQUEUES	(BASEPRI_REALTIME + 1)				/* 97 */
#define BASEPRI_REALTIME	(MAXPRI - (NRQS / 4) + 1)			/* 96 */

#define MAXPRI_KERNEL		(BASEPRI_REALTIME - 1)				/* 95 */
#define BASEPRI_PREEMPT		(MAXPRI_KERNEL - 2)					/* 93 */
#define BASEPRI_KERNEL		(MINPRI_KERNEL + 1)					/* 81 */
#define MINPRI_KERNEL		(MAXPRI_KERNEL - (NRQS / 8) + 1)	/* 80 */

#define MAXPRI_RESERVED		(MINPRI_KERNEL - 1)					/* 79 */
#define BASEPRI_GRAPHICS	(MAXPRI_RESERVED - 3)				/* 76 */
#define MINPRI_RESERVED		(MAXPRI_RESERVED - (NRQS / 8) + 1)	/* 64 */

#define MAXPRI_USER			(MINPRI_RESERVED - 1)				/* 63 */
#define BASEPRI_CONTROL		(BASEPRI_DEFAULT + 17)				/* 48 */
#define BASEPRI_FOREGROUND	(BASEPRI_DEFAULT + 16)				/* 47 */
#define BASEPRI_BACKGROUND	(BASEPRI_DEFAULT + 15)				/* 46 */
#define BASEPRI_USER_INITIATED	(BASEPRI_DEFAULT + 6)			/* 37 */
#define BASEPRI_DEFAULT		(MAXPRI_USER - (NRQS / 4))			/* 31 */
#define MAXPRI_SUPPRESSED	(BASEPRI_DEFAULT - 3)				/* 28 */
#define BASEPRI_UTILITY		(BASEPRI_DEFAULT - 11)				/* 20 */
#define MAXPRI_THROTTLE		(MINPRI + 4)						/* 4 */
#define MINPRI_USER			MINPRI								/* 0 */

#define DEPRESSPRI			MINPRI			/* depress priority */
#define MAXPRI_PROMOTE		(MAXPRI_KERNEL)	/* ceiling for mutex promotion */
166
/*
 * Type used for thread->sched_mode and saved_mode.
 * TH_MODE_NONE is zero so a zero-filled thread starts "unassigned".
 */
typedef enum {
	TH_MODE_NONE = 0,		/* unassigned, usually for saved_mode only */
	TH_MODE_REALTIME,		/* time constraints supplied */
	TH_MODE_FIXED,			/* use fixed priorities, no decay */
	TH_MODE_TIMESHARE,		/* use timesharing algorithm */
} sched_mode_t;
174
/*
 * Macro to check for invalid priorities: true when pri lies outside
 * the inclusive range [MINPRI, MAXPRI].  Arguments are fully
 * parenthesized; pri is evaluated twice, so avoid side effects.
 */
#define invalid_pri(pri) ((pri) < MINPRI || (pri) > MAXPRI)
179
/*
 * Per-run-queue statistics.  NOTE(review): count_sum appears to be a
 * running accumulation keyed off last_change_timestamp (a timebase
 * value) — the updating code is elsewhere; confirm against the
 * scheduler averaging code before relying on exact semantics.
 */
struct runq_stats {
	uint64_t				count_sum;
	uint64_t				last_change_timestamp;
};
184
#if defined(CONFIG_SCHED_TIMESHARE_CORE) || defined(CONFIG_SCHED_PROTO)

/*
 * Multilevel run queue: one queue head per priority level (NRQS of
 * them), with a bitmap (NRQBM words) marking non-empty levels and a
 * cached index of the highest runnable level.
 */
struct run_queue {
	int					highq;			/* highest runnable queue */
	int					bitmap[NRQBM];	/* run queue bitmap array */
	int					count;			/* # of threads total */
	int					urgency;		/* level of preemption urgency */
	queue_head_t		queues[NRQS];	/* one for each priority */

	struct runq_stats	runq_stats;		/* enqueue/dequeue statistics */
};

#endif /* defined(CONFIG_SCHED_TIMESHARE_CORE) || defined(CONFIG_SCHED_PROTO) */
198
/*
 * Real-time run queue: a single queue holding all runnable RT threads
 * (priorities BASEPRI_RTQUEUES and above), unlike the multilevel
 * struct run_queue used for timeshare threads.
 */
struct rt_queue {
	int					count;			/* # of threads total */
	queue_head_t		queue;			/* all runnable RT threads */

	struct runq_stats	runq_stats;		/* enqueue/dequeue statistics */
};
205
#if defined(CONFIG_SCHED_GRRR_CORE)

/*
 * We map standard Mach priorities to an abstract scale that more properly
 * indicates how we want processor time allocated under contention.
 */
typedef uint8_t	grrr_proportional_priority_t;	/* 0..255 proportional scale */
typedef uint8_t	grrr_group_index_t;				/* index into groups[] */

#define NUM_GRRR_PROPORTIONAL_PRIORITIES	256
#define MAX_GRRR_PROPORTIONAL_PRIORITY		((grrr_proportional_priority_t)255)

#if 0
/* previous grouping scheme, kept for reference */
#define NUM_GRRR_GROUPS 8					/* log(256) */
#endif

#define NUM_GRRR_GROUPS 64					/* 256/4 */

/*
 * A group of clients (threads) sharing a band of proportional
 * priorities; groups are kept in a weight-ordered list on the
 * grrr_run_queue.
 */
struct grrr_group {
	queue_chain_t			priority_order;		/* next greatest weight group */
	grrr_proportional_priority_t		minpriority;	/* lowest proportional priority in group */
	grrr_group_index_t		index;				/* position in groups[] */

	queue_head_t			clients;			/* member threads */
	int						count;				/* # of clients */
	uint32_t				weight;				/* aggregate group weight */
#if 0
	uint32_t				deferred_removal_weight;
#endif
	uint32_t				work;				/* work accounted to this group */
	thread_t				current_client;		/* client currently selected */
};

/*
 * Per-processor GRRR run queue: fixed array of groups plus a list of
 * the non-empty ones sorted by weight.
 */
struct grrr_run_queue {
	int						count;				/* # of threads total */
	uint32_t				last_rescale_tick;	/* tick of last weight rescale */
	struct grrr_group		groups[NUM_GRRR_GROUPS];
	queue_head_t			sorted_group_list;	/* groups in priority (weight) order */
	uint32_t				weight;				/* total weight of all groups */
	grrr_group_t			current_group;		/* group currently being served */
	
	struct runq_stats		runq_stats;			/* enqueue/dequeue statistics */
};

#endif /* defined(CONFIG_SCHED_GRRR_CORE) */
251
/* Global real-time run queue (shared by all processors) */
extern struct rt_queue		rt_runq;

#if defined(CONFIG_SCHED_MULTIQ)
/* Create/destroy a scheduling group for the multiq scheduler */
sched_group_t		sched_group_create(void);
void				sched_group_destroy(sched_group_t sched_group);
#endif /* defined(CONFIG_SCHED_MULTIQ) */
258
259
260
/*
 *	Scheduler routines.
 */

/* Handle quantum expiration for an executing thread (timer_call callback;
 * parameters are the processor and the thread, passed as opaque params) */
extern void		thread_quantum_expire(
					timer_call_param_t	processor,
					timer_call_param_t	thread);

/* Context switch check for current processor; returns the AST(s) to take */
extern ast_t	csw_check(processor_t processor,
						  ast_t check_reason);

#if defined(CONFIG_SCHED_TIMESHARE_CORE)
extern uint32_t	std_quantum, min_std_quantum;	/* standard/minimum quantum, abstime */
extern uint32_t	std_quantum_us;					/* standard quantum, microseconds */
#endif /* CONFIG_SCHED_TIMESHARE_CORE */

extern uint32_t	thread_depress_time;			/* duration of priority depression */
extern uint32_t	default_timeshare_computation;
extern uint32_t	default_timeshare_constraint;

/* Bounds on real-time thread quanta */
extern uint32_t	max_rt_quantum, min_rt_quantum;

extern int		default_preemption_rate;
extern int		default_bg_preemption_rate;
#if defined(CONFIG_SCHED_TIMESHARE_CORE)

/*
 * Age usage at approximately (1 << SCHED_TICK_SHIFT) times per second
 * Aging may be deferred during periods where all processors are idle
 * and cumulatively applied during periods of activity.
 */
#define SCHED_TICK_SHIFT	3
#define SCHED_TICK_MAX_DELTA	(8)		/* cap on deferred aging ticks */

extern unsigned		sched_tick;			/* current scheduler tick count */
extern uint32_t		sched_tick_interval;	/* tick period, abstime */

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

extern uint64_t		sched_one_second_interval;	/* one second in abstime units */
304
/* Periodic computation of various averages */
extern void		compute_averages(uint64_t);

/* Update the running-thread average; nrun points at the sample count */
extern void		compute_averunnable(
					void			*nrun);

/* Recompute the kernel stack cache target */
extern void		compute_stack_target(
					void			*arg);

extern void		compute_memory_pressure(
					void			*arg);

/* Periodic GC throttle recomputations for the zone/pageout/pmap subsystems */
extern void		compute_zone_gc_throttle(
					void			*arg);

extern void		compute_pageout_gc_throttle(
					void			*arg);

extern void		compute_pmap_gc_throttle(
					void			*arg);
325
/*
 *	Conversion factor from usage
 *	to priority.
 */
#if defined(CONFIG_SCHED_TIMESHARE_CORE)
extern uint32_t		sched_pri_shift;				/* usage -> priority decay shift */
extern uint32_t		sched_background_pri_shift;		/* shift for background threads */
extern uint32_t		sched_combined_fgbg_pri_shift;	/* shift when fg/bg decay is combined */
extern uint32_t		sched_fixed_shift;
extern int8_t		sched_load_shifts[NRQS];		/* per-load shift table */
extern uint32_t		sched_decay_usage_age_factor;
extern uint32_t		sched_use_combined_fgbg_decay;	/* boolean: use combined fg/bg decay */
void sched_timeshare_consider_maintenance(uint64_t);
#endif /* CONFIG_SCHED_TIMESHARE_CORE */

extern int32_t		sched_poll_yield_shift;
extern uint64_t		sched_safe_duration;

/* Global thread counts: runnable, timeshare, and background threads */
extern uint32_t		sched_run_count, sched_share_count, sched_background_count;
extern uint32_t		sched_load_average, sched_mach_factor;

/* 5/30/60-second load average and Mach factor samples */
extern uint32_t		avenrun[3], mach_factor[3];

extern uint64_t		max_unsafe_computation;
extern uint64_t		max_poll_computation;
351
/*
 * TH_RUN & !TH_IDLE controls whether a thread has a run count.
 *
 * These expand to the (atomic) updated-count expression; the th
 * argument is intentionally unused here.
 *
 * Fix: the original macro bodies ended with a stray line-continuation
 * backslash, splicing the following blank line into each macro; had
 * that blank line ever been removed, the next preprocessor directive
 * would have been silently absorbed into the macro body.
 */
#define sched_run_incr(th) \
	hw_atomic_add(&sched_run_count, 1)

#define sched_run_decr(th) \
	hw_atomic_sub(&sched_run_count, 1)
358
#if MACH_ASSERT
/*
 * Debug (MACH_ASSERT) builds use out-of-line functions so the
 * per-thread count invariants can be asserted; release builds use the
 * bare atomic-counter macros below.
 */
extern void sched_share_incr(thread_t thread);
extern void sched_share_decr(thread_t thread);
extern void sched_background_incr(thread_t thread);
extern void sched_background_decr(thread_t thread);

extern void assert_thread_sched_count(thread_t thread);

#else /* MACH_ASSERT */
/* sched_mode == TH_MODE_TIMESHARE controls whether a thread has a timeshare count when it has a run count */
#define sched_share_incr(th) \
MACRO_BEGIN \
	(void)hw_atomic_add(&sched_share_count, 1); \
MACRO_END

#define sched_share_decr(th) \
MACRO_BEGIN \
	(void)hw_atomic_sub(&sched_share_count, 1); \
MACRO_END

/* TH_SFLAG_THROTTLED controls whether a thread has a background count when it has a run count and a share count */
#define sched_background_incr(th) \
MACRO_BEGIN \
	hw_atomic_add(&sched_background_count, 1); \
MACRO_END

#define sched_background_decr(th) \
MACRO_BEGIN \
	hw_atomic_sub(&sched_background_count, 1); \
MACRO_END

/* No-op in release builds; asserts count invariants under MACH_ASSERT */
#define assert_thread_sched_count(th) \
MACRO_BEGIN \
MACRO_END

#endif /* !MACH_ASSERT */
395
/*
 *	thread_timer_delta macro takes care of both thread timers:
 *	it stores the system-timer delta since the last save into
 *	delta, then adds the user-timer delta.  Both arguments are
 *	evaluated more than once — avoid side effects.
 */
#define thread_timer_delta(thread, delta)					\
MACRO_BEGIN								\
	(delta) = (typeof(delta))timer_delta(&(thread)->system_timer,	\
							&(thread)->system_timer_save);		\
	(delta) += (typeof(delta))timer_delta(&(thread)->user_timer,	\
							&(thread)->user_timer_save);		\
MACRO_END
406
407 #endif /* _KERN_SCHED_H_ */