/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	sched.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 *	Header file for scheduler.
 *
 */

#ifndef _KERN_SCHED_H_
#define _KERN_SCHED_H_

#include <mach/policy.h>
#include <kern/kern_types.h>
#include <kern/smp.h>
#include <kern/circle_queue.h>
#include <kern/macro_help.h>
#include <kern/timer_call.h>
#include <kern/ast.h>
#include <kern/kalloc.h>
#include <kern/bits.h>

#define NRQS_MAX        (128)           /* maximum number of priority levels */

#define MAXPRI          (NRQS_MAX-1)
#define MINPRI          0               /* lowest legal priority schedulable */
#define IDLEPRI         MINPRI          /* idle thread priority */
#define NOPRI           -1

/*
 *	High-level priority assignments
 *
 *************************************************************************
 * 127		Reserved (real-time)
 *				A
 *				+
 *			(32 levels)
 *				+
 *				V
 *  96		Reserved (real-time)
 *  95		Kernel mode only
 *				A
 *				+
 *			(16 levels)
 *				+
 *				V
 *  80		Kernel mode only
 *  79		System high priority
 *				A
 *				+
 *			(16 levels)
 *				+
 *				V
 *  64		System high priority
 *  63		Elevated priorities
 *				A
 *				+
 *			(12 levels)
 *				+
 *				V
 *  52		Elevated priorities
 *  51		Elevated priorities (incl. BSD +nice)
 *				A
 *				+
 *			(20 levels)
 *				+
 *				V
 *  32		Elevated priorities (incl. BSD +nice)
 *  31		Default (default base for threads)
 *  30		Lowered priorities (incl. BSD -nice)
 *				A
 *				+
 *			(20 levels)
 *				+
 *				V
 *  11		Lowered priorities (incl. BSD -nice)
 *  10		Lowered priorities (aged pri's)
 *				A
 *				+
 *			(11 levels)
 *				+
 *				V
 *   0		Lowered priorities (aged pri's / idle)
 *************************************************************************
 */

#define BASEPRI_RTQUEUES        (BASEPRI_REALTIME + 1)                  /* 97 */
#define BASEPRI_REALTIME        (MAXPRI - (NRQS_MAX / 4) + 1)           /* 96 */

#define MAXPRI_KERNEL           (BASEPRI_REALTIME - 1)                  /* 95 */
#define BASEPRI_PREEMPT_HIGH    (BASEPRI_PREEMPT + 1)                   /* 93 */
#define BASEPRI_PREEMPT         (MAXPRI_KERNEL - 3)                     /* 92 */
#define BASEPRI_VM              (BASEPRI_PREEMPT - 1)                   /* 91 */

#define BASEPRI_KERNEL          (MINPRI_KERNEL + 1)                     /* 81 */
#define MINPRI_KERNEL           (MAXPRI_KERNEL - (NRQS_MAX / 8) + 1)    /* 80 */

#define MAXPRI_RESERVED         (MINPRI_KERNEL - 1)                     /* 79 */
#define BASEPRI_GRAPHICS        (MAXPRI_RESERVED - 3)                   /* 76 */
#define MINPRI_RESERVED         (MAXPRI_RESERVED - (NRQS_MAX / 8) + 1)  /* 64 */

#define MAXPRI_USER             (MINPRI_RESERVED - 1)                   /* 63 */
#define BASEPRI_CONTROL         (BASEPRI_DEFAULT + 17)                  /* 48 */
#define BASEPRI_FOREGROUND      (BASEPRI_DEFAULT + 16)                  /* 47 */
#define BASEPRI_BACKGROUND      (BASEPRI_DEFAULT + 15)                  /* 46 */
#define BASEPRI_USER_INITIATED  (BASEPRI_DEFAULT + 6)                   /* 37 */
#define BASEPRI_DEFAULT         (MAXPRI_USER - (NRQS_MAX / 4))          /* 31 */
#define MAXPRI_SUPPRESSED       (BASEPRI_DEFAULT - 3)                   /* 28 */
#define BASEPRI_UTILITY         (BASEPRI_DEFAULT - 11)                  /* 20 */
#define MAXPRI_THROTTLE         (MINPRI + 4)                            /* 4 */
#define MINPRI_USER             MINPRI                                  /* 0 */

#define DEPRESSPRI              (MINPRI)                /* depress priority */

#define MAXPRI_PROMOTE          (MAXPRI_KERNEL)         /* ceiling for mutex promotion */
#define MINPRI_RWLOCK           (BASEPRI_BACKGROUND)    /* floor when holding rwlock count */
#define MINPRI_EXEC             (BASEPRI_DEFAULT)       /* floor when in exec state */
#define MINPRI_WAITQ            (BASEPRI_DEFAULT)       /* floor when in waitq handover state */

#define NRQS                    (BASEPRI_REALTIME)      /* Non-realtime levels for runqs */

/* Ensure that NRQS is large enough to represent all non-realtime threads, even promoted ones */
_Static_assert((NRQS == (MAXPRI_PROMOTE + 1)), "Runqueues are too small to hold all non-realtime threads");
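
/*
 * Worked expansion of the derived values above (from NRQS_MAX == 128,
 * MAXPRI == 127):
 *
 *	BASEPRI_REALTIME = 127 - (128 / 4) + 1 = 96
 *	MINPRI_KERNEL    =  95 - (128 / 8) + 1 = 80
 *	MINPRI_RESERVED  =  79 - (128 / 8) + 1 = 64
 *	BASEPRI_DEFAULT  =  63 - (128 / 4)     = 31
 *
 * Hence NRQS == BASEPRI_REALTIME == 96 and MAXPRI_PROMOTE + 1 ==
 * MAXPRI_KERNEL + 1 == 96, which is exactly what the assert above checks.
 */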

/* Type used for thread->sched_mode and saved_mode */
typedef enum {
	TH_MODE_NONE = 0,       /* unassigned, usually for saved_mode only */
	TH_MODE_REALTIME,       /* time constraints supplied */
	TH_MODE_FIXED,          /* use fixed priorities, no decay */
	TH_MODE_TIMESHARE,      /* use timesharing algorithm */
} sched_mode_t;

/*
 * Since the clutch scheduler organizes threads based on the thread group
 * and the scheduling bucket, it is important not to mix threads from
 * multiple priority bands in the same bucket. To achieve that, the clutch
 * scheduler effectively maintains one scheduling bucket per QoS.
 */

/* Buckets used for load calculation */
typedef enum {
	TH_BUCKET_FIXPRI = 0,   /* Fixed-priority */
	TH_BUCKET_SHARE_FG,     /* Timeshare thread above BASEPRI_DEFAULT */
#if CONFIG_SCHED_CLUTCH
	TH_BUCKET_SHARE_IN,     /* Timeshare thread between BASEPRI_USER_INITIATED and BASEPRI_DEFAULT */
#endif /* CONFIG_SCHED_CLUTCH */
	TH_BUCKET_SHARE_DF,     /* Timeshare thread between BASEPRI_DEFAULT and BASEPRI_UTILITY */
	TH_BUCKET_SHARE_UT,     /* Timeshare thread between BASEPRI_UTILITY and MAXPRI_THROTTLE */
	TH_BUCKET_SHARE_BG,     /* Timeshare thread between MAXPRI_THROTTLE and MINPRI */
	TH_BUCKET_RUN,          /* All runnable threads */
	TH_BUCKET_SCHED_MAX = TH_BUCKET_RUN, /* Maximum schedulable buckets */
	TH_BUCKET_MAX,
} sched_bucket_t;

/*
 * Macro to check for invalid priorities.
 */
#define invalid_pri(pri) ((pri) < MINPRI || (pri) > MAXPRI)
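
/*
 * For example, invalid_pri(NOPRI) is true: NOPRI (-1) falls outside the
 * legal range [MINPRI, MAXPRI] == [0, 127].
 */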

struct runq_stats {
	uint64_t                count_sum;
	uint64_t                last_change_timestamp;
};

#if defined(CONFIG_SCHED_TIMESHARE_CORE) || defined(CONFIG_SCHED_PROTO)

struct run_queue {
	int                     highq;                  /* highest runnable queue */
	bitmap_t                bitmap[BITMAP_LEN(NRQS)]; /* run queue bitmap array */
	int                     count;                  /* # of threads total */
	int                     urgency;                /* level of preemption urgency */
	circle_queue_head_t     queues[NRQS];           /* one for each priority */

	struct runq_stats       runq_stats;
};

/* Set bit n in a run queue bitmap, asserting n names a valid run queue */
inline static void
rq_bitmap_set(bitmap_t *map, u_int n)
{
	assert(n < NRQS);
	bitmap_set(map, n);
}

/* Clear bit n in a run queue bitmap, asserting n names a valid run queue */
inline static void
rq_bitmap_clear(bitmap_t *map, u_int n)
{
	assert(n < NRQS);
	bitmap_clear(map, n);
}
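
/*
 * A minimal illustration (not part of the original interface): because the
 * bitmap mirrors which queues[] entries are non-empty, the highest runnable
 * priority can be recovered directly from it rather than by scanning all
 * NRQS queues. This sketch assumes bitmap_first() from <kern/bits.h>, which
 * returns the most significant set bit, or -1 if no bits are set.
 */
inline static int
rq_highest_pri(struct run_queue *rq)
{
	return bitmap_first(rq->bitmap, NRQS);
}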

#endif /* defined(CONFIG_SCHED_TIMESHARE_CORE) || defined(CONFIG_SCHED_PROTO) */

struct rt_queue {
	_Atomic int             count;          /* # of threads total */
	queue_head_t            queue;          /* all runnable RT threads */
#if __SMP__
	decl_simple_lock_data(, rt_lock);
#endif
	struct runq_stats       runq_stats;
};
typedef struct rt_queue *rt_queue_t;

#if defined(CONFIG_SCHED_GRRR_CORE)

/*
 * We map standard Mach priorities to an abstract scale that more properly
 * indicates how we want processor time allocated under contention.
 */
typedef uint8_t grrr_proportional_priority_t;
typedef uint8_t grrr_group_index_t;

#define NUM_GRRR_PROPORTIONAL_PRIORITIES        256
#define MAX_GRRR_PROPORTIONAL_PRIORITY          ((grrr_proportional_priority_t)255)

#if 0
#define NUM_GRRR_GROUPS 8                       /* log(256) */
#endif

#define NUM_GRRR_GROUPS 64                      /* 256/4 */
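
/*
 * With 256 proportional priorities and 64 groups, each group presumably
 * spans 4 adjacent proportional-priority values; the disabled alternative
 * above (8 groups) would correspond to log2(256)-style grouping instead.
 */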

struct grrr_group {
	queue_chain_t           priority_order;         /* next greatest weight group */
	grrr_proportional_priority_t            minpriority;
	grrr_group_index_t      index;

	queue_head_t            clients;
	int                     count;
	uint32_t                weight;
#if 0
	uint32_t                deferred_removal_weight;
#endif
	uint32_t                work;
	thread_t                current_client;
};

struct grrr_run_queue {
	int                     count;
	uint32_t                last_rescale_tick;
	struct grrr_group       groups[NUM_GRRR_GROUPS];
	queue_head_t            sorted_group_list;
	uint32_t                weight;
	grrr_group_t            current_group;

	struct runq_stats       runq_stats;
};

#endif /* defined(CONFIG_SCHED_GRRR_CORE) */

extern int rt_runq_count(processor_set_t);
extern void rt_runq_count_incr(processor_set_t);
extern void rt_runq_count_decr(processor_set_t);

#if defined(CONFIG_SCHED_MULTIQ)
sched_group_t sched_group_create(void);
void sched_group_destroy(sched_group_t sched_group);
#endif /* defined(CONFIG_SCHED_MULTIQ) */

/*
 *	Scheduler routines.
 */

/* Handle quantum expiration for an executing thread */
extern void             thread_quantum_expire(
	timer_call_param_t      processor,
	timer_call_param_t      thread);

/* Context switch check for current processor */
extern ast_t    csw_check(
	thread_t                thread,
	processor_t             processor,
	ast_t                   check_reason);

/* Check for pending ASTs */
extern void ast_check(processor_t processor);

extern void sched_update_generation_count(void);

#if defined(CONFIG_SCHED_TIMESHARE_CORE)
extern uint32_t std_quantum, min_std_quantum;
extern uint32_t std_quantum_us;
#endif /* CONFIG_SCHED_TIMESHARE_CORE */

extern uint32_t thread_depress_time;
extern uint32_t default_timeshare_computation;
extern uint32_t default_timeshare_constraint;

extern uint32_t max_rt_quantum, min_rt_quantum;

extern int default_preemption_rate;
extern int default_bg_preemption_rate;

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

/*
 * Age usage at approximately (1 << SCHED_TICK_SHIFT) times per second.
 * Aging may be deferred during periods where all processors are idle
 * and is cumulatively applied during periods of activity.
 */
#define SCHED_TICK_SHIFT        3
#define SCHED_TICK_MAX_DELTA    (8)
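
/*
 * Worked example, assuming the values above: with SCHED_TICK_SHIFT == 3,
 * usage ages (1 << 3) == 8 times per second, i.e. sched_tick_interval is
 * roughly 125 ms; SCHED_TICK_MAX_DELTA bounds how many deferred ticks may
 * be applied at once when processors become active again.
 */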

extern unsigned sched_tick;
extern uint32_t sched_tick_interval;

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

extern uint64_t sched_one_second_interval;

/* Periodic computation of various averages */
extern void compute_sched_load(void);

extern void compute_averages(uint64_t);

extern void compute_averunnable(
	void                    *nrun);

extern void compute_stack_target(
	void                    *arg);

extern void compute_pageout_gc_throttle(
	void                    *arg);

extern void compute_pmap_gc_throttle(
	void                    *arg);

/*
 * Conversion factor from usage
 * to priority.
 */
#if defined(CONFIG_SCHED_TIMESHARE_CORE)

#define MAX_LOAD (NRQS - 1)
extern uint32_t sched_pri_shifts[TH_BUCKET_MAX];
extern uint32_t sched_fixed_shift;
extern int8_t   sched_load_shifts[NRQS];
extern uint32_t sched_decay_usage_age_factor;
void sched_timeshare_consider_maintenance(uint64_t ctime);
#endif /* CONFIG_SCHED_TIMESHARE_CORE */

void sched_consider_recommended_cores(uint64_t ctime, thread_t thread);

extern int32_t sched_poll_yield_shift;
extern uint64_t sched_safe_duration;

extern uint32_t sched_load_average, sched_mach_factor;

extern uint32_t avenrun[3], mach_factor[3];

extern uint64_t max_unsafe_computation;
extern uint64_t max_poll_computation;

extern uint32_t sched_run_buckets[TH_BUCKET_MAX];

extern uint32_t sched_run_incr(thread_t thread);
extern uint32_t sched_run_decr(thread_t thread);
extern void sched_update_thread_bucket(thread_t thread);

#define SCHED_DECAY_TICKS       32
struct shift_data {
	int     shift1;
	int     shift2;
};

/*
 * The thread_timer_delta macro accumulates the deltas of both thread
 * timers (system and user) into `delta'.
 */
#define thread_timer_delta(thread, delta)                                  \
MACRO_BEGIN                                                                \
	(delta) = (typeof(delta))timer_delta(&(thread)->system_timer,     \
	    &(thread)->system_timer_save);                                 \
	(delta) += (typeof(delta))timer_delta(&(thread)->user_timer,      \
	    &(thread)->user_timer_save);                                   \
MACRO_END
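
/*
 * Illustrative use (hypothetical caller; `delta' may be any integer type,
 * which is why the assignments above cast through typeof()):
 *
 *	uint64_t delta;
 *	thread_timer_delta(thread, delta);
 *	// delta now holds the combined system + user timer delta
 */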

#endif /* _KERN_SCHED_H_ */