/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * Author: Avadis Tevanian, Jr.
 *
 *	Header file for scheduler.
 */
67 #ifndef _KERN_SCHED_H_
68 #define _KERN_SCHED_H_
70 #include <mach/policy.h>
71 #include <kern/kern_types.h>
73 #include <kern/circle_queue.h>
74 #include <kern/macro_help.h>
75 #include <kern/timer_call.h>
77 #include <kern/kalloc.h>
78 #include <kern/bits.h>
#define NRQS_MAX        (128)           /* maximum number of priority levels */

#define MAXPRI          (NRQS_MAX-1)    /* highest legal priority schedulable */
#define MINPRI          0               /* lowest legal priority schedulable */
#define IDLEPRI         MINPRI          /* idle thread priority */

/*
 *	High-level priority assignments
 *
 *************************************************************************
 *	127		Reserved (real-time)
 *		...
 *	96		Reserved (real-time)
 *		...
 *	80		Kernel mode only
 *	79		System high priority
 *		...
 *	64		System high priority
 *	63		Elevated priorities
 *		...
 *	52		Elevated priorities
 *	51		Elevated priorities (incl. BSD +nice)
 *		...
 *	32		Elevated priorities (incl. BSD +nice)
 *	31		Default (default base for threads)
 *	30		Lowered priorities (incl. BSD -nice)
 *		...
 *	11		Lowered priorities (incl. BSD -nice)
 *	10		Lowered priorities (aged pri's)
 *		...
 *	0		Lowered priorities (aged pri's / idle)
 *************************************************************************
 */

#define BASEPRI_RTQUEUES        (BASEPRI_REALTIME + 1)                  /* 97 */
#define BASEPRI_REALTIME        (MAXPRI - (NRQS_MAX / 4) + 1)           /* 96 */

#define MAXPRI_KERNEL           (BASEPRI_REALTIME - 1)                  /* 95 */
#define BASEPRI_PREEMPT_HIGH    (BASEPRI_PREEMPT + 1)                   /* 93 */
#define BASEPRI_PREEMPT         (MAXPRI_KERNEL - 3)                     /* 92 */
#define BASEPRI_VM              (BASEPRI_PREEMPT - 1)                   /* 91 */

#define BASEPRI_KERNEL          (MINPRI_KERNEL + 1)                     /* 81 */
#define MINPRI_KERNEL           (MAXPRI_KERNEL - (NRQS_MAX / 8) + 1)    /* 80 */

#define MAXPRI_RESERVED         (MINPRI_KERNEL - 1)                     /* 79 */
#define BASEPRI_GRAPHICS        (MAXPRI_RESERVED - 3)                   /* 76 */
#define MINPRI_RESERVED         (MAXPRI_RESERVED - (NRQS_MAX / 8) + 1)  /* 64 */

#define MAXPRI_USER             (MINPRI_RESERVED - 1)                   /* 63 */
#define BASEPRI_CONTROL         (BASEPRI_DEFAULT + 17)                  /* 48 */
#define BASEPRI_FOREGROUND      (BASEPRI_DEFAULT + 16)                  /* 47 */
#define BASEPRI_BACKGROUND      (BASEPRI_DEFAULT + 15)                  /* 46 */
#define BASEPRI_USER_INITIATED  (BASEPRI_DEFAULT + 6)                   /* 37 */
#define BASEPRI_DEFAULT         (MAXPRI_USER - (NRQS_MAX / 4))          /* 31 */
#define MAXPRI_SUPPRESSED       (BASEPRI_DEFAULT - 3)                   /* 28 */
#define BASEPRI_UTILITY         (BASEPRI_DEFAULT - 11)                  /* 20 */
#define MAXPRI_THROTTLE         (MINPRI + 4)                            /* 4 */
#define MINPRI_USER             MINPRI                                  /* 0 */

#define DEPRESSPRI              (MINPRI)                /* depress priority */

#define MAXPRI_PROMOTE          (MAXPRI_KERNEL)         /* ceiling for mutex promotion */
#define MINPRI_RWLOCK           (BASEPRI_BACKGROUND)    /* floor when holding rwlock count */
#define MINPRI_EXEC             (BASEPRI_DEFAULT)       /* floor when in exec state */
#define MINPRI_WAITQ            (BASEPRI_DEFAULT)       /* floor when in waitq handover state */

#define NRQS                    (BASEPRI_REALTIME)      /* Non-realtime levels for runqs */

/* Ensure that NRQS is large enough to represent all non-realtime threads; even promoted ones */
_Static_assert((NRQS == (MAXPRI_PROMOTE + 1)), "Runqueues are too small to hold all non-realtime threads");
/* Type used for thread->sched_mode and saved_mode */
/* NOTE(review): the `typedef enum {` / `} sched_mode_t;` lines were lost in
 * extraction; reconstructed from the "thread->sched_mode" comment — confirm
 * the typedef name against upstream. */
typedef enum {
	TH_MODE_NONE = 0,       /* unassigned, usually for saved_mode only */
	TH_MODE_REALTIME,       /* time constraints supplied */
	TH_MODE_FIXED,          /* use fixed priorities, no decay */
	TH_MODE_TIMESHARE,      /* use timesharing algorithm */
} sched_mode_t;
/*
 * Since the clutch scheduler organizes threads based on the thread group
 * and the scheduling bucket, it's important to not mix threads from multiple
 * priority bands into the same bucket. To achieve that, in the clutch bucket
 * world, there is a scheduling bucket per QoS effectively.
 */

/* Buckets used for load calculation */
/* NOTE(review): the enum opener/closer lines were lost in extraction; the
 * trailing TH_BUCKET_MAX member is reconstructed from its uses as an array
 * bound below (sched_pri_shifts[TH_BUCKET_MAX], sched_run_buckets[TH_BUCKET_MAX]).
 * Confirm against upstream. */
typedef enum {
	TH_BUCKET_FIXPRI = 0,   /* Fixed-priority */
	TH_BUCKET_SHARE_FG,     /* Timeshare thread above BASEPRI_DEFAULT */
#if CONFIG_SCHED_CLUTCH
	TH_BUCKET_SHARE_IN,     /* Timeshare thread between BASEPRI_USER_INITIATED and BASEPRI_DEFAULT */
#endif /* CONFIG_SCHED_CLUTCH */
	TH_BUCKET_SHARE_DF,     /* Timeshare thread between BASEPRI_DEFAULT and BASEPRI_UTILITY */
	TH_BUCKET_SHARE_UT,     /* Timeshare thread between BASEPRI_UTILITY and MAXPRI_THROTTLE */
	TH_BUCKET_SHARE_BG,     /* Timeshare thread between MAXPRI_THROTTLE and MINPRI */
	TH_BUCKET_RUN,          /* All runnable threads */
	TH_BUCKET_SCHED_MAX = TH_BUCKET_RUN,    /* Maximum schedulable buckets */
	TH_BUCKET_MAX,
} sched_bucket_t;
/*
 * Macro to check for invalid priorities.
 * True when pri lies outside the legal schedulable range [MINPRI, MAXPRI].
 */
#define invalid_pri(pri) ((pri) < MINPRI || (pri) > MAXPRI)
/*
 * Per-run-queue statistics, embedded in the run queue variants below.
 *
 * NOTE(review): the extraction this file was recovered from dropped the
 * struct's opening/closing lines and possibly additional members before the
 * visible field; verify the full member list against upstream before relying
 * on this layout.
 */
struct runq_stats {
	uint64_t                last_change_timestamp;
};
#if defined(CONFIG_SCHED_TIMESHARE_CORE) || defined(CONFIG_SCHED_PROTO)

/*
 * Priority run queue: one circular queue per non-realtime priority level,
 * with a bitmap marking which levels are occupied.
 *
 * NOTE(review): the struct opener/closer lines were lost in extraction;
 * reconstructed from the visible members — verify against upstream.
 */
struct run_queue {
	int                     highq;                    /* highest runnable queue */
	bitmap_t                bitmap[BITMAP_LEN(NRQS)]; /* run queue bitmap array */
	int                     count;                    /* # of threads total */
	int                     urgency;                  /* level of preemption urgency */
	circle_queue_head_t     queues[NRQS];             /* one for each priority */

	struct runq_stats       runq_stats;
};

/* Mark priority level n occupied in a run queue bitmap. */
inline static void
rq_bitmap_set(bitmap_t *map, u_int n)
{
	/* NOTE(review): body lines were lost in extraction; reconstructed by
	 * symmetry with rq_bitmap_clear below — upstream may also assert a
	 * bound on n. Verify. */
	bitmap_set(map, n);
}

/* Mark priority level n empty in a run queue bitmap. */
inline static void
rq_bitmap_clear(bitmap_t *map, u_int n)
{
	bitmap_clear(map, n);
}

#endif /* defined(CONFIG_SCHED_TIMESHARE_CORE) || defined(CONFIG_SCHED_PROTO) */
251 _Atomic
int count
; /* # of threads total */
252 queue_head_t queue
; /* all runnable RT threads */
254 decl_simple_lock_data(, rt_lock
);
256 struct runq_stats runq_stats
;
258 typedef struct rt_queue
*rt_queue_t
;
#if defined(CONFIG_SCHED_GRRR_CORE)

/*
 * We map standard Mach priorities to an abstract scale that more properly
 * indicates how we want processor time allocated under contention.
 */
typedef uint8_t grrr_proportional_priority_t;
typedef uint8_t grrr_group_index_t;

#define NUM_GRRR_PROPORTIONAL_PRIORITIES        256
#define MAX_GRRR_PROPORTIONAL_PRIORITY          ((grrr_proportional_priority_t)255)

#if 0
/* NOTE(review): both NUM_GRRR_GROUPS definitions were visible in the
 * recovered text; only one can be live, so the disabling guard around this
 * one was reconstructed. Verify which grouping upstream enables. */
#define NUM_GRRR_GROUPS 8       /* log(256) */
#endif

#define NUM_GRRR_GROUPS 64      /* 256/4 */

/*
 * A group of scheduling clients sharing one proportional-priority band.
 *
 * NOTE(review): the struct opener was lost in extraction and there are gaps
 * between the visible members — upstream likely declares additional fields
 * here; verify before relying on the layout.
 */
struct grrr_group {
	queue_chain_t                   priority_order; /* next greatest weight group */
	grrr_proportional_priority_t    minpriority;
	grrr_group_index_t              index;

	queue_head_t                    clients;

	uint32_t                        deferred_removal_weight;

	thread_t                        current_client;
};

/* GRRR run queue: clients partitioned into groups kept in a sorted list. */
struct grrr_run_queue {
	/* NOTE(review): extraction gaps — additional members may be missing. */
	uint32_t                last_rescale_tick;
	struct grrr_group       groups[NUM_GRRR_GROUPS];
	queue_head_t            sorted_group_list;
	grrr_group_t            current_group;

	struct runq_stats       runq_stats;
};

#endif /* defined(CONFIG_SCHED_GRRR_CORE) */
/* Real-time run queue thread count, per processor set. */
extern int      rt_runq_count(processor_set_t);
extern void     rt_runq_count_incr(processor_set_t);
extern void     rt_runq_count_decr(processor_set_t);

#if defined(CONFIG_SCHED_MULTIQ)
sched_group_t   sched_group_create(void);
void            sched_group_destroy(sched_group_t sched_group);
#endif /* defined(CONFIG_SCHED_MULTIQ) */
318 * Scheduler routines.
321 /* Handle quantum expiration for an executing thread */
322 extern void thread_quantum_expire(
323 timer_call_param_t processor
,
324 timer_call_param_t thread
);
326 /* Context switch check for current processor */
327 extern ast_t
csw_check(
329 processor_t processor
,
332 /* Check for pending ASTs */
333 extern void ast_check(processor_t processor
);
extern void     sched_update_generation_count(void);

#if defined(CONFIG_SCHED_TIMESHARE_CORE)
/* Timeshare quantum lengths (absolute time and microseconds). */
extern uint32_t std_quantum, min_std_quantum;
extern uint32_t std_quantum_us;
#endif /* CONFIG_SCHED_TIMESHARE_CORE */

extern uint32_t thread_depress_time;
extern uint32_t default_timeshare_computation;
extern uint32_t default_timeshare_constraint;

/* Real-time quantum bounds. */
extern uint32_t max_rt_quantum, min_rt_quantum;

extern int      default_preemption_rate;
extern int      default_bg_preemption_rate;
#if defined(CONFIG_SCHED_TIMESHARE_CORE)

/*
 * Age usage at approximately (1 << SCHED_TICK_SHIFT) times per second
 * Aging may be deferred during periods where all processors are idle
 * and cumulatively applied during periods of activity.
 */
#define SCHED_TICK_SHIFT        3
#define SCHED_TICK_MAX_DELTA    (8)

extern unsigned sched_tick;
extern uint32_t sched_tick_interval;

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

extern uint64_t sched_one_second_interval;
/* Periodic computation of various averages */
extern void     compute_sched_load(void);

extern void     compute_averages(uint64_t);

/* NOTE(review): the parameter lines of the following prototypes were lost in
 * extraction; reconstructed as `void *` (timer/thread-call callback shape) —
 * verify against upstream. */
extern void     compute_averunnable(
	void                    *nrun);

extern void     compute_stack_target(
	void                    *arg);

extern void     compute_pageout_gc_throttle(
	void                    *arg);

extern void     compute_pmap_gc_throttle(
	void                    *arg);
/*
 *	Conversion factor from usage
 *	to priority.
 */
#if defined(CONFIG_SCHED_TIMESHARE_CORE)

#define MAX_LOAD        (NRQS - 1)
extern uint32_t sched_pri_shifts[TH_BUCKET_MAX];
extern uint32_t sched_fixed_shift;
extern int8_t   sched_load_shifts[NRQS];
extern uint32_t sched_decay_usage_age_factor;
void sched_timeshare_consider_maintenance(uint64_t ctime);
#endif /* CONFIG_SCHED_TIMESHARE_CORE */
399 void sched_consider_recommended_cores(uint64_t ctime
, thread_t thread
);
401 extern int32_t sched_poll_yield_shift
;
402 extern uint64_t sched_safe_duration
;
404 extern uint32_t sched_load_average
, sched_mach_factor
;
406 extern uint32_t avenrun
[3], mach_factor
[3];
408 extern uint64_t max_unsafe_computation
;
409 extern uint64_t max_poll_computation
;
411 extern uint32_t sched_run_buckets
[TH_BUCKET_MAX
];
413 extern uint32_t sched_run_incr(thread_t thread
);
414 extern uint32_t sched_run_decr(thread_t thread
);
415 extern void sched_update_thread_bucket(thread_t thread
);
417 #define SCHED_DECAY_TICKS 32
424 * thread_timer_delta macro takes care of both thread timers.
426 #define thread_timer_delta(thread, delta) \
428 (delta) = (typeof(delta))timer_delta(&(thread)->system_timer, \
429 &(thread)->system_timer_save); \
430 (delta) += (typeof(delta))timer_delta(&(thread)->user_timer, \
431 &(thread)->user_timer_save); \
434 #endif /* _KERN_SCHED_H_ */