/*
 * Copyright (c) 2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _KERN_SCHED_CLUTCH_H_
#define _KERN_SCHED_CLUTCH_H_

#include <kern/sched.h>
#include <machine/atomic.h>
#include <kern/priority_queue.h>
#include <kern/thread_group.h>
#include <kern/bits.h>

#if CONFIG_SCHED_CLUTCH

/*
 * Clutch ordering based on thread group flags (specified
 * by the thread grouping mechanism). These properties
 * define a thread group specific priority boost.
 *
 * The current implementation gives a slight boost to
 * HIGH & MED thread groups which effectively deprioritizes
 * daemon thread groups which are marked "Efficient" on AMP
 * systems.
 */
#define SCHED_CLUTCH_TG_PRI_LOW         0x0
#define SCHED_CLUTCH_TG_PRI_MED         0x1
#define SCHED_CLUTCH_TG_PRI_HIGH        0x2

/*
 * For the current implementation, bound threads are not managed
 * in the clutch hierarchy. This helper macro is used to indicate
 * if the thread should be in the hierarchy.
 *
 * The macro argument is parenthesized so the macro expands
 * correctly for any pointer-valued expression (CERT PRE01-C).
 */
#define SCHED_CLUTCH_THREAD_ELIGIBLE(thread)    (((thread)->bound_processor) == PROCESSOR_NULL)
60 | ||
/*
 * Clutch Bucket Runqueue Structure.
 *
 * A priority-indexed set of circular queues of clutch buckets
 * (one queue per priority level, NRQS_MAX levels) with a bitmap
 * for fast lookup of occupied priority levels.
 */
struct sched_clutch_bucket_runq {
	/* highest priority level with an enqueued clutch bucket */
	int                     scbrq_highq;
	/* bitmap of priority levels that currently have enqueued buckets */
	bitmap_t                scbrq_bitmap[BITMAP_LEN(NRQS_MAX)];
	/* total number of clutch buckets enqueued across all levels */
	int                     scbrq_count;
	/* one circular queue of clutch buckets per priority level */
	circle_queue_head_t     scbrq_queues[NRQS_MAX];
};
typedef struct sched_clutch_bucket_runq *sched_clutch_bucket_runq_t;
71 | ||
/*
 *
 * Clutch hierarchy locking protocol
 *
 * The scheduler clutch hierarchy is protected by a combination of
 * atomics and pset lock.
 * - All fields protected by the pset lock are annotated with (P)
 * - All fields updated using atomics are annotated with (A)
 * - All fields that are unprotected and are not updated after
 *   initialization are annotated with (I)
 */

/*
 * struct sched_clutch_root_bucket
 *
 * A clutch_root_bucket represents all threads across all thread groups
 * that are in the same scheduler bucket (FG/IN/...). The clutch_root_bucket
 * is selected for execution by the root level bucket selection algorithm
 * which bases the decision on the clutch_root_bucket's deadline (EDF). The
 * deadline for a root bucket is calculated based on its runnable timestamp
 * and the worst-case-execution-latency values specified in sched_clutch_root_bucket_wcel[]
 */
struct sched_clutch_root_bucket {
	/* (I) sched bucket represented by this root bucket */
	uint8_t                 scrb_bucket;
	/* (P) runqueue of all clutch buckets in this sched bucket */
	struct sched_clutch_bucket_runq scrb_clutch_buckets;
	/* (P) priority queue entry to use for enqueueing root bucket into root prioq */
	struct priority_queue_entry scrb_pqlink;
	/* (P) ageout deadline for this root bucket (key for EDF selection) */
	uint64_t                scrb_deadline;
	/* (P) warped deadline for root bucket */
	uint64_t                scrb_warped_deadline;
	/* (P) warp remaining for root bucket */
	uint64_t                scrb_warp_remaining;
};
typedef struct sched_clutch_root_bucket *sched_clutch_root_bucket_t;
109 | ||
/*
 * struct sched_clutch_root
 *
 * A clutch_root represents the root of the hierarchy. It maintains a
 * priority queue of all runnable root buckets. The clutch_root also
 * maintains the information about the last clutch_root_bucket scheduled
 * in order to implement bucket level quantum. The bucket level quantums
 * allow low priority buckets to get a "fair" chance of using the CPU even
 * if they contain a bunch of short executing threads. The bucket quantums
 * are configured using sched_clutch_root_bucket_quantum[]
 */
struct sched_clutch_root {
	/* (P) root level priority; represents the highest runnable thread in the hierarchy */
	int16_t                 scr_priority;
	/* (P) total number of runnable threads in the hierarchy */
	uint16_t                scr_thr_count;
	/* (P) root level urgency; represents the urgency of the whole hierarchy for pre-emption purposes */
	int16_t                 scr_urgency;

	/* (I) processor set this hierarchy belongs to */
	processor_set_t         scr_pset;
	/*
	 * (P) list of all runnable clutch buckets across the system;
	 * allows easy iteration in the sched tick based timesharing code
	 */
	queue_head_t            scr_clutch_buckets;
	/*
	 * (P) list of all runnable foreign buckets in this hierarchy;
	 * used for tracking thread groups which need to be migrated when
	 * psets are available
	 */
	queue_head_t            scr_foreign_buckets;

	/* Root level bucket management */

	/* (P) bitmap of all runnable clutch_root_buckets; used for root pri calculation */
	bitmap_t                scr_runnable_bitmap[BITMAP_LEN(TH_BUCKET_SCHED_MAX)];
	/* (P) bitmap of all runnable root buckets which have warps remaining */
	bitmap_t                scr_warp_available[BITMAP_LEN(TH_BUCKET_SCHED_MAX)];
	/* (P) priority queue of all runnable clutch_root_buckets */
	struct priority_queue   scr_root_buckets;
	/* (P) storage for all possible clutch_root_buckets; indexed by sched bucket */
	struct sched_clutch_root_bucket scr_buckets[TH_BUCKET_SCHED_MAX];
};
typedef struct sched_clutch_root *sched_clutch_root_t;
155 | ||
/* forward declaration for sched_clutch */
struct sched_clutch;

/*
 * sched_clutch_bucket_cpu_data_t
 *
 * Used for maintaining clutch bucket used and blocked time. The
 * values are used for calculating the interactivity score for the
 * clutch bucket.
 *
 * Since the CPU used/blocked calculation uses wide atomics, the data
 * types used are different based on the platform: on arm64 two 64-bit
 * counters are packed into a 128-bit value; elsewhere two 32-bit
 * counters are packed into a 64-bit value.
 */

#if __arm64__

#define CLUTCH_CPU_DATA_MAX             (UINT64_MAX)
typedef uint64_t                        clutch_cpu_data_t;
typedef unsigned __int128               clutch_cpu_data_wide_t;

#else /* __arm64__ */

#define CLUTCH_CPU_DATA_MAX             (UINT32_MAX)
typedef uint32_t                        clutch_cpu_data_t;
typedef uint64_t                        clutch_cpu_data_wide_t;

#endif /* __arm64__ */

typedef union sched_clutch_bucket_cpu_data {
	struct {
		/* Clutch bucket CPU used across all threads */
		clutch_cpu_data_t       scbcd_cpu_used;
		/* Clutch bucket voluntary blocked time */
		clutch_cpu_data_t       scbcd_cpu_blocked;
	} cpu_data;
	/* both counters viewed as one wide value, enabling a single atomic update */
	clutch_cpu_data_wide_t  scbcd_cpu_data_packed;
} sched_clutch_bucket_cpu_data_t;
193 | ||
/*
 * struct sched_clutch_bucket
 *
 * A sched_clutch_bucket represents the set of threads for a thread
 * group at a particular scheduling bucket. It maintains information
 * about the CPU usage & blocking behavior of all threads part of
 * the clutch_bucket and maintains the timesharing attributes for
 * threads in its runq. It uses the decay based algorithm to timeshare
 * among threads in the runq.
 */
struct sched_clutch_bucket {
	/* (I) bucket for the clutch_bucket */
	uint8_t                 scb_bucket;
	/* (P) priority of the clutch bucket */
	uint8_t                 scb_priority;
	/* (P) interactivity score of the clutch bucket */
	uint8_t                 scb_interactivity_score;
	/* (P) flag to indicate if the bucket is a foreign bucket */
	bool                    scb_foreign;

	/* Properties used for timesharing threads in this clutch_bucket */

	/* (P) number of threads in this clutch_bucket; should match runq.count */
	uint16_t                scb_thr_count;
	/* (A) run count (running + runnable) for this clutch_bucket */
	uint16_t _Atomic        scb_run_count;
	/* (A) sched tick when the clutch bucket load/shifts were updated */
	uint32_t _Atomic        scb_timeshare_tick;
	/* (A) priority shifts for threads in the clutch_bucket */
	uint32_t _Atomic        scb_pri_shift;
	/* (P) linkage for all clutch_buckets in a root bucket; used for tick operations */
	queue_chain_t           scb_listlink;

#if __AMP__
	/* (P) linkage for all "foreign" clutch buckets in the root clutch */
	queue_chain_t           scb_foreignlink;
#endif /* __AMP__ */

	/* (P) timestamp for the last time the interactivity score was updated */
	uint64_t                scb_interactivity_ts;
	/* (P) timestamp for the last time the clutch_bucket blocked */
	uint64_t                scb_blocked_ts;

	/* (A) CPU usage information for the clutch bucket */
	sched_clutch_bucket_cpu_data_t  scb_cpu_data;

	/* (P) linkage for clutch_bucket in root_bucket runqueue */
	queue_chain_t           scb_runqlink;
	/* (I) clutch to which this clutch bucket belongs */
	struct sched_clutch     *scb_clutch;
	/* (A) pointer to the root of the hierarchy this bucket is in */
	struct sched_clutch_root *scb_root;
	/* (P) priority queue of threads based on their promoted/base priority */
	struct priority_queue   scb_clutchpri_prioq;
	/* (P) runq of threads in clutch_bucket */
	struct run_queue        scb_runq;
};
typedef struct sched_clutch_bucket *sched_clutch_bucket_t;
252 | ||
253 | ||
/*
 * struct sched_clutch
 *
 * A sched_clutch is a 1:1 mapping to a thread group. It maintains the
 * storage for all clutch buckets for this thread group and some properties
 * of the thread group (such as flags etc.)
 */
struct sched_clutch {
	/*
	 * (A) number of runnable threads in sched_clutch; needs to be atomic
	 * to support cross cluster sched_clutch migrations.
	 */
	uint16_t _Atomic        sc_thr_count;
	/*
	 * Grouping specific parameters. Currently the implementation only
	 * supports thread_group based grouping; the anonymous unions leave
	 * room for alternative grouping mechanisms.
	 */
	union {
		/* (A) priority specified by the thread grouping mechanism */
		uint8_t _Atomic sc_tg_priority;
	};
	union {
		/* (I) Pointer to thread group */
		struct thread_group     *sc_tg;
	};
	/* (I) storage for all clutch_buckets for this clutch; indexed by sched bucket */
	struct sched_clutch_bucket sc_clutch_buckets[TH_BUCKET_SCHED_MAX];
};
typedef struct sched_clutch *sched_clutch_t;
283 | ||
284 | ||
/* Clutch lifecycle management */

/* Initializes a sched_clutch and associates it with the given thread group */
void sched_clutch_init_with_thread_group(sched_clutch_t, struct thread_group *);
/* Tears down a sched_clutch */
void sched_clutch_destroy(sched_clutch_t);

/* Clutch thread membership management */

/* Updates a thread's clutch membership from the first clutch to the second — confirm argument order against sched_clutch.c */
void sched_clutch_thread_clutch_update(thread_t, sched_clutch_t, sched_clutch_t);

/* Clutch timesharing stats management */

/* Increments run-count stats for the thread's sched bucket; return value presumably the updated count — verify at call sites */
uint32_t sched_clutch_thread_run_bucket_incr(thread_t, sched_bucket_t);
/* Decrements run-count stats for the thread's sched bucket; return value presumably the updated count — verify at call sites */
uint32_t sched_clutch_thread_run_bucket_decr(thread_t, sched_bucket_t);
/* Accounts CPU usage (delta in the uint64_t argument) to the thread's clutch bucket */
void sched_clutch_cpu_usage_update(thread_t, uint64_t);
/* Returns the timesharing priority shift for the thread in the given sched bucket */
uint32_t sched_clutch_thread_pri_shift(thread_t, sched_bucket_t);

/* Clutch properties accessors */

/* Returns the runnable thread count of the given root hierarchy */
uint32_t sched_clutch_root_count(sched_clutch_root_t);

/* Grouping specific external routines */

/* Returns the sched_clutch for a thread; implemented by the grouping mechanism */
extern sched_clutch_t sched_clutch_for_thread(thread_t);
303 | ||
#endif /* CONFIG_SCHED_CLUTCH */

#endif /* _KERN_SCHED_CLUTCH_H_ */