/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/machine.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/debug.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <sys/kdebug.h>

static void
sched_dualq_init(void);

static thread_t
sched_dualq_steal_thread(processor_set_t pset);

static void
sched_dualq_thread_update_scan(sched_update_scan_context_t scan_context);

static boolean_t
sched_dualq_processor_enqueue(processor_t processor, thread_t thread, integer_t options);

static boolean_t
sched_dualq_processor_queue_remove(processor_t processor, thread_t thread);

static ast_t
sched_dualq_processor_csw_check(processor_t processor);

static boolean_t
sched_dualq_processor_queue_has_priority(processor_t processor, int priority, boolean_t gte);

static int
sched_dualq_runq_count(processor_t processor);

static boolean_t
sched_dualq_processor_queue_empty(processor_t processor);

static uint64_t
sched_dualq_runq_stats_count_sum(processor_t processor);

static int
sched_dualq_processor_bound_count(processor_t processor);

static void
sched_dualq_pset_init(processor_set_t pset);

static void
sched_dualq_processor_init(processor_t processor);

static thread_t
sched_dualq_choose_thread(processor_t processor, int priority, ast_t reason);

static void
sched_dualq_processor_queue_shutdown(processor_t processor);

static sched_mode_t
sched_dualq_initial_thread_sched_mode(task_t parent_task);
const struct sched_dispatch_table sched_dualq_dispatch = {
    .sched_name                         = "dualq",
    .init                               = sched_dualq_init,
    .timebase_init                      = sched_timeshare_timebase_init,
    .processor_init                     = sched_dualq_processor_init,
    .pset_init                          = sched_dualq_pset_init,
    .maintenance_continuation           = sched_timeshare_maintenance_continue,
    .choose_thread                      = sched_dualq_choose_thread,
    .steal_thread_enabled               = TRUE,
    .steal_thread                       = sched_dualq_steal_thread,
    .compute_timeshare_priority         = sched_compute_timeshare_priority,
    .choose_processor                   = choose_processor,
    .processor_enqueue                  = sched_dualq_processor_enqueue,
    .processor_queue_shutdown           = sched_dualq_processor_queue_shutdown,
    .processor_queue_remove             = sched_dualq_processor_queue_remove,
    .processor_queue_empty              = sched_dualq_processor_queue_empty,
    .priority_is_urgent                 = priority_is_urgent,
    .processor_csw_check                = sched_dualq_processor_csw_check,
    .processor_queue_has_priority       = sched_dualq_processor_queue_has_priority,
    .initial_quantum_size               = sched_timeshare_initial_quantum_size,
    .initial_thread_sched_mode          = sched_dualq_initial_thread_sched_mode,
    .can_update_priority                = can_update_priority,
    .update_priority                    = update_priority,
    .lightweight_update_priority        = lightweight_update_priority,
    .quantum_expire                     = sched_default_quantum_expire,
    .processor_runq_count               = sched_dualq_runq_count,
    .processor_runq_stats_count_sum     = sched_dualq_runq_stats_count_sum,
    .processor_bound_count              = sched_dualq_processor_bound_count,
    .thread_update_scan                 = sched_dualq_thread_update_scan,
    .direct_dispatch_to_idle_processors = FALSE,
    .multiple_psets_enabled             = TRUE,
    .sched_groups_enabled               = FALSE,
};

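/*
 * The "dualq" policy keeps two run queues per processor: the shared
 * pset-wide queue (pset->pset_runq), used by every processor in the set
 * for unbound threads, and the per-processor queue (processor->runq),
 * used only for threads bound to that processor.  The helpers below
 * select the appropriate queue for a given thread.
 */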
__attribute__((always_inline))
static inline run_queue_t dualq_main_runq(processor_t processor)
{
    return &processor->processor_set->pset_runq;
}

__attribute__((always_inline))
static inline run_queue_t dualq_bound_runq(processor_t processor)
{
    return &processor->runq;
}

__attribute__((always_inline))
static inline run_queue_t dualq_runq_for_thread(processor_t processor, thread_t thread)
{
    if (thread->bound_processor == PROCESSOR_NULL) {
        return dualq_main_runq(processor);
    } else {
        assert(thread->bound_processor == processor);
        return dualq_bound_runq(processor);
    }
}

static sched_mode_t
sched_dualq_initial_thread_sched_mode(task_t parent_task)
{
    if (parent_task == kernel_task)
        return TH_MODE_FIXED;
    else
        return TH_MODE_TIMESHARE;
}

static void
sched_dualq_processor_init(processor_t processor)
{
    run_queue_init(&processor->runq);
}

static void
sched_dualq_pset_init(processor_set_t pset)
{
    run_queue_init(&pset->pset_runq);
}

static void
sched_dualq_init(void)
{
    sched_timeshare_init();
}

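/*
 * Choose the next thread for this processor.  Returns THREAD_NULL unless
 * one of the two queues holds a thread at or above 'priority'; otherwise
 * dequeues from whichever queue has the higher-priority head, preferring
 * the bound queue on a tie.
 */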
static thread_t
sched_dualq_choose_thread(
    processor_t     processor,
    int             priority,
    __unused ast_t  reason)
{
    run_queue_t main_runq  = dualq_main_runq(processor);
    run_queue_t bound_runq = dualq_bound_runq(processor);
    run_queue_t chosen_runq;

    if (bound_runq->highq < priority &&
        main_runq->highq < priority)
        return THREAD_NULL;

    if (bound_runq->count && main_runq->count) {
        if (bound_runq->highq >= main_runq->highq) {
            chosen_runq = bound_runq;
        } else {
            chosen_runq = main_runq;
        }
    } else if (bound_runq->count) {
        chosen_runq = bound_runq;
    } else if (main_runq->count) {
        chosen_runq = main_runq;
    } else {
        return (THREAD_NULL);
    }

    return run_queue_dequeue(chosen_runq, SCHED_HEADQ);
}

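/*
 * Enqueue the thread on the queue picked by dualq_runq_for_thread() and
 * record the processor in thread->runq, so a later remove can tell whether
 * the thread is still sitting on that processor's (or its pset's) queue.
 */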
static boolean_t
sched_dualq_processor_enqueue(
    processor_t     processor,
    thread_t        thread,
    integer_t       options)
{
    run_queue_t rq = dualq_runq_for_thread(processor, thread);
    boolean_t   result;

    result = run_queue_enqueue(rq, thread, options);
    thread->runq = processor;

    return (result);
}

static boolean_t
sched_dualq_processor_queue_empty(processor_t processor)
{
    return dualq_main_runq(processor)->count == 0 &&
           dualq_bound_runq(processor)->count == 0;
}

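/*
 * Decide whether the running thread should be preempted: compare the
 * highest queued priority against the current thread's priority (a strict
 * '>' only while the thread is still in its first timeslice), and escalate
 * to AST_URGENT when either queue reports pending urgent work.
 */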
static ast_t
sched_dualq_processor_csw_check(processor_t processor)
{
    boolean_t   has_higher;
    int         pri;

    run_queue_t main_runq  = dualq_main_runq(processor);
    run_queue_t bound_runq = dualq_bound_runq(processor);

    assert(processor->active_thread != NULL);

    pri = MAX(main_runq->highq, bound_runq->highq);

    if (processor->first_timeslice) {
        has_higher = (pri > processor->current_pri);
    } else {
        has_higher = (pri >= processor->current_pri);
    }

    if (has_higher) {
        if (main_runq->urgency > 0)
            return (AST_PREEMPT | AST_URGENT);

        if (bound_runq->urgency > 0)
            return (AST_PREEMPT | AST_URGENT);

        return AST_PREEMPT;
    }

    return AST_NONE;
}

static boolean_t
sched_dualq_processor_queue_has_priority(processor_t    processor,
                                         int            priority,
                                         boolean_t      gte)
{
    run_queue_t main_runq  = dualq_main_runq(processor);
    run_queue_t bound_runq = dualq_bound_runq(processor);

    if (main_runq->count == 0 && bound_runq->count == 0)
        return FALSE;

    int qpri = MAX(main_runq->highq, bound_runq->highq);

    if (gte)
        return qpri >= priority;
    else
        return qpri > priority;
}

static int
sched_dualq_runq_count(processor_t processor)
{
    return dualq_main_runq(processor)->count + dualq_bound_runq(processor)->count;
}

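/*
 * Sum run-queue statistics for this processor.  The shared pset queue's
 * counts are folded in only on the lowest-numbered CPU of the pset, so the
 * shared queue is not counted once per processor.
 */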
static uint64_t
sched_dualq_runq_stats_count_sum(processor_t processor)
{
    uint64_t bound_sum = dualq_bound_runq(processor)->runq_stats.count_sum;

    if (processor->cpu_id == processor->processor_set->cpu_set_low)
        return bound_sum + dualq_main_runq(processor)->runq_stats.count_sum;
    else
        return bound_sum;
}

static int
sched_dualq_processor_bound_count(processor_t processor)
{
    return dualq_bound_runq(processor)->count;
}

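/*
 * Entered with the pset locked when a processor is taken offline.  If other
 * processors in the pset remain online there is nothing to migrate;
 * otherwise the shared queue is drained onto a local list and each thread
 * is re-dispatched via thread_setrun() after the pset lock is dropped.
 */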
static void
sched_dualq_processor_queue_shutdown(processor_t processor)
{
    processor_set_t pset = processor->processor_set;
    run_queue_t     rq   = dualq_main_runq(processor);
    thread_t        thread;
    queue_head_t    tqueue;

    /* We only need to migrate threads if this is the last active processor in the pset */
    if (pset->online_processor_count > 0) {
        pset_unlock(pset);
        return;
    }

    queue_init(&tqueue);

    while (rq->count > 0) {
        thread = run_queue_dequeue(rq, SCHED_HEADQ);
        enqueue_tail(&tqueue, &thread->runq_links);
    }

    pset_unlock(pset);

    qe_foreach_element_safe(thread, &tqueue, runq_links) {

        remqueue(&thread->runq_links);

        thread_lock(thread);

        thread_setrun(thread, SCHED_TAILQ);

        thread_unlock(thread);
    }
}

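/*
 * Remove a thread from this processor's run queue if it is still there.
 * thread->runq is re-checked under the pset lock because the thread may
 * have been dequeued (e.g. chosen to run) before the lock was taken;
 * returns TRUE only if the thread was actually removed.
 */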
static boolean_t
sched_dualq_processor_queue_remove(
    processor_t     processor,
    thread_t        thread)
{
    run_queue_t     rq;
    processor_set_t pset = processor->processor_set;

    pset_lock(pset);

    rq = dualq_runq_for_thread(processor, thread);

    if (processor == thread->runq) {
        /*
         * Thread is on a run queue and we have a lock on
         * that run queue.
         */
        run_queue_remove(rq, thread);
    } else {
        /*
         * The thread left the run queue before we could
         * lock the run queue.
         */
        assert(thread->runq == PROCESSOR_NULL);
        processor = PROCESSOR_NULL;
    }

    pset_unlock(pset);

    return (processor != PROCESSOR_NULL);
}

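/*
 * Entered with 'pset' locked.  Walk the processor sets starting at 'pset',
 * locking each in turn; return the first thread found on a shared run queue
 * (that pset's lock is dropped before returning), or THREAD_NULL if every
 * set's shared queue is empty.
 */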
static thread_t
sched_dualq_steal_thread(processor_set_t pset)
{
    processor_set_t nset, cset = pset;
    thread_t        thread;

    do {
        if (cset->pset_runq.count > 0) {
            thread = run_queue_dequeue(&cset->pset_runq, SCHED_HEADQ);
            pset_unlock(cset);
            return (thread);
        }

        nset = next_pset(cset);

        if (nset != pset) {
            pset_unlock(cset);

            cset = nset;
            pset_lock(cset);
        }
    } while (nset != pset);

    pset_unlock(cset);

    return (THREAD_NULL);
}

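/*
 * Periodic priority-update scan, done in two passes: first the per-processor
 * bound run queues plus each processor's idle thread, then every pset's
 * shared run queue.  Candidates are batched and fixed up by
 * thread_update_process_threads(); a pass restarts when the candidate
 * batch fills up.
 */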
static void
sched_dualq_thread_update_scan(sched_update_scan_context_t scan_context)
{
    boolean_t       restart_needed = FALSE;
    processor_t     processor = processor_list;
    processor_set_t pset;
    thread_t        thread;
    spl_t           s;

    /*
     *  We update the threads associated with each processor (bound and idle threads)
     *  and then update the threads in each pset runqueue.
     */

    do {
        do {
            pset = processor->processor_set;

            s = splsched();
            pset_lock(pset);

            restart_needed = runq_scan(dualq_bound_runq(processor), scan_context);

            pset_unlock(pset);
            splx(s);

            if (restart_needed)
                break;

            thread = processor->idle_thread;
            if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
                if (thread_update_add_thread(thread) == FALSE) {
                    restart_needed = TRUE;
                    break;
                }
            }
        } while ((processor = processor->processor_list) != NULL);

        /* Ok, we now have a collection of candidates -- fix them. */
        thread_update_process_threads();

    } while (restart_needed);

    pset = &pset0;

    do {
        do {
            s = splsched();
            pset_lock(pset);

            restart_needed = runq_scan(&pset->pset_runq, scan_context);

            pset_unlock(pset);
            splx(s);

            if (restart_needed)
                break;
        } while ((pset = pset->pset_list) != NULL);

        /* Ok, we now have a collection of candidates -- fix them. */
        thread_update_process_threads();

    } while (restart_needed);
}