/*
 * osfmk/kern/sched_dualq.c — Apple XNU (xnu-4570.71.2).
 */
/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
29#include <mach/mach_types.h>
30#include <mach/machine.h>
31
32#include <machine/machine_routines.h>
33#include <machine/sched_param.h>
34#include <machine/machine_cpu.h>
35
36#include <kern/kern_types.h>
37#include <kern/debug.h>
38#include <kern/machine.h>
39#include <kern/misc_protos.h>
40#include <kern/processor.h>
41#include <kern/queue.h>
42#include <kern/sched.h>
43#include <kern/sched_prim.h>
44#include <kern/task.h>
45#include <kern/thread.h>
46
47#include <sys/kdebug.h>
48
/* Forward declarations for the dualq policy's sched_dispatch_table entry points. */

static void
sched_dualq_init(void);

static thread_t
sched_dualq_steal_thread(processor_set_t pset);

static void
sched_dualq_thread_update_scan(sched_update_scan_context_t scan_context);

static boolean_t
sched_dualq_processor_enqueue(processor_t processor, thread_t thread, integer_t options);

static boolean_t
sched_dualq_processor_queue_remove(processor_t processor, thread_t thread);

static ast_t
sched_dualq_processor_csw_check(processor_t processor);

static boolean_t
sched_dualq_processor_queue_has_priority(processor_t processor, int priority, boolean_t gte);

static int
sched_dualq_runq_count(processor_t processor);

static boolean_t
sched_dualq_processor_queue_empty(processor_t processor);

static uint64_t
sched_dualq_runq_stats_count_sum(processor_t processor);

static int
sched_dualq_processor_bound_count(processor_t processor);

static void
sched_dualq_pset_init(processor_set_t pset);

static void
sched_dualq_processor_init(processor_t processor);

static thread_t
sched_dualq_choose_thread(processor_t processor, int priority, ast_t reason);

static void
sched_dualq_processor_queue_shutdown(processor_t processor);

static sched_mode_t
sched_dualq_initial_thread_sched_mode(task_t parent_task);
/*
 * Dispatch table for the "dualq" scheduler policy.
 *
 * Unbound timeshare threads share a single per-pset run queue, while
 * processor-bound threads use a per-processor queue (see
 * dualq_runq_for_thread() below).  Real-time threads are handled by the
 * global RT queue (sched_rtglobal_*); timeshare priority decay and quantum
 * sizing reuse the common sched_timeshare_* machinery.
 */
const struct sched_dispatch_table sched_dualq_dispatch = {
	.sched_name                         = "dualq",
	.init                               = sched_dualq_init,
	.timebase_init                      = sched_timeshare_timebase_init,
	.processor_init                     = sched_dualq_processor_init,
	.pset_init                          = sched_dualq_pset_init,
	.maintenance_continuation           = sched_timeshare_maintenance_continue,
	.choose_thread                      = sched_dualq_choose_thread,
	.steal_thread_enabled               = TRUE,
	.steal_thread                       = sched_dualq_steal_thread,
	.compute_timeshare_priority         = sched_compute_timeshare_priority,
	.choose_processor                   = choose_processor,
	.processor_enqueue                  = sched_dualq_processor_enqueue,
	.processor_queue_shutdown           = sched_dualq_processor_queue_shutdown,
	.processor_queue_remove             = sched_dualq_processor_queue_remove,
	.processor_queue_empty              = sched_dualq_processor_queue_empty,
	.priority_is_urgent                 = priority_is_urgent,
	.processor_csw_check                = sched_dualq_processor_csw_check,
	.processor_queue_has_priority       = sched_dualq_processor_queue_has_priority,
	.initial_quantum_size               = sched_timeshare_initial_quantum_size,
	.initial_thread_sched_mode          = sched_dualq_initial_thread_sched_mode,
	.can_update_priority                = can_update_priority,
	.update_priority                    = update_priority,
	.lightweight_update_priority        = lightweight_update_priority,
	.quantum_expire                     = sched_default_quantum_expire,
	.processor_runq_count               = sched_dualq_runq_count,
	.processor_runq_stats_count_sum     = sched_dualq_runq_stats_count_sum,
	.processor_bound_count              = sched_dualq_processor_bound_count,
	.thread_update_scan                 = sched_dualq_thread_update_scan,
	.direct_dispatch_to_idle_processors = FALSE,
	.multiple_psets_enabled             = TRUE,
	.sched_groups_enabled               = FALSE,
	.avoid_processor_enabled            = FALSE,
	.thread_avoid_processor             = NULL,
	.processor_balance                  = sched_SMT_balance,

	/* Real-time threads go through the shared global RT run queue. */
	.rt_runq                            = sched_rtglobal_runq,
	.rt_init                            = sched_rtglobal_init,
	.rt_queue_shutdown                  = sched_rtglobal_queue_shutdown,
	.rt_runq_scan                       = sched_rtglobal_runq_scan,
	.rt_runq_count_sum                  = sched_rtglobal_runq_count_sum,

	.qos_max_parallelism                = sched_qos_max_parallelism,
	.check_spill                        = sched_check_spill,
	.ipi_policy                         = sched_ipi_policy,
	.thread_should_yield                = sched_thread_should_yield,
};
144
145__attribute__((always_inline))
146static inline run_queue_t dualq_main_runq(processor_t processor)
147{
148 return &processor->processor_set->pset_runq;
149}
150
151__attribute__((always_inline))
152static inline run_queue_t dualq_bound_runq(processor_t processor)
153{
154 return &processor->runq;
155}
156
157__attribute__((always_inline))
158static inline run_queue_t dualq_runq_for_thread(processor_t processor, thread_t thread)
159{
160 if (thread->bound_processor == PROCESSOR_NULL) {
161 return dualq_main_runq(processor);
162 } else {
163 assert(thread->bound_processor == processor);
164 return dualq_bound_runq(processor);
165 }
166}
167
168static sched_mode_t
169sched_dualq_initial_thread_sched_mode(task_t parent_task)
170{
171 if (parent_task == kernel_task)
172 return TH_MODE_FIXED;
173 else
174 return TH_MODE_TIMESHARE;
175}
176
177static void
178sched_dualq_processor_init(processor_t processor)
179{
180 run_queue_init(&processor->runq);
181}
182
183static void
184sched_dualq_pset_init(processor_set_t pset)
185{
186 run_queue_init(&pset->pset_runq);
187}
188
/*
 * Policy-wide initialization: dualq has no private global state, so this
 * just brings up the common timeshare machinery.
 */
static void
sched_dualq_init(void)
{
	sched_timeshare_init();
}
194
195static thread_t
196sched_dualq_choose_thread(
197 processor_t processor,
198 int priority,
199 __unused ast_t reason)
200{
201 run_queue_t main_runq = dualq_main_runq(processor);
202 run_queue_t bound_runq = dualq_bound_runq(processor);
203 run_queue_t chosen_runq;
204
205 if (bound_runq->highq < priority &&
206 main_runq->highq < priority)
207 return THREAD_NULL;
208
209 if (bound_runq->count && main_runq->count) {
210 if (bound_runq->highq >= main_runq->highq) {
211 chosen_runq = bound_runq;
212 } else {
213 chosen_runq = main_runq;
214 }
215 } else if (bound_runq->count) {
216 chosen_runq = bound_runq;
217 } else if (main_runq->count) {
218 chosen_runq = main_runq;
219 } else {
220 return (THREAD_NULL);
221 }
222
223 return run_queue_dequeue(chosen_runq, SCHED_HEADQ);
224}
225
226static boolean_t
227sched_dualq_processor_enqueue(
228 processor_t processor,
229 thread_t thread,
230 integer_t options)
231{
232 run_queue_t rq = dualq_runq_for_thread(processor, thread);
233 boolean_t result;
234
235 result = run_queue_enqueue(rq, thread, options);
236 thread->runq = processor;
237
238 return (result);
239}
240
241static boolean_t
242sched_dualq_processor_queue_empty(processor_t processor)
243{
244 return dualq_main_runq(processor)->count == 0 &&
245 dualq_bound_runq(processor)->count == 0;
246}
247
248static ast_t
249sched_dualq_processor_csw_check(processor_t processor)
250{
251 boolean_t has_higher;
252 int pri;
253
254 run_queue_t main_runq = dualq_main_runq(processor);
255 run_queue_t bound_runq = dualq_bound_runq(processor);
256
257 assert(processor->active_thread != NULL);
258
259 pri = MAX(main_runq->highq, bound_runq->highq);
260
3e170ce0 261 if (processor->first_timeslice) {
fe8ab488
A
262 has_higher = (pri > processor->current_pri);
263 } else {
264 has_higher = (pri >= processor->current_pri);
265 }
266
267 if (has_higher) {
268 if (main_runq->urgency > 0)
269 return (AST_PREEMPT | AST_URGENT);
270
271 if (bound_runq->urgency > 0)
272 return (AST_PREEMPT | AST_URGENT);
273
fe8ab488
A
274 return AST_PREEMPT;
275 }
276
277 return AST_NONE;
278}
279
280static boolean_t
281sched_dualq_processor_queue_has_priority(processor_t processor,
282 int priority,
283 boolean_t gte)
284{
39037602
A
285 run_queue_t main_runq = dualq_main_runq(processor);
286 run_queue_t bound_runq = dualq_bound_runq(processor);
287
39037602 288 int qpri = MAX(main_runq->highq, bound_runq->highq);
fe8ab488
A
289
290 if (gte)
291 return qpri >= priority;
292 else
293 return qpri > priority;
294}
295
fe8ab488
A
296static int
297sched_dualq_runq_count(processor_t processor)
298{
299 return dualq_main_runq(processor)->count + dualq_bound_runq(processor)->count;
300}
301
302static uint64_t
303sched_dualq_runq_stats_count_sum(processor_t processor)
304{
305 uint64_t bound_sum = dualq_bound_runq(processor)->runq_stats.count_sum;
306
307 if (processor->cpu_id == processor->processor_set->cpu_set_low)
308 return bound_sum + dualq_main_runq(processor)->runq_stats.count_sum;
309 else
310 return bound_sum;
311}
312static int
313sched_dualq_processor_bound_count(processor_t processor)
314{
315 return dualq_bound_runq(processor)->count;
316}
317
/*
 * Migrate threads off the shared pset run queue when a processor shuts
 * down.  Called with the pset lock held; always drops it before returning.
 *
 * NOTE(review): only the shared queue is drained here — bound threads are
 * presumably dealt with elsewhere when their processor goes offline;
 * confirm against processor_shutdown()/thread_bind handling.
 */
static void
sched_dualq_processor_queue_shutdown(processor_t processor)
{
	processor_set_t pset = processor->processor_set;
	run_queue_t rq = dualq_main_runq(processor);
	thread_t thread;
	queue_head_t tqueue;

	/* We only need to migrate threads if this is the last active processor in the pset */
	if (pset->online_processor_count > 0) {
		pset_unlock(pset);
		return;
	}

	queue_init(&tqueue);

	/* Drain the shared queue onto a local list while the pset lock is held. */
	while (rq->count > 0) {
		thread = run_queue_dequeue(rq, SCHED_HEADQ);
		enqueue_tail(&tqueue, &thread->runq_links);
	}

	pset_unlock(pset);

	/* Re-dispatch each drained thread; thread_setrun() picks a new home. */
	qe_foreach_element_safe(thread, &tqueue, runq_links) {

		remqueue(&thread->runq_links);

		thread_lock(thread);

		thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
	}
}
352
/*
 * Try to remove 'thread' from the run queue it occupies on 'processor'.
 * Returns TRUE if the thread was still queued and was removed, FALSE if it
 * left the queue before we could take the pset lock.
 */
static boolean_t
sched_dualq_processor_queue_remove(
                                   processor_t processor,
                                   thread_t thread)
{
	run_queue_t rq;
	processor_set_t pset = processor->processor_set;

	pset_lock(pset);

	rq = dualq_runq_for_thread(processor, thread);

	if (processor == thread->runq) {
		/*
		 * Thread is on a run queue and we have a lock on
		 * that run queue.
		 */
		run_queue_remove(rq, thread);
	}
	else {
		/*
		 * The thread left the run queue before we could
		 * lock the run queue.
		 */
		assert(thread->runq == PROCESSOR_NULL);
		/* Reuse 'processor' as the success flag: NULL means nothing removed. */
		processor = PROCESSOR_NULL;
	}

	pset_unlock(pset);

	return (processor != PROCESSOR_NULL);
}
385
/*
 * Steal work for an idle processor: dequeue from the first non-empty
 * shared run queue, trying this pset first and then each other pset in
 * turn.  Entered with 'pset' locked; always returns with no pset lock
 * held, carrying either a thread or THREAD_NULL if every queue was empty.
 */
static thread_t
sched_dualq_steal_thread(processor_set_t pset)
{
	processor_set_t nset, cset = pset;
	thread_t thread;

	do {
		if (cset->pset_runq.count > 0) {
			thread = run_queue_dequeue(&cset->pset_runq, SCHED_HEADQ);
			pset_unlock(cset);
			return (thread);
		}

		nset = next_pset(cset);

		/* Hand over the lock: drop the current pset's before taking the next. */
		if (nset != pset) {
			pset_unlock(cset);

			cset = nset;
			pset_lock(cset);
		}
	} while (nset != pset);

	pset_unlock(cset);

	return (THREAD_NULL);
}
413
/*
 * Periodic scan that collects runnable timeshare threads whose priority
 * needs recomputation.  Phase 1 walks every processor, scanning its bound
 * run queue and its idle thread; phase 2 walks every pset's shared run
 * queue.  Candidates are gathered under splsched + pset lock, then fixed
 * up by thread_update_process_threads(); if the candidate buffer fills
 * (runq_scan / thread_update_add_thread report so), the walk restarts
 * after processing the batch.
 */
static void
sched_dualq_thread_update_scan(sched_update_scan_context_t scan_context)
{
	boolean_t restart_needed = FALSE;
	processor_t processor = processor_list;
	processor_set_t pset;
	thread_t thread;
	spl_t s;

	/*
	 * We update the threads associated with each processor (bound and idle threads)
	 * and then update the threads in each pset runqueue.
	 */

	do {
		do {
			pset = processor->processor_set;

			s = splsched();
			pset_lock(pset);

			restart_needed = runq_scan(dualq_bound_runq(processor), scan_context);

			pset_unlock(pset);
			splx(s);

			if (restart_needed)
				break;

			/* The idle thread never sits on a run queue; age it directly. */
			thread = processor->idle_thread;
			if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
				if (thread_update_add_thread(thread) == FALSE) {
					restart_needed = TRUE;
					break;
				}
			}
		} while ((processor = processor->processor_list) != NULL);

		/* Ok, we now have a collection of candidates -- fix them. */
		thread_update_process_threads();

	} while (restart_needed);

	pset = &pset0;

	do {
		do {
			s = splsched();
			pset_lock(pset);

			restart_needed = runq_scan(&pset->pset_runq, scan_context);

			pset_unlock(pset);
			splx(s);

			if (restart_needed)
				break;
		} while ((pset = pset->pset_list) != NULL);

		/* Ok, we now have a collection of candidates -- fix them. */
		thread_update_process_threads();

	} while (restart_needed);
}
478
479