/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/machine.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/debug.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <sys/kdebug.h>

static void
sched_dualq_init(void);

static thread_t
sched_dualq_steal_thread(processor_set_t pset);

static void
sched_dualq_thread_update_scan(sched_update_scan_context_t scan_context);

static boolean_t
sched_dualq_processor_enqueue(processor_t processor, thread_t thread,
    sched_options_t options);

static boolean_t
sched_dualq_processor_queue_remove(processor_t processor, thread_t thread);

static ast_t
sched_dualq_processor_csw_check(processor_t processor);

static boolean_t
sched_dualq_processor_queue_has_priority(processor_t processor, int priority, boolean_t gte);

static int
sched_dualq_runq_count(processor_t processor);

static boolean_t
sched_dualq_processor_queue_empty(processor_t processor);

static uint64_t
sched_dualq_runq_stats_count_sum(processor_t processor);

static int
sched_dualq_processor_bound_count(processor_t processor);

static void
sched_dualq_pset_init(processor_set_t pset);

static void
sched_dualq_processor_init(processor_t processor);

static thread_t
sched_dualq_choose_thread(processor_t processor, int priority, ast_t reason);

static void
sched_dualq_processor_queue_shutdown(processor_t processor);

static sched_mode_t
sched_dualq_initial_thread_sched_mode(task_t parent_task);

static bool
sched_dualq_thread_avoid_processor(processor_t processor, thread_t thread);

const struct sched_dispatch_table sched_dualq_dispatch = {
	.sched_name = "dualq",
	.init = sched_dualq_init,
	.timebase_init = sched_timeshare_timebase_init,
	.processor_init = sched_dualq_processor_init,
	.pset_init = sched_dualq_pset_init,
	.maintenance_continuation = sched_timeshare_maintenance_continue,
	.choose_thread = sched_dualq_choose_thread,
	.steal_thread_enabled = sched_steal_thread_enabled,
	.steal_thread = sched_dualq_steal_thread,
	.compute_timeshare_priority = sched_compute_timeshare_priority,
	.choose_node = sched_choose_node,
	.choose_processor = choose_processor,
	.processor_enqueue = sched_dualq_processor_enqueue,
	.processor_queue_shutdown = sched_dualq_processor_queue_shutdown,
	.processor_queue_remove = sched_dualq_processor_queue_remove,
	.processor_queue_empty = sched_dualq_processor_queue_empty,
	.priority_is_urgent = priority_is_urgent,
	.processor_csw_check = sched_dualq_processor_csw_check,
	.processor_queue_has_priority = sched_dualq_processor_queue_has_priority,
	.initial_quantum_size = sched_timeshare_initial_quantum_size,
	.initial_thread_sched_mode = sched_dualq_initial_thread_sched_mode,
	.can_update_priority = can_update_priority,
	.update_priority = update_priority,
	.lightweight_update_priority = lightweight_update_priority,
	.quantum_expire = sched_default_quantum_expire,
	.processor_runq_count = sched_dualq_runq_count,
	.processor_runq_stats_count_sum = sched_dualq_runq_stats_count_sum,
	.processor_bound_count = sched_dualq_processor_bound_count,
	.thread_update_scan = sched_dualq_thread_update_scan,
	.multiple_psets_enabled = TRUE,
	.sched_groups_enabled = FALSE,
	.avoid_processor_enabled = TRUE,
	.thread_avoid_processor = sched_dualq_thread_avoid_processor,
	.processor_balance = sched_SMT_balance,

	.rt_runq = sched_rtlocal_runq,
	.rt_init = sched_rtlocal_init,
	.rt_queue_shutdown = sched_rtlocal_queue_shutdown,
	.rt_runq_scan = sched_rtlocal_runq_scan,
	.rt_runq_count_sum = sched_rtlocal_runq_count_sum,

	.qos_max_parallelism = sched_qos_max_parallelism,
	.check_spill = sched_check_spill,
	.ipi_policy = sched_ipi_policy,
	.thread_should_yield = sched_thread_should_yield,
	.run_count_incr = sched_smt_run_incr,
	.run_count_decr = sched_smt_run_decr,
	.update_thread_bucket = sched_smt_update_thread_bucket,
	.pset_made_schedulable = sched_pset_made_schedulable,
};

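/*
 * The dualq scheduler maintains two run queues from a processor's point
 * of view: the pset-wide queue (pset_runq), shared by all processors in
 * the processor set and holding unbound threads, and the per-processor
 * queue (processor->runq), holding threads bound to that processor.
 */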
__attribute__((always_inline))
static inline run_queue_t
dualq_main_runq(processor_t processor)
{
	return &processor->processor_set->pset_runq;
}

__attribute__((always_inline))
static inline run_queue_t
dualq_bound_runq(processor_t processor)
{
	return &processor->runq;
}

__attribute__((always_inline))
static inline run_queue_t
dualq_runq_for_thread(processor_t processor, thread_t thread)
{
	if (thread->bound_processor == PROCESSOR_NULL) {
		return dualq_main_runq(processor);
	} else {
		assert(thread->bound_processor == processor);
		return dualq_bound_runq(processor);
	}
}

static sched_mode_t
sched_dualq_initial_thread_sched_mode(task_t parent_task)
{
	if (parent_task == kernel_task) {
		return TH_MODE_FIXED;
	} else {
		return TH_MODE_TIMESHARE;
	}
}

static void
sched_dualq_processor_init(processor_t processor)
{
	run_queue_init(&processor->runq);
}

static void
sched_dualq_pset_init(processor_set_t pset)
{
	run_queue_init(&pset->pset_runq);
}

extern int sched_allow_NO_SMT_threads;
static void
sched_dualq_init(void)
{
	sched_timeshare_init();

	if (PE_parse_boot_argn("disable_NO_SMT_threads", NULL, 0)) {
		sched_allow_NO_SMT_threads = 0;
	}
}

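/*
 * Pick the next thread for this processor: take the head of whichever of
 * the bound or pset-wide queue has the higher priority (ties go to the
 * bound queue).  On SMT processors, NO_SMT constraints may instead force
 * the processor to idle and return THREAD_NULL.
 */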
static thread_t
sched_dualq_choose_thread(
	processor_t processor,
	int priority,
	__unused ast_t reason)
{
	run_queue_t main_runq = dualq_main_runq(processor);
	run_queue_t bound_runq = dualq_bound_runq(processor);
	run_queue_t chosen_runq;

	if (bound_runq->highq < priority &&
	    main_runq->highq < priority) {
		return THREAD_NULL;
	}

	if (bound_runq->count && main_runq->count) {
		if (bound_runq->highq >= main_runq->highq) {
			chosen_runq = bound_runq;
		} else {
			chosen_runq = main_runq;
		}
	} else if (bound_runq->count) {
		chosen_runq = bound_runq;
	} else if (main_runq->count) {
		chosen_runq = main_runq;
	} else {
		return THREAD_NULL;
	}

	if (chosen_runq == bound_runq) {
		return run_queue_dequeue(chosen_runq, SCHED_HEADQ);
	}

	if (processor->is_SMT) {
		thread_t potential_thread = run_queue_peek(chosen_runq);
		if (potential_thread == THREAD_NULL) {
			return THREAD_NULL;
		}
		if (processor->processor_primary != processor) {
			/*
			 * Secondary processor may not run a NO_SMT thread,
			 * nor any thread if the primary is running a NO_SMT thread.
			 */
			if (thread_no_smt(potential_thread)) {
				processor->must_idle = true;
				return THREAD_NULL;
			}
			processor_t primary = processor->processor_primary;
			if (primary->state == PROCESSOR_RUNNING) {
				if (processor_active_thread_no_smt(primary)) {
					processor->must_idle = true;
					return THREAD_NULL;
				}
			}
		} else if (processor->processor_secondary != PROCESSOR_NULL) {
			processor_t secondary = processor->processor_secondary;
			/*
			 * Primary processor may not run a NO_SMT thread if
			 * its secondary is running a bound thread.
			 */
			if (secondary->state == PROCESSOR_RUNNING) {
				if (thread_no_smt(potential_thread) && secondary->current_is_bound) {
					processor->must_idle = true;
					return THREAD_NULL;
				}
			}
		}
	}

	return run_queue_dequeue(chosen_runq, SCHED_HEADQ);
}

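/*
 * Enqueue a thread on the appropriate run queue (bound or pset-wide)
 * and record the processor as the thread's current run queue.
 */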
static boolean_t
sched_dualq_processor_enqueue(
	processor_t processor,
	thread_t thread,
	sched_options_t options)
{
	run_queue_t rq = dualq_runq_for_thread(processor, thread);
	boolean_t result;

	result = run_queue_enqueue(rq, thread, options);
	thread->runq = processor;

	return result;
}

static boolean_t
sched_dualq_processor_queue_empty(processor_t processor)
{
	return dualq_main_runq(processor)->count == 0 &&
	       dualq_bound_runq(processor)->count == 0;
}

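/*
 * Decide whether the running thread should be preempted: returns
 * AST_PREEMPT (urgent if either queue holds urgent work) when a queued
 * thread outranks the current one, or AST_PREEMPT | AST_URGENT when the
 * current thread should not be running on this processor at all.
 */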
static ast_t
sched_dualq_processor_csw_check(processor_t processor)
{
	boolean_t has_higher;
	int pri;

	if (sched_dualq_thread_avoid_processor(processor, current_thread())) {
		return AST_PREEMPT | AST_URGENT;
	}

	run_queue_t main_runq = dualq_main_runq(processor);
	run_queue_t bound_runq = dualq_bound_runq(processor);

	assert(processor->active_thread != NULL);

	pri = MAX(main_runq->highq, bound_runq->highq);

	if (processor->first_timeslice) {
		has_higher = (pri > processor->current_pri);
	} else {
		has_higher = (pri >= processor->current_pri);
	}

	if (has_higher) {
		if (main_runq->urgency > 0) {
			return AST_PREEMPT | AST_URGENT;
		}

		if (bound_runq->urgency > 0) {
			return AST_PREEMPT | AST_URGENT;
		}

		return AST_PREEMPT;
	}

	return AST_NONE;
}

static boolean_t
sched_dualq_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte)
{
	run_queue_t main_runq = dualq_main_runq(processor);
	run_queue_t bound_runq = dualq_bound_runq(processor);

	int qpri = MAX(main_runq->highq, bound_runq->highq);

	if (gte) {
		return qpri >= priority;
	} else {
		return qpri > priority;
	}
}

static int
sched_dualq_runq_count(processor_t processor)
{
	return dualq_main_runq(processor)->count + dualq_bound_runq(processor)->count;
}

static uint64_t
sched_dualq_runq_stats_count_sum(processor_t processor)
{
	uint64_t bound_sum = dualq_bound_runq(processor)->runq_stats.count_sum;

	if (processor->cpu_id == processor->processor_set->cpu_set_low) {
		return bound_sum + dualq_main_runq(processor)->runq_stats.count_sum;
	} else {
		return bound_sum;
	}
}

static int
sched_dualq_processor_bound_count(processor_t processor)
{
	return dualq_bound_runq(processor)->count;
}

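/*
 * Called when a processor is shut down.  If it was the last active
 * processor in its pset, drain the pset-wide run queue and let
 * thread_setrun() migrate the threads elsewhere.
 */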
static void
sched_dualq_processor_queue_shutdown(processor_t processor)
{
	processor_set_t pset = processor->processor_set;
	run_queue_t rq = dualq_main_runq(processor);
	thread_t thread;
	queue_head_t tqueue;

	/* We only need to migrate threads if this is the last active processor in the pset */
	if (pset->online_processor_count > 0) {
		pset_unlock(pset);
		return;
	}

	queue_init(&tqueue);

	while (rq->count > 0) {
		thread = run_queue_dequeue(rq, SCHED_HEADQ);
		enqueue_tail(&tqueue, &thread->runq_links);
	}

	pset_unlock(pset);

	qe_foreach_element_safe(thread, &tqueue, runq_links) {
		remqueue(&thread->runq_links);

		thread_lock(thread);

		thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
	}
}

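/*
 * Remove a thread from this processor's run queue, if it is still there.
 * Returns TRUE if the thread was found and removed, FALSE if it had
 * already left the run queue.
 */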
static boolean_t
sched_dualq_processor_queue_remove(
	processor_t processor,
	thread_t thread)
{
	run_queue_t rq;
	processor_set_t pset = processor->processor_set;

	pset_lock(pset);

	rq = dualq_runq_for_thread(processor, thread);

	if (processor == thread->runq) {
		/*
		 * Thread is on a run queue and we have a lock on
		 * that run queue.
		 */
		run_queue_remove(rq, thread);
	} else {
		/*
		 * The thread left the run queue before we could
		 * lock the run queue.
		 */
		assert(thread->runq == PROCESSOR_NULL);
		processor = PROCESSOR_NULL;
	}

	pset_unlock(pset);

	return processor != PROCESSOR_NULL;
}

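/*
 * Walk the other psets looking for a stealable thread and return the
 * first one found, or THREAD_NULL.  Entered with the caller's pset
 * locked; drops all pset locks before returning.
 */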
static thread_t
sched_dualq_steal_thread(processor_set_t pset)
{
	processor_set_t cset = pset;
	processor_set_t nset = next_pset(cset);
	thread_t thread;

	/* Secondary processors on SMT systems never steal */
	assert(current_processor()->processor_primary == current_processor());

	while (nset != pset) {
		pset_unlock(cset);
		cset = nset;
		pset_lock(cset);

		if (pset_has_stealable_threads(cset)) {
			/* Need task_restrict logic here */
			thread = run_queue_dequeue(&cset->pset_runq, SCHED_HEADQ);
			pset_unlock(cset);
			return thread;
		}

		nset = next_pset(cset);
	}

	pset_unlock(cset);

	return THREAD_NULL;
}

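/*
 * Periodic scan of the bound and pset-wide run queues, collecting
 * timeshare threads whose sched_stamp is stale so that
 * thread_update_process_threads() can recompute their priorities.
 */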
static void
sched_dualq_thread_update_scan(sched_update_scan_context_t scan_context)
{
	boolean_t restart_needed = FALSE;
	processor_t processor = processor_list;
	processor_set_t pset;
	thread_t thread;
	spl_t s;

	/*
	 * We update the threads associated with each processor (bound and idle threads)
	 * and then update the threads in each pset runqueue.
	 */

	do {
		do {
			pset = processor->processor_set;

			s = splsched();
			pset_lock(pset);

			restart_needed = runq_scan(dualq_bound_runq(processor), scan_context);

			pset_unlock(pset);
			splx(s);

			if (restart_needed) {
				break;
			}

			thread = processor->idle_thread;
			if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
				if (thread_update_add_thread(thread) == FALSE) {
					restart_needed = TRUE;
					break;
				}
			}
		} while ((processor = processor->processor_list) != NULL);

		/* Ok, we now have a collection of candidates -- fix them. */
		thread_update_process_threads();
	} while (restart_needed);

	pset = &pset0;

	do {
		do {
			s = splsched();
			pset_lock(pset);

			restart_needed = runq_scan(&pset->pset_runq, scan_context);

			pset_unlock(pset);
			splx(s);

			if (restart_needed) {
				break;
			}
		} while ((pset = pset->pset_list) != NULL);

		/* Ok, we now have a collection of candidates -- fix them. */
		thread_update_process_threads();
	} while (restart_needed);
}

extern int sched_allow_rt_smt;

/* Return true if this thread should not continue running on this processor */
static bool
sched_dualq_thread_avoid_processor(processor_t processor, thread_t thread)
{
	if (thread->bound_processor == processor) {
		/* Thread is bound here */
		return false;
	}

	if (processor->processor_primary != processor) {
		/*
		 * This is a secondary SMT processor. If the primary is running
		 * a realtime thread, only allow realtime threads on the secondary.
		 */
		processor_t primary = processor->processor_primary;
		if ((primary->current_pri >= BASEPRI_RTQUEUES) && ((thread->sched_pri < BASEPRI_RTQUEUES) || !sched_allow_rt_smt)) {
			return true;
		}

		/* NO_SMT threads are not allowed on secondary processors */
		if (thread_no_smt(thread)) {
			return true;
		}

		if (primary->state == PROCESSOR_RUNNING) {
			if (processor_active_thread_no_smt(primary)) {
				/* No threads allowed on secondary if primary has NO_SMT */
				return true;
			}
		}
	}

	if (processor->processor_secondary != PROCESSOR_NULL) {
		/*
		 * This is a primary SMT processor. If the secondary is running
		 * a bound thread, the primary may not run a NO_SMT thread.
		 */
		processor_t secondary = processor->processor_secondary;

		if (secondary->state == PROCESSOR_RUNNING) {
			if (secondary->current_is_bound && thread_no_smt(thread)) {
				return true;
			}
		}
	}

	return false;
}