/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/machine.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/debug.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <sys/kdebug.h>

static void
sched_dualq_init(void);

static thread_t
sched_dualq_steal_thread(processor_set_t pset);

static void
sched_dualq_thread_update_scan(sched_update_scan_context_t scan_context);

static boolean_t
sched_dualq_processor_enqueue(processor_t processor, thread_t thread, integer_t options);

static boolean_t
sched_dualq_processor_queue_remove(processor_t processor, thread_t thread);

static ast_t
sched_dualq_processor_csw_check(processor_t processor);

static boolean_t
sched_dualq_processor_queue_has_priority(processor_t processor, int priority, boolean_t gte);

static int
sched_dualq_runq_count(processor_t processor);

static boolean_t
sched_dualq_processor_queue_empty(processor_t processor);

static uint64_t
sched_dualq_runq_stats_count_sum(processor_t processor);

static int
sched_dualq_processor_bound_count(processor_t processor);

static void
sched_dualq_pset_init(processor_set_t pset);

static void
sched_dualq_processor_init(processor_t processor);

static thread_t
sched_dualq_choose_thread(processor_t processor, int priority, ast_t reason);

static void
sched_dualq_processor_queue_shutdown(processor_t processor);

static sched_mode_t
sched_dualq_initial_thread_sched_mode(task_t parent_task);

static bool
sched_dualq_thread_avoid_processor(processor_t processor, thread_t thread);

const struct sched_dispatch_table sched_dualq_dispatch = {
    .sched_name = "dualq",
    .init = sched_dualq_init,
    .timebase_init = sched_timeshare_timebase_init,
    .processor_init = sched_dualq_processor_init,
    .pset_init = sched_dualq_pset_init,
    .maintenance_continuation = sched_timeshare_maintenance_continue,
    .choose_thread = sched_dualq_choose_thread,
    .steal_thread_enabled = sched_steal_thread_enabled,
    .steal_thread = sched_dualq_steal_thread,
    .compute_timeshare_priority = sched_compute_timeshare_priority,
    .choose_processor = choose_processor,
    .processor_enqueue = sched_dualq_processor_enqueue,
    .processor_queue_shutdown = sched_dualq_processor_queue_shutdown,
    .processor_queue_remove = sched_dualq_processor_queue_remove,
    .processor_queue_empty = sched_dualq_processor_queue_empty,
    .priority_is_urgent = priority_is_urgent,
    .processor_csw_check = sched_dualq_processor_csw_check,
    .processor_queue_has_priority = sched_dualq_processor_queue_has_priority,
    .initial_quantum_size = sched_timeshare_initial_quantum_size,
    .initial_thread_sched_mode = sched_dualq_initial_thread_sched_mode,
    .can_update_priority = can_update_priority,
    .update_priority = update_priority,
    .lightweight_update_priority = lightweight_update_priority,
    .quantum_expire = sched_default_quantum_expire,
    .processor_runq_count = sched_dualq_runq_count,
    .processor_runq_stats_count_sum = sched_dualq_runq_stats_count_sum,
    .processor_bound_count = sched_dualq_processor_bound_count,
    .thread_update_scan = sched_dualq_thread_update_scan,
    .direct_dispatch_to_idle_processors = FALSE,
    .multiple_psets_enabled = TRUE,
    .sched_groups_enabled = FALSE,
    .avoid_processor_enabled = TRUE,
    .thread_avoid_processor = sched_dualq_thread_avoid_processor,
    .processor_balance = sched_SMT_balance,

    .rt_runq = sched_rtglobal_runq,
    .rt_init = sched_rtglobal_init,
    .rt_queue_shutdown = sched_rtglobal_queue_shutdown,
    .rt_runq_scan = sched_rtglobal_runq_scan,
    .rt_runq_count_sum = sched_rtglobal_runq_count_sum,

    .qos_max_parallelism = sched_qos_max_parallelism,
    .check_spill = sched_check_spill,
    .ipi_policy = sched_ipi_policy,
    .thread_should_yield = sched_thread_should_yield,
};

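/*
 * The "dualq" policy uses two run queues from each processor's point of view:
 * a "main" queue shared by every processor in the processor set (holding
 * unbound threads), and a per-processor "bound" queue holding threads bound
 * to that specific processor.
 */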
__attribute__((always_inline))
static inline run_queue_t
dualq_main_runq(processor_t processor)
{
    return &processor->processor_set->pset_runq;
}

__attribute__((always_inline))
static inline run_queue_t
dualq_bound_runq(processor_t processor)
{
    return &processor->runq;
}

__attribute__((always_inline))
static inline run_queue_t
dualq_runq_for_thread(processor_t processor, thread_t thread)
{
    if (thread->bound_processor == PROCESSOR_NULL) {
        return dualq_main_runq(processor);
    } else {
        assert(thread->bound_processor == processor);
        return dualq_bound_runq(processor);
    }
}

static sched_mode_t
sched_dualq_initial_thread_sched_mode(task_t parent_task)
{
    if (parent_task == kernel_task) {
        return TH_MODE_FIXED;
    } else {
        return TH_MODE_TIMESHARE;
    }
}

static void
sched_dualq_processor_init(processor_t processor)
{
    run_queue_init(&processor->runq);
}

static void
sched_dualq_pset_init(processor_set_t pset)
{
    run_queue_init(&pset->pset_runq);
}

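/*
 * The "disable_NO_SMT_threads" boot-arg turns off NO_SMT thread support
 * for this scheduler by clearing sched_allow_NO_SMT_threads.
 */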
extern int sched_allow_NO_SMT_threads;
static void
sched_dualq_init(void)
{
    sched_timeshare_init();

    if (PE_parse_boot_argn("disable_NO_SMT_threads", NULL, 0)) {
        sched_allow_NO_SMT_threads = 0;
    }
}

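/*
 * Pick the highest-priority runnable thread for this processor, comparing
 * the head of the processor-bound queue against the head of the shared pset
 * queue. On SMT hardware, a candidate from the shared queue is only dequeued
 * if the NO_SMT constraints allow it to run here; otherwise the processor is
 * told to idle instead.
 */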
static thread_t
sched_dualq_choose_thread(
    processor_t processor,
    int priority,
    __unused ast_t reason)
{
    run_queue_t main_runq = dualq_main_runq(processor);
    run_queue_t bound_runq = dualq_bound_runq(processor);
    run_queue_t chosen_runq;

    if (bound_runq->highq < priority &&
        main_runq->highq < priority) {
        return THREAD_NULL;
    }

    if (bound_runq->count && main_runq->count) {
        if (bound_runq->highq >= main_runq->highq) {
            chosen_runq = bound_runq;
        } else {
            chosen_runq = main_runq;
        }
    } else if (bound_runq->count) {
        chosen_runq = bound_runq;
    } else if (main_runq->count) {
        chosen_runq = main_runq;
    } else {
        return THREAD_NULL;
    }

    if (chosen_runq == bound_runq) {
        return run_queue_dequeue(chosen_runq, SCHED_HEADQ);
    }

    if (processor->is_SMT) {
        /* Peek at the candidate without removing it from the queue. */
        thread_t potential_thread = run_queue_peek(chosen_runq);
        if (potential_thread == THREAD_NULL) {
            return THREAD_NULL;
        }
        if (processor->processor_primary != processor) {
            /*
             * Secondary processor may not run a NO_SMT thread,
             * nor any thread if the primary is running a NO_SMT thread.
             */
            if (thread_no_smt(potential_thread)) {
                processor->must_idle = true;
                return THREAD_NULL;
            }
            processor_t primary = processor->processor_primary;
            if (primary->state == PROCESSOR_RUNNING) {
                if (processor_active_thread_no_smt(primary)) {
                    processor->must_idle = true;
                    return THREAD_NULL;
                }
            }
        } else if (processor->processor_secondary != PROCESSOR_NULL) {
            processor_t secondary = processor->processor_secondary;
            /*
             * Primary processor may not run a NO_SMT thread if
             * its secondary is running a bound thread.
             */
            if (secondary->state == PROCESSOR_RUNNING) {
                if (thread_no_smt(potential_thread) && secondary->current_is_bound) {
                    processor->must_idle = true;
                    return THREAD_NULL;
                }
            }
        }
    }

    return run_queue_dequeue(chosen_runq, SCHED_HEADQ);
}

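/*
 * Enqueue the thread on the queue selected by dualq_runq_for_thread() and
 * record which processor's queues now hold it (thread->runq).
 */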
static boolean_t
sched_dualq_processor_enqueue(
    processor_t processor,
    thread_t thread,
    integer_t options)
{
    run_queue_t rq = dualq_runq_for_thread(processor, thread);
    boolean_t result;

    result = run_queue_enqueue(rq, thread, options);
    thread->runq = processor;

    return result;
}

static boolean_t
sched_dualq_processor_queue_empty(processor_t processor)
{
    return dualq_main_runq(processor)->count == 0 &&
           dualq_bound_runq(processor)->count == 0;
}

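/*
 * Decide whether the thread currently running on this processor should be
 * preempted, based on the highest priority waiting on either run queue.
 * Returns the AST flags to set (AST_PREEMPT, optionally AST_URGENT).
 */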
static ast_t
sched_dualq_processor_csw_check(processor_t processor)
{
    boolean_t has_higher;
    int pri;

    if (sched_dualq_thread_avoid_processor(processor, current_thread())) {
        return AST_PREEMPT | AST_URGENT;
    }

    run_queue_t main_runq = dualq_main_runq(processor);
    run_queue_t bound_runq = dualq_bound_runq(processor);

    assert(processor->active_thread != NULL);

    pri = MAX(main_runq->highq, bound_runq->highq);

    if (processor->first_timeslice) {
        has_higher = (pri > processor->current_pri);
    } else {
        has_higher = (pri >= processor->current_pri);
    }

    if (has_higher) {
        if (main_runq->urgency > 0) {
            return AST_PREEMPT | AST_URGENT;
        }

        if (bound_runq->urgency > 0) {
            return AST_PREEMPT | AST_URGENT;
        }

        return AST_PREEMPT;
    }

    return AST_NONE;
}

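/*
 * Report whether either run queue holds a thread at the given priority
 * (or above it, if gte is TRUE).
 */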
static boolean_t
sched_dualq_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte)
{
    run_queue_t main_runq = dualq_main_runq(processor);
    run_queue_t bound_runq = dualq_bound_runq(processor);

    int qpri = MAX(main_runq->highq, bound_runq->highq);

    if (gte) {
        return qpri >= priority;
    } else {
        return qpri > priority;
    }
}

static int
sched_dualq_runq_count(processor_t processor)
{
    return dualq_main_runq(processor)->count + dualq_bound_runq(processor)->count;
}

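/*
 * Sum run-queue statistics. The shared pset queue is counted only on the
 * lowest-numbered CPU in the pset so it is not counted once per processor.
 */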
static uint64_t
sched_dualq_runq_stats_count_sum(processor_t processor)
{
    uint64_t bound_sum = dualq_bound_runq(processor)->runq_stats.count_sum;

    if (processor->cpu_id == processor->processor_set->cpu_set_low) {
        return bound_sum + dualq_main_runq(processor)->runq_stats.count_sum;
    } else {
        return bound_sum;
    }
}

static int
sched_dualq_processor_bound_count(processor_t processor)
{
    return dualq_bound_runq(processor)->count;
}

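/*
 * Called while the processor is being shut down, with the pset lock held;
 * the lock is dropped on every return path. If this was the last active
 * processor in the pset, threads left on the shared queue are re-dispatched
 * via thread_setrun() so they can migrate elsewhere.
 */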
static void
sched_dualq_processor_queue_shutdown(processor_t processor)
{
    processor_set_t pset = processor->processor_set;
    run_queue_t rq = dualq_main_runq(processor);
    thread_t thread;
    queue_head_t tqueue;

    /* We only need to migrate threads if this is the last active processor in the pset */
    if (pset->online_processor_count > 0) {
        pset_unlock(pset);
        return;
    }

    queue_init(&tqueue);

    while (rq->count > 0) {
        thread = run_queue_dequeue(rq, SCHED_HEADQ);
        enqueue_tail(&tqueue, &thread->runq_links);
    }

    pset_unlock(pset);

    qe_foreach_element_safe(thread, &tqueue, runq_links) {
        remqueue(&thread->runq_links);

        thread_lock(thread);

        thread_setrun(thread, SCHED_TAILQ);

        thread_unlock(thread);
    }
}

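/*
 * Attempt to pull the thread off this processor's run queues. Returns TRUE
 * if the thread was found and removed, FALSE if it had already left the
 * queue before the pset lock was taken.
 */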
static boolean_t
sched_dualq_processor_queue_remove(
    processor_t processor,
    thread_t thread)
{
    run_queue_t rq;
    processor_set_t pset = processor->processor_set;

    pset_lock(pset);

    rq = dualq_runq_for_thread(processor, thread);

    if (processor == thread->runq) {
        /*
         * Thread is on a run queue and we have a lock on
         * that run queue.
         */
        run_queue_remove(rq, thread);
    } else {
        /*
         * The thread left the run queue before we could
         * lock the run queue.
         */
        assert(thread->runq == PROCESSOR_NULL);
        processor = PROCESSOR_NULL;
    }

    pset_unlock(pset);

    return processor != PROCESSOR_NULL;
}

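/*
 * Walk the other processor sets looking for an unbound thread to run here.
 * Entered with the caller's pset locked; every exit path drops whichever
 * pset lock is currently held.
 */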
static thread_t
sched_dualq_steal_thread(processor_set_t pset)
{
    processor_set_t cset = pset;
    processor_set_t nset = next_pset(cset);
    thread_t thread;

    while (nset != pset) {
        pset_unlock(cset);
        cset = nset;
        pset_lock(cset);

        if (cset->pset_runq.count > 0) {
            /* Need task_restrict logic here */
            thread = run_queue_dequeue(&cset->pset_runq, SCHED_HEADQ);
            pset_unlock(cset);
            return thread;
        }

        nset = next_pset(cset);
    }

    pset_unlock(cset);

    return THREAD_NULL;
}

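/*
 * Periodic scan on behalf of the timeshare scheduler: collect threads whose
 * timeshare priority may be stale (bound, idle, and pset run-queue threads)
 * and hand them to thread_update_process_threads() for recomputation.
 */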
static void
sched_dualq_thread_update_scan(sched_update_scan_context_t scan_context)
{
    boolean_t restart_needed = FALSE;
    processor_t processor = processor_list;
    processor_set_t pset;
    thread_t thread;
    spl_t s;

    /*
     * We update the threads associated with each processor (bound and idle threads)
     * and then update the threads in each pset runqueue.
     */

    do {
        do {
            pset = processor->processor_set;

            s = splsched();
            pset_lock(pset);

            restart_needed = runq_scan(dualq_bound_runq(processor), scan_context);

            pset_unlock(pset);
            splx(s);

            if (restart_needed) {
                break;
            }

            thread = processor->idle_thread;
            if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
                if (thread_update_add_thread(thread) == FALSE) {
                    restart_needed = TRUE;
                    break;
                }
            }
        } while ((processor = processor->processor_list) != NULL);

        /* Ok, we now have a collection of candidates -- fix them. */
        thread_update_process_threads();
    } while (restart_needed);

    pset = &pset0;

    do {
        do {
            s = splsched();
            pset_lock(pset);

            restart_needed = runq_scan(&pset->pset_runq, scan_context);

            pset_unlock(pset);
            splx(s);

            if (restart_needed) {
                break;
            }
        } while ((pset = pset->pset_list) != NULL);

        /* Ok, we now have a collection of candidates -- fix them. */
        thread_update_process_threads();
    } while (restart_needed);
}

extern int sched_allow_rt_smt;

/* Return true if this thread should not continue running on this processor */
static bool
sched_dualq_thread_avoid_processor(processor_t processor, thread_t thread)
{
    if (thread->bound_processor == processor) {
        /* Thread is bound here */
        return false;
    }

    if (processor->processor_primary != processor) {
        /*
         * This is a secondary SMT processor.  If the primary is running
         * a realtime thread, only allow realtime threads on the secondary.
         */
        processor_t primary = processor->processor_primary;
        if ((primary->current_pri >= BASEPRI_RTQUEUES) && ((thread->sched_pri < BASEPRI_RTQUEUES) || !sched_allow_rt_smt)) {
            return true;
        }

        /* NO_SMT threads are not allowed on secondary processors */
        if (thread_no_smt(thread)) {
            return true;
        }

        if (primary->state == PROCESSOR_RUNNING) {
            if (processor_active_thread_no_smt(primary)) {
                /* No threads allowed on secondary if primary has NO_SMT */
                return true;
            }
        }
    }

    if (processor->processor_secondary != PROCESSOR_NULL) {
        /*
         * This is a primary SMT processor.  If the secondary is running
         * a bound thread, the primary may not run a NO_SMT thread.
         */
        processor_t secondary = processor->processor_secondary;

        if (secondary->state == PROCESSOR_RUNNING) {
            if (secondary->current_is_bound && thread_no_smt(thread)) {
                return true;
            }
        }
    }

    return false;
}