/*
 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach/mach_types.h>

#include <kern/sched.h>
#include <kern/sched_prim.h>

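/*
 * When TRUE, the processors in a pset share a single run queue
 * (pset->pset_runq) instead of each processor owning its own.
 * Set once at init by sched_traditional_with_pset_runqueue_init().
 */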
static boolean_t
sched_traditional_use_pset_runqueue = FALSE;

static void
sched_traditional_init(void);

static thread_t
sched_traditional_steal_thread(processor_set_t pset);

static thread_t
sched_traditional_steal_processor_thread(processor_t processor);

static void
sched_traditional_thread_update_scan(sched_update_scan_context_t scan_context);

static void
sched_traditional_processor_queue_shutdown(processor_t processor);

static boolean_t
sched_traditional_processor_enqueue(processor_t processor, thread_t thread, integer_t options);

static boolean_t
sched_traditional_processor_queue_remove(processor_t processor, thread_t thread);

static boolean_t
sched_traditional_processor_queue_empty(processor_t processor);

static ast_t
sched_traditional_processor_csw_check(processor_t processor);

static boolean_t
sched_traditional_processor_queue_has_priority(processor_t processor, int priority, boolean_t gte);

static int
sched_traditional_processor_runq_count(processor_t processor);

static boolean_t
sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor);

static uint64_t
sched_traditional_processor_runq_stats_count_sum(processor_t processor);

static uint64_t
sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor);

static int
sched_traditional_processor_bound_count(processor_t processor);

extern void
sched_traditional_quantum_expire(thread_t thread);

static void
sched_traditional_processor_init(processor_t processor);

static void
sched_traditional_pset_init(processor_set_t pset);

static void
sched_traditional_with_pset_runqueue_init(void);

static sched_mode_t
sched_traditional_initial_thread_sched_mode(task_t parent_task);

static thread_t
sched_traditional_choose_thread(processor_t processor, int priority, ast_t reason);

/* Choose a thread from a processor's priority-based runq */
static thread_t sched_traditional_choose_thread_from_runq(processor_t processor, run_queue_t runq, int priority);

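/*
 * Dispatch table for the "traditional" scheduler: a timeshare policy
 * with a private run queue per processor.
 */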
const struct sched_dispatch_table sched_traditional_dispatch = {
    .sched_name = "traditional",
    .init = sched_traditional_init,
    .timebase_init = sched_timeshare_timebase_init,
    .processor_init = sched_traditional_processor_init,
    .pset_init = sched_traditional_pset_init,
    .maintenance_continuation = sched_timeshare_maintenance_continue,
    .choose_thread = sched_traditional_choose_thread,
    .steal_thread_enabled = TRUE,
    .steal_thread = sched_traditional_steal_thread,
    .compute_timeshare_priority = sched_compute_timeshare_priority,
    .choose_processor = choose_processor,
    .processor_enqueue = sched_traditional_processor_enqueue,
    .processor_queue_shutdown = sched_traditional_processor_queue_shutdown,
    .processor_queue_remove = sched_traditional_processor_queue_remove,
    .processor_queue_empty = sched_traditional_processor_queue_empty,
    .priority_is_urgent = priority_is_urgent,
    .processor_csw_check = sched_traditional_processor_csw_check,
    .processor_queue_has_priority = sched_traditional_processor_queue_has_priority,
    .initial_quantum_size = sched_timeshare_initial_quantum_size,
    .initial_thread_sched_mode = sched_traditional_initial_thread_sched_mode,
    .can_update_priority = can_update_priority,
    .update_priority = update_priority,
    .lightweight_update_priority = lightweight_update_priority,
    .quantum_expire = sched_default_quantum_expire,
    .processor_runq_count = sched_traditional_processor_runq_count,
    .processor_runq_stats_count_sum = sched_traditional_processor_runq_stats_count_sum,
    .processor_bound_count = sched_traditional_processor_bound_count,
    .thread_update_scan = sched_traditional_thread_update_scan,
    .direct_dispatch_to_idle_processors = TRUE,
    .multiple_psets_enabled = TRUE,
    .sched_groups_enabled = FALSE,
};

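/*
 * Variant dispatch table where all processors in a pset share one run
 * queue; only the queue-empty test, the runq stats sum, and direct
 * dispatch to idle processors differ from the table above.
 */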
const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch = {
    .sched_name = "traditional_with_pset_runqueue",
    .init = sched_traditional_with_pset_runqueue_init,
    .timebase_init = sched_timeshare_timebase_init,
    .processor_init = sched_traditional_processor_init,
    .pset_init = sched_traditional_pset_init,
    .maintenance_continuation = sched_timeshare_maintenance_continue,
    .choose_thread = sched_traditional_choose_thread,
    .steal_thread_enabled = TRUE,
    .steal_thread = sched_traditional_steal_thread,
    .compute_timeshare_priority = sched_compute_timeshare_priority,
    .choose_processor = choose_processor,
    .processor_enqueue = sched_traditional_processor_enqueue,
    .processor_queue_shutdown = sched_traditional_processor_queue_shutdown,
    .processor_queue_remove = sched_traditional_processor_queue_remove,
    .processor_queue_empty = sched_traditional_with_pset_runqueue_processor_queue_empty,
    .priority_is_urgent = priority_is_urgent,
    .processor_csw_check = sched_traditional_processor_csw_check,
    .processor_queue_has_priority = sched_traditional_processor_queue_has_priority,
    .initial_quantum_size = sched_timeshare_initial_quantum_size,
    .initial_thread_sched_mode = sched_traditional_initial_thread_sched_mode,
    .can_update_priority = can_update_priority,
    .update_priority = update_priority,
    .lightweight_update_priority = lightweight_update_priority,
    .quantum_expire = sched_default_quantum_expire,
    .processor_runq_count = sched_traditional_processor_runq_count,
    .processor_runq_stats_count_sum = sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum,
    .processor_bound_count = sched_traditional_processor_bound_count,
    .thread_update_scan = sched_traditional_thread_update_scan,
    .direct_dispatch_to_idle_processors = FALSE,
    .multiple_psets_enabled = TRUE,
    .sched_groups_enabled = FALSE,
};

static void
sched_traditional_init(void)
{
    sched_timeshare_init();
}

static void
sched_traditional_with_pset_runqueue_init(void)
{
    sched_timeshare_init();
    sched_traditional_use_pset_runqueue = TRUE;
}

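/*
 * A processor gets a private run queue only when pset runqueues are
 * not in use; the bound-thread count starts at zero either way.
 */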
static void
sched_traditional_processor_init(processor_t processor)
{
    if (!sched_traditional_use_pset_runqueue) {
        run_queue_init(&processor->runq);
    }
    processor->runq_bound_count = 0;
}

static void
sched_traditional_pset_init(processor_set_t pset)
{
    if (sched_traditional_use_pset_runqueue) {
        run_queue_init(&pset->pset_runq);
    }
    pset->pset_runq_bound_count = 0;
}

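/*
 * Return the run queue this processor dequeues from: the shared pset
 * run queue if enabled, otherwise the processor's own queue.
 */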
__attribute__((always_inline))
static inline run_queue_t runq_for_processor(processor_t processor)
{
    if (sched_traditional_use_pset_runqueue)
        return &processor->processor_set->pset_runq;
    else
        return &processor->runq;
}

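/*
 * Bound-thread accounting.  When a thread bound to this processor
 * enters or leaves its run queue, adjust the processor's (and, with a
 * shared pset runqueue, the pset's) count of bound threads.  Unbound
 * threads are ignored.
 */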
__attribute__((always_inline))
static inline void runq_consider_incr_bound_count(processor_t processor,
                                                  thread_t thread)
{
    if (thread->bound_processor == PROCESSOR_NULL)
        return;

    assert(thread->bound_processor == processor);

    if (sched_traditional_use_pset_runqueue)
        processor->processor_set->pset_runq_bound_count++;

    processor->runq_bound_count++;
}

__attribute__((always_inline))
static inline void runq_consider_decr_bound_count(processor_t processor,
                                                  thread_t thread)
{
    if (thread->bound_processor == PROCESSOR_NULL)
        return;

    assert(thread->bound_processor == processor);

    if (sched_traditional_use_pset_runqueue)
        processor->processor_set->pset_runq_bound_count--;

    processor->runq_bound_count--;
}

static thread_t
sched_traditional_choose_thread(
    processor_t processor,
    int priority,
    __unused ast_t reason)
{
    thread_t thread;

    thread = sched_traditional_choose_thread_from_runq(processor, runq_for_processor(processor), priority);
    if (thread != THREAD_NULL) {
        runq_consider_decr_bound_count(processor, thread);
    }

    return thread;
}

/*
 * sched_traditional_choose_thread_from_runq:
 *
 * Locate a thread to execute from the processor run queue
 * and return it.  Only choose a thread with greater or equal
 * priority.
 *
 * Associated pset must be locked.  Returns THREAD_NULL
 * on failure.
 */
static thread_t
sched_traditional_choose_thread_from_runq(
    processor_t processor,
    run_queue_t rq,
    int priority)
{
    queue_t queue = rq->queues + rq->highq;
    int pri = rq->highq;
    int count = rq->count;
    thread_t thread;

    while (count > 0 && pri >= priority) {
        thread = (thread_t)(uintptr_t)queue_first(queue);
        while (!queue_end(queue, (queue_entry_t)thread)) {
            if (thread->bound_processor == PROCESSOR_NULL ||
                thread->bound_processor == processor) {
                remqueue((queue_entry_t)thread);

                thread->runq = PROCESSOR_NULL;
                SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
                rq->count--;
                if (SCHED(priority_is_urgent)(pri)) {
                    rq->urgency--; assert(rq->urgency >= 0);
                }
                if (queue_empty(queue)) {
                    bitmap_clear(rq->bitmap, pri);
                    rq->highq = bitmap_first(rq->bitmap, NRQS);
                }

                return (thread);
            }
            count--;

            thread = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);
        }

        queue--; pri--;
    }

    return (THREAD_NULL);
}

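/*
 * Kernel-task threads start with fixed priority; all other threads
 * start in timeshare mode.
 */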
static sched_mode_t
sched_traditional_initial_thread_sched_mode(task_t parent_task)
{
    if (parent_task == kernel_task)
        return TH_MODE_FIXED;
    else
        return TH_MODE_TIMESHARE;
}

/*
 * sched_traditional_processor_enqueue:
 *
 * Enqueue thread on a processor run queue.  Thread must be locked,
 * and not already be on a run queue.
 *
 * Returns TRUE if a preemption is indicated based on the state
 * of the run queue.
 *
 * The run queue must be locked (see thread_run_queue_remove()
 * for more info).
 */
static boolean_t
sched_traditional_processor_enqueue(processor_t processor,
                                    thread_t thread,
                                    integer_t options)
{
    run_queue_t rq = runq_for_processor(processor);
    boolean_t result;

    result = run_queue_enqueue(rq, thread, options);
    thread->runq = processor;
    runq_consider_incr_bound_count(processor, thread);

    return (result);
}

static boolean_t
sched_traditional_processor_queue_empty(processor_t processor)
{
    return runq_for_processor(processor)->count == 0;
}

static boolean_t
sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor)
{
    processor_set_t pset = processor->processor_set;
    int count = runq_for_processor(processor)->count;

    /*
     * The pset runq contains the count of all runnable threads
     * for all processors in the pset.  However, for threads that
     * are bound to another processor, the current "processor"
     * is not eligible to execute the thread.  So we only
     * include bound threads that are bound to the current
     * "processor".  This allows the processor to idle when the
     * count of eligible threads drops to 0, even if there's
     * a runnable thread bound to a different processor in the
     * shared runq.
     */

    count -= pset->pset_runq_bound_count;
    count += processor->runq_bound_count;

    return count == 0;
}

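/*
 * Decide whether a context switch AST is needed: preempt if the run
 * queue holds a higher-priority thread (or an equal-priority thread
 * once the current thread is past its first timeslice), urgently if
 * any waiting thread is at an urgent priority.
 */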
static ast_t
sched_traditional_processor_csw_check(processor_t processor)
{
    run_queue_t runq;
    boolean_t has_higher;

    assert(processor->active_thread != NULL);

    runq = runq_for_processor(processor);

    if (processor->first_timeslice) {
        has_higher = (runq->highq > processor->current_pri);
    } else {
        has_higher = (runq->highq >= processor->current_pri);
    }

    if (has_higher) {
        if (runq->urgency > 0)
            return (AST_PREEMPT | AST_URGENT);

        return AST_PREEMPT;
    }

    return AST_NONE;
}

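/*
 * Report whether the run queue holds a runnable thread at or above
 * (gte) or strictly above the given priority.
 */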
static boolean_t
sched_traditional_processor_queue_has_priority(processor_t processor,
                                               int priority,
                                               boolean_t gte)
{
    run_queue_t runq = runq_for_processor(processor);

    if (runq->count == 0)
        return FALSE;
    else if (gte)
        return runq->highq >= priority;
    else
        return runq->highq > priority;
}

static int
sched_traditional_processor_runq_count(processor_t processor)
{
    return runq_for_processor(processor)->count;
}

static uint64_t
sched_traditional_processor_runq_stats_count_sum(processor_t processor)
{
    return runq_for_processor(processor)->runq_stats.count_sum;
}

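/*
 * With a shared pset runqueue, every processor in the pset would report
 * the same runq stats.  Attribute the sum to a single representative
 * processor (cpu_set_low) and report zero for the others, so the total
 * is not counted once per processor.
 */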
static uint64_t
sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor)
{
    if (processor->cpu_id == processor->processor_set->cpu_set_low)
        return runq_for_processor(processor)->runq_stats.count_sum;
    else
        return 0ULL;
}

static int
sched_traditional_processor_bound_count(processor_t processor)
{
    return processor->runq_bound_count;
}

/*
 * sched_traditional_processor_queue_shutdown:
 *
 * Shut down a processor run queue by
 * re-dispatching non-bound threads.
 *
 * Associated pset must be locked, and is
 * returned unlocked.
 */
static void
sched_traditional_processor_queue_shutdown(processor_t processor)
{
    processor_set_t pset = processor->processor_set;
    run_queue_t rq = runq_for_processor(processor);
    queue_t queue = rq->queues + rq->highq;
    int pri = rq->highq;
    int count = rq->count;
    thread_t next, thread;
    queue_head_t tqueue;

    queue_init(&tqueue);

    while (count > 0) {
        thread = (thread_t)(uintptr_t)queue_first(queue);
        while (!queue_end(queue, (queue_entry_t)thread)) {
            next = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);

            if (thread->bound_processor == PROCESSOR_NULL) {
                remqueue((queue_entry_t)thread);

                thread->runq = PROCESSOR_NULL;
                SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
                runq_consider_decr_bound_count(processor, thread);
                rq->count--;
                if (SCHED(priority_is_urgent)(pri)) {
                    rq->urgency--; assert(rq->urgency >= 0);
                }
                if (queue_empty(queue)) {
                    bitmap_clear(rq->bitmap, pri);
                    rq->highq = bitmap_first(rq->bitmap, NRQS);
                }

                enqueue_tail(&tqueue, (queue_entry_t)thread);
            }
            count--;

            thread = next;
        }

        queue--; pri--;
    }

    pset_unlock(pset);

    while ((thread = (thread_t)(uintptr_t)dequeue_head(&tqueue)) != THREAD_NULL) {
        thread_lock(thread);

        thread_setrun(thread, SCHED_TAILQ);

        thread_unlock(thread);
    }
}

#if 0
static void
run_queue_check(
    run_queue_t rq,
    thread_t thread)
{
    queue_t q;
    queue_entry_t qe;

    if (rq != thread->runq)
        panic("run_queue_check: thread runq");

    if (thread->sched_pri > MAXPRI || thread->sched_pri < MINPRI)
        panic("run_queue_check: thread sched_pri");

    q = &rq->queues[thread->sched_pri];
    qe = queue_first(q);
    while (!queue_end(q, qe)) {
        if (qe == (queue_entry_t)thread)
            return;

        qe = queue_next(qe);
    }

    panic("run_queue_check: end");
}
#endif /* 0 */

/*
 * sched_traditional_processor_queue_remove:
 *
 * Locks the runqueue itself.
 *
 * Thread must be locked.
 */
static boolean_t
sched_traditional_processor_queue_remove(processor_t processor,
                                         thread_t thread)
{
    processor_set_t pset;
    run_queue_t rq;

    pset = processor->processor_set;
    pset_lock(pset);

    rq = runq_for_processor(processor);

    if (processor == thread->runq) {
        /*
         * Thread is on a run queue and we have a lock on
         * that run queue.
         */
        runq_consider_decr_bound_count(processor, thread);
        run_queue_remove(rq, thread);
    } else {
        /*
         * The thread left the run queue before we could
         * lock the run queue.
         */
        assert(thread->runq == PROCESSOR_NULL);
        processor = PROCESSOR_NULL;
    }

    pset_unlock(pset);

    return (processor != PROCESSOR_NULL);
}

/*
 * sched_traditional_steal_processor_thread:
 *
 * Locate a thread to steal from the processor and
 * return it.
 *
 * Associated pset must be locked.  Returns THREAD_NULL
 * on failure.
 */
static thread_t
sched_traditional_steal_processor_thread(processor_t processor)
{
    run_queue_t rq = runq_for_processor(processor);
    queue_t queue = rq->queues + rq->highq;
    int pri = rq->highq;
    int count = rq->count;
    thread_t thread;

    while (count > 0) {
        thread = (thread_t)(uintptr_t)queue_first(queue);
        while (!queue_end(queue, (queue_entry_t)thread)) {
            if (thread->bound_processor == PROCESSOR_NULL) {
                remqueue((queue_entry_t)thread);

                thread->runq = PROCESSOR_NULL;
                SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
                runq_consider_decr_bound_count(processor, thread);
                rq->count--;
                if (SCHED(priority_is_urgent)(pri)) {
                    rq->urgency--; assert(rq->urgency >= 0);
                }
                if (queue_empty(queue)) {
                    bitmap_clear(rq->bitmap, pri);
                    rq->highq = bitmap_first(rq->bitmap, NRQS);
                }

                return (thread);
            }
            count--;

            thread = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);
        }

        queue--; pri--;
    }

    return (THREAD_NULL);
}

/*
 * sched_traditional_steal_thread:
 *
 * Locate and steal a thread, beginning
 * at the pset.
 *
 * The pset must be locked, and is returned
 * unlocked.
 *
 * Returns the stolen thread, or THREAD_NULL on
 * failure.
 */
static thread_t
sched_traditional_steal_thread(processor_set_t pset)
{
    processor_set_t nset, cset = pset;
    processor_t processor;
    thread_t thread;

    do {
        processor = (processor_t)(uintptr_t)queue_first(&cset->active_queue);
        while (!queue_end(&cset->active_queue, (queue_entry_t)processor)) {
            if (runq_for_processor(processor)->count > 0) {
                thread = sched_traditional_steal_processor_thread(processor);
                if (thread != THREAD_NULL) {
                    remqueue((queue_entry_t)processor);
                    enqueue_tail(&cset->active_queue, (queue_entry_t)processor);

                    pset_unlock(cset);

                    return (thread);
                }
            }

            processor = (processor_t)(uintptr_t)queue_next((queue_entry_t)processor);
        }

        nset = next_pset(cset);

        if (nset != pset) {
            pset_unlock(cset);

            cset = nset;
            pset_lock(cset);
        }
    } while (nset != pset);

    pset_unlock(cset);

    return (THREAD_NULL);
}

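/*
 * Walk every processor's run queue, plus each idle thread, collecting
 * timeshare threads whose sched_stamp is stale so their priorities can
 * be recomputed.  If the update-candidate buffer fills, the batch
 * collected so far is processed and the scan is restarted.
 */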
static void
sched_traditional_thread_update_scan(sched_update_scan_context_t scan_context)
{
    boolean_t restart_needed = FALSE;
    processor_t processor = processor_list;
    processor_set_t pset;
    thread_t thread;
    spl_t s;

    do {
        do {
            /*
             * TODO: in sched_traditional_use_pset_runqueue case,
             * avoid scanning the same runq multiple times
             */
            pset = processor->processor_set;

            s = splsched();
            pset_lock(pset);

            restart_needed = runq_scan(runq_for_processor(processor), scan_context);

            pset_unlock(pset);
            splx(s);

            if (restart_needed)
                break;

            thread = processor->idle_thread;
            if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
                if (thread_update_add_thread(thread) == FALSE) {
                    restart_needed = TRUE;
                    break;
                }
            }
        } while ((processor = processor->processor_list) != NULL);

        /* Ok, we now have a collection of candidates -- fix them. */
        thread_update_process_threads();
    } while (restart_needed);
}