/* apple/xnu xnu-6153.61.1 - osfmk/kern/sched_traditional.c */
/*
 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach/mach_types.h>

#include <kern/sched.h>
#include <kern/sched_prim.h>

static boolean_t
sched_traditional_use_pset_runqueue = FALSE;

static void
sched_traditional_init(void);

static bool
sched_traditional_steal_thread_enabled(processor_set_t pset);

static thread_t
sched_traditional_steal_thread(processor_set_t pset);

static thread_t
sched_traditional_steal_processor_thread(processor_t processor);

static void
sched_traditional_thread_update_scan(sched_update_scan_context_t scan_context);

static void
sched_traditional_processor_queue_shutdown(processor_t processor);

static boolean_t
sched_traditional_processor_enqueue(processor_t processor, thread_t thread,
    sched_options_t options);

static boolean_t
sched_traditional_processor_queue_remove(processor_t processor, thread_t thread);

static boolean_t
sched_traditional_processor_queue_empty(processor_t processor);

static ast_t
sched_traditional_processor_csw_check(processor_t processor);

static boolean_t
sched_traditional_processor_queue_has_priority(processor_t processor, int priority, boolean_t gte);

static int
sched_traditional_processor_runq_count(processor_t processor);

static boolean_t
sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor);

static uint64_t
sched_traditional_processor_runq_stats_count_sum(processor_t processor);

static uint64_t
sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor);

static int
sched_traditional_processor_bound_count(processor_t processor);

extern void
sched_traditional_quantum_expire(thread_t thread);

static void
sched_traditional_processor_init(processor_t processor);

static void
sched_traditional_pset_init(processor_set_t pset);

static void
sched_traditional_with_pset_runqueue_init(void);

static sched_mode_t
sched_traditional_initial_thread_sched_mode(task_t parent_task);

static thread_t
sched_traditional_choose_thread(processor_t processor, int priority, ast_t reason);

/* Choose a thread from a processor's priority-based runq */
static thread_t sched_traditional_choose_thread_from_runq(processor_t processor, run_queue_t runq, int priority);

const struct sched_dispatch_table sched_traditional_dispatch = {
	.sched_name = "traditional",
	.init = sched_traditional_init,
	.timebase_init = sched_timeshare_timebase_init,
	.processor_init = sched_traditional_processor_init,
	.pset_init = sched_traditional_pset_init,
	.maintenance_continuation = sched_timeshare_maintenance_continue,
	.choose_thread = sched_traditional_choose_thread,
	.steal_thread_enabled = sched_traditional_steal_thread_enabled,
	.steal_thread = sched_traditional_steal_thread,
	.compute_timeshare_priority = sched_compute_timeshare_priority,
	.choose_processor = choose_processor,
	.processor_enqueue = sched_traditional_processor_enqueue,
	.processor_queue_shutdown = sched_traditional_processor_queue_shutdown,
	.processor_queue_remove = sched_traditional_processor_queue_remove,
	.processor_queue_empty = sched_traditional_processor_queue_empty,
	.priority_is_urgent = priority_is_urgent,
	.processor_csw_check = sched_traditional_processor_csw_check,
	.processor_queue_has_priority = sched_traditional_processor_queue_has_priority,
	.initial_quantum_size = sched_timeshare_initial_quantum_size,
	.initial_thread_sched_mode = sched_traditional_initial_thread_sched_mode,
	.can_update_priority = can_update_priority,
	.update_priority = update_priority,
	.lightweight_update_priority = lightweight_update_priority,
	.quantum_expire = sched_default_quantum_expire,
	.processor_runq_count = sched_traditional_processor_runq_count,
	.processor_runq_stats_count_sum = sched_traditional_processor_runq_stats_count_sum,
	.processor_bound_count = sched_traditional_processor_bound_count,
	.thread_update_scan = sched_traditional_thread_update_scan,
	.multiple_psets_enabled = TRUE,
	.sched_groups_enabled = FALSE,
	.avoid_processor_enabled = FALSE,
	.thread_avoid_processor = NULL,
	.processor_balance = sched_SMT_balance,

	.rt_runq = sched_rtglobal_runq,
	.rt_init = sched_rtglobal_init,
	.rt_queue_shutdown = sched_rtglobal_queue_shutdown,
	.rt_runq_scan = sched_rtglobal_runq_scan,
	.rt_runq_count_sum = sched_rtglobal_runq_count_sum,

	.qos_max_parallelism = sched_qos_max_parallelism,
	.check_spill = sched_check_spill,
	.ipi_policy = sched_ipi_policy,
	.thread_should_yield = sched_thread_should_yield,
	.run_count_incr = sched_run_incr,
	.run_count_decr = sched_run_decr,
	.update_thread_bucket = sched_update_thread_bucket,
	.pset_made_schedulable = sched_pset_made_schedulable,
};

const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch = {
	.sched_name = "traditional_with_pset_runqueue",
	.init = sched_traditional_with_pset_runqueue_init,
	.timebase_init = sched_timeshare_timebase_init,
	.processor_init = sched_traditional_processor_init,
	.pset_init = sched_traditional_pset_init,
	.maintenance_continuation = sched_timeshare_maintenance_continue,
	.choose_thread = sched_traditional_choose_thread,
	.steal_thread_enabled = sched_steal_thread_enabled,
	.steal_thread = sched_traditional_steal_thread,
	.compute_timeshare_priority = sched_compute_timeshare_priority,
	.choose_processor = choose_processor,
	.processor_enqueue = sched_traditional_processor_enqueue,
	.processor_queue_shutdown = sched_traditional_processor_queue_shutdown,
	.processor_queue_remove = sched_traditional_processor_queue_remove,
	.processor_queue_empty = sched_traditional_with_pset_runqueue_processor_queue_empty,
	.priority_is_urgent = priority_is_urgent,
	.processor_csw_check = sched_traditional_processor_csw_check,
	.processor_queue_has_priority = sched_traditional_processor_queue_has_priority,
	.initial_quantum_size = sched_timeshare_initial_quantum_size,
	.initial_thread_sched_mode = sched_traditional_initial_thread_sched_mode,
	.can_update_priority = can_update_priority,
	.update_priority = update_priority,
	.lightweight_update_priority = lightweight_update_priority,
	.quantum_expire = sched_default_quantum_expire,
	.processor_runq_count = sched_traditional_processor_runq_count,
	.processor_runq_stats_count_sum = sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum,
	.processor_bound_count = sched_traditional_processor_bound_count,
	.thread_update_scan = sched_traditional_thread_update_scan,
	.multiple_psets_enabled = TRUE,
	.sched_groups_enabled = FALSE,
	.avoid_processor_enabled = FALSE,
	.thread_avoid_processor = NULL,
	.processor_balance = sched_SMT_balance,

	.rt_runq = sched_rtglobal_runq,
	.rt_init = sched_rtglobal_init,
	.rt_queue_shutdown = sched_rtglobal_queue_shutdown,
	.rt_runq_scan = sched_rtglobal_runq_scan,
	.rt_runq_count_sum = sched_rtglobal_runq_count_sum,

	.qos_max_parallelism = sched_qos_max_parallelism,
	.check_spill = sched_check_spill,
	.ipi_policy = sched_ipi_policy,
	.thread_should_yield = sched_thread_should_yield,
	.run_count_incr = sched_run_incr,
	.run_count_decr = sched_run_decr,
	.update_thread_bucket = sched_update_thread_bucket,
	.pset_made_schedulable = sched_pset_made_schedulable,
};
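
/*
 * The two dispatch tables above differ only in their .init,
 * .steal_thread_enabled, .processor_queue_empty and
 * .processor_runq_stats_count_sum hooks: the first keeps a private run
 * queue per processor, while the second routes every processor in a pset
 * to a single shared run queue (see
 * sched_traditional_with_pset_runqueue_init() below).
 */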

static void
sched_traditional_init(void)
{
	sched_timeshare_init();
}

static void
sched_traditional_with_pset_runqueue_init(void)
{
	sched_timeshare_init();
	sched_traditional_use_pset_runqueue = TRUE;
}

static void
sched_traditional_processor_init(processor_t processor)
{
	if (!sched_traditional_use_pset_runqueue) {
		run_queue_init(&processor->runq);
	}
	processor->runq_bound_count = 0;
}

static void
sched_traditional_pset_init(processor_set_t pset)
{
	if (sched_traditional_use_pset_runqueue) {
		run_queue_init(&pset->pset_runq);
	}
	pset->pset_runq_bound_count = 0;
}
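
/*
 * Only one of the two run queues is ever initialized: the per-processor
 * runq by default, or the shared per-pset runq once
 * sched_traditional_use_pset_runqueue has been set by
 * sched_traditional_with_pset_runqueue_init().
 */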

__attribute__((always_inline))
static inline run_queue_t
runq_for_processor(processor_t processor)
{
	if (sched_traditional_use_pset_runqueue) {
		return &processor->processor_set->pset_runq;
	} else {
		return &processor->runq;
	}
}

__attribute__((always_inline))
static inline void
runq_consider_incr_bound_count(processor_t processor,
    thread_t thread)
{
	if (thread->bound_processor == PROCESSOR_NULL) {
		return;
	}

	assert(thread->bound_processor == processor);

	if (sched_traditional_use_pset_runqueue) {
		processor->processor_set->pset_runq_bound_count++;
	}

	processor->runq_bound_count++;
}

__attribute__((always_inline))
static inline void
runq_consider_decr_bound_count(processor_t processor,
    thread_t thread)
{
	if (thread->bound_processor == PROCESSOR_NULL) {
		return;
	}

	assert(thread->bound_processor == processor);

	if (sched_traditional_use_pset_runqueue) {
		processor->processor_set->pset_runq_bound_count--;
	}

	processor->runq_bound_count--;
}
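
/*
 * The bound counts maintained above track how many enqueued threads are
 * bound to a specific processor.  They are consulted by
 * sched_traditional_with_pset_runqueue_processor_queue_empty() to decide
 * whether this processor may idle, and reported via
 * sched_traditional_processor_bound_count().
 */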

static thread_t
sched_traditional_choose_thread(
	processor_t     processor,
	int             priority,
	__unused ast_t  reason)
{
	thread_t thread;

	thread = sched_traditional_choose_thread_from_runq(processor, runq_for_processor(processor), priority);
	if (thread != THREAD_NULL) {
		runq_consider_decr_bound_count(processor, thread);
	}

	return thread;
}

/*
 * sched_traditional_choose_thread_from_runq:
 *
 * Locate a thread to execute from the processor run queue
 * and return it. Only choose a thread with greater or equal
 * priority.
 *
 * Associated pset must be locked. Returns THREAD_NULL
 * on failure.
 */
static thread_t
sched_traditional_choose_thread_from_runq(
	processor_t     processor,
	run_queue_t     rq,
	int             priority)
{
	circle_queue_t queue = rq->queues + rq->highq;
	int pri = rq->highq;
	int count = rq->count;
	thread_t thread;

	while (count > 0 && pri >= priority) {
		cqe_foreach_element_safe(thread, queue, runq_links) {
			if (thread->bound_processor == PROCESSOR_NULL ||
			    thread->bound_processor == processor) {
				circle_dequeue(queue, &thread->runq_links);

				thread->runq = PROCESSOR_NULL;
				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
				rq->count--;
				if (SCHED(priority_is_urgent)(pri)) {
					rq->urgency--; assert(rq->urgency >= 0);
				}
				if (circle_queue_empty(queue)) {
					bitmap_clear(rq->bitmap, pri);
					rq->highq = bitmap_first(rq->bitmap, NRQS);
				}
				return thread;
			}
			count--;
		}

		queue--; pri--;
	}

	return THREAD_NULL;
}

static sched_mode_t
sched_traditional_initial_thread_sched_mode(task_t parent_task)
{
	if (parent_task == kernel_task) {
		return TH_MODE_FIXED;
	} else {
		return TH_MODE_TIMESHARE;
	}
}

/*
 * sched_traditional_processor_enqueue:
 *
 * Enqueue thread on a processor run queue. Thread must be locked,
 * and not already be on a run queue.
 *
 * Returns TRUE if a preemption is indicated based on the state
 * of the run queue.
 *
 * The run queue must be locked (see thread_run_queue_remove()
 * for more info).
 */
static boolean_t
sched_traditional_processor_enqueue(processor_t processor,
    thread_t        thread,
    sched_options_t options)
{
	run_queue_t rq = runq_for_processor(processor);
	boolean_t result;

	result = run_queue_enqueue(rq, thread, options);
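	/*
	 * thread->runq records the enqueueing processor even when the shared
	 * pset runq is in use; sched_traditional_processor_queue_remove()
	 * compares against it to see whether the thread is still enqueued here.
	 */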
	thread->runq = processor;
	runq_consider_incr_bound_count(processor, thread);

	return result;
}

static boolean_t
sched_traditional_processor_queue_empty(processor_t processor)
{
	return runq_for_processor(processor)->count == 0;
}

static boolean_t
sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor)
{
	processor_set_t pset = processor->processor_set;
	int count = runq_for_processor(processor)->count;

	/*
	 * The pset runq contains the count of all runnable threads
	 * for all processors in the pset. However, for threads that
	 * are bound to another processor, the current "processor"
	 * is not eligible to execute the thread. So we only
	 * include bound threads that are bound to the current
	 * "processor". This allows the processor to idle when the
	 * count of eligible threads drops to 0, even if there's
	 * a runnable thread bound to a different processor in the
	 * shared runq.
	 */
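	/*
	 * For example: with 5 runnable threads in the shared runq, 3 of them
	 * bound (1 to this processor, 2 to other processors), the eligible
	 * count below works out to 5 - 3 + 1 = 3.
	 */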

	count -= pset->pset_runq_bound_count;
	count += processor->runq_bound_count;

	return count == 0;
}

static ast_t
sched_traditional_processor_csw_check(processor_t processor)
{
	run_queue_t runq;
	boolean_t has_higher;

	assert(processor->active_thread != NULL);

	runq = runq_for_processor(processor);

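	/*
	 * During the running thread's first timeslice, only a strictly
	 * higher-priority waiter triggers preemption; after that, an
	 * equal-priority waiter is enough, which lets same-priority
	 * threads round-robin.
	 */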
	if (processor->first_timeslice) {
		has_higher = (runq->highq > processor->current_pri);
	} else {
		has_higher = (runq->highq >= processor->current_pri);
	}

	if (has_higher) {
		if (runq->urgency > 0) {
			return AST_PREEMPT | AST_URGENT;
		}

		return AST_PREEMPT;
	}

	return AST_NONE;
}

static boolean_t
sched_traditional_processor_queue_has_priority(processor_t processor,
    int       priority,
    boolean_t gte)
{
	if (gte) {
		return runq_for_processor(processor)->highq >= priority;
	} else {
		return runq_for_processor(processor)->highq > priority;
	}
}

static int
sched_traditional_processor_runq_count(processor_t processor)
{
	return runq_for_processor(processor)->count;
}

static uint64_t
sched_traditional_processor_runq_stats_count_sum(processor_t processor)
{
	return runq_for_processor(processor)->runq_stats.count_sum;
}

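/*
 * In pset-runqueue mode all processors share one run queue, so only the
 * lowest-numbered CPU in the pset reports the shared statistics below;
 * otherwise the same counts would be summed once per processor.
 */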
static uint64_t
sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor)
{
	if (processor->cpu_id == processor->processor_set->cpu_set_low) {
		return runq_for_processor(processor)->runq_stats.count_sum;
	} else {
		return 0ULL;
	}
}

static int
sched_traditional_processor_bound_count(processor_t processor)
{
	return processor->runq_bound_count;
}

/*
 * sched_traditional_processor_queue_shutdown:
 *
 * Shutdown a processor run queue by
 * re-dispatching non-bound threads.
 *
 * Associated pset must be locked, and is
 * returned unlocked.
 */
static void
sched_traditional_processor_queue_shutdown(processor_t processor)
{
	processor_set_t pset = processor->processor_set;
	run_queue_t rq = runq_for_processor(processor);
	circle_queue_t queue = rq->queues + rq->highq;
	int pri = rq->highq;
	int count = rq->count;
	thread_t thread;
	circle_queue_head_t tqueue;

	circle_queue_init(&tqueue);

	while (count > 0) {
		cqe_foreach_element_safe(thread, queue, runq_links) {
			if (thread->bound_processor == PROCESSOR_NULL) {
				circle_dequeue(queue, &thread->runq_links);

				thread->runq = PROCESSOR_NULL;
				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
				runq_consider_decr_bound_count(processor, thread);
				rq->count--;
				if (SCHED(priority_is_urgent)(pri)) {
					rq->urgency--; assert(rq->urgency >= 0);
				}
				if (circle_queue_empty(queue)) {
					bitmap_clear(rq->bitmap, pri);
					rq->highq = bitmap_first(rq->bitmap, NRQS);
				}

				circle_enqueue_tail(&tqueue, &thread->runq_links);
			}
			count--;
		}

		queue--; pri--;
	}

	pset_unlock(pset);

	while ((thread = cqe_dequeue_head(&tqueue, struct thread, runq_links)) != THREAD_NULL) {
		thread_lock(thread);

		thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
	}
}

#if 0
static void
run_queue_check(
	run_queue_t rq,
	thread_t    thread)
{
	queue_t q;
	queue_entry_t qe;

	if (rq != thread->runq) {
		panic("run_queue_check: thread runq");
	}

	if (thread->sched_pri > MAXPRI || thread->sched_pri < MINPRI) {
		panic("run_queue_check: thread sched_pri");
	}

	q = &rq->queues[thread->sched_pri];
	qe = queue_first(q);
	while (!queue_end(q, qe)) {
		if (qe == (queue_entry_t)thread) {
			return;
		}

		qe = queue_next(qe);
	}

	panic("run_queue_check: end");
}
#endif /* 0 */
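
/*
 * run_queue_check() above is disabled consistency-check code kept for
 * reference; it still uses the legacy queue_t API, while the live paths
 * in this file operate on circle queues.
 */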

/*
 * Locks the runqueue itself.
 *
 * Thread must be locked.
 */
static boolean_t
sched_traditional_processor_queue_remove(processor_t processor,
    thread_t thread)
{
	processor_set_t pset;
	run_queue_t rq;

	pset = processor->processor_set;
	pset_lock(pset);

	rq = runq_for_processor(processor);

	if (processor == thread->runq) {
		/*
		 * Thread is on a run queue and we have a lock on
		 * that run queue.
		 */
		runq_consider_decr_bound_count(processor, thread);
		run_queue_remove(rq, thread);
	} else {
		/*
		 * The thread left the run queue before we could
		 * lock the run queue.
		 */
		assert(thread->runq == PROCESSOR_NULL);
		processor = PROCESSOR_NULL;
	}

	pset_unlock(pset);

	return processor != PROCESSOR_NULL;
}

/*
 * sched_traditional_steal_processor_thread:
 *
 * Locate a thread to steal from the processor and
 * return it.
 *
 * Associated pset must be locked. Returns THREAD_NULL
 * on failure.
 */
static thread_t
sched_traditional_steal_processor_thread(processor_t processor)
{
	run_queue_t rq = runq_for_processor(processor);
	circle_queue_t queue = rq->queues + rq->highq;
	int pri = rq->highq;
	int count = rq->count;
	thread_t thread;

	while (count > 0) {
		cqe_foreach_element_safe(thread, queue, runq_links) {
			if (thread->bound_processor == PROCESSOR_NULL) {
				circle_dequeue(queue, &thread->runq_links);

				thread->runq = PROCESSOR_NULL;
				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
				runq_consider_decr_bound_count(processor, thread);
				rq->count--;
				if (SCHED(priority_is_urgent)(pri)) {
					rq->urgency--; assert(rq->urgency >= 0);
				}
				if (circle_queue_empty(queue)) {
					bitmap_clear(rq->bitmap, pri);
					rq->highq = bitmap_first(rq->bitmap, NRQS);
				}

				return thread;
			}
			count--;
		}

		queue--; pri--;
	}

	return THREAD_NULL;
}

static bool
sched_traditional_steal_thread_enabled(processor_set_t pset)
{
	(void)pset;
	return true;
}

/*
 * Locate and steal a thread, beginning
 * at the pset.
 *
 * The pset must be locked, and is returned
 * unlocked.
 *
 * Returns the stolen thread, or THREAD_NULL on
 * failure.
 */
static thread_t
sched_traditional_steal_thread(processor_set_t pset)
{
	processor_set_t nset, cset = pset;
	processor_t processor;
	thread_t thread;

	do {
		uint64_t active_map = (pset->cpu_state_map[PROCESSOR_RUNNING] |
		    pset->cpu_state_map[PROCESSOR_DISPATCHING]);
		for (int cpuid = lsb_first(active_map); cpuid >= 0; cpuid = lsb_next(active_map, cpuid)) {
			processor = processor_array[cpuid];
			if (runq_for_processor(processor)->count > 0) {
				thread = sched_traditional_steal_processor_thread(processor);
				if (thread != THREAD_NULL) {
					pset_unlock(cset);

					return thread;
				}
			}
		}

		nset = next_pset(cset);

		if (nset != pset) {
			pset_unlock(cset);

			cset = nset;
			pset_lock(cset);
		}
	} while (nset != pset);

	pset_unlock(cset);

	return THREAD_NULL;
}

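/*
 * Walk every processor's run queue, collecting candidate threads (and any
 * idle thread whose sched_stamp has fallen behind sched_tick) via
 * thread_update_add_thread(); thread_update_process_threads() then updates
 * the collected candidates, and the scan restarts if the candidate list
 * fills before the walk completes.
 */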
static void
sched_traditional_thread_update_scan(sched_update_scan_context_t scan_context)
{
	boolean_t restart_needed = FALSE;
	processor_t processor = processor_list;
	processor_set_t pset;
	thread_t thread;
	spl_t s;

	do {
		do {
			/*
			 * TODO: in sched_traditional_use_pset_runqueue case,
			 * avoid scanning the same runq multiple times
			 */
			pset = processor->processor_set;

			s = splsched();
			pset_lock(pset);

			restart_needed = runq_scan(runq_for_processor(processor), scan_context);

			pset_unlock(pset);
			splx(s);

			if (restart_needed) {
				break;
			}

			thread = processor->idle_thread;
			if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
				if (thread_update_add_thread(thread) == FALSE) {
					restart_needed = TRUE;
					break;
				}
			}
		} while ((processor = processor->processor_list) != NULL);

		/* Ok, we now have a collection of candidates -- fix them. */
		thread_update_process_threads();
	} while (restart_needed);
}