/*
 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach/mach_types.h>

#include <kern/sched.h>
#include <kern/sched_prim.h>

static boolean_t
	sched_traditional_use_pset_runqueue = FALSE;

static void
sched_traditional_init(void);

static bool
sched_traditional_steal_thread_enabled(processor_set_t pset);

static thread_t
sched_traditional_steal_thread(processor_set_t pset);

static thread_t
sched_traditional_steal_processor_thread(processor_t processor);

static void
sched_traditional_thread_update_scan(sched_update_scan_context_t scan_context);

static void
sched_traditional_processor_queue_shutdown(processor_t processor);

static boolean_t
sched_traditional_processor_enqueue(processor_t processor, thread_t thread,
	sched_options_t options);

static boolean_t
sched_traditional_processor_queue_remove(processor_t processor, thread_t thread);

static boolean_t
sched_traditional_processor_queue_empty(processor_t processor);

static ast_t
sched_traditional_processor_csw_check(processor_t processor);

static boolean_t
sched_traditional_processor_queue_has_priority(processor_t processor, int priority, boolean_t gte);

static int
sched_traditional_processor_runq_count(processor_t processor);

static boolean_t
sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor);

static uint64_t
sched_traditional_processor_runq_stats_count_sum(processor_t processor);

static uint64_t
sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor);

static int
sched_traditional_processor_bound_count(processor_t processor);

extern void
sched_traditional_quantum_expire(thread_t thread);

static void
sched_traditional_processor_init(processor_t processor);

static void
sched_traditional_pset_init(processor_set_t pset);

static void
sched_traditional_with_pset_runqueue_init(void);

static sched_mode_t
sched_traditional_initial_thread_sched_mode(task_t parent_task);

static thread_t
sched_traditional_choose_thread(processor_t processor, int priority, ast_t reason);

/* Choose a thread from a processor's priority-based runq */
static thread_t sched_traditional_choose_thread_from_runq(processor_t processor, run_queue_t runq, int priority);

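/*
 * Policy vtable for the "traditional" scheduler: a timeshare
 * scheduler that keeps one priority run queue per processor.
 */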
const struct sched_dispatch_table sched_traditional_dispatch = {
	.sched_name = "traditional",
	.init = sched_traditional_init,
	.timebase_init = sched_timeshare_timebase_init,
	.processor_init = sched_traditional_processor_init,
	.pset_init = sched_traditional_pset_init,
	.maintenance_continuation = sched_timeshare_maintenance_continue,
	.choose_thread = sched_traditional_choose_thread,
	.steal_thread_enabled = sched_traditional_steal_thread_enabled,
	.steal_thread = sched_traditional_steal_thread,
	.compute_timeshare_priority = sched_compute_timeshare_priority,
	.choose_node = sched_choose_node,
	.choose_processor = choose_processor,
	.processor_enqueue = sched_traditional_processor_enqueue,
	.processor_queue_shutdown = sched_traditional_processor_queue_shutdown,
	.processor_queue_remove = sched_traditional_processor_queue_remove,
	.processor_queue_empty = sched_traditional_processor_queue_empty,
	.priority_is_urgent = priority_is_urgent,
	.processor_csw_check = sched_traditional_processor_csw_check,
	.processor_queue_has_priority = sched_traditional_processor_queue_has_priority,
	.initial_quantum_size = sched_timeshare_initial_quantum_size,
	.initial_thread_sched_mode = sched_traditional_initial_thread_sched_mode,
	.can_update_priority = can_update_priority,
	.update_priority = update_priority,
	.lightweight_update_priority = lightweight_update_priority,
	.quantum_expire = sched_default_quantum_expire,
	.processor_runq_count = sched_traditional_processor_runq_count,
	.processor_runq_stats_count_sum = sched_traditional_processor_runq_stats_count_sum,
	.processor_bound_count = sched_traditional_processor_bound_count,
	.thread_update_scan = sched_traditional_thread_update_scan,
	.multiple_psets_enabled = TRUE,
	.sched_groups_enabled = FALSE,
	.avoid_processor_enabled = FALSE,
	.thread_avoid_processor = NULL,
	.processor_balance = sched_SMT_balance,

	.rt_runq = sched_rtlocal_runq,
	.rt_init = sched_rtlocal_init,
	.rt_queue_shutdown = sched_rtlocal_queue_shutdown,
	.rt_runq_scan = sched_rtlocal_runq_scan,
	.rt_runq_count_sum = sched_rtlocal_runq_count_sum,

	.qos_max_parallelism = sched_qos_max_parallelism,
	.check_spill = sched_check_spill,
	.ipi_policy = sched_ipi_policy,
	.thread_should_yield = sched_thread_should_yield,
	.run_count_incr = sched_run_incr,
	.run_count_decr = sched_run_decr,
	.update_thread_bucket = sched_update_thread_bucket,
	.pset_made_schedulable = sched_pset_made_schedulable,
};

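/*
 * Policy vtable for the "traditional_with_pset_runqueue" variant:
 * identical to the table above except that its init routine switches
 * this file onto a single run queue shared by every processor in a
 * pset, it uses sched_steal_thread_enabled rather than the
 * always-true local predicate, and it substitutes the pset-aware
 * queue_empty and runq_stats_count_sum callbacks.
 */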
const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch = {
	.sched_name = "traditional_with_pset_runqueue",
	.init = sched_traditional_with_pset_runqueue_init,
	.timebase_init = sched_timeshare_timebase_init,
	.processor_init = sched_traditional_processor_init,
	.pset_init = sched_traditional_pset_init,
	.maintenance_continuation = sched_timeshare_maintenance_continue,
	.choose_thread = sched_traditional_choose_thread,
	.steal_thread_enabled = sched_steal_thread_enabled,
	.steal_thread = sched_traditional_steal_thread,
	.compute_timeshare_priority = sched_compute_timeshare_priority,
	.choose_node = sched_choose_node,
	.choose_processor = choose_processor,
	.processor_enqueue = sched_traditional_processor_enqueue,
	.processor_queue_shutdown = sched_traditional_processor_queue_shutdown,
	.processor_queue_remove = sched_traditional_processor_queue_remove,
	.processor_queue_empty = sched_traditional_with_pset_runqueue_processor_queue_empty,
	.priority_is_urgent = priority_is_urgent,
	.processor_csw_check = sched_traditional_processor_csw_check,
	.processor_queue_has_priority = sched_traditional_processor_queue_has_priority,
	.initial_quantum_size = sched_timeshare_initial_quantum_size,
	.initial_thread_sched_mode = sched_traditional_initial_thread_sched_mode,
	.can_update_priority = can_update_priority,
	.update_priority = update_priority,
	.lightweight_update_priority = lightweight_update_priority,
	.quantum_expire = sched_default_quantum_expire,
	.processor_runq_count = sched_traditional_processor_runq_count,
	.processor_runq_stats_count_sum = sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum,
	.processor_bound_count = sched_traditional_processor_bound_count,
	.thread_update_scan = sched_traditional_thread_update_scan,
	.multiple_psets_enabled = TRUE,
	.sched_groups_enabled = FALSE,
	.avoid_processor_enabled = FALSE,
	.thread_avoid_processor = NULL,
	.processor_balance = sched_SMT_balance,

	.rt_runq = sched_rtlocal_runq,
	.rt_init = sched_rtlocal_init,
	.rt_queue_shutdown = sched_rtlocal_queue_shutdown,
	.rt_runq_scan = sched_rtlocal_runq_scan,
	.rt_runq_count_sum = sched_rtlocal_runq_count_sum,

	.qos_max_parallelism = sched_qos_max_parallelism,
	.check_spill = sched_check_spill,
	.ipi_policy = sched_ipi_policy,
	.thread_should_yield = sched_thread_should_yield,
	.run_count_incr = sched_run_incr,
	.run_count_decr = sched_run_decr,
	.update_thread_bucket = sched_update_thread_bucket,
	.pset_made_schedulable = sched_pset_made_schedulable,
};

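/*
 * Both variants share the timeshare core init; the pset-runqueue
 * flavor additionally sets the global mode flag consulted throughout
 * this file.
 */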
static void
sched_traditional_init(void)
{
	sched_timeshare_init();
}

static void
sched_traditional_with_pset_runqueue_init(void)
{
	sched_timeshare_init();
	sched_traditional_use_pset_runqueue = TRUE;
}

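/*
 * Per-processor and per-pset setup: only the structure that will
 * actually be used as the run queue is initialized, and the bound
 * thread counters start at zero.
 */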
static void
sched_traditional_processor_init(processor_t processor)
{
	if (!sched_traditional_use_pset_runqueue) {
		run_queue_init(&processor->runq);
	}
	processor->runq_bound_count = 0;
}

static void
sched_traditional_pset_init(processor_set_t pset)
{
	if (sched_traditional_use_pset_runqueue) {
		run_queue_init(&pset->pset_runq);
	}
	pset->pset_runq_bound_count = 0;
}

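/*
 * Returns the run queue a processor dispatches from: the pset-wide
 * queue when pset run queues are enabled, otherwise the processor's
 * own queue.
 */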
__attribute__((always_inline))
static inline run_queue_t
runq_for_processor(processor_t processor)
{
	if (sched_traditional_use_pset_runqueue) {
		return &processor->processor_set->pset_runq;
	} else {
		return &processor->runq;
	}
}

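/*
 * Bookkeeping for processor-bound threads. The counts maintained here
 * let sched_traditional_with_pset_runqueue_processor_queue_empty()
 * discount threads in the shared run queue that are bound to some
 * other processor.
 */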
__attribute__((always_inline))
static inline void
runq_consider_incr_bound_count(processor_t processor,
	thread_t thread)
{
	if (thread->bound_processor == PROCESSOR_NULL) {
		return;
	}

	assert(thread->bound_processor == processor);

	if (sched_traditional_use_pset_runqueue) {
		processor->processor_set->pset_runq_bound_count++;
	}

	processor->runq_bound_count++;
}

__attribute__((always_inline))
static inline void
runq_consider_decr_bound_count(processor_t processor,
	thread_t thread)
{
	if (thread->bound_processor == PROCESSOR_NULL) {
		return;
	}

	assert(thread->bound_processor == processor);

	if (sched_traditional_use_pset_runqueue) {
		processor->processor_set->pset_runq_bound_count--;
	}

	processor->runq_bound_count--;
}

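/*
 * Dequeue the highest-priority eligible thread at or above the
 * requested priority from this processor's run queue, adjusting the
 * bound-thread counts if one is found.
 */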
static thread_t
sched_traditional_choose_thread(
	processor_t processor,
	int priority,
	__unused ast_t reason)
{
	thread_t thread;

	thread = sched_traditional_choose_thread_from_runq(processor, runq_for_processor(processor), priority);
	if (thread != THREAD_NULL) {
		runq_consider_decr_bound_count(processor, thread);
	}

	return thread;
}

/*
 * sched_traditional_choose_thread_from_runq:
 *
 * Locate a thread to execute from the processor run queue
 * and return it. Only choose a thread with greater or equal
 * priority.
 *
 * Associated pset must be locked. Returns THREAD_NULL
 * on failure.
 */
static thread_t
sched_traditional_choose_thread_from_runq(
	processor_t processor,
	run_queue_t rq,
	int priority)
{
	circle_queue_t queue = rq->queues + rq->highq;
	int pri = rq->highq;
	int count = rq->count;
	thread_t thread;

	while (count > 0 && pri >= priority) {
		cqe_foreach_element_safe(thread, queue, runq_links) {
			if (thread->bound_processor == PROCESSOR_NULL ||
			    thread->bound_processor == processor) {
				circle_dequeue(queue, &thread->runq_links);

				thread->runq = PROCESSOR_NULL;
				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
				rq->count--;
				if (SCHED(priority_is_urgent)(pri)) {
					rq->urgency--; assert(rq->urgency >= 0);
				}
				if (circle_queue_empty(queue)) {
					bitmap_clear(rq->bitmap, pri);
					rq->highq = bitmap_first(rq->bitmap, NRQS);
				}
				return thread;
			}
			count--;
		}

		queue--; pri--;
	}

	return THREAD_NULL;
}

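/*
 * Threads of the kernel task start out fixed-priority; all other
 * threads start out timesharing.
 */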
static sched_mode_t
sched_traditional_initial_thread_sched_mode(task_t parent_task)
{
	if (parent_task == kernel_task) {
		return TH_MODE_FIXED;
	} else {
		return TH_MODE_TIMESHARE;
	}
}

/*
 * sched_traditional_processor_enqueue:
 *
 * Enqueue thread on a processor run queue. Thread must be locked,
 * and not already be on a run queue.
 *
 * Returns TRUE if a preemption is indicated based on the state
 * of the run queue.
 *
 * The run queue must be locked (see thread_run_queue_remove()
 * for more info).
 */
static boolean_t
sched_traditional_processor_enqueue(processor_t processor,
	thread_t thread,
	sched_options_t options)
{
	run_queue_t rq = runq_for_processor(processor);
	boolean_t result;

	result = run_queue_enqueue(rq, thread, options);
	thread->runq = processor;
	runq_consider_incr_bound_count(processor, thread);

	return result;
}

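/*
 * Two emptiness checks: the plain variant just tests the count of the
 * processor's own run queue, while the pset-runqueue variant below
 * must also discount threads bound to other processors.
 */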
static boolean_t
sched_traditional_processor_queue_empty(processor_t processor)
{
	return runq_for_processor(processor)->count == 0;
}

static boolean_t
sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor)
{
	processor_set_t pset = processor->processor_set;
	int count = runq_for_processor(processor)->count;

	/*
	 * The pset runq contains the count of all runnable threads
	 * for all processors in the pset. However, for threads that
	 * are bound to another processor, the current "processor"
	 * is not eligible to execute the thread. So we only
	 * include bound threads that are bound to the current
	 * "processor". This allows the processor to idle when the
	 * count of eligible threads drops to 0, even if there's
	 * a runnable thread bound to a different processor in the
	 * shared runq.
	 */

	count -= pset->pset_runq_bound_count;
	count += processor->runq_bound_count;

	return count == 0;
}

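/*
 * Decide whether the running thread should be preempted: returns
 * AST_PREEMPT (with AST_URGENT if urgent threads are queued) when the
 * run queue holds a higher-priority thread; when first_timeslice is
 * FALSE, an equal-priority thread also triggers preemption.
 */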
static ast_t
sched_traditional_processor_csw_check(processor_t processor)
{
	run_queue_t runq;
	boolean_t has_higher;

	assert(processor->active_thread != NULL);

	runq = runq_for_processor(processor);

	if (processor->first_timeslice) {
		has_higher = (runq->highq > processor->current_pri);
	} else {
		has_higher = (runq->highq >= processor->current_pri);
	}

	if (has_higher) {
		if (runq->urgency > 0) {
			return AST_PREEMPT | AST_URGENT;
		}

		return AST_PREEMPT;
	}

	return AST_NONE;
}

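/*
 * Reports whether the run queue holds a thread at (gte) or strictly
 * above the given priority.
 */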
static boolean_t
sched_traditional_processor_queue_has_priority(processor_t processor,
	int priority,
	boolean_t gte)
{
	if (gte) {
		return runq_for_processor(processor)->highq >= priority;
	} else {
		return runq_for_processor(processor)->highq > priority;
	}
}

static int
sched_traditional_processor_runq_count(processor_t processor)
{
	return runq_for_processor(processor)->count;
}

static uint64_t
sched_traditional_processor_runq_stats_count_sum(processor_t processor)
{
	return runq_for_processor(processor)->runq_stats.count_sum;
}

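/*
 * With a shared pset run queue, report its stats from only the
 * lowest-numbered CPU in the pset so that summing across processors
 * counts the queue once.
 */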
static uint64_t
sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor)
{
	if (processor->cpu_id == processor->processor_set->cpu_set_low) {
		return runq_for_processor(processor)->runq_stats.count_sum;
	} else {
		return 0ULL;
	}
}

static int
sched_traditional_processor_bound_count(processor_t processor)
{
	return processor->runq_bound_count;
}

/*
 * sched_traditional_processor_queue_shutdown:
 *
 * Shutdown a processor run queue by
 * re-dispatching non-bound threads.
 *
 * Associated pset must be locked, and is
 * returned unlocked.
 */
static void
sched_traditional_processor_queue_shutdown(processor_t processor)
{
	processor_set_t pset = processor->processor_set;
	run_queue_t rq = runq_for_processor(processor);
	circle_queue_t queue = rq->queues + rq->highq;
	int pri = rq->highq;
	int count = rq->count;
	thread_t thread;
	circle_queue_head_t tqueue;

	circle_queue_init(&tqueue);

	while (count > 0) {
		cqe_foreach_element_safe(thread, queue, runq_links) {
			if (thread->bound_processor == PROCESSOR_NULL) {
				circle_dequeue(queue, &thread->runq_links);

				thread->runq = PROCESSOR_NULL;
				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
				runq_consider_decr_bound_count(processor, thread);
				rq->count--;
				if (SCHED(priority_is_urgent)(pri)) {
					rq->urgency--; assert(rq->urgency >= 0);
				}
				if (circle_queue_empty(queue)) {
					bitmap_clear(rq->bitmap, pri);
					rq->highq = bitmap_first(rq->bitmap, NRQS);
				}

				circle_enqueue_tail(&tqueue, &thread->runq_links);
			}
			count--;
		}

		queue--; pri--;
	}

	pset_unlock(pset);

	while ((thread = cqe_dequeue_head(&tqueue, struct thread, runq_links)) != THREAD_NULL) {
		thread_lock(thread);

		thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
	}
}

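/*
 * Compiled-out debugging aid: verifies that a thread is actually
 * present on the run queue it claims to be on.
 */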
#if 0
static void
run_queue_check(
	run_queue_t rq,
	thread_t thread)
{
	queue_t q;
	queue_entry_t qe;

	if (rq != thread->runq) {
		panic("run_queue_check: thread runq");
	}

	if (thread->sched_pri > MAXPRI || thread->sched_pri < MINPRI) {
		panic("run_queue_check: thread sched_pri");
	}

	q = &rq->queues[thread->sched_pri];
	qe = queue_first(q);
	while (!queue_end(q, qe)) {
		if (qe == (queue_entry_t)thread) {
			return;
		}

		qe = queue_next(qe);
	}

	panic("run_queue_check: end");
}
#endif /* 0 */

/*
 * Locks the runqueue itself.
 *
 * Thread must be locked.
 */
static boolean_t
sched_traditional_processor_queue_remove(processor_t processor,
	thread_t thread)
{
	processor_set_t pset;
	run_queue_t rq;

	pset = processor->processor_set;
	pset_lock(pset);

	rq = runq_for_processor(processor);

	if (processor == thread->runq) {
		/*
		 * Thread is on a run queue and we have a lock on
		 * that run queue.
		 */
		runq_consider_decr_bound_count(processor, thread);
		run_queue_remove(rq, thread);
	} else {
		/*
		 * The thread left the run queue before we could
		 * lock the run queue.
		 */
		assert(thread->runq == PROCESSOR_NULL);
		processor = PROCESSOR_NULL;
	}

	pset_unlock(pset);

	return processor != PROCESSOR_NULL;
}

/*
 * sched_traditional_steal_processor_thread:
 *
 * Locate a thread to steal from the processor and
 * return it.
 *
 * Associated pset must be locked. Returns THREAD_NULL
 * on failure.
 */
static thread_t
sched_traditional_steal_processor_thread(processor_t processor)
{
	run_queue_t rq = runq_for_processor(processor);
	circle_queue_t queue = rq->queues + rq->highq;
	int pri = rq->highq;
	int count = rq->count;
	thread_t thread;

	while (count > 0) {
		cqe_foreach_element_safe(thread, queue, runq_links) {
			if (thread->bound_processor == PROCESSOR_NULL) {
				circle_dequeue(queue, &thread->runq_links);

				thread->runq = PROCESSOR_NULL;
				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
				runq_consider_decr_bound_count(processor, thread);
				rq->count--;
				if (SCHED(priority_is_urgent)(pri)) {
					rq->urgency--; assert(rq->urgency >= 0);
				}
				if (circle_queue_empty(queue)) {
					bitmap_clear(rq->bitmap, pri);
					rq->highq = bitmap_first(rq->bitmap, NRQS);
				}

				return thread;
			}
			count--;
		}

		queue--; pri--;
	}

	return THREAD_NULL;
}

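/* Work stealing is unconditionally enabled for this scheduler. */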
static bool
sched_traditional_steal_thread_enabled(processor_set_t pset)
{
	(void)pset;
	return true;
}

/*
 * Locate and steal a thread, beginning
 * at the pset.
 *
 * The pset must be locked, and is returned
 * unlocked.
 *
 * Returns the stolen thread, or THREAD_NULL on
 * failure.
 */
static thread_t
sched_traditional_steal_thread(processor_set_t pset)
{
	processor_set_t nset, cset = pset;
	processor_t processor;
	thread_t thread;

	do {
		uint64_t active_map = (pset->cpu_state_map[PROCESSOR_RUNNING] |
		    pset->cpu_state_map[PROCESSOR_DISPATCHING]);
		for (int cpuid = lsb_first(active_map); cpuid >= 0; cpuid = lsb_next(active_map, cpuid)) {
			processor = processor_array[cpuid];
			if (runq_for_processor(processor)->count > 0) {
				thread = sched_traditional_steal_processor_thread(processor);
				if (thread != THREAD_NULL) {
					pset_unlock(cset);

					return thread;
				}
			}
		}

		nset = next_pset(cset);

		if (nset != pset) {
			pset_unlock(cset);

			cset = nset;
			pset_lock(cset);
		}
	} while (nset != pset);

	pset_unlock(cset);

	return THREAD_NULL;
}

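/*
 * Periodic scan that collects timeshare threads (and idle threads)
 * whose priorities need recomputation, then fixes them up in a batch;
 * the scan restarts whenever a pass could not complete.
 */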
static void
sched_traditional_thread_update_scan(sched_update_scan_context_t scan_context)
{
	boolean_t restart_needed = FALSE;
	processor_t processor = processor_list;
	processor_set_t pset;
	thread_t thread;
	spl_t s;

	do {
		do {
			/*
			 * TODO: in sched_traditional_use_pset_runqueue case,
			 * avoid scanning the same runq multiple times
			 */
			pset = processor->processor_set;

			s = splsched();
			pset_lock(pset);

			restart_needed = runq_scan(runq_for_processor(processor), scan_context);

			pset_unlock(pset);
			splx(s);

			if (restart_needed) {
				break;
			}

			thread = processor->idle_thread;
			if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
				if (thread_update_add_thread(thread) == FALSE) {
					restart_needed = TRUE;
					break;
				}
			}
		} while ((processor = processor->processor_list) != NULL);

		/* Ok, we now have a collection of candidates -- fix them. */
		thread_update_process_threads();
	} while (restart_needed);
}