/*
 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach/mach_types.h>

#include <kern/sched.h>
#include <kern/sched_prim.h>
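/*
 * When TRUE (the "traditional_with_pset_runqueue" variant), every processor
 * in a pset dequeues from a single run queue hung off the pset; when FALSE
 * (the plain "traditional" variant), each processor owns a private run queue.
 */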
static boolean_t
    sched_traditional_use_pset_runqueue = FALSE;

static void
sched_traditional_init(void);

static bool
sched_traditional_steal_thread_enabled(processor_set_t pset);

static thread_t
sched_traditional_steal_thread(processor_set_t pset);

static thread_t
sched_traditional_steal_processor_thread(processor_t processor);

static void
sched_traditional_thread_update_scan(sched_update_scan_context_t scan_context);

static void
sched_traditional_processor_queue_shutdown(processor_t processor);

static boolean_t
sched_traditional_processor_enqueue(processor_t processor, thread_t thread,
    sched_options_t options);

static boolean_t
sched_traditional_processor_queue_remove(processor_t processor, thread_t thread);

static boolean_t
sched_traditional_processor_queue_empty(processor_t processor);

static ast_t
sched_traditional_processor_csw_check(processor_t processor);

static boolean_t
sched_traditional_processor_queue_has_priority(processor_t processor, int priority, boolean_t gte);

static int
sched_traditional_processor_runq_count(processor_t processor);

static boolean_t
sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor);

static uint64_t
sched_traditional_processor_runq_stats_count_sum(processor_t processor);

static uint64_t
sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor);

static int
sched_traditional_processor_bound_count(processor_t processor);

static void
sched_traditional_quantum_expire(thread_t thread);

static void
sched_traditional_processor_init(processor_t processor);

static void
sched_traditional_pset_init(processor_set_t pset);

static void
sched_traditional_with_pset_runqueue_init(void);

static sched_mode_t
sched_traditional_initial_thread_sched_mode(task_t parent_task);

static thread_t
sched_traditional_choose_thread(processor_t processor, int priority, ast_t reason);

/* Choose a thread from a processor's priority-based runq */
static thread_t
sched_traditional_choose_thread_from_runq(processor_t processor, run_queue_t runq, int priority);
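/*
 * Dispatch table for the "traditional" policy: the shared timeshare core
 * driving one private run queue per processor.
 */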
const struct sched_dispatch_table sched_traditional_dispatch = {
    .sched_name = "traditional",
    .init = sched_traditional_init,
    .timebase_init = sched_timeshare_timebase_init,
    .processor_init = sched_traditional_processor_init,
    .pset_init = sched_traditional_pset_init,
    .maintenance_continuation = sched_timeshare_maintenance_continue,
    .choose_thread = sched_traditional_choose_thread,
    .steal_thread_enabled = sched_traditional_steal_thread_enabled,
    .steal_thread = sched_traditional_steal_thread,
    .compute_timeshare_priority = sched_compute_timeshare_priority,
    .choose_node = sched_choose_node,
    .choose_processor = choose_processor,
    .processor_enqueue = sched_traditional_processor_enqueue,
    .processor_queue_shutdown = sched_traditional_processor_queue_shutdown,
    .processor_queue_remove = sched_traditional_processor_queue_remove,
    .processor_queue_empty = sched_traditional_processor_queue_empty,
    .priority_is_urgent = priority_is_urgent,
    .processor_csw_check = sched_traditional_processor_csw_check,
    .processor_queue_has_priority = sched_traditional_processor_queue_has_priority,
    .initial_quantum_size = sched_timeshare_initial_quantum_size,
    .initial_thread_sched_mode = sched_traditional_initial_thread_sched_mode,
    .can_update_priority = can_update_priority,
    .update_priority = update_priority,
    .lightweight_update_priority = lightweight_update_priority,
    .quantum_expire = sched_default_quantum_expire,
    .processor_runq_count = sched_traditional_processor_runq_count,
    .processor_runq_stats_count_sum = sched_traditional_processor_runq_stats_count_sum,
    .processor_bound_count = sched_traditional_processor_bound_count,
    .thread_update_scan = sched_traditional_thread_update_scan,
    .multiple_psets_enabled = TRUE,
    .sched_groups_enabled = FALSE,
    .avoid_processor_enabled = FALSE,
    .thread_avoid_processor = NULL,
    .processor_balance = sched_SMT_balance,

    .rt_runq = sched_rtlocal_runq,
    .rt_init = sched_rtlocal_init,
    .rt_queue_shutdown = sched_rtlocal_queue_shutdown,
    .rt_runq_scan = sched_rtlocal_runq_scan,
    .rt_runq_count_sum = sched_rtlocal_runq_count_sum,

    .qos_max_parallelism = sched_qos_max_parallelism,
    .check_spill = sched_check_spill,
    .ipi_policy = sched_ipi_policy,
    .thread_should_yield = sched_thread_should_yield,
    .run_count_incr = sched_run_incr,
    .run_count_decr = sched_run_decr,
    .update_thread_bucket = sched_update_thread_bucket,
    .pset_made_schedulable = sched_pset_made_schedulable,
};
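/*
 * The "traditional_with_pset_runqueue" variant differs only in its init
 * routine and in the hooks that must understand the pset-wide run queue:
 * the queue-empty check, the run queue statistics sum, and whether thread
 * stealing is enabled.
 */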
const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch = {
    .sched_name = "traditional_with_pset_runqueue",
    .init = sched_traditional_with_pset_runqueue_init,
    .timebase_init = sched_timeshare_timebase_init,
    .processor_init = sched_traditional_processor_init,
    .pset_init = sched_traditional_pset_init,
    .maintenance_continuation = sched_timeshare_maintenance_continue,
    .choose_thread = sched_traditional_choose_thread,
    .steal_thread_enabled = sched_steal_thread_enabled,
    .steal_thread = sched_traditional_steal_thread,
    .compute_timeshare_priority = sched_compute_timeshare_priority,
    .choose_node = sched_choose_node,
    .choose_processor = choose_processor,
    .processor_enqueue = sched_traditional_processor_enqueue,
    .processor_queue_shutdown = sched_traditional_processor_queue_shutdown,
    .processor_queue_remove = sched_traditional_processor_queue_remove,
    .processor_queue_empty = sched_traditional_with_pset_runqueue_processor_queue_empty,
    .priority_is_urgent = priority_is_urgent,
    .processor_csw_check = sched_traditional_processor_csw_check,
    .processor_queue_has_priority = sched_traditional_processor_queue_has_priority,
    .initial_quantum_size = sched_timeshare_initial_quantum_size,
    .initial_thread_sched_mode = sched_traditional_initial_thread_sched_mode,
    .can_update_priority = can_update_priority,
    .update_priority = update_priority,
    .lightweight_update_priority = lightweight_update_priority,
    .quantum_expire = sched_default_quantum_expire,
    .processor_runq_count = sched_traditional_processor_runq_count,
    .processor_runq_stats_count_sum = sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum,
    .processor_bound_count = sched_traditional_processor_bound_count,
    .thread_update_scan = sched_traditional_thread_update_scan,
    .multiple_psets_enabled = TRUE,
    .sched_groups_enabled = FALSE,
    .avoid_processor_enabled = FALSE,
    .thread_avoid_processor = NULL,
    .processor_balance = sched_SMT_balance,

    .rt_runq = sched_rtlocal_runq,
    .rt_init = sched_rtlocal_init,
    .rt_queue_shutdown = sched_rtlocal_queue_shutdown,
    .rt_runq_scan = sched_rtlocal_runq_scan,
    .rt_runq_count_sum = sched_rtlocal_runq_count_sum,

    .qos_max_parallelism = sched_qos_max_parallelism,
    .check_spill = sched_check_spill,
    .ipi_policy = sched_ipi_policy,
    .thread_should_yield = sched_thread_should_yield,
    .run_count_incr = sched_run_incr,
    .run_count_decr = sched_run_decr,
    .update_thread_bucket = sched_update_thread_bucket,
    .pset_made_schedulable = sched_pset_made_schedulable,
};
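/*
 * Both variants initialize the shared timeshare scheduler core; the
 * pset-runqueue variant additionally flips sched_traditional_use_pset_runqueue.
 */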
static void
sched_traditional_init(void)
{
    sched_timeshare_init();
}

static void
sched_traditional_with_pset_runqueue_init(void)
{
    sched_timeshare_init();
    sched_traditional_use_pset_runqueue = TRUE;
}
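/*
 * Per-processor / per-pset setup: only the run queue that this variant
 * actually uses gets initialized; bound-thread counters always start at zero.
 */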
static void
sched_traditional_processor_init(processor_t processor)
{
    if (!sched_traditional_use_pset_runqueue) {
        run_queue_init(&processor->runq);
    }
    processor->runq_bound_count = 0;
}
static void
sched_traditional_pset_init(processor_set_t pset)
{
    if (sched_traditional_use_pset_runqueue) {
        run_queue_init(&pset->pset_runq);
    }
    pset->pset_runq_bound_count = 0;
}
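/*
 * Select the run queue this processor dequeues from: the pset-wide queue in
 * pset-runqueue mode, otherwise the processor's private queue.
 */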
__attribute__((always_inline))
static inline run_queue_t
runq_for_processor(processor_t processor)
{
    if (sched_traditional_use_pset_runqueue) {
        return &processor->processor_set->pset_runq;
    } else {
        return &processor->runq;
    }
}
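/*
 * Bound-thread bookkeeping: when a thread bound to a specific processor is
 * enqueued or removed, keep per-processor (and, in pset-runqueue mode,
 * per-pset) counts so idle and queue-empty checks can ignore threads this
 * processor is not allowed to run.
 */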
__attribute__((always_inline))
static inline void
runq_consider_incr_bound_count(processor_t processor,
    thread_t thread)
{
    if (thread->bound_processor == PROCESSOR_NULL) {
        return;
    }

    assert(thread->bound_processor == processor);

    if (sched_traditional_use_pset_runqueue) {
        processor->processor_set->pset_runq_bound_count++;
    }

    processor->runq_bound_count++;
}
__attribute__((always_inline))
static inline void
runq_consider_decr_bound_count(processor_t processor,
    thread_t thread)
{
    if (thread->bound_processor == PROCESSOR_NULL) {
        return;
    }

    assert(thread->bound_processor == processor);

    if (sched_traditional_use_pset_runqueue) {
        processor->processor_set->pset_runq_bound_count--;
    }

    processor->runq_bound_count--;
}
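/*
 * Pick the next thread for a processor, preferring anything at or above the
 * given priority; a bound thread chosen here leaves the run queue, so it is
 * dropped from the bound counts.
 */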
static thread_t
sched_traditional_choose_thread(
    processor_t     processor,
    int             priority,
    __unused ast_t  reason)
{
    thread_t thread;

    thread = sched_traditional_choose_thread_from_runq(processor, runq_for_processor(processor), priority);
    if (thread != THREAD_NULL) {
        runq_consider_decr_bound_count(processor, thread);
    }

    return thread;
}
/*
 * sched_traditional_choose_thread_from_runq:
 *
 * Locate a thread to execute from the processor run queue
 * and return it.  Only choose a thread with greater or equal
 * priority.
 *
 * Associated pset must be locked.  Returns THREAD_NULL
 * on failure.
 */
static thread_t
sched_traditional_choose_thread_from_runq(
    processor_t     processor,
    run_queue_t     rq,
    int             priority)
{
    circle_queue_t  queue   = rq->queues + rq->highq;
    int             pri     = rq->highq;
    int             count   = rq->count;
    thread_t        thread;

    while (count > 0 && pri >= priority) {
        cqe_foreach_element_safe(thread, queue, runq_links) {
            if (thread->bound_processor == PROCESSOR_NULL ||
                thread->bound_processor == processor) {
                circle_dequeue(queue, &thread->runq_links);

                thread->runq = PROCESSOR_NULL;
                SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
                rq->count--;
                if (SCHED(priority_is_urgent)(pri)) {
                    rq->urgency--; assert(rq->urgency >= 0);
                }
                if (circle_queue_empty(queue)) {
                    bitmap_clear(rq->bitmap, pri);
                    rq->highq = bitmap_first(rq->bitmap, NRQS);
                }
                return thread;
            }
            count--;
        }

        queue--; pri--;
    }

    return THREAD_NULL;
}
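/* Kernel-task threads run at fixed priority; everything else timeshares. */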
static sched_mode_t
sched_traditional_initial_thread_sched_mode(task_t parent_task)
{
    if (parent_task == kernel_task) {
        return TH_MODE_FIXED;
    } else {
        return TH_MODE_TIMESHARE;
    }
}
/*
 * sched_traditional_processor_enqueue:
 *
 * Enqueue thread on a processor run queue.  Thread must be locked,
 * and not already be on a run queue.
 *
 * Returns TRUE if a preemption is indicated based on the state
 * of the run queue.
 *
 * The run queue must be locked (see thread_run_queue_remove()
 * for more info).
 */
static boolean_t
sched_traditional_processor_enqueue(processor_t processor,
    thread_t thread,
    sched_options_t options)
{
    run_queue_t     rq = runq_for_processor(processor);
    boolean_t       result;

    result = run_queue_enqueue(rq, thread, options);
    thread->runq = processor;
    runq_consider_incr_bound_count(processor, thread);

    return result;
}
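/* With a private per-processor run queue, "empty" is simply a zero count. */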
static boolean_t
sched_traditional_processor_queue_empty(processor_t processor)
{
    return runq_for_processor(processor)->count == 0;
}
static boolean_t
sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor)
{
    processor_set_t pset = processor->processor_set;
    int count = runq_for_processor(processor)->count;

    /*
     * The pset runq contains the count of all runnable threads
     * for all processors in the pset. However, for threads that
     * are bound to another processor, the current "processor"
     * is not eligible to execute the thread. So we only
     * include bound threads that are bound to the current
     * "processor". This allows the processor to idle when the
     * count of eligible threads drops to 0, even if there's
     * a runnable thread bound to a different processor in the
     * shared runq.
     */

    count -= pset->pset_runq_bound_count;
    count += processor->runq_bound_count;

    return count == 0;
}
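/*
 * Should the running thread be preempted?  A strictly higher-priority
 * runnable thread (or an equal-priority one once the first quantum has been
 * used up) requests AST_PREEMPT; urgent threads escalate to AST_URGENT.
 */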
static ast_t
sched_traditional_processor_csw_check(processor_t processor)
{
    run_queue_t     runq;
    boolean_t       has_higher;

    assert(processor->active_thread != NULL);

    runq = runq_for_processor(processor);

    if (processor->first_timeslice) {
        has_higher = (runq->highq > processor->current_pri);
    } else {
        has_higher = (runq->highq >= processor->current_pri);
    }

    if (has_higher) {
        if (runq->urgency > 0) {
            return AST_PREEMPT | AST_URGENT;
        }

        return AST_PREEMPT;
    }

    return AST_NONE;
}
static boolean_t
sched_traditional_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte)
{
    if (gte) {
        return runq_for_processor(processor)->highq >= priority;
    } else {
        return runq_for_processor(processor)->highq > priority;
    }
}
static int
sched_traditional_processor_runq_count(processor_t processor)
{
    return runq_for_processor(processor)->count;
}
static uint64_t
sched_traditional_processor_runq_stats_count_sum(processor_t processor)
{
    return runq_for_processor(processor)->runq_stats.count_sum;
}
static uint64_t
sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor)
{
    /* Report the shared pset runq's stats only once, via the lowest-numbered CPU */
    if (processor->cpu_id == processor->processor_set->cpu_set_low) {
        return runq_for_processor(processor)->runq_stats.count_sum;
    } else {
        return 0ULL;
    }
}
static int
sched_traditional_processor_bound_count(processor_t processor)
{
    return processor->runq_bound_count;
}
/*
 * sched_traditional_processor_queue_shutdown:
 *
 * Shutdown a processor run queue by
 * re-dispatching non-bound threads.
 *
 * Associated pset must be locked, and is
 * returned unlocked.
 */
static void
sched_traditional_processor_queue_shutdown(processor_t processor)
{
    processor_set_t pset    = processor->processor_set;
    run_queue_t     rq      = runq_for_processor(processor);
    circle_queue_t  queue   = rq->queues + rq->highq;
    int             pri     = rq->highq;
    int             count   = rq->count;
    thread_t        thread;
    circle_queue_head_t tqueue;

    circle_queue_init(&tqueue);

    while (count > 0) {
        cqe_foreach_element_safe(thread, queue, runq_links) {
            if (thread->bound_processor == PROCESSOR_NULL) {
                circle_dequeue(queue, &thread->runq_links);

                thread->runq = PROCESSOR_NULL;
                SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
                runq_consider_decr_bound_count(processor, thread);
                rq->count--;
                if (SCHED(priority_is_urgent)(pri)) {
                    rq->urgency--; assert(rq->urgency >= 0);
                }
                if (circle_queue_empty(queue)) {
                    bitmap_clear(rq->bitmap, pri);
                    rq->highq = bitmap_first(rq->bitmap, NRQS);
                }

                circle_enqueue_tail(&tqueue, &thread->runq_links);
            }
            count--;
        }

        queue--; pri--;
    }

    pset_unlock(pset);

    while ((thread = cqe_dequeue_head(&tqueue, struct thread, runq_links)) != THREAD_NULL) {
        thread_lock(thread);

        thread_setrun(thread, SCHED_TAILQ);

        thread_unlock(thread);
    }
}
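/*
 * Sanity check left disabled: verifies that a thread's runq back-pointer
 * matches a run queue that actually contains it at its scheduled priority.
 */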
#if 0
static void
run_queue_check(
    run_queue_t     rq,
    thread_t        thread)
{
    queue_t         q;
    queue_entry_t   qe;

    if (rq != thread->runq) {
        panic("run_queue_check: thread runq");
    }

    if (thread->sched_pri > MAXPRI || thread->sched_pri < MINPRI) {
        panic("run_queue_check: thread sched_pri");
    }

    q = &rq->queues[thread->sched_pri];
    qe = queue_first(q);
    while (!queue_end(q, qe)) {
        if (qe == (queue_entry_t)thread) {
            return;
        }

        qe = queue_next(qe);
    }

    panic("run_queue_check: end");
}
#endif
/*
 * Locks the runqueue itself.
 *
 * Thread must be locked.
 */
static boolean_t
sched_traditional_processor_queue_remove(processor_t processor,
    thread_t thread)
{
    processor_set_t pset;
    run_queue_t     rq;

    pset = processor->processor_set;
    pset_lock(pset);

    rq = runq_for_processor(processor);

    if (processor == thread->runq) {
        /*
         * Thread is on a run queue and we have a lock on
         * that run queue.
         */
        runq_consider_decr_bound_count(processor, thread);
        run_queue_remove(rq, thread);
    } else {
        /*
         * The thread left the run queue before we could
         * lock the run queue.
         */
        assert(thread->runq == PROCESSOR_NULL);
        processor = PROCESSOR_NULL;
    }

    pset_unlock(pset);

    return processor != PROCESSOR_NULL;
}
/*
 * sched_traditional_steal_processor_thread:
 *
 * Locate a thread to steal from the processor and
 * return it.
 *
 * Associated pset must be locked.  Returns THREAD_NULL
 * on failure.
 */
static thread_t
sched_traditional_steal_processor_thread(processor_t processor)
{
    run_queue_t     rq      = runq_for_processor(processor);
    circle_queue_t  queue   = rq->queues + rq->highq;
    int             pri     = rq->highq;
    int             count   = rq->count;
    thread_t        thread;

    while (count > 0) {
        cqe_foreach_element_safe(thread, queue, runq_links) {
            if (thread->bound_processor == PROCESSOR_NULL) {
                circle_dequeue(queue, &thread->runq_links);

                thread->runq = PROCESSOR_NULL;
                SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
                runq_consider_decr_bound_count(processor, thread);
                rq->count--;
                if (SCHED(priority_is_urgent)(pri)) {
                    rq->urgency--; assert(rq->urgency >= 0);
                }
                if (circle_queue_empty(queue)) {
                    bitmap_clear(rq->bitmap, pri);
                    rq->highq = bitmap_first(rq->bitmap, NRQS);
                }

                return thread;
            }
            count--;
        }

        queue--; pri--;
    }

    return THREAD_NULL;
}
static bool
sched_traditional_steal_thread_enabled(processor_set_t pset)
{
    /* Stealing only makes sense when there is more than one pset to steal from */
    return bit_count(pset->node->pset_map) > 1;
}
/*
 * Locate and steal a thread, beginning
 * at the pset.
 *
 * The pset must be locked, and is returned
 * unlocked.
 *
 * Returns the stolen thread, or THREAD_NULL on
 * failure.
 */
static thread_t
sched_traditional_steal_thread(processor_set_t pset)
{
    processor_set_t nset, cset = pset;
    processor_t     processor;
    thread_t        thread;

    do {
        uint64_t active_map = (pset->cpu_state_map[PROCESSOR_RUNNING] |
            pset->cpu_state_map[PROCESSOR_DISPATCHING]);
        for (int cpuid = lsb_first(active_map); cpuid >= 0; cpuid = lsb_next(active_map, cpuid)) {
            processor = processor_array[cpuid];
            if (runq_for_processor(processor)->count > 0) {
                thread = sched_traditional_steal_processor_thread(processor);
                if (thread != THREAD_NULL) {
                    pset_unlock(cset);

                    return thread;
                }
            }
        }

        nset = next_pset(cset);

        if (nset != pset) {
            pset_unlock(cset);

            cset = nset;
            pset_lock(cset);
        }
    } while (nset != pset);

    pset_unlock(cset);

    return THREAD_NULL;
}
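/*
 * Walk every processor's run queue (and its idle thread), collecting threads
 * whose priorities need to be recomputed, then let
 * thread_update_process_threads() fix them up; restart the scan if the
 * candidate buffer fills before the walk completes.
 */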
static void
sched_traditional_thread_update_scan(sched_update_scan_context_t scan_context)
{
    boolean_t       restart_needed = FALSE;
    processor_t     processor = processor_list;
    processor_set_t pset;
    thread_t        thread;
    spl_t           s;

    do {
        do {
            /*
             * TODO: in sched_traditional_use_pset_runqueue case,
             *  avoid scanning the same runq multiple times
             */
            pset = processor->processor_set;

            s = splsched();
            pset_lock(pset);

            restart_needed = runq_scan(runq_for_processor(processor), scan_context);

            pset_unlock(pset);
            splx(s);

            if (restart_needed) {
                break;
            }

            thread = processor->idle_thread;
            if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
                if (thread_update_add_thread(thread) == FALSE) {
                    restart_needed = TRUE;
                    break;
                }
            }
        } while ((processor = processor->processor_list) != NULL);

        /* Ok, we now have a collection of candidates -- fix them. */
        thread_update_process_threads();
    } while (restart_needed);
}