/*
 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach/mach_types.h>

#include <kern/sched.h>
#include <kern/sched_prim.h>
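
/*
 * The "traditional" scheduler is the classic Mach timeshare scheduler.
 * It comes in two variants, selected by which dispatch table is installed:
 * one keeps a run queue per processor, the other shares a single run queue
 * across each processor set (see sched_traditional_use_pset_runqueue below).
 */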
static boolean_t
    sched_traditional_use_pset_runqueue = FALSE;

static void
sched_traditional_init(void);

static bool
sched_traditional_steal_thread_enabled(processor_set_t pset);

static thread_t
sched_traditional_steal_thread(processor_set_t pset);

static thread_t
sched_traditional_steal_processor_thread(processor_t processor);

static void
sched_traditional_thread_update_scan(sched_update_scan_context_t scan_context);

static void
sched_traditional_processor_queue_shutdown(processor_t processor);

static boolean_t
sched_traditional_processor_enqueue(processor_t processor, thread_t thread,
    sched_options_t options);

static boolean_t
sched_traditional_processor_queue_remove(processor_t processor, thread_t thread);

static boolean_t
sched_traditional_processor_queue_empty(processor_t processor);

static ast_t
sched_traditional_processor_csw_check(processor_t processor);

static boolean_t
sched_traditional_processor_queue_has_priority(processor_t processor, int priority, boolean_t gte);

static int
sched_traditional_processor_runq_count(processor_t processor);

static boolean_t
sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor);

static uint64_t
sched_traditional_processor_runq_stats_count_sum(processor_t processor);

static uint64_t
sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor);

static int
sched_traditional_processor_bound_count(processor_t processor);

static void
sched_traditional_quantum_expire(thread_t thread);

static void
sched_traditional_processor_init(processor_t processor);

static void
sched_traditional_pset_init(processor_set_t pset);

static void
sched_traditional_with_pset_runqueue_init(void);

static sched_mode_t
sched_traditional_initial_thread_sched_mode(task_t parent_task);

static thread_t
sched_traditional_choose_thread(processor_t processor, int priority, ast_t reason);

/* Choose a thread from a processor's priority-based runq */
static thread_t
sched_traditional_choose_thread_from_runq(processor_t processor, run_queue_t runq, int priority);
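
/* Dispatch table for the per-processor run queue variant. */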
const struct sched_dispatch_table sched_traditional_dispatch = {
	.sched_name = "traditional",
	.init = sched_traditional_init,
	.timebase_init = sched_timeshare_timebase_init,
	.processor_init = sched_traditional_processor_init,
	.pset_init = sched_traditional_pset_init,
	.maintenance_continuation = sched_timeshare_maintenance_continue,
	.choose_thread = sched_traditional_choose_thread,
	.steal_thread_enabled = sched_traditional_steal_thread_enabled,
	.steal_thread = sched_traditional_steal_thread,
	.compute_timeshare_priority = sched_compute_timeshare_priority,
	.choose_processor = choose_processor,
	.processor_enqueue = sched_traditional_processor_enqueue,
	.processor_queue_shutdown = sched_traditional_processor_queue_shutdown,
	.processor_queue_remove = sched_traditional_processor_queue_remove,
	.processor_queue_empty = sched_traditional_processor_queue_empty,
	.priority_is_urgent = priority_is_urgent,
	.processor_csw_check = sched_traditional_processor_csw_check,
	.processor_queue_has_priority = sched_traditional_processor_queue_has_priority,
	.initial_quantum_size = sched_timeshare_initial_quantum_size,
	.initial_thread_sched_mode = sched_traditional_initial_thread_sched_mode,
	.can_update_priority = can_update_priority,
	.update_priority = update_priority,
	.lightweight_update_priority = lightweight_update_priority,
	.quantum_expire = sched_default_quantum_expire,
	.processor_runq_count = sched_traditional_processor_runq_count,
	.processor_runq_stats_count_sum = sched_traditional_processor_runq_stats_count_sum,
	.processor_bound_count = sched_traditional_processor_bound_count,
	.thread_update_scan = sched_traditional_thread_update_scan,
	.multiple_psets_enabled = TRUE,
	.sched_groups_enabled = FALSE,
	.avoid_processor_enabled = FALSE,
	.thread_avoid_processor = NULL,
	.processor_balance = sched_SMT_balance,

	.rt_runq = sched_rtglobal_runq,
	.rt_init = sched_rtglobal_init,
	.rt_queue_shutdown = sched_rtglobal_queue_shutdown,
	.rt_runq_scan = sched_rtglobal_runq_scan,
	.rt_runq_count_sum = sched_rtglobal_runq_count_sum,

	.qos_max_parallelism = sched_qos_max_parallelism,
	.check_spill = sched_check_spill,
	.ipi_policy = sched_ipi_policy,
	.thread_should_yield = sched_thread_should_yield,
	.run_count_incr = sched_run_incr,
	.run_count_decr = sched_run_decr,
	.update_thread_bucket = sched_update_thread_bucket,
	.pset_made_schedulable = sched_pset_made_schedulable,
};
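
/*
 * Dispatch table for the pset-runqueue variant. It differs from the table
 * above only in its name and in the init, steal_thread_enabled,
 * processor_queue_empty and processor_runq_stats_count_sum entries.
 */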
const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch = {
	.sched_name = "traditional_with_pset_runqueue",
	.init = sched_traditional_with_pset_runqueue_init,
	.timebase_init = sched_timeshare_timebase_init,
	.processor_init = sched_traditional_processor_init,
	.pset_init = sched_traditional_pset_init,
	.maintenance_continuation = sched_timeshare_maintenance_continue,
	.choose_thread = sched_traditional_choose_thread,
	.steal_thread_enabled = sched_steal_thread_enabled,
	.steal_thread = sched_traditional_steal_thread,
	.compute_timeshare_priority = sched_compute_timeshare_priority,
	.choose_processor = choose_processor,
	.processor_enqueue = sched_traditional_processor_enqueue,
	.processor_queue_shutdown = sched_traditional_processor_queue_shutdown,
	.processor_queue_remove = sched_traditional_processor_queue_remove,
	.processor_queue_empty = sched_traditional_with_pset_runqueue_processor_queue_empty,
	.priority_is_urgent = priority_is_urgent,
	.processor_csw_check = sched_traditional_processor_csw_check,
	.processor_queue_has_priority = sched_traditional_processor_queue_has_priority,
	.initial_quantum_size = sched_timeshare_initial_quantum_size,
	.initial_thread_sched_mode = sched_traditional_initial_thread_sched_mode,
	.can_update_priority = can_update_priority,
	.update_priority = update_priority,
	.lightweight_update_priority = lightweight_update_priority,
	.quantum_expire = sched_default_quantum_expire,
	.processor_runq_count = sched_traditional_processor_runq_count,
	.processor_runq_stats_count_sum = sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum,
	.processor_bound_count = sched_traditional_processor_bound_count,
	.thread_update_scan = sched_traditional_thread_update_scan,
	.multiple_psets_enabled = TRUE,
	.sched_groups_enabled = FALSE,
	.avoid_processor_enabled = FALSE,
	.thread_avoid_processor = NULL,
	.processor_balance = sched_SMT_balance,

	.rt_runq = sched_rtglobal_runq,
	.rt_init = sched_rtglobal_init,
	.rt_queue_shutdown = sched_rtglobal_queue_shutdown,
	.rt_runq_scan = sched_rtglobal_runq_scan,
	.rt_runq_count_sum = sched_rtglobal_runq_count_sum,

	.qos_max_parallelism = sched_qos_max_parallelism,
	.check_spill = sched_check_spill,
	.ipi_policy = sched_ipi_policy,
	.thread_should_yield = sched_thread_should_yield,
	.run_count_incr = sched_run_incr,
	.run_count_decr = sched_run_decr,
	.update_thread_bucket = sched_update_thread_bucket,
	.pset_made_schedulable = sched_pset_made_schedulable,
};
static void
sched_traditional_init(void)
{
	sched_timeshare_init();
}

static void
sched_traditional_with_pset_runqueue_init(void)
{
	sched_timeshare_init();
	sched_traditional_use_pset_runqueue = TRUE;
}
static void
sched_traditional_processor_init(processor_t processor)
{
	if (!sched_traditional_use_pset_runqueue) {
		run_queue_init(&processor->runq);
	}
	processor->runq_bound_count = 0;
}
static void
sched_traditional_pset_init(processor_set_t pset)
{
	if (sched_traditional_use_pset_runqueue) {
		run_queue_init(&pset->pset_runq);
	}
	pset->pset_runq_bound_count = 0;
}
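
/* Return the run queue this processor services: the shared pset runq or its own. */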
__attribute__((always_inline))
static inline run_queue_t
runq_for_processor(processor_t processor)
{
	if (sched_traditional_use_pset_runqueue) {
		return &processor->processor_set->pset_runq;
	} else {
		return &processor->runq;
	}
}
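
/*
 * Bound-thread bookkeeping: each processor tracks how many runnable threads
 * are bound specifically to it, so that the shared pset runq count can be
 * adjusted per processor (see the pset-runqueue queue_empty check below).
 */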
__attribute__((always_inline))
static inline void
runq_consider_incr_bound_count(processor_t processor,
    thread_t thread)
{
	if (thread->bound_processor == PROCESSOR_NULL) {
		return;
	}

	assert(thread->bound_processor == processor);

	if (sched_traditional_use_pset_runqueue) {
		processor->processor_set->pset_runq_bound_count++;
	}

	processor->runq_bound_count++;
}
__attribute__((always_inline))
static inline void
runq_consider_decr_bound_count(processor_t processor,
    thread_t thread)
{
	if (thread->bound_processor == PROCESSOR_NULL) {
		return;
	}

	assert(thread->bound_processor == processor);

	if (sched_traditional_use_pset_runqueue) {
		processor->processor_set->pset_runq_bound_count--;
	}

	processor->runq_bound_count--;
}
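
/* Pick the next thread for this processor; drop its bound count if the chosen thread was bound here. */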
static thread_t
sched_traditional_choose_thread(
	processor_t     processor,
	int             priority,
	__unused ast_t  reason)
{
	thread_t thread;

	thread = sched_traditional_choose_thread_from_runq(processor, runq_for_processor(processor), priority);
	if (thread != THREAD_NULL) {
		runq_consider_decr_bound_count(processor, thread);
	}

	return thread;
}
/*
 * sched_traditional_choose_thread_from_runq:
 *
 * Locate a thread to execute from the processor run queue
 * and return it.  Only choose a thread with greater or equal
 * priority.
 *
 * Associated pset must be locked.  Returns THREAD_NULL
 * on failure.
 */
static thread_t
sched_traditional_choose_thread_from_runq(
	processor_t     processor,
	run_queue_t     rq,
	int             priority)
{
	circle_queue_t  queue   = rq->queues + rq->highq;
	int             pri     = rq->highq;
	int             count   = rq->count;
	thread_t        thread;

	while (count > 0 && pri >= priority) {
		cqe_foreach_element_safe(thread, queue, runq_links) {
			if (thread->bound_processor == PROCESSOR_NULL ||
			    thread->bound_processor == processor) {
				circle_dequeue(queue, &thread->runq_links);

				thread->runq = PROCESSOR_NULL;
				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
				rq->count--;
				if (SCHED(priority_is_urgent)(pri)) {
					rq->urgency--; assert(rq->urgency >= 0);
				}
				if (circle_queue_empty(queue)) {
					bitmap_clear(rq->bitmap, pri);
					rq->highq = bitmap_first(rq->bitmap, NRQS);
				}

				return thread;
			}
			count--;
		}

		queue--; pri--;
	}

	return THREAD_NULL;
}
static sched_mode_t
sched_traditional_initial_thread_sched_mode(task_t parent_task)
{
	if (parent_task == kernel_task) {
		return TH_MODE_FIXED;
	} else {
		return TH_MODE_TIMESHARE;
	}
}
/*
 * sched_traditional_processor_enqueue:
 *
 * Enqueue thread on a processor run queue.  Thread must be locked,
 * and not already be on a run queue.
 *
 * Returns TRUE if a preemption is indicated based on the state
 * of the run queue.
 *
 * The run queue must be locked (see thread_run_queue_remove()
 * for more info).
 */
static boolean_t
sched_traditional_processor_enqueue(processor_t processor,
    thread_t thread,
    sched_options_t options)
{
	run_queue_t     rq = runq_for_processor(processor);
	boolean_t       result;

	result = run_queue_enqueue(rq, thread, options);
	thread->runq = processor;
	runq_consider_incr_bound_count(processor, thread);

	return result;
}
static boolean_t
sched_traditional_processor_queue_empty(processor_t processor)
{
	return runq_for_processor(processor)->count == 0;
}
static boolean_t
sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor)
{
	processor_set_t pset = processor->processor_set;
	int count = runq_for_processor(processor)->count;

	/*
	 * The pset runq contains the count of all runnable threads
	 * for all processors in the pset. However, for threads that
	 * are bound to another processor, the current "processor"
	 * is not eligible to execute the thread. So we only
	 * include bound threads that are bound to the current
	 * "processor". This allows the processor to idle when the
	 * count of eligible threads drops to 0, even if there's
	 * a runnable thread bound to a different processor in the
	 * shared runq.
	 */

	count -= pset->pset_runq_bound_count;
	count += processor->runq_bound_count;

	return count == 0;
}
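
/*
 * Check whether the running thread should be preempted: a strictly higher
 * priority thread on the runq (or an equal-priority one once the quantum is
 * used up) requests AST_PREEMPT, urgently if the queue holds urgent threads.
 */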
static ast_t
sched_traditional_processor_csw_check(processor_t processor)
{
	run_queue_t     runq;
	boolean_t       has_higher;

	assert(processor->active_thread != NULL);

	runq = runq_for_processor(processor);

	if (processor->first_timeslice) {
		has_higher = (runq->highq > processor->current_pri);
	} else {
		has_higher = (runq->highq >= processor->current_pri);
	}

	if (has_higher) {
		if (runq->urgency > 0) {
			return AST_PREEMPT | AST_URGENT;
		}

		return AST_PREEMPT;
	}

	return AST_NONE;
}
static boolean_t
sched_traditional_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte)
{
	if (gte) {
		return runq_for_processor(processor)->highq >= priority;
	} else {
		return runq_for_processor(processor)->highq > priority;
	}
}
static int
sched_traditional_processor_runq_count(processor_t processor)
{
	return runq_for_processor(processor)->count;
}
static uint64_t
sched_traditional_processor_runq_stats_count_sum(processor_t processor)
{
	return runq_for_processor(processor)->runq_stats.count_sum;
}
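
/*
 * With a shared pset runq, report its stats only from the lowest-numbered
 * CPU in the pset so the sum is not counted once per processor.
 */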
static uint64_t
sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor)
{
	if (processor->cpu_id == processor->processor_set->cpu_set_low) {
		return runq_for_processor(processor)->runq_stats.count_sum;
	} else {
		return 0ULL;
	}
}
static int
sched_traditional_processor_bound_count(processor_t processor)
{
	return processor->runq_bound_count;
}
/*
 * sched_traditional_processor_queue_shutdown:
 *
 * Shutdown a processor run queue by
 * re-dispatching non-bound threads.
 *
 * Associated pset must be locked, and is
 * returned unlocked.
 */
static void
sched_traditional_processor_queue_shutdown(processor_t processor)
{
	processor_set_t         pset    = processor->processor_set;
	run_queue_t             rq      = runq_for_processor(processor);
	circle_queue_t          queue   = rq->queues + rq->highq;
	int                     pri     = rq->highq;
	int                     count   = rq->count;
	thread_t                thread;
	circle_queue_head_t     tqueue;

	circle_queue_init(&tqueue);

	while (count > 0) {
		cqe_foreach_element_safe(thread, queue, runq_links) {
			if (thread->bound_processor == PROCESSOR_NULL) {
				circle_dequeue(queue, &thread->runq_links);

				thread->runq = PROCESSOR_NULL;
				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
				runq_consider_decr_bound_count(processor, thread);
				rq->count--;
				if (SCHED(priority_is_urgent)(pri)) {
					rq->urgency--; assert(rq->urgency >= 0);
				}
				if (circle_queue_empty(queue)) {
					bitmap_clear(rq->bitmap, pri);
					rq->highq = bitmap_first(rq->bitmap, NRQS);
				}

				circle_enqueue_tail(&tqueue, &thread->runq_links);
			}
			count--;
		}

		queue--; pri--;
	}

	pset_unlock(pset);

	while ((thread = cqe_dequeue_head(&tqueue, struct thread, runq_links)) != THREAD_NULL) {
		thread_lock(thread);

		thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
	}
}
#if 0
/* Debug-only consistency check that a thread is actually on its run queue. */
static void
run_queue_check(
	run_queue_t     rq,
	thread_t        thread)
{
	queue_t         q;
	queue_entry_t   qe;

	if (rq != thread->runq) {
		panic("run_queue_check: thread runq");
	}

	if (thread->sched_pri > MAXPRI || thread->sched_pri < MINPRI) {
		panic("run_queue_check: thread sched_pri");
	}

	q = &rq->queues[thread->sched_pri];
	qe = queue_first(q);
	while (!queue_end(q, qe)) {
		if (qe == (queue_entry_t)thread) {
			return;
		}

		qe = queue_next(qe);
	}

	panic("run_queue_check: end");
}
#endif
/*
 * Locks the runqueue itself.
 *
 * Thread must be locked.
 */
static boolean_t
sched_traditional_processor_queue_remove(processor_t processor,
    thread_t thread)
{
	processor_set_t pset;
	run_queue_t     rq;

	pset = processor->processor_set;
	pset_lock(pset);

	rq = runq_for_processor(processor);

	if (processor == thread->runq) {
		/*
		 * Thread is on a run queue and we have a lock on
		 * that run queue.
		 */
		runq_consider_decr_bound_count(processor, thread);
		run_queue_remove(rq, thread);
	} else {
		/*
		 * The thread left the run queue before we could
		 * lock the run queue.
		 */
		assert(thread->runq == PROCESSOR_NULL);
		processor = PROCESSOR_NULL;
	}

	pset_unlock(pset);

	return processor != PROCESSOR_NULL;
}
/*
 * sched_traditional_steal_processor_thread:
 *
 * Locate a thread to steal from the processor and
 * return it.
 *
 * Associated pset must be locked.  Returns THREAD_NULL
 * on failure.
 */
static thread_t
sched_traditional_steal_processor_thread(processor_t processor)
{
	run_queue_t     rq      = runq_for_processor(processor);
	circle_queue_t  queue   = rq->queues + rq->highq;
	int             pri     = rq->highq;
	int             count   = rq->count;
	thread_t        thread;

	while (count > 0) {
		cqe_foreach_element_safe(thread, queue, runq_links) {
			if (thread->bound_processor == PROCESSOR_NULL) {
				circle_dequeue(queue, &thread->runq_links);

				thread->runq = PROCESSOR_NULL;
				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
				runq_consider_decr_bound_count(processor, thread);
				rq->count--;
				if (SCHED(priority_is_urgent)(pri)) {
					rq->urgency--; assert(rq->urgency >= 0);
				}
				if (circle_queue_empty(queue)) {
					bitmap_clear(rq->bitmap, pri);
					rq->highq = bitmap_first(rq->bitmap, NRQS);
				}

				return thread;
			}
			count--;
		}

		queue--; pri--;
	}

	return THREAD_NULL;
}
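
/* The per-processor runq variant always permits stealing from a pset. */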
static bool
sched_traditional_steal_thread_enabled(processor_set_t pset)
{
	(void)pset;
	return true;
}

/*
 * Locate and steal a thread, beginning
 * at the pset.
 *
 * The pset must be locked, and is returned
 * unlocked.
 *
 * Returns the stolen thread, or THREAD_NULL on
 * failure.
 */
static thread_t
sched_traditional_steal_thread(processor_set_t pset)
{
	processor_set_t nset, cset = pset;
	processor_t     processor;
	thread_t        thread;

	do {
		uint64_t active_map = (pset->cpu_state_map[PROCESSOR_RUNNING] |
		    pset->cpu_state_map[PROCESSOR_DISPATCHING]);
		for (int cpuid = lsb_first(active_map); cpuid >= 0; cpuid = lsb_next(active_map, cpuid)) {
			processor = processor_array[cpuid];
			if (runq_for_processor(processor)->count > 0) {
				thread = sched_traditional_steal_processor_thread(processor);
				if (thread != THREAD_NULL) {
					pset_unlock(cset);

					return thread;
				}
			}
		}

		nset = next_pset(cset);

		if (nset != pset) {
			pset_unlock(cset);

			cset = nset;
			pset_lock(cset);
		}
	} while (nset != pset);

	pset_unlock(cset);

	return THREAD_NULL;
}
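
/*
 * Periodic scan that collects runnable and idle threads whose timeshare
 * priorities need recomputation, then fixes them up in a batch.
 */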
static void
sched_traditional_thread_update_scan(sched_update_scan_context_t scan_context)
{
	boolean_t               restart_needed = FALSE;
	processor_t             processor = processor_list;
	processor_set_t         pset;
	thread_t                thread;
	spl_t                   s;

	do {
		do {
			/*
			 * TODO: in sched_traditional_use_pset_runqueue case,
			 *  avoid scanning the same runq multiple times
			 */
			pset = processor->processor_set;

			s = splsched();
			pset_lock(pset);

			restart_needed = runq_scan(runq_for_processor(processor), scan_context);

			pset_unlock(pset);
			splx(s);

			if (restart_needed) {
				break;
			}

			thread = processor->idle_thread;
			if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
				if (thread_update_add_thread(thread) == FALSE) {
					restart_needed = TRUE;
					break;
				}
			}
		} while ((processor = processor->processor_list) != NULL);

		/* Ok, we now have a collection of candidates -- fix them. */
		thread_update_process_threads();
	} while (restart_needed);
}