/*
 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach/mach_types.h>

#include <kern/sched.h>
#include <kern/sched_prim.h>
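/*
 * When TRUE, every processor in a pset shares a single run queue instead
 * of owning its own.  Set by sched_traditional_with_pset_runqueue_init()
 * and consulted by runq_for_processor() and the bound-count helpers below.
 */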
static boolean_t sched_traditional_use_pset_runqueue = FALSE;
static void
sched_traditional_init(void);

static thread_t
sched_traditional_steal_thread(processor_set_t pset);

static thread_t
sched_traditional_steal_processor_thread(processor_t processor);

static void
sched_traditional_thread_update_scan(sched_update_scan_context_t scan_context);

static void
sched_traditional_processor_queue_shutdown(processor_t processor);

static boolean_t
sched_traditional_processor_enqueue(processor_t processor, thread_t thread, integer_t options);

static boolean_t
sched_traditional_processor_queue_remove(processor_t processor, thread_t thread);

static boolean_t
sched_traditional_processor_queue_empty(processor_t processor);

static ast_t
sched_traditional_processor_csw_check(processor_t processor);

static boolean_t
sched_traditional_processor_queue_has_priority(processor_t processor, int priority, boolean_t gte);

static int
sched_traditional_processor_runq_count(processor_t processor);

static boolean_t
sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor);

static uint64_t
sched_traditional_processor_runq_stats_count_sum(processor_t processor);

static uint64_t
sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor);

static int
sched_traditional_processor_bound_count(processor_t processor);

static void
sched_traditional_quantum_expire(thread_t thread);

static void
sched_traditional_processor_init(processor_t processor);

static void
sched_traditional_pset_init(processor_set_t pset);

static void
sched_traditional_with_pset_runqueue_init(void);

static sched_mode_t
sched_traditional_initial_thread_sched_mode(task_t parent_task);

static thread_t
sched_traditional_choose_thread(processor_t processor, int priority, ast_t reason);
/* Choose a thread from a processor's priority-based runq */
static thread_t sched_traditional_choose_thread_from_runq(processor_t processor,
                                                          run_queue_t runq,
                                                          int priority);
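/*
 * Dispatch table that plugs the traditional scheduler's hooks into the
 * scheduler framework; callers invoke these through the SCHED() macro.
 */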
const struct sched_dispatch_table sched_traditional_dispatch = {
    .sched_name                             = "traditional",
    .init                                   = sched_traditional_init,
    .timebase_init                          = sched_timeshare_timebase_init,
    .processor_init                         = sched_traditional_processor_init,
    .pset_init                              = sched_traditional_pset_init,
    .maintenance_continuation               = sched_timeshare_maintenance_continue,
    .choose_thread                          = sched_traditional_choose_thread,
    .steal_thread_enabled                   = TRUE,
    .steal_thread                           = sched_traditional_steal_thread,
    .compute_timeshare_priority             = sched_compute_timeshare_priority,
    .choose_processor                       = choose_processor,
    .processor_enqueue                      = sched_traditional_processor_enqueue,
    .processor_queue_shutdown               = sched_traditional_processor_queue_shutdown,
    .processor_queue_remove                 = sched_traditional_processor_queue_remove,
    .processor_queue_empty                  = sched_traditional_processor_queue_empty,
    .priority_is_urgent                     = priority_is_urgent,
    .processor_csw_check                    = sched_traditional_processor_csw_check,
    .processor_queue_has_priority           = sched_traditional_processor_queue_has_priority,
    .initial_quantum_size                   = sched_timeshare_initial_quantum_size,
    .initial_thread_sched_mode              = sched_traditional_initial_thread_sched_mode,
    .can_update_priority                    = can_update_priority,
    .update_priority                        = update_priority,
    .lightweight_update_priority            = lightweight_update_priority,
    .quantum_expire                         = sched_default_quantum_expire,
    .processor_runq_count                   = sched_traditional_processor_runq_count,
    .processor_runq_stats_count_sum         = sched_traditional_processor_runq_stats_count_sum,
    .processor_bound_count                  = sched_traditional_processor_bound_count,
    .thread_update_scan                     = sched_traditional_thread_update_scan,
    .direct_dispatch_to_idle_processors     = TRUE,
    .multiple_psets_enabled                 = TRUE,
    .sched_groups_enabled                   = FALSE,
    .avoid_processor_enabled                = FALSE,
    .thread_avoid_processor                 = NULL,
    .processor_balance                      = sched_SMT_balance,

    .rt_runq                                = sched_rtglobal_runq,
    .rt_init                                = sched_rtglobal_init,
    .rt_queue_shutdown                      = sched_rtglobal_queue_shutdown,
    .rt_runq_scan                           = sched_rtglobal_runq_scan,
    .rt_runq_count_sum                      = sched_rtglobal_runq_count_sum,

    .qos_max_parallelism                    = sched_qos_max_parallelism,
    .check_spill                            = sched_check_spill,
    .ipi_policy                             = sched_ipi_policy,
    .thread_should_yield                    = sched_thread_should_yield,
};
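/*
 * Variant of the traditional scheduler in which all processors in a pset
 * share one run queue.  It differs from the table above only in its init
 * hook, its queue-empty and runq-stats accessors, and in disabling direct
 * dispatch to idle processors.
 */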
const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch = {
    .sched_name                             = "traditional_with_pset_runqueue",
    .init                                   = sched_traditional_with_pset_runqueue_init,
    .timebase_init                          = sched_timeshare_timebase_init,
    .processor_init                         = sched_traditional_processor_init,
    .pset_init                              = sched_traditional_pset_init,
    .maintenance_continuation               = sched_timeshare_maintenance_continue,
    .choose_thread                          = sched_traditional_choose_thread,
    .steal_thread_enabled                   = TRUE,
    .steal_thread                           = sched_traditional_steal_thread,
    .compute_timeshare_priority             = sched_compute_timeshare_priority,
    .choose_processor                       = choose_processor,
    .processor_enqueue                      = sched_traditional_processor_enqueue,
    .processor_queue_shutdown               = sched_traditional_processor_queue_shutdown,
    .processor_queue_remove                 = sched_traditional_processor_queue_remove,
    .processor_queue_empty                  = sched_traditional_with_pset_runqueue_processor_queue_empty,
    .priority_is_urgent                     = priority_is_urgent,
    .processor_csw_check                    = sched_traditional_processor_csw_check,
    .processor_queue_has_priority           = sched_traditional_processor_queue_has_priority,
    .initial_quantum_size                   = sched_timeshare_initial_quantum_size,
    .initial_thread_sched_mode              = sched_traditional_initial_thread_sched_mode,
    .can_update_priority                    = can_update_priority,
    .update_priority                        = update_priority,
    .lightweight_update_priority            = lightweight_update_priority,
    .quantum_expire                         = sched_default_quantum_expire,
    .processor_runq_count                   = sched_traditional_processor_runq_count,
    .processor_runq_stats_count_sum         = sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum,
    .processor_bound_count                  = sched_traditional_processor_bound_count,
    .thread_update_scan                     = sched_traditional_thread_update_scan,
    .direct_dispatch_to_idle_processors     = FALSE,
    .multiple_psets_enabled                 = TRUE,
    .sched_groups_enabled                   = FALSE,
    .avoid_processor_enabled                = FALSE,
    .thread_avoid_processor                 = NULL,
    .processor_balance                      = sched_SMT_balance,

    .rt_runq                                = sched_rtglobal_runq,
    .rt_init                                = sched_rtglobal_init,
    .rt_queue_shutdown                      = sched_rtglobal_queue_shutdown,
    .rt_runq_scan                           = sched_rtglobal_runq_scan,
    .rt_runq_count_sum                      = sched_rtglobal_runq_count_sum,

    .qos_max_parallelism                    = sched_qos_max_parallelism,
    .check_spill                            = sched_check_spill,
    .ipi_policy                             = sched_ipi_policy,
    .thread_should_yield                    = sched_thread_should_yield,
};
static void
sched_traditional_init(void)
{
    sched_timeshare_init();
}

static void
sched_traditional_with_pset_runqueue_init(void)
{
    sched_timeshare_init();
    sched_traditional_use_pset_runqueue = TRUE;
}
static void
sched_traditional_processor_init(processor_t processor)
{
    if (!sched_traditional_use_pset_runqueue) {
        run_queue_init(&processor->runq);
    }
    processor->runq_bound_count = 0;
}
static void
sched_traditional_pset_init(processor_set_t pset)
{
    if (sched_traditional_use_pset_runqueue) {
        run_queue_init(&pset->pset_runq);
    }
    pset->pset_runq_bound_count = 0;
}
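/*
 * Return the run queue this processor services: the shared pset run queue
 * when sched_traditional_use_pset_runqueue is set, otherwise the
 * processor's private queue.
 */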
__attribute__((always_inline))
static inline run_queue_t runq_for_processor(processor_t processor)
{
    if (sched_traditional_use_pset_runqueue)
        return &processor->processor_set->pset_runq;
    else
        return &processor->runq;
}
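/*
 * Bound-thread accounting: threads bound to a specific processor are
 * counted separately so that a processor sharing a pset run queue can
 * tell how many queued threads it is actually eligible to run.  The two
 * helpers below keep the per-processor and per-pset counts in sync.
 */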
__attribute__((always_inline))
static inline void runq_consider_incr_bound_count(processor_t processor,
                                                  thread_t thread)
{
    if (thread->bound_processor == PROCESSOR_NULL)
        return;

    assert(thread->bound_processor == processor);

    if (sched_traditional_use_pset_runqueue)
        processor->processor_set->pset_runq_bound_count++;

    processor->runq_bound_count++;
}
__attribute__((always_inline))
static inline void runq_consider_decr_bound_count(processor_t processor,
                                                  thread_t thread)
{
    if (thread->bound_processor == PROCESSOR_NULL)
        return;

    assert(thread->bound_processor == processor);

    if (sched_traditional_use_pset_runqueue)
        processor->processor_set->pset_runq_bound_count--;

    processor->runq_bound_count--;
}
static thread_t
sched_traditional_choose_thread(
                                processor_t     processor,
                                int             priority,
                       __unused ast_t           reason)
{
    thread_t thread;

    thread = sched_traditional_choose_thread_from_runq(processor, runq_for_processor(processor), priority);
    if (thread != THREAD_NULL) {
        runq_consider_decr_bound_count(processor, thread);
    }

    return thread;
}
/*
 *  sched_traditional_choose_thread_from_runq:
 *
 *  Locate a thread to execute from the processor run queue
 *  and return it.  Only choose a thread with greater or equal
 *  priority.
 *
 *  Associated pset must be locked.  Returns THREAD_NULL
 *  on failure.
 */
static thread_t
sched_traditional_choose_thread_from_runq(
                                          processor_t   processor,
                                          run_queue_t   rq,
                                          int           priority)
{
    queue_t     queue   = rq->queues + rq->highq;
    int         pri     = rq->highq;
    int         count   = rq->count;
    thread_t    thread;

    while (count > 0 && pri >= priority) {
        thread = (thread_t)(uintptr_t)queue_first(queue);
        while (!queue_end(queue, (queue_entry_t)thread)) {
            if (thread->bound_processor == PROCESSOR_NULL ||
                thread->bound_processor == processor) {
                remqueue((queue_entry_t)thread);

                thread->runq = PROCESSOR_NULL;
                SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
                rq->count--;
                if (SCHED(priority_is_urgent)(pri)) {
                    rq->urgency--; assert(rq->urgency >= 0);
                }
                if (queue_empty(queue)) {
                    bitmap_clear(rq->bitmap, pri);
                    rq->highq = bitmap_first(rq->bitmap, NRQS);
                }

                return (thread);
            }
            count--;

            thread = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);
        }

        queue--; pri--;
    }

    return (THREAD_NULL);
}
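/* Kernel-task threads run at fixed priority; everything else timeshares. */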
static sched_mode_t
sched_traditional_initial_thread_sched_mode(task_t parent_task)
{
    if (parent_task == kernel_task)
        return TH_MODE_FIXED;
    else
        return TH_MODE_TIMESHARE;
}
/*
 *  sched_traditional_processor_enqueue:
 *
 *  Enqueue thread on a processor run queue.  Thread must be locked,
 *  and not already be on a run queue.
 *
 *  Returns TRUE if a preemption is indicated based on the state
 *  of the run queue.
 *
 *  The run queue must be locked (see thread_run_queue_remove()
 *  for more info).
 */
static boolean_t
sched_traditional_processor_enqueue(processor_t  processor,
                                    thread_t     thread,
                                    integer_t    options)
{
    run_queue_t rq = runq_for_processor(processor);
    boolean_t   result;

    result = run_queue_enqueue(rq, thread, options);
    thread->runq = processor;
    runq_consider_incr_bound_count(processor, thread);

    return (result);
}
static boolean_t
sched_traditional_processor_queue_empty(processor_t processor)
{
    return runq_for_processor(processor)->count == 0;
}
static boolean_t
sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor)
{
    processor_set_t pset = processor->processor_set;
    int count = runq_for_processor(processor)->count;

    /*
     * The pset runq contains the count of all runnable threads
     * for all processors in the pset. However, for threads that
     * are bound to another processor, the current "processor"
     * is not eligible to execute the thread. So we only
     * include bound threads that are bound to the current
     * "processor". This allows the processor to idle when the
     * count of eligible threads drops to 0, even if there's
     * a runnable thread bound to a different processor in the
     * shared runq.
     */

    count -= pset->pset_runq_bound_count;
    count += processor->runq_bound_count;

    return count == 0;
}
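/*
 * Decide whether the running thread should take a context-switch AST:
 * preempt when a higher-priority thread is queued (or one of equal
 * priority once the current thread's first timeslice has expired), and
 * flag AST_URGENT when the queue holds urgent threads.
 */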
static ast_t
sched_traditional_processor_csw_check(processor_t processor)
{
    run_queue_t runq;
    boolean_t   has_higher;

    assert(processor->active_thread != NULL);

    runq = runq_for_processor(processor);

    if (processor->first_timeslice) {
        has_higher = (runq->highq > processor->current_pri);
    } else {
        has_higher = (runq->highq >= processor->current_pri);
    }

    if (has_higher) {
        if (runq->urgency > 0)
            return (AST_PREEMPT | AST_URGENT);

        return AST_PREEMPT;
    }

    return AST_NONE;
}
static boolean_t
sched_traditional_processor_queue_has_priority(processor_t  processor,
                                               int          priority,
                                               boolean_t    gte)
{
    if (gte)
        return runq_for_processor(processor)->highq >= priority;
    else
        return runq_for_processor(processor)->highq > priority;
}
static int
sched_traditional_processor_runq_count(processor_t processor)
{
    return runq_for_processor(processor)->count;
}
static uint64_t
sched_traditional_processor_runq_stats_count_sum(processor_t processor)
{
    return runq_for_processor(processor)->runq_stats.count_sum;
}
static uint64_t
sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor)
{
    /* Report the shared runq's stats only once per pset */
    if (processor->cpu_id == processor->processor_set->cpu_set_low)
        return runq_for_processor(processor)->runq_stats.count_sum;
    else
        return 0ULL;
}
static int
sched_traditional_processor_bound_count(processor_t processor)
{
    return processor->runq_bound_count;
}
/*
 *  sched_traditional_processor_queue_shutdown:
 *
 *  Shutdown a processor run queue by
 *  re-dispatching non-bound threads.
 *
 *  Associated pset must be locked, and is
 *  returned unlocked.
 */
static void
sched_traditional_processor_queue_shutdown(processor_t processor)
{
    processor_set_t pset    = processor->processor_set;
    run_queue_t     rq      = runq_for_processor(processor);
    queue_t         queue   = rq->queues + rq->highq;
    int             pri     = rq->highq;
    int             count   = rq->count;
    thread_t        next, thread;
    queue_head_t    tqueue;

    queue_init(&tqueue);

    while (count > 0) {
        thread = (thread_t)(uintptr_t)queue_first(queue);
        while (!queue_end(queue, (queue_entry_t)thread)) {
            next = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);

            if (thread->bound_processor == PROCESSOR_NULL) {
                remqueue((queue_entry_t)thread);

                thread->runq = PROCESSOR_NULL;
                SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
                runq_consider_decr_bound_count(processor, thread);
                rq->count--;
                if (SCHED(priority_is_urgent)(pri)) {
                    rq->urgency--; assert(rq->urgency >= 0);
                }
                if (queue_empty(queue)) {
                    bitmap_clear(rq->bitmap, pri);
                    rq->highq = bitmap_first(rq->bitmap, NRQS);
                }

                enqueue_tail(&tqueue, (queue_entry_t)thread);
            }
            count--;

            thread = next;
        }

        queue--; pri--;
    }

    pset_unlock(pset);

    while ((thread = (thread_t)(uintptr_t)dequeue_head(&tqueue)) != THREAD_NULL) {
        thread_lock(thread);

        thread_setrun(thread, SCHED_TAILQ);

        thread_unlock(thread);
    }
}
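/*
 * Debug-only consistency check that a thread is really linked on the run
 * queue it claims; compiled out of normal builds.
 */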
#if 0
static void
run_queue_check(
                run_queue_t rq,
                thread_t    thread)
{
    queue_t         q;
    queue_entry_t   qe;

    if (rq != thread->runq)
        panic("run_queue_check: thread runq");

    if (thread->sched_pri > MAXPRI || thread->sched_pri < MINPRI)
        panic("run_queue_check: thread sched_pri");

    q = &rq->queues[thread->sched_pri];
    qe = queue_first(q);
    while (!queue_end(q, qe)) {
        if (qe == (queue_entry_t)thread)
            return;

        qe = queue_next(qe);
    }

    panic("run_queue_check: end");
}
#endif /* 0 */
/*
 *  Locks the runqueue itself.
 *
 *  Thread must be locked.
 */
static boolean_t
sched_traditional_processor_queue_remove(processor_t processor,
                                         thread_t thread)
{
    processor_set_t pset;
    run_queue_t     rq;

    pset = processor->processor_set;
    pset_lock(pset);

    rq = runq_for_processor(processor);

    if (processor == thread->runq) {
        /*
         * Thread is on a run queue and we have a lock on
         * that run queue.
         */
        runq_consider_decr_bound_count(processor, thread);
        run_queue_remove(rq, thread);
    } else {
        /*
         * The thread left the run queue before we could
         * lock the run queue.
         */
        assert(thread->runq == PROCESSOR_NULL);
        processor = PROCESSOR_NULL;
    }

    pset_unlock(pset);

    return (processor != PROCESSOR_NULL);
}
/*
 *  sched_traditional_steal_processor_thread:
 *
 *  Locate a thread to steal from the processor and
 *  return it.
 *
 *  Associated pset must be locked.  Returns THREAD_NULL
 *  on failure.
 */
static thread_t
sched_traditional_steal_processor_thread(processor_t processor)
{
    run_queue_t rq      = runq_for_processor(processor);
    queue_t     queue   = rq->queues + rq->highq;
    int         pri     = rq->highq;
    int         count   = rq->count;
    thread_t    thread;

    while (count > 0) {
        thread = (thread_t)(uintptr_t)queue_first(queue);
        while (!queue_end(queue, (queue_entry_t)thread)) {
            if (thread->bound_processor == PROCESSOR_NULL) {
                remqueue((queue_entry_t)thread);

                thread->runq = PROCESSOR_NULL;
                SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
                runq_consider_decr_bound_count(processor, thread);
                rq->count--;
                if (SCHED(priority_is_urgent)(pri)) {
                    rq->urgency--; assert(rq->urgency >= 0);
                }
                if (queue_empty(queue)) {
                    bitmap_clear(rq->bitmap, pri);
                    rq->highq = bitmap_first(rq->bitmap, NRQS);
                }

                return (thread);
            }
            count--;

            thread = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);
        }

        queue--; pri--;
    }

    return (THREAD_NULL);
}
/*
 *  Locate and steal a thread, beginning
 *  at the pset.
 *
 *  The pset must be locked, and is returned
 *  unlocked.
 *
 *  Returns the stolen thread, or THREAD_NULL on
 *  failure.
 */
static thread_t
sched_traditional_steal_thread(processor_set_t pset)
{
    processor_set_t nset, cset = pset;
    processor_t     processor;
    thread_t        thread;

    do {
        /* Try every running or dispatching processor in the current pset */
        uint64_t active_map = (pset->cpu_state_map[PROCESSOR_RUNNING] |
                               pset->cpu_state_map[PROCESSOR_DISPATCHING]);
        for (int cpuid = lsb_first(active_map); cpuid >= 0; cpuid = lsb_next(active_map, cpuid)) {
            processor = processor_array[cpuid];
            if (runq_for_processor(processor)->count > 0) {
                thread = sched_traditional_steal_processor_thread(processor);
                if (thread != THREAD_NULL) {
                    pset_unlock(cset);

                    return (thread);
                }
            }
        }

        nset = next_pset(cset);

        /* Advance to the next pset, dropping the current lock first */
        if (nset != pset) {
            pset_unlock(cset);

            cset = nset;
            pset_lock(cset);
        }
    } while (nset != pset);

    pset_unlock(cset);

    return (THREAD_NULL);
}
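/*
 * Periodic scan of every processor's run queue (and idle thread) looking
 * for timeshare threads whose priorities need recomputation; candidates
 * are batched and then fixed up by thread_update_process_threads().
 */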
static void
sched_traditional_thread_update_scan(sched_update_scan_context_t scan_context)
{
    boolean_t       restart_needed = FALSE;
    processor_t     processor = processor_list;
    processor_set_t pset;
    thread_t        thread;
    spl_t           s;

    do {
        do {
            /*
             * TODO: in sched_traditional_use_pset_runqueue case,
             *  avoid scanning the same runq multiple times
             */
            pset = processor->processor_set;

            s = splsched();
            pset_lock(pset);

            restart_needed = runq_scan(runq_for_processor(processor), scan_context);

            pset_unlock(pset);
            splx(s);

            if (restart_needed)
                break;

            thread = processor->idle_thread;
            if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
                if (thread_update_add_thread(thread) == FALSE) {
                    restart_needed = TRUE;
                    break;
                }
            }
        } while ((processor = processor->processor_list) != NULL);

        /* Ok, we now have a collection of candidates -- fix them. */
        thread_update_process_threads();
    } while (restart_needed);
}