/*
 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach/mach_types.h>

#include <kern/sched.h>
#include <kern/sched_prim.h>
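/*
 * When TRUE, every processor in a pset shares a single run queue
 * (pset->pset_runq) instead of each owning a private one.  Set by the
 * "traditional_with_pset_runqueue" variant's init routine below.
 */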
static boolean_t sched_traditional_use_pset_runqueue = FALSE;
static void
sched_traditional_init(void);

static thread_t
sched_traditional_steal_thread(processor_set_t pset);

static thread_t
sched_traditional_steal_processor_thread(processor_t processor);

static void
sched_traditional_thread_update_scan(sched_update_scan_context_t scan_context);

static void
sched_traditional_processor_queue_shutdown(processor_t processor);

static boolean_t
sched_traditional_processor_enqueue(processor_t processor, thread_t thread, integer_t options);

static boolean_t
sched_traditional_processor_queue_remove(processor_t processor, thread_t thread);

static boolean_t
sched_traditional_processor_queue_empty(processor_t processor);

static ast_t
sched_traditional_processor_csw_check(processor_t processor);

static boolean_t
sched_traditional_processor_queue_has_priority(processor_t processor, int priority, boolean_t gte);

static int
sched_traditional_processor_runq_count(processor_t processor);

static boolean_t
sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor);

static uint64_t
sched_traditional_processor_runq_stats_count_sum(processor_t processor);

static uint64_t
sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor);

static int
sched_traditional_processor_bound_count(processor_t processor);

static void
sched_traditional_quantum_expire(thread_t thread);

static void
sched_traditional_processor_init(processor_t processor);

static void
sched_traditional_pset_init(processor_set_t pset);

static void
sched_traditional_with_pset_runqueue_init(void);

static sched_mode_t
sched_traditional_initial_thread_sched_mode(task_t parent_task);

static thread_t
sched_traditional_choose_thread(processor_t processor, int priority, ast_t reason);
/* Choose a thread from a processor's priority-based runq */
static thread_t
sched_traditional_choose_thread_from_runq(processor_t processor, run_queue_t runq, int priority);
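/*
 * The dispatch table packages the traditional timeshare policy behind the
 * generic scheduler interface.  The .sched_name string identifies the
 * policy when a scheduler implementation is selected at startup (see
 * sched_init() in sched_prim.c).
 */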
const struct sched_dispatch_table sched_traditional_dispatch = {
	.sched_name                         = "traditional",
	.init                               = sched_traditional_init,
	.timebase_init                      = sched_timeshare_timebase_init,
	.processor_init                     = sched_traditional_processor_init,
	.pset_init                          = sched_traditional_pset_init,
	.maintenance_continuation           = sched_timeshare_maintenance_continue,
	.choose_thread                      = sched_traditional_choose_thread,
	.steal_thread_enabled               = TRUE,
	.steal_thread                       = sched_traditional_steal_thread,
	.compute_timeshare_priority         = sched_compute_timeshare_priority,
	.choose_processor                   = choose_processor,
	.processor_enqueue                  = sched_traditional_processor_enqueue,
	.processor_queue_shutdown           = sched_traditional_processor_queue_shutdown,
	.processor_queue_remove             = sched_traditional_processor_queue_remove,
	.processor_queue_empty              = sched_traditional_processor_queue_empty,
	.priority_is_urgent                 = priority_is_urgent,
	.processor_csw_check                = sched_traditional_processor_csw_check,
	.processor_queue_has_priority       = sched_traditional_processor_queue_has_priority,
	.initial_quantum_size               = sched_timeshare_initial_quantum_size,
	.initial_thread_sched_mode          = sched_traditional_initial_thread_sched_mode,
	.can_update_priority                = can_update_priority,
	.update_priority                    = update_priority,
	.lightweight_update_priority        = lightweight_update_priority,
	.quantum_expire                     = sched_default_quantum_expire,
	.processor_runq_count               = sched_traditional_processor_runq_count,
	.processor_runq_stats_count_sum     = sched_traditional_processor_runq_stats_count_sum,
	.processor_bound_count              = sched_traditional_processor_bound_count,
	.thread_update_scan                 = sched_traditional_thread_update_scan,
	.direct_dispatch_to_idle_processors = TRUE,
	.multiple_psets_enabled             = TRUE,
	.sched_groups_enabled               = FALSE,
};
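/*
 * Variant that funnels all processors in a pset through one shared run
 * queue.  It differs from sched_traditional_dispatch only in its init,
 * queue-empty, and runq-stats entry points, and in disabling direct
 * dispatch to idle processors.
 */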
const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch = {
	.sched_name                         = "traditional_with_pset_runqueue",
	.init                               = sched_traditional_with_pset_runqueue_init,
	.timebase_init                      = sched_timeshare_timebase_init,
	.processor_init                     = sched_traditional_processor_init,
	.pset_init                          = sched_traditional_pset_init,
	.maintenance_continuation           = sched_timeshare_maintenance_continue,
	.choose_thread                      = sched_traditional_choose_thread,
	.steal_thread_enabled               = TRUE,
	.steal_thread                       = sched_traditional_steal_thread,
	.compute_timeshare_priority         = sched_compute_timeshare_priority,
	.choose_processor                   = choose_processor,
	.processor_enqueue                  = sched_traditional_processor_enqueue,
	.processor_queue_shutdown           = sched_traditional_processor_queue_shutdown,
	.processor_queue_remove             = sched_traditional_processor_queue_remove,
	.processor_queue_empty              = sched_traditional_with_pset_runqueue_processor_queue_empty,
	.priority_is_urgent                 = priority_is_urgent,
	.processor_csw_check                = sched_traditional_processor_csw_check,
	.processor_queue_has_priority       = sched_traditional_processor_queue_has_priority,
	.initial_quantum_size               = sched_timeshare_initial_quantum_size,
	.initial_thread_sched_mode          = sched_traditional_initial_thread_sched_mode,
	.can_update_priority                = can_update_priority,
	.update_priority                    = update_priority,
	.lightweight_update_priority        = lightweight_update_priority,
	.quantum_expire                     = sched_default_quantum_expire,
	.processor_runq_count               = sched_traditional_processor_runq_count,
	.processor_runq_stats_count_sum     = sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum,
	.processor_bound_count              = sched_traditional_processor_bound_count,
	.thread_update_scan                 = sched_traditional_thread_update_scan,
	.direct_dispatch_to_idle_processors = FALSE,
	.multiple_psets_enabled             = TRUE,
	.sched_groups_enabled               = FALSE,
};
static void
sched_traditional_init(void)
{
	sched_timeshare_init();
}

static void
sched_traditional_with_pset_runqueue_init(void)
{
	sched_timeshare_init();

	sched_traditional_use_pset_runqueue = TRUE;
}
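/*
 * Exactly one kind of run queue is initialized, depending on whether the
 * pset-wide queue is in use; the bound-thread counts are zeroed in either
 * case.
 */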
static void
sched_traditional_processor_init(processor_t processor)
{
	if (!sched_traditional_use_pset_runqueue) {
		run_queue_init(&processor->runq);
	}
	processor->runq_bound_count = 0;
}
static void
sched_traditional_pset_init(processor_set_t pset)
{
	if (sched_traditional_use_pset_runqueue) {
		run_queue_init(&pset->pset_runq);
	}
	pset->pset_runq_bound_count = 0;
}
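/*
 * Resolve the run queue a processor services: the shared pset run queue
 * when sched_traditional_use_pset_runqueue is set, otherwise the
 * processor's private queue.
 */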
__attribute__((always_inline))
static inline run_queue_t
runq_for_processor(processor_t processor)
{
	if (sched_traditional_use_pset_runqueue)
		return &processor->processor_set->pset_runq;
	else
		return &processor->runq;
}
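/*
 * Bound-thread bookkeeping: processors (and, in the shared-runq case,
 * psets) count enqueued threads that are bound to a specific processor,
 * so queue-empty checks can discount threads this processor may not run.
 */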
__attribute__((always_inline))
static inline void
runq_consider_incr_bound_count(processor_t processor, thread_t thread)
{
	if (thread->bound_processor == PROCESSOR_NULL)
		return;

	assert(thread->bound_processor == processor);

	if (sched_traditional_use_pset_runqueue)
		processor->processor_set->pset_runq_bound_count++;

	processor->runq_bound_count++;
}
__attribute__((always_inline))
static inline void
runq_consider_decr_bound_count(processor_t processor, thread_t thread)
{
	if (thread->bound_processor == PROCESSOR_NULL)
		return;

	assert(thread->bound_processor == processor);

	if (sched_traditional_use_pset_runqueue)
		processor->processor_set->pset_runq_bound_count--;

	processor->runq_bound_count--;
}
static thread_t
sched_traditional_choose_thread(
                                processor_t      processor,
                                int              priority,
                                __unused ast_t   reason)
{
	thread_t thread;

	thread = sched_traditional_choose_thread_from_runq(processor, runq_for_processor(processor), priority);
	if (thread != THREAD_NULL) {
		runq_consider_decr_bound_count(processor, thread);
	}

	return thread;
}
/*
 * sched_traditional_choose_thread_from_runq:
 *
 * Locate a thread to execute from the processor run queue
 * and return it.  Only choose a thread with greater or equal
 * priority.
 *
 * Associated pset must be locked.  Returns THREAD_NULL
 * on failure.
 */
static thread_t
sched_traditional_choose_thread_from_runq(
                                          processor_t     processor,
                                          run_queue_t     rq,
                                          int             priority)
{
	queue_t  queue = rq->queues + rq->highq;
	int      pri   = rq->highq;
	int      count = rq->count;
	thread_t thread;

	while (count > 0 && pri >= priority) {
		thread = (thread_t)(uintptr_t)queue_first(queue);
		while (!queue_end(queue, (queue_entry_t)thread)) {
			if (thread->bound_processor == PROCESSOR_NULL ||
			    thread->bound_processor == processor) {
				remqueue((queue_entry_t)thread);

				thread->runq = PROCESSOR_NULL;
				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
				rq->count--;
				if (SCHED(priority_is_urgent)(pri)) {
					rq->urgency--; assert(rq->urgency >= 0);
				}
				if (queue_empty(queue)) {
					if (pri != IDLEPRI)
						clrbit(MAXPRI - pri, rq->bitmap);
					rq->highq = MAXPRI - ffsbit(rq->bitmap);
				}

				return (thread);
			}
			count--;

			thread = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);
		}

		queue--; pri--;
	}

	return (THREAD_NULL);
}
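/* Threads of the kernel task default to fixed priority; all others timeshare. */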
static sched_mode_t
sched_traditional_initial_thread_sched_mode(task_t parent_task)
{
	if (parent_task == kernel_task)
		return TH_MODE_FIXED;
	else
		return TH_MODE_TIMESHARE;
}
/*
 * sched_traditional_processor_enqueue:
 *
 * Enqueue thread on a processor run queue.  Thread must be locked,
 * and not already be on a run queue.
 *
 * Returns TRUE if a preemption is indicated based on the state
 * of the run queue.
 *
 * The run queue must be locked (see thread_run_queue_remove()
 * for more info).
 */
static boolean_t
sched_traditional_processor_enqueue(processor_t   processor,
                                    thread_t      thread,
                                    integer_t     options)
{
	run_queue_t rq = runq_for_processor(processor);
	boolean_t   result;

	result = run_queue_enqueue(rq, thread, options);
	thread->runq = processor;
	runq_consider_incr_bound_count(processor, thread);

	return (result);
}
static boolean_t
sched_traditional_processor_queue_empty(processor_t processor)
{
	return runq_for_processor(processor)->count == 0;
}
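/*
 * With a shared pset run queue, rq->count covers every runnable thread in
 * the pset, including threads bound to some other processor that this
 * processor may not run.  The function below computes
 *
 *     eligible = count - pset->pset_runq_bound_count + processor->runq_bound_count
 *
 * For example, a count of 3 with two threads bound to other processors and
 * none bound here yields eligible = 1, so this processor must not idle.
 */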
static boolean_t
sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor)
{
	processor_set_t pset  = processor->processor_set;
	int             count = runq_for_processor(processor)->count;

	/*
	 * The pset runq contains the count of all runnable threads
	 * for all processors in the pset.  However, for threads that
	 * are bound to another processor, the current "processor"
	 * is not eligible to execute the thread.  So we only
	 * include bound threads that are bound to the current
	 * "processor".  This allows the processor to idle when the
	 * count of eligible threads drops to 0, even if there's
	 * a runnable thread bound to a different processor in the
	 * shared run queue.
	 */
	count -= pset->pset_runq_bound_count;
	count += processor->runq_bound_count;

	return count == 0;
}
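/*
 * Decide whether the running thread should be preempted: while it is still
 * in its first quantum only a strictly higher-priority thread preempts;
 * afterwards an equal-priority thread does too.  Urgent runnable work
 * upgrades the AST to AST_PREEMPT | AST_URGENT.
 */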
static ast_t
sched_traditional_processor_csw_check(processor_t processor)
{
	run_queue_t runq;
	boolean_t   has_higher;

	assert(processor->active_thread != NULL);

	runq = runq_for_processor(processor);

	if (processor->first_timeslice) {
		has_higher = (runq->highq > processor->current_pri);
	} else {
		has_higher = (runq->highq >= processor->current_pri);
	}

	if (has_higher) {
		if (runq->urgency > 0)
			return (AST_PREEMPT | AST_URGENT);

		return AST_PREEMPT;
	}

	return AST_NONE;
}
static boolean_t
sched_traditional_processor_queue_has_priority(processor_t   processor,
                                               int           priority,
                                               boolean_t     gte)
{
	if (gte)
		return runq_for_processor(processor)->highq >= priority;
	else
		return runq_for_processor(processor)->highq > priority;
}
static int
sched_traditional_processor_runq_count(processor_t processor)
{
	return runq_for_processor(processor)->count;
}
static uint64_t
sched_traditional_processor_runq_stats_count_sum(processor_t processor)
{
	return runq_for_processor(processor)->runq_stats.count_sum;
}
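/*
 * With a shared pset run queue the stats sum would otherwise be reported
 * once per processor; only the lowest-numbered CPU in the pset reports it.
 */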
static uint64_t
sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor)
{
	if (processor->cpu_id == processor->processor_set->cpu_set_low)
		return runq_for_processor(processor)->runq_stats.count_sum;
	else
		return 0ULL;
}
static int
sched_traditional_processor_bound_count(processor_t processor)
{
	return processor->runq_bound_count;
}
/*
 * sched_traditional_processor_queue_shutdown:
 *
 * Shutdown a processor run queue by
 * re-dispatching non-bound threads.
 *
 * Associated pset must be locked, and is
 * returned unlocked.
 */
static void
sched_traditional_processor_queue_shutdown(processor_t processor)
{
	processor_set_t pset  = processor->processor_set;
	run_queue_t     rq    = runq_for_processor(processor);
	queue_t         queue = rq->queues + rq->highq;
	int             pri   = rq->highq;
	int             count = rq->count;
	thread_t        next, thread;
	queue_head_t    tqueue;

	queue_init(&tqueue);

	while (count > 0) {
		thread = (thread_t)(uintptr_t)queue_first(queue);
		while (!queue_end(queue, (queue_entry_t)thread)) {
			next = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);

			if (thread->bound_processor == PROCESSOR_NULL) {
				remqueue((queue_entry_t)thread);

				thread->runq = PROCESSOR_NULL;
				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
				runq_consider_decr_bound_count(processor, thread);
				rq->count--;
				if (SCHED(priority_is_urgent)(pri)) {
					rq->urgency--; assert(rq->urgency >= 0);
				}
				if (queue_empty(queue)) {
					if (pri != IDLEPRI)
						clrbit(MAXPRI - pri, rq->bitmap);
					rq->highq = MAXPRI - ffsbit(rq->bitmap);
				}

				enqueue_tail(&tqueue, (queue_entry_t)thread);
			}
			count--;

			thread = next;
		}

		queue--; pri--;
	}

	pset_unlock(pset);

	while ((thread = (thread_t)(uintptr_t)dequeue_head(&tqueue)) != THREAD_NULL) {
		thread_lock(thread);

		thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
	}
}
#if DEBUG
/*
 * Debug-only consistency check: verify that a thread claiming to be on
 * this run queue actually appears on the queue for its priority.
 */
static void
run_queue_check(
                run_queue_t     rq,
                thread_t        thread)
{
	queue_t       q;
	queue_entry_t qe;

	if (rq != thread->runq)
		panic("run_queue_check: thread runq");

	if (thread->sched_pri > MAXPRI || thread->sched_pri < MINPRI)
		panic("run_queue_check: thread sched_pri");

	q = &rq->queues[thread->sched_pri];
	qe = queue_first(q);
	while (!queue_end(q, qe)) {
		if (qe == (queue_entry_t)thread)
			return;

		qe = queue_next(qe);
	}

	panic("run_queue_check: end");
}
#endif /* DEBUG */
/*
 * Locks the runqueue itself.
 *
 * Thread must be locked.
 */
static boolean_t
sched_traditional_processor_queue_remove(processor_t processor,
                                         thread_t thread)
{
	processor_set_t pset;
	run_queue_t     rq;

	pset = processor->processor_set;
	pset_lock(pset);

	rq = runq_for_processor(processor);

	if (processor == thread->runq) {
		/*
		 * Thread is on a run queue and we have a lock on
		 * that run queue.
		 */
		runq_consider_decr_bound_count(processor, thread);
		run_queue_remove(rq, thread);
	}
	else {
		/*
		 * The thread left the run queue before we could
		 * lock the run queue.
		 */
		assert(thread->runq == PROCESSOR_NULL);
		processor = PROCESSOR_NULL;
	}

	pset_unlock(pset);

	return (processor != PROCESSOR_NULL);
}
/*
 * sched_traditional_steal_processor_thread:
 *
 * Locate a thread to steal from the processor and
 * return it.
 *
 * Associated pset must be locked.  Returns THREAD_NULL
 * on failure.
 */
static thread_t
sched_traditional_steal_processor_thread(processor_t processor)
{
	run_queue_t rq    = runq_for_processor(processor);
	queue_t     queue = rq->queues + rq->highq;
	int         pri   = rq->highq;
	int         count = rq->count;
	thread_t    thread;

	while (count > 0) {
		thread = (thread_t)(uintptr_t)queue_first(queue);
		while (!queue_end(queue, (queue_entry_t)thread)) {
			if (thread->bound_processor == PROCESSOR_NULL) {
				remqueue((queue_entry_t)thread);

				thread->runq = PROCESSOR_NULL;
				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
				runq_consider_decr_bound_count(processor, thread);
				rq->count--;
				if (SCHED(priority_is_urgent)(pri)) {
					rq->urgency--; assert(rq->urgency >= 0);
				}
				if (queue_empty(queue)) {
					if (pri != IDLEPRI)
						clrbit(MAXPRI - pri, rq->bitmap);
					rq->highq = MAXPRI - ffsbit(rq->bitmap);
				}

				return (thread);
			}
			count--;

			thread = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);
		}

		queue--; pri--;
	}

	return (THREAD_NULL);
}
/*
 * Locate and steal a thread, beginning
 * at the pset.
 *
 * The pset must be locked, and is returned
 * unlocked.
 *
 * Returns the stolen thread, or THREAD_NULL on
 * failure.
 */
static thread_t
sched_traditional_steal_thread(processor_set_t pset)
{
	processor_set_t nset, cset = pset;
	processor_t     processor;
	thread_t        thread;

	do {
		processor = (processor_t)(uintptr_t)queue_first(&cset->active_queue);
		while (!queue_end(&cset->active_queue, (queue_entry_t)processor)) {
			if (runq_for_processor(processor)->count > 0) {
				thread = sched_traditional_steal_processor_thread(processor);
				if (thread != THREAD_NULL) {
					remqueue((queue_entry_t)processor);
					enqueue_tail(&cset->active_queue, (queue_entry_t)processor);

					pset_unlock(cset);

					return (thread);
				}
			}

			processor = (processor_t)(uintptr_t)queue_next((queue_entry_t)processor);
		}

		nset = next_pset(cset);

		if (nset != pset) {
			pset_unlock(cset);

			cset = nset;
			pset_lock(cset);
		}
	} while (nset != pset);

	pset_unlock(cset);

	return (THREAD_NULL);
}
static void
sched_traditional_thread_update_scan(sched_update_scan_context_t scan_context)
{
	boolean_t       restart_needed = FALSE;
	processor_t     processor      = processor_list;
	processor_set_t pset;
	thread_t        thread;
	spl_t           s;

	do {
		do {
			/*
			 * TODO: in sched_traditional_use_pset_runqueue case,
			 *  avoid scanning the same runq multiple times
			 */
			pset = processor->processor_set;

			s = splsched();
			pset_lock(pset);

			restart_needed = runq_scan(runq_for_processor(processor), scan_context);

			pset_unlock(pset);
			splx(s);

			if (restart_needed)
				break;

			thread = processor->idle_thread;
			if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
				if (thread_update_add_thread(thread) == FALSE) {
					restart_needed = TRUE;
					break;
				}
			}
		} while ((processor = processor->processor_list) != NULL);

		/* Ok, we now have a collection of candidates -- fix them. */
		thread_update_process_threads();
	} while (restart_needed);
}