/*
 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach/mach_types.h>

#include <kern/sched.h>
#include <kern/sched_prim.h>

static boolean_t
    sched_traditional_use_pset_runqueue = FALSE;
static void
sched_traditional_init(void);

static thread_t
sched_traditional_steal_thread(processor_set_t pset);

static thread_t
sched_traditional_steal_processor_thread(processor_t processor);

static void
sched_traditional_thread_update_scan(sched_update_scan_context_t scan_context);

static void
sched_traditional_processor_queue_shutdown(processor_t processor);

static boolean_t
sched_traditional_processor_enqueue(processor_t processor, thread_t thread, integer_t options);

static boolean_t
sched_traditional_processor_queue_remove(processor_t processor, thread_t thread);

static boolean_t
sched_traditional_processor_queue_empty(processor_t processor);

static ast_t
sched_traditional_processor_csw_check(processor_t processor);

static boolean_t
sched_traditional_processor_queue_has_priority(processor_t processor, int priority, boolean_t gte);

static int
sched_traditional_processor_runq_count(processor_t processor);

static boolean_t
sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor);

static uint64_t
sched_traditional_processor_runq_stats_count_sum(processor_t processor);

static uint64_t
sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor);

static int
sched_traditional_processor_bound_count(processor_t processor);

static void
sched_traditional_quantum_expire(thread_t thread);

static void
sched_traditional_processor_init(processor_t processor);

static void
sched_traditional_pset_init(processor_set_t pset);

static void
sched_traditional_with_pset_runqueue_init(void);

static sched_mode_t
sched_traditional_initial_thread_sched_mode(task_t parent_task);

static thread_t
sched_traditional_choose_thread(processor_t processor, int priority, ast_t reason);

/* Choose a thread from a processor's priority-based runq */
static thread_t
sched_traditional_choose_thread_from_runq(processor_t processor, run_queue_t runq, int priority);
const struct sched_dispatch_table sched_traditional_dispatch = {
    .sched_name                          = "traditional",
    .init                                = sched_traditional_init,
    .timebase_init                       = sched_timeshare_timebase_init,
    .processor_init                      = sched_traditional_processor_init,
    .pset_init                           = sched_traditional_pset_init,
    .maintenance_continuation            = sched_timeshare_maintenance_continue,
    .choose_thread                       = sched_traditional_choose_thread,
    .steal_thread_enabled                = TRUE,
    .steal_thread                        = sched_traditional_steal_thread,
    .compute_timeshare_priority          = sched_compute_timeshare_priority,
    .choose_processor                    = choose_processor,
    .processor_enqueue                   = sched_traditional_processor_enqueue,
    .processor_queue_shutdown            = sched_traditional_processor_queue_shutdown,
    .processor_queue_remove              = sched_traditional_processor_queue_remove,
    .processor_queue_empty               = sched_traditional_processor_queue_empty,
    .priority_is_urgent                  = priority_is_urgent,
    .processor_csw_check                 = sched_traditional_processor_csw_check,
    .processor_queue_has_priority        = sched_traditional_processor_queue_has_priority,
    .initial_quantum_size                = sched_timeshare_initial_quantum_size,
    .initial_thread_sched_mode           = sched_traditional_initial_thread_sched_mode,
    .can_update_priority                 = can_update_priority,
    .update_priority                     = update_priority,
    .lightweight_update_priority         = lightweight_update_priority,
    .quantum_expire                      = sched_default_quantum_expire,
    .processor_runq_count                = sched_traditional_processor_runq_count,
    .processor_runq_stats_count_sum      = sched_traditional_processor_runq_stats_count_sum,
    .processor_bound_count               = sched_traditional_processor_bound_count,
    .thread_update_scan                  = sched_traditional_thread_update_scan,
    .direct_dispatch_to_idle_processors  = TRUE,
    .multiple_psets_enabled              = TRUE,
    .sched_groups_enabled                = FALSE,
};
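
/*
 * The scheduler core invokes whichever dispatch table is active through the
 * SCHED() macro (used below, e.g. SCHED(priority_is_urgent)), so every entry
 * here must be populated with a callback of the matching signature.
 */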
const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch = {
    .sched_name                          = "traditional_with_pset_runqueue",
    .init                                = sched_traditional_with_pset_runqueue_init,
    .timebase_init                       = sched_timeshare_timebase_init,
    .processor_init                      = sched_traditional_processor_init,
    .pset_init                           = sched_traditional_pset_init,
    .maintenance_continuation            = sched_timeshare_maintenance_continue,
    .choose_thread                       = sched_traditional_choose_thread,
    .steal_thread_enabled                = TRUE,
    .steal_thread                        = sched_traditional_steal_thread,
    .compute_timeshare_priority          = sched_compute_timeshare_priority,
    .choose_processor                    = choose_processor,
    .processor_enqueue                   = sched_traditional_processor_enqueue,
    .processor_queue_shutdown            = sched_traditional_processor_queue_shutdown,
    .processor_queue_remove              = sched_traditional_processor_queue_remove,
    .processor_queue_empty               = sched_traditional_with_pset_runqueue_processor_queue_empty,
    .priority_is_urgent                  = priority_is_urgent,
    .processor_csw_check                 = sched_traditional_processor_csw_check,
    .processor_queue_has_priority        = sched_traditional_processor_queue_has_priority,
    .initial_quantum_size                = sched_timeshare_initial_quantum_size,
    .initial_thread_sched_mode           = sched_traditional_initial_thread_sched_mode,
    .can_update_priority                 = can_update_priority,
    .update_priority                     = update_priority,
    .lightweight_update_priority         = lightweight_update_priority,
    .quantum_expire                      = sched_default_quantum_expire,
    .processor_runq_count                = sched_traditional_processor_runq_count,
    .processor_runq_stats_count_sum      = sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum,
    .processor_bound_count               = sched_traditional_processor_bound_count,
    .thread_update_scan                  = sched_traditional_thread_update_scan,
    .direct_dispatch_to_idle_processors  = FALSE,
    .multiple_psets_enabled              = TRUE,
    .sched_groups_enabled                = FALSE,
};
static void
sched_traditional_init(void)
{
    sched_timeshare_init();
}

static void
sched_traditional_with_pset_runqueue_init(void)
{
    sched_timeshare_init();
    sched_traditional_use_pset_runqueue = TRUE;
}
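
/*
 * Once sched_traditional_use_pset_runqueue is set here, the
 * runq_for_processor() accessor below resolves to the shared per-pset run
 * queue instead of the per-processor one. Because processor and pset
 * initialization consult the flag, it must be set (as it is here, at
 * scheduler init) before any processor or pset is initialized.
 */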
static void
sched_traditional_processor_init(processor_t processor)
{
    if (!sched_traditional_use_pset_runqueue) {
        run_queue_init(&processor->runq);
    }
    processor->runq_bound_count = 0;
}
static void
sched_traditional_pset_init(processor_set_t pset)
{
    if (sched_traditional_use_pset_runqueue) {
        run_queue_init(&pset->pset_runq);
    }
    pset->pset_runq_bound_count = 0;
}
__attribute__((always_inline))
static inline run_queue_t runq_for_processor(processor_t processor)
{
    if (sched_traditional_use_pset_runqueue)
        return &processor->processor_set->pset_runq;
    else
        return &processor->runq;
}
__attribute__((always_inline))
static inline void runq_consider_incr_bound_count(processor_t processor,
                                                  thread_t thread)
{
    if (thread->bound_processor == PROCESSOR_NULL)
        return;

    assert(thread->bound_processor == processor);

    if (sched_traditional_use_pset_runqueue)
        processor->processor_set->pset_runq_bound_count++;

    processor->runq_bound_count++;
}
__attribute__((always_inline))
static inline void runq_consider_decr_bound_count(processor_t processor,
                                                  thread_t thread)
{
    if (thread->bound_processor == PROCESSOR_NULL)
        return;

    assert(thread->bound_processor == processor);

    if (sched_traditional_use_pset_runqueue)
        processor->processor_set->pset_runq_bound_count--;

    processor->runq_bound_count--;
}
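
/*
 * The incr/decr pair above keeps two bookkeeping counts in sync for bound
 * threads: processor->runq_bound_count (threads bound to this processor)
 * and, when the shared pset run queue is in use,
 * pset->pset_runq_bound_count (bound threads across the whole pset).
 * The queue-empty and bound-count callbacks below rely on these counts to
 * tell which runnable threads a given processor is actually allowed to run.
 */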
static thread_t
sched_traditional_choose_thread(
                                processor_t     processor,
                                int             priority,
                       __unused ast_t           reason)
{
    thread_t thread;

    thread = sched_traditional_choose_thread_from_runq(processor, runq_for_processor(processor), priority);
    if (thread != THREAD_NULL) {
        runq_consider_decr_bound_count(processor, thread);
    }

    return thread;
}
/*
 * sched_traditional_choose_thread_from_runq:
 *
 * Locate a thread to execute from the processor run queue
 * and return it.  Only choose a thread with greater or equal
 * priority.
 *
 * Associated pset must be locked.  Returns THREAD_NULL
 * on failure.
 */
static thread_t
sched_traditional_choose_thread_from_runq(
                                          processor_t     processor,
                                          run_queue_t     rq,
                                          int             priority)
{
    queue_t  queue = rq->queues + rq->highq;
    int      pri   = rq->highq;
    int      count = rq->count;
    thread_t thread;

    /* Scan priority bands from highest downward, stopping below 'priority' */
    while (count > 0 && pri >= priority) {
        thread = (thread_t)(uintptr_t)queue_first(queue);
        while (!queue_end(queue, (queue_entry_t)thread)) {
            /* Only take threads this processor is eligible to run */
            if (thread->bound_processor == PROCESSOR_NULL ||
                thread->bound_processor == processor) {
                remqueue((queue_entry_t)thread);

                thread->runq = PROCESSOR_NULL;
                SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
                rq->count--;
                if (SCHED(priority_is_urgent)(pri)) {
                    rq->urgency--; assert(rq->urgency >= 0);
                }
                if (queue_empty(queue)) {
                    bitmap_clear(rq->bitmap, pri);
                    rq->highq = bitmap_first(rq->bitmap, NRQS);
                }

                return (thread);
            }
            count--;

            thread = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);
        }

        queue--; pri--;
    }

    return (THREAD_NULL);
}
static sched_mode_t
sched_traditional_initial_thread_sched_mode(task_t parent_task)
{
    if (parent_task == kernel_task)
        return TH_MODE_FIXED;
    else
        return TH_MODE_TIMESHARE;
}
/*
 * sched_traditional_processor_enqueue:
 *
 * Enqueue thread on a processor run queue.  Thread must be locked,
 * and not already be on a run queue.
 *
 * Returns TRUE if a preemption is indicated based on the state
 * of the run queue.
 *
 * The run queue must be locked (see thread_run_queue_remove()
 * for more info).
 */
static boolean_t
sched_traditional_processor_enqueue(processor_t  processor,
                                    thread_t     thread,
                                    integer_t    options)
{
    run_queue_t rq = runq_for_processor(processor);
    boolean_t   result;

    result = run_queue_enqueue(rq, thread, options);
    thread->runq = processor;
    runq_consider_incr_bound_count(processor, thread);

    return (result);
}
static boolean_t
sched_traditional_processor_queue_empty(processor_t processor)
{
    return runq_for_processor(processor)->count == 0;
}
static boolean_t
sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor)
{
    processor_set_t pset  = processor->processor_set;
    int             count = runq_for_processor(processor)->count;

    /*
     * The pset runq contains the count of all runnable threads
     * for all processors in the pset. However, for threads that
     * are bound to another processor, the current "processor"
     * is not eligible to execute the thread. So we only
     * include bound threads that are bound to the current
     * "processor". This allows the processor to idle when the
     * count of eligible threads drops to 0, even if there's
     * a runnable thread bound to a different processor in the
     * shared runq.
     */

    count -= pset->pset_runq_bound_count;
    count += processor->runq_bound_count;

    return count == 0;
}
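
/*
 * A worked example of the arithmetic above (illustrative numbers): with a
 * shared pset runq holding 5 runnable threads, 2 of them bound to some
 * processor in the pset and 1 of those 2 bound to this processor, the
 * eligible count is 5 - 2 + 1 = 4 (the 3 unbound threads plus the one bound
 * here), so this processor does not report an empty queue.
 */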
static ast_t
sched_traditional_processor_csw_check(processor_t processor)
{
    run_queue_t runq;
    boolean_t   has_higher;

    assert(processor->active_thread != NULL);

    runq = runq_for_processor(processor);

    if (processor->first_timeslice) {
        has_higher = (runq->highq > processor->current_pri);
    } else {
        has_higher = (runq->highq >= processor->current_pri);
    }

    if (has_higher) {
        if (runq->urgency > 0)
            return (AST_PREEMPT | AST_URGENT);

        return AST_PREEMPT;
    }

    return AST_NONE;
}
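
/*
 * Example of the first_timeslice distinction above: a processor running at
 * current_pri 31 within its first quantum is preempted only by a runq whose
 * highq exceeds 31; after the first quantum, an equal-priority thread
 * (highq == 31) is enough to trigger AST_PREEMPT, in effect round-robining
 * threads of equal priority.
 */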
static boolean_t
sched_traditional_processor_queue_has_priority(processor_t  processor,
                                               int          priority,
                                               boolean_t    gte)
{
    run_queue_t runq = runq_for_processor(processor);

    if (runq->count == 0)
        return FALSE;
    else if (gte)
        return runq->highq >= priority;
    else
        return runq->highq > priority;
}
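
/*
 * Example of the gte flag above: with runq->highq == 31 and priority == 31,
 * gte == TRUE answers TRUE (a thread of at least that priority is queued),
 * while gte == FALSE answers FALSE (nothing strictly higher is queued).
 */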
static int
sched_traditional_processor_runq_count(processor_t processor)
{
    return runq_for_processor(processor)->count;
}
static uint64_t
sched_traditional_processor_runq_stats_count_sum(processor_t processor)
{
    return runq_for_processor(processor)->runq_stats.count_sum;
}
static uint64_t
sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor)
{
    /*
     * With a shared runq, report the stats once per pset (from the lowest
     * numbered CPU) rather than once per processor.
     */
    if (processor->cpu_id == processor->processor_set->cpu_set_low)
        return runq_for_processor(processor)->runq_stats.count_sum;
    else
        return 0ULL;
}
static int
sched_traditional_processor_bound_count(processor_t processor)
{
    return processor->runq_bound_count;
}
/*
 * sched_traditional_processor_queue_shutdown:
 *
 * Shutdown a processor run queue by
 * re-dispatching non-bound threads.
 *
 * Associated pset must be locked, and is
 * returned unlocked.
 */
static void
sched_traditional_processor_queue_shutdown(processor_t processor)
{
    processor_set_t pset  = processor->processor_set;
    run_queue_t     rq    = runq_for_processor(processor);
    queue_t         queue = rq->queues + rq->highq;
    int             pri   = rq->highq;
    int             count = rq->count;
    thread_t        next, thread;
    queue_head_t    tqueue;

    queue_init(&tqueue);

    /* Collect all non-bound threads onto a local queue */
    while (count > 0) {
        thread = (thread_t)(uintptr_t)queue_first(queue);
        while (!queue_end(queue, (queue_entry_t)thread)) {
            next = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);

            if (thread->bound_processor == PROCESSOR_NULL) {
                remqueue((queue_entry_t)thread);

                thread->runq = PROCESSOR_NULL;
                SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
                runq_consider_decr_bound_count(processor, thread);
                rq->count--;
                if (SCHED(priority_is_urgent)(pri)) {
                    rq->urgency--; assert(rq->urgency >= 0);
                }
                if (queue_empty(queue)) {
                    bitmap_clear(rq->bitmap, pri);
                    rq->highq = bitmap_first(rq->bitmap, NRQS);
                }

                enqueue_tail(&tqueue, (queue_entry_t)thread);
            }
            count--;

            thread = next;
        }

        queue--; pri--;
    }

    pset_unlock(pset);

    /* Re-dispatch the collected threads with the pset unlocked */
    while ((thread = (thread_t)(uintptr_t)dequeue_head(&tqueue)) != THREAD_NULL) {
        thread_lock(thread);

        thread_setrun(thread, SCHED_TAILQ);

        thread_unlock(thread);
    }
}
#if 0
/* Debug-only run queue consistency check, normally compiled out. */
static void
run_queue_check(
                run_queue_t     rq,
                thread_t        thread)
{
    queue_t         q;
    queue_entry_t   qe;

    if (rq != thread->runq)
        panic("run_queue_check: thread runq");

    if (thread->sched_pri > MAXPRI || thread->sched_pri < MINPRI)
        panic("run_queue_check: thread sched_pri");

    q = &rq->queues[thread->sched_pri];
    qe = queue_first(q);
    while (!queue_end(q, qe)) {
        if (qe == (queue_entry_t)thread)
            return;

        qe = queue_next(qe);
    }

    panic("run_queue_check: end");
}
#endif /* 0 */
/*
 * Locks the runqueue itself.
 *
 * Thread must be locked.
 */
static boolean_t
sched_traditional_processor_queue_remove(processor_t processor,
                                         thread_t thread)
{
    processor_set_t pset;
    run_queue_t     rq;

    pset = processor->processor_set;
    pset_lock(pset);

    rq = runq_for_processor(processor);

    if (processor == thread->runq) {
        /*
         * Thread is on a run queue and we have a lock on
         * that run queue.
         */
        runq_consider_decr_bound_count(processor, thread);
        run_queue_remove(rq, thread);
    } else {
        /*
         * The thread left the run queue before we could
         * lock the run queue.
         */
        assert(thread->runq == PROCESSOR_NULL);
        processor = PROCESSOR_NULL;
    }

    pset_unlock(pset);

    return (processor != PROCESSOR_NULL);
}
/*
 * sched_traditional_steal_processor_thread:
 *
 * Locate a thread to steal from the processor and
 * return it.
 *
 * Associated pset must be locked.  Returns THREAD_NULL
 * on failure.
 */
static thread_t
sched_traditional_steal_processor_thread(processor_t processor)
{
    run_queue_t rq    = runq_for_processor(processor);
    queue_t     queue = rq->queues + rq->highq;
    int         pri   = rq->highq;
    int         count = rq->count;
    thread_t    thread;

    while (count > 0) {
        thread = (thread_t)(uintptr_t)queue_first(queue);
        while (!queue_end(queue, (queue_entry_t)thread)) {
            /* Bound threads may not be stolen by another processor */
            if (thread->bound_processor == PROCESSOR_NULL) {
                remqueue((queue_entry_t)thread);

                thread->runq = PROCESSOR_NULL;
                SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
                runq_consider_decr_bound_count(processor, thread);
                rq->count--;
                if (SCHED(priority_is_urgent)(pri)) {
                    rq->urgency--; assert(rq->urgency >= 0);
                }
                if (queue_empty(queue)) {
                    bitmap_clear(rq->bitmap, pri);
                    rq->highq = bitmap_first(rq->bitmap, NRQS);
                }

                return (thread);
            }
            count--;

            thread = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);
        }

        queue--; pri--;
    }

    return (THREAD_NULL);
}
/*
 * Locate and steal a thread, beginning
 * at the pset.
 *
 * The pset must be locked, and is returned
 * unlocked.
 *
 * Returns the stolen thread, or THREAD_NULL on
 * failure.
 */
static thread_t
sched_traditional_steal_thread(processor_set_t pset)
{
    processor_set_t nset, cset = pset;
    processor_t     processor;
    thread_t        thread;

    do {
        processor = (processor_t)(uintptr_t)queue_first(&cset->active_queue);
        while (!queue_end(&cset->active_queue, (queue_entry_t)processor)) {
            if (runq_for_processor(processor)->count > 0) {
                thread = sched_traditional_steal_processor_thread(processor);
                if (thread != THREAD_NULL) {
                    remqueue((queue_entry_t)processor);
                    enqueue_tail(&cset->active_queue, (queue_entry_t)processor);

                    pset_unlock(cset);

                    return (thread);
                }
            }

            processor = (processor_t)(uintptr_t)queue_next((queue_entry_t)processor);
        }

        nset = next_pset(cset);

        if (nset != pset) {
            pset_unlock(cset);

            cset = nset;
            pset_lock(cset);
        }
    } while (nset != pset);

    pset_unlock(cset);

    return (THREAD_NULL);
}
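
/*
 * Note on the requeue above: a processor that successfully yields a stolen
 * thread is moved to the tail of its pset's active_queue, so repeated steal
 * attempts rotate through the active processors rather than always raiding
 * the same victim first.
 */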
static void
sched_traditional_thread_update_scan(sched_update_scan_context_t scan_context)
{
    boolean_t       restart_needed = FALSE;
    processor_t     processor = processor_list;
    processor_set_t pset;
    thread_t        thread;
    spl_t           s;

    do {
        do {
            /*
             * TODO: in sched_traditional_use_pset_runqueue case,
             *  avoid scanning the same runq multiple times
             */
            pset = processor->processor_set;

            s = splsched();
            pset_lock(pset);

            restart_needed = runq_scan(runq_for_processor(processor), scan_context);

            pset_unlock(pset);
            splx(s);

            if (restart_needed)
                break;

            thread = processor->idle_thread;
            if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
                if (thread_update_add_thread(thread) == FALSE) {
                    restart_needed = TRUE;
                    break;
                }
            }
        } while ((processor = processor->processor_list) != NULL);

        /* Ok, we now have a collection of candidates -- fix them. */
        thread_update_process_threads();
    } while (restart_needed);
}