/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/machine.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/debug.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <sys/kdebug.h>
50 sched_dualq_init(void);
53 sched_dualq_steal_thread(processor_set_t pset
);
56 sched_dualq_thread_update_scan(void);
59 sched_dualq_processor_enqueue(processor_t processor
, thread_t thread
, integer_t options
);
62 sched_dualq_processor_queue_remove(processor_t processor
, thread_t thread
);
65 sched_dualq_processor_csw_check(processor_t processor
);
68 sched_dualq_processor_queue_has_priority(processor_t processor
, int priority
, boolean_t gte
);
71 sched_dualq_runq_count(processor_t processor
);
74 sched_dualq_processor_queue_empty(processor_t processor
);
77 sched_dualq_runq_stats_count_sum(processor_t processor
);
80 sched_dualq_processor_bound_count(processor_t processor
);
83 sched_dualq_pset_init(processor_set_t pset
);
86 sched_dualq_processor_init(processor_t processor
);
89 sched_dualq_choose_thread(processor_t processor
, int priority
, ast_t reason
);
92 sched_dualq_processor_queue_shutdown(processor_t processor
);
95 sched_dualq_initial_thread_sched_mode(task_t parent_task
);
98 sched_dualq_should_current_thread_rechoose_processor(processor_t processor
);
100 const struct sched_dispatch_table sched_dualq_dispatch
= {
101 .init
= sched_dualq_init
,
102 .timebase_init
= sched_traditional_timebase_init
,
103 .processor_init
= sched_dualq_processor_init
,
104 .pset_init
= sched_dualq_pset_init
,
105 .maintenance_continuation
= sched_traditional_maintenance_continue
,
106 .choose_thread
= sched_dualq_choose_thread
,
107 .steal_thread
= sched_dualq_steal_thread
,
108 .compute_priority
= compute_priority
,
109 .choose_processor
= choose_processor
,
110 .processor_enqueue
= sched_dualq_processor_enqueue
,
111 .processor_queue_shutdown
= sched_dualq_processor_queue_shutdown
,
112 .processor_queue_remove
= sched_dualq_processor_queue_remove
,
113 .processor_queue_empty
= sched_dualq_processor_queue_empty
,
114 .priority_is_urgent
= priority_is_urgent
,
115 .processor_csw_check
= sched_dualq_processor_csw_check
,
116 .processor_queue_has_priority
= sched_dualq_processor_queue_has_priority
,
117 .initial_quantum_size
= sched_traditional_initial_quantum_size
,
118 .initial_thread_sched_mode
= sched_dualq_initial_thread_sched_mode
,
119 .can_update_priority
= can_update_priority
,
120 .update_priority
= update_priority
,
121 .lightweight_update_priority
= lightweight_update_priority
,
122 .quantum_expire
= sched_traditional_quantum_expire
,
123 .should_current_thread_rechoose_processor
= sched_dualq_should_current_thread_rechoose_processor
,
124 .processor_runq_count
= sched_dualq_runq_count
,
125 .processor_runq_stats_count_sum
= sched_dualq_runq_stats_count_sum
,
126 .fairshare_init
= sched_traditional_fairshare_init
,
127 .fairshare_runq_count
= sched_traditional_fairshare_runq_count
,
128 .fairshare_runq_stats_count_sum
= sched_traditional_fairshare_runq_stats_count_sum
,
129 .fairshare_enqueue
= sched_traditional_fairshare_enqueue
,
130 .fairshare_dequeue
= sched_traditional_fairshare_dequeue
,
131 .fairshare_queue_remove
= sched_traditional_fairshare_queue_remove
,
132 .processor_bound_count
= sched_dualq_processor_bound_count
,
133 .thread_update_scan
= sched_dualq_thread_update_scan
,
134 .direct_dispatch_to_idle_processors
= FALSE
,
137 __attribute__((always_inline
))
138 static inline run_queue_t
dualq_main_runq(processor_t processor
)
140 return &processor
->processor_set
->pset_runq
;
143 __attribute__((always_inline
))
144 static inline run_queue_t
dualq_bound_runq(processor_t processor
)
146 return &processor
->runq
;
149 __attribute__((always_inline
))
150 static inline run_queue_t
dualq_runq_for_thread(processor_t processor
, thread_t thread
)
152 if (thread
->bound_processor
== PROCESSOR_NULL
) {
153 return dualq_main_runq(processor
);
155 assert(thread
->bound_processor
== processor
);
156 return dualq_bound_runq(processor
);
161 sched_dualq_initial_thread_sched_mode(task_t parent_task
)
163 if (parent_task
== kernel_task
)
164 return TH_MODE_FIXED
;
166 return TH_MODE_TIMESHARE
;
170 sched_dualq_processor_init(processor_t processor
)
172 run_queue_init(&processor
->runq
);
176 sched_dualq_pset_init(processor_set_t pset
)
178 run_queue_init(&pset
->pset_runq
);
/* Global init: the dualq policy reuses the traditional scheduler's setup. */
static void
sched_dualq_init(void)
{
	sched_traditional_init();
}
188 sched_dualq_choose_thread(
189 processor_t processor
,
191 __unused ast_t reason
)
193 run_queue_t main_runq
= dualq_main_runq(processor
);
194 run_queue_t bound_runq
= dualq_bound_runq(processor
);
195 run_queue_t chosen_runq
;
197 if (bound_runq
->highq
< priority
&&
198 main_runq
->highq
< priority
)
201 if (bound_runq
->count
&& main_runq
->count
) {
202 if (bound_runq
->highq
>= main_runq
->highq
) {
203 chosen_runq
= bound_runq
;
205 chosen_runq
= main_runq
;
207 } else if (bound_runq
->count
) {
208 chosen_runq
= bound_runq
;
209 } else if (main_runq
->count
) {
210 chosen_runq
= main_runq
;
212 return (THREAD_NULL
);
215 return run_queue_dequeue(chosen_runq
, SCHED_HEADQ
);
219 sched_dualq_processor_enqueue(
220 processor_t processor
,
224 run_queue_t rq
= dualq_runq_for_thread(processor
, thread
);
227 result
= run_queue_enqueue(rq
, thread
, options
);
228 thread
->runq
= processor
;
234 sched_dualq_processor_queue_empty(processor_t processor
)
236 return dualq_main_runq(processor
)->count
== 0 &&
237 dualq_bound_runq(processor
)->count
== 0;
241 sched_dualq_processor_csw_check(processor_t processor
)
243 boolean_t has_higher
;
246 run_queue_t main_runq
= dualq_main_runq(processor
);
247 run_queue_t bound_runq
= dualq_bound_runq(processor
);
249 assert(processor
->active_thread
!= NULL
);
251 pri
= MAX(main_runq
->highq
, bound_runq
->highq
);
253 if (first_timeslice(processor
)) {
254 has_higher
= (pri
> processor
->current_pri
);
256 has_higher
= (pri
>= processor
->current_pri
);
260 if (main_runq
->urgency
> 0)
261 return (AST_PREEMPT
| AST_URGENT
);
263 if (bound_runq
->urgency
> 0)
264 return (AST_PREEMPT
| AST_URGENT
);
266 if (processor
->active_thread
&& thread_eager_preemption(processor
->active_thread
))
267 return (AST_PREEMPT
| AST_URGENT
);
276 sched_dualq_processor_queue_has_priority(processor_t processor
,
280 int qpri
= MAX(dualq_main_runq(processor
)->highq
, dualq_bound_runq(processor
)->highq
);
283 return qpri
>= priority
;
285 return qpri
> priority
;
289 sched_dualq_should_current_thread_rechoose_processor(processor_t processor
)
291 return (processor
->current_pri
< BASEPRI_RTQUEUES
&& processor
->processor_primary
!= processor
);
295 sched_dualq_runq_count(processor_t processor
)
297 return dualq_main_runq(processor
)->count
+ dualq_bound_runq(processor
)->count
;
301 sched_dualq_runq_stats_count_sum(processor_t processor
)
303 uint64_t bound_sum
= dualq_bound_runq(processor
)->runq_stats
.count_sum
;
305 if (processor
->cpu_id
== processor
->processor_set
->cpu_set_low
)
306 return bound_sum
+ dualq_main_runq(processor
)->runq_stats
.count_sum
;
311 sched_dualq_processor_bound_count(processor_t processor
)
313 return dualq_bound_runq(processor
)->count
;
317 sched_dualq_processor_queue_shutdown(processor_t processor
)
319 processor_set_t pset
= processor
->processor_set
;
320 run_queue_t rq
= dualq_main_runq(processor
);
324 /* We only need to migrate threads if this is the last active processor in the pset */
325 if (pset
->online_processor_count
> 0) {
332 while (rq
->count
> 0) {
333 thread
= run_queue_dequeue(rq
, SCHED_HEADQ
);
334 enqueue_tail(&tqueue
, (queue_entry_t
)thread
);
339 while ((thread
= (thread_t
)(void*)dequeue_head(&tqueue
)) != THREAD_NULL
) {
342 thread_setrun(thread
, SCHED_TAILQ
);
344 thread_unlock(thread
);
349 sched_dualq_processor_queue_remove(
350 processor_t processor
,
354 processor_set_t pset
= processor
->processor_set
;
358 rq
= dualq_runq_for_thread(processor
, thread
);
360 if (processor
== thread
->runq
) {
362 * Thread is on a run queue and we have a lock on
365 run_queue_remove(rq
, thread
);
369 * The thread left the run queue before we could
370 * lock the run queue.
372 assert(thread
->runq
== PROCESSOR_NULL
);
373 processor
= PROCESSOR_NULL
;
378 return (processor
!= PROCESSOR_NULL
);
382 sched_dualq_steal_thread(processor_set_t pset
)
384 processor_set_t nset
, cset
= pset
;
388 if (cset
->pset_runq
.count
> 0) {
389 thread
= run_queue_dequeue(&cset
->pset_runq
, SCHED_HEADQ
);
394 nset
= next_pset(cset
);
402 } while (nset
!= pset
);
406 return (THREAD_NULL
);
410 sched_dualq_thread_update_scan(void)
412 boolean_t restart_needed
= FALSE
;
413 processor_t processor
= processor_list
;
414 processor_set_t pset
;
419 * We update the threads associated with each processor (bound and idle threads)
420 * and then update the threads in each pset runqueue.
425 pset
= processor
->processor_set
;
430 restart_needed
= runq_scan(dualq_bound_runq(processor
));
438 thread
= processor
->idle_thread
;
439 if (thread
!= THREAD_NULL
&& thread
->sched_stamp
!= sched_tick
) {
440 if (thread_update_add_thread(thread
) == FALSE
) {
441 restart_needed
= TRUE
;
445 } while ((processor
= processor
->processor_list
) != NULL
);
447 /* Ok, we now have a collection of candidates -- fix them. */
448 thread_update_process_threads();
450 } while (restart_needed
);
459 restart_needed
= runq_scan(&pset
->pset_runq
);
466 } while ((pset
= pset
->pset_list
) != NULL
);
468 /* Ok, we now have a collection of candidates -- fix them. */
469 thread_update_process_threads();
471 } while (restart_needed
);