/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <mach/thread_act.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <sys/kdebug.h>
static void
sched_proto_init(void);

static void
sched_proto_timebase_init(void);

static void
sched_proto_processor_init(processor_t processor);

static void
sched_proto_pset_init(processor_set_t pset);

static void
sched_proto_maintenance_continuation(void);

static thread_t
sched_proto_choose_thread(processor_t processor,
    int priority,
    ast_t reason);

static thread_t
sched_proto_steal_thread(processor_set_t pset);

static int
sched_proto_compute_priority(thread_t thread);

static processor_t
sched_proto_choose_processor(processor_set_t pset,
    processor_t processor,
    thread_t thread);

static boolean_t
sched_proto_processor_enqueue(
    processor_t processor,
    thread_t thread,
    sched_options_t options);

static void
sched_proto_processor_queue_shutdown(
    processor_t processor);

static boolean_t
sched_proto_processor_queue_remove(
    processor_t processor,
    thread_t thread);

static boolean_t
sched_proto_processor_queue_empty(processor_t processor);

static boolean_t
sched_proto_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte);

static boolean_t
sched_proto_priority_is_urgent(int priority);

static ast_t
sched_proto_processor_csw_check(processor_t processor);

static uint32_t
sched_proto_initial_quantum_size(thread_t thread);

static sched_mode_t
sched_proto_initial_thread_sched_mode(task_t parent_task);

static boolean_t
sched_proto_can_update_priority(thread_t thread);

static void
sched_proto_update_priority(thread_t thread);

static void
sched_proto_lightweight_update_priority(thread_t thread);

static void
sched_proto_quantum_expire(thread_t thread);

static int
sched_proto_processor_runq_count(processor_t processor);

static uint64_t
sched_proto_processor_runq_stats_count_sum(processor_t processor);

static int
sched_proto_processor_bound_count(processor_t processor);

static void
sched_proto_thread_update_scan(sched_update_scan_context_t scan_context);
const struct sched_dispatch_table sched_proto_dispatch = {
    .sched_name                     = "proto",
    .init                           = sched_proto_init,
    .timebase_init                  = sched_proto_timebase_init,
    .processor_init                 = sched_proto_processor_init,
    .pset_init                      = sched_proto_pset_init,
    .maintenance_continuation       = sched_proto_maintenance_continuation,
    .choose_thread                  = sched_proto_choose_thread,
    .steal_thread_enabled           = sched_steal_thread_DISABLED,
    .steal_thread                   = sched_proto_steal_thread,
    .compute_timeshare_priority     = sched_proto_compute_priority,
    .choose_node                    = sched_choose_node,
    .choose_processor               = sched_proto_choose_processor,
    .processor_enqueue              = sched_proto_processor_enqueue,
    .processor_queue_shutdown       = sched_proto_processor_queue_shutdown,
    .processor_queue_remove         = sched_proto_processor_queue_remove,
    .processor_queue_empty          = sched_proto_processor_queue_empty,
    .priority_is_urgent             = sched_proto_priority_is_urgent,
    .processor_csw_check            = sched_proto_processor_csw_check,
    .processor_queue_has_priority   = sched_proto_processor_queue_has_priority,
    .initial_quantum_size           = sched_proto_initial_quantum_size,
    .initial_thread_sched_mode      = sched_proto_initial_thread_sched_mode,
    .can_update_priority            = sched_proto_can_update_priority,
    .update_priority                = sched_proto_update_priority,
    .lightweight_update_priority    = sched_proto_lightweight_update_priority,
    .quantum_expire                 = sched_proto_quantum_expire,
    .processor_runq_count           = sched_proto_processor_runq_count,
    .processor_runq_stats_count_sum = sched_proto_processor_runq_stats_count_sum,
    .processor_bound_count          = sched_proto_processor_bound_count,
    .thread_update_scan             = sched_proto_thread_update_scan,
    .multiple_psets_enabled         = TRUE,
    .sched_groups_enabled           = FALSE,
    .avoid_processor_enabled        = FALSE,
    .thread_avoid_processor         = NULL,
    .processor_balance              = sched_SMT_balance,

    .rt_runq                        = sched_rtlocal_runq,
    .rt_init                        = sched_rtlocal_init,
    .rt_queue_shutdown              = sched_rtlocal_queue_shutdown,
    .rt_runq_scan                   = sched_rtlocal_runq_scan,
    .rt_runq_count_sum              = sched_rtlocal_runq_count_sum,

    .qos_max_parallelism            = sched_qos_max_parallelism,
    .check_spill                    = sched_check_spill,
    .ipi_policy                     = sched_ipi_policy,
    .thread_should_yield            = sched_thread_should_yield,
    .run_count_incr                 = sched_run_incr,
    .run_count_decr                 = sched_run_decr,
    .update_thread_bucket           = sched_update_thread_bucket,
    .pset_made_schedulable          = sched_pset_made_schedulable,
};
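/*
 * How this table is consumed (illustrative sketch, not a quote from this
 * file): the scheduler core invokes policy hooks indirectly through the
 * dispatch table selected at boot, roughly
 *
 *	thread = SCHED(choose_thread)(processor, MINPRI, AST_NONE);
 *
 * where SCHED() expands to a member lookup on the active
 * sched_dispatch_table. The exact macro and the selection mechanism live
 * elsewhere (kern/sched_prim.h in xnu) and are assumed here for exposition.
 */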
static struct run_queue *global_runq;
static struct run_queue global_runq_storage;

/*
 * Sentinel stored in thread->runq while a thread sits on the single global
 * run queue, distinguishing "queued globally" from both PROCESSOR_NULL
 * (not queued) and a real processor pointer.
 */
#define GLOBAL_RUNQ             ((processor_t)-2)
decl_simple_lock_data(static, global_runq_lock);

extern int      max_unsafe_quanta;

static uint32_t proto_quantum_us;
static uint32_t proto_quantum;

static uint32_t runqueue_generation;

/* The one processor currently allowed to run threads; rotated by the maintenance thread */
static processor_t      proto_processor;

static uint64_t         sched_proto_tick_deadline;
static uint32_t         sched_proto_tick;
static void
sched_proto_init(void)
{
    proto_quantum_us = 10 * 1000;

    printf("standard proto timeslicing quantum is %d us\n", proto_quantum_us);

    simple_lock_init(&global_runq_lock, 0);
    global_runq = &global_runq_storage;
    run_queue_init(global_runq);
    runqueue_generation = 0;

    proto_processor = master_processor;
}
static void
sched_proto_timebase_init(void)
{
    uint64_t        abstime;

    /* standard timeslicing quantum */
    clock_interval_to_absolutetime_interval(
        proto_quantum_us, NSEC_PER_USEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    proto_quantum = (uint32_t)abstime;

    thread_depress_time = 1 * proto_quantum;
    default_timeshare_computation = proto_quantum / 2;
    default_timeshare_constraint = proto_quantum;

    max_unsafe_computation = max_unsafe_quanta * proto_quantum;
    sched_safe_duration = 2 * max_unsafe_quanta * proto_quantum;
}
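/*
 * Worked out with the 10 ms quantum set in sched_proto_init(): the
 * depression time is 10 ms (one quantum), the default timeshare
 * computation is 5 ms (half a quantum), and the default timeshare
 * constraint is 10 ms, all converted to absolute-time units above.
 */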
static void
sched_proto_processor_init(processor_t processor __unused)
{
    /* No per-processor state */
}

static void
sched_proto_pset_init(processor_set_t pset __unused)
{
}
static void
sched_proto_maintenance_continuation(void)
{
    uint64_t        abstime = mach_absolute_time();

    sched_proto_tick++;

    /* Every 8 seconds, switch to another processor */
    if ((sched_proto_tick & 0x7) == 0) {
        processor_t new_processor;

        new_processor = proto_processor->processor_list;
        if (new_processor == PROCESSOR_NULL) {
            proto_processor = master_processor;
        } else {
            proto_processor = new_processor;
        }
    }

    /*
     *  Compute various averages.
     */
    compute_averages(1);

    if (sched_proto_tick_deadline == 0) {
        sched_proto_tick_deadline = abstime;
    }

    clock_deadline_for_periodic_event(sched_one_second_interval, abstime,
        &sched_proto_tick_deadline);

    assert_wait_deadline((event_t)sched_proto_maintenance_continuation,
        THREAD_UNINT, sched_proto_tick_deadline);
    thread_block((thread_continue_t)sched_proto_maintenance_continuation);
    /*NOTREACHED*/
}
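/*
 * Note the continuation idiom above: thread_block() is passed this very
 * function as the continuation, so the maintenance thread never returns.
 * Each time the one-second deadline expires, the wait is satisfied and
 * execution restarts at the top of sched_proto_maintenance_continuation(),
 * ticking sched_proto_tick and rotating proto_processor on every 8th pass.
 */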
static thread_t
sched_proto_choose_thread(processor_t processor,
    int priority,
    ast_t reason __unused)
{
    run_queue_t     rq = global_runq;
    circle_queue_t  queue;
    int             pri, count;
    thread_t        thread;

    simple_lock(&global_runq_lock, LCK_GRP_NULL);

    queue = rq->queues + rq->highq;
    pri = rq->highq;
    count = rq->count;

    /*
     * Since we don't depress priorities, a high priority thread
     * may get selected over and over again. Put a runqueue
     * generation number in the thread structure so that we
     * can ensure that we've cycled through all runnable tasks
     * before coming back to a high priority thread. This isn't
     * perfect, especially if the number of runnable threads always
     * stays high, but is a workable approximation.
     */
    while (count > 0 && pri >= priority) {
        cqe_foreach_element_safe(thread, queue, runq_links) {
            if ((thread->bound_processor == PROCESSOR_NULL ||
                thread->bound_processor == processor) &&
                runqueue_generation != thread->runqueue_generation) {
                circle_dequeue(queue, &thread->runq_links);

                thread->runq = PROCESSOR_NULL;
                thread->runqueue_generation = runqueue_generation;
                SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
                rq->count--;
                if (circle_queue_empty(queue)) {
                    bitmap_clear(rq->bitmap, pri);
                    rq->highq = bitmap_first(rq->bitmap, NRQS);
                }

                simple_unlock(&global_runq_lock);
                return thread;
            }
            count--;
        }

        /* Nothing eligible at this priority; scan the next band down */
        queue--; pri--;
    }

    /* Everything runnable has been seen this generation; start a new one */
    runqueue_generation++;

    simple_unlock(&global_runq_lock);
    return THREAD_NULL;
}
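/*
 * Example of the generation scheme above: with thread A runnable at
 * priority 60 and thread B at priority 50, the first pick dequeues A and
 * stamps it with the current generation. If A is re-enqueued before B has
 * run, the next scan skips A (its generation matches) and descends to B.
 * Only when a full scan finds no eligible thread does
 * runqueue_generation advance, making every queued thread eligible again.
 */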
static thread_t
sched_proto_steal_thread(processor_set_t pset)
{
    /* Nothing to steal from a single global run queue */
    pset_unlock(pset);

    return THREAD_NULL;
}

static int
sched_proto_compute_priority(thread_t thread)
{
    return thread->base_pri;
}
static processor_t
sched_proto_choose_processor(processor_set_t pset,
    processor_t processor,
    thread_t thread __unused)
{
    processor = proto_processor;

    /*
     *  Check that the correct processor set is
     *  returned locked.
     */
    if (pset != processor->processor_set) {
        pset_unlock(pset);

        pset = processor->processor_set;
        pset_lock(pset);
    }

    return processor;
}
static boolean_t
sched_proto_processor_enqueue(
    processor_t             processor __unused,
    thread_t                thread,
    sched_options_t         options)
{
    run_queue_t     rq = global_runq;
    boolean_t       result;

    simple_lock(&global_runq_lock, LCK_GRP_NULL);
    result = run_queue_enqueue(rq, thread, options);
    thread->runq = GLOBAL_RUNQ;
    simple_unlock(&global_runq_lock);

    return result;
}
static void
sched_proto_processor_queue_shutdown(
    processor_t             processor __unused)
{
    /* With a global runqueue, just stop choosing this processor */
}
static boolean_t
sched_proto_processor_queue_remove(
    processor_t             processor,
    thread_t                thread)
{
    void *          rqlock;
    run_queue_t     rq;

    rqlock = &global_runq_lock;
    rq = global_runq;

    simple_lock(rqlock, LCK_GRP_NULL);
    if (processor == thread->runq) {
        /*
         * Thread is on a run queue and we have a lock on
         * that run queue.
         */
        run_queue_remove(rq, thread);
    } else {
        /*
         * The thread left the run queue before we could
         * lock the run queue.
         */
        assert(thread->runq == PROCESSOR_NULL);
        processor = PROCESSOR_NULL;
    }

    simple_unlock(rqlock);

    return processor != PROCESSOR_NULL;
}
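/*
 * The thread->runq field acts as the queueing token throughout this file:
 * GLOBAL_RUNQ while the thread is on the shared run queue (set in
 * sched_proto_processor_enqueue), PROCESSOR_NULL once it has been removed.
 * The race handled above arises because sched_proto_choose_thread() can
 * dequeue the thread between the caller reading thread->runq and this
 * function acquiring global_runq_lock.
 */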
static boolean_t
sched_proto_processor_queue_empty(processor_t processor __unused)
{
    boolean_t result;

    result = (global_runq->count == 0);

    return result;
}
static boolean_t
sched_proto_processor_queue_has_priority(processor_t processor __unused,
    int priority,
    boolean_t gte)
{
    boolean_t result;

    simple_lock(&global_runq_lock, LCK_GRP_NULL);

    if (gte) {
        result = global_runq->highq >= priority;
    } else {
        result = global_runq->highq > priority;
    }

    simple_unlock(&global_runq_lock);

    return result;
}
/* Implement sched_preempt_pri in code */
static boolean_t
sched_proto_priority_is_urgent(int priority)
{
    if (priority <= BASEPRI_FOREGROUND) {
        return FALSE;
    }

    if (priority < MINPRI_KERNEL) {
        return TRUE;
    }

    if (priority >= BASEPRI_PREEMPT) {
        return TRUE;
    }

    return FALSE;
}
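/*
 * Band-by-band reading of the checks above, using the usual xnu priority
 * constants (BASEPRI_FOREGROUND 47, MINPRI_KERNEL 80, BASEPRI_PREEMPT 92):
 * priorities up to 47 are never urgent; elevated user priorities 48..79
 * are always urgent; kernel priorities 80..91 are not urgent; and 92 and
 * above preempt urgently.
 */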
static ast_t
sched_proto_processor_csw_check(processor_t processor)
{
    run_queue_t     runq;
    int             count, urgency;

    runq = global_runq;
    count = runq->count;
    urgency = runq->urgency;

    if (count > 0) {
        if (urgency > 0) {
            return AST_PREEMPT | AST_URGENT;
        }

        return AST_PREEMPT;
    }

    /* A processor that is no longer proto_processor should yield */
    if (proto_processor != processor) {
        return AST_PREEMPT;
    }

    return AST_NONE;
}
static uint32_t
sched_proto_initial_quantum_size(thread_t thread __unused)
{
    return proto_quantum;
}
static sched_mode_t
sched_proto_initial_thread_sched_mode(task_t parent_task)
{
    if (parent_task == kernel_task) {
        return TH_MODE_FIXED;
    } else {
        return TH_MODE_TIMESHARE;
    }
}
/* This policy never recomputes priorities, so all the hooks are stubs */
static boolean_t
sched_proto_can_update_priority(thread_t thread __unused)
{
    return FALSE;
}

static void
sched_proto_update_priority(thread_t thread __unused)
{
}

static void
sched_proto_lightweight_update_priority(thread_t thread __unused)
{
}

static void
sched_proto_quantum_expire(thread_t thread __unused)
{
}
static int
sched_proto_processor_runq_count(processor_t processor)
{
    /* Attribute the single global run queue to the master processor */
    if (master_processor == processor) {
        return global_runq->count;
    } else {
        return 0;
    }
}
static uint64_t
sched_proto_processor_runq_stats_count_sum(processor_t processor)
{
    if (master_processor == processor) {
        return global_runq->runq_stats.count_sum;
    } else {
        return 0ULL;
    }
}

static int
sched_proto_processor_bound_count(__unused processor_t processor)
{
    return 0;
}

static void
sched_proto_thread_update_scan(__unused sched_update_scan_context_t scan_context)
{
}