/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <mach/thread_act.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <sys/kdebug.h>
67 sched_proto_init(void);
70 sched_proto_timebase_init(void);
73 sched_proto_processor_init(processor_t processor
);
76 sched_proto_pset_init(processor_set_t pset
);
79 sched_proto_maintenance_continuation(void);
82 sched_proto_choose_thread(processor_t processor
,
86 sched_proto_steal_thread(processor_set_t pset
);
89 sched_proto_compute_priority(thread_t thread
,
90 boolean_t override_depress
);
93 sched_proto_choose_processor( processor_set_t pset
,
94 processor_t processor
,
99 sched_proto_processor_enqueue(
100 processor_t processor
,
105 sched_proto_processor_queue_shutdown(
106 processor_t processor
);
109 sched_proto_processor_queue_remove(
110 processor_t processor
,
114 sched_proto_processor_queue_empty(processor_t processor
);
117 sched_proto_processor_queue_has_priority(processor_t processor
,
122 sched_proto_priority_is_urgent(int priority
);
125 sched_proto_processor_csw_check(processor_t processor
);
128 sched_proto_initial_quantum_size(thread_t thread
);
131 sched_proto_initial_thread_sched_mode(task_t parent_task
);
134 sched_proto_supports_timeshare_mode(void);
137 sched_proto_can_update_priority(thread_t thread
);
140 sched_proto_update_priority(thread_t thread
);
143 sched_proto_lightweight_update_priority(thread_t thread
);
146 sched_proto_quantum_expire(thread_t thread
);
149 sched_proto_should_current_thread_rechoose_processor(processor_t processor
);
152 sched_proto_processor_runq_count(processor_t processor
);
155 sched_proto_processor_runq_stats_count_sum(processor_t processor
);
157 const struct sched_dispatch_table sched_proto_dispatch
= {
159 sched_proto_timebase_init
,
160 sched_proto_processor_init
,
161 sched_proto_pset_init
,
162 sched_proto_maintenance_continuation
,
163 sched_proto_choose_thread
,
164 sched_proto_steal_thread
,
165 sched_proto_compute_priority
,
166 sched_proto_choose_processor
,
167 sched_proto_processor_enqueue
,
168 sched_proto_processor_queue_shutdown
,
169 sched_proto_processor_queue_remove
,
170 sched_proto_processor_queue_empty
,
171 sched_proto_priority_is_urgent
,
172 sched_proto_processor_csw_check
,
173 sched_proto_processor_queue_has_priority
,
174 sched_proto_initial_quantum_size
,
175 sched_proto_initial_thread_sched_mode
,
176 sched_proto_supports_timeshare_mode
,
177 sched_proto_can_update_priority
,
178 sched_proto_update_priority
,
179 sched_proto_lightweight_update_priority
,
180 sched_proto_quantum_expire
,
181 sched_proto_should_current_thread_rechoose_processor
,
182 sched_proto_processor_runq_count
,
183 sched_proto_processor_runq_stats_count_sum
,
184 sched_traditional_fairshare_init
,
185 sched_traditional_fairshare_runq_count
,
186 sched_traditional_fairshare_runq_stats_count_sum
,
187 sched_traditional_fairshare_enqueue
,
188 sched_traditional_fairshare_dequeue
,
189 sched_traditional_fairshare_queue_remove
,
190 TRUE
/* direct_dispatch_to_idle_processors */
193 static struct run_queue
*global_runq
;
194 static struct run_queue global_runq_storage
;
196 #define GLOBAL_RUNQ ((processor_t)-2)
197 decl_simple_lock_data(static,global_runq_lock
);
199 extern int max_unsafe_quanta
;
201 static uint32_t proto_quantum_us
;
202 static uint32_t proto_quantum
;
204 static uint32_t runqueue_generation
;
206 static processor_t proto_processor
;
208 static uint64_t sched_proto_tick_deadline
;
209 static uint32_t sched_proto_tick
;
212 sched_proto_init(void)
214 proto_quantum_us
= 10*1000;
216 printf("standard proto timeslicing quantum is %d us\n", proto_quantum_us
);
218 simple_lock_init(&global_runq_lock
, 0);
219 global_runq
= &global_runq_storage
;
220 run_queue_init(global_runq
);
221 runqueue_generation
= 0;
223 proto_processor
= master_processor
;
227 sched_proto_timebase_init(void)
231 /* standard timeslicing quantum */
232 clock_interval_to_absolutetime_interval(
233 proto_quantum_us
, NSEC_PER_USEC
, &abstime
);
234 assert((abstime
>> 32) == 0 && (uint32_t)abstime
!= 0);
235 proto_quantum
= (uint32_t)abstime
;
237 thread_depress_time
= 1 * proto_quantum
;
238 default_timeshare_computation
= proto_quantum
/ 2;
239 default_timeshare_constraint
= proto_quantum
;
241 max_unsafe_computation
= max_unsafe_quanta
* proto_quantum
;
242 sched_safe_duration
= 2 * max_unsafe_quanta
* proto_quantum
;
247 sched_proto_processor_init(processor_t processor __unused
)
249 /* No per-processor state */
253 sched_proto_pset_init(processor_set_t pset __unused
)
258 sched_proto_maintenance_continuation(void)
260 uint64_t abstime
= mach_absolute_time();
264 /* Every 8 seconds, switch to another processor */
265 if ((sched_proto_tick
& 0x7) == 0) {
266 processor_t new_processor
;
268 new_processor
= proto_processor
->processor_list
;
269 if (new_processor
== PROCESSOR_NULL
)
270 proto_processor
= master_processor
;
272 proto_processor
= new_processor
;
277 * Compute various averages.
281 if (sched_proto_tick_deadline
== 0)
282 sched_proto_tick_deadline
= abstime
;
284 clock_deadline_for_periodic_event(sched_one_second_interval
, abstime
,
285 &sched_proto_tick_deadline
);
287 assert_wait_deadline((event_t
)sched_proto_maintenance_continuation
, THREAD_UNINT
, sched_proto_tick_deadline
);
288 thread_block((thread_continue_t
)sched_proto_maintenance_continuation
);
293 sched_proto_choose_thread(processor_t processor
,
296 run_queue_t rq
= global_runq
;
302 simple_lock(&global_runq_lock
);
304 queue
= rq
->queues
+ rq
->highq
;
309 * Since we don't depress priorities, a high priority thread
310 * may get selected over and over again. Put a runqueue
311 * generation number in the thread structure so that we
312 * can ensure that we've cycled through all runnable tasks
313 * before coming back to a high priority thread. This isn't
314 * perfect, especially if the number of runnable threads always
315 * stays high, but is a workable approximation
318 while (count
> 0 && pri
>= priority
) {
319 thread
= (thread_t
)queue_first(queue
);
320 while (!queue_end(queue
, (queue_entry_t
)thread
)) {
321 if ((thread
->bound_processor
== PROCESSOR_NULL
||
322 thread
->bound_processor
== processor
) &&
323 runqueue_generation
!= thread
->runqueue_generation
) {
324 remqueue((queue_entry_t
)thread
);
326 thread
->runq
= PROCESSOR_NULL
;
327 thread
->runqueue_generation
= runqueue_generation
;
328 SCHED_STATS_RUNQ_CHANGE(&rq
->runq_stats
, rq
->count
);
330 if (queue_empty(queue
)) {
332 clrbit(MAXPRI
- pri
, rq
->bitmap
);
333 rq
->highq
= MAXPRI
- ffsbit(rq
->bitmap
);
336 simple_unlock(&global_runq_lock
);
341 thread
= (thread_t
)queue_next((queue_entry_t
)thread
);
347 runqueue_generation
++;
349 simple_unlock(&global_runq_lock
);
350 return (THREAD_NULL
);
354 sched_proto_steal_thread(processor_set_t pset
)
358 return (THREAD_NULL
);
363 sched_proto_compute_priority(thread_t thread
,
364 boolean_t override_depress __unused
)
366 set_sched_pri(thread
, thread
->priority
);
370 sched_proto_choose_processor( processor_set_t pset
,
371 processor_t processor
,
372 thread_t thread __unused
)
374 processor
= proto_processor
;
377 * Check that the correct processor set is
380 if (pset
!= processor
->processor_set
) {
383 pset
= processor
->processor_set
;
391 sched_proto_processor_enqueue(
392 processor_t processor __unused
,
396 run_queue_t rq
= global_runq
;
399 simple_lock(&global_runq_lock
);
400 result
= run_queue_enqueue(rq
, thread
, options
);
401 thread
->runq
= GLOBAL_RUNQ
;
402 simple_unlock(&global_runq_lock
);
408 sched_proto_processor_queue_shutdown(
409 processor_t processor
)
411 /* With a global runqueue, just stop choosing this processor */
416 sched_proto_processor_queue_remove(
417 processor_t processor
,
423 rqlock
= &global_runq_lock
;
427 if (processor
== thread
->runq
) {
429 * Thread is on a run queue and we have a lock on
432 remqueue((queue_entry_t
)thread
);
433 SCHED_STATS_RUNQ_CHANGE(&rq
->runq_stats
, rq
->count
);
435 if (SCHED(priority_is_urgent
)(thread
->sched_pri
)) {
436 rq
->urgency
--; assert(rq
->urgency
>= 0);
439 if (queue_empty(rq
->queues
+ thread
->sched_pri
)) {
440 /* update run queue status */
441 if (thread
->sched_pri
!= IDLEPRI
)
442 clrbit(MAXPRI
- thread
->sched_pri
, rq
->bitmap
);
443 rq
->highq
= MAXPRI
- ffsbit(rq
->bitmap
);
446 thread
->runq
= PROCESSOR_NULL
;
450 * The thread left the run queue before we could
451 * lock the run queue.
453 assert(thread
->runq
== PROCESSOR_NULL
);
454 processor
= PROCESSOR_NULL
;
457 simple_unlock(rqlock
);
459 return (processor
!= PROCESSOR_NULL
);
463 sched_proto_processor_queue_empty(processor_t processor __unused
)
467 result
= (global_runq
->count
== 0);
473 sched_proto_processor_queue_has_priority(processor_t processor __unused
,
479 simple_lock(&global_runq_lock
);
482 result
= global_runq
->highq
>= priority
;
484 result
= global_runq
->highq
>= priority
;
486 simple_unlock(&global_runq_lock
);
491 /* Implement sched_preempt_pri in code */
493 sched_proto_priority_is_urgent(int priority
)
495 if (priority
<= BASEPRI_FOREGROUND
)
498 if (priority
< MINPRI_KERNEL
)
501 if (priority
>= BASEPRI_PREEMPT
)
508 sched_proto_processor_csw_check(processor_t processor __unused
)
515 urgency
= runq
->urgency
;
519 return (AST_PREEMPT
| AST_URGENT
);
528 sched_proto_initial_quantum_size(thread_t thread __unused
)
530 return proto_quantum
;
534 sched_proto_initial_thread_sched_mode(task_t parent_task
)
536 if (parent_task
== kernel_task
)
537 return TH_MODE_FIXED
;
539 return TH_MODE_TIMESHARE
;
543 sched_proto_supports_timeshare_mode(void)
549 sched_proto_can_update_priority(thread_t thread __unused
)
555 sched_proto_update_priority(thread_t thread __unused
)
561 sched_proto_lightweight_update_priority(thread_t thread __unused
)
567 sched_proto_quantum_expire(thread_t thread __unused
)
573 sched_proto_should_current_thread_rechoose_processor(processor_t processor
)
575 return (proto_processor
!= processor
);
579 sched_proto_processor_runq_count(processor_t processor
)
581 if (master_processor
== processor
) {
582 return global_runq
->count
;
589 sched_proto_processor_runq_stats_count_sum(processor_t processor
)
591 if (master_processor
== processor
) {
592 return global_runq
->runq_stats
.count_sum
;