2 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
46 * Carnegie Mellon requests users of this software to return to
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
60 * Author: Avadis Tevanian, Jr.
66 #include <mach/boolean.h>
67 #include <mach/kern_return.h>
68 #include <mach/machine.h>
69 #include <kern/host.h>
70 #include <kern/mach_param.h>
71 #include <kern/sched.h>
72 #include <sys/kdebug.h>
74 #include <kern/thread.h>
75 #include <kern/processor.h>
76 #include <machine/machparam.h>
79 * thread_quantum_expire:
81 * Recalculate the quantum and priority for a thread.
/*
 * thread_quantum_expire: timer-call handler run when the executing
 * thread's quantum is used up.  Per the surviving code it: records the
 * thread's last run time, trips the realtime/fixed fail-safe when too
 * much "unsafe" computation has accumulated, lets the active scheduler
 * policy (via the SCHED() dispatch macros) update priorities, grants
 * the thread a fresh quantum, re-arms the per-processor quantum timer,
 * and performs a context-switch check.
 *
 * NOTE(review): this extraction dropped several original lines — the
 * binding of `thread' (presumably p1; confirm against the original
 * file), the declaration of `preempt', the opening/closing braces,
 * thread_lock(), the `else' keywords between alternative branches, and
 * the AST handling after csw_check().  The surviving fragments below
 * are left byte-identical.
 */
87 thread_quantum_expire(
88 timer_call_param_t p0
,
89 timer_call_param_t p1
)
/* p0 carries the processor whose quantum timer fired. */
91 processor_t processor
= p0
;
98 * We've run up until our quantum expiration, and will (potentially)
99 * continue without re-entering the scheduler, so update this now.
101 thread
->last_run_time
= processor
->quantum_end
;
104 * Check for fail-safe trip.
/*
 * Fail-safe applies only to realtime or fixed-priority threads that
 * are neither promoted nor flagged system-critical.
 */
106 if ((thread
->sched_mode
== TH_MODE_REALTIME
|| thread
->sched_mode
== TH_MODE_FIXED
) &&
107 !(thread
->sched_flags
& TH_SFLAG_PROMOTED
) &&
108 !(thread
->options
& TH_OPT_SYSTEM_CRITICAL
)) {
/*
 * Total computation = time since this computation epoch plus any
 * previously metered computation.
 */
109 uint64_t new_computation
;
111 new_computation
= processor
->quantum_end
- thread
->computation_epoch
;
112 new_computation
+= thread
->computation_metered
;
113 if (new_computation
> max_unsafe_computation
) {
/* Trace the fail-safe trip via kdebug before demoting the thread. */
115 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED
, MACH_FAILSAFE
)|DBG_FUNC_NONE
,
116 (uintptr_t)thread
->sched_pri
, (uintptr_t)thread
->sched_mode
, 0, 0, 0);
/* Realtime threads are depressed while the fail-safe is engaged. */
118 if (thread
->sched_mode
== TH_MODE_REALTIME
) {
119 thread
->priority
= DEPRESSPRI
;
/*
 * Remember the original mode so update_priority() can restore it at
 * safe_release time, then demote to timeshare if the policy supports
 * it (otherwise fall back to fixed — see the XXX note below).
 */
122 thread
->saved_mode
= thread
->sched_mode
;
124 if (SCHED(supports_timeshare_mode
)) {
126 thread
->sched_mode
= TH_MODE_TIMESHARE
;
128 /* XXX handle fixed->fixed case */
129 thread
->sched_mode
= TH_MODE_FIXED
;
/* Schedule the release point and mark the fail-safe engaged. */
132 thread
->safe_release
= processor
->quantum_end
+ sched_safe_duration
;
133 thread
->sched_flags
|= TH_SFLAG_FAILSAFE
;
138 * Recompute scheduled priority if appropriate.
/*
 * Full update is rate-limited by can_update_priority(); otherwise a
 * cheap lightweight update runs.  NOTE(review): the `else' joining
 * these two calls was dropped by the extraction.
 */
140 if (SCHED(can_update_priority
)(thread
))
141 SCHED(update_priority
)(thread
);
143 SCHED(lightweight_update_priority
)(thread
);
/* Per-policy quantum-expiration hook. */
145 SCHED(quantum_expire
)(thread
);
/* Keep the processor's cached priority/mode in sync with the thread. */
147 processor
->current_pri
= thread
->sched_pri
;
148 processor
->current_thmode
= thread
->sched_mode
;
151 * This quantum is up, give this thread another.
153 if (first_timeslice(processor
))
154 processor
->timeslice
--;
/* Refill the quantum and re-arm the one-shot quantum timer. */
156 thread_quantum_init(thread
);
157 thread
->last_quantum_refill_time
= processor
->quantum_end
;
159 processor
->quantum_end
+= thread
->current_quantum
;
160 timer_call_enter1(&processor
->quantum_timer
,
161 thread
, processor
->quantum_end
, 0);
164 * Context switch check.
/*
 * NOTE(review): the AST action taken when csw_check() returns a
 * preemption reason was dropped by the extraction.
 */
166 if ((preempt
= csw_check(processor
)) != AST_NONE
)
/*
 * Publish priority/occupancy hints to the processor set so idle
 * processors can make better selection decisions.
 */
169 processor_set_t pset
= processor
->processor_set
;
173 pset_pri_hint(pset
, processor
, processor
->current_pri
);
174 pset_count_hint(pset
, processor
, SCHED(processor_runq_count
)(processor
));
/* Matching thread_lock() was dropped by the extraction — confirm pairing. */
179 thread_unlock(thread
);
182 #if defined(CONFIG_SCHED_TRADITIONAL)
/*
 * sched_traditional_quantum_expire: the traditional scheduler's
 * per-policy quantum-expiration hook.  Deliberately does nothing —
 * the parameter is marked __unused and the surviving comment below
 * states that no special behavior is required.
 */
185 sched_traditional_quantum_expire(thread_t thread __unused
)
188 * No special behavior when a timeshare, fixed, or realtime thread
189 * uses up its entire quantum
/*
 * lightweight_update_priority: cheap per-quantum priority refresh for
 * timesharing threads.  Accumulates CPU usage deltas and, if the
 * thread is neither promoted nor depressed, recomputes its scheduled
 * priority via compute_my_priority().  Non-timeshare threads are left
 * untouched.
 * NOTE(review): return type/braces were dropped by the extraction;
 * surviving fragments are byte-identical below.
 */
194 lightweight_update_priority(thread_t thread
)
196 if (thread
->sched_mode
== TH_MODE_TIMESHARE
) {
197 register uint32_t delta
;
/* delta receives the CPU time consumed since the last sample. */
199 thread_timer_delta(thread
, delta
);
202 * Accumulate timesharing usage only
203 * during contention for processor
/*
 * pri_shift == INT8_MAX appears to act as a "no contention" sentinel
 * that suppresses sched_usage accumulation — confirm against the
 * pri_shift producer.
 */
206 if (thread
->pri_shift
< INT8_MAX
)
207 thread
->sched_usage
+= delta
;
/* cpu_delta always accumulates; it is folded into cpu_usage later. */
209 thread
->cpu_delta
+= delta
;
212 * Adjust the scheduled priority if
213 * the thread has not been promoted
214 * and is not depressed.
216 if ( !(thread
->sched_flags
& TH_SFLAG_PROMOTED
) &&
217 !(thread
->sched_flags
& TH_SFLAG_DEPRESSED_MASK
) )
218 compute_my_priority(thread
);
223 * Define shifts for simulating (5/8) ** n
225 * Shift structures for holding update shifts. Actual computation
226 * is usage = (usage >> shift1) +/- (usage >> abs(shift2)) where the
227 * +/- is determined by the sign of shift 2.
/*
 * Per-tick usage decay table: 32 {shift1, shift2} pairs.  Each entry n
 * approximates multiplying usage by (5/8)^n as
 *     usage = (usage >> shift1) +/- (usage >> abs(shift2))
 * where the sign of shift2 selects add vs. subtract (consumed by
 * update_priority()).  Indexed by ticks elapsed since the thread's
 * last scheduler-tick update; beyond SCHED_DECAY_TICKS usage is simply
 * zeroed.
 */
234 #define SCHED_DECAY_TICKS 32
235 static struct shift_data sched_decay_shifts
[SCHED_DECAY_TICKS
] = {
236 {1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7},
237 {5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13},
238 {11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18},
239 {16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}
243 * do_priority_computation:
245 * Calculate the timesharing priority based upon usage and load.
/*
 * do_priority_computation(thread, pri): derive a timesharing priority
 * as base priority minus decayed usage (sched_usage >> pri_shift),
 * clamped into [MINPRI_USER, MAXPRI_KERNEL].  The CONFIG_EMBEDDED
 * variant additionally raises results below MAXPRI_THROTTLE back to
 * MAXPRI_THROTTLE when the owning task's max_priority permits.
 * Multiple-evaluation hazard: `thread' and `pri' are expanded several
 * times — callers must pass simple lvalues, as all call sites here do.
 * NOTE(review): the #else separating the two variants and the
 * do { ... } while(0) wrapper lines were dropped by the extraction.
 */
247 #ifdef CONFIG_EMBEDDED
249 #define do_priority_computation(thread, pri) \
251 (pri) = (thread)->priority /* start with base priority */ \
252 - ((thread)->sched_usage >> (thread)->pri_shift); \
253 if ((pri) < MAXPRI_THROTTLE) { \
254 if ((thread)->task->max_priority > MAXPRI_THROTTLE) \
255 (pri) = MAXPRI_THROTTLE; \
257 if ((pri) < MINPRI_USER) \
258 (pri) = MINPRI_USER; \
260 if ((pri) > MAXPRI_KERNEL) \
261 (pri) = MAXPRI_KERNEL; \
266 #define do_priority_computation(thread, pri) \
268 (pri) = (thread)->priority /* start with base priority */ \
269 - ((thread)->sched_usage >> (thread)->pri_shift); \
270 if ((pri) < MINPRI_USER) \
271 (pri) = MINPRI_USER; \
273 if ((pri) > MAXPRI_KERNEL) \
274 (pri) = MAXPRI_KERNEL; \
277 #endif /* defined(CONFIG_SCHED_TRADITIONAL) */
/*
 * set_priority: store a new base priority on the thread and ask the
 * active scheduler policy to recompute its scheduled priority.  The
 * FALSE argument is compute_priority()'s override_depress parameter —
 * an existing depression is NOT overridden here.
 * NOTE(review): the function name/return-type lines were dropped by
 * the extraction; the surviving parameter list is
 * (thread_t thread, int priority).
 */
284 * Set the base priority of the thread
285 * and reset its scheduled priority.
287 * Called with the thread locked.
291 register thread_t thread
,
292 register int priority
)
294 thread
->priority
= priority
;
295 SCHED(compute_priority
)(thread
, FALSE
);
298 #if defined(CONFIG_SCHED_TRADITIONAL)
/*
 * compute_priority: recompute the scheduled priority from the base
 * priority, skipped entirely while the thread is promoted, or while it
 * is depressed unless override_depress is TRUE.  Timeshare threads get
 * the decayed do_priority_computation() result; for other modes the
 * base priority is used directly.
 * NOTE(review): the function name/return-type lines and the `else'
 * joining the two priority sources were dropped by the extraction.
 */
303 * Reset the scheduled priority of the thread
304 * according to its base priority if the
305 * thread has not been promoted or depressed.
307 * Called with the thread locked.
311 register thread_t thread
,
312 boolean_t override_depress
)
314 register int priority
;
316 if ( !(thread
->sched_flags
& TH_SFLAG_PROMOTED
) &&
317 (!(thread
->sched_flags
& TH_SFLAG_DEPRESSED_MASK
) ||
318 override_depress
) ) {
319 if (thread
->sched_mode
== TH_MODE_TIMESHARE
)
320 do_priority_computation(thread
, priority
);
/* Non-timeshare path (dropped `else'): use the base priority as-is. */
322 priority
= thread
->priority
;
324 set_sched_pri(thread
, priority
);
329 * compute_my_priority:
331 * Reset the scheduled priority for
332 * a timesharing thread.
334 * Only for use on the current thread
335 * if timesharing and not depressed.
337 * Called with the thread locked.
/*
 * Unlike set_sched_pri(), this assigns sched_pri directly; the assert
 * documents the precondition that the thread is not sitting on any
 * run queue (it is the current thread), so no queue migration is
 * needed.  NOTE(review): the function name/return-type lines were
 * dropped by the extraction.
 */
341 register thread_t thread
)
343 register int priority
;
345 do_priority_computation(thread
, priority
);
346 assert(thread
->runq
== PROCESSOR_NULL
);
347 thread
->sched_pri
= priority
;
351 * can_update_priority
353 * Make sure we don't do re-dispatches more frequently than a scheduler tick.
355 * Called with the thread locked.
/*
 * Rate limiter: when the global sched_tick still equals the thread's
 * recorded sched_stamp, a full update already ran this tick.
 * NOTE(review): the function header and the TRUE/FALSE return
 * statements were dropped by the extraction — presumably this returns
 * FALSE on a stamp match and TRUE otherwise; confirm against the
 * original file.
 */
361 if (sched_tick
== thread
->sched_stamp
)
370 * Perform housekeeping operations driven by scheduler tick.
372 * Called with the thread locked.
/*
 * update_priority: full scheduler-tick housekeeping for one thread —
 * decay accumulated CPU usage via sched_decay_shifts, release an
 * expired fail-safe demotion, and recompute/requeue if the timeshare
 * priority changed.  NOTE(review): the function name/return-type
 * lines, several `else' keywords, an assignment target near original
 * line 406, and the requeue condition around thread_setrun() were
 * dropped by the extraction; surviving fragments are byte-identical.
 */
376 register thread_t thread
)
378 register unsigned ticks
;
379 register uint32_t delta
;
/* Number of scheduler ticks since this thread was last updated. */
381 ticks
= sched_tick
- thread
->sched_stamp
;
383 thread
->sched_stamp
+= ticks
;
/* Refresh the load-dependent usage shift from the global value. */
384 thread
->pri_shift
= sched_pri_shift
;
387 * Gather cpu usage data.
389 thread_timer_delta(thread
, delta
);
/* Within the decay window: age usage by (5/8)^ticks via the table. */
390 if (ticks
< SCHED_DECAY_TICKS
) {
391 register struct shift_data
*shiftp
;
394 * Accumulate timesharing usage only
395 * during contention for processor
/* pri_shift == INT8_MAX appears to mean "no contention" — confirm. */
398 if (thread
->pri_shift
< INT8_MAX
)
399 thread
->sched_usage
+= delta
;
/* Fold the pending cpu_delta into cpu_usage before decaying. */
401 thread
->cpu_usage
+= delta
+ thread
->cpu_delta
;
402 thread
->cpu_delta
= 0;
404 shiftp
= &sched_decay_shifts
[ticks
];
/*
 * Positive shift2: usage = (usage>>shift1) + (usage>>shift2).
 * NOTE(review): the `thread->cpu_usage =' target of the first sum
 * (original line 406) was dropped by the extraction.
 */
405 if (shiftp
->shift2
> 0) {
407 (thread
->cpu_usage
>> shiftp
->shift1
) +
408 (thread
->cpu_usage
>> shiftp
->shift2
);
409 thread
->sched_usage
=
410 (thread
->sched_usage
>> shiftp
->shift1
) +
411 (thread
->sched_usage
>> shiftp
->shift2
);
/*
 * Negative shift2 (dropped `else'): subtract instead, shifting by
 * -shift2.  The dropped assignment target here was likewise
 * thread->cpu_usage — confirm against the original file.
 */
415 (thread
->cpu_usage
>> shiftp
->shift1
) -
416 (thread
->cpu_usage
>> -(shiftp
->shift2
));
417 thread
->sched_usage
=
418 (thread
->sched_usage
>> shiftp
->shift1
) -
419 (thread
->sched_usage
>> -(shiftp
->shift2
));
/* Beyond the decay window (dropped `else'): usage has fully decayed. */
423 thread
->cpu_usage
= thread
->cpu_delta
= 0;
424 thread
->sched_usage
= 0;
428 * Check for fail-safe release.
/*
 * Once safe_release has passed, restore the mode saved at fail-safe
 * trip time (thread_quantum_expire), restoring BASEPRI_RTQUEUES for
 * demoted realtime threads.
 */
430 if ( (thread
->sched_flags
& TH_SFLAG_FAILSAFE
) &&
431 mach_absolute_time() >= thread
->safe_release
) {
432 if (thread
->saved_mode
!= TH_MODE_TIMESHARE
) {
433 if (thread
->saved_mode
== TH_MODE_REALTIME
) {
434 thread
->priority
= BASEPRI_RTQUEUES
;
437 thread
->sched_mode
= thread
->saved_mode
;
438 thread
->saved_mode
= TH_MODE_NONE
;
/* Re-apply base priority only for runnable, non-depressed threads. */
440 if ((thread
->state
& (TH_RUN
|TH_IDLE
)) == TH_RUN
)
443 if (!(thread
->sched_flags
& TH_SFLAG_DEPRESSED_MASK
))
444 set_sched_pri(thread
, thread
->priority
);
447 thread
->sched_flags
&= ~TH_SFLAG_FAILSAFE
;
451 * Recompute scheduled priority if appropriate.
453 if ( (thread
->sched_mode
== TH_MODE_TIMESHARE
) &&
454 !(thread
->sched_flags
& TH_SFLAG_PROMOTED
) &&
455 !(thread
->sched_flags
& TH_SFLAG_DEPRESSED_MASK
) ) {
456 register int new_pri
;
458 do_priority_computation(thread
, new_pri
);
/*
 * Priority changed: pull the thread off its run queue (if queued)
 * before rewriting sched_pri, then requeue.  NOTE(review): the
 * condition guarding thread_setrun() on `removed' was dropped by the
 * extraction.
 */
459 if (new_pri
!= thread
->sched_pri
) {
460 boolean_t removed
= thread_run_queue_remove(thread
);
462 thread
->sched_pri
= new_pri
;
464 thread_setrun(thread
, SCHED_TAILQ
);
471 #endif /* CONFIG_SCHED_TRADITIONAL */