/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * Author: Avadis Tevanian, Jr.
 */
66 #include <mach/boolean.h>
67 #include <mach/kern_return.h>
68 #include <mach/machine.h>
69 #include <kern/host.h>
70 #include <kern/mach_param.h>
71 #include <kern/sched.h>
72 #include <sys/kdebug.h>
74 #include <kern/thread.h>
75 #include <kern/processor.h>
76 #include <machine/machparam.h>
79 * thread_quantum_expire:
81 * Recalculate the quantum and priority for a thread.
87 thread_quantum_expire(
88 timer_call_param_t p0
,
89 timer_call_param_t p1
)
91 processor_t processor
= p0
;
95 SCHED_STATS_QUANTUM_TIMER_EXPIRATION(processor
);
100 * We've run up until our quantum expiration, and will (potentially)
101 * continue without re-entering the scheduler, so update this now.
103 thread
->last_run_time
= processor
->quantum_end
;
106 * Check for fail-safe trip.
108 if ((thread
->sched_mode
== TH_MODE_REALTIME
|| thread
->sched_mode
== TH_MODE_FIXED
) &&
109 !(thread
->sched_flags
& TH_SFLAG_PROMOTED
) &&
110 !(thread
->options
& TH_OPT_SYSTEM_CRITICAL
)) {
111 uint64_t new_computation
;
113 new_computation
= processor
->quantum_end
- thread
->computation_epoch
;
114 new_computation
+= thread
->computation_metered
;
115 if (new_computation
> max_unsafe_computation
) {
117 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED
, MACH_FAILSAFE
)|DBG_FUNC_NONE
,
118 (uintptr_t)thread
->sched_pri
, (uintptr_t)thread
->sched_mode
, 0, 0, 0);
120 if (thread
->sched_mode
== TH_MODE_REALTIME
) {
121 thread
->priority
= DEPRESSPRI
;
124 thread
->saved_mode
= thread
->sched_mode
;
126 if (SCHED(supports_timeshare_mode
)) {
128 thread
->sched_mode
= TH_MODE_TIMESHARE
;
130 /* XXX handle fixed->fixed case */
131 thread
->sched_mode
= TH_MODE_FIXED
;
134 thread
->safe_release
= processor
->quantum_end
+ sched_safe_duration
;
135 thread
->sched_flags
|= TH_SFLAG_FAILSAFE
;
140 * Recompute scheduled priority if appropriate.
142 if (SCHED(can_update_priority
)(thread
))
143 SCHED(update_priority
)(thread
);
145 SCHED(lightweight_update_priority
)(thread
);
147 SCHED(quantum_expire
)(thread
);
149 processor
->current_pri
= thread
->sched_pri
;
150 processor
->current_thmode
= thread
->sched_mode
;
153 * This quantum is up, give this thread another.
155 if (first_timeslice(processor
))
156 processor
->timeslice
--;
158 thread_quantum_init(thread
);
159 thread
->last_quantum_refill_time
= processor
->quantum_end
;
161 processor
->quantum_end
+= thread
->current_quantum
;
162 timer_call_enter1(&processor
->quantum_timer
, thread
,
163 processor
->quantum_end
, TIMER_CALL_CRITICAL
);
166 * Context switch check.
168 if ((preempt
= csw_check(processor
)) != AST_NONE
)
171 processor_set_t pset
= processor
->processor_set
;
175 pset_pri_hint(pset
, processor
, processor
->current_pri
);
176 pset_count_hint(pset
, processor
, SCHED(processor_runq_count
)(processor
));
181 thread_unlock(thread
);
184 #if defined(CONFIG_SCHED_TRADITIONAL)
187 sched_traditional_quantum_expire(thread_t thread __unused
)
190 * No special behavior when a timeshare, fixed, or realtime thread
191 * uses up its entire quantum
196 lightweight_update_priority(thread_t thread
)
198 if (thread
->sched_mode
== TH_MODE_TIMESHARE
) {
199 register uint32_t delta
;
201 thread_timer_delta(thread
, delta
);
204 * Accumulate timesharing usage only
205 * during contention for processor
208 if (thread
->pri_shift
< INT8_MAX
)
209 thread
->sched_usage
+= delta
;
211 thread
->cpu_delta
+= delta
;
214 * Adjust the scheduled priority if
215 * the thread has not been promoted
216 * and is not depressed.
218 if ( !(thread
->sched_flags
& TH_SFLAG_PROMOTED
) &&
219 !(thread
->sched_flags
& TH_SFLAG_DEPRESSED_MASK
) )
220 compute_my_priority(thread
);
225 * Define shifts for simulating (5/8) ** n
227 * Shift structures for holding update shifts. Actual computation
228 * is usage = (usage >> shift1) +/- (usage >> abs(shift2)) where the
229 * +/- is determined by the sign of shift 2.
236 #define SCHED_DECAY_TICKS 32
237 static struct shift_data sched_decay_shifts
[SCHED_DECAY_TICKS
] = {
238 {1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7},
239 {5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13},
240 {11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18},
241 {16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}
245 * do_priority_computation:
247 * Calculate the timesharing priority based upon usage and load.
249 #ifdef CONFIG_EMBEDDED
251 #define do_priority_computation(thread, pri) \
253 (pri) = (thread)->priority /* start with base priority */ \
254 - ((thread)->sched_usage >> (thread)->pri_shift); \
255 if ((pri) < MAXPRI_THROTTLE) { \
256 if ((thread)->task->max_priority > MAXPRI_THROTTLE) \
257 (pri) = MAXPRI_THROTTLE; \
259 if ((pri) < MINPRI_USER) \
260 (pri) = MINPRI_USER; \
262 if ((pri) > MAXPRI_KERNEL) \
263 (pri) = MAXPRI_KERNEL; \
268 #define do_priority_computation(thread, pri) \
270 (pri) = (thread)->priority /* start with base priority */ \
271 - ((thread)->sched_usage >> (thread)->pri_shift); \
272 if ((pri) < MINPRI_USER) \
273 (pri) = MINPRI_USER; \
275 if ((pri) > MAXPRI_KERNEL) \
276 (pri) = MAXPRI_KERNEL; \
279 #endif /* defined(CONFIG_SCHED_TRADITIONAL) */
286 * Set the base priority of the thread
287 * and reset its scheduled priority.
289 * Called with the thread locked.
293 register thread_t thread
,
294 register int priority
)
296 thread
->priority
= priority
;
297 SCHED(compute_priority
)(thread
, FALSE
);
300 #if defined(CONFIG_SCHED_TRADITIONAL)
305 * Reset the scheduled priority of the thread
306 * according to its base priority if the
307 * thread has not been promoted or depressed.
309 * Called with the thread locked.
313 register thread_t thread
,
314 boolean_t override_depress
)
316 register int priority
;
318 if ( !(thread
->sched_flags
& TH_SFLAG_PROMOTED
) &&
319 (!(thread
->sched_flags
& TH_SFLAG_DEPRESSED_MASK
) ||
320 override_depress
) ) {
321 if (thread
->sched_mode
== TH_MODE_TIMESHARE
)
322 do_priority_computation(thread
, priority
);
324 priority
= thread
->priority
;
326 set_sched_pri(thread
, priority
);
331 * compute_my_priority:
333 * Reset the scheduled priority for
334 * a timesharing thread.
336 * Only for use on the current thread
337 * if timesharing and not depressed.
339 * Called with the thread locked.
343 register thread_t thread
)
345 register int priority
;
347 do_priority_computation(thread
, priority
);
348 assert(thread
->runq
== PROCESSOR_NULL
);
349 thread
->sched_pri
= priority
;
353 * can_update_priority
355 * Make sure we don't do re-dispatches more frequently than a scheduler tick.
357 * Called with the thread locked.
363 if (sched_tick
== thread
->sched_stamp
)
372 * Perform housekeeping operations driven by scheduler tick.
374 * Called with the thread locked.
378 register thread_t thread
)
380 register unsigned ticks
;
381 register uint32_t delta
;
383 ticks
= sched_tick
- thread
->sched_stamp
;
385 thread
->sched_stamp
+= ticks
;
386 thread
->pri_shift
= sched_pri_shift
;
389 * Gather cpu usage data.
391 thread_timer_delta(thread
, delta
);
392 if (ticks
< SCHED_DECAY_TICKS
) {
393 register struct shift_data
*shiftp
;
396 * Accumulate timesharing usage only
397 * during contention for processor
400 if (thread
->pri_shift
< INT8_MAX
)
401 thread
->sched_usage
+= delta
;
403 thread
->cpu_usage
+= delta
+ thread
->cpu_delta
;
404 thread
->cpu_delta
= 0;
406 shiftp
= &sched_decay_shifts
[ticks
];
407 if (shiftp
->shift2
> 0) {
409 (thread
->cpu_usage
>> shiftp
->shift1
) +
410 (thread
->cpu_usage
>> shiftp
->shift2
);
411 thread
->sched_usage
=
412 (thread
->sched_usage
>> shiftp
->shift1
) +
413 (thread
->sched_usage
>> shiftp
->shift2
);
417 (thread
->cpu_usage
>> shiftp
->shift1
) -
418 (thread
->cpu_usage
>> -(shiftp
->shift2
));
419 thread
->sched_usage
=
420 (thread
->sched_usage
>> shiftp
->shift1
) -
421 (thread
->sched_usage
>> -(shiftp
->shift2
));
425 thread
->cpu_usage
= thread
->cpu_delta
= 0;
426 thread
->sched_usage
= 0;
430 * Check for fail-safe release.
432 if ( (thread
->sched_flags
& TH_SFLAG_FAILSAFE
) &&
433 mach_absolute_time() >= thread
->safe_release
) {
434 if (thread
->saved_mode
!= TH_MODE_TIMESHARE
) {
435 if (thread
->saved_mode
== TH_MODE_REALTIME
) {
436 thread
->priority
= BASEPRI_RTQUEUES
;
439 thread
->sched_mode
= thread
->saved_mode
;
440 thread
->saved_mode
= TH_MODE_NONE
;
442 if ((thread
->state
& (TH_RUN
|TH_IDLE
)) == TH_RUN
)
445 if (!(thread
->sched_flags
& TH_SFLAG_DEPRESSED_MASK
))
446 set_sched_pri(thread
, thread
->priority
);
449 thread
->sched_flags
&= ~TH_SFLAG_FAILSAFE
;
453 * Recompute scheduled priority if appropriate.
455 if ( (thread
->sched_mode
== TH_MODE_TIMESHARE
) &&
456 !(thread
->sched_flags
& TH_SFLAG_PROMOTED
) &&
457 !(thread
->sched_flags
& TH_SFLAG_DEPRESSED_MASK
) ) {
458 register int new_pri
;
460 do_priority_computation(thread
, new_pri
);
461 if (new_pri
!= thread
->sched_pri
) {
462 boolean_t removed
= thread_run_queue_remove(thread
);
464 thread
->sched_pri
= new_pri
;
466 thread_setrun(thread
, SCHED_TAILQ
);
473 #endif /* CONFIG_SCHED_TRADITIONAL */