/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * Author: Avadis Tevanian, Jr.
 */
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <kern/host.h>
#include <kern/mach_param.h>
#include <kern/sched.h>
#include <sys/kdebug.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/ledger.h>
#include <machine/machparam.h>
/*
 * thread_quantum_expire:
 *
 * Recalculate the quantum and priority for a thread.
 */
void
thread_quantum_expire(
    timer_call_param_t p0,
    timer_call_param_t p1)
{
    processor_t processor = p0;
    thread_t    thread = p1;
    ast_t       preempt;

    SCHED_STATS_QUANTUM_TIMER_EXPIRATION(processor);
    /*
     * We bill CPU time to both the individual thread and its task.
     *
     * Because this balance adjustment could potentially attempt to wake this very
     * thread, we must credit the ledger before taking the thread lock. The ledger
     * pointers are only manipulated by the thread itself at the ast boundary.
     */
    ledger_credit(thread->t_ledger, task_ledgers.cpu_time, thread->current_quantum);
    ledger_credit(thread->t_threadledger, thread_ledgers.cpu_time, thread->current_quantum);

    thread_lock(thread);
    /*
     * We've run up until our quantum expiration, and will (potentially)
     * continue without re-entering the scheduler, so update this now.
     */
    thread->last_run_time = processor->quantum_end;
    /*
     * Check for fail-safe trip.
     */
    if ((thread->sched_mode == TH_MODE_REALTIME || thread->sched_mode == TH_MODE_FIXED) &&
        !(thread->sched_flags & TH_SFLAG_PROMOTED) &&
        !(thread->options & TH_OPT_SYSTEM_CRITICAL)) {
        uint64_t new_computation;

        new_computation = processor->quantum_end - thread->computation_epoch;
        new_computation += thread->computation_metered;
        if (new_computation > max_unsafe_computation) {
            KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAILSAFE) | DBG_FUNC_NONE,
                (uintptr_t)thread->sched_pri, (uintptr_t)thread->sched_mode, 0, 0, 0);

            if (thread->sched_mode == TH_MODE_REALTIME) {
                thread->priority = DEPRESSPRI;
            }

            thread->saved_mode = thread->sched_mode;

            if (SCHED(supports_timeshare_mode)) {
                sched_share_incr();
                thread->sched_mode = TH_MODE_TIMESHARE;
            } else {
                /* XXX handle fixed->fixed case */
                thread->sched_mode = TH_MODE_FIXED;
            }

            thread->safe_release = processor->quantum_end + sched_safe_duration;
            thread->sched_flags |= TH_SFLAG_FAILSAFE;
        }
    }
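
    /*
     * The demotion above is temporary: TH_SFLAG_FAILSAFE and safe_release mark
     * the thread so that update_priority() can restore saved_mode once
     * mach_absolute_time() reaches safe_release (see the fail-safe release
     * check there); a realtime thread also has its base priority dropped to
     * DEPRESSPRI here and restored to BASEPRI_RTQUEUES on release.
     */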
    /*
     * Recompute scheduled priority if appropriate.
     */
    if (SCHED(can_update_priority)(thread))
        SCHED(update_priority)(thread);
    else
        SCHED(lightweight_update_priority)(thread);

    SCHED(quantum_expire)(thread);
    processor->current_pri = thread->sched_pri;
    processor->current_thmode = thread->sched_mode;
    /*
     * This quantum is up, give this thread another.
     */
    if (first_timeslice(processor))
        processor->timeslice--;

    thread_quantum_init(thread);
    thread->last_quantum_refill_time = processor->quantum_end;
    /* Reload precise timing global policy to thread-local policy */
    thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);

    /*
     * Since non-precise user/kernel time doesn't update the state/thread timer
     * during privilege transitions, synthesize an event now.
     */
    if (!thread->precise_user_kernel_time) {
        timer_switch(PROCESSOR_DATA(processor, current_state),
                     processor->quantum_end,
                     PROCESSOR_DATA(processor, current_state));
        timer_switch(PROCESSOR_DATA(processor, thread_timer),
                     processor->quantum_end,
                     PROCESSOR_DATA(processor, thread_timer));
    }
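
    /*
     * Re-arm the per-processor quantum timer: the next expiration is scheduled
     * a full quantum from the current time (mach_absolute_time()), not from
     * the previous quantum_end.
     */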
    processor->quantum_end = mach_absolute_time() + thread->current_quantum;
    timer_call_enter1(&processor->quantum_timer, thread,
                      processor->quantum_end, TIMER_CALL_CRITICAL);
    /*
     * Context switch check.
     */
    if ((preempt = csw_check(processor)) != AST_NONE)
        ast_on(preempt);
    else {
        processor_set_t pset = processor->processor_set;

        pset_lock(pset);

        pset_pri_hint(pset, processor, processor->current_pri);
        pset_count_hint(pset, processor, SCHED(processor_runq_count)(processor));

        pset_unlock(pset);
    }

    thread_unlock(thread);
}
#if defined(CONFIG_SCHED_TRADITIONAL)

void
sched_traditional_quantum_expire(thread_t thread __unused)
{
    /*
     * No special behavior when a timeshare, fixed, or realtime thread
     * uses up its entire quantum
     */
}
void
lightweight_update_priority(thread_t thread)
{
    if (thread->sched_mode == TH_MODE_TIMESHARE) {
        register uint32_t delta;

        thread_timer_delta(thread, delta);

        /*
         * Accumulate timesharing usage only
         * during contention for processor
         * resources.
         */
        if (thread->pri_shift < INT8_MAX)
            thread->sched_usage += delta;

        thread->cpu_delta += delta;

        /*
         * Adjust the scheduled priority if
         * the thread has not been promoted
         * and is not depressed.
         */
        if (!(thread->sched_flags & TH_SFLAG_PROMOTED) &&
            !(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK))
            compute_my_priority(thread);
    }
}
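
/*
 * A note on the pri_shift check above: pri_shift is loaded from the global
 * sched_pri_shift by update_priority() below, and the scheduler-tick averaging
 * code appears to leave it at INT8_MAX while there is no contention for the
 * processor, so timesharing usage is only charged while the system is busy.
 */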
/*
 * Define shifts for simulating (5/8) ** n
 *
 * Shift structures for holding update shifts.  Actual computation
 * is  usage = (usage >> shift1) +/- (usage >> abs(shift2))  where the
 * +/- is determined by the sign of shift2.
 */
struct shift_data {
    int shift1;
    int shift2;
};

#define SCHED_DECAY_TICKS 32
static struct shift_data sched_decay_shifts[SCHED_DECAY_TICKS] = {
    {1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7},
    {5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13},
    {11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18},
    {16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}
};
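
/*
 * Worked example: indexing the table by elapsed ticks applies n ticks of decay
 * in one step, scaling usage by roughly (5/8) ** n.  For n == 1, {1,3} gives
 * usage/2 + usage/8 = 0.625 * usage, exactly 5/8; for n == 2, {1,-3} (negative
 * shift2, so the second term is subtracted) gives usage/2 - usage/8 =
 * 0.375 * usage, approximating (5/8) ** 2 = 0.390625.
 */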
/*
 * do_priority_computation:
 *
 * Calculate the timesharing priority based upon usage and load.
 */
#ifdef CONFIG_EMBEDDED

#define do_priority_computation(thread, pri)                        \
    MACRO_BEGIN                                                     \
    (pri) = (thread)->priority  /* start with base priority */      \
        - ((thread)->sched_usage >> (thread)->pri_shift);           \
    if ((pri) < MAXPRI_THROTTLE) {                                  \
        if ((thread)->task->max_priority > MAXPRI_THROTTLE)         \
            (pri) = MAXPRI_THROTTLE;                                \
        else                                                        \
        if ((pri) < MINPRI_USER)                                    \
            (pri) = MINPRI_USER;                                    \
    } else                                                          \
    if ((pri) > MAXPRI_KERNEL)                                      \
        (pri) = MAXPRI_KERNEL;                                      \
    MACRO_END

#else

#define do_priority_computation(thread, pri)                        \
    MACRO_BEGIN                                                     \
    (pri) = (thread)->priority  /* start with base priority */      \
        - ((thread)->sched_usage >> (thread)->pri_shift);           \
    if ((pri) < MINPRI_USER)                                        \
        (pri) = MINPRI_USER;                                        \
    else                                                            \
    if ((pri) > MAXPRI_KERNEL)                                      \
        (pri) = MAXPRI_KERNEL;                                      \
    MACRO_END

#endif

#endif /* defined(CONFIG_SCHED_TRADITIONAL) */
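
/*
 * Illustrative numbers: if a thread's base priority is 31 and its decayed
 * usage shifted by pri_shift works out to 6, the computed timeshare priority
 * is 31 - 6 = 25, then clamped into [MINPRI_USER, MAXPRI_KERNEL].  The
 * CONFIG_EMBEDDED variant additionally keeps threads of unthrottled tasks
 * (task->max_priority > MAXPRI_THROTTLE) from decaying below MAXPRI_THROTTLE.
 */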
/*
 * set_priority:
 *
 * Set the base priority of the thread
 * and reset its scheduled priority.
 *
 * Called with the thread locked.
 */
void
set_priority(
    register thread_t thread,
    register int      priority)
{
    thread->priority = priority;
    SCHED(compute_priority)(thread, FALSE);
}
#if defined(CONFIG_SCHED_TRADITIONAL)

/*
 * compute_priority:
 *
 * Reset the scheduled priority of the thread
 * according to its base priority if the
 * thread has not been promoted or depressed.
 *
 * Called with the thread locked.
 */
void
compute_priority(
    register thread_t thread,
    boolean_t         override_depress)
{
    register int priority;

    if (!(thread->sched_flags & TH_SFLAG_PROMOTED) &&
        (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) ||
         override_depress)) {
        if (thread->sched_mode == TH_MODE_TIMESHARE)
            do_priority_computation(thread, priority);
        else
            priority = thread->priority;

        set_sched_pri(thread, priority);
    }
}
/*
 * compute_my_priority:
 *
 * Reset the scheduled priority for
 * a timesharing thread.
 *
 * Only for use on the current thread
 * if timesharing and not depressed.
 *
 * Called with the thread locked.
 */
void
compute_my_priority(
    register thread_t thread)
{
    register int priority;

    do_priority_computation(thread, priority);
    assert(thread->runq == PROCESSOR_NULL);
    thread->sched_pri = priority;
}
/*
 * can_update_priority
 *
 * Make sure we don't do re-dispatches more frequently than a scheduler tick.
 *
 * Called with the thread locked.
 */
boolean_t
can_update_priority(
    thread_t thread)
{
    if (sched_tick == thread->sched_stamp)
        return (FALSE);
    else
        return (TRUE);
}
/*
 * update_priority
 *
 * Perform housekeeping operations driven by scheduler tick.
 *
 * Called with the thread locked.
 */
void
update_priority(
    register thread_t thread)
{
    register unsigned ticks;
    register uint32_t delta;

    ticks = sched_tick - thread->sched_stamp;
    assert(ticks != 0);
    thread->sched_stamp += ticks;
    thread->pri_shift = sched_pri_shift;
    /*
     * Gather cpu usage data.
     */
    thread_timer_delta(thread, delta);
    if (ticks < SCHED_DECAY_TICKS) {
        register struct shift_data *shiftp;

        /*
         * Accumulate timesharing usage only
         * during contention for processor
         * resources.
         */
        if (thread->pri_shift < INT8_MAX)
            thread->sched_usage += delta;

        thread->cpu_usage += delta + thread->cpu_delta;
        thread->cpu_delta = 0;

        shiftp = &sched_decay_shifts[ticks];
        if (shiftp->shift2 > 0) {
            thread->cpu_usage =
                (thread->cpu_usage >> shiftp->shift1) +
                (thread->cpu_usage >> shiftp->shift2);
            thread->sched_usage =
                (thread->sched_usage >> shiftp->shift1) +
                (thread->sched_usage >> shiftp->shift2);
        }
        else {
            thread->cpu_usage =
                (thread->cpu_usage >> shiftp->shift1) -
                (thread->cpu_usage >> -(shiftp->shift2));
            thread->sched_usage =
                (thread->sched_usage >> shiftp->shift1) -
                (thread->sched_usage >> -(shiftp->shift2));
        }
    }
    else {
        thread->cpu_usage = thread->cpu_delta = 0;
        thread->sched_usage = 0;
    }
    /*
     * Check for fail-safe release.
     */
    if ((thread->sched_flags & TH_SFLAG_FAILSAFE) &&
        mach_absolute_time() >= thread->safe_release) {
        if (thread->saved_mode != TH_MODE_TIMESHARE) {
            if (thread->saved_mode == TH_MODE_REALTIME) {
                thread->priority = BASEPRI_RTQUEUES;
            }

            thread->sched_mode = thread->saved_mode;
            thread->saved_mode = TH_MODE_NONE;

            if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
                sched_share_decr();

            if (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK))
                set_sched_pri(thread, thread->priority);
        }

        thread->sched_flags &= ~TH_SFLAG_FAILSAFE;
    }
    /* Check for pending throttle transitions, and safely switch queues */
    if (thread->sched_flags & TH_SFLAG_PENDING_THROTTLE_MASK) {
        boolean_t removed = thread_run_queue_remove(thread);

        if (thread->sched_flags & TH_SFLAG_PENDING_THROTTLE_DEMOTION) {
            if (thread->sched_mode == TH_MODE_REALTIME) {
                thread->saved_mode = thread->sched_mode;
                thread->sched_mode = TH_MODE_TIMESHARE;

                if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
                    sched_share_incr();
            } else {
                /*
                 * It's possible that this is a realtime thread that has
                 * already tripped the failsafe, in which case saved_mode
                 * is already set correctly.
                 */
                if (!(thread->sched_flags & TH_SFLAG_FAILSAFE)) {
                    thread->saved_mode = thread->sched_mode;
                }
                thread->sched_flags &= ~TH_SFLAG_FAILSAFE;
            }
            thread->sched_flags |= TH_SFLAG_THROTTLED;
        } else {
            if ((thread->sched_mode == TH_MODE_TIMESHARE)
                && (thread->saved_mode == TH_MODE_REALTIME)) {
                if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
                    sched_share_decr();
            }

            thread->sched_mode = thread->saved_mode;
            thread->saved_mode = TH_MODE_NONE;
            thread->sched_flags &= ~TH_SFLAG_THROTTLED;
        }

        thread->sched_flags &= ~(TH_SFLAG_PENDING_THROTTLE_MASK);

        if (removed)
            thread_setrun(thread, SCHED_TAILQ);
    }
    /*
     * Recompute scheduled priority if appropriate.
     */
    if ((thread->sched_mode == TH_MODE_TIMESHARE) &&
        !(thread->sched_flags & TH_SFLAG_PROMOTED) &&
        !(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)) {
        register int new_pri;

        do_priority_computation(thread, new_pri);
        if (new_pri != thread->sched_pri) {
            boolean_t removed = thread_run_queue_remove(thread);

            thread->sched_pri = new_pri;
            if (removed)
                thread_setrun(thread, SCHED_TAILQ);
        }
    }
}

#endif /* CONFIG_SCHED_TRADITIONAL */