]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/priority.c
3273a4f6c57f64639a2e5a0108ce54ea20db643b
[apple/xnu.git] / osfmk / kern / priority.c
1 /*
2 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: clock_prim.c
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1986
62 *
63 * Clock primitives.
64 */
65
66 #include <mach/boolean.h>
67 #include <mach/kern_return.h>
68 #include <mach/machine.h>
69 #include <kern/host.h>
70 #include <kern/mach_param.h>
71 #include <kern/sched.h>
72 #include <sys/kdebug.h>
73 #include <kern/spl.h>
74 #include <kern/thread.h>
75 #include <kern/processor.h>
76 #include <machine/machparam.h>
77
/*
 *	thread_quantum_expire:
 *
 *	Recalculate the quantum and priority for a thread.
 *
 *	Timer-call callback fired when the running thread's quantum is
 *	used up; p0 is the processor, p1 the thread that was running.
 *
 *	Called at splsched.
 */

void
thread_quantum_expire(
	timer_call_param_t	p0,
	timer_call_param_t	p1)
{
	processor_t		processor = p0;
	thread_t		thread = p1;
	ast_t			preempt;

	SCHED_STATS_QUANTUM_TIMER_EXPIRATION(processor);

	thread_lock(thread);

	/*
	 * We've run up until our quantum expiration, and will (potentially)
	 * continue without re-entering the scheduler, so update this now.
	 */
	thread->last_run_time = processor->quantum_end;

	/*
	 * Check for fail-safe trip.
	 *
	 * A realtime or fixed-priority thread that has computed unblocked
	 * for longer than max_unsafe_computation is demoted to timeshare
	 * (unless promoted or marked system-critical).  A release time is
	 * recorded so update_priority() can undo the demotion later.
	 */
	if ((thread->sched_mode == TH_MODE_REALTIME || thread->sched_mode == TH_MODE_FIXED) &&
	    !(thread->sched_flags & TH_SFLAG_PROMOTED) &&
	    !(thread->options & TH_OPT_SYSTEM_CRITICAL)) {
		uint64_t	new_computation;

		/* Current computation span plus previously metered time. */
		new_computation = processor->quantum_end - thread->computation_epoch;
		new_computation += thread->computation_metered;
		if (new_computation > max_unsafe_computation) {

			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAILSAFE)|DBG_FUNC_NONE,
					(uintptr_t)thread->sched_pri, (uintptr_t)thread->sched_mode, 0, 0, 0);

			/* Realtime threads also get their base priority depressed. */
			if (thread->sched_mode == TH_MODE_REALTIME) {
				thread->priority = DEPRESSPRI;
			}

			/* Remember the original mode for the fail-safe release. */
			thread->saved_mode = thread->sched_mode;

			if (SCHED(supports_timeshare_mode)) {
				sched_share_incr();
				thread->sched_mode = TH_MODE_TIMESHARE;
			} else {
				/* XXX handle fixed->fixed case */
				thread->sched_mode = TH_MODE_FIXED;
			}

			/* Time after which update_priority() may restore saved_mode. */
			thread->safe_release = processor->quantum_end + sched_safe_duration;
			thread->sched_flags |= TH_SFLAG_FAILSAFE;
		}
	}

	/*
	 * Recompute scheduled priority if appropriate.
	 */
	if (SCHED(can_update_priority)(thread))
		SCHED(update_priority)(thread);
	else
		SCHED(lightweight_update_priority)(thread);

	/* Per-policy hook for quantum expiration. */
	SCHED(quantum_expire)(thread);

	/* Keep the processor's cached view of its running thread current. */
	processor->current_pri = thread->sched_pri;
	processor->current_thmode = thread->sched_mode;

	/*
	 * This quantum is up, give this thread another.
	 */
	if (first_timeslice(processor))
		processor->timeslice--;

	thread_quantum_init(thread);
	thread->last_quantum_refill_time = processor->quantum_end;

	/* Re-arm the quantum timer for the refreshed quantum. */
	processor->quantum_end += thread->current_quantum;
	timer_call_enter1(&processor->quantum_timer, thread,
	    processor->quantum_end, TIMER_CALL_CRITICAL);

	/*
	 * Context switch check.  If no preemption is pending, refresh
	 * the processor-set priority and run-count hints instead.
	 */
	if ((preempt = csw_check(processor)) != AST_NONE)
		ast_on(preempt);
	else {
		processor_set_t		pset = processor->processor_set;

		pset_lock(pset);

		pset_pri_hint(pset, processor, processor->current_pri);
		pset_count_hint(pset, processor, SCHED(processor_runq_count)(processor));

		pset_unlock(pset);
	}

	thread_unlock(thread);
}
183
184 #if defined(CONFIG_SCHED_TRADITIONAL)
185
/*
 *	sched_traditional_quantum_expire:
 *
 *	Traditional scheduler's per-policy quantum-expiration hook,
 *	invoked via SCHED(quantum_expire) from thread_quantum_expire().
 */
void
sched_traditional_quantum_expire(thread_t	thread __unused)
{
	/*
	 * No special behavior when a timeshare, fixed, or realtime thread
	 * uses up its entire quantum
	 */
}
194
195 void
196 lightweight_update_priority(thread_t thread)
197 {
198 if (thread->sched_mode == TH_MODE_TIMESHARE) {
199 register uint32_t delta;
200
201 thread_timer_delta(thread, delta);
202
203 /*
204 * Accumulate timesharing usage only
205 * during contention for processor
206 * resources.
207 */
208 if (thread->pri_shift < INT8_MAX)
209 thread->sched_usage += delta;
210
211 thread->cpu_delta += delta;
212
213 /*
214 * Adjust the scheduled priority if
215 * the thread has not been promoted
216 * and is not depressed.
217 */
218 if ( !(thread->sched_flags & TH_SFLAG_PROMOTED) &&
219 !(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) )
220 compute_my_priority(thread);
221 }
222 }
223
/*
 *	Define shifts for simulating (5/8) ** n
 *
 *	Shift structures for holding update shifts.  Actual computation
 *	is  usage = (usage >> shift1) +/- (usage >> abs(shift2))  where the
 *	+/- is determined by the sign of shift2.
 */
struct shift_data {
	int	shift1;		/* primary right shift */
	int	shift2;		/* secondary shift; sign selects add vs. subtract */
};

#define SCHED_DECAY_TICKS	32
/*
 * Indexed by the number of elapsed scheduler ticks (< SCHED_DECAY_TICKS);
 * entry n approximates multiplication by (5/8)**n, e.g. entry 1 gives
 * (x >> 1) + (x >> 3) = 0.625 x.
 */
static struct shift_data	sched_decay_shifts[SCHED_DECAY_TICKS] = {
	{1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7},
	{5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13},
	{11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18},
	{16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}
};
243
/*
 *	do_priority_computation:
 *
 *	Calculate the timesharing priority based upon usage and load:
 *	base priority minus decayed usage scaled by the load-derived
 *	pri_shift, clamped into the legal user/kernel band.
 */
#ifdef CONFIG_EMBEDDED

/*
 * Embedded variant: additionally pins background threads at
 * MAXPRI_THROTTLE unless the task's max_priority is itself throttled.
 */
#define do_priority_computation(thread, pri)						\
	MACRO_BEGIN									\
	(pri) = (thread)->priority		/* start with base priority */		\
	    - ((thread)->sched_usage >> (thread)->pri_shift);				\
	if ((pri) < MAXPRI_THROTTLE) {							\
		if ((thread)->task->max_priority > MAXPRI_THROTTLE)			\
			(pri) = MAXPRI_THROTTLE;					\
		else									\
		if ((pri) < MINPRI_USER)						\
			(pri) = MINPRI_USER;						\
	} else										\
	if ((pri) > MAXPRI_KERNEL)							\
		(pri) = MAXPRI_KERNEL;							\
	MACRO_END

#else

#define do_priority_computation(thread, pri)						\
	MACRO_BEGIN									\
	(pri) = (thread)->priority		/* start with base priority */		\
	    - ((thread)->sched_usage >> (thread)->pri_shift);				\
	if ((pri) < MINPRI_USER)							\
		(pri) = MINPRI_USER;							\
	else										\
	if ((pri) > MAXPRI_KERNEL)							\
		(pri) = MAXPRI_KERNEL;							\
	MACRO_END

#endif /* CONFIG_EMBEDDED */

#endif /* defined(CONFIG_SCHED_TRADITIONAL) */
282
283 /*
284 * set_priority:
285 *
286 * Set the base priority of the thread
287 * and reset its scheduled priority.
288 *
289 * Called with the thread locked.
290 */
291 void
292 set_priority(
293 register thread_t thread,
294 register int priority)
295 {
296 thread->priority = priority;
297 SCHED(compute_priority)(thread, FALSE);
298 }
299
300 #if defined(CONFIG_SCHED_TRADITIONAL)
301
302 /*
303 * compute_priority:
304 *
305 * Reset the scheduled priority of the thread
306 * according to its base priority if the
307 * thread has not been promoted or depressed.
308 *
309 * Called with the thread locked.
310 */
311 void
312 compute_priority(
313 register thread_t thread,
314 boolean_t override_depress)
315 {
316 register int priority;
317
318 if ( !(thread->sched_flags & TH_SFLAG_PROMOTED) &&
319 (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) ||
320 override_depress ) ) {
321 if (thread->sched_mode == TH_MODE_TIMESHARE)
322 do_priority_computation(thread, priority);
323 else
324 priority = thread->priority;
325
326 set_sched_pri(thread, priority);
327 }
328 }
329
330 /*
331 * compute_my_priority:
332 *
333 * Reset the scheduled priority for
334 * a timesharing thread.
335 *
336 * Only for use on the current thread
337 * if timesharing and not depressed.
338 *
339 * Called with the thread locked.
340 */
341 void
342 compute_my_priority(
343 register thread_t thread)
344 {
345 register int priority;
346
347 do_priority_computation(thread, priority);
348 assert(thread->runq == PROCESSOR_NULL);
349 thread->sched_pri = priority;
350 }
351
352 /*
353 * can_update_priority
354 *
355 * Make sure we don't do re-dispatches more frequently than a scheduler tick.
356 *
357 * Called with the thread locked.
358 */
359 boolean_t
360 can_update_priority(
361 thread_t thread)
362 {
363 if (sched_tick == thread->sched_stamp)
364 return (FALSE);
365 else
366 return (TRUE);
367 }
368
/*
 *	update_priority
 *
 *	Perform housekeeping operations driven by scheduler tick:
 *	decay the thread's accumulated CPU usage, release an expired
 *	fail-safe demotion, and recompute the scheduled priority of
 *	a timeshare thread.
 *
 *	Called with the thread locked.
 */
void
update_priority(
	register thread_t	thread)
{
	register unsigned	ticks;
	register uint32_t	delta;

	/* Scheduler ticks elapsed since this thread was last updated. */
	ticks = sched_tick - thread->sched_stamp;
	assert(ticks != 0);
	thread->sched_stamp += ticks;
	thread->pri_shift = sched_pri_shift;

	/*
	 * Gather cpu usage data.
	 */
	thread_timer_delta(thread, delta);
	if (ticks < SCHED_DECAY_TICKS) {
		register struct shift_data	*shiftp;

		/*
		 * Accumulate timesharing usage only
		 * during contention for processor
		 * resources.
		 */
		if (thread->pri_shift < INT8_MAX)
			thread->sched_usage += delta;

		thread->cpu_usage += delta + thread->cpu_delta;
		thread->cpu_delta = 0;

		/*
		 * Decay usage by approximately (5/8)**ticks using the
		 * shift table; a negative shift2 selects the subtractive
		 * form of the approximation.
		 */
		shiftp = &sched_decay_shifts[ticks];
		if (shiftp->shift2 > 0) {
			thread->cpu_usage =
				(thread->cpu_usage >> shiftp->shift1) +
				(thread->cpu_usage >> shiftp->shift2);
			thread->sched_usage =
				(thread->sched_usage >> shiftp->shift1) +
				(thread->sched_usage >> shiftp->shift2);
		}
		else {
			thread->cpu_usage =
				(thread->cpu_usage >> shiftp->shift1) -
				(thread->cpu_usage >> -(shiftp->shift2));
			thread->sched_usage =
				(thread->sched_usage >> shiftp->shift1) -
				(thread->sched_usage >> -(shiftp->shift2));
		}
	}
	else {
		/* Gone long enough that any accumulated usage has fully decayed. */
		thread->cpu_usage = thread->cpu_delta = 0;
		thread->sched_usage = 0;
	}

	/*
	 * Check for fail-safe release: once safe_release has passed,
	 * undo the demotion applied by thread_quantum_expire(),
	 * restoring the saved mode (and base priority for realtime).
	 */
	if ( (thread->sched_flags & TH_SFLAG_FAILSAFE) &&
	     mach_absolute_time() >= thread->safe_release ) {
		if (thread->saved_mode != TH_MODE_TIMESHARE) {
			if (thread->saved_mode == TH_MODE_REALTIME) {
				thread->priority = BASEPRI_RTQUEUES;
			}

			thread->sched_mode = thread->saved_mode;
			thread->saved_mode = TH_MODE_NONE;

			/* Leaving timeshare: balance the sched_share_incr()
			 * performed at demotion, for runnable threads. */
			if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
				sched_share_decr();

			if (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK))
				set_sched_pri(thread, thread->priority);
		}

		thread->sched_flags &= ~TH_SFLAG_FAILSAFE;
	}

	/*
	 * Recompute scheduled priority if appropriate.
	 */
	if ( (thread->sched_mode == TH_MODE_TIMESHARE) &&
	     !(thread->sched_flags & TH_SFLAG_PROMOTED) &&
	     !(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) ) {
		register int	new_pri;

		do_priority_computation(thread, new_pri);
		if (new_pri != thread->sched_pri) {
			/* Requeue at the new priority if the thread was
			 * sitting on a run queue. */
			boolean_t	removed = thread_run_queue_remove(thread);

			thread->sched_pri = new_pri;
			if (removed)
				thread_setrun(thread, SCHED_TAILQ);
		}
	}

	return;
}
472
473 #endif /* CONFIG_SCHED_TRADITIONAL */