/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	priority.c (originally clock_prim.c)
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1986
 *
 *	Clock primitives.
 */

#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <kern/host.h>
#include <kern/mach_param.h>
#include <kern/sched.h>
#include <sys/kdebug.h>
#include <kern/spl.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <machine/machparam.h>

/*
 *	thread_quantum_expire:
 *
 *	Recalculate the quantum and priority for a thread.
 *
 *	Called at splsched.
 */

void
thread_quantum_expire(
	timer_call_param_t	p0,
	timer_call_param_t	p1)
{
	processor_t		processor = p0;
	thread_t		thread = p1;
	ast_t			preempt;

	thread_lock(thread);

	/*
	 * We've run up until our quantum expiration, and will (potentially)
	 * continue without re-entering the scheduler, so update this now.
	 */
	thread->last_run_time = processor->quantum_end;

	/*
	 *	Check for fail-safe trip.
	 */
	if ((thread->sched_mode == TH_MODE_REALTIME || thread->sched_mode == TH_MODE_FIXED) &&
	    !(thread->sched_flags & TH_SFLAG_PROMOTED) &&
	    !(thread->options & TH_OPT_SYSTEM_CRITICAL)) {
		uint64_t new_computation;

		new_computation = processor->quantum_end - thread->computation_epoch;
		new_computation += thread->computation_metered;
		if (new_computation > max_unsafe_computation) {

			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAILSAFE)|DBG_FUNC_NONE,
					(uintptr_t)thread->sched_pri, (uintptr_t)thread->sched_mode, 0, 0, 0);

			if (thread->sched_mode == TH_MODE_REALTIME) {
				thread->priority = DEPRESSPRI;
			}

			thread->saved_mode = thread->sched_mode;

			if (SCHED(supports_timeshare_mode)()) {
				sched_share_incr();
				thread->sched_mode = TH_MODE_TIMESHARE;
			} else {
				/* XXX handle fixed->fixed case */
				thread->sched_mode = TH_MODE_FIXED;
			}

			thread->safe_release = processor->quantum_end + sched_safe_duration;
			thread->sched_flags |= TH_SFLAG_FAILSAFE;
		}
	}
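
	/*
	 * Note: when the fail-safe trips, the thread's original mode is
	 * stashed in saved_mode and the thread is demoted to timeshare
	 * (or fixed) until update_priority() observes mach_absolute_time()
	 * passing safe_release and restores the saved mode; see the
	 * fail-safe release check in update_priority() below.
	 */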

	/*
	 *	Recompute scheduled priority if appropriate.
	 */
	if (SCHED(can_update_priority)(thread))
		SCHED(update_priority)(thread);
	else
		SCHED(lightweight_update_priority)(thread);

	SCHED(quantum_expire)(thread);

	processor->current_pri = thread->sched_pri;
	processor->current_thmode = thread->sched_mode;

	/*
	 *	This quantum is up, give this thread another.
	 */
	if (first_timeslice(processor))
		processor->timeslice--;

	thread_quantum_init(thread);
	thread->last_quantum_refill_time = processor->quantum_end;

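	/*
	 * Advance the quantum deadline and re-arm the quantum timer so that
	 * thread_quantum_expire() fires again when the fresh quantum is
	 * used up.
	 */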
	processor->quantum_end += thread->current_quantum;
	timer_call_enter1(&processor->quantum_timer,
			thread, processor->quantum_end, 0);

	/*
	 *	Context switch check.
	 */
	if ((preempt = csw_check(processor)) != AST_NONE)
		ast_on(preempt);
	else {
		processor_set_t		pset = processor->processor_set;

		pset_lock(pset);

		pset_pri_hint(pset, processor, processor->current_pri);
		pset_count_hint(pset, processor, SCHED(processor_runq_count)(processor));

		pset_unlock(pset);
	}

	thread_unlock(thread);
}

#if defined(CONFIG_SCHED_TRADITIONAL)

void
sched_traditional_quantum_expire(thread_t	thread __unused)
{
	/*
	 * No special behavior when a timeshare, fixed, or realtime thread
	 * uses up its entire quantum.
	 */
}

void
lightweight_update_priority(thread_t thread)
{
	if (thread->sched_mode == TH_MODE_TIMESHARE) {
		register uint32_t	delta;

		thread_timer_delta(thread, delta);

		/*
		 *	Accumulate timesharing usage only
		 *	during contention for processor
		 *	resources.
		 */
		if (thread->pri_shift < INT8_MAX)
			thread->sched_usage += delta;

		thread->cpu_delta += delta;

		/*
		 *	Adjust the scheduled priority if
		 *	the thread has not been promoted
		 *	and is not depressed.
		 */
		if (!(thread->sched_flags & TH_SFLAG_PROMOTED) &&
		    !(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK))
			compute_my_priority(thread);
	}
}

/*
 *	Define shifts for simulating (5/8) ** n
 *
 *	Shift structures for holding update shifts.  Actual computation
 *	is  usage = (usage >> shift1) +/- (usage >> abs(shift2))  where the
 *	+/- is determined by the sign of shift2.
 */
struct shift_data {
	int	shift1;
	int	shift2;
};

#define SCHED_DECAY_TICKS	32
static struct shift_data	sched_decay_shifts[SCHED_DECAY_TICKS] = {
	{1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7},
	{5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13},
	{11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18},
	{16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}
};
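
/*
 * Worked example (illustrative): entry n approximates decay by (5/8)^n.
 * For usage = 1024, sched_decay_shifts[1] = {1,3} gives
 * (1024 >> 1) + (1024 >> 3) = 512 + 128 = 640 = 1024 * (5/8) exactly,
 * and sched_decay_shifts[2] = {1,-3} gives (shift2 negative, so subtract)
 * (1024 >> 1) - (1024 >> 3) = 384, close to the exact
 * 1024 * (5/8)^2 = 400.
 */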

/*
 *	do_priority_computation:
 *
 *	Calculate the timesharing priority based upon usage and load.
 */
#ifdef CONFIG_EMBEDDED

#define do_priority_computation(thread, pri)				\
MACRO_BEGIN								\
	(pri) = (thread)->priority	/* start with base priority */	\
	    - ((thread)->sched_usage >> (thread)->pri_shift);		\
	if ((pri) < MAXPRI_THROTTLE) {					\
		if ((thread)->task->max_priority > MAXPRI_THROTTLE)	\
			(pri) = MAXPRI_THROTTLE;			\
		else if ((pri) < MINPRI_USER)				\
			(pri) = MINPRI_USER;				\
	} else if ((pri) > MAXPRI_KERNEL)				\
		(pri) = MAXPRI_KERNEL;					\
MACRO_END

#else

#define do_priority_computation(thread, pri)				\
MACRO_BEGIN								\
	(pri) = (thread)->priority	/* start with base priority */	\
	    - ((thread)->sched_usage >> (thread)->pri_shift);		\
	if ((pri) < MINPRI_USER)					\
		(pri) = MINPRI_USER;					\
	else if ((pri) > MAXPRI_KERNEL)					\
		(pri) = MAXPRI_KERNEL;					\
MACRO_END

#endif /* CONFIG_EMBEDDED */

#endif /* defined(CONFIG_SCHED_TRADITIONAL) */
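
/*
 * Illustrative example (values hypothetical): a timeshare thread with
 * base priority 31 whose decayed usage yields
 * (sched_usage >> pri_shift) == 6 is scheduled at priority 31 - 6 = 25;
 * the result is then clamped so it never falls below MINPRI_USER nor
 * rises above MAXPRI_KERNEL (the CONFIG_EMBEDDED variant additionally
 * clamps threads of throttled tasks to MAXPRI_THROTTLE).
 */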

/*
 *	set_priority:
 *
 *	Set the base priority of the thread
 *	and reset its scheduled priority.
 *
 *	Called with the thread locked.
 */
void
set_priority(
	register thread_t	thread,
	register int		priority)
{
	thread->priority = priority;
	SCHED(compute_priority)(thread, FALSE);
}

#if defined(CONFIG_SCHED_TRADITIONAL)

/*
 *	compute_priority:
 *
 *	Reset the scheduled priority of the thread
 *	according to its base priority if the
 *	thread has not been promoted or depressed.
 *
 *	Called with the thread locked.
 */
void
compute_priority(
	register thread_t	thread,
	boolean_t		override_depress)
{
	register int		priority;

	if (!(thread->sched_flags & TH_SFLAG_PROMOTED) &&
	    (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) ||
	     override_depress)) {
		if (thread->sched_mode == TH_MODE_TIMESHARE)
			do_priority_computation(thread, priority);
		else
			priority = thread->priority;

		set_sched_pri(thread, priority);
	}
}

/*
 *	compute_my_priority:
 *
 *	Reset the scheduled priority for
 *	a timesharing thread.
 *
 *	Only for use on the current thread
 *	if timesharing and not depressed.
 *
 *	Called with the thread locked.
 */
void
compute_my_priority(
	register thread_t	thread)
{
	register int		priority;

	do_priority_computation(thread, priority);
	assert(thread->runq == PROCESSOR_NULL);
	thread->sched_pri = priority;
}

/*
 *	can_update_priority
 *
 *	Make sure we don't do re-dispatches more frequently than a scheduler tick.
 *
 *	Called with the thread locked.
 */
boolean_t
can_update_priority(
	thread_t	thread)
{
	if (sched_tick == thread->sched_stamp)
		return (FALSE);
	else
		return (TRUE);
}

/*
 *	update_priority
 *
 *	Perform housekeeping operations driven by scheduler tick.
 *
 *	Called with the thread locked.
 */
void
update_priority(
	register thread_t	thread)
{
	register unsigned	ticks;
	register uint32_t	delta;

	ticks = sched_tick - thread->sched_stamp;
	assert(ticks != 0);
	thread->sched_stamp += ticks;
	thread->pri_shift = sched_pri_shift;

	/*
	 *	Gather cpu usage data.
	 */
	thread_timer_delta(thread, delta);
	if (ticks < SCHED_DECAY_TICKS) {
		register struct shift_data	*shiftp;

		/*
		 *	Accumulate timesharing usage only
		 *	during contention for processor
		 *	resources.
		 */
		if (thread->pri_shift < INT8_MAX)
			thread->sched_usage += delta;

		thread->cpu_usage += delta + thread->cpu_delta;
		thread->cpu_delta = 0;

		shiftp = &sched_decay_shifts[ticks];
		if (shiftp->shift2 > 0) {
			thread->cpu_usage =
				(thread->cpu_usage >> shiftp->shift1) +
				(thread->cpu_usage >> shiftp->shift2);
			thread->sched_usage =
				(thread->sched_usage >> shiftp->shift1) +
				(thread->sched_usage >> shiftp->shift2);
		}
		else {
			thread->cpu_usage =
				(thread->cpu_usage >> shiftp->shift1) -
				(thread->cpu_usage >> -(shiftp->shift2));
			thread->sched_usage =
				(thread->sched_usage >> shiftp->shift1) -
				(thread->sched_usage >> -(shiftp->shift2));
		}
	}
	else {
		thread->cpu_usage = thread->cpu_delta = 0;
		thread->sched_usage = 0;
	}
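
	/*
	 * Note: after SCHED_DECAY_TICKS (32) or more ticks without an
	 * update, usage is simply zeroed rather than decayed, since
	 * (5/8)^32 is vanishingly small (roughly 3e-7) and any accumulated
	 * usage would decay to nothing anyway.
	 */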

	/*
	 *	Check for fail-safe release.
	 */
	if ((thread->sched_flags & TH_SFLAG_FAILSAFE) &&
	    mach_absolute_time() >= thread->safe_release) {
		if (thread->saved_mode != TH_MODE_TIMESHARE) {
			if (thread->saved_mode == TH_MODE_REALTIME) {
				thread->priority = BASEPRI_RTQUEUES;
			}

			thread->sched_mode = thread->saved_mode;
			thread->saved_mode = TH_MODE_NONE;

			if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
				sched_share_decr();

			if (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK))
				set_sched_pri(thread, thread->priority);
		}

		thread->sched_flags &= ~TH_SFLAG_FAILSAFE;
	}

	/*
	 *	Recompute scheduled priority if appropriate.
	 */
	if ((thread->sched_mode == TH_MODE_TIMESHARE) &&
	    !(thread->sched_flags & TH_SFLAG_PROMOTED) &&
	    !(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)) {
		register int		new_pri;

		do_priority_computation(thread, new_pri);
		if (new_pri != thread->sched_pri) {
			boolean_t		removed = thread_run_queue_remove(thread);

			thread->sched_pri = new_pri;
			if (removed)
				thread_setrun(thread, SCHED_TAILQ);
		}
	}

	return;
}

#endif /* CONFIG_SCHED_TRADITIONAL */