]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/priority.c
xnu-2050.48.11.tar.gz
[apple/xnu.git] / osfmk / kern / priority.c
CommitLineData
1c79356b 1/*
316670eb 2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58/*
59 * File: clock_prim.c
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1986
62 *
63 * Clock primitives.
64 */
65
1c79356b
A
66#include <mach/boolean.h>
67#include <mach/kern_return.h>
68#include <mach/machine.h>
69#include <kern/host.h>
70#include <kern/mach_param.h>
71#include <kern/sched.h>
6d2010ae 72#include <sys/kdebug.h>
1c79356b
A
73#include <kern/spl.h>
74#include <kern/thread.h>
75#include <kern/processor.h>
316670eb 76#include <kern/ledger.h>
1c79356b 77#include <machine/machparam.h>
1c79356b
A
78
79/*
0b4e3aa0 80 * thread_quantum_expire:
1c79356b
A
81 *
82 * Recalculate the quantum and priority for a thread.
2d21ac55
A
83 *
84 * Called at splsched.
1c79356b
A
85 */
86
87void
0b4e3aa0
A
88thread_quantum_expire(
89 timer_call_param_t p0,
90 timer_call_param_t p1)
1c79356b 91{
2d21ac55
A
92 processor_t processor = p0;
93 thread_t thread = p1;
94 ast_t preempt;
1c79356b 95
ebb1b9f4
A
96 SCHED_STATS_QUANTUM_TIMER_EXPIRATION(processor);
97
316670eb
A
98 /*
99 * We bill CPU time to both the individual thread and its task.
100 *
101 * Because this balance adjustment could potentially attempt to wake this very
102 * thread, we must credit the ledger before taking the thread lock. The ledger
103 * pointers are only manipulated by the thread itself at the ast boundary.
104 */
105 ledger_credit(thread->t_ledger, task_ledgers.cpu_time, thread->current_quantum);
106 ledger_credit(thread->t_threadledger, thread_ledgers.cpu_time, thread->current_quantum);
107
0b4e3aa0 108 thread_lock(thread);
1c79356b 109
6d2010ae
A
110 /*
111 * We've run up until our quantum expiration, and will (potentially)
112 * continue without re-entering the scheduler, so update this now.
113 */
114 thread->last_run_time = processor->quantum_end;
316670eb 115
1c79356b 116 /*
9bccf70c 117 * Check for fail-safe trip.
1c79356b 118 */
316670eb
A
119 if ((thread->sched_mode == TH_MODE_REALTIME || thread->sched_mode == TH_MODE_FIXED) &&
120 !(thread->sched_flags & TH_SFLAG_PROMOTED) &&
121 !(thread->options & TH_OPT_SYSTEM_CRITICAL)) {
122 uint64_t new_computation;
123
124 new_computation = processor->quantum_end - thread->computation_epoch;
125 new_computation += thread->computation_metered;
126 if (new_computation > max_unsafe_computation) {
6d2010ae
A
127 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAILSAFE)|DBG_FUNC_NONE,
128 (uintptr_t)thread->sched_pri, (uintptr_t)thread->sched_mode, 0, 0, 0);
0b4e3aa0 129
6d2010ae
A
130 if (thread->sched_mode == TH_MODE_REALTIME) {
131 thread->priority = DEPRESSPRI;
132 }
133
134 thread->saved_mode = thread->sched_mode;
135
136 if (SCHED(supports_timeshare_mode)) {
137 sched_share_incr();
138 thread->sched_mode = TH_MODE_TIMESHARE;
139 } else {
140 /* XXX handle fixed->fixed case */
141 thread->sched_mode = TH_MODE_FIXED;
0b4e3aa0 142 }
1c79356b 143
6d2010ae
A
144 thread->safe_release = processor->quantum_end + sched_safe_duration;
145 thread->sched_flags |= TH_SFLAG_FAILSAFE;
0b4e3aa0
A
146 }
147 }
1c79356b
A
148
149 /*
9bccf70c 150 * Recompute scheduled priority if appropriate.
1c79356b 151 */
6d2010ae
A
152 if (SCHED(can_update_priority)(thread))
153 SCHED(update_priority)(thread);
0b4e3aa0 154 else
6d2010ae 155 SCHED(lightweight_update_priority)(thread);
1c79356b 156
6d2010ae
A
157 SCHED(quantum_expire)(thread);
158
2d21ac55 159 processor->current_pri = thread->sched_pri;
6d2010ae 160 processor->current_thmode = thread->sched_mode;
2d21ac55 161
0b4e3aa0
A
162 /*
163 * This quantum is up, give this thread another.
164 */
2d21ac55
A
165 if (first_timeslice(processor))
166 processor->timeslice--;
1c79356b 167
55e303ae 168 thread_quantum_init(thread);
6d2010ae
A
169 thread->last_quantum_refill_time = processor->quantum_end;
170
316670eb
A
171 /* Reload precise timing global policy to thread-local policy */
172 thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);
173
174 /*
175 * Since non-precise user/kernel time doesn't update the state/thread timer
176 * during privilege transitions, synthesize an event now.
177 */
178 if (!thread->precise_user_kernel_time) {
179 timer_switch(PROCESSOR_DATA(processor, current_state),
180 processor->quantum_end,
181 PROCESSOR_DATA(processor, current_state));
182 timer_switch(PROCESSOR_DATA(processor, thread_timer),
183 processor->quantum_end,
184 PROCESSOR_DATA(processor, thread_timer));
185 }
186
187 processor->quantum_end = mach_absolute_time() + thread->current_quantum;
ebb1b9f4
A
188 timer_call_enter1(&processor->quantum_timer, thread,
189 processor->quantum_end, TIMER_CALL_CRITICAL);
1c79356b 190
0b4e3aa0 191 /*
2d21ac55 192 * Context switch check.
0b4e3aa0 193 */
c910b4d9 194 if ((preempt = csw_check(processor)) != AST_NONE)
2d21ac55
A
195 ast_on(preempt);
196 else {
197 processor_set_t pset = processor->processor_set;
9bccf70c 198
2d21ac55
A
199 pset_lock(pset);
200
cf7d32b8 201 pset_pri_hint(pset, processor, processor->current_pri);
6d2010ae 202 pset_count_hint(pset, processor, SCHED(processor_runq_count)(processor));
2d21ac55
A
203
204 pset_unlock(pset);
205 }
206
207 thread_unlock(thread);
1c79356b 208}
91447636 209
6d2010ae
A
210#if defined(CONFIG_SCHED_TRADITIONAL)
211
212void
213sched_traditional_quantum_expire(thread_t thread __unused)
214{
215 /*
216 * No special behavior when a timeshare, fixed, or realtime thread
217 * uses up its entire quantum
218 */
219}
220
221void
222lightweight_update_priority(thread_t thread)
223{
224 if (thread->sched_mode == TH_MODE_TIMESHARE) {
225 register uint32_t delta;
226
227 thread_timer_delta(thread, delta);
228
229 /*
230 * Accumulate timesharing usage only
231 * during contention for processor
232 * resources.
233 */
234 if (thread->pri_shift < INT8_MAX)
235 thread->sched_usage += delta;
236
237 thread->cpu_delta += delta;
238
239 /*
240 * Adjust the scheduled priority if
241 * the thread has not been promoted
242 * and is not depressed.
243 */
244 if ( !(thread->sched_flags & TH_SFLAG_PROMOTED) &&
245 !(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) )
246 compute_my_priority(thread);
247 }
248}
249
91447636
A
/*
 *	Shift table for simulating usage decay by (5/8) ** n.
 *
 *	Each entry drives the computation
 *	    usage = (usage >> shift1) +/- (usage >> abs(shift2))
 *	where the +/- is selected by the sign of shift2.
 */
struct shift_data {
	int	shift1;
	int	shift2;
};

#define SCHED_DECAY_TICKS	32
static struct shift_data	sched_decay_shifts[SCHED_DECAY_TICKS] = {
	{ 1,   1}, { 1,   3}, { 1,  -3}, { 2,  -7},
	{ 3,   5}, { 3,  -5}, { 4,  -8}, { 5,   7},
	{ 5,  -7}, { 6, -10}, { 7,  10}, { 7,  -9},
	{ 8, -11}, { 9,  12}, { 9, -11}, {10, -13},
	{11,  14}, {11, -13}, {12, -15}, {13,  17},
	{13, -15}, {14, -17}, {15,  19}, {16,  18},
	{16, -19}, {17,  22}, {18,  20}, {18, -20},
	{19,  26}, {20,  22}, {20, -22}, {21, -27}
};
269
270/*
271 * do_priority_computation:
272 *
273 * Calculate the timesharing priority based upon usage and load.
274 */
d1ecb069
A
275#ifdef CONFIG_EMBEDDED
276
277#define do_priority_computation(thread, pri) \
278 MACRO_BEGIN \
279 (pri) = (thread)->priority /* start with base priority */ \
280 - ((thread)->sched_usage >> (thread)->pri_shift); \
281 if ((pri) < MAXPRI_THROTTLE) { \
282 if ((thread)->task->max_priority > MAXPRI_THROTTLE) \
283 (pri) = MAXPRI_THROTTLE; \
284 else \
285 if ((pri) < MINPRI_USER) \
286 (pri) = MINPRI_USER; \
287 } else \
288 if ((pri) > MAXPRI_KERNEL) \
289 (pri) = MAXPRI_KERNEL; \
290 MACRO_END
291
292#else
293
91447636
A
294#define do_priority_computation(thread, pri) \
295 MACRO_BEGIN \
296 (pri) = (thread)->priority /* start with base priority */ \
297 - ((thread)->sched_usage >> (thread)->pri_shift); \
298 if ((pri) < MINPRI_USER) \
299 (pri) = MINPRI_USER; \
300 else \
301 if ((pri) > MAXPRI_KERNEL) \
302 (pri) = MAXPRI_KERNEL; \
303 MACRO_END
304
6d2010ae
A
305#endif /* defined(CONFIG_SCHED_TRADITIONAL) */
306
d1ecb069
A
307#endif
308
91447636
A
309/*
310 * set_priority:
311 *
312 * Set the base priority of the thread
313 * and reset its scheduled priority.
314 *
315 * Called with the thread locked.
316 */
317void
318set_priority(
319 register thread_t thread,
320 register int priority)
321{
322 thread->priority = priority;
6d2010ae 323 SCHED(compute_priority)(thread, FALSE);
91447636
A
324}
325
6d2010ae
A
326#if defined(CONFIG_SCHED_TRADITIONAL)
327
91447636
A
328/*
329 * compute_priority:
330 *
331 * Reset the scheduled priority of the thread
332 * according to its base priority if the
333 * thread has not been promoted or depressed.
334 *
335 * Called with the thread locked.
336 */
337void
338compute_priority(
339 register thread_t thread,
340 boolean_t override_depress)
341{
342 register int priority;
343
6d2010ae
A
344 if ( !(thread->sched_flags & TH_SFLAG_PROMOTED) &&
345 (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) ||
91447636 346 override_depress ) ) {
6d2010ae 347 if (thread->sched_mode == TH_MODE_TIMESHARE)
91447636
A
348 do_priority_computation(thread, priority);
349 else
350 priority = thread->priority;
351
352 set_sched_pri(thread, priority);
353 }
354}
355
356/*
357 * compute_my_priority:
358 *
359 * Reset the scheduled priority for
360 * a timesharing thread.
361 *
362 * Only for use on the current thread
363 * if timesharing and not depressed.
364 *
365 * Called with the thread locked.
366 */
367void
368compute_my_priority(
369 register thread_t thread)
370{
371 register int priority;
372
373 do_priority_computation(thread, priority);
2d21ac55 374 assert(thread->runq == PROCESSOR_NULL);
91447636
A
375 thread->sched_pri = priority;
376}
377
6d2010ae
A
378/*
379 * can_update_priority
380 *
381 * Make sure we don't do re-dispatches more frequently than a scheduler tick.
382 *
383 * Called with the thread locked.
384 */
385boolean_t
386can_update_priority(
387 thread_t thread)
388{
389 if (sched_tick == thread->sched_stamp)
390 return (FALSE);
391 else
392 return (TRUE);
393}
394
91447636
A
395/*
396 * update_priority
397 *
398 * Perform housekeeping operations driven by scheduler tick.
399 *
400 * Called with the thread locked.
401 */
402void
403update_priority(
404 register thread_t thread)
405{
406 register unsigned ticks;
407 register uint32_t delta;
408
409 ticks = sched_tick - thread->sched_stamp;
410 assert(ticks != 0);
411 thread->sched_stamp += ticks;
2d21ac55 412 thread->pri_shift = sched_pri_shift;
91447636
A
413
414 /*
415 * Gather cpu usage data.
416 */
417 thread_timer_delta(thread, delta);
418 if (ticks < SCHED_DECAY_TICKS) {
419 register struct shift_data *shiftp;
420
421 /*
422 * Accumulate timesharing usage only
423 * during contention for processor
424 * resources.
425 */
426 if (thread->pri_shift < INT8_MAX)
427 thread->sched_usage += delta;
428
429 thread->cpu_usage += delta + thread->cpu_delta;
430 thread->cpu_delta = 0;
431
432 shiftp = &sched_decay_shifts[ticks];
433 if (shiftp->shift2 > 0) {
434 thread->cpu_usage =
435 (thread->cpu_usage >> shiftp->shift1) +
436 (thread->cpu_usage >> shiftp->shift2);
437 thread->sched_usage =
438 (thread->sched_usage >> shiftp->shift1) +
439 (thread->sched_usage >> shiftp->shift2);
440 }
441 else {
442 thread->cpu_usage =
443 (thread->cpu_usage >> shiftp->shift1) -
444 (thread->cpu_usage >> -(shiftp->shift2));
445 thread->sched_usage =
446 (thread->sched_usage >> shiftp->shift1) -
447 (thread->sched_usage >> -(shiftp->shift2));
448 }
449 }
450 else {
451 thread->cpu_usage = thread->cpu_delta = 0;
452 thread->sched_usage = 0;
453 }
454
455 /*
456 * Check for fail-safe release.
457 */
6d2010ae
A
458 if ( (thread->sched_flags & TH_SFLAG_FAILSAFE) &&
459 mach_absolute_time() >= thread->safe_release ) {
460 if (thread->saved_mode != TH_MODE_TIMESHARE) {
461 if (thread->saved_mode == TH_MODE_REALTIME) {
91447636 462 thread->priority = BASEPRI_RTQUEUES;
91447636
A
463 }
464
6d2010ae
A
465 thread->sched_mode = thread->saved_mode;
466 thread->saved_mode = TH_MODE_NONE;
91447636 467
2d21ac55
A
468 if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
469 sched_share_decr();
91447636 470
6d2010ae 471 if (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK))
91447636
A
472 set_sched_pri(thread, thread->priority);
473 }
474
6d2010ae 475 thread->sched_flags &= ~TH_SFLAG_FAILSAFE;
91447636
A
476 }
477
316670eb
A
478#if CONFIG_EMBEDDED
479 /* Check for pending throttle transitions, and safely switch queues */
480 if (thread->sched_flags & TH_SFLAG_PENDING_THROTTLE_MASK) {
481 boolean_t removed = thread_run_queue_remove(thread);
482
483 if (thread->sched_flags & TH_SFLAG_PENDING_THROTTLE_DEMOTION) {
484 if (thread->sched_mode == TH_MODE_REALTIME) {
485 thread->saved_mode = thread->sched_mode;
486 thread->sched_mode = TH_MODE_TIMESHARE;
487
488 if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
489 sched_share_incr();
490 } else {
491 /*
492 * It's possible that this is a realtime thread that has
493 * already tripped the failsafe, in which case saved_mode
494 * is already set correctly.
495 */
496 if (!(thread->sched_flags & TH_SFLAG_FAILSAFE)) {
497 thread->saved_mode = thread->sched_mode;
498 }
499 thread->sched_flags &= ~TH_SFLAG_FAILSAFE;
500 }
501 thread->sched_flags |= TH_SFLAG_THROTTLED;
502
503 } else {
504 if ((thread->sched_mode == TH_MODE_TIMESHARE)
505 && (thread->saved_mode == TH_MODE_REALTIME)) {
506 if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
507 sched_share_decr();
508 }
509
510 thread->sched_mode = thread->saved_mode;
511 thread->saved_mode = TH_MODE_NONE;
512 thread->sched_flags &= ~TH_SFLAG_THROTTLED;
513 }
514
515 thread->sched_flags &= ~(TH_SFLAG_PENDING_THROTTLE_MASK);
516
517 if (removed)
518 thread_setrun(thread, SCHED_TAILQ);
519 }
520#endif
521
91447636
A
522 /*
523 * Recompute scheduled priority if appropriate.
524 */
6d2010ae
A
525 if ( (thread->sched_mode == TH_MODE_TIMESHARE) &&
526 !(thread->sched_flags & TH_SFLAG_PROMOTED) &&
527 !(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) ) {
91447636
A
528 register int new_pri;
529
530 do_priority_computation(thread, new_pri);
531 if (new_pri != thread->sched_pri) {
6d2010ae 532 boolean_t removed = thread_run_queue_remove(thread);
91447636 533
91447636 534 thread->sched_pri = new_pri;
2d21ac55 535 if (removed)
91447636
A
536 thread_setrun(thread, SCHED_TAILQ);
537 }
538 }
6d2010ae
A
539
540 return;
91447636 541}
6d2010ae
A
542
543#endif /* CONFIG_SCHED_TRADITIONAL */