[apple/xnu.git] / osfmk / kern / priority.c (blame, xnu-3789.21.4)
1c79356b 1/*
316670eb 2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55 6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55 15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5 20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55 22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b 27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58/*
fe8ab488 59 * File: priority.c
1c79356b 60 * Author: Avadis Tevanian, Jr.
61 * Date: 1986
62 *
fe8ab488 63 * Priority related scheduler bits.
1c79356b 64 */
65
1c79356b 66#include <mach/boolean.h>
67#include <mach/kern_return.h>
68#include <mach/machine.h>
69#include <kern/host.h>
70#include <kern/mach_param.h>
71#include <kern/sched.h>
6d2010ae 72#include <sys/kdebug.h>
1c79356b 73#include <kern/spl.h>
74#include <kern/thread.h>
75#include <kern/processor.h>
316670eb 76#include <kern/ledger.h>
1c79356b 77#include <machine/machparam.h>
3e170ce0 78#include <kern/machine.h>
1c79356b 79
fe8ab488 80#ifdef CONFIG_MACH_APPROXIMATE_TIME
81#include <machine/commpage.h> /* for commpage_update_mach_approximate_time */
82#endif
83
39037602 84static void sched_update_thread_bucket(thread_t thread);
85
1c79356b 86/*
0b4e3aa0 87 * thread_quantum_expire:
1c79356b 88 *
89 * Recalculate the quantum and priority for a thread.
2d21ac55 90 *
91 * Called at splsched.
1c79356b 92 */
93
94void
0b4e3aa0 95thread_quantum_expire(
96 timer_call_param_t p0,
97 timer_call_param_t p1)
1c79356b 98{
2d21ac55 99 processor_t processor = p0;
100 thread_t thread = p1;
101 ast_t preempt;
39236c6e 102 uint64_t ctime;
3e170ce0 103 int urgency;
104 uint64_t ignore1, ignore2;
1c79356b 105
fe8ab488 106 assert(processor == current_processor());
3e170ce0 107 assert(thread == current_thread());
fe8ab488 108
490019cf 109 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_QUANTUM_EXPIRED) | DBG_FUNC_START, 0, 0, 0, 0, 0);
110
ebb1b9f4 111 SCHED_STATS_QUANTUM_TIMER_EXPIRATION(processor);
112
316670eb 113 /*
114 * We bill CPU time to both the individual thread and its task.
115 *
116 * Because this balance adjustment could potentially attempt to wake this very
117 * thread, we must credit the ledger before taking the thread lock. The ledger
118 * pointers are only manipulated by the thread itself at the ast boundary.
39037602 119 *
120 * TODO: This fails to account for the time between when the timer was armed and when it fired.
121 * It should be based on the system_timer and running a thread_timer_event operation here.
316670eb 122 */
fe8ab488 123 ledger_credit(thread->t_ledger, task_ledgers.cpu_time, thread->quantum_remaining);
124 ledger_credit(thread->t_threadledger, thread_ledgers.cpu_time, thread->quantum_remaining);
125#ifdef CONFIG_BANK
126 if (thread->t_bankledger) {
127 ledger_credit(thread->t_bankledger, bank_ledgers.cpu_time,
128 (thread->quantum_remaining - thread->t_deduct_bank_ledger_time));
129 }
130 thread->t_deduct_bank_ledger_time = 0;
131#endif
132
133 ctime = mach_absolute_time();
316670eb 134
3e170ce0 135#ifdef CONFIG_MACH_APPROXIMATE_TIME
136 commpage_update_mach_approximate_time(ctime);
137#endif
138
0b4e3aa0 139 thread_lock(thread);
1c79356b 140
6d2010ae 141 /*
142 * We've run up until our quantum expiration, and will (potentially)
143 * continue without re-entering the scheduler, so update this now.
144 */
3e170ce0 145 processor->last_dispatch = ctime;
fe8ab488 146 thread->last_run_time = ctime;
316670eb 147
1c79356b 148 /*
9bccf70c 149 * Check for fail-safe trip.
1c79356b 150 */
316670eb 151 if ((thread->sched_mode == TH_MODE_REALTIME || thread->sched_mode == TH_MODE_FIXED) &&
39236c6e 152 !(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) &&
316670eb 153 !(thread->options & TH_OPT_SYSTEM_CRITICAL)) {
154 uint64_t new_computation;
155
fe8ab488 156 new_computation = ctime - thread->computation_epoch;
316670eb 157 new_computation += thread->computation_metered;
158 if (new_computation > max_unsafe_computation) {
6d2010ae 159 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAILSAFE)|DBG_FUNC_NONE,
160 (uintptr_t)thread->sched_pri, (uintptr_t)thread->sched_mode, 0, 0, 0);
0b4e3aa0 161
fe8ab488 162 thread->safe_release = ctime + sched_safe_duration;
1c79356b 163
fe8ab488 164 sched_thread_mode_demote(thread, TH_SFLAG_FAILSAFE);
0b4e3aa0 165 }
166 }
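	/*
	 * Fail-safe summary: a fixed or realtime thread that accumulates more
	 * than max_unsafe_computation of computation is demoted to timeshare
	 * via TH_SFLAG_FAILSAFE; update_priority() undemotes it once
	 * mach_absolute_time() passes safe_release (sched_safe_duration after
	 * the trip), see the fail-safe release check further down.
	 */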
fe8ab488 167
1c79356b 168 /*
9bccf70c 169 * Recompute scheduled priority if appropriate.
1c79356b 170 */
6d2010ae 171 if (SCHED(can_update_priority)(thread))
172 SCHED(update_priority)(thread);
0b4e3aa0 173 else
6d2010ae 174 SCHED(lightweight_update_priority)(thread);
1c79356b 175
3e170ce0 176 if (thread->sched_mode != TH_MODE_REALTIME)
177 SCHED(quantum_expire)(thread);
178
2d21ac55 179 processor->current_pri = thread->sched_pri;
6d2010ae 180 processor->current_thmode = thread->sched_mode;
2d21ac55 181
3e170ce0 182 /* Tell platform layer that we are still running this thread */
183 urgency = thread_get_urgency(thread, &ignore1, &ignore2);
184 machine_thread_going_on_core(thread, urgency, 0);
185
0b4e3aa0 186 /*
187 * This quantum is up, give this thread another.
188 */
3e170ce0 189 processor->first_timeslice = FALSE;
1c79356b 190
55e303ae 191 thread_quantum_init(thread);
6d2010ae 192
316670eb 193 /* Reload precise timing global policy to thread-local policy */
194 thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);
195
196 /*
197 * Since non-precise user/kernel time doesn't update the state/thread timer
198 * during privilege transitions, synthesize an event now.
199 */
200 if (!thread->precise_user_kernel_time) {
201 timer_switch(PROCESSOR_DATA(processor, current_state),
fe8ab488 202 ctime,
316670eb 203 PROCESSOR_DATA(processor, current_state));
204 timer_switch(PROCESSOR_DATA(processor, thread_timer),
fe8ab488 205 ctime,
316670eb 206 PROCESSOR_DATA(processor, thread_timer));
207 }
208
fe8ab488 209 processor->quantum_end = ctime + thread->quantum_remaining;
1c79356b 210
0b4e3aa0 211 /*
2d21ac55 212 * Context switch check.
0b4e3aa0 213 */
fe8ab488 214 if ((preempt = csw_check(processor, AST_QUANTUM)) != AST_NONE)
2d21ac55 215 ast_on(preempt);
2d21ac55 216
217 thread_unlock(thread);
39236c6e 218
3e170ce0 219 timer_call_enter1(&processor->quantum_timer, thread,
220 processor->quantum_end, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
221
fe8ab488 222#if defined(CONFIG_SCHED_TIMESHARE_CORE)
3e170ce0 223 sched_timeshare_consider_maintenance(ctime);
224#endif /* CONFIG_SCHED_TIMESHARE_CORE */
225
490019cf 226
227 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_QUANTUM_EXPIRED) | DBG_FUNC_END, preempt, 0, 0, 0, 0);
fe8ab488 228}
229
230/*
231 * sched_set_thread_base_priority:
232 *
233 * Set the base priority of the thread
234 * and reset its scheduled priority.
235 *
3e170ce0 236 * This is the only path to change base_pri.
237 *
fe8ab488 238 * Called with the thread locked.
239 */
240void
241sched_set_thread_base_priority(thread_t thread, int priority)
242{
39037602 243 assert(priority >= MINPRI);
244
245 if (thread->sched_mode == TH_MODE_REALTIME)
246 assert(priority <= BASEPRI_RTQUEUES);
247 else
248 assert(priority < BASEPRI_RTQUEUES);
249
3e170ce0 250 thread->base_pri = priority;
251
39037602 252 sched_update_thread_bucket(thread);
490019cf 253
3e170ce0 254 thread_recompute_sched_pri(thread, FALSE);
1c79356b 255}
91447636 256
3e170ce0 257/*
258 * thread_recompute_sched_pri:
259 *
260 * Reset the scheduled priority of the thread
261 * according to its base priority if the
262 * thread has not been promoted or depressed.
263 *
264 * This is the standard way to push base_pri changes into sched_pri,
265 * or to recalculate the appropriate sched_pri after clearing
266 * a promotion or depression.
267 *
268 * Called at splsched with the thread locked.
269 */
270void
271thread_recompute_sched_pri(
272 thread_t thread,
273 boolean_t override_depress)
274{
275 int priority;
276
277 if (thread->sched_mode == TH_MODE_TIMESHARE)
278 priority = SCHED(compute_timeshare_priority)(thread);
279 else
280 priority = thread->base_pri;
fe8ab488 281
3e170ce0 282 if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) || (priority > thread->sched_pri)) &&
283 (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) || override_depress)) {
284 set_sched_pri(thread, priority);
285 }
286}
6d2010ae 287
288void
3e170ce0 289sched_default_quantum_expire(thread_t thread __unused)
6d2010ae 290{
3e170ce0 291 /*
292 * No special behavior when a timeshare, fixed, or realtime thread
293 * uses up its entire quantum
294 */
6d2010ae 295}
296
3e170ce0 297#if defined(CONFIG_SCHED_TIMESHARE_CORE)
298
299/*
300 * lightweight_update_priority:
301 *
302 * Update the scheduled priority for
303 * a timesharing thread.
304 *
305 * Only for use on the current thread.
306 *
307 * Called with the thread locked.
308 */
6d2010ae 309void
310lightweight_update_priority(thread_t thread)
311{
3e170ce0 312 assert(thread->runq == PROCESSOR_NULL);
313 assert(thread == current_thread());
314
6d2010ae 315 if (thread->sched_mode == TH_MODE_TIMESHARE) {
3e170ce0 316 int priority;
317 uint32_t delta;
318
6d2010ae 319 thread_timer_delta(thread, delta);
3e170ce0 320
6d2010ae 321 /*
322 * Accumulate timesharing usage only
323 * during contention for processor
324 * resources.
325 */
326 if (thread->pri_shift < INT8_MAX)
327 thread->sched_usage += delta;
3e170ce0 328
6d2010ae 329 thread->cpu_delta += delta;
3e170ce0 330
331 priority = sched_compute_timeshare_priority(thread);
332
6d2010ae 333 /*
3e170ce0 334 * Adjust the scheduled priority like thread_recompute_sched_pri,
335 * except with the benefit of knowing the thread is on this core.
6d2010ae 336 */
3e170ce0 337 if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) || (priority > thread->sched_pri)) &&
338 (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)) &&
339 priority != thread->sched_pri) {
340
341 thread->sched_pri = priority;
342
343 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
344 (uintptr_t)thread_tid(thread),
345 thread->base_pri,
346 thread->sched_pri,
347 0, /* eventually, 'reason' */
348 0);
349 }
350 }
6d2010ae 351}
352
91447636 353/*
354 * Define shifts for simulating (5/8) ** n
355 *
356 * Shift structures for holding update shifts. Actual computation
357 * is usage = (usage >> shift1) +/- (usage >> abs(shift2)) where the
358 * +/- is determined by the sign of shift 2.
359 */
360struct shift_data {
361 int shift1;
362 int shift2;
363};
364
365#define SCHED_DECAY_TICKS 32
366static struct shift_data sched_decay_shifts[SCHED_DECAY_TICKS] = {
367 {1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7},
368 {5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13},
369 {11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18},
370 {16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}
371};
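/*
 * Worked example of the (5/8) ** n approximation (illustrative arithmetic):
 * sched_decay_shifts[1] = {1,3} yields
 *     usage = (usage >> 1) + (usage >> 3) = 0.625 * usage,
 * which is exactly 5/8; sched_decay_shifts[2] = {1,-3} yields
 *     usage = (usage >> 1) - (usage >> 3) = 0.375 * usage,
 * approximating (5/8)^2 = 0.390625. Entry 0, {1,1}, leaves usage unchanged.
 */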
372
373/*
3e170ce0 374 * sched_compute_timeshare_priority:
91447636 375 *
376 * Calculate the timesharing priority based upon usage and load.
377 */
fe8ab488 378extern int sched_pri_decay_band_limit;
d1ecb069 379
91447636 380
3e170ce0 381int
382sched_compute_timeshare_priority(thread_t thread)
91447636 383{
3e170ce0 384 /* start with base priority */
385 int priority = thread->base_pri - (thread->sched_usage >> thread->pri_shift);
91447636 386
3e170ce0 387 if (priority < MINPRI_USER)
388 priority = MINPRI_USER;
389 else if (priority > MAXPRI_KERNEL)
390 priority = MAXPRI_KERNEL;
91447636 391
3e170ce0 392 return priority;
91447636 393}
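/*
 * Example with illustrative numbers: a timeshare thread with base_pri 31 whose
 * decayed usage gives (sched_usage >> pri_shift) == 6 is scheduled at priority 25.
 * The result is always clamped into the [MINPRI_USER, MAXPRI_KERNEL] band, so
 * accumulated usage can never push the scheduled priority below MINPRI_USER.
 */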
394
91447636 395
6d2010ae 396/*
397 * can_update_priority
398 *
399 * Make sure we don't do re-dispatches more frequently than a scheduler tick.
400 *
401 * Called with the thread locked.
402 */
403boolean_t
404can_update_priority(
405 thread_t thread)
406{
407 if (sched_tick == thread->sched_stamp)
408 return (FALSE);
409 else
410 return (TRUE);
411}
412
91447636 413/*
414 * update_priority
415 *
416 * Perform housekeeping operations driven by scheduler tick.
417 *
418 * Called with the thread locked.
419 */
420void
421update_priority(
39037602 422 thread_t thread)
91447636 423{
39037602 424 uint32_t ticks, delta;
91447636 425
426 ticks = sched_tick - thread->sched_stamp;
427 assert(ticks != 0);
39037602 428
91447636 429 thread->sched_stamp += ticks;
39037602 430
431 thread->pri_shift = sched_pri_shifts[thread->th_sched_bucket];
39236c6e 432
433 /* If requested, accelerate aging of sched_usage */
434 if (sched_decay_usage_age_factor > 1)
435 ticks *= sched_decay_usage_age_factor;
91447636 436
437 /*
438 * Gather cpu usage data.
439 */
440 thread_timer_delta(thread, delta);
441 if (ticks < SCHED_DECAY_TICKS) {
91447636 442 /*
443 * Accumulate timesharing usage only
444 * during contention for processor
445 * resources.
446 */
447 if (thread->pri_shift < INT8_MAX)
448 thread->sched_usage += delta;
449
450 thread->cpu_usage += delta + thread->cpu_delta;
451 thread->cpu_delta = 0;
452
39037602 453 struct shift_data *shiftp = &sched_decay_shifts[ticks];
454
91447636 455 if (shiftp->shift2 > 0) {
39037602 456 thread->cpu_usage = (thread->cpu_usage >> shiftp->shift1) +
457 (thread->cpu_usage >> shiftp->shift2);
458 thread->sched_usage = (thread->sched_usage >> shiftp->shift1) +
459 (thread->sched_usage >> shiftp->shift2);
460 } else {
461 thread->cpu_usage = (thread->cpu_usage >> shiftp->shift1) -
462 (thread->cpu_usage >> -(shiftp->shift2));
463 thread->sched_usage = (thread->sched_usage >> shiftp->shift1) -
464 (thread->sched_usage >> -(shiftp->shift2));
91447636 465 }
39037602 466 } else {
91447636 467 thread->cpu_usage = thread->cpu_delta = 0;
468 thread->sched_usage = 0;
469 }
470
471 /*
472 * Check for fail-safe release.
473 */
fe8ab488 474 if ((thread->sched_flags & TH_SFLAG_FAILSAFE) &&
475 mach_absolute_time() >= thread->safe_release) {
476 sched_thread_mode_undemote(thread, TH_SFLAG_FAILSAFE);
91447636 477 }
478
479 /*
480 * Recompute scheduled priority if appropriate.
481 */
3e170ce0 482 if (thread->sched_mode == TH_MODE_TIMESHARE) {
483 int priority = sched_compute_timeshare_priority(thread);
484
485 /*
486 * Adjust the scheduled priority like thread_recompute_sched_pri,
487 * except without setting an AST.
488 */
489 if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) || (priority > thread->sched_pri)) &&
490 (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)) &&
491 priority != thread->sched_pri) {
492
493 boolean_t removed = thread_run_queue_remove(thread);
494
495 thread->sched_pri = priority;
39236c6e 496
3e170ce0 497 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
498 (uintptr_t)thread_tid(thread),
499 thread->base_pri,
500 thread->sched_pri,
501 0, /* eventually, 'reason' */
502 0);
39236c6e 503
2d21ac55 504 if (removed)
3e170ce0 505 thread_run_queue_reinsert(thread, SCHED_TAILQ);
91447636 506 }
507 }
3e170ce0 508
6d2010ae 509 return;
91447636 510}
6d2010ae 511
fe8ab488 512#endif /* CONFIG_SCHED_TIMESHARE_CORE */
513
fe8ab488 514
39037602 515/*
516 * TH_BUCKET_RUN is a count of *all* runnable non-idle threads.
517 * Each other bucket is a count of the runnable non-idle threads
518 * with that property.
519 */
520volatile uint32_t sched_run_buckets[TH_BUCKET_MAX];
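/*
 * At steady state the per-bucket counts (TH_BUCKET_FIXPRI through
 * TH_BUCKET_SHARE_BG) sum to sched_run_buckets[TH_BUCKET_RUN]; the sum can be
 * transiently inconsistent because sched_run_incr()/sched_run_decr() update
 * the total and the per-bucket counter with two separate atomic operations.
 */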
521
522static void
523sched_incr_bucket(sched_bucket_t bucket)
524{
525 assert(bucket >= TH_BUCKET_FIXPRI &&
526 bucket <= TH_BUCKET_SHARE_BG);
527
528 hw_atomic_add(&sched_run_buckets[bucket], 1);
fe8ab488 529}
530
39037602 531static void
532sched_decr_bucket(sched_bucket_t bucket)
533{
534 assert(bucket >= TH_BUCKET_FIXPRI &&
535 bucket <= TH_BUCKET_SHARE_BG);
536
537 assert(sched_run_buckets[bucket] > 0);
538
539 hw_atomic_sub(&sched_run_buckets[bucket], 1);
fe8ab488 540}
541
39037602 542/* TH_RUN & !TH_IDLE controls whether a thread has a run count */
fe8ab488 543
39037602 544uint32_t
545sched_run_incr(thread_t thread)
546{
fe8ab488 547 assert((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN);
fe8ab488 548
39037602 549 uint32_t new_count = hw_atomic_add(&sched_run_buckets[TH_BUCKET_RUN], 1);
fe8ab488 550
39037602 551 sched_incr_bucket(thread->th_sched_bucket);
fe8ab488 552
39037602 553 return new_count;
fe8ab488 554}
555
39037602 556uint32_t
557sched_run_decr(thread_t thread)
558{
559 assert((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN);
fe8ab488 560
39037602 561 sched_decr_bucket(thread->th_sched_bucket);
562
563 uint32_t new_count = hw_atomic_sub(&sched_run_buckets[TH_BUCKET_RUN], 1);
564
565 return new_count;
fe8ab488 566}
567
39037602 568static void
569sched_update_thread_bucket(thread_t thread)
570{
571 sched_bucket_t old_bucket = thread->th_sched_bucket;
572 sched_bucket_t new_bucket = TH_BUCKET_RUN;
573
574 switch (thread->sched_mode) {
575 case TH_MODE_FIXED:
576 case TH_MODE_REALTIME:
577 new_bucket = TH_BUCKET_FIXPRI;
578 break;
579
580 case TH_MODE_TIMESHARE:
581 if (thread->base_pri > BASEPRI_UTILITY)
582 new_bucket = TH_BUCKET_SHARE_FG;
583 else if (thread->base_pri > MAXPRI_THROTTLE)
584 new_bucket = TH_BUCKET_SHARE_UT;
585 else
586 new_bucket = TH_BUCKET_SHARE_BG;
587 break;
588
589 default:
590 panic("unexpected mode: %d", thread->sched_mode);
591 break;
592 }
593
594 if (old_bucket != new_bucket) {
595 thread->th_sched_bucket = new_bucket;
596 thread->pri_shift = sched_pri_shifts[new_bucket];
597
598 if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) {
599 sched_decr_bucket(old_bucket);
600 sched_incr_bucket(new_bucket);
601 }
602 }
603}
fe8ab488 604
605/*
606 * Set the thread's true scheduling mode
607 * Called with thread mutex and thread locked
608 * The thread has already been removed from the runqueue.
609 *
610 * (saved_mode is handled before this point)
611 */
612void
613sched_set_thread_mode(thread_t thread, sched_mode_t new_mode)
614{
3e170ce0 615 assert(thread->runq == PROCESSOR_NULL);
fe8ab488 616
fe8ab488 617 switch (new_mode) {
39037602 618 case TH_MODE_FIXED:
619 case TH_MODE_REALTIME:
620 case TH_MODE_TIMESHARE:
621 break;
622
623 default:
624 panic("unexpected mode: %d", new_mode);
625 break;
fe8ab488 626 }
627
39037602 628 thread->sched_mode = new_mode;
629
630 sched_update_thread_bucket(thread);
fe8ab488 631}
632
633/*
634 * Demote the true scheduler mode to timeshare (called with the thread locked)
635 */
636void
637sched_thread_mode_demote(thread_t thread, uint32_t reason)
638{
639 assert(reason & TH_SFLAG_DEMOTED_MASK);
640 assert((thread->sched_flags & reason) != reason);
fe8ab488 641
642 if (thread->policy_reset)
643 return;
644
645 if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
646 /* Another demotion reason is already active */
647 thread->sched_flags |= reason;
648 return;
649 }
650
651 assert(thread->saved_mode == TH_MODE_NONE);
652
653 boolean_t removed = thread_run_queue_remove(thread);
654
fe8ab488 655 thread->sched_flags |= reason;
656
657 thread->saved_mode = thread->sched_mode;
658
659 sched_set_thread_mode(thread, TH_MODE_TIMESHARE);
660
3e170ce0 661 thread_recompute_priority(thread);
662
fe8ab488 663 if (removed)
3e170ce0 664 thread_run_queue_reinsert(thread, SCHED_TAILQ);
fe8ab488 665}
666
667/*
668 * Un-demote the true scheduler mode back to the saved mode (called with the thread locked)
669 */
670void
671sched_thread_mode_undemote(thread_t thread, uint32_t reason)
672{
673 assert(reason & TH_SFLAG_DEMOTED_MASK);
674 assert((thread->sched_flags & reason) == reason);
675 assert(thread->saved_mode != TH_MODE_NONE);
676 assert(thread->sched_mode == TH_MODE_TIMESHARE);
677 assert(thread->policy_reset == 0);
678
fe8ab488 679 thread->sched_flags &= ~reason;
680
681 if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
682 /* Another demotion reason is still active */
683 return;
684 }
685
686 boolean_t removed = thread_run_queue_remove(thread);
687
688 sched_set_thread_mode(thread, thread->saved_mode);
689
690 thread->saved_mode = TH_MODE_NONE;
691
3e170ce0 692 thread_recompute_priority(thread);
fe8ab488 693
694 if (removed)
3e170ce0 695 thread_run_queue_reinsert(thread, SCHED_TAILQ);
fe8ab488 696}
697
fe8ab488 698