/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	priority.c
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1986
 *
 *	Priority related scheduler bits.
 */

#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <kern/host.h>
#include <kern/mach_param.h>
#include <kern/sched.h>
#include <sys/kdebug.h>
#include <kern/spl.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/ledger.h>
#include <machine/machparam.h>
#include <kern/machine.h>

#ifdef CONFIG_MACH_APPROXIMATE_TIME
#include <machine/commpage.h> /* for commpage_update_mach_approximate_time */
#endif

/*
 *	thread_quantum_expire:
 *
 *	Recalculate the quantum and priority for a thread.
 *
 *	Called at splsched.
 */

void
thread_quantum_expire(
	timer_call_param_t	p0,
	timer_call_param_t	p1)
{
	processor_t	processor = p0;
	thread_t	thread = p1;
	ast_t		preempt;
	uint64_t	ctime;
	int		urgency;
	uint64_t	ignore1, ignore2;

	assert(processor == current_processor());
	assert(thread == current_thread());

	SCHED_STATS_QUANTUM_TIMER_EXPIRATION(processor);

	/*
	 * We bill CPU time to both the individual thread and its task.
	 *
	 * Because this balance adjustment could potentially attempt to wake this very
	 * thread, we must credit the ledger before taking the thread lock. The ledger
	 * pointers are only manipulated by the thread itself at the ast boundary.
	 */
	ledger_credit(thread->t_ledger, task_ledgers.cpu_time, thread->quantum_remaining);
	ledger_credit(thread->t_threadledger, thread_ledgers.cpu_time, thread->quantum_remaining);
#ifdef CONFIG_BANK
	if (thread->t_bankledger) {
		ledger_credit(thread->t_bankledger, bank_ledgers.cpu_time,
			(thread->quantum_remaining - thread->t_deduct_bank_ledger_time));
	}
	thread->t_deduct_bank_ledger_time = 0;
#endif

	ctime = mach_absolute_time();

#ifdef CONFIG_MACH_APPROXIMATE_TIME
	commpage_update_mach_approximate_time(ctime);
#endif

	thread_lock(thread);

	/*
	 * We've run up until our quantum expiration, and will (potentially)
	 * continue without re-entering the scheduler, so update this now.
	 */
	processor->last_dispatch = ctime;
	thread->last_run_time = ctime;

	/*
	 *	Check for fail-safe trip.
	 */
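	/*
	 * A realtime or fixed-priority thread (not promoted and not
	 * system-critical) that has been computing for longer than
	 * max_unsafe_computation is demoted to timeshare until safe_release;
	 * update_priority() lifts the demotion once that deadline passes.
	 */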
	if ((thread->sched_mode == TH_MODE_REALTIME || thread->sched_mode == TH_MODE_FIXED) &&
	    !(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) &&
	    !(thread->options & TH_OPT_SYSTEM_CRITICAL)) {
		uint64_t new_computation;

		new_computation = ctime - thread->computation_epoch;
		new_computation += thread->computation_metered;
		if (new_computation > max_unsafe_computation) {
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAILSAFE)|DBG_FUNC_NONE,
					(uintptr_t)thread->sched_pri, (uintptr_t)thread->sched_mode, 0, 0, 0);

			thread->safe_release = ctime + sched_safe_duration;

			sched_thread_mode_demote(thread, TH_SFLAG_FAILSAFE);
		}
	}

	/*
	 *	Recompute scheduled priority if appropriate.
	 */
	if (SCHED(can_update_priority)(thread))
		SCHED(update_priority)(thread);
	else
		SCHED(lightweight_update_priority)(thread);

	if (thread->sched_mode != TH_MODE_REALTIME)
		SCHED(quantum_expire)(thread);

	processor->current_pri = thread->sched_pri;
	processor->current_thmode = thread->sched_mode;

	/* Tell platform layer that we are still running this thread */
	urgency = thread_get_urgency(thread, &ignore1, &ignore2);
	machine_thread_going_on_core(thread, urgency, 0);

	/*
	 *	This quantum is up, give this thread another.
	 */
	processor->first_timeslice = FALSE;

	thread_quantum_init(thread);

	/* Reload precise timing global policy to thread-local policy */
	thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);

	/*
	 * Since non-precise user/kernel time doesn't update the state/thread timer
	 * during privilege transitions, synthesize an event now.
	 */
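	/*
	 * Switching each timer onto itself records the time accumulated up to
	 * ctime without changing which timer is active.
	 */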
	if (!thread->precise_user_kernel_time) {
		timer_switch(PROCESSOR_DATA(processor, current_state),
					ctime,
					PROCESSOR_DATA(processor, current_state));
		timer_switch(PROCESSOR_DATA(processor, thread_timer),
					ctime,
					PROCESSOR_DATA(processor, thread_timer));
	}

	processor->quantum_end = ctime + thread->quantum_remaining;

	/*
	 *	Context switch check.
	 */
	if ((preempt = csw_check(processor, AST_QUANTUM)) != AST_NONE)
		ast_on(preempt);

	thread_unlock(thread);

	timer_call_enter1(&processor->quantum_timer, thread,
		processor->quantum_end, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);

#if defined(CONFIG_SCHED_TIMESHARE_CORE)
	sched_timeshare_consider_maintenance(ctime);
#endif /* CONFIG_SCHED_TIMESHARE_CORE */

}

/*
 *	sched_set_thread_base_priority:
 *
 *	Set the base priority of the thread
 *	and reset its scheduled priority.
 *
 *	This is the only path to change base_pri.
 *
 *	Called with the thread locked.
 */
void
sched_set_thread_base_priority(thread_t thread, int priority)
{
	thread->base_pri = priority;

	thread_recompute_sched_pri(thread, FALSE);
}

/*
 * thread_recompute_sched_pri:
 *
 * Reset the scheduled priority of the thread
 * according to its base priority if the
 * thread has not been promoted or depressed.
 *
 * This is the standard way to push base_pri changes into sched_pri,
 * or to recalculate the appropriate sched_pri after clearing
 * a promotion or depression.
 *
 * Called at splsched with the thread locked.
 */
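/*
 * Note that a promoted thread's sched_pri is only ever raised here, and a
 * depressed thread's sched_pri is left untouched unless override_depress
 * is TRUE.
 */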
void
thread_recompute_sched_pri(
	thread_t	thread,
	boolean_t	override_depress)
{
	int priority;

	if (thread->sched_mode == TH_MODE_TIMESHARE)
		priority = SCHED(compute_timeshare_priority)(thread);
	else
		priority = thread->base_pri;

	if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) || (priority > thread->sched_pri)) &&
	    (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) || override_depress)) {
		set_sched_pri(thread, priority);
	}
}

void
sched_default_quantum_expire(thread_t thread __unused)
{
	/*
	 * No special behavior when a timeshare, fixed, or realtime thread
	 * uses up its entire quantum
	 */
}

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

/*
 *	lightweight_update_priority:
 *
 *	Update the scheduled priority for
 *	a timesharing thread.
 *
 *	Only for use on the current thread.
 *
 *	Called with the thread locked.
 */
void
lightweight_update_priority(thread_t thread)
{
	assert(thread->runq == PROCESSOR_NULL);
	assert(thread == current_thread());

	if (thread->sched_mode == TH_MODE_TIMESHARE) {
		int priority;
		uint32_t delta;

		thread_timer_delta(thread, delta);

		/*
		 *	Accumulate timesharing usage only
		 *	during contention for processor
		 *	resources.
		 */
		if (thread->pri_shift < INT8_MAX)
			thread->sched_usage += delta;

		thread->cpu_delta += delta;

		priority = sched_compute_timeshare_priority(thread);

		/*
		 * Adjust the scheduled priority like thread_recompute_sched_pri,
		 * except with the benefit of knowing the thread is on this core.
		 */
		if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) || (priority > thread->sched_pri)) &&
		    (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)) &&
		    priority != thread->sched_pri) {

			thread->sched_pri = priority;

			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
			                      (uintptr_t)thread_tid(thread),
			                      thread->base_pri,
			                      thread->sched_pri,
			                      0, /* eventually, 'reason' */
			                      0);
		}
	}
}

/*
 *	Define shifts for simulating (5/8) ** n
 *
 *	Shift structures for holding update shifts.  Actual computation
 *	is  usage = (usage >> shift1) +/- (usage >> abs(shift2))  where the
 *	+/- is determined by the sign of shift 2.
 */
struct shift_data {
	int	shift1;
	int	shift2;
};

#define SCHED_DECAY_TICKS	32
static struct shift_data	sched_decay_shifts[SCHED_DECAY_TICKS] = {
	{1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7},
	{5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13},
	{11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18},
	{16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}
};
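
/*
 * For example, after one tick sched_decay_shifts[1] = {1,3} decays usage as
 *	usage = (usage >> 1) + (usage >> 3) = 0.625 * usage = (5/8)**1 * usage,
 * and after two ticks sched_decay_shifts[2] = {1,-3} gives
 *	usage = (usage >> 1) - (usage >> 3) = 0.375 * usage,
 * a shift-only approximation of (5/8)**2 = 0.390625.
 */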

/*
 *	sched_compute_timeshare_priority:
 *
 *	Calculate the timesharing priority based upon usage and load.
 */
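/*
 * thread->pri_shift reflects processor load (it is selected in
 * update_priority()).  While pri_shift is INT8_MAX, sched_usage is not
 * accumulated at all, so no usage penalty builds up; under load, a smaller
 * pri_shift lets accumulated usage pull the computed priority down toward
 * MINPRI_USER.
 */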
fe8ab488 360extern int sched_pri_decay_band_limit;
d1ecb069 361
91447636 362
3e170ce0
A
363int
364sched_compute_timeshare_priority(thread_t thread)
91447636 365{
3e170ce0
A
366 /* start with base priority */
367 int priority = thread->base_pri - (thread->sched_usage >> thread->pri_shift);
91447636 368
3e170ce0
A
369 if (priority < MINPRI_USER)
370 priority = MINPRI_USER;
371 else if (priority > MAXPRI_KERNEL)
372 priority = MAXPRI_KERNEL;
91447636 373
3e170ce0 374 return priority;
91447636
A
375}
376

/*
 *	can_update_priority
 *
 *	Make sure we don't do re-dispatches more frequently than a scheduler tick.
 *
 *	Called with the thread locked.
 */
boolean_t
can_update_priority(
	thread_t	thread)
{
	if (sched_tick == thread->sched_stamp)
		return (FALSE);
	else
		return (TRUE);
}

/*
 *	update_priority
 *
 *	Perform housekeeping operations driven by scheduler tick.
 *
 *	Called with the thread locked.
 */
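/*
 * This advances sched_stamp by the number of elapsed scheduler ticks,
 * reselects pri_shift for the current load/throttle state, decays cpu_usage
 * and sched_usage through sched_decay_shifts (or zeroes them if more than
 * SCHED_DECAY_TICKS have elapsed), releases an expired fail-safe demotion,
 * and recomputes sched_pri for timeshare threads.
 */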
void
update_priority(
	register thread_t	thread)
{
	register unsigned	ticks;
	register uint32_t	delta;

	ticks = sched_tick - thread->sched_stamp;
	assert(ticks != 0);
	thread->sched_stamp += ticks;
	if (sched_use_combined_fgbg_decay)
		thread->pri_shift = sched_combined_fgbg_pri_shift;
	else if (thread->sched_flags & TH_SFLAG_THROTTLED)
		thread->pri_shift = sched_background_pri_shift;
	else
		thread->pri_shift = sched_pri_shift;

	/* If requested, accelerate aging of sched_usage */
	if (sched_decay_usage_age_factor > 1)
		ticks *= sched_decay_usage_age_factor;

	/*
	 *	Gather cpu usage data.
	 */
	thread_timer_delta(thread, delta);
	if (ticks < SCHED_DECAY_TICKS) {
		register struct shift_data	*shiftp;

		/*
		 *	Accumulate timesharing usage only
		 *	during contention for processor
		 *	resources.
		 */
		if (thread->pri_shift < INT8_MAX)
			thread->sched_usage += delta;

		thread->cpu_usage += delta + thread->cpu_delta;
		thread->cpu_delta = 0;

		shiftp = &sched_decay_shifts[ticks];
		if (shiftp->shift2 > 0) {
			thread->cpu_usage =
				(thread->cpu_usage >> shiftp->shift1) +
				(thread->cpu_usage >> shiftp->shift2);
			thread->sched_usage =
				(thread->sched_usage >> shiftp->shift1) +
				(thread->sched_usage >> shiftp->shift2);
		}
		else {
			thread->cpu_usage =
				(thread->cpu_usage >> shiftp->shift1) -
				(thread->cpu_usage >> -(shiftp->shift2));
			thread->sched_usage =
				(thread->sched_usage >> shiftp->shift1) -
				(thread->sched_usage >> -(shiftp->shift2));
		}
	}
	else {
		thread->cpu_usage = thread->cpu_delta = 0;
		thread->sched_usage = 0;
	}

	/*
	 *	Check for fail-safe release.
	 */
	if ((thread->sched_flags & TH_SFLAG_FAILSAFE) &&
	    mach_absolute_time() >= thread->safe_release) {
		sched_thread_mode_undemote(thread, TH_SFLAG_FAILSAFE);
	}

	/*
	 *	Recompute scheduled priority if appropriate.
	 */
	if (thread->sched_mode == TH_MODE_TIMESHARE) {
		int priority = sched_compute_timeshare_priority(thread);

		/*
		 * Adjust the scheduled priority like thread_recompute_sched_pri,
		 * except without setting an AST.
		 */
		if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) || (priority > thread->sched_pri)) &&
		    (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)) &&
		    priority != thread->sched_pri) {

			boolean_t removed = thread_run_queue_remove(thread);

			thread->sched_pri = priority;

			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
			                      (uintptr_t)thread_tid(thread),
			                      thread->base_pri,
			                      thread->sched_pri,
			                      0, /* eventually, 'reason' */
			                      0);

			if (removed)
				thread_run_queue_reinsert(thread, SCHED_TAILQ);
		}
	}

	return;
}

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

#if MACH_ASSERT
/* sched_mode == TH_MODE_TIMESHARE controls whether a thread has a timeshare count when it has a run count */
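/*
 * These helpers keep the per-thread SHARE_COUNT/BG_COUNT in sync with the
 * global sched_share_count/sched_background_count so that
 * assert_thread_sched_count() below can validate the run/timeshare/throttle
 * invariants.
 */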

void sched_share_incr(thread_t thread) {
	assert((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN);
	assert(thread->sched_mode == TH_MODE_TIMESHARE);
	assert(thread->SHARE_COUNT == 0);
	thread->SHARE_COUNT++;
	(void)hw_atomic_add(&sched_share_count, 1);
}

void sched_share_decr(thread_t thread) {
	assert((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN || thread->sched_mode != TH_MODE_TIMESHARE);
	assert(thread->SHARE_COUNT == 1);
	(void)hw_atomic_sub(&sched_share_count, 1);
	thread->SHARE_COUNT--;
}

/* TH_SFLAG_THROTTLED controls whether a thread has a background count when it has a run count and a share count */

void sched_background_incr(thread_t thread) {
	assert((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN);
	assert(thread->sched_mode == TH_MODE_TIMESHARE);
	assert((thread->sched_flags & TH_SFLAG_THROTTLED) == TH_SFLAG_THROTTLED);

	assert(thread->BG_COUNT == 0);
	thread->BG_COUNT++;
	int val = hw_atomic_add(&sched_background_count, 1);
	assert(val >= 0);

	/* Always do the background change while holding a share count */
	assert(thread->SHARE_COUNT == 1);
}

void sched_background_decr(thread_t thread) {
	if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN && thread->sched_mode == TH_MODE_TIMESHARE)
		assert((thread->sched_flags & TH_SFLAG_THROTTLED) != TH_SFLAG_THROTTLED);
	assert(thread->BG_COUNT == 1);
	int val = hw_atomic_sub(&sched_background_count, 1);
	thread->BG_COUNT--;
	assert(val >= 0);
	assert(thread->BG_COUNT == 0);

	/* Always do the background change while holding a share count */
	assert(thread->SHARE_COUNT == 1);
}


void
assert_thread_sched_count(thread_t thread) {
	/* Only 0 or 1 are acceptable values */
	assert(thread->BG_COUNT == 0 || thread->BG_COUNT == 1);
	assert(thread->SHARE_COUNT == 0 || thread->SHARE_COUNT == 1);

	/* BG is only allowed when you already have a share count */
	if (thread->BG_COUNT == 1)
		assert(thread->SHARE_COUNT == 1);
	if (thread->SHARE_COUNT == 0)
		assert(thread->BG_COUNT == 0);

	if ((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN ||
	    (thread->sched_mode != TH_MODE_TIMESHARE))
		assert(thread->SHARE_COUNT == 0);

	if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN &&
	    (thread->sched_mode == TH_MODE_TIMESHARE))
		assert(thread->SHARE_COUNT == 1);

	if ((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN ||
	    (thread->sched_mode != TH_MODE_TIMESHARE) ||
	    !(thread->sched_flags & TH_SFLAG_THROTTLED))
		assert(thread->BG_COUNT == 0);

	if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN &&
	    (thread->sched_mode == TH_MODE_TIMESHARE) &&
	    (thread->sched_flags & TH_SFLAG_THROTTLED))
		assert(thread->BG_COUNT == 1);
}

#endif /* MACH_ASSERT */

/*
 * Set the thread's true scheduling mode
 * Called with thread mutex and thread locked
 * The thread has already been removed from the runqueue.
 *
 * (saved_mode is handled before this point)
 */
void
sched_set_thread_mode(thread_t thread, sched_mode_t new_mode)
{
	assert_thread_sched_count(thread);
	assert(thread->runq == PROCESSOR_NULL);

	sched_mode_t old_mode = thread->sched_mode;

	thread->sched_mode = new_mode;

	switch (new_mode) {
	case TH_MODE_FIXED:
	case TH_MODE_REALTIME:
		if (old_mode == TH_MODE_TIMESHARE) {
			if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) {
				if (thread->sched_flags & TH_SFLAG_THROTTLED)
					sched_background_decr(thread);

				sched_share_decr(thread);
			}
		}
		break;

	case TH_MODE_TIMESHARE:
		if (old_mode != TH_MODE_TIMESHARE) {
			if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) {
				sched_share_incr(thread);

				if (thread->sched_flags & TH_SFLAG_THROTTLED)
					sched_background_incr(thread);
			}
		}
		break;

	default:
		panic("unexpected mode: %d", new_mode);
		break;
	}

	assert_thread_sched_count(thread);
}

/*
 * Demote the true scheduler mode to timeshare (called with the thread locked)
 */
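/*
 * Demotion reasons (TH_SFLAG_DEMOTED_MASK bits) may stack: the original mode
 * is stashed in saved_mode when the first reason is applied and is only
 * restored by sched_thread_mode_undemote() once the last reason is cleared.
 */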
void
sched_thread_mode_demote(thread_t thread, uint32_t reason)
{
	assert(reason & TH_SFLAG_DEMOTED_MASK);
	assert((thread->sched_flags & reason) != reason);
	assert_thread_sched_count(thread);

	if (thread->policy_reset)
		return;

	if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
		/* Another demotion reason is already active */
		thread->sched_flags |= reason;
		return;
	}

	assert(thread->saved_mode == TH_MODE_NONE);

	boolean_t removed = thread_run_queue_remove(thread);

	thread->sched_flags |= reason;

	thread->saved_mode = thread->sched_mode;

	sched_set_thread_mode(thread, TH_MODE_TIMESHARE);

	thread_recompute_priority(thread);

	if (removed)
		thread_run_queue_reinsert(thread, SCHED_TAILQ);

	assert_thread_sched_count(thread);
}

/*
 * Un-demote the true scheduler mode back to the saved mode (called with the thread locked)
 */
void
sched_thread_mode_undemote(thread_t thread, uint32_t reason)
{
	assert(reason & TH_SFLAG_DEMOTED_MASK);
	assert((thread->sched_flags & reason) == reason);
	assert(thread->saved_mode != TH_MODE_NONE);
	assert(thread->sched_mode == TH_MODE_TIMESHARE);
	assert(thread->policy_reset == 0);

	assert_thread_sched_count(thread);

	thread->sched_flags &= ~reason;

	if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
		/* Another demotion reason is still active */
		return;
	}

	boolean_t removed = thread_run_queue_remove(thread);

	sched_set_thread_mode(thread, thread->saved_mode);

	thread->saved_mode = TH_MODE_NONE;

	thread_recompute_priority(thread);

	if (removed)
		thread_run_queue_reinsert(thread, SCHED_TAILQ);
}

/*
 * Set the thread to be categorized as 'background'
 * Called with thread mutex and thread lock held
 *
 * TODO: Eventually, 'background' should be a true sched_mode.
 */
void
sched_set_thread_throttled(thread_t thread, boolean_t wants_throttle)
{
	if (thread->policy_reset)
		return;

	assert(((thread->sched_flags & TH_SFLAG_THROTTLED) ? TRUE : FALSE) != wants_throttle);

	assert_thread_sched_count(thread);

	/*
	 * When backgrounding a thread, iOS has the semantic that
	 * realtime and fixed priority threads should be demoted
	 * to timeshare background threads.
	 *
	 * On OSX, realtime and fixed priority threads don't lose their mode.
	 */

	if (wants_throttle) {
		thread->sched_flags |= TH_SFLAG_THROTTLED;
		if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN && thread->sched_mode == TH_MODE_TIMESHARE) {
			sched_background_incr(thread);
		}

		assert_thread_sched_count(thread);

	} else {
		thread->sched_flags &= ~TH_SFLAG_THROTTLED;
		if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN && thread->sched_mode == TH_MODE_TIMESHARE) {
			sched_background_decr(thread);
		}

		assert_thread_sched_count(thread);

	}

	assert_thread_sched_count(thread);
}