/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *    File:    priority.c
 *    Author:  Avadis Tevanian, Jr.
 *    Date:    1986
 *
 *    Priority related scheduler bits.
 */

#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <kern/host.h>
#include <kern/mach_param.h>
#include <kern/sched.h>
#include <sys/kdebug.h>
#include <kern/spl.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/ledger.h>
#include <machine/machparam.h>

#ifdef CONFIG_MACH_APPROXIMATE_TIME
#include <machine/commpage.h>   /* for commpage_update_mach_approximate_time */
#endif

/*
 *    thread_quantum_expire:
 *
 *    Recalculate the quantum and priority for a thread.
 *
 *    Called at splsched.
 */
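/*
 * Added note: this appears to be the handler for the per-processor quantum
 * timer that is re-armed below via timer_call_enter1(); p0 carries the
 * processor whose quantum expired and p1 the thread that was running on it,
 * as the assignments at the top of the function show.
 */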

void
thread_quantum_expire(
    timer_call_param_t    p0,
    timer_call_param_t    p1)
{
    processor_t           processor = p0;
    thread_t              thread = p1;
    ast_t                 preempt;
    uint64_t              ctime;

    assert(processor == current_processor());

    SCHED_STATS_QUANTUM_TIMER_EXPIRATION(processor);

    /*
     * We bill CPU time to both the individual thread and its task.
     *
     * Because this balance adjustment could potentially attempt to wake this
     * very thread, we must credit the ledger before taking the thread lock.
     * The ledger pointers are only manipulated by the thread itself at the
     * ast boundary.
     */
    ledger_credit(thread->t_ledger, task_ledgers.cpu_time, thread->quantum_remaining);
    ledger_credit(thread->t_threadledger, thread_ledgers.cpu_time, thread->quantum_remaining);
#ifdef CONFIG_BANK
    if (thread->t_bankledger) {
        ledger_credit(thread->t_bankledger, bank_ledgers.cpu_time,
                (thread->quantum_remaining - thread->t_deduct_bank_ledger_time));
    }
    thread->t_deduct_bank_ledger_time = 0;
#endif

    ctime = mach_absolute_time();

    thread_lock(thread);

    /*
     * We've run up until our quantum expiration, and will (potentially)
     * continue without re-entering the scheduler, so update this now.
     */
    thread->last_run_time = ctime;

#ifdef CONFIG_MACH_APPROXIMATE_TIME
    commpage_update_mach_approximate_time(ctime);
#endif
    /*
     *    Check for fail-safe trip.
     */
    if ((thread->sched_mode == TH_MODE_REALTIME || thread->sched_mode == TH_MODE_FIXED) &&
        !(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) &&
        !(thread->options & TH_OPT_SYSTEM_CRITICAL)) {
        uint64_t new_computation;

        new_computation = ctime - thread->computation_epoch;
        new_computation += thread->computation_metered;
        if (new_computation > max_unsafe_computation) {
            KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAILSAFE)|DBG_FUNC_NONE,
                    (uintptr_t)thread->sched_pri, (uintptr_t)thread->sched_mode, 0, 0, 0);

            thread->safe_release = ctime + sched_safe_duration;

            sched_thread_mode_demote(thread, TH_SFLAG_FAILSAFE);
        }
    }

    /*
     *    Recompute scheduled priority if appropriate.
     */
    if (SCHED(can_update_priority)(thread))
        SCHED(update_priority)(thread);
    else
        SCHED(lightweight_update_priority)(thread);

    SCHED(quantum_expire)(thread);

    processor->current_pri = thread->sched_pri;
    processor->current_thmode = thread->sched_mode;

    /*
     *    This quantum is up, give this thread another.
     */
    if (first_timeslice(processor))
        processor->timeslice--;

    thread_quantum_init(thread);

    /* Reload precise timing global policy to thread-local policy */
    thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);

    /*
     * Since non-precise user/kernel time doesn't update the state/thread timer
     * during privilege transitions, synthesize an event now.
     */
    if (!thread->precise_user_kernel_time) {
        timer_switch(PROCESSOR_DATA(processor, current_state),
                    ctime,
                    PROCESSOR_DATA(processor, current_state));
        timer_switch(PROCESSOR_DATA(processor, thread_timer),
                    ctime,
                    PROCESSOR_DATA(processor, thread_timer));
    }

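    /*
     * Added note: re-arm the per-processor quantum timer for the refreshed
     * quantum, measured from the expiration timestamp sampled above.
     */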
    processor->quantum_end = ctime + thread->quantum_remaining;
    timer_call_enter1(&processor->quantum_timer, thread,
        processor->quantum_end, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);

    /*
     *    Context switch check.
     */
    if ((preempt = csw_check(processor, AST_QUANTUM)) != AST_NONE)
        ast_on(preempt);

    thread_unlock(thread);

#if defined(CONFIG_SCHED_TIMESHARE_CORE)
    sched_traditional_consider_maintenance(ctime);
#endif /* CONFIG_SCHED_TIMESHARE_CORE */
}

/*
 *    sched_set_thread_base_priority:
 *
 *    Set the base priority of the thread
 *    and reset its scheduled priority.
 *
 *    Called with the thread locked.
 */
void
sched_set_thread_base_priority(thread_t thread, int priority)
{
    thread->priority = priority;
    SCHED(compute_priority)(thread, FALSE);
}

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

void
sched_traditional_quantum_expire(thread_t thread __unused)
{
    /*
     * No special behavior when a timeshare, fixed, or realtime thread
     * uses up its entire quantum
     */
}

void
lightweight_update_priority(thread_t thread)
{
    if (thread->sched_mode == TH_MODE_TIMESHARE) {
        register uint32_t    delta;

        thread_timer_delta(thread, delta);

        /*
         *    Accumulate timesharing usage only
         *    during contention for processor
         *    resources.
         */
        if (thread->pri_shift < INT8_MAX)
            thread->sched_usage += delta;

        thread->cpu_delta += delta;

        /*
         * Adjust the scheduled priority if
         * the thread has not been promoted
         * and is not depressed.
         */
        if (!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) &&
            !(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK))
            compute_my_priority(thread);
    }
}

/*
 *    Define shifts for simulating (5/8) ** n
 *
 *    Shift structures for holding update shifts.  Actual computation
 *    is  usage = (usage >> shift1) +/- (usage >> abs(shift2))  where the
 *    +/- is determined by the sign of shift2.
 */
struct shift_data {
    int    shift1;
    int    shift2;
};

#define SCHED_DECAY_TICKS    32
static struct shift_data    sched_decay_shifts[SCHED_DECAY_TICKS] = {
    {1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7},
    {5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13},
    {11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18},
    {16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}
};
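
/*
 * Worked example (added for illustration): entry n approximates (5/8)^n.
 * n = 1 is {1,3}:  usage/2 + usage/8   = 0.625 * usage, exactly (5/8)^1;
 * n = 3 is {2,-7}: usage/4 - usage/128 ~ 0.242 * usage, close to (5/8)^3 ~ 0.244.
 */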

/*
 *    do_priority_computation:
 *
 *    Calculate the timesharing priority based upon usage and load.
 */
extern int sched_pri_decay_band_limit;

static int do_priority_computation(thread_t th) {
    register int priority = th->priority        /* start with base priority */
        - (th->sched_usage >> th->pri_shift);
    if (priority < MINPRI_USER)
        priority = MINPRI_USER;
    else
    if (priority > MAXPRI_KERNEL)
        priority = MAXPRI_KERNEL;

    return priority;
}
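
/*
 * Illustrative note (not part of the original source): with a base priority
 * of 31 and decayed usage such that (sched_usage >> pri_shift) == 12, the
 * computed timeshare priority is 31 - 12 = 19, which is then clamped to the
 * [MINPRI_USER, MAXPRI_KERNEL] band.
 */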

/*
 *    compute_priority:
 *
 *    Reset the scheduled priority of the thread
 *    according to its base priority if the
 *    thread has not been promoted or depressed.
 *
 *    Called with the thread locked.
 */
void
compute_priority(
    register thread_t    thread,
    boolean_t            override_depress)
{
    register int         priority;

    if (thread->sched_mode == TH_MODE_TIMESHARE)
        priority = do_priority_computation(thread);
    else
        priority = thread->priority;

    if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) || (priority > thread->sched_pri)) &&
        (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) || override_depress)) {
        set_sched_pri(thread, priority);
    }
}

/*
 *    compute_my_priority:
 *
 *    Reset the scheduled priority for
 *    a timesharing thread.
 *
 *    Only for use on the current thread
 *    if timesharing and not depressed.
 *
 *    Called with the thread locked.
 */
void
compute_my_priority(
    register thread_t    thread)
{
    register int         priority;

    priority = do_priority_computation(thread);
    assert(thread->runq == PROCESSOR_NULL);

    if (priority != thread->sched_pri) {
        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_DECAY_PRIORITY)|DBG_FUNC_NONE,
                              (uintptr_t)thread_tid(thread),
                              thread->priority,
                              thread->sched_pri,
                              priority,
                              0);
    }
    thread->sched_pri = priority;
}

/*
 *    can_update_priority
 *
 *    Make sure we don't do re-dispatches more frequently than a scheduler tick.
 *
 *    Called with the thread locked.
 */
boolean_t
can_update_priority(
    thread_t    thread)
{
    if (sched_tick == thread->sched_stamp)
        return (FALSE);
    else
        return (TRUE);
}

/*
 *    update_priority
 *
 *    Perform housekeeping operations driven by scheduler tick.
 *
 *    Called with the thread locked.
 */
void
update_priority(
    register thread_t    thread)
{
    register unsigned    ticks;
    register uint32_t    delta;

    ticks = sched_tick - thread->sched_stamp;
    assert(ticks != 0);
    thread->sched_stamp += ticks;
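
    /*
     * Added note: pick the usage-to-priority decay shift for this thread:
     * the combined foreground/background shift when that policy is active,
     * the background shift for throttled threads, and the global timeshare
     * shift otherwise.
     */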
    if (sched_use_combined_fgbg_decay)
        thread->pri_shift = sched_combined_fgbg_pri_shift;
    else if (thread->sched_flags & TH_SFLAG_THROTTLED)
        thread->pri_shift = sched_background_pri_shift;
    else
        thread->pri_shift = sched_pri_shift;

    /* If requested, accelerate aging of sched_usage */
    if (sched_decay_usage_age_factor > 1)
        ticks *= sched_decay_usage_age_factor;

    /*
     *    Gather cpu usage data.
     */
    thread_timer_delta(thread, delta);
    if (ticks < SCHED_DECAY_TICKS) {
        register struct shift_data    *shiftp;

        /*
         *    Accumulate timesharing usage only
         *    during contention for processor
         *    resources.
         */
        if (thread->pri_shift < INT8_MAX)
            thread->sched_usage += delta;

        thread->cpu_usage += delta + thread->cpu_delta;
        thread->cpu_delta = 0;

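        /*
         * Added note: apply the precomputed decay for `ticks` scheduler ticks
         * of aging; the table entry approximates multiplying both usage
         * figures by (5/8)^ticks using only shifts. Threads not updated for
         * SCHED_DECAY_TICKS or more ticks have their usage cleared outright
         * in the else branch below.
         */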
        shiftp = &sched_decay_shifts[ticks];
        if (shiftp->shift2 > 0) {
            thread->cpu_usage =
                (thread->cpu_usage >> shiftp->shift1) +
                (thread->cpu_usage >> shiftp->shift2);
            thread->sched_usage =
                (thread->sched_usage >> shiftp->shift1) +
                (thread->sched_usage >> shiftp->shift2);
        }
        else {
            thread->cpu_usage =
                (thread->cpu_usage >> shiftp->shift1) -
                (thread->cpu_usage >> -(shiftp->shift2));
            thread->sched_usage =
                (thread->sched_usage >> shiftp->shift1) -
                (thread->sched_usage >> -(shiftp->shift2));
        }
    }
    else {
        thread->cpu_usage = thread->cpu_delta = 0;
        thread->sched_usage = 0;
    }

    /*
     *    Check for fail-safe release.
     */
    if ((thread->sched_flags & TH_SFLAG_FAILSAFE) &&
        mach_absolute_time() >= thread->safe_release) {
        sched_thread_mode_undemote(thread, TH_SFLAG_FAILSAFE);
    }

    /*
     *    Recompute scheduled priority if appropriate.
     */
    if ((thread->sched_mode == TH_MODE_TIMESHARE) &&
        !(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) &&
        !(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)) {
        register int    new_pri;

        new_pri = do_priority_computation(thread);
        if (new_pri != thread->sched_pri) {
            boolean_t removed = thread_run_queue_remove(thread);

#if 0
            if (sched_use_combined_fgbg_decay && ((thread)->task->max_priority > MAXPRI_THROTTLE) && (new_pri == MAXPRI_THROTTLE)) {
                /* with the alternate (new) algorithm, would we have decayed this far? */
                int alt_pri = thread->priority - (thread->sched_usage >> sched_pri_shift);
                if ((alt_pri > new_pri) && (sched_background_count > 0)) {
                    printf("thread %p would have decayed to only %d instead of %d\n", thread, alt_pri, new_pri);
                }
            }
#endif

            KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_DECAY_PRIORITY)|DBG_FUNC_NONE,
                                  (uintptr_t)thread_tid(thread),
                                  thread->priority,
                                  thread->sched_pri,
                                  new_pri,
                                  0);
            thread->sched_pri = new_pri;

            if (removed)
                thread_setrun(thread, SCHED_TAILQ);
        }
    }

    return;
}

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

#if MACH_ASSERT
/* sched_mode == TH_MODE_TIMESHARE controls whether a thread has a timeshare count when it has a run count */

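/*
 * Added note: the incr/decr pairs below keep the per-thread SHARE_COUNT and
 * BG_COUNT debug counters in step with the global sched_share_count and
 * sched_background_count, and check the invariants that
 * assert_thread_sched_count() spells out.
 */
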
void sched_share_incr(thread_t thread) {
    assert((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN);
    assert(thread->sched_mode == TH_MODE_TIMESHARE);
    assert(thread->SHARE_COUNT == 0);
    thread->SHARE_COUNT++;
    (void)hw_atomic_add(&sched_share_count, 1);
}

void sched_share_decr(thread_t thread) {
    assert((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN || thread->sched_mode != TH_MODE_TIMESHARE);
    assert(thread->SHARE_COUNT == 1);
    (void)hw_atomic_sub(&sched_share_count, 1);
    thread->SHARE_COUNT--;
}

/* TH_SFLAG_THROTTLED controls whether a thread has a background count when it has a run count and a share count */

void sched_background_incr(thread_t thread) {
    assert((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN);
    assert(thread->sched_mode == TH_MODE_TIMESHARE);
    assert((thread->sched_flags & TH_SFLAG_THROTTLED) == TH_SFLAG_THROTTLED);

    assert(thread->BG_COUNT == 0);
    thread->BG_COUNT++;
    int val = hw_atomic_add(&sched_background_count, 1);
    assert(val >= 0);

    /* Always do the background change while holding a share count */
    assert(thread->SHARE_COUNT == 1);
}

void sched_background_decr(thread_t thread) {
    if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN && thread->sched_mode == TH_MODE_TIMESHARE)
        assert((thread->sched_flags & TH_SFLAG_THROTTLED) != TH_SFLAG_THROTTLED);
    assert(thread->BG_COUNT == 1);
    int val = hw_atomic_sub(&sched_background_count, 1);
    thread->BG_COUNT--;
    assert(val >= 0);
    assert(thread->BG_COUNT == 0);

    /* Always do the background change while holding a share count */
    assert(thread->SHARE_COUNT == 1);
}

void
assert_thread_sched_count(thread_t thread) {
    /* Only 0 or 1 are acceptable values */
    assert(thread->BG_COUNT    == 0 || thread->BG_COUNT    == 1);
    assert(thread->SHARE_COUNT == 0 || thread->SHARE_COUNT == 1);

    /* BG is only allowed when you already have a share count */
    if (thread->BG_COUNT == 1)
        assert(thread->SHARE_COUNT == 1);
    if (thread->SHARE_COUNT == 0)
        assert(thread->BG_COUNT == 0);

    if ((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN ||
        (thread->sched_mode != TH_MODE_TIMESHARE))
        assert(thread->SHARE_COUNT == 0);

    if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN &&
        (thread->sched_mode == TH_MODE_TIMESHARE))
        assert(thread->SHARE_COUNT == 1);

    if ((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN ||
        (thread->sched_mode != TH_MODE_TIMESHARE) ||
        !(thread->sched_flags & TH_SFLAG_THROTTLED))
        assert(thread->BG_COUNT == 0);

    if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN &&
        (thread->sched_mode == TH_MODE_TIMESHARE) &&
        (thread->sched_flags & TH_SFLAG_THROTTLED))
        assert(thread->BG_COUNT == 1);
}

#endif /* MACH_ASSERT */

/*
 * Set the thread's true scheduling mode
 * Called with thread mutex and thread locked
 * The thread has already been removed from the runqueue.
 *
 * (saved_mode is handled before this point)
 */
void
sched_set_thread_mode(thread_t thread, sched_mode_t new_mode)
{
    assert_thread_sched_count(thread);

    sched_mode_t old_mode = thread->sched_mode;

    thread->sched_mode = new_mode;

    switch (new_mode) {
    case TH_MODE_FIXED:
    case TH_MODE_REALTIME:
        if (old_mode == TH_MODE_TIMESHARE) {
            if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) {
                if (thread->sched_flags & TH_SFLAG_THROTTLED)
                    sched_background_decr(thread);

                sched_share_decr(thread);
            }
        }
        break;

    case TH_MODE_TIMESHARE:
        if (old_mode != TH_MODE_TIMESHARE) {
            if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) {
                sched_share_incr(thread);

                if (thread->sched_flags & TH_SFLAG_THROTTLED)
                    sched_background_incr(thread);
            }
        }
        break;

    default:
        panic("unexpected mode: %d", new_mode);
        break;
    }

    assert_thread_sched_count(thread);
}

/*
 * Demote the true scheduler mode to timeshare (called with the thread locked)
 */
void
sched_thread_mode_demote(thread_t thread, uint32_t reason)
{
    assert(reason & TH_SFLAG_DEMOTED_MASK);
    assert((thread->sched_flags & reason) != reason);
    assert_thread_sched_count(thread);

    if (thread->policy_reset)
        return;

    if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
        /* Another demotion reason is already active */
        thread->sched_flags |= reason;
        return;
    }

    assert(thread->saved_mode == TH_MODE_NONE);

    boolean_t removed = thread_run_queue_remove(thread);

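    /*
     * Added note: a demoted realtime thread also has its base priority
     * dropped to DEPRESSPRI here; sched_thread_mode_undemote() restores it
     * to BASEPRI_RTQUEUES when the demotion is lifted.
     */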
    if (thread->sched_mode == TH_MODE_REALTIME)
        thread->priority = DEPRESSPRI;

    thread->sched_flags |= reason;

    thread->saved_mode = thread->sched_mode;

    sched_set_thread_mode(thread, TH_MODE_TIMESHARE);

    if (removed)
        thread_setrun(thread, SCHED_TAILQ);

    assert_thread_sched_count(thread);
}

/*
 * Un-demote the true scheduler mode back to the saved mode (called with the thread locked)
 */
void
sched_thread_mode_undemote(thread_t thread, uint32_t reason)
{
    assert(reason & TH_SFLAG_DEMOTED_MASK);
    assert((thread->sched_flags & reason) == reason);
    assert(thread->saved_mode != TH_MODE_NONE);
    assert(thread->sched_mode == TH_MODE_TIMESHARE);
    assert(thread->policy_reset == 0);

    assert_thread_sched_count(thread);

    thread->sched_flags &= ~reason;

    if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
        /* Another demotion reason is still active */
        return;
    }

    boolean_t removed = thread_run_queue_remove(thread);

    sched_set_thread_mode(thread, thread->saved_mode);

    thread->saved_mode = TH_MODE_NONE;

    if (thread->sched_mode == TH_MODE_REALTIME) {
        thread->priority = BASEPRI_RTQUEUES;
    }

    SCHED(compute_priority)(thread, FALSE);

    if (removed)
        thread_setrun(thread, SCHED_TAILQ);
}

/*
 * Set the thread to be categorized as 'background'
 * Called with thread mutex and thread lock held
 *
 * TODO: Eventually, 'background' should be a true sched_mode.
 */
void
sched_set_thread_throttled(thread_t thread, boolean_t wants_throttle)
{
    if (thread->policy_reset)
        return;

    assert(((thread->sched_flags & TH_SFLAG_THROTTLED) ? TRUE : FALSE) != wants_throttle);

    assert_thread_sched_count(thread);

    /*
     * When backgrounding a thread, iOS has the semantic that
     * realtime and fixed priority threads should be demoted
     * to timeshare background threads.
     *
     * On OSX, realtime and fixed priority threads don't lose their mode.
     */

    if (wants_throttle) {
        thread->sched_flags |= TH_SFLAG_THROTTLED;
        if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN && thread->sched_mode == TH_MODE_TIMESHARE) {
            sched_background_incr(thread);
        }

        assert_thread_sched_count(thread);

    } else {
        thread->sched_flags &= ~TH_SFLAG_THROTTLED;
        if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN && thread->sched_mode == TH_MODE_TIMESHARE) {
            sched_background_decr(thread);
        }

        assert_thread_sched_count(thread);

    }

    assert_thread_sched_count(thread);
}