/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
58 | /* | |
fe8ab488 | 59 | * File: priority.c |
1c79356b A |
60 | * Author: Avadis Tevanian, Jr. |
61 | * Date: 1986 | |
62 | * | |
fe8ab488 | 63 | * Priority related scheduler bits. |
1c79356b A |
64 | */ |
65 | ||
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <kern/host.h>
#include <kern/mach_param.h>
#include <kern/sched.h>
#include <sys/kdebug.h>
#include <kern/spl.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/ledger.h>
#include <machine/machparam.h>
#include <kern/machine.h>

#ifdef CONFIG_MACH_APPROXIMATE_TIME
#include <machine/commpage.h>	/* for commpage_update_mach_approximate_time */
#endif

/*
 *	thread_quantum_expire:
 *
 *	Recalculate the quantum and priority for a thread.
 *
 *	Called at splsched.
 */

void
thread_quantum_expire(
	timer_call_param_t	p0,
	timer_call_param_t	p1)
{
	processor_t		processor = p0;
	thread_t		thread = p1;
	ast_t			preempt;
	uint64_t		ctime;
	int			urgency;
	uint64_t		ignore1, ignore2;

	assert(processor == current_processor());
	assert(thread == current_thread());

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_QUANTUM_EXPIRED) | DBG_FUNC_START, 0, 0, 0, 0, 0);

	SCHED_STATS_QUANTUM_TIMER_EXPIRATION(processor);

	/*
	 * We bill CPU time to both the individual thread and its task.
	 *
	 * Because this balance adjustment could potentially attempt to wake this very
	 * thread, we must credit the ledger before taking the thread lock. The ledger
	 * pointers are only manipulated by the thread itself at the ast boundary.
	 */
	ledger_credit(thread->t_ledger, task_ledgers.cpu_time, thread->quantum_remaining);
	ledger_credit(thread->t_threadledger, thread_ledgers.cpu_time, thread->quantum_remaining);
#ifdef CONFIG_BANK
	if (thread->t_bankledger) {
		ledger_credit(thread->t_bankledger, bank_ledgers.cpu_time,
				(thread->quantum_remaining - thread->t_deduct_bank_ledger_time));
	}
	thread->t_deduct_bank_ledger_time = 0;
#endif

	ctime = mach_absolute_time();

#ifdef CONFIG_MACH_APPROXIMATE_TIME
	commpage_update_mach_approximate_time(ctime);
#endif

	thread_lock(thread);

	/*
	 * We've run up until our quantum expiration, and will (potentially)
	 * continue without re-entering the scheduler, so update this now.
	 */
	processor->last_dispatch = ctime;
	thread->last_run_time = ctime;

	/*
	 *	Check for fail-safe trip.
	 */
	if ((thread->sched_mode == TH_MODE_REALTIME || thread->sched_mode == TH_MODE_FIXED) &&
	    !(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) &&
	    !(thread->options & TH_OPT_SYSTEM_CRITICAL)) {
		uint64_t new_computation;

		new_computation = ctime - thread->computation_epoch;
		new_computation += thread->computation_metered;
		if (new_computation > max_unsafe_computation) {
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAILSAFE)|DBG_FUNC_NONE,
					(uintptr_t)thread->sched_pri, (uintptr_t)thread->sched_mode, 0, 0, 0);

			thread->safe_release = ctime + sched_safe_duration;

			sched_thread_mode_demote(thread, TH_SFLAG_FAILSAFE);
		}
	}

	/*
	 *	Recompute scheduled priority if appropriate.
	 */
	if (SCHED(can_update_priority)(thread))
		SCHED(update_priority)(thread);
	else
		SCHED(lightweight_update_priority)(thread);

	if (thread->sched_mode != TH_MODE_REALTIME)
		SCHED(quantum_expire)(thread);

	processor->current_pri = thread->sched_pri;
	processor->current_thmode = thread->sched_mode;

	/* Tell platform layer that we are still running this thread */
	urgency = thread_get_urgency(thread, &ignore1, &ignore2);
	machine_thread_going_on_core(thread, urgency, 0);

	/*
	 *	This quantum is up, give this thread another.
	 */
	processor->first_timeslice = FALSE;

	thread_quantum_init(thread);

	/* Reload precise timing global policy to thread-local policy */
	thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);

	/*
	 * Since non-precise user/kernel time doesn't update the state/thread timer
	 * during privilege transitions, synthesize an event now.
	 */
	if (!thread->precise_user_kernel_time) {
		timer_switch(PROCESSOR_DATA(processor, current_state),
					ctime,
					PROCESSOR_DATA(processor, current_state));
		timer_switch(PROCESSOR_DATA(processor, thread_timer),
					ctime,
					PROCESSOR_DATA(processor, thread_timer));
	}
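
	/*
	 * (Reading of the self-switch above, not from the original source:
	 * timer_switch() with the same timer as source and destination
	 * appears to credit the interval since the last event and restart
	 * the timer at ctime, which is the synthesized event described in
	 * the comment.)
	 */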
203 | ||
fe8ab488 | 204 | processor->quantum_end = ctime + thread->quantum_remaining; |
1c79356b | 205 | |
0b4e3aa0 | 206 | /* |
2d21ac55 | 207 | * Context switch check. |
0b4e3aa0 | 208 | */ |
fe8ab488 | 209 | if ((preempt = csw_check(processor, AST_QUANTUM)) != AST_NONE) |
2d21ac55 | 210 | ast_on(preempt); |
2d21ac55 A |
211 | |
212 | thread_unlock(thread); | |
39236c6e | 213 | |
3e170ce0 A |
214 | timer_call_enter1(&processor->quantum_timer, thread, |
215 | processor->quantum_end, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL); | |
216 | ||
fe8ab488 | 217 | #if defined(CONFIG_SCHED_TIMESHARE_CORE) |
3e170ce0 A |
218 | sched_timeshare_consider_maintenance(ctime); |
219 | #endif /* CONFIG_SCHED_TIMESHARE_CORE */ | |
220 | ||
490019cf A |
221 | |
222 | KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_QUANTUM_EXPIRED) | DBG_FUNC_END, preempt, 0, 0, 0, 0); | |
fe8ab488 A |
223 | } |
224 | ||
225 | /* | |
226 | * sched_set_thread_base_priority: | |
227 | * | |
228 | * Set the base priority of the thread | |
229 | * and reset its scheduled priority. | |
230 | * | |
3e170ce0 A |
231 | * This is the only path to change base_pri. |
232 | * | |
fe8ab488 A |
233 | * Called with the thread locked. |
234 | */ | |
235 | void | |
236 | sched_set_thread_base_priority(thread_t thread, int priority) | |
237 | { | |
490019cf | 238 | int old_priority = thread->base_pri; |
3e170ce0 A |
239 | thread->base_pri = priority; |
240 | ||
490019cf A |
241 | /* A thread is 'throttled' when its base priority is at or below MAXPRI_THROTTLE */ |
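	/* (For reference: sched.h appears to define MAXPRI_THROTTLE as MINPRI + 4, i.e. priority 4.) */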
	if ((priority > MAXPRI_THROTTLE) && (old_priority <= MAXPRI_THROTTLE)) {
		sched_set_thread_throttled(thread, FALSE);
	} else if ((priority <= MAXPRI_THROTTLE) && (old_priority > MAXPRI_THROTTLE)) {
		sched_set_thread_throttled(thread, TRUE);
	}

	thread_recompute_sched_pri(thread, FALSE);
}

/*
 *	thread_recompute_sched_pri:
 *
 *	Reset the scheduled priority of the thread
 *	according to its base priority if the
 *	thread has not been promoted or depressed.
 *
 *	This is the standard way to push base_pri changes into sched_pri,
 *	or to recalculate the appropriate sched_pri after clearing
 *	a promotion or depression.
 *
 *	Called at splsched with the thread locked.
 */
void
thread_recompute_sched_pri(
	thread_t	thread,
	boolean_t	override_depress)
{
	int priority;

	if (thread->sched_mode == TH_MODE_TIMESHARE)
		priority = SCHED(compute_timeshare_priority)(thread);
	else
		priority = thread->base_pri;

	if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) || (priority > thread->sched_pri)) &&
	    (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) || override_depress)) {
		set_sched_pri(thread, priority);
	}
}
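
/*
 * In other words: a promotion holds sched_pri up, so a lower recomputed
 * priority is ignored; a depression holds sched_pri down unless the caller
 * passes override_depress == TRUE.
 */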

void
sched_default_quantum_expire(thread_t thread __unused)
{
	/*
	 * No special behavior when a timeshare, fixed, or realtime thread
	 * uses up its entire quantum
	 */
}

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

/*
 *	lightweight_update_priority:
 *
 *	Update the scheduled priority for
 *	a timesharing thread.
 *
 *	Only for use on the current thread.
 *
 *	Called with the thread locked.
 */
void
lightweight_update_priority(thread_t thread)
{
	assert(thread->runq == PROCESSOR_NULL);
	assert(thread == current_thread());

	if (thread->sched_mode == TH_MODE_TIMESHARE) {
		int priority;
		uint32_t delta;

		thread_timer_delta(thread, delta);

		/*
		 *	Accumulate timesharing usage only
		 *	during contention for processor
		 *	resources.
		 */
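		/*
		 * (pri_shift is expected to be INT8_MAX when the load average
		 * indicates no contention, so no usage accumulates then.)
		 */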
		if (thread->pri_shift < INT8_MAX)
			thread->sched_usage += delta;

		thread->cpu_delta += delta;

		priority = sched_compute_timeshare_priority(thread);

		/*
		 * Adjust the scheduled priority like thread_recompute_sched_pri,
		 * except with the benefit of knowing the thread is on this core.
		 */
		if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) || (priority > thread->sched_pri)) &&
		    (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)) &&
		    priority != thread->sched_pri) {

			thread->sched_pri = priority;

			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
					      (uintptr_t)thread_tid(thread),
					      thread->base_pri,
					      thread->sched_pri,
					      0, /* eventually, 'reason' */
					      0);
		}
	}
}

346 | ||
91447636 A |
347 | /* |
348 | * Define shifts for simulating (5/8) ** n | |
349 | * | |
350 | * Shift structures for holding update shifts. Actual computation | |
351 | * is usage = (usage >> shift1) +/- (usage >> abs(shift2)) where the | |
352 | * +/- is determined by the sign of shift 2. | |
353 | */ | |
354 | struct shift_data { | |
355 | int shift1; | |
356 | int shift2; | |
357 | }; | |
358 | ||
359 | #define SCHED_DECAY_TICKS 32 | |
360 | static struct shift_data sched_decay_shifts[SCHED_DECAY_TICKS] = { | |
361 | {1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7}, | |
362 | {5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13}, | |
363 | {11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18}, | |
364 | {16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27} | |
365 | }; | |
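
/*
 * Worked example (illustrative, not from the original source): after one
 * tick, entry {1,3} gives
 *	usage = (usage >> 1) + (usage >> 3) = usage * (1/2 + 1/8) = usage * 5/8
 * and after two ticks, entry {1,-3} gives
 *	usage = (usage >> 1) - (usage >> 3) = usage * 3/8 ~= (5/8)^2 = 0.390625
 * so each entry approximates (5/8)^n with two shifts and one add/subtract.
 */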
366 | ||
367 | /* | |
3e170ce0 | 368 | * sched_compute_timeshare_priority: |
91447636 A |
369 | * |
370 | * Calculate the timesharing priority based upon usage and load. | |
371 | */ | |
fe8ab488 | 372 | extern int sched_pri_decay_band_limit; |
d1ecb069 | 373 | |
91447636 | 374 | |
3e170ce0 A |
375 | int |
376 | sched_compute_timeshare_priority(thread_t thread) | |
91447636 | 377 | { |
3e170ce0 A |
378 | /* start with base priority */ |
379 | int priority = thread->base_pri - (thread->sched_usage >> thread->pri_shift); | |
91447636 | 380 | |
3e170ce0 A |
381 | if (priority < MINPRI_USER) |
382 | priority = MINPRI_USER; | |
383 | else if (priority > MAXPRI_KERNEL) | |
384 | priority = MAXPRI_KERNEL; | |
91447636 | 385 | |
3e170ce0 | 386 | return priority; |
91447636 A |
387 | } |
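
/*
 * Illustrative example (hypothetical values): a timeshare thread with
 * base_pri 31 (BASEPRI_DEFAULT) whose decayed usage satisfies
 * (sched_usage >> pri_shift) == 12 gets priority 19; any result outside
 * [MINPRI_USER, MAXPRI_KERNEL] is clamped to that band.
 */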
388 | ||
91447636 | 389 | |
6d2010ae A |
390 | /* |
391 | * can_update_priority | |
392 | * | |
393 | * Make sure we don't do re-dispatches more frequently than a scheduler tick. | |
394 | * | |
395 | * Called with the thread locked. | |
396 | */ | |
397 | boolean_t | |
398 | can_update_priority( | |
399 | thread_t thread) | |
400 | { | |
401 | if (sched_tick == thread->sched_stamp) | |
402 | return (FALSE); | |
403 | else | |
404 | return (TRUE); | |
405 | } | |
406 | ||
/*
 *	update_priority
 *
 *	Perform housekeeping operations driven by scheduler tick.
 *
 *	Called with the thread locked.
 */
void
update_priority(
	register thread_t	thread)
{
	register unsigned	ticks;
	register uint32_t	delta;

	ticks = sched_tick - thread->sched_stamp;
	assert(ticks != 0);
	thread->sched_stamp += ticks;
	if (sched_use_combined_fgbg_decay)
		thread->pri_shift = sched_combined_fgbg_pri_shift;
	else if (thread->sched_flags & TH_SFLAG_THROTTLED)
		thread->pri_shift = sched_background_pri_shift;
	else
		thread->pri_shift = sched_pri_shift;

	/* If requested, accelerate aging of sched_usage */
	if (sched_decay_usage_age_factor > 1)
		ticks *= sched_decay_usage_age_factor;

	/*
	 *	Gather cpu usage data.
	 */
	thread_timer_delta(thread, delta);
	if (ticks < SCHED_DECAY_TICKS) {
		register struct shift_data	*shiftp;

		/*
		 *	Accumulate timesharing usage only
		 *	during contention for processor
		 *	resources.
		 */
		if (thread->pri_shift < INT8_MAX)
			thread->sched_usage += delta;

		thread->cpu_usage += delta + thread->cpu_delta;
		thread->cpu_delta = 0;

		shiftp = &sched_decay_shifts[ticks];
		if (shiftp->shift2 > 0) {
			thread->cpu_usage =
				(thread->cpu_usage >> shiftp->shift1) +
				(thread->cpu_usage >> shiftp->shift2);
			thread->sched_usage =
				(thread->sched_usage >> shiftp->shift1) +
				(thread->sched_usage >> shiftp->shift2);
		}
		else {
			thread->cpu_usage =
				(thread->cpu_usage >> shiftp->shift1) -
				(thread->cpu_usage >> -(shiftp->shift2));
			thread->sched_usage =
				(thread->sched_usage >> shiftp->shift1) -
				(thread->sched_usage >> -(shiftp->shift2));
		}
	}
	else {
		thread->cpu_usage = thread->cpu_delta = 0;
		thread->sched_usage = 0;
	}
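
	/*
	 * (Past SCHED_DECAY_TICKS the decay factor is at most
	 * (5/8)^32 ~= 3e-7, so the usage is treated as fully
	 * decayed and simply cleared above.)
	 */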
475 | ||
476 | /* | |
477 | * Check for fail-safe release. | |
478 | */ | |
fe8ab488 A |
479 | if ((thread->sched_flags & TH_SFLAG_FAILSAFE) && |
480 | mach_absolute_time() >= thread->safe_release) { | |
481 | sched_thread_mode_undemote(thread, TH_SFLAG_FAILSAFE); | |
91447636 A |
482 | } |
483 | ||
484 | /* | |
485 | * Recompute scheduled priority if appropriate. | |
486 | */ | |
3e170ce0 A |
487 | if (thread->sched_mode == TH_MODE_TIMESHARE) { |
488 | int priority = sched_compute_timeshare_priority(thread); | |
489 | ||
490 | /* | |
491 | * Adjust the scheduled priority like thread_recompute_sched_pri, | |
492 | * except without setting an AST. | |
493 | */ | |
494 | if ((!(thread->sched_flags & TH_SFLAG_PROMOTED_MASK) || (priority > thread->sched_pri)) && | |
495 | (!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)) && | |
496 | priority != thread->sched_pri) { | |
497 | ||
498 | boolean_t removed = thread_run_queue_remove(thread); | |
499 | ||
500 | thread->sched_pri = priority; | |
39236c6e | 501 | |
3e170ce0 A |
502 | KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY), |
503 | (uintptr_t)thread_tid(thread), | |
504 | thread->base_pri, | |
505 | thread->sched_pri, | |
506 | 0, /* eventually, 'reason' */ | |
507 | 0); | |
39236c6e | 508 | |
2d21ac55 | 509 | if (removed) |
3e170ce0 | 510 | thread_run_queue_reinsert(thread, SCHED_TAILQ); |
91447636 A |
511 | } |
512 | } | |
3e170ce0 | 513 | |
6d2010ae | 514 | return; |
91447636 | 515 | } |
#endif /* CONFIG_SCHED_TIMESHARE_CORE */

#if MACH_ASSERT
/* sched_mode == TH_MODE_TIMESHARE controls whether a thread has a timeshare count when it has a run count */

void sched_share_incr(thread_t thread) {
	assert((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN);
	assert(thread->sched_mode == TH_MODE_TIMESHARE);
	assert(thread->SHARE_COUNT == 0);
	thread->SHARE_COUNT++;
	(void)hw_atomic_add(&sched_share_count, 1);
}

void sched_share_decr(thread_t thread) {
	assert((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN || thread->sched_mode != TH_MODE_TIMESHARE);
	assert(thread->SHARE_COUNT == 1);
	(void)hw_atomic_sub(&sched_share_count, 1);
	thread->SHARE_COUNT--;
}

/* TH_SFLAG_THROTTLED controls whether a thread has a background count when it has a run count and a share count */

void sched_background_incr(thread_t thread) {
	assert((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN);
	assert(thread->sched_mode == TH_MODE_TIMESHARE);
	assert((thread->sched_flags & TH_SFLAG_THROTTLED) == TH_SFLAG_THROTTLED);

	assert(thread->BG_COUNT == 0);
	thread->BG_COUNT++;
	int val = hw_atomic_add(&sched_background_count, 1);
	assert(val >= 0);

	/* Always do the background change while holding a share count */
	assert(thread->SHARE_COUNT == 1);
}

void sched_background_decr(thread_t thread) {
	if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN && thread->sched_mode == TH_MODE_TIMESHARE)
		assert((thread->sched_flags & TH_SFLAG_THROTTLED) != TH_SFLAG_THROTTLED);
	assert(thread->BG_COUNT == 1);
	int val = hw_atomic_sub(&sched_background_count, 1);
	thread->BG_COUNT--;
	assert(val >= 0);
	assert(thread->BG_COUNT == 0);

	/* Always do the background change while holding a share count */
	assert(thread->SHARE_COUNT == 1);
}

void
assert_thread_sched_count(thread_t thread) {
	/* Only 0 or 1 are acceptable values */
	assert(thread->BG_COUNT == 0 || thread->BG_COUNT == 1);
	assert(thread->SHARE_COUNT == 0 || thread->SHARE_COUNT == 1);

	/* BG is only allowed when you already have a share count */
	if (thread->BG_COUNT == 1)
		assert(thread->SHARE_COUNT == 1);
	if (thread->SHARE_COUNT == 0)
		assert(thread->BG_COUNT == 0);

	if ((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN ||
	    (thread->sched_mode != TH_MODE_TIMESHARE))
		assert(thread->SHARE_COUNT == 0);

	if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN &&
	    (thread->sched_mode == TH_MODE_TIMESHARE))
		assert(thread->SHARE_COUNT == 1);

	if ((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN ||
	    (thread->sched_mode != TH_MODE_TIMESHARE) ||
	    !(thread->sched_flags & TH_SFLAG_THROTTLED))
		assert(thread->BG_COUNT == 0);

	if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN &&
	    (thread->sched_mode == TH_MODE_TIMESHARE) &&
	    (thread->sched_flags & TH_SFLAG_THROTTLED))
		assert(thread->BG_COUNT == 1);
}

#endif /* MACH_ASSERT */
599 | ||
600 | /* | |
601 | * Set the thread's true scheduling mode | |
602 | * Called with thread mutex and thread locked | |
603 | * The thread has already been removed from the runqueue. | |
604 | * | |
605 | * (saved_mode is handled before this point) | |
606 | */ | |
607 | void | |
608 | sched_set_thread_mode(thread_t thread, sched_mode_t new_mode) | |
609 | { | |
610 | assert_thread_sched_count(thread); | |
3e170ce0 | 611 | assert(thread->runq == PROCESSOR_NULL); |
fe8ab488 A |
612 | |
613 | sched_mode_t old_mode = thread->sched_mode; | |
614 | ||
615 | thread->sched_mode = new_mode; | |
616 | ||
617 | switch (new_mode) { | |
618 | case TH_MODE_FIXED: | |
619 | case TH_MODE_REALTIME: | |
620 | if (old_mode == TH_MODE_TIMESHARE) { | |
621 | if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) { | |
622 | if (thread->sched_flags & TH_SFLAG_THROTTLED) | |
623 | sched_background_decr(thread); | |
624 | ||
625 | sched_share_decr(thread); | |
626 | } | |
627 | } | |
628 | break; | |
629 | ||
630 | case TH_MODE_TIMESHARE: | |
631 | if (old_mode != TH_MODE_TIMESHARE) { | |
632 | if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) { | |
633 | sched_share_incr(thread); | |
634 | ||
635 | if (thread->sched_flags & TH_SFLAG_THROTTLED) | |
636 | sched_background_incr(thread); | |
637 | } | |
638 | } | |
639 | break; | |
640 | ||
641 | default: | |
642 | panic("unexpected mode: %d", new_mode); | |
643 | break; | |
644 | } | |
645 | ||
646 | assert_thread_sched_count(thread); | |
647 | } | |
648 | ||
649 | /* | |
650 | * Demote the true scheduler mode to timeshare (called with the thread locked) | |
651 | */ | |
652 | void | |
653 | sched_thread_mode_demote(thread_t thread, uint32_t reason) | |
654 | { | |
655 | assert(reason & TH_SFLAG_DEMOTED_MASK); | |
656 | assert((thread->sched_flags & reason) != reason); | |
657 | assert_thread_sched_count(thread); | |
658 | ||
659 | if (thread->policy_reset) | |
660 | return; | |
661 | ||
662 | if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) { | |
663 | /* Another demotion reason is already active */ | |
664 | thread->sched_flags |= reason; | |
665 | return; | |
666 | } | |
667 | ||
668 | assert(thread->saved_mode == TH_MODE_NONE); | |
669 | ||
670 | boolean_t removed = thread_run_queue_remove(thread); | |
671 | ||
fe8ab488 A |
672 | thread->sched_flags |= reason; |
673 | ||
674 | thread->saved_mode = thread->sched_mode; | |
675 | ||
676 | sched_set_thread_mode(thread, TH_MODE_TIMESHARE); | |
677 | ||
3e170ce0 A |
678 | thread_recompute_priority(thread); |
679 | ||
fe8ab488 | 680 | if (removed) |
3e170ce0 | 681 | thread_run_queue_reinsert(thread, SCHED_TAILQ); |
fe8ab488 A |
682 | |
683 | assert_thread_sched_count(thread); | |
684 | } | |
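
/*
 * For example, the fail-safe in thread_quantum_expire() above trips with
 *	sched_thread_mode_demote(thread, TH_SFLAG_FAILSAFE);
 * and update_priority() undoes it via sched_thread_mode_undemote() once
 * mach_absolute_time() passes thread->safe_release.
 */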
685 | ||
686 | /* | |
687 | * Un-demote the true scheduler mode back to the saved mode (called with the thread locked) | |
688 | */ | |
689 | void | |
690 | sched_thread_mode_undemote(thread_t thread, uint32_t reason) | |
691 | { | |
692 | assert(reason & TH_SFLAG_DEMOTED_MASK); | |
693 | assert((thread->sched_flags & reason) == reason); | |
694 | assert(thread->saved_mode != TH_MODE_NONE); | |
695 | assert(thread->sched_mode == TH_MODE_TIMESHARE); | |
696 | assert(thread->policy_reset == 0); | |
697 | ||
698 | assert_thread_sched_count(thread); | |
699 | ||
700 | thread->sched_flags &= ~reason; | |
701 | ||
702 | if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) { | |
703 | /* Another demotion reason is still active */ | |
704 | return; | |
705 | } | |
706 | ||
707 | boolean_t removed = thread_run_queue_remove(thread); | |
708 | ||
709 | sched_set_thread_mode(thread, thread->saved_mode); | |
710 | ||
711 | thread->saved_mode = TH_MODE_NONE; | |
712 | ||
3e170ce0 | 713 | thread_recompute_priority(thread); |
fe8ab488 A |
714 | |
715 | if (removed) | |
3e170ce0 | 716 | thread_run_queue_reinsert(thread, SCHED_TAILQ); |
fe8ab488 A |
717 | } |
718 | ||
719 | /* | |
720 | * Set the thread to be categorized as 'background' | |
721 | * Called with thread mutex and thread lock held | |
722 | * | |
723 | * TODO: Eventually, 'background' should be a true sched_mode. | |
724 | */ | |
725 | void | |
726 | sched_set_thread_throttled(thread_t thread, boolean_t wants_throttle) | |
727 | { | |
728 | if (thread->policy_reset) | |
729 | return; | |
730 | ||
731 | assert(((thread->sched_flags & TH_SFLAG_THROTTLED) ? TRUE : FALSE) != wants_throttle); | |
732 | ||
733 | assert_thread_sched_count(thread); | |
734 | ||
fe8ab488 A |
735 | if (wants_throttle) { |
736 | thread->sched_flags |= TH_SFLAG_THROTTLED; | |
737 | if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN && thread->sched_mode == TH_MODE_TIMESHARE) { | |
738 | sched_background_incr(thread); | |
739 | } | |
fe8ab488 A |
740 | } else { |
741 | thread->sched_flags &= ~TH_SFLAG_THROTTLED; | |
742 | if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN && thread->sched_mode == TH_MODE_TIMESHARE) { | |
743 | sched_background_decr(thread); | |
744 | } | |
fe8ab488 A |
745 | } |
746 | ||
747 | assert_thread_sched_count(thread); | |
748 | } | |
749 |