[apple/xnu.git] / osfmk / kern / thread_policy.c (xnu-3248.30.4)
1c79356b 1/*
2d21ac55 2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b 27 */
1c79356b 28
29#include <mach/mach_types.h>
30#include <mach/thread_act_server.h>
31
32#include <kern/kern_types.h>
55e303ae 33#include <kern/processor.h>
1c79356b 34#include <kern/thread.h>
2d21ac55 35#include <kern/affinity.h>
36#include <mach/task_policy.h>
37#include <kern/sfi.h>
38
39#include <mach/machine/sdt.h>
40
41#define QOS_EXTRACT(q) ((q) & 0xff)
42
43/*
44 * THREAD_QOS_UNSPECIFIED is assigned the highest tier available, so it does not provide a limit
45 * to threads that don't have a QoS class set.
46 */
47const qos_policy_params_t thread_qos_policy_params = {
48 /*
49 * This table defines the starting base priority of the thread,
50 * which will be modified by the thread importance and the task max priority
51 * before being applied.
52 */
53 .qos_pri[THREAD_QOS_UNSPECIFIED] = 0, /* not consulted */
54 .qos_pri[THREAD_QOS_USER_INTERACTIVE] = BASEPRI_BACKGROUND, /* i.e. 46 */
55 .qos_pri[THREAD_QOS_USER_INITIATED] = BASEPRI_USER_INITIATED,
56 .qos_pri[THREAD_QOS_LEGACY] = BASEPRI_DEFAULT,
57 .qos_pri[THREAD_QOS_UTILITY] = BASEPRI_UTILITY,
58 .qos_pri[THREAD_QOS_BACKGROUND] = MAXPRI_THROTTLE,
59 .qos_pri[THREAD_QOS_MAINTENANCE] = MAXPRI_THROTTLE,
60
61 /*
62 * This table defines the highest IO priority that a thread marked with this
63 * QoS class can have.
64 */
65 .qos_iotier[THREAD_QOS_UNSPECIFIED] = THROTTLE_LEVEL_TIER0,
66 .qos_iotier[THREAD_QOS_USER_INTERACTIVE] = THROTTLE_LEVEL_TIER0,
67 .qos_iotier[THREAD_QOS_USER_INITIATED] = THROTTLE_LEVEL_TIER0,
68 .qos_iotier[THREAD_QOS_LEGACY] = THROTTLE_LEVEL_TIER0,
69 .qos_iotier[THREAD_QOS_UTILITY] = THROTTLE_LEVEL_TIER1,
70 .qos_iotier[THREAD_QOS_BACKGROUND] = THROTTLE_LEVEL_TIER2, /* possibly overridden by bg_iotier */
71 .qos_iotier[THREAD_QOS_MAINTENANCE] = THROTTLE_LEVEL_TIER3,
72
73 /*
 74 * These tables define the highest throughput and latency QoS tiers that
 75 * a thread marked with this QoS class can have.
76 */
77
78 .qos_through_qos[THREAD_QOS_UNSPECIFIED] = QOS_EXTRACT(THROUGHPUT_QOS_TIER_UNSPECIFIED),
79 .qos_through_qos[THREAD_QOS_USER_INTERACTIVE] = QOS_EXTRACT(THROUGHPUT_QOS_TIER_0),
80 .qos_through_qos[THREAD_QOS_USER_INITIATED] = QOS_EXTRACT(THROUGHPUT_QOS_TIER_1),
81 .qos_through_qos[THREAD_QOS_LEGACY] = QOS_EXTRACT(THROUGHPUT_QOS_TIER_1),
82 .qos_through_qos[THREAD_QOS_UTILITY] = QOS_EXTRACT(THROUGHPUT_QOS_TIER_2),
83 .qos_through_qos[THREAD_QOS_BACKGROUND] = QOS_EXTRACT(THROUGHPUT_QOS_TIER_5),
84 .qos_through_qos[THREAD_QOS_MAINTENANCE] = QOS_EXTRACT(THROUGHPUT_QOS_TIER_5),
85
86 .qos_latency_qos[THREAD_QOS_UNSPECIFIED] = QOS_EXTRACT(LATENCY_QOS_TIER_UNSPECIFIED),
87 .qos_latency_qos[THREAD_QOS_USER_INTERACTIVE] = QOS_EXTRACT(LATENCY_QOS_TIER_0),
88 .qos_latency_qos[THREAD_QOS_USER_INITIATED] = QOS_EXTRACT(LATENCY_QOS_TIER_1),
89 .qos_latency_qos[THREAD_QOS_LEGACY] = QOS_EXTRACT(LATENCY_QOS_TIER_1),
90 .qos_latency_qos[THREAD_QOS_UTILITY] = QOS_EXTRACT(LATENCY_QOS_TIER_3),
91 .qos_latency_qos[THREAD_QOS_BACKGROUND] = QOS_EXTRACT(LATENCY_QOS_TIER_3),
92 .qos_latency_qos[THREAD_QOS_MAINTENANCE] = QOS_EXTRACT(LATENCY_QOS_TIER_3),
93};
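/*
 * Worked example (assuming the usual values from osfmk/kern/sched.h, e.g.
 * BASEPRI_UTILITY == 20): a thread marked THREAD_QOS_UTILITY starts out with
 *
 *   base priority   = qos_pri[THREAD_QOS_UTILITY]         = BASEPRI_UTILITY (20)
 *   max IO tier     = qos_iotier[THREAD_QOS_UTILITY]      = THROTTLE_LEVEL_TIER1
 *   throughput tier = qos_through_qos[THREAD_QOS_UTILITY] = QOS_EXTRACT(THROUGHPUT_QOS_TIER_2)
 *   latency tier    = qos_latency_qos[THREAD_QOS_UTILITY] = QOS_EXTRACT(LATENCY_QOS_TIER_3)
 *
 * The base priority is then adjusted by the thread's relative priority and
 * clipped against the task's max priority in thread_recompute_priority().
 */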
94
0b4e3aa0 95static void
3e170ce0 96thread_set_user_sched_mode_and_recompute_pri(thread_t thread, sched_mode_t mode);
97
98static int
99thread_qos_scaled_relative_priority(int qos, int qos_relprio);
100
b0d623f7 101
39236c6e 102extern void proc_get_thread_policy(thread_t thread, thread_policy_state_t info);
b0d623f7 103
104boolean_t
105thread_has_qos_policy(thread_t thread) {
106 return (proc_get_task_policy(thread->task, thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS) != THREAD_QOS_UNSPECIFIED) ? TRUE : FALSE;
107}
108
109kern_return_t
110thread_remove_qos_policy(thread_t thread)
111{
112 thread_qos_policy_data_t unspec_qos;
113 unspec_qos.qos_tier = THREAD_QOS_UNSPECIFIED;
114 unspec_qos.tier_importance = 0;
115
116 __unused int prev_qos = thread->requested_policy.thrp_qos;
117
118 DTRACE_PROC2(qos__remove, thread_t, thread, int, prev_qos);
119
120 return thread_policy_set_internal(thread, THREAD_QOS_POLICY, (thread_policy_t)&unspec_qos, THREAD_QOS_POLICY_COUNT);
121}
122
123boolean_t
124thread_is_static_param(thread_t thread)
125{
126 if (thread->static_param) {
127 DTRACE_PROC1(qos__legacy__denied, thread_t, thread);
128 return TRUE;
129 }
130 return FALSE;
131}
132
133/*
134 * Relative priorities can range between 0REL and -15REL. These
135 * map to QoS-specific ranges, to create non-overlapping priority
136 * ranges.
137 */
138static int
139thread_qos_scaled_relative_priority(int qos, int qos_relprio)
140{
141 int next_lower_qos;
142
143 /* Fast path, since no validation or scaling is needed */
144 if (qos_relprio == 0) return 0;
145
146 switch (qos) {
147 case THREAD_QOS_USER_INTERACTIVE:
148 next_lower_qos = THREAD_QOS_USER_INITIATED;
149 break;
150 case THREAD_QOS_USER_INITIATED:
151 next_lower_qos = THREAD_QOS_LEGACY;
152 break;
153 case THREAD_QOS_LEGACY:
154 next_lower_qos = THREAD_QOS_UTILITY;
155 break;
156 case THREAD_QOS_UTILITY:
157 next_lower_qos = THREAD_QOS_BACKGROUND;
158 break;
159 case THREAD_QOS_MAINTENANCE:
160 case THREAD_QOS_BACKGROUND:
161 next_lower_qos = 0;
162 break;
163 default:
164 panic("Unrecognized QoS %d", qos);
165 return 0;
166 }
167
168 int prio_range_max = thread_qos_policy_params.qos_pri[qos];
169 int prio_range_min = next_lower_qos ? thread_qos_policy_params.qos_pri[next_lower_qos] : 0;
170
171 /*
172 * We now have the valid range that the scaled relative priority can map to. Note
173 * that the lower bound is exclusive, but the upper bound is inclusive. If the
174 * range is (21,31], 0REL should map to 31 and -15REL should map to 22. We use the
175 * fact that the max relative priority is -15 and use ">>4" to divide by 16 and discard
176 * remainder.
177 */
178 int scaled_relprio = -(((prio_range_max - prio_range_min) * (-qos_relprio)) >> 4);
179
180 return scaled_relprio;
181}
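/*
 * Worked example of the scaling above (assuming BASEPRI_DEFAULT == 31 and
 * BASEPRI_UTILITY == 20 from osfmk/kern/sched.h): a THREAD_QOS_LEGACY thread
 * maps into the range (20, 31].
 *
 *   qos_relprio ==   0  =>  scaled ==  0                              => base pri 31
 *   qos_relprio ==  -4  =>  scaled == -(((31 - 20) *  4) >> 4) ==  -2 => base pri 29
 *   qos_relprio == -15  =>  scaled == -(((31 - 20) * 15) >> 4) == -10 => base pri 21
 *
 * Even at -15REL the result (21) stays above BASEPRI_UTILITY (20), keeping the
 * per-QoS priority ranges non-overlapping.
 */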
182
183/*
184 * flag set by -qos-policy-allow boot-arg to allow
185 * testing thread qos policy from userspace
186 */
187boolean_t allow_qos_policy_set = FALSE;
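/*
 * Usage note (a sketch, not taken from this file): on a development system the
 * flag above would typically be enabled by adding the boot-arg, for example
 *
 *   sudo nvram boot-args="-qos-policy-allow"
 *
 * and rebooting, after which userspace callers may pass THREAD_QOS_POLICY or
 * THREAD_QOS_POLICY_OVERRIDE to thread_policy_set() for testing.
 */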
188
189kern_return_t
190thread_policy_set(
91447636 191 thread_t thread,
192 thread_policy_flavor_t flavor,
193 thread_policy_t policy_info,
194 mach_msg_type_number_t count)
195{
196 thread_qos_policy_data_t req_qos;
197 kern_return_t kr;
198
199 req_qos.qos_tier = THREAD_QOS_UNSPECIFIED;
1c79356b 200
91447636 201 if (thread == THREAD_NULL)
202 return (KERN_INVALID_ARGUMENT);
203
204 if (allow_qos_policy_set == FALSE) {
205 if (thread_is_static_param(thread))
206 return (KERN_POLICY_STATIC);
207
208 if (flavor == THREAD_QOS_POLICY || flavor == THREAD_QOS_POLICY_OVERRIDE)
209 return (KERN_INVALID_ARGUMENT);
210 }
211
212 /* Threads without static_param set reset their QoS when other policies are applied. */
213 if (thread->requested_policy.thrp_qos != THREAD_QOS_UNSPECIFIED) {
214 /* Store the existing tier, if we fail this call it is used to reset back. */
215 req_qos.qos_tier = thread->requested_policy.thrp_qos;
216 req_qos.tier_importance = thread->requested_policy.thrp_qos_relprio;
b0d623f7 217
218 kr = thread_remove_qos_policy(thread);
219 if (kr != KERN_SUCCESS) {
220 return kr;
221 }
222 }
223
224 kr = thread_policy_set_internal(thread, flavor, policy_info, count);
225
 226 /* If we removed an existing QoS policy above, restore the original tier when the new policy fails to apply. */
227 if (req_qos.qos_tier != THREAD_QOS_UNSPECIFIED) {
228 if (kr != KERN_SUCCESS) {
229 /* Reset back to our original tier as the set failed. */
230 (void)thread_policy_set_internal(thread, THREAD_QOS_POLICY, (thread_policy_t)&req_qos, THREAD_QOS_POLICY_COUNT);
231 }
232 }
233
234 return kr;
235}
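/*
 * Userspace sketch (illustrative only; assumes the standard Mach interfaces in
 * <mach/thread_policy.h>): marking the calling thread fixed-priority through
 * the thread_policy_set() path handled above.
 *
 *   #include <mach/mach.h>
 *   #include <mach/thread_policy.h>
 *
 *   static kern_return_t
 *   make_self_fixed_priority(void)
 *   {
 *       thread_extended_policy_data_t ext = { .timeshare = FALSE };
 *
 *       return thread_policy_set(mach_thread_self(),
 *                                THREAD_EXTENDED_POLICY,
 *                                (thread_policy_t)&ext,
 *                                THREAD_EXTENDED_POLICY_COUNT);
 *   }
 *
 * mach_thread_self() allocates a port right; a real caller should release it
 * with mach_port_deallocate() when done.
 */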
236
237kern_return_t
238thread_policy_set_internal(
239 thread_t thread,
240 thread_policy_flavor_t flavor,
241 thread_policy_t policy_info,
242 mach_msg_type_number_t count)
243{
244 kern_return_t result = KERN_SUCCESS;
245 spl_t s;
246
247 thread_mtx_lock(thread);
248 if (!thread->active) {
249 thread_mtx_unlock(thread);
250
251 return (KERN_TERMINATED);
252 }
fe8ab488 253
254 switch (flavor) {
255
0b4e3aa0 256 case THREAD_EXTENDED_POLICY:
1c79356b 257 {
258 boolean_t timeshare = TRUE;
259
260 if (count >= THREAD_EXTENDED_POLICY_COUNT) {
261 thread_extended_policy_t info;
262
263 info = (thread_extended_policy_t)policy_info;
264 timeshare = info->timeshare;
265 }
1c79356b 266
267 sched_mode_t mode = (timeshare == TRUE) ? TH_MODE_TIMESHARE : TH_MODE_FIXED;
268
269 s = splsched();
270 thread_lock(thread);
271
3e170ce0 272 thread_set_user_sched_mode_and_recompute_pri(thread, mode);
273
274 thread_unlock(thread);
275 splx(s);
276
277 sfi_reevaluate(thread);
278
279 break;
280 }
281
282 case THREAD_TIME_CONSTRAINT_POLICY:
283 {
284 thread_time_constraint_policy_t info;
285
286 if (count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
287 result = KERN_INVALID_ARGUMENT;
288 break;
289 }
290
291 info = (thread_time_constraint_policy_t)policy_info;
292 if ( info->constraint < info->computation ||
293 info->computation > max_rt_quantum ||
294 info->computation < min_rt_quantum ) {
295 result = KERN_INVALID_ARGUMENT;
296 break;
297 }
298
299 s = splsched();
300 thread_lock(thread);
301
302 thread->realtime.period = info->period;
303 thread->realtime.computation = info->computation;
304 thread->realtime.constraint = info->constraint;
305 thread->realtime.preemptible = info->preemptible;
306
3e170ce0 307 thread_set_user_sched_mode_and_recompute_pri(thread, TH_MODE_REALTIME);
308
309 thread_unlock(thread);
310 splx(s);
311
3e170ce0 312 sfi_reevaluate(thread);
fe8ab488 313
314 break;
315 }
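/*
 * Userspace sketch for the case above (illustrative only): a real-time thread
 * typically converts nanoseconds to mach absolute time units before asking for
 * THREAD_TIME_CONSTRAINT_POLICY.
 *
 *   #include <mach/mach.h>
 *   #include <mach/mach_time.h>
 *   #include <mach/thread_policy.h>
 *
 *   static kern_return_t
 *   make_self_realtime(uint64_t period_ns, uint64_t computation_ns, uint64_t constraint_ns)
 *   {
 *       mach_timebase_info_data_t tb;
 *       mach_timebase_info(&tb);
 *
 *       thread_time_constraint_policy_data_t pol = {
 *           .period      = (uint32_t)(period_ns      * tb.denom / tb.numer),
 *           .computation = (uint32_t)(computation_ns * tb.denom / tb.numer),
 *           .constraint  = (uint32_t)(constraint_ns  * tb.denom / tb.numer),
 *           .preemptible = TRUE,
 *       };
 *
 *       return thread_policy_set(mach_thread_self(),
 *                                THREAD_TIME_CONSTRAINT_POLICY,
 *                                (thread_policy_t)&pol,
 *                                THREAD_TIME_CONSTRAINT_POLICY_COUNT);
 *   }
 *
 * Requests whose computation falls outside [min_rt_quantum, max_rt_quantum] or
 * exceeds the constraint are rejected with KERN_INVALID_ARGUMENT above.
 */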
316
317 case THREAD_PRECEDENCE_POLICY:
318 {
319 thread_precedence_policy_t info;
320
321 if (count < THREAD_PRECEDENCE_POLICY_COUNT) {
322 result = KERN_INVALID_ARGUMENT;
323 break;
324 }
325 info = (thread_precedence_policy_t)policy_info;
326
327 s = splsched();
328 thread_lock(thread);
329
330 thread->importance = info->importance;
331
0b4e3aa0 332 thread_recompute_priority(thread);
333
334 thread_unlock(thread);
335 splx(s);
336
337 break;
338 }
339
340 case THREAD_AFFINITY_POLICY:
341 {
342 thread_affinity_policy_t info;
343
344 if (!thread_affinity_is_supported()) {
345 result = KERN_NOT_SUPPORTED;
346 break;
347 }
348 if (count < THREAD_AFFINITY_POLICY_COUNT) {
349 result = KERN_INVALID_ARGUMENT;
350 break;
351 }
352
353 info = (thread_affinity_policy_t) policy_info;
354 /*
355 * Unlock the thread mutex here and
356 * return directly after calling thread_affinity_set().
357 * This is necessary for correct lock ordering because
358 * thread_affinity_set() takes the task lock.
359 */
360 thread_mtx_unlock(thread);
361 return thread_affinity_set(thread, info->affinity_tag);
362 }
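/*
 * Userspace sketch for the affinity case (illustrative only): threads that
 * should share an affinity set (for example an L2 cache, where the platform
 * supports it) are tagged with the same non-zero tag; threads with different
 * tags tend to be steered apart.
 *
 *   thread_affinity_policy_data_t ap = { .affinity_tag = 1 };
 *   thread_policy_set(mach_thread_self(), THREAD_AFFINITY_POLICY,
 *                     (thread_policy_t)&ap, THREAD_AFFINITY_POLICY_COUNT);
 */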
6d2010ae 363
364 case THREAD_THROUGHPUT_QOS_POLICY:
365 {
366 thread_throughput_qos_policy_t info = (thread_throughput_qos_policy_t) policy_info;
367 int tqos;
368
 369 if (count < THREAD_THROUGHPUT_QOS_POLICY_COUNT) {
370 result = KERN_INVALID_ARGUMENT;
371 break;
372 }
373
374 if ((result = qos_throughput_policy_validate(info->thread_throughput_qos_tier)) !=
375 KERN_SUCCESS) {
376 break;
377 }
378
379 tqos = qos_extract(info->thread_throughput_qos_tier);
380 thread->effective_policy.t_through_qos = tqos;
381 }
382 break;
383
384 case THREAD_LATENCY_QOS_POLICY:
385 {
386 thread_latency_qos_policy_t info = (thread_latency_qos_policy_t) policy_info;
387 int lqos;
388
 389 if (count < THREAD_LATENCY_QOS_POLICY_COUNT) {
390 result = KERN_INVALID_ARGUMENT;
391 break;
392 }
393
394 if ((result = qos_latency_policy_validate(info->thread_latency_qos_tier)) !=
395 KERN_SUCCESS) {
396 break;
397 }
398
399 lqos = qos_extract(info->thread_latency_qos_tier);
400/* The expected use cases (opt-in) of per-thread latency QoS would seem to
401 * preclude any requirement at present to re-evaluate timers on a thread level
402 * latency QoS change.
403 */
404 thread->effective_policy.t_latency_qos = lqos;
405
406 }
407 break;
408
409 case THREAD_QOS_POLICY:
410 case THREAD_QOS_POLICY_OVERRIDE:
411 {
412 thread_qos_policy_t info = (thread_qos_policy_t)policy_info;
413
414 if (count < THREAD_QOS_POLICY_COUNT) {
415 result = KERN_INVALID_ARGUMENT;
416 break;
417 }
418
419 if (info->qos_tier < 0 || info->qos_tier >= THREAD_QOS_LAST) {
420 result = KERN_INVALID_ARGUMENT;
421 break;
422 }
423
424 if (info->tier_importance > 0 || info->tier_importance < THREAD_QOS_MIN_TIER_IMPORTANCE) {
425 result = KERN_INVALID_ARGUMENT;
426 break;
427 }
428
429 if (info->qos_tier == THREAD_QOS_UNSPECIFIED && info->tier_importance != 0) {
430 result = KERN_INVALID_ARGUMENT;
431 break;
432 }
433
434 /*
435 * Going into task policy requires the task mutex,
436 * because of the way synchronization against the IO policy
437 * subsystem works.
438 *
439 * We need to move thread policy to the thread mutex instead.
440 * <rdar://problem/15831652> separate thread policy from task policy
441 */
442
443 if (flavor == THREAD_QOS_POLICY_OVERRIDE) {
444 int strongest_override = info->qos_tier;
445
446 if (info->qos_tier != THREAD_QOS_UNSPECIFIED &&
447 thread->requested_policy.thrp_qos_override != THREAD_QOS_UNSPECIFIED)
448 strongest_override = MAX(thread->requested_policy.thrp_qos_override, info->qos_tier);
449
450 thread_mtx_unlock(thread);
451
452 /* There is a race here. To be closed in <rdar://problem/15831652> separate thread policy from task policy */
453
454 proc_set_task_policy(thread->task, thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_OVERRIDE, strongest_override);
455
456 return (result);
457 }
458
459 thread_mtx_unlock(thread);
460
461 proc_set_task_policy2(thread->task, thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_AND_RELPRIO, info->qos_tier, -info->tier_importance);
462
463 thread_mtx_lock(thread);
464 if (!thread->active) {
465 thread_mtx_unlock(thread);
466 return (KERN_TERMINATED);
467 }
468
469 break;
470 }
6d2010ae 471
472 default:
473 result = KERN_INVALID_ARGUMENT;
474 break;
475 }
476
91447636 477 thread_mtx_unlock(thread);
1c79356b
A
478 return (result);
479}
480
481/*
482 * thread_set_mode_and_absolute_pri:
483 *
484 * Set scheduling policy & absolute priority for thread, for deprecated
485 * thread_set_policy and thread_policy interfaces.
486 *
487 * Note that there is no implemented difference between POLICY_RR and POLICY_FIFO.
488 * Both result in FIXED mode scheduling.
489 *
490 * Called with thread mutex locked.
491 */
492kern_return_t
493thread_set_mode_and_absolute_pri(
494 thread_t thread,
495 integer_t policy,
496 integer_t priority)
497{
498 spl_t s;
499 sched_mode_t mode;
500 kern_return_t kr = KERN_SUCCESS;
501
502 if (thread_is_static_param(thread))
503 return (KERN_POLICY_STATIC);
504
505 if (thread->policy_reset)
506 return (KERN_SUCCESS);
507
508 /* Setting legacy policies on threads kills the current QoS */
509 if (thread->requested_policy.thrp_qos != THREAD_QOS_UNSPECIFIED) {
510 thread_mtx_unlock(thread);
511
512 kr = thread_remove_qos_policy(thread);
513
514 thread_mtx_lock(thread);
515 if (!thread->active) {
516 return (KERN_TERMINATED);
517 }
518 }
519
520 switch (policy) {
521 case POLICY_TIMESHARE:
522 mode = TH_MODE_TIMESHARE;
523 break;
524 case POLICY_RR:
525 case POLICY_FIFO:
526 mode = TH_MODE_FIXED;
527 break;
528 default:
529 panic("unexpected sched policy: %d", policy);
530 break;
531 }
532
533 s = splsched();
534 thread_lock(thread);
535
536 /* This path isn't allowed to change a thread out of realtime. */
537 if ((thread->sched_mode != TH_MODE_REALTIME) &&
538 (thread->saved_mode != TH_MODE_REALTIME)) {
539
540 /*
541 * Reverse engineer and apply the correct importance value
542 * from the requested absolute priority value.
543 */
544
545 if (priority >= thread->max_priority)
546 priority = thread->max_priority - thread->task_priority;
547 else if (priority >= MINPRI_KERNEL)
548 priority -= MINPRI_KERNEL;
549 else if (priority >= MINPRI_RESERVED)
550 priority -= MINPRI_RESERVED;
551 else
552 priority -= BASEPRI_DEFAULT;
553
554 priority += thread->task_priority;
555
556 if (priority > thread->max_priority)
557 priority = thread->max_priority;
558 else if (priority < MINPRI)
559 priority = MINPRI;
560
561 thread->importance = priority - thread->task_priority;
562
3e170ce0 563 thread_set_user_sched_mode_and_recompute_pri(thread, mode);
564 }
565
566 thread_unlock(thread);
567 splx(s);
568
569 sfi_reevaluate(thread);
570
571 return (kr);
572}
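/*
 * Worked example of the importance calculation above (assuming
 * BASEPRI_DEFAULT == 31, MINPRI_RESERVED == 64 and MINPRI_KERNEL == 80 from
 * osfmk/kern/sched.h): a caller asks for absolute priority 40 on a thread with
 * task_priority == 31 and max_priority == 63.
 *
 *   40 is below max_priority, MINPRI_KERNEL and MINPRI_RESERVED,
 *   so priority -= BASEPRI_DEFAULT      -> 40 - 31 =  9
 *   priority += task_priority           ->  9 + 31 = 40
 *   clamp to [MINPRI, max_priority]     ->           40
 *   thread->importance = 40 - 31        ->            9
 *
 * In other words the requested absolute priority is re-expressed as an
 * importance relative to the task's base priority before the mode is applied.
 */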
573
574/*
3e170ce0 575 * Set the thread's requested mode and recompute priority
fe8ab488 576 * Called with thread mutex and thread locked
577 *
578 * TODO: Mitigate potential problems caused by moving thread to end of runq
579 * whenever its priority is recomputed
580 * Only remove when it actually changes? Attempt to re-insert at appropriate location?
581 */
582static void
3e170ce0 583thread_set_user_sched_mode_and_recompute_pri(thread_t thread, sched_mode_t mode)
584{
585 if (thread->policy_reset)
586 return;
587
588 boolean_t removed = thread_run_queue_remove(thread);
589
590 /*
591 * TODO: Instead of having saved mode, have 'user mode' and 'true mode'.
592 * That way there's zero confusion over which the user wants
593 * and which the kernel wants.
594 */
595 if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK)
596 thread->saved_mode = mode;
597 else
598 sched_set_thread_mode(thread, mode);
599
600 thread_recompute_priority(thread);
601
602 if (removed)
603 thread_run_queue_reinsert(thread, SCHED_TAILQ);
604}
605
606/* called with task lock locked */
607void
608thread_recompute_qos(thread_t thread) {
609 spl_t s;
610
611 thread_mtx_lock(thread);
612
613 if (!thread->active) {
614 thread_mtx_unlock(thread);
615 return;
616 }
617
618 s = splsched();
619 thread_lock(thread);
620
621 thread_recompute_priority(thread);
622
623 thread_unlock(thread);
624 splx(s);
625
626 thread_mtx_unlock(thread);
627}
628
629/* called with task lock locked and thread_mtx_lock locked */
630void
631thread_update_qos_cpu_time(thread_t thread, boolean_t lock_needed)
632{
633 uint64_t last_qos_change_balance;
634 ledger_amount_t thread_balance_credit;
635 ledger_amount_t thread_balance_debit;
636 ledger_amount_t effective_qos_time;
637 uint64_t ctime;
638 uint64_t remainder = 0, consumed = 0;
639 processor_t processor;
640 spl_t s;
641 kern_return_t kr;
642
643 if (lock_needed) {
644 s = splsched();
645 thread_lock(thread);
646 }
647
648 /*
 649 * Calculate the time the thread has spent at its current QoS.
 650 * The timeline below shows all the variables used in the calculation.
651 *
652 * thread ledger thread ledger
653 * cpu_time_last_qos cpu_time
654 * | |<- consumed ->|<- remainder ->|
655 * timeline ----------------------------------------------------------->
656 * | | |
657 * thread_dispatch ctime quantum end
658 *
659 * |<----- effective qos time ----->|
660 */
661
662 /*
 663 * Calculate the time elapsed since the last QoS change on this thread.
 664 * For cpu time on the thread ledger, do not use ledger_get_balance;
 665 * use only the credit field of the ledger, since the debit field
 666 * is used by per-thread cpu limits and is not zero.
667 */
668 kr = ledger_get_entries(thread->t_threadledger, thread_ledgers.cpu_time, &thread_balance_credit, &thread_balance_debit);
669 if (kr != KERN_SUCCESS)
670 goto out;
671 last_qos_change_balance = thread->cpu_time_last_qos;
672
673 /*
 674 * If the thread is running on a CPU, calculate the time elapsed since it was last dispatched.
 675 * The thread ledger is only updated at context switch; the time since the last context switch
 676 * is not yet reflected in the thread ledger cpu time.
677 */
678 processor = thread->last_processor;
679 if ((processor != PROCESSOR_NULL) && (processor->state == PROCESSOR_RUNNING) &&
680 (processor->active_thread == thread)) {
681 ctime = mach_absolute_time();
682
683 if (processor->quantum_end > ctime)
684 remainder = processor->quantum_end - ctime;
685
686 consumed = thread->quantum_remaining - remainder;
687 }
688 /*
 689 * There can be multiple QoS changes within a quantum, in which case cpu_time_last_qos will
 690 * lie between the cpu_time marker and the ctime marker shown below. The result of
 691 * thread_balance - last_qos_change_balance is negative in that case, but the overall outcome
 692 * becomes positive once consumed is added to it.
693 *
694 * thread ledger
695 * cpu_time
696 * |<------------ consumed --------->|<- remainder ->|
697 * timeline ----------------------------------------------------------->
698 * | | | |
699 * thread_dispatch thread ledger ctime quantum end
700 * cpu_time_last_qos
701 *
702 * |<-effective qos time->|
703 */
704 effective_qos_time = (ledger_amount_t) consumed;
705 effective_qos_time += thread_balance_credit - last_qos_change_balance;
706
707 if (lock_needed) {
708 thread_unlock(thread);
709 splx(s);
710 }
711
712 if (effective_qos_time < 0)
713 return;
714
715 thread->cpu_time_last_qos += (uint64_t)effective_qos_time;
716
717 /*
 718 * Update the task-level qos stats. It's safe to perform operations on these fields, since we
719 * hold the task lock.
720 */
721 switch (thread->effective_policy.thep_qos) {
722
723 case THREAD_QOS_DEFAULT:
724 thread->task->cpu_time_qos_stats.cpu_time_qos_default += effective_qos_time;
725 break;
726
727 case THREAD_QOS_MAINTENANCE:
728 thread->task->cpu_time_qos_stats.cpu_time_qos_maintenance += effective_qos_time;
729 break;
730
731 case THREAD_QOS_BACKGROUND:
732 thread->task->cpu_time_qos_stats.cpu_time_qos_background += effective_qos_time;
733 break;
734
735 case THREAD_QOS_UTILITY:
736 thread->task->cpu_time_qos_stats.cpu_time_qos_utility += effective_qos_time;
737 break;
738
739 case THREAD_QOS_LEGACY:
740 thread->task->cpu_time_qos_stats.cpu_time_qos_legacy += effective_qos_time;
741 break;
742
743 case THREAD_QOS_USER_INITIATED:
744 thread->task->cpu_time_qos_stats.cpu_time_qos_user_initiated += effective_qos_time;
745 break;
746
747 case THREAD_QOS_USER_INTERACTIVE:
748 thread->task->cpu_time_qos_stats.cpu_time_qos_user_interactive += effective_qos_time;
749 break;
750 }
751
752 return;
753
754out:
755 if (lock_needed) {
756 thread_unlock(thread);
757 splx(s);
758 }
759}
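/*
 * Numeric example of the calculation above: suppose the thread ledger shows
 * 5 ms of cpu_time credit, cpu_time_last_qos was recorded at 3 ms, and the
 * thread is currently on-core with consumed == 1 ms of its quantum. Then
 *
 *   effective_qos_time = consumed + (thread_balance_credit - last_qos_change_balance)
 *                      = 1 ms + (5 ms - 3 ms)
 *                      = 3 ms
 *
 * and those 3 ms are charged to the task-level bucket for the thread's current
 * effective QoS before cpu_time_last_qos is advanced.
 */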
760
761/*
762 * Calculate base priority from thread attributes, and set it on the thread
763 *
764 * Called with thread_lock and thread mutex held.
765 */
3e170ce0 766void
767thread_recompute_priority(
768 thread_t thread)
769{
770 integer_t priority;
771
772 if (thread->policy_reset)
773 return;
774
775 if (thread->sched_mode == TH_MODE_REALTIME) {
776 sched_set_thread_base_priority(thread, BASEPRI_RTQUEUES);
777 return;
778 } else if (thread->effective_policy.thep_qos != THREAD_QOS_UNSPECIFIED) {
779 int qos = thread->effective_policy.thep_qos;
780 int qos_ui_is_urgent = thread->effective_policy.qos_ui_is_urgent;
781 int qos_relprio = -(thread->effective_policy.thep_qos_relprio); /* stored in task policy inverted */
782 int qos_scaled_relprio;
783
784 assert(qos >= 0 && qos < THREAD_QOS_LAST);
785 assert(qos_relprio <= 0 && qos_relprio >= THREAD_QOS_MIN_TIER_IMPORTANCE);
786
787 priority = thread_qos_policy_params.qos_pri[qos];
788 qos_scaled_relprio = thread_qos_scaled_relative_priority(qos, qos_relprio);
789
790 if (qos == THREAD_QOS_USER_INTERACTIVE && qos_ui_is_urgent == 1) {
791 /* Bump priority 46 to 47 when in a frontmost app */
792 qos_scaled_relprio += 1;
793 }
794
795 priority += qos_scaled_relprio;
796 } else {
797 if (thread->importance > MAXPRI)
798 priority = MAXPRI;
fe8ab488 799 else if (thread->importance < -MAXPRI)
800 priority = -MAXPRI;
801 else
802 priority = thread->importance;
803
804 priority += thread->task_priority;
805 }
806
807 if (thread->saved_mode == TH_MODE_REALTIME &&
808 thread->sched_flags & TH_SFLAG_FAILSAFE)
809 priority = DEPRESSPRI;
810
811 if (thread->effective_policy.terminated == TRUE && priority < thread->task_priority) {
812 priority = thread->task_priority;
813 }
814
815 if (priority > thread->max_priority)
816 priority = thread->max_priority;
817 else if (priority < MINPRI)
818 priority = MINPRI;
0b4e3aa0 819
6d2010ae 820
821 sched_set_thread_base_priority(thread, priority);
822}
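/*
 * Worked example of the QoS branch above: a THREAD_QOS_USER_INTERACTIVE thread
 * with thep_qos_relprio == 0 in a frontmost app (qos_ui_is_urgent == 1) gets
 *
 *   priority = qos_pri[THREAD_QOS_USER_INTERACTIVE] + 0 + 1 = 46 + 1 = 47
 *
 * which is then clipped against thread->max_priority and MINPRI before
 * sched_set_thread_base_priority() applies it.
 */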
823
824/* Called with the thread mutex held */
825void
826thread_task_priority(
827 thread_t thread,
828 integer_t priority,
829 integer_t max_priority)
830{
fe8ab488 831 spl_t s;
832
833 assert(thread != THREAD_NULL);
834
835 if (!thread->active || thread->policy_reset)
836 return;
837
838 s = splsched();
839 thread_lock(thread);
840
fe8ab488 841 integer_t old_max_priority = thread->max_priority;
6d2010ae 842
843 thread->task_priority = priority;
844 thread->max_priority = max_priority;
845
846 /* A thread is 'throttled' when its max priority is below MAXPRI_THROTTLE */
847 if ((max_priority > MAXPRI_THROTTLE) && (old_max_priority <= MAXPRI_THROTTLE)) {
848 sched_set_thread_throttled(thread, FALSE);
849 } else if ((max_priority <= MAXPRI_THROTTLE) && (old_max_priority > MAXPRI_THROTTLE)) {
850 sched_set_thread_throttled(thread, TRUE);
851 }
852
853 thread_recompute_priority(thread);
854
855 thread_unlock(thread);
856 splx(s);
857}
858
859/*
860 * Reset thread to default state in preparation for termination
861 * Called with thread mutex locked
862 *
863 * Always called on current thread, so we don't need a run queue remove
864 */
865void
866thread_policy_reset(
867 thread_t thread)
868{
869 spl_t s;
870
871 assert(thread == current_thread());
872
873 s = splsched();
874 thread_lock(thread);
875
fe8ab488 876 assert_thread_sched_count(thread);
91447636 877
878 if (thread->sched_flags & TH_SFLAG_FAILSAFE)
879 sched_thread_mode_undemote(thread, TH_SFLAG_FAILSAFE);
91447636 880
fe8ab488 881 assert_thread_sched_count(thread);
39236c6e 882
883 if (thread->sched_flags & TH_SFLAG_THROTTLED)
884 sched_set_thread_throttled(thread, FALSE);
885
886 assert_thread_sched_count(thread);
887
888 assert(thread->BG_COUNT == 0);
889
890 /* At this point, the various demotions should be inactive */
891 assert(!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK));
892 assert(!(thread->sched_flags & TH_SFLAG_THROTTLED));
893 assert(!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK));
894
895 /* Reset thread back to task-default basepri and mode */
896 sched_mode_t newmode = SCHED(initial_thread_sched_mode)(thread->task);
897
898 sched_set_thread_mode(thread, newmode);
899
900 thread->importance = 0;
901
902 sched_set_thread_base_priority(thread, thread->task_priority);
903
904 /* Prevent further changes to thread base priority or mode */
905 thread->policy_reset = 1;
906
907 assert(thread->BG_COUNT == 0);
908 assert_thread_sched_count(thread);
909
910 thread_unlock(thread);
911 splx(s);
912}
913
914kern_return_t
915thread_policy_get(
91447636 916 thread_t thread,
917 thread_policy_flavor_t flavor,
918 thread_policy_t policy_info,
919 mach_msg_type_number_t *count,
920 boolean_t *get_default)
921{
922 kern_return_t result = KERN_SUCCESS;
923 spl_t s;
924
91447636 925 if (thread == THREAD_NULL)
926 return (KERN_INVALID_ARGUMENT);
927
928 thread_mtx_lock(thread);
929 if (!thread->active) {
930 thread_mtx_unlock(thread);
931
932 return (KERN_TERMINATED);
933 }
934
935 switch (flavor) {
936
937 case THREAD_EXTENDED_POLICY:
938 {
939 boolean_t timeshare = TRUE;
1c79356b 940
941 if (!(*get_default)) {
942 s = splsched();
943 thread_lock(thread);
944
945 if ( (thread->sched_mode != TH_MODE_REALTIME) &&
946 (thread->saved_mode != TH_MODE_REALTIME) ) {
947 if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK))
948 timeshare = (thread->sched_mode == TH_MODE_TIMESHARE) != 0;
0b4e3aa0 949 else
6d2010ae 950 timeshare = (thread->saved_mode == TH_MODE_TIMESHARE) != 0;
951 }
952 else
953 *get_default = TRUE;
954
955 thread_unlock(thread);
956 splx(s);
957 }
958
959 if (*count >= THREAD_EXTENDED_POLICY_COUNT) {
960 thread_extended_policy_t info;
961
962 info = (thread_extended_policy_t)policy_info;
963 info->timeshare = timeshare;
964 }
1c79356b 965
1c79356b 966 break;
0b4e3aa0 967 }
968
969 case THREAD_TIME_CONSTRAINT_POLICY:
970 {
971 thread_time_constraint_policy_t info;
972
973 if (*count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
974 result = KERN_INVALID_ARGUMENT;
975 break;
976 }
977
978 info = (thread_time_constraint_policy_t)policy_info;
979
980 if (!(*get_default)) {
981 s = splsched();
982 thread_lock(thread);
1c79356b 983
984 if ( (thread->sched_mode == TH_MODE_REALTIME) ||
985 (thread->saved_mode == TH_MODE_REALTIME) ) {
986 info->period = thread->realtime.period;
987 info->computation = thread->realtime.computation;
988 info->constraint = thread->realtime.constraint;
989 info->preemptible = thread->realtime.preemptible;
990 }
991 else
992 *get_default = TRUE;
1c79356b 993
994 thread_unlock(thread);
995 splx(s);
996 }
1c79356b 997
0b4e3aa0 998 if (*get_default) {
1c79356b 999 info->period = 0;
1000 info->computation = default_timeshare_computation;
1001 info->constraint = default_timeshare_constraint;
1002 info->preemptible = TRUE;
1003 }
1004
1005 break;
1006 }
1007
1008 case THREAD_PRECEDENCE_POLICY:
1009 {
1010 thread_precedence_policy_t info;
1011
1012 if (*count < THREAD_PRECEDENCE_POLICY_COUNT) {
1013 result = KERN_INVALID_ARGUMENT;
1014 break;
1015 }
1016
1017 info = (thread_precedence_policy_t)policy_info;
1018
0b4e3aa0 1019 if (!(*get_default)) {
1020 s = splsched();
1021 thread_lock(thread);
1022
1023 info->importance = thread->importance;
1024
1025 thread_unlock(thread);
1026 splx(s);
1027 }
1028 else
1029 info->importance = 0;
1030
1031 break;
1032 }
1033
1034 case THREAD_AFFINITY_POLICY:
1035 {
1036 thread_affinity_policy_t info;
1037
1038 if (!thread_affinity_is_supported()) {
1039 result = KERN_NOT_SUPPORTED;
1040 break;
1041 }
1042 if (*count < THREAD_AFFINITY_POLICY_COUNT) {
1043 result = KERN_INVALID_ARGUMENT;
1044 break;
1045 }
1046
1047 info = (thread_affinity_policy_t)policy_info;
1048
1049 if (!(*get_default))
1050 info->affinity_tag = thread_affinity_get(thread);
1051 else
1052 info->affinity_tag = THREAD_AFFINITY_TAG_NULL;
1053
1054 break;
1055 }
1056
1057 case THREAD_POLICY_STATE:
1058 {
1059 thread_policy_state_t info;
1060
1061 if (*count < THREAD_POLICY_STATE_COUNT) {
1062 result = KERN_INVALID_ARGUMENT;
1063 break;
1064 }
1065
1066 /* Only root can get this info */
1067 if (current_task()->sec_token.val[0] != 0) {
1068 result = KERN_PROTECTION_FAILURE;
1069 break;
1070 }
1071
1072 info = (thread_policy_state_t)policy_info;
1073
1074 if (!(*get_default)) {
1075 info->flags = 0;
1076
1077 info->flags |= (thread->static_param ? THREAD_POLICY_STATE_FLAG_STATIC_PARAM : 0);
1078
1079 /*
1080 * Unlock the thread mutex and directly return.
1081 * This is necessary because proc_get_thread_policy()
1082 * takes the task lock.
1083 */
1084 thread_mtx_unlock(thread);
1085 proc_get_thread_policy(thread, info);
1086 return (result);
1087 } else {
1088 info->requested = 0;
1089 info->effective = 0;
1090 info->pending = 0;
1091 }
1092
1093 break;
1094 }
1095
1096 case THREAD_LATENCY_QOS_POLICY:
1097 {
1098 thread_latency_qos_policy_t info = (thread_latency_qos_policy_t) policy_info;
1099 uint32_t plqos;
1100
1101 if (*count < THREAD_LATENCY_QOS_POLICY_COUNT) {
1102 result = KERN_INVALID_ARGUMENT;
1103 break;
1104 }
1105
1106 if (*get_default) {
1107 plqos = 0;
1108 } else {
1109 plqos = thread->effective_policy.t_latency_qos;
1110 }
1111
1112 info->thread_latency_qos_tier = qos_latency_policy_package(plqos);
1113 }
1114 break;
1115
1116 case THREAD_THROUGHPUT_QOS_POLICY:
1117 {
1118 thread_throughput_qos_policy_t info = (thread_throughput_qos_policy_t) policy_info;
1119 uint32_t ptqos;
1120
1121 if (*count < THREAD_THROUGHPUT_QOS_POLICY_COUNT) {
1122 result = KERN_INVALID_ARGUMENT;
1123 break;
1124 }
1125
1126 if (*get_default) {
1127 ptqos = 0;
1128 } else {
1129 ptqos = thread->effective_policy.t_through_qos;
1130 }
1131
1132 info->thread_throughput_qos_tier = qos_throughput_policy_package(ptqos);
1133 }
1134 break;
1135
1136 case THREAD_QOS_POLICY:
1137 case THREAD_QOS_POLICY_OVERRIDE:
1138 {
1139 thread_qos_policy_t info = (thread_qos_policy_t)policy_info;
1140
1141 if (*count < THREAD_QOS_POLICY_COUNT) {
1142 result = KERN_INVALID_ARGUMENT;
1143 break;
1144 }
1145
1146 if (!(*get_default)) {
1147 if (flavor == THREAD_QOS_POLICY_OVERRIDE) {
1148 info->qos_tier = thread->requested_policy.thrp_qos_override;
1149 /* TODO: handle importance overrides */
1150 info->tier_importance = 0;
1151 } else {
1152 info->qos_tier = thread->requested_policy.thrp_qos;
1153 info->tier_importance = thread->importance;
1154 }
1155 } else {
1156 info->qos_tier = THREAD_QOS_UNSPECIFIED;
1157 info->tier_importance = 0;
1158 }
1159
1160 break;
1161 }
39236c6e 1162
1163 default:
1164 result = KERN_INVALID_ARGUMENT;
1165 break;
1166 }
1167
91447636 1168 thread_mtx_unlock(thread);
1169
1170 return (result);
1171}
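/*
 * Userspace sketch (illustrative only): querying a thread's extended policy
 * through the interface above. get_default is an in/out flag: pass FALSE to
 * read the current setting; it comes back TRUE if the thread is still at the
 * default.
 *
 *   thread_extended_policy_data_t ext;
 *   mach_msg_type_number_t count = THREAD_EXTENDED_POLICY_COUNT;
 *   boolean_t get_default = FALSE;
 *
 *   kern_return_t kr = thread_policy_get(mach_thread_self(),
 *                                        THREAD_EXTENDED_POLICY,
 *                                        (thread_policy_t)&ext,
 *                                        &count, &get_default);
 *
 * On success, ext.timeshare reports whether the thread is timeshare or fixed.
 */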
1172
1173static volatile uint64_t unique_work_interval_id = 1; /* Start at 1, 0 is not a valid work interval ID */
1174
1175kern_return_t
1176thread_policy_create_work_interval(
1177 thread_t thread,
1178 uint64_t *work_interval_id)
1179{
1180 thread_mtx_lock(thread);
1181 if (thread->work_interval_id) {
1182 /* already assigned a work interval ID */
1183 thread_mtx_unlock(thread);
1184 return (KERN_INVALID_VALUE);
1185 }
1186
1187 thread->work_interval_id = OSIncrementAtomic64((volatile int64_t *)&unique_work_interval_id);
1188 *work_interval_id = thread->work_interval_id;
1189
1190 thread_mtx_unlock(thread);
1191 return KERN_SUCCESS;
1192}
1193
1194kern_return_t
1195thread_policy_destroy_work_interval(
1196 thread_t thread,
1197 uint64_t work_interval_id)
1198{
1199 thread_mtx_lock(thread);
1200 if (work_interval_id == 0 || thread->work_interval_id == 0 || thread->work_interval_id != work_interval_id) {
1201 /* work ID isn't valid or doesn't match previously assigned work interval ID */
1202 thread_mtx_unlock(thread);
1203 return (KERN_INVALID_ARGUMENT);
1204 }
1205
1206 thread->work_interval_id = 0;
1207
1208 thread_mtx_unlock(thread);
1209 return KERN_SUCCESS;
1210}