/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/thread_act_server.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/affinity.h>
#include <mach/task_policy.h>
#include <kern/sfi.h>

#include <mach/machine/sdt.h>
#define QOS_EXTRACT(q)        ((q) & 0xff)

/*
 * THREAD_QOS_UNSPECIFIED is assigned the highest tier available, so it does not provide a limit
 * to threads that don't have a QoS class set.
 */
const qos_policy_params_t thread_qos_policy_params = {
    /*
     * This table defines the starting base priority of the thread,
     * which will be modified by the thread importance and the task max priority
     * before being applied.
     */
    .qos_pri[THREAD_QOS_UNSPECIFIED]                = 0, /* not consulted */
    .qos_pri[THREAD_QOS_USER_INTERACTIVE]           = BASEPRI_BACKGROUND, /* i.e. 46 */
    .qos_pri[THREAD_QOS_USER_INITIATED]             = BASEPRI_USER_INITIATED,
    .qos_pri[THREAD_QOS_LEGACY]                     = BASEPRI_DEFAULT,
    .qos_pri[THREAD_QOS_UTILITY]                    = BASEPRI_UTILITY,
    .qos_pri[THREAD_QOS_BACKGROUND]                 = MAXPRI_THROTTLE,
    .qos_pri[THREAD_QOS_MAINTENANCE]                = MAXPRI_THROTTLE,
    /*
     * This table defines the highest IO priority that a thread marked with this
     * QoS class can have.
     */
    .qos_iotier[THREAD_QOS_UNSPECIFIED]             = THROTTLE_LEVEL_TIER0,
    .qos_iotier[THREAD_QOS_USER_INTERACTIVE]        = THROTTLE_LEVEL_TIER0,
    .qos_iotier[THREAD_QOS_USER_INITIATED]          = THROTTLE_LEVEL_TIER0,
    .qos_iotier[THREAD_QOS_LEGACY]                  = THROTTLE_LEVEL_TIER0,
    .qos_iotier[THREAD_QOS_UTILITY]                 = THROTTLE_LEVEL_TIER1,
    .qos_iotier[THREAD_QOS_BACKGROUND]              = THROTTLE_LEVEL_TIER2, /* possibly overridden by bg_iotier */
    .qos_iotier[THREAD_QOS_MAINTENANCE]             = THROTTLE_LEVEL_TIER3,
    /*
     * This table defines the highest QoS level that
     * a thread marked with this QoS class can have.
     */
    .qos_through_qos[THREAD_QOS_UNSPECIFIED]        = QOS_EXTRACT(THROUGHPUT_QOS_TIER_UNSPECIFIED),
    .qos_through_qos[THREAD_QOS_USER_INTERACTIVE]   = QOS_EXTRACT(THROUGHPUT_QOS_TIER_0),
    .qos_through_qos[THREAD_QOS_USER_INITIATED]     = QOS_EXTRACT(THROUGHPUT_QOS_TIER_1),
    .qos_through_qos[THREAD_QOS_LEGACY]             = QOS_EXTRACT(THROUGHPUT_QOS_TIER_1),
    .qos_through_qos[THREAD_QOS_UTILITY]            = QOS_EXTRACT(THROUGHPUT_QOS_TIER_2),
    .qos_through_qos[THREAD_QOS_BACKGROUND]         = QOS_EXTRACT(THROUGHPUT_QOS_TIER_5),
    .qos_through_qos[THREAD_QOS_MAINTENANCE]        = QOS_EXTRACT(THROUGHPUT_QOS_TIER_5),
    .qos_latency_qos[THREAD_QOS_UNSPECIFIED]        = QOS_EXTRACT(LATENCY_QOS_TIER_UNSPECIFIED),
    .qos_latency_qos[THREAD_QOS_USER_INTERACTIVE]   = QOS_EXTRACT(LATENCY_QOS_TIER_0),
    .qos_latency_qos[THREAD_QOS_USER_INITIATED]     = QOS_EXTRACT(LATENCY_QOS_TIER_1),
    .qos_latency_qos[THREAD_QOS_LEGACY]             = QOS_EXTRACT(LATENCY_QOS_TIER_1),
    .qos_latency_qos[THREAD_QOS_UTILITY]            = QOS_EXTRACT(LATENCY_QOS_TIER_3),
    .qos_latency_qos[THREAD_QOS_BACKGROUND]         = QOS_EXTRACT(LATENCY_QOS_TIER_3),
    .qos_latency_qos[THREAD_QOS_MAINTENANCE]        = QOS_EXTRACT(LATENCY_QOS_TIER_3),
};
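
/*
 * Example reading of one row of the tables above (illustrative only): a
 * THREAD_QOS_UTILITY thread starts from a base priority of BASEPRI_UTILITY,
 * may issue IO no more aggressively than THROTTLE_LEVEL_TIER1, and is limited
 * to throughput tier 2 and latency tier 3. The base priority is then modified
 * by the thread importance and the task max priority in
 * thread_recompute_priority() before being applied.
 */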
static void
thread_set_user_sched_mode_and_recompute_pri(thread_t thread, sched_mode_t mode);

static int
thread_qos_scaled_relative_priority(int qos, int qos_relprio);

extern void proc_get_thread_policy(thread_t thread, thread_policy_state_t info);
boolean_t
thread_has_qos_policy(thread_t thread) {
    return (proc_get_task_policy(thread->task, thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS) != THREAD_QOS_UNSPECIFIED) ? TRUE : FALSE;
}
kern_return_t
thread_remove_qos_policy(thread_t thread)
{
    thread_qos_policy_data_t unspec_qos;
    unspec_qos.qos_tier = THREAD_QOS_UNSPECIFIED;
    unspec_qos.tier_importance = 0;

    __unused int prev_qos = thread->requested_policy.thrp_qos;

    DTRACE_PROC2(qos__remove, thread_t, thread, int, prev_qos);

    return thread_policy_set_internal(thread, THREAD_QOS_POLICY, (thread_policy_t)&unspec_qos, THREAD_QOS_POLICY_COUNT);
}
boolean_t
thread_is_static_param(thread_t thread)
{
    if (thread->static_param) {
        DTRACE_PROC1(qos__legacy__denied, thread_t, thread);
        return TRUE;
    }
    return FALSE;
}
/*
 * Relative priorities can range between 0REL and -15REL. These
 * map to QoS-specific ranges, to create non-overlapping priority
 * ranges.
 */
static int
thread_qos_scaled_relative_priority(int qos, int qos_relprio)
{
    int next_lower_qos;

    /* Fast path, since no validation or scaling is needed */
    if (qos_relprio == 0) return 0;

    switch (qos) {
        case THREAD_QOS_USER_INTERACTIVE:
            next_lower_qos = THREAD_QOS_USER_INITIATED;
            break;
        case THREAD_QOS_USER_INITIATED:
            next_lower_qos = THREAD_QOS_LEGACY;
            break;
        case THREAD_QOS_LEGACY:
            next_lower_qos = THREAD_QOS_UTILITY;
            break;
        case THREAD_QOS_UTILITY:
            next_lower_qos = THREAD_QOS_BACKGROUND;
            break;
        case THREAD_QOS_MAINTENANCE:
        case THREAD_QOS_BACKGROUND:
            next_lower_qos = 0;
            break;
        default:
            panic("Unrecognized QoS %d", qos);
            return 0;
    }

    int prio_range_max = thread_qos_policy_params.qos_pri[qos];
    int prio_range_min = next_lower_qos ? thread_qos_policy_params.qos_pri[next_lower_qos] : 0;

    /*
     * We now have the valid range that the scaled relative priority can map to. Note
     * that the lower bound is exclusive, but the upper bound is inclusive. If the
     * range is (21,31], 0REL should map to 31 and -15REL should map to 22. We use the
     * fact that the max relative priority is -15 and use ">>4" to divide by 16 and discard
     * the remainder.
     */
    int scaled_relprio = -(((prio_range_max - prio_range_min) * (-qos_relprio)) >> 4);

    return scaled_relprio;
}
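
/*
 * Worked example for the (21,31] range mentioned above: with qos_relprio = -15,
 * scaled_relprio = -(((31 - 21) * 15) >> 4) = -(150 >> 4) = -9, so the base
 * priority becomes 31 - 9 = 22; qos_relprio = 0 takes the fast path and leaves
 * the base priority at 31.
 */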
/*
 * flag set by -qos-policy-allow boot-arg to allow
 * testing thread qos policy from userspace
 */
boolean_t allow_qos_policy_set = FALSE;
kern_return_t
thread_policy_set(
    thread_t                thread,
    thread_policy_flavor_t  flavor,
    thread_policy_t         policy_info,
    mach_msg_type_number_t  count)
{
    thread_qos_policy_data_t req_qos;
    kern_return_t kr;

    req_qos.qos_tier = THREAD_QOS_UNSPECIFIED;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (allow_qos_policy_set == FALSE) {
        if (thread_is_static_param(thread))
            return (KERN_POLICY_STATIC);

        if (flavor == THREAD_QOS_POLICY || flavor == THREAD_QOS_POLICY_OVERRIDE)
            return (KERN_INVALID_ARGUMENT);
    }

    /* Threads without static_param set reset their QoS when other policies are applied. */
    if (thread->requested_policy.thrp_qos != THREAD_QOS_UNSPECIFIED) {
        /* Store the existing tier, if we fail this call it is used to reset back. */
        req_qos.qos_tier = thread->requested_policy.thrp_qos;
        req_qos.tier_importance = thread->requested_policy.thrp_qos_relprio;

        kr = thread_remove_qos_policy(thread);
        if (kr != KERN_SUCCESS) {
            return kr;
        }
    }

    kr = thread_policy_set_internal(thread, flavor, policy_info, count);

    /* If a QoS tier was previously requested, restore it when the new policy set fails. */
    if (req_qos.qos_tier != THREAD_QOS_UNSPECIFIED) {
        if (kr != KERN_SUCCESS) {
            /* Reset back to our original tier as the set failed. */
            (void)thread_policy_set_internal(thread, THREAD_QOS_POLICY, (thread_policy_t)&req_qos, THREAD_QOS_POLICY_COUNT);
        }
    }

    return kr;
}
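
/*
 * For reference, a typical user-space caller reaches the routine above through
 * the Mach thread_policy_set() call; a minimal sketch (illustrative only, the
 * computation_abs/constraint_abs values are placeholders in mach_absolute_time units):
 *
 *    #include <mach/mach.h>
 *    #include <mach/thread_policy.h>
 *
 *    thread_time_constraint_policy_data_t policy = {
 *        .period      = 0,
 *        .computation = computation_abs,
 *        .constraint  = constraint_abs,
 *        .preemptible = TRUE,
 *    };
 *    kern_return_t kr = thread_policy_set(mach_thread_self(),
 *        THREAD_TIME_CONSTRAINT_POLICY, (thread_policy_t)&policy,
 *        THREAD_TIME_CONSTRAINT_POLICY_COUNT);
 *
 * The computation and constraint values are validated against min_rt_quantum
 * and max_rt_quantum in thread_policy_set_internal() below.
 */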
kern_return_t
thread_policy_set_internal(
    thread_t                thread,
    thread_policy_flavor_t  flavor,
    thread_policy_t         policy_info,
    mach_msg_type_number_t  count)
{
    kern_return_t           result = KERN_SUCCESS;
    spl_t                   s;

    thread_mtx_lock(thread);
    if (!thread->active) {
        thread_mtx_unlock(thread);

        return (KERN_TERMINATED);
    }

    switch (flavor) {

    case THREAD_EXTENDED_POLICY:
    {
        boolean_t timeshare = TRUE;

        if (count >= THREAD_EXTENDED_POLICY_COUNT) {
            thread_extended_policy_t info;

            info = (thread_extended_policy_t)policy_info;
            timeshare = info->timeshare;
        }

        sched_mode_t mode = (timeshare == TRUE) ? TH_MODE_TIMESHARE : TH_MODE_FIXED;

        s = splsched();
        thread_lock(thread);

        thread_set_user_sched_mode_and_recompute_pri(thread, mode);

        thread_unlock(thread);
        splx(s);

        sfi_reevaluate(thread);

        break;
    }

    case THREAD_TIME_CONSTRAINT_POLICY:
    {
        thread_time_constraint_policy_t info;

        if (count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_time_constraint_policy_t)policy_info;
        if (info->constraint < info->computation ||
            info->computation > max_rt_quantum   ||
            info->computation < min_rt_quantum) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        s = splsched();
        thread_lock(thread);

        thread->realtime.period = info->period;
        thread->realtime.computation = info->computation;
        thread->realtime.constraint = info->constraint;
        thread->realtime.preemptible = info->preemptible;

        thread_set_user_sched_mode_and_recompute_pri(thread, TH_MODE_REALTIME);

        thread_unlock(thread);
        splx(s);

        sfi_reevaluate(thread);

        break;
    }

    case THREAD_PRECEDENCE_POLICY:
    {
        thread_precedence_policy_t info;

        if (count < THREAD_PRECEDENCE_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }
        info = (thread_precedence_policy_t)policy_info;

        s = splsched();
        thread_lock(thread);

        thread->importance = info->importance;

        thread_recompute_priority(thread);

        thread_unlock(thread);
        splx(s);

        break;
    }

    case THREAD_AFFINITY_POLICY:
    {
        thread_affinity_policy_t info;

        if (!thread_affinity_is_supported()) {
            result = KERN_NOT_SUPPORTED;
            break;
        }
        if (count < THREAD_AFFINITY_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_affinity_policy_t) policy_info;
        /*
         * Unlock the thread mutex here and
         * return directly after calling thread_affinity_set().
         * This is necessary for correct lock ordering because
         * thread_affinity_set() takes the task lock.
         */
        thread_mtx_unlock(thread);
        return thread_affinity_set(thread, info->affinity_tag);
    }

    case THREAD_THROUGHPUT_QOS_POLICY:
    {
        thread_throughput_qos_policy_t info = (thread_throughput_qos_policy_t) policy_info;
        int tqos;

        if (count < THREAD_THROUGHPUT_QOS_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        if ((result = qos_throughput_policy_validate(info->thread_throughput_qos_tier)) !=
            KERN_SUCCESS) {
            break;
        }

        tqos = qos_extract(info->thread_throughput_qos_tier);
        thread->effective_policy.t_through_qos = tqos;
    }
        break;

    case THREAD_LATENCY_QOS_POLICY:
    {
        thread_latency_qos_policy_t info = (thread_latency_qos_policy_t) policy_info;
        int lqos;

        if (count < THREAD_LATENCY_QOS_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        if ((result = qos_latency_policy_validate(info->thread_latency_qos_tier)) !=
            KERN_SUCCESS) {
            break;
        }

        lqos = qos_extract(info->thread_latency_qos_tier);

        /*
         * The expected use cases (opt-in) of per-thread latency QoS would seem to
         * preclude any requirement at present to re-evaluate timers on a thread level
         * latency QoS change.
         */
        thread->effective_policy.t_latency_qos = lqos;
    }
        break;

    case THREAD_QOS_POLICY:
    case THREAD_QOS_POLICY_OVERRIDE:
    {
        thread_qos_policy_t info = (thread_qos_policy_t)policy_info;

        if (count < THREAD_QOS_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        if (info->qos_tier < 0 || info->qos_tier >= THREAD_QOS_LAST) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        if (info->tier_importance > 0 || info->tier_importance < THREAD_QOS_MIN_TIER_IMPORTANCE) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        if (info->qos_tier == THREAD_QOS_UNSPECIFIED && info->tier_importance != 0) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        /*
         * Going into task policy requires the task mutex,
         * because of the way synchronization against the IO policy
         * subsystem works.
         *
         * We need to move thread policy to the thread mutex instead.
         * <rdar://problem/15831652> separate thread policy from task policy
         */

        if (flavor == THREAD_QOS_POLICY_OVERRIDE) {
            int strongest_override = info->qos_tier;

            if (info->qos_tier != THREAD_QOS_UNSPECIFIED &&
                thread->requested_policy.thrp_qos_override != THREAD_QOS_UNSPECIFIED)
                strongest_override = MAX(thread->requested_policy.thrp_qos_override, info->qos_tier);

            thread_mtx_unlock(thread);

            /* There is a race here. To be closed in <rdar://problem/15831652> separate thread policy from task policy */

            proc_set_task_policy(thread->task, thread, TASK_POLICY_ATTRIBUTE,
                                 TASK_POLICY_QOS_OVERRIDE, strongest_override);

            return (result);
        }

        thread_mtx_unlock(thread);

        proc_set_task_policy2(thread->task, thread, TASK_POLICY_ATTRIBUTE,
                              TASK_POLICY_QOS_AND_RELPRIO, info->qos_tier, -info->tier_importance);

        thread_mtx_lock(thread);
        if (!thread->active) {
            thread_mtx_unlock(thread);
            return (KERN_TERMINATED);
        }

        break;
    }

    default:
        result = KERN_INVALID_ARGUMENT;
        break;
    }

    thread_mtx_unlock(thread);

    return (result);
}
/*
 * thread_set_mode_and_absolute_pri:
 *
 * Set scheduling policy & absolute priority for thread, for deprecated
 * thread_set_policy and thread_policy interfaces.
 *
 * Note that there is no implemented difference between POLICY_RR and POLICY_FIFO.
 * Both result in FIXED mode scheduling.
 *
 * Called with thread mutex locked.
 */
kern_return_t
thread_set_mode_and_absolute_pri(
    thread_t        thread,
    integer_t       policy,
    integer_t       priority)
{
    spl_t s;
    sched_mode_t mode;
    kern_return_t kr = KERN_SUCCESS;

    if (thread_is_static_param(thread))
        return (KERN_POLICY_STATIC);

    if (thread->policy_reset)
        return (KERN_SUCCESS);

    /* Setting legacy policies on threads kills the current QoS */
    if (thread->requested_policy.thrp_qos != THREAD_QOS_UNSPECIFIED) {
        thread_mtx_unlock(thread);

        kr = thread_remove_qos_policy(thread);

        thread_mtx_lock(thread);
        if (!thread->active) {
            return (KERN_TERMINATED);
        }
    }

    switch (policy) {
        case POLICY_TIMESHARE:
            mode = TH_MODE_TIMESHARE;
            break;
        case POLICY_RR:
        case POLICY_FIFO:
            mode = TH_MODE_FIXED;
            break;
        default:
            panic("unexpected sched policy: %d", policy);
            break;
    }

    s = splsched();
    thread_lock(thread);

    /* This path isn't allowed to change a thread out of realtime. */
    if ((thread->sched_mode != TH_MODE_REALTIME) &&
        (thread->saved_mode != TH_MODE_REALTIME)) {

        /*
         * Reverse engineer and apply the correct importance value
         * from the requested absolute priority value.
         */

        if (priority >= thread->max_priority)
            priority = thread->max_priority - thread->task_priority;
        else if (priority >= MINPRI_KERNEL)
            priority -= MINPRI_KERNEL;
        else if (priority >= MINPRI_RESERVED)
            priority -= MINPRI_RESERVED;
        else
            priority -= BASEPRI_DEFAULT;

        priority += thread->task_priority;

        if (priority > thread->max_priority)
            priority = thread->max_priority;
        else if (priority < MINPRI)
            priority = MINPRI;

        thread->importance = priority - thread->task_priority;

        thread_set_user_sched_mode_and_recompute_pri(thread, mode);
    }

    thread_unlock(thread);
    splx(s);

    sfi_reevaluate(thread);

    return kr;
}
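
/*
 * Example of the importance reverse-engineering above (illustrative only): a
 * request for absolute priority 40 on a thread whose task_priority is
 * BASEPRI_DEFAULT (31) and whose max_priority is above 40 falls into the final
 * 'else' arm, so 40 - BASEPRI_DEFAULT = 9 becomes the thread importance, and
 * the recomputed priority is task_priority + 9 = 40 again, subject to the
 * max_priority and MINPRI clamps.
 */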
/*
 * Set the thread's requested mode and recompute priority
 * Called with thread mutex and thread locked
 *
 * TODO: Mitigate potential problems caused by moving thread to end of runq
 * whenever its priority is recomputed
 *      Only remove when it actually changes? Attempt to re-insert at appropriate location?
 */
static void
thread_set_user_sched_mode_and_recompute_pri(thread_t thread, sched_mode_t mode)
{
    if (thread->policy_reset)
        return;

    boolean_t removed = thread_run_queue_remove(thread);

    /*
     * TODO: Instead of having saved mode, have 'user mode' and 'true mode'.
     * That way there's zero confusion over which the user wants
     * and which the kernel wants.
     */
    if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK)
        thread->saved_mode = mode;
    else
        sched_set_thread_mode(thread, mode);

    thread_recompute_priority(thread);

    if (removed)
        thread_run_queue_reinsert(thread, SCHED_TAILQ);
}
/* called with task lock locked */
void
thread_recompute_qos(thread_t thread) {
    spl_t s;

    thread_mtx_lock(thread);

    if (!thread->active) {
        thread_mtx_unlock(thread);
        return;
    }

    s = splsched();
    thread_lock(thread);

    thread_recompute_priority(thread);

    thread_unlock(thread);
    splx(s);

    thread_mtx_unlock(thread);
}
/* called with task lock locked and thread_mtx_lock locked */
void
thread_update_qos_cpu_time(thread_t thread, boolean_t lock_needed)
{
    uint64_t last_qos_change_balance;
    ledger_amount_t thread_balance_credit;
    ledger_amount_t thread_balance_debit;
    ledger_amount_t effective_qos_time;
    uint64_t ctime;
    uint64_t remainder = 0, consumed = 0;
    processor_t processor;
    spl_t s;
    kern_return_t kr;

    if (lock_needed) {
        s = splsched();
        thread_lock(thread);
    }

    /*
     * Calculation of time elapsed by the thread in the current qos.
     * Following is the timeline which shows all the variables used in the calculation below.
     *
     *      thread ledger                     thread ledger
     *    cpu_time_last_qos                     cpu_time
     *            |                                |<-  consumed  ->|<- remainder ->|
     * timeline  ----------------------------------------------------------->
     *            |                                |                |
     *     thread_dispatch                       ctime          quantum end
     *
     *            |<-----        effective qos time         ----->|
     */

    /*
     * Calculate time elapsed since last qos change on this thread.
     * For cpu time on thread ledger, do not use ledger_get_balance,
     * only use credit field of ledger, since
     * debit is used by per thread cpu limits and is not zero.
     */
    kr = ledger_get_entries(thread->t_threadledger, thread_ledgers.cpu_time, &thread_balance_credit, &thread_balance_debit);
    if (kr != KERN_SUCCESS)
        goto out;
    last_qos_change_balance = thread->cpu_time_last_qos;

    /*
     * If the thread is running on a CPU, calculate the time elapsed since it was last dispatched.
     * The thread ledger is only updated at context switch; the time since the last context switch
     * is not yet reflected in the thread ledger cpu time.
     */
    processor = thread->last_processor;
    if ((processor != PROCESSOR_NULL) && (processor->state == PROCESSOR_RUNNING) &&
        (processor->active_thread == thread)) {
        ctime = mach_absolute_time();

        if (processor->quantum_end > ctime)
            remainder = processor->quantum_end - ctime;

        consumed = thread->quantum_remaining - remainder;
    }

    /*
     * There can be multiple qos changes in a quantum, in which case cpu_time_last_qos will
     * lie between the cpu_time marker and the ctime marker shown below. The value of
     * thread_balance - last_qos_change_balance is negative in that case, but the overall
     * outcome is positive once consumed is added to it.
     *
     *            |<------------  consumed  --------->|<- remainder ->|
     * timeline  ----------------------------------------------------------->
     *            |                 |                 |              |
     *     thread_dispatch    thread ledger         ctime       quantum end
     *                          cpu_time
     *
     *                          |<-effective qos time->|
     */
    effective_qos_time = (ledger_amount_t) consumed;
    effective_qos_time += thread_balance_credit - last_qos_change_balance;

    if (lock_needed) {
        thread_unlock(thread);
        splx(s);
    }

    if (effective_qos_time < 0)
        return;

    thread->cpu_time_last_qos += (uint64_t)effective_qos_time;

    /*
     * Update the task-level qos stats. It's safe to perform operations on these fields, since we
     * hold the task lock.
     */
    switch (thread->effective_policy.thep_qos) {

    case THREAD_QOS_DEFAULT:
        thread->task->cpu_time_qos_stats.cpu_time_qos_default += effective_qos_time;
        break;

    case THREAD_QOS_MAINTENANCE:
        thread->task->cpu_time_qos_stats.cpu_time_qos_maintenance += effective_qos_time;
        break;

    case THREAD_QOS_BACKGROUND:
        thread->task->cpu_time_qos_stats.cpu_time_qos_background += effective_qos_time;
        break;

    case THREAD_QOS_UTILITY:
        thread->task->cpu_time_qos_stats.cpu_time_qos_utility += effective_qos_time;
        break;

    case THREAD_QOS_LEGACY:
        thread->task->cpu_time_qos_stats.cpu_time_qos_legacy += effective_qos_time;
        break;

    case THREAD_QOS_USER_INITIATED:
        thread->task->cpu_time_qos_stats.cpu_time_qos_user_initiated += effective_qos_time;
        break;

    case THREAD_QOS_USER_INTERACTIVE:
        thread->task->cpu_time_qos_stats.cpu_time_qos_user_interactive += effective_qos_time;
        break;
    }

    return;

out:
    if (lock_needed) {
        thread_unlock(thread);
        splx(s);
    }
}
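
/*
 * Numeric illustration of the accounting above (made-up values): if the thread
 * ledger credit is 900 time units, the credit recorded at the last QoS change
 * was 850, and the running thread has consumed 30 units of its current quantum
 * since it was last dispatched, then effective_qos_time = 30 + (900 - 850) = 80,
 * which is added to the task-level cpu_time_qos_stats bucket selected by the
 * thread's effective QoS.
 */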
/*
 * Calculate base priority from thread attributes, and set it on the thread
 *
 * Called with thread_lock and thread mutex held.
 */
void
thread_recompute_priority(
    thread_t        thread)
{
    integer_t       priority;

    if (thread->policy_reset)
        return;

    if (thread->sched_mode == TH_MODE_REALTIME) {
        sched_set_thread_base_priority(thread, BASEPRI_RTQUEUES);
        return;
    } else if (thread->effective_policy.thep_qos != THREAD_QOS_UNSPECIFIED) {
        int qos = thread->effective_policy.thep_qos;
        int qos_ui_is_urgent = thread->effective_policy.qos_ui_is_urgent;
        int qos_relprio = -(thread->effective_policy.thep_qos_relprio); /* stored in task policy inverted */
        int qos_scaled_relprio;

        assert(qos >= 0 && qos < THREAD_QOS_LAST);
        assert(qos_relprio <= 0 && qos_relprio >= THREAD_QOS_MIN_TIER_IMPORTANCE);

        priority = thread_qos_policy_params.qos_pri[qos];
        qos_scaled_relprio = thread_qos_scaled_relative_priority(qos, qos_relprio);

        if (qos == THREAD_QOS_USER_INTERACTIVE && qos_ui_is_urgent == 1) {
            /* Bump priority 46 to 47 when in a frontmost app */
            qos_scaled_relprio += 1;
        }

        priority += qos_scaled_relprio;
    } else {
        if (thread->importance > MAXPRI)
            priority = MAXPRI;
        else if (thread->importance < -MAXPRI)
            priority = -MAXPRI;
        else
            priority = thread->importance;

        priority += thread->task_priority;
    }

    if (thread->saved_mode == TH_MODE_REALTIME &&
        thread->sched_flags & TH_SFLAG_FAILSAFE)
        priority = DEPRESSPRI;

    if (thread->effective_policy.terminated == TRUE && priority < thread->task_priority) {
        priority = thread->task_priority;
    }

    if (priority > thread->max_priority)
        priority = thread->max_priority;
    else if (priority < MINPRI)
        priority = MINPRI;

    sched_set_thread_base_priority(thread, priority);
}
/* Called with the thread mutex held */
void
thread_task_priority(
    thread_t        thread,
    integer_t       priority,
    integer_t       max_priority)
{
    spl_t s;

    assert(thread != THREAD_NULL);

    if (!thread->active || thread->policy_reset)
        return;

    s = splsched();
    thread_lock(thread);

    integer_t old_max_priority = thread->max_priority;

    thread->task_priority = priority;
    thread->max_priority = max_priority;

    /* A thread is 'throttled' when its max priority is below MAXPRI_THROTTLE */
    if ((max_priority > MAXPRI_THROTTLE) && (old_max_priority <= MAXPRI_THROTTLE)) {
        sched_set_thread_throttled(thread, FALSE);
    } else if ((max_priority <= MAXPRI_THROTTLE) && (old_max_priority > MAXPRI_THROTTLE)) {
        sched_set_thread_throttled(thread, TRUE);
    }

    thread_recompute_priority(thread);

    thread_unlock(thread);
    splx(s);
}
/*
 * Reset thread to default state in preparation for termination
 * Called with thread mutex locked
 *
 * Always called on current thread, so we don't need a run queue remove
 */
void
thread_policy_reset(
    thread_t        thread)
{
    spl_t s;

    assert(thread == current_thread());

    s = splsched();
    thread_lock(thread);

    assert_thread_sched_count(thread);

    if (thread->sched_flags & TH_SFLAG_FAILSAFE)
        sched_thread_mode_undemote(thread, TH_SFLAG_FAILSAFE);

    assert_thread_sched_count(thread);

    if (thread->sched_flags & TH_SFLAG_THROTTLED)
        sched_set_thread_throttled(thread, FALSE);

    assert_thread_sched_count(thread);

    assert(thread->BG_COUNT == 0);

    /* At this point, the various demotions should be inactive */
    assert(!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK));
    assert(!(thread->sched_flags & TH_SFLAG_THROTTLED));
    assert(!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK));

    /* Reset thread back to task-default basepri and mode */
    sched_mode_t newmode = SCHED(initial_thread_sched_mode)(thread->task);

    sched_set_thread_mode(thread, newmode);

    thread->importance = 0;

    sched_set_thread_base_priority(thread, thread->task_priority);

    /* Prevent further changes to thread base priority or mode */
    thread->policy_reset = 1;

    assert(thread->BG_COUNT == 0);
    assert_thread_sched_count(thread);

    thread_unlock(thread);
    splx(s);
}
kern_return_t
thread_policy_get(
    thread_t                thread,
    thread_policy_flavor_t  flavor,
    thread_policy_t         policy_info,
    mach_msg_type_number_t  *count,
    boolean_t               *get_default)
{
    kern_return_t           result = KERN_SUCCESS;
    spl_t                   s;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);
    if (!thread->active) {
        thread_mtx_unlock(thread);

        return (KERN_TERMINATED);
    }

    switch (flavor) {

    case THREAD_EXTENDED_POLICY:
    {
        boolean_t timeshare = TRUE;

        if (!(*get_default)) {
            s = splsched();
            thread_lock(thread);

            if ((thread->sched_mode != TH_MODE_REALTIME) &&
                (thread->saved_mode != TH_MODE_REALTIME)) {
                if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK))
                    timeshare = (thread->sched_mode == TH_MODE_TIMESHARE) != 0;
                else
                    timeshare = (thread->saved_mode == TH_MODE_TIMESHARE) != 0;
            } else {
                *get_default = TRUE;
            }

            thread_unlock(thread);
            splx(s);
        }

        if (*count >= THREAD_EXTENDED_POLICY_COUNT) {
            thread_extended_policy_t info;

            info = (thread_extended_policy_t)policy_info;
            info->timeshare = timeshare;
        }

        break;
    }

    case THREAD_TIME_CONSTRAINT_POLICY:
    {
        thread_time_constraint_policy_t info;

        if (*count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_time_constraint_policy_t)policy_info;

        if (!(*get_default)) {
            s = splsched();
            thread_lock(thread);

            if ((thread->sched_mode == TH_MODE_REALTIME) ||
                (thread->saved_mode == TH_MODE_REALTIME)) {
                info->period = thread->realtime.period;
                info->computation = thread->realtime.computation;
                info->constraint = thread->realtime.constraint;
                info->preemptible = thread->realtime.preemptible;
            } else {
                *get_default = TRUE;
            }

            thread_unlock(thread);
            splx(s);
        }

        if (*get_default) {
            info->period = 0;
            info->computation = default_timeshare_computation;
            info->constraint = default_timeshare_constraint;
            info->preemptible = TRUE;
        }

        break;
    }

    case THREAD_PRECEDENCE_POLICY:
    {
        thread_precedence_policy_t info;

        if (*count < THREAD_PRECEDENCE_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_precedence_policy_t)policy_info;

        if (!(*get_default)) {
            s = splsched();
            thread_lock(thread);

            info->importance = thread->importance;

            thread_unlock(thread);
            splx(s);
        } else {
            info->importance = 0;
        }

        break;
    }

    case THREAD_AFFINITY_POLICY:
    {
        thread_affinity_policy_t info;

        if (!thread_affinity_is_supported()) {
            result = KERN_NOT_SUPPORTED;
            break;
        }
        if (*count < THREAD_AFFINITY_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_affinity_policy_t)policy_info;

        if (!(*get_default))
            info->affinity_tag = thread_affinity_get(thread);
        else
            info->affinity_tag = THREAD_AFFINITY_TAG_NULL;

        break;
    }

    case THREAD_POLICY_STATE:
    {
        thread_policy_state_t info;

        if (*count < THREAD_POLICY_STATE_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        /* Only root can get this info */
        if (current_task()->sec_token.val[0] != 0) {
            result = KERN_PROTECTION_FAILURE;
            break;
        }

        info = (thread_policy_state_t)policy_info;

        if (!(*get_default)) {
            info->flags = 0;

            info->flags |= (thread->static_param ? THREAD_POLICY_STATE_FLAG_STATIC_PARAM : 0);

            /*
             * Unlock the thread mutex and directly return.
             * This is necessary because proc_get_thread_policy()
             * takes the task lock.
             */
            thread_mtx_unlock(thread);
            proc_get_thread_policy(thread, info);
            return (result);
        } else {
            info->requested = 0;
            info->effective = 0;
            info->pending = 0;
        }

        break;
    }

    case THREAD_LATENCY_QOS_POLICY:
    {
        thread_latency_qos_policy_t info = (thread_latency_qos_policy_t) policy_info;
        uint32_t plqos;

        if (*count < THREAD_LATENCY_QOS_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        if (*get_default) {
            plqos = 0;
        } else {
            plqos = thread->effective_policy.t_latency_qos;
        }

        info->thread_latency_qos_tier = qos_latency_policy_package(plqos);
    }
        break;

    case THREAD_THROUGHPUT_QOS_POLICY:
    {
        thread_throughput_qos_policy_t info = (thread_throughput_qos_policy_t) policy_info;
        uint32_t ptqos;

        if (*count < THREAD_THROUGHPUT_QOS_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        if (*get_default) {
            ptqos = 0;
        } else {
            ptqos = thread->effective_policy.t_through_qos;
        }

        info->thread_throughput_qos_tier = qos_throughput_policy_package(ptqos);
    }
        break;

    case THREAD_QOS_POLICY:
    case THREAD_QOS_POLICY_OVERRIDE:
    {
        thread_qos_policy_t info = (thread_qos_policy_t)policy_info;

        if (*count < THREAD_QOS_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        if (!(*get_default)) {
            if (flavor == THREAD_QOS_POLICY_OVERRIDE) {
                info->qos_tier = thread->requested_policy.thrp_qos_override;
                /* TODO: handle importance overrides */
                info->tier_importance = 0;
            } else {
                info->qos_tier = thread->requested_policy.thrp_qos;
                info->tier_importance = thread->importance;
            }
        } else {
            info->qos_tier = THREAD_QOS_UNSPECIFIED;
            info->tier_importance = 0;
        }

        break;
    }

    default:
        result = KERN_INVALID_ARGUMENT;
        break;
    }

    thread_mtx_unlock(thread);

    return (result);
}
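
/*
 * For reference, the matching user-space query looks like this (illustrative only):
 *
 *    thread_qos_policy_data_t qosinfo;
 *    mach_msg_type_number_t count = THREAD_QOS_POLICY_COUNT;
 *    boolean_t get_default = FALSE;
 *    kern_return_t kr = thread_policy_get(mach_thread_self(), THREAD_QOS_POLICY,
 *        (thread_policy_t)&qosinfo, &count, &get_default);
 *
 * On success the fields are filled in by the THREAD_QOS_POLICY case above;
 * passing get_default = TRUE returns the default values instead.
 */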
static volatile uint64_t unique_work_interval_id = 1; /* Start at 1, 0 is not a valid work interval ID */

kern_return_t
thread_policy_create_work_interval(
    thread_t        thread,
    uint64_t        *work_interval_id)
{
    thread_mtx_lock(thread);
    if (thread->work_interval_id) {
        /* already assigned a work interval ID */
        thread_mtx_unlock(thread);
        return (KERN_INVALID_VALUE);
    }

    thread->work_interval_id = OSIncrementAtomic64((volatile int64_t *)&unique_work_interval_id);
    *work_interval_id = thread->work_interval_id;

    thread_mtx_unlock(thread);
    return KERN_SUCCESS;
}
kern_return_t
thread_policy_destroy_work_interval(
    thread_t        thread,
    uint64_t        work_interval_id)
{
    thread_mtx_lock(thread);
    if (work_interval_id == 0 || thread->work_interval_id == 0 || thread->work_interval_id != work_interval_id) {
        /* work ID isn't valid or doesn't match previously assigned work interval ID */
        thread_mtx_unlock(thread);
        return (KERN_INVALID_ARGUMENT);
    }

    thread->work_interval_id = 0;

    thread_mtx_unlock(thread);
    return KERN_SUCCESS;
}