/*
 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/thread_act_server.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/affinity.h>
#include <mach/task_policy.h>
#include <kern/policy_internal.h>
#include <sys/errno.h>
#include <sys/ulock.h>

#include <mach/machine/sdt.h>
extern int proc_selfpid(void);
extern char * proc_name_address(void *p);
extern void rethrottle_thread(void * uthread);

#define QOS_EXTRACT(q)        ((q) & 0xff)

uint32_t qos_override_mode;
#define QOS_OVERRIDE_MODE_OVERHANG_PEAK 0
#define QOS_OVERRIDE_MODE_IGNORE_OVERRIDE 1
#define QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE 2
#define QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_IGNORE_DISPATCH 3
#define QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_SINGLE_MUTEX_OVERRIDE 4

extern zone_t thread_qos_override_zone;
proc_thread_qos_remove_override_internal(thread_t thread, user_addr_t resource,
    int resource_type, boolean_t reset, boolean_t squash);
/*
 * THREAD_QOS_UNSPECIFIED is assigned the highest tier available, so it does not provide a limit
 * to threads that don't have a QoS class set.
 */
const qos_policy_params_t thread_qos_policy_params = {
    /*
     * This table defines the starting base priority of the thread,
     * which will be modified by the thread importance and the task max priority
     * before being applied.
     */
    .qos_pri[THREAD_QOS_UNSPECIFIED]              = 0, /* not consulted */
    .qos_pri[THREAD_QOS_USER_INTERACTIVE]         = BASEPRI_BACKGROUND, /* i.e. 46 */
    .qos_pri[THREAD_QOS_USER_INITIATED]           = BASEPRI_USER_INITIATED,
    .qos_pri[THREAD_QOS_LEGACY]                   = BASEPRI_DEFAULT,
    .qos_pri[THREAD_QOS_UTILITY]                  = BASEPRI_UTILITY,
    .qos_pri[THREAD_QOS_BACKGROUND]               = MAXPRI_THROTTLE,
    .qos_pri[THREAD_QOS_MAINTENANCE]              = MAXPRI_THROTTLE,
    /*
     * This table defines the highest IO priority that a thread marked with this
     * QoS class can have.
     */
    .qos_iotier[THREAD_QOS_UNSPECIFIED]           = THROTTLE_LEVEL_TIER0,
    .qos_iotier[THREAD_QOS_USER_INTERACTIVE]      = THROTTLE_LEVEL_TIER0,
    .qos_iotier[THREAD_QOS_USER_INITIATED]        = THROTTLE_LEVEL_TIER0,
    .qos_iotier[THREAD_QOS_LEGACY]                = THROTTLE_LEVEL_TIER0,
    .qos_iotier[THREAD_QOS_UTILITY]               = THROTTLE_LEVEL_TIER1,
    .qos_iotier[THREAD_QOS_BACKGROUND]            = THROTTLE_LEVEL_TIER2, /* possibly overridden by bg_iotier */
    .qos_iotier[THREAD_QOS_MAINTENANCE]           = THROTTLE_LEVEL_TIER3,
    /*
     * This table defines the highest QoS level that
     * a thread marked with this QoS class can have.
     */
    .qos_through_qos[THREAD_QOS_UNSPECIFIED]      = QOS_EXTRACT(THROUGHPUT_QOS_TIER_UNSPECIFIED),
    .qos_through_qos[THREAD_QOS_USER_INTERACTIVE] = QOS_EXTRACT(THROUGHPUT_QOS_TIER_0),
    .qos_through_qos[THREAD_QOS_USER_INITIATED]   = QOS_EXTRACT(THROUGHPUT_QOS_TIER_1),
    .qos_through_qos[THREAD_QOS_LEGACY]           = QOS_EXTRACT(THROUGHPUT_QOS_TIER_1),
    .qos_through_qos[THREAD_QOS_UTILITY]          = QOS_EXTRACT(THROUGHPUT_QOS_TIER_2),
    .qos_through_qos[THREAD_QOS_BACKGROUND]       = QOS_EXTRACT(THROUGHPUT_QOS_TIER_5),
    .qos_through_qos[THREAD_QOS_MAINTENANCE]      = QOS_EXTRACT(THROUGHPUT_QOS_TIER_5),
    .qos_latency_qos[THREAD_QOS_UNSPECIFIED]      = QOS_EXTRACT(LATENCY_QOS_TIER_UNSPECIFIED),
    .qos_latency_qos[THREAD_QOS_USER_INTERACTIVE] = QOS_EXTRACT(LATENCY_QOS_TIER_0),
    .qos_latency_qos[THREAD_QOS_USER_INITIATED]   = QOS_EXTRACT(LATENCY_QOS_TIER_1),
    .qos_latency_qos[THREAD_QOS_LEGACY]           = QOS_EXTRACT(LATENCY_QOS_TIER_1),
    .qos_latency_qos[THREAD_QOS_UTILITY]          = QOS_EXTRACT(LATENCY_QOS_TIER_3),
    .qos_latency_qos[THREAD_QOS_BACKGROUND]       = QOS_EXTRACT(LATENCY_QOS_TIER_3),
    .qos_latency_qos[THREAD_QOS_MAINTENANCE]      = QOS_EXTRACT(LATENCY_QOS_TIER_3),
};
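/*
 * Illustrative example (not part of the original source): reading the table
 * above, a THREAD_QOS_UTILITY thread starts from base priority BASEPRI_UTILITY,
 * may issue I/O no more aggressively than THROTTLE_LEVEL_TIER1, and is limited
 * to THROUGHPUT_QOS_TIER_2 / LATENCY_QOS_TIER_3 for the scheduler and timer
 * coalescing subsystems, e.g.:
 *
 *     int base  = thread_qos_policy_params.qos_pri[THREAD_QOS_UTILITY];        // BASEPRI_UTILITY
 *     int io    = thread_qos_policy_params.qos_iotier[THREAD_QOS_UTILITY];     // THROTTLE_LEVEL_TIER1
 *     int ltier = thread_qos_policy_params.qos_latency_qos[THREAD_QOS_UTILITY];
 */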
static void
thread_set_user_sched_mode_and_recompute_pri(thread_t thread, sched_mode_t mode);

static int
thread_qos_scaled_relative_priority(int qos, int qos_relprio);

static void
proc_get_thread_policy_bitfield(thread_t thread, thread_policy_state_t info);

static void
proc_set_thread_policy_locked(thread_t thread, int category, int flavor, int value, int value2, task_pend_token_t pend_token);

static void
proc_set_thread_policy_spinlocked(thread_t thread, int category, int flavor, int value, int value2, task_pend_token_t pend_token);

static void
thread_set_requested_policy_spinlocked(thread_t thread, int category, int flavor, int value, int value2);

static int
thread_get_requested_policy_spinlocked(thread_t thread, int category, int flavor, int* value2);

static int
proc_get_thread_policy_locked(thread_t thread, int category, int flavor, int* value2);

static void
thread_policy_update_spinlocked(thread_t thread, boolean_t recompute_priority, task_pend_token_t pend_token);

static void
thread_policy_update_internal_spinlocked(thread_t thread, boolean_t recompute_priority, task_pend_token_t pend_token);
void
thread_policy_init(void) {
    if (PE_parse_boot_argn("qos_override_mode", &qos_override_mode, sizeof(qos_override_mode))) {
        printf("QOS override mode: 0x%08x\n", qos_override_mode);
    } else {
        qos_override_mode = QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_SINGLE_MUTEX_OVERRIDE;
    }
}
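/*
 * Illustrative example (not part of the original source): the override mode can
 * be selected at boot via the "qos_override_mode" boot-arg parsed above; a
 * hypothetical test setup might use something like:
 *
 *     nvram boot-args="qos_override_mode=0x2"   # QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE
 *
 * When the boot-arg is absent, the default remains
 * QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_SINGLE_MUTEX_OVERRIDE.
 */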
boolean_t
thread_has_qos_policy(thread_t thread) {
    return (proc_get_thread_policy(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS) != THREAD_QOS_UNSPECIFIED) ? TRUE : FALSE;
}
static void
thread_remove_qos_policy_locked(thread_t thread,
                                task_pend_token_t pend_token)
{
    __unused int prev_qos = thread->requested_policy.thrp_qos;

    DTRACE_PROC2(qos__remove, thread_t, thread, int, prev_qos);

    proc_set_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_AND_RELPRIO,
                                  THREAD_QOS_UNSPECIFIED, 0, pend_token);
}
kern_return_t
thread_remove_qos_policy(thread_t thread)
{
    struct task_pend_token pend_token = {};

    thread_mtx_lock(thread);
    if (!thread->active) {
        thread_mtx_unlock(thread);
        return KERN_TERMINATED;
    }

    thread_remove_qos_policy_locked(thread, &pend_token);

    thread_mtx_unlock(thread);

    thread_policy_update_complete_unlocked(thread, &pend_token);

    return KERN_SUCCESS;
}
boolean_t
thread_is_static_param(thread_t thread)
{
    if (thread->static_param) {
        DTRACE_PROC1(qos__legacy__denied, thread_t, thread);
        return TRUE;
    }
    return FALSE;
}
/*
 * Relative priorities can range between 0REL and -15REL. These
 * map to QoS-specific ranges, to create non-overlapping priority
 * ranges.
 */
static int
thread_qos_scaled_relative_priority(int qos, int qos_relprio)
{
    int next_lower_qos;

    /* Fast path, since no validation or scaling is needed */
    if (qos_relprio == 0) return 0;

    switch (qos) {
        case THREAD_QOS_USER_INTERACTIVE:
            next_lower_qos = THREAD_QOS_USER_INITIATED;
            break;
        case THREAD_QOS_USER_INITIATED:
            next_lower_qos = THREAD_QOS_LEGACY;
            break;
        case THREAD_QOS_LEGACY:
            next_lower_qos = THREAD_QOS_UTILITY;
            break;
        case THREAD_QOS_UTILITY:
            next_lower_qos = THREAD_QOS_BACKGROUND;
            break;
        case THREAD_QOS_MAINTENANCE:
        case THREAD_QOS_BACKGROUND:
            next_lower_qos = 0;
            break;
        default:
            panic("Unrecognized QoS %d", qos);
            return 0;
    }

    int prio_range_max = thread_qos_policy_params.qos_pri[qos];
    int prio_range_min = next_lower_qos ? thread_qos_policy_params.qos_pri[next_lower_qos] : 0;

    /*
     * We now have the valid range that the scaled relative priority can map to. Note
     * that the lower bound is exclusive, but the upper bound is inclusive. If the
     * range is (21,31], 0REL should map to 31 and -15REL should map to 22. We use the
     * fact that the max relative priority is -15 and use ">>4" to divide by 16 and discard
     * the remainder.
     */
    int scaled_relprio = -(((prio_range_max - prio_range_min) * (-qos_relprio)) >> 4);

    return scaled_relprio;
}
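/*
 * Illustrative worked example (not part of the original source), using the
 * qos_pri table above where BASEPRI_DEFAULT is 31 (LEGACY) and BASEPRI_UTILITY
 * is 20 (UTILITY):
 *
 *     thread_qos_scaled_relative_priority(THREAD_QOS_LEGACY, -8)
 *         => prio_range_max = 31, prio_range_min = 20
 *         => -(((31 - 20) * 8) >> 4) = -(88 >> 4) = -5
 *
 * so a LEGACY thread at -8REL ends up 5 priority bands below an unadjusted
 * LEGACY thread, still above the top of the UTILITY range.
 */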
/*
 * flag set by -qos-policy-allow boot-arg to allow
 * testing thread qos policy from userspace
 */
boolean_t allow_qos_policy_set = FALSE;
kern_return_t
thread_policy_set(
    thread_t                thread,
    thread_policy_flavor_t  flavor,
    thread_policy_t         policy_info,
    mach_msg_type_number_t  count)
{
    thread_qos_policy_data_t req_qos;
    kern_return_t kr;

    req_qos.qos_tier = THREAD_QOS_UNSPECIFIED;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (allow_qos_policy_set == FALSE) {
        if (thread_is_static_param(thread))
            return (KERN_POLICY_STATIC);

        if (flavor == THREAD_QOS_POLICY)
            return (KERN_INVALID_ARGUMENT);
    }

    /* Threads without static_param set reset their QoS when other policies are applied. */
    if (thread->requested_policy.thrp_qos != THREAD_QOS_UNSPECIFIED) {
        /* Store the existing tier, if we fail this call it is used to reset back. */
        req_qos.qos_tier = thread->requested_policy.thrp_qos;
        req_qos.tier_importance = thread->requested_policy.thrp_qos_relprio;

        kr = thread_remove_qos_policy(thread);
        if (kr != KERN_SUCCESS) {
            return kr;
        }
    }

    kr = thread_policy_set_internal(thread, flavor, policy_info, count);

    /* Return KERN_QOS_REMOVED instead of KERN_SUCCESS if we succeeded. */
    if (req_qos.qos_tier != THREAD_QOS_UNSPECIFIED) {
        if (kr != KERN_SUCCESS) {
            /* Reset back to our original tier as the set failed. */
            (void)thread_policy_set_internal(thread, THREAD_QOS_POLICY, (thread_policy_t)&req_qos, THREAD_QOS_POLICY_COUNT);
        }
    }

    return kr;
}
kern_return_t
thread_policy_set_internal(
    thread_t                thread,
    thread_policy_flavor_t  flavor,
    thread_policy_t         policy_info,
    mach_msg_type_number_t  count)
{
    kern_return_t result = KERN_SUCCESS;
    struct task_pend_token pend_token = {};

    thread_mtx_lock(thread);
    if (!thread->active) {
        thread_mtx_unlock(thread);

        return (KERN_TERMINATED);
    }

    switch (flavor) {

    case THREAD_EXTENDED_POLICY:
    {
        boolean_t timeshare = TRUE;

        if (count >= THREAD_EXTENDED_POLICY_COUNT) {
            thread_extended_policy_t info;

            info = (thread_extended_policy_t)policy_info;
            timeshare = info->timeshare;
        }

        sched_mode_t mode = (timeshare == TRUE) ? TH_MODE_TIMESHARE : TH_MODE_FIXED;

        spl_t s = splsched();
        thread_lock(thread);

        thread_set_user_sched_mode_and_recompute_pri(thread, mode);

        thread_unlock(thread);
        splx(s);

        pend_token.tpt_update_thread_sfi = 1;

        break;
    }

    case THREAD_TIME_CONSTRAINT_POLICY:
    {
        thread_time_constraint_policy_t info;

        if (count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_time_constraint_policy_t)policy_info;
        if (info->constraint < info->computation ||
            info->computation > max_rt_quantum ||
            info->computation < min_rt_quantum) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        spl_t s = splsched();
        thread_lock(thread);

        thread->realtime.period      = info->period;
        thread->realtime.computation = info->computation;
        thread->realtime.constraint  = info->constraint;
        thread->realtime.preemptible = info->preemptible;

        thread_set_user_sched_mode_and_recompute_pri(thread, TH_MODE_REALTIME);

        thread_unlock(thread);
        splx(s);

        pend_token.tpt_update_thread_sfi = 1;

        break;
    }

    case THREAD_PRECEDENCE_POLICY:
    {
        thread_precedence_policy_t info;

        if (count < THREAD_PRECEDENCE_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }
        info = (thread_precedence_policy_t)policy_info;

        spl_t s = splsched();
        thread_lock(thread);

        thread->importance = info->importance;

        thread_recompute_priority(thread);

        thread_unlock(thread);
        splx(s);

        break;
    }

    case THREAD_AFFINITY_POLICY:
    {
        thread_affinity_policy_t info;

        if (!thread_affinity_is_supported()) {
            result = KERN_NOT_SUPPORTED;
            break;
        }
        if (count < THREAD_AFFINITY_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_affinity_policy_t) policy_info;
        /*
         * Unlock the thread mutex here and
         * return directly after calling thread_affinity_set().
         * This is necessary for correct lock ordering because
         * thread_affinity_set() takes the task lock.
         */
        thread_mtx_unlock(thread);
        return thread_affinity_set(thread, info->affinity_tag);
    }

    case THREAD_THROUGHPUT_QOS_POLICY:
    {
        thread_throughput_qos_policy_t info = (thread_throughput_qos_policy_t) policy_info;
        thread_throughput_qos_t tqos;

        if (count < THREAD_THROUGHPUT_QOS_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        if ((result = qos_throughput_policy_validate(info->thread_throughput_qos_tier)) != KERN_SUCCESS)
            break;

        tqos = qos_extract(info->thread_throughput_qos_tier);

        proc_set_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE,
                                      TASK_POLICY_THROUGH_QOS, tqos, 0, &pend_token);

        break;
    }

    case THREAD_LATENCY_QOS_POLICY:
    {
        thread_latency_qos_policy_t info = (thread_latency_qos_policy_t) policy_info;
        thread_latency_qos_t lqos;

        if (count < THREAD_LATENCY_QOS_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        if ((result = qos_latency_policy_validate(info->thread_latency_qos_tier)) != KERN_SUCCESS)
            break;

        lqos = qos_extract(info->thread_latency_qos_tier);

        proc_set_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE,
                                      TASK_POLICY_LATENCY_QOS, lqos, 0, &pend_token);

        break;
    }

    case THREAD_QOS_POLICY:
    {
        thread_qos_policy_t info = (thread_qos_policy_t)policy_info;

        if (count < THREAD_QOS_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        if (info->qos_tier < 0 || info->qos_tier >= THREAD_QOS_LAST) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        if (info->tier_importance > 0 || info->tier_importance < THREAD_QOS_MIN_TIER_IMPORTANCE) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        if (info->qos_tier == THREAD_QOS_UNSPECIFIED && info->tier_importance != 0) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        proc_set_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_AND_RELPRIO,
                                      info->qos_tier, -info->tier_importance, &pend_token);

        break;
    }

    default:
        result = KERN_INVALID_ARGUMENT;
        break;
    }

    thread_mtx_unlock(thread);

    thread_policy_update_complete_unlocked(thread, &pend_token);

    return (result);
}
/*
 * Note that there is no implemented difference between POLICY_RR and POLICY_FIFO.
 * Both result in FIXED mode scheduling.
 */
static sched_mode_t
convert_policy_to_sched_mode(integer_t policy) {
    switch (policy) {
        case POLICY_TIMESHARE:
            return TH_MODE_TIMESHARE;
        case POLICY_RR:
        case POLICY_FIFO:
            return TH_MODE_FIXED;
        default:
            panic("unexpected sched policy: %d", policy);
            return TH_MODE_NONE;
    }
}
/*
 * Called either with the thread mutex locked
 * or from the pthread kext in a 'safe place'.
 */
static kern_return_t
thread_set_mode_and_absolute_pri_internal(thread_t          thread,
                                          sched_mode_t      mode,
                                          integer_t         priority,
                                          task_pend_token_t pend_token)
{
    kern_return_t kr = KERN_SUCCESS;

    spl_t s = splsched();
    thread_lock(thread);

    /* This path isn't allowed to change a thread out of realtime. */
    if ((thread->sched_mode == TH_MODE_REALTIME) ||
        (thread->saved_mode == TH_MODE_REALTIME)) {
        kr = KERN_FAILURE;
        goto unlock;
    }

    if (thread->policy_reset) {
        kr = KERN_SUCCESS;
        goto unlock;
    }

    sched_mode_t old_mode = thread->sched_mode;

    /*
     * Reverse engineer and apply the correct importance value
     * from the requested absolute priority value.
     *
     * TODO: Store the absolute priority value instead
     */

    if (priority >= thread->max_priority)
        priority = thread->max_priority - thread->task_priority;
    else if (priority >= MINPRI_KERNEL)
        priority -= MINPRI_KERNEL;
    else if (priority >= MINPRI_RESERVED)
        priority -= MINPRI_RESERVED;
    else
        priority -= BASEPRI_DEFAULT;

    priority += thread->task_priority;

    if (priority > thread->max_priority)
        priority = thread->max_priority;
    else if (priority < MINPRI)
        priority = MINPRI;

    thread->importance = priority - thread->task_priority;

    thread_set_user_sched_mode_and_recompute_pri(thread, mode);

    if (mode != old_mode)
        pend_token->tpt_update_thread_sfi = 1;

unlock:
    thread_unlock(thread);
    splx(s);

    return kr;
}
/*
 * KPI for pthread kext
 *
 * Set scheduling policy & absolute priority for thread
 * May be called from waitqueue callout context with spinlocks held
 * Thread mutex lock is not held
 */
kern_return_t
thread_set_workq_pri(thread_t  thread,
                     integer_t priority,
                     integer_t policy)
{
    struct task_pend_token pend_token = {};
    sched_mode_t mode = convert_policy_to_sched_mode(policy);

    assert(thread->static_param);
    if (!thread->static_param)
        return KERN_FAILURE;

    /* Concern: this doesn't hold the mutex... */
    if (!thread->active)
        return KERN_TERMINATED;

    kern_return_t kr = thread_set_mode_and_absolute_pri_internal(thread, mode, priority, &pend_token);

    if (pend_token.tpt_update_thread_sfi)
        sfi_reevaluate(thread);

    return kr;
}
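/*
 * Illustrative usage sketch (not part of the original source): a caller such as
 * the pthread kext could pin a workqueue thread to a fixed priority with, e.g.:
 *
 *     kern_return_t kr = thread_set_workq_pri(th, BASEPRI_DEFAULT, POLICY_TIMESHARE);
 *
 * The policy argument is translated by convert_policy_to_sched_mode() above, and
 * the absolute priority is reverse-mapped to a thread importance by
 * thread_set_mode_and_absolute_pri_internal().
 */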
/*
 * thread_set_mode_and_absolute_pri:
 *
 * Set scheduling policy & absolute priority for thread, for deprecated
 * thread_set_policy and thread_policy interfaces.
 *
 * Called with nothing locked.
 */
kern_return_t
thread_set_mode_and_absolute_pri(thread_t  thread,
                                 integer_t policy,
                                 integer_t priority)
{
    kern_return_t kr = KERN_SUCCESS;
    struct task_pend_token pend_token = {};

    sched_mode_t mode = convert_policy_to_sched_mode(policy);

    thread_mtx_lock(thread);

    if (!thread->active) {
        kr = KERN_TERMINATED;
        goto unlock;
    }

    if (thread_is_static_param(thread)) {
        kr = KERN_POLICY_STATIC;
        goto unlock;
    }

    /* Setting legacy policies on threads kills the current QoS */
    if (thread->requested_policy.thrp_qos != THREAD_QOS_UNSPECIFIED)
        thread_remove_qos_policy_locked(thread, &pend_token);

    kr = thread_set_mode_and_absolute_pri_internal(thread, mode, priority, &pend_token);

unlock:
    thread_mtx_unlock(thread);

    thread_policy_update_complete_unlocked(thread, &pend_token);

    return (kr);
}
/*
 * Set the thread's requested mode and recompute priority
 * Called with thread mutex and thread locked
 *
 * TODO: Mitigate potential problems caused by moving thread to end of runq
 * whenever its priority is recomputed
 * Only remove when it actually changes? Attempt to re-insert at appropriate location?
 */
static void
thread_set_user_sched_mode_and_recompute_pri(thread_t thread, sched_mode_t mode)
{
    if (thread->policy_reset)
        return;

    boolean_t removed = thread_run_queue_remove(thread);

    /*
     * TODO: Instead of having saved mode, have 'user mode' and 'true mode'.
     * That way there's zero confusion over which the user wants
     * and which the kernel wants.
     */
    if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK)
        thread->saved_mode = mode;
    else
        sched_set_thread_mode(thread, mode);

    thread_recompute_priority(thread);

    if (removed)
        thread_run_queue_reinsert(thread, SCHED_TAILQ);
}
/* called at splsched with thread lock locked */
static void
thread_update_qos_cpu_time_locked(thread_t thread)
{
    task_t task = thread->task;
    uint64_t timer_sum, timer_delta;

    /*
     * This is only as accurate as the distance between
     * last context switch (embedded) or last user/kernel boundary transition (desktop)
     * because user_timer and system_timer are only updated then.
     *
     * TODO: Consider running a thread_timer_event operation here to update it first.
     * Maybe doable with interrupts disabled from current thread.
     * If the thread is on a different core, may not be easy to get right.
     *
     * TODO: There should be a function for this in timer.c
     */

    timer_sum = timer_grab(&thread->user_timer);
    timer_sum += timer_grab(&thread->system_timer);
    timer_delta = timer_sum - thread->vtimer_qos_save;

    thread->vtimer_qos_save = timer_sum;

    uint64_t* task_counter = NULL;

    /* Update the task-level qos stats atomically, because we don't have the task lock. */
    switch (thread->effective_policy.thep_qos) {
    case THREAD_QOS_DEFAULT:          task_counter = &task->cpu_time_qos_stats.cpu_time_qos_default;          break;
    case THREAD_QOS_MAINTENANCE:      task_counter = &task->cpu_time_qos_stats.cpu_time_qos_maintenance;      break;
    case THREAD_QOS_BACKGROUND:       task_counter = &task->cpu_time_qos_stats.cpu_time_qos_background;       break;
    case THREAD_QOS_UTILITY:          task_counter = &task->cpu_time_qos_stats.cpu_time_qos_utility;          break;
    case THREAD_QOS_LEGACY:           task_counter = &task->cpu_time_qos_stats.cpu_time_qos_legacy;           break;
    case THREAD_QOS_USER_INITIATED:   task_counter = &task->cpu_time_qos_stats.cpu_time_qos_user_initiated;   break;
    case THREAD_QOS_USER_INTERACTIVE: task_counter = &task->cpu_time_qos_stats.cpu_time_qos_user_interactive; break;
    default:
        panic("unknown effective QoS: %d", thread->effective_policy.thep_qos);
    }

    OSAddAtomic64(timer_delta, task_counter);
}
/*
 * called with no thread locks held
 */
void
thread_update_qos_cpu_time(thread_t thread)
{
    thread_mtx_lock(thread);

    spl_t s = splsched();
    thread_lock(thread);

    thread_update_qos_cpu_time_locked(thread);

    thread_unlock(thread);
    splx(s);

    thread_mtx_unlock(thread);
}
/*
 * Calculate base priority from thread attributes, and set it on the thread
 *
 * Called with thread_lock and thread mutex held.
 */
void
thread_recompute_priority(
    thread_t thread)
{
    integer_t priority;

    if (thread->policy_reset)
        return;

    if (thread->sched_mode == TH_MODE_REALTIME) {
        sched_set_thread_base_priority(thread, BASEPRI_RTQUEUES);
        return;
    } else if (thread->effective_policy.thep_qos != THREAD_QOS_UNSPECIFIED) {
        int qos = thread->effective_policy.thep_qos;
        int qos_ui_is_urgent = thread->effective_policy.thep_qos_ui_is_urgent;
        int qos_relprio = -(thread->effective_policy.thep_qos_relprio); /* stored in task policy inverted */
        int qos_scaled_relprio;

        assert(qos >= 0 && qos < THREAD_QOS_LAST);
        assert(qos_relprio <= 0 && qos_relprio >= THREAD_QOS_MIN_TIER_IMPORTANCE);

        priority = thread_qos_policy_params.qos_pri[qos];
        qos_scaled_relprio = thread_qos_scaled_relative_priority(qos, qos_relprio);

        if (qos == THREAD_QOS_USER_INTERACTIVE && qos_ui_is_urgent == 1) {
            /* Bump priority 46 to 47 when in a frontmost app */
            qos_scaled_relprio += 1;
        }

        /* TODO: factor in renice priority here? */

        priority += qos_scaled_relprio;
    } else {
        if (thread->importance > MAXPRI)
            priority = MAXPRI;
        else if (thread->importance < -MAXPRI)
            priority = -MAXPRI;
        else
            priority = thread->importance;

        priority += thread->task_priority;
    }

    priority = MAX(priority, thread->user_promotion_basepri);

    /*
     * Clamp priority back into the allowed range for this task.
     * The initial priority value could be out of this range due to:
     *      Task clamped to BG or Utility (max-pri is 4, or 20)
     *      Task is user task (max-pri is 63)
     *      Task is kernel task (max-pri is 95)
     * Note that thread->importance is user-settable to any integer
     * via THREAD_PRECEDENCE_POLICY.
     */
    if (priority > thread->max_priority)
        priority = thread->max_priority;
    else if (priority < MINPRI)
        priority = MINPRI;

    if (thread->saved_mode == TH_MODE_REALTIME &&
        thread->sched_flags & TH_SFLAG_FAILSAFE)
        priority = DEPRESSPRI;

    if (thread->effective_policy.thep_terminated == TRUE) {
        /*
         * We temporarily want to override the expected priority to
         * ensure that the thread exits in a timely manner.
         * Note that this is allowed to exceed thread->max_priority
         * so that the thread is no longer clamped to background
         * during the final exit phase.
         */
        if (priority < thread->task_priority)
            priority = thread->task_priority;
        if (priority < BASEPRI_DEFAULT)
            priority = BASEPRI_DEFAULT;
    }

    sched_set_thread_base_priority(thread, priority);
}
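/*
 * Illustrative example (not part of the original source): for a
 * THREAD_QOS_USER_INTERACTIVE thread with no relative priority, the base
 * priority starts at qos_pri[THREAD_QOS_USER_INTERACTIVE] (BASEPRI_BACKGROUND,
 * i.e. 46); if the task is frontmost (thep_qos_ui_is_urgent == 1) the +1 bump
 * above yields 47, which is then clamped against thread->max_priority and
 * MINPRI before being handed to sched_set_thread_base_priority().
 */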
/* Called with the task lock held, but not the thread mutex or spinlock */
void
thread_policy_update_tasklocked(
    thread_t          thread,
    integer_t         priority,
    integer_t         max_priority,
    task_pend_token_t pend_token)
{
    thread_mtx_lock(thread);

    if (!thread->active || thread->policy_reset) {
        thread_mtx_unlock(thread);
        return;
    }

    spl_t s = splsched();
    thread_lock(thread);

    __unused integer_t old_max_priority = thread->max_priority;

    thread->task_priority = priority;
    thread->max_priority = max_priority;

    thread_policy_update_spinlocked(thread, TRUE, pend_token);

    thread_unlock(thread);
    splx(s);

    thread_mtx_unlock(thread);
}
/*
 * Reset thread to default state in preparation for termination
 * Called with thread mutex locked
 *
 * Always called on current thread, so we don't need a run queue remove
 */
void
thread_policy_reset(
    thread_t thread)
{
    spl_t s;

    assert(thread == current_thread());

    s = splsched();
    thread_lock(thread);

    if (thread->sched_flags & TH_SFLAG_FAILSAFE)
        sched_thread_mode_undemote(thread, TH_SFLAG_FAILSAFE);

    if (thread->sched_flags & TH_SFLAG_THROTTLED)
        sched_thread_mode_undemote(thread, TH_SFLAG_THROTTLED);

    /* At this point, the various demotions should be inactive */
    assert(!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK));
    assert(!(thread->sched_flags & TH_SFLAG_THROTTLED));
    assert(!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK));

    /* Reset thread back to task-default basepri and mode */
    sched_mode_t newmode = SCHED(initial_thread_sched_mode)(thread->task);

    sched_set_thread_mode(thread, newmode);

    thread->importance = 0;

    /* Prevent further changes to thread base priority or mode */
    thread->policy_reset = 1;

    sched_set_thread_base_priority(thread, thread->task_priority);

    thread_unlock(thread);
    splx(s);
}
kern_return_t
thread_policy_get(
    thread_t                thread,
    thread_policy_flavor_t  flavor,
    thread_policy_t         policy_info,
    mach_msg_type_number_t  *count,
    boolean_t               *get_default)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);
    if (!thread->active) {
        thread_mtx_unlock(thread);

        return (KERN_TERMINATED);
    }

    switch (flavor) {

    case THREAD_EXTENDED_POLICY:
    {
        boolean_t timeshare = TRUE;

        if (!(*get_default)) {
            spl_t s = splsched();
            thread_lock(thread);

            if ((thread->sched_mode != TH_MODE_REALTIME) &&
                (thread->saved_mode != TH_MODE_REALTIME)) {
                if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK))
                    timeshare = (thread->sched_mode == TH_MODE_TIMESHARE) != 0;
                else
                    timeshare = (thread->saved_mode == TH_MODE_TIMESHARE) != 0;
            } else {
                *get_default = TRUE;
            }

            thread_unlock(thread);
            splx(s);
        }

        if (*count >= THREAD_EXTENDED_POLICY_COUNT) {
            thread_extended_policy_t info;

            info = (thread_extended_policy_t)policy_info;
            info->timeshare = timeshare;
        }

        break;
    }

    case THREAD_TIME_CONSTRAINT_POLICY:
    {
        thread_time_constraint_policy_t info;

        if (*count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_time_constraint_policy_t)policy_info;

        if (!(*get_default)) {
            spl_t s = splsched();
            thread_lock(thread);

            if ((thread->sched_mode == TH_MODE_REALTIME) ||
                (thread->saved_mode == TH_MODE_REALTIME)) {
                info->period      = thread->realtime.period;
                info->computation = thread->realtime.computation;
                info->constraint  = thread->realtime.constraint;
                info->preemptible = thread->realtime.preemptible;
            } else {
                *get_default = TRUE;
            }

            thread_unlock(thread);
            splx(s);
        }

        if (*get_default) {
            info->period      = 0;
            info->computation = default_timeshare_computation;
            info->constraint  = default_timeshare_constraint;
            info->preemptible = TRUE;
        }

        break;
    }

    case THREAD_PRECEDENCE_POLICY:
    {
        thread_precedence_policy_t info;

        if (*count < THREAD_PRECEDENCE_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_precedence_policy_t)policy_info;

        if (!(*get_default)) {
            spl_t s = splsched();
            thread_lock(thread);

            info->importance = thread->importance;

            thread_unlock(thread);
            splx(s);
        } else {
            info->importance = 0;
        }

        break;
    }

    case THREAD_AFFINITY_POLICY:
    {
        thread_affinity_policy_t info;

        if (!thread_affinity_is_supported()) {
            result = KERN_NOT_SUPPORTED;
            break;
        }
        if (*count < THREAD_AFFINITY_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_affinity_policy_t)policy_info;

        if (!(*get_default))
            info->affinity_tag = thread_affinity_get(thread);
        else
            info->affinity_tag = THREAD_AFFINITY_TAG_NULL;

        break;
    }

    case THREAD_POLICY_STATE:
    {
        thread_policy_state_t info;

        if (*count < THREAD_POLICY_STATE_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        /* Only root can get this info */
        if (current_task()->sec_token.val[0] != 0) {
            result = KERN_PROTECTION_FAILURE;
            break;
        }

        info = (thread_policy_state_t)(void*)policy_info;

        if (!(*get_default)) {
            info->flags = 0;

            spl_t s = splsched();
            thread_lock(thread);

            info->flags |= (thread->static_param ? THREAD_POLICY_STATE_FLAG_STATIC_PARAM : 0);

            info->thps_requested_policy = *(uint64_t*)(void*)(&thread->requested_policy);
            info->thps_effective_policy = *(uint64_t*)(void*)(&thread->effective_policy);

            info->thps_user_promotions        = thread->user_promotions;
            info->thps_user_promotion_basepri = thread->user_promotion_basepri;
            info->thps_ipc_overrides          = thread->ipc_overrides;

            proc_get_thread_policy_bitfield(thread, info);

            thread_unlock(thread);
            splx(s);
        } else {
            info->requested = 0;
            info->effective = 0;
        }

        break;
    }

    case THREAD_LATENCY_QOS_POLICY:
    {
        thread_latency_qos_policy_t info = (thread_latency_qos_policy_t) policy_info;
        thread_latency_qos_t plqos;

        if (*count < THREAD_LATENCY_QOS_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        if (*get_default) {
            plqos = 0;
        } else {
            plqos = proc_get_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_LATENCY_QOS, NULL);
        }

        info->thread_latency_qos_tier = qos_latency_policy_package(plqos);

        break;
    }

    case THREAD_THROUGHPUT_QOS_POLICY:
    {
        thread_throughput_qos_policy_t info = (thread_throughput_qos_policy_t) policy_info;
        thread_throughput_qos_t ptqos;

        if (*count < THREAD_THROUGHPUT_QOS_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        if (*get_default) {
            ptqos = 0;
        } else {
            ptqos = proc_get_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_THROUGH_QOS, NULL);
        }

        info->thread_throughput_qos_tier = qos_throughput_policy_package(ptqos);

        break;
    }

    case THREAD_QOS_POLICY:
    {
        thread_qos_policy_t info = (thread_qos_policy_t)policy_info;

        if (*count < THREAD_QOS_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        if (!(*get_default)) {
            int relprio_value = 0;
            info->qos_tier = proc_get_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE,
                                                           TASK_POLICY_QOS_AND_RELPRIO, &relprio_value);

            info->tier_importance = -relprio_value;
        } else {
            info->qos_tier = THREAD_QOS_UNSPECIFIED;
            info->tier_importance = 0;
        }

        break;
    }

    default:
        result = KERN_INVALID_ARGUMENT;
        break;
    }

    thread_mtx_unlock(thread);

    return (result);
}
static volatile uint64_t unique_work_interval_id = 1; /* Start at 1, 0 is not a valid work interval ID */

kern_return_t
thread_policy_create_work_interval(
    thread_t  thread,
    uint64_t  *work_interval_id)
{
    thread_mtx_lock(thread);
    if (thread->work_interval_id) {
        /* already assigned a work interval ID */
        thread_mtx_unlock(thread);
        return (KERN_INVALID_VALUE);
    }

    thread->work_interval_id = OSIncrementAtomic64((volatile int64_t *)&unique_work_interval_id);
    *work_interval_id = thread->work_interval_id;

    thread_mtx_unlock(thread);
    return KERN_SUCCESS;
}
kern_return_t
thread_policy_destroy_work_interval(
    thread_t  thread,
    uint64_t  work_interval_id)
{
    thread_mtx_lock(thread);
    if (work_interval_id == 0 || thread->work_interval_id == 0 || thread->work_interval_id != work_interval_id) {
        /* work ID isn't valid or doesn't match previously assigned work interval ID */
        thread_mtx_unlock(thread);
        return (KERN_INVALID_ARGUMENT);
    }

    thread->work_interval_id = 0;

    thread_mtx_unlock(thread);
    return KERN_SUCCESS;
}
void
thread_policy_create(thread_t thread)
{
    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_THREAD))) | DBG_FUNC_START,
        thread_tid(thread), theffective_0(thread),
        theffective_1(thread), thread->base_pri, 0);

    /* We pass a pend token but ignore it */
    struct task_pend_token pend_token = {};

    thread_policy_update_internal_spinlocked(thread, TRUE, &pend_token);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_THREAD))) | DBG_FUNC_END,
        thread_tid(thread), theffective_0(thread),
        theffective_1(thread), thread->base_pri, 0);
}
static void
thread_policy_update_spinlocked(thread_t thread, boolean_t recompute_priority, task_pend_token_t pend_token)
{
    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        (IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_THREAD) | DBG_FUNC_START),
        thread_tid(thread), theffective_0(thread),
        theffective_1(thread), thread->base_pri, 0);

    thread_policy_update_internal_spinlocked(thread, recompute_priority, pend_token);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        (IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_THREAD)) | DBG_FUNC_END,
        thread_tid(thread), theffective_0(thread),
        theffective_1(thread), thread->base_pri, 0);
}
/*
 * One thread state update function TO RULE THEM ALL
 *
 * This function updates the thread effective policy fields
 * and pushes the results to the relevant subsystems.
 *
 * Returns TRUE if a pended action needs to be run.
 *
 * Called with thread spinlock locked, task may be locked, thread mutex may be locked
 */
static void
thread_policy_update_internal_spinlocked(thread_t thread, boolean_t recompute_priority,
                                         task_pend_token_t pend_token)
{
    /*
     * Gather requested policy and effective task state
     */

    struct thread_requested_policy requested = thread->requested_policy;
    struct task_effective_policy task_effective = thread->task->effective_policy;

    /*
     * Calculate new effective policies from requested policy, task and thread state
     * Don't change requested, it won't take effect
     */

    struct thread_effective_policy next = {};

    next.thep_qos_ui_is_urgent = task_effective.tep_qos_ui_is_urgent;

    uint32_t next_qos = requested.thrp_qos;

    if (requested.thrp_qos != THREAD_QOS_UNSPECIFIED) {
        if (requested.thrp_qos_override != THREAD_QOS_UNSPECIFIED)
            next_qos = MAX(requested.thrp_qos_override, next_qos);

        if (requested.thrp_qos_promote != THREAD_QOS_UNSPECIFIED)
            next_qos = MAX(requested.thrp_qos_promote, next_qos);

        if (requested.thrp_qos_ipc_override != THREAD_QOS_UNSPECIFIED)
            next_qos = MAX(requested.thrp_qos_ipc_override, next_qos);
    }

    next.thep_qos = next_qos;

    /* A task clamp will result in an effective QoS even when requested is UNSPECIFIED */
    if (task_effective.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) {
        if (next.thep_qos != THREAD_QOS_UNSPECIFIED)
            next.thep_qos = MIN(task_effective.tep_qos_clamp, next.thep_qos);
        else
            next.thep_qos = task_effective.tep_qos_clamp;
    }

    /*
     * Extract outbound-promotion QoS before applying task ceiling or BG clamp
     * This allows QoS promotions to work properly even after the process is unclamped.
     */
    next.thep_qos_promote = next.thep_qos;

    /* The ceiling only applies to threads that are in the QoS world */
    if (task_effective.tep_qos_ceiling != THREAD_QOS_UNSPECIFIED &&
        next.thep_qos                  != THREAD_QOS_UNSPECIFIED) {
        next.thep_qos = MIN(task_effective.tep_qos_ceiling, next.thep_qos);
    }

    /*
     * The QoS relative priority is only applicable when the original programmer's
     * intended (requested) QoS is in effect. When the QoS is clamped (e.g.
     * USER_INITIATED-13REL clamped to UTILITY), the relative priority is not honored,
     * since otherwise it would be lower than unclamped threads. Similarly, in the
     * presence of boosting, the programmer doesn't know what other actors
     * are boosting the thread.
     */
    if ((requested.thrp_qos != THREAD_QOS_UNSPECIFIED) &&
        (requested.thrp_qos == next.thep_qos) &&
        (requested.thrp_qos_override == THREAD_QOS_UNSPECIFIED)) {
        next.thep_qos_relprio = requested.thrp_qos_relprio;
    } else {
        next.thep_qos_relprio = 0;
    }

    /* Calculate DARWIN_BG */
    boolean_t wants_darwinbg       = FALSE;
    boolean_t wants_all_sockets_bg = FALSE; /* Do I want my existing sockets to be bg */

    /*
     * If DARWIN_BG has been requested at either level, it's engaged.
     * darwinbg threads always create bg sockets,
     * but only some types of darwinbg change the sockets
     * after they're created
     */
    if (requested.thrp_int_darwinbg || requested.thrp_ext_darwinbg)
        wants_all_sockets_bg = wants_darwinbg = TRUE;

    if (requested.thrp_pidbind_bg)
        wants_all_sockets_bg = wants_darwinbg = TRUE;

    if (task_effective.tep_darwinbg)
        wants_darwinbg = TRUE;

    if (next.thep_qos == THREAD_QOS_BACKGROUND ||
        next.thep_qos == THREAD_QOS_MAINTENANCE)
        wants_darwinbg = TRUE;

    /* Calculate side effects of DARWIN_BG */

    if (wants_darwinbg)
        next.thep_darwinbg = 1;

    if (next.thep_darwinbg || task_effective.tep_new_sockets_bg)
        next.thep_new_sockets_bg = 1;

    /* Don't use task_effective.tep_all_sockets_bg here */
    if (wants_all_sockets_bg)
        next.thep_all_sockets_bg = 1;

    /* darwinbg implies background QOS (or lower) */
    if (next.thep_darwinbg &&
        (next.thep_qos > THREAD_QOS_BACKGROUND || next.thep_qos == THREAD_QOS_UNSPECIFIED)) {
        next.thep_qos = THREAD_QOS_BACKGROUND;
        next.thep_qos_relprio = 0;
    }

    /* Calculate IO policy */

    int iopol = THROTTLE_LEVEL_TIER0;

    /* Factor in the task's IO policy */
    if (next.thep_darwinbg)
        iopol = MAX(iopol, task_effective.tep_bg_iotier);

    iopol = MAX(iopol, task_effective.tep_io_tier);

    /* Look up the associated IO tier value for the QoS class */
    iopol = MAX(iopol, thread_qos_policy_params.qos_iotier[next.thep_qos]);

    iopol = MAX(iopol, requested.thrp_int_iotier);
    iopol = MAX(iopol, requested.thrp_ext_iotier);

    next.thep_io_tier = iopol;

    /*
     * If a QoS override is causing IO to go into a lower tier, we also set
     * the passive bit so that a thread doesn't end up stuck in its own throttle
     * window when the override goes away.
     */
    boolean_t qos_io_override_active = FALSE;
    if (thread_qos_policy_params.qos_iotier[next.thep_qos] <
        thread_qos_policy_params.qos_iotier[requested.thrp_qos])
        qos_io_override_active = TRUE;

    /* Calculate Passive IO policy */
    if (requested.thrp_ext_iopassive ||
        requested.thrp_int_iopassive ||
        qos_io_override_active ||
        task_effective.tep_io_passive)
        next.thep_io_passive = 1;

    /* Calculate timer QOS */
    uint32_t latency_qos = requested.thrp_latency_qos;

    latency_qos = MAX(latency_qos, task_effective.tep_latency_qos);
    latency_qos = MAX(latency_qos, thread_qos_policy_params.qos_latency_qos[next.thep_qos]);

    next.thep_latency_qos = latency_qos;

    /* Calculate throughput QOS */
    uint32_t through_qos = requested.thrp_through_qos;

    through_qos = MAX(through_qos, task_effective.tep_through_qos);
    through_qos = MAX(through_qos, thread_qos_policy_params.qos_through_qos[next.thep_qos]);

    next.thep_through_qos = through_qos;

    if (task_effective.tep_terminated || requested.thrp_terminated) {
        /* Shoot down the throttles that slow down exit or response to SIGTERM */
        next.thep_terminated  = 1;
        next.thep_darwinbg    = 0;
        next.thep_io_tier     = THROTTLE_LEVEL_TIER0;
        next.thep_qos         = THREAD_QOS_UNSPECIFIED;
        next.thep_latency_qos = LATENCY_QOS_TIER_UNSPECIFIED;
        next.thep_through_qos = THROUGHPUT_QOS_TIER_UNSPECIFIED;
    }

    /*
     * Swap out old policy for new policy
     */

    struct thread_effective_policy prev = thread->effective_policy;

    thread_update_qos_cpu_time_locked(thread);

    /* This is the point where the new values become visible to other threads */
    thread->effective_policy = next;

    /*
     * Pend updates that can't be done while holding the thread lock
     */

    if (prev.thep_all_sockets_bg != next.thep_all_sockets_bg)
        pend_token->tpt_update_sockets = 1;

    /* TODO: Doesn't this only need to be done if the throttle went up? */
    if (prev.thep_io_tier != next.thep_io_tier)
        pend_token->tpt_update_throttle = 1;

    /*
     * Check for the attributes that sfi_thread_classify() consults,
     * and trigger SFI re-evaluation.
     */
    if (prev.thep_qos != next.thep_qos ||
        prev.thep_darwinbg != next.thep_darwinbg)
        pend_token->tpt_update_thread_sfi = 1;

    /*
     * Update other subsystems as necessary if something has changed
     */

    /* Check for the attributes that thread_recompute_priority() consults */
    if (prev.thep_qos != next.thep_qos ||
        prev.thep_qos_relprio != next.thep_qos_relprio ||
        prev.thep_qos_ui_is_urgent != next.thep_qos_ui_is_urgent ||
        prev.thep_terminated != next.thep_terminated ||
        pend_token->tpt_force_recompute_pri == 1 ||
        recompute_priority) {
        thread_recompute_priority(thread);
    }
}
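/*
 * Illustrative example (not part of the original source): if a thread requests
 * THREAD_QOS_LEGACY with a dispatch override of THREAD_QOS_USER_INITIATED, the
 * MAX() steps above raise next_qos to USER_INITIATED; if the task is clamped to
 * THREAD_QOS_UTILITY, the MIN() against tep_qos_clamp / tep_qos_ceiling lowers
 * the effective QoS back to UTILITY. Since the QoS in effect is no longer the
 * requested one (and an override is present), thep_qos_relprio is forced to 0.
 */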
/*
 * Initiate a thread policy state transition on a thread with its TID
 * Useful if you cannot guarantee the thread won't get terminated
 * Precondition: No locks are held
 * Will take task lock - using the non-tid variant is faster
 * if you already have a thread ref.
 */
void
proc_set_thread_policy_with_tid(task_t   task,
                                uint64_t tid,
                                int      category,
                                int      flavor,
                                int      value)
{
    /* takes task lock, returns ref'ed thread or NULL */
    thread_t thread = task_findtid(task, tid);

    if (thread == THREAD_NULL)
        return;

    proc_set_thread_policy(thread, category, flavor, value);

    thread_deallocate(thread);
}
/*
 * Initiate a thread policy transition on a thread
 * This path supports networking transitions (i.e. darwinbg transitions)
 * Precondition: No locks are held
 */
void
proc_set_thread_policy(thread_t thread,
                       int      category,
                       int      flavor,
                       int      value)
{
    struct task_pend_token pend_token = {};

    thread_mtx_lock(thread);

    proc_set_thread_policy_locked(thread, category, flavor, value, 0, &pend_token);

    thread_mtx_unlock(thread);

    thread_policy_update_complete_unlocked(thread, &pend_token);
}
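/*
 * Illustrative usage sketch (not part of the original source): a kernel
 * subsystem holding a thread reference could move that thread's requested I/O
 * policy with something like:
 *
 *     proc_set_thread_policy(thread, TASK_POLICY_INTERNAL, TASK_POLICY_IOPOL, IOPOL_THROTTLE);
 *
 * which funnels through proc_set_thread_policy_locked() ->
 * proc_set_thread_policy_spinlocked() and finishes with
 * thread_policy_update_complete_unlocked() once the mutex is dropped.
 */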
/*
 * KPI for pthread kext to call to set thread base QoS values during a workq wakeup
 * May be called with interrupts disabled and workqueue/waitqueue/kqueue locks held
 *
 * Does NOT do update completion, so the thread MUST be in a safe place WRT
 * IO throttling and SFI.
 *
 * TODO: Can I assert 'it must be in a safe place'?
 */
kern_return_t
thread_set_workq_qos(thread_t thread,
                     int      qos_tier,
                     int      relprio) /* relprio is -16 to 0 */
{
    assert(qos_tier >= 0 && qos_tier <= THREAD_QOS_LAST);
    assert(relprio <= 0 && relprio >= THREAD_QOS_MIN_TIER_IMPORTANCE);

    if (!(qos_tier >= 0 && qos_tier <= THREAD_QOS_LAST))
        return KERN_FAILURE;
    if (!(relprio <= 0 && relprio >= THREAD_QOS_MIN_TIER_IMPORTANCE))
        return KERN_FAILURE;

    if (qos_tier == THREAD_QOS_UNSPECIFIED) {
        assert(relprio == 0);
        if (relprio != 0)
            return KERN_FAILURE;
    }

    assert(thread->static_param);
    if (!thread->static_param) {
        return KERN_FAILURE;
    }

    /* Concern: this doesn't hold the mutex... */
    //if (!thread->active)
    //	return KERN_TERMINATED;

    struct task_pend_token pend_token = {};

    proc_set_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_AND_RELPRIO, qos_tier, -relprio, &pend_token);

    assert(pend_token.tpt_update_sockets == 0);
    /* we don't need to update throttle or sfi because pthread kext promises the thread is in a safe place */
    /* TODO: Do we need to update SFI to ensure it gets tagged with the AST? */

    return KERN_SUCCESS;
}
/*
 * Do the things that can't be done while holding a thread mutex.
 * These are set up to call back into thread policy to get the latest value,
 * so they don't have to be synchronized with the update.
 * The only required semantic is 'call this sometime after updating effective policy'
 *
 * Precondition: Thread mutex is not held
 *
 * This may be called with the task lock held, but in that case it won't be
 * called with tpt_update_sockets set.
 */
void
thread_policy_update_complete_unlocked(thread_t thread, task_pend_token_t pend_token)
{
#ifdef MACH_BSD
    if (pend_token->tpt_update_sockets)
        proc_apply_task_networkbg(thread->task->bsd_info, thread);
#endif /* MACH_BSD */

    if (pend_token->tpt_update_throttle)
        rethrottle_thread(thread->uthread);

    if (pend_token->tpt_update_thread_sfi)
        sfi_reevaluate(thread);
}
/*
 * Set and update thread policy
 * Thread mutex might be held
 */
static void
proc_set_thread_policy_locked(thread_t          thread,
                              int               category,
                              int               flavor,
                              int               value,
                              int               value2,
                              task_pend_token_t pend_token)
{
    spl_t s = splsched();
    thread_lock(thread);

    proc_set_thread_policy_spinlocked(thread, category, flavor, value, value2, pend_token);

    thread_unlock(thread);
    splx(s);
}
/*
 * Set and update thread policy
 * Thread spinlock is held
 */
static void
proc_set_thread_policy_spinlocked(thread_t          thread,
                                  int               category,
                                  int               flavor,
                                  int               value,
                                  int               value2,
                                  task_pend_token_t pend_token)
{
    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_THREAD))) | DBG_FUNC_START,
        thread_tid(thread), threquested_0(thread),
        threquested_1(thread), value, 0);

    thread_set_requested_policy_spinlocked(thread, category, flavor, value, value2);

    thread_policy_update_spinlocked(thread, FALSE, pend_token);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_THREAD))) | DBG_FUNC_END,
        thread_tid(thread), threquested_0(thread),
        threquested_1(thread), tpending(pend_token), 0);
}
/*
 * Set the requested state for a specific flavor to a specific value.
 */
static void
thread_set_requested_policy_spinlocked(thread_t thread,
                                       int      category,
                                       int      flavor,
                                       int      value,
                                       int      value2)
{
    int tier, passive;

    struct thread_requested_policy requested = thread->requested_policy;

    switch (flavor) {

    /* Category: EXTERNAL and INTERNAL, thread and task */

    case TASK_POLICY_DARWIN_BG:
        if (category == TASK_POLICY_EXTERNAL)
            requested.thrp_ext_darwinbg = value;
        else
            requested.thrp_int_darwinbg = value;
        break;

    case TASK_POLICY_IOPOL:
        proc_iopol_to_tier(value, &tier, &passive);
        if (category == TASK_POLICY_EXTERNAL) {
            requested.thrp_ext_iotier = tier;
            requested.thrp_ext_iopassive = passive;
        } else {
            requested.thrp_int_iotier = tier;
            requested.thrp_int_iopassive = passive;
        }
        break;

    case TASK_POLICY_IO:
        if (category == TASK_POLICY_EXTERNAL)
            requested.thrp_ext_iotier = value;
        else
            requested.thrp_int_iotier = value;
        break;

    case TASK_POLICY_PASSIVE_IO:
        if (category == TASK_POLICY_EXTERNAL)
            requested.thrp_ext_iopassive = value;
        else
            requested.thrp_int_iopassive = value;
        break;

    /* Category: ATTRIBUTE, thread only */

    case TASK_POLICY_PIDBIND_BG:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.thrp_pidbind_bg = value;
        break;

    case TASK_POLICY_LATENCY_QOS:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.thrp_latency_qos = value;
        break;

    case TASK_POLICY_THROUGH_QOS:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.thrp_through_qos = value;
        break;

    case TASK_POLICY_QOS:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.thrp_qos = value;
        break;

    case TASK_POLICY_QOS_OVERRIDE:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.thrp_qos_override = value;
        break;

    case TASK_POLICY_QOS_AND_RELPRIO:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.thrp_qos = value;
        requested.thrp_qos_relprio = value2;
        DTRACE_BOOST3(qos_set, uint64_t, thread->thread_id, int, requested.thrp_qos, int, requested.thrp_qos_relprio);
        break;

    case TASK_POLICY_QOS_PROMOTE:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.thrp_qos_promote = value;
        break;

    case TASK_POLICY_QOS_IPC_OVERRIDE:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.thrp_qos_ipc_override = value;
        break;

    case TASK_POLICY_TERMINATED:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.thrp_terminated = value;
        break;

    default:
        panic("unknown task policy: %d %d %d", category, flavor, value);
        break;
    }

    thread->requested_policy = requested;
}
/*
 * Gets what you set. Effective values may be different.
 * Precondition: No locks are held
 */
int
proc_get_thread_policy(thread_t thread,
                       int      category,
                       int      flavor)
{
    int value = 0;
    thread_mtx_lock(thread);
    value = proc_get_thread_policy_locked(thread, category, flavor, NULL);
    thread_mtx_unlock(thread);
    return value;
}
static int
proc_get_thread_policy_locked(thread_t thread,
                              int      category,
                              int      flavor,
                              int*     value2)
{
    int value = 0;

    spl_t s = splsched();
    thread_lock(thread);

    value = thread_get_requested_policy_spinlocked(thread, category, flavor, value2);

    thread_unlock(thread);
    splx(s);

    return value;
}
/*
 * Gets what you set. Effective values may be different.
 */
static int
thread_get_requested_policy_spinlocked(thread_t thread,
                                       int      category,
                                       int      flavor,
                                       int*     value2)
{
    int value = 0;

    struct thread_requested_policy requested = thread->requested_policy;

    switch (flavor) {
    case TASK_POLICY_DARWIN_BG:
        if (category == TASK_POLICY_EXTERNAL)
            value = requested.thrp_ext_darwinbg;
        else
            value = requested.thrp_int_darwinbg;
        break;
    case TASK_POLICY_IOPOL:
        if (category == TASK_POLICY_EXTERNAL)
            value = proc_tier_to_iopol(requested.thrp_ext_iotier,
                                       requested.thrp_ext_iopassive);
        else
            value = proc_tier_to_iopol(requested.thrp_int_iotier,
                                       requested.thrp_int_iopassive);
        break;
    case TASK_POLICY_IO:
        if (category == TASK_POLICY_EXTERNAL)
            value = requested.thrp_ext_iotier;
        else
            value = requested.thrp_int_iotier;
        break;
    case TASK_POLICY_PASSIVE_IO:
        if (category == TASK_POLICY_EXTERNAL)
            value = requested.thrp_ext_iopassive;
        else
            value = requested.thrp_int_iopassive;
        break;
    case TASK_POLICY_QOS:
        assert(category == TASK_POLICY_ATTRIBUTE);
        value = requested.thrp_qos;
        break;
    case TASK_POLICY_QOS_OVERRIDE:
        assert(category == TASK_POLICY_ATTRIBUTE);
        value = requested.thrp_qos_override;
        break;
    case TASK_POLICY_LATENCY_QOS:
        assert(category == TASK_POLICY_ATTRIBUTE);
        value = requested.thrp_latency_qos;
        break;
    case TASK_POLICY_THROUGH_QOS:
        assert(category == TASK_POLICY_ATTRIBUTE);
        value = requested.thrp_through_qos;
        break;
    case TASK_POLICY_QOS_AND_RELPRIO:
        assert(category == TASK_POLICY_ATTRIBUTE);
        assert(value2 != NULL);
        value = requested.thrp_qos;
        *value2 = requested.thrp_qos_relprio;
        break;
    case TASK_POLICY_QOS_PROMOTE:
        assert(category == TASK_POLICY_ATTRIBUTE);
        value = requested.thrp_qos_promote;
        break;
    case TASK_POLICY_QOS_IPC_OVERRIDE:
        assert(category == TASK_POLICY_ATTRIBUTE);
        value = requested.thrp_qos_ipc_override;
        break;
    case TASK_POLICY_TERMINATED:
        assert(category == TASK_POLICY_ATTRIBUTE);
        value = requested.thrp_terminated;
        break;
    default:
        panic("unknown policy_flavor %d", flavor);
        break;
    }

    return value;
}
/*
 * Gets what is actually in effect, for subsystems which pull policy instead of receive updates.
 *
 * NOTE: This accessor does not take the task or thread lock.
 * Notifications of state updates need to be externally synchronized with state queries.
 * This routine *MUST* remain interrupt safe, as it is potentially invoked
 * within the context of a timer interrupt.
 *
 * TODO: I think we can get away with architecting this such that we don't need to look at the task ever.
 *      Is that a good idea? Maybe it's best to avoid evaluate-all-the-threads updates.
 *      I don't think that cost is worth not having the right answer.
 */
int
proc_get_effective_thread_policy(thread_t thread,
                                 int      flavor)
{
    int value = 0;

    switch (flavor) {
    case TASK_POLICY_DARWIN_BG:
        /*
         * This call is used within the timer layer, as well as
         * prioritizing requests to the graphics system.
         * It also informs SFI and originator-bg-state.
         * Returns 1 for background mode, 0 for normal mode
         */
        value = thread->effective_policy.thep_darwinbg ? 1 : 0;
        break;
    case TASK_POLICY_IO:
        /*
         * The I/O system calls here to find out what throttling tier to apply to an operation.
         * Returns THROTTLE_LEVEL_* values
         */
        value = thread->effective_policy.thep_io_tier;
        if (thread->iotier_override != THROTTLE_LEVEL_NONE)
            value = MIN(value, thread->iotier_override);
        break;
    case TASK_POLICY_PASSIVE_IO:
        /*
         * The I/O system calls here to find out whether an operation should be passive.
         * (i.e. not cause operations with lower throttle tiers to be throttled)
         * Returns 1 for passive mode, 0 for normal mode
         *
         * If an override is causing IO to go into a lower tier, we also set
         * the passive bit so that a thread doesn't end up stuck in its own throttle
         * window when the override goes away.
         */
        value = thread->effective_policy.thep_io_passive ? 1 : 0;
        if (thread->iotier_override != THROTTLE_LEVEL_NONE &&
            thread->iotier_override < thread->effective_policy.thep_io_tier)
            value = 1;
        break;
    case TASK_POLICY_ALL_SOCKETS_BG:
        /*
         * do_background_socket() calls this to determine whether
         * it should change the thread's sockets
         * Returns 1 for background mode, 0 for normal mode
         * This consults both thread and task so un-DBGing a thread while the task is BG
         * doesn't get you out of the network throttle.
         */
        value = (thread->effective_policy.thep_all_sockets_bg ||
                 thread->task->effective_policy.tep_all_sockets_bg) ? 1 : 0;
        break;
    case TASK_POLICY_NEW_SOCKETS_BG:
        /*
         * socreate() calls this to determine if it should mark a new socket as background
         * Returns 1 for background mode, 0 for normal mode
         */
        value = thread->effective_policy.thep_new_sockets_bg ? 1 : 0;
        break;
    case TASK_POLICY_LATENCY_QOS:
        /*
         * timer arming calls into here to find out the timer coalescing level
         * Returns a latency QoS tier (0-6)
         */
        value = thread->effective_policy.thep_latency_qos;
        break;
    case TASK_POLICY_THROUGH_QOS:
        /*
         * This value is passed into the urgency callout from the scheduler
         * to the performance management subsystem.
         *
         * Returns a throughput QoS tier (0-6)
         */
        value = thread->effective_policy.thep_through_qos;
        break;
    case TASK_POLICY_QOS:
        /*
         * This is communicated to the performance management layer and SFI.
         *
         * Returns a QoS policy tier
         */
        value = thread->effective_policy.thep_qos;
        break;
    default:
        panic("unknown thread policy flavor %d", flavor);
        break;
    }

    return value;
}
/*
 * (integer_t) casts limit the number of bits we can fit here
 * this interface is deprecated and replaced by the _EXT struct ?
 */
static void
proc_get_thread_policy_bitfield(thread_t thread, thread_policy_state_t info)
{
    uint64_t bits = 0;

    struct thread_requested_policy requested = thread->requested_policy;

    bits |= (requested.thrp_int_darwinbg  ? POLICY_REQ_INT_DARWIN_BG  : 0);
    bits |= (requested.thrp_ext_darwinbg  ? POLICY_REQ_EXT_DARWIN_BG  : 0);
    bits |= (requested.thrp_int_iotier    ? (((uint64_t)requested.thrp_int_iotier) << POLICY_REQ_INT_IO_TIER_SHIFT) : 0);
    bits |= (requested.thrp_ext_iotier    ? (((uint64_t)requested.thrp_ext_iotier) << POLICY_REQ_EXT_IO_TIER_SHIFT) : 0);
    bits |= (requested.thrp_int_iopassive ? POLICY_REQ_INT_PASSIVE_IO : 0);
    bits |= (requested.thrp_ext_iopassive ? POLICY_REQ_EXT_PASSIVE_IO : 0);

    bits |= (requested.thrp_qos           ? (((uint64_t)requested.thrp_qos) << POLICY_REQ_TH_QOS_SHIFT) : 0);
    bits |= (requested.thrp_qos_override  ? (((uint64_t)requested.thrp_qos_override) << POLICY_REQ_TH_QOS_OVER_SHIFT) : 0);

    bits |= (requested.thrp_pidbind_bg    ? POLICY_REQ_PIDBIND_BG : 0);

    bits |= (requested.thrp_latency_qos   ? (((uint64_t)requested.thrp_latency_qos) << POLICY_REQ_BASE_LATENCY_QOS_SHIFT) : 0);
    bits |= (requested.thrp_through_qos   ? (((uint64_t)requested.thrp_through_qos) << POLICY_REQ_BASE_THROUGH_QOS_SHIFT) : 0);

    info->requested = (integer_t) bits;
    bits = 0;

    struct thread_effective_policy effective = thread->effective_policy;

    bits |= (effective.thep_darwinbg       ? POLICY_EFF_DARWIN_BG : 0);

    bits |= (effective.thep_io_tier        ? (((uint64_t)effective.thep_io_tier) << POLICY_EFF_IO_TIER_SHIFT) : 0);
    bits |= (effective.thep_io_passive     ? POLICY_EFF_IO_PASSIVE : 0);
    bits |= (effective.thep_all_sockets_bg ? POLICY_EFF_ALL_SOCKETS_BG : 0);
    bits |= (effective.thep_new_sockets_bg ? POLICY_EFF_NEW_SOCKETS_BG : 0);

    bits |= (effective.thep_qos            ? (((uint64_t)effective.thep_qos) << POLICY_EFF_TH_QOS_SHIFT) : 0);

    bits |= (effective.thep_latency_qos    ? (((uint64_t)effective.thep_latency_qos) << POLICY_EFF_LATENCY_QOS_SHIFT) : 0);
    bits |= (effective.thep_through_qos    ? (((uint64_t)effective.thep_through_qos) << POLICY_EFF_THROUGH_QOS_SHIFT) : 0);

    info->effective = (integer_t)bits;
}
2058 * Sneakily trace either the task and thread requested
2059 * or just the thread requested, depending on if we have enough room.
2060 * We do have room on LP64. On LP32, we have to split it between two uintptr_t's.
2063 * threquested_0(thread) thread[0] task[0]
2064 * threquested_1(thread) thread[1] thread[0]
uintptr_t
threquested_0(thread_t thread)
{
	static_assert(sizeof(struct thread_requested_policy) == sizeof(uint64_t), "size invariant violated");

	uintptr_t* raw = (uintptr_t*)(void*)&thread->requested_policy;

	return raw[0];
}

uintptr_t
threquested_1(thread_t thread)
{
#if defined __LP64__
	return *(uintptr_t*)&thread->task->requested_policy;
#else
	uintptr_t* raw = (uintptr_t*)(void*)&thread->requested_policy;
	return raw[1];
#endif
}

uintptr_t
theffective_0(thread_t thread)
{
	static_assert(sizeof(struct thread_effective_policy) == sizeof(uint64_t), "size invariant violated");

	uintptr_t* raw = (uintptr_t*)(void*)&thread->effective_policy;
	return raw[0];
}

uintptr_t
theffective_1(thread_t thread)
{
#if defined __LP64__
	return *(uintptr_t*)&thread->task->effective_policy;
#else
	uintptr_t* raw = (uintptr_t*)(void*)&thread->effective_policy;
	return raw[1];
#endif
}
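
/*
 * Illustrative sketch (not part of the original source): these accessors are
 * intended to be dropped directly into tracepoint arguments, e.g. roughly:
 *
 *	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
 *	    (IMPORTANCE_CODE(IMP_MAIN_THREAD_QOS, 0)) | DBG_FUNC_NONE,
 *	    thread_tid(thread), threquested_0(thread), threquested_1(thread), 0, 0);
 *
 * The trace code shown is only a placeholder; each call site supplies its own
 * IMPORTANCE_CODE.
 */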
/*
 * Set an override on the thread which is consulted with a
 * higher priority than the task/thread policy. This should
 * only be set for temporary grants until the thread
 * returns to the userspace boundary
 *
 * We use atomic operations to swap in the override, with
 * the assumption that the thread itself can
 * read the override and clear it on return to userspace.
 *
 * No locking is performed, since it is acceptable to see
 * a stale override for one loop through throttle_lowpri_io().
 * However a thread reference must be held on the thread.
 */

void set_thread_iotier_override(thread_t thread, int policy)
{
	int current_override;

	/* Let most aggressive I/O policy win until user boundary */
	do {
		current_override = thread->iotier_override;

		if (current_override != THROTTLE_LEVEL_NONE)
			policy = MIN(current_override, policy);

		if (current_override == policy) {
			/* no effective change */
			return;
		}
	} while (!OSCompareAndSwap(current_override, policy, &thread->iotier_override));

	/*
	 * Since the thread may be currently throttled,
	 * re-evaluate tiers and potentially break out
	 * of an msleep
	 */
	rethrottle_thread(thread->uthread);
}
/*
 * Userspace synchronization routines (like pthread mutexes, pthread reader-writer locks,
 * semaphores, dispatch_sync) may result in priority inversions where a higher priority
 * (i.e. scheduler priority, I/O tier, QoS tier) is waiting on a resource owned by a lower
 * priority thread. In these cases, we attempt to propagate the priority token, as long
 * as the subsystem informs us of the relationships between the threads. The userspace
 * synchronization subsystem should maintain the information of owner->resource and
 * resource->waiters itself.
 */
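
/*
 * Illustrative sketch (not part of the original source): a userspace
 * synchronization subsystem that tracks owner->resource and resource->waiters
 * relationships would drive the KPI below roughly as follows when a high-QoS
 * waiter blocks on a resource owned by a lower-QoS thread.  The parameter
 * values here are hypothetical.
 *
 *	// waiter blocks: boost the owner on behalf of this resource
 *	proc_thread_qos_add_override(owner_task, THREAD_NULL, owner_tid,
 *	        waiter_qos, first_waiter_for_resource, resource_addr,
 *	        THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX);
 *
 *	// owner releases the resource: drop the boost again
 *	proc_thread_qos_remove_override(owner_task, THREAD_NULL, owner_tid,
 *	        resource_addr, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX);
 */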
/*
 * This helper canonicalizes the resource/resource_type given the current qos_override_mode
 * in effect. Note that wildcards (THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD) may need
 * to be handled specially in the future, but for now it's fine to slam
 * *resource to USER_ADDR_NULL even if it was previously a wildcard.
 */
static void canonicalize_resource_and_type(user_addr_t *resource, int *resource_type) {
	if (qos_override_mode == QOS_OVERRIDE_MODE_OVERHANG_PEAK || qos_override_mode == QOS_OVERRIDE_MODE_IGNORE_OVERRIDE) {
		/* Map all input resource/type to a single one */
		*resource = USER_ADDR_NULL;
		*resource_type = THREAD_QOS_OVERRIDE_TYPE_UNKNOWN;
	} else if (qos_override_mode == QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE) {
		/* no transform */
	} else if (qos_override_mode == QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_IGNORE_DISPATCH) {
		/* Map all dispatch overrides to a single one, to avoid memory overhead */
		if (*resource_type == THREAD_QOS_OVERRIDE_TYPE_DISPATCH_ASYNCHRONOUS_OVERRIDE) {
			*resource = USER_ADDR_NULL;
		}
	} else if (qos_override_mode == QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_SINGLE_MUTEX_OVERRIDE) {
		/* Map all mutex overrides to a single one, to avoid memory overhead */
		if (*resource_type == THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX) {
			*resource = USER_ADDR_NULL;
		}
	}
}
/* This helper routine finds an existing override if known. Locking should be done by caller */
static struct thread_qos_override *
find_qos_override(thread_t thread,
                  user_addr_t resource,
                  int resource_type)
{
	struct thread_qos_override *override;

	override = thread->overrides;
	while (override) {
		if (override->override_resource == resource &&
		    override->override_resource_type == resource_type) {
			return override;
		}

		override = override->override_next;
	}

	return NULL;
}
static void
find_and_decrement_qos_override(thread_t thread,
                                user_addr_t resource,
                                int resource_type,
                                boolean_t reset,
                                struct thread_qos_override **free_override_list)
{
	struct thread_qos_override *override, *override_prev;

	override_prev = NULL;
	override = thread->overrides;
	while (override) {
		struct thread_qos_override *override_next = override->override_next;

		if ((THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD == resource || override->override_resource == resource) &&
		    (THREAD_QOS_OVERRIDE_TYPE_WILDCARD == resource_type || override->override_resource_type == resource_type)) {

			if (reset) {
				override->override_contended_resource_count = 0;
			} else {
				override->override_contended_resource_count--;
			}

			if (override->override_contended_resource_count == 0) {
				if (override_prev == NULL) {
					thread->overrides = override_next;
				} else {
					override_prev->override_next = override_next;
				}

				/* Add to out-param for later zfree */
				override->override_next = *free_override_list;
				*free_override_list = override;
			} else {
				override_prev = override;
			}

			if (THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD != resource) {
				return;
			}
		} else {
			override_prev = override;
		}

		override = override_next;
	}
}
/* This helper recalculates the current requested override using the policy selected at boot */
static int
calculate_requested_qos_override(thread_t thread)
{
	if (qos_override_mode == QOS_OVERRIDE_MODE_IGNORE_OVERRIDE) {
		return THREAD_QOS_UNSPECIFIED;
	}

	/* iterate over all overrides and calculate MAX */
	struct thread_qos_override *override;
	int qos_override = THREAD_QOS_UNSPECIFIED;

	override = thread->overrides;
	while (override) {
		if (qos_override_mode != QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_IGNORE_DISPATCH ||
		    override->override_resource_type != THREAD_QOS_OVERRIDE_TYPE_DISPATCH_ASYNCHRONOUS_OVERRIDE) {
			qos_override = MAX(qos_override, override->override_qos);
		}

		override = override->override_next;
	}

	return qos_override;
}
/*
 * Returns:
 * - 0 on success
 * - EINVAL if some invalid input was passed
 * - EFAULT if user_lock_addr != NULL and needs to be faulted (userland has to
 *   fault it and retry)
 * - ESTALE if user_lock_addr != NULL &&
 *   ulock_owner_value_to_port_name(*user_lock_addr) != user_lock_owner
 */
static int
proc_thread_qos_add_override_internal(thread_t         thread,
                                      int              override_qos,
                                      boolean_t        first_override_for_resource,
                                      user_addr_t      resource,
                                      int              resource_type,
                                      user_addr_t      user_lock_addr,
                                      mach_port_name_t user_lock_owner)
{
	struct task_pend_token pend_token = {};
	int rc = 0;

	thread_mtx_lock(thread);

	KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_ADD_OVERRIDE)) | DBG_FUNC_START,
	                      thread_tid(thread), override_qos, first_override_for_resource ? 1 : 0, 0, 0);

	DTRACE_BOOST5(qos_add_override_pre, uint64_t, thread_tid(thread),
	              uint64_t, thread->requested_policy.thrp_qos,
	              uint64_t, thread->effective_policy.thep_qos,
	              int, override_qos, boolean_t, first_override_for_resource);

	struct thread_qos_override *override;
	struct thread_qos_override *override_new = NULL;
	int new_qos_override, prev_qos_override;
	int new_effective_qos;

	canonicalize_resource_and_type(&resource, &resource_type);

	override = find_qos_override(thread, resource, resource_type);
	if (first_override_for_resource && !override) {
		/* We need to allocate a new object. Drop the thread lock and
		 * recheck afterwards in case someone else added the override
		 */
		thread_mtx_unlock(thread);
		override_new = zalloc(thread_qos_override_zone);
		thread_mtx_lock(thread);
		override = find_qos_override(thread, resource, resource_type);
	}

	if (user_lock_addr) {
		uint64_t val;
		/* Workaround lack of explicit support for 'no-fault copyin'
		 * <rdar://problem/24999882>, as disabling preemption prevents paging in
		 */
		disable_preemption();
		rc = copyin_word(user_lock_addr, &val, sizeof(user_lock_owner));
		enable_preemption();
		if (rc == 0 && ulock_owner_value_to_port_name((uint32_t)val) != user_lock_owner) {
			rc = ESTALE;
		}
		if (rc) {
			prev_qos_override = proc_get_thread_policy_locked(thread,
			        TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_OVERRIDE, NULL);
			new_qos_override = prev_qos_override;
			new_effective_qos = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS);
			thread_mtx_unlock(thread);
			goto out;
		}
	}

	if (first_override_for_resource && override) {
		/* Someone else already allocated while the thread lock was dropped */
		override->override_contended_resource_count++;
	} else if (!override && override_new) {
		override = override_new;
		override_new = NULL;
		override->override_next = thread->overrides;
		/* since first_override_for_resource was TRUE */
		override->override_contended_resource_count = 1;
		override->override_resource = resource;
		override->override_resource_type = resource_type;
		override->override_qos = THREAD_QOS_UNSPECIFIED;
		thread->overrides = override;
	}

	if (override) {
		if (override->override_qos == THREAD_QOS_UNSPECIFIED)
			override->override_qos = override_qos;
		else
			override->override_qos = MAX(override->override_qos, override_qos);
	}

	/* Determine how to combine the various overrides into a single current
	 * requested override
	 */
	new_qos_override = calculate_requested_qos_override(thread);

	prev_qos_override = proc_get_thread_policy_locked(thread,
	        TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_OVERRIDE, NULL);

	if (new_qos_override != prev_qos_override) {
		proc_set_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE,
		                              TASK_POLICY_QOS_OVERRIDE,
		                              new_qos_override, 0, &pend_token);
	}

	new_effective_qos = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS);

	thread_mtx_unlock(thread);

	thread_policy_update_complete_unlocked(thread, &pend_token);

out:
	if (override_new) {
		zfree(thread_qos_override_zone, override_new);
	}

	DTRACE_BOOST4(qos_add_override_post, int, prev_qos_override,
	              int, new_qos_override, int, new_effective_qos, int, rc);

	KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_ADD_OVERRIDE)) | DBG_FUNC_END,
	                      new_qos_override, resource, resource_type, 0, 0);

	return rc;
}
int
proc_thread_qos_add_override_check_owner(thread_t thread,
                                         int override_qos,
                                         boolean_t first_override_for_resource,
                                         user_addr_t resource,
                                         int resource_type,
                                         user_addr_t user_lock_addr,
                                         mach_port_name_t user_lock_owner)
{
	return proc_thread_qos_add_override_internal(thread, override_qos,
	        first_override_for_resource, resource, resource_type,
	        user_lock_addr, user_lock_owner);
}
int
proc_thread_qos_add_override(task_t task,
                             thread_t thread,
                             uint64_t tid,
                             int override_qos,
                             boolean_t first_override_for_resource,
                             user_addr_t resource,
                             int resource_type)
{
	boolean_t has_thread_reference = FALSE;
	int rc = 0;

	if (thread == THREAD_NULL) {
		thread = task_findtid(task, tid);
		/* returns referenced thread */

		if (thread == THREAD_NULL) {
			KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_ADD_OVERRIDE)) | DBG_FUNC_NONE,
			                      tid, 0, 0xdead, 0, 0);
			return ESRCH;
		}
		has_thread_reference = TRUE;
	} else {
		assert(thread->task == task);
	}

	rc = proc_thread_qos_add_override_internal(thread, override_qos,
	        first_override_for_resource, resource, resource_type, 0, 0);

	if (has_thread_reference) {
		thread_deallocate(thread);
	}

	return rc;
}
static int
proc_thread_qos_remove_override_internal(thread_t    thread,
                                         user_addr_t resource,
                                         int         resource_type,
                                         boolean_t   reset,
                                         boolean_t   squash)
{
	struct task_pend_token pend_token = {};

	struct thread_qos_override *deferred_free_override_list = NULL;
	int new_qos_override, prev_qos_override, new_effective_qos, prev_qos;
	int new_qos = THREAD_QOS_UNSPECIFIED;

	thread_mtx_lock(thread);

	canonicalize_resource_and_type(&resource, &resource_type);

	find_and_decrement_qos_override(thread, resource, resource_type, reset, &deferred_free_override_list);

	KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_REMOVE_OVERRIDE)) | DBG_FUNC_START,
	                      thread_tid(thread), resource, reset, 0, 0);

	DTRACE_BOOST3(qos_remove_override_pre, uint64_t, thread_tid(thread),
	              uint64_t, thread->requested_policy.thrp_qos,
	              uint64_t, thread->effective_policy.thep_qos);

	/* Determine how to combine the various overrides into a single current requested override */
	new_qos_override = calculate_requested_qos_override(thread);

	spl_t s = splsched();
	thread_lock(thread);

	/*
	 * The override chain and therefore the value of the current override is locked with thread mutex,
	 * so we can do a get/set without races. However, the rest of thread policy is locked under the spinlock.
	 * This means you can't change the current override from a spinlock-only setter.
	 */
	prev_qos_override = thread_get_requested_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_OVERRIDE, NULL);

	if (squash) {
		/*
		 * Remove the specified overrides, and set the current override as the new base QoS.
		 * Return the new QoS value.
		 */
		prev_qos = thread_get_requested_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS, NULL);

		new_qos = MAX(prev_qos, prev_qos_override);
		if (new_qos != prev_qos)
			proc_set_thread_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS, new_qos, 0, &pend_token);
	}

	if (new_qos_override != prev_qos_override)
		proc_set_thread_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_OVERRIDE, new_qos_override, 0, &pend_token);

	new_effective_qos = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS);

	thread_unlock(thread);
	splx(s);

	thread_mtx_unlock(thread);

	thread_policy_update_complete_unlocked(thread, &pend_token);

	while (deferred_free_override_list) {
		struct thread_qos_override *override_next = deferred_free_override_list->override_next;

		zfree(thread_qos_override_zone, deferred_free_override_list);
		deferred_free_override_list = override_next;
	}

	DTRACE_BOOST3(qos_remove_override_post, int, prev_qos_override,
	              int, new_qos_override, int, new_effective_qos);

	KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_REMOVE_OVERRIDE)) | DBG_FUNC_END,
	                      thread_tid(thread), squash, 0, 0, 0);

	return new_qos;
}
int
proc_thread_qos_remove_override(task_t task,
                                thread_t thread,
                                uint64_t tid,
                                user_addr_t resource,
                                int resource_type)
{
	boolean_t has_thread_reference = FALSE;

	if (thread == THREAD_NULL) {
		thread = task_findtid(task, tid);
		/* returns referenced thread */

		if (thread == THREAD_NULL) {
			KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_REMOVE_OVERRIDE)) | DBG_FUNC_NONE,
			                      tid, 0, 0xdead, 0, 0);
			return ESRCH;
		}
		has_thread_reference = TRUE;
	} else {
		assert(task == thread->task);
	}

	proc_thread_qos_remove_override_internal(thread, resource, resource_type, FALSE, FALSE);

	if (has_thread_reference)
		thread_deallocate(thread);

	return 0;
}
int
proc_thread_qos_reset_override(task_t task,
                               thread_t thread,
                               uint64_t tid,
                               user_addr_t resource,
                               int resource_type)
{
	boolean_t has_thread_reference = FALSE;

	if (thread == THREAD_NULL) {
		thread = task_findtid(task, tid);
		/* returns referenced thread */

		if (thread == THREAD_NULL) {
			KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_REMOVE_OVERRIDE)) | DBG_FUNC_NONE,
			                      tid, 0, 0xdead, 0, 0);
			return ESRCH;
		}
		has_thread_reference = TRUE;
	} else {
		assert(task == thread->task);
	}

	proc_thread_qos_remove_override_internal(thread, resource, resource_type, TRUE, FALSE);

	if (has_thread_reference)
		thread_deallocate(thread);

	return 0;
}
/*
 * Clears the requested overrides, and replaces the current QoS with the max
 * of the current QoS and the current override, then returns the new QoS.
 *
 * This is useful in order to reset overrides before parking a workqueue thread,
 * but avoid dropping priority and getting preempted right before parking.
 *
 * Called without any locks held.
 */
int
proc_thread_qos_squash_override(thread_t thread, user_addr_t resource, int resource_type)
{
	return proc_thread_qos_remove_override_internal(thread, resource, resource_type, TRUE, TRUE);
}
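
/*
 * Illustrative sketch (not part of the original source): a caller such as the
 * pthread workqueue path could, hypothetically, squash outstanding overrides
 * right before parking a thread:
 *
 *	int park_qos = proc_thread_qos_squash_override(thread, USER_ADDR_NULL,
 *	        THREAD_QOS_OVERRIDE_TYPE_UNKNOWN);
 *	// park at 'park_qos' instead of dropping below the override and being
 *	// preempted just before going idle
 */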
/* Deallocate before thread termination */
void proc_thread_qos_deallocate(thread_t thread)
{
	/*
	 * There are no more references to this thread,
	 * therefore this thread must not own any more locks,
	 * therefore there must not be any more user promotions.
	 */
	assert(thread->user_promotions == 0);
	assert(thread->requested_policy.thrp_qos_promote == THREAD_QOS_UNSPECIFIED);
	assert(thread->user_promotion_basepri == 0);

	/* This thread must have no more IPC overrides. */
	assert(thread->ipc_overrides == 0);
	assert(thread->requested_policy.thrp_qos_ipc_override == THREAD_QOS_UNSPECIFIED);

	/*
	 * Clear out any lingering override objects.
	 */
	struct thread_qos_override *override;

	thread_mtx_lock(thread);
	override = thread->overrides;
	thread->overrides = NULL;
	thread->requested_policy.thrp_qos_override = THREAD_QOS_UNSPECIFIED;
	/* We don't need to re-evaluate thread policy here because the thread has already exited */
	thread_mtx_unlock(thread);

	while (override) {
		struct thread_qos_override *override_next = override->override_next;

		zfree(thread_qos_override_zone, override);
		override = override_next;
	}
}
/*
 * Set up the primordial thread's QoS
 */
void
task_set_main_thread_qos(task_t task, thread_t thread) {
	struct task_pend_token pend_token = {};

	assert(thread->task == task);

	thread_mtx_lock(thread);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	                          (IMPORTANCE_CODE(IMP_MAIN_THREAD_QOS, 0)) | DBG_FUNC_START,
	                          thread_tid(thread), threquested_0(thread), threquested_1(thread),
	                          thread->requested_policy.thrp_qos, 0);

	int primordial_qos = task_compute_main_thread_qos(task);

	proc_set_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS,
	                              primordial_qos, 0, &pend_token);

	thread_mtx_unlock(thread);

	thread_policy_update_complete_unlocked(thread, &pend_token);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	                          (IMPORTANCE_CODE(IMP_MAIN_THREAD_QOS, 0)) | DBG_FUNC_END,
	                          thread_tid(thread), threquested_0(thread), threquested_1(thread),
	                          primordial_qos, 0);
}
/*
 * KPI for pthread kext
 *
 * Return a good guess at what the initial manager QoS will be
 * Dispatch can override this in userspace if it so chooses
 */
int
task_get_default_manager_qos(task_t task)
{
	int primordial_qos = task_compute_main_thread_qos(task);

	if (primordial_qos == THREAD_QOS_LEGACY)
		primordial_qos = THREAD_QOS_USER_INITIATED;

	return primordial_qos;
}
/*
 * Promote thread with the user level properties of 'promoter'
 * Mutexes may be held, but it's OK to take the throttle lock
 *
 * if 'new_promotion' is TRUE, this is a new promotion.
 * if FALSE, we are updating an existing promotion.
 */
static void
thread_user_promotion_promote(thread_t  thread,
                              thread_t  promoter,
                              struct promote_token* promote_token,
                              boolean_t new_promotion)
{
	struct task_pend_token pend_token = {};

	uint32_t promoter_base_pri = 0, promoter_qos = THREAD_QOS_UNSPECIFIED;

	spl_t s = splsched();
	thread_lock(promoter);

	/*
	 * We capture the 'promotion qos' here, which is captured
	 * before task-level clamping.
	 *
	 * This means that if the process gets unclamped while a promotion
	 * is in effect, the owning thread ends up with the correct QoS.
	 *
	 * This does NOT work correctly across processes, as the correct QoS
	 * in one is not necessarily the correct QoS in another.
	 * When we add support for multi-process ulock boosting, we need to
	 * do something more complex.
	 */
	promoter_qos = promoter->effective_policy.thep_qos_promote;

	/* TODO: extract 'effective unclamped base pri' instead */
	promoter_base_pri = promoter->base_pri;

	thread_unlock(promoter);
	splx(s);

	/* clamp out realtime to max user pri */
	promoter_base_pri = MIN(promoter_base_pri, MAXPRI_USER);

	/* add in the saved promotion token */
	assert(promote_token->pt_basepri <= MAXPRI_USER);

	promoter_base_pri = MAX(promoter_base_pri, promote_token->pt_basepri);
	promoter_qos = MAX(promoter_qos, promote_token->pt_qos);

	/* save the max for later */
	promote_token->pt_basepri = promoter_base_pri;
	promote_token->pt_qos = promoter_qos;

	s = splsched();
	thread_lock(thread);

	if (new_promotion) {
		if (thread->user_promotions == 0) {
			assert(thread->requested_policy.thrp_qos_promote == THREAD_QOS_UNSPECIFIED);
			assert(thread->user_promotion_basepri == 0);
		}

		thread->user_promotions++;
	} else {
		assert(thread->user_promotions > 0);
	}

	uint32_t thread_qos = thread->requested_policy.thrp_qos_promote;
	uint32_t thread_basepri = thread->user_promotion_basepri;

	uint32_t new_qos = MAX(thread_qos, promoter_qos);
	uint32_t new_basepri = MAX(thread_basepri, promoter_base_pri);

	/* TODO: Fast path the 'new is lower than effective' case to avoid full reevaluation */
	if (thread_qos != new_qos || thread_basepri != new_basepri) {

		thread->user_promotion_basepri = new_basepri;

		pend_token.tpt_force_recompute_pri = 1;

		proc_set_thread_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE,
		                                  TASK_POLICY_QOS_PROMOTE, new_qos,
		                                  0, &pend_token);
	}

	thread_unlock(thread);
	splx(s);

	thread_policy_update_complete_unlocked(thread, &pend_token);
}
/* Add a user promotion to thread */
void
thread_user_promotion_add(thread_t thread,
                          thread_t promoter,
                          struct promote_token* promote_token)
{
	thread_user_promotion_promote(thread, promoter, promote_token, TRUE);
}

/* Update an existing user promotion on thread */
void
thread_user_promotion_update(thread_t thread,
                             thread_t promoter,
                             struct promote_token* promote_token)
{
	thread_user_promotion_promote(thread, promoter, promote_token, FALSE);
}
/*
 * Drop a user promotion on thread
 * Mutexes may be held, but it's OK to take the throttle lock
 */
void
thread_user_promotion_drop(thread_t thread)
{
	struct task_pend_token pend_token = {};

	spl_t s = splsched();
	thread_lock(thread);

	assert(thread->user_promotions > 0);

	if (--thread->user_promotions == 0) {
		thread->requested_policy.thrp_qos_promote = THREAD_QOS_UNSPECIFIED;
		thread->user_promotion_basepri = 0;

		pend_token.tpt_force_recompute_pri = 1;

		proc_set_thread_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE,
		                                  TASK_POLICY_QOS_PROMOTE, THREAD_QOS_UNSPECIFIED,
		                                  0, &pend_token);
	}

	thread_unlock(thread);
	splx(s);

	thread_policy_update_complete_unlocked(thread, &pend_token);
}
/*
 * Set the thread's QoS IPC override
 * Owned by the IPC subsystem
 *
 * May be called with spinlocks held, but not spinlocks
 * that may deadlock against the thread lock, the throttle lock, or the SFI lock.
 *
 * One 'add' must be balanced by one 'drop'.
 * Between 'add' and 'drop', the override QoS value may be updated with an 'update'.
 * Before the thread is deallocated, there must be 0 remaining overrides.
 */
static void
thread_ipc_override(thread_t  thread,
                    uint32_t  qos_override,
                    boolean_t is_new_override)
{
	struct task_pend_token pend_token = {};

	spl_t s = splsched();
	thread_lock(thread);

	uint32_t old_override = thread->requested_policy.thrp_qos_ipc_override;

	if (is_new_override) {
		if (thread->ipc_overrides++ == 0) {
			/* This add is the first override for this thread */
			assert(old_override == THREAD_QOS_UNSPECIFIED);
		} else {
			/* There are already other overrides in effect for this thread */
			assert(old_override > THREAD_QOS_UNSPECIFIED);
		}
	} else {
		/* There must be at least one override (the previous add call) in effect */
		assert(thread->ipc_overrides > 0);
		assert(old_override > THREAD_QOS_UNSPECIFIED);
	}

	uint32_t new_override = MAX(old_override, qos_override);

	proc_set_thread_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE,
	                                  TASK_POLICY_QOS_IPC_OVERRIDE,
	                                  new_override, 0, &pend_token);

	assert(pend_token.tpt_update_sockets == 0);

	thread_unlock(thread);
	splx(s);

	/*
	 * this is only safe after rethrottle_thread supports
	 * being called from spinlock context
	 */
	thread_policy_update_complete_unlocked(thread, &pend_token);
}
void
thread_add_ipc_override(thread_t thread,
                        uint32_t qos_override)
{
	thread_ipc_override(thread, qos_override, TRUE);
}

void
thread_update_ipc_override(thread_t thread,
                           uint32_t qos_override)
{
	thread_ipc_override(thread, qos_override, FALSE);
}

void
thread_drop_ipc_override(thread_t thread)
{
	struct task_pend_token pend_token = {};

	spl_t s = splsched();
	thread_lock(thread);

	assert(thread->ipc_overrides > 0);

	if (--thread->ipc_overrides == 0) {
		/*
		 * There are no more overrides for this thread, so we should
		 * clear out the saturated override value
		 */
		proc_set_thread_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE,
		                                  TASK_POLICY_QOS_IPC_OVERRIDE, THREAD_QOS_UNSPECIFIED,
		                                  0, &pend_token);
	}

	thread_unlock(thread);
	splx(s);

	/*
	 * this is only safe after rethrottle_thread supports
	 * being called from spinlock context
	 */
	thread_policy_update_complete_unlocked(thread, &pend_token);
}

/* Get current IPC override, may be called from spinlock context */
uint32_t
thread_get_ipc_override(thread_t thread)
{
	return proc_get_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_IPC_OVERRIDE, NULL);
}
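
/*
 * Illustrative sketch (not part of the original source): the IPC subsystem is
 * expected to balance these calls per boosted message, along the lines of:
 *
 *	thread_add_ipc_override(thread, THREAD_QOS_USER_INITIATED);      // take the override
 *	thread_update_ipc_override(thread, THREAD_QOS_USER_INTERACTIVE); // raise it while held
 *	...
 *	thread_drop_ipc_override(thread);                                 // balance the add
 *
 * The specific QoS values are examples only.
 */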