/*
 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/thread_act_server.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/affinity.h>
#include <mach/task_policy.h>

#include <kern/policy_internal.h>
#include <sys/errno.h>
#include <sys/ulock.h>

#include <mach/machine/sdt.h>
extern int proc_selfpid(void);
extern char * proc_name_address(void *p);
extern void rethrottle_thread(void * uthread);

#define QOS_EXTRACT(q) ((q) & 0xff)

uint32_t qos_override_mode;
#define QOS_OVERRIDE_MODE_OVERHANG_PEAK 0
#define QOS_OVERRIDE_MODE_IGNORE_OVERRIDE 1
#define QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE 2
#define QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_SINGLE_MUTEX_OVERRIDE 3

extern zone_t thread_qos_override_zone;
static boolean_t
proc_thread_qos_remove_override_internal(thread_t thread, user_addr_t resource, int resource_type, boolean_t reset);
/*
 * THREAD_QOS_UNSPECIFIED is assigned the highest tier available, so it does not provide a limit
 * to threads that don't have a QoS class set.
 */
const qos_policy_params_t thread_qos_policy_params = {
    /*
     * This table defines the starting base priority of the thread,
     * which will be modified by the thread importance and the task max priority
     * before being applied.
     */
    .qos_pri[THREAD_QOS_UNSPECIFIED]        = 0, /* not consulted */
    .qos_pri[THREAD_QOS_USER_INTERACTIVE]   = BASEPRI_BACKGROUND, /* i.e. 46 */
    .qos_pri[THREAD_QOS_USER_INITIATED]     = BASEPRI_USER_INITIATED,
    .qos_pri[THREAD_QOS_LEGACY]             = BASEPRI_DEFAULT,
    .qos_pri[THREAD_QOS_UTILITY]            = BASEPRI_UTILITY,
    .qos_pri[THREAD_QOS_BACKGROUND]         = MAXPRI_THROTTLE,
    .qos_pri[THREAD_QOS_MAINTENANCE]        = MAXPRI_THROTTLE,

    /*
     * This table defines the highest IO priority that a thread marked with this
     * QoS class can have.
     */
    .qos_iotier[THREAD_QOS_UNSPECIFIED]        = THROTTLE_LEVEL_TIER0,
    .qos_iotier[THREAD_QOS_USER_INTERACTIVE]   = THROTTLE_LEVEL_TIER0,
    .qos_iotier[THREAD_QOS_USER_INITIATED]     = THROTTLE_LEVEL_TIER0,
    .qos_iotier[THREAD_QOS_LEGACY]             = THROTTLE_LEVEL_TIER0,
    .qos_iotier[THREAD_QOS_UTILITY]            = THROTTLE_LEVEL_TIER1,
    .qos_iotier[THREAD_QOS_BACKGROUND]         = THROTTLE_LEVEL_TIER2, /* possibly overridden by bg_iotier */
    .qos_iotier[THREAD_QOS_MAINTENANCE]        = THROTTLE_LEVEL_TIER3,

    /*
     * This table defines the highest QoS level that
     * a thread marked with this QoS class can have.
     */
    .qos_through_qos[THREAD_QOS_UNSPECIFIED]        = QOS_EXTRACT(THROUGHPUT_QOS_TIER_UNSPECIFIED),
    .qos_through_qos[THREAD_QOS_USER_INTERACTIVE]   = QOS_EXTRACT(THROUGHPUT_QOS_TIER_0),
    .qos_through_qos[THREAD_QOS_USER_INITIATED]     = QOS_EXTRACT(THROUGHPUT_QOS_TIER_1),
    .qos_through_qos[THREAD_QOS_LEGACY]             = QOS_EXTRACT(THROUGHPUT_QOS_TIER_1),
    .qos_through_qos[THREAD_QOS_UTILITY]            = QOS_EXTRACT(THROUGHPUT_QOS_TIER_2),
    .qos_through_qos[THREAD_QOS_BACKGROUND]         = QOS_EXTRACT(THROUGHPUT_QOS_TIER_5),
    .qos_through_qos[THREAD_QOS_MAINTENANCE]        = QOS_EXTRACT(THROUGHPUT_QOS_TIER_5),

    .qos_latency_qos[THREAD_QOS_UNSPECIFIED]        = QOS_EXTRACT(LATENCY_QOS_TIER_UNSPECIFIED),
    .qos_latency_qos[THREAD_QOS_USER_INTERACTIVE]   = QOS_EXTRACT(LATENCY_QOS_TIER_0),
    .qos_latency_qos[THREAD_QOS_USER_INITIATED]     = QOS_EXTRACT(LATENCY_QOS_TIER_1),
    .qos_latency_qos[THREAD_QOS_LEGACY]             = QOS_EXTRACT(LATENCY_QOS_TIER_1),
    .qos_latency_qos[THREAD_QOS_UTILITY]            = QOS_EXTRACT(LATENCY_QOS_TIER_3),
    .qos_latency_qos[THREAD_QOS_BACKGROUND]         = QOS_EXTRACT(LATENCY_QOS_TIER_3),
    .qos_latency_qos[THREAD_QOS_MAINTENANCE]        = QOS_EXTRACT(LATENCY_QOS_TIER_3),
};
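
/*
 * Example (reading the tables above): a THREAD_QOS_UTILITY thread starts from
 * thread_qos_policy_params.qos_pri[THREAD_QOS_UTILITY] (BASEPRI_UTILITY), is
 * capped at THROTTLE_LEVEL_TIER1 for IO, and inherits the TIER_2 throughput and
 * TIER_3 latency ceilings, before task limits and relative priority are applied.
 */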
static void thread_set_user_sched_mode_and_recompute_pri(thread_t thread, sched_mode_t mode);

static int thread_qos_scaled_relative_priority(int qos, int qos_relprio);

static void proc_get_thread_policy_bitfield(thread_t thread, thread_policy_state_t info);
static void proc_set_thread_policy_locked(thread_t thread, int category, int flavor, int value, int value2, task_pend_token_t pend_token);
static void proc_set_thread_policy_spinlocked(thread_t thread, int category, int flavor, int value, int value2, task_pend_token_t pend_token);
static void thread_set_requested_policy_spinlocked(thread_t thread, int category, int flavor, int value, int value2);
static int thread_get_requested_policy_spinlocked(thread_t thread, int category, int flavor, int* value2);
static int proc_get_thread_policy_locked(thread_t thread, int category, int flavor, int* value2);
static void thread_policy_update_spinlocked(thread_t thread, boolean_t recompute_priority, task_pend_token_t pend_token);
static void thread_policy_update_internal_spinlocked(thread_t thread, boolean_t recompute_priority, task_pend_token_t pend_token);
void
thread_policy_init(void) {
    if (PE_parse_boot_argn("qos_override_mode", &qos_override_mode, sizeof(qos_override_mode))) {
        printf("QOS override mode: 0x%08x\n", qos_override_mode);
    } else {
        qos_override_mode = QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_SINGLE_MUTEX_OVERRIDE;
    }
}
boolean_t
thread_has_qos_policy(thread_t thread) {
    return (proc_get_thread_policy(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS) != THREAD_QOS_UNSPECIFIED) ? TRUE : FALSE;
}
static void
thread_remove_qos_policy_locked(thread_t thread,
                                task_pend_token_t pend_token)
{
    __unused int prev_qos = thread->requested_policy.thrp_qos;

    DTRACE_PROC2(qos__remove, thread_t, thread, int, prev_qos);

    proc_set_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_AND_RELPRIO,
                                  THREAD_QOS_UNSPECIFIED, 0, pend_token);
}
kern_return_t
thread_remove_qos_policy(thread_t thread)
{
    struct task_pend_token pend_token = {};

    thread_mtx_lock(thread);
    if (!thread->active) {
        thread_mtx_unlock(thread);
        return KERN_TERMINATED;
    }

    thread_remove_qos_policy_locked(thread, &pend_token);

    thread_mtx_unlock(thread);

    thread_policy_update_complete_unlocked(thread, &pend_token);

    return KERN_SUCCESS;
}
static boolean_t
thread_is_static_param(thread_t thread)
{
    if (thread->static_param) {
        DTRACE_PROC1(qos__legacy__denied, thread_t, thread);
        return TRUE;
    }
    return FALSE;
}
/*
 * Relative priorities can range between 0REL and -15REL. These
 * map to QoS-specific ranges, to create non-overlapping priority
 * ranges.
 */
static int
thread_qos_scaled_relative_priority(int qos, int qos_relprio)
{
    int next_lower_qos;

    /* Fast path, since no validation or scaling is needed */
    if (qos_relprio == 0) return 0;

    switch (qos) {
        case THREAD_QOS_USER_INTERACTIVE:
            next_lower_qos = THREAD_QOS_USER_INITIATED;
            break;
        case THREAD_QOS_USER_INITIATED:
            next_lower_qos = THREAD_QOS_LEGACY;
            break;
        case THREAD_QOS_LEGACY:
            next_lower_qos = THREAD_QOS_UTILITY;
            break;
        case THREAD_QOS_UTILITY:
            next_lower_qos = THREAD_QOS_BACKGROUND;
            break;
        case THREAD_QOS_MAINTENANCE:
        case THREAD_QOS_BACKGROUND:
            next_lower_qos = 0; /* Already the lowest QoS band */
            break;
        default:
            panic("Unrecognized QoS %d", qos);
            return 0;
    }

    int prio_range_max = thread_qos_policy_params.qos_pri[qos];
    int prio_range_min = next_lower_qos ? thread_qos_policy_params.qos_pri[next_lower_qos] : 0;

    /*
     * We now have the valid range that the scaled relative priority can map to. Note
     * that the lower bound is exclusive, but the upper bound is inclusive. If the
     * range is (21,31], 0REL should map to 31 and -15REL should map to 22. We use the
     * fact that the max relative priority is -15 and use ">>4" to divide by 16 and discard
     * the remainder.
     */
    int scaled_relprio = -(((prio_range_max - prio_range_min) * (-qos_relprio)) >> 4);

    return scaled_relprio;
}
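
/*
 * Worked example (assuming BASEPRI_DEFAULT == 31 and BASEPRI_UTILITY == 20, as
 * reflected in the table above): a THREAD_QOS_LEGACY thread at -15REL scales
 * over the range (20, 31], so scaled_relprio = -(((31 - 20) * 15) >> 4) = -10
 * and the resulting base priority is 31 - 10 = 21.
 */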
/*
 * flag set by -qos-policy-allow boot-arg to allow
 * testing thread qos policy from userspace
 */
boolean_t allow_qos_policy_set = FALSE;
kern_return_t
thread_policy_set(
    thread_t                thread,
    thread_policy_flavor_t  flavor,
    thread_policy_t         policy_info,
    mach_msg_type_number_t  count)
{
    thread_qos_policy_data_t req_qos;
    kern_return_t kr;

    req_qos.qos_tier = THREAD_QOS_UNSPECIFIED;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (allow_qos_policy_set == FALSE) {
        if (thread_is_static_param(thread))
            return (KERN_POLICY_STATIC);

        if (flavor == THREAD_QOS_POLICY)
            return (KERN_INVALID_ARGUMENT);
    }

    /* Threads without static_param set reset their QoS when other policies are applied. */
    if (thread->requested_policy.thrp_qos != THREAD_QOS_UNSPECIFIED) {
        /* Store the existing tier, if we fail this call it is used to reset back. */
        req_qos.qos_tier = thread->requested_policy.thrp_qos;
        req_qos.tier_importance = thread->requested_policy.thrp_qos_relprio;

        kr = thread_remove_qos_policy(thread);
        if (kr != KERN_SUCCESS) {
            return kr;
        }
    }

    kr = thread_policy_set_internal(thread, flavor, policy_info, count);

    /* Return KERN_QOS_REMOVED instead of KERN_SUCCESS if we succeeded. */
    if (req_qos.qos_tier != THREAD_QOS_UNSPECIFIED) {
        if (kr != KERN_SUCCESS) {
            /* Reset back to our original tier as the set failed. */
            (void)thread_policy_set_internal(thread, THREAD_QOS_POLICY, (thread_policy_t)&req_qos, THREAD_QOS_POLICY_COUNT);
        }
    }

    return kr;
}
kern_return_t
thread_policy_set_internal(
    thread_t                thread,
    thread_policy_flavor_t  flavor,
    thread_policy_t         policy_info,
    mach_msg_type_number_t  count)
{
    kern_return_t result = KERN_SUCCESS;
    struct task_pend_token pend_token = {};

    thread_mtx_lock(thread);
    if (!thread->active) {
        thread_mtx_unlock(thread);

        return (KERN_TERMINATED);
    }

    switch (flavor) {

    case THREAD_EXTENDED_POLICY:
    {
        boolean_t timeshare = TRUE;

        if (count >= THREAD_EXTENDED_POLICY_COUNT) {
            thread_extended_policy_t info;

            info = (thread_extended_policy_t)policy_info;
            timeshare = info->timeshare;
        }

        sched_mode_t mode = (timeshare == TRUE) ? TH_MODE_TIMESHARE : TH_MODE_FIXED;

        spl_t s = splsched();
        thread_lock(thread);

        thread_set_user_sched_mode_and_recompute_pri(thread, mode);

        thread_unlock(thread);
        splx(s);

        pend_token.tpt_update_thread_sfi = 1;

        break;
    }

    case THREAD_TIME_CONSTRAINT_POLICY:
    {
        thread_time_constraint_policy_t info;

        if (count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_time_constraint_policy_t)policy_info;
        if (info->constraint < info->computation ||
            info->computation > max_rt_quantum ||
            info->computation < min_rt_quantum) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        spl_t s = splsched();
        thread_lock(thread);

        thread->realtime.period = info->period;
        thread->realtime.computation = info->computation;
        thread->realtime.constraint = info->constraint;
        thread->realtime.preemptible = info->preemptible;

        thread_set_user_sched_mode_and_recompute_pri(thread, TH_MODE_REALTIME);

        thread_unlock(thread);
        splx(s);

        pend_token.tpt_update_thread_sfi = 1;

        break;
    }

    case THREAD_PRECEDENCE_POLICY:
    {
        thread_precedence_policy_t info;

        if (count < THREAD_PRECEDENCE_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }
        info = (thread_precedence_policy_t)policy_info;

        spl_t s = splsched();
        thread_lock(thread);

        thread->importance = info->importance;

        thread_recompute_priority(thread);

        thread_unlock(thread);
        splx(s);

        break;
    }

    case THREAD_AFFINITY_POLICY:
    {
        thread_affinity_policy_t info;

        if (!thread_affinity_is_supported()) {
            result = KERN_NOT_SUPPORTED;
            break;
        }
        if (count < THREAD_AFFINITY_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_affinity_policy_t) policy_info;
        /*
         * Unlock the thread mutex here and
         * return directly after calling thread_affinity_set().
         * This is necessary for correct lock ordering because
         * thread_affinity_set() takes the task lock.
         */
        thread_mtx_unlock(thread);
        return thread_affinity_set(thread, info->affinity_tag);
    }

#if CONFIG_EMBEDDED
    case THREAD_BACKGROUND_POLICY:
    {
        thread_background_policy_t info;

        if (count < THREAD_BACKGROUND_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        if (thread->task != current_task()) {
            result = KERN_PROTECTION_FAILURE;
            break;
        }

        info = (thread_background_policy_t) policy_info;

        int enable;

        if (info->priority == THREAD_BACKGROUND_POLICY_DARWIN_BG)
            enable = TASK_POLICY_ENABLE;
        else
            enable = TASK_POLICY_DISABLE;

        int category = (current_thread() == thread) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL;

        proc_set_thread_policy_locked(thread, category, TASK_POLICY_DARWIN_BG, enable, 0, &pend_token);

        break;
    }
#endif /* CONFIG_EMBEDDED */

    case THREAD_THROUGHPUT_QOS_POLICY:
    {
        thread_throughput_qos_policy_t info = (thread_throughput_qos_policy_t) policy_info;
        thread_throughput_qos_t tqos;

        if (count < THREAD_THROUGHPUT_QOS_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        if ((result = qos_throughput_policy_validate(info->thread_throughput_qos_tier)) != KERN_SUCCESS)
            break;

        tqos = qos_extract(info->thread_throughput_qos_tier);

        proc_set_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE,
                                      TASK_POLICY_THROUGH_QOS, tqos, 0, &pend_token);

        break;
    }

    case THREAD_LATENCY_QOS_POLICY:
    {
        thread_latency_qos_policy_t info = (thread_latency_qos_policy_t) policy_info;
        thread_latency_qos_t lqos;

        if (count < THREAD_LATENCY_QOS_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        if ((result = qos_latency_policy_validate(info->thread_latency_qos_tier)) != KERN_SUCCESS)
            break;

        lqos = qos_extract(info->thread_latency_qos_tier);

        proc_set_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE,
                                      TASK_POLICY_LATENCY_QOS, lqos, 0, &pend_token);

        break;
    }

    case THREAD_QOS_POLICY:
    {
        thread_qos_policy_t info = (thread_qos_policy_t)policy_info;

        if (count < THREAD_QOS_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        if (info->qos_tier < 0 || info->qos_tier >= THREAD_QOS_LAST) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        if (info->tier_importance > 0 || info->tier_importance < THREAD_QOS_MIN_TIER_IMPORTANCE) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        if (info->qos_tier == THREAD_QOS_UNSPECIFIED && info->tier_importance != 0) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        proc_set_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_AND_RELPRIO,
                                      info->qos_tier, -info->tier_importance, &pend_token);

        break;
    }

    default:
        result = KERN_INVALID_ARGUMENT;
        break;
    }

    thread_mtx_unlock(thread);

    thread_policy_update_complete_unlocked(thread, &pend_token);

    return (result);
}
/*
 * Note that there is no implemented difference between POLICY_RR and POLICY_FIFO.
 * Both result in FIXED mode scheduling.
 */
static sched_mode_t
convert_policy_to_sched_mode(integer_t policy) {
    switch (policy) {
    case POLICY_TIMESHARE:
        return TH_MODE_TIMESHARE;
    case POLICY_RR:
    case POLICY_FIFO:
        return TH_MODE_FIXED;
    default:
        panic("unexpected sched policy: %d", policy);
        return TH_MODE_NONE;
    }
}
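
/*
 * For example, both POLICY_RR and POLICY_FIFO convert to TH_MODE_FIXED here;
 * only POLICY_TIMESHARE yields TH_MODE_TIMESHARE.
 */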
/*
 * Called either with the thread mutex locked
 * or from the pthread kext in a 'safe place'.
 */
static kern_return_t
thread_set_mode_and_absolute_pri_internal(thread_t          thread,
                                          sched_mode_t      mode,
                                          integer_t         priority,
                                          task_pend_token_t pend_token)
{
    kern_return_t kr = KERN_SUCCESS;

    spl_t s = splsched();
    thread_lock(thread);

    /* This path isn't allowed to change a thread out of realtime. */
    if ((thread->sched_mode == TH_MODE_REALTIME) ||
        (thread->saved_mode == TH_MODE_REALTIME)) {
        kr = KERN_FAILURE;
        goto unlock;
    }

    if (thread->policy_reset) {
        kr = KERN_SUCCESS;
        goto unlock;
    }

    sched_mode_t old_mode = thread->sched_mode;

    /*
     * Reverse engineer and apply the correct importance value
     * from the requested absolute priority value.
     *
     * TODO: Store the absolute priority value instead
     */

    if (priority >= thread->max_priority)
        priority = thread->max_priority - thread->task_priority;
    else if (priority >= MINPRI_KERNEL)
        priority -= MINPRI_KERNEL;
    else if (priority >= MINPRI_RESERVED)
        priority -= MINPRI_RESERVED;
    else
        priority -= BASEPRI_DEFAULT;

    priority += thread->task_priority;

    if (priority > thread->max_priority)
        priority = thread->max_priority;
    else if (priority < MINPRI)
        priority = MINPRI;

    thread->importance = priority - thread->task_priority;
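
    /*
     * Worked example (assuming BASEPRI_DEFAULT == 31, MINPRI_RESERVED == 64 and
     * MINPRI_KERNEL == 80): a user thread with task_priority 31 and
     * max_priority 63 asking for absolute priority 40 takes the BASEPRI_DEFAULT
     * branch, giving 40 - 31 + 31 = 40 and an importance of 40 - 31 = 9.
     */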
    thread_set_user_sched_mode_and_recompute_pri(thread, mode);

    if (mode != old_mode)
        pend_token->tpt_update_thread_sfi = 1;

unlock:
    thread_unlock(thread);
    splx(s);

    return kr;
}
uint8_t
thread_workq_pri_for_qos(thread_qos_t qos)
{
    assert(qos < THREAD_QOS_LAST);
    return (uint8_t)thread_qos_policy_params.qos_pri[qos];
}
thread_qos_t
thread_workq_qos_for_pri(int priority)
{
    int qos;
    if (priority > thread_qos_policy_params.qos_pri[THREAD_QOS_USER_INTERACTIVE]) {
        // indicate that workq should map >UI threads to workq's
        // internal notation for above-UI work.
        return THREAD_QOS_UNSPECIFIED;
    }
    for (qos = THREAD_QOS_USER_INTERACTIVE; qos > THREAD_QOS_MAINTENANCE; qos--) {
        // map a given priority up to the next nearest qos band.
        if (thread_qos_policy_params.qos_pri[qos - 1] < priority) {
            return qos;
        }
    }
    return THREAD_QOS_MAINTENANCE;
}
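
/*
 * For example, assuming BASEPRI_DEFAULT == 31 and BASEPRI_UTILITY == 20, a
 * priority of 25 is above qos_pri[THREAD_QOS_UTILITY] but at or below
 * qos_pri[THREAD_QOS_LEGACY], so the loop maps it up to THREAD_QOS_LEGACY.
 */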
/*
 * private interface for pthread workqueues
 *
 * Set scheduling policy & absolute priority for thread
 * May be called with spinlocks held
 * Thread mutex lock is not held
 */
void
thread_reset_workq_qos(thread_t thread, uint32_t qos)
{
    struct task_pend_token pend_token = {};

    assert(qos < THREAD_QOS_LAST);

    spl_t s = splsched();
    thread_lock(thread);

    proc_set_thread_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE,
                                      TASK_POLICY_QOS_AND_RELPRIO, qos, 0, &pend_token);
    proc_set_thread_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE,
                                      TASK_POLICY_QOS_WORKQ_OVERRIDE, THREAD_QOS_UNSPECIFIED, 0,
                                      &pend_token);

    assert(pend_token.tpt_update_sockets == 0);

    thread_unlock(thread);
    splx(s);

    thread_policy_update_complete_unlocked(thread, &pend_token);
}
/*
 * private interface for pthread workqueues
 *
 * Set scheduling policy & absolute priority for thread
 * May be called with spinlocks held
 * Thread mutex lock is held
 */
void
thread_set_workq_override(thread_t thread, uint32_t qos)
{
    struct task_pend_token pend_token = {};

    assert(qos < THREAD_QOS_LAST);

    spl_t s = splsched();
    thread_lock(thread);

    proc_set_thread_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE,
                                      TASK_POLICY_QOS_WORKQ_OVERRIDE, qos, 0, &pend_token);

    assert(pend_token.tpt_update_sockets == 0);

    thread_unlock(thread);
    splx(s);

    thread_policy_update_complete_unlocked(thread, &pend_token);
}
/*
 * private interface for pthread workqueues
 *
 * Set scheduling policy & absolute priority for thread
 * May be called with spinlocks held
 * Thread mutex lock is not held
 */
void
thread_set_workq_pri(thread_t      thread,
                     thread_qos_t  qos,
                     integer_t     priority,
                     integer_t     policy)
{
    struct task_pend_token pend_token = {};
    sched_mode_t mode = convert_policy_to_sched_mode(policy);

    assert(qos < THREAD_QOS_LAST);
    assert(thread->static_param);

    if (!thread->static_param || !thread->active)
        return;

    spl_t s = splsched();
    thread_lock(thread);

    proc_set_thread_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE,
                                      TASK_POLICY_QOS_AND_RELPRIO, qos, 0, &pend_token);
    proc_set_thread_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE,
                                      TASK_POLICY_QOS_WORKQ_OVERRIDE, THREAD_QOS_UNSPECIFIED,
                                      0, &pend_token);

    thread_unlock(thread);
    splx(s);

    /* Concern: this doesn't hold the mutex... */

    __assert_only kern_return_t kr;
    kr = thread_set_mode_and_absolute_pri_internal(thread, mode, priority,
                                                   &pend_token);
    assert(kr == KERN_SUCCESS);

    if (pend_token.tpt_update_thread_sfi)
        sfi_reevaluate(thread);
}
/*
 * thread_set_mode_and_absolute_pri:
 *
 * Set scheduling policy & absolute priority for thread, for deprecated
 * thread_set_policy and thread_policy interfaces.
 *
 * Called with nothing locked.
 */
kern_return_t
thread_set_mode_and_absolute_pri(thread_t  thread,
                                 integer_t policy,
                                 integer_t priority)
{
    kern_return_t kr = KERN_SUCCESS;
    struct task_pend_token pend_token = {};

    sched_mode_t mode = convert_policy_to_sched_mode(policy);

    thread_mtx_lock(thread);

    if (!thread->active) {
        kr = KERN_TERMINATED;
        goto unlock;
    }

    if (thread_is_static_param(thread)) {
        kr = KERN_POLICY_STATIC;
        goto unlock;
    }

    /* Setting legacy policies on threads kills the current QoS */
    if (thread->requested_policy.thrp_qos != THREAD_QOS_UNSPECIFIED)
        thread_remove_qos_policy_locked(thread, &pend_token);

    kr = thread_set_mode_and_absolute_pri_internal(thread, mode, priority, &pend_token);

unlock:
    thread_mtx_unlock(thread);

    thread_policy_update_complete_unlocked(thread, &pend_token);

    return (kr);
}
/*
 * Set the thread's requested mode and recompute priority
 * Called with thread mutex and thread locked
 *
 * TODO: Mitigate potential problems caused by moving thread to end of runq
 * whenever its priority is recomputed
 *      Only remove when it actually changes? Attempt to re-insert at appropriate location?
 */
static void
thread_set_user_sched_mode_and_recompute_pri(thread_t thread, sched_mode_t mode)
{
    if (thread->policy_reset)
        return;

    boolean_t removed = thread_run_queue_remove(thread);

    /*
     * TODO: Instead of having saved mode, have 'user mode' and 'true mode'.
     * That way there's zero confusion over which the user wants
     * and which the kernel wants.
     */
    if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK)
        thread->saved_mode = mode;
    else
        sched_set_thread_mode(thread, mode);

    thread_recompute_priority(thread);

    if (removed)
        thread_run_queue_reinsert(thread, SCHED_TAILQ);
}
/* called at splsched with thread lock locked */
static void
thread_update_qos_cpu_time_locked(thread_t thread)
{
    task_t task = thread->task;
    uint64_t timer_sum, timer_delta;

    /*
     * This is only as accurate as the distance between
     * last context switch (embedded) or last user/kernel boundary transition (desktop)
     * because user_timer and system_timer are only updated then.
     *
     * TODO: Consider running a timer_update operation here to update it first.
     *       Maybe doable with interrupts disabled from current thread.
     *       If the thread is on a different core, may not be easy to get right.
     *
     * TODO: There should be a function for this in timer.c
     */

    timer_sum = timer_grab(&thread->user_timer);
    timer_sum += timer_grab(&thread->system_timer);
    timer_delta = timer_sum - thread->vtimer_qos_save;

    thread->vtimer_qos_save = timer_sum;

    uint64_t* task_counter = NULL;

    /* Update the task-level effective and requested qos stats atomically, because we don't have the task lock. */
    switch (thread->effective_policy.thep_qos) {
    case THREAD_QOS_UNSPECIFIED:      task_counter = &task->cpu_time_eqos_stats.cpu_time_qos_default;          break;
    case THREAD_QOS_MAINTENANCE:      task_counter = &task->cpu_time_eqos_stats.cpu_time_qos_maintenance;      break;
    case THREAD_QOS_BACKGROUND:       task_counter = &task->cpu_time_eqos_stats.cpu_time_qos_background;       break;
    case THREAD_QOS_UTILITY:          task_counter = &task->cpu_time_eqos_stats.cpu_time_qos_utility;          break;
    case THREAD_QOS_LEGACY:           task_counter = &task->cpu_time_eqos_stats.cpu_time_qos_legacy;           break;
    case THREAD_QOS_USER_INITIATED:   task_counter = &task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;   break;
    case THREAD_QOS_USER_INTERACTIVE: task_counter = &task->cpu_time_eqos_stats.cpu_time_qos_user_interactive; break;
    default:
        panic("unknown effective QoS: %d", thread->effective_policy.thep_qos);
    }

    OSAddAtomic64(timer_delta, task_counter);

    /* Update the task-level qos stats atomically, because we don't have the task lock. */
    switch (thread->requested_policy.thrp_qos) {
    case THREAD_QOS_UNSPECIFIED:      task_counter = &task->cpu_time_rqos_stats.cpu_time_qos_default;          break;
    case THREAD_QOS_MAINTENANCE:      task_counter = &task->cpu_time_rqos_stats.cpu_time_qos_maintenance;      break;
    case THREAD_QOS_BACKGROUND:       task_counter = &task->cpu_time_rqos_stats.cpu_time_qos_background;       break;
    case THREAD_QOS_UTILITY:          task_counter = &task->cpu_time_rqos_stats.cpu_time_qos_utility;          break;
    case THREAD_QOS_LEGACY:           task_counter = &task->cpu_time_rqos_stats.cpu_time_qos_legacy;           break;
    case THREAD_QOS_USER_INITIATED:   task_counter = &task->cpu_time_rqos_stats.cpu_time_qos_user_initiated;   break;
    case THREAD_QOS_USER_INTERACTIVE: task_counter = &task->cpu_time_rqos_stats.cpu_time_qos_user_interactive; break;
    default:
        panic("unknown requested QoS: %d", thread->requested_policy.thrp_qos);
    }

    OSAddAtomic64(timer_delta, task_counter);
}
/*
 * called with no thread locks held
 */
void
thread_update_qos_cpu_time(thread_t thread)
{
    thread_mtx_lock(thread);

    spl_t s = splsched();
    thread_lock(thread);

    thread_update_qos_cpu_time_locked(thread);

    thread_unlock(thread);
    splx(s);

    thread_mtx_unlock(thread);
}
/*
 * Calculate base priority from thread attributes, and set it on the thread
 *
 * Called with thread_lock and thread mutex held.
 */
void
thread_recompute_priority(
    thread_t thread)
{
    int priority;

    if (thread->policy_reset)
        return;

    if (thread->sched_mode == TH_MODE_REALTIME) {
        sched_set_thread_base_priority(thread, BASEPRI_RTQUEUES);
        return;
    } else if (thread->effective_policy.thep_qos != THREAD_QOS_UNSPECIFIED) {
        int qos = thread->effective_policy.thep_qos;
        int qos_ui_is_urgent = thread->effective_policy.thep_qos_ui_is_urgent;
        int qos_relprio = -(thread->effective_policy.thep_qos_relprio); /* stored in task policy inverted */
        int qos_scaled_relprio;

        assert(qos >= 0 && qos < THREAD_QOS_LAST);
        assert(qos_relprio <= 0 && qos_relprio >= THREAD_QOS_MIN_TIER_IMPORTANCE);

        priority = thread_qos_policy_params.qos_pri[qos];
        qos_scaled_relprio = thread_qos_scaled_relative_priority(qos, qos_relprio);

        if (qos == THREAD_QOS_USER_INTERACTIVE && qos_ui_is_urgent == 1) {
            /* Bump priority 46 to 47 when in a frontmost app */
            qos_scaled_relprio += 1;
        }

        /* TODO: factor in renice priority here? */

        priority += qos_scaled_relprio;
    } else {
        if (thread->importance > MAXPRI)
            priority = MAXPRI;
        else if (thread->importance < -MAXPRI)
            priority = -MAXPRI;
        else
            priority = thread->importance;

        priority += thread->task_priority;
    }

    priority = MAX(priority, thread->user_promotion_basepri);

    /*
     * Clamp priority back into the allowed range for this task.
     * The initial priority value could be out of this range due to:
     *      Task clamped to BG or Utility (max-pri is 4, or 20)
     *      Task is user task (max-pri is 63)
     *      Task is kernel task (max-pri is 95)
     * Note that thread->importance is user-settable to any integer
     * via THREAD_PRECEDENCE_POLICY.
     */
    if (priority > thread->max_priority)
        priority = thread->max_priority;
    else if (priority < MINPRI)
        priority = MINPRI;

    if (thread->saved_mode == TH_MODE_REALTIME &&
        thread->sched_flags & TH_SFLAG_FAILSAFE)
        priority = DEPRESSPRI;

    if (thread->effective_policy.thep_terminated == TRUE) {
        /*
         * We temporarily want to override the expected priority to
         * ensure that the thread exits in a timely manner.
         * Note that this is allowed to exceed thread->max_priority
         * so that the thread is no longer clamped to background
         * during the final exit phase.
         */
        if (priority < thread->task_priority)
            priority = thread->task_priority;
        if (priority < BASEPRI_DEFAULT)
            priority = BASEPRI_DEFAULT;
    }

#if CONFIG_EMBEDDED
    /* No one can have a base priority less than MAXPRI_THROTTLE */
    if (priority < MAXPRI_THROTTLE)
        priority = MAXPRI_THROTTLE;
#endif /* CONFIG_EMBEDDED */

    sched_set_thread_base_priority(thread, priority);
}
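
/*
 * For example, a THREAD_QOS_USER_INTERACTIVE thread at 0REL starts from
 * qos_pri[THREAD_QOS_USER_INTERACTIVE] (i.e. 46) and is bumped to 47 when
 * thep_qos_ui_is_urgent is set, subject to the max_priority clamp above.
 */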
/* Called with the task lock held, but not the thread mutex or spinlock */
void
thread_policy_update_tasklocked(
    thread_t          thread,
    integer_t         priority,
    integer_t         max_priority,
    task_pend_token_t pend_token)
{
    thread_mtx_lock(thread);

    if (!thread->active || thread->policy_reset) {
        thread_mtx_unlock(thread);
        return;
    }

    spl_t s = splsched();
    thread_lock(thread);

    integer_t old_max_priority = thread->max_priority;

    thread->task_priority = priority;
    thread->max_priority = max_priority;

#if CONFIG_EMBEDDED
    /*
     * When backgrounding a thread, iOS has the semantic that
     * realtime and fixed priority threads should be demoted
     * to timeshare background threads.
     *
     * On OSX, realtime and fixed priority threads don't lose their mode.
     *
     * TODO: Do this inside the thread policy update routine in order to avoid double
     * remove/reinsert for a runnable thread
     */
    if ((max_priority <= MAXPRI_THROTTLE) && (old_max_priority > MAXPRI_THROTTLE)) {
        sched_thread_mode_demote(thread, TH_SFLAG_THROTTLED);
    } else if ((max_priority > MAXPRI_THROTTLE) && (old_max_priority <= MAXPRI_THROTTLE)) {
        sched_thread_mode_undemote(thread, TH_SFLAG_THROTTLED);
    }
#endif /* CONFIG_EMBEDDED */

    thread_policy_update_spinlocked(thread, TRUE, pend_token);

    thread_unlock(thread);
    splx(s);

    thread_mtx_unlock(thread);
}
/*
 * Reset thread to default state in preparation for termination
 * Called with thread mutex locked
 *
 * Always called on current thread, so we don't need a run queue remove
 */
void
thread_policy_reset(
    thread_t thread)
{
    assert(thread == current_thread());

    spl_t s = splsched();
    thread_lock(thread);

    if (thread->sched_flags & TH_SFLAG_FAILSAFE)
        sched_thread_mode_undemote(thread, TH_SFLAG_FAILSAFE);

    if (thread->sched_flags & TH_SFLAG_THROTTLED)
        sched_thread_mode_undemote(thread, TH_SFLAG_THROTTLED);

    /* At this point, the various demotions should be inactive */
    assert(!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK));
    assert(!(thread->sched_flags & TH_SFLAG_THROTTLED));
    assert(!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK));

    /* Reset thread back to task-default basepri and mode */
    sched_mode_t newmode = SCHED(initial_thread_sched_mode)(thread->task);

    sched_set_thread_mode(thread, newmode);

    thread->importance = 0;

    /* Prevent further changes to thread base priority or mode */
    thread->policy_reset = 1;

    sched_set_thread_base_priority(thread, thread->task_priority);

    thread_unlock(thread);
    splx(s);
}
kern_return_t
thread_policy_get(
    thread_t                thread,
    thread_policy_flavor_t  flavor,
    thread_policy_t         policy_info,
    mach_msg_type_number_t  *count,
    boolean_t               *get_default)
{
    kern_return_t result = KERN_SUCCESS;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);
    if (!thread->active) {
        thread_mtx_unlock(thread);

        return (KERN_TERMINATED);
    }

    switch (flavor) {

    case THREAD_EXTENDED_POLICY:
    {
        boolean_t timeshare = TRUE;

        if (!(*get_default)) {
            spl_t s = splsched();
            thread_lock(thread);

            if ( (thread->sched_mode != TH_MODE_REALTIME) &&
                 (thread->saved_mode != TH_MODE_REALTIME) ) {
                if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK))
                    timeshare = (thread->sched_mode == TH_MODE_TIMESHARE) != 0;
                else
                    timeshare = (thread->saved_mode == TH_MODE_TIMESHARE) != 0;
            } else {
                *get_default = TRUE;
            }

            thread_unlock(thread);
            splx(s);
        }

        if (*count >= THREAD_EXTENDED_POLICY_COUNT) {
            thread_extended_policy_t info;

            info = (thread_extended_policy_t)policy_info;
            info->timeshare = timeshare;
        }

        break;
    }

    case THREAD_TIME_CONSTRAINT_POLICY:
    {
        thread_time_constraint_policy_t info;

        if (*count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_time_constraint_policy_t)policy_info;

        if (!(*get_default)) {
            spl_t s = splsched();
            thread_lock(thread);

            if ( (thread->sched_mode == TH_MODE_REALTIME) ||
                 (thread->saved_mode == TH_MODE_REALTIME) ) {
                info->period = thread->realtime.period;
                info->computation = thread->realtime.computation;
                info->constraint = thread->realtime.constraint;
                info->preemptible = thread->realtime.preemptible;
            } else {
                *get_default = TRUE;
            }

            thread_unlock(thread);
            splx(s);
        }

        if (*get_default) {
            info->period = 0;
            info->computation = default_timeshare_computation;
            info->constraint = default_timeshare_constraint;
            info->preemptible = TRUE;
        }

        break;
    }

    case THREAD_PRECEDENCE_POLICY:
    {
        thread_precedence_policy_t info;

        if (*count < THREAD_PRECEDENCE_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_precedence_policy_t)policy_info;

        if (!(*get_default)) {
            spl_t s = splsched();
            thread_lock(thread);

            info->importance = thread->importance;

            thread_unlock(thread);
            splx(s);
        } else {
            info->importance = 0;
        }

        break;
    }

    case THREAD_AFFINITY_POLICY:
    {
        thread_affinity_policy_t info;

        if (!thread_affinity_is_supported()) {
            result = KERN_NOT_SUPPORTED;
            break;
        }
        if (*count < THREAD_AFFINITY_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_affinity_policy_t)policy_info;

        if (!(*get_default))
            info->affinity_tag = thread_affinity_get(thread);
        else
            info->affinity_tag = THREAD_AFFINITY_TAG_NULL;

        break;
    }

    case THREAD_POLICY_STATE:
    {
        thread_policy_state_t info;

        if (*count < THREAD_POLICY_STATE_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        /* Only root can get this info */
        if (current_task()->sec_token.val[0] != 0) {
            result = KERN_PROTECTION_FAILURE;
            break;
        }

        info = (thread_policy_state_t)(void*)policy_info;

        if (!(*get_default)) {
            info->flags = 0;

            spl_t s = splsched();
            thread_lock(thread);

            info->flags |= (thread->static_param ? THREAD_POLICY_STATE_FLAG_STATIC_PARAM : 0);

            info->thps_requested_policy = *(uint64_t*)(void*)(&thread->requested_policy);
            info->thps_effective_policy = *(uint64_t*)(void*)(&thread->effective_policy);

            info->thps_user_promotions = 0;
            info->thps_user_promotion_basepri = thread->user_promotion_basepri;
            info->thps_ipc_overrides = thread->ipc_overrides;

            proc_get_thread_policy_bitfield(thread, info);

            thread_unlock(thread);
            splx(s);
        } else {
            info->requested = 0;
            info->effective = 0;
        }

        break;
    }

    case THREAD_LATENCY_QOS_POLICY:
    {
        thread_latency_qos_policy_t info = (thread_latency_qos_policy_t) policy_info;
        thread_latency_qos_t plqos;

        if (*count < THREAD_LATENCY_QOS_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        if (*get_default) {
            plqos = 0;
        } else {
            plqos = proc_get_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_LATENCY_QOS, NULL);
        }

        info->thread_latency_qos_tier = qos_latency_policy_package(plqos);

        break;
    }

    case THREAD_THROUGHPUT_QOS_POLICY:
    {
        thread_throughput_qos_policy_t info = (thread_throughput_qos_policy_t) policy_info;
        thread_throughput_qos_t ptqos;

        if (*count < THREAD_THROUGHPUT_QOS_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        if (*get_default) {
            ptqos = 0;
        } else {
            ptqos = proc_get_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_THROUGH_QOS, NULL);
        }

        info->thread_throughput_qos_tier = qos_throughput_policy_package(ptqos);

        break;
    }

    case THREAD_QOS_POLICY:
    {
        thread_qos_policy_t info = (thread_qos_policy_t)policy_info;

        if (*count < THREAD_QOS_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        if (!(*get_default)) {
            int relprio_value = 0;
            info->qos_tier = proc_get_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE,
                                                           TASK_POLICY_QOS_AND_RELPRIO, &relprio_value);

            info->tier_importance = -relprio_value;
        } else {
            info->qos_tier = THREAD_QOS_UNSPECIFIED;
            info->tier_importance = 0;
        }

        break;
    }

    default:
        result = KERN_INVALID_ARGUMENT;
        break;
    }

    thread_mtx_unlock(thread);

    return (result);
}
void
thread_policy_create(thread_t thread)
{
    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                              (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_THREAD))) | DBG_FUNC_START,
                              thread_tid(thread), theffective_0(thread),
                              theffective_1(thread), thread->base_pri, 0);

    /* We pass a pend token but ignore it */
    struct task_pend_token pend_token = {};

    thread_policy_update_internal_spinlocked(thread, TRUE, &pend_token);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                              (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_THREAD))) | DBG_FUNC_END,
                              thread_tid(thread), theffective_0(thread),
                              theffective_1(thread), thread->base_pri, 0);
}
static void
thread_policy_update_spinlocked(thread_t thread, boolean_t recompute_priority, task_pend_token_t pend_token)
{
    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                              (IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_THREAD) | DBG_FUNC_START),
                              thread_tid(thread), theffective_0(thread),
                              theffective_1(thread), thread->base_pri, 0);

    thread_policy_update_internal_spinlocked(thread, recompute_priority, pend_token);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                              (IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_THREAD)) | DBG_FUNC_END,
                              thread_tid(thread), theffective_0(thread),
                              theffective_1(thread), thread->base_pri, 0);
}
/*
 * One thread state update function TO RULE THEM ALL
 *
 * This function updates the thread effective policy fields
 * and pushes the results to the relevant subsystems.
 *
 * Returns TRUE if a pended action needs to be run.
 *
 * Called with thread spinlock locked, task may be locked, thread mutex may be locked
 */
static void
thread_policy_update_internal_spinlocked(thread_t thread, boolean_t recompute_priority,
                                         task_pend_token_t pend_token)
{
    /*
     * Gather requested policy and effective task state
     */

    struct thread_requested_policy requested = thread->requested_policy;
    struct task_effective_policy task_effective = thread->task->effective_policy;

    /*
     * Calculate new effective policies from requested policy, task and thread state
     *
     * Don't change requested, it won't take effect
     */

    struct thread_effective_policy next = {};

    next.thep_qos_ui_is_urgent = task_effective.tep_qos_ui_is_urgent;

    uint32_t next_qos = requested.thrp_qos;

    if (requested.thrp_qos != THREAD_QOS_UNSPECIFIED) {
        next_qos = MAX(requested.thrp_qos_override, next_qos);
        next_qos = MAX(requested.thrp_qos_promote, next_qos);
        next_qos = MAX(requested.thrp_qos_ipc_override, next_qos);
        next_qos = MAX(requested.thrp_qos_workq_override, next_qos);
    }

    next.thep_qos = next_qos;
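
    /*
     * For example, a thread that requested THREAD_QOS_UTILITY but currently
     * carries a THREAD_QOS_USER_INITIATED override or promotion resolves
     * next_qos to USER_INITIATED; a thread whose requested QoS is UNSPECIFIED
     * ignores the override fields entirely at this step.
     */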
    /* A task clamp will result in an effective QoS even when requested is UNSPECIFIED */
    if (task_effective.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) {
        if (next.thep_qos != THREAD_QOS_UNSPECIFIED)
            next.thep_qos = MIN(task_effective.tep_qos_clamp, next.thep_qos);
        else
            next.thep_qos = task_effective.tep_qos_clamp;
    }

    /*
     * Extract outbound-promotion QoS before applying task ceiling or BG clamp
     * This allows QoS promotions to work properly even after the process is unclamped.
     */
    next.thep_qos_promote = next.thep_qos;

    /* The ceiling only applies to threads that are in the QoS world */
    if (task_effective.tep_qos_ceiling != THREAD_QOS_UNSPECIFIED &&
        next.thep_qos != THREAD_QOS_UNSPECIFIED) {
        next.thep_qos = MIN(task_effective.tep_qos_ceiling, next.thep_qos);
    }

    /* Apply the sync ipc qos override */
    assert(requested.thrp_qos_sync_ipc_override == THREAD_QOS_UNSPECIFIED);

    /*
     * The QoS relative priority is only applicable when the original programmer's
     * intended (requested) QoS is in effect. When the QoS is clamped (e.g.
     * USER_INITIATED-13REL clamped to UTILITY), the relative priority is not honored,
     * since otherwise it would be lower than unclamped threads. Similarly, in the
     * presence of boosting, the programmer doesn't know what other actors
     * are boosting the thread.
     */
    if ((requested.thrp_qos != THREAD_QOS_UNSPECIFIED) &&
        (requested.thrp_qos == next.thep_qos) &&
        (requested.thrp_qos_override == THREAD_QOS_UNSPECIFIED)) {
        next.thep_qos_relprio = requested.thrp_qos_relprio;
    } else {
        next.thep_qos_relprio = 0;
    }

    /* Calculate DARWIN_BG */
    boolean_t wants_darwinbg = FALSE;
    boolean_t wants_all_sockets_bg = FALSE; /* Do I want my existing sockets to be bg */

    /*
     * If DARWIN_BG has been requested at either level, it's engaged.
     * darwinbg threads always create bg sockets,
     * but only some types of darwinbg change the sockets
     * after they're created
     */
    if (requested.thrp_int_darwinbg || requested.thrp_ext_darwinbg)
        wants_all_sockets_bg = wants_darwinbg = TRUE;

    if (requested.thrp_pidbind_bg)
        wants_all_sockets_bg = wants_darwinbg = TRUE;

    if (task_effective.tep_darwinbg)
        wants_darwinbg = TRUE;

    if (next.thep_qos == THREAD_QOS_BACKGROUND ||
        next.thep_qos == THREAD_QOS_MAINTENANCE)
        wants_darwinbg = TRUE;

    /* Calculate side effects of DARWIN_BG */

    if (wants_darwinbg)
        next.thep_darwinbg = 1;

    if (next.thep_darwinbg || task_effective.tep_new_sockets_bg)
        next.thep_new_sockets_bg = 1;

    /* Don't use task_effective.tep_all_sockets_bg here */
    if (wants_all_sockets_bg)
        next.thep_all_sockets_bg = 1;

    /* darwinbg implies background QOS (or lower) */
    if (next.thep_darwinbg &&
        (next.thep_qos > THREAD_QOS_BACKGROUND || next.thep_qos == THREAD_QOS_UNSPECIFIED)) {
        next.thep_qos = THREAD_QOS_BACKGROUND;
        next.thep_qos_relprio = 0;
    }

    /* Calculate IO policy */

    int iopol = THROTTLE_LEVEL_TIER0;

    /* Factor in the task's IO policy */
    if (next.thep_darwinbg)
        iopol = MAX(iopol, task_effective.tep_bg_iotier);

    iopol = MAX(iopol, task_effective.tep_io_tier);

    /* Look up the associated IO tier value for the QoS class */
    iopol = MAX(iopol, thread_qos_policy_params.qos_iotier[next.thep_qos]);

    iopol = MAX(iopol, requested.thrp_int_iotier);
    iopol = MAX(iopol, requested.thrp_ext_iotier);

    next.thep_io_tier = iopol;

    /*
     * If a QoS override is causing IO to go into a lower tier, we also set
     * the passive bit so that a thread doesn't end up stuck in its own throttle
     * window when the override goes away.
     */
    boolean_t qos_io_override_active = FALSE;
    if (thread_qos_policy_params.qos_iotier[next.thep_qos] <
        thread_qos_policy_params.qos_iotier[requested.thrp_qos])
        qos_io_override_active = TRUE;

    /* Calculate Passive IO policy */
    if (requested.thrp_ext_iopassive ||
        requested.thrp_int_iopassive ||
        qos_io_override_active ||
        task_effective.tep_io_passive)
        next.thep_io_passive = 1;

    /* Calculate timer QOS */
    uint32_t latency_qos = requested.thrp_latency_qos;

    latency_qos = MAX(latency_qos, task_effective.tep_latency_qos);
    latency_qos = MAX(latency_qos, thread_qos_policy_params.qos_latency_qos[next.thep_qos]);

    next.thep_latency_qos = latency_qos;

    /* Calculate throughput QOS */
    uint32_t through_qos = requested.thrp_through_qos;

    through_qos = MAX(through_qos, task_effective.tep_through_qos);
    through_qos = MAX(through_qos, thread_qos_policy_params.qos_through_qos[next.thep_qos]);

    next.thep_through_qos = through_qos;

    if (task_effective.tep_terminated || requested.thrp_terminated) {
        /* Shoot down the throttles that slow down exit or response to SIGTERM */
        next.thep_terminated = 1;
        next.thep_darwinbg = 0;
        next.thep_io_tier = THROTTLE_LEVEL_TIER0;
        next.thep_qos = THREAD_QOS_UNSPECIFIED;
        next.thep_latency_qos = LATENCY_QOS_TIER_UNSPECIFIED;
        next.thep_through_qos = THROUGHPUT_QOS_TIER_UNSPECIFIED;
    }

    /*
     * Swap out old policy for new policy
     */

    struct thread_effective_policy prev = thread->effective_policy;

    thread_update_qos_cpu_time_locked(thread);

    /* This is the point where the new values become visible to other threads */
    thread->effective_policy = next;

    /*
     * Pend updates that can't be done while holding the thread lock
     */

    if (prev.thep_all_sockets_bg != next.thep_all_sockets_bg)
        pend_token->tpt_update_sockets = 1;

    /* TODO: Doesn't this only need to be done if the throttle went up? */
    if (prev.thep_io_tier != next.thep_io_tier)
        pend_token->tpt_update_throttle = 1;

    /*
     * Check for the attributes that sfi_thread_classify() consults,
     * and trigger SFI re-evaluation.
     */
    if (prev.thep_qos != next.thep_qos ||
        prev.thep_darwinbg != next.thep_darwinbg)
        pend_token->tpt_update_thread_sfi = 1;

    /*
     * Update other subsystems as necessary if something has changed
     */

    /* Check for the attributes that thread_recompute_priority() consults */
    if (prev.thep_qos != next.thep_qos ||
        prev.thep_qos_relprio != next.thep_qos_relprio ||
        prev.thep_qos_ui_is_urgent != next.thep_qos_ui_is_urgent ||
        prev.thep_terminated != next.thep_terminated ||
        pend_token->tpt_force_recompute_pri == 1 ||
        recompute_priority) {
        thread_recompute_priority(thread);
    }
}
/*
 * Initiate a thread policy state transition on a thread with its TID
 * Useful if you cannot guarantee the thread won't get terminated
 * Precondition: No locks are held
 * Will take task lock - using the non-tid variant is faster
 * if you already have a thread ref.
 */
void
proc_set_thread_policy_with_tid(task_t   task,
                                uint64_t tid,
                                int      category,
                                int      flavor,
                                int      value)
{
    /* takes task lock, returns ref'ed thread or NULL */
    thread_t thread = task_findtid(task, tid);

    if (thread == THREAD_NULL)
        return;

    proc_set_thread_policy(thread, category, flavor, value);

    thread_deallocate(thread);
}
/*
 * Initiate a thread policy transition on a thread
 * This path supports networking transitions (i.e. darwinbg transitions)
 * Precondition: No locks are held
 */
void
proc_set_thread_policy(thread_t thread,
                       int      category,
                       int      flavor,
                       int      value)
{
    struct task_pend_token pend_token = {};

    thread_mtx_lock(thread);

    proc_set_thread_policy_locked(thread, category, flavor, value, 0, &pend_token);

    thread_mtx_unlock(thread);

    thread_policy_update_complete_unlocked(thread, &pend_token);
}
/*
 * Do the things that can't be done while holding a thread mutex.
 * These are set up to call back into thread policy to get the latest value,
 * so they don't have to be synchronized with the update.
 * The only required semantic is 'call this sometime after updating effective policy'
 *
 * Precondition: Thread mutex is not held
 *
 * This may be called with the task lock held, but in that case it won't be
 * called with tpt_update_sockets set.
 */
void
thread_policy_update_complete_unlocked(thread_t thread, task_pend_token_t pend_token)
{
#ifdef MACH_BSD
    if (pend_token->tpt_update_sockets)
        proc_apply_task_networkbg(thread->task->bsd_info, thread);
#endif /* MACH_BSD */

    if (pend_token->tpt_update_throttle)
        rethrottle_thread(thread->uthread);

    if (pend_token->tpt_update_thread_sfi)
        sfi_reevaluate(thread);
}
/*
 * Set and update thread policy
 * Thread mutex might be held
 */
static void
proc_set_thread_policy_locked(thread_t          thread,
                              int               category,
                              int               flavor,
                              int               value,
                              int               value2,
                              task_pend_token_t pend_token)
{
    spl_t s = splsched();
    thread_lock(thread);

    proc_set_thread_policy_spinlocked(thread, category, flavor, value, value2, pend_token);

    thread_unlock(thread);
    splx(s);
}
/*
 * Set and update thread policy
 * Thread spinlock is held
 */
static void
proc_set_thread_policy_spinlocked(thread_t          thread,
                                  int               category,
                                  int               flavor,
                                  int               value,
                                  int               value2,
                                  task_pend_token_t pend_token)
{
    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                              (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_THREAD))) | DBG_FUNC_START,
                              thread_tid(thread), threquested_0(thread),
                              threquested_1(thread), value, 0);

    thread_set_requested_policy_spinlocked(thread, category, flavor, value, value2);

    thread_policy_update_spinlocked(thread, FALSE, pend_token);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                              (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_THREAD))) | DBG_FUNC_END,
                              thread_tid(thread), threquested_0(thread),
                              threquested_1(thread), tpending(pend_token), 0);
}
/*
 * Set the requested state for a specific flavor to a specific value.
 */
static void
thread_set_requested_policy_spinlocked(thread_t thread,
                                       int      category,
                                       int      flavor,
                                       int      value,
                                       int      value2)
{
    int tier, passive;

    struct thread_requested_policy requested = thread->requested_policy;

    switch (flavor) {

    /* Category: EXTERNAL and INTERNAL, thread and task */

    case TASK_POLICY_DARWIN_BG:
        if (category == TASK_POLICY_EXTERNAL)
            requested.thrp_ext_darwinbg = value;
        else
            requested.thrp_int_darwinbg = value;
        break;

    case TASK_POLICY_IOPOL:
        proc_iopol_to_tier(value, &tier, &passive);
        if (category == TASK_POLICY_EXTERNAL) {
            requested.thrp_ext_iotier = tier;
            requested.thrp_ext_iopassive = passive;
        } else {
            requested.thrp_int_iotier = tier;
            requested.thrp_int_iopassive = passive;
        }
        break;

    case TASK_POLICY_IO:
        if (category == TASK_POLICY_EXTERNAL)
            requested.thrp_ext_iotier = value;
        else
            requested.thrp_int_iotier = value;
        break;

    case TASK_POLICY_PASSIVE_IO:
        if (category == TASK_POLICY_EXTERNAL)
            requested.thrp_ext_iopassive = value;
        else
            requested.thrp_int_iopassive = value;
        break;

    /* Category: ATTRIBUTE, thread only */

    case TASK_POLICY_PIDBIND_BG:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.thrp_pidbind_bg = value;
        break;

    case TASK_POLICY_LATENCY_QOS:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.thrp_latency_qos = value;
        break;

    case TASK_POLICY_THROUGH_QOS:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.thrp_through_qos = value;
        break;

    case TASK_POLICY_QOS:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.thrp_qos = value;
        break;

    case TASK_POLICY_QOS_OVERRIDE:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.thrp_qos_override = value;
        break;

    case TASK_POLICY_QOS_AND_RELPRIO:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.thrp_qos = value;
        requested.thrp_qos_relprio = value2;
        DTRACE_BOOST3(qos_set, uint64_t, thread->thread_id, int, requested.thrp_qos, int, requested.thrp_qos_relprio);
        break;

    case TASK_POLICY_QOS_WORKQ_OVERRIDE:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.thrp_qos_workq_override = value;
        break;

    case TASK_POLICY_QOS_PROMOTE:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.thrp_qos_promote = value;
        break;

    case TASK_POLICY_QOS_IPC_OVERRIDE:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.thrp_qos_ipc_override = value;
        break;

    case TASK_POLICY_TERMINATED:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.thrp_terminated = value;
        break;

    default:
        panic("unknown task policy: %d %d %d", category, flavor, value);
        break;
    }

    thread->requested_policy = requested;
}
/*
 * Gets what you set. Effective values may be different.
 * Precondition: No locks are held
 */
int
proc_get_thread_policy(thread_t thread,
                       int      category,
                       int      flavor)
{
    int value = 0;
    thread_mtx_lock(thread);
    value = proc_get_thread_policy_locked(thread, category, flavor, NULL);
    thread_mtx_unlock(thread);
    return value;
}
static int
proc_get_thread_policy_locked(thread_t thread,
                              int      category,
                              int      flavor,
                              int*     value2)
{
    int value = 0;

    spl_t s = splsched();
    thread_lock(thread);

    value = thread_get_requested_policy_spinlocked(thread, category, flavor, value2);

    thread_unlock(thread);
    splx(s);

    return value;
}
/*
 * Gets what you set. Effective values may be different.
 */
static int
thread_get_requested_policy_spinlocked(thread_t thread,
                                       int      category,
                                       int      flavor,
                                       int*     value2)
{
    int value = 0;

    struct thread_requested_policy requested = thread->requested_policy;

    switch (flavor) {
    case TASK_POLICY_DARWIN_BG:
        if (category == TASK_POLICY_EXTERNAL)
            value = requested.thrp_ext_darwinbg;
        else
            value = requested.thrp_int_darwinbg;
        break;
    case TASK_POLICY_IOPOL:
        if (category == TASK_POLICY_EXTERNAL)
            value = proc_tier_to_iopol(requested.thrp_ext_iotier,
                                       requested.thrp_ext_iopassive);
        else
            value = proc_tier_to_iopol(requested.thrp_int_iotier,
                                       requested.thrp_int_iopassive);
        break;
    case TASK_POLICY_IO:
        if (category == TASK_POLICY_EXTERNAL)
            value = requested.thrp_ext_iotier;
        else
            value = requested.thrp_int_iotier;
        break;
    case TASK_POLICY_PASSIVE_IO:
        if (category == TASK_POLICY_EXTERNAL)
            value = requested.thrp_ext_iopassive;
        else
            value = requested.thrp_int_iopassive;
        break;
    case TASK_POLICY_QOS:
        assert(category == TASK_POLICY_ATTRIBUTE);
        value = requested.thrp_qos;
        break;
    case TASK_POLICY_QOS_OVERRIDE:
        assert(category == TASK_POLICY_ATTRIBUTE);
        value = requested.thrp_qos_override;
        break;
    case TASK_POLICY_LATENCY_QOS:
        assert(category == TASK_POLICY_ATTRIBUTE);
        value = requested.thrp_latency_qos;
        break;
    case TASK_POLICY_THROUGH_QOS:
        assert(category == TASK_POLICY_ATTRIBUTE);
        value = requested.thrp_through_qos;
        break;
    case TASK_POLICY_QOS_WORKQ_OVERRIDE:
        assert(category == TASK_POLICY_ATTRIBUTE);
        value = requested.thrp_qos_workq_override;
        break;
    case TASK_POLICY_QOS_AND_RELPRIO:
        assert(category == TASK_POLICY_ATTRIBUTE);
        assert(value2 != NULL);
        value = requested.thrp_qos;
        *value2 = requested.thrp_qos_relprio;
        break;
    case TASK_POLICY_QOS_PROMOTE:
        assert(category == TASK_POLICY_ATTRIBUTE);
        value = requested.thrp_qos_promote;
        break;
    case TASK_POLICY_QOS_IPC_OVERRIDE:
        assert(category == TASK_POLICY_ATTRIBUTE);
        value = requested.thrp_qos_ipc_override;
        break;
    case TASK_POLICY_TERMINATED:
        assert(category == TASK_POLICY_ATTRIBUTE);
        value = requested.thrp_terminated;
        break;
    default:
        panic("unknown policy_flavor %d", flavor);
        break;
    }

    return value;
}
1991 * Gets what is actually in effect, for subsystems which pull policy instead of receive updates.
1993 * NOTE: This accessor does not take the task or thread lock.
1994 * Notifications of state updates need to be externally synchronized with state queries.
1995 * This routine *MUST* remain interrupt safe, as it is potentially invoked
1996 * within the context of a timer interrupt.
1998 * TODO: I think we can get away with architecting this such that we don't need to look at the task ever.
1999 * Is that a good idea? Maybe it's best to avoid evaluate-all-the-threads updates.
2000 * I don't think that cost is worth not having the right answer.
int
proc_get_effective_thread_policy(thread_t thread,
                                 int      flavor)
{
	int value = 0;

	switch (flavor) {
	case TASK_POLICY_DARWIN_BG:
		/*
		 * This call is used within the timer layer, as well as
		 * prioritizing requests to the graphics system.
		 * It also informs SFI and originator-bg-state.
		 * Returns 1 for background mode, 0 for normal mode
		 */
		value = thread->effective_policy.thep_darwinbg ? 1 : 0;
		break;
	case TASK_POLICY_IO:
		/*
		 * The I/O system calls here to find out what throttling tier to apply to an operation.
		 * Returns THROTTLE_LEVEL_* values
		 */
		value = thread->effective_policy.thep_io_tier;
		if (thread->iotier_override != THROTTLE_LEVEL_NONE)
			value = MIN(value, thread->iotier_override);
		break;
	case TASK_POLICY_PASSIVE_IO:
		/*
		 * The I/O system calls here to find out whether an operation should be passive.
		 * (i.e. not cause operations with lower throttle tiers to be throttled)
		 * Returns 1 for passive mode, 0 for normal mode
		 *
		 * If an override is causing IO to go into a lower tier, we also set
		 * the passive bit so that a thread doesn't end up stuck in its own throttle
		 * window when the override goes away.
		 */
		value = thread->effective_policy.thep_io_passive ? 1 : 0;
		if (thread->iotier_override != THROTTLE_LEVEL_NONE &&
		    thread->iotier_override < thread->effective_policy.thep_io_tier)
			value = 1;
		break;
	case TASK_POLICY_ALL_SOCKETS_BG:
		/*
		 * do_background_socket() calls this to determine whether
		 * it should change the thread's sockets
		 * Returns 1 for background mode, 0 for normal mode
		 * This consults both thread and task so un-DBGing a thread while the task is BG
		 * doesn't get you out of the network throttle.
		 */
		value = (thread->effective_policy.thep_all_sockets_bg ||
		         thread->task->effective_policy.tep_all_sockets_bg) ? 1 : 0;
		break;
	case TASK_POLICY_NEW_SOCKETS_BG:
		/*
		 * socreate() calls this to determine if it should mark a new socket as background
		 * Returns 1 for background mode, 0 for normal mode
		 */
		value = thread->effective_policy.thep_new_sockets_bg ? 1 : 0;
		break;
	case TASK_POLICY_LATENCY_QOS:
		/*
		 * timer arming calls into here to find out the timer coalescing level
		 * Returns a latency QoS tier (0-6)
		 */
		value = thread->effective_policy.thep_latency_qos;
		break;
	case TASK_POLICY_THROUGH_QOS:
		/*
		 * This value is passed into the urgency callout from the scheduler
		 * to the performance management subsystem.
		 *
		 * Returns a throughput QoS tier (0-6)
		 */
		value = thread->effective_policy.thep_through_qos;
		break;
	case TASK_POLICY_QOS:
		/*
		 * This is communicated to the performance management layer and SFI.
		 *
		 * Returns a QoS policy tier
		 */
		value = thread->effective_policy.thep_qos;
		break;
	default:
		panic("unknown thread policy flavor %d", flavor);
		break;
	}

	return value;
}
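/*
 * Illustrative sketch (not part of the original source): one way an I/O
 * submission path could pull the effective throttle tier and passive bit
 * for a thread. The helper name and out-parameter are hypothetical; only
 * proc_get_effective_thread_policy() and the TASK_POLICY_* flavors come
 * from this file.
 */
#if 0 /* example only */
static int
example_effective_io_tier(thread_t thread, boolean_t *passive_out)
{
	/* A THROTTLE_LEVEL_* value, already clamped by any iotier override */
	int tier = proc_get_effective_thread_policy(thread, TASK_POLICY_IO);

	if (passive_out != NULL)
		*passive_out = proc_get_effective_thread_policy(thread, TASK_POLICY_PASSIVE_IO) ? TRUE : FALSE;

	return tier;
}
#endif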
/*
 * (integer_t) casts limit the number of bits we can fit here.
 * This interface is deprecated and replaced by the _EXT struct.
 */
static void
proc_get_thread_policy_bitfield(thread_t thread, thread_policy_state_t info)
{
	uint64_t bits = 0;
	struct thread_requested_policy requested = thread->requested_policy;

	bits |= (requested.thrp_int_darwinbg    ? POLICY_REQ_INT_DARWIN_BG  : 0);
	bits |= (requested.thrp_ext_darwinbg    ? POLICY_REQ_EXT_DARWIN_BG  : 0);
	bits |= (requested.thrp_int_iotier      ? (((uint64_t)requested.thrp_int_iotier) << POLICY_REQ_INT_IO_TIER_SHIFT) : 0);
	bits |= (requested.thrp_ext_iotier      ? (((uint64_t)requested.thrp_ext_iotier) << POLICY_REQ_EXT_IO_TIER_SHIFT) : 0);
	bits |= (requested.thrp_int_iopassive   ? POLICY_REQ_INT_PASSIVE_IO : 0);
	bits |= (requested.thrp_ext_iopassive   ? POLICY_REQ_EXT_PASSIVE_IO : 0);

	bits |= (requested.thrp_qos             ? (((uint64_t)requested.thrp_qos) << POLICY_REQ_TH_QOS_SHIFT) : 0);
	bits |= (requested.thrp_qos_override    ? (((uint64_t)requested.thrp_qos_override) << POLICY_REQ_TH_QOS_OVER_SHIFT) : 0);

	bits |= (requested.thrp_pidbind_bg      ? POLICY_REQ_PIDBIND_BG : 0);

	bits |= (requested.thrp_latency_qos     ? (((uint64_t)requested.thrp_latency_qos) << POLICY_REQ_BASE_LATENCY_QOS_SHIFT) : 0);
	bits |= (requested.thrp_through_qos     ? (((uint64_t)requested.thrp_through_qos) << POLICY_REQ_BASE_THROUGH_QOS_SHIFT) : 0);

	info->requested = (integer_t) bits;
	bits = 0;

	struct thread_effective_policy effective = thread->effective_policy;

	bits |= (effective.thep_darwinbg        ? POLICY_EFF_DARWIN_BG : 0);

	bits |= (effective.thep_io_tier         ? (((uint64_t)effective.thep_io_tier) << POLICY_EFF_IO_TIER_SHIFT) : 0);
	bits |= (effective.thep_io_passive      ? POLICY_EFF_IO_PASSIVE : 0);
	bits |= (effective.thep_all_sockets_bg  ? POLICY_EFF_ALL_SOCKETS_BG : 0);
	bits |= (effective.thep_new_sockets_bg  ? POLICY_EFF_NEW_SOCKETS_BG : 0);

	bits |= (effective.thep_qos             ? (((uint64_t)effective.thep_qos) << POLICY_EFF_TH_QOS_SHIFT) : 0);

	bits |= (effective.thep_latency_qos     ? (((uint64_t)effective.thep_latency_qos) << POLICY_EFF_LATENCY_QOS_SHIFT) : 0);
	bits |= (effective.thep_through_qos     ? (((uint64_t)effective.thep_through_qos) << POLICY_EFF_THROUGH_QOS_SHIFT) : 0);

	info->effective = (integer_t)bits;
}
/*
 * Sneakily trace either the task and thread requested
 * or just the thread requested, depending on if we have enough room.
 * We do have room on LP64. On LP32, we have to split it between two uintptr_t's.
 *
 *                            LP32            LP64
 * threquested_0(thread)      thread[0]       task[0]
 * threquested_1(thread)      thread[1]       thread[0]
 */

uintptr_t
threquested_0(thread_t thread)
{
	static_assert(sizeof(struct thread_requested_policy) == sizeof(uint64_t), "size invariant violated");

	uintptr_t* raw = (uintptr_t*)(void*)&thread->requested_policy;
	return raw[0];
}

uintptr_t
threquested_1(thread_t thread)
{
#if defined __LP64__
	return *(uintptr_t*)&thread->task->requested_policy;
#else
	uintptr_t* raw = (uintptr_t*)(void*)&thread->requested_policy;
	return raw[1];
#endif
}

uintptr_t
theffective_0(thread_t thread)
{
	static_assert(sizeof(struct thread_effective_policy) == sizeof(uint64_t), "size invariant violated");

	uintptr_t* raw = (uintptr_t*)(void*)&thread->effective_policy;
	return raw[0];
}

uintptr_t
theffective_1(thread_t thread)
{
#if defined __LP64__
	return *(uintptr_t*)&thread->task->effective_policy;
#else
	uintptr_t* raw = (uintptr_t*)(void*)&thread->effective_policy;
	return raw[1];
#endif
}
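/*
 * Illustrative sketch (not part of the original source): these helpers are
 * sized so that a tracepoint with pointer-width argument slots can carry the
 * full requested policy on LP32 and both the task and thread policy on LP64.
 * The tracepoint below (and its zero debugid) is hypothetical.
 */
#if 0 /* example only */
static void
example_trace_thread_policy(thread_t thread)
{
	/* LP64: args carry task requested, thread requested, etc.
	 * LP32: args carry the low/high halves of the thread's own policy words. */
	KERNEL_DEBUG_CONSTANT(0 /* hypothetical debugid */,
	                      threquested_0(thread), threquested_1(thread),
	                      theffective_0(thread), theffective_1(thread), 0);
}
#endif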
/*
 * Set an override on the thread which is consulted with a
 * higher priority than the task/thread policy. This should
 * only be set for temporary grants until the thread
 * returns to the userspace boundary.
 *
 * We use atomic operations to swap in the override, with
 * the assumption that the thread itself can
 * read the override and clear it on return to userspace.
 *
 * No locking is performed, since it is acceptable to see
 * a stale override for one loop through throttle_lowpri_io().
 * However a thread reference must be held on the thread.
 */

void set_thread_iotier_override(thread_t thread, int policy)
{
	int current_override;

	/* Let most aggressive I/O policy win until user boundary */
	do {
		current_override = thread->iotier_override;

		if (current_override != THROTTLE_LEVEL_NONE)
			policy = MIN(current_override, policy);

		if (current_override == policy) {
			/* no effective change */
			return;
		}
	} while (!OSCompareAndSwap(current_override, policy, &thread->iotier_override));

	/*
	 * Since the thread may be currently throttled,
	 * re-evaluate tiers and potentially break out
	 * of an msleep
	 */
	rethrottle_thread(thread->uthread);
}
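/*
 * Illustrative sketch (not part of the original source): a waiter holding a
 * reference on the thread that owns a contended resource could donate its
 * own, more aggressive I/O tier for the duration of the wait. The helper
 * name is hypothetical; set_thread_iotier_override() keeps whichever policy
 * is most aggressive, and the owner is expected to clear the override when
 * it returns to the user boundary.
 */
#if 0 /* example only */
static void
example_donate_iotier(thread_t owner, int waiter_tier)
{
	set_thread_iotier_override(owner, waiter_tier);
}
#endif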
/*
 * Userspace synchronization routines (like pthread mutexes, pthread reader-writer locks,
 * semaphores, dispatch_sync) may result in priority inversions where a higher priority
 * thread (i.e. scheduler priority, I/O tier, QoS tier) is waiting on a resource owned by
 * a lower priority thread. In these cases, we attempt to propagate the priority token, as
 * long as the subsystem informs us of the relationships between the threads. The userspace
 * synchronization subsystem should maintain the information of owner->resource and
 * resource->waiters itself.
 */

/*
 * This helper canonicalizes the resource/resource_type given the current qos_override_mode
 * in effect. Note that wildcards (THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD) may need
 * to be handled specially in the future, but for now it's fine to slam
 * *resource to USER_ADDR_NULL even if it was previously a wildcard.
 */
static void canonicalize_resource_and_type(user_addr_t *resource, int *resource_type) {
	if (qos_override_mode == QOS_OVERRIDE_MODE_OVERHANG_PEAK || qos_override_mode == QOS_OVERRIDE_MODE_IGNORE_OVERRIDE) {
		/* Map all input resource/type to a single one */
		*resource = USER_ADDR_NULL;
		*resource_type = THREAD_QOS_OVERRIDE_TYPE_UNKNOWN;
	} else if (qos_override_mode == QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE) {
		/* no transformation needed */
	} else if (qos_override_mode == QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_SINGLE_MUTEX_OVERRIDE) {
		/* Map all mutex overrides to a single one, to avoid memory overhead */
		if (*resource_type == THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX) {
			*resource = USER_ADDR_NULL;
		}
	}
}
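/*
 * Illustrative sketch (not part of the original source): how the boot-selected
 * qos_override_mode changes the key an override is stored under. The body
 * simply restates the branches of canonicalize_resource_and_type() for a
 * pthread-mutex override; the lock address is hypothetical.
 */
#if 0 /* example only */
static void
example_canonicalization(void)
{
	user_addr_t resource = (user_addr_t)0x1000;	/* hypothetical lock address */
	int resource_type = THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX;

	canonicalize_resource_and_type(&resource, &resource_type);
	/*
	 * OVERHANG_PEAK / IGNORE_OVERRIDE:  resource == USER_ADDR_NULL, type == TYPE_UNKNOWN
	 * FINE_GRAINED_OVERRIDE:            resource and type unchanged
	 * ..._BUT_SINGLE_MUTEX_OVERRIDE:    resource == USER_ADDR_NULL, type unchanged
	 */
}
#endif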
/* This helper routine finds an existing override if known. Locking should be done by caller */
static struct thread_qos_override *
find_qos_override(thread_t    thread,
                  user_addr_t resource,
                  int         resource_type)
{
	struct thread_qos_override *override;

	override = thread->overrides;
	while (override) {
		if (override->override_resource == resource &&
		    override->override_resource_type == resource_type) {
			return override;
		}

		override = override->override_next;
	}

	return NULL;
}
static void
find_and_decrement_qos_override(thread_t    thread,
                                user_addr_t resource,
                                int         resource_type,
                                boolean_t   reset,
                                struct thread_qos_override **free_override_list)
{
	struct thread_qos_override *override, *override_prev;

	override_prev = NULL;
	override = thread->overrides;
	while (override) {
		struct thread_qos_override *override_next = override->override_next;

		if ((THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD == resource || override->override_resource == resource) &&
		    (THREAD_QOS_OVERRIDE_TYPE_WILDCARD == resource_type || override->override_resource_type == resource_type)) {

			if (reset) {
				override->override_contended_resource_count = 0;
			} else {
				override->override_contended_resource_count--;
			}

			if (override->override_contended_resource_count == 0) {
				if (override_prev == NULL) {
					thread->overrides = override_next;
				} else {
					override_prev->override_next = override_next;
				}

				/* Add to out-param for later zfree */
				override->override_next = *free_override_list;
				*free_override_list = override;
			} else {
				override_prev = override;
			}

			if (THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD != resource) {
				return;
			}
		} else {
			override_prev = override;
		}

		override = override_next;
	}
}
/* This helper recalculates the current requested override using the policy selected at boot */
static int
calculate_requested_qos_override(thread_t thread)
{
	if (qos_override_mode == QOS_OVERRIDE_MODE_IGNORE_OVERRIDE) {
		return THREAD_QOS_UNSPECIFIED;
	}

	/* Iterate over all overrides and calculate the MAX */
	struct thread_qos_override *override;
	int qos_override = THREAD_QOS_UNSPECIFIED;

	override = thread->overrides;
	while (override) {
		qos_override = MAX(qos_override, override->override_qos);
		override = override->override_next;
	}

	return qos_override;
}
/*
 * Returns:
 * - 0 on success
 * - EINVAL if some invalid input was passed
 */
static int
proc_thread_qos_add_override_internal(thread_t    thread,
                                      int         override_qos,
                                      boolean_t   first_override_for_resource,
                                      user_addr_t resource,
                                      int         resource_type)
{
	struct task_pend_token pend_token = {};
	int rc = 0;

	thread_mtx_lock(thread);

	KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_ADD_OVERRIDE)) | DBG_FUNC_START,
	                      thread_tid(thread), override_qos, first_override_for_resource ? 1 : 0, 0, 0);

	DTRACE_BOOST5(qos_add_override_pre, uint64_t, thread_tid(thread),
	              uint64_t, thread->requested_policy.thrp_qos,
	              uint64_t, thread->effective_policy.thep_qos,
	              int, override_qos, boolean_t, first_override_for_resource);

	struct thread_qos_override *override;
	struct thread_qos_override *override_new = NULL;
	int new_qos_override, prev_qos_override;
	int new_effective_qos;

	canonicalize_resource_and_type(&resource, &resource_type);

	override = find_qos_override(thread, resource, resource_type);
	if (first_override_for_resource && !override) {
		/* We need to allocate a new object. Drop the thread lock and
		 * recheck afterwards in case someone else added the override
		 */
		thread_mtx_unlock(thread);
		override_new = zalloc(thread_qos_override_zone);
		thread_mtx_lock(thread);
		override = find_qos_override(thread, resource, resource_type);
	}

	if (first_override_for_resource && override) {
		/* Someone else already allocated while the thread lock was dropped */
		override->override_contended_resource_count++;
	} else if (!override && override_new) {
		override = override_new;
		override_new = NULL;
		override->override_next = thread->overrides;
		/* since first_override_for_resource was TRUE */
		override->override_contended_resource_count = 1;
		override->override_resource = resource;
		override->override_resource_type = resource_type;
		override->override_qos = THREAD_QOS_UNSPECIFIED;
		thread->overrides = override;
	}

	if (override) {
		if (override->override_qos == THREAD_QOS_UNSPECIFIED)
			override->override_qos = override_qos;
		else
			override->override_qos = MAX(override->override_qos, override_qos);
	}

	/* Determine how to combine the various overrides into a single current
	 * requested override
	 */
	new_qos_override = calculate_requested_qos_override(thread);

	prev_qos_override = proc_get_thread_policy_locked(thread,
	                    TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_OVERRIDE, NULL);

	if (new_qos_override != prev_qos_override) {
		proc_set_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE,
		                              TASK_POLICY_QOS_OVERRIDE,
		                              new_qos_override, 0, &pend_token);
	}

	new_effective_qos = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS);

	thread_mtx_unlock(thread);

	thread_policy_update_complete_unlocked(thread, &pend_token);

	if (override_new) {
		zfree(thread_qos_override_zone, override_new);
	}

	DTRACE_BOOST4(qos_add_override_post, int, prev_qos_override,
	              int, new_qos_override, int, new_effective_qos, int, rc);

	KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_ADD_OVERRIDE)) | DBG_FUNC_END,
	                      new_qos_override, resource, resource_type, 0, 0);

	return rc;
}
int
proc_thread_qos_add_override(task_t      task,
                             thread_t    thread,
                             uint64_t    tid,
                             int         override_qos,
                             boolean_t   first_override_for_resource,
                             user_addr_t resource,
                             int         resource_type)
{
	boolean_t has_thread_reference = FALSE;
	int rc = 0;

	if (thread == THREAD_NULL) {
		thread = task_findtid(task, tid);
		/* returns referenced thread */

		if (thread == THREAD_NULL) {
			KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_ADD_OVERRIDE)) | DBG_FUNC_NONE,
			                      tid, 0, 0xdead, 0, 0);
			return ESRCH;
		}
		has_thread_reference = TRUE;
	} else {
		assert(thread->task == task);
	}

	rc = proc_thread_qos_add_override_internal(thread, override_qos,
	                                           first_override_for_resource, resource, resource_type);

	if (has_thread_reference) {
		thread_deallocate(thread);
	}

	return rc;
}
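/*
 * Illustrative sketch (not part of the original source): the pairing a
 * user-synchronization subsystem is expected to follow. Each add for a
 * resource is balanced by a remove once the owner releases it, and the
 * tid-lookup path is used when only the owner's thread ID is known. All
 * names other than the two KPIs and the constants are hypothetical.
 */
#if 0 /* example only */
static void
example_boost_lock_owner(task_t task, uint64_t owner_tid, user_addr_t lock_addr, int waiter_qos)
{
	/* First waiter on this lock: first_override_for_resource = TRUE */
	proc_thread_qos_add_override(task, THREAD_NULL, owner_tid, waiter_qos,
	                             TRUE, lock_addr, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX);

	/* ... later, when the owner releases the lock ... */
	proc_thread_qos_remove_override(task, THREAD_NULL, owner_tid,
	                                lock_addr, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX);
}
#endif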
static void
proc_thread_qos_remove_override_internal(thread_t    thread,
                                         user_addr_t resource,
                                         int         resource_type,
                                         boolean_t   reset)
{
	struct task_pend_token pend_token = {};

	struct thread_qos_override *deferred_free_override_list = NULL;
	int new_qos_override, prev_qos_override, new_effective_qos;

	thread_mtx_lock(thread);

	canonicalize_resource_and_type(&resource, &resource_type);

	find_and_decrement_qos_override(thread, resource, resource_type, reset, &deferred_free_override_list);

	KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_REMOVE_OVERRIDE)) | DBG_FUNC_START,
	                      thread_tid(thread), resource, reset, 0, 0);

	DTRACE_BOOST3(qos_remove_override_pre, uint64_t, thread_tid(thread),
	              uint64_t, thread->requested_policy.thrp_qos,
	              uint64_t, thread->effective_policy.thep_qos);

	/* Determine how to combine the various overrides into a single current requested override */
	new_qos_override = calculate_requested_qos_override(thread);

	spl_t s = splsched();
	thread_lock(thread);

	/*
	 * The override chain and therefore the value of the current override is locked with thread mutex,
	 * so we can do a get/set without races. However, the rest of thread policy is locked under the spinlock.
	 * This means you can't change the current override from a spinlock-only setter.
	 */
	prev_qos_override = thread_get_requested_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_OVERRIDE, NULL);

	if (new_qos_override != prev_qos_override)
		proc_set_thread_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_OVERRIDE, new_qos_override, 0, &pend_token);

	new_effective_qos = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS);

	thread_unlock(thread);
	splx(s);

	thread_mtx_unlock(thread);

	thread_policy_update_complete_unlocked(thread, &pend_token);

	while (deferred_free_override_list) {
		struct thread_qos_override *override_next = deferred_free_override_list->override_next;

		zfree(thread_qos_override_zone, deferred_free_override_list);
		deferred_free_override_list = override_next;
	}

	DTRACE_BOOST3(qos_remove_override_post, int, prev_qos_override,
	              int, new_qos_override, int, new_effective_qos);

	KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_REMOVE_OVERRIDE)) | DBG_FUNC_END,
	                      thread_tid(thread), 0, 0, 0, 0);
}
int
proc_thread_qos_remove_override(task_t      task,
                                thread_t    thread,
                                uint64_t    tid,
                                user_addr_t resource,
                                int         resource_type)
{
	boolean_t has_thread_reference = FALSE;

	if (thread == THREAD_NULL) {
		thread = task_findtid(task, tid);
		/* returns referenced thread */

		if (thread == THREAD_NULL) {
			KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_REMOVE_OVERRIDE)) | DBG_FUNC_NONE,
			                      tid, 0, 0xdead, 0, 0);
			return ESRCH;
		}
		has_thread_reference = TRUE;
	} else {
		assert(task == thread->task);
	}

	proc_thread_qos_remove_override_internal(thread, resource, resource_type, FALSE);

	if (has_thread_reference)
		thread_deallocate(thread);

	return 0;
}
/* Deallocate before thread termination */
void proc_thread_qos_deallocate(thread_t thread)
{
	/* This thread must have no more IPC overrides. */
	assert(thread->ipc_overrides == 0);
	assert(thread->requested_policy.thrp_qos_ipc_override == THREAD_QOS_UNSPECIFIED);
	assert(thread->sync_ipc_overrides == 0);
	assert(thread->requested_policy.thrp_qos_sync_ipc_override == THREAD_QOS_UNSPECIFIED);

	/*
	 * Clear out any lingering override objects.
	 */
	struct thread_qos_override *override;

	thread_mtx_lock(thread);
	override = thread->overrides;
	thread->overrides = NULL;
	thread->requested_policy.thrp_qos_override = THREAD_QOS_UNSPECIFIED;
	/* We don't need to re-evaluate thread policy here because the thread has already exited */
	thread_mtx_unlock(thread);

	while (override) {
		struct thread_qos_override *override_next = override->override_next;

		zfree(thread_qos_override_zone, override);
		override = override_next;
	}
}
/*
 * Set up the primordial thread's QoS
 */
void
task_set_main_thread_qos(task_t task, thread_t thread) {
	struct task_pend_token pend_token = {};

	assert(thread->task == task);

	thread_mtx_lock(thread);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	                          (IMPORTANCE_CODE(IMP_MAIN_THREAD_QOS, 0)) | DBG_FUNC_START,
	                          thread_tid(thread), threquested_0(thread), threquested_1(thread),
	                          thread->requested_policy.thrp_qos, 0);

	int primordial_qos = task_compute_main_thread_qos(task);

	proc_set_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS,
	                              primordial_qos, 0, &pend_token);

	thread_mtx_unlock(thread);

	thread_policy_update_complete_unlocked(thread, &pend_token);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	                          (IMPORTANCE_CODE(IMP_MAIN_THREAD_QOS, 0)) | DBG_FUNC_END,
	                          thread_tid(thread), threquested_0(thread), threquested_1(thread),
	                          primordial_qos, 0);
}
/*
 * KPI for pthread kext
 *
 * Return a good guess at what the initial manager QoS will be.
 * Dispatch can override this in userspace if it so chooses.
 */
int
task_get_default_manager_qos(task_t task)
{
	int primordial_qos = task_compute_main_thread_qos(task);

	if (primordial_qos == THREAD_QOS_LEGACY)
		primordial_qos = THREAD_QOS_USER_INITIATED;

	return primordial_qos;
}
/*
 * Check if the user promotion on the thread has changed
 * and apply it.
 *
 * thread locked on entry, might drop the thread lock
 * and reacquire it.
 */
boolean_t
thread_recompute_user_promotion_locked(thread_t thread)
{
	boolean_t needs_update = FALSE;
	struct task_pend_token pend_token = {};
	int user_promotion_basepri = MIN(thread_get_inheritor_turnstile_priority(thread), MAXPRI_USER);
	int old_base_pri = thread->base_pri;
	thread_qos_t qos_promotion;

	/* Check if user promotion has changed */
	if (thread->user_promotion_basepri == user_promotion_basepri) {
		return needs_update;
	}

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (TURNSTILE_CODE(TURNSTILE_PRIORITY_OPERATIONS, (THREAD_USER_PROMOTION_CHANGE))) | DBG_FUNC_NONE,
	    thread_tid(thread),
	    user_promotion_basepri,
	    thread->user_promotion_basepri,
	    0, 0);

	/* Update the user promotion base pri */
	thread->user_promotion_basepri = user_promotion_basepri;
	pend_token.tpt_force_recompute_pri = 1;

	if (user_promotion_basepri <= MAXPRI_THROTTLE) {
		qos_promotion = THREAD_QOS_UNSPECIFIED;
	} else {
		qos_promotion = thread_user_promotion_qos_for_pri(user_promotion_basepri);
	}

	proc_set_thread_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE,
	    TASK_POLICY_QOS_PROMOTE, qos_promotion, 0, &pend_token);

	if (thread_get_waiting_turnstile(thread) &&
	    thread->base_pri != old_base_pri) {
		needs_update = TRUE;
	}

	thread_unlock(thread);

	thread_policy_update_complete_unlocked(thread, &pend_token);

	thread_lock(thread);

	return needs_update;
}
/*
 * Convert the thread user promotion base pri to a QoS for threads in the QoS world.
 * For priorities above the UI QoS, the QoS is clamped to UI.
 */
thread_qos_t
thread_user_promotion_qos_for_pri(int priority)
{
	int qos;

	for (qos = THREAD_QOS_USER_INTERACTIVE; qos > THREAD_QOS_MAINTENANCE; qos--) {
		if (thread_qos_policy_params.qos_pri[qos] <= priority) {
			return qos;
		}
	}
	return THREAD_QOS_MAINTENANCE;
}
/*
 * Set the thread's QoS IPC override
 * Owned by the IPC subsystem
 *
 * May be called with spinlocks held, but not spinlocks
 * that may deadlock against the thread lock, the throttle lock, or the SFI lock.
 *
 * One 'add' must be balanced by one 'drop'.
 * Between 'add' and 'drop', the override QoS value may be updated with an 'update'.
 * Before the thread is deallocated, there must be 0 remaining overrides.
 */
static void
thread_ipc_override(thread_t    thread,
                    uint32_t    qos_override,
                    boolean_t   is_new_override)
{
	struct task_pend_token pend_token = {};
	boolean_t needs_update;

	spl_t s = splsched();
	thread_lock(thread);

	uint32_t old_override = thread->requested_policy.thrp_qos_ipc_override;

	assert(qos_override > THREAD_QOS_UNSPECIFIED);
	assert(qos_override < THREAD_QOS_LAST);

	if (is_new_override) {
		if (thread->ipc_overrides++ == 0) {
			/* This add is the first override for this thread */
			assert(old_override == THREAD_QOS_UNSPECIFIED);
		} else {
			/* There are already other overrides in effect for this thread */
			assert(old_override > THREAD_QOS_UNSPECIFIED);
		}
	} else {
		/* There must be at least one override (the previous add call) in effect */
		assert(thread->ipc_overrides > 0);
		assert(old_override > THREAD_QOS_UNSPECIFIED);
	}

	/*
	 * We can't allow lowering if there are several IPC overrides because
	 * the caller can't possibly know the whole truth
	 */
	if (thread->ipc_overrides == 1) {
		needs_update = qos_override != old_override;
	} else {
		needs_update = qos_override > old_override;
	}

	if (needs_update) {
		proc_set_thread_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE,
		                                  TASK_POLICY_QOS_IPC_OVERRIDE,
		                                  qos_override, 0, &pend_token);
		assert(pend_token.tpt_update_sockets == 0);
	}

	thread_unlock(thread);
	splx(s);

	thread_policy_update_complete_unlocked(thread, &pend_token);
}
void
thread_add_ipc_override(thread_t    thread,
                        uint32_t    qos_override)
{
	thread_ipc_override(thread, qos_override, TRUE);
}

void
thread_update_ipc_override(thread_t    thread,
                           uint32_t    qos_override)
{
	thread_ipc_override(thread, qos_override, FALSE);
}

void
thread_drop_ipc_override(thread_t thread)
{
	struct task_pend_token pend_token = {};

	spl_t s = splsched();
	thread_lock(thread);

	assert(thread->ipc_overrides > 0);

	if (--thread->ipc_overrides == 0) {
		/*
		 * There are no more overrides for this thread, so we should
		 * clear out the saturated override value
		 */
		proc_set_thread_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE,
		                                  TASK_POLICY_QOS_IPC_OVERRIDE, THREAD_QOS_UNSPECIFIED,
		                                  0, &pend_token);
	}

	thread_unlock(thread);
	splx(s);

	thread_policy_update_complete_unlocked(thread, &pend_token);
}
/* Get current requested qos / relpri, may be called from spinlock context */
thread_qos_t
thread_get_requested_qos(thread_t thread, int *relpri)
{
	int relprio_value = 0;
	thread_qos_t qos;

	qos = proc_get_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE,
	                                    TASK_POLICY_QOS_AND_RELPRIO, &relprio_value);
	if (relpri) *relpri = -relprio_value;
	return qos;
}
/*
 * This function will promote the thread priority,
 * since exec could block other threads calling
 * proc_find on the proc. This boost must be removed
 * via a call to thread_clear_exec_promotion.
 *
 * This should be replaced with a generic 'priority inheriting gate' mechanism (24194397)
 */
void
thread_set_exec_promotion(thread_t thread)
{
	spl_t s = splsched();
	thread_lock(thread);

	sched_thread_promote_reason(thread, TH_SFLAG_EXEC_PROMOTED, 0);

	thread_unlock(thread);
	splx(s);
}

/*
 * This function will clear the exec thread
 * promotion set on the thread by thread_set_exec_promotion.
 */
void
thread_clear_exec_promotion(thread_t thread)
{
	spl_t s = splsched();
	thread_lock(thread);

	sched_thread_unpromote_reason(thread, TH_SFLAG_EXEC_PROMOTED, 0);

	thread_unlock(thread);
	splx(s);
}
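/*
 * Illustrative sketch (not part of the original source): exec-time callers
 * are expected to bracket the section that can block proc_find() callers
 * with a set/clear pair. The function below is hypothetical.
 */
#if 0 /* example only */
static void
example_exec_critical_section(thread_t self)
{
	thread_set_exec_promotion(self);
	/* ... perform the exec work that other threads may be waiting on ... */
	thread_clear_exec_promotion(self);
}
#endif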