/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/task_server.h>

#include <kern/sched.h>
#include <kern/task.h>
#include <mach/thread_policy.h>
#include <sys/errno.h>
#include <sys/resource.h>
#include <machine/limits.h>
#include <kern/ledger.h>
#include <kern/thread_call.h>
#include <kern/coalition.h>
#include <kern/telemetry.h>

#if IMPORTANCE_INHERITANCE
#include <ipc/ipc_importance.h>
#if IMPORTANCE_DEBUG
#include <mach/machine/sdt.h>
#endif /* IMPORTANCE_DEBUG */
#endif /* IMPORTANCE_INHERITANCE */

#include <sys/kdebug.h>
/*
 * This subsystem manages task and thread IO priority and backgrounding,
 * as well as importance inheritance, process suppression, task QoS, and apptype.
 * These properties have a surprising number of complex interactions, so they are
 * centralized here in one state machine to simplify the implementation of those interactions.
 *
 * Threads and tasks have three policy fields: requested, effective, and pending.
 * Requested represents the wishes of each interface that influences task policy.
 * Effective represents the distillation of that policy into a set of behaviors.
 * Pending represents updates that haven't been applied yet.
 *
 * Each interface that has an input into the task policy state machine controls a field in requested.
 * If the interface has a getter, it returns what is in the field in requested, but that is
 * not necessarily what is actually in effect.
 *
 * All kernel subsystems that behave differently based on task policy call into
 * the get_effective_policy function, which returns the decision of the task policy state machine
 * for that subsystem by querying only the 'effective' field.
 *
 * Policy change operations:
 * Here are the steps to change a policy on a task or thread:
 * 1) Lock the task
 * 2) Change the requested field for the relevant policy
 * 3) Run a task policy update, which recalculates effective based on requested,
 *    then takes a diff between the old and new versions of requested and calls the relevant
 *    other subsystems to apply these changes, and updates the pending field.
 * 4) Unlock the task
 * 5) Run task policy update complete, which looks at the pending field to update
 *    subsystems which cannot be touched while holding the task lock.
 *
 * To add a new requested policy, add the field in the requested struct, the flavor in task.h,
 * the setter and getter in proc_(set|get)_task_policy*, and dump the state in task_requested_bitfield,
 * then set up the effects of that behavior in task_policy_update*. If the policy manifests
 * itself as a distinct effective policy, add it to the effective struct and add it to the
 * proc_get_effective_policy accessor.
 *
 * Most policies are set via proc_set_task_policy, but policies that don't fit that interface
 * roll their own lock/set/update/unlock/complete code inside this file.
 *
 * Suppression policy:
 * These are a set of behaviors that can be requested for a task. They currently have specific
 * implied actions when they're enabled, but they may be made customizable in the future.
 *
 * When the affected task is boosted, we temporarily disable the suppression behaviors
 * so that the affected process has a chance to run so it can call the API to permanently
 * disable the suppression behaviors.
 *
 * Locking:
 * Changing task policy on a task or thread takes the task lock, and not the thread lock.
 * TODO: Should changing policy on a thread take the thread lock instead?
 *
 * Querying the effective policy does not take the task lock, to prevent deadlocks or slowdown in sensitive code.
 * This means that any notification of state change needs to be externally synchronized.
 */
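
/*
 * Illustrative sketch (not compiled): the lock/set/update/unlock/complete
 * sequence described above, expressed with the helpers declared later in
 * this file.  The flavor/value pair below is a placeholder, not a real
 * policy change.
 */
#if 0
static void
example_task_policy_change(task_t task, int flavor, int value)
{
	struct task_pend_token pend_token = {};

	task_lock(task);                                /* 1) lock the task        */
	proc_set_task_policy_locked(task, THREAD_NULL,
	    TASK_POLICY_ATTRIBUTE, flavor, value);      /* 2) change 'requested'   */
	task_policy_update_locked(task, THREAD_NULL,
	    &pend_token);                               /* 3) recompute 'effective' */
	task_unlock(task);                              /* 4) unlock the task      */
	task_policy_update_complete_unlocked(task,
	    THREAD_NULL, &pend_token);                  /* 5) apply pended updates */
}
#endif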
extern const qos_policy_params_t thread_qos_policy_params;

/* for task holds without dropping the lock */
extern void task_hold_locked(task_t task);
extern void task_release_locked(task_t task);
extern void task_wait_locked(task_t task, boolean_t until_not_runnable);

extern void thread_recompute_qos(thread_t thread);

/* Task policy related helper functions */
static void proc_set_task_policy_locked(task_t task, thread_t thread, int category, int flavor, int value);
static void proc_set_task_policy2_locked(task_t task, thread_t thread, int category, int flavor, int value1, int value2);

static void task_policy_update_locked(task_t task, thread_t thread, task_pend_token_t pend_token);
static void task_policy_update_internal_locked(task_t task, thread_t thread, boolean_t in_create, task_pend_token_t pend_token);
static void task_policy_update_task_locked(task_t task, boolean_t update_throttle, boolean_t update_bg_throttle, boolean_t update_sfi);
static void task_policy_update_thread_locked(thread_t thread, int update_cpu, boolean_t update_throttle, boolean_t update_sfi, boolean_t update_qos);

static boolean_t task_policy_update_coalition_focal_tasks(task_t task, int prev_role, int next_role);

static int proc_get_effective_policy(task_t task, thread_t thread, int policy);

static void proc_iopol_to_tier(int iopolicy, int *tier, int *passive);
static int proc_tier_to_iopol(int tier, int passive);

static uintptr_t trequested_0(task_t task, thread_t thread);
static uintptr_t trequested_1(task_t task, thread_t thread);
static uintptr_t teffective_0(task_t task, thread_t thread);
static uintptr_t teffective_1(task_t task, thread_t thread);
static uint32_t tpending(task_pend_token_t pend_token);
static uint64_t task_requested_bitfield(task_t task, thread_t thread);
static uint64_t task_effective_bitfield(task_t task, thread_t thread);

void proc_get_thread_policy(thread_t thread, thread_policy_state_t info);

/* CPU Limits related helper functions */
static int task_get_cpuusage(task_t task, uint8_t *percentagep, uint64_t *intervalp, uint64_t *deadlinep, int *scope);
int task_set_cpuusage(task_t task, uint8_t percentage, uint64_t interval, uint64_t deadline, int scope, int entitled);
static int task_clear_cpuusage_locked(task_t task, int cpumon_entitled);
int task_disable_cpumon(task_t task);
static int task_apply_resource_actions(task_t task, int type);
void task_action_cpuusage(thread_call_param_t param0, thread_call_param_t param1);
void proc_init_cpumon_params(void);

#ifdef MACH_BSD
int proc_pid(void *proc);
extern int proc_selfpid(void);
extern char *proc_name_address(void *p);
extern void rethrottle_thread(void *uthread);
extern void proc_apply_task_networkbg(void *bsd_info, thread_t thread);
#endif /* MACH_BSD */

extern zone_t thread_qos_override_zone;
static boolean_t _proc_thread_qos_remove_override_internal(task_t task, thread_t thread, uint64_t tid, user_addr_t resource, int resource_type, boolean_t reset);

/* Importance Inheritance related helper functions */

#if IMPORTANCE_INHERITANCE
static void task_add_importance_watchport(task_t task, mach_port_t port, int *boostp);
static void task_importance_update_live_donor(task_t target_task);
#endif /* IMPORTANCE_INHERITANCE */
#if IMPORTANCE_DEBUG
#define __impdebug_only
#else
#define __impdebug_only __unused
#endif

#if IMPORTANCE_INHERITANCE
#define __imp_only
#else
#define __imp_only __unused
#endif

#define TASK_LOCKED   1
#define TASK_UNLOCKED 0

#define DO_LOWPRI_CPU   1
#define UNDO_LOWPRI_CPU 2

/* Macros for making tracing simpler */

#define tpriority(task, thread)  ((uintptr_t)(thread == THREAD_NULL ? (task->priority) : (thread->base_pri)))
#define tisthread(thread)        (thread == THREAD_NULL ? TASK_POLICY_TASK : TASK_POLICY_THREAD)
#define targetid(task, thread)   ((uintptr_t)(thread == THREAD_NULL ? (task_pid(task)) : (thread->thread_id)))

/*
 * Default parameters for certain policies
 */

int proc_standard_daemon_tier = THROTTLE_LEVEL_TIER1;
int proc_suppressed_disk_tier = THROTTLE_LEVEL_TIER1;
int proc_tal_disk_tier        = THROTTLE_LEVEL_TIER1;

int proc_graphics_timer_qos   = (LATENCY_QOS_TIER_0 & 0xFF);

const int proc_default_bg_iotier = THROTTLE_LEVEL_TIER2;

/* Latency/throughput QoS fields remain zeroed, i.e. TIER_UNSPECIFIED at creation */
const struct task_requested_policy default_task_requested_policy = {
	.bg_iotier = proc_default_bg_iotier
};
const struct task_effective_policy default_task_effective_policy = {};
const struct task_pended_policy default_task_pended_policy = {};

/*
 * Default parameters for CPU usage monitor.
 *
 * Default setting is 50% over 3 minutes.
 */
#define DEFAULT_CPUMON_PERCENTAGE 50
#define DEFAULT_CPUMON_INTERVAL   (3 * 60)

uint8_t  proc_max_cpumon_percentage;
uint64_t proc_max_cpumon_interval;
kern_return_t
qos_latency_policy_validate(task_latency_qos_t ltier)
{
	if ((ltier != LATENCY_QOS_TIER_UNSPECIFIED) &&
	    ((ltier > LATENCY_QOS_TIER_5) || (ltier < LATENCY_QOS_TIER_0)))
		return KERN_INVALID_ARGUMENT;

	return KERN_SUCCESS;
}

kern_return_t
qos_throughput_policy_validate(task_throughput_qos_t ttier)
{
	if ((ttier != THROUGHPUT_QOS_TIER_UNSPECIFIED) &&
	    ((ttier > THROUGHPUT_QOS_TIER_5) || (ttier < THROUGHPUT_QOS_TIER_0)))
		return KERN_INVALID_ARGUMENT;

	return KERN_SUCCESS;
}

static kern_return_t
task_qos_policy_validate(task_qos_policy_t qosinfo, mach_msg_type_number_t count)
{
	if (count < TASK_QOS_POLICY_COUNT)
		return KERN_INVALID_ARGUMENT;

	task_latency_qos_t ltier = qosinfo->task_latency_qos_tier;
	task_throughput_qos_t ttier = qosinfo->task_throughput_qos_tier;

	kern_return_t kr = qos_latency_policy_validate(ltier);

	if (kr != KERN_SUCCESS)
		return kr;

	kr = qos_throughput_policy_validate(ttier);

	return kr;
}

uint32_t
qos_extract(uint32_t qv)
{
	return (qv & 0xFF);
}

uint32_t
qos_latency_policy_package(uint32_t qv)
{
	return (qv == LATENCY_QOS_TIER_UNSPECIFIED) ? LATENCY_QOS_TIER_UNSPECIFIED : ((0xFF << 16) | qv);
}

uint32_t
qos_throughput_policy_package(uint32_t qv)
{
	return (qv == THROUGHPUT_QOS_TIER_UNSPECIFIED) ? THROUGHPUT_QOS_TIER_UNSPECIFIED : ((0xFE << 16) | qv);
}
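
/*
 * Illustrative sketch (not compiled): LATENCY_QOS_TIER_* and
 * THROUGHPUT_QOS_TIER_* constants carry a tag in their upper bits;
 * qos_extract() strips the tag and qos_*_policy_package() restores it,
 * so the two round-trip as shown below.
 */
#if 0
static void
example_qos_roundtrip(void)
{
	uint32_t raw = qos_extract(LATENCY_QOS_TIER_3);
	assert(qos_latency_policy_package(raw) == LATENCY_QOS_TIER_3);
}
#endif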
/* TEMPORARY boot-arg controlling task_policy suppression (App Nap) */
static boolean_t task_policy_suppression_disable = FALSE;
295 task_policy_flavor_t flavor
,
296 task_policy_t policy_info
,
297 mach_msg_type_number_t count
)
299 kern_return_t result
= KERN_SUCCESS
;
301 if (task
== TASK_NULL
|| task
== kernel_task
)
302 return (KERN_INVALID_ARGUMENT
);
306 case TASK_CATEGORY_POLICY
: {
307 task_category_policy_t info
= (task_category_policy_t
)policy_info
;
309 if (count
< TASK_CATEGORY_POLICY_COUNT
)
310 return (KERN_INVALID_ARGUMENT
);
314 case TASK_FOREGROUND_APPLICATION
:
315 case TASK_BACKGROUND_APPLICATION
:
316 case TASK_DEFAULT_APPLICATION
:
317 proc_set_task_policy(task
, THREAD_NULL
,
318 TASK_POLICY_ATTRIBUTE
, TASK_POLICY_ROLE
,
322 case TASK_CONTROL_APPLICATION
:
323 if (task
!= current_task() || task
->sec_token
.val
[0] != 0)
324 result
= KERN_INVALID_ARGUMENT
;
326 proc_set_task_policy(task
, THREAD_NULL
,
327 TASK_POLICY_ATTRIBUTE
, TASK_POLICY_ROLE
,
331 case TASK_GRAPHICS_SERVER
:
332 /* TODO: Restrict this role to FCFS <rdar://problem/12552788> */
333 if (task
!= current_task() || task
->sec_token
.val
[0] != 0)
334 result
= KERN_INVALID_ARGUMENT
;
336 proc_set_task_policy(task
, THREAD_NULL
,
337 TASK_POLICY_ATTRIBUTE
, TASK_POLICY_ROLE
,
341 result
= KERN_INVALID_ARGUMENT
;
343 } /* switch (info->role) */
348 /* Desired energy-efficiency/performance "quality-of-service" */
349 case TASK_BASE_QOS_POLICY
:
350 case TASK_OVERRIDE_QOS_POLICY
:
352 task_qos_policy_t qosinfo
= (task_qos_policy_t
)policy_info
;
353 kern_return_t kr
= task_qos_policy_validate(qosinfo
, count
);
355 if (kr
!= KERN_SUCCESS
)
359 uint32_t lqos
= qos_extract(qosinfo
->task_latency_qos_tier
);
360 uint32_t tqos
= qos_extract(qosinfo
->task_throughput_qos_tier
);
362 proc_set_task_policy2(task
, THREAD_NULL
, TASK_POLICY_ATTRIBUTE
,
363 flavor
== TASK_BASE_QOS_POLICY
? TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS
: TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS
,
368 case TASK_BASE_LATENCY_QOS_POLICY
:
370 task_qos_policy_t qosinfo
= (task_qos_policy_t
)policy_info
;
371 kern_return_t kr
= task_qos_policy_validate(qosinfo
, count
);
373 if (kr
!= KERN_SUCCESS
)
376 uint32_t lqos
= qos_extract(qosinfo
->task_latency_qos_tier
);
378 proc_set_task_policy(task
, NULL
, TASK_POLICY_ATTRIBUTE
, TASK_BASE_LATENCY_QOS_POLICY
, lqos
);
382 case TASK_BASE_THROUGHPUT_QOS_POLICY
:
384 task_qos_policy_t qosinfo
= (task_qos_policy_t
)policy_info
;
385 kern_return_t kr
= task_qos_policy_validate(qosinfo
, count
);
387 if (kr
!= KERN_SUCCESS
)
390 uint32_t tqos
= qos_extract(qosinfo
->task_throughput_qos_tier
);
392 proc_set_task_policy(task
, NULL
, TASK_POLICY_ATTRIBUTE
, TASK_BASE_THROUGHPUT_QOS_POLICY
, tqos
);
396 case TASK_SUPPRESSION_POLICY
:
399 task_suppression_policy_t info
= (task_suppression_policy_t
)policy_info
;
401 if (count
< TASK_SUPPRESSION_POLICY_COUNT
)
402 return (KERN_INVALID_ARGUMENT
);
404 struct task_qos_policy qosinfo
;
406 qosinfo
.task_latency_qos_tier
= info
->timer_throttle
;
407 qosinfo
.task_throughput_qos_tier
= info
->throughput_qos
;
409 kern_return_t kr
= task_qos_policy_validate(&qosinfo
, TASK_QOS_POLICY_COUNT
);
411 if (kr
!= KERN_SUCCESS
)
414 /* TEMPORARY disablement of task suppression */
415 if (task_policy_suppression_disable
&& info
->active
)
418 struct task_pend_token pend_token
= {};
422 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
423 (IMPORTANCE_CODE(IMP_TASK_SUPPRESSION
, info
->active
)) | DBG_FUNC_START
,
424 proc_selfpid(), task_pid(task
), trequested_0(task
, THREAD_NULL
),
425 trequested_1(task
, THREAD_NULL
), 0);
427 task
->requested_policy
.t_sup_active
= (info
->active
) ? 1 : 0;
428 task
->requested_policy
.t_sup_lowpri_cpu
= (info
->lowpri_cpu
) ? 1 : 0;
429 task
->requested_policy
.t_sup_timer
= qos_extract(info
->timer_throttle
);
430 task
->requested_policy
.t_sup_disk
= (info
->disk_throttle
) ? 1 : 0;
431 task
->requested_policy
.t_sup_cpu_limit
= (info
->cpu_limit
) ? 1 : 0;
432 task
->requested_policy
.t_sup_suspend
= (info
->suspend
) ? 1 : 0;
433 task
->requested_policy
.t_sup_throughput
= qos_extract(info
->throughput_qos
);
434 task
->requested_policy
.t_sup_cpu
= (info
->suppressed_cpu
) ? 1 : 0;
435 task
->requested_policy
.t_sup_bg_sockets
= (info
->background_sockets
) ? 1 : 0;
437 task_policy_update_locked(task
, THREAD_NULL
, &pend_token
);
441 task_policy_update_complete_unlocked(task
, THREAD_NULL
, &pend_token
);
443 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
444 (IMPORTANCE_CODE(IMP_TASK_SUPPRESSION
, info
->active
)) | DBG_FUNC_END
,
445 proc_selfpid(), task_pid(task
), trequested_0(task
, THREAD_NULL
),
446 trequested_1(task
, THREAD_NULL
), 0);
453 result
= KERN_INVALID_ARGUMENT
;
460 /* Sets BSD 'nice' value on the task */
464 integer_t importance
)
466 if (task
== TASK_NULL
|| task
== kernel_task
)
467 return (KERN_INVALID_ARGUMENT
);
474 return (KERN_TERMINATED
);
477 if (proc_get_effective_task_policy(task
, TASK_POLICY_ROLE
) >= TASK_CONTROL_APPLICATION
) {
480 return (KERN_INVALID_ARGUMENT
);
483 task
->importance
= importance
;
485 /* TODO: tracepoint? */
487 /* Redrive only the task priority calculation */
488 task_policy_update_task_locked(task
, FALSE
, FALSE
, FALSE
);
492 return (KERN_SUCCESS
);
498 task_policy_flavor_t flavor
,
499 task_policy_t policy_info
,
500 mach_msg_type_number_t
*count
,
501 boolean_t
*get_default
)
503 if (task
== TASK_NULL
|| task
== kernel_task
)
504 return (KERN_INVALID_ARGUMENT
);
508 case TASK_CATEGORY_POLICY
:
510 task_category_policy_t info
= (task_category_policy_t
)policy_info
;
512 if (*count
< TASK_CATEGORY_POLICY_COUNT
)
513 return (KERN_INVALID_ARGUMENT
);
516 info
->role
= TASK_UNSPECIFIED
;
518 info
->role
= proc_get_task_policy(task
, THREAD_NULL
, TASK_POLICY_ATTRIBUTE
, TASK_POLICY_ROLE
);
522 case TASK_BASE_QOS_POLICY
: /* FALLTHRU */
523 case TASK_OVERRIDE_QOS_POLICY
:
525 task_qos_policy_t info
= (task_qos_policy_t
)policy_info
;
527 if (*count
< TASK_QOS_POLICY_COUNT
)
528 return (KERN_INVALID_ARGUMENT
);
531 info
->task_latency_qos_tier
= LATENCY_QOS_TIER_UNSPECIFIED
;
532 info
->task_throughput_qos_tier
= THROUGHPUT_QOS_TIER_UNSPECIFIED
;
533 } else if (flavor
== TASK_BASE_QOS_POLICY
) {
536 proc_get_task_policy2(task
, THREAD_NULL
, TASK_POLICY_ATTRIBUTE
, TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS
, &value1
, &value2
);
538 info
->task_latency_qos_tier
= qos_latency_policy_package(value1
);
539 info
->task_throughput_qos_tier
= qos_throughput_policy_package(value2
);
541 } else if (flavor
== TASK_OVERRIDE_QOS_POLICY
) {
544 proc_get_task_policy2(task
, THREAD_NULL
, TASK_POLICY_ATTRIBUTE
, TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS
, &value1
, &value2
);
546 info
->task_latency_qos_tier
= qos_latency_policy_package(value1
);
547 info
->task_throughput_qos_tier
= qos_throughput_policy_package(value2
);
553 case TASK_POLICY_STATE
:
555 task_policy_state_t info
= (task_policy_state_t
)policy_info
;
557 if (*count
< TASK_POLICY_STATE_COUNT
)
558 return (KERN_INVALID_ARGUMENT
);
560 /* Only root can get this info */
561 if (current_task()->sec_token
.val
[0] != 0)
562 return KERN_PROTECTION_FAILURE
;
568 info
->imp_assertcnt
= 0;
569 info
->imp_externcnt
= 0;
571 info
->imp_transitions
= 0;
575 info
->requested
= task_requested_bitfield(task
, THREAD_NULL
);
576 info
->effective
= task_effective_bitfield(task
, THREAD_NULL
);
580 if (task
->task_imp_base
!= NULL
) {
581 info
->imp_assertcnt
= task
->task_imp_base
->iit_assertcnt
;
582 info
->imp_externcnt
= IIT_EXTERN(task
->task_imp_base
);
583 info
->flags
|= (task_is_marked_importance_receiver(task
) ? TASK_IMP_RECEIVER
: 0);
584 info
->flags
|= (task_is_marked_importance_denap_receiver(task
) ? TASK_DENAP_RECEIVER
: 0);
585 info
->flags
|= (task_is_marked_importance_donor(task
) ? TASK_IMP_DONOR
: 0);
586 info
->flags
|= (task_is_marked_live_importance_donor(task
) ? TASK_IMP_LIVE_DONOR
: 0);
587 info
->imp_transitions
= task
->task_imp_base
->iit_transitions
;
589 info
->imp_assertcnt
= 0;
590 info
->imp_externcnt
= 0;
591 info
->imp_transitions
= 0;
596 info
->reserved
[0] = 0;
597 info
->reserved
[1] = 0;
602 case TASK_SUPPRESSION_POLICY
:
604 task_suppression_policy_t info
= (task_suppression_policy_t
)policy_info
;
606 if (*count
< TASK_SUPPRESSION_POLICY_COUNT
)
607 return (KERN_INVALID_ARGUMENT
);
613 info
->lowpri_cpu
= 0;
614 info
->timer_throttle
= LATENCY_QOS_TIER_UNSPECIFIED
;
615 info
->disk_throttle
= 0;
618 info
->throughput_qos
= 0;
619 info
->suppressed_cpu
= 0;
621 info
->active
= task
->requested_policy
.t_sup_active
;
622 info
->lowpri_cpu
= task
->requested_policy
.t_sup_lowpri_cpu
;
623 info
->timer_throttle
= qos_latency_policy_package(task
->requested_policy
.t_sup_timer
);
624 info
->disk_throttle
= task
->requested_policy
.t_sup_disk
;
625 info
->cpu_limit
= task
->requested_policy
.t_sup_cpu_limit
;
626 info
->suspend
= task
->requested_policy
.t_sup_suspend
;
627 info
->throughput_qos
= qos_throughput_policy_package(task
->requested_policy
.t_sup_throughput
);
628 info
->suppressed_cpu
= task
->requested_policy
.t_sup_cpu
;
629 info
->background_sockets
= task
->requested_policy
.t_sup_bg_sockets
;
637 return (KERN_INVALID_ARGUMENT
);
640 return (KERN_SUCCESS
);
644 * Called at task creation
645 * We calculate the correct effective but don't apply it to anything yet.
646 * The threads, etc will inherit from the task as they get created.
649 task_policy_create(task_t task
, int parent_boosted
)
651 if (task
->requested_policy
.t_apptype
== TASK_APPTYPE_DAEMON_ADAPTIVE
) {
652 if (parent_boosted
) {
653 task
->requested_policy
.t_apptype
= TASK_APPTYPE_DAEMON_INTERACTIVE
;
654 task_importance_mark_donor(task
, TRUE
);
656 task
->requested_policy
.t_apptype
= TASK_APPTYPE_DAEMON_BACKGROUND
;
657 task_importance_mark_receiver(task
, FALSE
);
661 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
662 (IMPORTANCE_CODE(IMP_UPDATE
, (IMP_UPDATE_TASK_CREATE
| TASK_POLICY_TASK
))) | DBG_FUNC_START
,
663 task_pid(task
), teffective_0(task
, THREAD_NULL
),
664 teffective_1(task
, THREAD_NULL
), tpriority(task
, THREAD_NULL
), 0);
666 task_policy_update_internal_locked(task
, THREAD_NULL
, TRUE
, NULL
);
668 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
669 (IMPORTANCE_CODE(IMP_UPDATE
, (IMP_UPDATE_TASK_CREATE
| TASK_POLICY_TASK
))) | DBG_FUNC_END
,
670 task_pid(task
), teffective_0(task
, THREAD_NULL
),
671 teffective_1(task
, THREAD_NULL
), tpriority(task
, THREAD_NULL
), 0);
673 task_importance_update_live_donor(task
);
674 task_policy_update_task_locked(task
, FALSE
, FALSE
, FALSE
);
678 thread_policy_create(thread_t thread
)
680 task_t task
= thread
->task
;
682 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
683 (IMPORTANCE_CODE(IMP_UPDATE
, (IMP_UPDATE_TASK_CREATE
| TASK_POLICY_THREAD
))) | DBG_FUNC_START
,
684 targetid(task
, thread
), teffective_0(task
, thread
),
685 teffective_1(task
, thread
), tpriority(task
, thread
), 0);
687 task_policy_update_internal_locked(task
, thread
, TRUE
, NULL
);
689 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
690 (IMPORTANCE_CODE(IMP_UPDATE
, (IMP_UPDATE_TASK_CREATE
| TASK_POLICY_THREAD
))) | DBG_FUNC_END
,
691 targetid(task
, thread
), teffective_0(task
, thread
),
692 teffective_1(task
, thread
), tpriority(task
, thread
), 0);
696 task_policy_update_locked(task_t task
, thread_t thread
, task_pend_token_t pend_token
)
698 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
699 (IMPORTANCE_CODE(IMP_UPDATE
, tisthread(thread
)) | DBG_FUNC_START
),
700 targetid(task
, thread
), teffective_0(task
, thread
),
701 teffective_1(task
, thread
), tpriority(task
, thread
), 0);
703 task_policy_update_internal_locked(task
, thread
, FALSE
, pend_token
);
705 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
706 (IMPORTANCE_CODE(IMP_UPDATE
, tisthread(thread
))) | DBG_FUNC_END
,
707 targetid(task
, thread
), teffective_0(task
, thread
),
708 teffective_1(task
, thread
), tpriority(task
, thread
), 0);
712 * One state update function TO RULE THEM ALL
714 * This function updates the task or thread effective policy fields
715 * and pushes the results to the relevant subsystems.
717 * Must call update_complete after unlocking the task,
718 * as some subsystems cannot be updated while holding the task lock.
720 * Called with task locked, not thread
724 task_policy_update_internal_locked(task_t task
, thread_t thread
, boolean_t in_create
, task_pend_token_t pend_token
)
726 boolean_t on_task
= (thread
== THREAD_NULL
) ? TRUE
: FALSE
;
730 * Gather requested policy
733 struct task_requested_policy requested
=
734 (on_task
) ? task
->requested_policy
: thread
->requested_policy
;
739 * Calculate new effective policies from requested policy and task state
741 * If in an 'on_task' block, must only look at and set fields starting with t_
742 * If operating on a task, don't touch anything starting with th_
743 * If operating on a thread, don't touch anything starting with t_
744 * Don't change requested, it won't take effect
747 struct task_effective_policy next
= {};
748 struct task_effective_policy task_effective
;
750 /* Calculate QoS policies */
753 /* Update task role */
754 next
.t_role
= requested
.t_role
;
756 /* Set task qos clamp and ceiling */
757 next
.t_qos_clamp
= requested
.t_qos_clamp
;
759 if (requested
.t_apptype
== TASK_APPTYPE_APP_DEFAULT
||
760 requested
.t_apptype
== TASK_APPTYPE_APP_TAL
) {
762 switch (next
.t_role
) {
763 case TASK_FOREGROUND_APPLICATION
:
764 /* Foreground apps get urgent scheduler priority */
765 next
.qos_ui_is_urgent
= 1;
766 next
.t_qos_ceiling
= THREAD_QOS_UNSPECIFIED
;
769 case TASK_BACKGROUND_APPLICATION
:
770 /* This is really 'non-focal but on-screen' */
771 next
.t_qos_ceiling
= THREAD_QOS_UNSPECIFIED
;
774 case TASK_DEFAULT_APPLICATION
:
775 /* This is 'may render UI but we don't know if it's focal/nonfocal' */
776 next
.t_qos_ceiling
= THREAD_QOS_UNSPECIFIED
;
779 case TASK_NONUI_APPLICATION
:
780 /* i.e. 'off-screen' */
781 next
.t_qos_ceiling
= THREAD_QOS_LEGACY
;
784 case TASK_CONTROL_APPLICATION
:
785 case TASK_GRAPHICS_SERVER
:
786 next
.qos_ui_is_urgent
= 1;
787 next
.t_qos_ceiling
= THREAD_QOS_UNSPECIFIED
;
790 case TASK_THROTTLE_APPLICATION
:
791 /* i.e. 'TAL launch' */
792 next
.t_qos_ceiling
= THREAD_QOS_UTILITY
;
795 case TASK_UNSPECIFIED
:
797 /* Apps that don't have an application role get
798 * USER_INTERACTIVE and USER_INITIATED squashed to LEGACY */
799 next
.t_qos_ceiling
= THREAD_QOS_LEGACY
;
803 /* Daemons get USER_INTERACTIVE squashed to USER_INITIATED */
804 next
.t_qos_ceiling
= THREAD_QOS_USER_INITIATED
;
808 * Set thread qos tier
809 * Note that an override only overrides the QoS field, not other policy settings.
810 * A thread must already be participating in QoS for override to take effect
813 /* Snapshot the task's effective policy */
814 task_effective
= task
->effective_policy
;
816 next
.qos_ui_is_urgent
= task_effective
.qos_ui_is_urgent
;
818 if ((requested
.thrp_qos_override
!= THREAD_QOS_UNSPECIFIED
) && (requested
.thrp_qos
!= THREAD_QOS_UNSPECIFIED
))
819 next
.thep_qos
= MAX(requested
.thrp_qos_override
, requested
.thrp_qos
);
821 next
.thep_qos
= requested
.thrp_qos
;
823 /* A task clamp will result in an effective QoS even when requested is UNSPECIFIED */
824 if (task_effective
.t_qos_clamp
!= THREAD_QOS_UNSPECIFIED
) {
825 if (next
.thep_qos
!= THREAD_QOS_UNSPECIFIED
)
826 next
.thep_qos
= MIN(task_effective
.t_qos_clamp
, next
.thep_qos
);
828 next
.thep_qos
= task_effective
.t_qos_clamp
;
831 /* The ceiling only applies to threads that are in the QoS world */
832 if (task_effective
.t_qos_ceiling
!= THREAD_QOS_UNSPECIFIED
&&
833 next
.thep_qos
!= THREAD_QOS_UNSPECIFIED
) {
834 next
.thep_qos
= MIN(task_effective
.t_qos_ceiling
, next
.thep_qos
);
838 * The QoS relative priority is only applicable when the original programmer's
839 * intended (requested) QoS is in effect. When the QoS is clamped (e.g.
840 * USER_INITIATED-13REL clamped to UTILITY), the relative priority is not honored,
841 * since otherwise it would be lower than unclamped threads. Similarly, in the
842 * presence of boosting, the programmer doesn't know what other actors
843 * are boosting the thread.
845 if ((requested
.thrp_qos
!= THREAD_QOS_UNSPECIFIED
) &&
846 (requested
.thrp_qos
== next
.thep_qos
) &&
847 (requested
.thrp_qos_override
== THREAD_QOS_UNSPECIFIED
)) {
848 next
.thep_qos_relprio
= requested
.thrp_qos_relprio
;
850 next
.thep_qos_relprio
= 0;
854 /* Calculate DARWIN_BG */
855 boolean_t wants_darwinbg
= FALSE
;
856 boolean_t wants_all_sockets_bg
= FALSE
; /* Do I want my existing sockets to be bg */
857 boolean_t wants_watchersbg
= FALSE
; /* Do I want my pidbound threads to be bg */
860 * If DARWIN_BG has been requested at either level, it's engaged.
861 * Only true DARWIN_BG changes cause watchers to transition.
863 * Backgrounding due to apptype does.
865 if (requested
.int_darwinbg
|| requested
.ext_darwinbg
)
866 wants_watchersbg
= wants_all_sockets_bg
= wants_darwinbg
= TRUE
;
869 /* Background TAL apps are throttled when TAL is enabled */
870 if (requested
.t_apptype
== TASK_APPTYPE_APP_TAL
&&
871 requested
.t_role
== TASK_BACKGROUND_APPLICATION
&&
872 requested
.t_tal_enabled
== 1) {
873 next
.t_tal_engaged
= 1;
876 if ((requested
.t_apptype
== TASK_APPTYPE_APP_DEFAULT
||
877 requested
.t_apptype
== TASK_APPTYPE_APP_TAL
) &&
878 requested
.t_role
== TASK_THROTTLE_APPLICATION
) {
879 next
.t_tal_engaged
= 1;
882 /* Adaptive daemons are DARWIN_BG unless boosted, and don't get network throttled. */
883 if (requested
.t_apptype
== TASK_APPTYPE_DAEMON_ADAPTIVE
&&
884 requested
.t_boosted
== 0)
885 wants_darwinbg
= TRUE
;
887 /* Background daemons are always DARWIN_BG, no exceptions, and don't get network throttled. */
888 if (requested
.t_apptype
== TASK_APPTYPE_DAEMON_BACKGROUND
)
889 wants_darwinbg
= TRUE
;
891 if (next
.t_qos_clamp
== THREAD_QOS_BACKGROUND
|| next
.t_qos_clamp
== THREAD_QOS_MAINTENANCE
)
892 wants_darwinbg
= TRUE
;
894 if (requested
.th_pidbind_bg
)
895 wants_all_sockets_bg
= wants_darwinbg
= TRUE
;
897 if (requested
.th_workq_bg
)
898 wants_darwinbg
= TRUE
;
900 if (next
.thep_qos
== THREAD_QOS_BACKGROUND
|| next
.thep_qos
== THREAD_QOS_MAINTENANCE
)
901 wants_darwinbg
= TRUE
;
904 /* Calculate side effects of DARWIN_BG */
906 if (wants_darwinbg
) {
908 /* darwinbg threads/tasks always create bg sockets, but we don't always loop over all sockets */
909 next
.new_sockets_bg
= 1;
913 if (wants_all_sockets_bg
)
914 next
.all_sockets_bg
= 1;
916 if (on_task
&& wants_watchersbg
)
917 next
.t_watchers_bg
= 1;
919 /* darwinbg on either task or thread implies background QOS (or lower) */
921 (wants_darwinbg
|| task_effective
.darwinbg
) &&
922 (next
.thep_qos
> THREAD_QOS_BACKGROUND
|| next
.thep_qos
== THREAD_QOS_UNSPECIFIED
)){
923 next
.thep_qos
= THREAD_QOS_BACKGROUND
;
924 next
.thep_qos_relprio
= 0;
927 /* Calculate low CPU priority */
929 boolean_t wants_lowpri_cpu
= FALSE
;
932 wants_lowpri_cpu
= TRUE
;
934 if (next
.t_tal_engaged
)
935 wants_lowpri_cpu
= TRUE
;
937 if (on_task
&& requested
.t_sup_lowpri_cpu
&& requested
.t_boosted
== 0)
938 wants_lowpri_cpu
= TRUE
;
940 if (wants_lowpri_cpu
)
943 /* Calculate IO policy */
945 /* Update BG IO policy (so we can see if it has changed) */
946 next
.bg_iotier
= requested
.bg_iotier
;
948 int iopol
= THROTTLE_LEVEL_TIER0
;
951 iopol
= MAX(iopol
, requested
.bg_iotier
);
954 if (requested
.t_apptype
== TASK_APPTYPE_DAEMON_STANDARD
)
955 iopol
= MAX(iopol
, proc_standard_daemon_tier
);
957 if (requested
.t_sup_disk
&& requested
.t_boosted
== 0)
958 iopol
= MAX(iopol
, proc_suppressed_disk_tier
);
960 if (next
.t_tal_engaged
)
961 iopol
= MAX(iopol
, proc_tal_disk_tier
);
963 if (next
.t_qos_clamp
!= THREAD_QOS_UNSPECIFIED
)
964 iopol
= MAX(iopol
, thread_qos_policy_params
.qos_iotier
[next
.t_qos_clamp
]);
967 /* Look up the associated IO tier value for the QoS class */
968 iopol
= MAX(iopol
, thread_qos_policy_params
.qos_iotier
[next
.thep_qos
]);
971 iopol
= MAX(iopol
, requested
.int_iotier
);
972 iopol
= MAX(iopol
, requested
.ext_iotier
);
974 next
.io_tier
= iopol
;
976 /* Calculate Passive IO policy */
978 if (requested
.ext_iopassive
|| requested
.int_iopassive
)
981 /* Calculate miscellaneous policy */
984 /* Calculate suppression-active flag */
985 if (requested
.t_sup_active
&& requested
.t_boosted
== 0)
986 next
.t_sup_active
= 1;
988 /* Calculate suspend policy */
989 if (requested
.t_sup_suspend
&& requested
.t_boosted
== 0)
990 next
.t_suspended
= 1;
992 /* Calculate timer QOS */
993 int latency_qos
= requested
.t_base_latency_qos
;
995 if (requested
.t_sup_timer
&& requested
.t_boosted
== 0)
996 latency_qos
= requested
.t_sup_timer
;
998 if (next
.t_qos_clamp
!= THREAD_QOS_UNSPECIFIED
)
999 latency_qos
= MAX(latency_qos
, (int)thread_qos_policy_params
.qos_latency_qos
[next
.t_qos_clamp
]);
1001 if (requested
.t_over_latency_qos
!= 0)
1002 latency_qos
= requested
.t_over_latency_qos
;
1004 /* Treat the windowserver special */
1005 if (requested
.t_role
== TASK_GRAPHICS_SERVER
)
1006 latency_qos
= proc_graphics_timer_qos
;
1008 next
.t_latency_qos
= latency_qos
;
1010 /* Calculate throughput QOS */
1011 int through_qos
= requested
.t_base_through_qos
;
1013 if (requested
.t_sup_throughput
&& requested
.t_boosted
== 0)
1014 through_qos
= requested
.t_sup_throughput
;
1016 if (next
.t_qos_clamp
!= THREAD_QOS_UNSPECIFIED
)
1017 through_qos
= MAX(through_qos
, (int)thread_qos_policy_params
.qos_through_qos
[next
.t_qos_clamp
]);
1019 if (requested
.t_over_through_qos
!= 0)
1020 through_qos
= requested
.t_over_through_qos
;
1022 next
.t_through_qos
= through_qos
;
1024 /* Calculate suppressed CPU priority */
1025 if (requested
.t_sup_cpu
&& requested
.t_boosted
== 0)
1026 next
.t_suppressed_cpu
= 1;
1029 * Calculate background sockets
1030 * Don't take into account boosting to limit transition frequency.
1032 if (requested
.t_sup_bg_sockets
){
1033 next
.all_sockets_bg
= 1;
1034 next
.new_sockets_bg
= 1;
1037 /* Apply SFI Managed class bit */
1038 next
.t_sfi_managed
= requested
.t_sfi_managed
;
1040 /* Calculate 'live donor' status for live importance */
1041 switch (requested
.t_apptype
) {
1042 case TASK_APPTYPE_APP_TAL
:
1043 case TASK_APPTYPE_APP_DEFAULT
:
1044 if (requested
.ext_darwinbg
== 0)
1045 next
.t_live_donor
= 1;
1047 next
.t_live_donor
= 0;
1050 case TASK_APPTYPE_DAEMON_INTERACTIVE
:
1051 case TASK_APPTYPE_DAEMON_STANDARD
:
1052 case TASK_APPTYPE_DAEMON_ADAPTIVE
:
1053 case TASK_APPTYPE_DAEMON_BACKGROUND
:
1055 next
.t_live_donor
= 0;
1060 if (requested
.terminated
) {
1062 * Shoot down the throttles that slow down exit or response to SIGTERM
1063 * We don't need to shoot down:
1064 * passive (don't want to cause others to throttle)
1065 * all_sockets_bg (don't need to iterate FDs on every exit)
1066 * new_sockets_bg (doesn't matter for exiting process)
1067 * pidsuspend (jetsam-ed BG process shouldn't run again)
1068 * watchers_bg (watcher threads don't need to be unthrottled)
1069 * t_latency_qos (affects userspace timers only)
1072 next
.terminated
= 1;
1074 next
.lowpri_cpu
= 0;
1075 next
.io_tier
= THROTTLE_LEVEL_TIER0
;
1077 next
.t_tal_engaged
= 0;
1078 next
.t_role
= TASK_UNSPECIFIED
;
1079 next
.t_suppressed_cpu
= 0;
1081 /* TODO: This should only be shot down on SIGTERM, not exit */
1082 next
.t_suspended
= 0;
1084 next
.thep_qos
= THREAD_QOS_UNSPECIFIED
;
1090 * Swap out old policy for new policy
1094 /* Acquire thread mutex to synchronize against
1095 * thread_policy_set(). Consider reworking to separate qos
1096 * fields, or locking the task in thread_policy_set.
1097 * A more efficient model would be to make the thread bits
1100 thread_mtx_lock(thread
);
1103 struct task_effective_policy prev
=
1104 (on_task
) ? task
->effective_policy
: thread
->effective_policy
;
1107 * Check for invalid transitions here for easier debugging
1108 * TODO: dump the structs as hex in the panic string
1110 if (task
== kernel_task
&& prev
.all_sockets_bg
!= next
.all_sockets_bg
)
1111 panic("unexpected network change for kernel task");
1113 /* This is the point where the new values become visible to other threads */
1115 task
->effective_policy
= next
;
1117 /* Preserve thread specific latency/throughput QoS modified via
1118 * thread_policy_set(). Inelegant in the extreme, to be reworked.
1120 * If thread QoS class is set, we don't need to preserve the previously set values.
1121 * We should ensure to not accidentally preserve previous thread QoS values if you set a thread
1122 * back to default QoS.
1124 uint32_t lqos
= thread
->effective_policy
.t_latency_qos
, tqos
= thread
->effective_policy
.t_through_qos
;
1126 if (prev
.thep_qos
== THREAD_QOS_UNSPECIFIED
&& next
.thep_qos
== THREAD_QOS_UNSPECIFIED
) {
1127 next
.t_latency_qos
= lqos
;
1128 next
.t_through_qos
= tqos
;
1129 } else if (prev
.thep_qos
!= THREAD_QOS_UNSPECIFIED
&& next
.thep_qos
== THREAD_QOS_UNSPECIFIED
) {
1130 next
.t_latency_qos
= 0;
1131 next
.t_through_qos
= 0;
1133 next
.t_latency_qos
= thread_qos_policy_params
.qos_latency_qos
[next
.thep_qos
];
1134 next
.t_through_qos
= thread_qos_policy_params
.qos_through_qos
[next
.thep_qos
];
1137 thread_update_qos_cpu_time(thread
, TRUE
);
1138 thread
->effective_policy
= next
;
1139 thread_mtx_unlock(thread
);
1142 /* Don't do anything further to a half-formed task or thread */
1148 * Pend updates that can't be done while holding the task lock
1151 if (prev
.all_sockets_bg
!= next
.all_sockets_bg
)
1152 pend_token
->tpt_update_sockets
= 1;
1155 /* Only re-scan the timer list if the qos level is getting less strong */
1156 if (prev
.t_latency_qos
> next
.t_latency_qos
)
1157 pend_token
->tpt_update_timers
= 1;
1160 if (prev
.t_live_donor
!= next
.t_live_donor
)
1161 pend_token
->tpt_update_live_donor
= 1;
1166 * Update other subsystems as necessary if something has changed
1169 boolean_t update_throttle
= (prev
.io_tier
!= next
.io_tier
) ? TRUE
: FALSE
;
1172 if (prev
.t_suspended
== 0 && next
.t_suspended
== 1 && task
->active
) {
1173 task_hold_locked(task
);
1174 task_wait_locked(task
, FALSE
);
1176 if (prev
.t_suspended
== 1 && next
.t_suspended
== 0 && task
->active
) {
1177 task_release_locked(task
);
1180 boolean_t update_threads
= FALSE
;
1181 boolean_t update_sfi
= FALSE
;
1183 if (prev
.bg_iotier
!= next
.bg_iotier
||
1184 prev
.terminated
!= next
.terminated
||
1185 prev
.t_qos_clamp
!= next
.t_qos_clamp
||
1186 prev
.t_qos_ceiling
!= next
.t_qos_ceiling
||
1187 prev
.qos_ui_is_urgent
!= next
.qos_ui_is_urgent
||
1188 prev
.darwinbg
!= next
.darwinbg
)
1189 update_threads
= TRUE
;
1192 * A bit of a layering violation. We know what task policy attributes
1193 * sfi_thread_classify() consults, so if they change, trigger SFI
1196 if ((prev
.t_latency_qos
!= next
.t_latency_qos
) ||
1197 (prev
.t_role
!= next
.t_role
) ||
1198 (prev
.darwinbg
!= next
.darwinbg
) ||
1199 (prev
.t_sfi_managed
!= next
.t_sfi_managed
))
1202 #if CONFIG_SCHED_SFI
1203 if (prev
.t_role
!= next
.t_role
&& task_policy_update_coalition_focal_tasks(task
, prev
.t_role
, next
.t_role
)) {
1205 pend_token
->tpt_update_coal_sfi
= 1;
1207 #endif /* !CONFIG_SCHED_SFI */
1209 task_policy_update_task_locked(task
, update_throttle
, update_threads
, update_sfi
);
1212 boolean_t update_sfi
= FALSE
;
1213 boolean_t update_qos
= FALSE
;
1215 if (prev
.lowpri_cpu
!= next
.lowpri_cpu
)
1216 update_cpu
= (next
.lowpri_cpu
? DO_LOWPRI_CPU
: UNDO_LOWPRI_CPU
);
1218 if (prev
.darwinbg
!= next
.darwinbg
||
1219 prev
.thep_qos
!= next
.thep_qos
)
1222 if (prev
.thep_qos
!= next
.thep_qos
||
1223 prev
.thep_qos_relprio
!= next
.thep_qos_relprio
||
1224 prev
.qos_ui_is_urgent
!= next
.qos_ui_is_urgent
||
1225 prev
.terminated
!= next
.terminated
) {
1229 task_policy_update_thread_locked(thread
, update_cpu
, update_throttle
, update_sfi
, update_qos
);
1234 #if CONFIG_SCHED_SFI
1236 * Yet another layering violation. We reach out and bang on the coalition directly.
1239 task_policy_update_coalition_focal_tasks(task_t task
,
1243 boolean_t sfi_transition
= FALSE
;
1245 /* task moving into/out-of the foreground */
1246 if (prev_role
!= TASK_FOREGROUND_APPLICATION
&& next_role
== TASK_FOREGROUND_APPLICATION
) {
1247 if (task_coalition_adjust_focal_count(task
, 1) == 1)
1248 sfi_transition
= TRUE
;
1249 } else if (prev_role
== TASK_FOREGROUND_APPLICATION
&& next_role
!= TASK_FOREGROUND_APPLICATION
) {
1250 if (task_coalition_adjust_focal_count(task
, -1) == 0)
1251 sfi_transition
= TRUE
;
1254 /* task moving into/out-of background */
1255 if (prev_role
!= TASK_BACKGROUND_APPLICATION
&& next_role
== TASK_BACKGROUND_APPLICATION
) {
1256 if (task_coalition_adjust_nonfocal_count(task
, 1) == 1)
1257 sfi_transition
= TRUE
;
1258 } else if (prev_role
== TASK_BACKGROUND_APPLICATION
&& next_role
!= TASK_BACKGROUND_APPLICATION
) {
1259 if (task_coalition_adjust_nonfocal_count(task
, -1) == 0)
1260 sfi_transition
= TRUE
;
1263 return sfi_transition
;
1265 #endif /* CONFIG_SCHED_SFI */
1267 /* Despite the name, the thread's task is locked, the thread is not */
1269 task_policy_update_thread_locked(thread_t thread
,
1271 boolean_t update_throttle
,
1272 boolean_t update_sfi
,
1273 boolean_t update_qos
)
1275 thread_precedence_policy_data_t policy
;
1277 if (update_throttle
) {
1278 rethrottle_thread(thread
->uthread
);
1282 sfi_reevaluate(thread
);
1286 * TODO: pidbind needs to stuff remembered importance into saved_importance
1287 * properly deal with bg'ed threads being pidbound and unbging while pidbound
1289 * TODO: A BG thread's priority is 0 on desktop and 4 on embedded. Need to reconcile this.
1291 if (update_cpu
== DO_LOWPRI_CPU
) {
1292 thread
->saved_importance
= thread
->importance
;
1293 policy
.importance
= INT_MIN
;
1294 } else if (update_cpu
== UNDO_LOWPRI_CPU
) {
1295 policy
.importance
= thread
->saved_importance
;
1296 thread
->saved_importance
= 0;
1299 /* Takes thread lock and thread mtx lock */
1301 thread_policy_set_internal(thread
, THREAD_PRECEDENCE_POLICY
,
1302 (thread_policy_t
)&policy
,
1303 THREAD_PRECEDENCE_POLICY_COUNT
);
1306 thread_recompute_qos(thread
);
1310 * Calculate priority on a task, loop through its threads, and tell them about
1311 * priority changes and throttle changes.
1314 task_policy_update_task_locked(task_t task
,
1315 boolean_t update_throttle
,
1316 boolean_t update_threads
,
1317 boolean_t update_sfi
)
1319 boolean_t update_priority
= FALSE
;
1321 if (task
== kernel_task
)
1322 panic("Attempting to set task policy on kernel_task");
1324 int priority
= BASEPRI_DEFAULT
;
1325 int max_priority
= MAXPRI_USER
;
1327 if (proc_get_effective_task_policy(task
, TASK_POLICY_LOWPRI_CPU
)) {
1328 priority
= MAXPRI_THROTTLE
;
1329 max_priority
= MAXPRI_THROTTLE
;
1330 } else if (proc_get_effective_task_policy(task
, TASK_POLICY_SUPPRESSED_CPU
)) {
1331 priority
= MAXPRI_SUPPRESSED
;
1332 max_priority
= MAXPRI_SUPPRESSED
;
1334 switch (proc_get_effective_task_policy(task
, TASK_POLICY_ROLE
)) {
1335 case TASK_CONTROL_APPLICATION
:
1336 priority
= BASEPRI_CONTROL
;
1338 case TASK_GRAPHICS_SERVER
:
1339 priority
= BASEPRI_GRAPHICS
;
1340 max_priority
= MAXPRI_RESERVED
;
1346 /* factor in 'nice' value */
1347 priority
+= task
->importance
;
1349 if (task
->effective_policy
.t_qos_clamp
!= THREAD_QOS_UNSPECIFIED
) {
1350 int qos_clamp_priority
= thread_qos_policy_params
.qos_pri
[task
->effective_policy
.t_qos_clamp
];
1352 priority
= MIN(priority
, qos_clamp_priority
);
1353 max_priority
= MIN(max_priority
, qos_clamp_priority
);
1357 /* avoid extra work if priority isn't changing */
1358 if (task
->priority
!= priority
|| task
->max_priority
!= max_priority
) {
1359 update_priority
= TRUE
;
1361 /* update the scheduling priority for the task */
1362 task
->max_priority
= max_priority
;
1364 if (priority
> task
->max_priority
)
1365 priority
= task
->max_priority
;
1366 else if (priority
< MINPRI
)
1369 task
->priority
= priority
;
1372 /* Loop over the threads in the task only once, and only if necessary */
1373 if (update_threads
|| update_throttle
|| update_priority
|| update_sfi
) {
1376 queue_iterate(&task
->threads
, thread
, thread_t
, task_threads
) {
1377 if (update_priority
) {
1378 thread_mtx_lock(thread
);
1380 thread_task_priority(thread
, priority
, max_priority
);
1382 thread_mtx_unlock(thread
);
1385 if (update_throttle
) {
1386 rethrottle_thread(thread
->uthread
);
1390 sfi_reevaluate(thread
);
1393 if (update_threads
) {
1394 thread
->requested_policy
.bg_iotier
= task
->effective_policy
.bg_iotier
;
1395 thread
->requested_policy
.terminated
= task
->effective_policy
.terminated
;
1397 task_policy_update_internal_locked(task
, thread
, FALSE
, NULL
);
1398 /* The thread policy must not emit any completion actions due to this change. */
1404 #if CONFIG_SCHED_SFI
1405 /* coalition object is locked */
1407 task_sfi_reevaluate_cb(coalition_t coal
, void *ctx
, task_t task
)
1411 /* unused for now */
1414 /* skip the task we're re-evaluating on behalf of: it's already updated */
1415 if (task
== (task_t
)ctx
)
1420 queue_iterate(&task
->threads
, thread
, thread_t
, task_threads
) {
1421 sfi_reevaluate(thread
);
1426 #endif /* CONFIG_SCHED_SFI */
1429 * Called with task unlocked to do things that can't be done while holding the task lock
1432 task_policy_update_complete_unlocked(task_t task
, thread_t thread
, task_pend_token_t pend_token
)
1434 boolean_t on_task
= (thread
== THREAD_NULL
) ? TRUE
: FALSE
;
1437 if (pend_token
->tpt_update_sockets
)
1438 proc_apply_task_networkbg(task
->bsd_info
, thread
);
1439 #endif /* MACH_BSD */
1442 /* The timer throttle has been removed or reduced, we need to look for expired timers and fire them */
1443 if (pend_token
->tpt_update_timers
)
1444 ml_timer_evaluate();
1447 if (pend_token
->tpt_update_live_donor
)
1448 task_importance_update_live_donor(task
);
1450 #if CONFIG_SCHED_SFI
1451 /* use the resource coalition for SFI re-evaluation */
1452 if (pend_token
->tpt_update_coal_sfi
)
1453 coalition_for_each_task(task
->coalition
[COALITION_TYPE_RESOURCE
],
1454 (void *)task
, task_sfi_reevaluate_cb
);
1455 #endif /* CONFIG_SCHED_SFI */
1460 * Initiate a task policy state transition
1462 * Everything that modifies requested except functions that need to hold the task lock
1463 * should use this function
1465 * Argument validation should be performed before reaching this point.
1467 * TODO: Do we need to check task->active or thread->active?
1470 proc_set_task_policy(task_t task
,
1476 struct task_pend_token pend_token
= {};
1480 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
1481 (IMPORTANCE_CODE(flavor
, (category
| tisthread(thread
)))) | DBG_FUNC_START
,
1482 targetid(task
, thread
), trequested_0(task
, thread
), trequested_1(task
, thread
), value
, 0);
1484 proc_set_task_policy_locked(task
, thread
, category
, flavor
, value
);
1486 task_policy_update_locked(task
, thread
, &pend_token
);
1490 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
1491 (IMPORTANCE_CODE(flavor
, (category
| tisthread(thread
)))) | DBG_FUNC_END
,
1492 targetid(task
, thread
), trequested_0(task
, thread
), trequested_1(task
, thread
), tpending(&pend_token
), 0);
1494 task_policy_update_complete_unlocked(task
, thread
, &pend_token
);
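
/*
 * Illustrative sketch (not compiled): a typical external caller marks a task
 * DARWIN_BG through the one-scalar setter above.  The category and flavor are
 * real values handled by proc_set_task_policy_locked(); the call site itself
 * is hypothetical.
 */
#if 0
static void
example_set_darwinbg(task_t task)
{
	proc_set_task_policy(task, THREAD_NULL,
	    TASK_POLICY_EXTERNAL, TASK_POLICY_DARWIN_BG, 1);
}
#endif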
1498 * Initiate a task policy state transition on a thread with its TID
1499 * Useful if you cannot guarantee the thread won't get terminated
1502 proc_set_task_policy_thread(task_t task
,
1509 thread_t self
= current_thread();
1510 struct task_pend_token pend_token
= {};
1514 if (tid
== TID_NULL
|| tid
== self
->thread_id
)
1517 thread
= task_findtid(task
, tid
);
1519 if (thread
== THREAD_NULL
) {
1524 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
1525 (IMPORTANCE_CODE(flavor
, (category
| TASK_POLICY_THREAD
))) | DBG_FUNC_START
,
1526 targetid(task
, thread
), trequested_0(task
, thread
), trequested_1(task
, thread
), value
, 0);
1528 proc_set_task_policy_locked(task
, thread
, category
, flavor
, value
);
1530 task_policy_update_locked(task
, thread
, &pend_token
);
1534 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
1535 (IMPORTANCE_CODE(flavor
, (category
| TASK_POLICY_THREAD
))) | DBG_FUNC_END
,
1536 targetid(task
, thread
), trequested_0(task
, thread
), trequested_1(task
, thread
), tpending(&pend_token
), 0);
1538 task_policy_update_complete_unlocked(task
, thread
, &pend_token
);
1542 * Variant of proc_set_task_policy() that sets two scalars in the requested policy structure.
1543 * Same locking rules apply.
1546 proc_set_task_policy2(task_t task
, thread_t thread
, int category
, int flavor
, int value1
, int value2
)
1548 struct task_pend_token pend_token
= {};
1552 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
1553 (IMPORTANCE_CODE(flavor
, (category
| tisthread(thread
)))) | DBG_FUNC_START
,
1554 targetid(task
, thread
), trequested_0(task
, thread
), trequested_1(task
, thread
), value1
, 0);
1556 proc_set_task_policy2_locked(task
, thread
, category
, flavor
, value1
, value2
);
1558 task_policy_update_locked(task
, thread
, &pend_token
);
1562 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
1563 (IMPORTANCE_CODE(flavor
, (category
| tisthread(thread
)))) | DBG_FUNC_END
,
1564 targetid(task
, thread
), trequested_0(task
, thread
), trequested_1(task
, thread
), tpending(&pend_token
), 0);
1566 task_policy_update_complete_unlocked(task
, thread
, &pend_token
);
1570 * Set the requested state for a specific flavor to a specific value.
1573 * Verify that arguments to non iopol things are 1 or 0
1576 proc_set_task_policy_locked(task_t task
,
1582 boolean_t on_task
= (thread
== THREAD_NULL
) ? TRUE
: FALSE
;
1586 struct task_requested_policy requested
=
1587 (on_task
) ? task
->requested_policy
: thread
->requested_policy
;
1591 /* Category: EXTERNAL and INTERNAL, thread and task */
1593 case TASK_POLICY_DARWIN_BG
:
1594 if (category
== TASK_POLICY_EXTERNAL
)
1595 requested
.ext_darwinbg
= value
;
1597 requested
.int_darwinbg
= value
;
1600 case TASK_POLICY_IOPOL
:
1601 proc_iopol_to_tier(value
, &tier
, &passive
);
1602 if (category
== TASK_POLICY_EXTERNAL
) {
1603 requested
.ext_iotier
= tier
;
1604 requested
.ext_iopassive
= passive
;
1606 requested
.int_iotier
= tier
;
1607 requested
.int_iopassive
= passive
;
1611 case TASK_POLICY_IO
:
1612 if (category
== TASK_POLICY_EXTERNAL
)
1613 requested
.ext_iotier
= value
;
1615 requested
.int_iotier
= value
;
1618 case TASK_POLICY_PASSIVE_IO
:
1619 if (category
== TASK_POLICY_EXTERNAL
)
1620 requested
.ext_iopassive
= value
;
1622 requested
.int_iopassive
= value
;
1625 /* Category: INTERNAL, task only */
1627 case TASK_POLICY_DARWIN_BG_IOPOL
:
1628 assert(on_task
&& category
== TASK_POLICY_INTERNAL
);
1629 proc_iopol_to_tier(value
, &tier
, &passive
);
1630 requested
.bg_iotier
= tier
;
1633 /* Category: ATTRIBUTE, task only */
1635 case TASK_POLICY_TAL
:
1636 assert(on_task
&& category
== TASK_POLICY_ATTRIBUTE
);
1637 requested
.t_tal_enabled
= value
;
1640 case TASK_POLICY_BOOST
:
1641 assert(on_task
&& category
== TASK_POLICY_ATTRIBUTE
);
1642 requested
.t_boosted
= value
;
1645 case TASK_POLICY_ROLE
:
1646 assert(on_task
&& category
== TASK_POLICY_ATTRIBUTE
);
1647 requested
.t_role
= value
;
1650 case TASK_POLICY_TERMINATED
:
1651 assert(on_task
&& category
== TASK_POLICY_ATTRIBUTE
);
1652 requested
.terminated
= value
;
1654 case TASK_BASE_LATENCY_QOS_POLICY
:
1655 assert(on_task
&& category
== TASK_POLICY_ATTRIBUTE
);
1656 requested
.t_base_latency_qos
= value
;
1658 case TASK_BASE_THROUGHPUT_QOS_POLICY
:
1659 assert(on_task
&& category
== TASK_POLICY_ATTRIBUTE
);
1660 requested
.t_base_through_qos
= value
;
1662 case TASK_POLICY_SFI_MANAGED
:
1663 assert(on_task
&& category
== TASK_POLICY_ATTRIBUTE
);
1664 requested
.t_sfi_managed
= value
;
1667 /* Category: ATTRIBUTE, thread only */
1669 case TASK_POLICY_PIDBIND_BG
:
1670 assert(!on_task
&& category
== TASK_POLICY_ATTRIBUTE
);
1671 requested
.th_pidbind_bg
= value
;
1674 case TASK_POLICY_WORKQ_BG
:
1675 assert(!on_task
&& category
== TASK_POLICY_ATTRIBUTE
);
1676 requested
.th_workq_bg
= value
;
1679 case TASK_POLICY_QOS
:
1680 assert(!on_task
&& category
== TASK_POLICY_ATTRIBUTE
);
1681 requested
.thrp_qos
= value
;
1684 case TASK_POLICY_QOS_OVERRIDE
:
1685 assert(!on_task
&& category
== TASK_POLICY_ATTRIBUTE
);
1686 requested
.thrp_qos_override
= value
;
1690 panic("unknown task policy: %d %d %d", category
, flavor
, value
);
1695 task
->requested_policy
= requested
;
1697 thread
->requested_policy
= requested
;
1701 * Variant of proc_set_task_policy_locked() that sets two scalars in the requested policy structure.
1704 proc_set_task_policy2_locked(task_t task
,
1711 boolean_t on_task
= (thread
== THREAD_NULL
) ? TRUE
: FALSE
;
1713 struct task_requested_policy requested
=
1714 (on_task
) ? task
->requested_policy
: thread
->requested_policy
;
1718 /* Category: ATTRIBUTE, task only */
1720 case TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS
:
1721 assert(on_task
&& category
== TASK_POLICY_ATTRIBUTE
);
1722 requested
.t_base_latency_qos
= value1
;
1723 requested
.t_base_through_qos
= value2
;
1726 case TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS
:
1727 assert(on_task
&& category
== TASK_POLICY_ATTRIBUTE
);
1728 requested
.t_over_latency_qos
= value1
;
1729 requested
.t_over_through_qos
= value2
;
1732 /* Category: ATTRIBUTE, thread only */
1734 case TASK_POLICY_QOS_AND_RELPRIO
:
1736 assert(!on_task
&& category
== TASK_POLICY_ATTRIBUTE
);
1737 requested
.thrp_qos
= value1
;
1738 requested
.thrp_qos_relprio
= value2
;
1739 DTRACE_BOOST3(qos_set
, uint64_t, thread
->thread_id
, int, requested
.thrp_qos
, int, requested
.thrp_qos_relprio
);
1743 panic("unknown task policy: %d %d %d %d", category
, flavor
, value1
, value2
);
1748 task
->requested_policy
= requested
;
1750 thread
->requested_policy
= requested
;
1755 * Gets what you set. Effective values may be different.
1758 proc_get_task_policy(task_t task
,
1763 boolean_t on_task
= (thread
== THREAD_NULL
) ? TRUE
: FALSE
;
1769 struct task_requested_policy requested
=
1770 (on_task
) ? task
->requested_policy
: thread
->requested_policy
;
1773 case TASK_POLICY_DARWIN_BG
:
1774 if (category
== TASK_POLICY_EXTERNAL
)
1775 value
= requested
.ext_darwinbg
;
1777 value
= requested
.int_darwinbg
;
1779 case TASK_POLICY_IOPOL
:
1780 if (category
== TASK_POLICY_EXTERNAL
)
1781 value
= proc_tier_to_iopol(requested
.ext_iotier
,
1782 requested
.ext_iopassive
);
1784 value
= proc_tier_to_iopol(requested
.int_iotier
,
1785 requested
.int_iopassive
);
1787 case TASK_POLICY_IO
:
1788 if (category
== TASK_POLICY_EXTERNAL
)
1789 value
= requested
.ext_iotier
;
1791 value
= requested
.int_iotier
;
1793 case TASK_POLICY_PASSIVE_IO
:
1794 if (category
== TASK_POLICY_EXTERNAL
)
1795 value
= requested
.ext_iopassive
;
1797 value
= requested
.int_iopassive
;
1799 case TASK_POLICY_DARWIN_BG_IOPOL
:
1800 assert(on_task
&& category
== TASK_POLICY_ATTRIBUTE
);
1801 value
= proc_tier_to_iopol(requested
.bg_iotier
, 0);
1803 case TASK_POLICY_ROLE
:
1804 assert(on_task
&& category
== TASK_POLICY_ATTRIBUTE
);
1805 value
= requested
.t_role
;
1807 case TASK_POLICY_SFI_MANAGED
:
1808 assert(on_task
&& category
== TASK_POLICY_ATTRIBUTE
);
1809 value
= requested
.t_sfi_managed
;
1811 case TASK_POLICY_QOS
:
1812 assert(!on_task
&& category
== TASK_POLICY_ATTRIBUTE
);
1813 value
= requested
.thrp_qos
;
1815 case TASK_POLICY_QOS_OVERRIDE
:
1816 assert(!on_task
&& category
== TASK_POLICY_ATTRIBUTE
);
1817 value
= requested
.thrp_qos_override
;
1820 panic("unknown policy_flavor %d", flavor
);
1830 * Variant of proc_get_task_policy() that returns two scalar outputs.
1833 proc_get_task_policy2(task_t task
, thread_t thread
, int category __unused
, int flavor
, int *value1
, int *value2
)
1835 boolean_t on_task
= (thread
== THREAD_NULL
) ? TRUE
: FALSE
;
1839 struct task_requested_policy requested
=
1840 (on_task
) ? task
->requested_policy
: thread
->requested_policy
;
1843 /* TASK attributes */
1844 case TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS
:
1845 assert(on_task
&& category
== TASK_POLICY_ATTRIBUTE
);
1846 *value1
= requested
.t_base_latency_qos
;
1847 *value2
= requested
.t_base_through_qos
;
1850 case TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS
:
1851 assert(on_task
&& category
== TASK_POLICY_ATTRIBUTE
);
1852 *value1
= requested
.t_over_latency_qos
;
1853 *value2
= requested
.t_over_through_qos
;
1856 /* THREAD attributes */
1857 case TASK_POLICY_QOS_AND_RELPRIO
:
1858 assert(!on_task
&& category
== TASK_POLICY_ATTRIBUTE
);
1859 *value1
= requested
.thrp_qos
;
1860 *value2
= requested
.thrp_qos_relprio
;
1864 panic("unknown policy_flavor %d", flavor
);
1873 * Functions for querying effective state for relevant subsystems
1874 * ONLY the relevant subsystem should query these.
1875 * NEVER take a value from one of the 'effective' functions and stuff it into a setter.
int
proc_get_effective_task_policy(task_t task, int flavor)
{
	return proc_get_effective_policy(task, THREAD_NULL, flavor);
}

int
proc_get_effective_thread_policy(thread_t thread, int flavor)
{
	return proc_get_effective_policy(thread->task, thread, flavor);
}
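
/*
 * Illustrative sketch (not compiled): how a subsystem pulls effective policy,
 * e.g. an I/O path asking for the throttle tier currently in effect for a
 * thread.  TASK_POLICY_IO returns a THROTTLE_LEVEL_* value.
 */
#if 0
static int
example_current_io_tier(thread_t thread)
{
	return proc_get_effective_thread_policy(thread, TASK_POLICY_IO);
}
#endif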
/*
 * Gets what is actually in effect, for subsystems which pull policy instead of receive updates.
 *
 * NOTE: This accessor does not take the task lock.
 * Notifications of state updates need to be externally synchronized with state queries.
 * This routine *MUST* remain interrupt safe, as it is potentially invoked
 * within the context of a timer interrupt.  It is also called in KDP context for stackshot.
 */
static int
proc_get_effective_policy(task_t task,
                          thread_t thread,
                          int flavor)
{
	boolean_t on_task = (thread == THREAD_NULL) ? TRUE : FALSE;
	int value = 0;

	switch (flavor) {
	case TASK_POLICY_DARWIN_BG:
		/*
		 * This backs the KPI call proc_pidbackgrounded to find
		 * out if a pid is backgrounded,
		 * as well as proc_get_effective_thread_policy.
		 * Its main use is within the timer layer, as well as
		 * prioritizing requests to the graphics system.
		 * Returns 1 for background mode, 0 for normal mode
		 */
		if (on_task)
			value = task->effective_policy.darwinbg;
		else
			value = (task->effective_policy.darwinbg ||
			         thread->effective_policy.darwinbg) ? 1 : 0;
		break;

	case TASK_POLICY_IO:
		/*
		 * The I/O system calls here to find out what throttling tier to apply to an operation.
		 * Returns THROTTLE_LEVEL_* values. Some userspace spinlock operations can apply
		 * a temporary iotier override to make the I/O more aggressive to get the lock
		 * owner to release the spinlock.
		 */
		if (on_task)
			value = task->effective_policy.io_tier;
		else {
			value = MAX(task->effective_policy.io_tier,
			            thread->effective_policy.io_tier);
			if (thread->iotier_override != THROTTLE_LEVEL_NONE)
				value = MIN(value, thread->iotier_override);
		}
		break;

	case TASK_POLICY_PASSIVE_IO:
		/*
		 * The I/O system calls here to find out whether an operation should be passive.
		 * (i.e. not cause operations with lower throttle tiers to be throttled)
		 * Returns 1 for passive mode, 0 for normal mode.
		 * If a userspace spinlock has applied an override, that I/O should always
		 * be passive to avoid self-throttling when the override is removed and lower
		 * iotier I/Os are issued.
		 */
		if (on_task)
			value = task->effective_policy.io_passive;
		else {
			int io_tier = MAX(task->effective_policy.io_tier, thread->effective_policy.io_tier);
			boolean_t override_in_effect = (thread->iotier_override != THROTTLE_LEVEL_NONE) && (thread->iotier_override < io_tier);

			value = (task->effective_policy.io_passive ||
			         thread->effective_policy.io_passive || override_in_effect) ? 1 : 0;
		}
		break;

	case TASK_POLICY_ALL_SOCKETS_BG:
		/*
		 * do_background_socket() calls this to determine what it should do to the proc's sockets
		 * Returns 1 for background mode, 0 for normal mode
		 *
		 * This consults both thread and task so un-DBGing a thread while the task is BG
		 * doesn't get you out of the network throttle.
		 */
		if (on_task)
			value = task->effective_policy.all_sockets_bg;
		else
			value = (task->effective_policy.all_sockets_bg ||
			         thread->effective_policy.all_sockets_bg) ? 1 : 0;
		break;

	case TASK_POLICY_NEW_SOCKETS_BG:
		/*
		 * socreate() calls this to determine if it should mark a new socket as background
		 * Returns 1 for background mode, 0 for normal mode
		 */
		if (on_task)
			value = task->effective_policy.new_sockets_bg;
		else
			value = (task->effective_policy.new_sockets_bg ||
			         thread->effective_policy.new_sockets_bg) ? 1 : 0;
		break;

	case TASK_POLICY_LOWPRI_CPU:
		/*
		 * Returns 1 for low priority cpu mode, 0 for normal mode
		 */
		if (on_task)
			value = task->effective_policy.lowpri_cpu;
		else
			value = (task->effective_policy.lowpri_cpu ||
			         thread->effective_policy.lowpri_cpu) ? 1 : 0;
		break;

	case TASK_POLICY_SUPPRESSED_CPU:
		/*
		 * Returns 1 for suppressed cpu mode, 0 for normal mode
		 */
		value = task->effective_policy.t_suppressed_cpu;
		break;

	case TASK_POLICY_LATENCY_QOS:
		/*
		 * timer arming calls into here to find out the timer coalescing level
		 * Returns a QoS tier (0-6)
		 */
		if (on_task)
			value = task->effective_policy.t_latency_qos;
		else
			value = MAX(task->effective_policy.t_latency_qos, thread->effective_policy.t_latency_qos);
		break;

	case TASK_POLICY_THROUGH_QOS:
		/*
		 * Returns a QoS tier (0-6)
		 */
		value = task->effective_policy.t_through_qos;
		break;

	case TASK_POLICY_ROLE:
		value = task->effective_policy.t_role;
		break;

	case TASK_POLICY_WATCHERS_BG:
		value = task->effective_policy.t_watchers_bg;
		break;

	case TASK_POLICY_SFI_MANAGED:
		value = task->effective_policy.t_sfi_managed;
		break;

	case TASK_POLICY_QOS:
		value = thread->effective_policy.thep_qos;
		break;

	default:
		panic("unknown policy_flavor %d", flavor);
		break;
	}

	return value;
}
/*
 * Convert from IOPOL_* values to throttle tiers.
 *
 * TODO: Can this be made more compact, like an array lookup
 * (see the sketch after proc_tier_to_iopol below)
 * Note that it is possible to support e.g. IOPOL_PASSIVE_STANDARD in the future
 */
void
proc_iopol_to_tier(int iopolicy, int *tier, int *passive)
{
	*passive = 0;

	switch (iopolicy) {
	case IOPOL_IMPORTANT:
		*tier = THROTTLE_LEVEL_TIER0;
		break;
	case IOPOL_PASSIVE:
		*passive = 1;
		*tier = THROTTLE_LEVEL_TIER0;
		break;
	case IOPOL_STANDARD:
		*tier = THROTTLE_LEVEL_TIER1;
		break;
	case IOPOL_UTILITY:
		*tier = THROTTLE_LEVEL_TIER2;
		break;
	case IOPOL_THROTTLE:
		*tier = THROTTLE_LEVEL_TIER3;
		break;
	default:
		panic("unknown I/O policy %d", iopolicy);
		break;
	}
}

int
proc_tier_to_iopol(int tier, int passive)
{
	if (passive == 1) {
		switch (tier) {
		case THROTTLE_LEVEL_TIER0:
			return IOPOL_PASSIVE;
		default:
			panic("unknown passive tier %d", tier);
			return IOPOL_DEFAULT;
		}
	} else {
		switch (tier) {
		case THROTTLE_LEVEL_NONE:
		case THROTTLE_LEVEL_TIER0:
			return IOPOL_DEFAULT;
		case THROTTLE_LEVEL_TIER1:
			return IOPOL_STANDARD;
		case THROTTLE_LEVEL_TIER2:
			return IOPOL_UTILITY;
		case THROTTLE_LEVEL_TIER3:
			return IOPOL_THROTTLE;
		default:
			panic("unknown tier %d", tier);
			return IOPOL_DEFAULT;
		}
	}
}
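/*
 * Sketch only (not in the original source): the table-lookup shape hinted at by
 * the TODO above.  The tier/IOPOL_* pairings come from the switch statements
 * above; unlike the original, unknown tiers map to IOPOL_DEFAULT here instead
 * of panicking, just to keep the sketch small.
 */
static __unused int
example_tier_to_iopol_lookup(int tier, int passive)
{
	static const int tier_to_iopol[THROTTLE_LEVEL_TIER3 + 1] = {
		[THROTTLE_LEVEL_TIER0] = IOPOL_DEFAULT,
		[THROTTLE_LEVEL_TIER1] = IOPOL_STANDARD,
		[THROTTLE_LEVEL_TIER2] = IOPOL_UTILITY,
		[THROTTLE_LEVEL_TIER3] = IOPOL_THROTTLE,
	};

	if (passive)
		return (tier == THROTTLE_LEVEL_TIER0) ? IOPOL_PASSIVE : IOPOL_DEFAULT;

	if (tier < THROTTLE_LEVEL_TIER0 || tier > THROTTLE_LEVEL_TIER3)
		return IOPOL_DEFAULT;	/* includes THROTTLE_LEVEL_NONE */

	return tier_to_iopol[tier];
}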
int
proc_darwin_role_to_task_role(int darwin_role, int* task_role)
{
	integer_t role = TASK_UNSPECIFIED;

	switch (darwin_role) {
	case PRIO_DARWIN_ROLE_DEFAULT:
		role = TASK_UNSPECIFIED;
		break;
	case PRIO_DARWIN_ROLE_UI_FOCAL:
		role = TASK_FOREGROUND_APPLICATION;
		break;
	case PRIO_DARWIN_ROLE_UI:
		role = TASK_DEFAULT_APPLICATION;
		break;
	case PRIO_DARWIN_ROLE_NON_UI:
		role = TASK_NONUI_APPLICATION;
		break;
	case PRIO_DARWIN_ROLE_UI_NON_FOCAL:
		role = TASK_BACKGROUND_APPLICATION;
		break;
	case PRIO_DARWIN_ROLE_TAL_LAUNCH:
		role = TASK_THROTTLE_APPLICATION;
		break;
	default:
		return EINVAL;
	}

	*task_role = role;

	return 0;
}

int
proc_task_role_to_darwin_role(int task_role)
{
	switch (task_role) {
	case TASK_FOREGROUND_APPLICATION:
		return PRIO_DARWIN_ROLE_UI_FOCAL;
	case TASK_BACKGROUND_APPLICATION:
		return PRIO_DARWIN_ROLE_UI;
	case TASK_NONUI_APPLICATION:
		return PRIO_DARWIN_ROLE_NON_UI;
	case TASK_DEFAULT_APPLICATION:
		return PRIO_DARWIN_ROLE_UI_NON_FOCAL;
	case TASK_THROTTLE_APPLICATION:
		return PRIO_DARWIN_ROLE_TAL_LAUNCH;
	case TASK_UNSPECIFIED:
	default:
		return PRIO_DARWIN_ROLE_DEFAULT;
	}
}
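/*
 * Worked example (not in the original source): note that the two role
 * translators above are not perfectly symmetric.  PRIO_DARWIN_ROLE_UI_FOCAL
 * round-trips through TASK_FOREGROUND_APPLICATION, but PRIO_DARWIN_ROLE_UI
 * maps forward to TASK_DEFAULT_APPLICATION, which maps back to
 * PRIO_DARWIN_ROLE_UI_NON_FOCAL:
 *
 *     int task_role;
 *     if (proc_darwin_role_to_task_role(PRIO_DARWIN_ROLE_UI_FOCAL, &task_role) == 0) {
 *         assert(task_role == TASK_FOREGROUND_APPLICATION);
 *         assert(proc_task_role_to_darwin_role(task_role) == PRIO_DARWIN_ROLE_UI_FOCAL);
 *     }
 */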
/* apply internal backgrounding for workqueue threads */
int
proc_apply_workq_bgthreadpolicy(thread_t thread)
{
	if (thread == THREAD_NULL)
		return ESRCH;

	proc_set_task_policy(thread->task, thread, TASK_POLICY_ATTRIBUTE,
	                     TASK_POLICY_WORKQ_BG, TASK_POLICY_ENABLE);

	return 0;
}

/*
 * remove internal backgrounding for workqueue threads
 * does NOT go find sockets created while BG and unbackground them
 */
int
proc_restore_workq_bgthreadpolicy(thread_t thread)
{
	if (thread == THREAD_NULL)
		return ESRCH;

	proc_set_task_policy(thread->task, thread, TASK_POLICY_ATTRIBUTE,
	                     TASK_POLICY_WORKQ_BG, TASK_POLICY_DISABLE);

	return 0;
}

/* here for temporary compatibility */
int
proc_setthread_saved_importance(__unused thread_t thread, __unused int importance)
{
	return 0;
}
/*
 * Set an override on the thread which is consulted with a
 * higher priority than the task/thread policy. This should
 * only be set for temporary grants until the thread
 * returns to the userspace boundary
 *
 * We use atomic operations to swap in the override, with
 * the assumption that the thread itself can
 * read the override and clear it on return to userspace.
 *
 * No locking is performed, since it is acceptable to see
 * a stale override for one loop through throttle_lowpri_io().
 * However a thread reference must be held on the thread.
 */

void set_thread_iotier_override(thread_t thread, int policy)
{
	int current_override;

	/* Let most aggressive I/O policy win until user boundary */
	do {
		current_override = thread->iotier_override;

		if (current_override != THROTTLE_LEVEL_NONE)
			policy = MIN(current_override, policy);

		if (current_override == policy) {
			/* no effective change */
			return;
		}
	} while (!OSCompareAndSwap(current_override, policy, &thread->iotier_override));

	/*
	 * Since the thread may be currently throttled,
	 * re-evaluate tiers and potentially break out
	 * of the throttle
	 */
	rethrottle_thread(thread->uthread);
}
/*
 * Userspace synchronization routines (like pthread mutexes, pthread reader-writer locks,
 * semaphores, dispatch_sync) may result in priority inversions where a higher priority
 * (i.e. scheduler priority, I/O tier, QoS tier) is waiting on a resource owned by a lower
 * priority thread. In these cases, we attempt to propagate the priority token, as long
 * as the subsystem informs us of the relationships between the threads. The userspace
 * synchronization subsystem should maintain the information of owner->resource and
 * resource->waiters itself.
 */
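/*
 * Illustrative sketch (not in the original source): the call pattern expected
 * from a user-synchronization subsystem.  When a waiter at a higher QoS blocks
 * on a resource owned by thread 'tid' of 'task', it applies an override, and
 * drops it once the owner releases the resource.  'resource' is whatever
 * address the subsystem uses to name the contended object.
 */
static __unused void
example_boost_lock_owner(task_t task, uint64_t tid, user_addr_t resource, int waiter_qos)
{
	/* first_override_for_resource = TRUE: this is the first waiter contending on 'resource' */
	proc_thread_qos_add_override(task, THREAD_NULL, tid, waiter_qos,
	                             TRUE, resource, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX);

	/* ... the owner now runs with at least waiter_qos until the resource is released ... */

	proc_thread_qos_remove_override(task, THREAD_NULL, tid, resource,
	                                THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX);
}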
/*
 * This helper canonicalizes the resource/resource_type given the current qos_override_mode
 * in effect. Note that wildcards (THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD) may need
 * to be handled specially in the future, but for now it's fine to slam
 * *resource to USER_ADDR_NULL even if it was previously a wildcard.
 */
static void _canonicalize_resource_and_type(user_addr_t *resource, int *resource_type) {
	if (qos_override_mode == QOS_OVERRIDE_MODE_OVERHANG_PEAK || qos_override_mode == QOS_OVERRIDE_MODE_IGNORE_OVERRIDE) {
		/* Map all input resource/type to a single one */
		*resource = USER_ADDR_NULL;
		*resource_type = THREAD_QOS_OVERRIDE_TYPE_UNKNOWN;
	} else if (qos_override_mode == QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE) {
		/* no transform */
	} else if (qos_override_mode == QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_IGNORE_DISPATCH) {
		/* Map all dispatch overrides to a single one, to avoid memory overhead */
		if (*resource_type == THREAD_QOS_OVERRIDE_TYPE_DISPATCH_ASYNCHRONOUS_OVERRIDE) {
			*resource = USER_ADDR_NULL;
		}
	} else if (qos_override_mode == QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_SINGLE_MUTEX_OVERRIDE) {
		/* Map all mutex overrides to a single one, to avoid memory overhead */
		if (*resource_type == THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX) {
			*resource = USER_ADDR_NULL;
		}
	}
}
/* This helper routine finds an existing override if known. Locking should be done by caller */
static struct thread_qos_override *_find_qos_override(thread_t thread, user_addr_t resource, int resource_type) {
	struct thread_qos_override *override;

	override = thread->overrides;
	while (override) {
		if (override->override_resource == resource &&
		    override->override_resource_type == resource_type) {
			return override;
		}

		override = override->override_next;
	}

	return NULL;
}
static void _find_and_decrement_qos_override(thread_t thread, user_addr_t resource, int resource_type, boolean_t reset, struct thread_qos_override **free_override_list) {
	struct thread_qos_override *override, *override_prev;

	override_prev = NULL;
	override = thread->overrides;
	while (override) {
		struct thread_qos_override *override_next = override->override_next;

		if ((THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD == resource || override->override_resource == resource) &&
		    override->override_resource_type == resource_type) {
			if (reset) {
				override->override_contended_resource_count = 0;
			} else {
				override->override_contended_resource_count--;
			}

			if (override->override_contended_resource_count == 0) {
				if (override_prev == NULL) {
					thread->overrides = override_next;
				} else {
					override_prev->override_next = override_next;
				}

				/* Add to out-param for later zfree */
				override->override_next = *free_override_list;
				*free_override_list = override;
			} else {
				override_prev = override;
			}

			if (THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD != resource) {
				return;
			}
		} else {
			override_prev = override;
		}

		override = override_next;
	}
}
/* This helper recalculates the current requested override using the policy selected at boot */
static int _calculate_requested_qos_override(thread_t thread)
{
	if (qos_override_mode == QOS_OVERRIDE_MODE_IGNORE_OVERRIDE) {
		return THREAD_QOS_UNSPECIFIED;
	}

	/* iterate over all overrides and calculate MAX */
	struct thread_qos_override *override;
	int qos_override = THREAD_QOS_UNSPECIFIED;

	override = thread->overrides;
	while (override) {
		if (qos_override_mode != QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_IGNORE_DISPATCH ||
		    override->override_resource_type != THREAD_QOS_OVERRIDE_TYPE_DISPATCH_ASYNCHRONOUS_OVERRIDE) {
			qos_override = MAX(qos_override, override->override_qos);
		}

		override = override->override_next;
	}

	return qos_override;
}
boolean_t proc_thread_qos_add_override(task_t task, thread_t thread, uint64_t tid, int override_qos, boolean_t first_override_for_resource, user_addr_t resource, int resource_type)
{
	thread_t self = current_thread();
	struct task_pend_token pend_token = {};

	/* XXX move to thread mutex when thread policy does */
	task_lock(task);

	/*
	 * If thread is passed, it is assumed to be most accurate, since the caller must have an explicit (or implicit) reference
	 * to the thread
	 */
	if (thread != THREAD_NULL) {
		assert(task == thread->task);
	} else {
		if (tid == self->thread_id) {
			thread = self;
		} else {
			thread = task_findtid(task, tid);

			if (thread == THREAD_NULL) {
				KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_ADD_OVERRIDE)) | DBG_FUNC_NONE,
				                      tid, 0, 0xdead, 0, 0);
				task_unlock(task);
				return FALSE;
			}
		}
	}

	KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_ADD_OVERRIDE)) | DBG_FUNC_START,
	                      thread_tid(thread), override_qos, first_override_for_resource ? 1 : 0, 0, 0);

	DTRACE_BOOST5(qos_add_override_pre, uint64_t, tid, uint64_t, thread->requested_policy.thrp_qos,
	              uint64_t, thread->effective_policy.thep_qos, int, override_qos, boolean_t, first_override_for_resource);

	struct task_requested_policy requested = thread->requested_policy;
	struct thread_qos_override *override;
	struct thread_qos_override *deferred_free_override = NULL;
	int new_qos_override, prev_qos_override;
	int new_effective_qos;
	boolean_t has_thread_reference = FALSE;

	_canonicalize_resource_and_type(&resource, &resource_type);

	if (first_override_for_resource) {
		override = _find_qos_override(thread, resource, resource_type);
		if (override) {
			override->override_contended_resource_count++;
		} else {
			struct thread_qos_override *override_new;

			/* We need to allocate a new object. Drop the task lock and recheck afterwards in case someone else added the override */
			thread_reference(thread);
			has_thread_reference = TRUE;
			task_unlock(task);
			override_new = zalloc(thread_qos_override_zone);
			task_lock(task);

			override = _find_qos_override(thread, resource, resource_type);
			if (override) {
				/* Someone else already allocated while the task lock was dropped */
				deferred_free_override = override_new;
				override->override_contended_resource_count++;
			} else {
				override = override_new;
				override->override_next = thread->overrides;
				override->override_contended_resource_count = 1 /* since first_override_for_resource was TRUE */;
				override->override_resource = resource;
				override->override_resource_type = resource_type;
				override->override_qos = THREAD_QOS_UNSPECIFIED;
				thread->overrides = override;
			}
		}
	} else {
		override = _find_qos_override(thread, resource, resource_type);
	}

	if (override) {
		if (override->override_qos == THREAD_QOS_UNSPECIFIED)
			override->override_qos = override_qos;
		else
			override->override_qos = MAX(override->override_qos, override_qos);
	}

	/* Determine how to combine the various overrides into a single current requested override */
	prev_qos_override = requested.thrp_qos_override;
	new_qos_override = _calculate_requested_qos_override(thread);

	if (new_qos_override != prev_qos_override) {
		requested.thrp_qos_override = new_qos_override;

		thread->requested_policy = requested;

		task_policy_update_locked(task, thread, &pend_token);

		if (!has_thread_reference) {
			thread_reference(thread);
		}

		task_unlock(task);

		task_policy_update_complete_unlocked(task, thread, &pend_token);

		new_effective_qos = thread->effective_policy.thep_qos;

		thread_deallocate(thread);
	} else {
		new_effective_qos = thread->effective_policy.thep_qos;

		task_unlock(task);

		if (has_thread_reference) {
			thread_deallocate(thread);
		}
	}

	if (deferred_free_override) {
		zfree(thread_qos_override_zone, deferred_free_override);
	}

	DTRACE_BOOST3(qos_add_override_post, int, prev_qos_override, int, new_qos_override,
	              int, new_effective_qos);

	KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_ADD_OVERRIDE)) | DBG_FUNC_END,
	                      new_qos_override, resource, resource_type, 0, 0);

	return TRUE;
}
static boolean_t
_proc_thread_qos_remove_override_internal(task_t task, thread_t thread, uint64_t tid, user_addr_t resource, int resource_type, boolean_t reset)
{
	thread_t self = current_thread();
	struct task_pend_token pend_token = {};

	/* XXX move to thread mutex when thread policy does */
	task_lock(task);

	/*
	 * If thread is passed, it is assumed to be most accurate, since the caller must have an explicit (or implicit) reference
	 * to the thread
	 */
	if (thread != THREAD_NULL) {
		assert(task == thread->task);
	} else {
		if (tid == self->thread_id) {
			thread = self;
		} else {
			thread = task_findtid(task, tid);

			if (thread == THREAD_NULL) {
				KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_REMOVE_OVERRIDE)) | DBG_FUNC_NONE,
				                      tid, 0, 0xdead, 0, 0);
				task_unlock(task);
				return FALSE;
			}
		}
	}

	struct task_requested_policy requested = thread->requested_policy;
	struct thread_qos_override *deferred_free_override_list = NULL;
	int new_qos_override, prev_qos_override;

	_canonicalize_resource_and_type(&resource, &resource_type);

	_find_and_decrement_qos_override(thread, resource, resource_type, reset, &deferred_free_override_list);

	KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_REMOVE_OVERRIDE)) | DBG_FUNC_START,
	                      thread_tid(thread), resource, reset, 0, 0);

	/* Determine how to combine the various overrides into a single current requested override */
	prev_qos_override = requested.thrp_qos_override;
	new_qos_override = _calculate_requested_qos_override(thread);

	if (new_qos_override != prev_qos_override) {
		requested.thrp_qos_override = new_qos_override;

		thread->requested_policy = requested;

		task_policy_update_locked(task, thread, &pend_token);

		thread_reference(thread);

		task_unlock(task);

		task_policy_update_complete_unlocked(task, thread, &pend_token);

		thread_deallocate(thread);
	} else {
		task_unlock(task);
	}

	while (deferred_free_override_list) {
		struct thread_qos_override *override_next = deferred_free_override_list->override_next;

		zfree(thread_qos_override_zone, deferred_free_override_list);
		deferred_free_override_list = override_next;
	}

	KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_REMOVE_OVERRIDE)) | DBG_FUNC_END,
	                      0, 0, 0, 0, 0);

	return TRUE;
}

boolean_t
proc_thread_qos_remove_override(task_t task, thread_t thread, uint64_t tid, user_addr_t resource, int resource_type)
{
	return _proc_thread_qos_remove_override_internal(task, thread, tid, resource, resource_type, FALSE);
}

boolean_t
proc_thread_qos_reset_override(task_t task, thread_t thread, uint64_t tid, user_addr_t resource, int resource_type)
{
	return _proc_thread_qos_remove_override_internal(task, thread, tid, resource, resource_type, TRUE);
}
/* Deallocate before thread termination */
void proc_thread_qos_deallocate(thread_t thread)
{
	task_t task = thread->task;
	struct thread_qos_override *override;

	/* XXX move to thread mutex when thread policy does */
	task_lock(task);
	override = thread->overrides;
	thread->overrides = NULL;	/* task policy re-evaluation needed? */
	thread->requested_policy.thrp_qos_override = THREAD_QOS_UNSPECIFIED;
	task_unlock(task);

	while (override) {
		struct thread_qos_override *override_next = override->override_next;

		zfree(thread_qos_override_zone, override);
		override = override_next;
	}
}
/* TODO: remove this variable when interactive daemon audit period is over */
extern boolean_t ipc_importance_interactive_receiver;

/*
 * Called at process exec to initialize the apptype, qos clamp, and qos seed of a process
 *
 * TODO: Make this function more table-driven instead of ad-hoc
 */
void
proc_set_task_spawnpolicy(task_t task, int apptype, int qos_clamp, int role,
                          ipc_port_t * portwatch_ports, int portwatch_count)
{
	struct task_pend_token pend_token = {};

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	                          (IMPORTANCE_CODE(IMP_TASK_APPTYPE, apptype)) | DBG_FUNC_START,
	                          task_pid(task), trequested_0(task, THREAD_NULL), trequested_1(task, THREAD_NULL),
	                          apptype, 0);

	switch (apptype) {
	case TASK_APPTYPE_APP_TAL:
	case TASK_APPTYPE_APP_DEFAULT:
		/* Apps become donors via the 'live-donor' flag instead of the static donor flag */
		task_importance_mark_donor(task, FALSE);
		task_importance_mark_live_donor(task, TRUE);
		task_importance_mark_receiver(task, FALSE);
		/* Apps are de-nap receivers on desktop for suppression behaviors */
		task_importance_mark_denap_receiver(task, TRUE);
		break;

	case TASK_APPTYPE_DAEMON_INTERACTIVE:
		task_importance_mark_donor(task, TRUE);
		task_importance_mark_live_donor(task, FALSE);

		/*
		 * A boot arg controls whether interactive daemons are importance receivers.
		 * Normally, they are not. But for testing their behavior as an adaptive
		 * daemon, the boot-arg can be set.
		 *
		 * TODO: remove this when the interactive daemon audit period is over.
		 */
		task_importance_mark_receiver(task, /* FALSE */ ipc_importance_interactive_receiver);
		task_importance_mark_denap_receiver(task, FALSE);
		break;

	case TASK_APPTYPE_DAEMON_STANDARD:
		task_importance_mark_donor(task, TRUE);
		task_importance_mark_live_donor(task, FALSE);
		task_importance_mark_receiver(task, FALSE);
		task_importance_mark_denap_receiver(task, FALSE);
		break;

	case TASK_APPTYPE_DAEMON_ADAPTIVE:
		task_importance_mark_donor(task, FALSE);
		task_importance_mark_live_donor(task, FALSE);
		task_importance_mark_receiver(task, TRUE);
		task_importance_mark_denap_receiver(task, FALSE);
		break;

	case TASK_APPTYPE_DAEMON_BACKGROUND:
		task_importance_mark_donor(task, FALSE);
		task_importance_mark_live_donor(task, FALSE);
		task_importance_mark_receiver(task, FALSE);
		task_importance_mark_denap_receiver(task, FALSE);
		break;

	case TASK_APPTYPE_NONE:
		break;
	}

	if (portwatch_ports != NULL && apptype == TASK_APPTYPE_DAEMON_ADAPTIVE) {
		int portwatch_boosts = 0;

		for (int i = 0; i < portwatch_count; i++) {
			ipc_port_t port = NULL;

			if ((port = portwatch_ports[i]) != NULL) {
				int boost = 0;
				task_add_importance_watchport(task, port, &boost);
				portwatch_boosts += boost;
			}
		}

		if (portwatch_boosts > 0) {
			task_importance_hold_internal_assertion(task, portwatch_boosts);
		}
	}

	task_lock(task);

	if (apptype == TASK_APPTYPE_APP_TAL) {
		/* TAL starts off enabled by default */
		task->requested_policy.t_tal_enabled = 1;
	}

	if (apptype != TASK_APPTYPE_NONE) {
		task->requested_policy.t_apptype = apptype;
	}

	if (role != TASK_UNSPECIFIED) {
		task->requested_policy.t_role = role;
	}

	if (qos_clamp != THREAD_QOS_UNSPECIFIED) {
		task->requested_policy.t_qos_clamp = qos_clamp;
	}

	task_policy_update_locked(task, THREAD_NULL, &pend_token);

	task_unlock(task);

	/* Ensure the donor bit is updated to be in sync with the new live donor status */
	pend_token.tpt_update_live_donor = 1;

	task_policy_update_complete_unlocked(task, THREAD_NULL, &pend_token);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	                          (IMPORTANCE_CODE(IMP_TASK_APPTYPE, apptype)) | DBG_FUNC_END,
	                          task_pid(task), trequested_0(task, THREAD_NULL), trequested_1(task, THREAD_NULL),
	                          task_is_importance_receiver(task), 0);
}
extern task_t bsd_init_task;

/* Set up the primordial thread's QoS */
void
task_set_main_thread_qos(task_t task, thread_t main_thread) {
	struct task_pend_token pend_token = {};

	assert(main_thread->task == task);

	task_lock(task);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	                          (IMPORTANCE_CODE(IMP_MAIN_THREAD_QOS, 0)) | DBG_FUNC_START,
	                          task_pid(task), trequested_0(task, THREAD_NULL), trequested_1(task, THREAD_NULL),
	                          main_thread->requested_policy.thrp_qos, 0);

	int primordial_qos = THREAD_QOS_UNSPECIFIED;

	int qos_clamp = task->requested_policy.t_qos_clamp;

	if (task == bsd_init_task) {
		/* PID 1 gets a special case */
		primordial_qos = THREAD_QOS_USER_INITIATED;
	} else {
		switch (task->requested_policy.t_apptype) {
		case TASK_APPTYPE_APP_TAL:
		case TASK_APPTYPE_APP_DEFAULT:
			primordial_qos = THREAD_QOS_USER_INTERACTIVE;
			break;

		case TASK_APPTYPE_DAEMON_INTERACTIVE:
		case TASK_APPTYPE_DAEMON_STANDARD:
		case TASK_APPTYPE_DAEMON_ADAPTIVE:
			primordial_qos = THREAD_QOS_LEGACY;
			break;

		case TASK_APPTYPE_DAEMON_BACKGROUND:
			primordial_qos = THREAD_QOS_BACKGROUND;
			break;
		}
	}

	if (qos_clamp != THREAD_QOS_UNSPECIFIED) {
		if (primordial_qos != THREAD_QOS_UNSPECIFIED) {
			primordial_qos = MIN(qos_clamp, primordial_qos);
		} else {
			primordial_qos = qos_clamp;
		}
	}

	main_thread->requested_policy.thrp_qos = primordial_qos;

	task_policy_update_locked(task, main_thread, &pend_token);

	task_unlock(task);

	task_policy_update_complete_unlocked(task, main_thread, &pend_token);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	                          (IMPORTANCE_CODE(IMP_MAIN_THREAD_QOS, 0)) | DBG_FUNC_END,
	                          task_pid(task), trequested_0(task, THREAD_NULL), trequested_1(task, THREAD_NULL),
	                          primordial_qos, 0);
}
/* for process_policy to check before attempting to set */
boolean_t
proc_task_is_tal(task_t task)
{
	return (task->requested_policy.t_apptype == TASK_APPTYPE_APP_TAL) ? TRUE : FALSE;
}

int
task_get_apptype(task_t task)
{
	return task->requested_policy.t_apptype;
}

integer_t
task_grab_latency_qos(task_t task)
{
	return qos_latency_policy_package(proc_get_effective_task_policy(task, TASK_POLICY_LATENCY_QOS));
}
/* update the darwin background action state in the flags field for libproc */
int
proc_get_darwinbgstate(task_t task, uint32_t * flagsp)
{
	if (task->requested_policy.ext_darwinbg)
		*flagsp |= PROC_FLAG_EXT_DARWINBG;

	if (task->requested_policy.int_darwinbg)
		*flagsp |= PROC_FLAG_DARWINBG;

	if (task->requested_policy.t_apptype == TASK_APPTYPE_APP_DEFAULT ||
	    task->requested_policy.t_apptype == TASK_APPTYPE_APP_TAL)
		*flagsp |= PROC_FLAG_APPLICATION;

	if (task->requested_policy.t_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE)
		*flagsp |= PROC_FLAG_ADAPTIVE;

	if (task->requested_policy.t_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE && task->requested_policy.t_boosted == 1)
		*flagsp |= PROC_FLAG_ADAPTIVE_IMPORTANT;

	if (task_is_importance_donor(task))
		*flagsp |= PROC_FLAG_IMPORTANCE_DONOR;

	if (task->effective_policy.t_sup_active)
		*flagsp |= PROC_FLAG_SUPPRESSED;

	return 0;
}
/* All per-thread state is in the first 32-bits of the bitfield */
void
proc_get_thread_policy(thread_t thread, thread_policy_state_t info)
{
	task_t task = thread->task;
	task_lock(task);
	info->requested = (integer_t)task_requested_bitfield(task, thread);
	info->effective = (integer_t)task_effective_bitfield(task, thread);
	task_unlock(task);
}
/*
 * Tracepoint data... Reading the tracepoint data can be somewhat complicated.
 * The current scheme packs as much data into a single tracepoint as it can.
 *
 * Each task/thread requested/effective structure is 64 bits in size. Any
 * given tracepoint will emit either requested or effective data, but not both.
 *
 * A tracepoint may emit any of task, thread, or task & thread data.
 *
 * The type of data emitted varies with pointer size. Where possible, both
 * task and thread data are emitted. In LP32 systems, the first and second
 * halves of either the task or thread data is emitted.
 *
 * The code uses uintptr_t array indexes instead of high/low to avoid
 * confusion WRT big vs little endian.
 *
 * The truth table for the tracepoint data functions is below, and has the
 * following invariants:
 *
 * 1) task and thread are uintptr_t*
 * 2) task may never be NULL
 *
 *                                      LP32            LP64
 * trequested_0(task, NULL)             task[0]         task[0]
 * trequested_1(task, NULL)             task[1]         NULL
 * trequested_0(task, thread)           thread[0]       task[0]
 * trequested_1(task, thread)           thread[1]       thread[0]
 *
 * Basically, you get a full task or thread on LP32, and both on LP64.
 *
 * The uintptr_t munging here is squicky enough to deserve a comment.
 *
 * The variables we are accessing are laid out in memory like this:
 *
 * [        LP64 uintptr_t  0          ]
 * [ LP32 uintptr_t 0 ] [ LP32 uintptr_t 1 ]
 */
static uintptr_t
trequested_0(task_t task, thread_t thread)
{
	_Static_assert(sizeof(struct task_requested_policy) == sizeof(uint64_t), "size invariant violated");
	_Static_assert(sizeof(task->requested_policy) == sizeof(thread->requested_policy), "size invariant violated");

	uintptr_t* raw = (uintptr_t*)((thread == THREAD_NULL) ? &task->requested_policy : &thread->requested_policy);
	return raw[0];
}

static uintptr_t
trequested_1(task_t task, thread_t thread)
{
	_Static_assert(sizeof(struct task_requested_policy) == sizeof(uint64_t), "size invariant violated");
	_Static_assert(sizeof(task->requested_policy) == sizeof(thread->requested_policy), "size invariant violated");

#if defined __LP64__
	return (thread == NULL) ? 0 : *(uintptr_t*)&thread->requested_policy;
#else
	uintptr_t* raw = (uintptr_t*)((thread == THREAD_NULL) ? &task->requested_policy : &thread->requested_policy);
	return raw[1];
#endif
}

static uintptr_t
teffective_0(task_t task, thread_t thread)
{
	_Static_assert(sizeof(struct task_effective_policy) == sizeof(uint64_t), "size invariant violated");
	_Static_assert(sizeof(task->effective_policy) == sizeof(thread->effective_policy), "size invariant violated");

	uintptr_t* raw = (uintptr_t*)((thread == THREAD_NULL) ? &task->effective_policy : &thread->effective_policy);
	return raw[0];
}

static uintptr_t
teffective_1(task_t task, thread_t thread)
{
	_Static_assert(sizeof(struct task_effective_policy) == sizeof(uint64_t), "size invariant violated");
	_Static_assert(sizeof(task->effective_policy) == sizeof(thread->effective_policy), "size invariant violated");

#if defined __LP64__
	return (thread == NULL) ? 0 : *(uintptr_t*)&thread->effective_policy;
#else
	uintptr_t* raw = (uintptr_t*)((thread == THREAD_NULL) ? &task->effective_policy : &thread->effective_policy);
	return raw[1];
#endif
}

/* dump pending for tracepoint */
static uint32_t tpending(task_pend_token_t pend_token) { return *(uint32_t*)(void*)(pend_token); }
static uint64_t
task_requested_bitfield(task_t task, thread_t thread)
{
	uint64_t bits = 0;
	struct task_requested_policy requested =
	        (thread == THREAD_NULL) ? task->requested_policy : thread->requested_policy;

	bits |= (requested.int_darwinbg       ? POLICY_REQ_INT_DARWIN_BG  : 0);
	bits |= (requested.ext_darwinbg       ? POLICY_REQ_EXT_DARWIN_BG  : 0);
	bits |= (requested.int_iotier         ? (((uint64_t)requested.int_iotier) << POLICY_REQ_INT_IO_TIER_SHIFT) : 0);
	bits |= (requested.ext_iotier         ? (((uint64_t)requested.ext_iotier) << POLICY_REQ_EXT_IO_TIER_SHIFT) : 0);
	bits |= (requested.int_iopassive      ? POLICY_REQ_INT_PASSIVE_IO : 0);
	bits |= (requested.ext_iopassive      ? POLICY_REQ_EXT_PASSIVE_IO : 0);
	bits |= (requested.bg_iotier          ? (((uint64_t)requested.bg_iotier) << POLICY_REQ_BG_IOTIER_SHIFT) : 0);
	bits |= (requested.terminated         ? POLICY_REQ_TERMINATED     : 0);

	bits |= (requested.th_pidbind_bg      ? POLICY_REQ_PIDBIND_BG     : 0);
	bits |= (requested.th_workq_bg        ? POLICY_REQ_WORKQ_BG       : 0);

	if (thread != THREAD_NULL) {
		bits |= (requested.thrp_qos          ? (((uint64_t)requested.thrp_qos) << POLICY_REQ_TH_QOS_SHIFT) : 0);
		bits |= (requested.thrp_qos_override ? (((uint64_t)requested.thrp_qos_override) << POLICY_REQ_TH_QOS_OVER_SHIFT) : 0);
	}

	bits |= (requested.t_boosted          ? POLICY_REQ_BOOSTED        : 0);
	bits |= (requested.t_tal_enabled      ? POLICY_REQ_TAL_ENABLED    : 0);
	bits |= (requested.t_apptype          ? (((uint64_t)requested.t_apptype) << POLICY_REQ_APPTYPE_SHIFT) : 0);
	bits |= (requested.t_role             ? (((uint64_t)requested.t_role) << POLICY_REQ_ROLE_SHIFT) : 0);

	bits |= (requested.t_sup_active       ? POLICY_REQ_SUP_ACTIVE     : 0);
	bits |= (requested.t_sup_lowpri_cpu   ? POLICY_REQ_SUP_LOWPRI_CPU : 0);
	bits |= (requested.t_sup_cpu          ? POLICY_REQ_SUP_CPU        : 0);
	bits |= (requested.t_sup_timer        ? (((uint64_t)requested.t_sup_timer) << POLICY_REQ_SUP_TIMER_THROTTLE_SHIFT) : 0);
	bits |= (requested.t_sup_throughput   ? (((uint64_t)requested.t_sup_throughput) << POLICY_REQ_SUP_THROUGHPUT_SHIFT) : 0);
	bits |= (requested.t_sup_disk         ? POLICY_REQ_SUP_DISK_THROTTLE : 0);
	bits |= (requested.t_sup_cpu_limit    ? POLICY_REQ_SUP_CPU_LIMIT  : 0);
	bits |= (requested.t_sup_suspend      ? POLICY_REQ_SUP_SUSPEND    : 0);
	bits |= (requested.t_sup_bg_sockets   ? POLICY_REQ_SUP_BG_SOCKETS : 0);
	bits |= (requested.t_base_latency_qos ? (((uint64_t)requested.t_base_latency_qos) << POLICY_REQ_BASE_LATENCY_QOS_SHIFT) : 0);
	bits |= (requested.t_over_latency_qos ? (((uint64_t)requested.t_over_latency_qos) << POLICY_REQ_OVER_LATENCY_QOS_SHIFT) : 0);
	bits |= (requested.t_base_through_qos ? (((uint64_t)requested.t_base_through_qos) << POLICY_REQ_BASE_THROUGH_QOS_SHIFT) : 0);
	bits |= (requested.t_over_through_qos ? (((uint64_t)requested.t_over_through_qos) << POLICY_REQ_OVER_THROUGH_QOS_SHIFT) : 0);
	bits |= (requested.t_sfi_managed      ? POLICY_REQ_SFI_MANAGED    : 0);
	bits |= (requested.t_qos_clamp        ? (((uint64_t)requested.t_qos_clamp) << POLICY_REQ_QOS_CLAMP_SHIFT) : 0);

	return bits;
}
static uint64_t
task_effective_bitfield(task_t task, thread_t thread)
{
	uint64_t bits = 0;
	struct task_effective_policy effective =
	        (thread == THREAD_NULL) ? task->effective_policy : thread->effective_policy;

	bits |= (effective.io_tier            ? (((uint64_t)effective.io_tier) << POLICY_EFF_IO_TIER_SHIFT) : 0);
	bits |= (effective.io_passive         ? POLICY_EFF_IO_PASSIVE     : 0);
	bits |= (effective.darwinbg           ? POLICY_EFF_DARWIN_BG      : 0);
	bits |= (effective.lowpri_cpu         ? POLICY_EFF_LOWPRI_CPU     : 0);
	bits |= (effective.terminated         ? POLICY_EFF_TERMINATED     : 0);
	bits |= (effective.all_sockets_bg     ? POLICY_EFF_ALL_SOCKETS_BG : 0);
	bits |= (effective.new_sockets_bg     ? POLICY_EFF_NEW_SOCKETS_BG : 0);
	bits |= (effective.bg_iotier          ? (((uint64_t)effective.bg_iotier) << POLICY_EFF_BG_IOTIER_SHIFT) : 0);
	bits |= (effective.qos_ui_is_urgent   ? POLICY_EFF_QOS_UI_IS_URGENT : 0);

	if (thread != THREAD_NULL)
		bits |= (effective.thep_qos   ? (((uint64_t)effective.thep_qos) << POLICY_EFF_TH_QOS_SHIFT) : 0);

	bits |= (effective.t_tal_engaged      ? POLICY_EFF_TAL_ENGAGED    : 0);
	bits |= (effective.t_suspended        ? POLICY_EFF_SUSPENDED      : 0);
	bits |= (effective.t_watchers_bg      ? POLICY_EFF_WATCHERS_BG    : 0);
	bits |= (effective.t_sup_active       ? POLICY_EFF_SUP_ACTIVE     : 0);
	bits |= (effective.t_suppressed_cpu   ? POLICY_EFF_SUP_CPU        : 0);
	bits |= (effective.t_role             ? (((uint64_t)effective.t_role) << POLICY_EFF_ROLE_SHIFT) : 0);
	bits |= (effective.t_latency_qos      ? (((uint64_t)effective.t_latency_qos) << POLICY_EFF_LATENCY_QOS_SHIFT) : 0);
	bits |= (effective.t_through_qos      ? (((uint64_t)effective.t_through_qos) << POLICY_EFF_THROUGH_QOS_SHIFT) : 0);
	bits |= (effective.t_sfi_managed      ? POLICY_EFF_SFI_MANAGED    : 0);
	bits |= (effective.t_qos_ceiling      ? (((uint64_t)effective.t_qos_ceiling) << POLICY_EFF_QOS_CEILING_SHIFT) : 0);

	return bits;
}
/*
 * Resource usage and CPU related routines
 */

int
proc_get_task_ruse_cpu(task_t task, uint32_t *policyp, uint8_t *percentagep, uint64_t *intervalp, uint64_t *deadlinep)
{
	int error = 0;
	int scope;

	task_lock(task);

	error = task_get_cpuusage(task, percentagep, intervalp, deadlinep, &scope);

	task_unlock(task);

	/*
	 * Reverse-map from CPU resource limit scopes back to policies (see comment below).
	 */
	if (scope == TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
		*policyp = TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC;
	} else if (scope == TASK_RUSECPU_FLAGS_PROC_LIMIT) {
		*policyp = TASK_POLICY_RESOURCE_ATTRIBUTE_THROTTLE;
	} else if (scope == TASK_RUSECPU_FLAGS_DEADLINE) {
		*policyp = TASK_POLICY_RESOURCE_ATTRIBUTE_NONE;
	}

	return error;
}
/*
 * Configure the default CPU usage monitor parameters.
 *
 * For tasks which have this mechanism activated: if any thread in the
 * process consumes more CPU than this, an EXC_RESOURCE exception will be generated.
 */
void
proc_init_cpumon_params(void)
{
	/*
	 * The max CPU percentage can be configured via the boot-args and
	 * a key in the device tree. The boot-args are honored first, then the
	 * device tree.
	 */
	if (!PE_parse_boot_argn("max_cpumon_percentage", &proc_max_cpumon_percentage,
	                        sizeof (proc_max_cpumon_percentage)))
	{
		uint64_t max_percentage = 0ULL;

		if (!PE_get_default("kern.max_cpumon_percentage", &max_percentage,
		                    sizeof(max_percentage)))
		{
			max_percentage = DEFAULT_CPUMON_PERCENTAGE;
		}

		assert(max_percentage <= UINT8_MAX);
		proc_max_cpumon_percentage = (uint8_t) max_percentage;
	}

	if (proc_max_cpumon_percentage > 100) {
		proc_max_cpumon_percentage = 100;
	}

	/*
	 * The interval should be specified in seconds.
	 *
	 * Like the max CPU percentage, the max CPU interval can be configured
	 * via boot-args and the device tree.
	 */
	if (!PE_parse_boot_argn("max_cpumon_interval", &proc_max_cpumon_interval,
	                        sizeof (proc_max_cpumon_interval)))
	{
		if (!PE_get_default("kern.max_cpumon_interval", &proc_max_cpumon_interval,
		                    sizeof(proc_max_cpumon_interval)))
		{
			proc_max_cpumon_interval = DEFAULT_CPUMON_INTERVAL;
		}
	}

	proc_max_cpumon_interval *= NSEC_PER_SEC;

	/* TEMPORARY boot arg to control App suppression */
	PE_parse_boot_argn("task_policy_suppression_disable",
	                   &task_policy_suppression_disable,
	                   sizeof(task_policy_suppression_disable));
}
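/*
 * Example (illustrative only; the values are arbitrary): the defaults above can
 * be overridden at boot with the boot-args parsed by this routine, e.g.
 *
 *     nvram boot-args="max_cpumon_percentage=75 max_cpumon_interval=300"
 *
 * max_cpumon_interval is given in seconds and converted to nanoseconds above.
 */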
3115 * Currently supported configurations for CPU limits.
3117 * Policy | Deadline-based CPU limit | Percentage-based CPU limit
3118 * -------------------------------------+--------------------------+------------------------------
3119 * PROC_POLICY_RSRCACT_THROTTLE | ENOTSUP | Task-wide scope only
3120 * PROC_POLICY_RSRCACT_SUSPEND | Task-wide scope only | ENOTSUP
3121 * PROC_POLICY_RSRCACT_TERMINATE | Task-wide scope only | ENOTSUP
3122 * PROC_POLICY_RSRCACT_NOTIFY_KQ | Task-wide scope only | ENOTSUP
3123 * PROC_POLICY_RSRCACT_NOTIFY_EXC | ENOTSUP | Per-thread scope only
3125 * A deadline-based CPU limit is actually a simple wallclock timer - the requested action is performed
3126 * after the specified amount of wallclock time has elapsed.
3128 * A percentage-based CPU limit performs the requested action after the specified amount of actual CPU time
3129 * has been consumed -- regardless of how much wallclock time has elapsed -- by either the task as an
3130 * aggregate entity (so-called "Task-wide" or "Proc-wide" scope, whereby the CPU time consumed by all threads
3131 * in the task are added together), or by any one thread in the task (so-called "per-thread" scope).
3133 * We support either deadline != 0 OR percentage != 0, but not both. The original intention in having them
3134 * share an API was to use actual CPU time as the basis of the deadline-based limit (as in: perform an action
3135 * after I have used some amount of CPU time; this is different than the recurring percentage/interval model)
3136 * but the potential consumer of the API at the time was insisting on wallclock time instead.
3138 * Currently, requesting notification via an exception is the only way to get per-thread scope for a
3139 * CPU limit. All other types of notifications force task-wide scope for the limit.
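/*
 * Illustrative sketch (not in the original source): requesting a task-wide
 * percentage limit per the matrix above.  A THROTTLE policy with a non-zero
 * percentage and deadline == 0 selects TASK_RUSECPU_FLAGS_PROC_LIMIT scope;
 * the 50% / one-second refill values below are arbitrary examples.
 */
static __unused int
example_throttle_half_cpu(task_t task)
{
	/* 50% of one CPU, refilled every second, no wallclock deadline */
	return proc_set_task_ruse_cpu(task, TASK_POLICY_RESOURCE_ATTRIBUTE_THROTTLE,
	                              50 /* percentage */, NSEC_PER_SEC /* interval, ns */,
	                              0 /* deadline */, 0 /* cpumon_entitled */);
}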
int
proc_set_task_ruse_cpu(task_t task, uint32_t policy, uint8_t percentage, uint64_t interval, uint64_t deadline,
                       int cpumon_entitled)
{
	int error = 0;
	int scope;

	/*
	 * Enforce the matrix of supported configurations for policy, percentage, and deadline.
	 */
	switch (policy) {
	// If no policy is explicitly given, the default is to throttle.
	case TASK_POLICY_RESOURCE_ATTRIBUTE_NONE:
	case TASK_POLICY_RESOURCE_ATTRIBUTE_THROTTLE:
		if (deadline != 0)
			return ENOTSUP;
		scope = TASK_RUSECPU_FLAGS_PROC_LIMIT;
		break;
	case TASK_POLICY_RESOURCE_ATTRIBUTE_SUSPEND:
	case TASK_POLICY_RESOURCE_ATTRIBUTE_TERMINATE:
	case TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_KQ:
		if (percentage != 0)
			return ENOTSUP;
		scope = TASK_RUSECPU_FLAGS_DEADLINE;
		break;
	case TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC:
		if (deadline != 0)
			return ENOTSUP;
		scope = TASK_RUSECPU_FLAGS_PERTHR_LIMIT;
#ifdef CONFIG_NOMONITORS
		return error;
#endif /* CONFIG_NOMONITORS */
		break;
	default:
		return EINVAL;
	}

	task_lock(task);
	if (task != current_task()) {
		task->policy_ru_cpu_ext = policy;
	} else {
		task->policy_ru_cpu = policy;
	}
	error = task_set_cpuusage(task, percentage, interval, deadline, scope, cpumon_entitled);
	task_unlock(task);

	return error;
}
int
proc_clear_task_ruse_cpu(task_t task, int cpumon_entitled)
{
	int error = 0;
	int action;
	void * bsdinfo = NULL;

	task_lock(task);
	if (task != current_task()) {
		task->policy_ru_cpu_ext = TASK_POLICY_RESOURCE_ATTRIBUTE_DEFAULT;
	} else {
		task->policy_ru_cpu = TASK_POLICY_RESOURCE_ATTRIBUTE_DEFAULT;
	}

	error = task_clear_cpuusage_locked(task, cpumon_entitled);
	if (error != 0)
		goto out;

	action = task->applied_ru_cpu;
	if (task->applied_ru_cpu_ext != TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
		/* reset action */
		task->applied_ru_cpu_ext = TASK_POLICY_RESOURCE_ATTRIBUTE_NONE;
	}
	if (action != TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
		bsdinfo = task->bsd_info;
		task_unlock(task);
		proc_restore_resource_actions(bsdinfo, TASK_POLICY_CPU_RESOURCE_USAGE, action);
		goto out1;
	}

out:
	task_unlock(task);
out1:
	return error;
}
3228 task_apply_resource_actions(task_t task
, int type
)
3230 int action
= TASK_POLICY_RESOURCE_ATTRIBUTE_NONE
;
3231 void * bsdinfo
= NULL
;
3234 case TASK_POLICY_CPU_RESOURCE_USAGE
:
3236 case TASK_POLICY_WIREDMEM_RESOURCE_USAGE
:
3237 case TASK_POLICY_VIRTUALMEM_RESOURCE_USAGE
:
3238 case TASK_POLICY_DISK_RESOURCE_USAGE
:
3239 case TASK_POLICY_NETWORK_RESOURCE_USAGE
:
3240 case TASK_POLICY_POWER_RESOURCE_USAGE
:
3247 /* only cpu actions for now */
3250 if (task
->applied_ru_cpu_ext
== TASK_POLICY_RESOURCE_ATTRIBUTE_NONE
) {
3252 task
->applied_ru_cpu_ext
= task
->policy_ru_cpu_ext
;
3253 action
= task
->applied_ru_cpu_ext
;
3255 action
= task
->applied_ru_cpu_ext
;
3258 if (action
!= TASK_POLICY_RESOURCE_ATTRIBUTE_NONE
) {
3259 bsdinfo
= task
->bsd_info
;
3261 proc_apply_resource_actions(bsdinfo
, TASK_POLICY_CPU_RESOURCE_USAGE
, action
);
/*
 * XXX This API is somewhat broken; we support multiple simultaneous CPU limits, but the get/set API
 * only allows for one at a time. This means that if there is a per-thread limit active, the other
 * "scopes" will not be accessible via this API. We could change it to pass in the scope of interest
 * to the caller, and prefer that, but there's no need for that at the moment.
 */
static int
task_get_cpuusage(task_t task, uint8_t *percentagep, uint64_t *intervalp, uint64_t *deadlinep, int *scope)
{
	*percentagep = 0;
	*intervalp = 0;
	*deadlinep = 0;

	if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) != 0) {
		*scope = TASK_RUSECPU_FLAGS_PERTHR_LIMIT;
		*percentagep = task->rusage_cpu_perthr_percentage;
		*intervalp = task->rusage_cpu_perthr_interval;
	} else if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PROC_LIMIT) != 0) {
		*scope = TASK_RUSECPU_FLAGS_PROC_LIMIT;
		*percentagep = task->rusage_cpu_percentage;
		*intervalp = task->rusage_cpu_interval;
	} else if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_DEADLINE) != 0) {
		*scope = TASK_RUSECPU_FLAGS_DEADLINE;
		*deadlinep = task->rusage_cpu_deadline;
	} else {
		*scope = 0;
	}

	return 0;
}
/*
 * Disable the CPU usage monitor for the task. Return value indicates
 * if the mechanism was actually enabled.
 */
int
task_disable_cpumon(task_t task) {
	thread_t thread;

	task_lock_assert_owned(task);

	if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) == 0) {
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_TELEMETRY
	/*
	 * Disable task-wide telemetry if it was ever enabled by the CPU usage
	 * monitor's warning zone.
	 */
	telemetry_task_ctl_locked(task, TF_CPUMON_WARNING, 0);
#endif

	/*
	 * Disable the monitor for the task, and propagate that change to each thread.
	 */
	task->rusage_cpu_flags &= ~(TASK_RUSECPU_FLAGS_PERTHR_LIMIT | TASK_RUSECPU_FLAGS_FATAL_CPUMON);
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		set_astledger(thread);
	}
	task->rusage_cpu_perthr_percentage = 0;
	task->rusage_cpu_perthr_interval = 0;

	return KERN_SUCCESS;
}
static int
task_set_cpuusage(task_t task, uint8_t percentage, uint64_t interval, uint64_t deadline, int scope, int cpumon_entitled)
{
	thread_t thread = THREAD_NULL;
	uint64_t abstime = 0;
	uint64_t limittime = 0;

	lck_mtx_assert(&task->lock, LCK_MTX_ASSERT_OWNED);

	/* By default, refill once per second */
	if (interval == 0)
		interval = NSEC_PER_SEC;

	if (percentage != 0) {
		if (scope == TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
			boolean_t warn = FALSE;

			/*
			 * A per-thread CPU limit on a task generates an exception
			 * (LEDGER_ACTION_EXCEPTION) if any one thread in the task
			 * exceeds the limit.
			 */

			if (percentage == TASK_POLICY_CPUMON_DISABLE) {
				if (cpumon_entitled) {
					task_disable_cpumon(task);
					return 0;
				}

				/*
				 * This task wishes to disable the CPU usage monitor, but it's
				 * missing the required entitlement:
				 *     com.apple.private.kernel.override-cpumon
				 *
				 * Instead, treat this as a request to reset its params
				 * back to the defaults.
				 */
				warn = TRUE;
				percentage = TASK_POLICY_CPUMON_DEFAULTS;
			}

			if (percentage == TASK_POLICY_CPUMON_DEFAULTS) {
				percentage = proc_max_cpumon_percentage;
				interval   = proc_max_cpumon_interval;
			}

			if (percentage > 100) {
				percentage = 100;
			}

			/*
			 * Passing in an interval of -1 means either:
			 * - Leave the interval as-is, if there's already a per-thread
			 *   limit configured
			 * - Use the system default.
			 */
			if (interval == -1ULL) {
				if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
					interval = task->rusage_cpu_perthr_interval;
				} else {
					interval = proc_max_cpumon_interval;
				}
			}

			/*
			 * Enforce global caps on CPU usage monitor here if the process is not
			 * entitled to escape the global caps.
			 */
			if ((percentage > proc_max_cpumon_percentage) && (cpumon_entitled == 0)) {
				warn = TRUE;
				percentage = proc_max_cpumon_percentage;
			}

			if ((interval > proc_max_cpumon_interval) && (cpumon_entitled == 0)) {
				warn = TRUE;
				interval = proc_max_cpumon_interval;
			}

			if (warn) {
				int pid = 0;
				const char *procname = "unknown";

#ifdef MACH_BSD
				pid = proc_selfpid();
				if (current_task()->bsd_info != NULL) {
					procname = proc_name_address(current_task()->bsd_info);
				}
#endif

				printf("process %s[%d] denied attempt to escape CPU monitor"
				       " (missing required entitlement).\n", procname, pid);
			}

			task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_PERTHR_LIMIT;
			task->rusage_cpu_perthr_percentage = percentage;
			task->rusage_cpu_perthr_interval = interval;
			queue_iterate(&task->threads, thread, thread_t, task_threads) {
				set_astledger(thread);
			}
		} else if (scope == TASK_RUSECPU_FLAGS_PROC_LIMIT) {
			/*
			 * Currently, a proc-wide CPU limit always blocks if the limit is
			 * exceeded (LEDGER_ACTION_BLOCK).
			 */
			task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_PROC_LIMIT;
			task->rusage_cpu_percentage = percentage;
			task->rusage_cpu_interval = interval;

			limittime = (interval * percentage) / 100;
			nanoseconds_to_absolutetime(limittime, &abstime);

			ledger_set_limit(task->ledger, task_ledgers.cpu_time, abstime, 0);
			ledger_set_period(task->ledger, task_ledgers.cpu_time, interval);
			ledger_set_action(task->ledger, task_ledgers.cpu_time, LEDGER_ACTION_BLOCK);
		}
	}

	if (deadline != 0) {
		assert(scope == TASK_RUSECPU_FLAGS_DEADLINE);

		/* if already in use, cancel and wait for it to cleanout */
		if (task->rusage_cpu_callt != NULL) {
			task_unlock(task);
			thread_call_cancel_wait(task->rusage_cpu_callt);
			task_lock(task);
		}
		if (task->rusage_cpu_callt == NULL) {
			task->rusage_cpu_callt = thread_call_allocate_with_priority(task_action_cpuusage, (thread_call_param_t)task, THREAD_CALL_PRIORITY_KERNEL);
		}
		/* setup callout */
		if (task->rusage_cpu_callt != 0) {
			uint64_t save_abstime = 0;

			task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_DEADLINE;
			task->rusage_cpu_deadline = deadline;

			nanoseconds_to_absolutetime(deadline, &abstime);
			save_abstime = abstime;
			clock_absolutetime_interval_to_deadline(save_abstime, &abstime);
			thread_call_enter_delayed(task->rusage_cpu_callt, abstime);
		}
	}

	return 0;
}
int
task_clear_cpuusage(task_t task, int cpumon_entitled)
{
	int retval = 0;

	task_lock(task);
	retval = task_clear_cpuusage_locked(task, cpumon_entitled);
	task_unlock(task);

	return retval;
}

int
task_clear_cpuusage_locked(task_t task, int cpumon_entitled)
{
	thread_call_t savecallt;

	/* cancel percentage handling if set */
	if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PROC_LIMIT) {
		task->rusage_cpu_flags &= ~TASK_RUSECPU_FLAGS_PROC_LIMIT;
		ledger_set_limit(task->ledger, task_ledgers.cpu_time, LEDGER_LIMIT_INFINITY, 0);
		task->rusage_cpu_percentage = 0;
		task->rusage_cpu_interval = 0;
	}

	/*
	 * Disable the CPU usage monitor.
	 */
	if (cpumon_entitled) {
		task_disable_cpumon(task);
	}

	/* cancel deadline handling if set */
	if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_DEADLINE) {
		task->rusage_cpu_flags &= ~TASK_RUSECPU_FLAGS_DEADLINE;
		if (task->rusage_cpu_callt != 0) {
			savecallt = task->rusage_cpu_callt;
			task->rusage_cpu_callt = NULL;
			task->rusage_cpu_deadline = 0;
			task_unlock(task);
			thread_call_cancel_wait(savecallt);
			thread_call_free(savecallt);
			task_lock(task);
		}
	}
	return 0;
}
/* called by ledger unit to enforce action due to resource usage criteria being met */
void
task_action_cpuusage(thread_call_param_t param0, __unused thread_call_param_t param1)
{
	task_t task = (task_t)param0;
	(void)task_apply_resource_actions(task, TASK_POLICY_CPU_RESOURCE_USAGE);
}
3544 * Routines for importance donation/inheritance/boosting
3548 task_importance_update_live_donor(task_t target_task
)
3550 #if IMPORTANCE_INHERITANCE
3552 ipc_importance_task_t task_imp
;
3554 task_imp
= ipc_importance_for_task(target_task
, FALSE
);
3555 if (IIT_NULL
!= task_imp
) {
3556 ipc_importance_task_update_live_donor(task_imp
);
3557 ipc_importance_task_release(task_imp
);
3559 #endif /* IMPORTANCE_INHERITANCE */
void
task_importance_mark_donor(task_t task, boolean_t donating)
{
#if IMPORTANCE_INHERITANCE
	ipc_importance_task_t task_imp;

	task_imp = ipc_importance_for_task(task, FALSE);
	if (IIT_NULL != task_imp) {
		ipc_importance_task_mark_donor(task_imp, donating);
		ipc_importance_task_release(task_imp);
	}
#endif /* IMPORTANCE_INHERITANCE */
}

void
task_importance_mark_live_donor(task_t task, boolean_t live_donating)
{
#if IMPORTANCE_INHERITANCE
	ipc_importance_task_t task_imp;

	task_imp = ipc_importance_for_task(task, FALSE);
	if (IIT_NULL != task_imp) {
		ipc_importance_task_mark_live_donor(task_imp, live_donating);
		ipc_importance_task_release(task_imp);
	}
#endif /* IMPORTANCE_INHERITANCE */
}

void
task_importance_mark_receiver(task_t task, boolean_t receiving)
{
#if IMPORTANCE_INHERITANCE
	ipc_importance_task_t task_imp;

	task_imp = ipc_importance_for_task(task, FALSE);
	if (IIT_NULL != task_imp) {
		ipc_importance_task_mark_receiver(task_imp, receiving);
		ipc_importance_task_release(task_imp);
	}
#endif /* IMPORTANCE_INHERITANCE */
}

void
task_importance_mark_denap_receiver(task_t task, boolean_t denap)
{
#if IMPORTANCE_INHERITANCE
	ipc_importance_task_t task_imp;

	task_imp = ipc_importance_for_task(task, FALSE);
	if (IIT_NULL != task_imp) {
		ipc_importance_task_mark_denap_receiver(task_imp, denap);
		ipc_importance_task_release(task_imp);
	}
#endif /* IMPORTANCE_INHERITANCE */
}

void
task_importance_reset(__imp_only task_t task)
{
#if IMPORTANCE_INHERITANCE
	ipc_importance_task_t task_imp;

	/* TODO: Lower importance downstream before disconnect */
	task_imp = task->task_imp_base;
	ipc_importance_reset(task_imp, FALSE);
	task_importance_update_live_donor(task);
#endif /* IMPORTANCE_INHERITANCE */
}
#if IMPORTANCE_INHERITANCE

/*
 * Sets the task boost bit to the provided value. Does NOT run the update function.
 *
 * Task lock must be held.
 */
void
task_set_boost_locked(task_t task, boolean_t boost_active)
{
#if IMPORTANCE_DEBUG
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_BOOST, (boost_active ? IMP_BOOSTED : IMP_UNBOOSTED)) | DBG_FUNC_START),
	                          proc_selfpid(), task_pid(task), trequested_0(task, THREAD_NULL), trequested_1(task, THREAD_NULL), 0);
#endif

	task->requested_policy.t_boosted = boost_active;

#if IMPORTANCE_DEBUG
	if (boost_active == TRUE){
		DTRACE_BOOST2(boost, task_t, task, int, task_pid(task));
	} else {
		DTRACE_BOOST2(unboost, task_t, task, int, task_pid(task));
	}
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_BOOST, (boost_active ? IMP_BOOSTED : IMP_UNBOOSTED)) | DBG_FUNC_END),
	                          proc_selfpid(), task_pid(task),
	                          trequested_0(task, THREAD_NULL), trequested_1(task, THREAD_NULL), 0);
#endif
}

/*
 * Sets the task boost bit to the provided value and applies the update.
 *
 * Task lock must be held. Must call update complete after unlocking the task.
 */
void
task_update_boost_locked(task_t task, boolean_t boost_active, task_pend_token_t pend_token)
{
	task_set_boost_locked(task, boost_active);

	task_policy_update_locked(task, THREAD_NULL, pend_token);
}
3677 * so you must check only once for each donation event.
3680 task_is_importance_donor(task_t task
)
3682 if (task
->task_imp_base
== IIT_NULL
)
3684 return ipc_importance_task_is_donor(task
->task_imp_base
);
3688 * Query the status of the task's donor mark.
3691 task_is_marked_importance_donor(task_t task
)
3693 if (task
->task_imp_base
== IIT_NULL
)
3695 return ipc_importance_task_is_marked_donor(task
->task_imp_base
);
3699 * Query the status of the task's live donor and donor mark.
3702 task_is_marked_live_importance_donor(task_t task
)
3704 if (task
->task_imp_base
== IIT_NULL
)
3706 return ipc_importance_task_is_marked_live_donor(task
->task_imp_base
);
3711 * This routine may be called without holding task lock
3712 * since the value of imp_receiver can never be unset.
3715 task_is_importance_receiver(task_t task
)
3717 if (task
->task_imp_base
== IIT_NULL
)
3719 return ipc_importance_task_is_marked_receiver(task
->task_imp_base
);
3723 * Query the task's receiver mark.
3726 task_is_marked_importance_receiver(task_t task
)
3728 if (task
->task_imp_base
== IIT_NULL
)
3730 return ipc_importance_task_is_marked_receiver(task
->task_imp_base
);
3734 * This routine may be called without holding task lock
3735 * since the value of de-nap receiver can never be unset.
3738 task_is_importance_denap_receiver(task_t task
)
3740 if (task
->task_imp_base
== IIT_NULL
)
3742 return ipc_importance_task_is_denap_receiver(task
->task_imp_base
);
3746 * Query the task's de-nap receiver mark.
3749 task_is_marked_importance_denap_receiver(task_t task
)
3751 if (task
->task_imp_base
== IIT_NULL
)
3753 return ipc_importance_task_is_marked_denap_receiver(task
->task_imp_base
);
3757 * This routine may be called without holding task lock
3758 * since the value of imp_receiver can never be unset.
3761 task_is_importance_receiver_type(task_t task
)
3763 if (task
->task_imp_base
== IIT_NULL
)
3765 return (task_is_importance_receiver(task
) ||
3766 task_is_importance_denap_receiver(task
));
3770 * External importance assertions are managed by the process in userspace
3771 * Internal importance assertions are the responsibility of the kernel
3772 * Assertions are changed from internal to external via task_importance_externalize_assertion
3776 task_importance_hold_watchport_assertion(task_t target_task
, uint32_t count
)
3778 ipc_importance_task_t task_imp
;
3781 /* must already have set up an importance */
3782 task_imp
= target_task
->task_imp_base
;
3783 assert(IIT_NULL
!= task_imp
);
3785 ret
= ipc_importance_task_hold_internal_assertion(task_imp
, count
);
3786 return (KERN_SUCCESS
!= ret
) ? ENOTSUP
: 0;
3790 task_importance_hold_internal_assertion(task_t target_task
, uint32_t count
)
3792 ipc_importance_task_t task_imp
;
3795 /* may be first time, so allow for possible importance setup */
3796 task_imp
= ipc_importance_for_task(target_task
, FALSE
);
3797 if (IIT_NULL
== task_imp
) {
3800 ret
= ipc_importance_task_hold_internal_assertion(task_imp
, count
);
3801 ipc_importance_task_release(task_imp
);
3803 return (KERN_SUCCESS
!= ret
) ? ENOTSUP
: 0;
int
task_importance_hold_file_lock_assertion(task_t target_task, uint32_t count)
{
	ipc_importance_task_t task_imp;
	kern_return_t ret;

	/* may be first time, so allow for possible importance setup */
	task_imp = ipc_importance_for_task(target_task, FALSE);
	if (IIT_NULL == task_imp) {
		return EOVERFLOW;
	}
	ret = ipc_importance_task_hold_file_lock_assertion(task_imp, count);
	ipc_importance_task_release(task_imp);

	return (KERN_SUCCESS != ret) ? ENOTSUP : 0;
}
int
task_importance_hold_legacy_external_assertion(task_t target_task, uint32_t count)
{
	ipc_importance_task_t task_imp;
	kern_return_t ret;

	/* must already have set up an importance */
	task_imp = target_task->task_imp_base;
	if (IIT_NULL == task_imp) {
		return EOVERFLOW;
	}
	ret = ipc_importance_task_hold_legacy_external_assertion(task_imp, count);
	return (KERN_SUCCESS != ret) ? ENOTSUP : 0;
}
int
task_importance_drop_internal_assertion(task_t target_task, uint32_t count)
{
	ipc_importance_task_t task_imp;
	kern_return_t ret;

	/* must already have set up an importance */
	task_imp = target_task->task_imp_base;
	if (IIT_NULL == task_imp) {
		return EOVERFLOW;
	}
	ret = ipc_importance_task_drop_internal_assertion(target_task->task_imp_base, count);
	return (KERN_SUCCESS != ret) ? ENOTSUP : 0;
}
int
task_importance_drop_file_lock_assertion(task_t target_task, uint32_t count)
{
	ipc_importance_task_t task_imp;
	kern_return_t ret;

	/* must already have set up an importance */
	task_imp = target_task->task_imp_base;
	if (IIT_NULL == task_imp) {
		return EOVERFLOW;
	}
	ret = ipc_importance_task_drop_file_lock_assertion(target_task->task_imp_base, count);
	return (KERN_SUCCESS != ret) ? EOVERFLOW : 0;
}
int
task_importance_drop_legacy_external_assertion(task_t target_task, uint32_t count)
{
	ipc_importance_task_t task_imp;
	kern_return_t ret;

	/* must already have set up an importance */
	task_imp = target_task->task_imp_base;
	if (IIT_NULL == task_imp) {
		return EOVERFLOW;
	}
	ret = ipc_importance_task_drop_legacy_external_assertion(task_imp, count);
	return (KERN_SUCCESS != ret) ? EOVERFLOW : 0;
}
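
/*
 * Illustrative sketch (added commentary, not original xnu code): the hold and
 * drop calls above are expected to be balanced. For example, a subsystem that
 * wants the holder of a contended file lock to stay boosted for the duration
 * of a critical region might pair them like this; the wrapper name and the
 * simplified error handling are hypothetical.
 *
 *	static int
 *	example_boosted_file_lock_section(task_t holder)
 *	{
 *		int error = task_importance_hold_file_lock_assertion(holder, 1);
 *		if (error != 0)
 *			return error;
 *
 *		// ... do the work that benefits from the holder task being boosted ...
 *
 *		return task_importance_drop_file_lock_assertion(holder, 1);
 *	}
 */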
void
task_add_importance_watchport(task_t task, mach_port_t port, int *boostp)
{
	int boost = 0;

	__impdebug_only int released_pid = 0;
	__impdebug_only int pid = task_pid(task);

	ipc_importance_task_t release_imp_task = IIT_NULL;

	if (IP_VALID(port) != 0) {
		ipc_importance_task_t new_imp_task = ipc_importance_for_task(task, FALSE);

		ip_lock(port);

		/*
		 * The port must have been marked tempowner already.
		 * This also filters out ports whose receive rights
		 * are already enqueued in a message, as you can't
		 * change the right's destination once it's already
		 * in transit.
		 */
		if (port->ip_tempowner != 0) {
			assert(port->ip_impdonation != 0);

			boost = port->ip_impcount;
			if (IIT_NULL != port->ip_imp_task) {
				/*
				 * if this port is already bound to a task,
				 * release the task reference and drop any
				 * watchport-forwarded boosts
				 */
				release_imp_task = port->ip_imp_task;
				port->ip_imp_task = IIT_NULL;
			}

			/* mark the port as watching another task (reference held in port->ip_imp_task) */
			if (ipc_importance_task_is_marked_receiver(new_imp_task)) {
				port->ip_imp_task = new_imp_task;
				new_imp_task = IIT_NULL;
			}
		}
		ip_unlock(port);

		if (IIT_NULL != new_imp_task) {
			ipc_importance_task_release(new_imp_task);
		}

		if (IIT_NULL != release_imp_task) {
			if (boost > 0)
				ipc_importance_task_drop_internal_assertion(release_imp_task, boost);

			// released_pid = task_pid(release_imp_task); /* TODO: Need ref-safe way to get pid */
			ipc_importance_task_release(release_imp_task);
		}
#if IMPORTANCE_DEBUG
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_WATCHPORT, 0)) | DBG_FUNC_NONE,
		                          proc_selfpid(), pid, boost, released_pid, 0);
#endif /* IMPORTANCE_DEBUG */
	}

	*boostp = boost;
	return;
}

#endif /* IMPORTANCE_INHERITANCE */
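
/*
 * Illustrative sketch (added commentary, not original xnu code): a caller
 * registering a watchport passes the port in and gets back, via boostp, the
 * count of importance assertions that were forwarded through the port when it
 * was rebound. The surrounding function name is hypothetical.
 *
 *	static void
 *	example_register_watchport(task_t watched_task, mach_port_t port)
 *	{
 *		int forwarded_boost = 0;
 *
 *		task_add_importance_watchport(watched_task, port, &forwarded_boost);
 *
 *		// forwarded_boost now reflects the boosts the watched task picked up
 *		// from the port, so the caller can account for them.
 *	}
 */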
/*
 * Routines for VM to query task importance
 */


/*
 * Order to be considered while estimating importance
 * for low memory notification and purging purgeable memory.
 */
#define TASK_IMPORTANCE_FOREGROUND     4
#define TASK_IMPORTANCE_NOTDARWINBG    1
/*
 * (Un)Mark the task as a privileged listener for memory notifications.
 * If marked, this task will be among the first to be notified, ahead of
 * the bulk of all other tasks, when the system enters a pressure level
 * of interest to this task.
 */
int
task_low_mem_privileged_listener(task_t task, boolean_t new_value, boolean_t *old_value)
{
	if (old_value != NULL) {
		*old_value = (boolean_t)task->low_mem_privileged_listener;
	} else {
		task_lock(task);
		task->low_mem_privileged_listener = (uint32_t)new_value;
		task_unlock(task);
	}

	return 0;
}
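
/*
 * Illustrative sketch (added commentary, not original xnu code): the routine
 * above serves as both getter and setter depending on whether old_value is
 * supplied. The caller shown here is hypothetical.
 *
 *	static void
 *	example_toggle_privileged_listener(task_t task)
 *	{
 *		boolean_t was_privileged = FALSE;
 *
 *		// Read the current value without changing it.
 *		(void)task_low_mem_privileged_listener(task, FALSE, &was_privileged);
 *
 *		// Passing NULL for old_value switches the call into "set" mode.
 *		(void)task_low_mem_privileged_listener(task, !was_privileged, NULL);
 *	}
 */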
/*
 * Checks if the task is already notified.
 *
 * Condition: task lock should be held while calling this function.
 */
boolean_t
task_has_been_notified(task_t task, int pressurelevel)
{
	if (task == NULL) {
		return FALSE;
	}

	if (pressurelevel == kVMPressureWarning)
		return (task->low_mem_notified_warn ? TRUE : FALSE);
	else if (pressurelevel == kVMPressureCritical)
		return (task->low_mem_notified_critical ? TRUE : FALSE);
	else
		return FALSE;
}
/*
 * Checks if the task is used for purging.
 *
 * Condition: task lock should be held while calling this function.
 */
boolean_t
task_used_for_purging(task_t task, int pressurelevel)
{
	if (task == NULL) {
		return FALSE;
	}

	if (pressurelevel == kVMPressureWarning)
		return (task->purged_memory_warn ? TRUE : FALSE);
	else if (pressurelevel == kVMPressureCritical)
		return (task->purged_memory_critical ? TRUE : FALSE);
	else
		return FALSE;
}
/*
 * Mark the task as notified with memory notification.
 *
 * Condition: task lock should be held while calling this function.
 */
void
task_mark_has_been_notified(task_t task, int pressurelevel)
{
	if (task == NULL) {
		return;
	}

	if (pressurelevel == kVMPressureWarning)
		task->low_mem_notified_warn = 1;
	else if (pressurelevel == kVMPressureCritical)
		task->low_mem_notified_critical = 1;
}
/*
 * Mark the task as purged.
 *
 * Condition: task lock should be held while calling this function.
 */
void
task_mark_used_for_purging(task_t task, int pressurelevel)
{
	if (task == NULL) {
		return;
	}

	if (pressurelevel == kVMPressureWarning)
		task->purged_memory_warn = 1;
	else if (pressurelevel == kVMPressureCritical)
		task->purged_memory_critical = 1;
}
/*
 * Mark the task eligible for low memory notification.
 *
 * Condition: task lock should be held while calling this function.
 */
void
task_clear_has_been_notified(task_t task, int pressurelevel)
{
	if (task == NULL) {
		return;
	}

	if (pressurelevel == kVMPressureWarning)
		task->low_mem_notified_warn = 0;
	else if (pressurelevel == kVMPressureCritical)
		task->low_mem_notified_critical = 0;
}
/*
 * Mark the task eligible for purging its purgeable memory.
 *
 * Condition: task lock should be held while calling this function.
 */
void
task_clear_used_for_purging(task_t task)
{
	if (task == NULL) {
		return;
	}

	task->purged_memory_warn = 0;
	task->purged_memory_critical = 0;
}
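
/*
 * Illustrative sketch (added commentary, not original xnu code): the expected
 * lifecycle of the notification marks across a memory-pressure episode. The
 * driver function is hypothetical; the task-lock requirement comes from the
 * conditions stated on the routines above.
 *
 *	static void
 *	example_pressure_episode(task_t task, int pressurelevel)
 *	{
 *		task_lock(task);
 *		if (!task_has_been_notified(task, pressurelevel)) {
 *			// ... deliver the memory-pressure notification to the task ...
 *			task_mark_has_been_notified(task, pressurelevel);
 *		}
 *		task_unlock(task);
 *
 *		// Once the system leaves the pressure level, re-arm the task so it
 *		// can be notified and purged again in a later episode.
 *		task_lock(task);
 *		task_clear_has_been_notified(task, pressurelevel);
 *		task_clear_used_for_purging(task);
 *		task_unlock(task);
 *	}
 */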
/*
 * Estimate task importance for purging its purgeable memory
 * and low memory notification.
 *
 * Importance is calculated in the following order of criteria:
 * -Task role : Background vs Foreground
 * -Boost status: Not boosted vs Boosted
 * -Darwin BG status.
 *
 * Returns: Estimated task importance. A less important task will have a
 *          lower estimated importance.
 */
int
task_importance_estimate(task_t task)
{
	int task_importance = 0;

	if (task == NULL) {
		return 0;
	}

	if (proc_get_effective_task_policy(task, TASK_POLICY_ROLE) == TASK_FOREGROUND_APPLICATION)
		task_importance += TASK_IMPORTANCE_FOREGROUND;

	if (proc_get_effective_task_policy(task, TASK_POLICY_DARWIN_BG) == 0)
		task_importance += TASK_IMPORTANCE_NOTDARWINBG;

	return task_importance;
}
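
/*
 * Worked example (added commentary, not original xnu code): with the weights
 * defined above, a foreground application that is not Darwin-BG scores
 * TASK_IMPORTANCE_FOREGROUND + TASK_IMPORTANCE_NOTDARWINBG = 4 + 1 = 5, a
 * background task that is not Darwin-BG scores 1, and a Darwin-BG background
 * task scores 0, so callers comparing estimates can prefer the lowest-scoring
 * tasks when deciding whom to notify or purge first.
 */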