/*
 * Copyright (c) 2000-2016 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/policy_internal.h>
#include <mach/task_policy.h>

#include <mach/mach_types.h>
#include <mach/task_server.h>

#include <kern/host.h>                  /* host_priv_self()        */
#include <mach/host_priv.h>             /* host_get_special_port() */
#include <mach/host_special_ports.h>    /* RESOURCE_NOTIFY_PORT    */
#include <kern/sched.h>
#include <kern/task.h>
#include <mach/thread_policy.h>
#include <sys/errno.h>
#include <sys/resource.h>
#include <machine/limits.h>
#include <kern/ledger.h>
#include <kern/thread_call.h>
#include <kern/sfi.h>
#include <kern/coalition.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif

#if IMPORTANCE_INHERITANCE
#include <ipc/ipc_importance.h>
#if IMPORTANCE_DEBUG
#include <mach/machine/sdt.h>
#endif /* IMPORTANCE_DEBUG */
#endif /* IMPORTANCE_INHERITANCE */

#include <sys/kdebug.h>

/*
 * Task Policy
 *
 * This subsystem manages task and thread IO priority and backgrounding,
 * as well as importance inheritance, process suppression, task QoS, and apptype.
 * These properties have a surprising number of complex interactions, so they are
 * centralized here in one state machine to simplify the implementation of those interactions.
 *
 * Architecture:
 * Threads and tasks have two policy fields: requested, effective.
 * Requested represents the wishes of each interface that influences task policy.
 * Effective represents the distillation of that policy into a set of behaviors.
 *
 * Each thread making a modification in the policy system passes a 'pending' struct,
 * which tracks updates that will be applied after dropping the policy engine lock.
 *
 * Each interface that has an input into the task policy state machine controls a field in requested.
 * If the interface has a getter, it returns what is in the field in requested, but that is
 * not necessarily what is actually in effect.
 *
 * All kernel subsystems that behave differently based on task policy call into
 * the proc_get_effective_(task|thread)_policy functions, which return the decision of the task policy state machine
 * for that subsystem by querying only the 'effective' field.
 *
 * Policy change operations:
 * Here are the steps to change a policy on a task or thread:
 * 1) Lock task
 * 2) Change requested field for the relevant policy
 * 3) Run a task policy update, which recalculates effective based on requested,
 *    then takes a diff between the old and new versions of requested and calls the relevant
 *    other subsystems to apply these changes, and updates the pending field.
 * 4) Unlock task
 * 5) Run task policy update complete, which looks at the pending field to update
 *    subsystems which cannot be touched while holding the task lock.
 *
 * To add a new requested policy, add the field in the requested struct, the flavor in task.h,
 * the setter and getter in proc_(set|get)_task_policy*,
 * then set up the effects of that behavior in task_policy_update*. If the policy manifests
 * itself as a distinct effective policy, add it to the effective struct and add it to the
 * proc_get_effective_task_policy accessor.
 *
 * Most policies are set via proc_set_task_policy, but policies that don't fit that interface
 * roll their own lock/set/update/unlock/complete code inside this file.
 *
 *
 * Suppression policy
 *
 * These are a set of behaviors that can be requested for a task. They currently have specific
 * implied actions when they're enabled, but they may be made customizable in the future.
 *
 * When the affected task is boosted, we temporarily disable the suppression behaviors
 * so that the affected process has a chance to run so it can call the API to permanently
 * disable the suppression behaviors.
 *
 * Locking
 *
 * Changing task policy on a task takes the task lock.
 * Changing task policy on a thread takes the thread mutex.
 * Task policy changes that affect threads will take each thread's mutex to update it if necessary.
 *
 * Querying the effective policy does not take a lock, because callers
 * may run in interrupt context or other places where locks are not OK.
 *
 * This means that any notification of state change needs to be externally synchronized.
 * We do this by idempotent callouts after the state has changed to ask
 * other subsystems to update their view of the world.
 *
 * TODO: Move all cpu/wakes/io monitor code into a separate file
 * TODO: Move all importance code over to the importance subsystem
 * TODO: Move all taskwatch code into a separate file
 * TODO: Move all VM importance code into a separate file
 */
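
/*
 * Illustrative sketch (editorial, not part of the build) of the canonical
 * five-step policy-change sequence described above, as implemented by
 * proc_set_task_policy() later in this file. The helpers named are the real
 * ones from this file; the wrapper function itself is hypothetical:
 *
 *     static void example_background_task(task_t task)
 *     {
 *         struct task_pend_token pend_token = {};
 *
 *         task_lock(task);                                         // 1) lock task
 *         proc_set_task_policy_locked(task, TASK_POLICY_INTERNAL,
 *             TASK_POLICY_DARWIN_BG, 1, 0);                        // 2) change requested
 *         task_policy_update_locked(task, &pend_token);            // 3) recompute effective
 *         task_unlock(task);                                       // 4) unlock task
 *         task_policy_update_complete_unlocked(task, &pend_token); // 5) pended updates
 *     }
 */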

/* Task policy related helper functions */
static void proc_set_task_policy_locked(task_t task, int category, int flavor, int value, int value2);

static void task_policy_update_locked(task_t task, task_pend_token_t pend_token);
static void task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_token_t pend_token);

/* For attributes that have two scalars as input/output */
static void proc_set_task_policy2(task_t task, int category, int flavor, int value1, int value2);
static void proc_get_task_policy2(task_t task, int category, int flavor, int *value1, int *value2);

#if CONFIG_SCHED_SFI
static boolean_t task_policy_update_coalition_focal_tasks(task_t task, int prev_role, int next_role);
#endif

static uint64_t task_requested_bitfield(task_t task);
static uint64_t task_effective_bitfield(task_t task);

/* Convenience functions for munging a policy bitfield into a tracepoint */
static uintptr_t trequested_0(task_t task);
static uintptr_t trequested_1(task_t task);
static uintptr_t teffective_0(task_t task);
static uintptr_t teffective_1(task_t task);

/* CPU limits helper functions */
static int task_set_cpuusage(task_t task, uint8_t percentage, uint64_t interval, uint64_t deadline, int scope, int entitled);
static int task_get_cpuusage(task_t task, uint8_t *percentagep, uint64_t *intervalp, uint64_t *deadlinep, int *scope);
static int task_enable_cpumon_locked(task_t task);
static int task_disable_cpumon(task_t task);
static int task_clear_cpuusage_locked(task_t task, int cpumon_entitled);
static int task_apply_resource_actions(task_t task, int type);
static void task_action_cpuusage(thread_call_param_t param0, thread_call_param_t param1);

#ifdef MACH_BSD
typedef struct proc * proc_t;
int proc_pid(void *proc);
extern int proc_selfpid(void);
extern char * proc_name_address(void *p);
extern char * proc_best_name(proc_t proc);

extern int proc_pidpathinfo_internal(proc_t p, uint64_t arg,
                                     char *buffer, uint32_t buffersize,
                                     int32_t *retval);
#endif /* MACH_BSD */



/* Importance Inheritance related helper functions */

#if IMPORTANCE_INHERITANCE

static void task_importance_mark_live_donor(task_t task, boolean_t donating);
static void task_importance_mark_receiver(task_t task, boolean_t receiving);
static void task_importance_mark_denap_receiver(task_t task, boolean_t denap);

static boolean_t task_is_marked_live_importance_donor(task_t task);
static boolean_t task_is_importance_receiver(task_t task);
static boolean_t task_is_importance_denap_receiver(task_t task);

static int task_importance_hold_internal_assertion(task_t target_task, uint32_t count);

static void task_add_importance_watchport(task_t task, mach_port_t port, int *boostp);
static void task_importance_update_live_donor(task_t target_task);

static void task_set_boost_locked(task_t task, boolean_t boost_active);

#endif /* IMPORTANCE_INHERITANCE */

#if IMPORTANCE_DEBUG
#define __impdebug_only
#else
#define __impdebug_only __unused
#endif

#if IMPORTANCE_INHERITANCE
#define __imp_only
#else
#define __imp_only __unused
#endif

/*
 * Default parameters for certain policies
 */

int proc_standard_daemon_tier = THROTTLE_LEVEL_TIER1;
int proc_suppressed_disk_tier = THROTTLE_LEVEL_TIER1;
int proc_tal_disk_tier = THROTTLE_LEVEL_TIER1;

int proc_graphics_timer_qos = (LATENCY_QOS_TIER_0 & 0xFF);

const int proc_default_bg_iotier = THROTTLE_LEVEL_TIER2;

/* Latency/throughput QoS fields remain zeroed, i.e. TIER_UNSPECIFIED at creation */
const struct task_requested_policy default_task_requested_policy = {
    .trp_bg_iotier = proc_default_bg_iotier
};
const struct task_effective_policy default_task_effective_policy = {};

/*
 * Default parameters for CPU usage monitor.
 *
 * Default setting is 50% over 3 minutes.
 */
#define DEFAULT_CPUMON_PERCENTAGE 50
#define DEFAULT_CPUMON_INTERVAL   (3 * 60)

uint8_t  proc_max_cpumon_percentage;
uint64_t proc_max_cpumon_interval;


kern_return_t
qos_latency_policy_validate(task_latency_qos_t ltier) {
    if ((ltier != LATENCY_QOS_TIER_UNSPECIFIED) &&
        ((ltier > LATENCY_QOS_TIER_5) || (ltier < LATENCY_QOS_TIER_0)))
        return KERN_INVALID_ARGUMENT;

    return KERN_SUCCESS;
}

kern_return_t
qos_throughput_policy_validate(task_throughput_qos_t ttier) {
    if ((ttier != THROUGHPUT_QOS_TIER_UNSPECIFIED) &&
        ((ttier > THROUGHPUT_QOS_TIER_5) || (ttier < THROUGHPUT_QOS_TIER_0)))
        return KERN_INVALID_ARGUMENT;

    return KERN_SUCCESS;
}

static kern_return_t
task_qos_policy_validate(task_qos_policy_t qosinfo, mach_msg_type_number_t count) {
    if (count < TASK_QOS_POLICY_COUNT)
        return KERN_INVALID_ARGUMENT;

    task_latency_qos_t ltier = qosinfo->task_latency_qos_tier;
    task_throughput_qos_t ttier = qosinfo->task_throughput_qos_tier;

    kern_return_t kr = qos_latency_policy_validate(ltier);

    if (kr != KERN_SUCCESS)
        return kr;

    kr = qos_throughput_policy_validate(ttier);

    return kr;
}

uint32_t
qos_extract(uint32_t qv) {
    return (qv & 0xFF);
}

uint32_t
qos_latency_policy_package(uint32_t qv) {
    return (qv == LATENCY_QOS_TIER_UNSPECIFIED) ? LATENCY_QOS_TIER_UNSPECIFIED : ((0xFF << 16) | qv);
}

uint32_t
qos_throughput_policy_package(uint32_t qv) {
    return (qv == THROUGHPUT_QOS_TIER_UNSPECIFIED) ? THROUGHPUT_QOS_TIER_UNSPECIFIED : ((0xFE << 16) | qv);
}
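
/*
 * Worked example of the extract/package round-trip (editorial), assuming the
 * LATENCY_QOS_TIER_* / THROUGHPUT_QOS_TIER_* encodings from
 * <mach/thread_policy.h>, where the low byte carries the tier index and the
 * high bits carry a 0xFF (latency) or 0xFE (throughput) tag:
 *
 *     qos_extract(LATENCY_QOS_TIER_0)  == 1    // ((0xFF << 16) | 1) & 0xFF
 *     qos_latency_policy_package(1)    == LATENCY_QOS_TIER_0
 *     qos_throughput_policy_package(1) == THROUGHPUT_QOS_TIER_0
 */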

/* TEMPORARY boot-arg controlling task_policy suppression (App Nap) */
static boolean_t task_policy_suppression_disable = FALSE;

kern_return_t
task_policy_set(
    task_t                  task,
    task_policy_flavor_t    flavor,
    task_policy_t           policy_info,
    mach_msg_type_number_t  count)
{
    kern_return_t result = KERN_SUCCESS;

    if (task == TASK_NULL || task == kernel_task)
        return (KERN_INVALID_ARGUMENT);

    switch (flavor) {

    case TASK_CATEGORY_POLICY: {
        task_category_policy_t info = (task_category_policy_t)policy_info;

        if (count < TASK_CATEGORY_POLICY_COUNT)
            return (KERN_INVALID_ARGUMENT);


        switch (info->role) {
        case TASK_FOREGROUND_APPLICATION:
        case TASK_BACKGROUND_APPLICATION:
        case TASK_DEFAULT_APPLICATION:
            proc_set_task_policy(task,
                                 TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE,
                                 info->role);
            break;

        case TASK_CONTROL_APPLICATION:
            if (task != current_task() || task->sec_token.val[0] != 0)
                result = KERN_INVALID_ARGUMENT;
            else
                proc_set_task_policy(task,
                                     TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE,
                                     info->role);
            break;

        case TASK_GRAPHICS_SERVER:
            /* TODO: Restrict this role to FCFS <rdar://problem/12552788> */
            if (task != current_task() || task->sec_token.val[0] != 0)
                result = KERN_INVALID_ARGUMENT;
            else
                proc_set_task_policy(task,
                                     TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE,
                                     info->role);
            break;
        default:
            result = KERN_INVALID_ARGUMENT;
            break;
        } /* switch (info->role) */

        break;
    }

    /* Desired energy-efficiency/performance "quality-of-service" */
    case TASK_BASE_QOS_POLICY:
    case TASK_OVERRIDE_QOS_POLICY:
    {
        task_qos_policy_t qosinfo = (task_qos_policy_t)policy_info;
        kern_return_t kr = task_qos_policy_validate(qosinfo, count);

        if (kr != KERN_SUCCESS)
            return kr;


        uint32_t lqos = qos_extract(qosinfo->task_latency_qos_tier);
        uint32_t tqos = qos_extract(qosinfo->task_throughput_qos_tier);

        proc_set_task_policy2(task, TASK_POLICY_ATTRIBUTE,
                              flavor == TASK_BASE_QOS_POLICY ? TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS : TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS,
                              lqos, tqos);
    }
    break;

    case TASK_BASE_LATENCY_QOS_POLICY:
    {
        task_qos_policy_t qosinfo = (task_qos_policy_t)policy_info;
        kern_return_t kr = task_qos_policy_validate(qosinfo, count);

        if (kr != KERN_SUCCESS)
            return kr;

        uint32_t lqos = qos_extract(qosinfo->task_latency_qos_tier);

        proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE, TASK_BASE_LATENCY_QOS_POLICY, lqos);
    }
    break;

    case TASK_BASE_THROUGHPUT_QOS_POLICY:
    {
        task_qos_policy_t qosinfo = (task_qos_policy_t)policy_info;
        kern_return_t kr = task_qos_policy_validate(qosinfo, count);

        if (kr != KERN_SUCCESS)
            return kr;

        uint32_t tqos = qos_extract(qosinfo->task_throughput_qos_tier);

        proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE, TASK_BASE_THROUGHPUT_QOS_POLICY, tqos);
    }
    break;

    case TASK_SUPPRESSION_POLICY:
    {

        task_suppression_policy_t info = (task_suppression_policy_t)policy_info;

        if (count < TASK_SUPPRESSION_POLICY_COUNT)
            return (KERN_INVALID_ARGUMENT);

        struct task_qos_policy qosinfo;

        qosinfo.task_latency_qos_tier = info->timer_throttle;
        qosinfo.task_throughput_qos_tier = info->throughput_qos;

        kern_return_t kr = task_qos_policy_validate(&qosinfo, TASK_QOS_POLICY_COUNT);

        if (kr != KERN_SUCCESS)
            return kr;

        /* TEMPORARY disablement of task suppression */
        if (task_policy_suppression_disable && info->active)
            return KERN_SUCCESS;

        struct task_pend_token pend_token = {};

        task_lock(task);

        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                                  (IMPORTANCE_CODE(IMP_TASK_SUPPRESSION, info->active)) | DBG_FUNC_START,
                                  proc_selfpid(), task_pid(task), trequested_0(task),
                                  trequested_1(task), 0);

        task->requested_policy.trp_sup_active     = (info->active)         ? 1 : 0;
        task->requested_policy.trp_sup_lowpri_cpu = (info->lowpri_cpu)     ? 1 : 0;
        task->requested_policy.trp_sup_timer      = qos_extract(info->timer_throttle);
        task->requested_policy.trp_sup_disk       = (info->disk_throttle)  ? 1 : 0;
        task->requested_policy.trp_sup_throughput = qos_extract(info->throughput_qos);
        task->requested_policy.trp_sup_cpu        = (info->suppressed_cpu) ? 1 : 0;
        task->requested_policy.trp_sup_bg_sockets = (info->background_sockets) ? 1 : 0;

        task_policy_update_locked(task, &pend_token);

        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                                  (IMPORTANCE_CODE(IMP_TASK_SUPPRESSION, info->active)) | DBG_FUNC_END,
                                  proc_selfpid(), task_pid(task), trequested_0(task),
                                  trequested_1(task), 0);

        task_unlock(task);

        task_policy_update_complete_unlocked(task, &pend_token);

        break;

    }

    default:
        result = KERN_INVALID_ARGUMENT;
        break;
    }

    return (result);
}
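
/*
 * Illustrative user-level call (a sketch, not from this file): setting the
 * category role on the current task via the Mach interface, using the
 * task_category_policy_data_t type from <mach/task_policy.h>:
 *
 *     task_category_policy_data_t info = { .role = TASK_FOREGROUND_APPLICATION };
 *     kern_return_t kr = task_policy_set(mach_task_self(),
 *         TASK_CATEGORY_POLICY, (task_policy_t)&info, TASK_CATEGORY_POLICY_COUNT);
 */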

/* Sets BSD 'nice' value on the task */
kern_return_t
task_importance(
    task_t      task,
    integer_t   importance)
{
    if (task == TASK_NULL || task == kernel_task)
        return (KERN_INVALID_ARGUMENT);

    task_lock(task);

    if (!task->active) {
        task_unlock(task);

        return (KERN_TERMINATED);
    }

    if (proc_get_effective_task_policy(task, TASK_POLICY_ROLE) >= TASK_CONTROL_APPLICATION) {
        task_unlock(task);

        return (KERN_INVALID_ARGUMENT);
    }

    task->importance = importance;

    struct task_pend_token pend_token = {};

    task_policy_update_locked(task, &pend_token);

    task_unlock(task);

    task_policy_update_complete_unlocked(task, &pend_token);

    return (KERN_SUCCESS);
}
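
/*
 * Sign convention (per the "factor in 'nice' value" step in
 * task_policy_update_internal_locked() below): the importance value is added
 * to the task's base priority, so e.g. task_importance(task, 4) raises the
 * task's scheduling priority by four bands and task_importance(task, -4)
 * lowers it, subject to the computed max_priority ceiling.
 */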

kern_return_t
task_policy_get(
    task_t                  task,
    task_policy_flavor_t    flavor,
    task_policy_t           policy_info,
    mach_msg_type_number_t  *count,
    boolean_t               *get_default)
{
    if (task == TASK_NULL || task == kernel_task)
        return (KERN_INVALID_ARGUMENT);

    switch (flavor) {

    case TASK_CATEGORY_POLICY:
    {
        task_category_policy_t info = (task_category_policy_t)policy_info;

        if (*count < TASK_CATEGORY_POLICY_COUNT)
            return (KERN_INVALID_ARGUMENT);

        if (*get_default)
            info->role = TASK_UNSPECIFIED;
        else
            info->role = proc_get_task_policy(task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE);
        break;
    }

    case TASK_BASE_QOS_POLICY: /* FALLTHRU */
    case TASK_OVERRIDE_QOS_POLICY:
    {
        task_qos_policy_t info = (task_qos_policy_t)policy_info;

        if (*count < TASK_QOS_POLICY_COUNT)
            return (KERN_INVALID_ARGUMENT);

        if (*get_default) {
            info->task_latency_qos_tier = LATENCY_QOS_TIER_UNSPECIFIED;
            info->task_throughput_qos_tier = THROUGHPUT_QOS_TIER_UNSPECIFIED;
        } else if (flavor == TASK_BASE_QOS_POLICY) {
            int value1, value2;

            proc_get_task_policy2(task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS, &value1, &value2);

            info->task_latency_qos_tier = qos_latency_policy_package(value1);
            info->task_throughput_qos_tier = qos_throughput_policy_package(value2);

        } else if (flavor == TASK_OVERRIDE_QOS_POLICY) {
            int value1, value2;

            proc_get_task_policy2(task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS, &value1, &value2);

            info->task_latency_qos_tier = qos_latency_policy_package(value1);
            info->task_throughput_qos_tier = qos_throughput_policy_package(value2);
        }

        break;
    }

    case TASK_POLICY_STATE:
    {
        task_policy_state_t info = (task_policy_state_t)policy_info;

        if (*count < TASK_POLICY_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);

        /* Only root can get this info */
        if (current_task()->sec_token.val[0] != 0)
            return KERN_PROTECTION_FAILURE;

        if (*get_default) {
            info->requested = 0;
            info->effective = 0;
            info->pending = 0;
            info->imp_assertcnt = 0;
            info->imp_externcnt = 0;
            info->flags = 0;
            info->imp_transitions = 0;
        } else {
            task_lock(task);

            info->requested = task_requested_bitfield(task);
            info->effective = task_effective_bitfield(task);
            info->pending = 0;

            info->tps_requested_policy = *(uint64_t*)(&task->requested_policy);
            info->tps_effective_policy = *(uint64_t*)(&task->effective_policy);

            info->flags = 0;
            if (task->task_imp_base != NULL) {
                info->imp_assertcnt = task->task_imp_base->iit_assertcnt;
                info->imp_externcnt = IIT_EXTERN(task->task_imp_base);
                info->flags |= (task_is_marked_importance_receiver(task) ? TASK_IMP_RECEIVER : 0);
                info->flags |= (task_is_marked_importance_denap_receiver(task) ? TASK_DENAP_RECEIVER : 0);
                info->flags |= (task_is_marked_importance_donor(task) ? TASK_IMP_DONOR : 0);
                info->flags |= (task_is_marked_live_importance_donor(task) ? TASK_IMP_LIVE_DONOR : 0);
                info->imp_transitions = task->task_imp_base->iit_transitions;
            } else {
                info->imp_assertcnt = 0;
                info->imp_externcnt = 0;
                info->imp_transitions = 0;
            }
            task_unlock(task);
        }

        break;
    }

    case TASK_SUPPRESSION_POLICY:
    {
        task_suppression_policy_t info = (task_suppression_policy_t)policy_info;

        if (*count < TASK_SUPPRESSION_POLICY_COUNT)
            return (KERN_INVALID_ARGUMENT);

        task_lock(task);

        if (*get_default) {
            info->active            = 0;
            info->lowpri_cpu        = 0;
            info->timer_throttle    = LATENCY_QOS_TIER_UNSPECIFIED;
            info->disk_throttle     = 0;
            info->cpu_limit         = 0;
            info->suspend           = 0;
            info->throughput_qos    = 0;
            info->suppressed_cpu    = 0;
        } else {
            info->active            = task->requested_policy.trp_sup_active;
            info->lowpri_cpu        = task->requested_policy.trp_sup_lowpri_cpu;
            info->timer_throttle    = qos_latency_policy_package(task->requested_policy.trp_sup_timer);
            info->disk_throttle     = task->requested_policy.trp_sup_disk;
            info->cpu_limit         = 0;
            info->suspend           = 0;
            info->throughput_qos    = qos_throughput_policy_package(task->requested_policy.trp_sup_throughput);
            info->suppressed_cpu    = task->requested_policy.trp_sup_cpu;
            info->background_sockets = task->requested_policy.trp_sup_bg_sockets;
        }

        task_unlock(task);
        break;
    }

    default:
        return (KERN_INVALID_ARGUMENT);
    }

    return (KERN_SUCCESS);
}
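
/*
 * Illustrative user-level read-back (a sketch, not from this file): count and
 * get_default are in/out parameters, and passing get_default = TRUE asks for
 * the flavor's default values rather than the task's current settings:
 *
 *     task_category_policy_data_t info;
 *     mach_msg_type_number_t count = TASK_CATEGORY_POLICY_COUNT;
 *     boolean_t get_default = FALSE;
 *     kern_return_t kr = task_policy_get(mach_task_self(), TASK_CATEGORY_POLICY,
 *         (task_policy_t)&info, &count, &get_default);
 */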

/*
 * Called at task creation
 * We calculate the correct effective but don't apply it to anything yet.
 * The threads, etc will inherit from the task as they get created.
 */
void
task_policy_create(task_t task, task_t parent_task)
{
    task->requested_policy.trp_apptype = parent_task->requested_policy.trp_apptype;

    task->requested_policy.trp_int_darwinbg = parent_task->requested_policy.trp_int_darwinbg;
    task->requested_policy.trp_ext_darwinbg = parent_task->requested_policy.trp_ext_darwinbg;
    task->requested_policy.trp_int_iotier = parent_task->requested_policy.trp_int_iotier;
    task->requested_policy.trp_ext_iotier = parent_task->requested_policy.trp_ext_iotier;
    task->requested_policy.trp_int_iopassive = parent_task->requested_policy.trp_int_iopassive;
    task->requested_policy.trp_ext_iopassive = parent_task->requested_policy.trp_ext_iopassive;
    task->requested_policy.trp_bg_iotier = parent_task->requested_policy.trp_bg_iotier;
    task->requested_policy.trp_terminated = parent_task->requested_policy.trp_terminated;
    task->requested_policy.trp_qos_clamp = parent_task->requested_policy.trp_qos_clamp;

    if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE && !task_is_exec_copy(task)) {
        /* Do not update the apptype for exec copy task */
        if (parent_task->requested_policy.trp_boosted) {
            task->requested_policy.trp_apptype = TASK_APPTYPE_DAEMON_INTERACTIVE;
            task_importance_mark_donor(task, TRUE);
        } else {
            task->requested_policy.trp_apptype = TASK_APPTYPE_DAEMON_BACKGROUND;
            task_importance_mark_receiver(task, FALSE);
        }
    }

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                              (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_TASK))) | DBG_FUNC_START,
                              task_pid(task), teffective_0(task),
                              teffective_1(task), task->priority, 0);

    task_policy_update_internal_locked(task, TRUE, NULL);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                              (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_TASK))) | DBG_FUNC_END,
                              task_pid(task), teffective_0(task),
                              teffective_1(task), task->priority, 0);

    task_importance_update_live_donor(task);
}


static void
task_policy_update_locked(task_t task, task_pend_token_t pend_token)
{
    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                              (IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_TASK) | DBG_FUNC_START),
                              task_pid(task), teffective_0(task),
                              teffective_1(task), task->priority, 0);

    task_policy_update_internal_locked(task, FALSE, pend_token);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                              (IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_TASK)) | DBG_FUNC_END,
                              task_pid(task), teffective_0(task),
                              teffective_1(task), task->priority, 0);
}

/*
 * One state update function TO RULE THEM ALL
 *
 * This function updates the task or thread effective policy fields
 * and pushes the results to the relevant subsystems.
 *
 * Must call update_complete after unlocking the task,
 * as some subsystems cannot be updated while holding the task lock.
 *
 * Called with task locked, not thread
 */

static void
task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_token_t pend_token)
{
    /*
     * Step 1:
     *  Gather requested policy
     */

    struct task_requested_policy requested = task->requested_policy;

    /*
     * Step 2:
     *  Calculate new effective policies from requested policy and task state
     *  Rules:
     *      Don't change requested, it won't take effect
     */

    struct task_effective_policy next = {};

    /* Update task role */
    next.tep_role = requested.trp_role;

    /* Set task qos clamp and ceiling */
    next.tep_qos_clamp = requested.trp_qos_clamp;

    if (requested.trp_apptype == TASK_APPTYPE_APP_DEFAULT ||
        requested.trp_apptype == TASK_APPTYPE_APP_TAL) {

        switch (next.tep_role) {
        case TASK_FOREGROUND_APPLICATION:
            /* Foreground apps get urgent scheduler priority */
            next.tep_qos_ui_is_urgent = 1;
            next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED;
            break;

        case TASK_BACKGROUND_APPLICATION:
            /* This is really 'non-focal but on-screen' */
            next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED;
            break;

        case TASK_DEFAULT_APPLICATION:
            /* This is 'may render UI but we don't know if it's focal/nonfocal' */
            next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED;
            break;

        case TASK_NONUI_APPLICATION:
            /* i.e. 'off-screen' */
            next.tep_qos_ceiling = THREAD_QOS_LEGACY;
            break;

        case TASK_CONTROL_APPLICATION:
        case TASK_GRAPHICS_SERVER:
            next.tep_qos_ui_is_urgent = 1;
            next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED;
            break;

        case TASK_THROTTLE_APPLICATION:
            /* i.e. 'TAL launch' */
            next.tep_qos_ceiling = THREAD_QOS_UTILITY;
            break;

        case TASK_UNSPECIFIED:
        default:
            /* Apps that don't have an application role get
             * USER_INTERACTIVE and USER_INITIATED squashed to LEGACY */
            next.tep_qos_ceiling = THREAD_QOS_LEGACY;
            break;
        }
    } else {
        /* Daemons get USER_INTERACTIVE squashed to USER_INITIATED */
        next.tep_qos_ceiling = THREAD_QOS_USER_INITIATED;
    }

    /* Calculate DARWIN_BG */
    boolean_t wants_darwinbg = FALSE;
    boolean_t wants_all_sockets_bg = FALSE; /* Do I want my existing sockets to be bg */
    boolean_t wants_watchersbg = FALSE;     /* Do I want my pidbound threads to be bg */

    /*
     * If DARWIN_BG has been requested at either level, it's engaged.
     * Only true DARWIN_BG changes cause watchers to transition;
     * backgrounding due solely to apptype does not.
     */
    if (requested.trp_int_darwinbg || requested.trp_ext_darwinbg)
        wants_watchersbg = wants_all_sockets_bg = wants_darwinbg = TRUE;

    /* Background TAL apps are throttled when TAL is enabled */
    if (requested.trp_apptype == TASK_APPTYPE_APP_TAL &&
        requested.trp_role == TASK_BACKGROUND_APPLICATION &&
        requested.trp_tal_enabled == 1) {
        next.tep_tal_engaged = 1;
    }

    if ((requested.trp_apptype == TASK_APPTYPE_APP_DEFAULT ||
         requested.trp_apptype == TASK_APPTYPE_APP_TAL) &&
        requested.trp_role == TASK_THROTTLE_APPLICATION) {
        next.tep_tal_engaged = 1;
    }

    /* Adaptive daemons are DARWIN_BG unless boosted, and don't get network throttled. */
    if (requested.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE &&
        requested.trp_boosted == 0)
        wants_darwinbg = TRUE;

    /* Background daemons are always DARWIN_BG, no exceptions, and don't get network throttled. */
    if (requested.trp_apptype == TASK_APPTYPE_DAEMON_BACKGROUND)
        wants_darwinbg = TRUE;

    if (next.tep_qos_clamp == THREAD_QOS_BACKGROUND || next.tep_qos_clamp == THREAD_QOS_MAINTENANCE)
        wants_darwinbg = TRUE;

    /* Calculate side effects of DARWIN_BG */

    if (wants_darwinbg) {
        next.tep_darwinbg = 1;
        /* darwinbg tasks always create bg sockets, but we don't always loop over all sockets */
        next.tep_new_sockets_bg = 1;
        next.tep_lowpri_cpu = 1;
    }

    if (wants_all_sockets_bg)
        next.tep_all_sockets_bg = 1;

    if (wants_watchersbg)
        next.tep_watchers_bg = 1;

    /* Calculate low CPU priority */

    boolean_t wants_lowpri_cpu = FALSE;

    if (wants_darwinbg)
        wants_lowpri_cpu = TRUE;

    if (next.tep_tal_engaged)
        wants_lowpri_cpu = TRUE;

    if (requested.trp_sup_lowpri_cpu && requested.trp_boosted == 0)
        wants_lowpri_cpu = TRUE;

    if (wants_lowpri_cpu)
        next.tep_lowpri_cpu = 1;

    /* Calculate IO policy */

    /* Update BG IO policy (so we can see if it has changed) */
    next.tep_bg_iotier = requested.trp_bg_iotier;

    int iopol = THROTTLE_LEVEL_TIER0;

    if (wants_darwinbg)
        iopol = MAX(iopol, requested.trp_bg_iotier);

    if (requested.trp_apptype == TASK_APPTYPE_DAEMON_STANDARD)
        iopol = MAX(iopol, proc_standard_daemon_tier);

    if (requested.trp_sup_disk && requested.trp_boosted == 0)
        iopol = MAX(iopol, proc_suppressed_disk_tier);

    if (next.tep_tal_engaged)
        iopol = MAX(iopol, proc_tal_disk_tier);

    if (next.tep_qos_clamp != THREAD_QOS_UNSPECIFIED)
        iopol = MAX(iopol, thread_qos_policy_params.qos_iotier[next.tep_qos_clamp]);

    iopol = MAX(iopol, requested.trp_int_iotier);
    iopol = MAX(iopol, requested.trp_ext_iotier);

    next.tep_io_tier = iopol;

    /* Calculate Passive IO policy */

    if (requested.trp_ext_iopassive || requested.trp_int_iopassive)
        next.tep_io_passive = 1;

    /* Calculate suppression-active flag */
    if (requested.trp_sup_active && requested.trp_boosted == 0)
        next.tep_sup_active = 1;

    /* Calculate timer QOS */
    int latency_qos = requested.trp_base_latency_qos;

    if (requested.trp_sup_timer && requested.trp_boosted == 0)
        latency_qos = requested.trp_sup_timer;

    if (next.tep_qos_clamp != THREAD_QOS_UNSPECIFIED)
        latency_qos = MAX(latency_qos, (int)thread_qos_policy_params.qos_latency_qos[next.tep_qos_clamp]);

    if (requested.trp_over_latency_qos != 0)
        latency_qos = requested.trp_over_latency_qos;

    /* Treat the windowserver special */
    if (requested.trp_role == TASK_GRAPHICS_SERVER)
        latency_qos = proc_graphics_timer_qos;

    next.tep_latency_qos = latency_qos;

    /* Calculate throughput QOS */
    int through_qos = requested.trp_base_through_qos;

    if (requested.trp_sup_throughput && requested.trp_boosted == 0)
        through_qos = requested.trp_sup_throughput;

    if (next.tep_qos_clamp != THREAD_QOS_UNSPECIFIED)
        through_qos = MAX(through_qos, (int)thread_qos_policy_params.qos_through_qos[next.tep_qos_clamp]);

    if (requested.trp_over_through_qos != 0)
        through_qos = requested.trp_over_through_qos;

    next.tep_through_qos = through_qos;

    /* Calculate suppressed CPU priority */
    if (requested.trp_sup_cpu && requested.trp_boosted == 0)
        next.tep_suppressed_cpu = 1;

    /*
     * Calculate background sockets
     * Don't take into account boosting to limit transition frequency.
     */
    if (requested.trp_sup_bg_sockets) {
        next.tep_all_sockets_bg = 1;
        next.tep_new_sockets_bg = 1;
    }

    /* Apply SFI Managed class bit */
    next.tep_sfi_managed = requested.trp_sfi_managed;

    /* Calculate 'live donor' status for live importance */
    switch (requested.trp_apptype) {
    case TASK_APPTYPE_APP_TAL:
    case TASK_APPTYPE_APP_DEFAULT:
        if (requested.trp_ext_darwinbg == 0)
            next.tep_live_donor = 1;
        else
            next.tep_live_donor = 0;
        break;

    case TASK_APPTYPE_DAEMON_INTERACTIVE:
    case TASK_APPTYPE_DAEMON_STANDARD:
    case TASK_APPTYPE_DAEMON_ADAPTIVE:
    case TASK_APPTYPE_DAEMON_BACKGROUND:
    default:
        next.tep_live_donor = 0;
        break;
    }

    if (requested.trp_terminated) {
        /*
         * Shoot down the throttles that slow down exit or response to SIGTERM
         * We don't need to shoot down:
         * passive        (don't want to cause others to throttle)
         * all_sockets_bg (don't need to iterate FDs on every exit)
         * new_sockets_bg (doesn't matter for exiting process)
         * pidsuspend     (jetsam-ed BG process shouldn't run again)
         * watchers_bg    (watcher threads don't need to be unthrottled)
         * latency_qos    (affects userspace timers only)
         */

        next.tep_terminated = 1;
        next.tep_darwinbg = 0;
        next.tep_lowpri_cpu = 0;
        next.tep_io_tier = THROTTLE_LEVEL_TIER0;
        next.tep_tal_engaged = 0;
        next.tep_role = TASK_UNSPECIFIED;
        next.tep_suppressed_cpu = 0;
    }

    /*
     * Step 3:
     *  Swap out old policy for new policy
     */

    struct task_effective_policy prev = task->effective_policy;

    /* This is the point where the new values become visible to other threads */
    task->effective_policy = next;

    /* Don't do anything further to a half-formed task */
    if (in_create)
        return;

    if (task == kernel_task)
        panic("Attempting to set task policy on kernel_task");

    /*
     * Step 4:
     *  Pend updates that can't be done while holding the task lock
     */

    if (prev.tep_all_sockets_bg != next.tep_all_sockets_bg)
        pend_token->tpt_update_sockets = 1;

    /* Only re-scan the timer list if the qos level is getting less strong */
    if (prev.tep_latency_qos > next.tep_latency_qos)
        pend_token->tpt_update_timers = 1;


    if (prev.tep_live_donor != next.tep_live_donor)
        pend_token->tpt_update_live_donor = 1;

    /*
     * Step 5:
     *  Update other subsystems as necessary if something has changed
     */

    boolean_t update_threads = FALSE, update_sfi = FALSE;

    /*
     * Check for the attributes that thread_policy_update_internal_locked() consults,
     * and trigger thread policy re-evaluation.
     */
    if (prev.tep_io_tier != next.tep_io_tier ||
        prev.tep_bg_iotier != next.tep_bg_iotier ||
        prev.tep_io_passive != next.tep_io_passive ||
        prev.tep_darwinbg != next.tep_darwinbg ||
        prev.tep_qos_clamp != next.tep_qos_clamp ||
        prev.tep_qos_ceiling != next.tep_qos_ceiling ||
        prev.tep_qos_ui_is_urgent != next.tep_qos_ui_is_urgent ||
        prev.tep_latency_qos != next.tep_latency_qos ||
        prev.tep_through_qos != next.tep_through_qos ||
        prev.tep_lowpri_cpu != next.tep_lowpri_cpu ||
        prev.tep_new_sockets_bg != next.tep_new_sockets_bg ||
        prev.tep_terminated != next.tep_terminated)
        update_threads = TRUE;

    /*
     * Check for the attributes that sfi_thread_classify() consults,
     * and trigger SFI re-evaluation.
     */
    if (prev.tep_latency_qos != next.tep_latency_qos ||
        prev.tep_role != next.tep_role ||
        prev.tep_sfi_managed != next.tep_sfi_managed)
        update_sfi = TRUE;

#if CONFIG_SCHED_SFI
    /* Reflect task role transitions into the coalition role counters */
    if (prev.tep_role != next.tep_role) {
        if (task_policy_update_coalition_focal_tasks(task, prev.tep_role, next.tep_role)) {
            update_sfi = TRUE;
            pend_token->tpt_update_coal_sfi = 1;
        }
    }
#endif /* CONFIG_SCHED_SFI */

    boolean_t update_priority = FALSE;

    int priority = BASEPRI_DEFAULT;
    int max_priority = MAXPRI_USER;

    if (next.tep_lowpri_cpu) {
        priority = MAXPRI_THROTTLE;
        max_priority = MAXPRI_THROTTLE;
    } else if (next.tep_suppressed_cpu) {
        priority = MAXPRI_SUPPRESSED;
        max_priority = MAXPRI_SUPPRESSED;
    } else {
        switch (next.tep_role) {
        case TASK_CONTROL_APPLICATION:
            priority = BASEPRI_CONTROL;
            break;
        case TASK_GRAPHICS_SERVER:
            priority = BASEPRI_GRAPHICS;
            max_priority = MAXPRI_RESERVED;
            break;
        default:
            break;
        }

        /* factor in 'nice' value */
        priority += task->importance;

        if (task->effective_policy.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) {
            int qos_clamp_priority = thread_qos_policy_params.qos_pri[task->effective_policy.tep_qos_clamp];

            priority = MIN(priority, qos_clamp_priority);
            max_priority = MIN(max_priority, qos_clamp_priority);
        }

        if (priority > max_priority)
            priority = max_priority;
        else if (priority < MINPRI)
            priority = MINPRI;
    }

    assert(priority <= max_priority);

    /* avoid extra work if priority isn't changing */
    if (priority != task->priority ||
        max_priority != task->max_priority) {
        /* update the scheduling priority for the task */
        task->max_priority = max_priority;
        task->priority = priority;
        update_priority = TRUE;
    }

    /* Loop over the threads in the task:
     * only once
     * only if necessary
     * with one thread mutex hold per thread
     */
    if (update_threads || update_priority || update_sfi) {
        thread_t thread;

        queue_iterate(&task->threads, thread, thread_t, task_threads) {
            struct task_pend_token thread_pend_token = {};

            if (update_sfi)
                thread_pend_token.tpt_update_thread_sfi = 1;

            if (update_priority || update_threads)
                thread_policy_update_tasklocked(thread,
                                                task->priority, task->max_priority,
                                                &thread_pend_token);

            assert(!thread_pend_token.tpt_update_sockets);

            // Slightly risky, as we still hold the task lock...
            thread_policy_update_complete_unlocked(thread, &thread_pend_token);
        }
    }
}


#if CONFIG_SCHED_SFI
/*
 * Yet another layering violation. We reach out and bang on the coalition directly.
 */
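/*
 * (As used below, task_coalition_adjust_focal_count() returns the coalition's
 * post-adjustment count, so the == 1 / == 0 comparisons fire only on the
 * 0 -> 1 and 1 -> 0 edges: the coalition gaining its first focal/nonfocal
 * task, or losing its last one.)
 */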
static boolean_t
task_policy_update_coalition_focal_tasks(task_t task,
                                         int    prev_role,
                                         int    next_role)
{
    boolean_t sfi_transition = FALSE;

    /* task moving into/out-of the foreground */
    if (prev_role != TASK_FOREGROUND_APPLICATION && next_role == TASK_FOREGROUND_APPLICATION) {
        if (task_coalition_adjust_focal_count(task, 1) == 1)
            sfi_transition = TRUE;
    } else if (prev_role == TASK_FOREGROUND_APPLICATION && next_role != TASK_FOREGROUND_APPLICATION) {
        if (task_coalition_adjust_focal_count(task, -1) == 0)
            sfi_transition = TRUE;
    }

    /* task moving into/out-of background */
    if (prev_role != TASK_BACKGROUND_APPLICATION && next_role == TASK_BACKGROUND_APPLICATION) {
        if (task_coalition_adjust_nonfocal_count(task, 1) == 1)
            sfi_transition = TRUE;
    } else if (prev_role == TASK_BACKGROUND_APPLICATION && next_role != TASK_BACKGROUND_APPLICATION) {
        if (task_coalition_adjust_nonfocal_count(task, -1) == 0)
            sfi_transition = TRUE;
    }

    return sfi_transition;
}

/* coalition object is locked */
static void
task_sfi_reevaluate_cb(coalition_t coal, void *ctx, task_t task)
{
    thread_t thread;

    /* unused for now */
    (void)coal;

    /* skip the task we're re-evaluating on behalf of: it's already updated */
    if (task == (task_t)ctx)
        return;

    task_lock(task);

    queue_iterate(&task->threads, thread, thread_t, task_threads) {
        sfi_reevaluate(thread);
    }

    task_unlock(task);
}
#endif /* CONFIG_SCHED_SFI */

/*
 * Called with task unlocked to do things that can't be done while holding the task lock
 */
void
task_policy_update_complete_unlocked(task_t task, task_pend_token_t pend_token)
{
#ifdef MACH_BSD
    if (pend_token->tpt_update_sockets)
        proc_apply_task_networkbg(task->bsd_info, THREAD_NULL);
#endif /* MACH_BSD */

    /* The timer throttle has been removed or reduced, we need to look for expired timers and fire them */
    if (pend_token->tpt_update_timers)
        ml_timer_evaluate();


    if (pend_token->tpt_update_live_donor)
        task_importance_update_live_donor(task);

#if CONFIG_SCHED_SFI
    /* use the resource coalition for SFI re-evaluation */
    if (pend_token->tpt_update_coal_sfi)
        coalition_for_each_task(task->coalition[COALITION_TYPE_RESOURCE],
                                (void *)task, task_sfi_reevaluate_cb);
#endif /* CONFIG_SCHED_SFI */
}

/*
 * Initiate a task policy state transition
 *
 * Everything that modifies requested except functions that need to hold the task lock
 * should use this function
 *
 * Argument validation should be performed before reaching this point.
 *
 * TODO: Do we need to check task->active?
 */
void
proc_set_task_policy(task_t  task,
                     int     category,
                     int     flavor,
                     int     value)
{
    struct task_pend_token pend_token = {};

    task_lock(task);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                              (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_START,
                              task_pid(task), trequested_0(task),
                              trequested_1(task), value, 0);

    proc_set_task_policy_locked(task, category, flavor, value, 0);

    task_policy_update_locked(task, &pend_token);


    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                              (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_END,
                              task_pid(task), trequested_0(task),
                              trequested_1(task), tpending(&pend_token), 0);

    task_unlock(task);

    task_policy_update_complete_unlocked(task, &pend_token);
}
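
/*
 * Kernel-internal usage sketch (editorial; the TASK_POLICY_ENABLE/DISABLE
 * value constants are assumed from <kern/policy_internal.h>): put a task into
 * DARWIN_BG on behalf of an in-kernel ("internal") requestor:
 *
 *     proc_set_task_policy(task, TASK_POLICY_INTERNAL,
 *         TASK_POLICY_DARWIN_BG, TASK_POLICY_ENABLE);
 */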

/*
 * Variant of proc_set_task_policy() that sets two scalars in the requested policy structure.
 * Same locking rules apply.
 */
void
proc_set_task_policy2(task_t  task,
                      int     category,
                      int     flavor,
                      int     value,
                      int     value2)
{
    struct task_pend_token pend_token = {};

    task_lock(task);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                              (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_START,
                              task_pid(task), trequested_0(task),
                              trequested_1(task), value, 0);

    proc_set_task_policy_locked(task, category, flavor, value, value2);

    task_policy_update_locked(task, &pend_token);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                              (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_END,
                              task_pid(task), trequested_0(task),
                              trequested_1(task), tpending(&pend_token), 0);

    task_unlock(task);

    task_policy_update_complete_unlocked(task, &pend_token);
}

/*
 * Set the requested state for a specific flavor to a specific value.
 *
 * TODO:
 * Verify that arguments to non iopol things are 1 or 0
 */
static void
proc_set_task_policy_locked(task_t  task,
                            int     category,
                            int     flavor,
                            int     value,
                            int     value2)
{
    int tier, passive;

    struct task_requested_policy requested = task->requested_policy;

    switch (flavor) {

    /* Category: EXTERNAL and INTERNAL */

    case TASK_POLICY_DARWIN_BG:
        if (category == TASK_POLICY_EXTERNAL)
            requested.trp_ext_darwinbg = value;
        else
            requested.trp_int_darwinbg = value;
        break;

    case TASK_POLICY_IOPOL:
        proc_iopol_to_tier(value, &tier, &passive);
        if (category == TASK_POLICY_EXTERNAL) {
            requested.trp_ext_iotier = tier;
            requested.trp_ext_iopassive = passive;
        } else {
            requested.trp_int_iotier = tier;
            requested.trp_int_iopassive = passive;
        }
        break;

    case TASK_POLICY_IO:
        if (category == TASK_POLICY_EXTERNAL)
            requested.trp_ext_iotier = value;
        else
            requested.trp_int_iotier = value;
        break;

    case TASK_POLICY_PASSIVE_IO:
        if (category == TASK_POLICY_EXTERNAL)
            requested.trp_ext_iopassive = value;
        else
            requested.trp_int_iopassive = value;
        break;

    /* Category: INTERNAL */

    case TASK_POLICY_DARWIN_BG_IOPOL:
        assert(category == TASK_POLICY_INTERNAL);
        proc_iopol_to_tier(value, &tier, &passive);
        requested.trp_bg_iotier = tier;
        break;

    /* Category: ATTRIBUTE */

    case TASK_POLICY_TAL:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.trp_tal_enabled = value;
        break;

    case TASK_POLICY_BOOST:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.trp_boosted = value;
        break;

    case TASK_POLICY_ROLE:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.trp_role = value;
        break;

    case TASK_POLICY_TERMINATED:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.trp_terminated = value;
        break;

    case TASK_BASE_LATENCY_QOS_POLICY:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.trp_base_latency_qos = value;
        break;

    case TASK_BASE_THROUGHPUT_QOS_POLICY:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.trp_base_through_qos = value;
        break;

    case TASK_POLICY_SFI_MANAGED:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.trp_sfi_managed = value;
        break;

    case TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.trp_base_latency_qos = value;
        requested.trp_base_through_qos = value2;
        break;

    case TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS:
        assert(category == TASK_POLICY_ATTRIBUTE);
        requested.trp_over_latency_qos = value;
        requested.trp_over_through_qos = value2;
        break;

    default:
        panic("unknown task policy: %d %d %d %d", category, flavor, value, value2);
        break;
    }

    task->requested_policy = requested;
}

/*
 * Gets what you set. Effective values may be different.
 */
int
proc_get_task_policy(task_t  task,
                     int     category,
                     int     flavor)
{
    int value = 0;

    task_lock(task);

    struct task_requested_policy requested = task->requested_policy;

    switch (flavor) {
    case TASK_POLICY_DARWIN_BG:
        if (category == TASK_POLICY_EXTERNAL)
            value = requested.trp_ext_darwinbg;
        else
            value = requested.trp_int_darwinbg;
        break;
    case TASK_POLICY_IOPOL:
        if (category == TASK_POLICY_EXTERNAL)
            value = proc_tier_to_iopol(requested.trp_ext_iotier,
                                       requested.trp_ext_iopassive);
        else
            value = proc_tier_to_iopol(requested.trp_int_iotier,
                                       requested.trp_int_iopassive);
        break;
    case TASK_POLICY_IO:
        if (category == TASK_POLICY_EXTERNAL)
            value = requested.trp_ext_iotier;
        else
            value = requested.trp_int_iotier;
        break;
    case TASK_POLICY_PASSIVE_IO:
        if (category == TASK_POLICY_EXTERNAL)
            value = requested.trp_ext_iopassive;
        else
            value = requested.trp_int_iopassive;
        break;
    case TASK_POLICY_DARWIN_BG_IOPOL:
        assert(category == TASK_POLICY_ATTRIBUTE);
        value = proc_tier_to_iopol(requested.trp_bg_iotier, 0);
        break;
    case TASK_POLICY_ROLE:
        assert(category == TASK_POLICY_ATTRIBUTE);
        value = requested.trp_role;
        break;
    case TASK_POLICY_SFI_MANAGED:
        assert(category == TASK_POLICY_ATTRIBUTE);
        value = requested.trp_sfi_managed;
        break;
    default:
        panic("unknown policy_flavor %d", flavor);
        break;
    }

    task_unlock(task);

    return value;
}

/*
 * Variant of proc_get_task_policy() that returns two scalar outputs.
 */
void
proc_get_task_policy2(task_t             task,
                      __assert_only int  category,
                      int                flavor,
                      int                *value1,
                      int                *value2)
{
    task_lock(task);

    struct task_requested_policy requested = task->requested_policy;

    switch (flavor) {
    case TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS:
        assert(category == TASK_POLICY_ATTRIBUTE);
        *value1 = requested.trp_base_latency_qos;
        *value2 = requested.trp_base_through_qos;
        break;

    case TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS:
        assert(category == TASK_POLICY_ATTRIBUTE);
        *value1 = requested.trp_over_latency_qos;
        *value2 = requested.trp_over_through_qos;
        break;

    default:
        panic("unknown policy_flavor %d", flavor);
        break;
    }

    task_unlock(task);
}

/*
 * Function for querying effective state for relevant subsystems
 * Gets what is actually in effect, for subsystems which pull policy instead of receiving updates.
 *
 * ONLY the relevant subsystem should query this.
 * NEVER take a value from the 'effective' function and stuff it into a setter.
 *
 * NOTE: This accessor does not take the task lock.
 * Notifications of state updates need to be externally synchronized with state queries.
 * This routine *MUST* remain interrupt safe, as it is potentially invoked
 * within the context of a timer interrupt. It is also called in KDP context for stackshot.
 */
int
proc_get_effective_task_policy(task_t  task,
                               int     flavor)
{
    int value = 0;

    switch (flavor) {
    case TASK_POLICY_DARWIN_BG:
        /*
         * This backs the KPI call proc_pidbackgrounded to find
         * out if a pid is backgrounded.
         * It is used to communicate state to the VM system, as well as
         * prioritizing requests to the graphics system.
         * Returns 1 for background mode, 0 for normal mode
         */
        value = task->effective_policy.tep_darwinbg;
        break;
    case TASK_POLICY_ALL_SOCKETS_BG:
        /*
         * do_background_socket() calls this to determine what it should do to the proc's sockets
         * Returns 1 for background mode, 0 for normal mode
         *
         * This consults both thread and task so un-DBGing a thread while the task is BG
         * doesn't get you out of the network throttle.
         */
        value = task->effective_policy.tep_all_sockets_bg;
        break;
    case TASK_POLICY_LATENCY_QOS:
        /*
         * timer arming calls into here to find out the timer coalescing level
         * Returns a QoS tier (0-6)
         */
        value = task->effective_policy.tep_latency_qos;
        break;
    case TASK_POLICY_THROUGH_QOS:
        /*
         * This value is passed into the urgency callout from the scheduler
         * to the performance management subsystem.
         * Returns a QoS tier (0-6)
         */
        value = task->effective_policy.tep_through_qos;
        break;
    case TASK_POLICY_ROLE:
        /*
         * This controls various things that ask whether a process is foreground,
         * like SFI, VM, access to GPU, etc
         */
        value = task->effective_policy.tep_role;
        break;
    case TASK_POLICY_WATCHERS_BG:
        /*
         * This controls whether or not a thread watching this process should be BG.
         */
        value = task->effective_policy.tep_watchers_bg;
        break;
    case TASK_POLICY_SFI_MANAGED:
        /*
         * This controls whether or not a process is targeted for specific control by thermald.
         */
        value = task->effective_policy.tep_sfi_managed;
        break;
    default:
        panic("unknown policy_flavor %d", flavor);
        break;
    }

    return value;
}
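
/*
 * Typical pull-style consumer (illustrative): a subsystem asks
 *
 *     if (proc_get_effective_task_policy(task, TASK_POLICY_DARWIN_BG)) { ... }
 *
 * at the point of use rather than caching the answer, since the effective
 * field may be republished at any time by task_policy_update_internal_locked().
 */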

/*
 * Convert from IOPOL_* values to throttle tiers.
 *
 * TODO: Can this be made more compact, like an array lookup
 * Note that it is possible to support e.g. IOPOL_PASSIVE_STANDARD in the future
 */

void
proc_iopol_to_tier(int iopolicy, int *tier, int *passive)
{
    *passive = 0;
    *tier = 0;
    switch (iopolicy) {
    case IOPOL_IMPORTANT:
        *tier = THROTTLE_LEVEL_TIER0;
        break;
    case IOPOL_PASSIVE:
        *tier = THROTTLE_LEVEL_TIER0;
        *passive = 1;
        break;
    case IOPOL_STANDARD:
        *tier = THROTTLE_LEVEL_TIER1;
        break;
    case IOPOL_UTILITY:
        *tier = THROTTLE_LEVEL_TIER2;
        break;
    case IOPOL_THROTTLE:
        *tier = THROTTLE_LEVEL_TIER3;
        break;
    default:
        panic("unknown I/O policy %d", iopolicy);
        break;
    }
}
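
/*
 * One possible shape for the array lookup contemplated in the TODO above
 * (a sketch, not wired in; assumes the IOPOL_* values stay small and dense):
 *
 *     static const struct { int tier; int passive; } iopol_map[] = {
 *         [IOPOL_IMPORTANT] = { THROTTLE_LEVEL_TIER0, 0 },
 *         [IOPOL_PASSIVE]   = { THROTTLE_LEVEL_TIER0, 1 },
 *         [IOPOL_STANDARD]  = { THROTTLE_LEVEL_TIER1, 0 },
 *         [IOPOL_UTILITY]   = { THROTTLE_LEVEL_TIER2, 0 },
 *         [IOPOL_THROTTLE]  = { THROTTLE_LEVEL_TIER3, 0 },
 *     };
 */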

int
proc_tier_to_iopol(int tier, int passive)
{
    if (passive == 1) {
        switch (tier) {
        case THROTTLE_LEVEL_TIER0:
            return IOPOL_PASSIVE;
        default:
            panic("unknown passive tier %d", tier);
            return IOPOL_DEFAULT;
        }
    } else {
        switch (tier) {
        case THROTTLE_LEVEL_NONE:
        case THROTTLE_LEVEL_TIER0:
            return IOPOL_DEFAULT;
        case THROTTLE_LEVEL_TIER1:
            return IOPOL_STANDARD;
        case THROTTLE_LEVEL_TIER2:
            return IOPOL_UTILITY;
        case THROTTLE_LEVEL_TIER3:
            return IOPOL_THROTTLE;
        default:
            panic("unknown tier %d", tier);
            return IOPOL_DEFAULT;
        }
    }
}

int
proc_darwin_role_to_task_role(int darwin_role, int* task_role)
{
    integer_t role = TASK_UNSPECIFIED;

    switch (darwin_role) {
    case PRIO_DARWIN_ROLE_DEFAULT:
        role = TASK_UNSPECIFIED;
        break;
    case PRIO_DARWIN_ROLE_UI_FOCAL:
        role = TASK_FOREGROUND_APPLICATION;
        break;
    case PRIO_DARWIN_ROLE_UI:
        role = TASK_DEFAULT_APPLICATION;
        break;
    case PRIO_DARWIN_ROLE_NON_UI:
        role = TASK_NONUI_APPLICATION;
        break;
    case PRIO_DARWIN_ROLE_UI_NON_FOCAL:
        role = TASK_BACKGROUND_APPLICATION;
        break;
    case PRIO_DARWIN_ROLE_TAL_LAUNCH:
        role = TASK_THROTTLE_APPLICATION;
        break;
    default:
        return EINVAL;
    }

    *task_role = role;

    return 0;
}

int
proc_task_role_to_darwin_role(int task_role)
{
    switch (task_role) {
    case TASK_FOREGROUND_APPLICATION:
        return PRIO_DARWIN_ROLE_UI_FOCAL;
    case TASK_BACKGROUND_APPLICATION:
        return PRIO_DARWIN_ROLE_UI_NON_FOCAL;
    case TASK_NONUI_APPLICATION:
        return PRIO_DARWIN_ROLE_NON_UI;
    case TASK_DEFAULT_APPLICATION:
        return PRIO_DARWIN_ROLE_UI;
    case TASK_THROTTLE_APPLICATION:
        return PRIO_DARWIN_ROLE_TAL_LAUNCH;
    case TASK_UNSPECIFIED:
    default:
        return PRIO_DARWIN_ROLE_DEFAULT;
    }
}
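
/*
 * Note (editorial): the two mappings above round-trip for every
 * PRIO_DARWIN_ROLE_* value, e.g. proc_task_role_to_darwin_role() applied to
 * the role produced by proc_darwin_role_to_task_role(PRIO_DARWIN_ROLE_UI_FOCAL, ...)
 * yields PRIO_DARWIN_ROLE_UI_FOCAL again.
 */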


/* TODO: remove this variable when interactive daemon audit period is over */
extern boolean_t ipc_importance_interactive_receiver;

/*
 * Called at process exec to initialize the apptype, qos clamp, and qos seed of a process
 *
 * TODO: Make this function more table-driven instead of ad-hoc
 */
void
proc_set_task_spawnpolicy(task_t task, int apptype, int qos_clamp, int role,
                          ipc_port_t * portwatch_ports, int portwatch_count)
{
    struct task_pend_token pend_token = {};

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                              (IMPORTANCE_CODE(IMP_TASK_APPTYPE, apptype)) | DBG_FUNC_START,
                              task_pid(task), trequested_0(task), trequested_1(task),
                              apptype, 0);

    switch (apptype) {
    case TASK_APPTYPE_APP_TAL:
    case TASK_APPTYPE_APP_DEFAULT:
        /* Apps become donors via the 'live-donor' flag instead of the static donor flag */
        task_importance_mark_donor(task, FALSE);
        task_importance_mark_live_donor(task, TRUE);
        task_importance_mark_receiver(task, FALSE);
        /* Apps are de-nap receivers on desktop for suppression behaviors */
1743 task_importance_mark_denap_receiver(task, TRUE);
1744 break;
1745
1746 case TASK_APPTYPE_DAEMON_INTERACTIVE:
1747 task_importance_mark_donor(task, TRUE);
1748 task_importance_mark_live_donor(task, FALSE);
1749
1750 /*
1751 * A boot arg controls whether interactive daemons are importance receivers.
1752 * Normally, they are not. But for testing their behavior as an adaptive
1753 * daemon, the boot-arg can be set.
1754 *
1755 * TODO: remove this when the interactive daemon audit period is over.
1756 */
1757 task_importance_mark_receiver(task, /* FALSE */ ipc_importance_interactive_receiver);
1758 task_importance_mark_denap_receiver(task, FALSE);
1759 break;
1760
1761 case TASK_APPTYPE_DAEMON_STANDARD:
1762 task_importance_mark_donor(task, TRUE);
1763 task_importance_mark_live_donor(task, FALSE);
1764 task_importance_mark_receiver(task, FALSE);
1765 task_importance_mark_denap_receiver(task, FALSE);
1766 break;
1767
1768 case TASK_APPTYPE_DAEMON_ADAPTIVE:
1769 task_importance_mark_donor(task, FALSE);
1770 task_importance_mark_live_donor(task, FALSE);
1771 task_importance_mark_receiver(task, TRUE);
1772 task_importance_mark_denap_receiver(task, FALSE);
1773 break;
1774
1775 case TASK_APPTYPE_DAEMON_BACKGROUND:
1776 task_importance_mark_donor(task, FALSE);
1777 task_importance_mark_live_donor(task, FALSE);
1778 task_importance_mark_receiver(task, FALSE);
1779 task_importance_mark_denap_receiver(task, FALSE);
1780 break;
1781
1782 case TASK_APPTYPE_NONE:
1783 break;
1784 }
1785
1786 if (portwatch_ports != NULL && apptype == TASK_APPTYPE_DAEMON_ADAPTIVE) {
1787 int portwatch_boosts = 0;
1788
1789 for (int i = 0; i < portwatch_count; i++) {
1790 ipc_port_t port = NULL;
1791
1792 if ((port = portwatch_ports[i]) != NULL) {
1793 int boost = 0;
1794 task_add_importance_watchport(task, port, &boost);
1795 portwatch_boosts += boost;
1796 }
1797 }
1798
1799 if (portwatch_boosts > 0) {
1800 task_importance_hold_internal_assertion(task, portwatch_boosts);
1801 }
1802 }
1803
1804 task_lock(task);
1805
1806 if (apptype == TASK_APPTYPE_APP_TAL) {
1807 /* TAL starts off enabled by default */
1808 task->requested_policy.trp_tal_enabled = 1;
1809 }
1810
1811 if (apptype != TASK_APPTYPE_NONE) {
1812 task->requested_policy.trp_apptype = apptype;
1813 }
1814
1815 if (role != TASK_UNSPECIFIED) {
1816 task->requested_policy.trp_role = role;
1817 }
1818
1819 if (qos_clamp != THREAD_QOS_UNSPECIFIED) {
1820 task->requested_policy.trp_qos_clamp = qos_clamp;
1821 }
1822
1823 task_policy_update_locked(task, &pend_token);
1824
1825 task_unlock(task);
1826
1827 /* Ensure the donor bit is updated to be in sync with the new live donor status */
1828 pend_token.tpt_update_live_donor = 1;
1829
1830 task_policy_update_complete_unlocked(task, &pend_token);
1831
1832 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1833 (IMPORTANCE_CODE(IMP_TASK_APPTYPE, apptype)) | DBG_FUNC_END,
1834 task_pid(task), trequested_0(task), trequested_1(task),
1835 task_is_importance_receiver(task), 0);
1836 }
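
/*
 * Sketch of the table-driven shape suggested by the TODO above (editorial
 * illustration, not shipped code). Each apptype row captures the four
 * importance flags currently set by the ad-hoc switch. Note that
 * TASK_APPTYPE_DAEMON_INTERACTIVE could not be a constant row, since its
 * receiver bit depends on the ipc_importance_interactive_receiver boot-arg.
 */
#if 0
struct apptype_importance_row {
	boolean_t air_donor;
	boolean_t air_live_donor;
	boolean_t air_receiver;
	boolean_t air_denap_receiver;
};

static const struct apptype_importance_row apptype_importance[] = {
	[TASK_APPTYPE_APP_DEFAULT]       = { FALSE, TRUE,  FALSE, TRUE  },
	[TASK_APPTYPE_APP_TAL]           = { FALSE, TRUE,  FALSE, TRUE  },
	[TASK_APPTYPE_DAEMON_STANDARD]   = { TRUE,  FALSE, FALSE, FALSE },
	[TASK_APPTYPE_DAEMON_ADAPTIVE]   = { FALSE, FALSE, TRUE,  FALSE },
	[TASK_APPTYPE_DAEMON_BACKGROUND] = { FALSE, FALSE, FALSE, FALSE },
};
#endif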
1837
1838 extern task_t bsd_init_task;
1839
1840 /*
1841 * Compute the default main thread qos for a task
1842 */
1843 int
1844 task_compute_main_thread_qos(task_t task)
1845 {
1846 int primordial_qos = THREAD_QOS_UNSPECIFIED;
1847
1848 int qos_clamp = task->requested_policy.trp_qos_clamp;
1849
1850 switch (task->requested_policy.trp_apptype) {
1851 case TASK_APPTYPE_APP_TAL:
1852 case TASK_APPTYPE_APP_DEFAULT:
1853 primordial_qos = THREAD_QOS_USER_INTERACTIVE;
1854 break;
1855
1856 case TASK_APPTYPE_DAEMON_INTERACTIVE:
1857 case TASK_APPTYPE_DAEMON_STANDARD:
1858 case TASK_APPTYPE_DAEMON_ADAPTIVE:
1859 primordial_qos = THREAD_QOS_LEGACY;
1860 break;
1861
1862 case TASK_APPTYPE_DAEMON_BACKGROUND:
1863 primordial_qos = THREAD_QOS_BACKGROUND;
1864 break;
1865 }
1866
1867 if (task == bsd_init_task) {
1868 /* PID 1 gets a special case */
1869 primordial_qos = MAX(primordial_qos, THREAD_QOS_USER_INITIATED);
1870 }
1871
1872 if (qos_clamp != THREAD_QOS_UNSPECIFIED) {
1873 if (primordial_qos != THREAD_QOS_UNSPECIFIED) {
1874 primordial_qos = MIN(qos_clamp, primordial_qos);
1875 } else {
1876 primordial_qos = qos_clamp;
1877 }
1878 }
1879
1880 return primordial_qos;
1881 }
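
/*
 * Worked example for the clamp logic above (editorial addition, not
 * compiled): the THREAD_QOS_* constants are ordered by priority, so the
 * MIN() can only pull the primordial QoS down, never raise it.
 */
#if 0
static void
main_thread_qos_example(void)
{
	/* a default app (USER_INTERACTIVE) clamped to UTILITY starts at UTILITY */
	assert(MIN(THREAD_QOS_UTILITY, THREAD_QOS_USER_INTERACTIVE) == THREAD_QOS_UTILITY);

	/* a clamp above the primordial QoS has no effect */
	assert(MIN(THREAD_QOS_USER_INITIATED, THREAD_QOS_BACKGROUND) == THREAD_QOS_BACKGROUND);
}
#endif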
1882
1883
1884 /* for process_policy to check before attempting to set */
1885 boolean_t
1886 proc_task_is_tal(task_t task)
1887 {
1888 return (task->requested_policy.trp_apptype == TASK_APPTYPE_APP_TAL) ? TRUE : FALSE;
1889 }
1890
1891 int
1892 task_get_apptype(task_t task)
1893 {
1894 return task->requested_policy.trp_apptype;
1895 }
1896
1897 boolean_t
1898 task_is_daemon(task_t task)
1899 {
1900 switch (task->requested_policy.trp_apptype) {
1901 case TASK_APPTYPE_DAEMON_INTERACTIVE:
1902 case TASK_APPTYPE_DAEMON_STANDARD:
1903 case TASK_APPTYPE_DAEMON_ADAPTIVE:
1904 case TASK_APPTYPE_DAEMON_BACKGROUND:
1905 return TRUE;
1906 default:
1907 return FALSE;
1908 }
1909 }
1910
1911 boolean_t
1912 task_is_app(task_t task)
1913 {
1914 switch (task->requested_policy.trp_apptype) {
1915 case TASK_APPTYPE_APP_DEFAULT:
1916 case TASK_APPTYPE_APP_TAL:
1917 return TRUE;
1918 default:
1919 return FALSE;
1920 }
1921 }
1922
1923 /* for telemetry */
1924 integer_t
1925 task_grab_latency_qos(task_t task)
1926 {
1927 return qos_latency_policy_package(proc_get_effective_task_policy(task, TASK_POLICY_LATENCY_QOS));
1928 }
1929
1930 /* update the darwin background action state in the flags field for libproc */
1931 int
1932 proc_get_darwinbgstate(task_t task, uint32_t * flagsp)
1933 {
1934 if (task->requested_policy.trp_ext_darwinbg)
1935 *flagsp |= PROC_FLAG_EXT_DARWINBG;
1936
1937 if (task->requested_policy.trp_int_darwinbg)
1938 *flagsp |= PROC_FLAG_DARWINBG;
1939
1940
1941 if (task->requested_policy.trp_apptype == TASK_APPTYPE_APP_DEFAULT ||
1942 task->requested_policy.trp_apptype == TASK_APPTYPE_APP_TAL)
1943 *flagsp |= PROC_FLAG_APPLICATION;
1944
1945 if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE)
1946 *flagsp |= PROC_FLAG_ADAPTIVE;
1947
1948 if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE &&
1949 task->requested_policy.trp_boosted == 1)
1950 *flagsp |= PROC_FLAG_ADAPTIVE_IMPORTANT;
1951
1952 if (task_is_importance_donor(task))
1953 *flagsp |= PROC_FLAG_IMPORTANCE_DONOR;
1954
1955 if (task->effective_policy.tep_sup_active)
1956 *flagsp |= PROC_FLAG_SUPPRESSED;
1957
1958 return(0);
1959 }
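
/*
 * Illustrative sketch (editorial addition, not compiled): how a consumer
 * of the packed flags might distinguish a currently-boosted adaptive
 * daemon, given that PROC_FLAG_ADAPTIVE_IMPORTANT is only set when the
 * task is both adaptive and boosted.
 */
#if 0
static boolean_t
is_boosted_adaptive_daemon(uint32_t flags)
{
	uint32_t wanted = PROC_FLAG_ADAPTIVE | PROC_FLAG_ADAPTIVE_IMPORTANT;

	return ((flags & wanted) == wanted) ? TRUE : FALSE;
}
#endif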
1960
1961 /*
1962 * Tracepoint data... Reading the tracepoint data can be somewhat complicated.
1963 * The current scheme packs as much data into a single tracepoint as it can.
1964 *
1965 * Each task/thread requested/effective structure is 64 bits in size. Any
1966 * given tracepoint will emit either requested or effective data, but not both.
1967 *
1968 * A tracepoint may emit any of task, thread, or task & thread data.
1969 *
1970 * The type of data emitted varies with pointer size. Where possible, both
1971 * task and thread data are emitted. On LP32 systems, the first and second
1972 * halves of either the task or thread data are emitted.
1973 *
1974 * The code uses uintptr_t array indexes instead of high/low to avoid
1975 * confusion WRT big vs little endian.
1976 *
1977 * The truth table for the tracepoint data functions is below, and has the
1978 * following invariants:
1979 *
1980 * 1) the requested/effective policy structs are accessed as uintptr_t*
1981 * 2) task may never be NULL
1982 *
1983 *
1984 *                        LP32       LP64
1985 * trequested_0(task)     task[0]    task[0]
1986 * trequested_1(task)     task[1]    0
1987 * teffective_0(task)     task[0]    task[0]
1988 * teffective_1(task)     task[1]    0
1989 *
1990 * Basically, you get the struct in two halves on LP32, and the whole struct from the _0 call on LP64.
1991 *
1992 * The uintptr_t munging here is squicky enough to deserve a comment.
1993 *
1994 * The variables we are accessing are laid out in memory like this:
1995 *
1996 * [ LP64 uintptr_t 0 ]
1997 * [ LP32 uintptr_t 0 ] [ LP32 uintptr_t 1 ]
1998 *
1999 * 1 2 3 4 5 6 7 8
2000 *
2001 */
2002
2003 static uintptr_t
2004 trequested_0(task_t task)
2005 {
2006 static_assert(sizeof(struct task_requested_policy) == sizeof(uint64_t), "size invariant violated");
2007
2008 uintptr_t* raw = (uintptr_t*)&task->requested_policy;
2009
2010 return raw[0];
2011 }
2012
2013 static uintptr_t
2014 trequested_1(task_t task)
2015 {
2016 #if defined __LP64__
2017 (void)task;
2018 return 0;
2019 #else
2020 uintptr_t* raw = (uintptr_t*)(&task->requested_policy);
2021 return raw[1];
2022 #endif
2023 }
2024
2025 static uintptr_t
2026 teffective_0(task_t task)
2027 {
2028 uintptr_t* raw = (uintptr_t*)&task->effective_policy;
2029
2030 return raw[0];
2031 }
2032
2033 static uintptr_t
2034 teffective_1(task_t task)
2035 {
2036 #if defined __LP64__
2037 (void)task;
2038 return 0;
2039 #else
2040 uintptr_t* raw = (uintptr_t*)(&task->effective_policy);
2041 return raw[1];
2042 #endif
2043 }
2044
2045 /* dump pending for tracepoint */
2046 uint32_t tpending(task_pend_token_t pend_token) { return *(uint32_t*)(void*)(pend_token); }
2047
2048 uint64_t
2049 task_requested_bitfield(task_t task)
2050 {
2051 uint64_t bits = 0;
2052 struct task_requested_policy requested = task->requested_policy;
2053
2054 bits |= (requested.trp_int_darwinbg ? POLICY_REQ_INT_DARWIN_BG : 0);
2055 bits |= (requested.trp_ext_darwinbg ? POLICY_REQ_EXT_DARWIN_BG : 0);
2056 bits |= (requested.trp_int_iotier ? (((uint64_t)requested.trp_int_iotier) << POLICY_REQ_INT_IO_TIER_SHIFT) : 0);
2057 bits |= (requested.trp_ext_iotier ? (((uint64_t)requested.trp_ext_iotier) << POLICY_REQ_EXT_IO_TIER_SHIFT) : 0);
2058 bits |= (requested.trp_int_iopassive ? POLICY_REQ_INT_PASSIVE_IO : 0);
2059 bits |= (requested.trp_ext_iopassive ? POLICY_REQ_EXT_PASSIVE_IO : 0);
2060 bits |= (requested.trp_bg_iotier ? (((uint64_t)requested.trp_bg_iotier) << POLICY_REQ_BG_IOTIER_SHIFT) : 0);
2061 bits |= (requested.trp_terminated ? POLICY_REQ_TERMINATED : 0);
2062
2063 bits |= (requested.trp_boosted ? POLICY_REQ_BOOSTED : 0);
2064 bits |= (requested.trp_tal_enabled ? POLICY_REQ_TAL_ENABLED : 0);
2065 bits |= (requested.trp_apptype ? (((uint64_t)requested.trp_apptype) << POLICY_REQ_APPTYPE_SHIFT) : 0);
2066 bits |= (requested.trp_role ? (((uint64_t)requested.trp_role) << POLICY_REQ_ROLE_SHIFT) : 0);
2067
2068 bits |= (requested.trp_sup_active ? POLICY_REQ_SUP_ACTIVE : 0);
2069 bits |= (requested.trp_sup_lowpri_cpu ? POLICY_REQ_SUP_LOWPRI_CPU : 0);
2070 bits |= (requested.trp_sup_cpu ? POLICY_REQ_SUP_CPU : 0);
2071 bits |= (requested.trp_sup_timer ? (((uint64_t)requested.trp_sup_timer) << POLICY_REQ_SUP_TIMER_THROTTLE_SHIFT) : 0);
2072 bits |= (requested.trp_sup_throughput ? (((uint64_t)requested.trp_sup_throughput) << POLICY_REQ_SUP_THROUGHPUT_SHIFT) : 0);
2073 bits |= (requested.trp_sup_disk ? POLICY_REQ_SUP_DISK_THROTTLE : 0);
2074 bits |= (requested.trp_sup_bg_sockets ? POLICY_REQ_SUP_BG_SOCKETS : 0);
2075
2076 bits |= (requested.trp_base_latency_qos ? (((uint64_t)requested.trp_base_latency_qos) << POLICY_REQ_BASE_LATENCY_QOS_SHIFT) : 0);
2077 bits |= (requested.trp_over_latency_qos ? (((uint64_t)requested.trp_over_latency_qos) << POLICY_REQ_OVER_LATENCY_QOS_SHIFT) : 0);
2078 bits |= (requested.trp_base_through_qos ? (((uint64_t)requested.trp_base_through_qos) << POLICY_REQ_BASE_THROUGH_QOS_SHIFT) : 0);
2079 bits |= (requested.trp_over_through_qos ? (((uint64_t)requested.trp_over_through_qos) << POLICY_REQ_OVER_THROUGH_QOS_SHIFT) : 0);
2080 bits |= (requested.trp_sfi_managed ? POLICY_REQ_SFI_MANAGED : 0);
2081 bits |= (requested.trp_qos_clamp ? (((uint64_t)requested.trp_qos_clamp) << POLICY_REQ_QOS_CLAMP_SHIFT) : 0);
2082
2083 return bits;
2084 }
2085
2086 uint64_t
2087 task_effective_bitfield(task_t task)
2088 {
2089 uint64_t bits = 0;
2090 struct task_effective_policy effective = task->effective_policy;
2091
2092 bits |= (effective.tep_io_tier ? (((uint64_t)effective.tep_io_tier) << POLICY_EFF_IO_TIER_SHIFT) : 0);
2093 bits |= (effective.tep_io_passive ? POLICY_EFF_IO_PASSIVE : 0);
2094 bits |= (effective.tep_darwinbg ? POLICY_EFF_DARWIN_BG : 0);
2095 bits |= (effective.tep_lowpri_cpu ? POLICY_EFF_LOWPRI_CPU : 0);
2096 bits |= (effective.tep_terminated ? POLICY_EFF_TERMINATED : 0);
2097 bits |= (effective.tep_all_sockets_bg ? POLICY_EFF_ALL_SOCKETS_BG : 0);
2098 bits |= (effective.tep_new_sockets_bg ? POLICY_EFF_NEW_SOCKETS_BG : 0);
2099 bits |= (effective.tep_bg_iotier ? (((uint64_t)effective.tep_bg_iotier) << POLICY_EFF_BG_IOTIER_SHIFT) : 0);
2100 bits |= (effective.tep_qos_ui_is_urgent ? POLICY_EFF_QOS_UI_IS_URGENT : 0);
2101
2102 bits |= (effective.tep_tal_engaged ? POLICY_EFF_TAL_ENGAGED : 0);
2103 bits |= (effective.tep_watchers_bg ? POLICY_EFF_WATCHERS_BG : 0);
2104 bits |= (effective.tep_sup_active ? POLICY_EFF_SUP_ACTIVE : 0);
2105 bits |= (effective.tep_suppressed_cpu ? POLICY_EFF_SUP_CPU : 0);
2106 bits |= (effective.tep_role ? (((uint64_t)effective.tep_role) << POLICY_EFF_ROLE_SHIFT) : 0);
2107 bits |= (effective.tep_latency_qos ? (((uint64_t)effective.tep_latency_qos) << POLICY_EFF_LATENCY_QOS_SHIFT) : 0);
2108 bits |= (effective.tep_through_qos ? (((uint64_t)effective.tep_through_qos) << POLICY_EFF_THROUGH_QOS_SHIFT) : 0);
2109 bits |= (effective.tep_sfi_managed ? POLICY_EFF_SFI_MANAGED : 0);
2110 bits |= (effective.tep_qos_ceiling ? (((uint64_t)effective.tep_qos_ceiling) << POLICY_EFF_QOS_CEILING_SHIFT) : 0);
2111
2112 return bits;
2113 }
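
/*
 * Illustrative sketch (editorial addition, not compiled): tracepoint
 * consumers recover a multi-bit field by reversing the shift. The 3-bit
 * mask below is an assumption made for illustration; the real width comes
 * from the corresponding POLICY_EFF_* definitions.
 */
#if 0
static uint32_t
example_extract_effective_io_tier(uint64_t effective_bits)
{
	return (uint32_t)((effective_bits >> POLICY_EFF_IO_TIER_SHIFT) & 0x7);
}
#endif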
2114
2115
2116 /*
2117 * Resource usage and CPU related routines
2118 */
2119
2120 int
2121 proc_get_task_ruse_cpu(task_t task, uint32_t *policyp, uint8_t *percentagep, uint64_t *intervalp, uint64_t *deadlinep)
2122 {
2123
2124 int error = 0;
2125 int scope;
2126
2127 task_lock(task);
2128
2129
2130 error = task_get_cpuusage(task, percentagep, intervalp, deadlinep, &scope);
2131 task_unlock(task);
2132
2133 /*
2134 * Reverse-map from CPU resource limit scopes back to policies (see comment below).
2135 */
2136 if (scope == TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
2137 *policyp = TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC;
2138 } else if (scope == TASK_RUSECPU_FLAGS_PROC_LIMIT) {
2139 *policyp = TASK_POLICY_RESOURCE_ATTRIBUTE_THROTTLE;
2140 } else if (scope == TASK_RUSECPU_FLAGS_DEADLINE) {
2141 *policyp = TASK_POLICY_RESOURCE_ATTRIBUTE_NONE;
2142 }
2143
2144 return(error);
2145 }
2146
2147 /*
2148 * Configure the default CPU usage monitor parameters.
2149 *
2150 * For tasks which have this mechanism activated: if any thread in the
2151 * process consumes more CPU than this, an EXC_RESOURCE exception will be generated.
2152 */
2153 void
2154 proc_init_cpumon_params(void)
2155 {
2156 /*
2157 * The max CPU percentage can be configured via the boot-args and
2158 * a key in the device tree. The boot-args are honored first, then the
2159 * device tree.
2160 */
2161 if (!PE_parse_boot_argn("max_cpumon_percentage", &proc_max_cpumon_percentage,
2162 sizeof (proc_max_cpumon_percentage)))
2163 {
2164 uint64_t max_percentage = 0ULL;
2165
2166 if (!PE_get_default("kern.max_cpumon_percentage", &max_percentage,
2167 sizeof(max_percentage)))
2168 {
2169 max_percentage = DEFAULT_CPUMON_PERCENTAGE;
2170 }
2171
2172 assert(max_percentage <= UINT8_MAX);
2173 proc_max_cpumon_percentage = (uint8_t) max_percentage;
2174 }
2175
2176 if (proc_max_cpumon_percentage > 100) {
2177 proc_max_cpumon_percentage = 100;
2178 }
2179
2180 /*
2181 * The interval should be specified in seconds.
2182 *
2183 * Like the max CPU percentage, the max CPU interval can be configured
2184 * via boot-args and the device tree.
2185 */
2186 if (!PE_parse_boot_argn("max_cpumon_interval", &proc_max_cpumon_interval,
2187 sizeof (proc_max_cpumon_interval)))
2188 {
2189 if (!PE_get_default("kern.max_cpumon_interval", &proc_max_cpumon_interval,
2190 sizeof(proc_max_cpumon_interval)))
2191 {
2192 proc_max_cpumon_interval = DEFAULT_CPUMON_INTERVAL;
2193 }
2194 }
2195
2196 proc_max_cpumon_interval *= NSEC_PER_SEC;
2197
2198 /* TEMPORARY boot arg to control App suppression */
2199 PE_parse_boot_argn("task_policy_suppression_disable",
2200 &task_policy_suppression_disable,
2201 sizeof(task_policy_suppression_disable));
2202 }
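
/*
 * Example (editorial addition; values are illustrative): the defaults
 * configured above can be overridden at boot, e.g. with
 *
 *     max_cpumon_percentage=90 max_cpumon_interval=300
 *
 * where the interval is in seconds. Boot-args take precedence over the
 * kern.max_cpumon_percentage / kern.max_cpumon_interval device tree keys,
 * and the percentage is clipped to 100.
 */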
2203
2204 /*
2205 * Currently supported configurations for CPU limits.
2206 *
2207 * Policy | Deadline-based CPU limit | Percentage-based CPU limit
2208 * -------------------------------------+--------------------------+------------------------------
2209 * PROC_POLICY_RSRCACT_THROTTLE | ENOTSUP | Task-wide scope only
2210 * PROC_POLICY_RSRCACT_SUSPEND | Task-wide scope only | ENOTSUP
2211 * PROC_POLICY_RSRCACT_TERMINATE | Task-wide scope only | ENOTSUP
2212 * PROC_POLICY_RSRCACT_NOTIFY_KQ | Task-wide scope only | ENOTSUP
2213 * PROC_POLICY_RSRCACT_NOTIFY_EXC | ENOTSUP | Per-thread scope only
2214 *
2215 * A deadline-based CPU limit is actually a simple wallclock timer - the requested action is performed
2216 * after the specified amount of wallclock time has elapsed.
2217 *
2218 * A percentage-based CPU limit performs the requested action after the specified amount of actual CPU time
2219 * has been consumed -- regardless of how much wallclock time has elapsed -- by either the task as an
2220 * aggregate entity (so-called "Task-wide" or "Proc-wide" scope, whereby the CPU time consumed by all threads
2221 * in the task is summed), or by any one thread in the task (so-called "per-thread" scope).
2222 *
2223 * We support either deadline != 0 OR percentage != 0, but not both. The original intention in having them
2224 * share an API was to use actual CPU time as the basis of the deadline-based limit (as in: perform an action
2225 * after I have used some amount of CPU time; this is different from the recurring percentage/interval model)
2226 * but the potential consumer of the API at the time was insisting on wallclock time instead.
2227 *
2228 * Currently, requesting notification via an exception is the only way to get per-thread scope for a
2229 * CPU limit. All other types of notifications force task-wide scope for the limit.
2230 */
2231 int
2232 proc_set_task_ruse_cpu(task_t task, uint32_t policy, uint8_t percentage, uint64_t interval, uint64_t deadline,
2233 int cpumon_entitled)
2234 {
2235 int error = 0;
2236 int scope;
2237
2238 /*
2239 * Enforce the matrix of supported configurations for policy, percentage, and deadline.
2240 */
2241 switch (policy) {
2242 // If no policy is explicitly given, the default is to throttle.
2243 case TASK_POLICY_RESOURCE_ATTRIBUTE_NONE:
2244 case TASK_POLICY_RESOURCE_ATTRIBUTE_THROTTLE:
2245 if (deadline != 0)
2246 return (ENOTSUP);
2247 scope = TASK_RUSECPU_FLAGS_PROC_LIMIT;
2248 break;
2249 case TASK_POLICY_RESOURCE_ATTRIBUTE_SUSPEND:
2250 case TASK_POLICY_RESOURCE_ATTRIBUTE_TERMINATE:
2251 case TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_KQ:
2252 if (percentage != 0)
2253 return (ENOTSUP);
2254 scope = TASK_RUSECPU_FLAGS_DEADLINE;
2255 break;
2256 case TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC:
2257 if (deadline != 0)
2258 return (ENOTSUP);
2259 scope = TASK_RUSECPU_FLAGS_PERTHR_LIMIT;
2260 #ifdef CONFIG_NOMONITORS
2261 return (error);
2262 #endif /* CONFIG_NOMONITORS */
2263 break;
2264 default:
2265 return (EINVAL);
2266 }
2267
2268 task_lock(task);
2269 if (task != current_task()) {
2270 task->policy_ru_cpu_ext = policy;
2271 } else {
2272 task->policy_ru_cpu = policy;
2273 }
2274 error = task_set_cpuusage(task, percentage, interval, deadline, scope, cpumon_entitled);
2275 task_unlock(task);
2276 return(error);
2277 }
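
/*
 * Illustrative sketch (editorial addition, not compiled): two calls
 * exercising the configuration matrix above.
 */
#if 0
static void
ruse_cpu_example(task_t task)
{
	/* supported: throttle the whole task at 50% CPU per one-second interval */
	assert(proc_set_task_ruse_cpu(task, TASK_POLICY_RESOURCE_ATTRIBUTE_THROTTLE,
	    50, NSEC_PER_SEC, 0, 0) == 0);

	/* unsupported: a throttle policy may not carry a deadline */
	assert(proc_set_task_ruse_cpu(task, TASK_POLICY_RESOURCE_ATTRIBUTE_THROTTLE,
	    0, 0, NSEC_PER_SEC, 0) == ENOTSUP);
}
#endif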
2278
2279 /* TODO: get rid of these */
2280 #define TASK_POLICY_CPU_RESOURCE_USAGE 0
2281 #define TASK_POLICY_WIREDMEM_RESOURCE_USAGE 1
2282 #define TASK_POLICY_VIRTUALMEM_RESOURCE_USAGE 2
2283 #define TASK_POLICY_DISK_RESOURCE_USAGE 3
2284 #define TASK_POLICY_NETWORK_RESOURCE_USAGE 4
2285 #define TASK_POLICY_POWER_RESOURCE_USAGE 5
2286
2287 #define TASK_POLICY_RESOURCE_USAGE_COUNT 6
2288
2289 int
2290 proc_clear_task_ruse_cpu(task_t task, int cpumon_entitled)
2291 {
2292 int error = 0;
2293 int action;
2294 void * bsdinfo = NULL;
2295
2296 task_lock(task);
2297 if (task != current_task()) {
2298 task->policy_ru_cpu_ext = TASK_POLICY_RESOURCE_ATTRIBUTE_DEFAULT;
2299 } else {
2300 task->policy_ru_cpu = TASK_POLICY_RESOURCE_ATTRIBUTE_DEFAULT;
2301 }
2302
2303 error = task_clear_cpuusage_locked(task, cpumon_entitled);
2304 if (error != 0)
2305 goto out;
2306
2307 action = task->applied_ru_cpu;
2308 if (task->applied_ru_cpu_ext != TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
2309 /* reset action */
2310 task->applied_ru_cpu_ext = TASK_POLICY_RESOURCE_ATTRIBUTE_NONE;
2311 }
2312 if (action != TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
2313 bsdinfo = task->bsd_info;
2314 task_unlock(task);
2315 proc_restore_resource_actions(bsdinfo, TASK_POLICY_CPU_RESOURCE_USAGE, action);
2316 goto out1;
2317 }
2318
2319 out:
2320 task_unlock(task);
2321 out1:
2322 return(error);
2323
2324 }
2325
2326 /* used to apply resource limit related actions */
2327 static int
2328 task_apply_resource_actions(task_t task, int type)
2329 {
2330 int action = TASK_POLICY_RESOURCE_ATTRIBUTE_NONE;
2331 void * bsdinfo = NULL;
2332
2333 switch (type) {
2334 case TASK_POLICY_CPU_RESOURCE_USAGE:
2335 break;
2336 case TASK_POLICY_WIREDMEM_RESOURCE_USAGE:
2337 case TASK_POLICY_VIRTUALMEM_RESOURCE_USAGE:
2338 case TASK_POLICY_DISK_RESOURCE_USAGE:
2339 case TASK_POLICY_NETWORK_RESOURCE_USAGE:
2340 case TASK_POLICY_POWER_RESOURCE_USAGE:
2341 return(0);
2342
2343 default:
2344 return(1);
2345 }
2346
2347 /* only cpu actions for now */
2348 task_lock(task);
2349
2350 if (task->applied_ru_cpu_ext == TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
2351 /* apply action */
2352 task->applied_ru_cpu_ext = task->policy_ru_cpu_ext;
2353 action = task->applied_ru_cpu_ext;
2354 } else {
2355 action = task->applied_ru_cpu_ext;
2356 }
2357
2358 if (action != TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
2359 bsdinfo = task->bsd_info;
2360 task_unlock(task);
2361 proc_apply_resource_actions(bsdinfo, TASK_POLICY_CPU_RESOURCE_USAGE, action);
2362 } else
2363 task_unlock(task);
2364
2365 return(0);
2366 }
2367
2368 /*
2369 * XXX This API is somewhat broken; we support multiple simultaneous CPU limits, but the get/set API
2370 * only allows for one at a time. This means that if there is a per-thread limit active, the other
2371 * "scopes" will not be accessible via this API. We could change it to pass in the scope of interest
2372 * to the caller, and prefer that, but there's no need for that at the moment.
2373 */
2374 static int
2375 task_get_cpuusage(task_t task, uint8_t *percentagep, uint64_t *intervalp, uint64_t *deadlinep, int *scope)
2376 {
2377 *percentagep = 0;
2378 *intervalp = 0;
2379 *deadlinep = 0;
2380
2381 if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) != 0) {
2382 *scope = TASK_RUSECPU_FLAGS_PERTHR_LIMIT;
2383 *percentagep = task->rusage_cpu_perthr_percentage;
2384 *intervalp = task->rusage_cpu_perthr_interval;
2385 } else if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PROC_LIMIT) != 0) {
2386 *scope = TASK_RUSECPU_FLAGS_PROC_LIMIT;
2387 *percentagep = task->rusage_cpu_percentage;
2388 *intervalp = task->rusage_cpu_interval;
2389 } else if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_DEADLINE) != 0) {
2390 *scope = TASK_RUSECPU_FLAGS_DEADLINE;
2391 *deadlinep = task->rusage_cpu_deadline;
2392 } else {
2393 *scope = 0;
2394 }
2395
2396 return(0);
2397 }
2398
2399 /*
2400 * Suspend the CPU usage monitor for the task. Return value indicates
2401 * whether the mechanism was actually enabled.
2402 */
2403 int
2404 task_suspend_cpumon(task_t task)
2405 {
2406 thread_t thread;
2407
2408 task_lock_assert_owned(task);
2409
2410 if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) == 0) {
2411 return KERN_INVALID_ARGUMENT;
2412 }
2413
2414 #if CONFIG_TELEMETRY
2415 /*
2416 * Disable task-wide telemetry if it was ever enabled by the CPU usage
2417 * monitor's warning zone.
2418 */
2419 telemetry_task_ctl_locked(task, TF_CPUMON_WARNING, 0);
2420 #endif
2421
2422 /*
2423 * Suspend monitoring for the task, and propagate that change to each thread.
2424 */
2425 task->rusage_cpu_flags &= ~(TASK_RUSECPU_FLAGS_PERTHR_LIMIT | TASK_RUSECPU_FLAGS_FATAL_CPUMON);
2426 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2427 set_astledger(thread);
2428 }
2429
2430 return KERN_SUCCESS;
2431 }
2432
2433 /*
2434 * Remove all traces of the CPU monitor.
2435 */
2436 int
2437 task_disable_cpumon(task_t task)
2438 {
2439 int kret;
2440
2441 task_lock_assert_owned(task);
2442
2443 kret = task_suspend_cpumon(task);
2444 if (kret) return kret;
2445
2446 /* Once we clear these values, the monitor can't be resumed */
2447 task->rusage_cpu_perthr_percentage = 0;
2448 task->rusage_cpu_perthr_interval = 0;
2449
2450 return (KERN_SUCCESS);
2451 }
2452
2453
2454 static int
2455 task_enable_cpumon_locked(task_t task)
2456 {
2457 thread_t thread;
2458 task_lock_assert_owned(task);
2459
2460 if (task->rusage_cpu_perthr_percentage == 0 ||
2461 task->rusage_cpu_perthr_interval == 0) {
2462 return KERN_INVALID_ARGUMENT;
2463 }
2464
2465 task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_PERTHR_LIMIT;
2466 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2467 set_astledger(thread);
2468 }
2469
2470 return KERN_SUCCESS;
2471 }
2472
2473 int
2474 task_resume_cpumon(task_t task)
2475 {
2476 kern_return_t kret;
2477
2478 if (!task) {
2479 return EINVAL;
2480 }
2481
2482 task_lock(task);
2483 kret = task_enable_cpumon_locked(task);
2484 task_unlock(task);
2485
2486 return kret;
2487 }
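
/*
 * Note (editorial addition) on the suspend/resume pair above:
 * task_suspend_cpumon() clears TASK_RUSECPU_FLAGS_PERTHR_LIMIT but leaves
 * rusage_cpu_perthr_percentage/interval intact, so task_resume_cpumon()
 * can re-arm the monitor. Once task_disable_cpumon() has zeroed those
 * values, resuming fails with KERN_INVALID_ARGUMENT.
 */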
2488
2489
2490 /* duplicate values from bsd/sys/process_policy.h */
2491 #define PROC_POLICY_CPUMON_DISABLE 0xFF
2492 #define PROC_POLICY_CPUMON_DEFAULTS 0xFE
2493
2494 static int
2495 task_set_cpuusage(task_t task, uint8_t percentage, uint64_t interval, uint64_t deadline, int scope, int cpumon_entitled)
2496 {
2497 uint64_t abstime = 0;
2498 uint64_t limittime = 0;
2499
2500 lck_mtx_assert(&task->lock, LCK_MTX_ASSERT_OWNED);
2501
2502 /* By default, refill once per second */
2503 if (interval == 0)
2504 interval = NSEC_PER_SEC;
2505
2506 if (percentage != 0) {
2507 if (scope == TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
2508 boolean_t warn = FALSE;
2509
2510 /*
2511 * A per-thread CPU limit on a task generates an exception
2512 * (LEDGER_ACTION_EXCEPTION) if any one thread in the task
2513 * exceeds the limit.
2514 */
2515
2516 if (percentage == PROC_POLICY_CPUMON_DISABLE) {
2517 if (cpumon_entitled) {
2518 /* 25095698 - task_disable_cpumon() should be reliable */
2519 task_disable_cpumon(task);
2520 return 0;
2521 }
2522
2523 /*
2524 * This task wishes to disable the CPU usage monitor, but it's
2525 * missing the required entitlement:
2526 * com.apple.private.kernel.override-cpumon
2527 *
2528 * Instead, treat this as a request to reset its params
2529 * back to the defaults.
2530 */
2531 warn = TRUE;
2532 percentage = PROC_POLICY_CPUMON_DEFAULTS;
2533 }
2534
2535 if (percentage == PROC_POLICY_CPUMON_DEFAULTS) {
2536 percentage = proc_max_cpumon_percentage;
2537 interval = proc_max_cpumon_interval;
2538 }
2539
2540 if (percentage > 100) {
2541 percentage = 100;
2542 }
2543
2544 /*
2545 * Passing in an interval of -1 means either:
2546 * - Leave the interval as-is, if there's already a per-thread
2547 * limit configured
2548 * - Use the system default.
2549 */
2550 if (interval == -1ULL) {
2551 if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
2552 interval = task->rusage_cpu_perthr_interval;
2553 } else {
2554 interval = proc_max_cpumon_interval;
2555 }
2556 }
2557
2558 /*
2559 * Enforce global caps on CPU usage monitor here if the process is not
2560 * entitled to escape the global caps.
2561 */
2562 if ((percentage > proc_max_cpumon_percentage) && (cpumon_entitled == 0)) {
2563 warn = TRUE;
2564 percentage = proc_max_cpumon_percentage;
2565 }
2566
2567 if ((interval > proc_max_cpumon_interval) && (cpumon_entitled == 0)) {
2568 warn = TRUE;
2569 interval = proc_max_cpumon_interval;
2570 }
2571
2572 if (warn) {
2573 int pid = 0;
2574 const char *procname = "unknown";
2575
2576 #ifdef MACH_BSD
2577 pid = proc_selfpid();
2578 if (current_task()->bsd_info != NULL) {
2579 procname = proc_name_address(current_task()->bsd_info);
2580 }
2581 #endif
2582
2583 printf("process %s[%d] denied attempt to escape CPU monitor"
2584 " (missing required entitlement).\n", procname, pid);
2585 }
2586
2587 /* configure the limit values */
2588 task->rusage_cpu_perthr_percentage = percentage;
2589 task->rusage_cpu_perthr_interval = interval;
2590
2591 /* and enable the CPU monitor */
2592 (void)task_enable_cpumon_locked(task);
2593 } else if (scope == TASK_RUSECPU_FLAGS_PROC_LIMIT) {
2594 /*
2595 * Currently, a proc-wide CPU limit always blocks if the limit is
2596 * exceeded (LEDGER_ACTION_BLOCK).
2597 */
2598 task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_PROC_LIMIT;
2599 task->rusage_cpu_percentage = percentage;
2600 task->rusage_cpu_interval = interval;
2601
2602 limittime = (interval * percentage) / 100;
2603 nanoseconds_to_absolutetime(limittime, &abstime);
2604
2605 ledger_set_limit(task->ledger, task_ledgers.cpu_time, abstime, 0);
2606 ledger_set_period(task->ledger, task_ledgers.cpu_time, interval);
2607 ledger_set_action(task->ledger, task_ledgers.cpu_time, LEDGER_ACTION_BLOCK);
2608 }
2609 }
2610
2611 if (deadline != 0) {
2612 assert(scope == TASK_RUSECPU_FLAGS_DEADLINE);
2613
2614 /* if already in use, cancel and wait for it to clean out */
2615 if (task->rusage_cpu_callt != NULL) {
2616 task_unlock(task);
2617 thread_call_cancel_wait(task->rusage_cpu_callt);
2618 task_lock(task);
2619 }
2620 if (task->rusage_cpu_callt == NULL) {
2621 task->rusage_cpu_callt = thread_call_allocate_with_priority(task_action_cpuusage, (thread_call_param_t)task, THREAD_CALL_PRIORITY_KERNEL);
2622 }
2623 /* setup callout */
2624 if (task->rusage_cpu_callt != 0) {
2625 uint64_t save_abstime = 0;
2626
2627 task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_DEADLINE;
2628 task->rusage_cpu_deadline = deadline;
2629
2630 nanoseconds_to_absolutetime(deadline, &abstime);
2631 save_abstime = abstime;
2632 clock_absolutetime_interval_to_deadline(save_abstime, &abstime);
2633 thread_call_enter_delayed(task->rusage_cpu_callt, abstime);
2634 }
2635 }
2636
2637 return(0);
2638 }
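
/*
 * Worked example (editorial addition) for the proc-wide limit math above:
 * percentage == 50 with interval == NSEC_PER_SEC gives
 * limittime = (NSEC_PER_SEC * 50) / 100, i.e. the task as a whole may
 * consume 500ms of CPU time per one-second ledger period before
 * LEDGER_ACTION_BLOCK takes effect.
 */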
2639
2640 int
2641 task_clear_cpuusage(task_t task, int cpumon_entitled)
2642 {
2643 int retval = 0;
2644
2645 task_lock(task);
2646 retval = task_clear_cpuusage_locked(task, cpumon_entitled);
2647 task_unlock(task);
2648
2649 return(retval);
2650 }
2651
2652 static int
2653 task_clear_cpuusage_locked(task_t task, int cpumon_entitled)
2654 {
2655 thread_call_t savecallt;
2656
2657 /* cancel percentage handling if set */
2658 if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PROC_LIMIT) {
2659 task->rusage_cpu_flags &= ~TASK_RUSECPU_FLAGS_PROC_LIMIT;
2660 ledger_set_limit(task->ledger, task_ledgers.cpu_time, LEDGER_LIMIT_INFINITY, 0);
2661 task->rusage_cpu_percentage = 0;
2662 task->rusage_cpu_interval = 0;
2663 }
2664
2665 /*
2666 * Disable the CPU usage monitor.
2667 */
2668 if (cpumon_entitled) {
2669 task_disable_cpumon(task);
2670 }
2671
2672 /* cancel deadline handling if set */
2673 if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_DEADLINE) {
2674 task->rusage_cpu_flags &= ~TASK_RUSECPU_FLAGS_DEADLINE;
2675 if (task->rusage_cpu_callt != 0) {
2676 savecallt = task->rusage_cpu_callt;
2677 task->rusage_cpu_callt = NULL;
2678 task->rusage_cpu_deadline = 0;
2679 task_unlock(task);
2680 thread_call_cancel_wait(savecallt);
2681 thread_call_free(savecallt);
2682 task_lock(task);
2683 }
2684 }
2685 return(0);
2686 }
2687
2688 /* called by ledger unit to enforce action due to resource usage criteria being met */
2689 static void
2690 task_action_cpuusage(thread_call_param_t param0, __unused thread_call_param_t param1)
2691 {
2692 task_t task = (task_t)param0;
2693 (void)task_apply_resource_actions(task, TASK_POLICY_CPU_RESOURCE_USAGE);
2694 return;
2695 }
2696
2697
2698 /*
2699 * Routines for taskwatch and pidbind
2700 */
2701
2702
2703 /*
2704 * Routines for importance donation/inheritance/boosting
2705 */
2706
2707 static void
2708 task_importance_update_live_donor(task_t target_task)
2709 {
2710 #if IMPORTANCE_INHERITANCE
2711
2712 ipc_importance_task_t task_imp;
2713
2714 task_imp = ipc_importance_for_task(target_task, FALSE);
2715 if (IIT_NULL != task_imp) {
2716 ipc_importance_task_update_live_donor(task_imp);
2717 ipc_importance_task_release(task_imp);
2718 }
2719 #endif /* IMPORTANCE_INHERITANCE */
2720 }
2721
2722 void
2723 task_importance_mark_donor(task_t task, boolean_t donating)
2724 {
2725 #if IMPORTANCE_INHERITANCE
2726 ipc_importance_task_t task_imp;
2727
2728 task_imp = ipc_importance_for_task(task, FALSE);
2729 if (IIT_NULL != task_imp) {
2730 ipc_importance_task_mark_donor(task_imp, donating);
2731 ipc_importance_task_release(task_imp);
2732 }
2733 #endif /* IMPORTANCE_INHERITANCE */
2734 }
2735
2736 void
2737 task_importance_mark_live_donor(task_t task, boolean_t live_donating)
2738 {
2739 #if IMPORTANCE_INHERITANCE
2740 ipc_importance_task_t task_imp;
2741
2742 task_imp = ipc_importance_for_task(task, FALSE);
2743 if (IIT_NULL != task_imp) {
2744 ipc_importance_task_mark_live_donor(task_imp, live_donating);
2745 ipc_importance_task_release(task_imp);
2746 }
2747 #endif /* IMPORTANCE_INHERITANCE */
2748 }
2749
2750 void
2751 task_importance_mark_receiver(task_t task, boolean_t receiving)
2752 {
2753 #if IMPORTANCE_INHERITANCE
2754 ipc_importance_task_t task_imp;
2755
2756 task_imp = ipc_importance_for_task(task, FALSE);
2757 if (IIT_NULL != task_imp) {
2758 ipc_importance_task_mark_receiver(task_imp, receiving);
2759 ipc_importance_task_release(task_imp);
2760 }
2761 #endif /* IMPORTANCE_INHERITANCE */
2762 }
2763
2764 void
2765 task_importance_mark_denap_receiver(task_t task, boolean_t denap)
2766 {
2767 #if IMPORTANCE_INHERITANCE
2768 ipc_importance_task_t task_imp;
2769
2770 task_imp = ipc_importance_for_task(task, FALSE);
2771 if (IIT_NULL != task_imp) {
2772 ipc_importance_task_mark_denap_receiver(task_imp, denap);
2773 ipc_importance_task_release(task_imp);
2774 }
2775 #endif /* IMPORTANCE_INHERITANCE */
2776 }
2777
2778 void
2779 task_importance_reset(__imp_only task_t task)
2780 {
2781 #if IMPORTANCE_INHERITANCE
2782 ipc_importance_task_t task_imp;
2783
2784 /* TODO: Lower importance downstream before disconnect */
2785 task_imp = task->task_imp_base;
2786 ipc_importance_reset(task_imp, FALSE);
2787 task_importance_update_live_donor(task);
2788 #endif /* IMPORTANCE_INHERITANCE */
2789 }
2790
2791 #if IMPORTANCE_INHERITANCE
2792
2793 /*
2794 * Sets the task boost bit to the provided value. Does NOT run the update function.
2795 *
2796 * Task lock must be held.
2797 */
2798 static void
2799 task_set_boost_locked(task_t task, boolean_t boost_active)
2800 {
2801 #if IMPORTANCE_DEBUG
2802 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_BOOST, (boost_active ? IMP_BOOSTED : IMP_UNBOOSTED)) | DBG_FUNC_START),
2803 proc_selfpid(), task_pid(task), trequested_0(task), trequested_1(task), 0);
2804 #endif
2805
2806 task->requested_policy.trp_boosted = boost_active;
2807
2808 #if IMPORTANCE_DEBUG
2809 if (boost_active == TRUE) {
2810 DTRACE_BOOST2(boost, task_t, task, int, task_pid(task));
2811 } else {
2812 DTRACE_BOOST2(unboost, task_t, task, int, task_pid(task));
2813 }
2814 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_BOOST, (boost_active ? IMP_BOOSTED : IMP_UNBOOSTED)) | DBG_FUNC_END),
2815 proc_selfpid(), task_pid(task),
2816 trequested_0(task), trequested_1(task), 0);
2817 #endif
2818 }
2819
2820 /*
2821 * Sets the task boost bit to the provided value and applies the update.
2822 *
2823 * Task lock must be held. Must call update complete after unlocking the task.
2824 */
2825 void
2826 task_update_boost_locked(task_t task, boolean_t boost_active, task_pend_token_t pend_token)
2827 {
2828 task_set_boost_locked(task, boost_active);
2829
2830 task_policy_update_locked(task, pend_token);
2831 }
2832
2833 /*
2834 * Check if this task should donate importance.
2835 *
2836 * May be called without taking the task lock. In that case, donor status can change
2837 * so you must check only once for each donation event.
2838 */
2839 boolean_t
2840 task_is_importance_donor(task_t task)
2841 {
2842 if (task->task_imp_base == IIT_NULL)
2843 return FALSE;
2844 return ipc_importance_task_is_donor(task->task_imp_base);
2845 }
2846
2847 /*
2848 * Query the status of the task's donor mark.
2849 */
2850 boolean_t
2851 task_is_marked_importance_donor(task_t task)
2852 {
2853 if (task->task_imp_base == IIT_NULL)
2854 return FALSE;
2855 return ipc_importance_task_is_marked_donor(task->task_imp_base);
2856 }
2857
2858 /*
2859 * Query the status of the task's live donor and donor mark.
2860 */
2861 boolean_t
2862 task_is_marked_live_importance_donor(task_t task)
2863 {
2864 if (task->task_imp_base == IIT_NULL)
2865 return FALSE;
2866 return ipc_importance_task_is_marked_live_donor(task->task_imp_base);
2867 }
2868
2869
2870 /*
2871 * This routine may be called without holding task lock
2872 * since the value of imp_receiver can never be unset.
2873 */
2874 boolean_t
2875 task_is_importance_receiver(task_t task)
2876 {
2877 if (task->task_imp_base == IIT_NULL)
2878 return FALSE;
2879 return ipc_importance_task_is_marked_receiver(task->task_imp_base);
2880 }
2881
2882 /*
2883 * Query the task's receiver mark.
2884 */
2885 boolean_t
2886 task_is_marked_importance_receiver(task_t task)
2887 {
2888 if (task->task_imp_base == IIT_NULL)
2889 return FALSE;
2890 return ipc_importance_task_is_marked_receiver(task->task_imp_base);
2891 }
2892
2893 /*
2894 * This routine may be called without holding task lock
2895 * since the value of de-nap receiver can never be unset.
2896 */
2897 boolean_t
2898 task_is_importance_denap_receiver(task_t task)
2899 {
2900 if (task->task_imp_base == IIT_NULL)
2901 return FALSE;
2902 return ipc_importance_task_is_denap_receiver(task->task_imp_base);
2903 }
2904
2905 /*
2906 * Query the task's de-nap receiver mark.
2907 */
2908 boolean_t
2909 task_is_marked_importance_denap_receiver(task_t task)
2910 {
2911 if (task->task_imp_base == IIT_NULL)
2912 return FALSE;
2913 return ipc_importance_task_is_marked_denap_receiver(task->task_imp_base);
2914 }
2915
2916 /*
2917 * This routine may be called without holding task lock
2918 * since the value of imp_receiver can never be unset.
2919 */
2920 boolean_t
2921 task_is_importance_receiver_type(task_t task)
2922 {
2923 if (task->task_imp_base == IIT_NULL)
2924 return FALSE;
2925 return (task_is_importance_receiver(task) ||
2926 task_is_importance_denap_receiver(task));
2927 }
2928
2929 /*
2930 * External importance assertions are managed by the process in userspace
2931 * Internal importance assertions are the responsibility of the kernel
2932 * Assertions are changed from internal to external via task_importance_externalize_assertion
2933 */
2934
2935 int
2936 task_importance_hold_internal_assertion(task_t target_task, uint32_t count)
2937 {
2938 ipc_importance_task_t task_imp;
2939 kern_return_t ret;
2940
2941 /* may be first time, so allow for possible importance setup */
2942 task_imp = ipc_importance_for_task(target_task, FALSE);
2943 if (IIT_NULL == task_imp) {
2944 return EOVERFLOW;
2945 }
2946 ret = ipc_importance_task_hold_internal_assertion(task_imp, count);
2947 ipc_importance_task_release(task_imp);
2948
2949 return (KERN_SUCCESS != ret) ? ENOTSUP : 0;
2950 }
2951
2952 int
2953 task_importance_hold_file_lock_assertion(task_t target_task, uint32_t count)
2954 {
2955 ipc_importance_task_t task_imp;
2956 kern_return_t ret;
2957
2958 /* may be first time, so allow for possible importance setup */
2959 task_imp = ipc_importance_for_task(target_task, FALSE);
2960 if (IIT_NULL == task_imp) {
2961 return EOVERFLOW;
2962 }
2963 ret = ipc_importance_task_hold_file_lock_assertion(task_imp, count);
2964 ipc_importance_task_release(task_imp);
2965
2966 return (KERN_SUCCESS != ret) ? ENOTSUP : 0;
2967 }
2968
2969 int
2970 task_importance_hold_legacy_external_assertion(task_t target_task, uint32_t count)
2971 {
2972 ipc_importance_task_t task_imp;
2973 kern_return_t ret;
2974
2975 /* must already have set up an importance */
2976 task_imp = target_task->task_imp_base;
2977 if (IIT_NULL == task_imp) {
2978 return EOVERFLOW;
2979 }
2980 ret = ipc_importance_task_hold_legacy_external_assertion(task_imp, count);
2981 return (KERN_SUCCESS != ret) ? ENOTSUP : 0;
2982 }
2983
2984 int
2985 task_importance_drop_file_lock_assertion(task_t target_task, uint32_t count)
2986 {
2987 ipc_importance_task_t task_imp;
2988 kern_return_t ret;
2989
2990 /* must already have set up an importance */
2991 task_imp = target_task->task_imp_base;
2992 if (IIT_NULL == task_imp) {
2993 return EOVERFLOW;
2994 }
2995 ret = ipc_importance_task_drop_file_lock_assertion(target_task->task_imp_base, count);
2996 return (KERN_SUCCESS != ret) ? EOVERFLOW : 0;
2997 }
2998
2999 int
3000 task_importance_drop_legacy_external_assertion(task_t target_task, uint32_t count)
3001 {
3002 ipc_importance_task_t task_imp;
3003 kern_return_t ret;
3004
3005 /* must already have set up an importance */
3006 task_imp = target_task->task_imp_base;
3007 if (IIT_NULL == task_imp) {
3008 return EOVERFLOW;
3009 }
3010 ret = ipc_importance_task_drop_legacy_external_assertion(task_imp, count);
3011 return (KERN_SUCCESS != ret) ? EOVERFLOW : 0;
3012 }
3013
3014 static void
3015 task_add_importance_watchport(task_t task, mach_port_t port, int *boostp)
3016 {
3017 int boost = 0;
3018
3019 __impdebug_only int released_pid = 0;
3020 __impdebug_only int pid = task_pid(task);
3021
3022 ipc_importance_task_t release_imp_task = IIT_NULL;
3023
3024 if (IP_VALID(port) != 0) {
3025 ipc_importance_task_t new_imp_task = ipc_importance_for_task(task, FALSE);
3026
3027 ip_lock(port);
3028
3029 /*
3030 * The port must have been marked tempowner already.
3031 * This also filters out ports whose receive rights
3032 * are already enqueued in a message, as you can't
3033 * change the right's destination once it's already
3034 * on its way.
3035 */
3036 if (port->ip_tempowner != 0) {
3037 assert(port->ip_impdonation != 0);
3038
3039 boost = port->ip_impcount;
3040 if (IIT_NULL != port->ip_imp_task) {
3041 /*
3042 * if this port is already bound to a task,
3043 * release the task reference and drop any
3044 * watchport-forwarded boosts
3045 */
3046 release_imp_task = port->ip_imp_task;
3047 port->ip_imp_task = IIT_NULL;
3048 }
3049
3050 /* mark the port as watching another task (reference held in port->ip_imp_task) */
3051 if (ipc_importance_task_is_marked_receiver(new_imp_task)) {
3052 port->ip_imp_task = new_imp_task;
3053 new_imp_task = IIT_NULL;
3054 }
3055 }
3056 ip_unlock(port);
3057
3058 if (IIT_NULL != new_imp_task) {
3059 ipc_importance_task_release(new_imp_task);
3060 }
3061
3062 if (IIT_NULL != release_imp_task) {
3063 if (boost > 0)
3064 ipc_importance_task_drop_internal_assertion(release_imp_task, boost);
3065
3066 // released_pid = task_pid(release_imp_task); /* TODO: Need ref-safe way to get pid */
3067 ipc_importance_task_release(release_imp_task);
3068 }
3069 #if IMPORTANCE_DEBUG
3070 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_WATCHPORT, 0)) | DBG_FUNC_NONE,
3071 proc_selfpid(), pid, boost, released_pid, 0);
3072 #endif /* IMPORTANCE_DEBUG */
3073 }
3074
3075 *boostp = boost;
3076 return;
3077 }
3078
3079 #endif /* IMPORTANCE_INHERITANCE */
3080
3081 /*
3082 * Routines for VM to query task importance
3083 */
3084
3085
3086 /*
3087 * Order to be considered while estimating importance
3088 * for low memory notification and purging purgeable memory.
3089 */
3090 #define TASK_IMPORTANCE_FOREGROUND 4
3091 #define TASK_IMPORTANCE_NOTDARWINBG 1
3092
3093
3094 /*
3095 * (Un)Mark the task as a privileged listener for memory notifications.
3096 * If marked, this task will be among the first to be notified, ahead of
3097 * the bulk of all other tasks, when the system enters a pressure level
3098 * of interest to this task.
3099 */
3100 int
3101 task_low_mem_privileged_listener(task_t task, boolean_t new_value, boolean_t *old_value)
3102 {
3103 if (old_value != NULL) {
3104 *old_value = (boolean_t)task->low_mem_privileged_listener;
3105 } else {
3106 task_lock(task);
3107 task->low_mem_privileged_listener = (uint32_t)new_value;
3108 task_unlock(task);
3109 }
3110
3111 return 0;
3112 }
3113
3114 /*
3115 * Checks if the task is already notified.
3116 *
3117 * Condition: task lock should be held while calling this function.
3118 */
3119 boolean_t
3120 task_has_been_notified(task_t task, int pressurelevel)
3121 {
3122 if (task == NULL) {
3123 return FALSE;
3124 }
3125
3126 if (pressurelevel == kVMPressureWarning)
3127 return (task->low_mem_notified_warn ? TRUE : FALSE);
3128 else if (pressurelevel == kVMPressureCritical)
3129 return (task->low_mem_notified_critical ? TRUE : FALSE);
3130 else
3131 return TRUE;
3132 }
3133
3134
3135 /*
3136 * Checks if the task is used for purging.
3137 *
3138 * Condition: task lock should be held while calling this function.
3139 */
3140 boolean_t
3141 task_used_for_purging(task_t task, int pressurelevel)
3142 {
3143 if (task == NULL) {
3144 return FALSE;
3145 }
3146
3147 if (pressurelevel == kVMPressureWarning)
3148 return (task->purged_memory_warn ? TRUE : FALSE);
3149 else if (pressurelevel == kVMPressureCritical)
3150 return (task->purged_memory_critical ? TRUE : FALSE);
3151 else
3152 return TRUE;
3153 }
3154
3155
3156 /*
3157 * Mark the task as notified with memory notification.
3158 *
3159 * Condition: task lock should be held while calling this function.
3160 */
3161 void
3162 task_mark_has_been_notified(task_t task, int pressurelevel)
3163 {
3164 if (task == NULL) {
3165 return;
3166 }
3167
3168 if (pressurelevel == kVMPressureWarning)
3169 task->low_mem_notified_warn = 1;
3170 else if (pressurelevel == kVMPressureCritical)
3171 task->low_mem_notified_critical = 1;
3172 }
3173
3174
3175 /*
3176 * Mark the task as purged.
3177 *
3178 * Condition: task lock should be held while calling this function.
3179 */
3180 void
3181 task_mark_used_for_purging(task_t task, int pressurelevel)
3182 {
3183 if (task == NULL) {
3184 return;
3185 }
3186
3187 if (pressurelevel == kVMPressureWarning)
3188 task->purged_memory_warn = 1;
3189 else if (pressurelevel == kVMPressureCritical)
3190 task->purged_memory_critical = 1;
3191 }
3192
3193
3194 /*
3195 * Mark the task eligible for low memory notification.
3196 *
3197 * Condition: task lock should be held while calling this function.
3198 */
3199 void
3200 task_clear_has_been_notified(task_t task, int pressurelevel)
3201 {
3202 if (task == NULL) {
3203 return;
3204 }
3205
3206 if (pressurelevel == kVMPressureWarning)
3207 task->low_mem_notified_warn = 0;
3208 else if (pressurelevel == kVMPressureCritical)
3209 task->low_mem_notified_critical = 0;
3210 }
3211
3212
3213 /*
3214 * Mark the task eligible for purging its purgeable memory.
3215 *
3216 * Condition: task lock should be held while calling this function.
3217 */
3218 void
3219 task_clear_used_for_purging(task_t task)
3220 {
3221 if (task == NULL) {
3222 return;
3223 }
3224
3225 task->purged_memory_warn = 0;
3226 task->purged_memory_critical = 0;
3227 }
3228
3229
3230 /*
3231 * Estimate task importance for purging its purgeable memory
3232 * and low memory notification.
3233 *
3234 * Importance is calculated in the following order of criteria:
3235 * -Task role : Background vs Foreground
3236 * -Boost status: Not boosted vs Boosted
3237 * -Darwin BG status.
3238 *
3239 * Returns: The estimated task importance; less important tasks receive
3240 * lower values.
3241 */
3242 int
3243 task_importance_estimate(task_t task)
3244 {
3245 int task_importance = 0;
3246
3247 if (task == NULL) {
3248 return 0;
3249 }
3250
3251 if (proc_get_effective_task_policy(task, TASK_POLICY_ROLE) == TASK_FOREGROUND_APPLICATION)
3252 task_importance += TASK_IMPORTANCE_FOREGROUND;
3253
3254 if (proc_get_effective_task_policy(task, TASK_POLICY_DARWIN_BG) == 0)
3255 task_importance += TASK_IMPORTANCE_NOTDARWINBG;
3256
3257 return task_importance;
3258 }
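
/*
 * Worked example (editorial addition): a foreground, non-darwinbg task
 * scores TASK_IMPORTANCE_FOREGROUND + TASK_IMPORTANCE_NOTDARWINBG == 5,
 * while a backgrounded daemon scores 0 and would accordingly be an earlier
 * candidate for purging and low memory notification.
 */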
3259
3260 boolean_t
3261 task_has_assertions(task_t task)
3262 {
3263 return (task->task_imp_base->iit_assertcnt? TRUE : FALSE);
3264 }
3265
3266
3267 kern_return_t
3268 send_resource_violation(typeof(send_cpu_usage_violation) sendfunc,
3269 task_t violator,
3270 struct ledger_entry_info *linfo,
3271 resource_notify_flags_t flags)
3272 {
3273 #ifndef MACH_BSD
3274 return KERN_NOT_SUPPORTED;
3275 #else
3276 kern_return_t kr = KERN_SUCCESS;
3277 proc_t proc = NULL;
3278 posix_path_t proc_path = "";
3279 proc_name_t procname = "<unknown>";
3280 int pid = -1;
3281 clock_sec_t secs;
3282 clock_nsec_t nsecs;
3283 mach_timespec_t timestamp;
3284 thread_t curthread = current_thread();
3285 ipc_port_t dstport = MACH_PORT_NULL;
3286
3287 if (!violator) {
3288 kr = KERN_INVALID_ARGUMENT; goto finish;
3289 }
3290
3291 /* extract violator information */
3292 task_lock(violator);
3293 if (!(proc = get_bsdtask_info(violator))) {
3294 task_unlock(violator);
3295 kr = KERN_INVALID_ARGUMENT; goto finish;
3296 }
3297 (void)mig_strncpy(procname, proc_best_name(proc), sizeof(procname));
3298 pid = task_pid(violator);
3299 if (flags & kRNFatalLimitFlag) {
3300 kr = proc_pidpathinfo_internal(proc, 0, proc_path,
3301 sizeof(proc_path), NULL);
3302 }
3303 task_unlock(violator);
3304 if (kr) goto finish;
3305
3306 /* violation time ~ now */
3307 clock_get_calendar_nanotime(&secs, &nsecs);
3308 timestamp.tv_sec = (int32_t)secs;
3309 timestamp.tv_nsec = (int32_t)nsecs;
3310 /* 25567702 tracks widening mach_timespec_t */
3311
3312 /* send message */
3313 kr = host_get_special_port(host_priv_self(), HOST_LOCAL_NODE,
3314 HOST_RESOURCE_NOTIFY_PORT, &dstport);
3315 if (kr) goto finish;
3316
3317 /* TH_OPT_HONOR_QLIMIT causes ipc_kmsg_send() to respect the
3318 * queue limit. It also unsets this flag, but this code also
3319 * unsets it for clarity and in case that code changes. */
3320 curthread->options |= TH_OPT_HONOR_QLIMIT;
3321 kr = sendfunc(dstport,
3322 procname, pid, proc_path, timestamp,
3323 linfo->lei_balance, linfo->lei_last_refill,
3324 linfo->lei_limit, linfo->lei_refill_period,
3325 flags);
3326 curthread->options &= (~TH_OPT_HONOR_QLIMIT);
3327
3328 ipc_port_release_send(dstport);
3329
3330 finish:
3331 return kr;
3332 #endif /* MACH_BSD */
3333 }
3334
3335
3336 /*
3337 * Resource violations trace four 64-bit integers. For K32, two additional
3338 * codes are allocated, the first with the low nibble doubled. So if the K64
3339 * code is 0x042, the K32 codes would be 0x044 and 0x045.
3340 */
3341 #ifdef __LP64__
3342 void
3343 trace_resource_violation(uint16_t code,
3344 struct ledger_entry_info *linfo)
3345 {
3346 KERNEL_DBG_IST_SANE(KDBG_CODE(DBG_MACH, DBG_MACH_RESOURCE, code),
3347 linfo->lei_balance, linfo->lei_last_refill,
3348 linfo->lei_limit, linfo->lei_refill_period);
3349 }
3350 #else /* K32 */
3351 /* TODO: create/find a trace_two_LLs() for K32 systems */
3352 #define MASK32 0xffffffff
3353 void
3354 trace_resource_violation(uint16_t code,
3355 struct ledger_entry_info *linfo)
3356 {
3357 int8_t lownibble = (code & 0x3) * 2;
3358 int16_t codeA = (code & 0xffc) | lownibble;
3359 int16_t codeB = codeA + 1;
3360
3361 int32_t balance_high = (linfo->lei_balance >> 32) & MASK32;
3362 int32_t balance_low = linfo->lei_balance & MASK32;
3363 int32_t last_refill_high = (linfo->lei_last_refill >> 32) & MASK32;
3364 int32_t last_refill_low = linfo->lei_last_refill & MASK32;
3365
3366 int32_t limit_high = (linfo->lei_limit >> 32) & MASK32;
3367 int32_t limit_low = linfo->lei_limit & MASK32;
3368 int32_t refill_period_high = (linfo->lei_refill_period >> 32) & MASK32;
3369 int32_t refill_period_low = linfo->lei_refill_period & MASK32;
3370
3371 KERNEL_DBG_IST_SANE(KDBG_CODE(DBG_MACH, DBG_MACH_RESOURCE, codeA),
3372 balance_high, balance_low,
3373 last_refill_high, last_refill_low);
3374 KERNEL_DBG_IST_SANE(KDBG_CODE(DBG_MACH, DBG_MACH_RESOURCE, codeB),
3375 limit_high, limit_low,
3376 refill_period_high, refill_period_low);
3377 }
3378 #endif /* K64/K32 */