2 * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 #include <kern/kern_types.h>
30 #include <mach/mach_types.h>
31 #include <mach/boolean.h>
33 #include <kern/coalition.h>
34 #include <kern/exc_resource.h>
35 #include <kern/host.h>
36 #include <kern/ledger.h>
37 #include <kern/mach_param.h> /* for TASK_CHUNK */
39 #include <kern/monotonic.h>
40 #endif /* MONOTONIC */
41 #include <kern/policy_internal.h>
42 #include <kern/task.h>
43 #include <kern/thread_group.h>
44 #include <kern/zalloc.h>
46 #include <libkern/OSAtomic.h>
48 #include <mach/coalition_notification_server.h>
49 #include <mach/host_priv.h>
50 #include <mach/host_special_ports.h>
54 #include <sys/errno.h>
57 * BSD interface functions
59 int coalitions_get_list(int type
, struct procinfo_coalinfo
*coal_list
, int list_sz
);
60 coalition_t
task_get_coalition(task_t task
, int type
);
61 boolean_t
coalition_is_leader(task_t task
, coalition_t coal
);
62 task_t
coalition_get_leader(coalition_t coal
);
63 int coalition_get_task_count(coalition_t coal
);
64 uint64_t coalition_get_page_count(coalition_t coal
, int *ntasks
);
65 int coalition_get_pid_list(coalition_t coal
, uint32_t rolemask
, int sort_order
,
66 int *pid_list
, int list_sz
);
68 /* defined in task.c */
69 extern ledger_template_t task_ledger_template
;
72 * Templates; task template is copied due to potential allocation limits on
75 ledger_template_t coalition_task_ledger_template
= NULL
;
76 ledger_template_t coalition_ledger_template
= NULL
;
78 extern int proc_selfpid(void);
80 * Coalition zone needs limits. We expect there will be as many coalitions as
81 * tasks (same order of magnitude), so use the task zone's limits.
83 #define CONFIG_COALITION_MAX CONFIG_TASK_MAX
84 #define COALITION_CHUNK TASK_CHUNK
86 int unrestrict_coalition_syscalls
;
87 int merge_adaptive_coalitions
;
89 LCK_GRP_DECLARE(coalitions_lck_grp
, "coalition");
91 /* coalitions_list_lock protects coalition_count, coalitions queue, next_coalition_id. */
92 static LCK_MTX_DECLARE(coalitions_list_lock
, &coalitions_lck_grp
);
93 static uint64_t coalition_count
;
94 static uint64_t coalition_next_id
= 1;
95 static queue_head_t coalitions_q
;
97 coalition_t init_coalition
[COALITION_NUM_TYPES
];
98 coalition_t corpse_coalition
[COALITION_NUM_TYPES
];
101 coal_type_str(int type
)
104 case COALITION_TYPE_RESOURCE
:
106 case COALITION_TYPE_JETSAM
:
113 struct coalition_type
{
118 * pre-condition: coalition just allocated (unlocked), unreferenced,
121 kern_return_t (*init
)(coalition_t coal
, boolean_t privileged
);
125 * pre-condition: coalition unlocked
126 * pre-condition: coalition refcount=0, active_count=0,
127 * termrequested=1, terminated=1, reaped=1
129 void (*dealloc
)(coalition_t coal
);
133 * pre-condition: coalition locked
134 * pre-condition: coalition !repead and !terminated
136 kern_return_t (*adopt_task
)(coalition_t coal
, task_t task
);
140 * pre-condition: coalition locked
141 * pre-condition: task has been removed from coalition's task list
143 kern_return_t (*remove_task
)(coalition_t coal
, task_t task
);
147 * pre-condition: coalition locked
148 * pre-condition: task added to coalition's task list,
149 * active_count >= 1 (at least the given task is active)
151 kern_return_t (*set_taskrole
)(coalition_t coal
, task_t task
, int role
);
155 * pre-condition: coalition locked
156 * pre-condition: task added to coalition's task list,
157 * active_count >= 1 (at least the given task is active)
159 int (*get_taskrole
)(coalition_t coal
, task_t task
);
163 * pre-condition: coalition locked
165 void (*iterate_tasks
)(coalition_t coal
, void *ctx
, void (*callback
)(coalition_t
, void *, task_t
));
169 * COALITION_TYPE_RESOURCE
172 static kern_return_t
i_coal_resource_init(coalition_t coal
, boolean_t privileged
);
173 static void i_coal_resource_dealloc(coalition_t coal
);
174 static kern_return_t
i_coal_resource_adopt_task(coalition_t coal
, task_t task
);
175 static kern_return_t
i_coal_resource_remove_task(coalition_t coal
, task_t task
);
176 static kern_return_t
i_coal_resource_set_taskrole(coalition_t coal
,
177 task_t task
, int role
);
178 static int i_coal_resource_get_taskrole(coalition_t coal
, task_t task
);
179 static void i_coal_resource_iterate_tasks(coalition_t coal
, void *ctx
,
180 void (*callback
)(coalition_t
, void *, task_t
));
183 * Ensure COALITION_NUM_THREAD_QOS_TYPES defined in mach/coalition.h still
184 * matches THREAD_QOS_LAST defined in mach/thread_policy.h
186 static_assert(COALITION_NUM_THREAD_QOS_TYPES
== THREAD_QOS_LAST
);
188 struct i_resource_coalition
{
190 * This keeps track of resource utilization of tasks that are no longer active
191 * in the coalition and is updated when a task is removed from the coalition.
195 uint64_t byteswritten
;
198 uint64_t logical_immediate_writes
;
199 uint64_t logical_deferred_writes
;
200 uint64_t logical_invalidated_writes
;
201 uint64_t logical_metadata_writes
;
202 uint64_t logical_immediate_writes_to_external
;
203 uint64_t logical_deferred_writes_to_external
;
204 uint64_t logical_invalidated_writes_to_external
;
205 uint64_t logical_metadata_writes_to_external
;
207 uint64_t cpu_time_eqos
[COALITION_NUM_THREAD_QOS_TYPES
]; /* cpu time per effective QoS class */
208 uint64_t cpu_time_rqos
[COALITION_NUM_THREAD_QOS_TYPES
]; /* cpu time per requested QoS class */
209 uint64_t cpu_instructions
;
212 uint64_t task_count
; /* tasks that have started in this coalition */
213 uint64_t dead_task_count
; /* tasks that have exited in this coalition;
214 * subtract from task_count to get count
215 * of "active" tasks */
217 * Count the length of time this coalition had at least one active task.
218 * This can be a 'denominator' to turn e.g. cpu_time to %cpu.
220 uint64_t last_became_nonempty_time
;
221 uint64_t time_nonempty
;
223 queue_head_t tasks
; /* List of active tasks in the coalition */
225 * This ledger is used for triggering resource exception. For the tracked resources, this is updated
226 * when the member tasks' resource usage changes.
228 ledger_t resource_monitor_ledger
;
229 #if CONFIG_PHYS_WRITE_ACCT
230 uint64_t fs_metadata_writes
;
231 #endif /* CONFIG_PHYS_WRITE_ACCT */
235 * COALITION_TYPE_JETSAM
238 static kern_return_t
i_coal_jetsam_init(coalition_t coal
, boolean_t privileged
);
239 static void i_coal_jetsam_dealloc(coalition_t coal
);
240 static kern_return_t
i_coal_jetsam_adopt_task(coalition_t coal
, task_t task
);
241 static kern_return_t
i_coal_jetsam_remove_task(coalition_t coal
, task_t task
);
242 static kern_return_t
i_coal_jetsam_set_taskrole(coalition_t coal
,
243 task_t task
, int role
);
244 int i_coal_jetsam_get_taskrole(coalition_t coal
, task_t task
);
245 static void i_coal_jetsam_iterate_tasks(coalition_t coal
, void *ctx
,
246 void (*callback
)(coalition_t
, void *, task_t
));
248 struct i_jetsam_coalition
{
250 queue_head_t extensions
;
251 queue_head_t services
;
253 struct thread_group
*thread_group
;
258 * main coalition structure
261 uint64_t id
; /* monotonically increasing */
263 uint32_t role
; /* default task role (background, adaptive, interactive, etc) */
264 uint32_t ref_count
; /* Number of references to the memory containing this struct */
265 uint32_t active_count
; /* Number of members of (tasks in) the
266 * coalition, plus vouchers referring
267 * to the coalition */
268 uint32_t focal_task_count
; /* Number of TASK_FOREGROUND_APPLICATION tasks in the coalition */
269 uint32_t nonfocal_task_count
; /* Number of TASK_BACKGROUND_APPLICATION tasks in the coalition */
271 /* coalition flags */
272 uint32_t privileged
: 1; /* Members of this coalition may create
273 * and manage coalitions and may posix_spawn
274 * processes into selected coalitions */
277 uint32_t termrequested
: 1; /* launchd has requested termination when coalition becomes empty */
278 uint32_t terminated
: 1; /* coalition became empty and spawns are now forbidden */
279 uint32_t reaped
: 1; /* reaped, invisible to userspace, but waiting for ref_count to go to zero */
280 uint32_t notified
: 1; /* no-more-processes notification was sent via special port */
281 uint32_t efficient
: 1; /* launchd has marked the coalition as efficient */
282 #if DEVELOPMENT || DEBUG
283 uint32_t should_notify
: 1; /* should this coalition send notifications (default: yes) */
286 queue_chain_t coalitions
; /* global list of coalitions */
288 decl_lck_mtx_data(, lock
); /* Coalition lock. */
290 /* put coalition type-specific structures here */
292 struct i_resource_coalition r
;
293 struct i_jetsam_coalition j
;
298 * register different coalition types:
299 * these must be kept in the order specified in coalition.h
301 static const struct coalition_type
302 s_coalition_types
[COALITION_NUM_TYPES
] = {
304 COALITION_TYPE_RESOURCE
,
306 i_coal_resource_init
,
307 i_coal_resource_dealloc
,
308 i_coal_resource_adopt_task
,
309 i_coal_resource_remove_task
,
310 i_coal_resource_set_taskrole
,
311 i_coal_resource_get_taskrole
,
312 i_coal_resource_iterate_tasks
,
315 COALITION_TYPE_JETSAM
,
318 i_coal_jetsam_dealloc
,
319 i_coal_jetsam_adopt_task
,
320 i_coal_jetsam_remove_task
,
321 i_coal_jetsam_set_taskrole
,
322 i_coal_jetsam_get_taskrole
,
323 i_coal_jetsam_iterate_tasks
,
327 ZONE_DECLARE(coalition_zone
, "coalitions",
328 sizeof(struct coalition
), ZC_NOENCRYPT
| ZC_ZFREE_CLEARMEM
);
330 #define coal_call(coal, func, ...) \
331 (s_coalition_types[(coal)->type].func)(coal, ## __VA_ARGS__)
334 #define coalition_lock(c) do{ lck_mtx_lock(&c->lock); }while(0)
335 #define coalition_unlock(c) do{ lck_mtx_unlock(&c->lock); }while(0)
338 * Define the coalition type to track focal tasks.
339 * On embedded, track them using jetsam coalitions since they have associated thread
340 * groups which reflect this property as a flag (and pass it down to CLPC).
341 * On non-embedded platforms, since not all coalitions have jetsam coalitions
342 * track focal counts on the resource coalition.
344 #if !XNU_TARGET_OS_OSX
345 #define COALITION_FOCAL_TASKS_ACCOUNTING COALITION_TYPE_JETSAM
346 #else /* !XNU_TARGET_OS_OSX */
347 #define COALITION_FOCAL_TASKS_ACCOUNTING COALITION_TYPE_RESOURCE
348 #endif /* !XNU_TARGET_OS_OSX */
353 * Coalition ledger implementation
357 struct coalition_ledger_indices coalition_ledgers
=
358 {.logical_writes
= -1, };
359 void __attribute__((noinline
)) SENDING_NOTIFICATION__THIS_COALITION_IS_CAUSING_TOO_MUCH_IO(int flavor
);
362 coalition_ledger_get_from_task(task_t task
)
364 ledger_t ledger
= LEDGER_NULL
;
365 coalition_t coal
= task
->coalition
[COALITION_TYPE_RESOURCE
];
367 if (coal
!= NULL
&& (!queue_empty(&task
->task_coalition
[COALITION_TYPE_RESOURCE
]))) {
368 ledger
= coal
->r
.resource_monitor_ledger
;
369 ledger_reference(ledger
);
/* Flags for coalition_io_monitor_ctl(). */
enum {
	COALITION_IO_LEDGER_ENABLE,
	COALITION_IO_LEDGER_DISABLE
};
381 coalition_io_monitor_ctl(struct coalition
*coalition
, uint32_t flags
, int64_t limit
)
383 ledger_t ledger
= coalition
->r
.resource_monitor_ledger
;
385 if (flags
== COALITION_IO_LEDGER_ENABLE
) {
386 /* Configure the logical I/O ledger */
387 ledger_set_limit(ledger
, coalition_ledgers
.logical_writes
, (limit
* 1024 * 1024), 0);
388 ledger_set_period(ledger
, coalition_ledgers
.logical_writes
, (COALITION_LEDGER_MONITOR_INTERVAL_SECS
* NSEC_PER_SEC
));
389 } else if (flags
== COALITION_IO_LEDGER_DISABLE
) {
390 ledger_disable_refill(ledger
, coalition_ledgers
.logical_writes
);
391 ledger_disable_callback(ledger
, coalition_ledgers
.logical_writes
);
396 coalition_ledger_set_logical_writes_limit(struct coalition
*coalition
, int64_t limit
)
400 /* limit = -1 will be used to disable the limit and the callback */
401 if (limit
> COALITION_MAX_LOGICAL_WRITES_LIMIT
|| limit
== 0 || limit
< -1) {
406 coalition_lock(coalition
);
408 coalition_io_monitor_ctl(coalition
, COALITION_IO_LEDGER_DISABLE
, limit
);
410 coalition_io_monitor_ctl(coalition
, COALITION_IO_LEDGER_ENABLE
, limit
);
412 coalition_unlock(coalition
);
417 void __attribute__((noinline
))
418 SENDING_NOTIFICATION__THIS_COALITION_IS_CAUSING_TOO_MUCH_IO(int flavor
)
420 int pid
= proc_selfpid();
421 ledger_amount_t new_limit
;
422 task_t task
= current_task();
423 struct ledger_entry_info lei
;
426 struct coalition
*coalition
= task
->coalition
[COALITION_TYPE_RESOURCE
];
428 assert(coalition
!= NULL
);
429 ledger
= coalition
->r
.resource_monitor_ledger
;
432 case FLAVOR_IO_LOGICAL_WRITES
:
433 ledger_get_entry_info(ledger
, coalition_ledgers
.logical_writes
, &lei
);
434 trace_resource_violation(RMON_LOGWRITES_VIOLATED
, &lei
);
440 os_log(OS_LOG_DEFAULT
, "Coalition [%lld] caught causing excessive I/O (flavor: %d). Task I/O: %lld MB. [Limit : %lld MB per %lld secs]. Triggered by process [%d]\n",
441 coalition
->id
, flavor
, (lei
.lei_balance
/ (1024 * 1024)), (lei
.lei_limit
/ (1024 * 1024)),
442 (lei
.lei_refill_period
/ NSEC_PER_SEC
), pid
);
444 kr
= send_resource_violation(send_disk_writes_violation
, task
, &lei
, kRNFlagsNone
);
446 os_log(OS_LOG_DEFAULT
, "ERROR %#x returned from send_resource_violation(disk_writes, ...)\n", kr
);
450 * Continue to monitor the coalition after it hits the initital limit, but increase
451 * the limit exponentially so that we don't spam the listener.
453 new_limit
= (lei
.lei_limit
/ 1024 / 1024) * 4;
454 coalition_lock(coalition
);
455 if (new_limit
> COALITION_MAX_LOGICAL_WRITES_LIMIT
) {
456 coalition_io_monitor_ctl(coalition
, COALITION_IO_LEDGER_DISABLE
, -1);
458 coalition_io_monitor_ctl(coalition
, COALITION_IO_LEDGER_ENABLE
, new_limit
);
460 coalition_unlock(coalition
);
467 coalition_io_rate_exceeded(int warning
, const void *param0
, __unused
const void *param1
)
470 SENDING_NOTIFICATION__THIS_COALITION_IS_CAUSING_TOO_MUCH_IO((int)param0
);
475 init_coalition_ledgers(void)
478 assert(coalition_ledger_template
== NULL
);
480 if ((t
= ledger_template_create("Per-coalition ledgers")) == NULL
) {
481 panic("couldn't create coalition ledger template");
484 coalition_ledgers
.logical_writes
= ledger_entry_add(t
, "logical_writes", "res", "bytes");
486 if (coalition_ledgers
.logical_writes
< 0) {
487 panic("couldn't create entries for coaliton ledger template");
490 ledger_set_callback(t
, coalition_ledgers
.logical_writes
, coalition_io_rate_exceeded
, (void *)FLAVOR_IO_LOGICAL_WRITES
, NULL
);
491 ledger_template_complete(t
);
493 coalition_task_ledger_template
= ledger_template_copy(task_ledger_template
, "Coalition task ledgers");
495 if (coalition_task_ledger_template
== NULL
) {
496 panic("couldn't create coalition task ledger template");
499 ledger_template_complete(coalition_task_ledger_template
);
501 coalition_ledger_template
= t
;
505 coalition_io_ledger_update(task_t task
, int32_t flavor
, boolean_t is_credit
, uint32_t io_size
)
508 coalition_t coal
= task
->coalition
[COALITION_TYPE_RESOURCE
];
510 assert(coal
!= NULL
);
511 ledger
= coal
->r
.resource_monitor_ledger
;
512 if (LEDGER_VALID(ledger
)) {
513 if (flavor
== FLAVOR_IO_LOGICAL_WRITES
) {
515 ledger_credit(ledger
, coalition_ledgers
.logical_writes
, io_size
);
517 ledger_debit(ledger
, coalition_ledgers
.logical_writes
, io_size
);
524 coalition_notify_user(uint64_t id
, uint32_t flags
)
526 mach_port_t user_port
;
529 kr
= host_get_coalition_port(host_priv_self(), &user_port
);
530 if ((kr
!= KERN_SUCCESS
) || !IPC_PORT_VALID(user_port
)) {
534 coalition_notification(user_port
, id
, flags
);
535 ipc_port_release_send(user_port
);
540 * COALITION_TYPE_RESOURCE
544 i_coal_resource_init(coalition_t coal
, boolean_t privileged
)
547 assert(coal
&& coal
->type
== COALITION_TYPE_RESOURCE
);
548 coal
->r
.ledger
= ledger_instantiate(coalition_task_ledger_template
,
549 LEDGER_CREATE_ACTIVE_ENTRIES
);
550 if (coal
->r
.ledger
== NULL
) {
551 return KERN_RESOURCE_SHORTAGE
;
554 coal
->r
.resource_monitor_ledger
= ledger_instantiate(coalition_ledger_template
,
555 LEDGER_CREATE_ACTIVE_ENTRIES
);
556 if (coal
->r
.resource_monitor_ledger
== NULL
) {
557 return KERN_RESOURCE_SHORTAGE
;
560 queue_init(&coal
->r
.tasks
);
566 i_coal_resource_dealloc(coalition_t coal
)
568 assert(coal
&& coal
->type
== COALITION_TYPE_RESOURCE
);
570 ledger_dereference(coal
->r
.ledger
);
571 ledger_dereference(coal
->r
.resource_monitor_ledger
);
575 i_coal_resource_adopt_task(coalition_t coal
, task_t task
)
577 struct i_resource_coalition
*cr
;
579 assert(coal
&& coal
->type
== COALITION_TYPE_RESOURCE
);
580 assert(queue_empty(&task
->task_coalition
[COALITION_TYPE_RESOURCE
]));
585 if (cr
->task_count
< cr
->dead_task_count
) {
586 panic("%s: coalition %p id:%llu type:%s task_count(%llu) < dead_task_count(%llu)",
587 __func__
, coal
, coal
->id
, coal_type_str(coal
->type
),
588 cr
->task_count
, cr
->dead_task_count
);
591 /* If moving from 0->1 active tasks */
592 if (cr
->task_count
- cr
->dead_task_count
== 1) {
593 cr
->last_became_nonempty_time
= mach_absolute_time();
596 /* put the task on the coalition's list of tasks */
597 enqueue_tail(&cr
->tasks
, &task
->task_coalition
[COALITION_TYPE_RESOURCE
]);
599 coal_dbg("Added PID:%d to id:%llu, task_count:%llu, dead_count:%llu, nonempty_time:%llu",
600 task_pid(task
), coal
->id
, cr
->task_count
, cr
->dead_task_count
,
601 cr
->last_became_nonempty_time
);
607 i_coal_resource_remove_task(coalition_t coal
, task_t task
)
609 struct i_resource_coalition
*cr
;
611 assert(coal
&& coal
->type
== COALITION_TYPE_RESOURCE
);
612 assert(task
->coalition
[COALITION_TYPE_RESOURCE
] == coal
);
613 assert(!queue_empty(&task
->task_coalition
[COALITION_TYPE_RESOURCE
]));
616 * handle resource coalition accounting rollup for dead tasks
620 cr
->dead_task_count
++;
622 if (cr
->task_count
< cr
->dead_task_count
) {
623 panic("%s: coalition %p id:%llu type:%s task_count(%llu) < dead_task_count(%llu)",
624 __func__
, coal
, coal
->id
, coal_type_str(coal
->type
), cr
->task_count
, cr
->dead_task_count
);
627 /* If moving from 1->0 active tasks */
628 if (cr
->task_count
- cr
->dead_task_count
== 0) {
629 uint64_t last_time_nonempty
= mach_absolute_time() - cr
->last_became_nonempty_time
;
630 cr
->last_became_nonempty_time
= 0;
631 cr
->time_nonempty
+= last_time_nonempty
;
634 /* Do not roll up for exec'd task or exec copy task */
635 if (!task_is_exec_copy(task
) && !task_did_exec(task
)) {
636 ledger_rollup(cr
->ledger
, task
->ledger
);
637 cr
->bytesread
+= task
->task_io_stats
->disk_reads
.size
;
638 cr
->byteswritten
+= task
->task_io_stats
->total_io
.size
- task
->task_io_stats
->disk_reads
.size
;
639 #if defined(__x86_64__)
640 cr
->gpu_time
+= task_gpu_utilisation(task
);
641 #endif /* defined(__x86_64__) */
643 #if defined(__arm__) || defined(__arm64__)
644 cr
->energy
+= task_energy(task
);
645 #endif /* defined(__arm__) || defined(__arm64__) */
647 cr
->logical_immediate_writes
+= task
->task_writes_counters_internal
.task_immediate_writes
;
648 cr
->logical_deferred_writes
+= task
->task_writes_counters_internal
.task_deferred_writes
;
649 cr
->logical_invalidated_writes
+= task
->task_writes_counters_internal
.task_invalidated_writes
;
650 cr
->logical_metadata_writes
+= task
->task_writes_counters_internal
.task_metadata_writes
;
651 cr
->logical_immediate_writes_to_external
+= task
->task_writes_counters_external
.task_immediate_writes
;
652 cr
->logical_deferred_writes_to_external
+= task
->task_writes_counters_external
.task_deferred_writes
;
653 cr
->logical_invalidated_writes_to_external
+= task
->task_writes_counters_external
.task_invalidated_writes
;
654 cr
->logical_metadata_writes_to_external
+= task
->task_writes_counters_external
.task_metadata_writes
;
655 #if CONFIG_PHYS_WRITE_ACCT
656 cr
->fs_metadata_writes
+= task
->task_fs_metadata_writes
;
657 #endif /* CONFIG_PHYS_WRITE_ACCT */
658 cr
->cpu_ptime
+= task_cpu_ptime(task
);
659 task_update_cpu_time_qos_stats(task
, cr
->cpu_time_eqos
, cr
->cpu_time_rqos
);
661 uint64_t counts
[MT_CORE_NFIXED
] = {};
662 (void)mt_fixed_task_counts(task
, counts
);
663 cr
->cpu_cycles
+= counts
[MT_CORE_CYCLES
];
664 #if defined(MT_CORE_INSTRS)
665 cr
->cpu_instructions
+= counts
[MT_CORE_INSTRS
];
666 #endif /* defined(MT_CORE_INSTRS) */
667 #endif /* MONOTONIC */
670 /* remove the task from the coalition's list */
671 remqueue(&task
->task_coalition
[COALITION_TYPE_RESOURCE
]);
672 queue_chain_init(task
->task_coalition
[COALITION_TYPE_RESOURCE
]);
674 coal_dbg("removed PID:%d from id:%llu, task_count:%llu, dead_count:%llu",
675 task_pid(task
), coal
->id
, cr
->task_count
, cr
->dead_task_count
);
681 i_coal_resource_set_taskrole(__unused coalition_t coal
,
682 __unused task_t task
, __unused
int role
)
688 i_coal_resource_get_taskrole(__unused coalition_t coal
, __unused task_t task
)
692 assert(coal
&& coal
->type
== COALITION_TYPE_RESOURCE
);
694 qe_foreach_element(t
, &coal
->r
.tasks
, task_coalition
[COALITION_TYPE_RESOURCE
]) {
696 return COALITION_TASKROLE_UNDEF
;
704 i_coal_resource_iterate_tasks(coalition_t coal
, void *ctx
, void (*callback
)(coalition_t
, void *, task_t
))
707 assert(coal
&& coal
->type
== COALITION_TYPE_RESOURCE
);
709 qe_foreach_element(t
, &coal
->r
.tasks
, task_coalition
[COALITION_TYPE_RESOURCE
])
710 callback(coal
, ctx
, t
);
#if CONFIG_PHYS_WRITE_ACCT
extern uint64_t kernel_pm_writes;
#endif /* CONFIG_PHYS_WRITE_ACCT */
718 coalition_resource_usage_internal(coalition_t coal
, struct coalition_resource_usage
*cru_out
)
721 ledger_amount_t credit
, debit
;
724 if (coal
->type
!= COALITION_TYPE_RESOURCE
) {
725 return KERN_INVALID_ARGUMENT
;
728 /* Return KERN_INVALID_ARGUMENT for Corpse coalition */
729 for (i
= 0; i
< COALITION_NUM_TYPES
; i
++) {
730 if (coal
== corpse_coalition
[i
]) {
731 return KERN_INVALID_ARGUMENT
;
735 ledger_t sum_ledger
= ledger_instantiate(coalition_task_ledger_template
, LEDGER_CREATE_ACTIVE_ENTRIES
);
736 if (sum_ledger
== LEDGER_NULL
) {
737 return KERN_RESOURCE_SHORTAGE
;
740 coalition_lock(coal
);
743 * Start with the coalition's ledger, which holds the totals from all
746 ledger_rollup(sum_ledger
, coal
->r
.ledger
);
747 uint64_t bytesread
= coal
->r
.bytesread
;
748 uint64_t byteswritten
= coal
->r
.byteswritten
;
749 uint64_t gpu_time
= coal
->r
.gpu_time
;
750 uint64_t energy
= coal
->r
.energy
;
751 uint64_t logical_immediate_writes
= coal
->r
.logical_immediate_writes
;
752 uint64_t logical_deferred_writes
= coal
->r
.logical_deferred_writes
;
753 uint64_t logical_invalidated_writes
= coal
->r
.logical_invalidated_writes
;
754 uint64_t logical_metadata_writes
= coal
->r
.logical_metadata_writes
;
755 uint64_t logical_immediate_writes_to_external
= coal
->r
.logical_immediate_writes_to_external
;
756 uint64_t logical_deferred_writes_to_external
= coal
->r
.logical_deferred_writes_to_external
;
757 uint64_t logical_invalidated_writes_to_external
= coal
->r
.logical_invalidated_writes_to_external
;
758 uint64_t logical_metadata_writes_to_external
= coal
->r
.logical_metadata_writes_to_external
;
759 #if CONFIG_PHYS_WRITE_ACCT
760 uint64_t fs_metadata_writes
= coal
->r
.fs_metadata_writes
;
761 #endif /* CONFIG_PHYS_WRITE_ACCT */
762 int64_t cpu_time_billed_to_me
= 0;
763 int64_t cpu_time_billed_to_others
= 0;
764 int64_t energy_billed_to_me
= 0;
765 int64_t energy_billed_to_others
= 0;
766 uint64_t cpu_ptime
= coal
->r
.cpu_ptime
;
767 uint64_t cpu_time_eqos
[COALITION_NUM_THREAD_QOS_TYPES
];
768 memcpy(cpu_time_eqos
, coal
->r
.cpu_time_eqos
, sizeof(cpu_time_eqos
));
769 uint64_t cpu_time_rqos
[COALITION_NUM_THREAD_QOS_TYPES
];
770 memcpy(cpu_time_rqos
, coal
->r
.cpu_time_rqos
, sizeof(cpu_time_rqos
));
771 uint64_t cpu_instructions
= coal
->r
.cpu_instructions
;
772 uint64_t cpu_cycles
= coal
->r
.cpu_cycles
;
775 * Add to that all the active tasks' ledgers. Tasks cannot deallocate
776 * out from under us, since we hold the coalition lock.
779 qe_foreach_element(task
, &coal
->r
.tasks
, task_coalition
[COALITION_TYPE_RESOURCE
]) {
781 * Rolling up stats for exec copy task or exec'd task will lead to double accounting.
782 * Cannot take task lock after taking coaliton lock
784 if (task_is_exec_copy(task
) || task_did_exec(task
)) {
788 ledger_rollup(sum_ledger
, task
->ledger
);
789 bytesread
+= task
->task_io_stats
->disk_reads
.size
;
790 byteswritten
+= task
->task_io_stats
->total_io
.size
- task
->task_io_stats
->disk_reads
.size
;
791 #if defined(__x86_64__)
792 gpu_time
+= task_gpu_utilisation(task
);
793 #endif /* defined(__x86_64__) */
795 #if defined(__arm__) || defined(__arm64__)
796 energy
+= task_energy(task
);
797 #endif /* defined(__arm__) || defined(__arm64__) */
799 logical_immediate_writes
+= task
->task_writes_counters_internal
.task_immediate_writes
;
800 logical_deferred_writes
+= task
->task_writes_counters_internal
.task_deferred_writes
;
801 logical_invalidated_writes
+= task
->task_writes_counters_internal
.task_invalidated_writes
;
802 logical_metadata_writes
+= task
->task_writes_counters_internal
.task_metadata_writes
;
803 logical_immediate_writes_to_external
+= task
->task_writes_counters_external
.task_immediate_writes
;
804 logical_deferred_writes_to_external
+= task
->task_writes_counters_external
.task_deferred_writes
;
805 logical_invalidated_writes_to_external
+= task
->task_writes_counters_external
.task_invalidated_writes
;
806 logical_metadata_writes_to_external
+= task
->task_writes_counters_external
.task_metadata_writes
;
807 #if CONFIG_PHYS_WRITE_ACCT
808 fs_metadata_writes
+= task
->task_fs_metadata_writes
;
809 #endif /* CONFIG_PHYS_WRITE_ACCT */
811 cpu_ptime
+= task_cpu_ptime(task
);
812 task_update_cpu_time_qos_stats(task
, cpu_time_eqos
, cpu_time_rqos
);
814 uint64_t counts
[MT_CORE_NFIXED
] = {};
815 (void)mt_fixed_task_counts(task
, counts
);
816 cpu_cycles
+= counts
[MT_CORE_CYCLES
];
817 #if defined(MT_CORE_INSTRS)
818 cpu_instructions
+= counts
[MT_CORE_INSTRS
];
819 #endif /* defined(MT_CORE_INSTRS) */
820 #endif /* MONOTONIC */
823 kr
= ledger_get_balance(sum_ledger
, task_ledgers
.cpu_time_billed_to_me
, (int64_t *)&cpu_time_billed_to_me
);
824 if (kr
!= KERN_SUCCESS
|| cpu_time_billed_to_me
< 0) {
825 cpu_time_billed_to_me
= 0;
828 kr
= ledger_get_balance(sum_ledger
, task_ledgers
.cpu_time_billed_to_others
, (int64_t *)&cpu_time_billed_to_others
);
829 if (kr
!= KERN_SUCCESS
|| cpu_time_billed_to_others
< 0) {
830 cpu_time_billed_to_others
= 0;
833 kr
= ledger_get_balance(sum_ledger
, task_ledgers
.energy_billed_to_me
, (int64_t *)&energy_billed_to_me
);
834 if (kr
!= KERN_SUCCESS
|| energy_billed_to_me
< 0) {
835 energy_billed_to_me
= 0;
838 kr
= ledger_get_balance(sum_ledger
, task_ledgers
.energy_billed_to_others
, (int64_t *)&energy_billed_to_others
);
839 if (kr
!= KERN_SUCCESS
|| energy_billed_to_others
< 0) {
840 energy_billed_to_others
= 0;
843 /* collect information from the coalition itself */
844 cru_out
->tasks_started
= coal
->r
.task_count
;
845 cru_out
->tasks_exited
= coal
->r
.dead_task_count
;
847 uint64_t time_nonempty
= coal
->r
.time_nonempty
;
848 uint64_t last_became_nonempty_time
= coal
->r
.last_became_nonempty_time
;
850 coalition_unlock(coal
);
852 /* Copy the totals out of sum_ledger */
853 kr
= ledger_get_entries(sum_ledger
, task_ledgers
.cpu_time
,
855 if (kr
!= KERN_SUCCESS
) {
858 cru_out
->cpu_time
= credit
;
859 cru_out
->cpu_time_billed_to_me
= (uint64_t)cpu_time_billed_to_me
;
860 cru_out
->cpu_time_billed_to_others
= (uint64_t)cpu_time_billed_to_others
;
861 cru_out
->energy_billed_to_me
= (uint64_t)energy_billed_to_me
;
862 cru_out
->energy_billed_to_others
= (uint64_t)energy_billed_to_others
;
864 kr
= ledger_get_entries(sum_ledger
, task_ledgers
.interrupt_wakeups
,
866 if (kr
!= KERN_SUCCESS
) {
869 cru_out
->interrupt_wakeups
= credit
;
871 kr
= ledger_get_entries(sum_ledger
, task_ledgers
.platform_idle_wakeups
,
873 if (kr
!= KERN_SUCCESS
) {
876 cru_out
->platform_idle_wakeups
= credit
;
878 cru_out
->bytesread
= bytesread
;
879 cru_out
->byteswritten
= byteswritten
;
880 cru_out
->gpu_time
= gpu_time
;
881 cru_out
->energy
= energy
;
882 cru_out
->logical_immediate_writes
= logical_immediate_writes
;
883 cru_out
->logical_deferred_writes
= logical_deferred_writes
;
884 cru_out
->logical_invalidated_writes
= logical_invalidated_writes
;
885 cru_out
->logical_metadata_writes
= logical_metadata_writes
;
886 cru_out
->logical_immediate_writes_to_external
= logical_immediate_writes_to_external
;
887 cru_out
->logical_deferred_writes_to_external
= logical_deferred_writes_to_external
;
888 cru_out
->logical_invalidated_writes_to_external
= logical_invalidated_writes_to_external
;
889 cru_out
->logical_metadata_writes_to_external
= logical_metadata_writes_to_external
;
890 #if CONFIG_PHYS_WRITE_ACCT
891 cru_out
->fs_metadata_writes
= fs_metadata_writes
;
893 cru_out
->fs_metadata_writes
= 0;
894 #endif /* CONFIG_PHYS_WRITE_ACCT */
895 cru_out
->cpu_ptime
= cpu_ptime
;
896 cru_out
->cpu_time_eqos_len
= COALITION_NUM_THREAD_QOS_TYPES
;
897 memcpy(cru_out
->cpu_time_eqos
, cpu_time_eqos
, sizeof(cru_out
->cpu_time_eqos
));
898 cru_out
->cpu_cycles
= cpu_cycles
;
899 cru_out
->cpu_instructions
= cpu_instructions
;
900 ledger_dereference(sum_ledger
);
901 sum_ledger
= LEDGER_NULL
;
903 #if CONFIG_PHYS_WRITE_ACCT
904 // kernel_pm_writes are only recorded under kernel_task coalition
905 if (coalition_id(coal
) == COALITION_ID_KERNEL
) {
906 cru_out
->pm_writes
= kernel_pm_writes
;
908 cru_out
->pm_writes
= 0;
911 cru_out
->pm_writes
= 0;
912 #endif /* CONFIG_PHYS_WRITE_ACCT */
914 if (last_became_nonempty_time
) {
915 time_nonempty
+= mach_absolute_time() - last_became_nonempty_time
;
917 absolutetime_to_nanoseconds(time_nonempty
, &cru_out
->time_nonempty
);
924 * COALITION_TYPE_JETSAM
928 i_coal_jetsam_init(coalition_t coal
, boolean_t privileged
)
930 assert(coal
&& coal
->type
== COALITION_TYPE_JETSAM
);
933 coal
->j
.leader
= TASK_NULL
;
934 queue_head_init(coal
->j
.extensions
);
935 queue_head_init(coal
->j
.services
);
936 queue_head_init(coal
->j
.other
);
938 #if CONFIG_THREAD_GROUPS
939 switch (coal
->role
) {
940 case COALITION_ROLE_SYSTEM
:
941 coal
->j
.thread_group
= thread_group_find_by_id_and_retain(THREAD_GROUP_SYSTEM
);
943 case COALITION_ROLE_BACKGROUND
:
944 coal
->j
.thread_group
= thread_group_find_by_id_and_retain(THREAD_GROUP_BACKGROUND
);
946 case COALITION_ROLE_ADAPTIVE
:
947 if (merge_adaptive_coalitions
) {
948 coal
->j
.thread_group
= thread_group_find_by_id_and_retain(THREAD_GROUP_ADAPTIVE
);
950 coal
->j
.thread_group
= thread_group_create_and_retain();
954 coal
->j
.thread_group
= thread_group_create_and_retain();
956 assert(coal
->j
.thread_group
!= NULL
);
962 i_coal_jetsam_dealloc(__unused coalition_t coal
)
964 assert(coal
&& coal
->type
== COALITION_TYPE_JETSAM
);
966 /* the coalition should be completely clear at this point */
967 assert(queue_empty(&coal
->j
.extensions
));
968 assert(queue_empty(&coal
->j
.services
));
969 assert(queue_empty(&coal
->j
.other
));
970 assert(coal
->j
.leader
== TASK_NULL
);
972 #if CONFIG_THREAD_GROUPS
973 /* disassociate from the thread group */
974 assert(coal
->j
.thread_group
!= NULL
);
975 thread_group_release(coal
->j
.thread_group
);
976 coal
->j
.thread_group
= NULL
;
981 i_coal_jetsam_adopt_task(coalition_t coal
, task_t task
)
983 struct i_jetsam_coalition
*cj
;
984 assert(coal
&& coal
->type
== COALITION_TYPE_JETSAM
);
988 assert(queue_empty(&task
->task_coalition
[COALITION_TYPE_JETSAM
]));
990 /* put each task initially in the "other" list */
991 enqueue_tail(&cj
->other
, &task
->task_coalition
[COALITION_TYPE_JETSAM
]);
992 coal_dbg("coalition %lld adopted PID:%d as UNDEF",
993 coal
->id
, task_pid(task
));
999 i_coal_jetsam_remove_task(coalition_t coal
, task_t task
)
1001 assert(coal
&& coal
->type
== COALITION_TYPE_JETSAM
);
1002 assert(task
->coalition
[COALITION_TYPE_JETSAM
] == coal
);
1004 coal_dbg("removing PID:%d from coalition id:%lld",
1005 task_pid(task
), coal
->id
);
1007 if (task
== coal
->j
.leader
) {
1008 coal
->j
.leader
= NULL
;
1009 coal_dbg(" PID:%d was the leader!", task_pid(task
));
1011 assert(!queue_empty(&task
->task_coalition
[COALITION_TYPE_JETSAM
]));
1014 /* remove the task from the specific coalition role queue */
1015 remqueue(&task
->task_coalition
[COALITION_TYPE_JETSAM
]);
1016 queue_chain_init(task
->task_coalition
[COALITION_TYPE_RESOURCE
]);
1018 return KERN_SUCCESS
;
1021 static kern_return_t
1022 i_coal_jetsam_set_taskrole(coalition_t coal
, task_t task
, int role
)
1024 struct i_jetsam_coalition
*cj
;
1026 assert(coal
&& coal
->type
== COALITION_TYPE_JETSAM
);
1027 assert(task
->coalition
[COALITION_TYPE_JETSAM
] == coal
);
1032 case COALITION_TASKROLE_LEADER
:
1033 coal_dbg("setting PID:%d as LEADER of %lld",
1034 task_pid(task
), coal
->id
);
1035 if (cj
->leader
!= TASK_NULL
) {
1036 /* re-queue the exiting leader onto the "other" list */
1037 coal_dbg(" re-queue existing leader (%d) as OTHER",
1038 task_pid(cj
->leader
));
1039 re_queue_tail(&cj
->other
, &cj
->leader
->task_coalition
[COALITION_TYPE_JETSAM
]);
1042 * remove the task from the "other" list
1043 * (where it was put by default)
1045 remqueue(&task
->task_coalition
[COALITION_TYPE_JETSAM
]);
1046 queue_chain_init(task
->task_coalition
[COALITION_TYPE_JETSAM
]);
1048 /* set the coalition leader */
1051 case COALITION_TASKROLE_XPC
:
1052 coal_dbg("setting PID:%d as XPC in %lld",
1053 task_pid(task
), coal
->id
);
1054 q
= (queue_t
)&cj
->services
;
1056 case COALITION_TASKROLE_EXT
:
1057 coal_dbg("setting PID:%d as EXT in %lld",
1058 task_pid(task
), coal
->id
);
1059 q
= (queue_t
)&cj
->extensions
;
1061 case COALITION_TASKROLE_NONE
:
1063 * Tasks with a role of "none" should fall through to an
1064 * undefined role so long as the task is currently a member
1065 * of the coalition. This scenario can happen if a task is
1066 * killed (usually via jetsam) during exec.
1068 if (task
->coalition
[COALITION_TYPE_JETSAM
] != coal
) {
1069 panic("%s: task %p attempting to set role %d "
1070 "in coalition %p to which it does not belong!", __func__
, task
, role
, coal
);
1073 case COALITION_TASKROLE_UNDEF
:
1074 coal_dbg("setting PID:%d as UNDEF in %lld",
1075 task_pid(task
), coal
->id
);
1076 q
= (queue_t
)&cj
->other
;
1079 panic("%s: invalid role(%d) for task", __func__
, role
);
1080 return KERN_INVALID_ARGUMENT
;
1084 re_queue_tail(q
, &task
->task_coalition
[COALITION_TYPE_JETSAM
]);
1087 return KERN_SUCCESS
;
1091 i_coal_jetsam_get_taskrole(coalition_t coal
, task_t task
)
1093 struct i_jetsam_coalition
*cj
;
1096 assert(coal
&& coal
->type
== COALITION_TYPE_JETSAM
);
1097 assert(task
->coalition
[COALITION_TYPE_JETSAM
] == coal
);
1101 if (task
== cj
->leader
) {
1102 return COALITION_TASKROLE_LEADER
;
1105 qe_foreach_element(t
, &cj
->services
, task_coalition
[COALITION_TYPE_JETSAM
]) {
1107 return COALITION_TASKROLE_XPC
;
1111 qe_foreach_element(t
, &cj
->extensions
, task_coalition
[COALITION_TYPE_JETSAM
]) {
1113 return COALITION_TASKROLE_EXT
;
1117 qe_foreach_element(t
, &cj
->other
, task_coalition
[COALITION_TYPE_JETSAM
]) {
1119 return COALITION_TASKROLE_UNDEF
;
1123 /* task not in the coalition?! */
1124 return COALITION_TASKROLE_NONE
;
1128 i_coal_jetsam_iterate_tasks(coalition_t coal
, void *ctx
, void (*callback
)(coalition_t
, void *, task_t
))
1130 struct i_jetsam_coalition
*cj
;
1133 assert(coal
&& coal
->type
== COALITION_TYPE_JETSAM
);
1138 callback(coal
, ctx
, cj
->leader
);
1141 qe_foreach_element(t
, &cj
->services
, task_coalition
[COALITION_TYPE_JETSAM
])
1142 callback(coal
, ctx
, t
);
1144 qe_foreach_element(t
, &cj
->extensions
, task_coalition
[COALITION_TYPE_JETSAM
])
1145 callback(coal
, ctx
, t
);
1147 qe_foreach_element(t
, &cj
->other
, task_coalition
[COALITION_TYPE_JETSAM
])
1148 callback(coal
, ctx
, t
);
1154 * Main Coalition implementation
1159 * coalition_create_internal
1160 * Returns: New coalition object, referenced for the caller and unlocked.
1161 * Condition: coalitions_list_lock must be UNLOCKED.
1164 coalition_create_internal(int type
, int role
, boolean_t privileged
, coalition_t
*out
, uint64_t *coalition_id
)
1167 struct coalition
*new_coal
;
1171 if (type
< 0 || type
> COALITION_TYPE_MAX
) {
1172 return KERN_INVALID_ARGUMENT
;
1175 new_coal
= (struct coalition
*)zalloc(coalition_zone
);
1176 if (new_coal
== COALITION_NULL
) {
1177 return KERN_RESOURCE_SHORTAGE
;
1179 bzero(new_coal
, sizeof(*new_coal
));
1181 new_coal
->type
= type
;
1182 new_coal
->role
= role
;
1184 /* initialize type-specific resources */
1185 kr
= coal_call(new_coal
, init
, privileged
);
1186 if (kr
!= KERN_SUCCESS
) {
1187 zfree(coalition_zone
, new_coal
);
1191 /* One for caller, one for coalitions list */
1192 new_coal
->ref_count
= 2;
1194 new_coal
->privileged
= privileged
? TRUE
: FALSE
;
1195 #if DEVELOPMENT || DEBUG
1196 new_coal
->should_notify
= 1;
1199 lck_mtx_init(&new_coal
->lock
, &coalitions_lck_grp
, LCK_ATTR_NULL
);
1201 lck_mtx_lock(&coalitions_list_lock
);
1202 new_coal
->id
= coalition_next_id
++;
1204 enqueue_tail(&coalitions_q
, &new_coal
->coalitions
);
1206 #if CONFIG_THREAD_GROUPS
1207 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION
, MACH_COALITION_NEW
),
1208 new_coal
->id
, new_coal
->type
,
1209 (new_coal
->type
== COALITION_TYPE_JETSAM
&& new_coal
->j
.thread_group
) ?
1210 thread_group_get_id(new_coal
->j
.thread_group
) : 0);
1213 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION
, MACH_COALITION_NEW
),
1214 new_coal
->id
, new_coal
->type
);
1217 ctype
= new_coal
->type
;
1218 lck_mtx_unlock(&coalitions_list_lock
);
1220 coal_dbg("id:%llu, type:%s", cid
, coal_type_str(ctype
));
1222 if (coalition_id
!= NULL
) {
1223 *coalition_id
= cid
;
1227 return KERN_SUCCESS
;
1232 * Condition: coalition must be UNLOCKED.
1235 coalition_release(coalition_t coal
)
1237 /* TODO: This can be done with atomics. */
1238 coalition_lock(coal
);
1242 uint32_t rc
= coal
->ref_count
;
1243 uint32_t ac
= coal
->active_count
;
1244 #endif /* COALITION_DEBUG */
1246 coal_dbg("id:%llu type:%s ref_count:%u active_count:%u%s",
1247 coal
->id
, coal_type_str(coal
->type
), rc
, ac
,
1248 rc
<= 0 ? ", will deallocate now" : "");
1250 if (coal
->ref_count
> 0) {
1251 coalition_unlock(coal
);
1255 assert(coal
->termrequested
);
1256 assert(coal
->terminated
);
1257 assert(coal
->active_count
== 0);
1258 assert(coal
->reaped
);
1259 assert(coal
->focal_task_count
== 0);
1260 assert(coal
->nonfocal_task_count
== 0);
1261 #if CONFIG_THREAD_GROUPS
1262 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION
, MACH_COALITION_FREE
),
1263 coal
->id
, coal
->type
,
1264 coal
->type
== COALITION_TYPE_JETSAM
?
1265 coal
->j
.thread_group
: 0);
1267 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION
, MACH_COALITION_FREE
),
1268 coal
->id
, coal
->type
);
1271 coal_call(coal
, dealloc
);
1273 coalition_unlock(coal
);
1275 lck_mtx_destroy(&coal
->lock
, &coalitions_lck_grp
);
1277 zfree(coalition_zone
, coal
);
1281 * coalition_find_by_id_internal
1282 * Returns: Coalition object with specified id, NOT referenced.
1283 * If not found, returns COALITION_NULL.
1284 * Condition: coalitions_list_lock must be LOCKED.
1287 coalition_find_by_id_internal(uint64_t coal_id
)
1290 return COALITION_NULL
;
1293 lck_mtx_assert(&coalitions_list_lock
, LCK_MTX_ASSERT_OWNED
);
1295 qe_foreach_element(coal
, &coalitions_q
, coalitions
) {
1296 if (coal
->id
== coal_id
) {
1300 return COALITION_NULL
;
1304 * coalition_find_by_id
1305 * Returns: Coalition object with specified id, referenced.
1306 * Condition: coalitions_list_lock must be UNLOCKED.
1309 coalition_find_by_id(uint64_t cid
)
1312 return COALITION_NULL
;
1315 lck_mtx_lock(&coalitions_list_lock
);
1317 coalition_t coal
= coalition_find_by_id_internal(cid
);
1318 if (coal
== COALITION_NULL
) {
1319 lck_mtx_unlock(&coalitions_list_lock
);
1320 return COALITION_NULL
;
1323 coalition_lock(coal
);
1326 coalition_unlock(coal
);
1327 lck_mtx_unlock(&coalitions_list_lock
);
1328 return COALITION_NULL
;
1331 if (coal
->ref_count
== 0) {
1332 panic("resurrecting coalition %p id:%llu type:%s, active_count:%u\n",
1333 coal
, coal
->id
, coal_type_str(coal
->type
), coal
->active_count
);
1337 uint32_t rc
= coal
->ref_count
;
1340 coalition_unlock(coal
);
1341 lck_mtx_unlock(&coalitions_list_lock
);
1343 coal_dbg("id:%llu type:%s ref_count:%u",
1344 coal
->id
, coal_type_str(coal
->type
), rc
);
1350 * coalition_find_and_activate_by_id
1351 * Returns: Coalition object with specified id, referenced, and activated.
1352 * Condition: coalitions_list_lock must be UNLOCKED.
1353 * This is the function to use when putting a 'new' thing into a coalition,
1354 * like posix_spawn of an XPC service by launchd.
1355 * See also coalition_extend_active.
1358 coalition_find_and_activate_by_id(uint64_t cid
)
1361 return COALITION_NULL
;
1364 lck_mtx_lock(&coalitions_list_lock
);
1366 coalition_t coal
= coalition_find_by_id_internal(cid
);
1367 if (coal
== COALITION_NULL
) {
1368 lck_mtx_unlock(&coalitions_list_lock
);
1369 return COALITION_NULL
;
1372 coalition_lock(coal
);
1374 if (coal
->reaped
|| coal
->terminated
) {
1375 /* Too late to put something new into this coalition, it's
1376 * already on its way out the door */
1377 coalition_unlock(coal
);
1378 lck_mtx_unlock(&coalitions_list_lock
);
1379 return COALITION_NULL
;
1382 if (coal
->ref_count
== 0) {
1383 panic("resurrecting coalition %p id:%llu type:%s, active_count:%u\n",
1384 coal
, coal
->id
, coal_type_str(coal
->type
), coal
->active_count
);
1388 coal
->active_count
++;
1391 uint32_t rc
= coal
->ref_count
;
1392 uint32_t ac
= coal
->active_count
;
1395 coalition_unlock(coal
);
1396 lck_mtx_unlock(&coalitions_list_lock
);
1398 coal_dbg("id:%llu type:%s ref_count:%u, active_count:%u",
1399 coal
->id
, coal_type_str(coal
->type
), rc
, ac
);
1405 coalition_id(coalition_t coal
)
1407 assert(coal
!= COALITION_NULL
);
1412 task_coalition_ids(task_t task
, uint64_t ids
[COALITION_NUM_TYPES
])
1415 for (i
= 0; i
< COALITION_NUM_TYPES
; i
++) {
1416 if (task
->coalition
[i
]) {
1417 ids
[i
] = task
->coalition
[i
]->id
;
1425 task_coalition_roles(task_t task
, int roles
[COALITION_NUM_TYPES
])
1428 memset(roles
, 0, COALITION_NUM_TYPES
* sizeof(roles
[0]));
1430 for (i
= 0; i
< COALITION_NUM_TYPES
; i
++) {
1431 if (task
->coalition
[i
]) {
1432 coalition_lock(task
->coalition
[i
]);
1433 roles
[i
] = coal_call(task
->coalition
[i
],
1434 get_taskrole
, task
);
1435 coalition_unlock(task
->coalition
[i
]);
1437 roles
[i
] = COALITION_TASKROLE_NONE
;
1444 coalition_type(coalition_t coal
)
1450 coalition_term_requested(coalition_t coal
)
1452 return coal
->termrequested
;
1456 coalition_is_terminated(coalition_t coal
)
1458 return coal
->terminated
;
1462 coalition_is_reaped(coalition_t coal
)
1464 return coal
->reaped
;
1468 coalition_is_privileged(coalition_t coal
)
1470 return coal
->privileged
|| unrestrict_coalition_syscalls
;
1474 task_is_in_privileged_coalition(task_t task
, int type
)
1476 if (type
< 0 || type
> COALITION_TYPE_MAX
) {
1479 if (unrestrict_coalition_syscalls
) {
1482 if (!task
->coalition
[type
]) {
1485 return task
->coalition
[type
]->privileged
;
1489 task_coalition_update_gpu_stats(task_t task
, uint64_t gpu_ns_delta
)
1493 assert(task
!= TASK_NULL
);
1494 if (gpu_ns_delta
== 0) {
1498 coal
= task
->coalition
[COALITION_TYPE_RESOURCE
];
1499 assert(coal
!= COALITION_NULL
);
1501 coalition_lock(coal
);
1502 coal
->r
.gpu_time
+= gpu_ns_delta
;
1503 coalition_unlock(coal
);
1507 task_coalition_adjust_focal_count(task_t task
, int count
, uint32_t *new_count
)
1509 coalition_t coal
= task
->coalition
[COALITION_FOCAL_TASKS_ACCOUNTING
];
1510 if (coal
== COALITION_NULL
) {
1514 *new_count
= os_atomic_add(&coal
->focal_task_count
, count
, relaxed
);
1515 assert(*new_count
!= UINT32_MAX
);
1520 task_coalition_focal_count(task_t task
)
1522 coalition_t coal
= task
->coalition
[COALITION_FOCAL_TASKS_ACCOUNTING
];
1523 if (coal
== COALITION_NULL
) {
1527 return coal
->focal_task_count
;
1531 task_coalition_adjust_nonfocal_count(task_t task
, int count
, uint32_t *new_count
)
1533 coalition_t coal
= task
->coalition
[COALITION_FOCAL_TASKS_ACCOUNTING
];
1534 if (coal
== COALITION_NULL
) {
1538 *new_count
= os_atomic_add(&coal
->nonfocal_task_count
, count
, relaxed
);
1539 assert(*new_count
!= UINT32_MAX
);
1544 task_coalition_nonfocal_count(task_t task
)
1546 coalition_t coal
= task
->coalition
[COALITION_FOCAL_TASKS_ACCOUNTING
];
1547 if (coal
== COALITION_NULL
) {
1551 return coal
->nonfocal_task_count
;
1555 coalition_set_efficient(coalition_t coal
)
1557 coalition_lock(coal
);
1558 coal
->efficient
= TRUE
;
1559 coalition_unlock(coal
);
#if CONFIG_THREAD_GROUPS

/*
 * task_coalition_get_thread_group
 * Borrowed (not retained) thread group of the task's jetsam coalition.
 */
struct thread_group *
task_coalition_get_thread_group(task_t task)
{
	coalition_t coal = task->coalition[COALITION_TYPE_JETSAM];
	/* return system thread group for non-jetsam coalitions */
	if (coal == COALITION_NULL) {
		return init_coalition[COALITION_TYPE_JETSAM]->j.thread_group;
	}
	return coal->j.thread_group;
}

/* Debugger (kdp) variant: borrowed pointer, no locking, no retain. */
struct thread_group *
kdp_coalition_get_thread_group(coalition_t coal)
{
	if (coal->type != COALITION_TYPE_JETSAM) {
		return NULL;
	}
	assert(coal->j.thread_group != NULL);
	return coal->j.thread_group;
}

/* Returns a +1 retained thread group, or NULL for non-jetsam coalitions. */
struct thread_group *
coalition_get_thread_group(coalition_t coal)
{
	if (coal->type != COALITION_TYPE_JETSAM) {
		return NULL;
	}
	assert(coal->j.thread_group != NULL);
	return thread_group_retain(coal->j.thread_group);
}

/*
 * coalition_set_thread_group
 * Replace the jetsam coalition's thread group with 'tg' (consumes the
 * caller's reference on 'tg', releases the old group's reference).
 */
void
coalition_set_thread_group(coalition_t coal, struct thread_group *tg)
{
	assert(coal != COALITION_NULL);

	if (coal->type != COALITION_TYPE_JETSAM) {
		return;
	}
	struct thread_group *old_tg = coal->j.thread_group;
	assert(old_tg != NULL);
	coal->j.thread_group = tg;

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_THREAD_GROUP_SET),
	    coal->id, coal->type, thread_group_get_id(tg));

	thread_group_release(old_tg);
}

/*
 * task_coalition_thread_group_focal_update
 * Propagate the coalition's focal state onto its thread group's UI_APP flag.
 */
void
task_coalition_thread_group_focal_update(task_t task)
{
	assert(task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING] != COALITION_NULL);
	thread_group_flags_update_lock();
	uint32_t focal_count = task_coalition_focal_count(task);
	if (focal_count) {
		thread_group_set_flags_locked(task_coalition_get_thread_group(task), THREAD_GROUP_FLAGS_UI_APP);
	} else {
		thread_group_clear_flags_locked(task_coalition_get_thread_group(task), THREAD_GROUP_FLAGS_UI_APP);
	}
	thread_group_flags_update_unlock();
}

#endif /* CONFIG_THREAD_GROUPS */
1631 coalition_for_each_task(coalition_t coal
, void *ctx
,
1632 void (*callback
)(coalition_t
, void *, task_t
))
1634 assert(coal
!= COALITION_NULL
);
1636 coal_dbg("iterating tasks in coalition %p id:%llu type:%s, active_count:%u",
1637 coal
, coal
->id
, coal_type_str(coal
->type
), coal
->active_count
);
1639 coalition_lock(coal
);
1641 coal_call(coal
, iterate_tasks
, ctx
, callback
);
1643 coalition_unlock(coal
);
1648 coalition_remove_active(coalition_t coal
)
1650 coalition_lock(coal
);
1652 assert(!coal
->reaped
);
1653 assert(coal
->active_count
> 0);
1655 coal
->active_count
--;
1657 boolean_t do_notify
= FALSE
;
1658 uint64_t notify_id
= 0;
1659 uint32_t notify_flags
= 0;
1660 if (coal
->termrequested
&& coal
->active_count
== 0) {
1661 /* We only notify once, when active_count reaches zero.
1662 * We just decremented, so if it reached zero, we mustn't have
1665 assert(!coal
->terminated
);
1666 coal
->terminated
= TRUE
;
1668 assert(!coal
->notified
);
1670 coal
->notified
= TRUE
;
1671 #if DEVELOPMENT || DEBUG
1672 do_notify
= coal
->should_notify
;
1676 notify_id
= coal
->id
;
1681 uint64_t cid
= coal
->id
;
1682 uint32_t rc
= coal
->ref_count
;
1683 int ac
= coal
->active_count
;
1684 int ct
= coal
->type
;
1686 coalition_unlock(coal
);
1688 coal_dbg("id:%llu type:%s ref_count:%u, active_count:%u,%s",
1689 cid
, coal_type_str(ct
), rc
, ac
, do_notify
? " NOTIFY" : " ");
1692 coalition_notify_user(notify_id
, notify_flags
);
1696 /* Used for kernel_task, launchd, launchd's early boot tasks... */
1698 coalitions_adopt_init_task(task_t task
)
1701 kr
= coalitions_adopt_task(init_coalition
, task
);
1702 if (kr
!= KERN_SUCCESS
) {
1703 panic("failed to adopt task %p into default coalition: %d", task
, kr
);
1708 /* Used for forked corpses. */
1710 coalitions_adopt_corpse_task(task_t task
)
1713 kr
= coalitions_adopt_task(corpse_coalition
, task
);
1714 if (kr
!= KERN_SUCCESS
) {
1715 panic("failed to adopt task %p into corpse coalition: %d", task
, kr
);
1721 * coalition_adopt_task_internal
1722 * Condition: Coalition must be referenced and unlocked. Will fail if coalition
1723 * is already terminated.
1725 static kern_return_t
1726 coalition_adopt_task_internal(coalition_t coal
, task_t task
)
1730 if (task
->coalition
[coal
->type
]) {
1731 return KERN_ALREADY_IN_SET
;
1734 coalition_lock(coal
);
1736 if (coal
->reaped
|| coal
->terminated
) {
1737 coalition_unlock(coal
);
1738 return KERN_TERMINATED
;
1741 kr
= coal_call(coal
, adopt_task
, task
);
1742 if (kr
!= KERN_SUCCESS
) {
1746 coal
->active_count
++;
1750 task
->coalition
[coal
->type
] = coal
;
1754 (void)coal
; /* need expression after label */
1755 uint64_t cid
= coal
->id
;
1756 uint32_t rc
= coal
->ref_count
;
1757 uint32_t ct
= coal
->type
;
1759 if (get_task_uniqueid(task
) != UINT64_MAX
) {
1760 /* On 32-bit targets, uniqueid will get truncated to 32 bits */
1761 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION
, MACH_COALITION_ADOPT
),
1762 coal
->id
, get_task_uniqueid(task
));
1765 coalition_unlock(coal
);
1767 coal_dbg("task:%d, id:%llu type:%s ref_count:%u, kr=%d",
1768 task_pid(task
), cid
, coal_type_str(ct
), rc
, kr
);
1772 static kern_return_t
1773 coalition_remove_task_internal(task_t task
, int type
)
1777 coalition_t coal
= task
->coalition
[type
];
1780 return KERN_SUCCESS
;
1783 assert(coal
->type
== (uint32_t)type
);
1785 coalition_lock(coal
);
1787 kr
= coal_call(coal
, remove_task
, task
);
1790 uint64_t cid
= coal
->id
;
1791 uint32_t rc
= coal
->ref_count
;
1792 int ac
= coal
->active_count
;
1793 int ct
= coal
->type
;
1795 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION
, MACH_COALITION_REMOVE
),
1796 coal
->id
, get_task_uniqueid(task
));
1797 coalition_unlock(coal
);
1799 coal_dbg("id:%llu type:%s ref_count:%u, active_count:%u, kr=%d",
1800 cid
, coal_type_str(ct
), rc
, ac
, kr
);
1802 coalition_remove_active(coal
);
1808 * coalitions_adopt_task
1809 * Condition: All coalitions must be referenced and unlocked.
1810 * Will fail if any coalition is already terminated.
1813 coalitions_adopt_task(coalition_t
*coals
, task_t task
)
1818 if (!coals
|| coals
[COALITION_TYPE_RESOURCE
] == COALITION_NULL
) {
1819 return KERN_INVALID_ARGUMENT
;
1822 /* verify that the incoming coalitions are what they say they are */
1823 for (i
= 0; i
< COALITION_NUM_TYPES
; i
++) {
1824 if (coals
[i
] && coals
[i
]->type
!= (uint32_t)i
) {
1825 return KERN_INVALID_ARGUMENT
;
1829 for (i
= 0; i
< COALITION_NUM_TYPES
; i
++) {
1832 kr
= coalition_adopt_task_internal(coals
[i
], task
);
1834 if (kr
!= KERN_SUCCESS
) {
1835 /* dis-associate any coalitions that just adopted this task */
1837 if (task
->coalition
[i
]) {
1838 coalition_remove_task_internal(task
, i
);
1848 * coalitions_remove_task
1849 * Condition: task must be referenced and UNLOCKED; all task's coalitions must be UNLOCKED
1852 coalitions_remove_task(task_t task
)
1857 for (i
= 0; i
< COALITION_NUM_TYPES
; i
++) {
1858 kr
= coalition_remove_task_internal(task
, i
);
1859 assert(kr
== KERN_SUCCESS
);
1866 * task_release_coalitions
1867 * helper function to release references to all coalitions in which
1868 * 'task' is a member.
1871 task_release_coalitions(task_t task
)
1874 for (i
= 0; i
< COALITION_NUM_TYPES
; i
++) {
1875 if (task
->coalition
[i
]) {
1876 coalition_release(task
->coalition
[i
]);
1877 } else if (i
== COALITION_TYPE_RESOURCE
) {
1878 panic("deallocating task %p was not a member of a resource coalition", task
);
1884 * coalitions_set_roles
1885 * for each type of coalition, if the task is a member of a coalition of
1886 * that type (given in the coalitions parameter) then set the role of
1887 * the task within that that coalition.
1890 coalitions_set_roles(coalition_t coalitions
[COALITION_NUM_TYPES
],
1891 task_t task
, int roles
[COALITION_NUM_TYPES
])
1893 kern_return_t kr
= KERN_SUCCESS
;
1896 for (i
= 0; i
< COALITION_NUM_TYPES
; i
++) {
1897 if (!coalitions
[i
]) {
1900 coalition_lock(coalitions
[i
]);
1901 kr
= coal_call(coalitions
[i
], set_taskrole
, task
, roles
[i
]);
1902 coalition_unlock(coalitions
[i
]);
1903 assert(kr
== KERN_SUCCESS
);
1910 * coalition_terminate_internal
1911 * Condition: Coalition must be referenced and UNLOCKED.
1914 coalition_request_terminate_internal(coalition_t coal
)
1916 assert(coal
->type
>= 0 && coal
->type
<= COALITION_TYPE_MAX
);
1918 if (coal
== init_coalition
[coal
->type
]) {
1919 return KERN_DEFAULT_SET
;
1922 coalition_lock(coal
);
1925 coalition_unlock(coal
);
1926 return KERN_INVALID_NAME
;
1929 if (coal
->terminated
|| coal
->termrequested
) {
1930 coalition_unlock(coal
);
1931 return KERN_TERMINATED
;
1934 coal
->termrequested
= TRUE
;
1936 boolean_t do_notify
= FALSE
;
1937 uint64_t note_id
= 0;
1938 uint32_t note_flags
= 0;
1940 if (coal
->active_count
== 0) {
1942 * We only notify once, when active_count reaches zero.
1943 * We just set termrequested to zero. If the active count
1944 * was already at zero (tasks died before we could request
1945 * a termination notification), we should notify.
1947 assert(!coal
->terminated
);
1948 coal
->terminated
= TRUE
;
1950 assert(!coal
->notified
);
1952 coal
->notified
= TRUE
;
1953 #if DEVELOPMENT || DEBUG
1954 do_notify
= coal
->should_notify
;
1962 coalition_unlock(coal
);
1965 coalition_notify_user(note_id
, note_flags
);
1968 return KERN_SUCCESS
;
1972 * coalition_reap_internal
1973 * Condition: Coalition must be referenced and UNLOCKED.
1976 coalition_reap_internal(coalition_t coal
)
1978 assert(coal
->type
<= COALITION_TYPE_MAX
);
1980 if (coal
== init_coalition
[coal
->type
]) {
1981 return KERN_DEFAULT_SET
;
1984 coalition_lock(coal
);
1986 coalition_unlock(coal
);
1987 return KERN_TERMINATED
;
1989 if (!coal
->terminated
) {
1990 coalition_unlock(coal
);
1991 return KERN_FAILURE
;
1993 assert(coal
->termrequested
);
1994 if (coal
->active_count
> 0) {
1995 coalition_unlock(coal
);
1996 return KERN_FAILURE
;
1999 coal
->reaped
= TRUE
;
2001 /* Caller, launchd, and coalitions list should each have a reference */
2002 assert(coal
->ref_count
> 2);
2004 coalition_unlock(coal
);
2006 lck_mtx_lock(&coalitions_list_lock
);
2008 remqueue(&coal
->coalitions
);
2009 lck_mtx_unlock(&coalitions_list_lock
);
2011 /* Release the list's reference and launchd's reference. */
2012 coalition_release(coal
);
2013 coalition_release(coal
);
2015 return KERN_SUCCESS
;
#if DEVELOPMENT || DEBUG
/* Development/debug knob controlling termination notifications. */
int
coalition_should_notify(coalition_t coal)
{
	int should;
	if (!coal) {
		return -1;
	}
	coalition_lock(coal);
	should = coal->should_notify;
	coalition_unlock(coal);

	return should;
}

void
coalition_set_notify(coalition_t coal, int notify)
{
	if (!coal) {
		return;
	}
	coalition_lock(coal);
	/* normalize to 0/1 */
	coal->should_notify = !!notify;
	coalition_unlock(coal);
}
#endif /* DEVELOPMENT || DEBUG */
2046 coalitions_init(void)
2050 const struct coalition_type
*ctype
;
2052 queue_head_init(coalitions_q
);
2054 if (!PE_parse_boot_argn("unrestrict_coalition_syscalls", &unrestrict_coalition_syscalls
,
2055 sizeof(unrestrict_coalition_syscalls
))) {
2056 unrestrict_coalition_syscalls
= 0;
2059 if (!PE_parse_boot_argn("tg_adaptive", &merge_adaptive_coalitions
,
2060 sizeof(merge_adaptive_coalitions
))) {
2061 merge_adaptive_coalitions
= 0;
2064 init_task_ledgers();
2066 init_coalition_ledgers();
2068 for (i
= 0, ctype
= &s_coalition_types
[0]; i
< COALITION_NUM_TYPES
; ctype
++, i
++) {
2069 /* verify the entry in the global coalition types array */
2070 if (ctype
->type
!= i
||
2073 !ctype
->adopt_task
||
2074 !ctype
->remove_task
) {
2075 panic("%s: Malformed coalition type %s(%d) in slot for type:%s(%d)",
2076 __func__
, coal_type_str(ctype
->type
), ctype
->type
, coal_type_str(i
), i
);
2078 if (!ctype
->has_default
) {
2081 kr
= coalition_create_internal(ctype
->type
, COALITION_ROLE_SYSTEM
, TRUE
, &init_coalition
[ctype
->type
], NULL
);
2082 if (kr
!= KERN_SUCCESS
) {
2083 panic("%s: could not create init %s coalition: kr:%d",
2084 __func__
, coal_type_str(i
), kr
);
2086 if (i
== COALITION_TYPE_RESOURCE
) {
2087 assert(COALITION_ID_KERNEL
== init_coalition
[ctype
->type
]->id
);
2089 kr
= coalition_create_internal(ctype
->type
, COALITION_ROLE_SYSTEM
, FALSE
, &corpse_coalition
[ctype
->type
], NULL
);
2090 if (kr
!= KERN_SUCCESS
) {
2091 panic("%s: could not create corpse %s coalition: kr:%d",
2092 __func__
, coal_type_str(i
), kr
);
2096 /* "Leak" our reference to the global object */
2100 * BSD Kernel interface functions
2104 coalition_fill_procinfo(struct coalition
*coal
,
2105 struct procinfo_coalinfo
*coalinfo
)
2107 coalinfo
->coalition_id
= coal
->id
;
2108 coalinfo
->coalition_type
= coal
->type
;
2109 coalinfo
->coalition_tasks
= coalition_get_task_count(coal
);
2114 coalitions_get_list(int type
, struct procinfo_coalinfo
*coal_list
, int list_sz
)
2117 struct coalition
*coal
;
2119 lck_mtx_lock(&coalitions_list_lock
);
2120 qe_foreach_element(coal
, &coalitions_q
, coalitions
) {
2121 if (!coal
->reaped
&& (type
< 0 || type
== (int)coal
->type
)) {
2122 if (coal_list
&& ncoals
< list_sz
) {
2123 coalition_fill_procinfo(coal
, &coal_list
[ncoals
]);
2128 lck_mtx_unlock(&coalitions_list_lock
);
2134 * Return the coaltion of the given type to which the task belongs.
2137 task_get_coalition(task_t task
, int coal_type
)
2141 if (task
== NULL
|| coal_type
> COALITION_TYPE_MAX
) {
2142 return COALITION_NULL
;
2145 c
= task
->coalition
[coal_type
];
2146 assert(c
== COALITION_NULL
|| (int)c
->type
== coal_type
);
2151 * Report if the given task is the leader of the given jetsam coalition.
2154 coalition_is_leader(task_t task
, coalition_t coal
)
2156 boolean_t ret
= FALSE
;
2158 if (coal
!= COALITION_NULL
) {
2159 coalition_lock(coal
);
2161 ret
= (coal
->type
== COALITION_TYPE_JETSAM
&& coal
->j
.leader
== task
);
2163 coalition_unlock(coal
);
2170 coalition_iterate_stackshot(coalition_iterate_fn_t callout
, void *arg
, uint32_t coalition_type
)
2175 qe_foreach_element(coal
, &coalitions_q
, coalitions
) {
2176 if (coal
== NULL
|| !ml_validate_nofault((vm_offset_t
)coal
, sizeof(struct coalition
))) {
2177 return KERN_FAILURE
;
2180 if (coalition_type
== coal
->type
) {
2181 callout(arg
, i
++, coal
);
2185 return KERN_SUCCESS
;
2189 kdp_coalition_get_leader(coalition_t coal
)
2195 if (coal
->type
== COALITION_TYPE_JETSAM
) {
2196 return coal
->j
.leader
;
2202 coalition_get_leader(coalition_t coal
)
2204 task_t leader
= TASK_NULL
;
2210 coalition_lock(coal
);
2211 if (coal
->type
!= COALITION_TYPE_JETSAM
) {
2215 leader
= coal
->j
.leader
;
2216 if (leader
!= TASK_NULL
) {
2217 task_reference(leader
);
2221 coalition_unlock(coal
);
2227 coalition_get_task_count(coalition_t coal
)
2230 struct queue_entry
*qe
;
2235 coalition_lock(coal
);
2236 switch (coal
->type
) {
2237 case COALITION_TYPE_RESOURCE
:
2238 qe_foreach(qe
, &coal
->r
.tasks
)
2241 case COALITION_TYPE_JETSAM
:
2242 if (coal
->j
.leader
) {
2245 qe_foreach(qe
, &coal
->j
.other
)
2247 qe_foreach(qe
, &coal
->j
.extensions
)
2249 qe_foreach(qe
, &coal
->j
.services
)
2255 coalition_unlock(coal
);
2262 i_get_list_footprint(queue_t list
, int type
, int *ntasks
)
2267 qe_foreach_element(task
, list
, task_coalition
[type
]) {
2268 bytes
+= get_task_phys_footprint(task
);
2269 coal_dbg(" [%d] task_pid:%d, type:%d, footprint:%lld",
2270 *ntasks
, task_pid(task
), type
, bytes
);
2278 coalition_get_page_count(coalition_t coal
, int *ntasks
)
2290 coalition_lock(coal
);
2292 switch (coal
->type
) {
2293 case COALITION_TYPE_RESOURCE
:
2294 bytes
+= i_get_list_footprint(&coal
->r
.tasks
, COALITION_TYPE_RESOURCE
, &num_tasks
);
2296 case COALITION_TYPE_JETSAM
:
2297 if (coal
->j
.leader
) {
2298 bytes
+= get_task_phys_footprint(coal
->j
.leader
);
2301 bytes
+= i_get_list_footprint(&coal
->j
.extensions
, COALITION_TYPE_JETSAM
, &num_tasks
);
2302 bytes
+= i_get_list_footprint(&coal
->j
.services
, COALITION_TYPE_JETSAM
, &num_tasks
);
2303 bytes
+= i_get_list_footprint(&coal
->j
.other
, COALITION_TYPE_JETSAM
, &num_tasks
);
2309 coalition_unlock(coal
);
2312 *ntasks
= num_tasks
;
2315 return bytes
/ PAGE_SIZE_64
;
/* One sortable record per coalition member task. */
struct coal_sort_s {
	int pid;        /* task pid */
	int usr_order;  /* user-specified ordering key */
	uint64_t bytes; /* physical footprint in bytes */
};

/*
 * return < 0 for a < b
 *          0 for a == b
 *        > 0 for a > b
 */
typedef int (*cmpfunc_t)(const void *a, const void *b);

extern void
qsort(void *a, size_t n, size_t es, cmpfunc_t cmp);

static int
dflt_cmp(const void *a, const void *b)
{
	const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
	const struct coal_sort_s *csB = (const struct coal_sort_s *)b;

	/*
	 * if both A and B are equal, use a memory descending sort
	 * NOTE: the narrowing cast can misorder deltas > INT_MAX bytes
	 */
	if (csA->usr_order == csB->usr_order) {
		return (int)((int64_t)csB->bytes - (int64_t)csA->bytes);
	}

	/* otherwise, return the relationship between user specified orders */
	return csA->usr_order - csB->usr_order;
}

/* ascending by memory footprint */
static int
mem_asc_cmp(const void *a, const void *b)
{
	const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
	const struct coal_sort_s *csB = (const struct coal_sort_s *)b;

	return (int)((int64_t)csA->bytes - (int64_t)csB->bytes);
}

/* descending by memory footprint */
static int
mem_dec_cmp(const void *a, const void *b)
{
	const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
	const struct coal_sort_s *csB = (const struct coal_sort_s *)b;

	return (int)((int64_t)csB->bytes - (int64_t)csA->bytes);
}

/* ascending by user order */
static int
usr_asc_cmp(const void *a, const void *b)
{
	const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
	const struct coal_sort_s *csB = (const struct coal_sort_s *)b;

	return csA->usr_order - csB->usr_order;
}

/* descending by user order */
static int
usr_dec_cmp(const void *a, const void *b)
{
	const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
	const struct coal_sort_s *csB = (const struct coal_sort_s *)b;

	return csB->usr_order - csA->usr_order;
}
2387 /* avoid dynamic allocation in this path */
2388 #define MAX_SORTED_PIDS 80
/*
 * coalition_get_sort_list
 * Fill 'sort_array' (capacity 'array_sz') with one entry per task on
 * 'list': the task's pid plus the sort key selected by 'sort_order'
 * (physical footprint in bytes for the MEM orders; usr_order, which is
 * always written as 0 here, for DEFAULT/USER orders).  A NULL 'list' is
 * the special leader-only case for JETSAM coalitions.  Returns the number
 * of entries written.
 *
 * NOTE(review): the caller (coalition_get_pid_list) takes the coalition
 * lock around every call, so the task queues are stable while we walk
 * them — confirm before adding new call sites.
 */
static int
coalition_get_sort_list(coalition_t coal, int sort_order, queue_t list,
    struct coal_sort_s *sort_array, int array_sz)
{
	int ntasks = 0;
	task_t task;

	assert(sort_array != NULL);

	if (array_sz <= 0) {
		/* nothing to fill: caller may have already consumed the array */
		return 0;
	}

	if (!list) {
		/*
		 * this function will only be called with a NULL
		 * list for JETSAM-type coalitions, and is intended
		 * to investigate the leader process
		 */
		if (coal->type != COALITION_TYPE_JETSAM ||
		    coal->j.leader == TASK_NULL) {
			return 0;
		}
		sort_array[0].pid = task_pid(coal->j.leader);
		switch (sort_order) {
		case COALITION_SORT_DEFAULT:
			sort_array[0].usr_order = 0;
			break;
		case COALITION_SORT_MEM_ASC:
		case COALITION_SORT_MEM_DEC:
			/* footprint is sampled once here, not re-read at sort time */
			sort_array[0].bytes = get_task_phys_footprint(coal->j.leader);
			break;
		case COALITION_SORT_USER_ASC:
		case COALITION_SORT_USER_DEC:
			sort_array[0].usr_order = 0;
			break;
		default:
			break;
		}
		return 1;
	}

	qe_foreach_element(task, list, task_coalition[coal->type]) {
		if (ntasks >= array_sz) {
			/* array full: warn and return the entries that fit */
			printf("WARNING: more than %d pids in coalition %llu\n",
			    MAX_SORTED_PIDS, coal->id);
			break;
		}

		sort_array[ntasks].pid = task_pid(task);

		switch (sort_order) {
		case COALITION_SORT_DEFAULT:
			sort_array[ntasks].usr_order = 0;
			break;
		case COALITION_SORT_MEM_ASC:
		case COALITION_SORT_MEM_DEC:
			sort_array[ntasks].bytes = get_task_phys_footprint(task);
			break;
		case COALITION_SORT_USER_ASC:
		case COALITION_SORT_USER_DEC:
			sort_array[ntasks].usr_order = 0;
			break;
		default:
			break;
		}

		ntasks++;
	}

	return ntasks;
}
2464 coalition_get_pid_list(coalition_t coal
, uint32_t rolemask
, int sort_order
,
2465 int *pid_list
, int list_sz
)
2467 struct i_jetsam_coalition
*cj
;
2469 cmpfunc_t cmp_func
= NULL
;
2470 struct coal_sort_s sort_array
[MAX_SORTED_PIDS
] = { {0, 0, 0} }; /* keep to < 2k */
2473 !(rolemask
& COALITION_ROLEMASK_ALLROLES
) ||
2474 !pid_list
|| list_sz
< 1) {
2475 coal_dbg("Invalid parameters: coal:%p, type:%d, rolemask:0x%x, "
2476 "pid_list:%p, list_sz:%d", coal
, coal
? coal
->type
: -1,
2477 rolemask
, pid_list
, list_sz
);
2481 switch (sort_order
) {
2482 case COALITION_SORT_NOSORT
:
2485 case COALITION_SORT_DEFAULT
:
2486 cmp_func
= dflt_cmp
;
2488 case COALITION_SORT_MEM_ASC
:
2489 cmp_func
= mem_asc_cmp
;
2491 case COALITION_SORT_MEM_DEC
:
2492 cmp_func
= mem_dec_cmp
;
2494 case COALITION_SORT_USER_ASC
:
2495 cmp_func
= usr_asc_cmp
;
2497 case COALITION_SORT_USER_DEC
:
2498 cmp_func
= usr_dec_cmp
;
2504 coalition_lock(coal
);
2506 if (coal
->type
== COALITION_TYPE_RESOURCE
) {
2507 ntasks
+= coalition_get_sort_list(coal
, sort_order
, &coal
->r
.tasks
,
2508 sort_array
, MAX_SORTED_PIDS
);
2514 if (rolemask
& COALITION_ROLEMASK_UNDEF
) {
2515 ntasks
+= coalition_get_sort_list(coal
, sort_order
, &cj
->other
,
2516 sort_array
+ ntasks
,
2517 MAX_SORTED_PIDS
- ntasks
);
2520 if (rolemask
& COALITION_ROLEMASK_XPC
) {
2521 ntasks
+= coalition_get_sort_list(coal
, sort_order
, &cj
->services
,
2522 sort_array
+ ntasks
,
2523 MAX_SORTED_PIDS
- ntasks
);
2526 if (rolemask
& COALITION_ROLEMASK_EXT
) {
2527 ntasks
+= coalition_get_sort_list(coal
, sort_order
, &cj
->extensions
,
2528 sort_array
+ ntasks
,
2529 MAX_SORTED_PIDS
- ntasks
);
2532 if (rolemask
& COALITION_ROLEMASK_LEADER
) {
2533 ntasks
+= coalition_get_sort_list(coal
, sort_order
, NULL
,
2534 sort_array
+ ntasks
,
2535 MAX_SORTED_PIDS
- ntasks
);
2539 coalition_unlock(coal
);
2541 /* sort based on the chosen criterion (no sense sorting 1 item) */
2542 if (cmp_func
&& ntasks
> 1) {
2543 qsort(sort_array
, ntasks
, sizeof(struct coal_sort_s
), cmp_func
);
2546 for (int i
= 0; i
< ntasks
; i
++) {
2550 coal_dbg(" [%d] PID:%d, footprint:%lld, usr_order:%d",
2551 i
, sort_array
[i
].pid
, sort_array
[i
].bytes
,
2552 sort_array
[i
].usr_order
);
2553 pid_list
[i
] = sort_array
[i
].pid
;