2 * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 #include <kern/kern_types.h>
30 #include <mach/mach_types.h>
31 #include <mach/boolean.h>
33 #include <kern/coalition.h>
34 #include <kern/exc_resource.h>
35 #include <kern/host.h>
36 #include <kern/ledger.h>
37 #include <kern/mach_param.h> /* for TASK_CHUNK */
39 #include <kern/monotonic.h>
40 #endif /* MONOTONIC */
41 #include <kern/policy_internal.h>
42 #include <kern/task.h>
43 #include <kern/thread_group.h>
44 #include <kern/zalloc.h>
46 #include <libkern/OSAtomic.h>
48 #include <mach/coalition_notification_server.h>
49 #include <mach/host_priv.h>
50 #include <mach/host_special_ports.h>
54 #include <sys/errno.h>
57 * BSD interface functions
/*
 * BSD interface functions
 * (prototypes for the BSD/proc layer; definitions appear later in this file)
 */
int coalitions_get_list(int type, struct procinfo_coalinfo *coal_list, int list_sz);
coalition_t task_get_coalition(task_t task, int type);
boolean_t coalition_is_leader(task_t task, coalition_t coal);
task_t coalition_get_leader(coalition_t coal);
int coalition_get_task_count(coalition_t coal);
/* returns summed page count; *ntasks receives the number of tasks visited */
uint64_t coalition_get_page_count(coalition_t coal, int *ntasks);
int coalition_get_pid_list(coalition_t coal, uint32_t rolemask, int sort_order,
    int *pid_list, int list_sz);
/* defined in task.c */
extern ledger_template_t task_ledger_template;

/*
 * Templates; task template is copied due to potential allocation limits on
 * the per-task ledger template.
 * Both are NULL until init_coalition_ledgers() instantiates them.
 */
ledger_template_t coalition_task_ledger_template = NULL;
ledger_template_t coalition_ledger_template = NULL;
extern int proc_selfpid(void);

/*
 * Coalition zone needs limits. We expect there will be as many coalitions as
 * tasks (same order of magnitude), so use the task zone's limits.
 */
#define CONFIG_COALITION_MAX    CONFIG_TASK_MAX
#define COALITION_CHUNK         TASK_CHUNK

/* tunables — presumably set from boot-args; the setters are not visible in this file chunk */
int unrestrict_coalition_syscalls;
int merge_adaptive_coalitions;
LCK_GRP_DECLARE(coalitions_lck_grp, "coalition");

/* coalitions_list_lock protects coalition_count, coalitions queue, next_coalition_id. */
static LCK_RW_DECLARE(coalitions_list_lock, &coalitions_lck_grp);
static uint64_t coalition_count;
static uint64_t coalition_next_id = 1;  /* ids are handed out starting at 1 */
static queue_head_t coalitions_q;

/* Default coalitions (one per coalition type) for init and for corpses. */
coalition_t init_coalition[COALITION_NUM_TYPES];
coalition_t corpse_coalition[COALITION_NUM_TYPES];
101 coal_type_str(int type
)
104 case COALITION_TYPE_RESOURCE
:
106 case COALITION_TYPE_JETSAM
:
113 struct coalition_type
{
118 * pre-condition: coalition just allocated (unlocked), unreferenced,
121 kern_return_t (*init
)(coalition_t coal
, boolean_t privileged
);
125 * pre-condition: coalition unlocked
126 * pre-condition: coalition refcount=0, active_count=0,
127 * termrequested=1, terminated=1, reaped=1
129 void (*dealloc
)(coalition_t coal
);
133 * pre-condition: coalition locked
134 * pre-condition: coalition !reaped and !terminated
136 kern_return_t (*adopt_task
)(coalition_t coal
, task_t task
);
140 * pre-condition: coalition locked
141 * pre-condition: task has been removed from coalition's task list
143 kern_return_t (*remove_task
)(coalition_t coal
, task_t task
);
147 * pre-condition: coalition locked
148 * pre-condition: task added to coalition's task list,
149 * active_count >= 1 (at least the given task is active)
151 kern_return_t (*set_taskrole
)(coalition_t coal
, task_t task
, int role
);
155 * pre-condition: coalition locked
156 * pre-condition: task added to coalition's task list,
157 * active_count >= 1 (at least the given task is active)
159 int (*get_taskrole
)(coalition_t coal
, task_t task
);
163 * pre-condition: coalition locked
165 void (*iterate_tasks
)(coalition_t coal
, void *ctx
, void (*callback
)(coalition_t
, void *, task_t
));
/*
 * COALITION_TYPE_RESOURCE
 * Method prototypes for the resource coalition type; these are bound into
 * s_coalition_types[COALITION_TYPE_RESOURCE] below.
 */
static kern_return_t i_coal_resource_init(coalition_t coal, boolean_t privileged);
static void i_coal_resource_dealloc(coalition_t coal);
static kern_return_t i_coal_resource_adopt_task(coalition_t coal, task_t task);
static kern_return_t i_coal_resource_remove_task(coalition_t coal, task_t task);
static kern_return_t i_coal_resource_set_taskrole(coalition_t coal,
    task_t task, int role);
static int i_coal_resource_get_taskrole(coalition_t coal, task_t task);
static void i_coal_resource_iterate_tasks(coalition_t coal, void *ctx,
    void (*callback)(coalition_t, void *, task_t));
/*
 * Ensure COALITION_NUM_THREAD_QOS_TYPES defined in mach/coalition.h still
 * matches THREAD_QOS_LAST defined in mach/thread_policy.h
 */
static_assert(COALITION_NUM_THREAD_QOS_TYPES == THREAD_QOS_LAST);
188 struct i_resource_coalition
{
190 * This keeps track of resource utilization of tasks that are no longer active
191 * in the coalition and is updated when a task is removed from the coalition.
195 uint64_t byteswritten
;
198 uint64_t logical_immediate_writes
;
199 uint64_t logical_deferred_writes
;
200 uint64_t logical_invalidated_writes
;
201 uint64_t logical_metadata_writes
;
202 uint64_t logical_immediate_writes_to_external
;
203 uint64_t logical_deferred_writes_to_external
;
204 uint64_t logical_invalidated_writes_to_external
;
205 uint64_t logical_metadata_writes_to_external
;
207 uint64_t cpu_time_eqos
[COALITION_NUM_THREAD_QOS_TYPES
]; /* cpu time per effective QoS class */
208 uint64_t cpu_time_rqos
[COALITION_NUM_THREAD_QOS_TYPES
]; /* cpu time per requested QoS class */
209 uint64_t cpu_instructions
;
212 uint64_t task_count
; /* tasks that have started in this coalition */
213 uint64_t dead_task_count
; /* tasks that have exited in this coalition;
214 * subtract from task_count to get count
215 * of "active" tasks */
217 * Count the length of time this coalition had at least one active task.
218 * This can be a 'denominator' to turn e.g. cpu_time to %cpu.
220 uint64_t last_became_nonempty_time
;
221 uint64_t time_nonempty
;
223 queue_head_t tasks
; /* List of active tasks in the coalition */
225 * This ledger is used for triggering resource exception. For the tracked resources, this is updated
226 * when the member tasks' resource usage changes.
228 ledger_t resource_monitor_ledger
;
229 #if CONFIG_PHYS_WRITE_ACCT
230 uint64_t fs_metadata_writes
;
231 #endif /* CONFIG_PHYS_WRITE_ACCT */
/*
 * COALITION_TYPE_JETSAM
 * Method prototypes for the jetsam coalition type; these are bound into
 * s_coalition_types[COALITION_TYPE_JETSAM] below.
 */
static kern_return_t i_coal_jetsam_init(coalition_t coal, boolean_t privileged);
static void i_coal_jetsam_dealloc(coalition_t coal);
static kern_return_t i_coal_jetsam_adopt_task(coalition_t coal, task_t task);
static kern_return_t i_coal_jetsam_remove_task(coalition_t coal, task_t task);
static kern_return_t i_coal_jetsam_set_taskrole(coalition_t coal,
    task_t task, int role);
/*
 * NOTE(review): not static, unlike the resource counterpart — presumably
 * referenced from outside this file; confirm before changing its linkage.
 */
int i_coal_jetsam_get_taskrole(coalition_t coal, task_t task);
static void i_coal_jetsam_iterate_tasks(coalition_t coal, void *ctx,
    void (*callback)(coalition_t, void *, task_t));
248 struct i_jetsam_coalition
{
250 queue_head_t extensions
;
251 queue_head_t services
;
253 struct thread_group
*thread_group
;
258 * main coalition structure
261 uint64_t id
; /* monotonically increasing */
263 uint32_t role
; /* default task role (background, adaptive, interactive, etc) */
264 uint32_t ref_count
; /* Number of references to the memory containing this struct */
265 uint32_t active_count
; /* Number of members of (tasks in) the
266 * coalition, plus vouchers referring
267 * to the coalition */
268 uint32_t focal_task_count
; /* Number of TASK_FOREGROUND_APPLICATION tasks in the coalition */
269 uint32_t nonfocal_task_count
; /* Number of TASK_BACKGROUND_APPLICATION tasks in the coalition */
271 /* coalition flags */
272 uint32_t privileged
: 1; /* Members of this coalition may create
273 * and manage coalitions and may posix_spawn
274 * processes into selected coalitions */
277 uint32_t termrequested
: 1; /* launchd has requested termination when coalition becomes empty */
278 uint32_t terminated
: 1; /* coalition became empty and spawns are now forbidden */
279 uint32_t reaped
: 1; /* reaped, invisible to userspace, but waiting for ref_count to go to zero */
280 uint32_t notified
: 1; /* no-more-processes notification was sent via special port */
281 uint32_t efficient
: 1; /* launchd has marked the coalition as efficient */
282 #if DEVELOPMENT || DEBUG
283 uint32_t should_notify
: 1; /* should this coalition send notifications (default: yes) */
286 queue_chain_t coalitions
; /* global list of coalitions */
288 decl_lck_mtx_data(, lock
); /* Coalition lock. */
290 /* put coalition type-specific structures here */
292 struct i_resource_coalition r
;
293 struct i_jetsam_coalition j
;
298 * register different coalition types:
299 * these must be kept in the order specified in coalition.h
301 static const struct coalition_type
302 s_coalition_types
[COALITION_NUM_TYPES
] = {
304 COALITION_TYPE_RESOURCE
,
306 i_coal_resource_init
,
307 i_coal_resource_dealloc
,
308 i_coal_resource_adopt_task
,
309 i_coal_resource_remove_task
,
310 i_coal_resource_set_taskrole
,
311 i_coal_resource_get_taskrole
,
312 i_coal_resource_iterate_tasks
,
315 COALITION_TYPE_JETSAM
,
318 i_coal_jetsam_dealloc
,
319 i_coal_jetsam_adopt_task
,
320 i_coal_jetsam_remove_task
,
321 i_coal_jetsam_set_taskrole
,
322 i_coal_jetsam_get_taskrole
,
323 i_coal_jetsam_iterate_tasks
,
/* Zone backing all struct coalition allocations; freed elements are zeroed. */
ZONE_DECLARE(coalition_zone, "coalitions",
    sizeof(struct coalition), ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM);
/* Dispatch to the per-coalition-type implementation of `func` for this coalition. */
#define coal_call(coal, func, ...) \
	(s_coalition_types[(coal)->type].func)(coal, ## __VA_ARGS__)

/*
 * Coalition lock helpers.
 * The argument is parenthesized so that any expression (not just a plain
 * identifier) can be passed safely; behavior is unchanged for the existing
 * call sites, which all pass identifiers.
 */
#define coalition_lock(c) do { lck_mtx_lock(&(c)->lock); } while (0)
#define coalition_unlock(c) do { lck_mtx_unlock(&(c)->lock); } while (0)
/*
 * Define the coalition type to track focal tasks.
 * On embedded, track them using jetsam coalitions since they have associated thread
 * groups which reflect this property as a flag (and pass it down to CLPC).
 * On non-embedded platforms, since not all coalitions have jetsam coalitions
 * track focal counts on the resource coalition.
 */
#if !XNU_TARGET_OS_OSX
#define COALITION_FOCAL_TASKS_ACCOUNTING  COALITION_TYPE_JETSAM
#else /* !XNU_TARGET_OS_OSX */
#define COALITION_FOCAL_TASKS_ACCOUNTING  COALITION_TYPE_RESOURCE
#endif /* !XNU_TARGET_OS_OSX */
/*
 * Coalition ledger implementation
 */

/* Ledger entry indices; logical_writes stays -1 until init_coalition_ledgers() fills it in. */
struct coalition_ledger_indices coalition_ledgers =
{.logical_writes = -1, };

/* noinline so the function name is visible in backtraces of the notification path */
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_COALITION_IS_CAUSING_TOO_MUCH_IO(int flavor);
362 coalition_ledger_get_from_task(task_t task
)
364 ledger_t ledger
= LEDGER_NULL
;
365 coalition_t coal
= task
->coalition
[COALITION_TYPE_RESOURCE
];
367 if (coal
!= NULL
&& (!queue_empty(&task
->task_coalition
[COALITION_TYPE_RESOURCE
]))) {
368 ledger
= coal
->r
.resource_monitor_ledger
;
369 ledger_reference(ledger
);
376 COALITION_IO_LEDGER_ENABLE
,
377 COALITION_IO_LEDGER_DISABLE
381 coalition_io_monitor_ctl(struct coalition
*coalition
, uint32_t flags
, int64_t limit
)
383 ledger_t ledger
= coalition
->r
.resource_monitor_ledger
;
385 if (flags
== COALITION_IO_LEDGER_ENABLE
) {
386 /* Configure the logical I/O ledger */
387 ledger_set_limit(ledger
, coalition_ledgers
.logical_writes
, (limit
* 1024 * 1024), 0);
388 ledger_set_period(ledger
, coalition_ledgers
.logical_writes
, (COALITION_LEDGER_MONITOR_INTERVAL_SECS
* NSEC_PER_SEC
));
389 } else if (flags
== COALITION_IO_LEDGER_DISABLE
) {
390 ledger_disable_refill(ledger
, coalition_ledgers
.logical_writes
);
391 ledger_disable_callback(ledger
, coalition_ledgers
.logical_writes
);
396 coalition_ledger_set_logical_writes_limit(struct coalition
*coalition
, int64_t limit
)
400 /* limit = -1 will be used to disable the limit and the callback */
401 if (limit
> COALITION_MAX_LOGICAL_WRITES_LIMIT
|| limit
== 0 || limit
< -1) {
406 coalition_lock(coalition
);
408 coalition_io_monitor_ctl(coalition
, COALITION_IO_LEDGER_DISABLE
, limit
);
410 coalition_io_monitor_ctl(coalition
, COALITION_IO_LEDGER_ENABLE
, limit
);
412 coalition_unlock(coalition
);
417 void __attribute__((noinline
))
418 SENDING_NOTIFICATION__THIS_COALITION_IS_CAUSING_TOO_MUCH_IO(int flavor
)
420 int pid
= proc_selfpid();
421 ledger_amount_t new_limit
;
422 task_t task
= current_task();
423 struct ledger_entry_info lei
;
426 struct coalition
*coalition
= task
->coalition
[COALITION_TYPE_RESOURCE
];
428 assert(coalition
!= NULL
);
429 ledger
= coalition
->r
.resource_monitor_ledger
;
432 case FLAVOR_IO_LOGICAL_WRITES
:
433 ledger_get_entry_info(ledger
, coalition_ledgers
.logical_writes
, &lei
);
434 trace_resource_violation(RMON_LOGWRITES_VIOLATED
, &lei
);
440 os_log(OS_LOG_DEFAULT
, "Coalition [%lld] caught causing excessive I/O (flavor: %d). Task I/O: %lld MB. [Limit : %lld MB per %lld secs]. Triggered by process [%d]\n",
441 coalition
->id
, flavor
, (lei
.lei_balance
/ (1024 * 1024)), (lei
.lei_limit
/ (1024 * 1024)),
442 (lei
.lei_refill_period
/ NSEC_PER_SEC
), pid
);
444 kr
= send_resource_violation(send_disk_writes_violation
, task
, &lei
, kRNFlagsNone
);
446 os_log(OS_LOG_DEFAULT
, "ERROR %#x returned from send_resource_violation(disk_writes, ...)\n", kr
);
450 * Continue to monitor the coalition after it hits the initial limit, but increase
451 * the limit exponentially so that we don't spam the listener.
453 new_limit
= (lei
.lei_limit
/ 1024 / 1024) * 4;
454 coalition_lock(coalition
);
455 if (new_limit
> COALITION_MAX_LOGICAL_WRITES_LIMIT
) {
456 coalition_io_monitor_ctl(coalition
, COALITION_IO_LEDGER_DISABLE
, -1);
458 coalition_io_monitor_ctl(coalition
, COALITION_IO_LEDGER_ENABLE
, new_limit
);
460 coalition_unlock(coalition
);
467 coalition_io_rate_exceeded(int warning
, const void *param0
, __unused
const void *param1
)
470 SENDING_NOTIFICATION__THIS_COALITION_IS_CAUSING_TOO_MUCH_IO((int)param0
);
475 init_coalition_ledgers(void)
478 assert(coalition_ledger_template
== NULL
);
480 if ((t
= ledger_template_create("Per-coalition ledgers")) == NULL
) {
481 panic("couldn't create coalition ledger template");
484 coalition_ledgers
.logical_writes
= ledger_entry_add(t
, "logical_writes", "res", "bytes");
486 if (coalition_ledgers
.logical_writes
< 0) {
487 panic("couldn't create entries for coaliton ledger template");
490 ledger_set_callback(t
, coalition_ledgers
.logical_writes
, coalition_io_rate_exceeded
, (void *)FLAVOR_IO_LOGICAL_WRITES
, NULL
);
491 ledger_template_complete(t
);
493 coalition_task_ledger_template
= ledger_template_copy(task_ledger_template
, "Coalition task ledgers");
495 if (coalition_task_ledger_template
== NULL
) {
496 panic("couldn't create coalition task ledger template");
499 ledger_template_complete(coalition_task_ledger_template
);
501 coalition_ledger_template
= t
;
505 coalition_io_ledger_update(task_t task
, int32_t flavor
, boolean_t is_credit
, uint32_t io_size
)
508 coalition_t coal
= task
->coalition
[COALITION_TYPE_RESOURCE
];
510 assert(coal
!= NULL
);
511 ledger
= coal
->r
.resource_monitor_ledger
;
512 if (LEDGER_VALID(ledger
)) {
513 if (flavor
== FLAVOR_IO_LOGICAL_WRITES
) {
515 ledger_credit(ledger
, coalition_ledgers
.logical_writes
, io_size
);
517 ledger_debit(ledger
, coalition_ledgers
.logical_writes
, io_size
);
524 coalition_notify_user(uint64_t id
, uint32_t flags
)
526 mach_port_t user_port
;
529 kr
= host_get_coalition_port(host_priv_self(), &user_port
);
530 if ((kr
!= KERN_SUCCESS
) || !IPC_PORT_VALID(user_port
)) {
534 coalition_notification(user_port
, id
, flags
);
535 ipc_port_release_send(user_port
);
540 * COALITION_TYPE_RESOURCE
544 i_coal_resource_init(coalition_t coal
, boolean_t privileged
)
547 assert(coal
&& coal
->type
== COALITION_TYPE_RESOURCE
);
548 coal
->r
.ledger
= ledger_instantiate(coalition_task_ledger_template
,
549 LEDGER_CREATE_ACTIVE_ENTRIES
);
550 if (coal
->r
.ledger
== NULL
) {
551 return KERN_RESOURCE_SHORTAGE
;
554 coal
->r
.resource_monitor_ledger
= ledger_instantiate(coalition_ledger_template
,
555 LEDGER_CREATE_ACTIVE_ENTRIES
);
556 if (coal
->r
.resource_monitor_ledger
== NULL
) {
557 return KERN_RESOURCE_SHORTAGE
;
560 queue_init(&coal
->r
.tasks
);
566 i_coal_resource_dealloc(coalition_t coal
)
568 assert(coal
&& coal
->type
== COALITION_TYPE_RESOURCE
);
570 ledger_dereference(coal
->r
.ledger
);
571 ledger_dereference(coal
->r
.resource_monitor_ledger
);
575 i_coal_resource_adopt_task(coalition_t coal
, task_t task
)
577 struct i_resource_coalition
*cr
;
579 assert(coal
&& coal
->type
== COALITION_TYPE_RESOURCE
);
580 assert(queue_empty(&task
->task_coalition
[COALITION_TYPE_RESOURCE
]));
585 if (cr
->task_count
< cr
->dead_task_count
) {
586 panic("%s: coalition %p id:%llu type:%s task_count(%llu) < dead_task_count(%llu)",
587 __func__
, coal
, coal
->id
, coal_type_str(coal
->type
),
588 cr
->task_count
, cr
->dead_task_count
);
591 /* If moving from 0->1 active tasks */
592 if (cr
->task_count
- cr
->dead_task_count
== 1) {
593 cr
->last_became_nonempty_time
= mach_absolute_time();
596 /* put the task on the coalition's list of tasks */
597 enqueue_tail(&cr
->tasks
, &task
->task_coalition
[COALITION_TYPE_RESOURCE
]);
599 coal_dbg("Added PID:%d to id:%llu, task_count:%llu, dead_count:%llu, nonempty_time:%llu",
600 task_pid(task
), coal
->id
, cr
->task_count
, cr
->dead_task_count
,
601 cr
->last_became_nonempty_time
);
607 i_coal_resource_remove_task(coalition_t coal
, task_t task
)
609 struct i_resource_coalition
*cr
;
611 assert(coal
&& coal
->type
== COALITION_TYPE_RESOURCE
);
612 assert(task
->coalition
[COALITION_TYPE_RESOURCE
] == coal
);
613 assert(!queue_empty(&task
->task_coalition
[COALITION_TYPE_RESOURCE
]));
616 * handle resource coalition accounting rollup for dead tasks
620 cr
->dead_task_count
++;
622 if (cr
->task_count
< cr
->dead_task_count
) {
623 panic("%s: coalition %p id:%llu type:%s task_count(%llu) < dead_task_count(%llu)",
624 __func__
, coal
, coal
->id
, coal_type_str(coal
->type
), cr
->task_count
, cr
->dead_task_count
);
627 /* If moving from 1->0 active tasks */
628 if (cr
->task_count
- cr
->dead_task_count
== 0) {
629 uint64_t last_time_nonempty
= mach_absolute_time() - cr
->last_became_nonempty_time
;
630 cr
->last_became_nonempty_time
= 0;
631 cr
->time_nonempty
+= last_time_nonempty
;
634 /* Do not roll up for exec'd task or exec copy task */
635 if (!task_is_exec_copy(task
) && !task_did_exec(task
)) {
636 ledger_rollup(cr
->ledger
, task
->ledger
);
637 cr
->bytesread
+= task
->task_io_stats
->disk_reads
.size
;
638 cr
->byteswritten
+= task
->task_io_stats
->total_io
.size
- task
->task_io_stats
->disk_reads
.size
;
639 #if defined(__x86_64__)
640 cr
->gpu_time
+= task_gpu_utilisation(task
);
641 #endif /* defined(__x86_64__) */
643 #if defined(__arm__) || defined(__arm64__)
644 cr
->energy
+= task_energy(task
);
645 #endif /* defined(__arm__) || defined(__arm64__) */
647 cr
->logical_immediate_writes
+= task
->task_writes_counters_internal
.task_immediate_writes
;
648 cr
->logical_deferred_writes
+= task
->task_writes_counters_internal
.task_deferred_writes
;
649 cr
->logical_invalidated_writes
+= task
->task_writes_counters_internal
.task_invalidated_writes
;
650 cr
->logical_metadata_writes
+= task
->task_writes_counters_internal
.task_metadata_writes
;
651 cr
->logical_immediate_writes_to_external
+= task
->task_writes_counters_external
.task_immediate_writes
;
652 cr
->logical_deferred_writes_to_external
+= task
->task_writes_counters_external
.task_deferred_writes
;
653 cr
->logical_invalidated_writes_to_external
+= task
->task_writes_counters_external
.task_invalidated_writes
;
654 cr
->logical_metadata_writes_to_external
+= task
->task_writes_counters_external
.task_metadata_writes
;
655 #if CONFIG_PHYS_WRITE_ACCT
656 cr
->fs_metadata_writes
+= task
->task_fs_metadata_writes
;
657 #endif /* CONFIG_PHYS_WRITE_ACCT */
658 cr
->cpu_ptime
+= task_cpu_ptime(task
);
659 task_update_cpu_time_qos_stats(task
, cr
->cpu_time_eqos
, cr
->cpu_time_rqos
);
661 uint64_t counts
[MT_CORE_NFIXED
] = {};
662 (void)mt_fixed_task_counts(task
, counts
);
663 cr
->cpu_cycles
+= counts
[MT_CORE_CYCLES
];
664 #if defined(MT_CORE_INSTRS)
665 cr
->cpu_instructions
+= counts
[MT_CORE_INSTRS
];
666 #endif /* defined(MT_CORE_INSTRS) */
667 #endif /* MONOTONIC */
670 /* remove the task from the coalition's list */
671 remqueue(&task
->task_coalition
[COALITION_TYPE_RESOURCE
]);
672 queue_chain_init(task
->task_coalition
[COALITION_TYPE_RESOURCE
]);
674 coal_dbg("removed PID:%d from id:%llu, task_count:%llu, dead_count:%llu",
675 task_pid(task
), coal
->id
, cr
->task_count
, cr
->dead_task_count
);
681 i_coal_resource_set_taskrole(__unused coalition_t coal
,
682 __unused task_t task
, __unused
int role
)
688 i_coal_resource_get_taskrole(__unused coalition_t coal
, __unused task_t task
)
692 assert(coal
&& coal
->type
== COALITION_TYPE_RESOURCE
);
694 qe_foreach_element(t
, &coal
->r
.tasks
, task_coalition
[COALITION_TYPE_RESOURCE
]) {
696 return COALITION_TASKROLE_UNDEF
;
704 i_coal_resource_iterate_tasks(coalition_t coal
, void *ctx
, void (*callback
)(coalition_t
, void *, task_t
))
707 assert(coal
&& coal
->type
== COALITION_TYPE_RESOURCE
);
709 qe_foreach_element(t
, &coal
->r
.tasks
, task_coalition
[COALITION_TYPE_RESOURCE
])
710 callback(coal
, ctx
, t
);
#if CONFIG_PHYS_WRITE_ACCT
/* defined elsewhere; reported below only for the kernel_task coalition */
extern uint64_t kernel_pm_writes;
#endif /* CONFIG_PHYS_WRITE_ACCT */
718 coalition_resource_usage_internal(coalition_t coal
, struct coalition_resource_usage
*cru_out
)
721 ledger_amount_t credit
, debit
;
724 if (coal
->type
!= COALITION_TYPE_RESOURCE
) {
725 return KERN_INVALID_ARGUMENT
;
728 /* Return KERN_INVALID_ARGUMENT for Corpse coalition */
729 for (i
= 0; i
< COALITION_NUM_TYPES
; i
++) {
730 if (coal
== corpse_coalition
[i
]) {
731 return KERN_INVALID_ARGUMENT
;
735 ledger_t sum_ledger
= ledger_instantiate(coalition_task_ledger_template
, LEDGER_CREATE_ACTIVE_ENTRIES
);
736 if (sum_ledger
== LEDGER_NULL
) {
737 return KERN_RESOURCE_SHORTAGE
;
740 coalition_lock(coal
);
743 * Start with the coalition's ledger, which holds the totals from all
746 ledger_rollup(sum_ledger
, coal
->r
.ledger
);
747 uint64_t bytesread
= coal
->r
.bytesread
;
748 uint64_t byteswritten
= coal
->r
.byteswritten
;
749 uint64_t gpu_time
= coal
->r
.gpu_time
;
750 uint64_t energy
= coal
->r
.energy
;
751 uint64_t logical_immediate_writes
= coal
->r
.logical_immediate_writes
;
752 uint64_t logical_deferred_writes
= coal
->r
.logical_deferred_writes
;
753 uint64_t logical_invalidated_writes
= coal
->r
.logical_invalidated_writes
;
754 uint64_t logical_metadata_writes
= coal
->r
.logical_metadata_writes
;
755 uint64_t logical_immediate_writes_to_external
= coal
->r
.logical_immediate_writes_to_external
;
756 uint64_t logical_deferred_writes_to_external
= coal
->r
.logical_deferred_writes_to_external
;
757 uint64_t logical_invalidated_writes_to_external
= coal
->r
.logical_invalidated_writes_to_external
;
758 uint64_t logical_metadata_writes_to_external
= coal
->r
.logical_metadata_writes_to_external
;
759 #if CONFIG_PHYS_WRITE_ACCT
760 uint64_t fs_metadata_writes
= coal
->r
.fs_metadata_writes
;
761 #endif /* CONFIG_PHYS_WRITE_ACCT */
762 int64_t cpu_time_billed_to_me
= 0;
763 int64_t cpu_time_billed_to_others
= 0;
764 int64_t energy_billed_to_me
= 0;
765 int64_t energy_billed_to_others
= 0;
766 uint64_t cpu_ptime
= coal
->r
.cpu_ptime
;
767 uint64_t cpu_time_eqos
[COALITION_NUM_THREAD_QOS_TYPES
];
768 memcpy(cpu_time_eqos
, coal
->r
.cpu_time_eqos
, sizeof(cpu_time_eqos
));
769 uint64_t cpu_time_rqos
[COALITION_NUM_THREAD_QOS_TYPES
];
770 memcpy(cpu_time_rqos
, coal
->r
.cpu_time_rqos
, sizeof(cpu_time_rqos
));
771 uint64_t cpu_instructions
= coal
->r
.cpu_instructions
;
772 uint64_t cpu_cycles
= coal
->r
.cpu_cycles
;
775 * Add to that all the active tasks' ledgers. Tasks cannot deallocate
776 * out from under us, since we hold the coalition lock.
779 qe_foreach_element(task
, &coal
->r
.tasks
, task_coalition
[COALITION_TYPE_RESOURCE
]) {
781 * Rolling up stats for exec copy task or exec'd task will lead to double accounting.
782 * Cannot take task lock after taking coalition lock
784 if (task_is_exec_copy(task
) || task_did_exec(task
)) {
788 ledger_rollup(sum_ledger
, task
->ledger
);
789 bytesread
+= task
->task_io_stats
->disk_reads
.size
;
790 byteswritten
+= task
->task_io_stats
->total_io
.size
- task
->task_io_stats
->disk_reads
.size
;
791 #if defined(__x86_64__)
792 gpu_time
+= task_gpu_utilisation(task
);
793 #endif /* defined(__x86_64__) */
795 #if defined(__arm__) || defined(__arm64__)
796 energy
+= task_energy(task
);
797 #endif /* defined(__arm__) || defined(__arm64__) */
799 logical_immediate_writes
+= task
->task_writes_counters_internal
.task_immediate_writes
;
800 logical_deferred_writes
+= task
->task_writes_counters_internal
.task_deferred_writes
;
801 logical_invalidated_writes
+= task
->task_writes_counters_internal
.task_invalidated_writes
;
802 logical_metadata_writes
+= task
->task_writes_counters_internal
.task_metadata_writes
;
803 logical_immediate_writes_to_external
+= task
->task_writes_counters_external
.task_immediate_writes
;
804 logical_deferred_writes_to_external
+= task
->task_writes_counters_external
.task_deferred_writes
;
805 logical_invalidated_writes_to_external
+= task
->task_writes_counters_external
.task_invalidated_writes
;
806 logical_metadata_writes_to_external
+= task
->task_writes_counters_external
.task_metadata_writes
;
807 #if CONFIG_PHYS_WRITE_ACCT
808 fs_metadata_writes
+= task
->task_fs_metadata_writes
;
809 #endif /* CONFIG_PHYS_WRITE_ACCT */
811 cpu_ptime
+= task_cpu_ptime(task
);
812 task_update_cpu_time_qos_stats(task
, cpu_time_eqos
, cpu_time_rqos
);
814 uint64_t counts
[MT_CORE_NFIXED
] = {};
815 (void)mt_fixed_task_counts(task
, counts
);
816 cpu_cycles
+= counts
[MT_CORE_CYCLES
];
817 #if defined(MT_CORE_INSTRS)
818 cpu_instructions
+= counts
[MT_CORE_INSTRS
];
819 #endif /* defined(MT_CORE_INSTRS) */
820 #endif /* MONOTONIC */
823 kr
= ledger_get_balance(sum_ledger
, task_ledgers
.cpu_time_billed_to_me
, (int64_t *)&cpu_time_billed_to_me
);
824 if (kr
!= KERN_SUCCESS
|| cpu_time_billed_to_me
< 0) {
825 cpu_time_billed_to_me
= 0;
828 kr
= ledger_get_balance(sum_ledger
, task_ledgers
.cpu_time_billed_to_others
, (int64_t *)&cpu_time_billed_to_others
);
829 if (kr
!= KERN_SUCCESS
|| cpu_time_billed_to_others
< 0) {
830 cpu_time_billed_to_others
= 0;
833 kr
= ledger_get_balance(sum_ledger
, task_ledgers
.energy_billed_to_me
, (int64_t *)&energy_billed_to_me
);
834 if (kr
!= KERN_SUCCESS
|| energy_billed_to_me
< 0) {
835 energy_billed_to_me
= 0;
838 kr
= ledger_get_balance(sum_ledger
, task_ledgers
.energy_billed_to_others
, (int64_t *)&energy_billed_to_others
);
839 if (kr
!= KERN_SUCCESS
|| energy_billed_to_others
< 0) {
840 energy_billed_to_others
= 0;
843 /* collect information from the coalition itself */
844 cru_out
->tasks_started
= coal
->r
.task_count
;
845 cru_out
->tasks_exited
= coal
->r
.dead_task_count
;
847 uint64_t time_nonempty
= coal
->r
.time_nonempty
;
848 uint64_t last_became_nonempty_time
= coal
->r
.last_became_nonempty_time
;
850 coalition_unlock(coal
);
852 /* Copy the totals out of sum_ledger */
853 kr
= ledger_get_entries(sum_ledger
, task_ledgers
.cpu_time
,
855 if (kr
!= KERN_SUCCESS
) {
858 cru_out
->cpu_time
= credit
;
859 cru_out
->cpu_time_billed_to_me
= (uint64_t)cpu_time_billed_to_me
;
860 cru_out
->cpu_time_billed_to_others
= (uint64_t)cpu_time_billed_to_others
;
861 cru_out
->energy_billed_to_me
= (uint64_t)energy_billed_to_me
;
862 cru_out
->energy_billed_to_others
= (uint64_t)energy_billed_to_others
;
864 kr
= ledger_get_entries(sum_ledger
, task_ledgers
.interrupt_wakeups
,
866 if (kr
!= KERN_SUCCESS
) {
869 cru_out
->interrupt_wakeups
= credit
;
871 kr
= ledger_get_entries(sum_ledger
, task_ledgers
.platform_idle_wakeups
,
873 if (kr
!= KERN_SUCCESS
) {
876 cru_out
->platform_idle_wakeups
= credit
;
878 cru_out
->bytesread
= bytesread
;
879 cru_out
->byteswritten
= byteswritten
;
880 cru_out
->gpu_time
= gpu_time
;
881 cru_out
->energy
= energy
;
882 cru_out
->logical_immediate_writes
= logical_immediate_writes
;
883 cru_out
->logical_deferred_writes
= logical_deferred_writes
;
884 cru_out
->logical_invalidated_writes
= logical_invalidated_writes
;
885 cru_out
->logical_metadata_writes
= logical_metadata_writes
;
886 cru_out
->logical_immediate_writes_to_external
= logical_immediate_writes_to_external
;
887 cru_out
->logical_deferred_writes_to_external
= logical_deferred_writes_to_external
;
888 cru_out
->logical_invalidated_writes_to_external
= logical_invalidated_writes_to_external
;
889 cru_out
->logical_metadata_writes_to_external
= logical_metadata_writes_to_external
;
890 #if CONFIG_PHYS_WRITE_ACCT
891 cru_out
->fs_metadata_writes
= fs_metadata_writes
;
893 cru_out
->fs_metadata_writes
= 0;
894 #endif /* CONFIG_PHYS_WRITE_ACCT */
895 cru_out
->cpu_ptime
= cpu_ptime
;
896 cru_out
->cpu_time_eqos_len
= COALITION_NUM_THREAD_QOS_TYPES
;
897 memcpy(cru_out
->cpu_time_eqos
, cpu_time_eqos
, sizeof(cru_out
->cpu_time_eqos
));
898 cru_out
->cpu_cycles
= cpu_cycles
;
899 cru_out
->cpu_instructions
= cpu_instructions
;
900 ledger_dereference(sum_ledger
);
901 sum_ledger
= LEDGER_NULL
;
903 #if CONFIG_PHYS_WRITE_ACCT
904 // kernel_pm_writes are only recorded under kernel_task coalition
905 if (coalition_id(coal
) == COALITION_ID_KERNEL
) {
906 cru_out
->pm_writes
= kernel_pm_writes
;
908 cru_out
->pm_writes
= 0;
911 cru_out
->pm_writes
= 0;
912 #endif /* CONFIG_PHYS_WRITE_ACCT */
914 if (last_became_nonempty_time
) {
915 time_nonempty
+= mach_absolute_time() - last_became_nonempty_time
;
917 absolutetime_to_nanoseconds(time_nonempty
, &cru_out
->time_nonempty
);
924 * COALITION_TYPE_JETSAM
928 i_coal_jetsam_init(coalition_t coal
, boolean_t privileged
)
930 assert(coal
&& coal
->type
== COALITION_TYPE_JETSAM
);
933 coal
->j
.leader
= TASK_NULL
;
934 queue_head_init(coal
->j
.extensions
);
935 queue_head_init(coal
->j
.services
);
936 queue_head_init(coal
->j
.other
);
938 #if CONFIG_THREAD_GROUPS
939 switch (coal
->role
) {
940 case COALITION_ROLE_SYSTEM
:
941 coal
->j
.thread_group
= thread_group_find_by_id_and_retain(THREAD_GROUP_SYSTEM
);
943 case COALITION_ROLE_BACKGROUND
:
944 coal
->j
.thread_group
= thread_group_find_by_id_and_retain(THREAD_GROUP_BACKGROUND
);
946 case COALITION_ROLE_ADAPTIVE
:
947 if (merge_adaptive_coalitions
) {
948 coal
->j
.thread_group
= thread_group_find_by_id_and_retain(THREAD_GROUP_ADAPTIVE
);
950 coal
->j
.thread_group
= thread_group_create_and_retain();
954 coal
->j
.thread_group
= thread_group_create_and_retain();
956 assert(coal
->j
.thread_group
!= NULL
);
962 i_coal_jetsam_dealloc(__unused coalition_t coal
)
964 assert(coal
&& coal
->type
== COALITION_TYPE_JETSAM
);
966 /* the coalition should be completely clear at this point */
967 assert(queue_empty(&coal
->j
.extensions
));
968 assert(queue_empty(&coal
->j
.services
));
969 assert(queue_empty(&coal
->j
.other
));
970 assert(coal
->j
.leader
== TASK_NULL
);
972 #if CONFIG_THREAD_GROUPS
973 /* disassociate from the thread group */
974 assert(coal
->j
.thread_group
!= NULL
);
975 thread_group_release(coal
->j
.thread_group
);
976 coal
->j
.thread_group
= NULL
;
981 i_coal_jetsam_adopt_task(coalition_t coal
, task_t task
)
983 struct i_jetsam_coalition
*cj
;
984 assert(coal
&& coal
->type
== COALITION_TYPE_JETSAM
);
988 assert(queue_empty(&task
->task_coalition
[COALITION_TYPE_JETSAM
]));
990 /* put each task initially in the "other" list */
991 enqueue_tail(&cj
->other
, &task
->task_coalition
[COALITION_TYPE_JETSAM
]);
992 coal_dbg("coalition %lld adopted PID:%d as UNDEF",
993 coal
->id
, task_pid(task
));
999 i_coal_jetsam_remove_task(coalition_t coal
, task_t task
)
1001 assert(coal
&& coal
->type
== COALITION_TYPE_JETSAM
);
1002 assert(task
->coalition
[COALITION_TYPE_JETSAM
] == coal
);
1004 coal_dbg("removing PID:%d from coalition id:%lld",
1005 task_pid(task
), coal
->id
);
1007 if (task
== coal
->j
.leader
) {
1008 coal
->j
.leader
= NULL
;
1009 coal_dbg(" PID:%d was the leader!", task_pid(task
));
1011 assert(!queue_empty(&task
->task_coalition
[COALITION_TYPE_JETSAM
]));
1014 /* remove the task from the specific coalition role queue */
1015 remqueue(&task
->task_coalition
[COALITION_TYPE_JETSAM
]);
1016 queue_chain_init(task
->task_coalition
[COALITION_TYPE_RESOURCE
]);
1018 return KERN_SUCCESS
;
1021 static kern_return_t
1022 i_coal_jetsam_set_taskrole(coalition_t coal
, task_t task
, int role
)
1024 struct i_jetsam_coalition
*cj
;
1026 assert(coal
&& coal
->type
== COALITION_TYPE_JETSAM
);
1027 assert(task
->coalition
[COALITION_TYPE_JETSAM
] == coal
);
1032 case COALITION_TASKROLE_LEADER
:
1033 coal_dbg("setting PID:%d as LEADER of %lld",
1034 task_pid(task
), coal
->id
);
1035 if (cj
->leader
!= TASK_NULL
) {
1036 /* re-queue the exiting leader onto the "other" list */
1037 coal_dbg(" re-queue existing leader (%d) as OTHER",
1038 task_pid(cj
->leader
));
1039 re_queue_tail(&cj
->other
, &cj
->leader
->task_coalition
[COALITION_TYPE_JETSAM
]);
1042 * remove the task from the "other" list
1043 * (where it was put by default)
1045 remqueue(&task
->task_coalition
[COALITION_TYPE_JETSAM
]);
1046 queue_chain_init(task
->task_coalition
[COALITION_TYPE_JETSAM
]);
1048 /* set the coalition leader */
1051 case COALITION_TASKROLE_XPC
:
1052 coal_dbg("setting PID:%d as XPC in %lld",
1053 task_pid(task
), coal
->id
);
1054 q
= (queue_t
)&cj
->services
;
1056 case COALITION_TASKROLE_EXT
:
1057 coal_dbg("setting PID:%d as EXT in %lld",
1058 task_pid(task
), coal
->id
);
1059 q
= (queue_t
)&cj
->extensions
;
1061 case COALITION_TASKROLE_NONE
:
1063 * Tasks with a role of "none" should fall through to an
1064 * undefined role so long as the task is currently a member
1065 * of the coalition. This scenario can happen if a task is
1066 * killed (usually via jetsam) during exec.
1068 if (task
->coalition
[COALITION_TYPE_JETSAM
] != coal
) {
1069 panic("%s: task %p attempting to set role %d "
1070 "in coalition %p to which it does not belong!", __func__
, task
, role
, coal
);
1073 case COALITION_TASKROLE_UNDEF
:
1074 coal_dbg("setting PID:%d as UNDEF in %lld",
1075 task_pid(task
), coal
->id
);
1076 q
= (queue_t
)&cj
->other
;
1079 panic("%s: invalid role(%d) for task", __func__
, role
);
1080 return KERN_INVALID_ARGUMENT
;
1084 re_queue_tail(q
, &task
->task_coalition
[COALITION_TYPE_JETSAM
]);
1087 return KERN_SUCCESS
;
1091 i_coal_jetsam_get_taskrole(coalition_t coal
, task_t task
)
1093 struct i_jetsam_coalition
*cj
;
1096 assert(coal
&& coal
->type
== COALITION_TYPE_JETSAM
);
1097 assert(task
->coalition
[COALITION_TYPE_JETSAM
] == coal
);
1101 if (task
== cj
->leader
) {
1102 return COALITION_TASKROLE_LEADER
;
1105 qe_foreach_element(t
, &cj
->services
, task_coalition
[COALITION_TYPE_JETSAM
]) {
1107 return COALITION_TASKROLE_XPC
;
1111 qe_foreach_element(t
, &cj
->extensions
, task_coalition
[COALITION_TYPE_JETSAM
]) {
1113 return COALITION_TASKROLE_EXT
;
1117 qe_foreach_element(t
, &cj
->other
, task_coalition
[COALITION_TYPE_JETSAM
]) {
1119 return COALITION_TASKROLE_UNDEF
;
1123 /* task not in the coalition?! */
1124 return COALITION_TASKROLE_NONE
;
1128 i_coal_jetsam_iterate_tasks(coalition_t coal
, void *ctx
, void (*callback
)(coalition_t
, void *, task_t
))
1130 struct i_jetsam_coalition
*cj
;
1133 assert(coal
&& coal
->type
== COALITION_TYPE_JETSAM
);
1138 callback(coal
, ctx
, cj
->leader
);
1141 qe_foreach_element(t
, &cj
->services
, task_coalition
[COALITION_TYPE_JETSAM
])
1142 callback(coal
, ctx
, t
);
1144 qe_foreach_element(t
, &cj
->extensions
, task_coalition
[COALITION_TYPE_JETSAM
])
1145 callback(coal
, ctx
, t
);
1147 qe_foreach_element(t
, &cj
->other
, task_coalition
[COALITION_TYPE_JETSAM
])
1148 callback(coal
, ctx
, t
);
1154 * Main Coalition implementation
1159 * coalition_create_internal
1160 * Returns: New coalition object, referenced for the caller and unlocked.
1161 * Condition: coalitions_list_lock must be UNLOCKED.
1164 coalition_create_internal(int type
, int role
, boolean_t privileged
, coalition_t
*out
, uint64_t *coalition_id
)
1167 struct coalition
*new_coal
;
1171 if (type
< 0 || type
> COALITION_TYPE_MAX
) {
1172 return KERN_INVALID_ARGUMENT
;
1175 new_coal
= (struct coalition
*)zalloc(coalition_zone
);
1176 if (new_coal
== COALITION_NULL
) {
1177 return KERN_RESOURCE_SHORTAGE
;
1179 bzero(new_coal
, sizeof(*new_coal
));
1181 new_coal
->type
= type
;
1182 new_coal
->role
= role
;
1184 /* initialize type-specific resources */
1185 kr
= coal_call(new_coal
, init
, privileged
);
1186 if (kr
!= KERN_SUCCESS
) {
1187 zfree(coalition_zone
, new_coal
);
1191 /* One for caller, one for coalitions list */
1192 new_coal
->ref_count
= 2;
1194 new_coal
->privileged
= privileged
? TRUE
: FALSE
;
1195 #if DEVELOPMENT || DEBUG
1196 new_coal
->should_notify
= 1;
1199 lck_mtx_init(&new_coal
->lock
, &coalitions_lck_grp
, LCK_ATTR_NULL
);
1201 lck_rw_lock_exclusive(&coalitions_list_lock
);
1202 new_coal
->id
= coalition_next_id
++;
1204 enqueue_tail(&coalitions_q
, &new_coal
->coalitions
);
1206 #if CONFIG_THREAD_GROUPS
1207 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION
, MACH_COALITION_NEW
),
1208 new_coal
->id
, new_coal
->type
,
1209 (new_coal
->type
== COALITION_TYPE_JETSAM
&& new_coal
->j
.thread_group
) ?
1210 thread_group_get_id(new_coal
->j
.thread_group
) : 0);
1213 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION
, MACH_COALITION_NEW
),
1214 new_coal
->id
, new_coal
->type
);
1217 ctype
= new_coal
->type
;
1218 lck_rw_unlock_exclusive(&coalitions_list_lock
);
1220 coal_dbg("id:%llu, type:%s", cid
, coal_type_str(ctype
));
1222 if (coalition_id
!= NULL
) {
1223 *coalition_id
= cid
;
1227 return KERN_SUCCESS
;
1232 * Condition: coalition must be UNLOCKED.
1235 coalition_release(coalition_t coal
)
1237 /* TODO: This can be done with atomics. */
1238 coalition_lock(coal
);
1242 uint32_t rc
= coal
->ref_count
;
1243 uint32_t ac
= coal
->active_count
;
1244 #endif /* COALITION_DEBUG */
1246 coal_dbg("id:%llu type:%s ref_count:%u active_count:%u%s",
1247 coal
->id
, coal_type_str(coal
->type
), rc
, ac
,
1248 rc
<= 0 ? ", will deallocate now" : "");
1250 if (coal
->ref_count
> 0) {
1251 coalition_unlock(coal
);
1255 assert(coal
->termrequested
);
1256 assert(coal
->terminated
);
1257 assert(coal
->active_count
== 0);
1258 assert(coal
->reaped
);
1259 assert(coal
->focal_task_count
== 0);
1260 assert(coal
->nonfocal_task_count
== 0);
1261 #if CONFIG_THREAD_GROUPS
1262 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION
, MACH_COALITION_FREE
),
1263 coal
->id
, coal
->type
,
1264 coal
->type
== COALITION_TYPE_JETSAM
?
1265 coal
->j
.thread_group
: 0);
1267 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION
, MACH_COALITION_FREE
),
1268 coal
->id
, coal
->type
);
1271 coal_call(coal
, dealloc
);
1273 coalition_unlock(coal
);
1275 lck_mtx_destroy(&coal
->lock
, &coalitions_lck_grp
);
1277 zfree(coalition_zone
, coal
);
1281 * coalition_find_by_id_internal
1282 * Returns: Coalition object with specified id, NOT referenced.
1283 * If not found, returns COALITION_NULL.
1284 * If found, returns a locked coalition.
1286 * Condition: No locks held
1289 coalition_find_by_id_internal(uint64_t coal_id
)
1294 return COALITION_NULL
;
1297 lck_rw_lock_shared(&coalitions_list_lock
);
1298 qe_foreach_element(coal
, &coalitions_q
, coalitions
) {
1299 if (coal
->id
== coal_id
) {
1300 coalition_lock(coal
);
1301 lck_rw_unlock_shared(&coalitions_list_lock
);
1305 lck_rw_unlock_shared(&coalitions_list_lock
);
1307 return COALITION_NULL
;
1311 * coalition_find_by_id
1312 * Returns: Coalition object with specified id, referenced.
1313 * Condition: coalitions_list_lock must be UNLOCKED.
1316 coalition_find_by_id(uint64_t cid
)
1318 coalition_t coal
= coalition_find_by_id_internal(cid
);
1320 if (coal
== COALITION_NULL
) {
1321 return COALITION_NULL
;
1324 /* coal is locked */
1327 coalition_unlock(coal
);
1328 return COALITION_NULL
;
1331 if (coal
->ref_count
== 0) {
1332 panic("resurrecting coalition %p id:%llu type:%s, active_count:%u\n",
1333 coal
, coal
->id
, coal_type_str(coal
->type
), coal
->active_count
);
1337 uint32_t rc
= coal
->ref_count
;
1340 coalition_unlock(coal
);
1342 coal_dbg("id:%llu type:%s ref_count:%u",
1343 coal
->id
, coal_type_str(coal
->type
), rc
);
1349 * coalition_find_and_activate_by_id
1350 * Returns: Coalition object with specified id, referenced, and activated.
1351 * Condition: coalitions_list_lock must be UNLOCKED.
1352 * This is the function to use when putting a 'new' thing into a coalition,
1353 * like posix_spawn of an XPC service by launchd.
1354 * See also coalition_extend_active.
1357 coalition_find_and_activate_by_id(uint64_t cid
)
1359 coalition_t coal
= coalition_find_by_id_internal(cid
);
1361 if (coal
== COALITION_NULL
) {
1362 return COALITION_NULL
;
1365 /* coal is locked */
1367 if (coal
->reaped
|| coal
->terminated
) {
1368 /* Too late to put something new into this coalition, it's
1369 * already on its way out the door */
1370 coalition_unlock(coal
);
1371 return COALITION_NULL
;
1374 if (coal
->ref_count
== 0) {
1375 panic("resurrecting coalition %p id:%llu type:%s, active_count:%u\n",
1376 coal
, coal
->id
, coal_type_str(coal
->type
), coal
->active_count
);
1380 coal
->active_count
++;
1383 uint32_t rc
= coal
->ref_count
;
1384 uint32_t ac
= coal
->active_count
;
1387 coalition_unlock(coal
);
1389 coal_dbg("id:%llu type:%s ref_count:%u, active_count:%u",
1390 coal
->id
, coal_type_str(coal
->type
), rc
, ac
);
1396 coalition_id(coalition_t coal
)
1398 assert(coal
!= COALITION_NULL
);
1403 task_coalition_ids(task_t task
, uint64_t ids
[COALITION_NUM_TYPES
])
1406 for (i
= 0; i
< COALITION_NUM_TYPES
; i
++) {
1407 if (task
->coalition
[i
]) {
1408 ids
[i
] = task
->coalition
[i
]->id
;
1416 task_coalition_roles(task_t task
, int roles
[COALITION_NUM_TYPES
])
1419 memset(roles
, 0, COALITION_NUM_TYPES
* sizeof(roles
[0]));
1421 for (i
= 0; i
< COALITION_NUM_TYPES
; i
++) {
1422 if (task
->coalition
[i
]) {
1423 coalition_lock(task
->coalition
[i
]);
1424 roles
[i
] = coal_call(task
->coalition
[i
],
1425 get_taskrole
, task
);
1426 coalition_unlock(task
->coalition
[i
]);
1428 roles
[i
] = COALITION_TASKROLE_NONE
;
1435 coalition_type(coalition_t coal
)
1441 coalition_term_requested(coalition_t coal
)
1443 return coal
->termrequested
;
1447 coalition_is_terminated(coalition_t coal
)
1449 return coal
->terminated
;
1453 coalition_is_reaped(coalition_t coal
)
1455 return coal
->reaped
;
1459 coalition_is_privileged(coalition_t coal
)
1461 return coal
->privileged
|| unrestrict_coalition_syscalls
;
1465 task_is_in_privileged_coalition(task_t task
, int type
)
1467 if (type
< 0 || type
> COALITION_TYPE_MAX
) {
1470 if (unrestrict_coalition_syscalls
) {
1473 if (!task
->coalition
[type
]) {
1476 return task
->coalition
[type
]->privileged
;
1480 task_coalition_update_gpu_stats(task_t task
, uint64_t gpu_ns_delta
)
1484 assert(task
!= TASK_NULL
);
1485 if (gpu_ns_delta
== 0) {
1489 coal
= task
->coalition
[COALITION_TYPE_RESOURCE
];
1490 assert(coal
!= COALITION_NULL
);
1492 coalition_lock(coal
);
1493 coal
->r
.gpu_time
+= gpu_ns_delta
;
1494 coalition_unlock(coal
);
1498 task_coalition_adjust_focal_count(task_t task
, int count
, uint32_t *new_count
)
1500 coalition_t coal
= task
->coalition
[COALITION_FOCAL_TASKS_ACCOUNTING
];
1501 if (coal
== COALITION_NULL
) {
1505 *new_count
= os_atomic_add(&coal
->focal_task_count
, count
, relaxed
);
1506 assert(*new_count
!= UINT32_MAX
);
1511 task_coalition_focal_count(task_t task
)
1513 coalition_t coal
= task
->coalition
[COALITION_FOCAL_TASKS_ACCOUNTING
];
1514 if (coal
== COALITION_NULL
) {
1518 return coal
->focal_task_count
;
1522 task_coalition_adjust_nonfocal_count(task_t task
, int count
, uint32_t *new_count
)
1524 coalition_t coal
= task
->coalition
[COALITION_FOCAL_TASKS_ACCOUNTING
];
1525 if (coal
== COALITION_NULL
) {
1529 *new_count
= os_atomic_add(&coal
->nonfocal_task_count
, count
, relaxed
);
1530 assert(*new_count
!= UINT32_MAX
);
1535 task_coalition_nonfocal_count(task_t task
)
1537 coalition_t coal
= task
->coalition
[COALITION_FOCAL_TASKS_ACCOUNTING
];
1538 if (coal
== COALITION_NULL
) {
1542 return coal
->nonfocal_task_count
;
1546 coalition_set_efficient(coalition_t coal
)
1548 coalition_lock(coal
);
1549 coal
->efficient
= TRUE
;
1550 coalition_unlock(coal
);
1553 #if CONFIG_THREAD_GROUPS
1554 struct thread_group
*
1555 task_coalition_get_thread_group(task_t task
)
1557 coalition_t coal
= task
->coalition
[COALITION_TYPE_JETSAM
];
1558 /* return system thread group for non-jetsam coalitions */
1559 if (coal
== COALITION_NULL
) {
1560 return init_coalition
[COALITION_TYPE_JETSAM
]->j
.thread_group
;
1562 return coal
->j
.thread_group
;
1566 struct thread_group
*
1567 kdp_coalition_get_thread_group(coalition_t coal
)
1569 if (coal
->type
!= COALITION_TYPE_JETSAM
) {
1572 assert(coal
->j
.thread_group
!= NULL
);
1573 return coal
->j
.thread_group
;
1576 struct thread_group
*
1577 coalition_get_thread_group(coalition_t coal
)
1579 if (coal
->type
!= COALITION_TYPE_JETSAM
) {
1582 assert(coal
->j
.thread_group
!= NULL
);
1583 return thread_group_retain(coal
->j
.thread_group
);
1587 coalition_set_thread_group(coalition_t coal
, struct thread_group
*tg
)
1589 assert(coal
!= COALITION_NULL
);
1592 if (coal
->type
!= COALITION_TYPE_JETSAM
) {
1595 struct thread_group
*old_tg
= coal
->j
.thread_group
;
1596 assert(old_tg
!= NULL
);
1597 coal
->j
.thread_group
= tg
;
1599 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION
, MACH_COALITION_THREAD_GROUP_SET
),
1600 coal
->id
, coal
->type
, thread_group_get_id(tg
));
1602 thread_group_release(old_tg
);
1606 task_coalition_thread_group_focal_update(task_t task
)
1608 assert(task
->coalition
[COALITION_FOCAL_TASKS_ACCOUNTING
] != COALITION_NULL
);
1609 thread_group_flags_update_lock();
1610 uint32_t focal_count
= task_coalition_focal_count(task
);
1612 thread_group_set_flags_locked(task_coalition_get_thread_group(task
), THREAD_GROUP_FLAGS_UI_APP
);
1614 thread_group_clear_flags_locked(task_coalition_get_thread_group(task
), THREAD_GROUP_FLAGS_UI_APP
);
1616 thread_group_flags_update_unlock();
1622 coalition_for_each_task(coalition_t coal
, void *ctx
,
1623 void (*callback
)(coalition_t
, void *, task_t
))
1625 assert(coal
!= COALITION_NULL
);
1627 coal_dbg("iterating tasks in coalition %p id:%llu type:%s, active_count:%u",
1628 coal
, coal
->id
, coal_type_str(coal
->type
), coal
->active_count
);
1630 coalition_lock(coal
);
1632 coal_call(coal
, iterate_tasks
, ctx
, callback
);
1634 coalition_unlock(coal
);
1639 coalition_remove_active(coalition_t coal
)
1641 coalition_lock(coal
);
1643 assert(!coal
->reaped
);
1644 assert(coal
->active_count
> 0);
1646 coal
->active_count
--;
1648 boolean_t do_notify
= FALSE
;
1649 uint64_t notify_id
= 0;
1650 uint32_t notify_flags
= 0;
1651 if (coal
->termrequested
&& coal
->active_count
== 0) {
1652 /* We only notify once, when active_count reaches zero.
1653 * We just decremented, so if it reached zero, we mustn't have
1656 assert(!coal
->terminated
);
1657 coal
->terminated
= TRUE
;
1659 assert(!coal
->notified
);
1661 coal
->notified
= TRUE
;
1662 #if DEVELOPMENT || DEBUG
1663 do_notify
= coal
->should_notify
;
1667 notify_id
= coal
->id
;
1672 uint64_t cid
= coal
->id
;
1673 uint32_t rc
= coal
->ref_count
;
1674 int ac
= coal
->active_count
;
1675 int ct
= coal
->type
;
1677 coalition_unlock(coal
);
1679 coal_dbg("id:%llu type:%s ref_count:%u, active_count:%u,%s",
1680 cid
, coal_type_str(ct
), rc
, ac
, do_notify
? " NOTIFY" : " ");
1683 coalition_notify_user(notify_id
, notify_flags
);
1687 /* Used for kernel_task, launchd, launchd's early boot tasks... */
1689 coalitions_adopt_init_task(task_t task
)
1692 kr
= coalitions_adopt_task(init_coalition
, task
);
1693 if (kr
!= KERN_SUCCESS
) {
1694 panic("failed to adopt task %p into default coalition: %d", task
, kr
);
1699 /* Used for forked corpses. */
1701 coalitions_adopt_corpse_task(task_t task
)
1704 kr
= coalitions_adopt_task(corpse_coalition
, task
);
1705 if (kr
!= KERN_SUCCESS
) {
1706 panic("failed to adopt task %p into corpse coalition: %d", task
, kr
);
1712 * coalition_adopt_task_internal
1713 * Condition: Coalition must be referenced and unlocked. Will fail if coalition
1714 * is already terminated.
1716 static kern_return_t
1717 coalition_adopt_task_internal(coalition_t coal
, task_t task
)
1721 if (task
->coalition
[coal
->type
]) {
1722 return KERN_ALREADY_IN_SET
;
1725 coalition_lock(coal
);
1727 if (coal
->reaped
|| coal
->terminated
) {
1728 coalition_unlock(coal
);
1729 return KERN_TERMINATED
;
1732 kr
= coal_call(coal
, adopt_task
, task
);
1733 if (kr
!= KERN_SUCCESS
) {
1737 coal
->active_count
++;
1741 task
->coalition
[coal
->type
] = coal
;
1745 (void)coal
; /* need expression after label */
1746 uint64_t cid
= coal
->id
;
1747 uint32_t rc
= coal
->ref_count
;
1748 uint32_t ct
= coal
->type
;
1750 if (get_task_uniqueid(task
) != UINT64_MAX
) {
1751 /* On 32-bit targets, uniqueid will get truncated to 32 bits */
1752 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION
, MACH_COALITION_ADOPT
),
1753 coal
->id
, get_task_uniqueid(task
));
1756 coalition_unlock(coal
);
1758 coal_dbg("task:%d, id:%llu type:%s ref_count:%u, kr=%d",
1759 task_pid(task
), cid
, coal_type_str(ct
), rc
, kr
);
1763 static kern_return_t
1764 coalition_remove_task_internal(task_t task
, int type
)
1768 coalition_t coal
= task
->coalition
[type
];
1771 return KERN_SUCCESS
;
1774 assert(coal
->type
== (uint32_t)type
);
1776 coalition_lock(coal
);
1778 kr
= coal_call(coal
, remove_task
, task
);
1781 uint64_t cid
= coal
->id
;
1782 uint32_t rc
= coal
->ref_count
;
1783 int ac
= coal
->active_count
;
1784 int ct
= coal
->type
;
1786 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION
, MACH_COALITION_REMOVE
),
1787 coal
->id
, get_task_uniqueid(task
));
1788 coalition_unlock(coal
);
1790 coal_dbg("id:%llu type:%s ref_count:%u, active_count:%u, kr=%d",
1791 cid
, coal_type_str(ct
), rc
, ac
, kr
);
1793 coalition_remove_active(coal
);
1799 * coalitions_adopt_task
1800 * Condition: All coalitions must be referenced and unlocked.
1801 * Will fail if any coalition is already terminated.
1804 coalitions_adopt_task(coalition_t
*coals
, task_t task
)
1809 if (!coals
|| coals
[COALITION_TYPE_RESOURCE
] == COALITION_NULL
) {
1810 return KERN_INVALID_ARGUMENT
;
1813 /* verify that the incoming coalitions are what they say they are */
1814 for (i
= 0; i
< COALITION_NUM_TYPES
; i
++) {
1815 if (coals
[i
] && coals
[i
]->type
!= (uint32_t)i
) {
1816 return KERN_INVALID_ARGUMENT
;
1820 for (i
= 0; i
< COALITION_NUM_TYPES
; i
++) {
1823 kr
= coalition_adopt_task_internal(coals
[i
], task
);
1825 if (kr
!= KERN_SUCCESS
) {
1826 /* dis-associate any coalitions that just adopted this task */
1828 if (task
->coalition
[i
]) {
1829 coalition_remove_task_internal(task
, i
);
1839 * coalitions_remove_task
1840 * Condition: task must be referenced and UNLOCKED; all task's coalitions must be UNLOCKED
1843 coalitions_remove_task(task_t task
)
1848 for (i
= 0; i
< COALITION_NUM_TYPES
; i
++) {
1849 kr
= coalition_remove_task_internal(task
, i
);
1850 assert(kr
== KERN_SUCCESS
);
1857 * task_release_coalitions
1858 * helper function to release references to all coalitions in which
1859 * 'task' is a member.
1862 task_release_coalitions(task_t task
)
1865 for (i
= 0; i
< COALITION_NUM_TYPES
; i
++) {
1866 if (task
->coalition
[i
]) {
1867 coalition_release(task
->coalition
[i
]);
1868 } else if (i
== COALITION_TYPE_RESOURCE
) {
1869 panic("deallocating task %p was not a member of a resource coalition", task
);
1875 * coalitions_set_roles
1876 * for each type of coalition, if the task is a member of a coalition of
1877 * that type (given in the coalitions parameter) then set the role of
1878 * the task within that that coalition.
1881 coalitions_set_roles(coalition_t coalitions
[COALITION_NUM_TYPES
],
1882 task_t task
, int roles
[COALITION_NUM_TYPES
])
1884 kern_return_t kr
= KERN_SUCCESS
;
1887 for (i
= 0; i
< COALITION_NUM_TYPES
; i
++) {
1888 if (!coalitions
[i
]) {
1891 coalition_lock(coalitions
[i
]);
1892 kr
= coal_call(coalitions
[i
], set_taskrole
, task
, roles
[i
]);
1893 coalition_unlock(coalitions
[i
]);
1894 assert(kr
== KERN_SUCCESS
);
1901 * coalition_terminate_internal
1902 * Condition: Coalition must be referenced and UNLOCKED.
1905 coalition_request_terminate_internal(coalition_t coal
)
1907 assert(coal
->type
>= 0 && coal
->type
<= COALITION_TYPE_MAX
);
1909 if (coal
== init_coalition
[coal
->type
]) {
1910 return KERN_DEFAULT_SET
;
1913 coalition_lock(coal
);
1916 coalition_unlock(coal
);
1917 return KERN_INVALID_NAME
;
1920 if (coal
->terminated
|| coal
->termrequested
) {
1921 coalition_unlock(coal
);
1922 return KERN_TERMINATED
;
1925 coal
->termrequested
= TRUE
;
1927 boolean_t do_notify
= FALSE
;
1928 uint64_t note_id
= 0;
1929 uint32_t note_flags
= 0;
1931 if (coal
->active_count
== 0) {
1933 * We only notify once, when active_count reaches zero.
1934 * We just set termrequested to zero. If the active count
1935 * was already at zero (tasks died before we could request
1936 * a termination notification), we should notify.
1938 assert(!coal
->terminated
);
1939 coal
->terminated
= TRUE
;
1941 assert(!coal
->notified
);
1943 coal
->notified
= TRUE
;
1944 #if DEVELOPMENT || DEBUG
1945 do_notify
= coal
->should_notify
;
1953 coalition_unlock(coal
);
1956 coalition_notify_user(note_id
, note_flags
);
1959 return KERN_SUCCESS
;
1963 * coalition_reap_internal
1964 * Condition: Coalition must be referenced and UNLOCKED.
1967 coalition_reap_internal(coalition_t coal
)
1969 assert(coal
->type
<= COALITION_TYPE_MAX
);
1971 if (coal
== init_coalition
[coal
->type
]) {
1972 return KERN_DEFAULT_SET
;
1975 coalition_lock(coal
);
1977 coalition_unlock(coal
);
1978 return KERN_TERMINATED
;
1980 if (!coal
->terminated
) {
1981 coalition_unlock(coal
);
1982 return KERN_FAILURE
;
1984 assert(coal
->termrequested
);
1985 if (coal
->active_count
> 0) {
1986 coalition_unlock(coal
);
1987 return KERN_FAILURE
;
1990 coal
->reaped
= TRUE
;
1992 /* Caller, launchd, and coalitions list should each have a reference */
1993 assert(coal
->ref_count
> 2);
1995 coalition_unlock(coal
);
1997 lck_rw_lock_exclusive(&coalitions_list_lock
);
1999 remqueue(&coal
->coalitions
);
2000 lck_rw_unlock_exclusive(&coalitions_list_lock
);
2002 /* Release the list's reference and launchd's reference. */
2003 coalition_release(coal
);
2004 coalition_release(coal
);
2006 return KERN_SUCCESS
;
2009 #if DEVELOPMENT || DEBUG
2011 coalition_should_notify(coalition_t coal
)
2017 coalition_lock(coal
);
2018 should
= coal
->should_notify
;
2019 coalition_unlock(coal
);
2025 coalition_set_notify(coalition_t coal
, int notify
)
2030 coalition_lock(coal
);
2031 coal
->should_notify
= !!notify
;
2032 coalition_unlock(coal
);
2037 coalitions_init(void)
2041 const struct coalition_type
*ctype
;
2043 queue_head_init(coalitions_q
);
2045 if (!PE_parse_boot_argn("unrestrict_coalition_syscalls", &unrestrict_coalition_syscalls
,
2046 sizeof(unrestrict_coalition_syscalls
))) {
2047 unrestrict_coalition_syscalls
= 0;
2050 if (!PE_parse_boot_argn("tg_adaptive", &merge_adaptive_coalitions
,
2051 sizeof(merge_adaptive_coalitions
))) {
2052 merge_adaptive_coalitions
= 0;
2055 init_task_ledgers();
2057 init_coalition_ledgers();
2059 for (i
= 0, ctype
= &s_coalition_types
[0]; i
< COALITION_NUM_TYPES
; ctype
++, i
++) {
2060 /* verify the entry in the global coalition types array */
2061 if (ctype
->type
!= i
||
2064 !ctype
->adopt_task
||
2065 !ctype
->remove_task
) {
2066 panic("%s: Malformed coalition type %s(%d) in slot for type:%s(%d)",
2067 __func__
, coal_type_str(ctype
->type
), ctype
->type
, coal_type_str(i
), i
);
2069 if (!ctype
->has_default
) {
2072 kr
= coalition_create_internal(ctype
->type
, COALITION_ROLE_SYSTEM
, TRUE
, &init_coalition
[ctype
->type
], NULL
);
2073 if (kr
!= KERN_SUCCESS
) {
2074 panic("%s: could not create init %s coalition: kr:%d",
2075 __func__
, coal_type_str(i
), kr
);
2077 if (i
== COALITION_TYPE_RESOURCE
) {
2078 assert(COALITION_ID_KERNEL
== init_coalition
[ctype
->type
]->id
);
2080 kr
= coalition_create_internal(ctype
->type
, COALITION_ROLE_SYSTEM
, FALSE
, &corpse_coalition
[ctype
->type
], NULL
);
2081 if (kr
!= KERN_SUCCESS
) {
2082 panic("%s: could not create corpse %s coalition: kr:%d",
2083 __func__
, coal_type_str(i
), kr
);
2087 /* "Leak" our reference to the global object */
2091 * BSD Kernel interface functions
2095 coalition_fill_procinfo(struct coalition
*coal
,
2096 struct procinfo_coalinfo
*coalinfo
)
2098 coalinfo
->coalition_id
= coal
->id
;
2099 coalinfo
->coalition_type
= coal
->type
;
2100 coalinfo
->coalition_tasks
= coalition_get_task_count(coal
);
2105 coalitions_get_list(int type
, struct procinfo_coalinfo
*coal_list
, int list_sz
)
2108 struct coalition
*coal
;
2110 lck_rw_lock_shared(&coalitions_list_lock
);
2111 qe_foreach_element(coal
, &coalitions_q
, coalitions
) {
2112 if (!coal
->reaped
&& (type
< 0 || type
== (int)coal
->type
)) {
2113 if (coal_list
&& ncoals
< list_sz
) {
2114 coalition_fill_procinfo(coal
, &coal_list
[ncoals
]);
2119 lck_rw_unlock_shared(&coalitions_list_lock
);
2125 * Return the coaltion of the given type to which the task belongs.
2128 task_get_coalition(task_t task
, int coal_type
)
2132 if (task
== NULL
|| coal_type
> COALITION_TYPE_MAX
) {
2133 return COALITION_NULL
;
2136 c
= task
->coalition
[coal_type
];
2137 assert(c
== COALITION_NULL
|| (int)c
->type
== coal_type
);
2142 * Report if the given task is the leader of the given jetsam coalition.
2145 coalition_is_leader(task_t task
, coalition_t coal
)
2147 boolean_t ret
= FALSE
;
2149 if (coal
!= COALITION_NULL
) {
2150 coalition_lock(coal
);
2152 ret
= (coal
->type
== COALITION_TYPE_JETSAM
&& coal
->j
.leader
== task
);
2154 coalition_unlock(coal
);
2161 coalition_iterate_stackshot(coalition_iterate_fn_t callout
, void *arg
, uint32_t coalition_type
)
2166 qe_foreach_element(coal
, &coalitions_q
, coalitions
) {
2167 if (coal
== NULL
|| !ml_validate_nofault((vm_offset_t
)coal
, sizeof(struct coalition
))) {
2168 return KERN_FAILURE
;
2171 if (coalition_type
== coal
->type
) {
2172 callout(arg
, i
++, coal
);
2176 return KERN_SUCCESS
;
2180 kdp_coalition_get_leader(coalition_t coal
)
2186 if (coal
->type
== COALITION_TYPE_JETSAM
) {
2187 return coal
->j
.leader
;
2193 coalition_get_leader(coalition_t coal
)
2195 task_t leader
= TASK_NULL
;
2201 coalition_lock(coal
);
2202 if (coal
->type
!= COALITION_TYPE_JETSAM
) {
2206 leader
= coal
->j
.leader
;
2207 if (leader
!= TASK_NULL
) {
2208 task_reference(leader
);
2212 coalition_unlock(coal
);
2218 coalition_get_task_count(coalition_t coal
)
2221 struct queue_entry
*qe
;
2226 coalition_lock(coal
);
2227 switch (coal
->type
) {
2228 case COALITION_TYPE_RESOURCE
:
2229 qe_foreach(qe
, &coal
->r
.tasks
)
2232 case COALITION_TYPE_JETSAM
:
2233 if (coal
->j
.leader
) {
2236 qe_foreach(qe
, &coal
->j
.other
)
2238 qe_foreach(qe
, &coal
->j
.extensions
)
2240 qe_foreach(qe
, &coal
->j
.services
)
2246 coalition_unlock(coal
);
2253 i_get_list_footprint(queue_t list
, int type
, int *ntasks
)
2258 qe_foreach_element(task
, list
, task_coalition
[type
]) {
2259 bytes
+= get_task_phys_footprint(task
);
2260 coal_dbg(" [%d] task_pid:%d, type:%d, footprint:%lld",
2261 *ntasks
, task_pid(task
), type
, bytes
);
2269 coalition_get_page_count(coalition_t coal
, int *ntasks
)
2281 coalition_lock(coal
);
2283 switch (coal
->type
) {
2284 case COALITION_TYPE_RESOURCE
:
2285 bytes
+= i_get_list_footprint(&coal
->r
.tasks
, COALITION_TYPE_RESOURCE
, &num_tasks
);
2287 case COALITION_TYPE_JETSAM
:
2288 if (coal
->j
.leader
) {
2289 bytes
+= get_task_phys_footprint(coal
->j
.leader
);
2292 bytes
+= i_get_list_footprint(&coal
->j
.extensions
, COALITION_TYPE_JETSAM
, &num_tasks
);
2293 bytes
+= i_get_list_footprint(&coal
->j
.services
, COALITION_TYPE_JETSAM
, &num_tasks
);
2294 bytes
+= i_get_list_footprint(&coal
->j
.other
, COALITION_TYPE_JETSAM
, &num_tasks
);
2300 coalition_unlock(coal
);
2303 *ntasks
= num_tasks
;
2306 return bytes
/ PAGE_SIZE_64
;
2309 struct coal_sort_s
{
2316 * return < 0 for a < b
2320 typedef int (*cmpfunc_t
)(const void *a
, const void *b
);
2323 qsort(void *a
, size_t n
, size_t es
, cmpfunc_t cmp
);
2326 dflt_cmp(const void *a
, const void *b
)
2328 const struct coal_sort_s
*csA
= (const struct coal_sort_s
*)a
;
2329 const struct coal_sort_s
*csB
= (const struct coal_sort_s
*)b
;
2332 * if both A and B are equal, use a memory descending sort
2334 if (csA
->usr_order
== csB
->usr_order
) {
2335 return (int)((int64_t)csB
->bytes
- (int64_t)csA
->bytes
);
2338 /* otherwise, return the relationship between user specified orders */
2339 return csA
->usr_order
- csB
->usr_order
;
2343 mem_asc_cmp(const void *a
, const void *b
)
2345 const struct coal_sort_s
*csA
= (const struct coal_sort_s
*)a
;
2346 const struct coal_sort_s
*csB
= (const struct coal_sort_s
*)b
;
2348 return (int)((int64_t)csA
->bytes
- (int64_t)csB
->bytes
);
2352 mem_dec_cmp(const void *a
, const void *b
)
2354 const struct coal_sort_s
*csA
= (const struct coal_sort_s
*)a
;
2355 const struct coal_sort_s
*csB
= (const struct coal_sort_s
*)b
;
2357 return (int)((int64_t)csB
->bytes
- (int64_t)csA
->bytes
);
2361 usr_asc_cmp(const void *a
, const void *b
)
2363 const struct coal_sort_s
*csA
= (const struct coal_sort_s
*)a
;
2364 const struct coal_sort_s
*csB
= (const struct coal_sort_s
*)b
;
2366 return csA
->usr_order
- csB
->usr_order
;
2370 usr_dec_cmp(const void *a
, const void *b
)
2372 const struct coal_sort_s
*csA
= (const struct coal_sort_s
*)a
;
2373 const struct coal_sort_s
*csB
= (const struct coal_sort_s
*)b
;
2375 return csB
->usr_order
- csA
->usr_order
;
2378 /* avoid dynamic allocation in this path */
2379 #define MAX_SORTED_PIDS 80
/*
 * Collect up to array_sz (pid, sort-key) entries for the tasks on
 * `list` into sort_array, filling whichever key field (bytes or
 * usr_order) the chosen sort_order will later compare on.
 *
 * Called with the coalition lock held by coalition_get_pid_list()
 * (the only visible caller); must not block.
 *
 * Parameters:
 *   coal       - coalition being inspected (type selects the queue
 *                linkage used by qe_foreach_element)
 *   sort_order - one of the COALITION_SORT_* values
 *   list       - task queue to walk; NULL means "just the leader"
 *                and is only meaningful for JETSAM-type coalitions
 *   sort_array - output array (must be non-NULL; asserted)
 *   array_sz   - capacity remaining in sort_array
 *
 * Returns: number of entries written (0, 1 for the leader-only
 * case, or the count of tasks visited before running out of room).
 */
static int
coalition_get_sort_list(coalition_t coal, int sort_order, queue_t list,
    struct coal_sort_s *sort_array, int array_sz)
{
	int ntasks = 0;
	task_t task;

	assert(sort_array != NULL);

	/* nothing to do if the caller's array is already full */
	if (array_sz <= 0) {
		return 0;
	}

	if (!list) {
		/*
		 * this function will only be called with a NULL
		 * list for JETSAM-type coalitions, and is intended
		 * to investigate the leader process
		 */
		if (coal->type != COALITION_TYPE_JETSAM ||
		    coal->j.leader == TASK_NULL) {
			return 0;
		}
		sort_array[0].pid = task_pid(coal->j.leader);
		switch (sort_order) {
		case COALITION_SORT_DEFAULT:
			sort_array[0].usr_order = 0;
			break;
		case COALITION_SORT_MEM_ASC:
		case COALITION_SORT_MEM_DEC:
			/* memory sorts key on the leader's physical footprint */
			sort_array[0].bytes = get_task_phys_footprint(coal->j.leader);
			break;
		case COALITION_SORT_USER_ASC:
		case COALITION_SORT_USER_DEC:
			/* NOTE(review): usr_order is always 0 here, so user-order
			 * sorts degenerate to a stable no-op for this path */
			sort_array[0].usr_order = 0;
			break;
		default:
			break;
		}
		return 1;
	}

	/* walk every task linked into this coalition's queue */
	qe_foreach_element(task, list, task_coalition[coal->type]) {
		if (ntasks >= array_sz) {
			/* array full: warn and stop rather than overflow;
			 * callers size the array at MAX_SORTED_PIDS */
			printf("WARNING: more than %d pids in coalition %llu\n",
			    MAX_SORTED_PIDS, coal->id);
			break;
		}

		sort_array[ntasks].pid = task_pid(task);

		/* record only the key the eventual comparator will read */
		switch (sort_order) {
		case COALITION_SORT_DEFAULT:
			sort_array[ntasks].usr_order = 0;
			break;
		case COALITION_SORT_MEM_ASC:
		case COALITION_SORT_MEM_DEC:
			sort_array[ntasks].bytes = get_task_phys_footprint(task);
			break;
		case COALITION_SORT_USER_ASC:
		case COALITION_SORT_USER_DEC:
			sort_array[ntasks].usr_order = 0;
			break;
		default:
			break;
		}

		ntasks++;
	}

	return ntasks;
}
2455 coalition_get_pid_list(coalition_t coal
, uint32_t rolemask
, int sort_order
,
2456 int *pid_list
, int list_sz
)
2458 struct i_jetsam_coalition
*cj
;
2460 cmpfunc_t cmp_func
= NULL
;
2461 struct coal_sort_s sort_array
[MAX_SORTED_PIDS
] = { {0, 0, 0} }; /* keep to < 2k */
2464 !(rolemask
& COALITION_ROLEMASK_ALLROLES
) ||
2465 !pid_list
|| list_sz
< 1) {
2466 coal_dbg("Invalid parameters: coal:%p, type:%d, rolemask:0x%x, "
2467 "pid_list:%p, list_sz:%d", coal
, coal
? coal
->type
: -1,
2468 rolemask
, pid_list
, list_sz
);
2472 switch (sort_order
) {
2473 case COALITION_SORT_NOSORT
:
2476 case COALITION_SORT_DEFAULT
:
2477 cmp_func
= dflt_cmp
;
2479 case COALITION_SORT_MEM_ASC
:
2480 cmp_func
= mem_asc_cmp
;
2482 case COALITION_SORT_MEM_DEC
:
2483 cmp_func
= mem_dec_cmp
;
2485 case COALITION_SORT_USER_ASC
:
2486 cmp_func
= usr_asc_cmp
;
2488 case COALITION_SORT_USER_DEC
:
2489 cmp_func
= usr_dec_cmp
;
2495 coalition_lock(coal
);
2497 if (coal
->type
== COALITION_TYPE_RESOURCE
) {
2498 ntasks
+= coalition_get_sort_list(coal
, sort_order
, &coal
->r
.tasks
,
2499 sort_array
, MAX_SORTED_PIDS
);
2505 if (rolemask
& COALITION_ROLEMASK_UNDEF
) {
2506 ntasks
+= coalition_get_sort_list(coal
, sort_order
, &cj
->other
,
2507 sort_array
+ ntasks
,
2508 MAX_SORTED_PIDS
- ntasks
);
2511 if (rolemask
& COALITION_ROLEMASK_XPC
) {
2512 ntasks
+= coalition_get_sort_list(coal
, sort_order
, &cj
->services
,
2513 sort_array
+ ntasks
,
2514 MAX_SORTED_PIDS
- ntasks
);
2517 if (rolemask
& COALITION_ROLEMASK_EXT
) {
2518 ntasks
+= coalition_get_sort_list(coal
, sort_order
, &cj
->extensions
,
2519 sort_array
+ ntasks
,
2520 MAX_SORTED_PIDS
- ntasks
);
2523 if (rolemask
& COALITION_ROLEMASK_LEADER
) {
2524 ntasks
+= coalition_get_sort_list(coal
, sort_order
, NULL
,
2525 sort_array
+ ntasks
,
2526 MAX_SORTED_PIDS
- ntasks
);
2530 coalition_unlock(coal
);
2532 /* sort based on the chosen criterion (no sense sorting 1 item) */
2533 if (cmp_func
&& ntasks
> 1) {
2534 qsort(sort_array
, ntasks
, sizeof(struct coal_sort_s
), cmp_func
);
2537 for (int i
= 0; i
< ntasks
; i
++) {
2541 coal_dbg(" [%d] PID:%d, footprint:%lld, usr_order:%d",
2542 i
, sort_array
[i
].pid
, sort_array
[i
].bytes
,
2543 sort_array
[i
].usr_order
);
2544 pid_list
[i
] = sort_array
[i
].pid
;