/*
 * Copyright (c) 2013 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/kern_types.h>
#include <mach/mach_types.h>
#include <mach/boolean.h>

#include <kern/coalition.h>
#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/ledger.h>
#include <kern/mach_param.h> /* for TASK_CHUNK */
#include <kern/task.h>
#include <kern/thread_group.h>
#include <kern/zalloc.h>

#include <libkern/OSAtomic.h>

#include <mach/coalition_notification_server.h>
#include <mach/host_priv.h>
#include <mach/host_special_ports.h>

#include <sys/errno.h>
/*
 * BSD interface functions
 */
int coalitions_get_list(int type, struct procinfo_coalinfo *coal_list, int list_sz);
boolean_t coalition_is_leader(task_t task, int coal_type, coalition_t *coal);
task_t coalition_get_leader(coalition_t coal);
int coalition_get_task_count(coalition_t coal);
uint64_t coalition_get_page_count(coalition_t coal, int *ntasks);
int coalition_get_pid_list(coalition_t coal, uint32_t rolemask, int sort_order,
    int *pid_list, int list_sz);

/* defined in task.c */
extern ledger_template_t task_ledger_template;
/*
 * Coalition zone needs limits. We expect there will be as many coalitions as
 * tasks (same order of magnitude), so use the task zone's limits.
 */
#define CONFIG_COALITION_MAX  CONFIG_TASK_MAX
#define COALITION_CHUNK       TASK_CHUNK
int unrestrict_coalition_syscalls;
int merge_adaptive_coalitions;

lck_attr_t coalitions_lck_attr;
lck_grp_t coalitions_lck_grp;
lck_grp_attr_t coalitions_lck_grp_attr;

/* coalitions_list_lock protects coalition_count, coalitions queue, next_coalition_id. */
decl_lck_mtx_data(static, coalitions_list_lock);
static uint64_t coalition_count;
static uint64_t coalition_next_id = 1;
static queue_head_t coalitions_q;

coalition_t init_coalition[COALITION_NUM_TYPES];
coalition_t corpse_coalition[COALITION_NUM_TYPES];

zone_t coalition_zone;
static const char *coal_type_str(int type)
{
    switch (type) {
    case COALITION_TYPE_RESOURCE:
        return "RESOURCE";
    case COALITION_TYPE_JETSAM:
        return "JETSAM";
    default:
        return "<unknown>";
    }
}
struct coalition_type {
    int type;
    int has_default;
    /*
     * pre-condition: coalition just allocated (unlocked), unreferenced,
     *                type field set
     */
    kern_return_t (*init)(coalition_t coal, boolean_t privileged);

    /*
     * pre-condition: coalition unlocked
     * pre-condition: coalition refcount=0, active_count=0,
     *                termrequested=1, terminated=1, reaped=1
     */
    void (*dealloc)(coalition_t coal);

    /*
     * pre-condition: coalition locked
     * pre-condition: coalition !reaped and !terminated
     */
    kern_return_t (*adopt_task)(coalition_t coal, task_t task);

    /*
     * pre-condition: coalition locked
     * pre-condition: task has been removed from coalition's task list
     */
    kern_return_t (*remove_task)(coalition_t coal, task_t task);

    /*
     * pre-condition: coalition locked
     * pre-condition: task added to coalition's task list,
     *                active_count >= 1 (at least the given task is active)
     */
    kern_return_t (*set_taskrole)(coalition_t coal, task_t task, int role);

    /*
     * pre-condition: coalition locked
     * pre-condition: task added to coalition's task list,
     *                active_count >= 1 (at least the given task is active)
     */
    int (*get_taskrole)(coalition_t coal, task_t task);

    /*
     * pre-condition: coalition locked
     */
    void (*iterate_tasks)(coalition_t coal, void *ctx, void (*callback)(coalition_t, void *, task_t));
};
/*
 * COALITION_TYPE_RESOURCE
 */
static kern_return_t i_coal_resource_init(coalition_t coal, boolean_t privileged);
static void i_coal_resource_dealloc(coalition_t coal);
static kern_return_t i_coal_resource_adopt_task(coalition_t coal, task_t task);
static kern_return_t i_coal_resource_remove_task(coalition_t coal, task_t task);
static kern_return_t i_coal_resource_set_taskrole(coalition_t coal,
    task_t task, int role);
static int i_coal_resource_get_taskrole(coalition_t coal, task_t task);
static void i_coal_resource_iterate_tasks(coalition_t coal, void *ctx,
    void (*callback)(coalition_t, void *, task_t));
/*
 * Ensure COALITION_NUM_THREAD_QOS_TYPES defined in mach/coalition.h still
 * matches THREAD_QOS_LAST defined in mach/thread_policy.h
 */
static_assert(COALITION_NUM_THREAD_QOS_TYPES == THREAD_QOS_LAST);
struct i_resource_coalition {
    ledger_t ledger;
    uint64_t bytesread;
    uint64_t byteswritten;
    uint64_t gpu_time;
    uint64_t energy;
    uint64_t logical_immediate_writes;
    uint64_t logical_deferred_writes;
    uint64_t logical_invalidated_writes;
    uint64_t logical_metadata_writes;
    uint64_t cpu_ptime;

    uint64_t cpu_time_eqos[COALITION_NUM_THREAD_QOS_TYPES]; /* cpu time per effective QoS class */
    uint64_t cpu_time_rqos[COALITION_NUM_THREAD_QOS_TYPES]; /* cpu time per requested QoS class */

    uint64_t task_count;      /* tasks that have started in this coalition */
    uint64_t dead_task_count; /* tasks that have exited in this coalition;
                                 subtract from task_count to get the count
                                 of currently active tasks */
    /*
     * Count the length of time this coalition had at least one active task.
     * This can be a 'denominator' to turn e.g. cpu_time to %cpu.
     */
    uint64_t last_became_nonempty_time;
    uint64_t time_nonempty;

    queue_head_t tasks; /* List of active tasks in the coalition */
};
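
/*
 * Illustrative sketch (not part of the original source): once cpu_time and
 * time_nonempty are expressed in the same time unit, time_nonempty can serve
 * as the denominator the comment above describes, e.g.
 *
 *     uint64_t pct_cpu = time_nonempty ? (cpu_time * 100) / time_nonempty : 0;
 *
 * where both values are assumed to have already been converted to common units.
 */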
/*
 * COALITION_TYPE_JETSAM
 */
static kern_return_t i_coal_jetsam_init(coalition_t coal, boolean_t privileged);
static void i_coal_jetsam_dealloc(coalition_t coal);
static kern_return_t i_coal_jetsam_adopt_task(coalition_t coal, task_t task);
static kern_return_t i_coal_jetsam_remove_task(coalition_t coal, task_t task);
static kern_return_t i_coal_jetsam_set_taskrole(coalition_t coal,
    task_t task, int role);
static int i_coal_jetsam_get_taskrole(coalition_t coal, task_t task);
static void i_coal_jetsam_iterate_tasks(coalition_t coal, void *ctx,
    void (*callback)(coalition_t, void *, task_t));
struct i_jetsam_coalition {
    task_t leader;
    queue_head_t extensions;
    queue_head_t services;
    queue_head_t other;
    thread_group_t thread_group;
};
/*
 * main coalition structure
 */
struct coalition {
    uint64_t id;                  /* monotonically increasing */
    uint32_t type;
    uint32_t role;                /* default task role (background, adaptive, interactive, etc) */
    uint32_t ref_count;           /* Number of references to the memory containing this struct */
    uint32_t active_count;        /* Number of members of (tasks in) the
                                     coalition, plus vouchers referring
                                     to the coalition */
    uint32_t focal_task_count;    /* Number of TASK_FOREGROUND_APPLICATION tasks in the coalition */
    uint32_t nonfocal_task_count; /* Number of TASK_BACKGROUND_APPLICATION tasks in the coalition */

    /* coalition flags */
    uint32_t privileged : 1;      /* Members of this coalition may create
                                     and manage coalitions and may posix_spawn
                                     processes into selected coalitions */
    uint32_t termrequested : 1;   /* launchd has requested termination when coalition becomes empty */
    uint32_t terminated : 1;      /* coalition became empty and spawns are now forbidden */
    uint32_t reaped : 1;          /* reaped, invisible to userspace, but waiting for ref_count to go to zero */
    uint32_t notified : 1;        /* no-more-processes notification was sent via special port */
    uint32_t efficient : 1;       /* launchd has marked the coalition as efficient */
#if DEVELOPMENT || DEBUG
    uint32_t should_notify : 1;   /* should this coalition send notifications (default: yes) */
#endif

    queue_chain_t coalitions;     /* global list of coalitions */

    decl_lck_mtx_data(, lock)     /* Coalition lock. */

    /* put coalition type-specific structures here */
    union {
        struct i_resource_coalition r;
        struct i_jetsam_coalition   j;
    };
};
/*
 * register different coalition types:
 * these must be kept in the order specified in coalition.h
 */
static const struct coalition_type
s_coalition_types[COALITION_NUM_TYPES] = {
    {
        COALITION_TYPE_RESOURCE,
        1,
        i_coal_resource_init,
        i_coal_resource_dealloc,
        i_coal_resource_adopt_task,
        i_coal_resource_remove_task,
        i_coal_resource_set_taskrole,
        i_coal_resource_get_taskrole,
        i_coal_resource_iterate_tasks,
    },
    {
        COALITION_TYPE_JETSAM,
        1,
        i_coal_jetsam_init,
        i_coal_jetsam_dealloc,
        i_coal_jetsam_adopt_task,
        i_coal_jetsam_remove_task,
        i_coal_jetsam_set_taskrole,
        i_coal_jetsam_get_taskrole,
        i_coal_jetsam_iterate_tasks,
    },
};

#define coal_call(coal, func, ...) \
    (s_coalition_types[(coal)->type].func)(coal, ## __VA_ARGS__)
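
/*
 * Illustrative expansion (not part of the original source): for a coalition of
 * type COALITION_TYPE_RESOURCE,
 *
 *     coal_call(coal, adopt_task, task);
 *
 * expands to
 *
 *     (s_coalition_types[COALITION_TYPE_RESOURCE].adopt_task)(coal, task);
 *
 * i.e. it dispatches to i_coal_resource_adopt_task() through the per-type
 * operations table registered above.
 */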
#define coalition_lock(c)   do { lck_mtx_lock(&c->lock); } while (0)
#define coalition_unlock(c) do { lck_mtx_unlock(&c->lock); } while (0)
/*
 * Define the coalition type to track focal tasks.
 * On embedded, track them using jetsam coalitions since they have associated
 * thread groups which reflect this property as a flag (and pass it down to CLPC).
 * On non-embedded platforms, since not all coalitions have jetsam coalitions,
 * track focal counts on the resource coalition.
 */
#if CONFIG_EMBEDDED
#define COALITION_FOCAL_TASKS_ACCOUNTING  COALITION_TYPE_JETSAM
#else /* CONFIG_EMBEDDED */
#define COALITION_FOCAL_TASKS_ACCOUNTING  COALITION_TYPE_RESOURCE
#endif /* CONFIG_EMBEDDED */
static void
coalition_notify_user(uint64_t id, uint32_t flags)
{
    mach_port_t user_port;
    kern_return_t kr;

    kr = host_get_coalition_port(host_priv_self(), &user_port);
    if ((kr != KERN_SUCCESS) || !IPC_PORT_VALID(user_port)) {
        return;
    }

    coalition_notification(user_port, id, flags);
    ipc_port_release_send(user_port);
}
/*
 *
 * COALITION_TYPE_RESOURCE
 *
 */

static kern_return_t
i_coal_resource_init(coalition_t coal, boolean_t privileged)
{
    (void)privileged;
    assert(coal && coal->type == COALITION_TYPE_RESOURCE);
    coal->r.ledger = ledger_instantiate(task_ledger_template,
        LEDGER_CREATE_ACTIVE_ENTRIES);
    if (coal->r.ledger == NULL)
        return KERN_RESOURCE_SHORTAGE;

    queue_init(&coal->r.tasks);

    return KERN_SUCCESS;
}
static void
i_coal_resource_dealloc(coalition_t coal)
{
    assert(coal && coal->type == COALITION_TYPE_RESOURCE);
    ledger_dereference(coal->r.ledger);
}
static kern_return_t
i_coal_resource_adopt_task(coalition_t coal, task_t task)
{
    struct i_resource_coalition *cr;

    assert(coal && coal->type == COALITION_TYPE_RESOURCE);
    assert(queue_empty(&task->task_coalition[COALITION_TYPE_RESOURCE]));

    cr = &coal->r;
    cr->task_count++;

    if (cr->task_count < cr->dead_task_count) {
        panic("%s: coalition %p id:%llu type:%s task_count(%llu) < dead_task_count(%llu)",
            __func__, coal, coal->id, coal_type_str(coal->type),
            cr->task_count, cr->dead_task_count);
    }

    /* If moving from 0->1 active tasks */
    if (cr->task_count - cr->dead_task_count == 1) {
        cr->last_became_nonempty_time = mach_absolute_time();
    }

    /* put the task on the coalition's list of tasks */
    enqueue_tail(&cr->tasks, &task->task_coalition[COALITION_TYPE_RESOURCE]);

    coal_dbg("Added PID:%d to id:%llu, task_count:%llu, dead_count:%llu, nonempty_time:%llu",
        task_pid(task), coal->id, cr->task_count, cr->dead_task_count,
        cr->last_became_nonempty_time);

    return KERN_SUCCESS;
}
static kern_return_t
i_coal_resource_remove_task(coalition_t coal, task_t task)
{
    struct i_resource_coalition *cr;

    assert(coal && coal->type == COALITION_TYPE_RESOURCE);
    assert(task->coalition[COALITION_TYPE_RESOURCE] == coal);
    assert(!queue_empty(&task->task_coalition[COALITION_TYPE_RESOURCE]));

    /*
     * handle resource coalition accounting rollup for dead tasks
     */
    cr = &coal->r;
    cr->dead_task_count++;

    if (cr->task_count < cr->dead_task_count) {
        panic("%s: coalition %p id:%llu type:%s task_count(%llu) < dead_task_count(%llu)",
            __func__, coal, coal->id, coal_type_str(coal->type), cr->task_count, cr->dead_task_count);
    }

    /* If moving from 1->0 active tasks */
    if (cr->task_count - cr->dead_task_count == 0) {
        uint64_t last_time_nonempty = mach_absolute_time() - cr->last_became_nonempty_time;
        cr->last_became_nonempty_time = 0;
        cr->time_nonempty += last_time_nonempty;
    }

    /* Do not roll up for exec'd task or exec copy task */
    if (!task_is_exec_copy(task) && !task_did_exec(task)) {
        ledger_rollup(cr->ledger, task->ledger);
        cr->bytesread += task->task_io_stats->disk_reads.size;
        cr->byteswritten += task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size;
        cr->gpu_time += task_gpu_utilisation(task);
        cr->energy += task_energy(task);
        cr->logical_immediate_writes += task->task_immediate_writes;
        cr->logical_deferred_writes += task->task_deferred_writes;
        cr->logical_invalidated_writes += task->task_invalidated_writes;
        cr->logical_metadata_writes += task->task_metadata_writes;
        cr->cpu_ptime += task_cpu_ptime(task);
        task_update_cpu_time_qos_stats(task, cr->cpu_time_eqos, cr->cpu_time_rqos);
    }

    /* remove the task from the coalition's list */
    remqueue(&task->task_coalition[COALITION_TYPE_RESOURCE]);
    queue_chain_init(task->task_coalition[COALITION_TYPE_RESOURCE]);

    coal_dbg("removed PID:%d from id:%llu, task_count:%llu, dead_count:%llu",
        task_pid(task), coal->id, cr->task_count, cr->dead_task_count);

    return KERN_SUCCESS;
}
static kern_return_t
i_coal_resource_set_taskrole(__unused coalition_t coal,
    __unused task_t task, __unused int role)
{
    return KERN_SUCCESS;
}

static int
i_coal_resource_get_taskrole(__unused coalition_t coal, __unused task_t task)
{
    task_t t;

    assert(coal && coal->type == COALITION_TYPE_RESOURCE);

    qe_foreach_element(t, &coal->r.tasks, task_coalition[COALITION_TYPE_RESOURCE]) {
        if (t == task)
            return COALITION_TASKROLE_UNDEF;
    }

    return -1;
}
static void
i_coal_resource_iterate_tasks(coalition_t coal, void *ctx, void (*callback)(coalition_t, void *, task_t))
{
    task_t t;

    assert(coal && coal->type == COALITION_TYPE_RESOURCE);

    qe_foreach_element(t, &coal->r.tasks, task_coalition[COALITION_TYPE_RESOURCE])
        callback(coal, ctx, t);
}
kern_return_t
coalition_resource_usage_internal(coalition_t coal, struct coalition_resource_usage *cru_out)
{
    kern_return_t kr;
    ledger_amount_t credit, debit;
    int i;

    if (coal->type != COALITION_TYPE_RESOURCE)
        return KERN_INVALID_ARGUMENT;

    /* Return KERN_INVALID_ARGUMENT for Corpse coalition */
    for (i = 0; i < COALITION_NUM_TYPES; i++) {
        if (coal == corpse_coalition[i]) {
            return KERN_INVALID_ARGUMENT;
        }
    }

    ledger_t sum_ledger = ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES);
    if (sum_ledger == LEDGER_NULL)
        return KERN_RESOURCE_SHORTAGE;

    coalition_lock(coal);

    /*
     * Start with the coalition's ledger, which holds the totals from all
     * the tasks that have already exited.
     */
    ledger_rollup(sum_ledger, coal->r.ledger);
    uint64_t bytesread = coal->r.bytesread;
    uint64_t byteswritten = coal->r.byteswritten;
    uint64_t gpu_time = coal->r.gpu_time;
    uint64_t energy = coal->r.energy;
    uint64_t logical_immediate_writes = coal->r.logical_immediate_writes;
    uint64_t logical_deferred_writes = coal->r.logical_deferred_writes;
    uint64_t logical_invalidated_writes = coal->r.logical_invalidated_writes;
    uint64_t logical_metadata_writes = coal->r.logical_metadata_writes;
    int64_t cpu_time_billed_to_me = 0;
    int64_t cpu_time_billed_to_others = 0;
    int64_t energy_billed_to_me = 0;
    int64_t energy_billed_to_others = 0;
    uint64_t cpu_ptime = coal->r.cpu_ptime;
    uint64_t cpu_time_eqos[COALITION_NUM_THREAD_QOS_TYPES];
    memcpy(cpu_time_eqos, coal->r.cpu_time_eqos, sizeof(cpu_time_eqos));
    uint64_t cpu_time_rqos[COALITION_NUM_THREAD_QOS_TYPES];
    memcpy(cpu_time_rqos, coal->r.cpu_time_rqos, sizeof(cpu_time_rqos));

    /*
     * Add to that all the active tasks' ledgers. Tasks cannot deallocate
     * out from under us, since we hold the coalition lock.
     */
    task_t task;
    qe_foreach_element(task, &coal->r.tasks, task_coalition[COALITION_TYPE_RESOURCE]) {
        /*
         * Rolling up stats for exec copy task or exec'd task will lead to double accounting.
         * Cannot take task lock after taking coalition lock.
         */
        if (task_is_exec_copy(task) || task_did_exec(task)) {
            continue;
        }

        ledger_rollup(sum_ledger, task->ledger);
        bytesread += task->task_io_stats->disk_reads.size;
        byteswritten += task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size;
        gpu_time += task_gpu_utilisation(task);
        energy += task_energy(task);
        logical_immediate_writes += task->task_immediate_writes;
        logical_deferred_writes += task->task_deferred_writes;
        logical_invalidated_writes += task->task_invalidated_writes;
        logical_metadata_writes += task->task_metadata_writes;
        cpu_ptime += task_cpu_ptime(task);
        task_update_cpu_time_qos_stats(task, cpu_time_eqos, cpu_time_rqos);
    }

    kr = ledger_get_balance(sum_ledger, task_ledgers.cpu_time_billed_to_me, (int64_t *)&cpu_time_billed_to_me);
    if (kr != KERN_SUCCESS || cpu_time_billed_to_me < 0) {
        cpu_time_billed_to_me = 0;
    }

    kr = ledger_get_balance(sum_ledger, task_ledgers.cpu_time_billed_to_others, (int64_t *)&cpu_time_billed_to_others);
    if (kr != KERN_SUCCESS || cpu_time_billed_to_others < 0) {
        cpu_time_billed_to_others = 0;
    }

    kr = ledger_get_balance(sum_ledger, task_ledgers.energy_billed_to_me, (int64_t *)&energy_billed_to_me);
    if (kr != KERN_SUCCESS || energy_billed_to_me < 0) {
        energy_billed_to_me = 0;
    }

    kr = ledger_get_balance(sum_ledger, task_ledgers.energy_billed_to_others, (int64_t *)&energy_billed_to_others);
    if (kr != KERN_SUCCESS || energy_billed_to_others < 0) {
        energy_billed_to_others = 0;
    }

    /* collect information from the coalition itself */
    cru_out->tasks_started = coal->r.task_count;
    cru_out->tasks_exited = coal->r.dead_task_count;

    uint64_t time_nonempty = coal->r.time_nonempty;
    uint64_t last_became_nonempty_time = coal->r.last_became_nonempty_time;

    coalition_unlock(coal);

    /* Copy the totals out of sum_ledger */
    kr = ledger_get_entries(sum_ledger, task_ledgers.cpu_time,
        &credit, &debit);
    if (kr != KERN_SUCCESS) {
        credit = 0;
    }
    cru_out->cpu_time = credit;
    cru_out->cpu_time_billed_to_me = (uint64_t)cpu_time_billed_to_me;
    cru_out->cpu_time_billed_to_others = (uint64_t)cpu_time_billed_to_others;
    cru_out->energy_billed_to_me = (uint64_t)energy_billed_to_me;
    cru_out->energy_billed_to_others = (uint64_t)energy_billed_to_others;

    kr = ledger_get_entries(sum_ledger, task_ledgers.interrupt_wakeups,
        &credit, &debit);
    if (kr != KERN_SUCCESS) {
        credit = 0;
    }
    cru_out->interrupt_wakeups = credit;

    kr = ledger_get_entries(sum_ledger, task_ledgers.platform_idle_wakeups,
        &credit, &debit);
    if (kr != KERN_SUCCESS) {
        credit = 0;
    }
    cru_out->platform_idle_wakeups = credit;

    cru_out->bytesread = bytesread;
    cru_out->byteswritten = byteswritten;
    cru_out->gpu_time = gpu_time;
    cru_out->energy = energy;
    cru_out->logical_immediate_writes = logical_immediate_writes;
    cru_out->logical_deferred_writes = logical_deferred_writes;
    cru_out->logical_invalidated_writes = logical_invalidated_writes;
    cru_out->logical_metadata_writes = logical_metadata_writes;
    cru_out->cpu_ptime = cpu_ptime;
    cru_out->cpu_time_eqos_len = COALITION_NUM_THREAD_QOS_TYPES;
    memcpy(cru_out->cpu_time_eqos, cpu_time_eqos, sizeof(cru_out->cpu_time_eqos));
    ledger_dereference(sum_ledger);
    sum_ledger = LEDGER_NULL;

    if (last_became_nonempty_time) {
        time_nonempty += mach_absolute_time() - last_became_nonempty_time;
    }
    absolutetime_to_nanoseconds(time_nonempty, &cru_out->time_nonempty);

    return KERN_SUCCESS;
}
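
/*
 * Illustrative caller sketch (not from this file): a consumer that already
 * holds a reference on a resource coalition could gather its usage like so:
 *
 *     struct coalition_resource_usage cru;
 *     kern_return_t kr = coalition_resource_usage_internal(coal, &cru);
 *     if (kr == KERN_SUCCESS) {
 *         // cru now holds totals for tasks that already exited (rolled up
 *         // at exit) plus all currently live, non-exec-copy member tasks.
 *     }
 */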
/*
 *
 * COALITION_TYPE_JETSAM
 *
 */

static kern_return_t
i_coal_jetsam_init(coalition_t coal, boolean_t privileged)
{
    assert(coal && coal->type == COALITION_TYPE_JETSAM);
    (void)privileged;

    coal->j.leader = TASK_NULL;
    queue_head_init(coal->j.extensions);
    queue_head_init(coal->j.services);
    queue_head_init(coal->j.other);

    return KERN_SUCCESS;
}
static void
i_coal_jetsam_dealloc(__unused coalition_t coal)
{
    assert(coal && coal->type == COALITION_TYPE_JETSAM);

    /* the coalition should be completely clear at this point */
    assert(queue_empty(&coal->j.extensions));
    assert(queue_empty(&coal->j.services));
    assert(queue_empty(&coal->j.other));
    assert(coal->j.leader == TASK_NULL);
}
static kern_return_t
i_coal_jetsam_adopt_task(coalition_t coal, task_t task)
{
    struct i_jetsam_coalition *cj;
    assert(coal && coal->type == COALITION_TYPE_JETSAM);

    cj = &coal->j;

    assert(queue_empty(&task->task_coalition[COALITION_TYPE_JETSAM]));

    /* put each task initially in the "other" list */
    enqueue_tail(&cj->other, &task->task_coalition[COALITION_TYPE_JETSAM]);
    coal_dbg("coalition %lld adopted PID:%d as UNDEF",
        coal->id, task_pid(task));

    return KERN_SUCCESS;
}
static kern_return_t
i_coal_jetsam_remove_task(coalition_t coal, task_t task)
{
    assert(coal && coal->type == COALITION_TYPE_JETSAM);
    assert(task->coalition[COALITION_TYPE_JETSAM] == coal);

    coal_dbg("removing PID:%d from coalition id:%lld",
        task_pid(task), coal->id);

    if (task == coal->j.leader) {
        coal->j.leader = NULL;
        coal_dbg("    PID:%d was the leader!", task_pid(task));
    }

    assert(!queue_empty(&task->task_coalition[COALITION_TYPE_JETSAM]));

    /* remove the task from the specific coalition role queue */
    remqueue(&task->task_coalition[COALITION_TYPE_JETSAM]);
    queue_chain_init(task->task_coalition[COALITION_TYPE_JETSAM]);

    return KERN_SUCCESS;
}
static kern_return_t
i_coal_jetsam_set_taskrole(coalition_t coal, task_t task, int role)
{
    struct i_jetsam_coalition *cj;
    queue_t q = NULL;

    assert(coal && coal->type == COALITION_TYPE_JETSAM);
    assert(task->coalition[COALITION_TYPE_JETSAM] == coal);

    cj = &coal->j;

    switch (role) {
    case COALITION_TASKROLE_LEADER:
        coal_dbg("setting PID:%d as LEADER of %lld",
            task_pid(task), coal->id);
        if (cj->leader != TASK_NULL) {
            /* re-queue the existing leader onto the "other" list */
            coal_dbg("    re-queue existing leader (%d) as OTHER",
                task_pid(cj->leader));
            re_queue_tail(&cj->other, &cj->leader->task_coalition[COALITION_TYPE_JETSAM]);
        }
        /*
         * remove the task from the "other" list
         * (where it was put by default)
         */
        remqueue(&task->task_coalition[COALITION_TYPE_JETSAM]);
        queue_chain_init(task->task_coalition[COALITION_TYPE_JETSAM]);

        /* set the coalition leader */
        cj->leader = task;
        break;
    case COALITION_TASKROLE_XPC:
        coal_dbg("setting PID:%d as XPC in %lld",
            task_pid(task), coal->id);
        q = (queue_t)&cj->services;
        break;
    case COALITION_TASKROLE_EXT:
        coal_dbg("setting PID:%d as EXT in %lld",
            task_pid(task), coal->id);
        q = (queue_t)&cj->extensions;
        break;
    case COALITION_TASKROLE_NONE:
        /*
         * Tasks with a role of "none" should fall through to an
         * undefined role so long as the task is currently a member
         * of the coalition. This scenario can happen if a task is
         * killed (usually via jetsam) during exec.
         */
        if (task->coalition[COALITION_TYPE_JETSAM] != coal) {
            panic("%s: task %p attempting to set role %d "
                "in coalition %p to which it does not belong!", __func__, task, role, coal);
        }
        /* fall through */
    case COALITION_TASKROLE_UNDEF:
        coal_dbg("setting PID:%d as UNDEF in %lld",
            task_pid(task), coal->id);
        q = (queue_t)&cj->other;
        break;
    default:
        panic("%s: invalid role(%d) for task", __func__, role);
        return KERN_INVALID_ARGUMENT;
    }

    if (q)
        re_queue_tail(q, &task->task_coalition[COALITION_TYPE_JETSAM]);

    return KERN_SUCCESS;
}
static int
i_coal_jetsam_get_taskrole(coalition_t coal, task_t task)
{
    struct i_jetsam_coalition *cj;
    task_t t;

    assert(coal && coal->type == COALITION_TYPE_JETSAM);
    assert(task->coalition[COALITION_TYPE_JETSAM] == coal);

    cj = &coal->j;

    if (task == cj->leader)
        return COALITION_TASKROLE_LEADER;

    qe_foreach_element(t, &cj->services, task_coalition[COALITION_TYPE_JETSAM]) {
        if (t == task)
            return COALITION_TASKROLE_XPC;
    }

    qe_foreach_element(t, &cj->extensions, task_coalition[COALITION_TYPE_JETSAM]) {
        if (t == task)
            return COALITION_TASKROLE_EXT;
    }

    qe_foreach_element(t, &cj->other, task_coalition[COALITION_TYPE_JETSAM]) {
        if (t == task)
            return COALITION_TASKROLE_UNDEF;
    }

    /* task not in the coalition?! */
    return COALITION_TASKROLE_NONE;
}
static void
i_coal_jetsam_iterate_tasks(coalition_t coal, void *ctx, void (*callback)(coalition_t, void *, task_t))
{
    struct i_jetsam_coalition *cj;
    task_t t;

    assert(coal && coal->type == COALITION_TYPE_JETSAM);

    cj = &coal->j;

    if (cj->leader)
        callback(coal, ctx, cj->leader);

    qe_foreach_element(t, &cj->services, task_coalition[COALITION_TYPE_JETSAM])
        callback(coal, ctx, t);

    qe_foreach_element(t, &cj->extensions, task_coalition[COALITION_TYPE_JETSAM])
        callback(coal, ctx, t);

    qe_foreach_element(t, &cj->other, task_coalition[COALITION_TYPE_JETSAM])
        callback(coal, ctx, t);
}
/*
 *
 * Main Coalition implementation
 *
 */

/*
 * coalition_create_internal
 * Returns: New coalition object, referenced for the caller and unlocked.
 * Condition: coalitions_list_lock must be UNLOCKED.
 */
kern_return_t
coalition_create_internal(int type, int role, boolean_t privileged, coalition_t *out)
{
    kern_return_t kr;
    struct coalition *new_coal;

    if (type < 0 || type > COALITION_TYPE_MAX)
        return KERN_INVALID_ARGUMENT;

    new_coal = (struct coalition *)zalloc(coalition_zone);
    if (new_coal == COALITION_NULL)
        return KERN_RESOURCE_SHORTAGE;
    bzero(new_coal, sizeof(*new_coal));

    new_coal->type = type;
    new_coal->role = role;

    /* initialize type-specific resources */
    kr = coal_call(new_coal, init, privileged);
    if (kr != KERN_SUCCESS) {
        zfree(coalition_zone, new_coal);
        return kr;
    }

    /* One for caller, one for coalitions list */
    new_coal->ref_count = 2;

    new_coal->privileged = privileged ? TRUE : FALSE;
#if DEVELOPMENT || DEBUG
    new_coal->should_notify = 1;
#endif

    lck_mtx_init(&new_coal->lock, &coalitions_lck_grp, &coalitions_lck_attr);

    lck_mtx_lock(&coalitions_list_lock);
    new_coal->id = coalition_next_id++;
    enqueue_tail(&coalitions_q, &new_coal->coalitions);

    KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_NEW),
        new_coal->id, new_coal->type);
    lck_mtx_unlock(&coalitions_list_lock);

    coal_dbg("id:%llu, type:%s", new_coal->id, coal_type_str(new_coal->type));

    *out = new_coal;
    return KERN_SUCCESS;
}
/*
 * coalition_release
 * Condition: coalition must be UNLOCKED.
 */
void
coalition_release(coalition_t coal)
{
    /* TODO: This can be done with atomics. */
    coalition_lock(coal);
    coal->ref_count--;

#if COALITION_DEBUG
    uint32_t rc = coal->ref_count;
    uint32_t ac = coal->active_count;
#endif /* COALITION_DEBUG */

    coal_dbg("id:%llu type:%s ref_count:%u active_count:%u%s",
        coal->id, coal_type_str(coal->type), rc, ac,
        rc <= 0 ? ", will deallocate now" : "");

    if (coal->ref_count > 0) {
        coalition_unlock(coal);
        return;
    }

    assert(coal->termrequested);
    assert(coal->terminated);
    assert(coal->active_count == 0);
    assert(coal->reaped);
    assert(coal->focal_task_count == 0);
    assert(coal->nonfocal_task_count == 0);
    KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_FREE),
        coal->id, coal->type);

    coal_call(coal, dealloc);

    coalition_unlock(coal);

    lck_mtx_destroy(&coal->lock, &coalitions_lck_grp);

    zfree(coalition_zone, coal);
}
/*
 * coalition_find_by_id_internal
 * Returns: Coalition object with specified id, NOT referenced.
 *          If not found, returns COALITION_NULL.
 * Condition: coalitions_list_lock must be LOCKED.
 */
static coalition_t
coalition_find_by_id_internal(uint64_t coal_id)
{
    if (coal_id == 0) {
        return COALITION_NULL;
    }

    lck_mtx_assert(&coalitions_list_lock, LCK_MTX_ASSERT_OWNED);

    coalition_t coal;
    qe_foreach_element(coal, &coalitions_q, coalitions) {
        if (coal->id == coal_id) {
            return coal;
        }
    }
    return COALITION_NULL;
}

/*
 * coalition_find_by_id
 * Returns: Coalition object with specified id, referenced.
 * Condition: coalitions_list_lock must be UNLOCKED.
 */
coalition_t
coalition_find_by_id(uint64_t cid)
{
    if (cid == 0) {
        return COALITION_NULL;
    }

    lck_mtx_lock(&coalitions_list_lock);

    coalition_t coal = coalition_find_by_id_internal(cid);
    if (coal == COALITION_NULL) {
        lck_mtx_unlock(&coalitions_list_lock);
        return COALITION_NULL;
    }

    coalition_lock(coal);

    if (coal->reaped) {
        coalition_unlock(coal);
        lck_mtx_unlock(&coalitions_list_lock);
        return COALITION_NULL;
    }

    if (coal->ref_count == 0) {
        panic("resurrecting coalition %p id:%llu type:%s, active_count:%u\n",
            coal, coal->id, coal_type_str(coal->type), coal->active_count);
    }

    coal->ref_count++;
    uint32_t rc = coal->ref_count;

    coalition_unlock(coal);
    lck_mtx_unlock(&coalitions_list_lock);

    coal_dbg("id:%llu type:%s ref_count:%u",
        coal->id, coal_type_str(coal->type), rc);

    return coal;
}
/*
 * coalition_find_and_activate_by_id
 * Returns: Coalition object with specified id, referenced, and activated.
 * Condition: coalitions_list_lock must be UNLOCKED.
 * This is the function to use when putting a 'new' thing into a coalition,
 * like posix_spawn of an XPC service by launchd.
 * See also coalition_extend_active.
 */
coalition_t
coalition_find_and_activate_by_id(uint64_t cid)
{
    if (cid == 0) {
        return COALITION_NULL;
    }

    lck_mtx_lock(&coalitions_list_lock);

    coalition_t coal = coalition_find_by_id_internal(cid);
    if (coal == COALITION_NULL) {
        lck_mtx_unlock(&coalitions_list_lock);
        return COALITION_NULL;
    }

    coalition_lock(coal);

    if (coal->reaped || coal->terminated) {
        /* Too late to put something new into this coalition, it's
         * already on its way out the door */
        coalition_unlock(coal);
        lck_mtx_unlock(&coalitions_list_lock);
        return COALITION_NULL;
    }

    if (coal->ref_count == 0) {
        panic("resurrecting coalition %p id:%llu type:%s, active_count:%u\n",
            coal, coal->id, coal_type_str(coal->type), coal->active_count);
    }

    coal->ref_count++;
    coal->active_count++;

    uint32_t rc = coal->ref_count;
    uint32_t ac = coal->active_count;

    coalition_unlock(coal);
    lck_mtx_unlock(&coalitions_list_lock);

    coal_dbg("id:%llu type:%s ref_count:%u, active_count:%u",
        coal->id, coal_type_str(coal->type), rc, ac);

    return coal;
}
uint64_t
coalition_id(coalition_t coal)
{
    return coal->id;
}
void
task_coalition_ids(task_t task, uint64_t ids[COALITION_NUM_TYPES])
{
    int i;
    for (i = 0; i < COALITION_NUM_TYPES; i++) {
        if (task->coalition[i])
            ids[i] = task->coalition[i]->id;
        else
            ids[i] = 0;
    }
}
void
task_coalition_roles(task_t task, int roles[COALITION_NUM_TYPES])
{
    int i;
    memset(roles, 0, COALITION_NUM_TYPES * sizeof(roles[0]));

    for (i = 0; i < COALITION_NUM_TYPES; i++) {
        if (task->coalition[i]) {
            coalition_lock(task->coalition[i]);
            roles[i] = coal_call(task->coalition[i],
                get_taskrole, task);
            coalition_unlock(task->coalition[i]);
        } else {
            roles[i] = COALITION_TASKROLE_NONE;
        }
    }
}
int
coalition_type(coalition_t coal)
{
    return coal->type;
}
boolean_t
coalition_term_requested(coalition_t coal)
{
    return coal->termrequested;
}

boolean_t
coalition_is_terminated(coalition_t coal)
{
    return coal->terminated;
}

boolean_t
coalition_is_reaped(coalition_t coal)
{
    return coal->reaped;
}

boolean_t
coalition_is_privileged(coalition_t coal)
{
    return coal->privileged || unrestrict_coalition_syscalls;
}
boolean_t
task_is_in_privileged_coalition(task_t task, int type)
{
    if (type < 0 || type > COALITION_TYPE_MAX)
        return FALSE;
    if (unrestrict_coalition_syscalls)
        return TRUE;
    if (!task->coalition[type])
        return FALSE;
    return task->coalition[type]->privileged;
}
void task_coalition_update_gpu_stats(task_t task, uint64_t gpu_ns_delta)
{
    coalition_t coal;

    assert(task != TASK_NULL);
    if (gpu_ns_delta == 0)
        return;

    coal = task->coalition[COALITION_TYPE_RESOURCE];
    assert(coal != COALITION_NULL);

    coalition_lock(coal);
    coal->r.gpu_time += gpu_ns_delta;
    coalition_unlock(coal);
}
boolean_t task_coalition_adjust_focal_count(task_t task, int count, uint32_t *new_count)
{
    coalition_t coal = task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING];
    if (coal == COALITION_NULL)
        return FALSE;

    *new_count = hw_atomic_add(&coal->focal_task_count, count);
    assert(*new_count != UINT32_MAX);
    return TRUE;
}
uint32_t task_coalition_focal_count(task_t task)
{
    coalition_t coal = task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING];
    if (coal == COALITION_NULL)
        return 0;

    return coal->focal_task_count;
}
boolean_t task_coalition_adjust_nonfocal_count(task_t task, int count, uint32_t *new_count)
{
    coalition_t coal = task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING];
    if (coal == COALITION_NULL)
        return FALSE;

    *new_count = hw_atomic_add(&coal->nonfocal_task_count, count);
    assert(*new_count != UINT32_MAX);
    return TRUE;
}
uint32_t task_coalition_nonfocal_count(task_t task)
{
    coalition_t coal = task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING];
    if (coal == COALITION_NULL)
        return 0;

    return coal->nonfocal_task_count;
}
void coalition_set_efficient(coalition_t coal)
{
    coalition_lock(coal);
    coal->efficient = TRUE;
    coalition_unlock(coal);
}
void coalition_for_each_task(coalition_t coal, void *ctx,
    void (*callback)(coalition_t, void *, task_t))
{
    assert(coal != COALITION_NULL);

    coal_dbg("iterating tasks in coalition %p id:%llu type:%s, active_count:%u",
        coal, coal->id, coal_type_str(coal->type), coal->active_count);

    coalition_lock(coal);
    coal_call(coal, iterate_tasks, ctx, callback);
    coalition_unlock(coal);
}
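
/*
 * Illustrative usage sketch (not part of the original source): counting the
 * members of a coalition with a hypothetical callback:
 *
 *     static void
 *     coal_count_cb(coalition_t coal __unused, void *ctx, task_t task __unused)
 *     {
 *         (*(int *)ctx)++;
 *     }
 *
 *     int n = 0;
 *     coalition_for_each_task(coal, &n, coal_count_cb);
 *
 * The callback runs with the coalition lock held, so it must not block or
 * re-enter coalition code that takes the same lock.
 */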
void
coalition_remove_active(coalition_t coal)
{
    coalition_lock(coal);

    assert(!coal->reaped);
    assert(coal->active_count > 0);

    coal->active_count--;

    boolean_t do_notify = FALSE;
    uint64_t notify_id = 0;
    uint32_t notify_flags = 0;
    if (coal->termrequested && coal->active_count == 0) {
        /* We only notify once, when active_count reaches zero.
         * We just decremented, so if it reached zero, we mustn't have
         * notified already.
         */
        assert(!coal->terminated);
        coal->terminated = TRUE;

        assert(!coal->notified);

        coal->notified = TRUE;
#if DEVELOPMENT || DEBUG
        do_notify = coal->should_notify;
#else
        do_notify = TRUE;
#endif
        notify_id = coal->id;
    }

    uint64_t cid = coal->id;
    uint32_t rc = coal->ref_count;
    int ac = coal->active_count;
    int ct = coal->type;

    coalition_unlock(coal);

    coal_dbg("id:%llu type:%s ref_count:%u, active_count:%u,%s",
        cid, coal_type_str(ct), rc, ac, do_notify ? " NOTIFY" : " ");

    if (do_notify)
        coalition_notify_user(notify_id, notify_flags);
}
/* Used for kernel_task, launchd, launchd's early boot tasks... */
kern_return_t
coalitions_adopt_init_task(task_t task)
{
    kern_return_t kr;
    kr = coalitions_adopt_task(init_coalition, task);
    if (kr != KERN_SUCCESS) {
        panic("failed to adopt task %p into default coalition: %d", task, kr);
    }
    return kr;
}

/* Used for forked corpses. */
kern_return_t
coalitions_adopt_corpse_task(task_t task)
{
    kern_return_t kr;
    kr = coalitions_adopt_task(corpse_coalition, task);
    if (kr != KERN_SUCCESS) {
        panic("failed to adopt task %p into corpse coalition: %d", task, kr);
    }
    return kr;
}
/*
 * coalition_adopt_task_internal
 * Condition: Coalition must be referenced and unlocked. Will fail if coalition
 * is already terminated.
 */
static kern_return_t
coalition_adopt_task_internal(coalition_t coal, task_t task)
{
    kern_return_t kr;

    if (task->coalition[coal->type]) {
        return KERN_ALREADY_IN_SET;
    }

    coalition_lock(coal);

    if (coal->reaped || coal->terminated) {
        coalition_unlock(coal);
        return KERN_TERMINATED;
    }

    kr = coal_call(coal, adopt_task, task);
    if (kr != KERN_SUCCESS)
        goto out_unlock;

    coal->active_count++;
    coal->ref_count++;

    task->coalition[coal->type] = coal;

out_unlock:
    (void)coal; /* need expression after label */
    uint64_t cid = coal->id;
    uint32_t rc = coal->ref_count;
    uint32_t ct = coal->type;

    if (get_task_uniqueid(task) != UINT64_MAX) {
        /* On 32-bit targets, uniqueid will get truncated to 32 bits */
        KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_ADOPT),
            coal->id, get_task_uniqueid(task));
    }

    coalition_unlock(coal);

    coal_dbg("task:%d, id:%llu type:%s ref_count:%u, kr=%d",
        task_pid(task), cid, coal_type_str(ct), rc, kr);
    return kr;
}
static kern_return_t
coalition_remove_task_internal(task_t task, int type)
{
    kern_return_t kr;

    coalition_t coal = task->coalition[type];

    if (!coal)
        return KERN_SUCCESS;

    assert(coal->type == (uint32_t)type);

    coalition_lock(coal);

    kr = coal_call(coal, remove_task, task);

    uint64_t cid = coal->id;
    uint32_t rc = coal->ref_count;
    int ac = coal->active_count;
    int ct = coal->type;

    KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_REMOVE),
        coal->id, get_task_uniqueid(task));
    coalition_unlock(coal);

    coal_dbg("id:%llu type:%s ref_count:%u, active_count:%u, kr=%d",
        cid, coal_type_str(ct), rc, ac, kr);

    coalition_remove_active(coal);

    return kr;
}
/*
 * coalitions_adopt_task
 * Condition: All coalitions must be referenced and unlocked.
 * Will fail if any coalition is already terminated.
 */
kern_return_t
coalitions_adopt_task(coalition_t *coals, task_t task)
{
    int i;
    kern_return_t kr = KERN_SUCCESS;

    if (!coals || coals[COALITION_TYPE_RESOURCE] == COALITION_NULL)
        return KERN_INVALID_ARGUMENT;

    /* verify that the incoming coalitions are what they say they are */
    for (i = 0; i < COALITION_NUM_TYPES; i++)
        if (coals[i] && coals[i]->type != (uint32_t)i)
            return KERN_INVALID_ARGUMENT;

    for (i = 0; i < COALITION_NUM_TYPES; i++) {
        kr = KERN_SUCCESS;
        if (coals[i])
            kr = coalition_adopt_task_internal(coals[i], task);
        if (kr != KERN_SUCCESS) {
            /* dis-associate any coalitions that just adopted this task */
            while (--i >= 0) {
                if (task->coalition[i])
                    coalition_remove_task_internal(task, i);
            }
            break;
        }
    }
    return kr;
}
/*
 * coalitions_remove_task
 * Condition: task must be referenced and UNLOCKED; all task's coalitions must be UNLOCKED
 */
kern_return_t
coalitions_remove_task(task_t task)
{
    kern_return_t kr = KERN_SUCCESS;
    int i;

    for (i = 0; i < COALITION_NUM_TYPES; i++) {
        kr = coalition_remove_task_internal(task, i);
        assert(kr == KERN_SUCCESS);
    }

    return kr;
}
/*
 * task_release_coalitions
 * helper function to release references to all coalitions in which
 * 'task' is a member.
 */
void
task_release_coalitions(task_t task)
{
    int i;
    for (i = 0; i < COALITION_NUM_TYPES; i++) {
        if (task->coalition[i]) {
            coalition_release(task->coalition[i]);
        } else if (i == COALITION_TYPE_RESOURCE) {
            panic("deallocating task %p was not a member of a resource coalition", task);
        }
    }
}
/*
 * coalitions_set_roles
 * for each type of coalition, if the task is a member of a coalition of
 * that type (given in the coalitions parameter) then set the role of
 * the task within that coalition.
 */
kern_return_t coalitions_set_roles(coalition_t coalitions[COALITION_NUM_TYPES],
    task_t task, int roles[COALITION_NUM_TYPES])
{
    kern_return_t kr = KERN_SUCCESS;
    int i;

    for (i = 0; i < COALITION_NUM_TYPES; i++) {
        if (!coalitions[i])
            continue;
        coalition_lock(coalitions[i]);
        kr = coal_call(coalitions[i], set_taskrole, task, roles[i]);
        coalition_unlock(coalitions[i]);
        assert(kr == KERN_SUCCESS);
    }

    return kr;
}
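
/*
 * Illustrative call sequence (not from the original source): spawn-time code
 * that has already looked up and activated the target coalitions would
 * typically adopt first and then assign roles, e.g. (names hypothetical):
 *
 *     coalition_t coals[COALITION_NUM_TYPES] = { resource_coal, jetsam_coal };
 *     int roles[COALITION_NUM_TYPES] = { COALITION_TASKROLE_NONE, COALITION_TASKROLE_XPC };
 *
 *     if (coalitions_adopt_task(coals, task) == KERN_SUCCESS)
 *         (void)coalitions_set_roles(coals, task, roles);
 */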
/*
 * coalition_request_terminate_internal
 * Condition: Coalition must be referenced and UNLOCKED.
 */
kern_return_t
coalition_request_terminate_internal(coalition_t coal)
{
    assert(coal->type >= 0 && coal->type <= COALITION_TYPE_MAX);

    if (coal == init_coalition[coal->type]) {
        return KERN_DEFAULT_SET;
    }

    coalition_lock(coal);

    if (coal->reaped) {
        coalition_unlock(coal);
        return KERN_INVALID_NAME;
    }

    if (coal->terminated || coal->termrequested) {
        coalition_unlock(coal);
        return KERN_TERMINATED;
    }

    coal->termrequested = TRUE;

    boolean_t do_notify = FALSE;
    uint64_t note_id = 0;
    uint32_t note_flags = 0;

    if (coal->active_count == 0) {
        /*
         * We only notify once, when active_count reaches zero.
         * We just set termrequested. If the active count
         * was already at zero (tasks died before we could request
         * a termination notification), we should notify.
         */
        assert(!coal->terminated);
        coal->terminated = TRUE;

        assert(!coal->notified);

        coal->notified = TRUE;
#if DEVELOPMENT || DEBUG
        do_notify = coal->should_notify;
#else
        do_notify = TRUE;
#endif
        note_id = coal->id;
    }

    coalition_unlock(coal);

    if (do_notify)
        coalition_notify_user(note_id, note_flags);

    return KERN_SUCCESS;
}
/*
 * coalition_reap_internal
 * Condition: Coalition must be referenced and UNLOCKED.
 */
kern_return_t
coalition_reap_internal(coalition_t coal)
{
    assert(coal->type <= COALITION_TYPE_MAX);

    if (coal == init_coalition[coal->type]) {
        return KERN_DEFAULT_SET;
    }

    coalition_lock(coal);
    if (coal->reaped) {
        coalition_unlock(coal);
        return KERN_TERMINATED;
    }
    if (!coal->terminated) {
        coalition_unlock(coal);
        return KERN_FAILURE;
    }
    assert(coal->termrequested);
    if (coal->active_count > 0) {
        coalition_unlock(coal);
        return KERN_FAILURE;
    }

    coal->reaped = TRUE;

    /* Caller, launchd, and coalitions list should each have a reference */
    assert(coal->ref_count > 2);

    coalition_unlock(coal);

    lck_mtx_lock(&coalitions_list_lock);
    remqueue(&coal->coalitions);
    lck_mtx_unlock(&coalitions_list_lock);

    /* Release the list's reference and launchd's reference. */
    coalition_release(coal);
    coalition_release(coal);

    return KERN_SUCCESS;
}
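
/*
 * Illustrative lifecycle sketch (not part of the original source), tying the
 * routines above together:
 *
 *     coalition_t c;
 *     coalition_create_internal(COALITION_TYPE_JETSAM, COALITION_ROLE_SYSTEM, FALSE, &c);
 *     // ...tasks are adopted and exit; active_count rises and falls...
 *     coalition_request_terminate_internal(c);  // forbid new members once empty
 *     coalition_reap_internal(c);               // drop from the global list when terminated and idle
 *     coalition_release(c);                     // drop the caller's reference
 */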
#if DEVELOPMENT || DEBUG
int coalition_should_notify(coalition_t coal)
{
    int should;

    if (!coal)
        return -1;

    coalition_lock(coal);
    should = coal->should_notify;
    coalition_unlock(coal);

    return should;
}

void coalition_set_notify(coalition_t coal, int notify)
{
    if (!coal)
        return;

    coalition_lock(coal);
    coal->should_notify = !!notify;
    coalition_unlock(coal);
}
#endif /* DEVELOPMENT || DEBUG */
void
coalitions_init(void)
{
    kern_return_t kr;
    int i;
    const struct coalition_type *ctype;

    coalition_zone = zinit(
        sizeof(struct coalition),
        CONFIG_COALITION_MAX * sizeof(struct coalition),
        COALITION_CHUNK * sizeof(struct coalition),
        "coalitions");
    zone_change(coalition_zone, Z_NOENCRYPT, TRUE);
    queue_head_init(coalitions_q);

    if (!PE_parse_boot_argn("unrestrict_coalition_syscalls", &unrestrict_coalition_syscalls,
        sizeof(unrestrict_coalition_syscalls))) {
        unrestrict_coalition_syscalls = 0;
    }

    if (!PE_parse_boot_argn("tg_adaptive", &merge_adaptive_coalitions,
        sizeof(merge_adaptive_coalitions))) {
        merge_adaptive_coalitions = 0;
    }

    lck_grp_attr_setdefault(&coalitions_lck_grp_attr);
    lck_grp_init(&coalitions_lck_grp, "coalition", &coalitions_lck_grp_attr);
    lck_attr_setdefault(&coalitions_lck_attr);
    lck_mtx_init(&coalitions_list_lock, &coalitions_lck_grp, &coalitions_lck_attr);

    init_task_ledgers();

    for (i = 0, ctype = &s_coalition_types[0]; i < COALITION_NUM_TYPES; ctype++, i++) {
        /* verify the entry in the global coalition types array */
        if (ctype->type != i ||
            !ctype->init ||
            !ctype->dealloc ||
            !ctype->adopt_task ||
            !ctype->remove_task) {
            panic("%s: Malformed coalition type %s(%d) in slot for type:%s(%d)",
                __func__, coal_type_str(ctype->type), ctype->type, coal_type_str(i), i);
        }
        if (!ctype->has_default)
            continue;
        kr = coalition_create_internal(ctype->type, COALITION_ROLE_SYSTEM, TRUE, &init_coalition[ctype->type]);
        if (kr != KERN_SUCCESS)
            panic("%s: could not create init %s coalition: kr:%d",
                __func__, coal_type_str(i), kr);
        kr = coalition_create_internal(ctype->type, COALITION_ROLE_SYSTEM, FALSE, &corpse_coalition[ctype->type]);
        if (kr != KERN_SUCCESS)
            panic("%s: could not create corpse %s coalition: kr:%d",
                __func__, coal_type_str(i), kr);
    }

    /* "Leak" our reference to the global object */
}
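
/*
 * Illustrative note (not from the original source): the two boot-args parsed
 * above come from the kernel command line and default to 0 when absent, e.g.
 *
 *     nvram boot-args="unrestrict_coalition_syscalls=1 tg_adaptive=1"
 *
 * unrestrict_coalition_syscalls relaxes the privilege checks performed in
 * coalition_is_privileged() and task_is_in_privileged_coalition() above.
 */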
/*
 *
 * BSD Kernel interface functions
 *
 */

static void coalition_fill_procinfo(struct coalition *coal,
    struct procinfo_coalinfo *coalinfo)
{
    coalinfo->coalition_id = coal->id;
    coalinfo->coalition_type = coal->type;
    coalinfo->coalition_tasks = coalition_get_task_count(coal);
}
int coalitions_get_list(int type, struct procinfo_coalinfo *coal_list, int list_sz)
{
    int ncoals = 0;
    struct coalition *coal;

    lck_mtx_lock(&coalitions_list_lock);
    qe_foreach_element(coal, &coalitions_q, coalitions) {
        if (!coal->reaped && (type < 0 || type == (int)coal->type)) {
            if (coal_list && ncoals < list_sz)
                coalition_fill_procinfo(coal, &coal_list[ncoals]);
            ++ncoals;
        }
    }
    lck_mtx_unlock(&coalitions_list_lock);

    return ncoals;
}
/*
 * Jetsam coalition interface
 */
boolean_t coalition_is_leader(task_t task, int coal_type, coalition_t *coal)
{
    coalition_t c;
    boolean_t ret;

    if (coal) /* handle the error cases gracefully */
        *coal = COALITION_NULL;

    if (!task)
        return FALSE;

    if (coal_type > COALITION_TYPE_MAX)
        return FALSE;

    c = task->coalition[coal_type];
    if (!c)
        return FALSE;

    assert((int)c->type == coal_type);

    coalition_lock(c);

    if (coal)
        *coal = c;

    ret = FALSE;
    if (c->type == COALITION_TYPE_JETSAM && c->j.leader == task)
        ret = TRUE;

    coalition_unlock(c);

    return ret;
}
kern_return_t coalition_iterate_stackshot(coalition_iterate_fn_t callout, void *arg, uint32_t coalition_type)
{
    coalition_t coal;
    int i = 0;

    qe_foreach_element(coal, &coalitions_q, coalitions) {
        if (coal == NULL || !ml_validate_nofault((vm_offset_t)coal, sizeof(struct coalition)))
            return KERN_FAILURE;

        if (coalition_type == coal->type)
            callout(arg, i++, coal);
    }

    return KERN_SUCCESS;
}
task_t kdp_coalition_get_leader(coalition_t coal)
{
    if (!coal)
        return TASK_NULL;

    if (coal->type == COALITION_TYPE_JETSAM) {
        return coal->j.leader;
    }
    return TASK_NULL;
}
task_t coalition_get_leader(coalition_t coal)
{
    task_t leader = TASK_NULL;

    if (!coal)
        return TASK_NULL;

    coalition_lock(coal);
    if (coal->type != COALITION_TYPE_JETSAM)
        goto out_unlock;

    leader = coal->j.leader;
    if (leader != TASK_NULL)
        task_reference(leader);

out_unlock:
    coalition_unlock(coal);

    return leader;
}
int coalition_get_task_count(coalition_t coal)
{
    int ntasks = 0;
    struct queue_entry *qe;

    if (!coal)
        return 0;

    coalition_lock(coal);
    switch (coal->type) {
    case COALITION_TYPE_RESOURCE:
        qe_foreach(qe, &coal->r.tasks)
            ntasks++;
        break;
    case COALITION_TYPE_JETSAM:
        if (coal->j.leader)
            ntasks++;
        qe_foreach(qe, &coal->j.other)
            ntasks++;
        qe_foreach(qe, &coal->j.extensions)
            ntasks++;
        qe_foreach(qe, &coal->j.services)
            ntasks++;
        break;
    default:
        break;
    }
    coalition_unlock(coal);

    return ntasks;
}
static uint64_t i_get_list_footprint(queue_t list, int type, int *ntasks)
{
    task_t task;
    uint64_t bytes = 0;

    qe_foreach_element(task, list, task_coalition[type]) {
        bytes += get_task_phys_footprint(task);
        coal_dbg("    [%d] task_pid:%d, type:%d, footprint:%lld",
            *ntasks, task_pid(task), type, bytes);
        *ntasks += 1;
    }

    return bytes;
}
uint64_t coalition_get_page_count(coalition_t coal, int *ntasks)
{
    uint64_t bytes = 0;
    int num_tasks = 0;

    if (ntasks)
        *ntasks = 0;
    if (!coal)
        return bytes;

    coalition_lock(coal);

    switch (coal->type) {
    case COALITION_TYPE_RESOURCE:
        bytes += i_get_list_footprint(&coal->r.tasks, COALITION_TYPE_RESOURCE, &num_tasks);
        break;
    case COALITION_TYPE_JETSAM:
        if (coal->j.leader) {
            bytes += get_task_phys_footprint(coal->j.leader);
            num_tasks = 1;
        }
        bytes += i_get_list_footprint(&coal->j.extensions, COALITION_TYPE_JETSAM, &num_tasks);
        bytes += i_get_list_footprint(&coal->j.services, COALITION_TYPE_JETSAM, &num_tasks);
        bytes += i_get_list_footprint(&coal->j.other, COALITION_TYPE_JETSAM, &num_tasks);
        break;
    default:
        break;
    }

    coalition_unlock(coal);

    if (ntasks)
        *ntasks = num_tasks;

    return bytes / PAGE_SIZE_64;
}
struct coal_sort_s {
    int pid;
    int usr_order;
    uint64_t bytes;
};
/*
 * return < 0 for a < b
 *          0 for a == b
 *        > 0 for a > b
 */
typedef int (*cmpfunc_t)(const void *a, const void *b);

extern void
qsort(void *a, size_t n, size_t es, cmpfunc_t cmp);
static int dflt_cmp(const void *a, const void *b)
{
    const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
    const struct coal_sort_s *csB = (const struct coal_sort_s *)b;

    /*
     * if both A and B are equal, use a memory descending sort
     */
    if (csA->usr_order == csB->usr_order)
        return (int)((int64_t)csB->bytes - (int64_t)csA->bytes);

    /* otherwise, return the relationship between user specified orders */
    return (csA->usr_order - csB->usr_order);
}
static int mem_asc_cmp(const void *a, const void *b)
{
    const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
    const struct coal_sort_s *csB = (const struct coal_sort_s *)b;

    return (int)((int64_t)csA->bytes - (int64_t)csB->bytes);
}
static int mem_dec_cmp(const void *a, const void *b)
{
    const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
    const struct coal_sort_s *csB = (const struct coal_sort_s *)b;

    return (int)((int64_t)csB->bytes - (int64_t)csA->bytes);
}
static int usr_asc_cmp(const void *a, const void *b)
{
    const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
    const struct coal_sort_s *csB = (const struct coal_sort_s *)b;

    return (csA->usr_order - csB->usr_order);
}
static int usr_dec_cmp(const void *a, const void *b)
{
    const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
    const struct coal_sort_s *csB = (const struct coal_sort_s *)b;

    return (csB->usr_order - csA->usr_order);
}
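
/*
 * Illustrative example (not part of the original source): given two entries
 * { pid=1, usr_order=0, bytes=300 } and { pid=2, usr_order=0, bytes=700 },
 * dflt_cmp() sees equal usr_order values and falls back to the
 * memory-descending comparison, returning a positive value so that qsort()
 * places pid 2 (the larger footprint) first.
 */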
/* avoid dynamic allocation in this path */
#define MAX_SORTED_PIDS 80
static int coalition_get_sort_list(coalition_t coal, int sort_order, queue_t list,
    struct coal_sort_s *sort_array, int array_sz)
{
    int ntasks = 0;
    task_t task;

    assert(sort_array != NULL);

    if (array_sz <= 0)
        return 0;

    if (!list) {
        /*
         * this function will only be called with a NULL
         * list for JETSAM-type coalitions, and is intended
         * to investigate the leader process
         */
        if (coal->type != COALITION_TYPE_JETSAM ||
            coal->j.leader == TASK_NULL)
            return 0;
        sort_array[0].pid = task_pid(coal->j.leader);
        switch (sort_order) {
        case COALITION_SORT_DEFAULT:
            sort_array[0].usr_order = 0;
            /* fall-through */
        case COALITION_SORT_MEM_ASC:
        case COALITION_SORT_MEM_DEC:
            sort_array[0].bytes = get_task_phys_footprint(coal->j.leader);
            break;
        case COALITION_SORT_USER_ASC:
        case COALITION_SORT_USER_DEC:
            sort_array[0].usr_order = 0;
            break;
        default:
            break;
        }
        return 1;
    }

    qe_foreach_element(task, list, task_coalition[coal->type]) {
        if (ntasks >= array_sz) {
            printf("WARNING: more than %d pids in coalition %llu\n",
                MAX_SORTED_PIDS, coal->id);
            break;
        }

        sort_array[ntasks].pid = task_pid(task);

        switch (sort_order) {
        case COALITION_SORT_DEFAULT:
            sort_array[ntasks].usr_order = 0;
            /* fall-through */
        case COALITION_SORT_MEM_ASC:
        case COALITION_SORT_MEM_DEC:
            sort_array[ntasks].bytes = get_task_phys_footprint(task);
            break;
        case COALITION_SORT_USER_ASC:
        case COALITION_SORT_USER_DEC:
            sort_array[ntasks].usr_order = 0;
            break;
        default:
            break;
        }

        ntasks++;
    }

    return ntasks;
}
int coalition_get_pid_list(coalition_t coal, uint32_t rolemask, int sort_order,
    int *pid_list, int list_sz)
{
    struct i_jetsam_coalition *cj;
    int ntasks = 0;
    cmpfunc_t cmp_func = NULL;
    struct coal_sort_s sort_array[MAX_SORTED_PIDS] = { {0, 0, 0} }; /* keep to < 2k */

    if (!coal ||
        !(rolemask & COALITION_ROLEMASK_ALLROLES) ||
        !pid_list || list_sz < 1) {
        coal_dbg("Invalid parameters: coal:%p, type:%d, rolemask:0x%x, "
            "pid_list:%p, list_sz:%d", coal, coal ? coal->type : -1,
            rolemask, pid_list, list_sz);
        return -EINVAL;
    }

    switch (sort_order) {
    case COALITION_SORT_NOSORT:
        cmp_func = NULL;
        break;
    case COALITION_SORT_DEFAULT:
        cmp_func = dflt_cmp;
        break;
    case COALITION_SORT_MEM_ASC:
        cmp_func = mem_asc_cmp;
        break;
    case COALITION_SORT_MEM_DEC:
        cmp_func = mem_dec_cmp;
        break;
    case COALITION_SORT_USER_ASC:
        cmp_func = usr_asc_cmp;
        break;
    case COALITION_SORT_USER_DEC:
        cmp_func = usr_dec_cmp;
        break;
    default:
        return -ENOTSUP;
    }

    coalition_lock(coal);

    if (coal->type == COALITION_TYPE_RESOURCE) {
        ntasks += coalition_get_sort_list(coal, sort_order, &coal->r.tasks,
            sort_array, MAX_SORTED_PIDS);
        goto unlock_coal;
    }

    cj = &coal->j;

    if (rolemask & COALITION_ROLEMASK_UNDEF)
        ntasks += coalition_get_sort_list(coal, sort_order, &cj->other,
            sort_array + ntasks,
            MAX_SORTED_PIDS - ntasks);

    if (rolemask & COALITION_ROLEMASK_XPC)
        ntasks += coalition_get_sort_list(coal, sort_order, &cj->services,
            sort_array + ntasks,
            MAX_SORTED_PIDS - ntasks);

    if (rolemask & COALITION_ROLEMASK_EXT)
        ntasks += coalition_get_sort_list(coal, sort_order, &cj->extensions,
            sort_array + ntasks,
            MAX_SORTED_PIDS - ntasks);

    if (rolemask & COALITION_ROLEMASK_LEADER)
        ntasks += coalition_get_sort_list(coal, sort_order, NULL,
            sort_array + ntasks,
            MAX_SORTED_PIDS - ntasks);

unlock_coal:
    coalition_unlock(coal);

    /* sort based on the chosen criterion (no sense sorting 1 item) */
    if (cmp_func && ntasks > 1)
        qsort(sort_array, ntasks, sizeof(struct coal_sort_s), cmp_func);

    for (int i = 0; i < ntasks; i++) {
        if (i >= list_sz)
            break;
        coal_dbg("    [%d] PID:%d, footprint:%lld, usr_order:%d",
            i, sort_array[i].pid, sort_array[i].bytes,
            sort_array[i].usr_order);
        pid_list[i] = sort_array[i].pid;