/*
 * Copyright (c) 2013-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/notify.h>
#include <ipc/ipc_types.h>
#include <ipc/ipc_importance.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_voucher.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_tt.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/zalloc.h>
#include <kern/queue.h>
#include <kern/task.h>
#include <kern/policy_internal.h>

#include <sys/kdebug.h>

#include <mach/mach_voucher_attr_control.h>
#include <mach/machine/sdt.h>
extern int proc_pid(void *);
extern int proc_selfpid(void);
extern uint64_t proc_uniqueid(void *p);
extern char *proc_name_address(void *p);
/*
 * Globals for delayed boost drop processing.
 */
static queue_head_t ipc_importance_delayed_drop_queue;
static thread_call_t ipc_importance_delayed_drop_call;
static uint64_t ipc_importance_delayed_drop_timestamp;
static boolean_t ipc_importance_delayed_drop_call_requested = FALSE;

#define DENAP_DROP_TARGET   (1000 * NSEC_PER_MSEC)  /* optimum denap delay */
#define DENAP_DROP_SKEW     (100 * NSEC_PER_MSEC)   /* request skew for wakeup */
#define DENAP_DROP_LEEWAY   (2 * DENAP_DROP_SKEW)   /* specified wakeup leeway */

#define DENAP_DROP_DELAY    (DENAP_DROP_TARGET + DENAP_DROP_SKEW)
#define DENAP_DROP_FLAGS    (THREAD_CALL_DELAY_SYS_NORMAL | THREAD_CALL_DELAY_LEEWAY)
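
/*
 * Timing sketch, using the constants above: a de-nap drop queued at time T is
 * scheduled for T + DENAP_DROP_DELAY (1000ms + 100ms = 1100ms), with up to
 * DENAP_DROP_LEEWAY (200ms) of wakeup leeway, so the boost is actually dropped
 * roughly 1.1s to 1.3s after it was queued rather than forcing a wakeup at the
 * 1000ms optimum.
 */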
/*
 * Importance Voucher Attribute Manager
 */
static LCK_SPIN_DECLARE_ATTR(ipc_importance_lock_data,
    &ipc_lck_grp, &ipc_lck_attr);

#define ipc_importance_lock() \
    lck_spin_lock_grp(&ipc_importance_lock_data, &ipc_lck_grp)
#define ipc_importance_lock_try() \
    lck_spin_try_lock_grp(&ipc_importance_lock_data, &ipc_lck_grp)
#define ipc_importance_unlock() \
    lck_spin_unlock(&ipc_importance_lock_data)
#define ipc_importance_assert_held() \
    lck_spin_assert(&ipc_importance_lock_data, LCK_ASSERT_OWNED)
#if IIE_REF_DEBUG
#define incr_ref_counter(x) (os_atomic_inc(&(x), relaxed))

static inline uint32_t
ipc_importance_reference_internal(ipc_importance_elem_t elem)
{
    incr_ref_counter(elem->iie_refs_added);
    return os_atomic_inc(&elem->iie_bits, relaxed) & IIE_REFS_MASK;
}

static inline uint32_t
ipc_importance_release_internal(ipc_importance_elem_t elem)
{
    incr_ref_counter(elem->iie_refs_dropped);
    return os_atomic_dec(&elem->iie_bits, relaxed) & IIE_REFS_MASK;
}

static inline uint32_t
ipc_importance_task_reference_internal(ipc_importance_task_t task_imp)
{
    uint32_t out;

    out = ipc_importance_reference_internal(&task_imp->iit_elem);
    incr_ref_counter(task_imp->iit_elem.iie_task_refs_added);
    return out;
}

static inline uint32_t
ipc_importance_task_release_internal(ipc_importance_task_t task_imp)
{
    uint32_t out;

    assert(1 < IIT_REFS(task_imp));
    incr_ref_counter(task_imp->iit_elem.iie_task_refs_dropped);
    out = ipc_importance_release_internal(&task_imp->iit_elem);
    return out;
}

static inline void
ipc_importance_counter_init(ipc_importance_elem_t elem)
{
    elem->iie_refs_added = 0;
    elem->iie_refs_dropped = 0;
    elem->iie_kmsg_refs_added = 0;
    elem->iie_kmsg_refs_inherited = 0;
    elem->iie_kmsg_refs_coalesced = 0;
    elem->iie_kmsg_refs_dropped = 0;
    elem->iie_task_refs_added = 0;
    elem->iie_task_refs_added_inherit_from = 0;
    elem->iie_task_refs_added_transition = 0;
    elem->iie_task_refs_self_added = 0;
    elem->iie_task_refs_inherited = 0;
    elem->iie_task_refs_coalesced = 0;
    elem->iie_task_refs_dropped = 0;
}
#else /* !IIE_REF_DEBUG */
#define incr_ref_counter(x)
#endif /* !IIE_REF_DEBUG */
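
/*
 * The reference count shares iie_bits with the element type, which is why the
 * wrappers above mask the atomic result with IIE_REFS_MASK before returning
 * it. The per-call-site counters are debug-only bookkeeping; with
 * IIE_REF_DEBUG off, incr_ref_counter() expands to nothing.
 */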
#if DEVELOPMENT || DEBUG
static queue_head_t global_iit_alloc_queue =
    QUEUE_HEAD_INITIALIZER(global_iit_alloc_queue);
#endif

static ZONE_DECLARE(ipc_importance_task_zone, "ipc task importance",
    sizeof(struct ipc_importance_task), ZC_NOENCRYPT);
static ZONE_DECLARE(ipc_importance_inherit_zone, "ipc importance inherit",
    sizeof(struct ipc_importance_inherit), ZC_NOENCRYPT);

static ipc_voucher_attr_control_t ipc_importance_control;
static boolean_t ipc_importance_task_check_transition(ipc_importance_task_t task_imp,
    iit_update_type_t type, uint32_t delta);

static void ipc_importance_task_propagate_assertion_locked(ipc_importance_task_t task_imp,
    iit_update_type_t type, boolean_t update_task_imp);

static ipc_importance_inherit_t ipc_importance_inherit_from_task(task_t from_task, task_t to_task);
/*
 * Routine: ipc_importance_kmsg_link
 * Purpose:
 *      Link the kmsg onto the appropriate propagation chain.
 *      If the element is a task importance, we link directly
 *      on its propagation chain. Otherwise, we link onto the
 *      destination task of the inherit.
 * Conditions:
 *      Importance lock held.
 *      Caller is donating an importance elem reference to the kmsg.
 */
static void
ipc_importance_kmsg_link(
    ipc_kmsg_t              kmsg,
    ipc_importance_elem_t   elem)
{
    ipc_importance_elem_t link_elem;

    assert(IIE_NULL == kmsg->ikm_importance);

    link_elem = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
        (ipc_importance_elem_t)((ipc_importance_inherit_t)elem)->iii_to_task :
        elem;

    queue_enter(&link_elem->iie_kmsgs, kmsg, ipc_kmsg_t, ikm_inheritance);
    kmsg->ikm_importance = elem;
}
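
/*
 * A kmsg is always chained off a task-level element: when the donating
 * element is an inherit, the link target is the inherit's receiving task
 * (iii_to_task). A task's kmsg list therefore collects every in-flight
 * message currently boosting it, regardless of where the boost originated.
 */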
/*
 * Routine: ipc_importance_kmsg_unlink
 * Purpose:
 *      Unlink the kmsg from its current propagation chain.
 *      If the element is a task importance, we unlink directly
 *      from its propagation chain. Otherwise, we unlink from the
 *      destination task of the inherit.
 * Returns:
 *      The reference to the importance element it was linked on.
 * Conditions:
 *      Importance lock held.
 *      Caller is responsible for dropping reference on returned elem.
 */
static ipc_importance_elem_t
ipc_importance_kmsg_unlink(
    ipc_kmsg_t              kmsg)
{
    ipc_importance_elem_t elem = kmsg->ikm_importance;

    if (IIE_NULL != elem) {
        ipc_importance_elem_t unlink_elem;

        unlink_elem = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
            (ipc_importance_elem_t)((ipc_importance_inherit_t)elem)->iii_to_task :
            elem;

        queue_remove(&unlink_elem->iie_kmsgs, kmsg, ipc_kmsg_t, ikm_inheritance);
        kmsg->ikm_importance = IIE_NULL;
    }
    return elem;
}
/*
 * Routine: ipc_importance_inherit_link
 * Purpose:
 *      Link the inherit onto the appropriate propagation chain.
 *      If the element is a task importance, we link directly
 *      on its propagation chain. Otherwise, we link onto the
 *      destination task of the inherit.
 * Conditions:
 *      Importance lock held.
 *      Caller is donating an elem importance reference to the inherit.
 */
static void
ipc_importance_inherit_link(
    ipc_importance_inherit_t inherit,
    ipc_importance_elem_t    elem)
{
    ipc_importance_task_t link_task;

    assert(IIE_NULL == inherit->iii_from_elem);
    link_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
        ((ipc_importance_inherit_t)elem)->iii_to_task :
        (ipc_importance_task_t)elem;

    queue_enter(&link_task->iit_inherits, inherit,
        ipc_importance_inherit_t, iii_inheritance);
    inherit->iii_from_elem = elem;
}
/*
 * Routine: ipc_importance_inherit_find
 * Purpose:
 *      Find an existing inherit that links the from element to the
 *      to_task at a given nesting depth. As inherits from other
 *      inherits are actually linked off the original inherit's donation
 *      receiving task, we have to conduct our search from there if
 *      the from element is an inherit.
 * Returns:
 *      A pointer (not a reference) to the matching inherit.
 * Conditions:
 *      Importance lock held.
 */
static ipc_importance_inherit_t
ipc_importance_inherit_find(
    ipc_importance_elem_t from,
    ipc_importance_task_t to_task,
    unsigned int          depth)
{
    ipc_importance_task_t link_task;
    ipc_importance_inherit_t inherit;

    link_task = (IIE_TYPE_INHERIT == IIE_TYPE(from)) ?
        ((ipc_importance_inherit_t)from)->iii_to_task :
        (ipc_importance_task_t)from;

    queue_iterate(&link_task->iit_inherits, inherit,
        ipc_importance_inherit_t, iii_inheritance) {
        if (inherit->iii_to_task == to_task && inherit->iii_depth == depth) {
            return inherit;
        }
    }
    return III_NULL;
}
/*
 * Routine: ipc_importance_inherit_unlink
 * Purpose:
 *      Unlink the inherit from its current propagation chain.
 *      If the element is a task importance, we unlink directly
 *      from its propagation chain. Otherwise, we unlink from the
 *      destination task of the inherit.
 * Returns:
 *      The reference to the importance element it was linked on.
 * Conditions:
 *      Importance lock held.
 *      Caller is responsible for dropping reference on returned elem.
 */
static ipc_importance_elem_t
ipc_importance_inherit_unlink(
    ipc_importance_inherit_t inherit)
{
    ipc_importance_elem_t elem = inherit->iii_from_elem;

    if (IIE_NULL != elem) {
        ipc_importance_task_t unlink_task;

        unlink_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
            ((ipc_importance_inherit_t)elem)->iii_to_task :
            (ipc_importance_task_t)elem;

        queue_remove(&unlink_task->iit_inherits, inherit,
            ipc_importance_inherit_t, iii_inheritance);
        inherit->iii_from_elem = IIE_NULL;
    }
    return elem;
}
/*
 * Routine: ipc_importance_reference
 * Purpose:
 *      Add a reference to the importance element.
 * Conditions:
 *      Caller must hold a reference on the element.
 */
void
ipc_importance_reference(ipc_importance_elem_t elem)
{
    assert(0 < IIE_REFS(elem));
    ipc_importance_reference_internal(elem);
}
/*
 * Routine: ipc_importance_release_locked
 * Purpose:
 *      Release a reference on an importance attribute value,
 *      unlinking and deallocating the attribute if the last reference.
 * Conditions:
 *      Entered with importance lock held, leaves with it unlocked.
 */
static void
ipc_importance_release_locked(ipc_importance_elem_t elem)
{
    assert(0 < IIE_REFS(elem));

#if IMPORTANCE_DEBUG
    ipc_importance_inherit_t temp_inherit;
    ipc_importance_task_t link_task;
    ipc_kmsg_t temp_kmsg;
    uint32_t expected = 0;

    if (0 < elem->iie_made) {
        expected++;
    }

    link_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
        ((ipc_importance_inherit_t)elem)->iii_to_task :
        (ipc_importance_task_t)elem;

    queue_iterate(&link_task->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance)
    if (temp_kmsg->ikm_importance == elem) {
        expected++;
    }
    queue_iterate(&link_task->iit_inherits, temp_inherit,
        ipc_importance_inherit_t, iii_inheritance)
    if (temp_inherit->iii_from_elem == elem) {
        expected++;
    }
    if (IIE_REFS(elem) < expected + 1) {
        panic("ipc_importance_release_locked (%p)", elem);
    }
#endif /* IMPORTANCE_DEBUG */

    if (0 < ipc_importance_release_internal(elem)) {
        ipc_importance_unlock();
        return;
    }

    /* last ref: unlink and deallocate the element */

    switch (IIE_TYPE(elem)) {
    /* just a "from" task reference to drop */
    case IIE_TYPE_TASK:
    {
        ipc_importance_task_t task_elem;

        task_elem = (ipc_importance_task_t)elem;

        /* the task can't still hold a reference on the task importance */
        assert(TASK_NULL == task_elem->iit_task);

#if DEVELOPMENT || DEBUG
        queue_remove(&global_iit_alloc_queue, task_elem, ipc_importance_task_t, iit_allocation);
#endif

        ipc_importance_unlock();

        zfree(ipc_importance_task_zone, task_elem);
        break;
    }

    /* dropping an inherit element */
    case IIE_TYPE_INHERIT:
    {
        ipc_importance_inherit_t inherit = (ipc_importance_inherit_t)elem;
        ipc_importance_task_t to_task = inherit->iii_to_task;
        ipc_importance_elem_t from_elem;

        assert(IIT_NULL != to_task);
        assert(ipc_importance_task_is_any_receiver_type(to_task));

        /* unlink the inherit from its source element */
        from_elem = ipc_importance_inherit_unlink(inherit);
        assert(IIE_NULL != from_elem);

        /*
         * The attribute might have pending external boosts if the attribute
         * was given out during exec; drop them from the appropriate destination
         * task.
         *
         * The attribute will not have any pending external boosts if the
         * attribute was given out to the voucher system, since they would have
         * been dropped by ipc_importance_release_value, but there is no way to
         * detect that. Thus, if the attribute has a pending external boost,
         * drop it from the appropriate destination task.
         *
         * The inherit attributes from exec and the voucher system would not
         * get deduped to each other, thus dropping the external boost
         * from the destination task at two different places will not have
         * any unintended side effects.
         */
        assert(inherit->iii_externcnt >= inherit->iii_externdrop);
        if (inherit->iii_donating) {
            uint32_t assertcnt = III_EXTERN(inherit);

            assert(ipc_importance_task_is_any_receiver_type(to_task));
            assert(to_task->iit_externcnt >= inherit->iii_externcnt);
            assert(to_task->iit_externdrop >= inherit->iii_externdrop);
            to_task->iit_externcnt -= inherit->iii_externcnt;
            to_task->iit_externdrop -= inherit->iii_externdrop;
            inherit->iii_externcnt = 0;
            inherit->iii_externdrop = 0;
            inherit->iii_donating = FALSE;

            /* adjust the internal assertions - and propagate as needed */
            if (ipc_importance_task_check_transition(to_task, IIT_UPDATE_DROP, assertcnt)) {
                ipc_importance_task_propagate_assertion_locked(to_task, IIT_UPDATE_DROP, TRUE);
            }
        } else {
            inherit->iii_externcnt = 0;
            inherit->iii_externdrop = 0;
        }

        /* release the reference on the source element */
        ipc_importance_release_locked(from_elem);
        /* unlocked on return */

        /* release the reference on the destination task */
        ipc_importance_task_release(to_task);

        /* free the inherit */
        zfree(ipc_importance_inherit_zone, inherit);
        break;
    }
    }
}
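
/*
 * Teardown ordering in the inherit case above matters: the inherit's
 * contribution is backed out of the destination task's extern counts while
 * the importance lock is still held, the recursive release of the source
 * element then gives the lock up, and only afterwards are the destination
 * task reference and the inherit allocation itself released.
 */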
/*
 * Routine: ipc_importance_release
 * Purpose:
 *      Release a reference on an importance attribute value,
 *      unlinking and deallocating the attribute if the last reference.
 * Conditions:
 *      nothing locked on entrance, nothing locked on exit.
 *      May block.
 */
void
ipc_importance_release(ipc_importance_elem_t elem)
{
    if (IIE_NULL == elem) {
        return;
    }

    ipc_importance_lock();
    ipc_importance_release_locked(elem);
    /* unlocked */
}
/*
 * Routine: ipc_importance_task_reference
 * Purpose:
 *      Retain a reference on a task importance attribute value.
 * Conditions:
 *      nothing locked on entrance, nothing locked on exit.
 *      caller holds a reference already.
 */
void
ipc_importance_task_reference(ipc_importance_task_t task_elem)
{
    if (IIT_NULL == task_elem) {
        return;
    }
#if IIE_REF_DEBUG
    incr_ref_counter(task_elem->iit_elem.iie_task_refs_added);
#endif
    ipc_importance_reference(&task_elem->iit_elem);
}
/*
 * Routine: ipc_importance_task_release
 * Purpose:
 *      Release a reference on a task importance attribute value,
 *      unlinking and deallocating the attribute if the last reference.
 * Conditions:
 *      nothing locked on entrance, nothing locked on exit.
 *      May block.
 */
void
ipc_importance_task_release(ipc_importance_task_t task_elem)
{
    if (IIT_NULL == task_elem) {
        return;
    }

    ipc_importance_lock();
#if IIE_REF_DEBUG
    incr_ref_counter(task_elem->iit_elem.iie_task_refs_dropped);
#endif
    ipc_importance_release_locked(&task_elem->iit_elem);
    /* unlocked */
}
/*
 * Routine: ipc_importance_task_release_locked
 * Purpose:
 *      Release a reference on a task importance attribute value,
 *      unlinking and deallocating the attribute if the last reference.
 * Conditions:
 *      importance lock held on entry, nothing locked on exit.
 *      May block.
 */
static void
ipc_importance_task_release_locked(ipc_importance_task_t task_elem)
{
    if (IIT_NULL == task_elem) {
        ipc_importance_unlock();
        return;
    }
#if IIE_REF_DEBUG
    incr_ref_counter(task_elem->iit_elem.iie_task_refs_dropped);
#endif
    ipc_importance_release_locked(&task_elem->iit_elem);
    /* unlocked */
}
/*
 * Routines for importance donation/inheritance/boosting
 */

/*
 * External importance assertions are managed by the process in userspace.
 * Internal importance assertions are the responsibility of the kernel.
 * Assertions are changed from internal to external via task_importance_externalize_assertion.
 */
/*
 * Routine: ipc_importance_task_check_transition
 * Purpose:
 *      Increment or decrement the internal task importance counter of the
 *      specified task and determine if propagation and a task policy
 *      update is required.
 *
 *      If it is already enqueued for a policy update, steal it from that queue
 *      (as we are reversing that update before it happens).
 * Conditions:
 *      Called with the importance lock held.
 *      It is the caller's responsibility to perform the propagation of the
 *      transition and/or policy changes by checking the return value.
 */
static boolean_t
ipc_importance_task_check_transition(
    ipc_importance_task_t task_imp,
    iit_update_type_t     type,
    uint32_t              delta)
{
#if IMPORTANCE_TRACE
    task_t target_task = task_imp->iit_task;
#endif
    boolean_t boost = (IIT_UPDATE_HOLD == type);
    boolean_t before_boosted, after_boosted;

    ipc_importance_assert_held();

    if (!ipc_importance_task_is_any_receiver_type(task_imp)) {
        return FALSE;
    }

#if IMPORTANCE_TRACE
    int target_pid = task_pid(target_task);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (((boost) ? IMP_HOLD : IMP_DROP) | TASK_POLICY_INTERNAL))) | DBG_FUNC_START,
        proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
#endif

    /* snapshot the effective boosting status before making any changes */
    before_boosted = (task_imp->iit_assertcnt > 0);

    /* Adjust the assertcnt appropriately */
    if (boost) {
        task_imp->iit_assertcnt += delta;
#if IMPORTANCE_TRACE
        DTRACE_BOOST6(send_boost, task_t, target_task, int, target_pid,
            task_t, current_task(), int, proc_selfpid(), int, delta, int, task_imp->iit_assertcnt);
#endif
    } else {
        // assert(delta <= task_imp->iit_assertcnt);
        if (task_imp->iit_assertcnt < delta + IIT_EXTERN(task_imp)) {
            /* TODO: Turn this back into a panic <rdar://problem/12592649> */
            task_imp->iit_assertcnt = IIT_EXTERN(task_imp);
        } else {
            task_imp->iit_assertcnt -= delta;
        }
#if IMPORTANCE_TRACE
        // This covers both legacy and voucher-based importance.
        DTRACE_BOOST4(drop_boost, task_t, target_task, int, target_pid, int, delta, int, task_imp->iit_assertcnt);
#endif
    }

#if IMPORTANCE_TRACE
    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (((boost) ? IMP_HOLD : IMP_DROP) | TASK_POLICY_INTERNAL))) | DBG_FUNC_END,
        proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
#endif

    /* did the change result in an effective donor status change? */
    after_boosted = (task_imp->iit_assertcnt > 0);

    if (after_boosted != before_boosted) {
        /*
         * If the task importance is already on an update queue, we just reversed the need for a
         * pending policy update.  If the queue is any other than the delayed-drop-queue, pull it
         * off that queue and release the reference it got going onto the update queue.  If it is
         * the delayed-drop-queue we leave it in place in case it comes back into the drop state
         * before its time delay is up.
         *
         * We still need to propagate the change downstream to reverse the assertcnt effects,
         * but we no longer need to update this task's boost policy state.
         *
         * Otherwise, mark it as needing a policy update.
         */
        assert(0 == task_imp->iit_updatepolicy);
        if (NULL != task_imp->iit_updateq) {
            if (&ipc_importance_delayed_drop_queue != task_imp->iit_updateq) {
                queue_remove(task_imp->iit_updateq, task_imp, ipc_importance_task_t, iit_updates);
                task_imp->iit_updateq = NULL;
                ipc_importance_task_release_internal(task_imp); /* can't be last ref */
            }
        } else {
            task_imp->iit_updatepolicy = 1;
        }
        return TRUE;
    }

    return FALSE;
}
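
/*
 * Example of the edge detection above: a task with iit_assertcnt == 0 that
 * receives a HOLD with delta 1 transitions 0 -> 1, so the routine returns
 * TRUE and the caller must propagate and schedule a policy update; a second
 * HOLD (1 -> 2) changes no effective state and returns FALSE.
 */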
/*
 * Routine: ipc_importance_task_propagate_helper
 * Purpose:
 *      Increment or decrement the internal task importance counter of all
 *      importance tasks inheriting from the specified one. If this causes
 *      that importance task to change state, add it to the list of tasks
 *      to do a policy update against.
 * Conditions:
 *      Called with the importance lock held.
 *      It is the caller's responsibility to iterate down the generated list
 *      and propagate any subsequent assertion changes from there.
 */
static void
ipc_importance_task_propagate_helper(
    ipc_importance_task_t task_imp,
    iit_update_type_t     type,
    queue_t               propagation)
{
    ipc_importance_task_t temp_task_imp;

    /*
     * iterate the downstream kmsgs, adjust their boosts,
     * and capture the next task to adjust for each message
     */
    ipc_kmsg_t temp_kmsg;

    queue_iterate(&task_imp->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance) {
        mach_msg_header_t *hdr = temp_kmsg->ikm_header;
        mach_port_delta_t delta;
        ipc_port_t port;

        /* toggle the kmsg importance bit as a barrier to parallel adjusts */
        if (IIT_UPDATE_HOLD == type) {
            if (MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
                continue;
            }

            /* mark the message as now carrying importance */
            hdr->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;
        } else {
            if (!MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
                continue;
            }

            /* clear the message as now carrying importance */
            hdr->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
        }

        /* determine the task importance to adjust as result (if any) */
        port = hdr->msgh_remote_port;
        assert(IP_VALID(port));
        ip_lock(port);
        temp_task_imp = IIT_NULL;
        if (!ipc_port_importance_delta_internal(port, IPID_OPTION_NORMAL, &delta, &temp_task_imp)) {
            ip_unlock(port);
        }

        /* no task importance to adjust associated with the port? */
        if (IIT_NULL == temp_task_imp) {
            continue;
        }

        /* hold a reference on temp_task_imp */

        /* Adjust the task assertions and determine if an edge was crossed */
        if (ipc_importance_task_check_transition(temp_task_imp, type, 1)) {
            incr_ref_counter(temp_task_imp->iit_elem.iie_task_refs_added_transition);
            queue_enter(propagation, temp_task_imp, ipc_importance_task_t, iit_props);
            /* reference donated */
        } else {
            ipc_importance_task_release_internal(temp_task_imp);
        }
    }

    /*
     * iterate the downstream importance inherits
     * and capture the next task importance to boost for each
     */
    ipc_importance_inherit_t temp_inherit;

    queue_iterate(&task_imp->iit_inherits, temp_inherit, ipc_importance_inherit_t, iii_inheritance) {
        uint32_t assertcnt = III_EXTERN(temp_inherit);

        temp_task_imp = temp_inherit->iii_to_task;
        assert(IIT_NULL != temp_task_imp);

        if (IIT_UPDATE_HOLD == type) {
            /* if no undropped externcnts in the inherit, nothing to do */
            if (0 == assertcnt) {
                assert(temp_inherit->iii_donating == FALSE);
                continue;
            }

            /* nothing to do if the inherit is already donating (forced donation) */
            if (temp_inherit->iii_donating) {
                continue;
            }

            /* mark it donating and contribute to the task externcnts */
            temp_inherit->iii_donating = TRUE;
            temp_task_imp->iit_externcnt += temp_inherit->iii_externcnt;
            temp_task_imp->iit_externdrop += temp_inherit->iii_externdrop;
        } else {
            /* if no contributing assertions, move on */
            if (0 == assertcnt) {
                assert(temp_inherit->iii_donating == FALSE);
                continue;
            }

            /* nothing to do if the inherit is not donating */
            if (!temp_inherit->iii_donating) {
                continue;
            }

            /* mark it no longer donating */
            temp_inherit->iii_donating = FALSE;

            /* remove the contribution the inherit made to the to-task */
            assert(IIT_EXTERN(temp_task_imp) >= III_EXTERN(temp_inherit));
            assert(temp_task_imp->iit_externcnt >= temp_inherit->iii_externcnt);
            assert(temp_task_imp->iit_externdrop >= temp_inherit->iii_externdrop);
            temp_task_imp->iit_externcnt -= temp_inherit->iii_externcnt;
            temp_task_imp->iit_externdrop -= temp_inherit->iii_externdrop;
        }

        /* Adjust the task assertions and determine if an edge was crossed */
        assert(ipc_importance_task_is_any_receiver_type(temp_task_imp));
        if (ipc_importance_task_check_transition(temp_task_imp, type, assertcnt)) {
            ipc_importance_task_reference(temp_task_imp);
            incr_ref_counter(temp_task_imp->iit_elem.iie_task_refs_added_transition);
            queue_enter(propagation, temp_task_imp, ipc_importance_task_t, iit_props);
        }
    }
}
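
/*
 * The MACH_MSGH_BITS_RAISEIMP toggle above is what keeps a HOLD/DROP pair
 * from being applied twice to the same in-flight message: a HOLD skips
 * messages already marked as carrying importance, and a DROP skips messages
 * that are not marked, so each message contributes at most one assertion to
 * the task behind its destination port.
 */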
/*
 * Routine: ipc_importance_task_process_updates
 * Purpose:
 *      Process the queue of task importances and apply the policy
 *      update called for. Only process tasks in the queue with an
 *      update timestamp less than the supplied max.
 * Conditions:
 *      Called and returns with importance locked.
 *      May drop importance lock and block temporarily.
 */
static void
ipc_importance_task_process_updates(
    queue_t   supplied_queue,
    boolean_t boost,
    uint64_t  max_timestamp)
{
    ipc_importance_task_t task_imp;
    queue_head_t second_chance;
    queue_t queue = supplied_queue;

    /*
     * This queue will hold the tasks we couldn't trylock on first pass.
     * By using a second (private) queue, we guarantee all tasks that get
     * entered on this queue have a timestamp under the maximum.
     */
    queue_init(&second_chance);

    /* process any resulting policy updates */
retry:
    while (!queue_empty(queue)) {
        task_t target_task;
        struct task_pend_token pend_token = {};

        task_imp = (ipc_importance_task_t)queue_first(queue);
        assert(0 == task_imp->iit_updatepolicy);
        assert(queue == task_imp->iit_updateq);

        /* if timestamp is too big, we're done */
        if (task_imp->iit_updatetime > max_timestamp) {
            break;
        }

        /* we were given a reference on each task in the queue */

        /* remove it from the supplied queue */
        queue_remove(queue, task_imp, ipc_importance_task_t, iit_updates);
        task_imp->iit_updateq = NULL;

        target_task = task_imp->iit_task;

        /* Is it well on the way to exiting? */
        if (TASK_NULL == target_task) {
            ipc_importance_task_release_locked(task_imp);
            /* importance unlocked */
            ipc_importance_lock();
            continue;
        }

        /* Has the update been reversed on the hysteresis queue? */
        if (0 < task_imp->iit_assertcnt &&
            queue == &ipc_importance_delayed_drop_queue) {
            ipc_importance_task_release_locked(task_imp);
            /* importance unlocked */
            ipc_importance_lock();
            continue;
        }

        /*
         * Can we get the task lock out-of-order?
         * If not, stick this back on the second-chance queue.
         */
        if (!task_lock_try(target_task)) {
            boolean_t should_wait_lock = (queue == &second_chance);
            task_imp->iit_updateq = &second_chance;

            /*
             * If we're already processing second-chances on
             * tasks, keep this task on the front of the queue.
             * We will wait for the task lock before coming
             * back and trying again, and we have a better
             * chance of re-acquiring the lock if we come back
             * to it right away.
             */
            if (should_wait_lock) {
                task_reference(target_task);
                queue_enter_first(&second_chance, task_imp,
                    ipc_importance_task_t, iit_updates);
            } else {
                queue_enter(&second_chance, task_imp,
                    ipc_importance_task_t, iit_updates);
            }
            ipc_importance_unlock();

            if (should_wait_lock) {
                task_lock(target_task);
                task_unlock(target_task);
                task_deallocate(target_task);
            }

            ipc_importance_lock();
            continue;
        }

        /* is it going away? */
        if (!target_task->active) {
            task_unlock(target_task);
            ipc_importance_task_release_locked(task_imp);
            /* importance unlocked */
            ipc_importance_lock();
            continue;
        }

        /* take a task reference for while we don't have the importance lock */
        task_reference(target_task);

        /* count the transition */
        if (boost) {
            task_imp->iit_transitions++;
        }

        ipc_importance_unlock();

        /* apply the policy adjust to the target task (while it is still locked) */
        task_update_boost_locked(target_task, boost, &pend_token);

        /* complete the policy update with the task unlocked */
        ipc_importance_task_release(task_imp);
        task_unlock(target_task);
        task_policy_update_complete_unlocked(target_task, &pend_token);
        task_deallocate(target_task);

        ipc_importance_lock();
    }

    /* If there are tasks we couldn't update the first time, try again */
    if (!queue_empty(&second_chance)) {
        queue = &second_chance;
        goto retry;
    }
}
/*
 * Routine: ipc_importance_task_delayed_drop_scan
 * Purpose:
 *      The thread call routine to scan the delayed drop queue,
 *      requesting all updates with a deadline up to the last target
 *      for the thread-call (which is DENAP_DROP_SKEW beyond the first
 *      entry's optimum delay).
 * Conditions:
 *      Nothing locked.
 */
static void
ipc_importance_task_delayed_drop_scan(
    __unused void *arg1,
    __unused void *arg2)
{
    ipc_importance_lock();

    /* process all queued task drops with timestamps up to TARGET(first)+SKEW */
    ipc_importance_task_process_updates(&ipc_importance_delayed_drop_queue,
        FALSE,
        ipc_importance_delayed_drop_timestamp);

    /* importance lock may have been temporarily dropped */

    /* If there are any entries left in the queue, re-arm the call here */
    if (!queue_empty(&ipc_importance_delayed_drop_queue)) {
        ipc_importance_task_t task_imp;
        uint64_t deadline;
        uint64_t leeway;

        task_imp = (ipc_importance_task_t)queue_first(&ipc_importance_delayed_drop_queue);

        nanoseconds_to_absolutetime(DENAP_DROP_DELAY, &deadline);
        deadline += task_imp->iit_updatetime;
        ipc_importance_delayed_drop_timestamp = deadline;

        nanoseconds_to_absolutetime(DENAP_DROP_LEEWAY, &leeway);

        thread_call_enter_delayed_with_leeway(
            ipc_importance_delayed_drop_call,
            NULL,
            deadline,
            leeway,
            DENAP_DROP_FLAGS);
    } else {
        ipc_importance_delayed_drop_call_requested = FALSE;
    }
    ipc_importance_unlock();
}
/*
 * Routine: ipc_importance_task_delayed_drop
 * Purpose:
 *      Queue the specified task importance for delayed policy
 *      update to drop its boost.
 * Conditions:
 *      Called with the importance lock held.
 */
static void
ipc_importance_task_delayed_drop(ipc_importance_task_t task_imp)
{
    uint64_t timestamp = mach_absolute_time(); /* no mach_approximate_time() in kernel */

    assert(ipc_importance_delayed_drop_call != NULL);

    /*
     * If still on an update queue from a previous change,
     * remove it first (and use that reference).  Otherwise, take
     * a new reference for the delay drop update queue.
     */
    if (NULL != task_imp->iit_updateq) {
        queue_remove(task_imp->iit_updateq, task_imp,
            ipc_importance_task_t, iit_updates);
    } else {
        ipc_importance_task_reference_internal(task_imp);
    }

    task_imp->iit_updateq = &ipc_importance_delayed_drop_queue;
    task_imp->iit_updatetime = timestamp;

    queue_enter(&ipc_importance_delayed_drop_queue, task_imp,
        ipc_importance_task_t, iit_updates);

    /* request the delayed thread-call if not already requested */
    if (!ipc_importance_delayed_drop_call_requested) {
        uint64_t deadline;
        uint64_t leeway;

        nanoseconds_to_absolutetime(DENAP_DROP_DELAY, &deadline);
        deadline += task_imp->iit_updatetime;
        ipc_importance_delayed_drop_timestamp = deadline;

        nanoseconds_to_absolutetime(DENAP_DROP_LEEWAY, &leeway);

        ipc_importance_delayed_drop_call_requested = TRUE;
        thread_call_enter_delayed_with_leeway(
            ipc_importance_delayed_drop_call,
            NULL,
            deadline,
            leeway,
            DENAP_DROP_FLAGS);
    }
}
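
/*
 * Only one delayed thread-call is kept in flight: the first enqueue arms it
 * for its own deadline, later enqueues simply ride along, and the scan
 * routine above re-arms the call for the new head of the queue if any
 * entries remain after a pass.
 */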
/*
 * Routine: ipc_importance_task_propagate_assertion_locked
 * Purpose:
 *      Propagate the importance transition type to every item
 *      downstream of the provided task. If this causes a boost to be
 *      applied, determine if that boost should propagate downstream.
 * Conditions:
 *      Called with the importance lock held.
 */
static void
ipc_importance_task_propagate_assertion_locked(
    ipc_importance_task_t task_imp,
    iit_update_type_t     type,
    boolean_t             update_task_imp)
{
    boolean_t boost = (IIT_UPDATE_HOLD == type);
    ipc_importance_task_t temp_task_imp;
    queue_head_t propagate;
    queue_head_t updates;

    queue_init(&updates);
    queue_init(&propagate);

    ipc_importance_assert_held();

    /*
     * If we're going to update the policy for the provided task,
     * enqueue it on the propagate queue itself.  Otherwise, only
     * enqueue downstream things.
     */
    if (update_task_imp) {
        ipc_importance_task_reference(task_imp);
        incr_ref_counter(task_imp->iit_elem.iie_task_refs_added_transition);
        queue_enter(&propagate, task_imp, ipc_importance_task_t, iit_props);
    } else {
        ipc_importance_task_propagate_helper(task_imp, type, &propagate);
    }

    /*
     * for each item on the propagation list, propagate any change downstream,
     * adding new tasks to propagate further if they transitioned as well.
     */
    while (!queue_empty(&propagate)) {
        boolean_t need_update;

        queue_remove_first(&propagate, temp_task_imp, ipc_importance_task_t, iit_props);
        /* hold a reference on temp_task_imp */

        assert(IIT_NULL != temp_task_imp);

        /* only propagate for receivers not already marked as a donor */
        if (!ipc_importance_task_is_marked_donor(temp_task_imp) &&
            ipc_importance_task_is_marked_receiver(temp_task_imp)) {
            ipc_importance_task_propagate_helper(temp_task_imp, type, &propagate);
        }

        /* if we have a policy update to apply, enqueue a reference for later processing */
        need_update = (0 != temp_task_imp->iit_updatepolicy);
        temp_task_imp->iit_updatepolicy = 0;
        if (need_update && TASK_NULL != temp_task_imp->iit_task) {
            if (NULL == temp_task_imp->iit_updateq) {
                /*
                 * If a downstream task that needs an update is subject to AppNap,
                 * drop boosts according to the delay hysteresis.  Otherwise,
                 * update it immediately.
                 */
                if (!boost && temp_task_imp != task_imp &&
                    ipc_importance_delayed_drop_call != NULL &&
                    ipc_importance_task_is_marked_denap_receiver(temp_task_imp)) {
                    ipc_importance_task_delayed_drop(temp_task_imp);
                } else {
                    temp_task_imp->iit_updatetime = 0;
                    temp_task_imp->iit_updateq = &updates;
                    ipc_importance_task_reference_internal(temp_task_imp);
                    if (boost) {
                        queue_enter(&updates, temp_task_imp,
                            ipc_importance_task_t, iit_updates);
                    } else {
                        queue_enter_first(&updates, temp_task_imp,
                            ipc_importance_task_t, iit_updates);
                    }
                }
            } else {
                /* Must already be on the AppNap hysteresis queue */
                assert(ipc_importance_delayed_drop_call != NULL);
                assert(ipc_importance_task_is_marked_denap_receiver(temp_task_imp));
            }
        }

        ipc_importance_task_release_internal(temp_task_imp);
    }

    /* apply updates to task (may drop importance lock) */
    if (!queue_empty(&updates)) {
        ipc_importance_task_process_updates(&updates, boost, 0);
    }
}
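
/*
 * The propagate queue above implements an iterative traversal of the
 * donation graph: each dequeued task may push the tasks it in turn boosts
 * back onto the same queue, so the loop keeps running until the transition
 * has reached every affected receiver without any recursion.
 */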
/*
 * Routine: ipc_importance_task_hold_internal_assertion_locked
 * Purpose:
 *      Increment the assertion count on the task importance.
 *      If this results in a boost state change in that task,
 *      prepare to update task policy for this task AND, if
 *      not just waking out of App Nap, all down-stream
 *      tasks that have a similar transition through inheriting
 *      this update.
 * Conditions:
 *      importance locked on entry and exit.
 *      May temporarily drop importance lock and block.
 */
static kern_return_t
ipc_importance_task_hold_internal_assertion_locked(ipc_importance_task_t task_imp, uint32_t count)
{
    if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_HOLD, count)) {
        ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_HOLD, TRUE);
    }
    return KERN_SUCCESS;
}
/*
 * Routine: ipc_importance_task_drop_internal_assertion_locked
 * Purpose:
 *      Decrement the assertion count on the task importance.
 *      If this results in a boost state change in that task,
 *      prepare to update task policy for this task AND, if
 *      not just waking out of App Nap, all down-stream
 *      tasks that have a similar transition through inheriting
 *      this update.
 * Conditions:
 *      importance locked on entry and exit.
 *      May temporarily drop importance lock and block.
 */
static kern_return_t
ipc_importance_task_drop_internal_assertion_locked(ipc_importance_task_t task_imp, uint32_t count)
{
    if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_DROP, count)) {
        ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, TRUE);
    }
    return KERN_SUCCESS;
}
/*
 * Routine: ipc_importance_task_hold_internal_assertion
 * Purpose:
 *      Increment the assertion count on the task importance.
 *      If this results in a 0->1 change in that count,
 *      prepare to update task policy for this task AND
 *      (potentially) all down-stream tasks that have a
 *      similar transition through inheriting this update.
 * Conditions:
 *      Nothing locked.
 *      May block after dropping importance lock.
 */
int
ipc_importance_task_hold_internal_assertion(ipc_importance_task_t task_imp, uint32_t count)
{
    int ret = KERN_SUCCESS;

    if (ipc_importance_task_is_any_receiver_type(task_imp)) {
        ipc_importance_lock();
        ret = ipc_importance_task_hold_internal_assertion_locked(task_imp, count);
        ipc_importance_unlock();
    }
    return ret;
}
/*
 * Routine: ipc_importance_task_drop_internal_assertion
 * Purpose:
 *      Decrement the assertion count on the task importance.
 *      If this results in a X->0 change in that count,
 *      prepare to update task policy for this task AND
 *      all down-stream tasks that have a similar transition
 *      through inheriting this drop update.
 * Conditions:
 *      Nothing locked on entry.
 *      May block after dropping importance lock.
 */
kern_return_t
ipc_importance_task_drop_internal_assertion(ipc_importance_task_t task_imp, uint32_t count)
{
    kern_return_t ret = KERN_SUCCESS;

    if (ipc_importance_task_is_any_receiver_type(task_imp)) {
        ipc_importance_lock();
        ret = ipc_importance_task_drop_internal_assertion_locked(task_imp, count);
        ipc_importance_unlock();
    }
    return ret;
}
/*
 * Routine: ipc_importance_task_hold_file_lock_assertion
 * Purpose:
 *      Increment the file lock assertion count on the task importance.
 *      If this results in a 0->1 change in that count,
 *      prepare to update task policy for this task AND
 *      (potentially) all down-stream tasks that have a
 *      similar transition through inheriting this update.
 * Conditions:
 *      Nothing locked.
 *      May block after dropping importance lock.
 */
kern_return_t
ipc_importance_task_hold_file_lock_assertion(ipc_importance_task_t task_imp, uint32_t count)
{
    kern_return_t ret = KERN_SUCCESS;

    if (ipc_importance_task_is_any_receiver_type(task_imp)) {
        ipc_importance_lock();
        ret = ipc_importance_task_hold_internal_assertion_locked(task_imp, count);
        if (KERN_SUCCESS == ret) {
            task_imp->iit_filelocks += count;
        }
        ipc_importance_unlock();
    }
    return ret;
}
/*
 * Routine: ipc_importance_task_drop_file_lock_assertion
 * Purpose:
 *      Decrement the file lock assertion count on the task importance.
 *      If this results in a X->0 change in that count,
 *      prepare to update task policy for this task AND
 *      all down-stream tasks that have a similar transition
 *      through inheriting this drop update.
 * Conditions:
 *      Nothing locked on entry.
 *      May block after dropping importance lock.
 */
kern_return_t
ipc_importance_task_drop_file_lock_assertion(ipc_importance_task_t task_imp, uint32_t count)
{
    kern_return_t ret = KERN_SUCCESS;

    if (ipc_importance_task_is_any_receiver_type(task_imp)) {
        ipc_importance_lock();
        if (count <= task_imp->iit_filelocks) {
            task_imp->iit_filelocks -= count;
            ret = ipc_importance_task_drop_internal_assertion_locked(task_imp, count);
        } else {
            ret = KERN_INVALID_ARGUMENT;
        }
        ipc_importance_unlock();
    }
    return ret;
}
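
/*
 * The file lock variants are thin wrappers over the internal assertion
 * hold/drop: iit_filelocks only records how many of the internal assertions
 * were taken on behalf of file locks, so an over-release can be rejected
 * with KERN_INVALID_ARGUMENT before it ever reaches the internal count.
 */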
/*
 * Routine: ipc_importance_task_hold_legacy_external_assertion
 * Purpose:
 *      Increment the external assertion count on the task importance.
 *      This cannot result in a 0->1 transition, as the caller must
 *      already hold an external boost.
 * Conditions:
 *      Nothing locked on entry.
 *      May block after dropping importance lock.
 *      A queue of task importance structures is returned
 *      by ipc_importance_task_hold_assertion_locked().  Each
 *      needs to be updated (outside the importance lock hold).
 */
kern_return_t
ipc_importance_task_hold_legacy_external_assertion(ipc_importance_task_t task_imp, uint32_t count)
{
    task_t target_task;
    uint32_t target_assertcnt;
    uint32_t target_externcnt;
    uint32_t target_legacycnt;

    kern_return_t ret;

    ipc_importance_lock();
    target_task = task_imp->iit_task;

#if IMPORTANCE_TRACE
    int target_pid = task_pid(target_task);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_HOLD | TASK_POLICY_EXTERNAL))) | DBG_FUNC_START,
        proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
#endif

    if (IIT_LEGACY_EXTERN(task_imp) == 0) {
        /* Only allowed to take a new boost assertion when holding an external boost */
        /* save data for diagnostic printf below */
        target_assertcnt = task_imp->iit_assertcnt;
        target_externcnt = IIT_EXTERN(task_imp);
        target_legacycnt = IIT_LEGACY_EXTERN(task_imp);
        ret = KERN_FAILURE;
        count = 0;
    } else {
        assert(ipc_importance_task_is_any_receiver_type(task_imp));
        assert(0 < task_imp->iit_assertcnt);
        assert(0 < IIT_EXTERN(task_imp));
        task_imp->iit_assertcnt += count;
        task_imp->iit_externcnt += count;
        task_imp->iit_legacy_externcnt += count;
        ret = KERN_SUCCESS;
    }
    ipc_importance_unlock();

#if IMPORTANCE_TRACE
    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_HOLD | TASK_POLICY_EXTERNAL))) | DBG_FUNC_END,
        proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
    // This covers the legacy case where a task takes an extra boost.
    DTRACE_BOOST5(receive_boost, task_t, target_task, int, target_pid, int, proc_selfpid(), int, count, int, task_imp->iit_assertcnt);
#endif

    if (KERN_FAILURE == ret && target_task != TASK_NULL) {
        printf("BUG in process %s[%d]: "
            "attempt to acquire an additional legacy external boost assertion without holding an existing legacy external assertion. "
            "(%d total, %d external, %d legacy-external)\n",
            proc_name_address(target_task->bsd_info), task_pid(target_task),
            target_assertcnt, target_externcnt, target_legacycnt);
    }

    return ret;
}
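
/*
 * Legacy external assertions form a sub-pool of the external assertions: a
 * legacy hold bumps iit_assertcnt, iit_externcnt and iit_legacy_externcnt
 * together, which is why the hold is refused (KERN_FAILURE) unless the
 * process already has at least one legacy external boost to stack it on.
 */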
/*
 * Routine: ipc_importance_task_drop_legacy_external_assertion
 * Purpose:
 *      Drop the legacy external assertion count on the task and
 *      reflect that change to the total external assertion count and
 *      then onto the internal importance count.
 *
 *      If this results in a X->0 change in the internal count,
 *      prepare to update task policy for this task AND
 *      all down-stream tasks that have a similar transition
 *      through inheriting this update.
 * Conditions:
 *      Nothing locked on entry.
 */
kern_return_t
ipc_importance_task_drop_legacy_external_assertion(ipc_importance_task_t task_imp, uint32_t count)
{
    int ret = KERN_SUCCESS;
    task_t target_task;
    uint32_t target_assertcnt;
    uint32_t target_externcnt;
    uint32_t target_legacycnt;

    if (count > 1) {
        return KERN_INVALID_ARGUMENT;
    }

    ipc_importance_lock();
    target_task = task_imp->iit_task;

#if IMPORTANCE_TRACE
    int target_pid = task_pid(target_task);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_DROP | TASK_POLICY_EXTERNAL))) | DBG_FUNC_START,
        proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
#endif

    if (count > IIT_LEGACY_EXTERN(task_imp)) {
        /* Process over-released its boost count - save data for diagnostic printf */
        /* TODO: If count > 1, we should clear out as many external assertions as there are left. */
        target_assertcnt = task_imp->iit_assertcnt;
        target_externcnt = IIT_EXTERN(task_imp);
        target_legacycnt = IIT_LEGACY_EXTERN(task_imp);
        ret = KERN_FAILURE;
    } else {
        /*
         * decrement legacy external count from the top level and reflect
         * into internal for this and all subsequent updates.
         */
        assert(ipc_importance_task_is_any_receiver_type(task_imp));
        assert(IIT_EXTERN(task_imp) >= count);

        task_imp->iit_legacy_externdrop += count;
        task_imp->iit_externdrop += count;

        /* reset extern counters (if appropriate) */
        if (IIT_LEGACY_EXTERN(task_imp) == 0) {
            if (IIT_EXTERN(task_imp) != 0) {
                task_imp->iit_externcnt -= task_imp->iit_legacy_externcnt;
                task_imp->iit_externdrop -= task_imp->iit_legacy_externdrop;
            } else {
                task_imp->iit_externcnt = 0;
                task_imp->iit_externdrop = 0;
            }
            task_imp->iit_legacy_externcnt = 0;
            task_imp->iit_legacy_externdrop = 0;
        }

        /* reflect the drop to the internal assertion count (and effect any importance change) */
        if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_DROP, count)) {
            ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, TRUE);
        }
        ret = KERN_SUCCESS;
    }

#if IMPORTANCE_TRACE
    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_DROP | TASK_POLICY_EXTERNAL))) | DBG_FUNC_END,
        proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
#endif

    ipc_importance_unlock();

    /* delayed printf for user-supplied data failures */
    if (KERN_FAILURE == ret && TASK_NULL != target_task) {
        printf("BUG in process %s[%d]: over-released legacy external boost assertions (%d total, %d external, %d legacy-external)\n",
            proc_name_address(target_task->bsd_info), task_pid(target_task),
            target_assertcnt, target_externcnt, target_legacycnt);
    }

    return ret;
}
#if LEGACY_IMPORTANCE_DELIVERY
/* Transfer an assertion to legacy userspace responsibility */
static kern_return_t
ipc_importance_task_externalize_legacy_assertion(ipc_importance_task_t task_imp, uint32_t count, __unused int sender_pid)
{
    task_t target_task;

    assert(IIT_NULL != task_imp);
    target_task = task_imp->iit_task;

    if (TASK_NULL == target_task ||
        !ipc_importance_task_is_any_receiver_type(task_imp)) {
        return KERN_FAILURE;
    }

#if IMPORTANCE_TRACE
    int target_pid = task_pid(target_task);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, IMP_EXTERN)) | DBG_FUNC_START,
        proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
#endif

    ipc_importance_lock();
    /* assert(task_imp->iit_assertcnt >= IIT_EXTERN(task_imp) + count); */
    assert(IIT_EXTERN(task_imp) >= IIT_LEGACY_EXTERN(task_imp));
    task_imp->iit_legacy_externcnt += count;
    task_imp->iit_externcnt += count;
    ipc_importance_unlock();

#if IMPORTANCE_TRACE
    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, IMP_EXTERN)) | DBG_FUNC_END,
        proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
    // This is the legacy boosting path
    DTRACE_BOOST5(receive_boost, task_t, target_task, int, target_pid, int, sender_pid, int, count, int, IIT_LEGACY_EXTERN(task_imp));
#endif /* IMPORTANCE_TRACE */

    return KERN_SUCCESS;
}
#endif /* LEGACY_IMPORTANCE_DELIVERY */
/*
 * Routine: ipc_importance_task_update_live_donor
 * Purpose:
 *      Read the live donor status and update the live_donor bit/propagate the change in importance.
 * Conditions:
 *      Nothing locked on entrance, nothing locked on exit.
 *
 *      TODO: Need tracepoints around this function...
 */
void
ipc_importance_task_update_live_donor(ipc_importance_task_t task_imp)
{
    uint32_t task_live_donor;
    boolean_t before_donor;
    boolean_t after_donor;
    task_t target_task;

    assert(task_imp != NULL);

    /*
     * Nothing to do if the task is not marked as expecting
     * live donor updates.
     */
    if (!ipc_importance_task_is_marked_live_donor(task_imp)) {
        return;
    }

    ipc_importance_lock();

    /* If the task got disconnected on the way here, no use (or ability) adjusting live donor status */
    target_task = task_imp->iit_task;
    if (TASK_NULL == target_task) {
        ipc_importance_unlock();
        return;
    }

    before_donor = ipc_importance_task_is_marked_donor(task_imp);

    /* snapshot task live donor status - may change, but another call will accompany the change */
    task_live_donor = target_task->effective_policy.tep_live_donor;

#if IMPORTANCE_TRACE
    int target_pid = task_pid(target_task);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_UPDATE_LIVE_DONOR_STATE)) | DBG_FUNC_START,
        target_pid, task_imp->iit_donor, task_live_donor, before_donor, 0);
#endif

    /* update the task importance live donor status based on the task's value */
    task_imp->iit_donor = task_live_donor;

    after_donor = ipc_importance_task_is_marked_donor(task_imp);

    /* Has the effectiveness of being a donor changed as a result of this update? */
    if (before_donor != after_donor) {
        iit_update_type_t type;

        /* propagate assertions without updating the current task policy (already handled) */
        if (0 == before_donor) {
            task_imp->iit_transitions++;
            type = IIT_UPDATE_HOLD;
        } else {
            type = IIT_UPDATE_DROP;
        }
        ipc_importance_task_propagate_assertion_locked(task_imp, type, FALSE);
    }

#if IMPORTANCE_TRACE
    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_UPDATE_LIVE_DONOR_STATE)) | DBG_FUNC_END,
        target_pid, task_imp->iit_donor, task_live_donor, after_donor, 0);
#endif

    ipc_importance_unlock();
}
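
/*
 * Live donor updates deliberately skip the current task's own policy
 * (update_task_imp is FALSE above): the task policy engine that changed
 * tep_live_donor has already applied the local effect, so only the
 * downstream inheritance graph needs the resulting HOLD or DROP propagated.
 */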
/*
 * Routine: ipc_importance_task_mark_donor
 * Purpose:
 *      Set the task importance donor flag.
 * Conditions:
 *      Nothing locked on entrance, nothing locked on exit.
 *
 *      This is only called while the task is being constructed,
 *      so no need to update task policy or propagate downstream.
 */
void
ipc_importance_task_mark_donor(ipc_importance_task_t task_imp, boolean_t donating)
{
    assert(task_imp != NULL);

    ipc_importance_lock();

    int old_donor = task_imp->iit_donor;

    task_imp->iit_donor = (donating ? 1 : 0);

    if (task_imp->iit_donor > 0 && old_donor == 0) {
        task_imp->iit_transitions++;
    }

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_INIT_DONOR_STATE)) | DBG_FUNC_NONE,
        task_pid(task_imp->iit_task), donating,
        old_donor, task_imp->iit_donor, 0);

    ipc_importance_unlock();
}
/*
 * Routine: ipc_importance_task_is_marked_donor
 * Purpose:
 *      Query the donor flag for the given task importance.
 * Conditions:
 *      May be called without taking the importance lock.
 *      In that case, donor status can change so you must
 *      check only once for each donation event.
 */
boolean_t
ipc_importance_task_is_marked_donor(ipc_importance_task_t task_imp)
{
    if (IIT_NULL == task_imp) {
        return FALSE;
    }
    return 0 != task_imp->iit_donor;
}
/*
 * Routine: ipc_importance_task_mark_live_donor
 * Purpose:
 *      Indicate that the task is eligible for live donor updates.
 * Conditions:
 *      Nothing locked on entrance, nothing locked on exit.
 *
 *      This is only called while the task is being constructed.
 */
void
ipc_importance_task_mark_live_donor(ipc_importance_task_t task_imp, boolean_t live_donating)
{
    assert(task_imp != NULL);

    ipc_importance_lock();
    task_imp->iit_live_donor = (live_donating ? 1 : 0);
    ipc_importance_unlock();
}
/*
 * Routine: ipc_importance_task_is_marked_live_donor
 * Purpose:
 *      Query the live donor flag for the given task importance.
 * Conditions:
 *      May be called without taking the importance lock.
 *      In that case, donor status can change so you must
 *      check only once for each donation event.
 */
boolean_t
ipc_importance_task_is_marked_live_donor(ipc_importance_task_t task_imp)
{
    if (IIT_NULL == task_imp) {
        return FALSE;
    }
    return 0 != task_imp->iit_live_donor;
}
/*
 * Routine: ipc_importance_task_is_donor
 * Purpose:
 *      Query the full donor status for the given task importance.
 * Conditions:
 *      May be called without taking the importance lock.
 *      In that case, donor status can change so you must
 *      check only once for each donation event.
 */
boolean_t
ipc_importance_task_is_donor(ipc_importance_task_t task_imp)
{
    if (IIT_NULL == task_imp) {
        return FALSE;
    }
    return ipc_importance_task_is_marked_donor(task_imp) ||
           (ipc_importance_task_is_marked_receiver(task_imp) &&
           task_imp->iit_assertcnt > 0);
}
/*
 * Routine: ipc_importance_task_is_never_donor
 * Purpose:
 *      Query if a given task can ever donate importance.
 * Conditions:
 *      May be called without taking the importance lock.
 *      Condition is permanent for a given task.
 */
boolean_t
ipc_importance_task_is_never_donor(ipc_importance_task_t task_imp)
{
    if (IIT_NULL == task_imp) {
        return FALSE;
    }
    return !ipc_importance_task_is_marked_donor(task_imp) &&
           !ipc_importance_task_is_marked_live_donor(task_imp) &&
           !ipc_importance_task_is_marked_receiver(task_imp);
}
/*
 * Routine: ipc_importance_task_mark_receiver
 * Purpose:
 *      Update the task importance receiver flag.
 * Conditions:
 *      Nothing locked on entrance, nothing locked on exit.
 *      This can only be invoked before the task is discoverable,
 *      so no worries about atomicity(?)
 */
void
ipc_importance_task_mark_receiver(ipc_importance_task_t task_imp, boolean_t receiving)
{
    assert(task_imp != NULL);

    ipc_importance_lock();
    if (receiving) {
        assert(task_imp->iit_assertcnt == 0);
        assert(task_imp->iit_externcnt == 0);
        assert(task_imp->iit_externdrop == 0);
        assert(task_imp->iit_denap == 0);
        task_imp->iit_receiver = 1; /* task can receive importance boost */
    } else if (task_imp->iit_receiver) {
        assert(task_imp->iit_denap == 0);
        if (task_imp->iit_assertcnt != 0 || IIT_EXTERN(task_imp) != 0) {
            panic("disabling imp_receiver on task with pending importance boosts!");
        }
        task_imp->iit_receiver = 0;
    }
    ipc_importance_unlock();
}
/*
 * Routine: ipc_importance_task_is_marked_receiver
 * Purpose:
 *      Query the receiver flag for the given task importance.
 * Conditions:
 *      May be called without taking the importance lock as
 *      the importance flag can never change after task init.
 */
boolean_t
ipc_importance_task_is_marked_receiver(ipc_importance_task_t task_imp)
{
    return IIT_NULL != task_imp && 0 != task_imp->iit_receiver;
}
/*
 * Routine: ipc_importance_task_mark_denap_receiver
 * Purpose:
 *      Update the task importance de-nap receiver flag.
 * Conditions:
 *      Nothing locked on entrance, nothing locked on exit.
 *      This can only be invoked before the task is discoverable,
 *      so no worries about atomicity(?)
 */
void
ipc_importance_task_mark_denap_receiver(ipc_importance_task_t task_imp, boolean_t denap)
{
    assert(task_imp != NULL);

    ipc_importance_lock();
    if (denap) {
        assert(task_imp->iit_assertcnt == 0);
        assert(task_imp->iit_externcnt == 0);
        assert(task_imp->iit_receiver == 0);
        task_imp->iit_denap = 1; /* task can receive de-nap boost */
    } else if (task_imp->iit_denap) {
        assert(task_imp->iit_receiver == 0);
        if (0 < task_imp->iit_assertcnt || 0 < IIT_EXTERN(task_imp)) {
            panic("disabling de-nap on task with pending de-nap boosts!");
        }
        task_imp->iit_denap = 0;
    }
    ipc_importance_unlock();
}
/*
 * Routine: ipc_importance_task_is_marked_denap_receiver
 * Purpose:
 *      Query the de-nap receiver flag for the given task importance.
 * Conditions:
 *      May be called without taking the importance lock as
 *      the de-nap flag can never change after task init.
 */
boolean_t
ipc_importance_task_is_marked_denap_receiver(ipc_importance_task_t task_imp)
{
    return IIT_NULL != task_imp && 0 != task_imp->iit_denap;
}
/*
 * Routine: ipc_importance_task_is_denap_receiver
 * Purpose:
 *      Query the full de-nap receiver status for the given task importance.
 *      For now, that is simply whether the receiver flag is set.
 * Conditions:
 *      May be called without taking the importance lock as
 *      the de-nap receiver flag can never change after task init.
 */
boolean_t
ipc_importance_task_is_denap_receiver(ipc_importance_task_t task_imp)
{
    return ipc_importance_task_is_marked_denap_receiver(task_imp);
}
/*
 * Routine: ipc_importance_task_is_any_receiver_type
 * Purpose:
 *      Query if the task is marked to receive boosts - either
 *      importance or denap.
 * Conditions:
 *      May be called without taking the importance lock as both
 *      the importance and de-nap receiver flags can never change
 *      after task init.
 */
boolean_t
ipc_importance_task_is_any_receiver_type(ipc_importance_task_t task_imp)
{
    return ipc_importance_task_is_marked_receiver(task_imp) ||
           ipc_importance_task_is_marked_denap_receiver(task_imp);
}
#if 0 /* currently unused */
/*
 * Routine: ipc_importance_inherit_reference
 * Purpose:
 *      Add a reference to the inherit importance element.
 * Conditions:
 *      Caller must hold a reference on the inherit element.
 */
static void
ipc_importance_inherit_reference(ipc_importance_inherit_t inherit)
{
    ipc_importance_reference(&inherit->iii_elem);
}
#endif /* currently unused */

/*
 * Routine: ipc_importance_inherit_release_locked
 * Purpose:
 *      Release a reference on an inherit importance attribute value,
 *      unlinking and deallocating the attribute if the last reference.
 * Conditions:
 *      Entered with importance lock held, leaves with it unlocked.
 */
static void
ipc_importance_inherit_release_locked(ipc_importance_inherit_t inherit)
{
    ipc_importance_release_locked(&inherit->iii_elem);
}

#if 0 /* currently unused */
/*
 * Routine: ipc_importance_inherit_release
 * Purpose:
 *      Release a reference on an inherit importance attribute value,
 *      unlinking and deallocating the attribute if the last reference.
 * Conditions:
 *      nothing locked on entrance, nothing locked on exit.
 *      May block.
 */
void
ipc_importance_inherit_release(ipc_importance_inherit_t inherit)
{
    if (III_NULL != inherit) {
        ipc_importance_release(&inherit->iii_elem);
    }
}
#endif /* 0 currently unused */
/*
 *	Routine:	ipc_importance_for_task
 *	Purpose:
 *		Create a reference for the specified task's base importance
 *		element.  If the base importance element doesn't exist, make it and
 *		bind it to the active task.  If the task is inactive, there isn't
 *		any need to return a new reference.
 *	Conditions:
 *		If made is true, a "made" reference is returned (for donating to
 *		the voucher system).  Otherwise an internal reference is returned.
 *
 *		Nothing locked on entry.  May block.
 */
ipc_importance_task_t
ipc_importance_for_task(task_t task, boolean_t made)
{
	ipc_importance_task_t task_elem;
	boolean_t first_pass = TRUE;

	assert(TASK_NULL != task);

retry:
	/* No use returning anything for inactive task */
	if (!task->active) {
		return IIT_NULL;
	}

	ipc_importance_lock();
	task_elem = task->task_imp_base;
	if (IIT_NULL != task_elem) {
		/* Add a made reference (borrowing active task ref to do it) */
		if (made) {
			if (0 == task_elem->iit_made++) {
				assert(IIT_REFS_MAX > IIT_REFS(task_elem));
				ipc_importance_task_reference_internal(task_elem);
			}
		} else {
			assert(IIT_REFS_MAX > IIT_REFS(task_elem));
			ipc_importance_task_reference_internal(task_elem);
		}
		ipc_importance_unlock();
		return task_elem;
	}
	ipc_importance_unlock();

	if (!first_pass) {
		return IIT_NULL;
	}
	first_pass = FALSE;

	/* Need to make one - may race with others (be prepared to drop) */
	task_elem = zalloc_flags(ipc_importance_task_zone, Z_WAITOK | Z_ZERO);
	if (IIT_NULL == task_elem) {
		return IIT_NULL;
	}

	task_elem->iit_bits = IIE_TYPE_TASK | 2; /* one for task, one for return/made */
	task_elem->iit_made = (made) ? 1 : 0;
	task_elem->iit_task = task; /* take actual ref when we're sure */
#if IIE_REF_DEBUG
	ipc_importance_counter_init(&task_elem->iit_elem);
#endif
	queue_init(&task_elem->iit_kmsgs);
	queue_init(&task_elem->iit_inherits);

	ipc_importance_lock();
	if (!task->active) {
		ipc_importance_unlock();
		zfree(ipc_importance_task_zone, task_elem);
		return IIT_NULL;
	}

	/* did we lose the race? */
	if (IIT_NULL != task->task_imp_base) {
		ipc_importance_unlock();
		zfree(ipc_importance_task_zone, task_elem);
		goto retry;
	}

	/* we won the race */
	task->task_imp_base = task_elem;
	task_reference(task);
#if DEVELOPMENT || DEBUG
	queue_enter(&global_iit_alloc_queue, task_elem, ipc_importance_task_t, iit_allocation);
	task_importance_update_owner_info(task);
#endif /* DEVELOPMENT || DEBUG */
	ipc_importance_unlock();

	return task_elem;
}

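/*
 * Usage sketch (illustrative, not an exhaustive list of callers): the voucher
 * attribute manager's MACH_VOUCHER_ATTR_IMPORTANCE_SELF handler below calls
 * ipc_importance_for_task(current_task(), TRUE) to hand a "made" reference to
 * the voucher system, while internal paths such as
 * ipc_importance_inherit_from_kmsg() pass made == FALSE and keep an internal
 * reference instead.
 */
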
#if DEVELOPMENT || DEBUG
void
task_importance_update_owner_info(task_t task)
{
	if (task != TASK_NULL && task->task_imp_base != IIT_NULL) {
		ipc_importance_task_t task_elem = task->task_imp_base;

		task_elem->iit_bsd_pid = task_pid(task);
		if (task->bsd_info) {
			strncpy(&task_elem->iit_procname[0], proc_name_address(task->bsd_info), 16);
			task_elem->iit_procname[16] = '\0';
		} else {
			strncpy(&task_elem->iit_procname[0], "unknown", 16);
		}
	}
}
#endif /* DEVELOPMENT || DEBUG */

int
task_importance_task_get_pid(ipc_importance_task_t iit)
{
#if DEVELOPMENT || DEBUG
	return (int)iit->iit_bsd_pid;
#else
	return task_pid(iit->iit_task);
#endif
}

/*
 *	Routine:	ipc_importance_reset_locked
 *	Purpose:
 *		Reset a task's IPC importance (the task is going away or exec'ing)
 *
 *		Remove the donor bit and legacy externalized assertions from the
 *		current task importance and see if that wipes out downstream donations.
 *	Conditions:
 *		importance lock held.
 */
static void
ipc_importance_reset_locked(ipc_importance_task_t task_imp, boolean_t donor)
{
	boolean_t before_donor, after_donor;

	/* remove the donor bit, live-donor bit and externalized boosts */
	before_donor = ipc_importance_task_is_donor(task_imp);
	if (donor) {
		task_imp->iit_donor = 0;
	}
	assert(IIT_LEGACY_EXTERN(task_imp) <= IIT_EXTERN(task_imp));
	assert(task_imp->iit_legacy_externcnt <= task_imp->iit_externcnt);
	assert(task_imp->iit_legacy_externdrop <= task_imp->iit_externdrop);
	task_imp->iit_externcnt -= task_imp->iit_legacy_externcnt;
	task_imp->iit_externdrop -= task_imp->iit_legacy_externdrop;

	/* assert(IIT_LEGACY_EXTERN(task_imp) <= task_imp->iit_assertcnt); */
	if (IIT_EXTERN(task_imp) < task_imp->iit_assertcnt) {
		task_imp->iit_assertcnt -= IIT_LEGACY_EXTERN(task_imp);
	} else {
		task_imp->iit_assertcnt = IIT_EXTERN(task_imp);
	}
	task_imp->iit_legacy_externcnt = 0;
	task_imp->iit_legacy_externdrop = 0;
	after_donor = ipc_importance_task_is_donor(task_imp);

	/* propagate a downstream drop if there was a change in donor status */
	if (after_donor != before_donor) {
		ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, FALSE);
	}
}

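/*
 * Worked example (hypothetical counts): with iit_externcnt == 5,
 * iit_externdrop == 1, iit_legacy_externcnt == 2 and iit_legacy_externdrop == 1,
 * the reset above leaves three external boosts (externcnt 3, externdrop 0);
 * iit_assertcnt then gives up the one net legacy assertion if it still exceeds
 * IIT_EXTERN(), and is otherwise clamped down to IIT_EXTERN().
 */
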
/*
 *	Routine:	ipc_importance_reset
 *	Purpose:
 *		Reset a task's IPC importance
 *
 *		The task is being reset, although staying around.  Arrange to have the
 *		external state of the task reset from the importance.
 *	Conditions:
 *		importance lock not held.
 */
void
ipc_importance_reset(ipc_importance_task_t task_imp, boolean_t donor)
{
	if (IIT_NULL == task_imp) {
		return;
	}
	ipc_importance_lock();
	ipc_importance_reset_locked(task_imp, donor);
	ipc_importance_unlock();
}

/*
 *	Routine:	ipc_importance_disconnect_task
 *	Purpose:
 *		Disconnect a task from its importance.
 *
 *		Clear the task pointer from the importance and drop the
 *		reference the task held on the importance object.  Before
 *		doing that, reset the effects the current task holds on
 *		the importance and see if that wipes out downstream donations.
 *
 *		We allow the upstream boosts to continue to affect downstream
 *		even though the local task is being effectively pulled from
 *		the chain.
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_importance_disconnect_task(task_t task)
{
	ipc_importance_task_t task_imp;

	task_lock(task);
	ipc_importance_lock();
	task_imp = task->task_imp_base;

	/* did somebody beat us to it? */
	if (IIT_NULL == task_imp) {
		ipc_importance_unlock();
		task_unlock(task);
		return;
	}

	/* disconnect the task from this importance */
	assert(task_imp->iit_task == task);
	task_imp->iit_task = TASK_NULL;
	task->task_imp_base = IIT_NULL;
	task_unlock(task);

	/* reset the effects the current task holds on the importance */
	ipc_importance_reset_locked(task_imp, TRUE);

	ipc_importance_task_release_locked(task_imp);
	/* importance unlocked */

	/* deallocate the task now that the importance is unlocked */
	task_deallocate(task);
}

/*
 *	Routine:	ipc_importance_exec_switch_task
 *	Purpose:
 *		Switch importance task base from old task to new task in exec.
 *
 *		Create an ipc importance linkage from old task to new task,
 *		once the linkage is created, switch the importance task base
 *		from old task to new task.  After the switch, the linkage will
 *		represent importance linkage from new task to old task with
 *		watch port importance inheritance linked to new task.
 *	Conditions:
 *		Nothing locked.
 *		Returns a reference on importance inherit.
 */
ipc_importance_inherit_t
ipc_importance_exec_switch_task(
	task_t old_task,
	task_t new_task)
{
	ipc_importance_inherit_t inherit = III_NULL;
	ipc_importance_task_t old_task_imp = IIT_NULL;
	ipc_importance_task_t new_task_imp = IIT_NULL;

	task_importance_reset(old_task);

	/* Create an importance linkage from old_task to new_task */
	inherit = ipc_importance_inherit_from_task(old_task, new_task);

	/* Switch task importance base from old task to new task */
	ipc_importance_lock();

	old_task_imp = old_task->task_imp_base;
	new_task_imp = new_task->task_imp_base;

	old_task_imp->iit_task = new_task;
	new_task_imp->iit_task = old_task;

	old_task->task_imp_base = new_task_imp;
	new_task->task_imp_base = old_task_imp;

#if DEVELOPMENT || DEBUG
	/*
	 * Update the pid and proc name for importance base if any
	 */
	task_importance_update_owner_info(new_task);
#endif
	ipc_importance_unlock();

	return inherit;
}

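/*
 * After the swap above, boosts already routed to the pre-exec task's
 * importance element keep landing on the image that replaces it (the two
 * base elements trade owners), while the inherit returned to the caller
 * links the new task back to the old one for watchport inheritance, as
 * described in the routine header.
 */
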
/*
 *	Routine:	ipc_importance_check_circularity
 *	Purpose:
 *		Check if queueing "port" in a message for "dest"
 *		would create a circular group of ports and messages.
 *
 *		If no circularity (FALSE returned), then "port"
 *		is changed from "in limbo" to "in transit".
 *
 *		That is, we want to set port->ip_destination == dest,
 *		but guaranteeing that this doesn't create a circle
 *		port->ip_destination->ip_destination->... == port
 *
 *		Additionally, if port was successfully changed to "in transit",
 *		propagate boost assertions from the "in limbo" port to all
 *		the ports in the chain, and, if the destination task accepts
 *		boosts, to the destination task.
 *	Conditions:
 *		No ports locked.  References held for "port" and "dest".
 */
boolean_t
ipc_importance_check_circularity(
	ipc_port_t      port,
	ipc_port_t      dest)
{
	ipc_importance_task_t imp_task = IIT_NULL;
	ipc_importance_task_t release_imp_task = IIT_NULL;
	boolean_t imp_lock_held = FALSE;
	int assertcnt = 0;
	ipc_port_t base;
	struct turnstile *send_turnstile = TURNSTILE_NULL;
	struct task_watchport_elem *watchport_elem = NULL;
	bool took_base_ref = false;

	assert(port != IP_NULL);
	assert(dest != IP_NULL);

	if (port == dest) {
		return TRUE;
	}
	base = dest;

	/* Check if destination needs a turnstile */
	ipc_port_send_turnstile_prepare(dest);

	/* port is in limbo, so donation status is safe to latch */
	if (port->ip_impdonation != 0) {
		imp_lock_held = TRUE;
		ipc_importance_lock();
	}

	/*
	 * First try a quick check that can run in parallel.
	 * No circularity if dest is not in transit.
	 */
	ip_lock(port);

	/*
	 * Even if port is just carrying assertions for others,
	 * we need the importance lock.
	 */
	if (port->ip_impcount > 0 && !imp_lock_held) {
		if (!ipc_importance_lock_try()) {
			ip_unlock(port);
			ipc_importance_lock();
			ip_lock(port);
		}
		imp_lock_held = TRUE;
	}

	if (ip_lock_try(dest)) {
		if (!ip_active(dest) ||
		    (dest->ip_receiver_name != MACH_PORT_NULL) ||
		    (dest->ip_destination == IP_NULL)) {
			goto not_circular;
		}

		/* dest is in transit; further checking necessary */

		ip_unlock(dest);
	}
	ip_unlock(port);

	/*
	 * We're about to pay the cost to serialize,
	 * just go ahead and grab importance lock.
	 */
	if (!imp_lock_held) {
		ipc_importance_lock();
		imp_lock_held = TRUE;
	}

	ipc_port_multiple_lock(); /* massive serialization */

	took_base_ref = ipc_port_destination_chain_lock(dest, &base);
	/* all ports in chain from dest to base, inclusive, are locked */

	if (port == base) {
		/* circularity detected! */

		ipc_port_multiple_unlock();

		/* port (== base) is in limbo */

		require_ip_active(port);
		assert(port->ip_receiver_name == MACH_PORT_NULL);
		assert(port->ip_destination == IP_NULL);
		assert(!took_base_ref);

		base = dest;
		while (base != IP_NULL) {
			ipc_port_t next;

			/* base is in transit or in limbo */

			require_ip_active(base);
			assert(base->ip_receiver_name == MACH_PORT_NULL);

			next = base->ip_destination;
			ip_unlock(base);
			base = next;
		}

		if (imp_lock_held) {
			ipc_importance_unlock();
		}

		ipc_port_send_turnstile_complete(dest);
		return TRUE;
	}

	/*
	 * The guarantee: lock port while the entire chain is locked.
	 * Once port is locked, we can take a reference to dest,
	 * add port to the chain, and unlock everything.
	 */

	ip_lock(port);
	ipc_port_multiple_unlock();

not_circular:
	/* port is in limbo */
	imq_lock(&port->ip_messages);

	require_ip_active(port);
	assert(port->ip_receiver_name == MACH_PORT_NULL);
	assert(port->ip_destination == IP_NULL);

	/* Port is being enqueued in a kmsg, remove the watchport boost in order to push on destination port */
	watchport_elem = ipc_port_clear_watchport_elem_internal(port);

	/* Check if the port is being enqueued as a part of sync bootstrap checkin */
	if (dest->ip_specialreply && dest->ip_sync_bootstrap_checkin) {
		port->ip_sync_bootstrap_checkin = 1;
	}

	ip_reference(dest);
	port->ip_destination = dest;

	/* must have been in limbo or still bound to a task */
	assert(port->ip_tempowner != 0);

	/*
	 * We delayed dropping assertions from a specific task.
	 * Cache that info now (we'll drop assertions and the
	 * task reference below).
	 */
	release_imp_task = port->ip_imp_task;
	if (IIT_NULL != release_imp_task) {
		port->ip_imp_task = IIT_NULL;
	}
	assertcnt = port->ip_impcount;

	/* take the port out of limbo w.r.t. assertions */
	port->ip_tempowner = 0;

	/*
	 * Setup linkage for source port if it has a send turnstile i.e. it has
	 * a thread waiting in send or has a port enqueued in it or has sync ipc
	 * push from a special reply port.
	 */
	if (port_send_turnstile(port)) {
		send_turnstile = turnstile_prepare((uintptr_t)port,
		    port_send_turnstile_address(port),
		    TURNSTILE_NULL, TURNSTILE_SYNC_IPC);

		turnstile_update_inheritor(send_turnstile, port_send_turnstile(dest),
		    (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE));

		/* update complete and turnstile complete called after dropping all locks */
	}
	imq_unlock(&port->ip_messages);

	/* now unlock chain */

	ip_unlock(port);

	for (;;) {
		ipc_port_t next;

		/* every port along chain track assertions behind it */
		ipc_port_impcount_delta(dest, assertcnt, base);

		if (dest == base) {
			break;
		}

		/* port is in transit */

		require_ip_active(dest);
		assert(dest->ip_receiver_name == MACH_PORT_NULL);
		assert(dest->ip_destination != IP_NULL);
		assert(dest->ip_tempowner == 0);

		next = dest->ip_destination;
		ip_unlock(dest);
		dest = next;
	}

	/* base is not in transit */
	assert(!ip_active(base) ||
	    (base->ip_receiver_name != MACH_PORT_NULL) ||
	    (base->ip_destination == IP_NULL));

	/*
	 * Find the task to boost (if any).
	 * We will boost "through" ports that don't know
	 * about inheritance to deliver receive rights that
	 * do.
	 */
	if (ip_active(base) && (assertcnt > 0)) {
		assert(imp_lock_held);
		if (base->ip_tempowner != 0) {
			if (IIT_NULL != base->ip_imp_task) {
				/* specified tempowner task */
				imp_task = base->ip_imp_task;
				assert(ipc_importance_task_is_any_receiver_type(imp_task));
			}
			/* otherwise don't boost current task */
		} else if (base->ip_receiver_name != MACH_PORT_NULL) {
			ipc_space_t space = base->ip_receiver;

			/* only spaces with boost-accepting tasks */
			if (space->is_task != TASK_NULL &&
			    ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) {
				imp_task = space->is_task->task_imp_base;
			}
		}

		/* take reference before unlocking base */
		if (imp_task != IIT_NULL) {
			ipc_importance_task_reference(imp_task);
		}
	}

	ip_unlock(base);
	if (took_base_ref) {
		ip_release(base);
	}

	/* All locks dropped, call turnstile_update_inheritor_complete for source port's turnstile */
	if (send_turnstile) {
		turnstile_update_inheritor_complete(send_turnstile, TURNSTILE_INTERLOCK_NOT_HELD);

		/* Take the mq lock to call turnstile complete */
		imq_lock(&port->ip_messages);
		turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), NULL, TURNSTILE_SYNC_IPC);
		send_turnstile = TURNSTILE_NULL;
		imq_unlock(&port->ip_messages);
		turnstile_cleanup();
	}

	/*
	 * Transfer assertions now that the ports are unlocked.
	 * Avoid extra overhead if transferring to/from the same task.
	 *
	 * NOTE: If a transfer is occurring, the new assertions will
	 * be added to imp_task BEFORE the importance lock is unlocked.
	 * This is critical - to avoid decrements coming from the kmsgs
	 * beating the increment to the task.
	 */
	boolean_t transfer_assertions = (imp_task != release_imp_task);

	if (imp_task != IIT_NULL) {
		assert(imp_lock_held);
		if (transfer_assertions) {
			ipc_importance_task_hold_internal_assertion_locked(imp_task, assertcnt);
		}
	}

	if (release_imp_task != IIT_NULL) {
		assert(imp_lock_held);
		if (transfer_assertions) {
			ipc_importance_task_drop_internal_assertion_locked(release_imp_task, assertcnt);
		}
	}

	if (imp_lock_held) {
		ipc_importance_unlock();
	}

	if (imp_task != IIT_NULL) {
		ipc_importance_task_release(imp_task);
	}

	if (release_imp_task != IIT_NULL) {
		ipc_importance_task_release(release_imp_task);
	}

	if (watchport_elem) {
		task_watchport_elem_deallocate(watchport_elem);
	}

	return FALSE;
}

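/*
 * Illustrative walk-through (hypothetical chain): if "port" carries three
 * assertions and is enqueued toward a "dest" whose destination chain is
 * dest -> A -> base, the loop above credits dest, A and base with those three
 * assertions via ipc_port_impcount_delta().  If base is still active and its
 * receiving task accepts boosts, that task then gains three internal
 * assertions while any task cached in port->ip_imp_task gives up the same
 * three (the transfer is skipped when both are the same task), all before
 * the importance lock is released.
 */
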
/*
 *	Routine:	ipc_importance_send
 *	Purpose:
 *		Post the importance voucher attribute [if sent] or a static
 *		importance boost depending upon options and conditions.
 *	Conditions:
 *		Destination port locked on entry and exit, may be dropped during the call.
 *	Returns:
 *		A boolean identifying if the port lock was temporarily dropped.
 */
boolean_t
ipc_importance_send(
	ipc_kmsg_t              kmsg,
	mach_msg_option_t       option)
{
	ipc_port_t port = kmsg->ikm_header->msgh_remote_port;
	boolean_t port_lock_dropped = FALSE;
	ipc_importance_elem_t elem;
	task_t task;
	ipc_importance_task_t task_imp;
	kern_return_t kr;

	assert(IP_VALID(port));

	/* If no donation to be made, return quickly */
	if ((port->ip_impdonation == 0) ||
	    (option & MACH_SEND_NOIMPORTANCE) != 0) {
		return port_lock_dropped;
	}

	task = current_task();

	/* If forced sending a static boost, go update the port */
	if ((option & MACH_SEND_IMPORTANCE) != 0) {
		/* acquire the importance lock while trying to hang on to port lock */
		if (!ipc_importance_lock_try()) {
			port_lock_dropped = TRUE;
			ip_unlock(port);
			ipc_importance_lock();
		}
		goto portupdate;
	}

	task_imp = task->task_imp_base;
	assert(IIT_NULL != task_imp);

	/* If the sender can never donate importance, nothing to do */
	if (ipc_importance_task_is_never_donor(task_imp)) {
		return port_lock_dropped;
	}

	elem = IIE_NULL;

	/* If importance receiver and passing a voucher, look for importance in there */
	if (IP_VALID(kmsg->ikm_voucher) &&
	    ipc_importance_task_is_marked_receiver(task_imp)) {
		mach_voucher_attr_value_handle_t vals[MACH_VOUCHER_ATTR_VALUE_MAX_NESTED];
		mach_voucher_attr_value_handle_array_size_t val_count;
		ipc_voucher_t voucher;

		assert(ip_kotype(kmsg->ikm_voucher) == IKOT_VOUCHER);
		voucher = (ipc_voucher_t)ip_get_kobject(kmsg->ikm_voucher);

		/* check to see if the voucher has an importance attribute */
		val_count = MACH_VOUCHER_ATTR_VALUE_MAX_NESTED;
		kr = mach_voucher_attr_control_get_values(ipc_importance_control, voucher,
		    vals, &val_count);
		assert(KERN_SUCCESS == kr);

		/*
		 * Only use importance associated with our task (either directly
		 * or through an inherit that donates to our task).
		 */
		if (0 < val_count) {
			ipc_importance_elem_t check_elem;

			check_elem = (ipc_importance_elem_t)vals[0];
			assert(IIE_NULL != check_elem);
			if (IIE_TYPE_INHERIT == IIE_TYPE(check_elem)) {
				ipc_importance_inherit_t inherit;
				inherit = (ipc_importance_inherit_t) check_elem;
				if (inherit->iii_to_task == task_imp) {
					elem = check_elem;
				}
			} else if (check_elem == (ipc_importance_elem_t)task_imp) {
				elem = check_elem;
			}
		}
	}

	/* If we haven't found an importance attribute to send yet, use the task's */
	if (IIE_NULL == elem) {
		elem = (ipc_importance_elem_t)task_imp;
	}

	/* take a reference for the message to hold */
	ipc_importance_reference_internal(elem);

	/* acquire the importance lock while trying to hang on to port lock */
	if (!ipc_importance_lock_try()) {
		port_lock_dropped = TRUE;
		ip_unlock(port);
		ipc_importance_lock();
	}

	/* link kmsg onto the donor element propagation chain */
	ipc_importance_kmsg_link(kmsg, elem);
	/* elem reference transferred to kmsg */

	incr_ref_counter(elem->iie_kmsg_refs_added);

	/* If the sender isn't currently a donor, no need to apply boost */
	if (!ipc_importance_task_is_donor(task_imp)) {
		ipc_importance_unlock();

		/* re-acquire port lock, if needed */
		if (TRUE == port_lock_dropped) {
			ip_lock(port);
		}

		return port_lock_dropped;
	}

portupdate:
	/* Mark the fact that we are (currently) donating through this message */
	kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;

	/*
	 * If we need to relock the port, do it with the importance still locked.
	 * This assures we get to add the importance boost through the port to
	 * the task BEFORE anyone else can attempt to undo that operation if
	 * the sender lost donor status.
	 */
	if (TRUE == port_lock_dropped) {
		ip_lock(port);
	}

	ipc_importance_assert_held();

#if IMPORTANCE_TRACE
	if (kdebug_enable) {
		mach_msg_max_trailer_t *dbgtrailer = (mach_msg_max_trailer_t *)
		    ((vm_offset_t)kmsg->ikm_header + mach_round_msg(kmsg->ikm_header->msgh_size));
		unsigned int sender_pid = dbgtrailer->msgh_audit.val[5];
		mach_msg_id_t imp_msgh_id = kmsg->ikm_header->msgh_id;
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_SEND)) | DBG_FUNC_START,
		    task_pid(task), sender_pid, imp_msgh_id, 0, 0);
	}
#endif /* IMPORTANCE_TRACE */

	mach_port_delta_t delta = 1;
	boolean_t need_port_lock;
	task_imp = IIT_NULL;

	/* adjust port boost count (with importance and port locked) */
	need_port_lock = ipc_port_importance_delta_internal(port, IPID_OPTION_NORMAL, &delta, &task_imp);
	/* hold a reference on task_imp */

	/* if we need to adjust a task importance as a result, apply that here */
	if (IIT_NULL != task_imp && delta != 0) {
		assert(delta == 1);

		/* if this results in a change of state, propagate the transition */
		if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_HOLD, delta)) {
			/* can't hold the port lock during task transition(s) */
			if (!need_port_lock) {
				need_port_lock = TRUE;
				ip_unlock(port);
			}
			ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_HOLD, TRUE);
		}
	}

	if (IIT_NULL != task_imp) {
		ipc_importance_task_release_locked(task_imp);
		/* importance unlocked */
	} else {
		ipc_importance_unlock();
	}

	if (need_port_lock) {
		port_lock_dropped = TRUE;
		ip_lock(port);
	}

	return port_lock_dropped;
}

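/*
 * Option handling summary for the routine above: MACH_SEND_NOIMPORTANCE, or a
 * port that does not accept donation, returns immediately with no boost;
 * MACH_SEND_IMPORTANCE skips the voucher lookup and forces the static boost
 * path; otherwise the boost rides on the kmsg linkage created by
 * ipc_importance_kmsg_link() and only raises the port/task counts if the
 * sender is currently a donor.
 */
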
/*
 *	Routine:	ipc_importance_inherit_from_kmsg
 *	Purpose:
 *		Create a "made" reference for an importance attribute representing
 *		an inheritance between the sender of a message (if linked) and the
 *		current task importance.  If the message is not linked, a static
 *		boost may be created, based on the boost state of the message.
 *
 *		Any transfer from kmsg linkage to inherit linkage must be atomic.
 *
 *		If the task is inactive, there isn't any need to return a new reference.
 *	Conditions:
 *		Nothing locked on entry.  May block.
 */
static ipc_importance_inherit_t
ipc_importance_inherit_from_kmsg(ipc_kmsg_t kmsg)
{
	ipc_importance_task_t task_imp = IIT_NULL;
	ipc_importance_elem_t from_elem = kmsg->ikm_importance;
	ipc_importance_elem_t elem;
	task_t task_self = current_task();

	ipc_port_t port = kmsg->ikm_header->msgh_remote_port;
	ipc_importance_inherit_t inherit = III_NULL;
	ipc_importance_inherit_t alloc = III_NULL;
	boolean_t cleared_self_donation = FALSE;
	boolean_t donating;
	uint32_t depth = 1;

	/* The kmsg must have an importance donor or static boost to proceed */
	if (IIE_NULL == kmsg->ikm_importance &&
	    !MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) {
		return III_NULL;
	}

	/*
	 * No need to set up an inherit linkage if the dest isn't a receiver
	 * of one type or the other.
	 */
	if (!ipc_importance_task_is_any_receiver_type(task_self->task_imp_base)) {
		ipc_importance_lock();
		goto out_locked;
	}

	/* Grab a reference on the importance of the destination */
	task_imp = ipc_importance_for_task(task_self, FALSE);

	ipc_importance_lock();

	if (IIT_NULL == task_imp) {
		goto out_locked;
	}

	incr_ref_counter(task_imp->iit_elem.iie_task_refs_added_inherit_from);

	/* If message is already associated with an inherit... */
	if (IIE_TYPE_INHERIT == IIE_TYPE(from_elem)) {
		ipc_importance_inherit_t from_inherit = (ipc_importance_inherit_t)from_elem;

		/* already targeting our task? - just use it */
		if (from_inherit->iii_to_task == task_imp) {
			/* clear self-donation if not also present in inherit */
			if (!from_inherit->iii_donating &&
			    MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) {
				kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
				cleared_self_donation = TRUE;
			}
			inherit = from_inherit;
		} else if (III_DEPTH_MAX == III_DEPTH(from_inherit)) {
			ipc_importance_task_t to_task;
			ipc_importance_elem_t unlinked_from;

			/*
			 * Chain too long.  Switch to looking
			 * directly at the from_inherit's to-task
			 * as our source of importance.
			 */
			to_task = from_inherit->iii_to_task;
			ipc_importance_task_reference(to_task);
			from_elem = (ipc_importance_elem_t)to_task;
			depth = III_DEPTH_RESET | 1;

			/* Fixup the kmsg linkage to reflect change */
			unlinked_from = ipc_importance_kmsg_unlink(kmsg);
			assert(unlinked_from == (ipc_importance_elem_t)from_inherit);
			ipc_importance_kmsg_link(kmsg, from_elem);
			ipc_importance_inherit_release_locked(from_inherit);
			/* importance unlocked */
			ipc_importance_lock();
		} else {
			/* inheriting from an inherit */
			depth = from_inherit->iii_depth + 1;
		}
	}

	/*
	 * Don't allow a task to inherit from itself (would keep it permanently
	 * boosted even if all other donors to the task went away).
	 */
	if (from_elem == (ipc_importance_elem_t)task_imp) {
		goto out_locked;
	}

	/*
	 * But if the message isn't associated with any linked source, it is
	 * intended to be permanently boosting (static boost from kernel).
	 * In that case DO let the process permanently boost itself.
	 */
	if (IIE_NULL == from_elem) {
		assert(MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits));
		ipc_importance_task_reference_internal(task_imp);
		from_elem = (ipc_importance_elem_t)task_imp;
	}

	/*
	 * Now that we have the from_elem figured out,
	 * check to see if we already have an inherit for this pairing
	 */
	while (III_NULL == inherit) {
		inherit = ipc_importance_inherit_find(from_elem, task_imp, depth);

		/* Do we have to allocate a new inherit */
		if (III_NULL == inherit) {
			if (III_NULL != alloc) {
				break;
			}

			/* allocate space */
			ipc_importance_unlock();
			alloc = (ipc_importance_inherit_t)
			    zalloc(ipc_importance_inherit_zone);
			ipc_importance_lock();
		}
	}

	/* snapshot the donating status while we have importance locked */
	donating = MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits);

	if (III_NULL != inherit) {
		/* We found one, piggyback on that */
		assert(0 < III_REFS(inherit));
		assert(0 < IIE_REFS(inherit->iii_from_elem));
		assert(inherit->iii_externcnt >= inherit->iii_made);

		/* add in a made reference */
		if (0 == inherit->iii_made++) {
			assert(III_REFS_MAX > III_REFS(inherit));
			ipc_importance_inherit_reference_internal(inherit);
		}

		/* Reflect the inherit's change of status into the task boosts */
		if (0 == III_EXTERN(inherit)) {
			assert(!inherit->iii_donating);
			inherit->iii_donating = donating;
			if (donating) {
				task_imp->iit_externcnt += inherit->iii_externcnt;
				task_imp->iit_externdrop += inherit->iii_externdrop;
			}
		} else {
			assert(donating == inherit->iii_donating);
		}

		/* add in a external reference for this use of the inherit */
		inherit->iii_externcnt++;
	} else {
		/* initialize the previously allocated space */
		inherit = alloc;
		inherit->iii_bits = IIE_TYPE_INHERIT | 1;
		inherit->iii_made = 1;
		inherit->iii_externcnt = 1;
		inherit->iii_externdrop = 0;
		inherit->iii_depth = depth;
		inherit->iii_to_task = task_imp;
		inherit->iii_from_elem = IIE_NULL;
		queue_init(&inherit->iii_kmsgs);

		if (donating) {
			inherit->iii_donating = TRUE;
		} else {
			inherit->iii_donating = FALSE;
		}

		/*
		 * Chain our new inherit on the element it inherits from.
		 * The new inherit takes our reference on from_elem.
		 */
		ipc_importance_inherit_link(inherit, from_elem);

#if IIE_REF_DEBUG
		ipc_importance_counter_init(&inherit->iii_elem);
		from_elem->iie_kmsg_refs_inherited++;
		task_imp->iit_elem.iie_task_refs_inherited++;
#endif
	}

out_locked:
	/*
	 * for those paths that came straight here: snapshot the donating status
	 * (this should match previous snapshot for other paths).
	 */
	donating = MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits);

	/* unlink the kmsg inheritance (if any) */
	elem = ipc_importance_kmsg_unlink(kmsg);
	assert(elem == from_elem);

	/* If found inherit and donating, reflect that in the task externcnt */
	if (III_NULL != inherit && donating) {
		task_imp->iit_externcnt++;
		/* The owner of receive right might have changed, take the internal assertion */
		ipc_importance_task_hold_internal_assertion_locked(task_imp, 1);
		/* may have dropped and retaken importance lock */
	}

	/* If we didn't create a new inherit, we have some resources to release */
	if (III_NULL == inherit || inherit != alloc) {
		if (IIE_NULL != from_elem) {
			if (III_NULL != inherit) {
				incr_ref_counter(from_elem->iie_kmsg_refs_coalesced);
			} else {
				incr_ref_counter(from_elem->iie_kmsg_refs_dropped);
			}
			ipc_importance_release_locked(from_elem);
			/* importance unlocked */
		} else {
			ipc_importance_unlock();
		}

		if (IIT_NULL != task_imp) {
			if (III_NULL != inherit) {
				incr_ref_counter(task_imp->iit_elem.iie_task_refs_coalesced);
			}
			ipc_importance_task_release(task_imp);
		}

		if (III_NULL != alloc) {
			zfree(ipc_importance_inherit_zone, alloc);
		}
	} else {
		/* from_elem and task_imp references transferred to new inherit */
		ipc_importance_unlock();
	}

	/*
	 * decrement port boost count
	 * This is OK to do without the importance lock as we atomically
	 * unlinked the kmsg and snapshot the donating state while holding
	 * the importance lock
	 */
	if (donating || cleared_self_donation) {
		ip_lock(port);
		/* drop importance from port and destination task */
		if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == FALSE) {
			ip_unlock(port);
		}
	}

	if (III_NULL != inherit) {
		/* have an associated importance attr, even if currently not donating */
		kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;
	} else {
		/* we won't have an importance attribute associated with our message */
		kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
	}

	return inherit;
}

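/*
 * Reference accounting recap for the inherits created above: iii_made counts
 * "made" references handed to the voucher system, while iii_externcnt and
 * iii_externdrop track externalized boosts.  Only while iii_donating is set
 * do those external counts also appear in the to-task's iit_externcnt and
 * iit_externdrop, which is why the coalescing path folds them into the task
 * when an existing non-donating inherit becomes donating.
 */
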
/*
 *	Routine:	ipc_importance_inherit_from_task
 *	Purpose:
 *		Create a reference for an importance attribute representing
 *		an inheritance between the to_task and from_task.  The iii
 *		created will be marked as III_FLAGS_FOR_OTHERS.
 *
 *		It will not dedup any iii which are not marked as III_FLAGS_FOR_OTHERS.
 *
 *		If the task is inactive, there isn't any need to return a new reference.
 *	Conditions:
 *		Nothing locked on entry.  May block.
 *		It should not be called from voucher subsystem.
 */
static ipc_importance_inherit_t
ipc_importance_inherit_from_task(
	task_t from_task,
	task_t to_task)
{
	ipc_importance_task_t to_task_imp = IIT_NULL;
	ipc_importance_task_t from_task_imp = IIT_NULL;
	ipc_importance_elem_t from_elem = IIE_NULL;

	ipc_importance_inherit_t inherit = III_NULL;
	ipc_importance_inherit_t alloc = III_NULL;
	boolean_t donating;
	uint32_t depth = 1;

	to_task_imp = ipc_importance_for_task(to_task, FALSE);
	from_task_imp = ipc_importance_for_task(from_task, FALSE);
	from_elem = (ipc_importance_elem_t)from_task_imp;

	ipc_importance_lock();

	if (IIT_NULL == to_task_imp || IIT_NULL == from_task_imp) {
		goto out_locked;
	}

	/*
	 * No need to set up an inherit linkage if the to_task or from_task
	 * isn't a receiver of one type or the other.
	 */
	if (!ipc_importance_task_is_any_receiver_type(to_task_imp) ||
	    !ipc_importance_task_is_any_receiver_type(from_task_imp)) {
		goto out_locked;
	}

	/* Do not allow to create a linkage to self */
	if (to_task_imp == from_task_imp) {
		goto out_locked;
	}

	incr_ref_counter(to_task_imp->iit_elem.iie_task_refs_added_inherit_from);
	incr_ref_counter(from_elem->iie_kmsg_refs_added);

	/*
	 * Now that we have the from_elem figured out,
	 * check to see if we already have an inherit for this pairing
	 */
	while (III_NULL == inherit) {
		inherit = ipc_importance_inherit_find(from_elem, to_task_imp, depth);

		/* Do we have to allocate a new inherit */
		if (III_NULL == inherit) {
			if (III_NULL != alloc) {
				break;
			}

			/* allocate space */
			ipc_importance_unlock();
			alloc = (ipc_importance_inherit_t)
			    zalloc(ipc_importance_inherit_zone);
			ipc_importance_lock();
		}
	}

	/* snapshot the donating status while we have importance locked */
	donating = ipc_importance_task_is_donor(from_task_imp);

	if (III_NULL != inherit) {
		/* We found one, piggyback on that */
		assert(0 < III_REFS(inherit));
		assert(0 < IIE_REFS(inherit->iii_from_elem));

		/* Take a reference for inherit */
		assert(III_REFS_MAX > III_REFS(inherit));
		ipc_importance_inherit_reference_internal(inherit);

		/* Reflect the inherit's change of status into the task boosts */
		if (0 == III_EXTERN(inherit)) {
			assert(!inherit->iii_donating);
			inherit->iii_donating = donating;
			if (donating) {
				to_task_imp->iit_externcnt += inherit->iii_externcnt;
				to_task_imp->iit_externdrop += inherit->iii_externdrop;
			}
		} else {
			assert(donating == inherit->iii_donating);
		}

		/* add in a external reference for this use of the inherit */
		inherit->iii_externcnt++;
	} else {
		/* initialize the previously allocated space */
		inherit = alloc;
		inherit->iii_bits = IIE_TYPE_INHERIT | 1;
		inherit->iii_made = 0;
		inherit->iii_externcnt = 1;
		inherit->iii_externdrop = 0;
		inherit->iii_depth = depth;
		inherit->iii_to_task = to_task_imp;
		inherit->iii_from_elem = IIE_NULL;
		queue_init(&inherit->iii_kmsgs);

		if (donating) {
			inherit->iii_donating = TRUE;
		} else {
			inherit->iii_donating = FALSE;
		}

		/*
		 * Chain our new inherit on the element it inherits from.
		 * The new inherit takes our reference on from_elem.
		 */
		ipc_importance_inherit_link(inherit, from_elem);

#if IIE_REF_DEBUG
		ipc_importance_counter_init(&inherit->iii_elem);
		from_elem->iie_kmsg_refs_inherited++;
		to_task_imp->iit_elem.iie_task_refs_inherited++;
#endif
	}

out_locked:
	/* If found inherit and donating, reflect that in the task externcnt */
	if (III_NULL != inherit && donating) {
		to_task_imp->iit_externcnt++;
		/* take the internal assertion */
		ipc_importance_task_hold_internal_assertion_locked(to_task_imp, 1);
		/* may have dropped and retaken importance lock */
	}

	/* If we didn't create a new inherit, we have some resources to release */
	if (III_NULL == inherit || inherit != alloc) {
		if (IIE_NULL != from_elem) {
			if (III_NULL != inherit) {
				incr_ref_counter(from_elem->iie_kmsg_refs_coalesced);
			} else {
				incr_ref_counter(from_elem->iie_kmsg_refs_dropped);
			}
			ipc_importance_release_locked(from_elem);
			/* importance unlocked */
		} else {
			ipc_importance_unlock();
		}

		if (IIT_NULL != to_task_imp) {
			if (III_NULL != inherit) {
				incr_ref_counter(to_task_imp->iit_elem.iie_task_refs_coalesced);
			}
			ipc_importance_task_release(to_task_imp);
		}

		if (III_NULL != alloc) {
			zfree(ipc_importance_inherit_zone, alloc);
		}
	} else {
		/* from_elem and to_task_imp references transferred to new inherit */
		ipc_importance_unlock();
	}

	return inherit;
}

/*
 *	Routine:	ipc_importance_receive
 *	Purpose:
 *		Process importance attributes in a received message.
 *
 *		If an importance voucher attribute was sent, transform
 *		that into an attribute value reflecting the inheritance
 *		from the sender to the receiver.
 *
 *		If a static boost is received (or the voucher isn't on
 *		a voucher-based boost), export a static boost.
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_importance_receive(
	ipc_kmsg_t              kmsg,
	mach_msg_option_t       option)
{
	int impresult = -1;

#if IMPORTANCE_TRACE || LEGACY_IMPORTANCE_DELIVERY
	task_t task_self = current_task();
	unsigned int sender_pid = ((mach_msg_max_trailer_t *)
	    ((vm_offset_t)kmsg->ikm_header +
	    mach_round_msg(kmsg->ikm_header->msgh_size)))->msgh_audit.val[5];
#endif

	/* convert to a voucher with an inherit importance attribute? */
	if ((option & MACH_RCV_VOUCHER) != 0) {
		uint8_t recipes[2 * sizeof(ipc_voucher_attr_recipe_data_t) +
		sizeof(mach_voucher_attr_value_handle_t)];
		ipc_voucher_attr_raw_recipe_array_size_t recipe_size = 0;
		ipc_voucher_attr_recipe_t recipe = (ipc_voucher_attr_recipe_t)recipes;
		ipc_voucher_t recv_voucher;
		mach_voucher_attr_value_handle_t handle;
		ipc_importance_inherit_t inherit;
		kern_return_t kr;

		/* set up recipe to copy the old voucher */
		if (IP_VALID(kmsg->ikm_voucher)) {
			ipc_voucher_t sent_voucher = (ipc_voucher_t)ip_get_kobject(kmsg->ikm_voucher);

			recipe->key = MACH_VOUCHER_ATTR_KEY_ALL;
			recipe->command = MACH_VOUCHER_ATTR_COPY;
			recipe->previous_voucher = sent_voucher;
			recipe->content_size = 0;
			recipe_size += sizeof(*recipe);
		}

		/*
		 * create an inheritance attribute from the kmsg (may be NULL)
		 * transferring any boosts from the kmsg linkage through the
		 * port directly to the new inheritance object.
		 */
		inherit = ipc_importance_inherit_from_kmsg(kmsg);
		handle = (mach_voucher_attr_value_handle_t)inherit;

		assert(IIE_NULL == kmsg->ikm_importance);

		/*
		 * Only create a new voucher if we have an inherit object
		 * (from the ikm_importance field of the incoming message), OR
		 * we have a valid incoming voucher.  If we have neither of
		 * these things then there is no need to create a new voucher.
		 */
		if (IP_VALID(kmsg->ikm_voucher) || inherit != III_NULL) {
			/* replace the importance attribute with the handle we created */
			/* our made reference on the inherit is donated to the voucher */
			recipe = (ipc_voucher_attr_recipe_t)&recipes[recipe_size];
			recipe->key = MACH_VOUCHER_ATTR_KEY_IMPORTANCE;
			recipe->command = MACH_VOUCHER_ATTR_SET_VALUE_HANDLE;
			recipe->previous_voucher = IPC_VOUCHER_NULL;
			recipe->content_size = sizeof(mach_voucher_attr_value_handle_t);
			*(mach_voucher_attr_value_handle_t *)(void *)recipe->content = handle;
			recipe_size += sizeof(*recipe) + sizeof(mach_voucher_attr_value_handle_t);

			kr = ipc_voucher_attr_control_create_mach_voucher(ipc_importance_control,
			    recipes,
			    recipe_size,
			    &recv_voucher);
			assert(KERN_SUCCESS == kr);

			/* swap the voucher port (and set voucher bits in case it didn't already exist) */
			kmsg->ikm_header->msgh_bits |= (MACH_MSG_TYPE_MOVE_SEND << 16);
			ipc_port_release_send(kmsg->ikm_voucher);
			kmsg->ikm_voucher = convert_voucher_to_port(recv_voucher);
			if (III_NULL != inherit) {
				impresult = 2;
			}
		}
	} else { /* Don't want a voucher */
		/* got linked importance? have to drop */
		if (IIE_NULL != kmsg->ikm_importance) {
			ipc_importance_elem_t elem;

			ipc_importance_lock();
			elem = ipc_importance_kmsg_unlink(kmsg);
#if IIE_REF_DEBUG
			elem->iie_kmsg_refs_dropped++;
#endif
			ipc_importance_release_locked(elem);
			/* importance unlocked */
		}

		/* With kmsg unlinked, can safely examine message importance attribute. */
		if (MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) {
			ipc_port_t port = kmsg->ikm_header->msgh_remote_port;
#if LEGACY_IMPORTANCE_DELIVERY
			ipc_importance_task_t task_imp = task_self->task_imp_base;

			/* The owner of receive right might have changed, take the internal assertion */
			if (KERN_SUCCESS == ipc_importance_task_hold_internal_assertion(task_imp, 1)) {
				ipc_importance_task_externalize_legacy_assertion(task_imp, 1, sender_pid);
				impresult = 1;
			} else
#endif /* LEGACY_IMPORTANCE_DELIVERY */
			{
				/* The importance boost never applied to task (clear the bit) */
				kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
				impresult = 0;
			}

			/* Drop the boost on the port and the owner of the receive right */
			ip_lock(port);
			if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == FALSE) {
				ip_unlock(port);
			}
		}
	}

#if IMPORTANCE_TRACE
	if (-1 < impresult) {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_DELV)) | DBG_FUNC_NONE,
		    sender_pid, task_pid(task_self),
		    kmsg->ikm_header->msgh_id, impresult, 0);
	}
	if (impresult == 2) {
		/*
		 * This probe only covers new voucher-based path.  Legacy importance
		 * will trigger the probe in ipc_importance_task_externalize_assertion()
		 * above and have impresult==1 here.
		 */
		DTRACE_BOOST5(receive_boost, task_t, task_self, int, task_pid(task_self),
		    int, sender_pid, int, 1, int, task_self->task_imp_base->iit_assertcnt);
	}
#endif /* IMPORTANCE_TRACE */
}

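/*
 * Voucher construction note for the MACH_RCV_VOUCHER path above: the recipe
 * buffer holds at most two recipes, a MACH_VOUCHER_ATTR_COPY of the sent
 * voucher (when one was sent) followed by a MACH_VOUCHER_ATTR_SET_VALUE_HANDLE
 * for MACH_VOUCHER_ATTR_KEY_IMPORTANCE whose inline content is the inherit
 * handle.  That is why recipes[] is sized for two recipe headers plus one
 * value handle.
 */
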
/*
 *	Routine:	ipc_importance_unreceive
 *	Purpose:
 *		Undo receive of importance attributes in a message.
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_importance_unreceive(
	ipc_kmsg_t              kmsg,
	mach_msg_option_t       __unused option)
{
	/* importance should already be in the voucher and out of the kmsg */
	assert(IIE_NULL == kmsg->ikm_importance);

	/* See if there is a legacy boost to be dropped from receiver */
	if (MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) {
		ipc_importance_task_t task_imp;

		kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
		task_imp = current_task()->task_imp_base;

		if (!IP_VALID(kmsg->ikm_voucher) && IIT_NULL != task_imp) {
			ipc_importance_task_drop_legacy_external_assertion(task_imp, 1);
		}
		/*
		 * ipc_kmsg_copyout_dest() will consume the voucher
		 * and any contained importance.
		 */
	}
}

/*
 *	Routine:	ipc_importance_clean
 *	Purpose:
 *		Clean up importance state in a kmsg that is being cleaned.
 *		Unlink the importance chain if one was set up, and drop
 *		the reference this kmsg held on the donor.  Then check to see
 *		if importance was carried to the port, and remove that if
 *		so.
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_importance_clean(
	ipc_kmsg_t              kmsg)
{
	ipc_port_t port;

	/* Is the kmsg still linked? If so, remove that first */
	if (IIE_NULL != kmsg->ikm_importance) {
		ipc_importance_elem_t elem;

		ipc_importance_lock();
		elem = ipc_importance_kmsg_unlink(kmsg);
		assert(IIE_NULL != elem);
		ipc_importance_release_locked(elem);
		/* importance unlocked */
	}

	/* See if there is a legacy importance boost to be dropped from port */
	if (MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) {
		kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
		port = kmsg->ikm_header->msgh_remote_port;
		if (IP_VALID(port)) {
			ip_lock(port);
			/* inactive ports already had their importance boosts dropped */
			if (!ip_active(port) ||
			    ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == FALSE) {
				ip_unlock(port);
			}
		}
	}
}

void
ipc_importance_assert_clean(__assert_only ipc_kmsg_t kmsg)
{
	assert(IIE_NULL == kmsg->ikm_importance);
	assert(!MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits));
}

/*
 * IPC Importance Attribute Manager definition
 */

static kern_return_t
ipc_importance_release_value(
	ipc_voucher_attr_manager_t manager,
	mach_voucher_attr_key_t key,
	mach_voucher_attr_value_handle_t value,
	mach_voucher_attr_value_reference_t sync);

static kern_return_t
ipc_importance_get_value(
	ipc_voucher_attr_manager_t manager,
	mach_voucher_attr_key_t key,
	mach_voucher_attr_recipe_command_t command,
	mach_voucher_attr_value_handle_array_t prev_values,
	mach_voucher_attr_value_handle_array_size_t prev_value_count,
	mach_voucher_attr_content_t content,
	mach_voucher_attr_content_size_t content_size,
	mach_voucher_attr_value_handle_t *out_value,
	mach_voucher_attr_value_flags_t *out_flags,
	ipc_voucher_t *out_value_voucher);

static kern_return_t
ipc_importance_extract_content(
	ipc_voucher_attr_manager_t manager,
	mach_voucher_attr_key_t key,
	mach_voucher_attr_value_handle_array_t values,
	mach_voucher_attr_value_handle_array_size_t value_count,
	mach_voucher_attr_recipe_command_t *out_command,
	mach_voucher_attr_content_t out_content,
	mach_voucher_attr_content_size_t *in_out_content_size);

static kern_return_t
ipc_importance_command(
	ipc_voucher_attr_manager_t manager,
	mach_voucher_attr_key_t key,
	mach_voucher_attr_value_handle_array_t values,
	mach_msg_type_number_t value_count,
	mach_voucher_attr_command_t command,
	mach_voucher_attr_content_t in_content,
	mach_voucher_attr_content_size_t in_content_size,
	mach_voucher_attr_content_t out_content,
	mach_voucher_attr_content_size_t *out_content_size);

static void
ipc_importance_manager_release(
	ipc_voucher_attr_manager_t manager);

const struct ipc_voucher_attr_manager ipc_importance_manager = {
	.ivam_release_value   = ipc_importance_release_value,
	.ivam_get_value       = ipc_importance_get_value,
	.ivam_extract_content = ipc_importance_extract_content,
	.ivam_command         = ipc_importance_command,
	.ivam_release         = ipc_importance_manager_release,
	.ivam_flags           = IVAM_FLAGS_NONE,
};

#define IMPORTANCE_ASSERT_KEY(key) assert(MACH_VOUCHER_ATTR_KEY_IMPORTANCE == (key))
#define IMPORTANCE_ASSERT_MANAGER(manager) assert(&ipc_importance_manager == (manager))

/*
 *	Routine:	ipc_importance_release_value [Voucher Attribute Manager Interface]
 *	Purpose:
 *		Release what the voucher system believes is the last "made" reference
 *		on an importance attribute value handle.  The sync parameter is used to
 *		avoid races with new made references concurrently being returned to the
 *		voucher system in other threads.
 *	Conditions:
 *		Nothing locked on entry.  May block.
 */
static kern_return_t
ipc_importance_release_value(
	ipc_voucher_attr_manager_t              __assert_only manager,
	mach_voucher_attr_key_t                 __assert_only key,
	mach_voucher_attr_value_handle_t        value,
	mach_voucher_attr_value_reference_t     sync)
{
	ipc_importance_elem_t elem;

	IMPORTANCE_ASSERT_MANAGER(manager);
	IMPORTANCE_ASSERT_KEY(key);
	assert(0 < sync);

	elem = (ipc_importance_elem_t)value;

	ipc_importance_lock();

	/* Any outstanding made refs? */
	if (sync != elem->iie_made) {
		assert(sync < elem->iie_made);
		ipc_importance_unlock();
		return KERN_FAILURE;
	}

	/* clear made */
	elem->iie_made = 0;

	/*
	 * If there are pending external boosts represented by this attribute,
	 * drop them from the appropriate task
	 */
	if (IIE_TYPE_INHERIT == IIE_TYPE(elem)) {
		ipc_importance_inherit_t inherit = (ipc_importance_inherit_t)elem;

		assert(inherit->iii_externcnt >= inherit->iii_externdrop);

		if (inherit->iii_donating) {
			ipc_importance_task_t imp_task = inherit->iii_to_task;
			uint32_t assertcnt = III_EXTERN(inherit);

			assert(ipc_importance_task_is_any_receiver_type(imp_task));
			assert(imp_task->iit_externcnt >= inherit->iii_externcnt);
			assert(imp_task->iit_externdrop >= inherit->iii_externdrop);
			imp_task->iit_externcnt -= inherit->iii_externcnt;
			imp_task->iit_externdrop -= inherit->iii_externdrop;
			inherit->iii_externcnt = 0;
			inherit->iii_externdrop = 0;
			inherit->iii_donating = FALSE;

			/* adjust the internal assertions - and propagate if needed */
			if (ipc_importance_task_check_transition(imp_task, IIT_UPDATE_DROP, assertcnt)) {
				ipc_importance_task_propagate_assertion_locked(imp_task, IIT_UPDATE_DROP, TRUE);
			}
		} else {
			inherit->iii_externcnt = 0;
			inherit->iii_externdrop = 0;
		}
	}

	/* drop the made reference on elem */
	ipc_importance_release_locked(elem);
	/* returns unlocked */

	return KERN_SUCCESS;
}

/*
 *	Routine:	ipc_importance_get_value [Voucher Attribute Manager Interface]
 *	Purpose:
 *		Convert command and content data into a reference on a [potentially new]
 *		attribute value.  The importance attribute manager will only allow the
 *		caller to get a value for the current task's importance, or to redeem
 *		an importance attribute from an existing voucher.
 *	Conditions:
 *		Nothing locked on entry.  May block.
 */
static kern_return_t
ipc_importance_get_value(
	ipc_voucher_attr_manager_t                      __assert_only manager,
	mach_voucher_attr_key_t                         __assert_only key,
	mach_voucher_attr_recipe_command_t              command,
	mach_voucher_attr_value_handle_array_t          prev_values,
	mach_voucher_attr_value_handle_array_size_t     prev_value_count,
	mach_voucher_attr_content_t                     __unused content,
	mach_voucher_attr_content_size_t                content_size,
	mach_voucher_attr_value_handle_t                *out_value,
	mach_voucher_attr_value_flags_t                 *out_flags,
	ipc_voucher_t                                   *out_value_voucher)
{
	ipc_importance_elem_t elem;
	task_t self;

	IMPORTANCE_ASSERT_MANAGER(manager);
	IMPORTANCE_ASSERT_KEY(key);

	if (0 != content_size) {
		return KERN_INVALID_ARGUMENT;
	}

	*out_flags = MACH_VOUCHER_ATTR_VALUE_FLAGS_NONE;
	/* never an out voucher */

	switch (command) {
	case MACH_VOUCHER_ATTR_REDEEM:

		/* redeem of previous values is the value */
		if (0 < prev_value_count) {
			elem = (ipc_importance_elem_t)prev_values[0];
			assert(IIE_NULL != elem);

			ipc_importance_lock();
			assert(0 < elem->iie_made);
			elem->iie_made++;
			ipc_importance_unlock();

			*out_value = prev_values[0];
			return KERN_SUCCESS;
		}

		/* redeem of default is default */
		*out_value = 0;
		*out_value_voucher = IPC_VOUCHER_NULL;
		return KERN_SUCCESS;

	case MACH_VOUCHER_ATTR_IMPORTANCE_SELF:
		self = current_task();

		elem = (ipc_importance_elem_t)ipc_importance_for_task(self, TRUE);
		/* made reference added (or IIE_NULL which isn't referenced) */

		*out_value = (mach_voucher_attr_value_handle_t)elem;
		*out_value_voucher = IPC_VOUCHER_NULL;
		return KERN_SUCCESS;

	default:
		/*
		 * every other command is unknown
		 *
		 * Specifically, there is no mechanism provided to construct an
		 * importance attribute for a task/process from just a pid or
		 * task port.  It has to be copied (or redeemed) from a previous
		 * voucher that has it.
		 */
		return KERN_INVALID_ARGUMENT;
	}
}

/*
 *	Routine:	ipc_importance_extract_content [Voucher Attribute Manager Interface]
 *	Purpose:
 *		Extract meaning from the attribute value present in a voucher.  While
 *		the real goal is to provide commands and data that can reproduce the
 *		voucher's value "out of thin air", this isn't possible with importance
 *		attribute values.  Instead, return debug info to help track down dependencies.
 *	Conditions:
 *		Nothing locked on entry.  May block.
 */
static kern_return_t
ipc_importance_extract_content(
	ipc_voucher_attr_manager_t                      __assert_only manager,
	mach_voucher_attr_key_t                         __assert_only key,
	mach_voucher_attr_value_handle_array_t          values,
	mach_voucher_attr_value_handle_array_size_t     value_count,
	mach_voucher_attr_recipe_command_t              *out_command,
	mach_voucher_attr_content_t                     out_content,
	mach_voucher_attr_content_size_t                *in_out_content_size)
{
	ipc_importance_elem_t elem;
	unsigned int i;

	char *buf = (char *)out_content;
	mach_voucher_attr_content_size_t size = *in_out_content_size;
	mach_voucher_attr_content_size_t pos = 0;
	__unused int pid;

	IMPORTANCE_ASSERT_MANAGER(manager);
	IMPORTANCE_ASSERT_KEY(key);

	/* the first non-default value provides the data */
	for (i = 0; i < value_count; i++) {
		elem = (ipc_importance_elem_t)values[i];
		if (IIE_NULL == elem) {
			continue;
		}

		pos += scnprintf(buf + pos, size - pos, "Importance for ");

		for (;;) {
			ipc_importance_inherit_t inherit = III_NULL;
			ipc_importance_task_t task_imp;

			if (IIE_TYPE_TASK == IIE_TYPE(elem)) {
				task_imp = (ipc_importance_task_t)elem;
			} else {
				inherit = (ipc_importance_inherit_t)elem;
				task_imp = inherit->iii_to_task;
			}
#if DEVELOPMENT || DEBUG
			pos += scnprintf(buf + pos, size - pos, "%s[%d]",
			    task_imp->iit_procname, task_imp->iit_bsd_pid);
#else
			ipc_importance_lock();
			pid = task_importance_task_get_pid(task_imp);
			ipc_importance_unlock();
			pos += scnprintf(buf + pos, size - pos, "pid %d", pid);
#endif /* DEVELOPMENT || DEBUG */

			if (III_NULL == inherit) {
				break;
			}
			pos += scnprintf(buf + pos, size - pos,
			    " (%d of %d boosts) %s from ",
			    III_EXTERN(inherit), inherit->iii_externcnt,
			    (inherit->iii_donating) ? "donated" : "linked");
			elem = inherit->iii_from_elem;
		}

		pos++; /* account for terminating \0 */
		break;
	}
	*out_command = MACH_VOUCHER_ATTR_NOOP; /* cannot be used to regenerate value */
	*in_out_content_size = pos;
	return KERN_SUCCESS;
}

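/*
 * Example output (hypothetical names and pids): a voucher whose importance
 * value is an inherit donating to "App" from task source "Daemon" would
 * extract as something like
 *     "Importance for App[321] (1 of 1 boosts) donated from Daemon[123]"
 * on DEVELOPMENT/DEBUG kernels, or with bare "pid N" tokens otherwise.
 */
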
/*
 *	Routine:	ipc_importance_command [Voucher Attribute Manager Interface]
 *	Purpose:
 *		Run commands against the importance attribute value found in a voucher.
 *		No such commands are currently supported.
 *	Conditions:
 *		Nothing locked on entry.  May block.
 */
static kern_return_t
ipc_importance_command(
	ipc_voucher_attr_manager_t              __assert_only manager,
	mach_voucher_attr_key_t                 __assert_only key,
	mach_voucher_attr_value_handle_array_t  values,
	mach_msg_type_number_t                  value_count,
	mach_voucher_attr_command_t             command,
	mach_voucher_attr_content_t             in_content,
	mach_voucher_attr_content_size_t        in_content_size,
	mach_voucher_attr_content_t             out_content,
	mach_voucher_attr_content_size_t        *out_content_size)
{
	ipc_importance_inherit_t inherit;
	ipc_importance_task_t to_task;
	uint32_t refs, *outrefsp;
	mach_msg_type_number_t i;
	uint32_t externcnt;

	IMPORTANCE_ASSERT_MANAGER(manager);
	IMPORTANCE_ASSERT_KEY(key);

	if (in_content_size != sizeof(refs) ||
	    (*out_content_size != 0 && *out_content_size != sizeof(refs))) {
		return KERN_INVALID_ARGUMENT;
	}
	refs = *(uint32_t *)(void *)in_content;
	outrefsp = (*out_content_size != 0) ? (uint32_t *)(void *)out_content : NULL;

	if (MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL != command) {
		return KERN_NOT_SUPPORTED;
	}

	/* the first non-default value of the apropos type provides the data */
	inherit = III_NULL;
	for (i = 0; i < value_count; i++) {
		ipc_importance_elem_t elem = (ipc_importance_elem_t)values[i];

		if (IIE_NULL != elem && IIE_TYPE_INHERIT == IIE_TYPE(elem)) {
			inherit = (ipc_importance_inherit_t)elem;
			break;
		}
	}
	if (III_NULL == inherit) {
		return KERN_INVALID_ARGUMENT;
	}

	ipc_importance_lock();

	if (0 == refs) {
		if (NULL != outrefsp) {
			*outrefsp = III_EXTERN(inherit);
		}
		ipc_importance_unlock();
		return KERN_SUCCESS;
	}

	to_task = inherit->iii_to_task;
	assert(ipc_importance_task_is_any_receiver_type(to_task));

	/* if not donating to a denap receiver, it was called incorrectly */
	if (!ipc_importance_task_is_marked_denap_receiver(to_task)) {
		ipc_importance_unlock();
		return KERN_INVALID_TASK; /* keeps dispatch happy */
	}

	/* Enough external references left to drop? */
	if (III_EXTERN(inherit) < refs) {
		ipc_importance_unlock();
		return KERN_FAILURE;
	}

	/* re-base external and internal counters at the inherit and the to-task (if apropos) */
	if (inherit->iii_donating) {
		assert(IIT_EXTERN(to_task) >= III_EXTERN(inherit));
		assert(to_task->iit_externcnt >= inherit->iii_externcnt);
		assert(to_task->iit_externdrop >= inherit->iii_externdrop);
		inherit->iii_externdrop += refs;
		to_task->iit_externdrop += refs;
		externcnt = III_EXTERN(inherit);
		if (0 == externcnt) {
			inherit->iii_donating = FALSE;
			to_task->iit_externcnt -= inherit->iii_externcnt;
			to_task->iit_externdrop -= inherit->iii_externdrop;

			/* Start AppNap delay hysteresis - even if not the last boost for the task. */
			if (ipc_importance_delayed_drop_call != NULL &&
			    ipc_importance_task_is_marked_denap_receiver(to_task)) {
				ipc_importance_task_delayed_drop(to_task);
			}

			/* drop task assertions associated with the dropped boosts */
			if (ipc_importance_task_check_transition(to_task, IIT_UPDATE_DROP, refs)) {
				ipc_importance_task_propagate_assertion_locked(to_task, IIT_UPDATE_DROP, TRUE);
				/* may have dropped and retaken importance lock */
			}
		} else {
			/* assert(to_task->iit_assertcnt >= refs + externcnt); */
			/* defensive deduction in case of assertcnt underflow */
			if (to_task->iit_assertcnt > refs + externcnt) {
				to_task->iit_assertcnt -= refs;
			} else {
				to_task->iit_assertcnt = externcnt;
			}
		}
	} else {
		inherit->iii_externdrop += refs;
		externcnt = III_EXTERN(inherit);
	}

	/* capture result (if requested) */
	if (NULL != outrefsp) {
		*outrefsp = externcnt;
	}

	ipc_importance_unlock();
	return KERN_SUCCESS;
}

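/*
 * Caller's view of the command above (illustrative): user space reaches this
 * handler through the voucher attribute command interface, presumably
 * mach_voucher_attr_command() against MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
 * passing a uint32_t count of external boosts to drop as in_content.  A count
 * of zero just queries III_EXTERN(); a non-zero count re-bases the inherit and
 * to-task counters, and the remaining external count is written back when an
 * out_content buffer of sizeof(uint32_t) is supplied.
 */
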
/*
 *	Routine:	ipc_importance_manager_release [Voucher Attribute Manager Interface]
 *	Purpose:
 *		Release the Voucher system's reference on the IPC importance attribute
 *		manager.
 *	Conditions:
 *		As this can only occur after the manager drops the Attribute control
 *		reference granted back at registration time, and that reference is never
 *		dropped, this should never be called.
 */
static void
ipc_importance_manager_release(
	ipc_voucher_attr_manager_t              __assert_only manager)
{
	IMPORTANCE_ASSERT_MANAGER(manager);
	panic("Voucher importance manager released");
}

/*
 *	Routine:	ipc_importance_init
 *	Purpose:
 *		Initialize the IPC importance manager.
 *	Conditions:
 *		Zones and Vouchers are already initialized.
 */
void
ipc_importance_init(void)
{
	kern_return_t kr;

	kr = ipc_register_well_known_mach_voucher_attr_manager(&ipc_importance_manager,
	    (mach_voucher_attr_value_handle_t)0,
	    MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
	    &ipc_importance_control);
	if (KERN_SUCCESS != kr) {
		printf("Voucher importance manager register returned %d", kr);
	}
}

/*
 *	Routine:	ipc_importance_thread_call_init
 *	Purpose:
 *		Initialize the IPC importance code dependent upon
 *		thread-call support being available.
 *	Conditions:
 *		Thread-call mechanism is already initialized.
 */
void
ipc_importance_thread_call_init(void)
{
	/* initialize delayed drop queue and thread-call */
	queue_init(&ipc_importance_delayed_drop_queue);
	ipc_importance_delayed_drop_call =
	    thread_call_allocate(ipc_importance_task_delayed_drop_scan, NULL);
	if (NULL == ipc_importance_delayed_drop_call) {
		panic("ipc_importance_init");
	}
}

/*
 *	Routine:	task_importance_list_pids
 *	Purpose:	list pids where task is donating importance.
 *	Conditions:	To be called only from kdp stackshot code.
 *			Will panic the system otherwise.
 */
int
task_importance_list_pids(task_t task, int flags, char *pid_list, unsigned int max_count)
{
	if (kdp_lck_spin_is_acquired(&ipc_importance_lock_data) ||
	    task == TASK_NULL ||
	    task->task_imp_base == IIT_NULL ||
	    pid_list == NULL ||
	    flags != TASK_IMP_LIST_DONATING_PIDS) {
		return 0;
	}
	unsigned int pidcount = 0;
	task_t temp_task;
	ipc_importance_task_t task_imp = task->task_imp_base;
	ipc_kmsg_t temp_kmsg;
	ipc_importance_inherit_t temp_inherit;
	ipc_importance_elem_t elem;
	int target_pid = 0, previous_pid;

	queue_iterate(&task_imp->iit_inherits, temp_inherit, ipc_importance_inherit_t, iii_inheritance) {
		/* check space in buffer */
		if (pidcount >= max_count) {
			break;
		}
		previous_pid = target_pid;
		target_pid = -1;

		if (temp_inherit->iii_donating) {
			target_pid = task_importance_task_get_pid(temp_inherit->iii_to_task);
		}

		if (target_pid != -1 && previous_pid != target_pid) {
			memcpy(pid_list, &target_pid, sizeof(target_pid));
			pid_list += sizeof(target_pid);
			pidcount++;
		}
	}

	target_pid = 0;
	queue_iterate(&task_imp->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance) {
		if (pidcount >= max_count) {
			break;
		}
		previous_pid = target_pid;
		target_pid = -1;
		elem = temp_kmsg->ikm_importance;
		temp_task = TASK_NULL;

		if (elem == IIE_NULL) {
			continue;
		}

		if (!(temp_kmsg->ikm_header && MACH_MSGH_BITS_RAISED_IMPORTANCE(temp_kmsg->ikm_header->msgh_bits))) {
			continue;
		}

		if (IIE_TYPE_TASK == IIE_TYPE(elem)) {
			ipc_importance_task_t temp_iit = (ipc_importance_task_t)elem;
			target_pid = task_importance_task_get_pid(temp_iit);
		} else {
			temp_inherit = (ipc_importance_inherit_t)elem;
			target_pid = task_importance_task_get_pid(temp_inherit->iii_to_task);
		}

		if (target_pid != -1 && previous_pid != target_pid) {
			memcpy(pid_list, &target_pid, sizeof(target_pid));
			pid_list += sizeof(target_pid);