1 /*
2 * Copyright (c) 2013-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach/mach_types.h>
30 #include <mach/notify.h>
31 #include <ipc/ipc_types.h>
32 #include <ipc/ipc_importance.h>
33 #include <ipc/ipc_port.h>
34 #include <ipc/ipc_voucher.h>
35 #include <kern/ipc_kobject.h>
36 #include <kern/ipc_tt.h>
37 #include <kern/mach_param.h>
38 #include <kern/misc_protos.h>
39 #include <kern/zalloc.h>
40 #include <kern/queue.h>
41 #include <kern/task.h>
42 #include <kern/policy_internal.h>
43
44 #include <sys/kdebug.h>
45
46 #include <mach/mach_voucher_attr_control.h>
47 #include <mach/machine/sdt.h>
48
49 extern int proc_pid(void *);
50 extern int proc_selfpid(void);
51 extern uint64_t proc_uniqueid(void *p);
52 extern char *proc_name_address(void *p);
53
54 /*
55 * Globals for delayed boost drop processing.
56 */
57 static queue_head_t ipc_importance_delayed_drop_queue;
58 static thread_call_t ipc_importance_delayed_drop_call;
59 static uint64_t ipc_importance_delayed_drop_timestamp;
60 static boolean_t ipc_importance_delayed_drop_call_requested = FALSE;
61
62 #define DENAP_DROP_TARGET (1000 * NSEC_PER_MSEC) /* optimum denap delay */
63 #define DENAP_DROP_SKEW (100 * NSEC_PER_MSEC) /* request skew for wakeup */
64 #define DENAP_DROP_LEEWAY (2 * DENAP_DROP_SKEW) /* specified wakeup leeway */
65
66 #define DENAP_DROP_DELAY (DENAP_DROP_TARGET + DENAP_DROP_SKEW)
67 #define DENAP_DROP_FLAGS (THREAD_CALL_DELAY_SYS_NORMAL | THREAD_CALL_DELAY_LEEWAY)
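/*
 * Illustrative arithmetic (a sketch assuming the default constants
 * above): a queued de-nap drop is scheduled DENAP_DROP_DELAY =
 * 1000ms + 100ms = 1100ms after the task's update timestamp, and the
 * thread-call is allowed DENAP_DROP_LEEWAY = 2 * 100ms = 200ms of
 * wakeup leeway.  See ipc_importance_task_delayed_drop() below for
 * the conversion to absolute time via nanoseconds_to_absolutetime().
 */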
68
69 /*
70 * Importance Voucher Attribute Manager
71 */
72 static LCK_SPIN_DECLARE_ATTR(ipc_importance_lock_data, &ipc_lck_grp, &ipc_lck_attr);
73
74 #define ipc_importance_lock() \
75 lck_spin_lock_grp(&ipc_importance_lock_data, &ipc_lck_grp)
76 #define ipc_importance_lock_try() \
77 lck_spin_try_lock_grp(&ipc_importance_lock_data, &ipc_lck_grp)
78 #define ipc_importance_unlock() \
79 lck_spin_unlock(&ipc_importance_lock_data)
80 #define ipc_importance_assert_held() \
81 lck_spin_assert(&ipc_importance_lock_data, LCK_ASSERT_OWNED)
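/*
 * Usage sketch (illustrating the pattern used throughout this file,
 * not a new interface): the importance lock is a spin lock, so it is
 * held only around short, non-blocking queue and counter updates and
 * dropped before anything that can block:
 *
 *	ipc_importance_lock();
 *	... adjust iit_kmsgs / iit_inherits / assertion counters ...
 *	ipc_importance_unlock();
 *
 * ipc_importance_release_locked() below is the notable exception: it
 * is entered with the lock held and always returns with it unlocked.
 */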
82
83 #if IIE_REF_DEBUG
84 #define incr_ref_counter(x) (os_atomic_inc(&(x), relaxed))
85
86 static inline
87 uint32_t
88 ipc_importance_reference_internal(ipc_importance_elem_t elem)
89 {
90 incr_ref_counter(elem->iie_refs_added);
91 return os_atomic_inc(&elem->iie_bits, relaxed) & IIE_REFS_MASK;
92 }
93
94 static inline
95 uint32_t
96 ipc_importance_release_internal(ipc_importance_elem_t elem)
97 {
98 incr_ref_counter(elem->iie_refs_dropped);
99 return os_atomic_dec(&elem->iie_bits, relaxed) & IIE_REFS_MASK;
100 }
101
102 static inline
103 uint32_t
104 ipc_importance_task_reference_internal(ipc_importance_task_t task_imp)
105 {
106 uint32_t out;
107 out = ipc_importance_reference_internal(&task_imp->iit_elem);
108 incr_ref_counter(task_imp->iit_elem.iie_task_refs_added);
109 return out;
110 }
111
112 static inline
113 uint32_t
114 ipc_importance_task_release_internal(ipc_importance_task_t task_imp)
115 {
116 uint32_t out;
117
118 assert(1 < IIT_REFS(task_imp));
119 incr_ref_counter(task_imp->iit_elem.iie_task_refs_dropped);
120 out = ipc_importance_release_internal(&task_imp->iit_elem);
121 return out;
122 }
123
124 static inline
125 void
126 ipc_importance_counter_init(ipc_importance_elem_t elem)
127 {
128 elem->iie_refs_added = 0;
129 elem->iie_refs_dropped = 0;
130 elem->iie_kmsg_refs_added = 0;
131 elem->iie_kmsg_refs_inherited = 0;
132 elem->iie_kmsg_refs_coalesced = 0;
133 elem->iie_kmsg_refs_dropped = 0;
134 elem->iie_task_refs_added = 0;
135 elem->iie_task_refs_added_inherit_from = 0;
136 elem->iie_task_refs_added_transition = 0;
137 elem->iie_task_refs_self_added = 0;
138 elem->iie_task_refs_inherited = 0;
139 elem->iie_task_refs_coalesced = 0;
140 elem->iie_task_refs_dropped = 0;
141 }
142 #else
143 #define incr_ref_counter(x)
144 #endif
145
146 #if DEVELOPMENT || DEBUG
147 static queue_head_t global_iit_alloc_queue =
148 QUEUE_HEAD_INITIALIZER(global_iit_alloc_queue);
149 #endif
150
151 static ZONE_DECLARE(ipc_importance_task_zone, "ipc task importance",
152 sizeof(struct ipc_importance_task), ZC_NOENCRYPT);
153 static ZONE_DECLARE(ipc_importance_inherit_zone, "ipc importance inherit",
154 sizeof(struct ipc_importance_inherit), ZC_NOENCRYPT);
155
156
157 static ipc_voucher_attr_control_t ipc_importance_control;
158
159 static boolean_t ipc_importance_task_check_transition(ipc_importance_task_t task_imp,
160 iit_update_type_t type, uint32_t delta);
161
162 static void ipc_importance_task_propagate_assertion_locked(ipc_importance_task_t task_imp,
163 iit_update_type_t type, boolean_t update_task_imp);
164
165 static ipc_importance_inherit_t ipc_importance_inherit_from_task(task_t from_task, task_t to_task);
166
167 /*
168 * Routine: ipc_importance_kmsg_link
169 * Purpose:
170 * Link the kmsg onto the appropriate propagation chain.
171 * If the element is a task importance, we link directly
172 * on its propagation chain. Otherwise, we link onto the
173 * destination task of the inherit.
174 * Conditions:
175 * Importance lock held.
176 * Caller is donating an importance elem reference to the kmsg.
177 */
178 static void
179 ipc_importance_kmsg_link(
180 ipc_kmsg_t kmsg,
181 ipc_importance_elem_t elem)
182 {
183 ipc_importance_elem_t link_elem;
184
185 assert(IIE_NULL == kmsg->ikm_importance);
186
187 link_elem = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
188 (ipc_importance_elem_t)((ipc_importance_inherit_t)elem)->iii_to_task :
189 elem;
190
191 queue_enter(&link_elem->iie_kmsgs, kmsg, ipc_kmsg_t, ikm_inheritance);
192 kmsg->ikm_importance = elem;
193 }
194
195 /*
196 * Routine: ipc_importance_kmsg_unlink
197 * Purpose:
198 * Unlink the kmsg from its current propagation chain.
199 * If the element is a task importance, we unlink directly
200 * from its propagation chain. Otherwise, we unlink from the
201 * destination task of the inherit.
202 * Returns:
203 * The reference to the importance element it was linked on.
204 * Conditions:
205 * Importance lock held.
206 * Caller is responsible for dropping reference on returned elem.
207 */
208 static ipc_importance_elem_t
209 ipc_importance_kmsg_unlink(
210 ipc_kmsg_t kmsg)
211 {
212 ipc_importance_elem_t elem = kmsg->ikm_importance;
213
214 if (IIE_NULL != elem) {
215 ipc_importance_elem_t unlink_elem;
216
217 unlink_elem = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
218 (ipc_importance_elem_t)((ipc_importance_inherit_t)elem)->iii_to_task :
219 elem;
220
221 queue_remove(&unlink_elem->iie_kmsgs, kmsg, ipc_kmsg_t, ikm_inheritance);
222 kmsg->ikm_importance = IIE_NULL;
223 }
224 return elem;
225 }
226
227 /*
228 * Routine: ipc_importance_inherit_link
229 * Purpose:
230 * Link the inherit onto the appropriate propagation chain.
231 * If the element is a task importance, we link directly
232 * on its propagation chain. Otherwise, we link onto the
233 * destination task of the inherit.
234 * Conditions:
235 * Importance lock held.
236 * Caller is donating an elem importance reference to the inherit.
237 */
238 static void
239 ipc_importance_inherit_link(
240 ipc_importance_inherit_t inherit,
241 ipc_importance_elem_t elem)
242 {
243 ipc_importance_task_t link_task;
244
245 assert(IIE_NULL == inherit->iii_from_elem);
246 link_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
247 ((ipc_importance_inherit_t)elem)->iii_to_task :
248 (ipc_importance_task_t)elem;
249
250 queue_enter(&link_task->iit_inherits, inherit,
251 ipc_importance_inherit_t, iii_inheritance);
252 inherit->iii_from_elem = elem;
253 }
254
255 /*
256 * Routine: ipc_importance_inherit_find
257 * Purpose:
258 * Find an existing inherit that links the from element to the
259 * to_task at a given nesting depth. As inherits from other
260 * inherits are actually linked off the original inherit's donation
261 * receiving task, we have to conduct our search from there if
262 * the from element is an inherit.
263 * Returns:
264 * A pointer (not a reference) to the matching inherit.
265 * Conditions:
266 * Importance lock held.
267 */
268 static ipc_importance_inherit_t
269 ipc_importance_inherit_find(
270 ipc_importance_elem_t from,
271 ipc_importance_task_t to_task,
272 unsigned int depth)
273 {
274 ipc_importance_task_t link_task;
275 ipc_importance_inherit_t inherit;
276
277 link_task = (IIE_TYPE_INHERIT == IIE_TYPE(from)) ?
278 ((ipc_importance_inherit_t)from)->iii_to_task :
279 (ipc_importance_task_t)from;
280
281 queue_iterate(&link_task->iit_inherits, inherit,
282 ipc_importance_inherit_t, iii_inheritance) {
283 if (inherit->iii_to_task == to_task && inherit->iii_depth == depth) {
284 return inherit;
285 }
286 }
287 return III_NULL;
288 }
289
290 /*
291 * Routine: ipc_importance_inherit_unlink
292 * Purpose:
293 * Unlink the inherit from its current propagation chain.
294 * If the element is a task importance, we unlink directly
295 * from its propagation chain. Otherwise, we unlink from the
296 * destination task of the inherit.
297 * Returns:
298 * The reference to the importance element it was linked on.
299 * Conditions:
300 * Importance lock held.
301 * Caller is responsible for dropping reference on returned elem.
302 */
303 static ipc_importance_elem_t
304 ipc_importance_inherit_unlink(
305 ipc_importance_inherit_t inherit)
306 {
307 ipc_importance_elem_t elem = inherit->iii_from_elem;
308
309 if (IIE_NULL != elem) {
310 ipc_importance_task_t unlink_task;
311
312 unlink_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
313 ((ipc_importance_inherit_t)elem)->iii_to_task :
314 (ipc_importance_task_t)elem;
315
316 queue_remove(&unlink_task->iit_inherits, inherit,
317 ipc_importance_inherit_t, iii_inheritance);
318 inherit->iii_from_elem = IIE_NULL;
319 }
320 return elem;
321 }
322
323 /*
324 * Routine: ipc_importance_reference
325 * Purpose:
326 * Add a reference to the importance element.
327 * Conditions:
328 * Caller must hold a reference on the element.
329 */
330 void
331 ipc_importance_reference(ipc_importance_elem_t elem)
332 {
333 assert(0 < IIE_REFS(elem));
334 ipc_importance_reference_internal(elem);
335 }
336
337 /*
338 * Routine: ipc_importance_release_locked
339 * Purpose:
340 * Release a reference on an importance attribute value,
341 * unlinking and deallocating the attribute if the last reference.
342 * Conditions:
343 * Entered with importance lock held, leaves with it unlocked.
344 */
345 static void
346 ipc_importance_release_locked(ipc_importance_elem_t elem)
347 {
348 assert(0 < IIE_REFS(elem));
349
350 #if IMPORTANCE_DEBUG
351 ipc_importance_inherit_t temp_inherit;
352 ipc_importance_task_t link_task;
353 ipc_kmsg_t temp_kmsg;
354 uint32_t expected = 0;
355
356 if (0 < elem->iie_made) {
357 expected++;
358 }
359
360 link_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
361 ((ipc_importance_inherit_t)elem)->iii_to_task :
362 (ipc_importance_task_t)elem;
363
364 queue_iterate(&link_task->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance)
365 if (temp_kmsg->ikm_importance == elem) {
366 expected++;
367 }
368 queue_iterate(&link_task->iit_inherits, temp_inherit,
369 ipc_importance_inherit_t, iii_inheritance)
370 if (temp_inherit->iii_from_elem == elem) {
371 expected++;
372 }
373 if (IIE_REFS(elem) < expected + 1) {
374 panic("ipc_importance_release_locked (%p)", elem);
375 }
376 #endif /* IMPORTANCE_DEBUG */
377
378 if (0 < ipc_importance_release_internal(elem)) {
379 ipc_importance_unlock();
380 return;
381 }
382
383 /* last ref */
384
385 switch (IIE_TYPE(elem)) {
386 /* just a "from" task reference to drop */
387 case IIE_TYPE_TASK:
388 {
389 ipc_importance_task_t task_elem;
390
391 task_elem = (ipc_importance_task_t)elem;
392
393 /* the task can't still hold a reference on the task importance */
394 assert(TASK_NULL == task_elem->iit_task);
395
396 #if DEVELOPMENT || DEBUG
397 queue_remove(&global_iit_alloc_queue, task_elem, ipc_importance_task_t, iit_allocation);
398 #endif
399
400 ipc_importance_unlock();
401
402 zfree(ipc_importance_task_zone, task_elem);
403 break;
404 }
405
406 /* dropping an inherit element */
407 case IIE_TYPE_INHERIT:
408 {
409 ipc_importance_inherit_t inherit = (ipc_importance_inherit_t)elem;
410 ipc_importance_task_t to_task = inherit->iii_to_task;
411 ipc_importance_elem_t from_elem;
412
413 assert(IIT_NULL != to_task);
414 assert(ipc_importance_task_is_any_receiver_type(to_task));
415
416 /* unlink the inherit from its source element */
417 from_elem = ipc_importance_inherit_unlink(inherit);
418 assert(IIE_NULL != from_elem);
419
420 /*
421 * The attribute might have pending external boosts if it was handed
422 * out during exec; drop them from the appropriate destination
423 * task.
424 *
425 * The attribute will not have any pending external boosts if it was
426 * handed out through the voucher system, since those would already
427 * have been dropped by ipc_importance_release_value. There is no way
428 * to detect that case here, so if the attribute still has pending
429 * external boosts, drop them from the appropriate destination task.
430 *
431 * Inherit attributes from exec and from the voucher system are never
432 * deduped with each other, so dropping the external boost from the
433 * destination task in two different places has no unintended side
434 * effects.
435 */
436 assert(inherit->iii_externcnt >= inherit->iii_externdrop);
437 if (inherit->iii_donating) {
438 uint32_t assertcnt = III_EXTERN(inherit);
439
440 assert(ipc_importance_task_is_any_receiver_type(to_task));
441 assert(to_task->iit_externcnt >= inherit->iii_externcnt);
442 assert(to_task->iit_externdrop >= inherit->iii_externdrop);
443 to_task->iit_externcnt -= inherit->iii_externcnt;
444 to_task->iit_externdrop -= inherit->iii_externdrop;
445 inherit->iii_externcnt = 0;
446 inherit->iii_externdrop = 0;
447 inherit->iii_donating = FALSE;
448
449 /* adjust the internal assertions - and propagate as needed */
450 if (ipc_importance_task_check_transition(to_task, IIT_UPDATE_DROP, assertcnt)) {
451 ipc_importance_task_propagate_assertion_locked(to_task, IIT_UPDATE_DROP, TRUE);
452 }
453 } else {
454 inherit->iii_externcnt = 0;
455 inherit->iii_externdrop = 0;
456 }
457
458 /* release the reference on the source element */
459 ipc_importance_release_locked(from_elem);
460 /* unlocked on return */
461
462 /* release the reference on the destination task */
463 ipc_importance_task_release(to_task);
464
465 /* free the inherit */
466 zfree(ipc_importance_inherit_zone, inherit);
467 break;
468 }
469 }
470 }
471
472 /*
473 * Routine: ipc_importance_release
474 * Purpose:
475 * Release a reference on an importance attribute value,
476 * unlinking and deallocating the attribute if the last reference.
477 * Conditions:
478 * nothing locked on entrance, nothing locked on exit.
479 * May block.
480 */
481 void
482 ipc_importance_release(ipc_importance_elem_t elem)
483 {
484 if (IIE_NULL == elem) {
485 return;
486 }
487
488 ipc_importance_lock();
489 ipc_importance_release_locked(elem);
490 /* unlocked */
491 }
492
493 /*
494 * Routine: ipc_importance_task_reference
495 *
496 *
497 * Purpose:
498 * Retain a reference on a task importance attribute value.
499 * Conditions:
500 * nothing locked on entrance, nothing locked on exit.
501 * caller holds a reference already.
502 */
503 void
504 ipc_importance_task_reference(ipc_importance_task_t task_elem)
505 {
506 if (IIT_NULL == task_elem) {
507 return;
508 }
509 #if IIE_REF_DEBUG
510 incr_ref_counter(task_elem->iit_elem.iie_task_refs_added);
511 #endif
512 ipc_importance_reference(&task_elem->iit_elem);
513 }
514
515 /*
516 * Routine: ipc_importance_task_release
517 * Purpose:
518 * Release a reference on a task importance attribute value,
519 * unlinking and deallocating the attribute if the last reference.
520 * Conditions:
521 * nothing locked on entrance, nothing locked on exit.
522 * May block.
523 */
524 void
525 ipc_importance_task_release(ipc_importance_task_t task_elem)
526 {
527 if (IIT_NULL == task_elem) {
528 return;
529 }
530
531 ipc_importance_lock();
532 #if IIE_REF_DEBUG
533 incr_ref_counter(task_elem->iit_elem.iie_task_refs_dropped);
534 #endif
535 ipc_importance_release_locked(&task_elem->iit_elem);
536 /* unlocked */
537 }
538
539 /*
540 * Routine: ipc_importance_task_release_locked
541 * Purpose:
542 * Release a reference on a task importance attribute value,
543 * unlinking and deallocating the attribute if the last reference.
544 * Conditions:
545 * importance lock held on entry, nothing locked on exit.
546 * May block.
547 */
548 static void
549 ipc_importance_task_release_locked(ipc_importance_task_t task_elem)
550 {
551 if (IIT_NULL == task_elem) {
552 ipc_importance_unlock();
553 return;
554 }
555 #if IIE_REF_DEBUG
556 incr_ref_counter(task_elem->iit_elem.iie_task_refs_dropped);
557 #endif
558 ipc_importance_release_locked(&task_elem->iit_elem);
559 /* unlocked */
560 }
561
562 /*
563 * Routines for importance donation/inheritance/boosting
564 */
565
566
567 /*
568 * External importance assertions are managed by the process in userspace
569 * Internal importance assertions are the responsibility of the kernel
570 * Assertions are changed from internal to external via task_importance_externalize_assertion
571 */
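/*
 * Illustrative sketch (assumes task_imp is a referenced
 * ipc_importance_task_t for a receiver task; how it was obtained is
 * outside this sketch): a kernel-internal boost is held and dropped
 * with the routines defined below, keeping the assertion counts
 * balanced:
 *
 *	if (KERN_SUCCESS ==
 *	    ipc_importance_task_hold_internal_assertion(task_imp, 1)) {
 *		... perform the boosted work ...
 *		ipc_importance_task_drop_internal_assertion(task_imp, 1);
 *	}
 *
 * Handing responsibility for the drop to userspace instead goes
 * through task_importance_externalize_assertion().
 */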
572
573 /*
574 * Routine: ipc_importance_task_check_transition
575 * Purpose:
576 * Increment or decrement the internal task importance counter of the
577 * specified task and determine if propagation and a task policy
578 * update is required.
579 *
580 * If it is already enqueued for a policy update, steal it from that queue
581 * (as we are reversing that update before it happens).
582 *
583 * Conditions:
584 * Called with the importance lock held.
585 * It is the caller's responsibility to perform the propagation of the
586 * transition and/or policy changes by checking the return value.
587 */
588 static boolean_t
589 ipc_importance_task_check_transition(
590 ipc_importance_task_t task_imp,
591 iit_update_type_t type,
592 uint32_t delta)
593 {
594 #if IMPORTANCE_TRACE
595 task_t target_task = task_imp->iit_task;
596 #endif
597 boolean_t boost = (IIT_UPDATE_HOLD == type);
598 boolean_t before_boosted, after_boosted;
599
600 ipc_importance_assert_held();
601
602 if (!ipc_importance_task_is_any_receiver_type(task_imp)) {
603 return FALSE;
604 }
605
606 #if IMPORTANCE_TRACE
607 int target_pid = task_pid(target_task);
608
609 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (((boost) ? IMP_HOLD : IMP_DROP) | TASK_POLICY_INTERNAL))) | DBG_FUNC_START,
610 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
611 #endif
612
613 /* snapshot the effective boosting status before making any changes */
614 before_boosted = (task_imp->iit_assertcnt > 0);
615
616 /* Adjust the assertcnt appropriately */
617 if (boost) {
618 task_imp->iit_assertcnt += delta;
619 #if IMPORTANCE_TRACE
620 DTRACE_BOOST6(send_boost, task_t, target_task, int, target_pid,
621 task_t, current_task(), int, proc_selfpid(), int, delta, int, task_imp->iit_assertcnt);
622 #endif
623 } else {
624 // assert(delta <= task_imp->iit_assertcnt);
625 if (task_imp->iit_assertcnt < delta + IIT_EXTERN(task_imp)) {
626 /* TODO: Turn this back into a panic <rdar://problem/12592649> */
627 task_imp->iit_assertcnt = IIT_EXTERN(task_imp);
628 } else {
629 task_imp->iit_assertcnt -= delta;
630 }
631 #if IMPORTANCE_TRACE
632 // This covers both legacy and voucher-based importance.
633 DTRACE_BOOST4(drop_boost, task_t, target_task, int, target_pid, int, delta, int, task_imp->iit_assertcnt);
634 #endif
635 }
636
637 #if IMPORTANCE_TRACE
638 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (((boost) ? IMP_HOLD : IMP_DROP) | TASK_POLICY_INTERNAL))) | DBG_FUNC_END,
639 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
640 #endif
641
642 /* did the change result in an effective donor status change? */
643 after_boosted = (task_imp->iit_assertcnt > 0);
644
645 if (after_boosted != before_boosted) {
646 /*
647 * If the task importance is already on an update queue, we just reversed the need for a
648 * pending policy update. If the queue is any other than the delayed-drop-queue, pull it
649 * off that queue and release the reference it got going onto the update queue. If it is
650 * the delayed-drop-queue we leave it in place in case it comes back into the drop state
651 * before its time delay is up.
652 *
653 * We still need to propagate the change downstream to reverse the assertcnt effects,
654 * but we no longer need to update this task's boost policy state.
655 *
656 * Otherwise, mark it as needing a policy update.
657 */
658 assert(0 == task_imp->iit_updatepolicy);
659 if (NULL != task_imp->iit_updateq) {
660 if (&ipc_importance_delayed_drop_queue != task_imp->iit_updateq) {
661 queue_remove(task_imp->iit_updateq, task_imp, ipc_importance_task_t, iit_updates);
662 task_imp->iit_updateq = NULL;
663 ipc_importance_task_release_internal(task_imp); /* can't be last ref */
664 }
665 } else {
666 task_imp->iit_updatepolicy = 1;
667 }
668 return TRUE;
669 }
670
671 return FALSE;
672 }
673
674
675 /*
676 * Routine: ipc_importance_task_propagate_helper
677 * Purpose:
678 * Increment or decrement the internal task importance counter of all
679 * importance tasks inheriting from the specified one. If this causes
680 * that importance task to change state, add it to the list of tasks
681 * to do a policy update against.
682 * Conditions:
683 * Called with the importance lock held.
684 * It is the caller's responsibility to iterate down the generated list
685 * and propagate any subsequent assertion changes from there.
686 */
687 static void
688 ipc_importance_task_propagate_helper(
689 ipc_importance_task_t task_imp,
690 iit_update_type_t type,
691 queue_t propagation)
692 {
693 ipc_importance_task_t temp_task_imp;
694
695 /*
696 * iterate the downstream kmsgs, adjust their boosts,
697 * and capture the next task to adjust for each message
698 */
699
700 ipc_kmsg_t temp_kmsg;
701
702 queue_iterate(&task_imp->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance) {
703 mach_msg_header_t *hdr = temp_kmsg->ikm_header;
704 mach_port_delta_t delta;
705 ipc_port_t port;
706
707 /* toggle the kmsg importance bit as a barrier to parallel adjusts */
708 if (IIT_UPDATE_HOLD == type) {
709 if (MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
710 continue;
711 }
712
713 /* mark the message as now carrying importance */
714 hdr->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;
715 delta = 1;
716 } else {
717 if (!MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
718 continue;
719 }
720
721 /* clear the message as now carrying importance */
722 hdr->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
723 delta = -1;
724 }
725
726 /* determine the task importance to adjust as result (if any) */
727 port = hdr->msgh_remote_port;
728 assert(IP_VALID(port));
729 ip_lock(port);
730 temp_task_imp = IIT_NULL;
731 if (!ipc_port_importance_delta_internal(port, IPID_OPTION_NORMAL, &delta, &temp_task_imp)) {
732 ip_unlock(port);
733 }
734
735 /* no task importance to adjust associated with the port? */
736 if (IIT_NULL == temp_task_imp) {
737 continue;
738 }
739
740 /* hold a reference on temp_task_imp */
741
742 /* Adjust the task assertions and determine if an edge was crossed */
743 if (ipc_importance_task_check_transition(temp_task_imp, type, 1)) {
744 incr_ref_counter(temp_task_imp->iit_elem.iie_task_refs_added_transition);
745 queue_enter(propagation, temp_task_imp, ipc_importance_task_t, iit_props);
746 /* reference donated */
747 } else {
748 ipc_importance_task_release_internal(temp_task_imp);
749 }
750 }
751
752 /*
753 * iterate the downstream importance inherits
754 * and capture the next task importance to boost for each
755 */
756 ipc_importance_inherit_t temp_inherit;
757
758 queue_iterate(&task_imp->iit_inherits, temp_inherit, ipc_importance_inherit_t, iii_inheritance) {
759 uint32_t assertcnt = III_EXTERN(temp_inherit);
760
761 temp_task_imp = temp_inherit->iii_to_task;
762 assert(IIT_NULL != temp_task_imp);
763
764 if (IIT_UPDATE_HOLD == type) {
765 /* if no undropped externcnts in the inherit, nothing to do */
766 if (0 == assertcnt) {
767 assert(temp_inherit->iii_donating == FALSE);
768 continue;
769 }
770
771 /* nothing to do if the inherit is already donating (forced donation) */
772 if (temp_inherit->iii_donating) {
773 continue;
774 }
775
776 /* mark it donating and contribute to the task externcnts */
777 temp_inherit->iii_donating = TRUE;
778 temp_task_imp->iit_externcnt += temp_inherit->iii_externcnt;
779 temp_task_imp->iit_externdrop += temp_inherit->iii_externdrop;
780 } else {
781 /* if no contributing assertions, move on */
782 if (0 == assertcnt) {
783 assert(temp_inherit->iii_donating == FALSE);
784 continue;
785 }
786
787 /* nothing to do if the inherit is not donating */
788 if (!temp_inherit->iii_donating) {
789 continue;
790 }
791
792 /* mark it no longer donating */
793 temp_inherit->iii_donating = FALSE;
794
795 /* remove the contribution the inherit made to the to-task */
796 assert(IIT_EXTERN(temp_task_imp) >= III_EXTERN(temp_inherit));
797 assert(temp_task_imp->iit_externcnt >= temp_inherit->iii_externcnt);
798 assert(temp_task_imp->iit_externdrop >= temp_inherit->iii_externdrop);
799 temp_task_imp->iit_externcnt -= temp_inherit->iii_externcnt;
800 temp_task_imp->iit_externdrop -= temp_inherit->iii_externdrop;
801 }
802
803 /* Adjust the task assertions and determine if an edge was crossed */
804 assert(ipc_importance_task_is_any_receiver_type(temp_task_imp));
805 if (ipc_importance_task_check_transition(temp_task_imp, type, assertcnt)) {
806 ipc_importance_task_reference(temp_task_imp);
807 incr_ref_counter(temp_task_imp->iit_elem.iie_task_refs_added_transition);
808 queue_enter(propagation, temp_task_imp, ipc_importance_task_t, iit_props);
809 }
810 }
811 }
812
813 /*
814 * Routine: ipc_importance_task_process_updates
815 * Purpose:
816 * Process the queue of task importances and apply the policy
817 * update called for. Only process tasks in the queue with an
818 * update timestamp less than the supplied max.
819 * Conditions:
820 * Called and returns with importance locked.
821 * May drop importance lock and block temporarily.
822 */
823 static void
824 ipc_importance_task_process_updates(
825 queue_t supplied_queue,
826 boolean_t boost,
827 uint64_t max_timestamp)
828 {
829 ipc_importance_task_t task_imp;
830 queue_head_t second_chance;
831 queue_t queue = supplied_queue;
832
833 /*
834 * This queue will hold the tasks we couldn't trylock on the first pass.
835 * By using a second (private) queue, we guarantee all tasks that get
836 * entered on this queue have a timestamp under the maximum.
837 */
838 queue_init(&second_chance);
839
840 /* process any resulting policy updates */
841 retry:
842 while (!queue_empty(queue)) {
843 task_t target_task;
844 struct task_pend_token pend_token = {};
845
846 task_imp = (ipc_importance_task_t)queue_first(queue);
847 assert(0 == task_imp->iit_updatepolicy);
848 assert(queue == task_imp->iit_updateq);
849
850 /* if timestamp is too big, we're done */
851 if (task_imp->iit_updatetime > max_timestamp) {
852 break;
853 }
854
855 /* we were given a reference on each task in the queue */
856
857 /* remove it from the supplied queue */
858 queue_remove(queue, task_imp, ipc_importance_task_t, iit_updates);
859 task_imp->iit_updateq = NULL;
860
861 target_task = task_imp->iit_task;
862
863 /* Is it well on the way to exiting? */
864 if (TASK_NULL == target_task) {
865 ipc_importance_task_release_locked(task_imp);
866 /* importance unlocked */
867 ipc_importance_lock();
868 continue;
869 }
870
871 /* Has the update been reversed on the hysteresis queue? */
872 if (0 < task_imp->iit_assertcnt &&
873 queue == &ipc_importance_delayed_drop_queue) {
874 ipc_importance_task_release_locked(task_imp);
875 /* importance unlocked */
876 ipc_importance_lock();
877 continue;
878 }
879
880 /*
881 * Can we get the task lock out-of-order?
882 * If not, stick this back on the second-chance queue.
883 */
884 if (!task_lock_try(target_task)) {
885 boolean_t should_wait_lock = (queue == &second_chance);
886 task_imp->iit_updateq = &second_chance;
887
888 /*
889 * If we're already processing second-chances on
890 * tasks, keep this task on the front of the queue.
891 * We will wait for the task lock before coming
892 * back and trying again, and we have a better
893 * chance of re-acquiring the lock if we come back
894 * to it right away.
895 */
896 if (should_wait_lock) {
897 task_reference(target_task);
898 queue_enter_first(&second_chance, task_imp,
899 ipc_importance_task_t, iit_updates);
900 } else {
901 queue_enter(&second_chance, task_imp,
902 ipc_importance_task_t, iit_updates);
903 }
904 ipc_importance_unlock();
905
906 if (should_wait_lock) {
907 task_lock(target_task);
908 task_unlock(target_task);
909 task_deallocate(target_task);
910 }
911
912 ipc_importance_lock();
913 continue;
914 }
915
916 /* is it going away? */
917 if (!target_task->active) {
918 task_unlock(target_task);
919 ipc_importance_task_release_locked(task_imp);
920 /* importance unlocked */
921 ipc_importance_lock();
922 continue;
923 }
924
925 /* take a task reference to hold while we don't have the importance lock */
926 task_reference(target_task);
927
928 /* count the transition */
929 if (boost) {
930 task_imp->iit_transitions++;
931 }
932
933 ipc_importance_unlock();
934
935 /* apply the policy adjust to the target task (while it is still locked) */
936 task_update_boost_locked(target_task, boost, &pend_token);
937
938 /* complete the policy update with the task unlocked */
939 ipc_importance_task_release(task_imp);
940 task_unlock(target_task);
941 task_policy_update_complete_unlocked(target_task, &pend_token);
942 task_deallocate(target_task);
943
944 ipc_importance_lock();
945 }
946
947 /* If there are tasks we couldn't update the first time, try again */
948 if (!queue_empty(&second_chance)) {
949 queue = &second_chance;
950 goto retry;
951 }
952 }
953
954
955 /*
956 * Routine: ipc_importance_task_delayed_drop_scan
957 * Purpose:
958 * The thread call routine to scan the delayed drop queue,
959 * requesting all updates with a deadline up to the last target
960 * for the thread-call (which is DENAP_DROP_SKEW beyond the first
961 * queue entry's optimum delay).
963 * Conditions:
964 * Nothing locked
965 */
966 static void
967 ipc_importance_task_delayed_drop_scan(
968 __unused void *arg1,
969 __unused void *arg2)
970 {
971 ipc_importance_lock();
972
973 /* process all queued task drops with timestamps up to TARGET(first)+SKEW */
974 ipc_importance_task_process_updates(&ipc_importance_delayed_drop_queue,
975 FALSE,
976 ipc_importance_delayed_drop_timestamp);
977
978 /* importance lock may have been temporarily dropped */
979
980 /* If there are any entries left in the queue, re-arm the call here */
981 if (!queue_empty(&ipc_importance_delayed_drop_queue)) {
982 ipc_importance_task_t task_imp;
983 uint64_t deadline;
984 uint64_t leeway;
985
986 task_imp = (ipc_importance_task_t)queue_first(&ipc_importance_delayed_drop_queue);
987
988 nanoseconds_to_absolutetime(DENAP_DROP_DELAY, &deadline);
989 deadline += task_imp->iit_updatetime;
990 ipc_importance_delayed_drop_timestamp = deadline;
991
992 nanoseconds_to_absolutetime(DENAP_DROP_LEEWAY, &leeway);
993
994 thread_call_enter_delayed_with_leeway(
995 ipc_importance_delayed_drop_call,
996 NULL,
997 deadline,
998 leeway,
999 DENAP_DROP_FLAGS);
1000 } else {
1001 ipc_importance_delayed_drop_call_requested = FALSE;
1002 }
1003 ipc_importance_unlock();
1004 }
1005
1006 /*
1007 * Routine: ipc_importance_task_delayed_drop
1008 * Purpose:
1009 * Queue the specified task importance for delayed policy
1010 * update to drop its boost.
1011 * Conditions:
1012 * Called with the importance lock held.
1013 */
1014 static void
1015 ipc_importance_task_delayed_drop(ipc_importance_task_t task_imp)
1016 {
1017 uint64_t timestamp = mach_absolute_time(); /* no mach_approximate_time() in kernel */
1018
1019 assert(ipc_importance_delayed_drop_call != NULL);
1020
1021 /*
1022 * If still on an update queue from a previous change,
1023 * remove it first (and use that reference). Otherwise, take
1024 * a new reference for the delay drop update queue.
1025 */
1026 if (NULL != task_imp->iit_updateq) {
1027 queue_remove(task_imp->iit_updateq, task_imp,
1028 ipc_importance_task_t, iit_updates);
1029 } else {
1030 ipc_importance_task_reference_internal(task_imp);
1031 }
1032
1033 task_imp->iit_updateq = &ipc_importance_delayed_drop_queue;
1034 task_imp->iit_updatetime = timestamp;
1035
1036 queue_enter(&ipc_importance_delayed_drop_queue, task_imp,
1037 ipc_importance_task_t, iit_updates);
1038
1039 /* request the delayed thread-call if not already requested */
1040 if (!ipc_importance_delayed_drop_call_requested) {
1041 uint64_t deadline;
1042 uint64_t leeway;
1043
1044 nanoseconds_to_absolutetime(DENAP_DROP_DELAY, &deadline);
1045 deadline += task_imp->iit_updatetime;
1046 ipc_importance_delayed_drop_timestamp = deadline;
1047
1048 nanoseconds_to_absolutetime(DENAP_DROP_LEEWAY, &leeway);
1049
1050 ipc_importance_delayed_drop_call_requested = TRUE;
1051 thread_call_enter_delayed_with_leeway(
1052 ipc_importance_delayed_drop_call,
1053 NULL,
1054 deadline,
1055 leeway,
1056 DENAP_DROP_FLAGS);
1057 }
1058 }
1059
1060
1061 /*
1062 * Routine: ipc_importance_task_propagate_assertion_locked
1063 * Purpose:
1064 * Propagate the importance transition type to every downstream item.
1065 * If this causes a boost to be applied, determine if that
1066 * boost should propagate downstream.
1067 * Conditions:
1068 * Called with the importance lock held.
1069 */
1070 static void
1071 ipc_importance_task_propagate_assertion_locked(
1072 ipc_importance_task_t task_imp,
1073 iit_update_type_t type,
1074 boolean_t update_task_imp)
1075 {
1076 boolean_t boost = (IIT_UPDATE_HOLD == type);
1077 ipc_importance_task_t temp_task_imp;
1078 queue_head_t propagate;
1079 queue_head_t updates;
1080
1081 queue_init(&updates);
1082 queue_init(&propagate);
1083
1084 ipc_importance_assert_held();
1085
1086 /*
1087 * If we're going to update the policy for the provided task,
1088 * enqueue it on the propagate queue itself. Otherwise, only
1089 * enqueue downstream things.
1090 */
1091 if (update_task_imp) {
1092 ipc_importance_task_reference(task_imp);
1093 incr_ref_counter(task_imp->iit_elem.iie_task_refs_added_transition);
1094 queue_enter(&propagate, task_imp, ipc_importance_task_t, iit_props);
1095 } else {
1096 ipc_importance_task_propagate_helper(task_imp, type, &propagate);
1097 }
1098
1099 /*
1100 * for each item on the propagation list, propagate any change downstream,
1101 * adding new tasks to propagate further if they transitioned as well.
1102 */
1103 while (!queue_empty(&propagate)) {
1104 boolean_t need_update;
1105
1106 queue_remove_first(&propagate, temp_task_imp, ipc_importance_task_t, iit_props);
1107 /* hold a reference on temp_task_imp */
1108
1109 assert(IIT_NULL != temp_task_imp);
1110
1111 /* only propagate for receivers not already marked as a donor */
1112 if (!ipc_importance_task_is_marked_donor(temp_task_imp) &&
1113 ipc_importance_task_is_marked_receiver(temp_task_imp)) {
1114 ipc_importance_task_propagate_helper(temp_task_imp, type, &propagate);
1115 }
1116
1117 /* if we have a policy update to apply, enqueue a reference for later processing */
1118 need_update = (0 != temp_task_imp->iit_updatepolicy);
1119 temp_task_imp->iit_updatepolicy = 0;
1120 if (need_update && TASK_NULL != temp_task_imp->iit_task) {
1121 if (NULL == temp_task_imp->iit_updateq) {
1122 /*
1123 * If a downstream task that needs an update is subject to AppNap,
1124 * drop boosts according to the delay hysteresis. Otherwise,
1125 * update it immediately.
1126 */
1127 if (!boost && temp_task_imp != task_imp &&
1128 ipc_importance_delayed_drop_call != NULL &&
1129 ipc_importance_task_is_marked_denap_receiver(temp_task_imp)) {
1130 ipc_importance_task_delayed_drop(temp_task_imp);
1131 } else {
1132 temp_task_imp->iit_updatetime = 0;
1133 temp_task_imp->iit_updateq = &updates;
1134 ipc_importance_task_reference_internal(temp_task_imp);
1135 if (boost) {
1136 queue_enter(&updates, temp_task_imp,
1137 ipc_importance_task_t, iit_updates);
1138 } else {
1139 queue_enter_first(&updates, temp_task_imp,
1140 ipc_importance_task_t, iit_updates);
1141 }
1142 }
1143 } else {
1144 /* Must already be on the AppNap hysteresis queue */
1145 assert(ipc_importance_delayed_drop_call != NULL);
1146 assert(ipc_importance_task_is_marked_denap_receiver(temp_task_imp));
1147 }
1148 }
1149
1150 ipc_importance_task_release_internal(temp_task_imp);
1151 }
1152
1153 /* apply updates to task (may drop importance lock) */
1154 if (!queue_empty(&updates)) {
1155 ipc_importance_task_process_updates(&updates, boost, 0);
1156 }
1157 }
1158
1159 /*
1160 * Routine: ipc_importance_task_hold_internal_assertion_locked
1161 * Purpose:
1162 * Increment the assertion count on the task importance.
1163 * If this results in a boost state change in that task,
1164 * prepare to update task policy for this task AND,
1165 * if not just waking out of App Nap, all down-stream
1166 * tasks that have a similar transition through inheriting
1167 * this update.
1168 * Conditions:
1169 * importance locked on entry and exit.
1170 * May temporarily drop importance lock and block.
1171 */
1172 static kern_return_t
1173 ipc_importance_task_hold_internal_assertion_locked(ipc_importance_task_t task_imp, uint32_t count)
1174 {
1175 if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_HOLD, count)) {
1176 ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_HOLD, TRUE);
1177 }
1178 return KERN_SUCCESS;
1179 }
1180
1181 /*
1182 * Routine: ipc_importance_task_drop_internal_assertion_locked
1183 * Purpose:
1184 * Decrement the assertion count on the task importance.
1185 * If this results in a boost state change in that task,
1186 * prepare to update task policy for this task AND,
1187 * if not just waking out of App Nap, all down-stream
1188 * tasks that have a similar transition through inheriting
1189 * this update.
1190 * Conditions:
1191 * importance locked on entry and exit.
1192 * May temporarily drop importance lock and block.
1193 */
1194 static kern_return_t
1195 ipc_importance_task_drop_internal_assertion_locked(ipc_importance_task_t task_imp, uint32_t count)
1196 {
1197 if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_DROP, count)) {
1198 ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, TRUE);
1199 }
1200 return KERN_SUCCESS;
1201 }
1202
1203 /*
1204 * Routine: ipc_importance_task_hold_internal_assertion
1205 * Purpose:
1206 * Increment the assertion count on the task importance.
1207 * If this results in a 0->1 change in that count,
1208 * prepare to update task policy for this task AND
1209 * (potentially) all down-stream tasks that have a
1210 * similar transition through inheriting this update.
1211 * Conditions:
1212 * Nothing locked
1213 * May block after dropping importance lock.
1214 */
1215 int
1216 ipc_importance_task_hold_internal_assertion(ipc_importance_task_t task_imp, uint32_t count)
1217 {
1218 int ret = KERN_SUCCESS;
1219
1220 if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1221 ipc_importance_lock();
1222 ret = ipc_importance_task_hold_internal_assertion_locked(task_imp, count);
1223 ipc_importance_unlock();
1224 }
1225 return ret;
1226 }
1227
1228 /*
1229 * Routine: ipc_importance_task_drop_internal_assertion
1230 * Purpose:
1231 * Decrement the assertion count on the task importance.
1232 * If this results in a X->0 change in that count,
1233 * prepare to update task policy for this task AND
1234 * all down-stream tasks that have a similar transition
1235 * through inheriting this drop update.
1236 * Conditions:
1237 * Nothing locked on entry.
1238 * May block after dropping importance lock.
1239 */
1240 kern_return_t
1241 ipc_importance_task_drop_internal_assertion(ipc_importance_task_t task_imp, uint32_t count)
1242 {
1243 kern_return_t ret = KERN_SUCCESS;
1244
1245 if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1246 ipc_importance_lock();
1247 ret = ipc_importance_task_drop_internal_assertion_locked(task_imp, count);
1248 ipc_importance_unlock();
1249 }
1250 return ret;
1251 }
1252
1253 /*
1254 * Routine: ipc_importance_task_hold_file_lock_assertion
1255 * Purpose:
1256 * Increment the file lock assertion count on the task importance.
1257 * If this results in a 0->1 change in that count,
1258 * prepare to update task policy for this task AND
1259 * (potentially) all down-stream tasks that have a
1260 * similar transition through inheriting this update.
1261 * Conditions:
1262 * Nothing locked
1263 * May block after dropping importance lock.
1264 */
1265 kern_return_t
1266 ipc_importance_task_hold_file_lock_assertion(ipc_importance_task_t task_imp, uint32_t count)
1267 {
1268 kern_return_t ret = KERN_SUCCESS;
1269
1270 if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1271 ipc_importance_lock();
1272 ret = ipc_importance_task_hold_internal_assertion_locked(task_imp, count);
1273 if (KERN_SUCCESS == ret) {
1274 task_imp->iit_filelocks += count;
1275 }
1276 ipc_importance_unlock();
1277 }
1278 return ret;
1279 }
1280
1281 /*
1282 * Routine: ipc_importance_task_drop_file_lock_assertion
1283 * Purpose:
1284 * Decrement the assertion count on the task importance.
1285 * If this results in a X->0 change in that count,
1286 * prepare to update task policy for this task AND
1287 * all down-stream tasks that have a similar transition
1288 * through inheriting this drop update.
1289 * Conditions:
1290 * Nothing locked on entry.
1291 * May block after dropping importance lock.
1292 */
1293 kern_return_t
1294 ipc_importance_task_drop_file_lock_assertion(ipc_importance_task_t task_imp, uint32_t count)
1295 {
1296 kern_return_t ret = KERN_SUCCESS;
1297
1298 if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1299 ipc_importance_lock();
1300 if (count <= task_imp->iit_filelocks) {
1301 task_imp->iit_filelocks -= count;
1302 ret = ipc_importance_task_drop_internal_assertion_locked(task_imp, count);
1303 } else {
1304 ret = KERN_INVALID_ARGUMENT;
1305 }
1306 ipc_importance_unlock();
1307 }
1308 return ret;
1309 }
1310
1311 /*
1312 * Routine: ipc_importance_task_hold_legacy_external_assertion
1313 * Purpose:
1314 * Increment the external assertion count on the task importance.
1315 * This cannot result in a 0->1 transition, as the caller must
1316 * already hold an external boost.
1317 * Conditions:
1318 * Nothing locked on entry.
1319 * May block after dropping importance lock.
1320 * No follow-on policy updates need to be queued here:
1321 * because the caller must already hold a legacy external
1322 * boost, this can never be a 0->1 assertion transition.
1323 */
1324 kern_return_t
1325 ipc_importance_task_hold_legacy_external_assertion(ipc_importance_task_t task_imp, uint32_t count)
1326 {
1327 task_t target_task;
1328 uint32_t target_assertcnt;
1329 uint32_t target_externcnt;
1330 uint32_t target_legacycnt;
1331
1332 kern_return_t ret;
1333
1334 ipc_importance_lock();
1335 target_task = task_imp->iit_task;
1336
1337 #if IMPORTANCE_TRACE
1338 int target_pid = task_pid(target_task);
1339
1340 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_HOLD | TASK_POLICY_EXTERNAL))) | DBG_FUNC_START,
1341 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
1342 #endif
1343
1344 if (IIT_LEGACY_EXTERN(task_imp) == 0) {
1345 /* Only allowed to take a new boost assertion when holding an external boost */
1346 /* save data for diagnostic printf below */
1347 target_assertcnt = task_imp->iit_assertcnt;
1348 target_externcnt = IIT_EXTERN(task_imp);
1349 target_legacycnt = IIT_LEGACY_EXTERN(task_imp);
1350 ret = KERN_FAILURE;
1351 count = 0;
1352 } else {
1353 assert(ipc_importance_task_is_any_receiver_type(task_imp));
1354 assert(0 < task_imp->iit_assertcnt);
1355 assert(0 < IIT_EXTERN(task_imp));
1356 task_imp->iit_assertcnt += count;
1357 task_imp->iit_externcnt += count;
1358 task_imp->iit_legacy_externcnt += count;
1359 ret = KERN_SUCCESS;
1360 }
1361 ipc_importance_unlock();
1362
1363 #if IMPORTANCE_TRACE
1364 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_HOLD | TASK_POLICY_EXTERNAL))) | DBG_FUNC_END,
1365 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
1366 // This covers the legacy case where a task takes an extra boost.
1367 DTRACE_BOOST5(receive_boost, task_t, target_task, int, target_pid, int, proc_selfpid(), int, count, int, task_imp->iit_assertcnt);
1368 #endif
1369
1370 if (KERN_FAILURE == ret && target_task != TASK_NULL) {
1371 printf("BUG in process %s[%d]: "
1372 "attempt to acquire an additional legacy external boost assertion without holding an existing legacy external assertion. "
1373 "(%d total, %d external, %d legacy-external)\n",
1374 proc_name_address(target_task->bsd_info), task_pid(target_task),
1375 target_assertcnt, target_externcnt, target_legacycnt);
1376 }
1377
1378 return ret;
1379 }
1380
1381 /*
1382 * Routine: ipc_importance_task_drop_legacy_external_assertion
1383 * Purpose:
1384 * Drop the legacy external assertion count on the task and
1385 * reflect that change to total external assertion count and
1386 * then onto the internal importance count.
1387 *
1388 * If this results in a X->0 change in the internal,
1389 * count, prepare to update task policy for this task AND
1390 * all down-stream tasks that have a similar transition
1391 * through inheriting this update.
1392 * Conditions:
1393 * Nothing locked on entry.
1394 */
1395 kern_return_t
1396 ipc_importance_task_drop_legacy_external_assertion(ipc_importance_task_t task_imp, uint32_t count)
1397 {
1398 int ret = KERN_SUCCESS;
1399 task_t target_task;
1400 uint32_t target_assertcnt;
1401 uint32_t target_externcnt;
1402 uint32_t target_legacycnt;
1403
1404 if (count > 1) {
1405 return KERN_INVALID_ARGUMENT;
1406 }
1407
1408 ipc_importance_lock();
1409 target_task = task_imp->iit_task;
1410
1411 #if IMPORTANCE_TRACE
1412 int target_pid = task_pid(target_task);
1413
1414 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_DROP | TASK_POLICY_EXTERNAL))) | DBG_FUNC_START,
1415 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
1416 #endif
1417
1418 if (count > IIT_LEGACY_EXTERN(task_imp)) {
1419 /* Process over-released its boost count - save data for diagnostic printf */
1420 /* TODO: If count > 1, we should clear out as many external assertions as there are left. */
1421 target_assertcnt = task_imp->iit_assertcnt;
1422 target_externcnt = IIT_EXTERN(task_imp);
1423 target_legacycnt = IIT_LEGACY_EXTERN(task_imp);
1424 ret = KERN_FAILURE;
1425 } else {
1426 /*
1427 * decrement legacy external count from the top level and reflect
1428 * into internal for this and all subsequent updates.
1429 */
1430 assert(ipc_importance_task_is_any_receiver_type(task_imp));
1431 assert(IIT_EXTERN(task_imp) >= count);
1432
1433 task_imp->iit_legacy_externdrop += count;
1434 task_imp->iit_externdrop += count;
1435
1436 /* reset extern counters (if appropriate) */
1437 if (IIT_LEGACY_EXTERN(task_imp) == 0) {
1438 if (IIT_EXTERN(task_imp) != 0) {
1439 task_imp->iit_externcnt -= task_imp->iit_legacy_externcnt;
1440 task_imp->iit_externdrop -= task_imp->iit_legacy_externdrop;
1441 } else {
1442 task_imp->iit_externcnt = 0;
1443 task_imp->iit_externdrop = 0;
1444 }
1445 task_imp->iit_legacy_externcnt = 0;
1446 task_imp->iit_legacy_externdrop = 0;
1447 }
1448
1449 /* reflect the drop to the internal assertion count (and effect any importance change) */
1450 if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_DROP, count)) {
1451 ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, TRUE);
1452 }
1453 ret = KERN_SUCCESS;
1454 }
1455
1456 #if IMPORTANCE_TRACE
1457 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_DROP | TASK_POLICY_EXTERNAL))) | DBG_FUNC_END,
1458 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
1459 #endif
1460
1461 ipc_importance_unlock();
1462
1463 /* delayed printf for user-supplied data failures */
1464 if (KERN_FAILURE == ret && TASK_NULL != target_task) {
1465 printf("BUG in process %s[%d]: over-released legacy external boost assertions (%d total, %d external, %d legacy-external)\n",
1466 proc_name_address(target_task->bsd_info), task_pid(target_task),
1467 target_assertcnt, target_externcnt, target_legacycnt);
1468 }
1469
1470 return ret;
1471 }
1472
1473
1474 #if LEGACY_IMPORTANCE_DELIVERY
1475 /* Transfer an assertion to legacy userspace responsibility */
1476 static kern_return_t
1477 ipc_importance_task_externalize_legacy_assertion(ipc_importance_task_t task_imp, uint32_t count, __unused int sender_pid)
1478 {
1479 task_t target_task;
1480
1481 assert(IIT_NULL != task_imp);
1482 target_task = task_imp->iit_task;
1483
1484 if (TASK_NULL == target_task ||
1485 !ipc_importance_task_is_any_receiver_type(task_imp)) {
1486 return KERN_FAILURE;
1487 }
1488
1489 #if IMPORTANCE_TRACE
1490 int target_pid = task_pid(target_task);
1491
1492 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, IMP_EXTERN)) | DBG_FUNC_START,
1493 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
1494 #endif
1495
1496 ipc_importance_lock();
1497 /* assert(task_imp->iit_assertcnt >= IIT_EXTERN(task_imp) + count); */
1498 assert(IIT_EXTERN(task_imp) >= IIT_LEGACY_EXTERN(task_imp));
1499 task_imp->iit_legacy_externcnt += count;
1500 task_imp->iit_externcnt += count;
1501 ipc_importance_unlock();
1502
1503 #if IMPORTANCE_TRACE
1504 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, IMP_EXTERN)) | DBG_FUNC_END,
1505 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
1506 // This is the legacy boosting path
1507 DTRACE_BOOST5(receive_boost, task_t, target_task, int, target_pid, int, sender_pid, int, count, int, IIT_LEGACY_EXTERN(task_imp));
1508 #endif /* IMPORTANCE_TRACE */
1509
1510 return KERN_SUCCESS;
1511 }
1512 #endif /* LEGACY_IMPORTANCE_DELIVERY */
1513
1514 /*
1515 * Routine: ipc_importance_task_update_live_donor
1516 * Purpose:
1517 * Read the live donor status and update the live_donor bit/propagate the change in importance.
1518 * Conditions:
1519 * Nothing locked on entrance, nothing locked on exit.
1520 *
1521 * TODO: Need tracepoints around this function...
1522 */
1523 void
1524 ipc_importance_task_update_live_donor(ipc_importance_task_t task_imp)
1525 {
1526 uint32_t task_live_donor;
1527 boolean_t before_donor;
1528 boolean_t after_donor;
1529 task_t target_task;
1530
1531 assert(task_imp != NULL);
1532
1533 /*
1534 * Nothing to do if the task is not marked as expecting
1535 * live donor updates.
1536 */
1537 if (!ipc_importance_task_is_marked_live_donor(task_imp)) {
1538 return;
1539 }
1540
1541 ipc_importance_lock();
1542
1543 /* If the task got disconnected on the way here, no use (or ability) adjusting live donor status */
1544 target_task = task_imp->iit_task;
1545 if (TASK_NULL == target_task) {
1546 ipc_importance_unlock();
1547 return;
1548 }
1549 before_donor = ipc_importance_task_is_marked_donor(task_imp);
1550
1551 /* snapshot task live donor status - may change, but another call will accompany the change */
1552 task_live_donor = target_task->effective_policy.tep_live_donor;
1553
1554 #if IMPORTANCE_TRACE
1555 int target_pid = task_pid(target_task);
1556
1557 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1558 (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_UPDATE_LIVE_DONOR_STATE)) | DBG_FUNC_START,
1559 target_pid, task_imp->iit_donor, task_live_donor, before_donor, 0);
1560 #endif
1561
1562 /* update the task importance live donor status based on the task's value */
1563 task_imp->iit_donor = task_live_donor;
1564
1565 after_donor = ipc_importance_task_is_marked_donor(task_imp);
1566
1567 /* Has the effectiveness of being a donor changed as a result of this update? */
1568 if (before_donor != after_donor) {
1569 iit_update_type_t type;
1570
1571 /* propagate assertions without updating the current task policy (already handled) */
1572 if (0 == before_donor) {
1573 task_imp->iit_transitions++;
1574 type = IIT_UPDATE_HOLD;
1575 } else {
1576 type = IIT_UPDATE_DROP;
1577 }
1578 ipc_importance_task_propagate_assertion_locked(task_imp, type, FALSE);
1579 }
1580
1581 #if IMPORTANCE_TRACE
1582 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1583 (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_UPDATE_LIVE_DONOR_STATE)) | DBG_FUNC_END,
1584 target_pid, task_imp->iit_donor, task_live_donor, after_donor, 0);
1585 #endif
1586
1587 ipc_importance_unlock();
1588 }
1589
1590
1591 /*
1592 * Routine: ipc_importance_task_mark_donor
1593 * Purpose:
1594 * Set the task importance donor flag.
1595 * Conditions:
1596 * Nothing locked on entrance, nothing locked on exit.
1597 *
1598 * This is only called while the task is being constructed,
1599 * so no need to update task policy or propagate downstream.
1600 */
1601 void
1602 ipc_importance_task_mark_donor(ipc_importance_task_t task_imp, boolean_t donating)
1603 {
1604 assert(task_imp != NULL);
1605
1606 ipc_importance_lock();
1607
1608 int old_donor = task_imp->iit_donor;
1609
1610 task_imp->iit_donor = (donating ? 1 : 0);
1611
1612 if (task_imp->iit_donor > 0 && old_donor == 0) {
1613 task_imp->iit_transitions++;
1614 }
1615
1616 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1617 (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_INIT_DONOR_STATE)) | DBG_FUNC_NONE,
1618 task_pid(task_imp->iit_task), donating,
1619 old_donor, task_imp->iit_donor, 0);
1620
1621 ipc_importance_unlock();
1622 }
1623
1624 /*
1625 * Routine: ipc_importance_task_is_marked_donor
1626 * Purpose:
1627 * Query the donor flag for the given task importance.
1628 * Conditions:
1629 * May be called without taking the importance lock.
1630 * In that case, donor status can change so you must
1631 * check only once for each donation event.
1632 */
1633 boolean_t
1634 ipc_importance_task_is_marked_donor(ipc_importance_task_t task_imp)
1635 {
1636 if (IIT_NULL == task_imp) {
1637 return FALSE;
1638 }
1639 return 0 != task_imp->iit_donor;
1640 }
1641
1642 /*
1643 * Routine: ipc_importance_task_mark_live_donor
1644 * Purpose:
1645 * Indicate that the task is eligible for live donor updates.
1646 * Conditions:
1647 * Nothing locked on entrance, nothing locked on exit.
1648 *
1649 * This is only called while the task is being constructed.
1650 */
1651 void
1652 ipc_importance_task_mark_live_donor(ipc_importance_task_t task_imp, boolean_t live_donating)
1653 {
1654 assert(task_imp != NULL);
1655
1656 ipc_importance_lock();
1657 task_imp->iit_live_donor = (live_donating ? 1 : 0);
1658 ipc_importance_unlock();
1659 }
1660
1661 /*
1662 * Routine: ipc_importance_task_is_marked_live_donor
1663 * Purpose:
1664 * Query the live donor and donor flags for the given task importance.
1665 * Conditions:
1666 * May be called without taking the importance lock.
1667 * In that case, donor status can change so you must
1668 * check only once for each donation event.
1669 */
1670 boolean_t
1671 ipc_importance_task_is_marked_live_donor(ipc_importance_task_t task_imp)
1672 {
1673 if (IIT_NULL == task_imp) {
1674 return FALSE;
1675 }
1676 return 0 != task_imp->iit_live_donor;
1677 }
1678
1679 /*
1680 * Routine: ipc_importance_task_is_donor
1681 * Purpose:
1682 * Query the full donor status for the given task importance.
1683 * Conditions:
1684 * May be called without taking the importance lock.
1685 * In that case, donor status can change so you must
1686 * check only once for each donation event.
1687 */
1688 boolean_t
1689 ipc_importance_task_is_donor(ipc_importance_task_t task_imp)
1690 {
1691 if (IIT_NULL == task_imp) {
1692 return FALSE;
1693 }
1694 return ipc_importance_task_is_marked_donor(task_imp) ||
1695 (ipc_importance_task_is_marked_receiver(task_imp) &&
1696 task_imp->iit_assertcnt > 0);
1697 }
1698
1699 /*
1700 * Routine: ipc_importance_task_is_never_donor
1701 * Purpose:
1702 * Query if a given task can ever donate importance.
1703 * Conditions:
1704 * May be called without taking the importance lock.
1705 * Condition is permanent for a given task.
1706 */
1707 boolean_t
1708 ipc_importance_task_is_never_donor(ipc_importance_task_t task_imp)
1709 {
1710 if (IIT_NULL == task_imp) {
1711 return FALSE;
1712 }
1713 return !ipc_importance_task_is_marked_donor(task_imp) &&
1714 !ipc_importance_task_is_marked_live_donor(task_imp) &&
1715 !ipc_importance_task_is_marked_receiver(task_imp);
1716 }
1717
1718 /*
1719 * Routine: ipc_importance_task_mark_receiver
1720 * Purpose:
1721 * Update the task importance receiver flag.
1722 * Conditions:
1723 * Nothing locked on entrance, nothing locked on exit.
1724 * This can only be invoked before the task is discoverable,
1725 * so no worries about atomicity(?)
1726 */
1727 void
1728 ipc_importance_task_mark_receiver(ipc_importance_task_t task_imp, boolean_t receiving)
1729 {
1730 assert(task_imp != NULL);
1731
1732 ipc_importance_lock();
1733 if (receiving) {
1734 assert(task_imp->iit_assertcnt == 0);
1735 assert(task_imp->iit_externcnt == 0);
1736 assert(task_imp->iit_externdrop == 0);
1737 assert(task_imp->iit_denap == 0);
1738 task_imp->iit_receiver = 1; /* task can receive importance boost */
1739 } else if (task_imp->iit_receiver) {
1740 assert(task_imp->iit_denap == 0);
1741 if (task_imp->iit_assertcnt != 0 || IIT_EXTERN(task_imp) != 0) {
1742 panic("disabling imp_receiver on task with pending importance boosts!");
1743 }
1744 task_imp->iit_receiver = 0;
1745 }
1746 ipc_importance_unlock();
1747 }
1748
1749
1750 /*
1751 * Routine: ipc_importance_task_is_marked_receiver
1752 * Purpose:
1753 * Query the receiver flag for the given task importance.
1754 * Conditions:
1755 * May be called without taking the importance lock as
1756 * the importance flag can never change after task init.
1757 */
1758 boolean_t
1759 ipc_importance_task_is_marked_receiver(ipc_importance_task_t task_imp)
1760 {
1761 return IIT_NULL != task_imp && 0 != task_imp->iit_receiver;
1762 }
1763
1764
1765 /*
1766 * Routine: ipc_importance_task_mark_denap_receiver
1767 * Purpose:
1768 * Update the task importance de-nap receiver flag.
1769 * Conditions:
1770 * Nothing locked on entrance, nothing locked on exit.
1771 * This can only be invoked before the task is discoverable,
1772 * so no worries about atomicity(?)
1773 */
1774 void
1775 ipc_importance_task_mark_denap_receiver(ipc_importance_task_t task_imp, boolean_t denap)
1776 {
1777 assert(task_imp != NULL);
1778
1779 ipc_importance_lock();
1780 if (denap) {
1781 assert(task_imp->iit_assertcnt == 0);
1782 assert(task_imp->iit_externcnt == 0);
1783 assert(task_imp->iit_receiver == 0);
1784 task_imp->iit_denap = 1; /* task can receive de-nap boost */
1785 } else if (task_imp->iit_denap) {
1786 assert(task_imp->iit_receiver == 0);
1787 if (0 < task_imp->iit_assertcnt || 0 < IIT_EXTERN(task_imp)) {
1788 panic("disabling de-nap on task with pending de-nap boosts!");
1789 }
1790 task_imp->iit_denap = 0;
1791 }
1792 ipc_importance_unlock();
1793 }
1794
1795
1796 /*
1797 * Routine: ipc_importance_task_is_marked_denap_receiver
1798 * Purpose:
1799 * Query the de-nap receiver flag for the given task importance.
1800 * Conditions:
1801 * May be called without taking the importance lock as
1802 * the de-nap flag can never change after task init.
1803 */
1804 boolean_t
1805 ipc_importance_task_is_marked_denap_receiver(ipc_importance_task_t task_imp)
1806 {
1807 return IIT_NULL != task_imp && 0 != task_imp->iit_denap;
1808 }
1809
1810 /*
1811 * Routine: ipc_importance_task_is_denap_receiver
1812 * Purpose:
1813 * Query the full de-nap receiver status for the given task importance.
1814 * For now, that is simply whether the receiver flag is set.
1815 * Conditions:
1816 * May be called without taking the importance lock as
1817 * the de-nap receiver flag can never change after task init.
1818 */
1819 boolean_t
1820 ipc_importance_task_is_denap_receiver(ipc_importance_task_t task_imp)
1821 {
1822 return ipc_importance_task_is_marked_denap_receiver(task_imp);
1823 }
1824
1825 /*
1826 * Routine: ipc_importance_task_is_any_receiver_type
1827 * Purpose:
1828 * Query if the task is marked to receive boosts - either
1829 * importance or denap.
1830 * Conditions:
1831 * May be called without taking the importance lock as both
1832 * the importance and de-nap receiver flags can never change
1833 * after task init.
1834 */
1835 boolean_t
1836 ipc_importance_task_is_any_receiver_type(ipc_importance_task_t task_imp)
1837 {
1838 return ipc_importance_task_is_marked_receiver(task_imp) ||
1839 ipc_importance_task_is_marked_denap_receiver(task_imp);
1840 }
1841
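/*
 * Exposition-only sketch (not part of the original source): the receiver
 * predicates above are typically combined through
 * ipc_importance_task_is_any_receiver_type() to gate boost/inherit setup,
 * much as the send and inherit paths later in this file do.  The helper
 * name below is hypothetical.
 */
#if 0 /* exposition only */
static boolean_t
example_can_receive_boost(task_t task)
{
	/* either flavor of receiver (importance or de-nap) qualifies */
	return ipc_importance_task_is_any_receiver_type(task->task_imp_base);
}
#endif
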
1842 #if 0 /* currently unused */
1843
1844 /*
1845 * Routine: ipc_importance_inherit_reference
1846 * Purpose:
1847 * Add a reference to the inherit importance element.
1848 * Conditions:
1849 * Caller must hold a reference on the inherit element.
1850 */
1851 static inline void
1852 ipc_importance_inherit_reference(ipc_importance_inherit_t inherit)
1853 {
1854 ipc_importance_reference(&inherit->iii_elem);
1855 }
1856 #endif /* currently unused */
1857
1858 /*
1859 * Routine: ipc_importance_inherit_release_locked
1860 * Purpose:
1861 * Release a reference on an inherit importance attribute value,
1862 * unlinking and deallocating the attribute if the last reference.
1863 * Conditions:
1864 * Entered with importance lock held, leaves with it unlocked.
1865 */
1866 static inline void
1867 ipc_importance_inherit_release_locked(ipc_importance_inherit_t inherit)
1868 {
1869 ipc_importance_release_locked(&inherit->iii_elem);
1870 }
1871
1872 #if 0 /* currently unused */
1873 /*
1874 * Routine: ipc_importance_inherit_release
1875 * Purpose:
1876 * Release a reference on an inherit importance attribute value,
1877 * unlinking and deallocating the attribute if the last reference.
1878 * Conditions:
1879 * nothing locked on entrance, nothing locked on exit.
1880 * May block.
1881 */
1882 void
1883 ipc_importance_inherit_release(ipc_importance_inherit_t inherit)
1884 {
1885 if (III_NULL != inherit) {
1886 ipc_importance_release(&inherit->iii_elem);
1887 }
1888 }
1889 #endif /* 0 currently unused */
1890
1891 /*
1892 * Routine: ipc_importance_for_task
1893 * Purpose:
1894 * Create a reference for the specified task's base importance
1895 * element. If the base importance element doesn't exist, make it and
1896 * bind it to the active task. If the task is inactive, there isn't
1897 * any need to return a new reference.
1898 * Conditions:
1899 * If made is true, a "made" reference is returned (for donating to
1900 * the voucher system). Otherwise an internal reference is returned.
1901 *
1902 * Nothing locked on entry. May block.
1903 */
1904 ipc_importance_task_t
1905 ipc_importance_for_task(task_t task, boolean_t made)
1906 {
1907 ipc_importance_task_t task_elem;
1908 boolean_t first_pass = TRUE;
1909
1910 assert(TASK_NULL != task);
1911
1912 retry:
1913 /* No use returning anything for inactive task */
1914 if (!task->active) {
1915 return IIT_NULL;
1916 }
1917
1918 ipc_importance_lock();
1919 task_elem = task->task_imp_base;
1920 if (IIT_NULL != task_elem) {
1921 /* Add a made reference (borrowing active task ref to do it) */
1922 if (made) {
1923 if (0 == task_elem->iit_made++) {
1924 assert(IIT_REFS_MAX > IIT_REFS(task_elem));
1925 ipc_importance_task_reference_internal(task_elem);
1926 }
1927 } else {
1928 assert(IIT_REFS_MAX > IIT_REFS(task_elem));
1929 ipc_importance_task_reference_internal(task_elem);
1930 }
1931 ipc_importance_unlock();
1932 return task_elem;
1933 }
1934 ipc_importance_unlock();
1935
1936 if (!first_pass) {
1937 return IIT_NULL;
1938 }
1939 first_pass = FALSE;
1940
1941 /* Need to make one - may race with others (be prepared to drop) */
1942 task_elem = zalloc_flags(ipc_importance_task_zone, Z_WAITOK | Z_ZERO);
1943 if (IIT_NULL == task_elem) {
1944 goto retry;
1945 }
1946
1947 task_elem->iit_bits = IIE_TYPE_TASK | 2; /* one for task, one for return/made */
1948 task_elem->iit_made = (made) ? 1 : 0;
1949 task_elem->iit_task = task; /* take actual ref when we're sure */
1950 #if IIE_REF_DEBUG
1951 ipc_importance_counter_init(&task_elem->iit_elem);
1952 #endif
1953 queue_init(&task_elem->iit_kmsgs);
1954 queue_init(&task_elem->iit_inherits);
1955
1956 ipc_importance_lock();
1957 if (!task->active) {
1958 ipc_importance_unlock();
1959 zfree(ipc_importance_task_zone, task_elem);
1960 return IIT_NULL;
1961 }
1962
1963 /* did we lose the race? */
1964 if (IIT_NULL != task->task_imp_base) {
1965 ipc_importance_unlock();
1966 zfree(ipc_importance_task_zone, task_elem);
1967 goto retry;
1968 }
1969
1970 /* we won the race */
1971 task->task_imp_base = task_elem;
1972 task_reference(task);
1973 #if DEVELOPMENT || DEBUG
1974 queue_enter(&global_iit_alloc_queue, task_elem, ipc_importance_task_t, iit_allocation);
1975 task_importance_update_owner_info(task);
1976 #endif
1977 ipc_importance_unlock();
1978
1979 return task_elem;
1980 }
1981
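/*
 * Exposition-only sketch (not part of the original source): the typical
 * internal-reference use of ipc_importance_for_task().  With made == FALSE
 * the caller gets a plain internal reference to balance with
 * ipc_importance_task_release(); with made == TRUE the reference is a
 * "made" reference intended for donation to the voucher system.  The
 * helper name below is hypothetical.
 */
#if 0 /* exposition only */
static void
example_internal_importance_ref(task_t task)
{
	ipc_importance_task_t imp = ipc_importance_for_task(task, FALSE);

	if (IIT_NULL == imp) {
		return;         /* task inactive - nothing to reference */
	}
	/* ... examine or adjust importance state via imp ... */
	ipc_importance_task_release(imp);
}
#endif
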
1982 #if DEVELOPMENT || DEBUG
1983 void
1984 task_importance_update_owner_info(task_t task)
1985 {
1986 if (task != TASK_NULL && task->task_imp_base != IIT_NULL) {
1987 ipc_importance_task_t task_elem = task->task_imp_base;
1988
1989 task_elem->iit_bsd_pid = task_pid(task);
1990 if (task->bsd_info) {
1991 strncpy(&task_elem->iit_procname[0], proc_name_address(task->bsd_info), 16);
1992 task_elem->iit_procname[16] = '\0';
1993 } else {
1994 strncpy(&task_elem->iit_procname[0], "unknown", 16);
1995 }
1996 }
1997 }
1998 #endif
1999
2000 /*
2001 * Routine: ipc_importance_reset_locked
2002 * Purpose:
2003 * Reset a task's IPC importance (the task is going away or exec'ing)
2004 *
2005 * Remove the donor bit and legacy externalized assertions from the
2006 * current task importance and see if that wipes out downstream donations.
2007 * Conditions:
2008 * importance lock held.
2009 */
2010
2011 static void
2012 ipc_importance_reset_locked(ipc_importance_task_t task_imp, boolean_t donor)
2013 {
2014 boolean_t before_donor, after_donor;
2015
2016 /* remove the donor bit, live-donor bit and externalized boosts */
2017 before_donor = ipc_importance_task_is_donor(task_imp);
2018 if (donor) {
2019 task_imp->iit_donor = 0;
2020 }
2021 assert(IIT_LEGACY_EXTERN(task_imp) <= IIT_EXTERN(task_imp));
2022 assert(task_imp->iit_legacy_externcnt <= task_imp->iit_externcnt);
2023 assert(task_imp->iit_legacy_externdrop <= task_imp->iit_externdrop);
2024 task_imp->iit_externcnt -= task_imp->iit_legacy_externcnt;
2025 task_imp->iit_externdrop -= task_imp->iit_legacy_externdrop;
2026
2027 /* assert(IIT_LEGACY_EXTERN(task_imp) <= task_imp->iit_assertcnt); */
2028 if (IIT_EXTERN(task_imp) < task_imp->iit_assertcnt) {
2029 task_imp->iit_assertcnt -= IIT_LEGACY_EXTERN(task_imp);
2030 } else {
2031 task_imp->iit_assertcnt = IIT_EXTERN(task_imp);
2032 }
2033 task_imp->iit_legacy_externcnt = 0;
2034 task_imp->iit_legacy_externdrop = 0;
2035 after_donor = ipc_importance_task_is_donor(task_imp);
2036
2037 #if DEVELOPMENT || DEBUG
2038 if (task_imp->iit_assertcnt > 0 && task_imp->iit_live_donor) {
2039 printf("Live donor task %s[%d] still has %d importance assertions after reset\n",
2040 task_imp->iit_procname, task_imp->iit_bsd_pid, task_imp->iit_assertcnt);
2041 }
2042 #endif
2043
2044 /* propagate a downstream drop if there was a change in donor status */
2045 if (after_donor != before_donor) {
2046 ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, FALSE);
2047 }
2048 }
2049
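/*
 * Exposition-only worked example for the legacy-boost arithmetic in
 * ipc_importance_reset_locked() above (not part of the original source),
 * assuming IIT_EXTERN() is the externcnt/externdrop difference and
 * IIT_LEGACY_EXTERN() the corresponding legacy difference:
 *
 *	before:  externcnt=5 externdrop=1
 *	         legacy_externcnt=2 legacy_externdrop=0 assertcnt=6
 *	after subtracting the legacy counts: externcnt=3 externdrop=1,
 *	         so IIT_EXTERN()=2 while IIT_LEGACY_EXTERN() is still 2
 *	since IIT_EXTERN()=2 < assertcnt=6, assertcnt -= 2, leaving assertcnt=4
 *	finally the legacy counters are zeroed.
 */
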
2050 /*
2051 * Routine: ipc_importance_reset
2052 * Purpose:
2053 * Reset a task's IPC importance
2054 *
2055 * The task is being reset, although staying around. Arrange to have the
2056 * external state of the task reset from the importance.
2057 * Conditions:
2058 * importance lock not held.
2059 */
2060
2061 void
2062 ipc_importance_reset(ipc_importance_task_t task_imp, boolean_t donor)
2063 {
2064 if (IIT_NULL == task_imp) {
2065 return;
2066 }
2067 ipc_importance_lock();
2068 ipc_importance_reset_locked(task_imp, donor);
2069 ipc_importance_unlock();
2070 }
2071
2072 /*
2073 * Routine: ipc_importance_disconnect_task
2074 * Purpose:
2075 * Disconnect a task from its importance.
2076 *
2077 * Clear the task pointer from the importance and drop the
2078 * reference the task held on the importance object. Before
2079 * doing that, reset the effects the current task holds on
2080 * the importance and see if that wipes out downstream donations.
2081 *
2082 * We allow the upstream boosts to continue to affect downstream
2083 * even though the local task is being effectively pulled from
2084 * the chain.
2085 * Conditions:
2086 * Nothing locked.
2087 */
2088 void
2089 ipc_importance_disconnect_task(task_t task)
2090 {
2091 ipc_importance_task_t task_imp;
2092
2093 task_lock(task);
2094 ipc_importance_lock();
2095 task_imp = task->task_imp_base;
2096
2097 /* did somebody beat us to it? */
2098 if (IIT_NULL == task_imp) {
2099 ipc_importance_unlock();
2100 task_unlock(task);
2101 return;
2102 }
2103
2104 /* disconnect the task from this importance */
2105 assert(task_imp->iit_task == task);
2106 task_imp->iit_task = TASK_NULL;
2107 task->task_imp_base = IIT_NULL;
2108 task_unlock(task);
2109
2110 /* reset the effects the current task holds on the importance */
2111 ipc_importance_reset_locked(task_imp, TRUE);
2112
2113 ipc_importance_task_release_locked(task_imp);
2114 /* importance unlocked */
2115
2116 /* deallocate the task now that the importance is unlocked */
2117 task_deallocate(task);
2118 }
2119
2120 /*
2121 * Routine: ipc_importance_exec_switch_task
2122 * Purpose:
2123 * Switch importance task base from old task to new task in exec.
2124 *
2125 * Create an ipc importance linkage from old task to new task,
2126 * once the linkage is created, switch the importance task base
2127 * from old task to new task. After the switch, the linkage will
2128 * represent importance linkage from new task to old task with
2129 * watch port importance inheritance linked to new task.
2130 * Conditions:
2131 * Nothing locked.
2132 * Returns a reference on importance inherit.
2133 */
2134 ipc_importance_inherit_t
2135 ipc_importance_exec_switch_task(
2136 task_t old_task,
2137 task_t new_task)
2138 {
2139 ipc_importance_inherit_t inherit = III_NULL;
2140 ipc_importance_task_t old_task_imp = IIT_NULL;
2141 ipc_importance_task_t new_task_imp = IIT_NULL;
2142
2143 task_importance_reset(old_task);
2144
2145 /* Create an importance linkage from old_task to new_task */
2146 inherit = ipc_importance_inherit_from_task(old_task, new_task);
2147
2148 /* Switch task importance base from old task to new task */
2149 ipc_importance_lock();
2150
2151 old_task_imp = old_task->task_imp_base;
2152 new_task_imp = new_task->task_imp_base;
2153
2154 old_task_imp->iit_task = new_task;
2155 new_task_imp->iit_task = old_task;
2156
2157 old_task->task_imp_base = new_task_imp;
2158 new_task->task_imp_base = old_task_imp;
2159
2160 #if DEVELOPMENT || DEBUG
2161 /*
2162 * Update the pid and proc name for the importance base, if any
2163 */
2164 task_importance_update_owner_info(new_task);
2165 #endif
2166 ipc_importance_unlock();
2167
2168 return inherit;
2169 }
2170
2171 /*
2172 * Routine: ipc_importance_check_circularity
2173 * Purpose:
2174 * Check if queueing "port" in a message for "dest"
2175 * would create a circular group of ports and messages.
2176 *
2177 * If no circularity (FALSE returned), then "port"
2178 * is changed from "in limbo" to "in transit".
2179 *
2180 * That is, we want to set port->ip_destination == dest,
2181 * but guaranteeing that this doesn't create a circle
2182 * port->ip_destination->ip_destination->... == port
2183 *
2184 * Additionally, if port was successfully changed to "in transit",
2185 * propagate boost assertions from the "in limbo" port to all
2186 * the ports in the chain, and, if the destination task accepts
2187 * boosts, to the destination task.
2188 *
2189 * Conditions:
2190 * No ports locked. References held for "port" and "dest".
2191 */
2192
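/*
 * Exposition-only illustration (not part of the original source): the
 * cycle being guarded against has the shape
 *
 *	port -> dest -> ... -> port
 *
 * where each arrow is the ip_destination link of a port that is "in
 * transit".  The routine below walks ip_destination from dest until it
 * finds a port that is not in transit (the "base"); reaching "port"
 * first means queueing port for dest would close the loop.
 */
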
2193 boolean_t
2194 ipc_importance_check_circularity(
2195 ipc_port_t port,
2196 ipc_port_t dest)
2197 {
2198 ipc_importance_task_t imp_task = IIT_NULL;
2199 ipc_importance_task_t release_imp_task = IIT_NULL;
2200 boolean_t imp_lock_held = FALSE;
2201 int assertcnt = 0;
2202 ipc_port_t base;
2203 struct turnstile *send_turnstile = TURNSTILE_NULL;
2204 struct task_watchport_elem *watchport_elem = NULL;
2205
2206 assert(port != IP_NULL);
2207 assert(dest != IP_NULL);
2208
2209 if (port == dest) {
2210 return TRUE;
2211 }
2212 base = dest;
2213
2214 /* Check if destination needs a turnstile */
2215 ipc_port_send_turnstile_prepare(dest);
2216
2217 /* port is in limbo, so donation status is safe to latch */
2218 if (port->ip_impdonation != 0) {
2219 imp_lock_held = TRUE;
2220 ipc_importance_lock();
2221 }
2222
2223 /*
2224 * First try a quick check that can run in parallel.
2225 * No circularity if dest is not in transit.
2226 */
2227 ip_lock(port);
2228
2229 /*
2230 * Even if port is just carrying assertions for others,
2231 * we need the importance lock.
2232 */
2233 if (port->ip_impcount > 0 && !imp_lock_held) {
2234 if (!ipc_importance_lock_try()) {
2235 ip_unlock(port);
2236 ipc_importance_lock();
2237 ip_lock(port);
2238 }
2239 imp_lock_held = TRUE;
2240 }
2241
2242 if (ip_lock_try(dest)) {
2243 if (!ip_active(dest) ||
2244 (dest->ip_receiver_name != MACH_PORT_NULL) ||
2245 (dest->ip_destination == IP_NULL)) {
2246 goto not_circular;
2247 }
2248
2249 /* dest is in transit; further checking necessary */
2250
2251 ip_unlock(dest);
2252 }
2253 ip_unlock(port);
2254
2255 /*
2256 * We're about to pay the cost to serialize,
2257 * just go ahead and grab importance lock.
2258 */
2259 if (!imp_lock_held) {
2260 ipc_importance_lock();
2261 imp_lock_held = TRUE;
2262 }
2263
2264 ipc_port_multiple_lock(); /* massive serialization */
2265
2266 /*
2267 * Search for the end of the chain (a port not in transit),
2268 * acquiring locks along the way.
2269 */
2270
2271 for (;;) {
2272 ip_lock(base);
2273
2274 if (!ip_active(base) ||
2275 (base->ip_receiver_name != MACH_PORT_NULL) ||
2276 (base->ip_destination == IP_NULL)) {
2277 break;
2278 }
2279
2280 base = base->ip_destination;
2281 }
2282
2283 /* all ports in chain from dest to base, inclusive, are locked */
2284
2285 if (port == base) {
2286 /* circularity detected! */
2287
2288 ipc_port_multiple_unlock();
2289
2290 /* port (== base) is in limbo */
2291
2292 require_ip_active(port);
2293 assert(port->ip_receiver_name == MACH_PORT_NULL);
2294 assert(port->ip_destination == IP_NULL);
2295
2296 base = dest;
2297 while (base != IP_NULL) {
2298 ipc_port_t next;
2299
2300 /* base is in transit or in limbo */
2301
2302 require_ip_active(base);
2303 assert(base->ip_receiver_name == MACH_PORT_NULL);
2304
2305 next = base->ip_destination;
2306 ip_unlock(base);
2307 base = next;
2308 }
2309
2310 if (imp_lock_held) {
2311 ipc_importance_unlock();
2312 }
2313
2314 ipc_port_send_turnstile_complete(dest);
2315 return TRUE;
2316 }
2317
2318 /*
2319 * The guarantee: lock port while the entire chain is locked.
2320 * Once port is locked, we can take a reference to dest,
2321 * add port to the chain, and unlock everything.
2322 */
2323
2324 ip_lock(port);
2325 ipc_port_multiple_unlock();
2326
2327 not_circular:
2328 /* port is in limbo */
2329 imq_lock(&port->ip_messages);
2330
2331 require_ip_active(port);
2332 assert(port->ip_receiver_name == MACH_PORT_NULL);
2333 assert(port->ip_destination == IP_NULL);
2334
2335 /* Port is being enqueued in a kmsg, remove the watchport boost in order to push on destination port */
2336 watchport_elem = ipc_port_clear_watchport_elem_internal(port);
2337
2338 /* Check if the port is being enqueued as a part of sync bootstrap checkin */
2339 if (dest->ip_specialreply && dest->ip_sync_bootstrap_checkin) {
2340 port->ip_sync_bootstrap_checkin = 1;
2341 }
2342
2343 ip_reference(dest);
2344 port->ip_destination = dest;
2345
2346 /* must have been in limbo or still bound to a task */
2347 assert(port->ip_tempowner != 0);
2348
2349 /*
2350 * We delayed dropping assertions from a specific task.
2351 * Cache that info now (we'll drop assertions and the
2352 * task reference below).
2353 */
2354 release_imp_task = port->ip_imp_task;
2355 if (IIT_NULL != release_imp_task) {
2356 port->ip_imp_task = IIT_NULL;
2357 }
2358 assertcnt = port->ip_impcount;
2359
2360 /* take the port out of limbo w.r.t. assertions */
2361 port->ip_tempowner = 0;
2362
2363 /*
2364 * Setup linkage for source port if it has a send turnstile i.e. it has
2365 * a thread waiting in send or has a port enqueued in it or has sync ipc
2366 * push from a special reply port.
2367 */
2368 if (port_send_turnstile(port)) {
2369 send_turnstile = turnstile_prepare((uintptr_t)port,
2370 port_send_turnstile_address(port),
2371 TURNSTILE_NULL, TURNSTILE_SYNC_IPC);
2372
2373 turnstile_update_inheritor(send_turnstile, port_send_turnstile(dest),
2374 (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE));
2375
2376 /* update complete and turnstile complete called after dropping all locks */
2377 }
2378 imq_unlock(&port->ip_messages);
2379
2380 /* now unlock chain */
2381
2382 ip_unlock(port);
2383
2384 for (;;) {
2385 ipc_port_t next;
2386 /* every port along the chain tracks assertions behind it */
2387 ipc_port_impcount_delta(dest, assertcnt, base);
2388
2389 if (dest == base) {
2390 break;
2391 }
2392
2393 /* port is in transit */
2394
2395 require_ip_active(dest);
2396 assert(dest->ip_receiver_name == MACH_PORT_NULL);
2397 assert(dest->ip_destination != IP_NULL);
2398 assert(dest->ip_tempowner == 0);
2399
2400 next = dest->ip_destination;
2401 ip_unlock(dest);
2402 dest = next;
2403 }
2404
2405 /* base is not in transit */
2406 assert(!ip_active(base) ||
2407 (base->ip_receiver_name != MACH_PORT_NULL) ||
2408 (base->ip_destination == IP_NULL));
2409
2410 /*
2411 * Find the task to boost (if any).
2412 * We will boost "through" ports that don't know
2413 * about inheritance to deliver receive rights that
2414 * do.
2415 */
2416 if (ip_active(base) && (assertcnt > 0)) {
2417 assert(imp_lock_held);
2418 if (base->ip_tempowner != 0) {
2419 if (IIT_NULL != base->ip_imp_task) {
2420 /* specified tempowner task */
2421 imp_task = base->ip_imp_task;
2422 assert(ipc_importance_task_is_any_receiver_type(imp_task));
2423 }
2424 /* otherwise don't boost current task */
2425 } else if (base->ip_receiver_name != MACH_PORT_NULL) {
2426 ipc_space_t space = base->ip_receiver;
2427
2428 /* only spaces with boost-accepting tasks */
2429 if (space->is_task != TASK_NULL &&
2430 ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) {
2431 imp_task = space->is_task->task_imp_base;
2432 }
2433 }
2434
2435 /* take reference before unlocking base */
2436 if (imp_task != IIT_NULL) {
2437 ipc_importance_task_reference(imp_task);
2438 }
2439 }
2440
2441 ip_unlock(base);
2442
2443 /* All locks dropped, call turnstile_update_inheritor_complete for source port's turnstile */
2444 if (send_turnstile) {
2445 turnstile_update_inheritor_complete(send_turnstile, TURNSTILE_INTERLOCK_NOT_HELD);
2446
2447 /* Take the mq lock to call turnstile complete */
2448 imq_lock(&port->ip_messages);
2449 turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), NULL, TURNSTILE_SYNC_IPC);
2450 send_turnstile = TURNSTILE_NULL;
2451 imq_unlock(&port->ip_messages);
2452 turnstile_cleanup();
2453 }
2454
2455 /*
2456 * Transfer assertions now that the ports are unlocked.
2457 * Avoid extra overhead if transferring to/from the same task.
2458 *
2459 * NOTE: If a transfer is occurring, the new assertions will
2460 * be added to imp_task BEFORE the importance lock is unlocked.
2461 * This is critical - to avoid decrements coming from the kmsgs
2462 * beating the increment to the task.
2463 */
2464 boolean_t transfer_assertions = (imp_task != release_imp_task);
2465
2466 if (imp_task != IIT_NULL) {
2467 assert(imp_lock_held);
2468 if (transfer_assertions) {
2469 ipc_importance_task_hold_internal_assertion_locked(imp_task, assertcnt);
2470 }
2471 }
2472
2473 if (release_imp_task != IIT_NULL) {
2474 assert(imp_lock_held);
2475 if (transfer_assertions) {
2476 ipc_importance_task_drop_internal_assertion_locked(release_imp_task, assertcnt);
2477 }
2478 }
2479
2480 if (imp_lock_held) {
2481 ipc_importance_unlock();
2482 }
2483
2484 if (imp_task != IIT_NULL) {
2485 ipc_importance_task_release(imp_task);
2486 }
2487
2488 if (release_imp_task != IIT_NULL) {
2489 ipc_importance_task_release(release_imp_task);
2490 }
2491
2492 if (watchport_elem) {
2493 task_watchport_elem_deallocate(watchport_elem);
2494 }
2495
2496 return FALSE;
2497 }
2498
2499 /*
2500 * Routine: ipc_importance_send
2501 * Purpose:
2502 * Post the importance voucher attribute [if sent] or a static
2503 * importance boost depending upon options and conditions.
2504 * Conditions:
2505 * Destination port locked on entry and exit, may be dropped during the call.
2506 * Returns:
2507 * A boolean identifying if the port lock was temporarily dropped.
2508 */
2509 boolean_t
2510 ipc_importance_send(
2511 ipc_kmsg_t kmsg,
2512 mach_msg_option_t option)
2513 {
2514 ipc_port_t port = kmsg->ikm_header->msgh_remote_port;
2515 boolean_t port_lock_dropped = FALSE;
2516 ipc_importance_elem_t elem;
2517 task_t task;
2518 ipc_importance_task_t task_imp;
2519 kern_return_t kr;
2520
2521 assert(IP_VALID(port));
2522
2523 /* If no donation to be made, return quickly */
2524 if ((port->ip_impdonation == 0) ||
2525 (option & MACH_SEND_NOIMPORTANCE) != 0) {
2526 return port_lock_dropped;
2527 }
2528
2529 task = current_task();
2530
2531 /* If forced sending a static boost, go update the port */
2532 if ((option & MACH_SEND_IMPORTANCE) != 0) {
2533 /* acquire the importance lock while trying to hang on to port lock */
2534 if (!ipc_importance_lock_try()) {
2535 port_lock_dropped = TRUE;
2536 ip_unlock(port);
2537 ipc_importance_lock();
2538 }
2539 goto portupdate;
2540 }
2541
2542 task_imp = task->task_imp_base;
2543 assert(IIT_NULL != task_imp);
2544
2545 /* If the sender can never donate importance, nothing to do */
2546 if (ipc_importance_task_is_never_donor(task_imp)) {
2547 return port_lock_dropped;
2548 }
2549
2550 elem = IIE_NULL;
2551
2552 /* If importance receiver and passing a voucher, look for importance in there */
2553 if (IP_VALID(kmsg->ikm_voucher) &&
2554 ipc_importance_task_is_marked_receiver(task_imp)) {
2555 mach_voucher_attr_value_handle_t vals[MACH_VOUCHER_ATTR_VALUE_MAX_NESTED];
2556 mach_voucher_attr_value_handle_array_size_t val_count;
2557 ipc_voucher_t voucher;
2558
2559 assert(ip_kotype(kmsg->ikm_voucher) == IKOT_VOUCHER);
2560 voucher = (ipc_voucher_t)ip_get_kobject(kmsg->ikm_voucher);
2561
2562 /* check to see if the voucher has an importance attribute */
2563 val_count = MACH_VOUCHER_ATTR_VALUE_MAX_NESTED;
2564 kr = mach_voucher_attr_control_get_values(ipc_importance_control, voucher,
2565 vals, &val_count);
2566 assert(KERN_SUCCESS == kr);
2567
2568 /*
2569 * Only use importance associated with our task (either directly
2570 * or through an inherit that donates to our task).
2571 */
2572 if (0 < val_count) {
2573 ipc_importance_elem_t check_elem;
2574
2575 check_elem = (ipc_importance_elem_t)vals[0];
2576 assert(IIE_NULL != check_elem);
2577 if (IIE_TYPE_INHERIT == IIE_TYPE(check_elem)) {
2578 ipc_importance_inherit_t inherit;
2579 inherit = (ipc_importance_inherit_t) check_elem;
2580 if (inherit->iii_to_task == task_imp) {
2581 elem = check_elem;
2582 }
2583 } else if (check_elem == (ipc_importance_elem_t)task_imp) {
2584 elem = check_elem;
2585 }
2586 }
2587 }
2588
2589 /* If we haven't found an importance attribute to send yet, use the task's */
2590 if (IIE_NULL == elem) {
2591 elem = (ipc_importance_elem_t)task_imp;
2592 }
2593
2594 /* take a reference for the message to hold */
2595 ipc_importance_reference_internal(elem);
2596
2597 /* acquire the importance lock while trying to hang on to port lock */
2598 if (!ipc_importance_lock_try()) {
2599 port_lock_dropped = TRUE;
2600 ip_unlock(port);
2601 ipc_importance_lock();
2602 }
2603
2604 /* link kmsg onto the donor element propagation chain */
2605 ipc_importance_kmsg_link(kmsg, elem);
2606 /* elem reference transferred to kmsg */
2607
2608 incr_ref_counter(elem->iie_kmsg_refs_added);
2609
2610 /* If the sender isn't currently a donor, no need to apply boost */
2611 if (!ipc_importance_task_is_donor(task_imp)) {
2612 ipc_importance_unlock();
2613
2614 /* re-acquire port lock, if needed */
2615 if (TRUE == port_lock_dropped) {
2616 ip_lock(port);
2617 }
2618
2619 return port_lock_dropped;
2620 }
2621
2622 portupdate:
2623 /* Mark the fact that we are (currently) donating through this message */
2624 kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;
2625
2626 /*
2627 * If we need to relock the port, do it with the importance still locked.
2628 * This assures we get to add the importance boost through the port to
2629 * the task BEFORE anyone else can attempt to undo that operation if
2630 * the sender lost donor status.
2631 */
2632 if (TRUE == port_lock_dropped) {
2633 ip_lock(port);
2634 }
2635
2636 ipc_importance_assert_held();
2637
2638 #if IMPORTANCE_TRACE
2639 if (kdebug_enable) {
2640 mach_msg_max_trailer_t *dbgtrailer = (mach_msg_max_trailer_t *)
2641 ((vm_offset_t)kmsg->ikm_header + mach_round_msg(kmsg->ikm_header->msgh_size));
2642 unsigned int sender_pid = dbgtrailer->msgh_audit.val[5];
2643 mach_msg_id_t imp_msgh_id = kmsg->ikm_header->msgh_id;
2644 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_SEND)) | DBG_FUNC_START,
2645 task_pid(task), sender_pid, imp_msgh_id, 0, 0);
2646 }
2647 #endif /* IMPORTANCE_TRACE */
2648
2649 mach_port_delta_t delta = 1;
2650 boolean_t need_port_lock;
2651 task_imp = IIT_NULL;
2652
2653 /* adjust port boost count (with importance and port locked) */
2654 need_port_lock = ipc_port_importance_delta_internal(port, IPID_OPTION_NORMAL, &delta, &task_imp);
2655 /* hold a reference on task_imp */
2656
2657 /* if we need to adjust a task importance as a result, apply that here */
2658 if (IIT_NULL != task_imp && delta != 0) {
2659 assert(delta == 1);
2660
2661 /* if this results in a change of state, propagate the transition */
2662 if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_HOLD, delta)) {
2663 /* can't hold the port lock during task transition(s) */
2664 if (!need_port_lock) {
2665 need_port_lock = TRUE;
2666 ip_unlock(port);
2667 }
2668 ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_HOLD, TRUE);
2669 }
2670 }
2671
2672 if (task_imp) {
2673 ipc_importance_task_release_locked(task_imp);
2674 /* importance unlocked */
2675 } else {
2676 ipc_importance_unlock();
2677 }
2678
2679 if (need_port_lock) {
2680 port_lock_dropped = TRUE;
2681 ip_lock(port);
2682 }
2683
2684 return port_lock_dropped;
2685 }
2686
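/*
 * Exposition-only sketch (not part of the original source) of the caller
 * contract for ipc_importance_send(): the destination port is locked on
 * entry and on return, but the returned boolean warns that the lock was
 * dropped in between, so cached port state should be revalidated.  The
 * helper name below is hypothetical.
 */
#if 0 /* exposition only */
static void
example_send_with_importance(ipc_kmsg_t kmsg, mach_msg_option_t option)
{
	ipc_port_t port = kmsg->ikm_header->msgh_remote_port;
	boolean_t dropped;

	ip_lock(port);
	dropped = ipc_importance_send(kmsg, option);
	/* port is locked again here; re-check its state if dropped */
	if (dropped && !ip_active(port)) {
		/* ... port died while the lock was dropped ... */
	}
	ip_unlock(port);
}
#endif
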
2687 /*
2688 * Routine: ipc_importance_inherit_from_kmsg
2689 * Purpose:
2690 * Create a "made" reference for an importance attribute representing
2691 * an inheritance between the sender of a message (if linked) and the
2692 * current task importance. If the message is not linked, a static
2693 * boost may be created, based on the boost state of the message.
2694 *
2695 * Any transfer from kmsg linkage to inherit linkage must be atomic.
2696 *
2697 * If the task is inactive, there isn't any need to return a new reference.
2698 * Conditions:
2699 * Nothing locked on entry. May block.
2700 */
2701 static ipc_importance_inherit_t
2702 ipc_importance_inherit_from_kmsg(ipc_kmsg_t kmsg)
2703 {
2704 ipc_importance_task_t task_imp = IIT_NULL;
2705 ipc_importance_elem_t from_elem = kmsg->ikm_importance;
2706 ipc_importance_elem_t elem;
2707 task_t task_self = current_task();
2708
2709 ipc_port_t port = kmsg->ikm_header->msgh_remote_port;
2710 ipc_importance_inherit_t inherit = III_NULL;
2711 ipc_importance_inherit_t alloc = III_NULL;
2712 boolean_t cleared_self_donation = FALSE;
2713 boolean_t donating;
2714 uint32_t depth = 1;
2715
2716 /* The kmsg must have an importance donor or static boost to proceed */
2717 if (IIE_NULL == kmsg->ikm_importance &&
2718 !MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) {
2719 return III_NULL;
2720 }
2721
2722 /*
2723 * No need to set up an inherit linkage if the dest isn't a receiver
2724 * of one type or the other.
2725 */
2726 if (!ipc_importance_task_is_any_receiver_type(task_self->task_imp_base)) {
2727 ipc_importance_lock();
2728 goto out_locked;
2729 }
2730
2731 /* Grab a reference on the importance of the destination */
2732 task_imp = ipc_importance_for_task(task_self, FALSE);
2733
2734 ipc_importance_lock();
2735
2736 if (IIT_NULL == task_imp) {
2737 goto out_locked;
2738 }
2739
2740 incr_ref_counter(task_imp->iit_elem.iie_task_refs_added_inherit_from);
2741
2742 /* If message is already associated with an inherit... */
2743 if (IIE_TYPE_INHERIT == IIE_TYPE(from_elem)) {
2744 ipc_importance_inherit_t from_inherit = (ipc_importance_inherit_t)from_elem;
2745
2746 /* already targeting our task? - just use it */
2747 if (from_inherit->iii_to_task == task_imp) {
2748 /* clear self-donation if not also present in inherit */
2749 if (!from_inherit->iii_donating &&
2750 MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) {
2751 kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
2752 cleared_self_donation = TRUE;
2753 }
2754 inherit = from_inherit;
2755 } else if (III_DEPTH_MAX == III_DEPTH(from_inherit)) {
2756 ipc_importance_task_t to_task;
2757 ipc_importance_elem_t unlinked_from;
2758
2759 /*
2760 * Chain too long. Switch to looking
2761 * directly at the from_inherit's to-task
2762 * as our source of importance.
2763 */
2764 to_task = from_inherit->iii_to_task;
2765 ipc_importance_task_reference(to_task);
2766 from_elem = (ipc_importance_elem_t)to_task;
2767 depth = III_DEPTH_RESET | 1;
2768
2769 /* Fixup the kmsg linkage to reflect change */
2770 unlinked_from = ipc_importance_kmsg_unlink(kmsg);
2771 assert(unlinked_from == (ipc_importance_elem_t)from_inherit);
2772 ipc_importance_kmsg_link(kmsg, from_elem);
2773 ipc_importance_inherit_release_locked(from_inherit);
2774 /* importance unlocked */
2775 ipc_importance_lock();
2776 } else {
2777 /* inheriting from an inherit */
2778 depth = from_inherit->iii_depth + 1;
2779 }
2780 }
2781
2782 /*
2783 * Don't allow a task to inherit from itself (would keep it permanently
2784 * boosted even if all other donors to the task went away).
2785 */
2786
2787 if (from_elem == (ipc_importance_elem_t)task_imp) {
2788 goto out_locked;
2789 }
2790
2791 /*
2792 * But if the message isn't associated with any linked source, it is
2793 * intended to be permanently boosting (static boost from kernel).
2794 * In that case DO let the process permanently boost itself.
2795 */
2796 if (IIE_NULL == from_elem) {
2797 assert(MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits));
2798 ipc_importance_task_reference_internal(task_imp);
2799 from_elem = (ipc_importance_elem_t)task_imp;
2800 }
2801
2802 /*
2803 * Now that we have the from_elem figured out,
2804 * check to see if we already have an inherit for this pairing
2805 */
2806 while (III_NULL == inherit) {
2807 inherit = ipc_importance_inherit_find(from_elem, task_imp, depth);
2808
2809 /* Do we have to allocate a new inherit */
2810 if (III_NULL == inherit) {
2811 if (III_NULL != alloc) {
2812 break;
2813 }
2814
2815 /* allocate space */
2816 ipc_importance_unlock();
2817 alloc = (ipc_importance_inherit_t)
2818 zalloc(ipc_importance_inherit_zone);
2819 ipc_importance_lock();
2820 }
2821 }
2822
2823 /* snapshot the donating status while we have importance locked */
2824 donating = MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits);
2825
2826 if (III_NULL != inherit) {
2827 /* We found one, piggyback on that */
2828 assert(0 < III_REFS(inherit));
2829 assert(0 < IIE_REFS(inherit->iii_from_elem));
2830 assert(inherit->iii_externcnt >= inherit->iii_made);
2831
2832 /* add in a made reference */
2833 if (0 == inherit->iii_made++) {
2834 assert(III_REFS_MAX > III_REFS(inherit));
2835 ipc_importance_inherit_reference_internal(inherit);
2836 }
2837
2838 /* Reflect the inherit's change of status into the task boosts */
2839 if (0 == III_EXTERN(inherit)) {
2840 assert(!inherit->iii_donating);
2841 inherit->iii_donating = donating;
2842 if (donating) {
2843 task_imp->iit_externcnt += inherit->iii_externcnt;
2844 task_imp->iit_externdrop += inherit->iii_externdrop;
2845 }
2846 } else {
2847 assert(donating == inherit->iii_donating);
2848 }
2849
2850 /* add in an external reference for this use of the inherit */
2851 inherit->iii_externcnt++;
2852 } else {
2853 /* initialize the previously allocated space */
2854 inherit = alloc;
2855 inherit->iii_bits = IIE_TYPE_INHERIT | 1;
2856 inherit->iii_made = 1;
2857 inherit->iii_externcnt = 1;
2858 inherit->iii_externdrop = 0;
2859 inherit->iii_depth = depth;
2860 inherit->iii_to_task = task_imp;
2861 inherit->iii_from_elem = IIE_NULL;
2862 queue_init(&inherit->iii_kmsgs);
2863
2864 if (donating) {
2865 inherit->iii_donating = TRUE;
2866 } else {
2867 inherit->iii_donating = FALSE;
2868 }
2869
2870 /*
2871 * Chain our new inherit on the element it inherits from.
2872 * The new inherit takes our reference on from_elem.
2873 */
2874 ipc_importance_inherit_link(inherit, from_elem);
2875
2876 #if IIE_REF_DEBUG
2877 ipc_importance_counter_init(&inherit->iii_elem);
2878 from_elem->iie_kmsg_refs_inherited++;
2879 task_imp->iit_elem.iie_task_refs_inherited++;
2880 #endif
2881 }
2882
2883 out_locked:
2884 /*
2885 * for those paths that came straight here: snapshot the donating status
2886 * (this should match previous snapshot for other paths).
2887 */
2888 donating = MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits);
2889
2890 /* unlink the kmsg inheritance (if any) */
2891 elem = ipc_importance_kmsg_unlink(kmsg);
2892 assert(elem == from_elem);
2893
2894 /* If found inherit and donating, reflect that in the task externcnt */
2895 if (III_NULL != inherit && donating) {
2896 task_imp->iit_externcnt++;
2897 /* The owner of receive right might have changed, take the internal assertion */
2898 ipc_importance_task_hold_internal_assertion_locked(task_imp, 1);
2899 /* may have dropped and retaken importance lock */
2900 }
2901
2902 /* If we didn't create a new inherit, we have some resources to release */
2903 if (III_NULL == inherit || inherit != alloc) {
2904 if (IIE_NULL != from_elem) {
2905 if (III_NULL != inherit) {
2906 incr_ref_counter(from_elem->iie_kmsg_refs_coalesced);
2907 } else {
2908 incr_ref_counter(from_elem->iie_kmsg_refs_dropped);
2909 }
2910 ipc_importance_release_locked(from_elem);
2911 /* importance unlocked */
2912 } else {
2913 ipc_importance_unlock();
2914 }
2915
2916 if (IIT_NULL != task_imp) {
2917 if (III_NULL != inherit) {
2918 incr_ref_counter(task_imp->iit_elem.iie_task_refs_coalesced);
2919 }
2920 ipc_importance_task_release(task_imp);
2921 }
2922
2923 if (III_NULL != alloc) {
2924 zfree(ipc_importance_inherit_zone, alloc);
2925 }
2926 } else {
2927 /* from_elem and task_imp references transferred to new inherit */
2928 ipc_importance_unlock();
2929 }
2930
2931 /*
2932 * decrement port boost count
2933 * This is OK to do without the importance lock as we atomically
2934 * unlinked the kmsg and snapshotted the donating state while holding
2935 * the importance lock
2936 */
2937 if (donating || cleared_self_donation) {
2938 ip_lock(port);
2939 /* drop importance from port and destination task */
2940 if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == FALSE) {
2941 ip_unlock(port);
2942 }
2943 }
2944
2945 if (III_NULL != inherit) {
2946 /* have an associated importance attr, even if currently not donating */
2947 kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;
2948 } else {
2949 /* we won't have an importance attribute associated with our message */
2950 kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
2951 }
2952
2953 return inherit;
2954 }
2955
2956 /*
2957 * Routine: ipc_importance_inherit_from_task
2958 * Purpose:
2959 * Create a reference for an importance attribute representing
2960 * an inheritance between the to_task and from_task. The iii
2961 * created will be marked as III_FLAGS_FOR_OTHERS.
2962 *
2963 * It will not dedup any iii which are not marked as III_FLAGS_FOR_OTHERS.
2964 *
2965 * If the task is inactive, there isn't any need to return a new reference.
2966 * Conditions:
2967 * Nothing locked on entry. May block.
2968 * It should not be called from voucher subsystem.
2969 */
2970 static ipc_importance_inherit_t
2971 ipc_importance_inherit_from_task(
2972 task_t from_task,
2973 task_t to_task)
2974 {
2975 ipc_importance_task_t to_task_imp = IIT_NULL;
2976 ipc_importance_task_t from_task_imp = IIT_NULL;
2977 ipc_importance_elem_t from_elem = IIE_NULL;
2978
2979 ipc_importance_inherit_t inherit = III_NULL;
2980 ipc_importance_inherit_t alloc = III_NULL;
2981 boolean_t donating;
2982 uint32_t depth = 1;
2983
2984 to_task_imp = ipc_importance_for_task(to_task, FALSE);
2985 from_task_imp = ipc_importance_for_task(from_task, FALSE);
2986 from_elem = (ipc_importance_elem_t)from_task_imp;
2987
2988 ipc_importance_lock();
2989
2990 if (IIT_NULL == to_task_imp || IIT_NULL == from_task_imp) {
2991 goto out_locked;
2992 }
2993
2994 /*
2995 * No need to set up an inherit linkage if the to_task or from_task
2996 * isn't a receiver of one type or the other.
2997 */
2998 if (!ipc_importance_task_is_any_receiver_type(to_task_imp) ||
2999 !ipc_importance_task_is_any_receiver_type(from_task_imp)) {
3000 goto out_locked;
3001 }
3002
3003 /* Do not allow to create a linkage to self */
3004 if (to_task_imp == from_task_imp) {
3005 goto out_locked;
3006 }
3007
3008 incr_ref_counter(to_task_imp->iit_elem.iie_task_refs_added_inherit_from);
3009 incr_ref_counter(from_elem->iie_kmsg_refs_added);
3010
3011 /*
3012 * Now that we have the from_elem figured out,
3013 * check to see if we already have an inherit for this pairing
3014 */
3015 while (III_NULL == inherit) {
3016 inherit = ipc_importance_inherit_find(from_elem, to_task_imp, depth);
3017
3018 /* Do we have to allocate a new inherit */
3019 if (III_NULL == inherit) {
3020 if (III_NULL != alloc) {
3021 break;
3022 }
3023
3024 /* allocate space */
3025 ipc_importance_unlock();
3026 alloc = (ipc_importance_inherit_t)
3027 zalloc(ipc_importance_inherit_zone);
3028 ipc_importance_lock();
3029 }
3030 }
3031
3032 /* snapshot the donating status while we have importance locked */
3033 donating = ipc_importance_task_is_donor(from_task_imp);
3034
3035 if (III_NULL != inherit) {
3036 /* We found one, piggyback on that */
3037 assert(0 < III_REFS(inherit));
3038 assert(0 < IIE_REFS(inherit->iii_from_elem));
3039
3040 /* Take a reference for inherit */
3041 assert(III_REFS_MAX > III_REFS(inherit));
3042 ipc_importance_inherit_reference_internal(inherit);
3043
3044 /* Reflect the inherit's change of status into the task boosts */
3045 if (0 == III_EXTERN(inherit)) {
3046 assert(!inherit->iii_donating);
3047 inherit->iii_donating = donating;
3048 if (donating) {
3049 to_task_imp->iit_externcnt += inherit->iii_externcnt;
3050 to_task_imp->iit_externdrop += inherit->iii_externdrop;
3051 }
3052 } else {
3053 assert(donating == inherit->iii_donating);
3054 }
3055
3056 /* add in an external reference for this use of the inherit */
3057 inherit->iii_externcnt++;
3058 } else {
3059 /* initialize the previously allocated space */
3060 inherit = alloc;
3061 inherit->iii_bits = IIE_TYPE_INHERIT | 1;
3062 inherit->iii_made = 0;
3063 inherit->iii_externcnt = 1;
3064 inherit->iii_externdrop = 0;
3065 inherit->iii_depth = depth;
3066 inherit->iii_to_task = to_task_imp;
3067 inherit->iii_from_elem = IIE_NULL;
3068 queue_init(&inherit->iii_kmsgs);
3069
3070 if (donating) {
3071 inherit->iii_donating = TRUE;
3072 } else {
3073 inherit->iii_donating = FALSE;
3074 }
3075
3076 /*
3077 * Chain our new inherit on the element it inherits from.
3078 * The new inherit takes our reference on from_elem.
3079 */
3080 ipc_importance_inherit_link(inherit, from_elem);
3081
3082 #if IIE_REF_DEBUG
3083 ipc_importance_counter_init(&inherit->iii_elem);
3084 from_elem->iie_kmsg_refs_inherited++;
3085 to_task_imp->iit_elem.iie_task_refs_inherited++;
3086 #endif
3087 }
3088
3089 out_locked:
3090
3091 /* If found inherit and donating, reflect that in the task externcnt */
3092 if (III_NULL != inherit && donating) {
3093 to_task_imp->iit_externcnt++;
3094 /* take the internal assertion */
3095 ipc_importance_task_hold_internal_assertion_locked(to_task_imp, 1);
3096 /* may have dropped and retaken importance lock */
3097 }
3098
3099 /* If we didn't create a new inherit, we have some resources to release */
3100 if (III_NULL == inherit || inherit != alloc) {
3101 if (IIE_NULL != from_elem) {
3102 if (III_NULL != inherit) {
3103 incr_ref_counter(from_elem->iie_kmsg_refs_coalesced);
3104 } else {
3105 incr_ref_counter(from_elem->iie_kmsg_refs_dropped);
3106 }
3107 ipc_importance_release_locked(from_elem);
3108 /* importance unlocked */
3109 } else {
3110 ipc_importance_unlock();
3111 }
3112
3113 if (IIT_NULL != to_task_imp) {
3114 if (III_NULL != inherit) {
3115 incr_ref_counter(to_task_imp->iit_elem.iie_task_refs_coalesced);
3116 }
3117 ipc_importance_task_release(to_task_imp);
3118 }
3119
3120 if (III_NULL != alloc) {
3121 zfree(ipc_importance_inherit_zone, alloc);
3122 }
3123 } else {
3124 /* from_elem and to_task_imp references transferred to new inherit */
3125 ipc_importance_unlock();
3126 }
3127
3128 return inherit;
3129 }
3130
3131 /*
3132 * Routine: ipc_importance_receive
3133 * Purpose:
3134 * Process importance attributes in a received message.
3135 *
3136 * If an importance voucher attribute was sent, transform
3137 * that into an attribute value reflecting the inheritance
3138 * from the sender to the receiver.
3139 *
3140 * If a static boost is received (or the voucher isn't on
3141 * a voucher-based boost), export a static boost.
3142 * Conditions:
3143 * Nothing locked.
3144 */
3145 void
3146 ipc_importance_receive(
3147 ipc_kmsg_t kmsg,
3148 mach_msg_option_t option)
3149 {
3150 int impresult = -1;
3151
3152 #if IMPORTANCE_TRACE || LEGACY_IMPORTANCE_DELIVERY
3153 task_t task_self = current_task();
3154 unsigned int sender_pid = ((mach_msg_max_trailer_t *)
3155 ((vm_offset_t)kmsg->ikm_header +
3156 mach_round_msg(kmsg->ikm_header->msgh_size)))->msgh_audit.val[5];
3157 #endif
3158
3159 /* convert to a voucher with an inherit importance attribute? */
3160 if ((option & MACH_RCV_VOUCHER) != 0) {
3161 uint8_t recipes[2 * sizeof(ipc_voucher_attr_recipe_data_t) +
3162 sizeof(mach_voucher_attr_value_handle_t)];
3163 ipc_voucher_attr_raw_recipe_array_size_t recipe_size = 0;
3164 ipc_voucher_attr_recipe_t recipe = (ipc_voucher_attr_recipe_t)recipes;
3165 ipc_voucher_t recv_voucher;
3166 mach_voucher_attr_value_handle_t handle;
3167 ipc_importance_inherit_t inherit;
3168 kern_return_t kr;
3169
3170 /* set up recipe to copy the old voucher */
3171 if (IP_VALID(kmsg->ikm_voucher)) {
3172 ipc_voucher_t sent_voucher = (ipc_voucher_t)ip_get_kobject(kmsg->ikm_voucher);
3173
3174 recipe->key = MACH_VOUCHER_ATTR_KEY_ALL;
3175 recipe->command = MACH_VOUCHER_ATTR_COPY;
3176 recipe->previous_voucher = sent_voucher;
3177 recipe->content_size = 0;
3178 recipe_size += sizeof(*recipe);
3179 }
3180
3181 /*
3182 * create an inheritance attribute from the kmsg (may be NULL)
3183 * transferring any boosts from the kmsg linkage through the
3184 * port directly to the new inheritance object.
3185 */
3186 inherit = ipc_importance_inherit_from_kmsg(kmsg);
3187 handle = (mach_voucher_attr_value_handle_t)inherit;
3188
3189 assert(IIE_NULL == kmsg->ikm_importance);
3190
3191 /*
3192 * Only create a new voucher if we have an inherit object
3193 * (from the ikm_importance field of the incoming message), OR
3194 * we have a valid incoming voucher. If we have neither of
3195 * these things then there is no need to create a new voucher.
3196 */
3197 if (IP_VALID(kmsg->ikm_voucher) || inherit != III_NULL) {
3198 /* replace the importance attribute with the handle we created */
3199 /* our made reference on the inherit is donated to the voucher */
3200 recipe = (ipc_voucher_attr_recipe_t)&recipes[recipe_size];
3201 recipe->key = MACH_VOUCHER_ATTR_KEY_IMPORTANCE;
3202 recipe->command = MACH_VOUCHER_ATTR_SET_VALUE_HANDLE;
3203 recipe->previous_voucher = IPC_VOUCHER_NULL;
3204 recipe->content_size = sizeof(mach_voucher_attr_value_handle_t);
3205 *(mach_voucher_attr_value_handle_t *)(void *)recipe->content = handle;
3206 recipe_size += sizeof(*recipe) + sizeof(mach_voucher_attr_value_handle_t);
3207
3208 kr = ipc_voucher_attr_control_create_mach_voucher(ipc_importance_control,
3209 recipes,
3210 recipe_size,
3211 &recv_voucher);
3212 assert(KERN_SUCCESS == kr);
3213
3214 /* swap the voucher port (and set voucher bits in case it didn't already exist) */
3215 kmsg->ikm_header->msgh_bits |= (MACH_MSG_TYPE_MOVE_SEND << 16);
3216 ipc_port_release_send(kmsg->ikm_voucher);
3217 kmsg->ikm_voucher = convert_voucher_to_port(recv_voucher);
3218 if (III_NULL != inherit) {
3219 impresult = 2;
3220 }
3221 }
3222 } else { /* Don't want a voucher */
3223 /* got linked importance? have to drop */
3224 if (IIE_NULL != kmsg->ikm_importance) {
3225 ipc_importance_elem_t elem;
3226
3227 ipc_importance_lock();
3228 elem = ipc_importance_kmsg_unlink(kmsg);
3229 #if IIE_REF_DEBUG
3230 elem->iie_kmsg_refs_dropped++;
3231 #endif
3232 ipc_importance_release_locked(elem);
3233 /* importance unlocked */
3234 }
3235
3236 /* With kmsg unlinked, can safely examine message importance attribute. */
3237 if (MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) {
3238 ipc_port_t port = kmsg->ikm_header->msgh_remote_port;
3239 #if LEGACY_IMPORTANCE_DELIVERY
3240 ipc_importance_task_t task_imp = task_self->task_imp_base;
3241
3242 /* The owner of receive right might have changed, take the internal assertion */
3243 if (KERN_SUCCESS == ipc_importance_task_hold_internal_assertion(task_imp, 1)) {
3244 ipc_importance_task_externalize_legacy_assertion(task_imp, 1, sender_pid);
3245 impresult = 1;
3246 } else
3247 #endif
3248 {
3249 /* The importance boost never applied to task (clear the bit) */
3250 kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
3251 impresult = 0;
3252 }
3253
3254 /* Drop the boost on the port and the owner of the receive right */
3255 ip_lock(port);
3256 if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == FALSE) {
3257 ip_unlock(port);
3258 }
3259 }
3260 }
3261
3262 #if IMPORTANCE_TRACE
3263 if (-1 < impresult) {
3264 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_DELV)) | DBG_FUNC_NONE,
3265 sender_pid, task_pid(task_self),
3266 kmsg->ikm_header->msgh_id, impresult, 0);
3267 }
3268 if (impresult == 2) {
3269 /*
3270 * This probe only covers new voucher-based path. Legacy importance
3271 * will trigger the probe in ipc_importance_task_externalize_legacy_assertion()
3272 * above and have impresult==1 here.
3273 */
3274 DTRACE_BOOST5(receive_boost, task_t, task_self, int, task_pid(task_self), int, sender_pid, int, 1, int, task_self->task_imp_base->iit_assertcnt);
3275 }
3276 #endif /* IMPORTANCE_TRACE */
3277 }
3278
3279 /*
3280 * Routine: ipc_importance_unreceive
3281 * Purpose:
3282 * Undo receive of importance attributes in a message.
3283 *
3284 * Conditions:
3285 * Nothing locked.
3286 */
3287 void
3288 ipc_importance_unreceive(
3289 ipc_kmsg_t kmsg,
3290 mach_msg_option_t __unused option)
3291 {
3292 /* importance should already be in the voucher and out of the kmsg */
3293 assert(IIE_NULL == kmsg->ikm_importance);
3294
3295 /* See if there is a legacy boost to be dropped from receiver */
3296 if (MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) {
3297 ipc_importance_task_t task_imp;
3298
3299 kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
3300 task_imp = current_task()->task_imp_base;
3301 if (!IP_VALID(kmsg->ikm_voucher) && IIT_NULL != task_imp) {
3302 ipc_importance_task_drop_legacy_external_assertion(task_imp, 1);
3303 }
3304 /*
3305 * ipc_kmsg_copyout_dest() will consume the voucher
3306 * and any contained importance.
3307 */
3308 }
3309 }
3310
3311 /*
3312 * Routine: ipc_importance_clean
3313 * Purpose:
3314 * Clean up importance state in a kmsg that is being cleaned.
3315 * Unlink the importance chain if one was set up, and drop
3316 * the reference this kmsg held on the donor. Then check to see
3317 * if importance was carried to the port, and remove that if
3318 * needed.
3319 * Conditions:
3320 * Nothing locked.
3321 */
3322 void
3323 ipc_importance_clean(
3324 ipc_kmsg_t kmsg)
3325 {
3326 ipc_port_t port;
3327
3328 /* Is the kmsg still linked? If so, remove that first */
3329 if (IIE_NULL != kmsg->ikm_importance) {
3330 ipc_importance_elem_t elem;
3331
3332 ipc_importance_lock();
3333 elem = ipc_importance_kmsg_unlink(kmsg);
3334 assert(IIE_NULL != elem);
3335 ipc_importance_release_locked(elem);
3336 /* importance unlocked */
3337 }
3338
3339 /* See if there is a legacy importance boost to be dropped from port */
3340 if (MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) {
3341 kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
3342 port = kmsg->ikm_header->msgh_remote_port;
3343 if (IP_VALID(port)) {
3344 ip_lock(port);
3345 /* inactive ports already had their importance boosts dropped */
3346 if (!ip_active(port) ||
3347 ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == FALSE) {
3348 ip_unlock(port);
3349 }
3350 }
3351 }
3352 }
3353
3354 void
3355 ipc_importance_assert_clean(__assert_only ipc_kmsg_t kmsg)
3356 {
3357 assert(IIE_NULL == kmsg->ikm_importance);
3358 assert(!MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits));
3359 }
3360
3361 /*
3362 * IPC Importance Attribute Manager definition
3363 */
3364
3365 static kern_return_t
3366 ipc_importance_release_value(
3367 ipc_voucher_attr_manager_t manager,
3368 mach_voucher_attr_key_t key,
3369 mach_voucher_attr_value_handle_t value,
3370 mach_voucher_attr_value_reference_t sync);
3371
3372 static kern_return_t
3373 ipc_importance_get_value(
3374 ipc_voucher_attr_manager_t manager,
3375 mach_voucher_attr_key_t key,
3376 mach_voucher_attr_recipe_command_t command,
3377 mach_voucher_attr_value_handle_array_t prev_values,
3378 mach_voucher_attr_value_handle_array_size_t prev_value_count,
3379 mach_voucher_attr_content_t content,
3380 mach_voucher_attr_content_size_t content_size,
3381 mach_voucher_attr_value_handle_t *out_value,
3382 mach_voucher_attr_value_flags_t *out_flags,
3383 ipc_voucher_t *out_value_voucher);
3384
3385 static kern_return_t
3386 ipc_importance_extract_content(
3387 ipc_voucher_attr_manager_t manager,
3388 mach_voucher_attr_key_t key,
3389 mach_voucher_attr_value_handle_array_t values,
3390 mach_voucher_attr_value_handle_array_size_t value_count,
3391 mach_voucher_attr_recipe_command_t *out_command,
3392 mach_voucher_attr_content_t out_content,
3393 mach_voucher_attr_content_size_t *in_out_content_size);
3394
3395 static kern_return_t
3396 ipc_importance_command(
3397 ipc_voucher_attr_manager_t manager,
3398 mach_voucher_attr_key_t key,
3399 mach_voucher_attr_value_handle_array_t values,
3400 mach_msg_type_number_t value_count,
3401 mach_voucher_attr_command_t command,
3402 mach_voucher_attr_content_t in_content,
3403 mach_voucher_attr_content_size_t in_content_size,
3404 mach_voucher_attr_content_t out_content,
3405 mach_voucher_attr_content_size_t *out_content_size);
3406
3407 static void
3408 ipc_importance_manager_release(
3409 ipc_voucher_attr_manager_t manager);
3410
3411 const struct ipc_voucher_attr_manager ipc_importance_manager = {
3412 .ivam_release_value = ipc_importance_release_value,
3413 .ivam_get_value = ipc_importance_get_value,
3414 .ivam_extract_content = ipc_importance_extract_content,
3415 .ivam_command = ipc_importance_command,
3416 .ivam_release = ipc_importance_manager_release,
3417 .ivam_flags = IVAM_FLAGS_NONE,
3418 };
3419
3420 #define IMPORTANCE_ASSERT_KEY(key) assert(MACH_VOUCHER_ATTR_KEY_IMPORTANCE == (key))
3421 #define IMPORTANCE_ASSERT_MANAGER(manager) assert(&ipc_importance_manager == (manager))
3422
3423 /*
3424 * Routine: ipc_importance_release_value [Voucher Attribute Manager Interface]
3425 * Purpose:
3426 * Release what the voucher system believes is the last "made" reference
3427 * on an importance attribute value handle. The sync parameter is used to
3428 * avoid races with new made references concurrently being returned to the
3429 * voucher system in other threads.
3430 * Conditions:
3431 * Nothing locked on entry. May block.
3432 */
3433 static kern_return_t
3434 ipc_importance_release_value(
3435 ipc_voucher_attr_manager_t __assert_only manager,
3436 mach_voucher_attr_key_t __assert_only key,
3437 mach_voucher_attr_value_handle_t value,
3438 mach_voucher_attr_value_reference_t sync)
3439 {
3440 ipc_importance_elem_t elem;
3441
3442 IMPORTANCE_ASSERT_MANAGER(manager);
3443 IMPORTANCE_ASSERT_KEY(key);
3444 assert(0 < sync);
3445
3446 elem = (ipc_importance_elem_t)value;
3447
3448 ipc_importance_lock();
3449
3450 /* Any outstanding made refs? */
3451 if (sync != elem->iie_made) {
3452 assert(sync < elem->iie_made);
3453 ipc_importance_unlock();
3454 return KERN_FAILURE;
3455 }
3456
3457 /* clear made */
3458 elem->iie_made = 0;
3459
3460 /*
3461 * If there are pending external boosts represented by this attribute,
3462 * drop them from the appropriate task.
3463 */
3464 if (IIE_TYPE_INHERIT == IIE_TYPE(elem)) {
3465 ipc_importance_inherit_t inherit = (ipc_importance_inherit_t)elem;
3466
3467 assert(inherit->iii_externcnt >= inherit->iii_externdrop);
3468
3469 if (inherit->iii_donating) {
3470 ipc_importance_task_t imp_task = inherit->iii_to_task;
3471 uint32_t assertcnt = III_EXTERN(inherit);
3472
3473 assert(ipc_importance_task_is_any_receiver_type(imp_task));
3474 assert(imp_task->iit_externcnt >= inherit->iii_externcnt);
3475 assert(imp_task->iit_externdrop >= inherit->iii_externdrop);
3476 imp_task->iit_externcnt -= inherit->iii_externcnt;
3477 imp_task->iit_externdrop -= inherit->iii_externdrop;
3478 inherit->iii_externcnt = 0;
3479 inherit->iii_externdrop = 0;
3480 inherit->iii_donating = FALSE;
3481
3482 /* adjust the internal assertions - and propagate if needed */
3483 if (ipc_importance_task_check_transition(imp_task, IIT_UPDATE_DROP, assertcnt)) {
3484 ipc_importance_task_propagate_assertion_locked(imp_task, IIT_UPDATE_DROP, TRUE);
3485 }
3486 } else {
3487 inherit->iii_externcnt = 0;
3488 inherit->iii_externdrop = 0;
3489 }
3490 }
3491
3492 /* drop the made reference on elem */
3493 ipc_importance_release_locked(elem);
3494 /* returns unlocked */
3495
3496 return KERN_SUCCESS;
3497 }
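
/*
 * Illustrative sketch (not part of this file): the shape of the caller-side
 * pattern implied by the sync check above. The voucher layer is assumed to
 * pass the number of "made" references it has handed out; a KERN_FAILURE
 * return means new made references raced in, so the value must be kept and
 * released again later. example_release_attr_value is hypothetical and does
 * not reproduce the actual ipc_voucher.c logic.
 */
#if 0 /* example only */
static void
example_release_attr_value(mach_voucher_attr_value_handle_t value,
    mach_voucher_attr_value_reference_t made_refs)
{
	kern_return_t kr;

	kr = ipc_importance_release_value(&ipc_importance_manager,
	    MACH_VOUCHER_ATTR_KEY_IMPORTANCE, value, made_refs);
	if (KERN_FAILURE == kr) {
		/* raced with a concurrent made reference; keep the value
		 * and release again when the count drains to zero */
	}
}
#endif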
3498
3499
3500 /*
3501 * Routine: ipc_importance_get_value [Voucher Attribute Manager Interface]
3502 * Purpose:
3503 * Convert command and content data into a reference on a [potentially new]
3504 * attribute value. The importance attribute manager will only allow the
3505 * caller to get a value for the current task's importance, or to redeem
3506 * an importance attribute from an existing voucher.
3507 * Conditions:
3508 * Nothing locked on entry. May block.
3509 */
3510 static kern_return_t
3511 ipc_importance_get_value(
3512 ipc_voucher_attr_manager_t __assert_only manager,
3513 mach_voucher_attr_key_t __assert_only key,
3514 mach_voucher_attr_recipe_command_t command,
3515 mach_voucher_attr_value_handle_array_t prev_values,
3516 mach_voucher_attr_value_handle_array_size_t prev_value_count,
3517 mach_voucher_attr_content_t __unused content,
3518 mach_voucher_attr_content_size_t content_size,
3519 mach_voucher_attr_value_handle_t *out_value,
3520 mach_voucher_attr_value_flags_t *out_flags,
3521 ipc_voucher_t *out_value_voucher)
3522 {
3523 ipc_importance_elem_t elem;
3524 task_t self;
3525
3526 IMPORTANCE_ASSERT_MANAGER(manager);
3527 IMPORTANCE_ASSERT_KEY(key);
3528
3529 if (0 != content_size) {
3530 return KERN_INVALID_ARGUMENT;
3531 }
3532
3533 *out_flags = MACH_VOUCHER_ATTR_VALUE_FLAGS_NONE;
3534 /* never an out voucher */
3535
3536 switch (command) {
3537 case MACH_VOUCHER_ATTR_REDEEM:
3538
3539 /* redeem of previous values is the value */
3540 if (0 < prev_value_count) {
3541 elem = (ipc_importance_elem_t)prev_values[0];
3542 assert(IIE_NULL != elem);
3543
3544 ipc_importance_lock();
3545 assert(0 < elem->iie_made);
3546 elem->iie_made++;
3547 ipc_importance_unlock();
3548
3549 *out_value = prev_values[0];
3550 return KERN_SUCCESS;
3551 }
3552
3553 /* redeem of default is default */
3554 *out_value = 0;
3555 *out_value_voucher = IPC_VOUCHER_NULL;
3556 return KERN_SUCCESS;
3557
3558 case MACH_VOUCHER_ATTR_IMPORTANCE_SELF:
3559 self = current_task();
3560
3561 elem = (ipc_importance_elem_t)ipc_importance_for_task(self, TRUE);
3562 /* made reference added (or IIE_NULL which isn't referenced) */
3563
3564 *out_value = (mach_voucher_attr_value_handle_t)elem;
3565 *out_value_voucher = IPC_VOUCHER_NULL;
3566 return KERN_SUCCESS;
3567
3568 default:
3569 /*
3570 * every other command is unknown
3571 *
3572 * Specifically, there is no mechanism provided to construct an
3573 * importance attribute for a task/process from just a pid or
3574 * task port. It has to be copied (or redeemed) from a previous
3575 * voucher that has it.
3576 */
3577 return KERN_INVALID_ARGUMENT;
3578 }
3579 }
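
/*
 * Illustrative user-space sketch (not part of this kernel file): create a
 * voucher carrying the calling task's importance by redeeming the
 * MACH_VOUCHER_ATTR_IMPORTANCE_SELF command handled above. The helper name
 * create_importance_voucher is hypothetical; the calls and constants are the
 * standard <mach/mach_voucher.h> interfaces.
 */
#if 0 /* example only -- built in user space, not in the kernel */
#include <mach/mach.h>

static kern_return_t
create_importance_voucher(ipc_voucher_t *voucher_out)
{
	mach_voucher_attr_recipe_data_t recipe = {
		.key              = MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
		.command          = MACH_VOUCHER_ATTR_IMPORTANCE_SELF,
		.previous_voucher = MACH_VOUCHER_NULL,
		.content_size     = 0,	/* non-zero sizes are rejected above */
	};

	return host_create_mach_voucher(mach_host_self(),
	    (mach_voucher_attr_raw_recipe_array_t)&recipe,
	    sizeof(recipe), voucher_out);
}
#endif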
3580
3581 /*
3582 * Routine: ipc_importance_extract_content [Voucher Attribute Manager Interface]
3583 * Purpose:
3584 * Extract meaning from the attribute value present in a voucher. While
3585 * the real goal is to provide commands and data that can reproduce the
3586 * voucher's value "out of thin air", this isn't possible with importance
3587 * attribute values. Instead, return debug info to help track down dependencies.
3588 * Conditions:
3589 * Nothing locked on entry. May block.
3590 */
3591 static kern_return_t
3592 ipc_importance_extract_content(
3593 ipc_voucher_attr_manager_t __assert_only manager,
3594 mach_voucher_attr_key_t __assert_only key,
3595 mach_voucher_attr_value_handle_array_t values,
3596 mach_voucher_attr_value_handle_array_size_t value_count,
3597 mach_voucher_attr_recipe_command_t *out_command,
3598 mach_voucher_attr_content_t out_content,
3599 mach_voucher_attr_content_size_t *in_out_content_size)
3600 {
3601 mach_voucher_attr_content_size_t size = 0;
3602 ipc_importance_elem_t elem;
3603 unsigned int i;
3604
3605 IMPORTANCE_ASSERT_MANAGER(manager);
3606 IMPORTANCE_ASSERT_KEY(key);
3607
3608 /* the first non-default value provides the data */
3609 for (i = 0; i < value_count && *in_out_content_size > 0; i++) {
3610 elem = (ipc_importance_elem_t)values[i];
3611 if (IIE_NULL == elem) {
3612 continue;
3613 }
3614
3615 snprintf((char *)out_content, *in_out_content_size, "Importance for pid ");
3616 size = (mach_voucher_attr_content_size_t)strlen((char *)out_content);
3617
3618 for (;;) {
3619 ipc_importance_inherit_t inherit = III_NULL;
3620 ipc_importance_task_t task_imp;
3621 task_t task;
3622 int t_pid;
3623
3624 if (IIE_TYPE_TASK == IIE_TYPE(elem)) {
3625 task_imp = (ipc_importance_task_t)elem;
3626 task = task_imp->iit_task;
3627 t_pid = (TASK_NULL != task) ?
3628 task_pid(task) : -1;
3629 snprintf((char *)out_content + size, *in_out_content_size - size, "%d", t_pid);
3630 } else {
3631 inherit = (ipc_importance_inherit_t)elem;
3632 task_imp = inherit->iii_to_task;
3633 task = task_imp->iit_task;
3634 t_pid = (TASK_NULL != task) ?
3635 task_pid(task) : -1;
3636 snprintf((char *)out_content + size, *in_out_content_size - size,
3637 "%d (%d of %d boosts) %s from pid ", t_pid,
3638 III_EXTERN(inherit), inherit->iii_externcnt,
3639 (inherit->iii_donating) ? "donated" : "linked");
3640 }
3641
3642 size = (mach_voucher_attr_content_size_t)strlen((char *)out_content);
3643
3644 if (III_NULL == inherit) {
3645 break;
3646 }
3647
3648 elem = inherit->iii_from_elem;
3649 }
3650 size++; /* account for the terminating NUL */
3651 }
3652 *out_command = MACH_VOUCHER_ATTR_NOOP; /* cannot be used to regenerate value */
3653 *in_out_content_size = size;
3654 return KERN_SUCCESS;
3655 }
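
/*
 * Illustrative user-space sketch (not part of this kernel file): dump the
 * human-readable importance chain produced above via
 * mach_voucher_extract_attr_content(). print_importance_chain and the
 * buffer size are hypothetical choices for the example.
 */
#if 0 /* example only -- built in user space, not in the kernel */
#include <mach/mach.h>
#include <stdio.h>

static void
print_importance_chain(ipc_voucher_t voucher)
{
	char buf[512];
	mach_msg_type_number_t size = sizeof(buf);
	kern_return_t kr;

	kr = mach_voucher_extract_attr_content(voucher,
	    MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
	    (mach_voucher_attr_content_t)buf, &size);
	if (KERN_SUCCESS == kr && size > 0) {
		/* e.g. "Importance for pid 123 (1 of 1 boosts) donated from pid 456" */
		printf("%.*s\n", (int)size, buf);
	}
}
#endif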
3656
3657 /*
3658 * Routine: ipc_importance_command [Voucher Attribute Manager Interface]
3659 * Purpose:
3660 * Run commands against the importance attribute value found in a voucher.
3661 * Only MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL is currently supported.
3662 * Conditions:
3663 * Nothing locked on entry. May block.
3664 */
3665 static kern_return_t
3666 ipc_importance_command(
3667 ipc_voucher_attr_manager_t __assert_only manager,
3668 mach_voucher_attr_key_t __assert_only key,
3669 mach_voucher_attr_value_handle_array_t values,
3670 mach_msg_type_number_t value_count,
3671 mach_voucher_attr_command_t command,
3672 mach_voucher_attr_content_t in_content,
3673 mach_voucher_attr_content_size_t in_content_size,
3674 mach_voucher_attr_content_t out_content,
3675 mach_voucher_attr_content_size_t *out_content_size)
3676 {
3677 ipc_importance_inherit_t inherit;
3678 ipc_importance_task_t to_task;
3679 uint32_t refs, *outrefsp;
3680 mach_msg_type_number_t i;
3681 uint32_t externcnt;
3682
3683 IMPORTANCE_ASSERT_MANAGER(manager);
3684 IMPORTANCE_ASSERT_KEY(key);
3685
3686 if (in_content_size != sizeof(refs) ||
3687 (*out_content_size != 0 && *out_content_size != sizeof(refs))) {
3688 return KERN_INVALID_ARGUMENT;
3689 }
3690 refs = *(uint32_t *)(void *)in_content;
3691 outrefsp = (*out_content_size != 0) ? (uint32_t *)(void *)out_content : NULL;
3692
3693 if (MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL != command) {
3694 return KERN_NOT_SUPPORTED;
3695 }
3696
3697 /* the first non-default value of the apropos type provides the data */
3698 inherit = III_NULL;
3699 for (i = 0; i < value_count; i++) {
3700 ipc_importance_elem_t elem = (ipc_importance_elem_t)values[i];
3701
3702 if (IIE_NULL != elem && IIE_TYPE_INHERIT == IIE_TYPE(elem)) {
3703 inherit = (ipc_importance_inherit_t)elem;
3704 break;
3705 }
3706 }
3707 if (III_NULL == inherit) {
3708 return KERN_INVALID_ARGUMENT;
3709 }
3710
3711 ipc_importance_lock();
3712
3713 if (0 == refs) {
3714 if (NULL != outrefsp) {
3715 *outrefsp = III_EXTERN(inherit);
3716 }
3717 ipc_importance_unlock();
3718 return KERN_SUCCESS;
3719 }
3720
3721 to_task = inherit->iii_to_task;
3722 assert(ipc_importance_task_is_any_receiver_type(to_task));
3723
3724 /* if not donating to a denap receiver, it was called incorrectly */
3725 if (!ipc_importance_task_is_marked_denap_receiver(to_task)) {
3726 ipc_importance_unlock();
3727 return KERN_INVALID_TASK; /* keeps dispatch happy */
3728 }
3729
3730 /* Enough external references left to drop? */
3731 if (III_EXTERN(inherit) < refs) {
3732 ipc_importance_unlock();
3733 return KERN_FAILURE;
3734 }
3735
3736 /* re-base external and internal counters at the inherit and the to-task (if apropos) */
3737 if (inherit->iii_donating) {
3738 assert(IIT_EXTERN(to_task) >= III_EXTERN(inherit));
3739 assert(to_task->iit_externcnt >= inherit->iii_externcnt);
3740 assert(to_task->iit_externdrop >= inherit->iii_externdrop);
3741 inherit->iii_externdrop += refs;
3742 to_task->iit_externdrop += refs;
3743 externcnt = III_EXTERN(inherit);
3744 if (0 == externcnt) {
3745 inherit->iii_donating = FALSE;
3746 to_task->iit_externcnt -= inherit->iii_externcnt;
3747 to_task->iit_externdrop -= inherit->iii_externdrop;
3748
3749
3750 /* Start AppNap delay hysteresis - even if not the last boost for the task. */
3751 if (ipc_importance_delayed_drop_call != NULL &&
3752 ipc_importance_task_is_marked_denap_receiver(to_task)) {
3753 ipc_importance_task_delayed_drop(to_task);
3754 }
3755
3756 /* drop task assertions associated with the dropped boosts */
3757 if (ipc_importance_task_check_transition(to_task, IIT_UPDATE_DROP, refs)) {
3758 ipc_importance_task_propagate_assertion_locked(to_task, IIT_UPDATE_DROP, TRUE);
3759 /* may have dropped and retaken importance lock */
3760 }
3761 } else {
3762 /* assert(to_task->iit_assertcnt >= refs + externcnt); */
3763 /* defensive deduction in case of assertcnt underflow */
3764 if (to_task->iit_assertcnt > refs + externcnt) {
3765 to_task->iit_assertcnt -= refs;
3766 } else {
3767 to_task->iit_assertcnt = externcnt;
3768 }
3769 }
3770 } else {
3771 inherit->iii_externdrop += refs;
3772 externcnt = III_EXTERN(inherit);
3773 }
3774
3775 /* capture result (if requested) */
3776 if (NULL != outrefsp) {
3777 *outrefsp = externcnt;
3778 }
3779
3780 ipc_importance_unlock();
3781 return KERN_SUCCESS;
3782 }
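
/*
 * Illustrative user-space sketch (not part of this kernel file): drop
 * external boosts held by a voucher's importance attribute through the
 * MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL command handled above. The
 * helper name drop_external_boosts is hypothetical; both content buffers
 * must be a single uint32_t, matching the size checks at the top of
 * ipc_importance_command().
 */
#if 0 /* example only -- built in user space, not in the kernel */
#include <mach/mach.h>

static kern_return_t
drop_external_boosts(ipc_voucher_t voucher, uint32_t refs_to_drop,
    uint32_t *refs_remaining)
{
	mach_msg_type_number_t out_size = sizeof(*refs_remaining);

	return mach_voucher_attr_command(voucher,
	    MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
	    MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL,
	    (mach_voucher_attr_content_t)&refs_to_drop, sizeof(refs_to_drop),
	    (mach_voucher_attr_content_t)refs_remaining, &out_size);
}
#endif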
3783
3784 /*
3785 * Routine: ipc_importance_manager_release [Voucher Attribute Manager Interface]
3786 * Purpose:
3787 * Release the Voucher system's reference on the IPC importance attribute
3788 * manager.
3789 * Conditions:
3790 * This can only occur after the manager drops the attribute control
3791 * reference granted back at registration time. Since that reference is
3792 * never dropped, this routine should never be called.
3793 */
3794 __abortlike
3795 static void
3796 ipc_importance_manager_release(
3797 ipc_voucher_attr_manager_t __assert_only manager)
3798 {
3799 IMPORTANCE_ASSERT_MANAGER(manager);
3800 panic("Voucher importance manager released");
3801 }
3802
3803 /*
3804 * Routine: ipc_importance_init
3805 * Purpose:
3806 * Initialize the IPC importance manager.
3807 * Conditions:
3808 * Zones and Vouchers are already initialized.
3809 */
3810 void
3811 ipc_importance_init(void)
3812 {
3813 kern_return_t kr;
3814
3815 kr = ipc_register_well_known_mach_voucher_attr_manager(&ipc_importance_manager,
3816 (mach_voucher_attr_value_handle_t)0,
3817 MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
3818 &ipc_importance_control);
3819 if (KERN_SUCCESS != kr) {
3820 printf("Voucher importance manager register returned %d\n", kr);
3821 }
3822 }
3823
3824 /*
3825 * Routine: ipc_importance_thread_call_init
3826 * Purpose:
3827 * Initialize the IPC importance code dependent upon
3828 * thread-call support being available.
3829 * Conditions:
3830 * Thread-call mechanism is already initialized.
3831 */
3832 void
3833 ipc_importance_thread_call_init(void)
3834 {
3835 /* initialize delayed drop queue and thread-call */
3836 queue_init(&ipc_importance_delayed_drop_queue);
3837 ipc_importance_delayed_drop_call =
3838 thread_call_allocate(ipc_importance_task_delayed_drop_scan, NULL);
3839 if (NULL == ipc_importance_delayed_drop_call) {
3840 panic("ipc_importance_thread_call_init");
3841 }
3842 }
3843
3844 /*
3845 * Routine: task_importance_list_pids
3846 * Purpose: List the pids to which this task is donating importance.
3847 * Conditions: To be called only from kdp stackshot code.
3848 * Will panic the system otherwise.
3849 */
3850 extern int
3851 task_importance_list_pids(task_t task, int flags, char *pid_list, unsigned int max_count)
3852 {
3853 if (kdp_lck_spin_is_acquired(&ipc_importance_lock_data) ||
3854 max_count < 1 ||
3855 task->task_imp_base == IIT_NULL ||
3856 pid_list == NULL ||
3857 flags != TASK_IMP_LIST_DONATING_PIDS) {
3858 return 0;
3859 }
3860 unsigned int pidcount = 0;
3861 task_t temp_task;
3862 ipc_importance_task_t task_imp = task->task_imp_base;
3863 ipc_kmsg_t temp_kmsg;
3864 ipc_importance_inherit_t temp_inherit;
3865 ipc_importance_elem_t elem;
3866 int target_pid = 0, previous_pid;
3867
3868 queue_iterate(&task_imp->iit_inherits, temp_inherit, ipc_importance_inherit_t, iii_inheritance) {
3869 /* check space in buffer */
3870 if (pidcount >= max_count) {
3871 break;
3872 }
3873 previous_pid = target_pid;
3874 target_pid = -1;
3875
3876 if (temp_inherit->iii_donating) {
3877 #if DEVELOPMENT || DEBUG
3878 target_pid = temp_inherit->iii_to_task->iit_bsd_pid;
3879 #else
3880 temp_task = temp_inherit->iii_to_task->iit_task;
3881 if (temp_task != TASK_NULL) {
3882 target_pid = task_pid(temp_task);
3883 }
3884 #endif
3885 }
3886
3887 if (target_pid != -1 && previous_pid != target_pid) {
3888 memcpy(pid_list, &target_pid, sizeof(target_pid));
3889 pid_list += sizeof(target_pid);
3890 pidcount++;
3891 }
3892 }
3893
3894 target_pid = 0;
3895 queue_iterate(&task_imp->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance) {
3896 if (pidcount >= max_count) {
3897 break;
3898 }
3899 previous_pid = target_pid;
3900 target_pid = -1;
3901 elem = temp_kmsg->ikm_importance;
3902 temp_task = TASK_NULL;
3903
3904 if (elem == IIE_NULL) {
3905 continue;
3906 }
3907
3908 if (!(temp_kmsg->ikm_header && MACH_MSGH_BITS_RAISED_IMPORTANCE(temp_kmsg->ikm_header->msgh_bits))) {
3909 continue;
3910 }
3911
3912 if (IIE_TYPE_TASK == IIE_TYPE(elem) &&
3913 (((ipc_importance_task_t)elem)->iit_task != TASK_NULL)) {
3914 target_pid = task_pid(((ipc_importance_task_t)elem)->iit_task);
3915 } else {
3916 temp_inherit = (ipc_importance_inherit_t)elem;
3917 #if DEVELOPMENT || DEBUG
3918 target_pid = temp_inherit->iii_to_task->iit_bsd_pid;
3919 #else
3920 temp_task = temp_inherit->iii_to_task->iit_task;
3921 if (temp_task != TASK_NULL) {
3922 target_pid = task_pid(temp_task);
3923 }
3924 #endif
3925 }
3926
3927 if (target_pid != -1 && previous_pid != target_pid) {
3928 memcpy(pid_list, &target_pid, sizeof(target_pid));
3929 pid_list += sizeof(target_pid);
3930 pidcount++;
3931 }
3932 }
3933
3934 return pidcount;
3935 }
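
/*
 * Illustrative sketch (not part of this file): a hypothetical
 * stackshot-style caller that gathers the pids this task is donating
 * importance to. Per the conditions above, this is only legal from kdp
 * stackshot context; example_collect_donating_pids is a made-up name.
 */
#if 0 /* example only */
static unsigned int
example_collect_donating_pids(task_t task, int *pids, unsigned int max_pids)
{
	/* each entry written into the buffer is a sizeof(int)-byte pid */
	return (unsigned int)task_importance_list_pids(task,
	    TASK_IMP_LIST_DONATING_PIDS, (char *)pids, max_pids);
}
#endif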