1 /*
2 * Copyright (c) 2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach/mach_types.h>
30 #include <mach/notify.h>
31 #include <ipc/ipc_types.h>
32 #include <ipc/ipc_importance.h>
33 #include <ipc/ipc_port.h>
34 #include <ipc/ipc_voucher.h>
35 #include <kern/ipc_kobject.h>
36 #include <kern/ipc_tt.h>
37 #include <kern/mach_param.h>
38 #include <kern/misc_protos.h>
39 #include <kern/kalloc.h>
40 #include <kern/zalloc.h>
41 #include <kern/queue.h>
42 #include <kern/task.h>
43 #include <kern/policy_internal.h>
44
45 #include <sys/kdebug.h>
46
47 #include <mach/mach_voucher_attr_control.h>
48 #include <mach/machine/sdt.h>
49
50 extern int proc_pid(void *);
51 extern int proc_selfpid(void);
52 extern uint64_t proc_uniqueid(void *p);
53 extern char *proc_name_address(void *p);
54
55 /*
56 * Globals for delayed boost drop processing.
57 */
58 static queue_head_t ipc_importance_delayed_drop_queue;
59 static thread_call_t ipc_importance_delayed_drop_call;
60 static uint64_t ipc_importance_delayed_drop_timestamp;
61 static boolean_t ipc_importance_delayed_drop_call_requested = FALSE;
62
63 #define DENAP_DROP_TARGET (1000 * NSEC_PER_MSEC) /* optimum denap delay */
64 #define DENAP_DROP_SKEW (100 * NSEC_PER_MSEC) /* request skew for wakeup */
65 #define DENAP_DROP_LEEWAY (2 * DENAP_DROP_SKEW) /* specified wakeup leeway */
66
67 #define DENAP_DROP_DELAY (DENAP_DROP_TARGET + DENAP_DROP_SKEW)
68 #define DENAP_DROP_FLAGS (THREAD_CALL_DELAY_SYS_NORMAL | THREAD_CALL_DELAY_LEEWAY)
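/*
 * For reference, with the constants above DENAP_DROP_DELAY works out to
 * 1000ms + 100ms = 1100ms past the first queued entry's timestamp, and
 * DENAP_DROP_LEEWAY to 2 * 100ms = 200ms of permitted thread-call slop.
 */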
69
70 /*
71 * Importance Voucher Attribute Manager
72 */
73
74 static lck_spin_t ipc_importance_lock_data; /* single lock for now */
75
76
77 #define ipc_importance_lock_init() \
78 lck_spin_init(&ipc_importance_lock_data, &ipc_lck_grp, &ipc_lck_attr)
79 #define ipc_importance_lock_destroy() \
80 lck_spin_destroy(&ipc_importance_lock_data, &ipc_lck_grp)
81 #define ipc_importance_lock() \
82 lck_spin_lock_grp(&ipc_importance_lock_data, &ipc_lck_grp)
83 #define ipc_importance_lock_try() \
84 lck_spin_try_lock_grp(&ipc_importance_lock_data, &ipc_lck_grp)
85 #define ipc_importance_unlock() \
86 lck_spin_unlock(&ipc_importance_lock_data)
87 #define ipc_importance_assert_held() \
88 lck_spin_assert(&ipc_importance_lock_data, LCK_ASSERT_OWNED)
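/*
 * A minimal usage sketch of the locking discipline above (illustrative
 * only, not part of the build): callers take the single importance
 * spinlock around any examination or mutation of importance state, and
 * helpers named *_locked expect the lock to already be held.
 */
#if 0 /* illustrative sketch */
static void
example_importance_critical_section(void)
{
	ipc_importance_lock();
	ipc_importance_assert_held();
	/* ... examine or adjust importance state here ... */
	ipc_importance_unlock();
}
#endif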
89
90 #if IIE_REF_DEBUG
91 #define incr_ref_counter(x) (os_atomic_inc(&(x), relaxed))
92
93 static inline
94 uint32_t
95 ipc_importance_reference_internal(ipc_importance_elem_t elem)
96 {
97 incr_ref_counter(elem->iie_refs_added);
98 return os_atomic_inc(&elem->iie_bits, relaxed) & IIE_REFS_MASK;
99 }
100
101 static inline
102 uint32_t
103 ipc_importance_release_internal(ipc_importance_elem_t elem)
104 {
105 incr_ref_counter(elem->iie_refs_dropped);
106 return os_atomic_dec(&elem->iie_bits, relaxed) & IIE_REFS_MASK;
107 }
108
109 static inline
110 uint32_t
111 ipc_importance_task_reference_internal(ipc_importance_task_t task_imp)
112 {
113 uint32_t out;
114 out = ipc_importance_reference_internal(&task_imp->iit_elem);
115 incr_ref_counter(task_imp->iit_elem.iie_task_refs_added);
116 return out;
117 }
118
119 static inline
120 uint32_t
121 ipc_importance_task_release_internal(ipc_importance_task_t task_imp)
122 {
123 uint32_t out;
124
125 assert(1 < IIT_REFS(task_imp));
126 incr_ref_counter(task_imp->iit_elem.iie_task_refs_dropped);
127 out = ipc_importance_release_internal(&task_imp->iit_elem);
128 return out;
129 }
130
131 static inline
132 void
133 ipc_importance_counter_init(ipc_importance_elem_t elem)
134 {
135 elem->iie_refs_added = 0;
136 elem->iie_refs_dropped = 0;
137 elem->iie_kmsg_refs_added = 0;
138 elem->iie_kmsg_refs_inherited = 0;
139 elem->iie_kmsg_refs_coalesced = 0;
140 elem->iie_kmsg_refs_dropped = 0;
141 elem->iie_task_refs_added = 0;
142 elem->iie_task_refs_added_inherit_from = 0;
143 elem->iie_task_refs_added_transition = 0;
144 elem->iie_task_refs_self_added = 0;
145 elem->iie_task_refs_inherited = 0;
146 elem->iie_task_refs_coalesced = 0;
147 elem->iie_task_refs_dropped = 0;
148 }
149 #else
150 #define incr_ref_counter(x)
151 #endif
152
153 #if DEVELOPMENT || DEBUG
154 static queue_head_t global_iit_alloc_queue;
155 #endif
156
157 /* TODO: remove this variable when interactive daemon audit is complete */
158 boolean_t ipc_importance_interactive_receiver = FALSE;
159
160 static zone_t ipc_importance_task_zone;
161 static zone_t ipc_importance_inherit_zone;
162
163 static ipc_voucher_attr_control_t ipc_importance_control;
164
165 static boolean_t ipc_importance_task_check_transition(ipc_importance_task_t task_imp,
166 iit_update_type_t type, uint32_t delta);
167
168 static void ipc_importance_task_propagate_assertion_locked(ipc_importance_task_t task_imp,
169 iit_update_type_t type, boolean_t update_task_imp);
170
171 static ipc_importance_inherit_t ipc_importance_inherit_from_task(task_t from_task, task_t to_task);
172
173 /*
174 * Routine: ipc_importance_kmsg_link
175 * Purpose:
176 * Link the kmsg onto the appropriate propagation chain.
177 * If the element is a task importance, we link directly
178 * on its propagation chain. Otherwise, we link onto the
179 * destination task of the inherit.
180 * Conditions:
181 * Importance lock held.
182 * Caller is donating an importance elem reference to the kmsg.
183 */
184 static void
185 ipc_importance_kmsg_link(
186 ipc_kmsg_t kmsg,
187 ipc_importance_elem_t elem)
188 {
189 ipc_importance_elem_t link_elem;
190
191 assert(IIE_NULL == kmsg->ikm_importance);
192
193 link_elem = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
194 (ipc_importance_elem_t)((ipc_importance_inherit_t)elem)->iii_to_task :
195 elem;
196
197 queue_enter(&link_elem->iie_kmsgs, kmsg, ipc_kmsg_t, ikm_inheritance);
198 kmsg->ikm_importance = elem;
199 }
200
201 /*
202 * Routine: ipc_importance_kmsg_unlink
203 * Purpose:
204 * Unlink the kmsg from its current propagation chain.
205 * If the element is a task importance, we unlink directly
206 * from its propagation chain. Otherwise, we unlink from the
207 * destination task of the inherit.
208 * Returns:
209 * The reference to the importance element it was linked on.
210 * Conditions:
211 * Importance lock held.
212 * Caller is responsible for dropping reference on returned elem.
213 */
214 static ipc_importance_elem_t
215 ipc_importance_kmsg_unlink(
216 ipc_kmsg_t kmsg)
217 {
218 ipc_importance_elem_t elem = kmsg->ikm_importance;
219
220 if (IIE_NULL != elem) {
221 ipc_importance_elem_t unlink_elem;
222
223 unlink_elem = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
224 (ipc_importance_elem_t)((ipc_importance_inherit_t)elem)->iii_to_task :
225 elem;
226
227 queue_remove(&unlink_elem->iie_kmsgs, kmsg, ipc_kmsg_t, ikm_inheritance);
228 kmsg->ikm_importance = IIE_NULL;
229 }
230 return elem;
231 }
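/*
 * Reference flow for the link/unlink pair above (illustrative sketch,
 * not part of the build; example_kmsg_detach is a hypothetical name):
 * linking consumes the caller's reference on the element, and unlinking
 * hands it back, so a teardown path pairs the unlink with a release.
 */
#if 0 /* illustrative sketch */
static void
example_kmsg_detach(ipc_kmsg_t kmsg)
{
	ipc_importance_elem_t elem;

	ipc_importance_lock();
	elem = ipc_importance_kmsg_unlink(kmsg);	/* takes back the linked-on reference */
	ipc_importance_unlock();

	if (IIE_NULL != elem) {
		ipc_importance_release(elem);		/* drop the reference we now own */
	}
}
#endif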
232
233 /*
234 * Routine: ipc_importance_inherit_link
235 * Purpose:
236 * Link the inherit onto the appropriate propagation chain.
237 * If the element is a task importance, we link directly
238 * on its propagation chain. Otherwise, we link onto the
239 * destination task of the inherit.
240 * Conditions:
241 * Importance lock held.
242 * Caller is donating an elem importance reference to the inherit.
243 */
244 static void
245 ipc_importance_inherit_link(
246 ipc_importance_inherit_t inherit,
247 ipc_importance_elem_t elem)
248 {
249 ipc_importance_task_t link_task;
250
251 assert(IIE_NULL == inherit->iii_from_elem);
252 link_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
253 ((ipc_importance_inherit_t)elem)->iii_to_task :
254 (ipc_importance_task_t)elem;
255
256 queue_enter(&link_task->iit_inherits, inherit,
257 ipc_importance_inherit_t, iii_inheritance);
258 inherit->iii_from_elem = elem;
259 }
260
261 /*
262 * Routine: ipc_importance_inherit_find
263 * Purpose:
264 * Find an existing inherit that links the from element to the
265 * to_task at a given nesting depth. As inherits from other
266 * inherits are actually linked off the original inherit's donation
267 * receiving task, we have to conduct our search from there if
268 * the from element is an inherit.
269 * Returns:
270 * A pointer (not a reference) to the matching inherit.
271 * Conditions:
272 * Importance lock held.
273 */
274 static ipc_importance_inherit_t
275 ipc_importance_inherit_find(
276 ipc_importance_elem_t from,
277 ipc_importance_task_t to_task,
278 unsigned int depth)
279 {
280 ipc_importance_task_t link_task;
281 ipc_importance_inherit_t inherit;
282
283 link_task = (IIE_TYPE_INHERIT == IIE_TYPE(from)) ?
284 ((ipc_importance_inherit_t)from)->iii_to_task :
285 (ipc_importance_task_t)from;
286
287 queue_iterate(&link_task->iit_inherits, inherit,
288 ipc_importance_inherit_t, iii_inheritance) {
289 if (inherit->iii_to_task == to_task && inherit->iii_depth == depth) {
290 return inherit;
291 }
292 }
293 return III_NULL;
294 }
295
296 /*
297 * Routine: ipc_importance_inherit_unlink
298 * Purpose:
299 * Unlink the inherit from its current propagation chain.
300 * If the element is a task importance, we unlink directly
301 * from its propagation chain. Otherwise, we unlink from the
302 * destination task of the inherit.
303 * Returns:
304 * The reference to the importance element it was linked on.
305 * Conditions:
306 * Importance lock held.
307 * Caller is responsible for dropping reference on returned elem.
308 */
309 static ipc_importance_elem_t
310 ipc_importance_inherit_unlink(
311 ipc_importance_inherit_t inherit)
312 {
313 ipc_importance_elem_t elem = inherit->iii_from_elem;
314
315 if (IIE_NULL != elem) {
316 ipc_importance_task_t unlink_task;
317
318 unlink_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
319 ((ipc_importance_inherit_t)elem)->iii_to_task :
320 (ipc_importance_task_t)elem;
321
322 queue_remove(&unlink_task->iit_inherits, inherit,
323 ipc_importance_inherit_t, iii_inheritance);
324 inherit->iii_from_elem = IIE_NULL;
325 }
326 return elem;
327 }
328
329 /*
330 * Routine: ipc_importance_reference
331 * Purpose:
332 * Add a reference to the importance element.
333 * Conditions:
334 * Caller must hold a reference on the element.
335 */
336 void
337 ipc_importance_reference(ipc_importance_elem_t elem)
338 {
339 assert(0 < IIE_REFS(elem));
340 ipc_importance_reference_internal(elem);
341 }
342
343 /*
344 * Routine: ipc_importance_release_locked
345 * Purpose:
346 * Release a reference on an importance attribute value,
347 * unlinking and deallocating the attribute if the last reference.
348 * Conditions:
349 * Entered with importance lock held, leaves with it unlocked.
350 */
351 static void
352 ipc_importance_release_locked(ipc_importance_elem_t elem)
353 {
354 assert(0 < IIE_REFS(elem));
355
356 #if IMPORTANCE_DEBUG
357 ipc_importance_inherit_t temp_inherit;
358 ipc_importance_task_t link_task;
359 ipc_kmsg_t temp_kmsg;
360 uint32_t expected = 0;
361
362 if (0 < elem->iie_made) {
363 expected++;
364 }
365
366 link_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
367 ((ipc_importance_inherit_t)elem)->iii_to_task :
368 (ipc_importance_task_t)elem;
369
370 queue_iterate(&link_task->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance)
371 if (temp_kmsg->ikm_importance == elem) {
372 expected++;
373 }
374 queue_iterate(&link_task->iit_inherits, temp_inherit,
375 ipc_importance_inherit_t, iii_inheritance)
376 if (temp_inherit->iii_from_elem == elem) {
377 expected++;
378 }
379 if (IIE_REFS(elem) < expected + 1) {
380 panic("ipc_importance_release_locked (%p)", elem);
381 }
382 #endif /* IMPORTANCE_DEBUG */
383
384 if (0 < ipc_importance_release_internal(elem)) {
385 ipc_importance_unlock();
386 return;
387 }
388
389 /* last ref */
390
391 switch (IIE_TYPE(elem)) {
392 /* just a "from" task reference to drop */
393 case IIE_TYPE_TASK:
394 {
395 ipc_importance_task_t task_elem;
396
397 task_elem = (ipc_importance_task_t)elem;
398
399 /* the task can't still hold a reference on the task importance */
400 assert(TASK_NULL == task_elem->iit_task);
401
402 #if DEVELOPMENT || DEBUG
403 queue_remove(&global_iit_alloc_queue, task_elem, ipc_importance_task_t, iit_allocation);
404 #endif
405
406 ipc_importance_unlock();
407
408 zfree(ipc_importance_task_zone, task_elem);
409 break;
410 }
411
412 /* dropping an inherit element */
413 case IIE_TYPE_INHERIT:
414 {
415 ipc_importance_inherit_t inherit = (ipc_importance_inherit_t)elem;
416 ipc_importance_task_t to_task = inherit->iii_to_task;
417 ipc_importance_elem_t from_elem;
418
419 assert(IIT_NULL != to_task);
420 assert(ipc_importance_task_is_any_receiver_type(to_task));
421
422 /* unlink the inherit from its source element */
423 from_elem = ipc_importance_inherit_unlink(inherit);
424 assert(IIE_NULL != from_elem);
425
426 /*
427 * The attribute might have pending external boosts if the attribute
428 * was given out during exec; drop them from the appropriate destination
429 * task.
430 *
431 * The attribute will not have any pending external boosts if the
432 * attribute was given out to the voucher system, since they would have
433 * been dropped by ipc_importance_release_value, but there is no way to
434 * detect that. Thus, if the attribute has pending external boosts,
435 * drop them from the appropriate destination task here as well.
436 *
437 * The inherit attributes from exec and the voucher system never get
438 * deduped to each other, so dropping the external boost from the
439 * destination task in two different places will not have any
440 * unintended side effects.
441 */
442 assert(inherit->iii_externcnt >= inherit->iii_externdrop);
443 if (inherit->iii_donating) {
444 uint32_t assertcnt = III_EXTERN(inherit);
445
446 assert(ipc_importance_task_is_any_receiver_type(to_task));
447 assert(to_task->iit_externcnt >= inherit->iii_externcnt);
448 assert(to_task->iit_externdrop >= inherit->iii_externdrop);
449 to_task->iit_externcnt -= inherit->iii_externcnt;
450 to_task->iit_externdrop -= inherit->iii_externdrop;
451 inherit->iii_externcnt = 0;
452 inherit->iii_externdrop = 0;
453 inherit->iii_donating = FALSE;
454
455 /* adjust the internal assertions - and propagate as needed */
456 if (ipc_importance_task_check_transition(to_task, IIT_UPDATE_DROP, assertcnt)) {
457 ipc_importance_task_propagate_assertion_locked(to_task, IIT_UPDATE_DROP, TRUE);
458 }
459 } else {
460 inherit->iii_externcnt = 0;
461 inherit->iii_externdrop = 0;
462 }
463
464 /* release the reference on the source element */
465 ipc_importance_release_locked(from_elem);
466 /* unlocked on return */
467
468 /* release the reference on the destination task */
469 ipc_importance_task_release(to_task);
470
471 /* free the inherit */
472 zfree(ipc_importance_inherit_zone, inherit);
473 break;
474 }
475 }
476 }
477
478 /*
479 * Routine: ipc_importance_release
480 * Purpose:
481 * Release a reference on an importance attribute value,
482 * unlinking and deallocating the attribute if the last reference.
483 * Conditions:
484 * nothing locked on entrance, nothing locked on exit.
485 * May block.
486 */
487 void
488 ipc_importance_release(ipc_importance_elem_t elem)
489 {
490 if (IIE_NULL == elem) {
491 return;
492 }
493
494 ipc_importance_lock();
495 ipc_importance_release_locked(elem);
496 /* unlocked */
497 }
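/*
 * Sketch of the external reference protocol (illustrative only; the
 * function name is hypothetical): a holder of one reference may take
 * another with ipc_importance_reference() and must balance it with
 * ipc_importance_release(), which may block when dropping the last one.
 */
#if 0 /* illustrative sketch */
static void
example_retain_use_release(ipc_importance_elem_t elem)
{
	ipc_importance_reference(elem);		/* caller already holds a reference */
	/* ... use elem without any locks held ... */
	ipc_importance_release(elem);		/* may block if this was the last reference */
}
#endif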
498
499 /*
500 * Routine: ipc_importance_task_reference
501 *
502 *
503 * Purpose:
504 * Retain a reference on a task importance attribute value.
505 * Conditions:
506 * nothing locked on entrance, nothing locked on exit.
507 * caller holds a reference already.
508 */
509 void
510 ipc_importance_task_reference(ipc_importance_task_t task_elem)
511 {
512 if (IIT_NULL == task_elem) {
513 return;
514 }
515 #if IIE_REF_DEBUG
516 incr_ref_counter(task_elem->iit_elem.iie_task_refs_added);
517 #endif
518 ipc_importance_reference(&task_elem->iit_elem);
519 }
520
521 /*
522 * Routine: ipc_importance_task_release
523 * Purpose:
524 * Release a reference on a task importance attribute value,
525 * unlinking and deallocating the attribute if the last reference.
526 * Conditions:
527 * nothing locked on entrance, nothing locked on exit.
528 * May block.
529 */
530 void
531 ipc_importance_task_release(ipc_importance_task_t task_elem)
532 {
533 if (IIT_NULL == task_elem) {
534 return;
535 }
536
537 ipc_importance_lock();
538 #if IIE_REF_DEBUG
539 incr_ref_counter(task_elem->iit_elem.iie_task_refs_dropped);
540 #endif
541 ipc_importance_release_locked(&task_elem->iit_elem);
542 /* unlocked */
543 }
544
545 /*
546 * Routine: ipc_importance_task_release_locked
547 * Purpose:
548 * Release a reference on a task importance attribute value,
549 * unlinking and deallocating the attribute if the last reference.
550 * Conditions:
551 * importance lock held on entry, nothing locked on exit.
552 * May block.
553 */
554 static void
555 ipc_importance_task_release_locked(ipc_importance_task_t task_elem)
556 {
557 if (IIT_NULL == task_elem) {
558 ipc_importance_unlock();
559 return;
560 }
561 #if IIE_REF_DEBUG
562 incr_ref_counter(task_elem->iit_elem.iie_task_refs_dropped);
563 #endif
564 ipc_importance_release_locked(&task_elem->iit_elem);
565 /* unlocked */
566 }
567
568 /*
569 * Routines for importance donation/inheritance/boosting
570 */
571
572
573 /*
574 * External importance assertions are managed by the process in userspace
575 * Internal importance assertions are the responsibility of the kernel
576 * Assertions are changed from internal to external via task_importance_externalize_assertion
577 */
578
579 /*
580 * Routine: ipc_importance_task_check_transition
581 * Purpose:
582 * Increment or decrement the internal task importance counter of the
583 * specified task and determine if propagation and a task policy
584 * update is required.
585 *
586 * If it is already enqueued for a policy update, steal it from that queue
587 * (as we are reversing that update before it happens).
588 *
589 * Conditions:
590 * Called with the importance lock held.
591 * It is the caller's responsibility to perform the propagation of the
592 * transition and/or policy changes by checking the return value.
593 */
594 static boolean_t
595 ipc_importance_task_check_transition(
596 ipc_importance_task_t task_imp,
597 iit_update_type_t type,
598 uint32_t delta)
599 {
600 #if IMPORTANCE_TRACE
601 task_t target_task = task_imp->iit_task;
602 #endif
603 boolean_t boost = (IIT_UPDATE_HOLD == type);
604 boolean_t before_boosted, after_boosted;
605
606 ipc_importance_assert_held();
607
608 if (!ipc_importance_task_is_any_receiver_type(task_imp)) {
609 return FALSE;
610 }
611
612 #if IMPORTANCE_TRACE
613 int target_pid = task_pid(target_task);
614
615 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (((boost) ? IMP_HOLD : IMP_DROP) | TASK_POLICY_INTERNAL))) | DBG_FUNC_START,
616 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
617 #endif
618
619 /* snapshot the effective boosting status before making any changes */
620 before_boosted = (task_imp->iit_assertcnt > 0);
621
622 /* Adjust the assertcnt appropriately */
623 if (boost) {
624 task_imp->iit_assertcnt += delta;
625 #if IMPORTANCE_TRACE
626 DTRACE_BOOST6(send_boost, task_t, target_task, int, target_pid,
627 task_t, current_task(), int, proc_selfpid(), int, delta, int, task_imp->iit_assertcnt);
628 #endif
629 } else {
630 // assert(delta <= task_imp->iit_assertcnt);
631 if (task_imp->iit_assertcnt < delta + IIT_EXTERN(task_imp)) {
632 /* TODO: Turn this back into a panic <rdar://problem/12592649> */
633 task_imp->iit_assertcnt = IIT_EXTERN(task_imp);
634 } else {
635 task_imp->iit_assertcnt -= delta;
636 }
637 #if IMPORTANCE_TRACE
638 // This covers both legacy and voucher-based importance.
639 DTRACE_BOOST4(drop_boost, task_t, target_task, int, target_pid, int, delta, int, task_imp->iit_assertcnt);
640 #endif
641 }
642
643 #if IMPORTANCE_TRACE
644 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (((boost) ? IMP_HOLD : IMP_DROP) | TASK_POLICY_INTERNAL))) | DBG_FUNC_END,
645 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
646 #endif
647
648 /* did the change result in an effective donor status change? */
649 after_boosted = (task_imp->iit_assertcnt > 0);
650
651 if (after_boosted != before_boosted) {
652 /*
653 * If the task importance is already on an update queue, we just reversed the need for a
654 * pending policy update. If the queue is any other than the delayed-drop-queue, pull it
655 * off that queue and release the reference it got going onto the update queue. If it is
656 * the delayed-drop-queue we leave it in place in case it comes back into the drop state
657 * before its time delay is up.
658 *
659 * We still need to propagate the change downstream to reverse the assertcnt effects,
660 * but we no longer need to update this task's boost policy state.
661 *
662 * Otherwise, mark it as needing a policy update.
663 */
664 assert(0 == task_imp->iit_updatepolicy);
665 if (NULL != task_imp->iit_updateq) {
666 if (&ipc_importance_delayed_drop_queue != task_imp->iit_updateq) {
667 queue_remove(task_imp->iit_updateq, task_imp, ipc_importance_task_t, iit_updates);
668 task_imp->iit_updateq = NULL;
669 ipc_importance_task_release_internal(task_imp); /* can't be last ref */
670 }
671 } else {
672 task_imp->iit_updatepolicy = 1;
673 }
674 return TRUE;
675 }
676
677 return FALSE;
678 }
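/*
 * The check/propagate split above is the pattern used by the assertion
 * paths later in this file: apply the count change under the importance
 * lock and, only if an effective boost edge was crossed, propagate the
 * same update type downstream. A minimal sketch (illustrative only;
 * example_hold_assertions is a hypothetical name):
 */
#if 0 /* illustrative sketch */
static void
example_hold_assertions(ipc_importance_task_t task_imp, uint32_t count)
{
	ipc_importance_lock();
	if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_HOLD, count)) {
		ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_HOLD, TRUE);
	}
	ipc_importance_unlock();
}
#endif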
679
680
681 /*
682 * Routine: ipc_importance_task_propagate_helper
683 * Purpose:
684 * Increment or decrement the internal task importance counter of all
685 * importance tasks inheriting from the specified one. If this causes
686 * that importance task to change state, add it to the list of tasks
687 * to do a policy update against.
688 * Conditions:
689 * Called with the importance lock held.
690 * It is the caller's responsibility to iterate down the generated list
691 * and propagate any subsequent assertion changes from there.
692 */
693 static void
694 ipc_importance_task_propagate_helper(
695 ipc_importance_task_t task_imp,
696 iit_update_type_t type,
697 queue_t propagation)
698 {
699 ipc_importance_task_t temp_task_imp;
700
701 /*
702 * iterate the downstream kmsgs, adjust their boosts,
703 * and capture the next task to adjust for each message
704 */
705
706 ipc_kmsg_t temp_kmsg;
707
708 queue_iterate(&task_imp->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance) {
709 mach_msg_header_t *hdr = temp_kmsg->ikm_header;
710 mach_port_delta_t delta;
711 ipc_port_t port;
712
713 /* toggle the kmsg importance bit as a barrier to parallel adjusts */
714 if (IIT_UPDATE_HOLD == type) {
715 if (MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
716 continue;
717 }
718
719 /* mark the message as now carrying importance */
720 hdr->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;
721 delta = 1;
722 } else {
723 if (!MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
724 continue;
725 }
726
727 /* clear the message as now carrying importance */
728 hdr->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
729 delta = -1;
730 }
731
732 /* determine the task importance to adjust as result (if any) */
733 port = hdr->msgh_remote_port;
734 assert(IP_VALID(port));
735 ip_lock(port);
736 temp_task_imp = IIT_NULL;
737 if (!ipc_port_importance_delta_internal(port, IPID_OPTION_NORMAL, &delta, &temp_task_imp)) {
738 ip_unlock(port);
739 }
740
741 /* no task importance to adjust associated with the port? */
742 if (IIT_NULL == temp_task_imp) {
743 continue;
744 }
745
746 /* hold a reference on temp_task_imp */
747
748 /* Adjust the task assertions and determine if an edge was crossed */
749 if (ipc_importance_task_check_transition(temp_task_imp, type, 1)) {
750 incr_ref_counter(temp_task_imp->iit_elem.iie_task_refs_added_transition);
751 queue_enter(propagation, temp_task_imp, ipc_importance_task_t, iit_props);
752 /* reference donated */
753 } else {
754 ipc_importance_task_release_internal(temp_task_imp);
755 }
756 }
757
758 /*
759 * iterate the downstream importance inherits
760 * and capture the next task importance to boost for each
761 */
762 ipc_importance_inherit_t temp_inherit;
763
764 queue_iterate(&task_imp->iit_inherits, temp_inherit, ipc_importance_inherit_t, iii_inheritance) {
765 uint32_t assertcnt = III_EXTERN(temp_inherit);
766
767 temp_task_imp = temp_inherit->iii_to_task;
768 assert(IIT_NULL != temp_task_imp);
769
770 if (IIT_UPDATE_HOLD == type) {
771 /* if no undropped externcnts in the inherit, nothing to do */
772 if (0 == assertcnt) {
773 assert(temp_inherit->iii_donating == FALSE);
774 continue;
775 }
776
777 /* nothing to do if the inherit is already donating (forced donation) */
778 if (temp_inherit->iii_donating) {
779 continue;
780 }
781
782 /* mark it donating and contribute to the task externcnts */
783 temp_inherit->iii_donating = TRUE;
784 temp_task_imp->iit_externcnt += temp_inherit->iii_externcnt;
785 temp_task_imp->iit_externdrop += temp_inherit->iii_externdrop;
786 } else {
787 /* if no contributing assertions, move on */
788 if (0 == assertcnt) {
789 assert(temp_inherit->iii_donating == FALSE);
790 continue;
791 }
792
793 /* nothing to do if the inherit is not donating */
794 if (!temp_inherit->iii_donating) {
795 continue;
796 }
797
798 /* mark it no longer donating */
799 temp_inherit->iii_donating = FALSE;
800
801 /* remove the contribution the inherit made to the to-task */
802 assert(IIT_EXTERN(temp_task_imp) >= III_EXTERN(temp_inherit));
803 assert(temp_task_imp->iit_externcnt >= temp_inherit->iii_externcnt);
804 assert(temp_task_imp->iit_externdrop >= temp_inherit->iii_externdrop);
805 temp_task_imp->iit_externcnt -= temp_inherit->iii_externcnt;
806 temp_task_imp->iit_externdrop -= temp_inherit->iii_externdrop;
807 }
808
809 /* Adjust the task assertions and determine if an edge was crossed */
810 assert(ipc_importance_task_is_any_receiver_type(temp_task_imp));
811 if (ipc_importance_task_check_transition(temp_task_imp, type, assertcnt)) {
812 ipc_importance_task_reference(temp_task_imp);
813 incr_ref_counter(temp_task_imp->iit_elem.iie_task_refs_added_transition);
814 queue_enter(propagation, temp_task_imp, ipc_importance_task_t, iit_props);
815 }
816 }
817 }
818
819 /*
820 * Routine: ipc_importance_task_process_updates
821 * Purpose:
822 * Process the queue of task importances and apply the policy
823 * update called for. Only process tasks in the queue with an
824 * update timestamp less than the supplied max.
825 * Conditions:
826 * Called and returns with importance locked.
827 * May drop importance lock and block temporarily.
828 */
829 static void
830 ipc_importance_task_process_updates(
831 queue_t supplied_queue,
832 boolean_t boost,
833 uint64_t max_timestamp)
834 {
835 ipc_importance_task_t task_imp;
836 queue_head_t second_chance;
837 queue_t queue = supplied_queue;
838
839 /*
840 * This queue will hold the tasks we couldn't trylock on the first pass.
841 * By using a second (private) queue, we guarantee all tasks that get
842 * entered on this queue have a timestamp under the maximum.
843 */
844 queue_init(&second_chance);
845
846 /* process any resulting policy updates */
847 retry:
848 while (!queue_empty(queue)) {
849 task_t target_task;
850 struct task_pend_token pend_token = {};
851
852 task_imp = (ipc_importance_task_t)queue_first(queue);
853 assert(0 == task_imp->iit_updatepolicy);
854 assert(queue == task_imp->iit_updateq);
855
856 /* if timestamp is too big, we're done */
857 if (task_imp->iit_updatetime > max_timestamp) {
858 break;
859 }
860
861 /* we were given a reference on each task in the queue */
862
863 /* remove it from the supplied queue */
864 queue_remove(queue, task_imp, ipc_importance_task_t, iit_updates);
865 task_imp->iit_updateq = NULL;
866
867 target_task = task_imp->iit_task;
868
869 /* Is it well on the way to exiting? */
870 if (TASK_NULL == target_task) {
871 ipc_importance_task_release_locked(task_imp);
872 /* importance unlocked */
873 ipc_importance_lock();
874 continue;
875 }
876
877 /* Has the update been reversed on the hysteresis queue? */
878 if (0 < task_imp->iit_assertcnt &&
879 queue == &ipc_importance_delayed_drop_queue) {
880 ipc_importance_task_release_locked(task_imp);
881 /* importance unlocked */
882 ipc_importance_lock();
883 continue;
884 }
885
886 /*
887 * Can we get the task lock out-of-order?
888 * If not, stick this back on the second-chance queue.
889 */
890 if (!task_lock_try(target_task)) {
891 boolean_t should_wait_lock = (queue == &second_chance);
892 task_imp->iit_updateq = &second_chance;
893
894 /*
895 * If we're already processing second-chances on
896 * tasks, keep this task on the front of the queue.
897 * We will wait for the task lock before coming
898 * back and trying again, and we have a better
899 * chance of re-acquiring the lock if we come back
900 * to it right away.
901 */
902 if (should_wait_lock) {
903 task_reference(target_task);
904 queue_enter_first(&second_chance, task_imp,
905 ipc_importance_task_t, iit_updates);
906 } else {
907 queue_enter(&second_chance, task_imp,
908 ipc_importance_task_t, iit_updates);
909 }
910 ipc_importance_unlock();
911
912 if (should_wait_lock) {
913 task_lock(target_task);
914 task_unlock(target_task);
915 task_deallocate(target_task);
916 }
917
918 ipc_importance_lock();
919 continue;
920 }
921
922 /* is it going away? */
923 if (!target_task->active) {
924 task_unlock(target_task);
925 ipc_importance_task_release_locked(task_imp);
926 /* importance unlocked */
927 ipc_importance_lock();
928 continue;
929 }
930
931 /* take a task reference to hold while we don't have the importance lock */
932 task_reference(target_task);
933
934 /* count the transition */
935 if (boost) {
936 task_imp->iit_transitions++;
937 }
938
939 ipc_importance_unlock();
940
941 /* apply the policy adjust to the target task (while it is still locked) */
942 task_update_boost_locked(target_task, boost, &pend_token);
943
944 /* complete the policy update with the task unlocked */
945 ipc_importance_task_release(task_imp);
946 task_unlock(target_task);
947 task_policy_update_complete_unlocked(target_task, &pend_token);
948 task_deallocate(target_task);
949
950 ipc_importance_lock();
951 }
952
953 /* If there are tasks we couldn't update the first time, try again */
954 if (!queue_empty(&second_chance)) {
955 queue = &second_chance;
956 goto retry;
957 }
958 }
959
960
961 /*
962 * Routine: ipc_importance_task_delayed_drop_scan
963 * Purpose:
964 * The thread call routine to scan the delayed drop queue,
965 * requesting all updates with a deadline up to the last target
966 * for the thread-call (which is DENAP_DROP_SKEW beyond the first
967 * thread's optimum delay).
969 * Conditions:
970 * Nothing locked
971 */
972 static void
973 ipc_importance_task_delayed_drop_scan(
974 __unused void *arg1,
975 __unused void *arg2)
976 {
977 ipc_importance_lock();
978
979 /* process all queued task drops with timestamps up to TARGET(first)+SKEW */
980 ipc_importance_task_process_updates(&ipc_importance_delayed_drop_queue,
981 FALSE,
982 ipc_importance_delayed_drop_timestamp);
983
984 /* importance lock may have been temporarily dropped */
985
986 /* If there are any entries left in the queue, re-arm the call here */
987 if (!queue_empty(&ipc_importance_delayed_drop_queue)) {
988 ipc_importance_task_t task_imp;
989 uint64_t deadline;
990 uint64_t leeway;
991
992 task_imp = (ipc_importance_task_t)queue_first(&ipc_importance_delayed_drop_queue);
993
994 nanoseconds_to_absolutetime(DENAP_DROP_DELAY, &deadline);
995 deadline += task_imp->iit_updatetime;
996 ipc_importance_delayed_drop_timestamp = deadline;
997
998 nanoseconds_to_absolutetime(DENAP_DROP_LEEWAY, &leeway);
999
1000 thread_call_enter_delayed_with_leeway(
1001 ipc_importance_delayed_drop_call,
1002 NULL,
1003 deadline,
1004 leeway,
1005 DENAP_DROP_FLAGS);
1006 } else {
1007 ipc_importance_delayed_drop_call_requested = FALSE;
1008 }
1009 ipc_importance_unlock();
1010 }
1011
1012 /*
1013 * Routine: ipc_importance_task_delayed_drop
1014 * Purpose:
1015 * Queue the specified task importance for delayed policy
1016 * update to drop its boost.
1017 * Conditions:
1018 * Called with the importance lock held.
1019 */
1020 static void
1021 ipc_importance_task_delayed_drop(ipc_importance_task_t task_imp)
1022 {
1023 uint64_t timestamp = mach_absolute_time(); /* no mach_approximate_time() in kernel */
1024
1025 assert(ipc_importance_delayed_drop_call != NULL);
1026
1027 /*
1028 * If still on an update queue from a previous change,
1029 * remove it first (and use that reference). Otherwise, take
1030 * a new reference for the delay drop update queue.
1031 */
1032 if (NULL != task_imp->iit_updateq) {
1033 queue_remove(task_imp->iit_updateq, task_imp,
1034 ipc_importance_task_t, iit_updates);
1035 } else {
1036 ipc_importance_task_reference_internal(task_imp);
1037 }
1038
1039 task_imp->iit_updateq = &ipc_importance_delayed_drop_queue;
1040 task_imp->iit_updatetime = timestamp;
1041
1042 queue_enter(&ipc_importance_delayed_drop_queue, task_imp,
1043 ipc_importance_task_t, iit_updates);
1044
1045 /* request the delayed thread-call if not already requested */
1046 if (!ipc_importance_delayed_drop_call_requested) {
1047 uint64_t deadline;
1048 uint64_t leeway;
1049
1050 nanoseconds_to_absolutetime(DENAP_DROP_DELAY, &deadline);
1051 deadline += task_imp->iit_updatetime;
1052 ipc_importance_delayed_drop_timestamp = deadline;
1053
1054 nanoseconds_to_absolutetime(DENAP_DROP_LEEWAY, &leeway);
1055
1056 ipc_importance_delayed_drop_call_requested = TRUE;
1057 thread_call_enter_delayed_with_leeway(
1058 ipc_importance_delayed_drop_call,
1059 NULL,
1060 deadline,
1061 leeway,
1062 DENAP_DROP_FLAGS);
1063 }
1064 }
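/*
 * Timing sketch for the delayed drop above (informal): a task queued at
 * absolute time T gets iit_updatetime = T and a thread-call target of
 * roughly T + DENAP_DROP_DELAY; when that call fires, the scan applies
 * every queued drop whose timestamp is at or before the recorded target,
 * within DENAP_DROP_LEEWAY of scheduling leeway.
 */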
1065
1066
1067 /*
1068 * Routine: ipc_importance_task_propagate_assertion_locked
1069 * Purpose:
1070 * Propagate the importance transition type to every downstream item.
1071 * If this causes a boost to be applied, determine if that
1072 * boost should propagate downstream.
1073 * Conditions:
1074 * Called with the importance lock held.
1075 */
1076 static void
1077 ipc_importance_task_propagate_assertion_locked(
1078 ipc_importance_task_t task_imp,
1079 iit_update_type_t type,
1080 boolean_t update_task_imp)
1081 {
1082 boolean_t boost = (IIT_UPDATE_HOLD == type);
1083 ipc_importance_task_t temp_task_imp;
1084 queue_head_t propagate;
1085 queue_head_t updates;
1086
1087 queue_init(&updates);
1088 queue_init(&propagate);
1089
1090 ipc_importance_assert_held();
1091
1092 /*
1093 * If we're going to update the policy for the provided task,
1094 * enqueue it on the propagate queue itself. Otherwise, only
1095 * enqueue downstream things.
1096 */
1097 if (update_task_imp) {
1098 ipc_importance_task_reference(task_imp);
1099 incr_ref_counter(task_imp->iit_elem.iie_task_refs_added_transition);
1100 queue_enter(&propagate, task_imp, ipc_importance_task_t, iit_props);
1101 } else {
1102 ipc_importance_task_propagate_helper(task_imp, type, &propagate);
1103 }
1104
1105 /*
1106 * for each item on the propagation list, propagate any change downstream,
1107 * adding new tasks to propagate further if they transitioned as well.
1108 */
1109 while (!queue_empty(&propagate)) {
1110 boolean_t need_update;
1111
1112 queue_remove_first(&propagate, temp_task_imp, ipc_importance_task_t, iit_props);
1113 /* hold a reference on temp_task_imp */
1114
1115 assert(IIT_NULL != temp_task_imp);
1116
1117 /* only propagate for receivers not already marked as a donor */
1118 if (!ipc_importance_task_is_marked_donor(temp_task_imp) &&
1119 ipc_importance_task_is_marked_receiver(temp_task_imp)) {
1120 ipc_importance_task_propagate_helper(temp_task_imp, type, &propagate);
1121 }
1122
1123 /* if we have a policy update to apply, enqueue a reference for later processing */
1124 need_update = (0 != temp_task_imp->iit_updatepolicy);
1125 temp_task_imp->iit_updatepolicy = 0;
1126 if (need_update && TASK_NULL != temp_task_imp->iit_task) {
1127 if (NULL == temp_task_imp->iit_updateq) {
1128 /*
1129 * If a downstream task that needs an update is subject to AppNap,
1130 * drop boosts according to the delay hysteresis. Otherwise,
1131 * update it immediately.
1132 */
1133 if (!boost && temp_task_imp != task_imp &&
1134 ipc_importance_delayed_drop_call != NULL &&
1135 ipc_importance_task_is_marked_denap_receiver(temp_task_imp)) {
1136 ipc_importance_task_delayed_drop(temp_task_imp);
1137 } else {
1138 temp_task_imp->iit_updatetime = 0;
1139 temp_task_imp->iit_updateq = &updates;
1140 ipc_importance_task_reference_internal(temp_task_imp);
1141 if (boost) {
1142 queue_enter(&updates, temp_task_imp,
1143 ipc_importance_task_t, iit_updates);
1144 } else {
1145 queue_enter_first(&updates, temp_task_imp,
1146 ipc_importance_task_t, iit_updates);
1147 }
1148 }
1149 } else {
1150 /* Must already be on the AppNap hysteresis queue */
1151 assert(ipc_importance_delayed_drop_call != NULL);
1152 assert(ipc_importance_task_is_marked_denap_receiver(temp_task_imp));
1153 }
1154 }
1155
1156 ipc_importance_task_release_internal(temp_task_imp);
1157 }
1158
1159 /* apply updates to task (may drop importance lock) */
1160 if (!queue_empty(&updates)) {
1161 ipc_importance_task_process_updates(&updates, boost, 0);
1162 }
1163 }
1164
1165 /*
1166 * Routine: ipc_importance_task_hold_internal_assertion_locked
1167 * Purpose:
1168 * Increment the assertion count on the task importance.
1169 * If this results in a boost state change in that task,
1170 * prepare to update task policy for this task AND,
1171 * if not just waking out of App Nap, all down-stream
1172 * tasks that have a similar transition through inheriting
1173 * this update.
1174 * Conditions:
1175 * importance locked on entry and exit.
1176 * May temporarily drop importance lock and block.
1177 */
1178 static kern_return_t
1179 ipc_importance_task_hold_internal_assertion_locked(ipc_importance_task_t task_imp, uint32_t count)
1180 {
1181 if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_HOLD, count)) {
1182 ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_HOLD, TRUE);
1183 }
1184 return KERN_SUCCESS;
1185 }
1186
1187 /*
1188 * Routine: ipc_importance_task_drop_internal_assertion_locked
1189 * Purpose:
1190 * Decrement the assertion count on the task importance.
1191 * If this results in a boost state change in that task,
1192 * prepare to update task policy for this task AND,
1193 * if not just waking out of App Nap, all down-stream
1194 * tasks that have a similar transition through inheriting
1195 * this update.
1196 * Conditions:
1197 * importance locked on entry and exit.
1198 * May temporarily drop importance lock and block.
1199 */
1200 static kern_return_t
1201 ipc_importance_task_drop_internal_assertion_locked(ipc_importance_task_t task_imp, uint32_t count)
1202 {
1203 if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_DROP, count)) {
1204 ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, TRUE);
1205 }
1206 return KERN_SUCCESS;
1207 }
1208
1209 /*
1210 * Routine: ipc_importance_task_hold_internal_assertion
1211 * Purpose:
1212 * Increment the assertion count on the task importance.
1213 * If this results in a 0->1 change in that count,
1214 * prepare to update task policy for this task AND
1215 * (potentially) all down-stream tasks that have a
1216 * similar transition through inheriting this update.
1217 * Conditions:
1218 * Nothing locked
1219 * May block after dropping importance lock.
1220 */
1221 int
1222 ipc_importance_task_hold_internal_assertion(ipc_importance_task_t task_imp, uint32_t count)
1223 {
1224 int ret = KERN_SUCCESS;
1225
1226 if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1227 ipc_importance_lock();
1228 ret = ipc_importance_task_hold_internal_assertion_locked(task_imp, count);
1229 ipc_importance_unlock();
1230 }
1231 return ret;
1232 }
1233
1234 /*
1235 * Routine: ipc_importance_task_drop_internal_assertion
1236 * Purpose:
1237 * Decrement the assertion count on the task importance.
1238 * If this results in a X->0 change in that count,
1239 * prepare to update task policy for this task AND
1240 * all down-stream tasks that have a similar transition
1241 * through inheriting this drop update.
1242 * Conditions:
1243 * Nothing locked on entry.
1244 * May block after dropping importance lock.
1245 */
1246 kern_return_t
1247 ipc_importance_task_drop_internal_assertion(ipc_importance_task_t task_imp, uint32_t count)
1248 {
1249 kern_return_t ret = KERN_SUCCESS;
1250
1251 if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1252 ipc_importance_lock();
1253 ret = ipc_importance_task_drop_internal_assertion_locked(task_imp, count);
1254 ipc_importance_unlock();
1255 }
1256 return ret;
1257 }
1258
1259 /*
1260 * Routine: ipc_importance_task_hold_file_lock_assertion
1261 * Purpose:
1262 * Increment the file lock assertion count on the task importance.
1263 * If this results in a 0->1 change in that count,
1264 * prepare to update task policy for this task AND
1265 * (potentially) all down-stream tasks that have a
1266 * similar transition through inheriting this update.
1267 * Conditions:
1268 * Nothing locked
1269 * May block after dropping importance lock.
1270 */
1271 kern_return_t
1272 ipc_importance_task_hold_file_lock_assertion(ipc_importance_task_t task_imp, uint32_t count)
1273 {
1274 kern_return_t ret = KERN_SUCCESS;
1275
1276 if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1277 ipc_importance_lock();
1278 ret = ipc_importance_task_hold_internal_assertion_locked(task_imp, count);
1279 if (KERN_SUCCESS == ret) {
1280 task_imp->iit_filelocks += count;
1281 }
1282 ipc_importance_unlock();
1283 }
1284 return ret;
1285 }
1286
1287 /*
1288 * Routine: ipc_importance_task_drop_file_lock_assertion
1289 * Purpose:
1290 * Decrement the assertion count on the task importance.
1291 * If this results in a X->0 change in that count,
1292 * prepare to update task policy for this task AND
1293 * all down-stream tasks that have a similar transition
1294 * through inheriting this drop update.
1295 * Conditions:
1296 * Nothing locked on entry.
1297 * May block after dropping importance lock.
1298 */
1299 kern_return_t
1300 ipc_importance_task_drop_file_lock_assertion(ipc_importance_task_t task_imp, uint32_t count)
1301 {
1302 kern_return_t ret = KERN_SUCCESS;
1303
1304 if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1305 ipc_importance_lock();
1306 if (count <= task_imp->iit_filelocks) {
1307 task_imp->iit_filelocks -= count;
1308 ret = ipc_importance_task_drop_internal_assertion_locked(task_imp, count);
1309 } else {
1310 ret = KERN_INVALID_ARGUMENT;
1311 }
1312 ipc_importance_unlock();
1313 }
1314 return ret;
1315 }
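/*
 * Sketch of the intended pairing of the two file-lock routines above
 * (illustrative only; example_file_lock_boost is a hypothetical name):
 * every hold that succeeds is expected to be matched by a later drop of
 * the same count, otherwise the drop path returns KERN_INVALID_ARGUMENT.
 */
#if 0 /* illustrative sketch */
static void
example_file_lock_boost(ipc_importance_task_t task_imp)
{
	if (KERN_SUCCESS == ipc_importance_task_hold_file_lock_assertion(task_imp, 1)) {
		/* ... hold the contended file lock on behalf of a boosting client ... */
		(void)ipc_importance_task_drop_file_lock_assertion(task_imp, 1);
	}
}
#endif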
1316
1317 /*
1318 * Routine: ipc_importance_task_hold_legacy_external_assertion
1319 * Purpose:
1320 * Increment the external assertion count on the task importance.
1321 * This cannot result in a 0->1 transition, as the caller must
1322 * already hold an external boost.
1323 * Conditions:
1324 * Nothing locked on entry.
1325 * May block after dropping importance lock.
1326 * A queue of task importance structures is returned
1327 * by ipc_importance_task_hold_assertion_locked(). Each
1328 * needs to be updated (outside the importance lock hold).
1329 */
1330 kern_return_t
1331 ipc_importance_task_hold_legacy_external_assertion(ipc_importance_task_t task_imp, uint32_t count)
1332 {
1333 task_t target_task;
1334 uint32_t target_assertcnt;
1335 uint32_t target_externcnt;
1336 uint32_t target_legacycnt;
1337
1338 kern_return_t ret;
1339
1340 ipc_importance_lock();
1341 target_task = task_imp->iit_task;
1342
1343 #if IMPORTANCE_TRACE
1344 int target_pid = task_pid(target_task);
1345
1346 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_HOLD | TASK_POLICY_EXTERNAL))) | DBG_FUNC_START,
1347 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
1348 #endif
1349
1350 if (IIT_LEGACY_EXTERN(task_imp) == 0) {
1351 /* Only allowed to take a new boost assertion when holding an external boost */
1352 /* save data for diagnostic printf below */
1353 target_assertcnt = task_imp->iit_assertcnt;
1354 target_externcnt = IIT_EXTERN(task_imp);
1355 target_legacycnt = IIT_LEGACY_EXTERN(task_imp);
1356 ret = KERN_FAILURE;
1357 count = 0;
1358 } else {
1359 assert(ipc_importance_task_is_any_receiver_type(task_imp));
1360 assert(0 < task_imp->iit_assertcnt);
1361 assert(0 < IIT_EXTERN(task_imp));
1362 task_imp->iit_assertcnt += count;
1363 task_imp->iit_externcnt += count;
1364 task_imp->iit_legacy_externcnt += count;
1365 ret = KERN_SUCCESS;
1366 }
1367 ipc_importance_unlock();
1368
1369 #if IMPORTANCE_TRACE
1370 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_HOLD | TASK_POLICY_EXTERNAL))) | DBG_FUNC_END,
1371 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
1372 // This covers the legacy case where a task takes an extra boost.
1373 DTRACE_BOOST5(receive_boost, task_t, target_task, int, target_pid, int, proc_selfpid(), int, count, int, task_imp->iit_assertcnt);
1374 #endif
1375
1376 if (KERN_FAILURE == ret && target_task != TASK_NULL) {
1377 printf("BUG in process %s[%d]: "
1378 "attempt to acquire an additional legacy external boost assertion without holding an existing legacy external assertion. "
1379 "(%d total, %d external, %d legacy-external)\n",
1380 proc_name_address(target_task->bsd_info), task_pid(target_task),
1381 target_assertcnt, target_externcnt, target_legacycnt);
1382 }
1383
1384 return ret;
1385 }
1386
1387 /*
1388 * Routine: ipc_importance_task_drop_legacy_external_assertion
1389 * Purpose:
1390 * Drop the legacy external assertion count on the task and
1391 * reflect that change to total external assertion count and
1392 * then onto the internal importance count.
1393 *
1394 * If this results in a X->0 change in the internal,
1395 * count, prepare to update task policy for this task AND
1396 * all down-stream tasks that have a similar transition
1397 * through inheriting this update.
1398 * Conditions:
1399 * Nothing locked on entry.
1400 */
1401 kern_return_t
1402 ipc_importance_task_drop_legacy_external_assertion(ipc_importance_task_t task_imp, uint32_t count)
1403 {
1404 int ret = KERN_SUCCESS;
1405 task_t target_task;
1406 uint32_t target_assertcnt;
1407 uint32_t target_externcnt;
1408 uint32_t target_legacycnt;
1409
1410 if (count > 1) {
1411 return KERN_INVALID_ARGUMENT;
1412 }
1413
1414 ipc_importance_lock();
1415 target_task = task_imp->iit_task;
1416
1417 #if IMPORTANCE_TRACE
1418 int target_pid = task_pid(target_task);
1419
1420 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_DROP | TASK_POLICY_EXTERNAL))) | DBG_FUNC_START,
1421 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
1422 #endif
1423
1424 if (count > IIT_LEGACY_EXTERN(task_imp)) {
1425 /* Process over-released its boost count - save data for diagnostic printf */
1426 /* TODO: If count > 1, we should clear out as many external assertions as there are left. */
1427 target_assertcnt = task_imp->iit_assertcnt;
1428 target_externcnt = IIT_EXTERN(task_imp);
1429 target_legacycnt = IIT_LEGACY_EXTERN(task_imp);
1430 ret = KERN_FAILURE;
1431 } else {
1432 /*
1433 * decrement legacy external count from the top level and reflect
1434 * into internal for this and all subsequent updates.
1435 */
1436 assert(ipc_importance_task_is_any_receiver_type(task_imp));
1437 assert(IIT_EXTERN(task_imp) >= count);
1438
1439 task_imp->iit_legacy_externdrop += count;
1440 task_imp->iit_externdrop += count;
1441
1442 /* reset extern counters (if appropriate) */
1443 if (IIT_LEGACY_EXTERN(task_imp) == 0) {
1444 if (IIT_EXTERN(task_imp) != 0) {
1445 task_imp->iit_externcnt -= task_imp->iit_legacy_externcnt;
1446 task_imp->iit_externdrop -= task_imp->iit_legacy_externdrop;
1447 } else {
1448 task_imp->iit_externcnt = 0;
1449 task_imp->iit_externdrop = 0;
1450 }
1451 task_imp->iit_legacy_externcnt = 0;
1452 task_imp->iit_legacy_externdrop = 0;
1453 }
1454
1455 /* reflect the drop to the internal assertion count (and effect any importance change) */
1456 if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_DROP, count)) {
1457 ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, TRUE);
1458 }
1459 ret = KERN_SUCCESS;
1460 }
1461
1462 #if IMPORTANCE_TRACE
1463 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_DROP | TASK_POLICY_EXTERNAL))) | DBG_FUNC_END,
1464 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
1465 #endif
1466
1467 ipc_importance_unlock();
1468
1469 /* delayed printf for user-supplied data failures */
1470 if (KERN_FAILURE == ret && TASK_NULL != target_task) {
1471 printf("BUG in process %s[%d]: over-released legacy external boost assertions (%d total, %d external, %d legacy-external)\n",
1472 proc_name_address(target_task->bsd_info), task_pid(target_task),
1473 target_assertcnt, target_externcnt, target_legacycnt);
1474 }
1475
1476 return ret;
1477 }
1478
1479
1480 #if LEGACY_IMPORTANCE_DELIVERY
1481 /* Transfer an assertion to legacy userspace responsibility */
1482 static kern_return_t
1483 ipc_importance_task_externalize_legacy_assertion(ipc_importance_task_t task_imp, uint32_t count, __unused int sender_pid)
1484 {
1485 task_t target_task;
1486
1487 assert(IIT_NULL != task_imp);
1488 target_task = task_imp->iit_task;
1489
1490 if (TASK_NULL == target_task ||
1491 !ipc_importance_task_is_any_receiver_type(task_imp)) {
1492 return KERN_FAILURE;
1493 }
1494
1495 #if IMPORTANCE_TRACE
1496 int target_pid = task_pid(target_task);
1497
1498 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, IMP_EXTERN)) | DBG_FUNC_START,
1499 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
1500 #endif
1501
1502 ipc_importance_lock();
1503 /* assert(task_imp->iit_assertcnt >= IIT_EXTERN(task_imp) + count); */
1504 assert(IIT_EXTERN(task_imp) >= IIT_LEGACY_EXTERN(task_imp));
1505 task_imp->iit_legacy_externcnt += count;
1506 task_imp->iit_externcnt += count;
1507 ipc_importance_unlock();
1508
1509 #if IMPORTANCE_TRACE
1510 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, IMP_EXTERN)) | DBG_FUNC_END,
1511 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
1512 // This is the legacy boosting path
1513 DTRACE_BOOST5(receive_boost, task_t, target_task, int, target_pid, int, sender_pid, int, count, int, IIT_LEGACY_EXTERN(task_imp));
1514 #endif /* IMPORTANCE_TRACE */
1515
1516 return KERN_SUCCESS;
1517 }
1518 #endif /* LEGACY_IMPORTANCE_DELIVERY */
1519
1520 /*
1521 * Routine: ipc_importance_task_update_live_donor
1522 * Purpose:
1523 * Read the live donor status, update the live_donor bit, and propagate the change in importance.
1524 * Conditions:
1525 * Nothing locked on entrance, nothing locked on exit.
1526 *
1527 * TODO: Need tracepoints around this function...
1528 */
1529 void
1530 ipc_importance_task_update_live_donor(ipc_importance_task_t task_imp)
1531 {
1532 uint32_t task_live_donor;
1533 boolean_t before_donor;
1534 boolean_t after_donor;
1535 task_t target_task;
1536
1537 assert(task_imp != NULL);
1538
1539 /*
1540 * Nothing to do if the task is not marked as expecting
1541 * live donor updates.
1542 */
1543 if (!ipc_importance_task_is_marked_live_donor(task_imp)) {
1544 return;
1545 }
1546
1547 ipc_importance_lock();
1548
1549 /* If the task got disconnected on the way here, no use (or ability) adjusting live donor status */
1550 target_task = task_imp->iit_task;
1551 if (TASK_NULL == target_task) {
1552 ipc_importance_unlock();
1553 return;
1554 }
1555 before_donor = ipc_importance_task_is_marked_donor(task_imp);
1556
1557 /* snapshot task live donor status - may change, but another call will accompany the change */
1558 task_live_donor = target_task->effective_policy.tep_live_donor;
1559
1560 #if IMPORTANCE_TRACE
1561 int target_pid = task_pid(target_task);
1562
1563 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1564 (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_UPDATE_LIVE_DONOR_STATE)) | DBG_FUNC_START,
1565 target_pid, task_imp->iit_donor, task_live_donor, before_donor, 0);
1566 #endif
1567
1568 /* update the task importance live donor status based on the task's value */
1569 task_imp->iit_donor = task_live_donor;
1570
1571 after_donor = ipc_importance_task_is_marked_donor(task_imp);
1572
1573 /* Has the effectiveness of being a donor changed as a result of this update? */
1574 if (before_donor != after_donor) {
1575 iit_update_type_t type;
1576
1577 /* propagate assertions without updating the current task policy (already handled) */
1578 if (0 == before_donor) {
1579 task_imp->iit_transitions++;
1580 type = IIT_UPDATE_HOLD;
1581 } else {
1582 type = IIT_UPDATE_DROP;
1583 }
1584 ipc_importance_task_propagate_assertion_locked(task_imp, type, FALSE);
1585 }
1586
1587 #if IMPORTANCE_TRACE
1588 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1589 (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_UPDATE_LIVE_DONOR_STATE)) | DBG_FUNC_END,
1590 target_pid, task_imp->iit_donor, task_live_donor, after_donor, 0);
1591 #endif
1592
1593 ipc_importance_unlock();
1594 }
1595
1596
1597 /*
1598 * Routine: ipc_importance_task_mark_donor
1599 * Purpose:
1600 * Set the task importance donor flag.
1601 * Conditions:
1602 * Nothing locked on entrance, nothing locked on exit.
1603 *
1604 * This is only called while the task is being constructed,
1605 * so no need to update task policy or propagate downstream.
1606 */
1607 void
1608 ipc_importance_task_mark_donor(ipc_importance_task_t task_imp, boolean_t donating)
1609 {
1610 assert(task_imp != NULL);
1611
1612 ipc_importance_lock();
1613
1614 int old_donor = task_imp->iit_donor;
1615
1616 task_imp->iit_donor = (donating ? 1 : 0);
1617
1618 if (task_imp->iit_donor > 0 && old_donor == 0) {
1619 task_imp->iit_transitions++;
1620 }
1621
1622 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1623 (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_INIT_DONOR_STATE)) | DBG_FUNC_NONE,
1624 task_pid(task_imp->iit_task), donating,
1625 old_donor, task_imp->iit_donor, 0);
1626
1627 ipc_importance_unlock();
1628 }
1629
1630 /*
1631 * Routine: ipc_importance_task_is_marked_donor
1632 * Purpose:
1633 * Query the donor flag for the given task importance.
1634 * Conditions:
1635 * May be called without taking the importance lock.
1636 * In that case, donor status can change so you must
1637 * check only once for each donation event.
1638 */
1639 boolean_t
1640 ipc_importance_task_is_marked_donor(ipc_importance_task_t task_imp)
1641 {
1642 if (IIT_NULL == task_imp) {
1643 return FALSE;
1644 }
1645 return 0 != task_imp->iit_donor;
1646 }
1647
1648 /*
1649 * Routine: ipc_importance_task_mark_live_donor
1650 * Purpose:
1651 * Indicate that the task is eligible for live donor updates.
1652 * Conditions:
1653 * Nothing locked on entrance, nothing locked on exit.
1654 *
1655 * This is only called while the task is being constructed.
1656 */
1657 void
1658 ipc_importance_task_mark_live_donor(ipc_importance_task_t task_imp, boolean_t live_donating)
1659 {
1660 assert(task_imp != NULL);
1661
1662 ipc_importance_lock();
1663 task_imp->iit_live_donor = (live_donating ? 1 : 0);
1664 ipc_importance_unlock();
1665 }
1666
1667 /*
1668 * Routine: ipc_importance_task_is_marked_live_donor
1669 * Purpose:
1670 * Query the live donor flag for the given task importance.
1671 * Conditions:
1672 * May be called without taking the importance lock.
1673 * In that case, donor status can change so you must
1674 * check only once for each donation event.
1675 */
1676 boolean_t
1677 ipc_importance_task_is_marked_live_donor(ipc_importance_task_t task_imp)
1678 {
1679 if (IIT_NULL == task_imp) {
1680 return FALSE;
1681 }
1682 return 0 != task_imp->iit_live_donor;
1683 }
1684
1685 /*
1686 * Routine: ipc_importance_task_is_donor
1687 * Purpose:
1688 * Query the full donor status for the given task importance.
1689 * Conditions:
1690 * May be called without taking the importance lock.
1691 * In that case, donor status can change so you must
1692 * check only once for each donation event.
1693 */
1694 boolean_t
1695 ipc_importance_task_is_donor(ipc_importance_task_t task_imp)
1696 {
1697 if (IIT_NULL == task_imp) {
1698 return FALSE;
1699 }
1700 return ipc_importance_task_is_marked_donor(task_imp) ||
1701 (ipc_importance_task_is_marked_receiver(task_imp) &&
1702 task_imp->iit_assertcnt > 0);
1703 }
1704
1705 /*
1706 * Routine: ipc_importance_task_is_never_donor
1707 * Purpose:
1708 * Query if a given task can ever donate importance.
1709 * Conditions:
1710 * May be called without taking the importance lock.
1711 * Condition is permanent for a given task.
1712 */
1713 boolean_t
1714 ipc_importance_task_is_never_donor(ipc_importance_task_t task_imp)
1715 {
1716 if (IIT_NULL == task_imp) {
1717 return FALSE;
1718 }
1719 return !ipc_importance_task_is_marked_donor(task_imp) &&
1720 !ipc_importance_task_is_marked_live_donor(task_imp) &&
1721 !ipc_importance_task_is_marked_receiver(task_imp);
1722 }
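
/*
 * Relationship between the donor queries above (illustrative summary only,
 * restating the definitions in this file):
 *
 *	is_donor(t)       == is_marked_donor(t) ||
 *	                     (is_marked_receiver(t) && t->iit_assertcnt > 0)
 *	is_never_donor(t) == !is_marked_donor(t) &&
 *	                     !is_marked_live_donor(t) &&
 *	                     !is_marked_receiver(t)
 */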
1723
1724 /*
1725 * Routine: ipc_importance_task_mark_receiver
1726 * Purpose:
1727 * Update the task importance receiver flag.
1728 * Conditions:
1729 * Nothing locked on entrance, nothing locked on exit.
1730 * This can only be invoked before the task is discoverable,
1731 * so there are no atomicity concerns.
1732 */
1733 void
1734 ipc_importance_task_mark_receiver(ipc_importance_task_t task_imp, boolean_t receiving)
1735 {
1736 assert(task_imp != NULL);
1737
1738 ipc_importance_lock();
1739 if (receiving) {
1740 assert(task_imp->iit_assertcnt == 0);
1741 assert(task_imp->iit_externcnt == 0);
1742 assert(task_imp->iit_externdrop == 0);
1743 assert(task_imp->iit_denap == 0);
1744 task_imp->iit_receiver = 1; /* task can receive importance boost */
1745 } else if (task_imp->iit_receiver) {
1746 assert(task_imp->iit_denap == 0);
1747 if (task_imp->iit_assertcnt != 0 || IIT_EXTERN(task_imp) != 0) {
1748 panic("disabling imp_receiver on task with pending importance boosts!");
1749 }
1750 task_imp->iit_receiver = 0;
1751 }
1752 ipc_importance_unlock();
1753 }
1754
1755
1756 /*
1757 * Routine: ipc_importance_task_is_marked_receiver
1758 * Purpose:
1759 * Query the receiver flag for the given task importance.
1760 * Conditions:
1761 * May be called without taking the importance lock as
1762 * the importance flag can never change after task init.
1763 */
1764 boolean_t
1765 ipc_importance_task_is_marked_receiver(ipc_importance_task_t task_imp)
1766 {
1767 return IIT_NULL != task_imp && 0 != task_imp->iit_receiver;
1768 }
1769
1770
1771 /*
1772 * Routine: ipc_importance_task_mark_denap_receiver
1773 * Purpose:
1774 * Update the task importance de-nap receiver flag.
1775 * Conditions:
1776 * Nothing locked on entrance, nothing locked on exit.
1777 * This can only be invoked before the task is discoverable,
1778 * so there are no atomicity concerns.
1779 */
1780 void
1781 ipc_importance_task_mark_denap_receiver(ipc_importance_task_t task_imp, boolean_t denap)
1782 {
1783 assert(task_imp != NULL);
1784
1785 ipc_importance_lock();
1786 if (denap) {
1787 assert(task_imp->iit_assertcnt == 0);
1788 assert(task_imp->iit_externcnt == 0);
1789 assert(task_imp->iit_receiver == 0);
1790 task_imp->iit_denap = 1; /* task can receive de-nap boost */
1791 } else if (task_imp->iit_denap) {
1792 assert(task_imp->iit_receiver == 0);
1793 if (0 < task_imp->iit_assertcnt || 0 < IIT_EXTERN(task_imp)) {
1794 panic("disabling de-nap on task with pending de-nap boosts!");
1795 }
1796 task_imp->iit_denap = 0;
1797 }
1798 ipc_importance_unlock();
1799 }
1800
1801
1802 /*
1803 * Routine: ipc_importance_task_is_marked_denap_receiver
1804 * Purpose:
1805 * Query the de-nap receiver flag for the given task importance.
1806 * Conditions:
1807 * May be called without taking the importance lock as
1808 * the de-nap flag can never change after task init.
1809 */
1810 boolean_t
1811 ipc_importance_task_is_marked_denap_receiver(ipc_importance_task_t task_imp)
1812 {
1813 return IIT_NULL != task_imp && 0 != task_imp->iit_denap;
1814 }
1815
1816 /*
1817 * Routine: ipc_importance_task_is_denap_receiver
1818 * Purpose:
1819 * Query the full de-nap receiver status for the given task importance.
1820 * For now, that is simply whether the receiver flag is set.
1821 * Conditions:
1822 * May be called without taking the importance lock as
1823 * the de-nap receiver flag can never change after task init.
1824 */
1825 boolean_t
1826 ipc_importance_task_is_denap_receiver(ipc_importance_task_t task_imp)
1827 {
1828 return ipc_importance_task_is_marked_denap_receiver(task_imp);
1829 }
1830
1831 /*
1832 * Routine: ipc_importance_task_is_any_receiver_type
1833 * Purpose:
1834 * Query if the task is marked to receive boosts - either
1835 * importance or denap.
1836 * Conditions:
1837 * May be called without taking the importance lock as both
1838 * the importance and de-nap receiver flags can never change
1839 * after task init.
1840 */
1841 boolean_t
1842 ipc_importance_task_is_any_receiver_type(ipc_importance_task_t task_imp)
1843 {
1844 return ipc_importance_task_is_marked_receiver(task_imp) ||
1845 ipc_importance_task_is_marked_denap_receiver(task_imp);
1846 }
1847
1848 #if 0 /* currently unused */
1849
1850 /*
1851 * Routine: ipc_importance_inherit_reference
1852 * Purpose:
1853 * Add a reference to the inherit importance element.
1854 * Conditions:
1855 * Caller must hold a reference on the inherit element.
1856 */
1857 static inline void
1858 ipc_importance_inherit_reference(ipc_importance_inherit_t inherit)
1859 {
1860 ipc_importance_reference(&inherit->iii_elem);
1861 }
1862 #endif /* currently unused */
1863
1864 /*
1865 * Routine: ipc_importance_inherit_release_locked
1866 * Purpose:
1867 * Release a reference on an inherit importance attribute value,
1868 * unlinking and deallocating the attribute if the last reference.
1869 * Conditions:
1870 * Entered with importance lock held, leaves with it unlocked.
1871 */
1872 static inline void
1873 ipc_importance_inherit_release_locked(ipc_importance_inherit_t inherit)
1874 {
1875 ipc_importance_release_locked(&inherit->iii_elem);
1876 }
1877
1878 #if 0 /* currently unused */
1879 /*
1880 * Routine: ipc_importance_inherit_release
1881 * Purpose:
1882 * Release a reference on an inherit importance attribute value,
1883 * unlinking and deallocating the attribute if the last reference.
1884 * Conditions:
1885 * nothing locked on entrance, nothing locked on exit.
1886 * May block.
1887 */
1888 void
1889 ipc_importance_inherit_release(ipc_importance_inherit_t inherit)
1890 {
1891 if (III_NULL != inherit) {
1892 ipc_importance_release(&inherit->iii_elem);
1893 }
1894 }
1895 #endif /* 0 currently unused */
1896
1897 /*
1898 * Routine: ipc_importance_for_task
1899 * Purpose:
1900 * Create a reference for the specified task's base importance
1901 * element. If the base importance element doesn't exist, make it and
1902 * bind it to the active task. If the task is inactive, there isn't
1903 * any need to return a new reference.
1904 * Conditions:
1905 * If made is true, a "made" reference is returned (for donating to
1906 * the voucher system). Otherwise an internal reference is returned.
1907 *
1908 * Nothing locked on entry. May block.
1909 */
1910 ipc_importance_task_t
1911 ipc_importance_for_task(task_t task, boolean_t made)
1912 {
1913 ipc_importance_task_t task_elem;
1914 boolean_t first_pass = TRUE;
1915
1916 assert(TASK_NULL != task);
1917
1918 retry:
1919 /* No use returning anything for inactive task */
1920 if (!task->active) {
1921 return IIT_NULL;
1922 }
1923
1924 ipc_importance_lock();
1925 task_elem = task->task_imp_base;
1926 if (IIT_NULL != task_elem) {
1927 /* Add a made reference (borrowing active task ref to do it) */
1928 if (made) {
1929 if (0 == task_elem->iit_made++) {
1930 assert(IIT_REFS_MAX > IIT_REFS(task_elem));
1931 ipc_importance_task_reference_internal(task_elem);
1932 }
1933 } else {
1934 assert(IIT_REFS_MAX > IIT_REFS(task_elem));
1935 ipc_importance_task_reference_internal(task_elem);
1936 }
1937 ipc_importance_unlock();
1938 return task_elem;
1939 }
1940 ipc_importance_unlock();
1941
1942 if (!first_pass) {
1943 return IIT_NULL;
1944 }
1945 first_pass = FALSE;
1946
1947 /* Need to make one - may race with others (be prepared to drop) */
1948 task_elem = (ipc_importance_task_t)zalloc(ipc_importance_task_zone);
1949 if (IIT_NULL == task_elem) {
1950 goto retry;
1951 }
1952
1953 task_elem->iit_bits = IIE_TYPE_TASK | 2; /* one for task, one for return/made */
1954 task_elem->iit_made = (made) ? 1 : 0;
1955 task_elem->iit_task = task; /* take actual ref when we're sure */
1956 task_elem->iit_updateq = NULL;
1957 task_elem->iit_receiver = 0;
1958 task_elem->iit_denap = 0;
1959 task_elem->iit_donor = 0;
1960 task_elem->iit_live_donor = 0;
1961 task_elem->iit_updatepolicy = 0;
1962 task_elem->iit_reserved = 0;
1963 task_elem->iit_filelocks = 0;
1964 task_elem->iit_updatetime = 0;
1965 task_elem->iit_transitions = 0;
1966 task_elem->iit_assertcnt = 0;
1967 task_elem->iit_externcnt = 0;
1968 task_elem->iit_externdrop = 0;
1969 task_elem->iit_legacy_externcnt = 0;
1970 task_elem->iit_legacy_externdrop = 0;
1971 #if IIE_REF_DEBUG
1972 ipc_importance_counter_init(&task_elem->iit_elem);
1973 #endif
1974 queue_init(&task_elem->iit_kmsgs);
1975 queue_init(&task_elem->iit_inherits);
1976
1977 ipc_importance_lock();
1978 if (!task->active) {
1979 ipc_importance_unlock();
1980 zfree(ipc_importance_task_zone, task_elem);
1981 return IIT_NULL;
1982 }
1983
1984 /* did we lose the race? */
1985 if (IIT_NULL != task->task_imp_base) {
1986 ipc_importance_unlock();
1987 zfree(ipc_importance_task_zone, task_elem);
1988 goto retry;
1989 }
1990
1991 /* we won the race */
1992 task->task_imp_base = task_elem;
1993 task_reference(task);
1994 #if DEVELOPMENT || DEBUG
1995 queue_enter(&global_iit_alloc_queue, task_elem, ipc_importance_task_t, iit_allocation);
1996 task_importance_update_owner_info(task);
1997 #endif
1998 ipc_importance_unlock();
1999
2000 return task_elem;
2001 }
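
/*
 * Usage sketch (illustrative only; hypothetical caller, not part of this
 * file's logic): a caller wanting a plain internal reference on a task's
 * importance element passes made == FALSE and later drops it:
 *
 *	ipc_importance_task_t imp = ipc_importance_for_task(task, FALSE);
 *	if (IIT_NULL != imp) {
 *		... inspect or boost imp ...
 *		ipc_importance_task_release(imp);
 *	}
 *
 * Passing made == TRUE instead returns a "made" reference intended to be
 * donated to the voucher system.
 */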
2002
2003 #if DEVELOPMENT || DEBUG
2004 void
2005 task_importance_update_owner_info(task_t task)
2006 {
2007 if (task != TASK_NULL && task->task_imp_base != IIT_NULL) {
2008 ipc_importance_task_t task_elem = task->task_imp_base;
2009
2010 task_elem->iit_bsd_pid = task_pid(task);
2011 if (task->bsd_info) {
2012 strncpy(&task_elem->iit_procname[0], proc_name_address(task->bsd_info), 16);
2013 task_elem->iit_procname[16] = '\0';
2014 } else {
2015 strncpy(&task_elem->iit_procname[0], "unknown", 16);
2016 }
2017 }
2018 }
2019 #endif
2020
2021 /*
2022 * Routine: ipc_importance_reset_locked
2023 * Purpose:
2024 * Reset a task's IPC importance (the task is going away or exec'ing)
2025 *
2026 * Remove the donor bit and legacy externalized assertions from the
2027 * current task importance and see if that wipes out downstream donations.
2028 * Conditions:
2029 * importance lock held.
2030 */
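
/*
 * Worked example of the legacy-boost removal below (illustrative; assumes
 * IIT_EXTERN()/IIT_LEGACY_EXTERN() are the net counts, externcnt minus
 * externdrop): with iit_externcnt = 5, iit_externdrop = 1,
 * iit_legacy_externcnt = 2, iit_legacy_externdrop = 0 and iit_assertcnt = 6,
 * subtracting the legacy counts leaves iit_externcnt = 3, iit_externdrop = 1,
 * so IIT_EXTERN() == 2 < iit_assertcnt and iit_assertcnt drops by
 * IIT_LEGACY_EXTERN() == 2 to 4.
 */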
2031
2032 static void
2033 ipc_importance_reset_locked(ipc_importance_task_t task_imp, boolean_t donor)
2034 {
2035 boolean_t before_donor, after_donor;
2036
2037 /* remove the donor bit, live-donor bit and externalized boosts */
2038 before_donor = ipc_importance_task_is_donor(task_imp);
2039 if (donor) {
2040 task_imp->iit_donor = 0;
2041 }
2042 assert(IIT_LEGACY_EXTERN(task_imp) <= IIT_EXTERN(task_imp));
2043 assert(task_imp->iit_legacy_externcnt <= task_imp->iit_externcnt);
2044 assert(task_imp->iit_legacy_externdrop <= task_imp->iit_externdrop);
2045 task_imp->iit_externcnt -= task_imp->iit_legacy_externcnt;
2046 task_imp->iit_externdrop -= task_imp->iit_legacy_externdrop;
2047
2048 /* assert(IIT_LEGACY_EXTERN(task_imp) <= task_imp->iit_assertcnt); */
2049 if (IIT_EXTERN(task_imp) < task_imp->iit_assertcnt) {
2050 task_imp->iit_assertcnt -= IIT_LEGACY_EXTERN(task_imp);
2051 } else {
2052 task_imp->iit_assertcnt = IIT_EXTERN(task_imp);
2053 }
2054 task_imp->iit_legacy_externcnt = 0;
2055 task_imp->iit_legacy_externdrop = 0;
2056 after_donor = ipc_importance_task_is_donor(task_imp);
2057
2058 #if DEVELOPMENT || DEBUG
2059 if (task_imp->iit_assertcnt > 0 && task_imp->iit_live_donor) {
2060 printf("Live donor task %s[%d] still has %d importance assertions after reset\n",
2061 task_imp->iit_procname, task_imp->iit_bsd_pid, task_imp->iit_assertcnt);
2062 }
2063 #endif
2064
2065 /* propagate a downstream drop if there was a change in donor status */
2066 if (after_donor != before_donor) {
2067 ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, FALSE);
2068 }
2069 }
2070
2071 /*
2072 * Routine: ipc_importance_reset
2073 * Purpose:
2074 * Reset a task's IPC importance
2075 *
2076 * The task is being reset, although staying around. Arrange to have the
2077 * external state of the task reset from the importance.
2078 * Conditions:
2079 * importance lock not held.
2080 */
2081
2082 void
2083 ipc_importance_reset(ipc_importance_task_t task_imp, boolean_t donor)
2084 {
2085 if (IIT_NULL == task_imp) {
2086 return;
2087 }
2088 ipc_importance_lock();
2089 ipc_importance_reset_locked(task_imp, donor);
2090 ipc_importance_unlock();
2091 }
2092
2093 /*
2094 * Routine: ipc_importance_disconnect_task
2095 * Purpose:
2096 * Disconnect a task from its importance.
2097 *
2098 * Clear the task pointer from the importance and drop the
2099 * reference the task held on the importance object. Before
2100 * doing that, reset the effects the current task holds on
2101 * the importance and see if that wipes out downstream donations.
2102 *
2103 * We allow the upstream boosts to continue to affect downstream
2104 * even though the local task is being effectively pulled from
2105 * the chain.
2106 * Conditions:
2107 * Nothing locked.
2108 */
2109 void
2110 ipc_importance_disconnect_task(task_t task)
2111 {
2112 ipc_importance_task_t task_imp;
2113
2114 task_lock(task);
2115 ipc_importance_lock();
2116 task_imp = task->task_imp_base;
2117
2118 /* did somebody beat us to it? */
2119 if (IIT_NULL == task_imp) {
2120 ipc_importance_unlock();
2121 task_unlock(task);
2122 return;
2123 }
2124
2125 /* disconnect the task from this importance */
2126 assert(task_imp->iit_task == task);
2127 task_imp->iit_task = TASK_NULL;
2128 task->task_imp_base = IIT_NULL;
2129 task_unlock(task);
2130
2131 /* reset the effects the current task holds on the importance */
2132 ipc_importance_reset_locked(task_imp, TRUE);
2133
2134 ipc_importance_task_release_locked(task_imp);
2135 /* importance unlocked */
2136
2137 /* deallocate the task now that the importance is unlocked */
2138 task_deallocate(task);
2139 }
2140
2141 /*
2142 * Routine: ipc_importance_exec_switch_task
2143 * Purpose:
2144 * Switch importance task base from old task to new task in exec.
2145 *
2146 * Create an ipc importance linkage from old task to new task.
2147 * Once the linkage is created, switch the importance task base
2148 * from old task to new task. After the switch, the linkage will
2149 * represent importance linkage from new task to old task with
2150 * watch port importance inheritance linked to new task.
2151 * Conditions:
2152 * Nothing locked.
2153 * Returns a reference on importance inherit.
2154 */
2155 ipc_importance_inherit_t
2156 ipc_importance_exec_switch_task(
2157 task_t old_task,
2158 task_t new_task)
2159 {
2160 ipc_importance_inherit_t inherit = III_NULL;
2161 ipc_importance_task_t old_task_imp = IIT_NULL;
2162 ipc_importance_task_t new_task_imp = IIT_NULL;
2163
2164 task_importance_reset(old_task);
2165
2166 /* Create an importance linkage from old_task to new_task */
2167 inherit = ipc_importance_inherit_from_task(old_task, new_task);
2168
2169 /* Switch task importance base from old task to new task */
2170 ipc_importance_lock();
2171
2172 old_task_imp = old_task->task_imp_base;
2173 new_task_imp = new_task->task_imp_base;
2174
2175 old_task_imp->iit_task = new_task;
2176 new_task_imp->iit_task = old_task;
2177
2178 old_task->task_imp_base = new_task_imp;
2179 new_task->task_imp_base = old_task_imp;
2180
2181 #if DEVELOPMENT || DEBUG
2182 /*
2183 * Update the pid and proc name for the importance base, if any
2184 */
2185 task_importance_update_owner_info(new_task);
2186 #endif
2187 ipc_importance_unlock();
2188
2189 return inherit;
2190 }
2191
2192 /*
2193 * Routine: ipc_importance_check_circularity
2194 * Purpose:
2195 * Check if queueing "port" in a message for "dest"
2196 * would create a circular group of ports and messages.
2197 *
2198 * If no circularity (FALSE returned), then "port"
2199 * is changed from "in limbo" to "in transit".
2200 *
2201 * That is, we want to set port->ip_destination == dest,
2202 * but guaranteeing that this doesn't create a circle
2203 * port->ip_destination->ip_destination->... == port
2204 *
2205 * Additionally, if port was successfully changed to "in transit",
2206 * propagate boost assertions from the "in limbo" port to all
2207 * the ports in the chain, and, if the destination task accepts
2208 * boosts, to the destination task.
2209 *
2210 * Conditions:
2211 * No ports locked. References held for "port" and "dest".
2212 */
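
/*
 * Illustrative example (not invoked anywhere): suppose port A is already in
 * transit to port B (A->ip_destination == B). Queueing B in a message
 * destined for A would close the loop B -> A -> B, so
 * ipc_importance_check_circularity(B, A) returns TRUE and leaves B in limbo.
 * In the non-circular case the routine returns FALSE, marks "port" in
 * transit, and pushes its boost assertions down the chain toward the
 * receiving task.
 */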
2213
2214 boolean_t
2215 ipc_importance_check_circularity(
2216 ipc_port_t port,
2217 ipc_port_t dest)
2218 {
2219 ipc_importance_task_t imp_task = IIT_NULL;
2220 ipc_importance_task_t release_imp_task = IIT_NULL;
2221 boolean_t imp_lock_held = FALSE;
2222 int assertcnt = 0;
2223 ipc_port_t base;
2224 struct turnstile *send_turnstile = TURNSTILE_NULL;
2225 struct task_watchport_elem *watchport_elem = NULL;
2226
2227 assert(port != IP_NULL);
2228 assert(dest != IP_NULL);
2229
2230 if (port == dest) {
2231 return TRUE;
2232 }
2233 base = dest;
2234
2235 /* Check if destination needs a turnstile */
2236 ipc_port_send_turnstile_prepare(dest);
2237
2238 /* port is in limbo, so donation status is safe to latch */
2239 if (port->ip_impdonation != 0) {
2240 imp_lock_held = TRUE;
2241 ipc_importance_lock();
2242 }
2243
2244 /*
2245 * First try a quick check that can run in parallel.
2246 * No circularity if dest is not in transit.
2247 */
2248 ip_lock(port);
2249
2250 /*
2251 * Even if port is just carrying assertions for others,
2252 * we need the importance lock.
2253 */
2254 if (port->ip_impcount > 0 && !imp_lock_held) {
2255 if (!ipc_importance_lock_try()) {
2256 ip_unlock(port);
2257 ipc_importance_lock();
2258 ip_lock(port);
2259 }
2260 imp_lock_held = TRUE;
2261 }
2262
2263 if (ip_lock_try(dest)) {
2264 if (!ip_active(dest) ||
2265 (dest->ip_receiver_name != MACH_PORT_NULL) ||
2266 (dest->ip_destination == IP_NULL)) {
2267 goto not_circular;
2268 }
2269
2270 /* dest is in transit; further checking necessary */
2271
2272 ip_unlock(dest);
2273 }
2274 ip_unlock(port);
2275
2276 /*
2277 * We're about to pay the cost to serialize,
2278 * just go ahead and grab importance lock.
2279 */
2280 if (!imp_lock_held) {
2281 ipc_importance_lock();
2282 imp_lock_held = TRUE;
2283 }
2284
2285 ipc_port_multiple_lock(); /* massive serialization */
2286
2287 /*
2288 * Search for the end of the chain (a port not in transit),
2289 * acquiring locks along the way.
2290 */
2291
2292 for (;;) {
2293 ip_lock(base);
2294
2295 if (!ip_active(base) ||
2296 (base->ip_receiver_name != MACH_PORT_NULL) ||
2297 (base->ip_destination == IP_NULL)) {
2298 break;
2299 }
2300
2301 base = base->ip_destination;
2302 }
2303
2304 /* all ports in chain from dest to base, inclusive, are locked */
2305
2306 if (port == base) {
2307 /* circularity detected! */
2308
2309 ipc_port_multiple_unlock();
2310
2311 /* port (== base) is in limbo */
2312
2313 require_ip_active(port);
2314 assert(port->ip_receiver_name == MACH_PORT_NULL);
2315 assert(port->ip_destination == IP_NULL);
2316
2317 base = dest;
2318 while (base != IP_NULL) {
2319 ipc_port_t next;
2320
2321 /* base is in transit or in limbo */
2322
2323 require_ip_active(base);
2324 assert(base->ip_receiver_name == MACH_PORT_NULL);
2325
2326 next = base->ip_destination;
2327 ip_unlock(base);
2328 base = next;
2329 }
2330
2331 if (imp_lock_held) {
2332 ipc_importance_unlock();
2333 }
2334
2335 ipc_port_send_turnstile_complete(dest);
2336 return TRUE;
2337 }
2338
2339 /*
2340 * The guarantee: lock port while the entire chain is locked.
2341 * Once port is locked, we can take a reference to dest,
2342 * add port to the chain, and unlock everything.
2343 */
2344
2345 ip_lock(port);
2346 ipc_port_multiple_unlock();
2347
2348 not_circular:
2349 /* port is in limbo */
2350 imq_lock(&port->ip_messages);
2351
2352 require_ip_active(port);
2353 assert(port->ip_receiver_name == MACH_PORT_NULL);
2354 assert(port->ip_destination == IP_NULL);
2355
2356 /* Port is being enqueued in a kmsg; remove the watchport boost in order to push on the destination port */
2357 watchport_elem = ipc_port_clear_watchport_elem_internal(port);
2358
2359 /* Check if the port is being enqueued as a part of sync bootstrap checkin */
2360 if (dest->ip_specialreply && dest->ip_sync_bootstrap_checkin) {
2361 port->ip_sync_bootstrap_checkin = 1;
2362 }
2363
2364 ip_reference(dest);
2365 port->ip_destination = dest;
2366
2367 /* must have been in limbo or still bound to a task */
2368 assert(port->ip_tempowner != 0);
2369
2370 /*
2371 * We delayed dropping assertions from a specific task.
2372 * Cache that info now (we'll drop assertions and the
2373 * task reference below).
2374 */
2375 release_imp_task = port->ip_imp_task;
2376 if (IIT_NULL != release_imp_task) {
2377 port->ip_imp_task = IIT_NULL;
2378 }
2379 assertcnt = port->ip_impcount;
2380
2381 /* take the port out of limbo w.r.t. assertions */
2382 port->ip_tempowner = 0;
2383
2384 /*
2385 * Set up linkage for the source port if it has a send turnstile, i.e. it has
2386 * a thread waiting in send, has a port enqueued in it, or has a sync IPC
2387 * push from a special reply port.
2388 */
2389 if (port_send_turnstile(port)) {
2390 send_turnstile = turnstile_prepare((uintptr_t)port,
2391 port_send_turnstile_address(port),
2392 TURNSTILE_NULL, TURNSTILE_SYNC_IPC);
2393
2394 turnstile_update_inheritor(send_turnstile, port_send_turnstile(dest),
2395 (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE));
2396
2397 /* update complete and turnstile complete called after dropping all locks */
2398 }
2399 imq_unlock(&port->ip_messages);
2400
2401 /* now unlock chain */
2402
2403 ip_unlock(port);
2404
2405 for (;;) {
2406 ipc_port_t next;
2407 /* every port along the chain tracks assertions behind it */
2408 ipc_port_impcount_delta(dest, assertcnt, base);
2409
2410 if (dest == base) {
2411 break;
2412 }
2413
2414 /* port is in transit */
2415
2416 require_ip_active(dest);
2417 assert(dest->ip_receiver_name == MACH_PORT_NULL);
2418 assert(dest->ip_destination != IP_NULL);
2419 assert(dest->ip_tempowner == 0);
2420
2421 next = dest->ip_destination;
2422 ip_unlock(dest);
2423 dest = next;
2424 }
2425
2426 /* base is not in transit */
2427 assert(!ip_active(base) ||
2428 (base->ip_receiver_name != MACH_PORT_NULL) ||
2429 (base->ip_destination == IP_NULL));
2430
2431 /*
2432 * Find the task to boost (if any).
2433 * We will boost "through" ports that don't know
2434 * about inheritance to deliver receive rights that
2435 * do.
2436 */
2437 if (ip_active(base) && (assertcnt > 0)) {
2438 assert(imp_lock_held);
2439 if (base->ip_tempowner != 0) {
2440 if (IIT_NULL != base->ip_imp_task) {
2441 /* specified tempowner task */
2442 imp_task = base->ip_imp_task;
2443 assert(ipc_importance_task_is_any_receiver_type(imp_task));
2444 }
2445 /* otherwise don't boost current task */
2446 } else if (base->ip_receiver_name != MACH_PORT_NULL) {
2447 ipc_space_t space = base->ip_receiver;
2448
2449 /* only spaces with boost-accepting tasks */
2450 if (space->is_task != TASK_NULL &&
2451 ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) {
2452 imp_task = space->is_task->task_imp_base;
2453 }
2454 }
2455
2456 /* take reference before unlocking base */
2457 if (imp_task != IIT_NULL) {
2458 ipc_importance_task_reference(imp_task);
2459 }
2460 }
2461
2462 ip_unlock(base);
2463
2464 /* All locks dropped, call turnstile_update_inheritor_complete for source port's turnstile */
2465 if (send_turnstile) {
2466 turnstile_update_inheritor_complete(send_turnstile, TURNSTILE_INTERLOCK_NOT_HELD);
2467
2468 /* Take the mq lock to call turnstile complete */
2469 imq_lock(&port->ip_messages);
2470 turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), NULL, TURNSTILE_SYNC_IPC);
2471 send_turnstile = TURNSTILE_NULL;
2472 imq_unlock(&port->ip_messages);
2473 turnstile_cleanup();
2474 }
2475
2476 /*
2477 * Transfer assertions now that the ports are unlocked.
2478 * Avoid extra overhead if transferring to/from the same task.
2479 *
2480 * NOTE: If a transfer is occurring, the new assertions will
2481 * be added to imp_task BEFORE the importance lock is unlocked.
2482 * This is critical - to avoid decrements coming from the kmsgs
2483 * beating the increment to the task.
2484 */
2485 boolean_t transfer_assertions = (imp_task != release_imp_task);
2486
2487 if (imp_task != IIT_NULL) {
2488 assert(imp_lock_held);
2489 if (transfer_assertions) {
2490 ipc_importance_task_hold_internal_assertion_locked(imp_task, assertcnt);
2491 }
2492 }
2493
2494 if (release_imp_task != IIT_NULL) {
2495 assert(imp_lock_held);
2496 if (transfer_assertions) {
2497 ipc_importance_task_drop_internal_assertion_locked(release_imp_task, assertcnt);
2498 }
2499 }
2500
2501 if (imp_lock_held) {
2502 ipc_importance_unlock();
2503 }
2504
2505 if (imp_task != IIT_NULL) {
2506 ipc_importance_task_release(imp_task);
2507 }
2508
2509 if (release_imp_task != IIT_NULL) {
2510 ipc_importance_task_release(release_imp_task);
2511 }
2512
2513 if (watchport_elem) {
2514 task_watchport_elem_deallocate(watchport_elem);
2515 }
2516
2517 return FALSE;
2518 }
2519
2520 /*
2521 * Routine: ipc_importance_send
2522 * Purpose:
2523 * Post the importance voucher attribute [if sent] or a static
2524 * importance boost depending upon options and conditions.
2525 * Conditions:
2526 * Destination port locked on entry and exit, may be dropped during the call.
2527 * Returns:
2528 * A boolean identifying if the port lock was temporarily dropped.
2529 */
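
/*
 * Sender-side sketch (illustrative userspace usage, not kernel code; msg is
 * a placeholder message buffer): a client that does not want this send to
 * donate importance can pass MACH_SEND_NOIMPORTANCE, which makes this
 * routine return without donating:
 *
 *	mach_msg(&msg.header, MACH_SEND_MSG | MACH_SEND_NOIMPORTANCE,
 *	    msg.header.msgh_size, 0, MACH_PORT_NULL,
 *	    MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
 */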
2530 boolean_t
2531 ipc_importance_send(
2532 ipc_kmsg_t kmsg,
2533 mach_msg_option_t option)
2534 {
2535 ipc_port_t port = kmsg->ikm_header->msgh_remote_port;
2536 boolean_t port_lock_dropped = FALSE;
2537 ipc_importance_elem_t elem;
2538 task_t task;
2539 ipc_importance_task_t task_imp;
2540 kern_return_t kr;
2541
2542 assert(IP_VALID(port));
2543
2544 /* If no donation to be made, return quickly */
2545 if ((port->ip_impdonation == 0) ||
2546 (option & MACH_SEND_NOIMPORTANCE) != 0) {
2547 return port_lock_dropped;
2548 }
2549
2550 task = current_task();
2551
2552 /* If forced sending a static boost, go update the port */
2553 if ((option & MACH_SEND_IMPORTANCE) != 0) {
2554 /* acquire the importance lock while trying to hang on to port lock */
2555 if (!ipc_importance_lock_try()) {
2556 port_lock_dropped = TRUE;
2557 ip_unlock(port);
2558 ipc_importance_lock();
2559 }
2560 goto portupdate;
2561 }
2562
2563 task_imp = task->task_imp_base;
2564 assert(IIT_NULL != task_imp);
2565
2566 /* If the sender can never donate importance, nothing to do */
2567 if (ipc_importance_task_is_never_donor(task_imp)) {
2568 return port_lock_dropped;
2569 }
2570
2571 elem = IIE_NULL;
2572
2573 /* If importance receiver and passing a voucher, look for importance in there */
2574 if (IP_VALID(kmsg->ikm_voucher) &&
2575 ipc_importance_task_is_marked_receiver(task_imp)) {
2576 mach_voucher_attr_value_handle_t vals[MACH_VOUCHER_ATTR_VALUE_MAX_NESTED];
2577 mach_voucher_attr_value_handle_array_size_t val_count;
2578 ipc_voucher_t voucher;
2579
2580 assert(ip_kotype(kmsg->ikm_voucher) == IKOT_VOUCHER);
2581 voucher = (ipc_voucher_t)kmsg->ikm_voucher->ip_kobject;
2582
2583 /* check to see if the voucher has an importance attribute */
2584 val_count = MACH_VOUCHER_ATTR_VALUE_MAX_NESTED;
2585 kr = mach_voucher_attr_control_get_values(ipc_importance_control, voucher,
2586 vals, &val_count);
2587 assert(KERN_SUCCESS == kr);
2588
2589 /*
2590 * Only use importance associated with our task (either directly
2591 * or through an inherit that donates to our task).
2592 */
2593 if (0 < val_count) {
2594 ipc_importance_elem_t check_elem;
2595
2596 check_elem = (ipc_importance_elem_t)vals[0];
2597 assert(IIE_NULL != check_elem);
2598 if (IIE_TYPE_INHERIT == IIE_TYPE(check_elem)) {
2599 ipc_importance_inherit_t inherit;
2600 inherit = (ipc_importance_inherit_t) check_elem;
2601 if (inherit->iii_to_task == task_imp) {
2602 elem = check_elem;
2603 }
2604 } else if (check_elem == (ipc_importance_elem_t)task_imp) {
2605 elem = check_elem;
2606 }
2607 }
2608 }
2609
2610 /* If we haven't found an importance attribute to send yet, use the task's */
2611 if (IIE_NULL == elem) {
2612 elem = (ipc_importance_elem_t)task_imp;
2613 }
2614
2615 /* take a reference for the message to hold */
2616 ipc_importance_reference_internal(elem);
2617
2618 /* acquire the importance lock while trying to hang on to port lock */
2619 if (!ipc_importance_lock_try()) {
2620 port_lock_dropped = TRUE;
2621 ip_unlock(port);
2622 ipc_importance_lock();
2623 }
2624
2625 /* link kmsg onto the donor element propagation chain */
2626 ipc_importance_kmsg_link(kmsg, elem);
2627 /* elem reference transferred to kmsg */
2628
2629 incr_ref_counter(elem->iie_kmsg_refs_added);
2630
2631 /* If the sender isn't currently a donor, no need to apply boost */
2632 if (!ipc_importance_task_is_donor(task_imp)) {
2633 ipc_importance_unlock();
2634
2635 /* re-acquire port lock, if needed */
2636 if (TRUE == port_lock_dropped) {
2637 ip_lock(port);
2638 }
2639
2640 return port_lock_dropped;
2641 }
2642
2643 portupdate:
2644 /* Mark the fact that we are (currently) donating through this message */
2645 kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;
2646
2647 /*
2648 * If we need to relock the port, do it with the importance still locked.
2649 * This assures we get to add the importance boost through the port to
2650 * the task BEFORE anyone else can attempt to undo that operation if
2651 * the sender lost donor status.
2652 */
2653 if (TRUE == port_lock_dropped) {
2654 ip_lock(port);
2655 }
2656
2657 ipc_importance_assert_held();
2658
2659 #if IMPORTANCE_TRACE
2660 if (kdebug_enable) {
2661 mach_msg_max_trailer_t *dbgtrailer = (mach_msg_max_trailer_t *)
2662 ((vm_offset_t)kmsg->ikm_header + round_msg(kmsg->ikm_header->msgh_size));
2663 unsigned int sender_pid = dbgtrailer->msgh_audit.val[5];
2664 mach_msg_id_t imp_msgh_id = kmsg->ikm_header->msgh_id;
2665 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_SEND)) | DBG_FUNC_START,
2666 task_pid(task), sender_pid, imp_msgh_id, 0, 0);
2667 }
2668 #endif /* IMPORTANCE_TRACE */
2669
2670 mach_port_delta_t delta = 1;
2671 boolean_t need_port_lock;
2672 task_imp = IIT_NULL;
2673
2674 /* adjust port boost count (with importance and port locked) */
2675 need_port_lock = ipc_port_importance_delta_internal(port, IPID_OPTION_NORMAL, &delta, &task_imp);
2676 /* hold a reference on task_imp */
2677
2678 /* if we need to adjust a task importance as a result, apply that here */
2679 if (IIT_NULL != task_imp && delta != 0) {
2680 assert(delta == 1);
2681
2682 /* if this results in a change of state, propagate the transition */
2683 if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_HOLD, delta)) {
2684 /* can't hold the port lock during task transition(s) */
2685 if (!need_port_lock) {
2686 need_port_lock = TRUE;
2687 ip_unlock(port);
2688 }
2689 ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_HOLD, TRUE);
2690 }
2691 }
2692
2693 if (task_imp) {
2694 ipc_importance_task_release_locked(task_imp);
2695 /* importance unlocked */
2696 } else {
2697 ipc_importance_unlock();
2698 }
2699
2700 if (need_port_lock) {
2701 port_lock_dropped = TRUE;
2702 ip_lock(port);
2703 }
2704
2705 return port_lock_dropped;
2706 }
2707
2708 /*
2709 * Routine: ipc_importance_inherit_from_kmsg
2710 * Purpose:
2711 * Create a "made" reference for an importance attribute representing
2712 * an inheritance between the sender of a message (if linked) and the
2713 * current task importance. If the message is not linked, a static
2714 * boost may be created, based on the boost state of the message.
2715 *
2716 * Any transfer from kmsg linkage to inherit linkage must be atomic.
2717 *
2718 * If the task is inactive, there isn't any need to return a new reference.
2719 * Conditions:
2720 * Nothing locked on entry. May block.
2721 */
2722 static ipc_importance_inherit_t
2723 ipc_importance_inherit_from_kmsg(ipc_kmsg_t kmsg)
2724 {
2725 ipc_importance_task_t task_imp = IIT_NULL;
2726 ipc_importance_elem_t from_elem = kmsg->ikm_importance;
2727 ipc_importance_elem_t elem;
2728 task_t task_self = current_task();
2729
2730 ipc_port_t port = kmsg->ikm_header->msgh_remote_port;
2731 ipc_importance_inherit_t inherit = III_NULL;
2732 ipc_importance_inherit_t alloc = III_NULL;
2733 boolean_t cleared_self_donation = FALSE;
2734 boolean_t donating;
2735 uint32_t depth = 1;
2736
2737 /* The kmsg must have an importance donor or static boost to proceed */
2738 if (IIE_NULL == kmsg->ikm_importance &&
2739 !MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) {
2740 return III_NULL;
2741 }
2742
2743 /*
2744 * No need to set up an inherit linkage if the dest isn't a receiver
2745 * of one type or the other.
2746 */
2747 if (!ipc_importance_task_is_any_receiver_type(task_self->task_imp_base)) {
2748 ipc_importance_lock();
2749 goto out_locked;
2750 }
2751
2752 /* Grab a reference on the importance of the destination */
2753 task_imp = ipc_importance_for_task(task_self, FALSE);
2754
2755 ipc_importance_lock();
2756
2757 if (IIT_NULL == task_imp) {
2758 goto out_locked;
2759 }
2760
2761 incr_ref_counter(task_imp->iit_elem.iie_task_refs_added_inherit_from);
2762
2763 /* If message is already associated with an inherit... */
2764 if (IIE_TYPE_INHERIT == IIE_TYPE(from_elem)) {
2765 ipc_importance_inherit_t from_inherit = (ipc_importance_inherit_t)from_elem;
2766
2767 /* already targeting our task? - just use it */
2768 if (from_inherit->iii_to_task == task_imp) {
2769 /* clear self-donation if not also present in inherit */
2770 if (!from_inherit->iii_donating &&
2771 MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) {
2772 kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
2773 cleared_self_donation = TRUE;
2774 }
2775 inherit = from_inherit;
2776 } else if (III_DEPTH_MAX == III_DEPTH(from_inherit)) {
2777 ipc_importance_task_t to_task;
2778 ipc_importance_elem_t unlinked_from;
2779
2780 /*
2781 * Chain too long. Switch to looking
2782 * directly at the from_inherit's to-task
2783 * as our source of importance.
2784 */
2785 to_task = from_inherit->iii_to_task;
2786 ipc_importance_task_reference(to_task);
2787 from_elem = (ipc_importance_elem_t)to_task;
2788 depth = III_DEPTH_RESET | 1;
2789
2790 /* Fixup the kmsg linkage to reflect change */
2791 unlinked_from = ipc_importance_kmsg_unlink(kmsg);
2792 assert(unlinked_from == (ipc_importance_elem_t)from_inherit);
2793 ipc_importance_kmsg_link(kmsg, from_elem);
2794 ipc_importance_inherit_release_locked(from_inherit);
2795 /* importance unlocked */
2796 ipc_importance_lock();
2797 } else {
2798 /* inheriting from an inherit */
2799 depth = from_inherit->iii_depth + 1;
2800 }
2801 }
2802
2803 /*
2804 * Don't allow a task to inherit from itself (would keep it permanently
2805 * boosted even if all other donors to the task went away).
2806 */
2807
2808 if (from_elem == (ipc_importance_elem_t)task_imp) {
2809 goto out_locked;
2810 }
2811
2812 /*
2813 * But if the message isn't associated with any linked source, it is
2814 * intended to be permanently boosting (static boost from kernel).
2815 * In that case DO let the process permanently boost itself.
2816 */
2817 if (IIE_NULL == from_elem) {
2818 assert(MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits));
2819 ipc_importance_task_reference_internal(task_imp);
2820 from_elem = (ipc_importance_elem_t)task_imp;
2821 }
2822
2823 /*
2824 * Now that we have the from_elem figured out,
2825 * check to see if we already have an inherit for this pairing
2826 */
2827 while (III_NULL == inherit) {
2828 inherit = ipc_importance_inherit_find(from_elem, task_imp, depth);
2829
2830 /* Do we have to allocate a new inherit */
2831 if (III_NULL == inherit) {
2832 if (III_NULL != alloc) {
2833 break;
2834 }
2835
2836 /* allocate space */
2837 ipc_importance_unlock();
2838 alloc = (ipc_importance_inherit_t)
2839 zalloc(ipc_importance_inherit_zone);
2840 ipc_importance_lock();
2841 }
2842 }
2843
2844 /* snapshot the donating status while we have importance locked */
2845 donating = MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits);
2846
2847 if (III_NULL != inherit) {
2848 /* We found one, piggyback on that */
2849 assert(0 < III_REFS(inherit));
2850 assert(0 < IIE_REFS(inherit->iii_from_elem));
2851 assert(inherit->iii_externcnt >= inherit->iii_made);
2852
2853 /* add in a made reference */
2854 if (0 == inherit->iii_made++) {
2855 assert(III_REFS_MAX > III_REFS(inherit));
2856 ipc_importance_inherit_reference_internal(inherit);
2857 }
2858
2859 /* Reflect the inherit's change of status into the task boosts */
2860 if (0 == III_EXTERN(inherit)) {
2861 assert(!inherit->iii_donating);
2862 inherit->iii_donating = donating;
2863 if (donating) {
2864 task_imp->iit_externcnt += inherit->iii_externcnt;
2865 task_imp->iit_externdrop += inherit->iii_externdrop;
2866 }
2867 } else {
2868 assert(donating == inherit->iii_donating);
2869 }
2870
2871 /* add in an external reference for this use of the inherit */
2872 inherit->iii_externcnt++;
2873 } else {
2874 /* initialize the previously allocated space */
2875 inherit = alloc;
2876 inherit->iii_bits = IIE_TYPE_INHERIT | 1;
2877 inherit->iii_made = 1;
2878 inherit->iii_externcnt = 1;
2879 inherit->iii_externdrop = 0;
2880 inherit->iii_depth = depth;
2881 inherit->iii_to_task = task_imp;
2882 inherit->iii_from_elem = IIE_NULL;
2883 queue_init(&inherit->iii_kmsgs);
2884
2885 if (donating) {
2886 inherit->iii_donating = TRUE;
2887 } else {
2888 inherit->iii_donating = FALSE;
2889 }
2890
2891 /*
2892 * Chain our new inherit on the element it inherits from.
2893 * The new inherit takes our reference on from_elem.
2894 */
2895 ipc_importance_inherit_link(inherit, from_elem);
2896
2897 #if IIE_REF_DEBUG
2898 ipc_importance_counter_init(&inherit->iii_elem);
2899 from_elem->iie_kmsg_refs_inherited++;
2900 task_imp->iit_elem.iie_task_refs_inherited++;
2901 #endif
2902 }
2903
2904 out_locked:
2905 /*
2906 * for those paths that came straight here: snapshot the donating status
2907 * (this should match previous snapshot for other paths).
2908 */
2909 donating = MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits);
2910
2911 /* unlink the kmsg inheritance (if any) */
2912 elem = ipc_importance_kmsg_unlink(kmsg);
2913 assert(elem == from_elem);
2914
2915 /* If found inherit and donating, reflect that in the task externcnt */
2916 if (III_NULL != inherit && donating) {
2917 task_imp->iit_externcnt++;
2918 /* The owner of receive right might have changed, take the internal assertion */
2919 ipc_importance_task_hold_internal_assertion_locked(task_imp, 1);
2920 /* may have dropped and retaken importance lock */
2921 }
2922
2923 /* If we didn't create a new inherit, we have some resources to release */
2924 if (III_NULL == inherit || inherit != alloc) {
2925 if (IIE_NULL != from_elem) {
2926 if (III_NULL != inherit) {
2927 incr_ref_counter(from_elem->iie_kmsg_refs_coalesced);
2928 } else {
2929 incr_ref_counter(from_elem->iie_kmsg_refs_dropped);
2930 }
2931 ipc_importance_release_locked(from_elem);
2932 /* importance unlocked */
2933 } else {
2934 ipc_importance_unlock();
2935 }
2936
2937 if (IIT_NULL != task_imp) {
2938 if (III_NULL != inherit) {
2939 incr_ref_counter(task_imp->iit_elem.iie_task_refs_coalesced);
2940 }
2941 ipc_importance_task_release(task_imp);
2942 }
2943
2944 if (III_NULL != alloc) {
2945 zfree(ipc_importance_inherit_zone, alloc);
2946 }
2947 } else {
2948 /* from_elem and task_imp references transferred to new inherit */
2949 ipc_importance_unlock();
2950 }
2951
2952 /*
2953 * decrement port boost count
2954 * This is OK to do without the importance lock as we atomically
2955 * unlinked the kmsg and snapshot the donating state while holding
2956 * the importance lock
2957 */
2958 if (donating || cleared_self_donation) {
2959 ip_lock(port);
2960 /* drop importance from port and destination task */
2961 if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == FALSE) {
2962 ip_unlock(port);
2963 }
2964 }
2965
2966 if (III_NULL != inherit) {
2967 /* have an associated importance attr, even if currently not donating */
2968 kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;
2969 } else {
2970 /* we won't have an importance attribute associated with our message */
2971 kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
2972 }
2973
2974 return inherit;
2975 }
2976
2977 /*
2978 * Routine: ipc_importance_inherit_from_task
2979 * Purpose:
2980 * Create a reference for an importance attribute representing
2981 * an inheritance between the to_task and from_task. The iii
2982 * created will be marked as III_FLAGS_FOR_OTHERS.
2983 *
2984 * It will not dedup any iii which are not marked as III_FLAGS_FOR_OTHERS.
2985 *
2986 * If the task is inactive, there isn't any need to return a new reference.
2987 * Conditions:
2988 * Nothing locked on entry. May block.
2989 * It should not be called from the voucher subsystem.
2990 */
2991 static ipc_importance_inherit_t
2992 ipc_importance_inherit_from_task(
2993 task_t from_task,
2994 task_t to_task)
2995 {
2996 ipc_importance_task_t to_task_imp = IIT_NULL;
2997 ipc_importance_task_t from_task_imp = IIT_NULL;
2998 ipc_importance_elem_t from_elem = IIE_NULL;
2999
3000 ipc_importance_inherit_t inherit = III_NULL;
3001 ipc_importance_inherit_t alloc = III_NULL;
3002 boolean_t donating;
3003 uint32_t depth = 1;
3004
3005 to_task_imp = ipc_importance_for_task(to_task, FALSE);
3006 from_task_imp = ipc_importance_for_task(from_task, FALSE);
3007 from_elem = (ipc_importance_elem_t)from_task_imp;
3008
3009 ipc_importance_lock();
3010
3011 if (IIT_NULL == to_task_imp || IIT_NULL == from_task_imp) {
3012 goto out_locked;
3013 }
3014
3015 /*
3016 * No need to set up an inherit linkage if the to_task or from_task
3017 * isn't a receiver of one type or the other.
3018 */
3019 if (!ipc_importance_task_is_any_receiver_type(to_task_imp) ||
3020 !ipc_importance_task_is_any_receiver_type(from_task_imp)) {
3021 goto out_locked;
3022 }
3023
3024 /* Do not allow to create a linkage to self */
3025 if (to_task_imp == from_task_imp) {
3026 goto out_locked;
3027 }
3028
3029 incr_ref_counter(to_task_imp->iit_elem.iie_task_refs_added_inherit_from);
3030 incr_ref_counter(from_elem->iie_kmsg_refs_added);
3031
3032 /*
3033 * Now that we have the from_elem figured out,
3034 * check to see if we already have an inherit for this pairing
3035 */
3036 while (III_NULL == inherit) {
3037 inherit = ipc_importance_inherit_find(from_elem, to_task_imp, depth);
3038
3039 /* Do we have to allocate a new inherit */
3040 if (III_NULL == inherit) {
3041 if (III_NULL != alloc) {
3042 break;
3043 }
3044
3045 /* allocate space */
3046 ipc_importance_unlock();
3047 alloc = (ipc_importance_inherit_t)
3048 zalloc(ipc_importance_inherit_zone);
3049 ipc_importance_lock();
3050 }
3051 }
3052
3053 /* snapshot the donating status while we have importance locked */
3054 donating = ipc_importance_task_is_donor(from_task_imp);
3055
3056 if (III_NULL != inherit) {
3057 /* We found one, piggyback on that */
3058 assert(0 < III_REFS(inherit));
3059 assert(0 < IIE_REFS(inherit->iii_from_elem));
3060
3061 /* Take a reference for inherit */
3062 assert(III_REFS_MAX > III_REFS(inherit));
3063 ipc_importance_inherit_reference_internal(inherit);
3064
3065 /* Reflect the inherit's change of status into the task boosts */
3066 if (0 == III_EXTERN(inherit)) {
3067 assert(!inherit->iii_donating);
3068 inherit->iii_donating = donating;
3069 if (donating) {
3070 to_task_imp->iit_externcnt += inherit->iii_externcnt;
3071 to_task_imp->iit_externdrop += inherit->iii_externdrop;
3072 }
3073 } else {
3074 assert(donating == inherit->iii_donating);
3075 }
3076
3077 /* add in an external reference for this use of the inherit */
3078 inherit->iii_externcnt++;
3079 } else {
3080 /* initialize the previously allocated space */
3081 inherit = alloc;
3082 inherit->iii_bits = IIE_TYPE_INHERIT | 1;
3083 inherit->iii_made = 0;
3084 inherit->iii_externcnt = 1;
3085 inherit->iii_externdrop = 0;
3086 inherit->iii_depth = depth;
3087 inherit->iii_to_task = to_task_imp;
3088 inherit->iii_from_elem = IIE_NULL;
3089 queue_init(&inherit->iii_kmsgs);
3090
3091 if (donating) {
3092 inherit->iii_donating = TRUE;
3093 } else {
3094 inherit->iii_donating = FALSE;
3095 }
3096
3097 /*
3098 * Chain our new inherit on the element it inherits from.
3099 * The new inherit takes our reference on from_elem.
3100 */
3101 ipc_importance_inherit_link(inherit, from_elem);
3102
3103 #if IIE_REF_DEBUG
3104 ipc_importance_counter_init(&inherit->iii_elem);
3105 from_elem->iie_kmsg_refs_inherited++;
3106 to_task_imp->iit_elem.iie_task_refs_inherited++;
3107 #endif
3108 }
3109
3110 out_locked:
3111
3112 /* If found inherit and donating, reflect that in the task externcnt */
3113 if (III_NULL != inherit && donating) {
3114 to_task_imp->iit_externcnt++;
3115 /* take the internal assertion */
3116 ipc_importance_task_hold_internal_assertion_locked(to_task_imp, 1);
3117 /* may have dropped and retaken importance lock */
3118 }
3119
3120 /* If we didn't create a new inherit, we have some resources to release */
3121 if (III_NULL == inherit || inherit != alloc) {
3122 if (IIE_NULL != from_elem) {
3123 if (III_NULL != inherit) {
3124 incr_ref_counter(from_elem->iie_kmsg_refs_coalesced);
3125 } else {
3126 incr_ref_counter(from_elem->iie_kmsg_refs_dropped);
3127 }
3128 ipc_importance_release_locked(from_elem);
3129 /* importance unlocked */
3130 } else {
3131 ipc_importance_unlock();
3132 }
3133
3134 if (IIT_NULL != to_task_imp) {
3135 if (III_NULL != inherit) {
3136 incr_ref_counter(to_task_imp->iit_elem.iie_task_refs_coalesced);
3137 }
3138 ipc_importance_task_release(to_task_imp);
3139 }
3140
3141 if (III_NULL != alloc) {
3142 zfree(ipc_importance_inherit_zone, alloc);
3143 }
3144 } else {
3145 /* from_elem and to_task_imp references transferred to new inherit */
3146 ipc_importance_unlock();
3147 }
3148
3149 return inherit;
3150 }
3151
3152 /*
3153 * Routine: ipc_importance_receive
3154 * Purpose:
3155 * Process importance attributes in a received message.
3156 *
3157 * If an importance voucher attribute was sent, transform
3158 * that into an attribute value reflecting the inheritance
3159 * from the sender to the receiver.
3160 *
3161 * If a static boost is received (or the receiver isn't taking
3162 * a voucher), export a legacy static boost.
3163 * Conditions:
3164 * Nothing locked.
3165 */
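
/*
 * Receiver-side sketch (illustrative userspace usage, not kernel code; buf
 * and port are placeholders): a receiver that wants boosts delivered as a
 * voucher attribute rather than a legacy static boost includes
 * MACH_RCV_VOUCHER in its receive options:
 *
 *	mach_msg(&buf.header, MACH_RCV_MSG | MACH_RCV_VOUCHER, 0,
 *	    sizeof(buf), port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
 *
 * and can then adopt or redeem the importance attribute carried by the
 * received voucher.
 */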
3166 void
3167 ipc_importance_receive(
3168 ipc_kmsg_t kmsg,
3169 mach_msg_option_t option)
3170 {
3171 int impresult = -1;
3172
3173 #if IMPORTANCE_TRACE || LEGACY_IMPORTANCE_DELIVERY
3174 task_t task_self = current_task();
3175 unsigned int sender_pid = ((mach_msg_max_trailer_t *)
3176 ((vm_offset_t)kmsg->ikm_header +
3177 round_msg(kmsg->ikm_header->msgh_size)))->msgh_audit.val[5];
3178 #endif
3179
3180 /* convert to a voucher with an inherit importance attribute? */
3181 if ((option & MACH_RCV_VOUCHER) != 0) {
3182 uint8_t recipes[2 * sizeof(ipc_voucher_attr_recipe_data_t) +
3183 sizeof(mach_voucher_attr_value_handle_t)];
3184 ipc_voucher_attr_raw_recipe_array_size_t recipe_size = 0;
3185 ipc_voucher_attr_recipe_t recipe = (ipc_voucher_attr_recipe_t)recipes;
3186 ipc_voucher_t recv_voucher;
3187 mach_voucher_attr_value_handle_t handle;
3188 ipc_importance_inherit_t inherit;
3189 kern_return_t kr;
3190
3191 /* set up recipe to copy the old voucher */
3192 if (IP_VALID(kmsg->ikm_voucher)) {
3193 ipc_voucher_t sent_voucher = (ipc_voucher_t)kmsg->ikm_voucher->ip_kobject;
3194
3195 recipe->key = MACH_VOUCHER_ATTR_KEY_ALL;
3196 recipe->command = MACH_VOUCHER_ATTR_COPY;
3197 recipe->previous_voucher = sent_voucher;
3198 recipe->content_size = 0;
3199 recipe_size += sizeof(*recipe);
3200 }
3201
3202 /*
3203 * create an inheritance attribute from the kmsg (may be NULL)
3204 * transferring any boosts from the kmsg linkage through the
3205 * port directly to the new inheritance object.
3206 */
3207 inherit = ipc_importance_inherit_from_kmsg(kmsg);
3208 handle = (mach_voucher_attr_value_handle_t)inherit;
3209
3210 assert(IIE_NULL == kmsg->ikm_importance);
3211
3212 /*
3213 * Only create a new voucher if we have an inherit object
3214 * (from the ikm_importance field of the incoming message), OR
3215 * we have a valid incoming voucher. If we have neither of
3216 * these things then there is no need to create a new voucher.
3217 */
3218 if (IP_VALID(kmsg->ikm_voucher) || inherit != III_NULL) {
3219 /* replace the importance attribute with the handle we created */
3220 /* our made reference on the inherit is donated to the voucher */
3221 recipe = (ipc_voucher_attr_recipe_t)&recipes[recipe_size];
3222 recipe->key = MACH_VOUCHER_ATTR_KEY_IMPORTANCE;
3223 recipe->command = MACH_VOUCHER_ATTR_SET_VALUE_HANDLE;
3224 recipe->previous_voucher = IPC_VOUCHER_NULL;
3225 recipe->content_size = sizeof(mach_voucher_attr_value_handle_t);
3226 *(mach_voucher_attr_value_handle_t *)(void *)recipe->content = handle;
3227 recipe_size += sizeof(*recipe) + sizeof(mach_voucher_attr_value_handle_t);
3228
3229 kr = ipc_voucher_attr_control_create_mach_voucher(ipc_importance_control,
3230 recipes,
3231 recipe_size,
3232 &recv_voucher);
3233 assert(KERN_SUCCESS == kr);
3234
3235 /* swap the voucher port (and set voucher bits in case it didn't already exist) */
3236 kmsg->ikm_header->msgh_bits |= (MACH_MSG_TYPE_MOVE_SEND << 16);
3237 ipc_port_release_send(kmsg->ikm_voucher);
3238 kmsg->ikm_voucher = convert_voucher_to_port(recv_voucher);
3239 if (III_NULL != inherit) {
3240 impresult = 2;
3241 }
3242 }
3243 } else { /* Don't want a voucher */
3244 /* got linked importance? have to drop */
3245 if (IIE_NULL != kmsg->ikm_importance) {
3246 ipc_importance_elem_t elem;
3247
3248 ipc_importance_lock();
3249 elem = ipc_importance_kmsg_unlink(kmsg);
3250 #if IIE_REF_DEBUG
3251 elem->iie_kmsg_refs_dropped++;
3252 #endif
3253 ipc_importance_release_locked(elem);
3254 /* importance unlocked */
3255 }
3256
3257 /* With kmsg unlinked, can safely examine message importance attribute. */
3258 if (MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) {
3259 ipc_port_t port = kmsg->ikm_header->msgh_remote_port;
3260 #if LEGACY_IMPORTANCE_DELIVERY
3261 ipc_importance_task_t task_imp = task_self->task_imp_base;
3262
3263 /* The owner of receive right might have changed, take the internal assertion */
3264 if (KERN_SUCCESS == ipc_importance_task_hold_internal_assertion(task_imp, 1)) {
3265 ipc_importance_task_externalize_legacy_assertion(task_imp, 1, sender_pid);
3266 impresult = 1;
3267 } else
3268 #endif
3269 {
3270 /* The importance boost never applied to task (clear the bit) */
3271 kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
3272 impresult = 0;
3273 }
3274
3275 /* Drop the boost on the port and the owner of the receive right */
3276 ip_lock(port);
3277 if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == FALSE) {
3278 ip_unlock(port);
3279 }
3280 }
3281 }
3282
3283 #if IMPORTANCE_TRACE
3284 if (-1 < impresult) {
3285 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_DELV)) | DBG_FUNC_NONE,
3286 sender_pid, task_pid(task_self),
3287 kmsg->ikm_header->msgh_id, impresult, 0);
3288 }
3289 if (impresult == 2) {
3290 /*
3291 * This probe only covers new voucher-based path. Legacy importance
3292 * will trigger the probe in ipc_importance_task_externalize_legacy_assertion()
3293 * above and have impresult==1 here.
3294 */
3295 DTRACE_BOOST5(receive_boost, task_t, task_self, int, task_pid(task_self), int, sender_pid, int, 1, int, task_self->task_imp_base->iit_assertcnt);
3296 }
3297 #endif /* IMPORTANCE_TRACE */
3298 }
3299
3300 /*
3301 * Routine: ipc_importance_unreceive
3302 * Purpose:
3303 * Undo receive of importance attributes in a message.
3304 *
3305 * Conditions:
3306 * Nothing locked.
3307 */
3308 void
3309 ipc_importance_unreceive(
3310 ipc_kmsg_t kmsg,
3311 mach_msg_option_t __unused option)
3312 {
3313 /* importance should already be in the voucher and out of the kmsg */
3314 assert(IIE_NULL == kmsg->ikm_importance);
3315
3316 /* See if there is a legacy boost to be dropped from receiver */
3317 if (MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) {
3318 ipc_importance_task_t task_imp;
3319
3320 kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
3321 task_imp = current_task()->task_imp_base;
3322 if (!IP_VALID(kmsg->ikm_voucher) && IIT_NULL != task_imp) {
3323 ipc_importance_task_drop_legacy_external_assertion(task_imp, 1);
3324 }
3325 /*
3326 * ipc_kmsg_copyout_dest() will consume the voucher
3327 * and any contained importance.
3328 */
3329 }
3330 }
3331
3332 /*
3333 * Routine: ipc_importance_clean
3334 * Purpose:
3335 * Clean up importance state in a kmsg that is being cleaned.
3336 * Unlink the importance chain if one was set up, and drop
3337 * the reference this kmsg held on the donor. Then check to see
3338 * if importance was carried to the port, and remove that if
3339 * needed.
3340 * Conditions:
3341 * Nothing locked.
3342 */
3343 void
3344 ipc_importance_clean(
3345 ipc_kmsg_t kmsg)
3346 {
3347 ipc_port_t port;
3348
3349 /* Is the kmsg still linked? If so, remove that first */
3350 if (IIE_NULL != kmsg->ikm_importance) {
3351 ipc_importance_elem_t elem;
3352
3353 ipc_importance_lock();
3354 elem = ipc_importance_kmsg_unlink(kmsg);
3355 assert(IIE_NULL != elem);
3356 ipc_importance_release_locked(elem);
3357 /* importance unlocked */
3358 }
3359
3360 /* See if there is a legacy importance boost to be dropped from port */
3361 if (MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) {
3362 kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
3363 port = kmsg->ikm_header->msgh_remote_port;
3364 if (IP_VALID(port)) {
3365 ip_lock(port);
3366 /* inactive ports already had their importance boosts dropped */
3367 if (!ip_active(port) ||
3368 ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == FALSE) {
3369 ip_unlock(port);
3370 }
3371 }
3372 }
3373 }
3374
3375 void
3376 ipc_importance_assert_clean(__assert_only ipc_kmsg_t kmsg)
3377 {
3378 assert(IIE_NULL == kmsg->ikm_importance);
3379 assert(!MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits));
3380 }
3381
3382 /*
3383 * IPC Importance Attribute Manager definition
3384 */
3385
3386 static kern_return_t
3387 ipc_importance_release_value(
3388 ipc_voucher_attr_manager_t manager,
3389 mach_voucher_attr_key_t key,
3390 mach_voucher_attr_value_handle_t value,
3391 mach_voucher_attr_value_reference_t sync);
3392
3393 static kern_return_t
3394 ipc_importance_get_value(
3395 ipc_voucher_attr_manager_t manager,
3396 mach_voucher_attr_key_t key,
3397 mach_voucher_attr_recipe_command_t command,
3398 mach_voucher_attr_value_handle_array_t prev_values,
3399 mach_voucher_attr_value_handle_array_size_t prev_value_count,
3400 mach_voucher_attr_content_t content,
3401 mach_voucher_attr_content_size_t content_size,
3402 mach_voucher_attr_value_handle_t *out_value,
3403 mach_voucher_attr_value_flags_t *out_flags,
3404 ipc_voucher_t *out_value_voucher);
3405
3406 static kern_return_t
3407 ipc_importance_extract_content(
3408 ipc_voucher_attr_manager_t manager,
3409 mach_voucher_attr_key_t key,
3410 mach_voucher_attr_value_handle_array_t values,
3411 mach_voucher_attr_value_handle_array_size_t value_count,
3412 mach_voucher_attr_recipe_command_t *out_command,
3413 mach_voucher_attr_content_t out_content,
3414 mach_voucher_attr_content_size_t *in_out_content_size);
3415
3416 static kern_return_t
3417 ipc_importance_command(
3418 ipc_voucher_attr_manager_t manager,
3419 mach_voucher_attr_key_t key,
3420 mach_voucher_attr_value_handle_array_t values,
3421 mach_msg_type_number_t value_count,
3422 mach_voucher_attr_command_t command,
3423 mach_voucher_attr_content_t in_content,
3424 mach_voucher_attr_content_size_t in_content_size,
3425 mach_voucher_attr_content_t out_content,
3426 mach_voucher_attr_content_size_t *out_content_size);
3427
3428 static void
3429 ipc_importance_manager_release(
3430 ipc_voucher_attr_manager_t manager);
3431
3432 const struct ipc_voucher_attr_manager ipc_importance_manager = {
3433 .ivam_release_value = ipc_importance_release_value,
3434 .ivam_get_value = ipc_importance_get_value,
3435 .ivam_extract_content = ipc_importance_extract_content,
3436 .ivam_command = ipc_importance_command,
3437 .ivam_release = ipc_importance_manager_release,
3438 .ivam_flags = IVAM_FLAGS_NONE,
3439 };
3440
3441 #define IMPORTANCE_ASSERT_KEY(key) assert(MACH_VOUCHER_ATTR_KEY_IMPORTANCE == (key))
3442 #define IMPORTANCE_ASSERT_MANAGER(manager) assert(&ipc_importance_manager == (manager))
3443
3444 /*
3445 * Routine: ipc_importance_release_value [Voucher Attribute Manager Interface]
3446 * Purpose:
3447 * Release what the voucher system believes is the last "made" reference
3448 * on an importance attribute value handle. The sync parameter is used to
3449 * avoid races with new made references concurrently being returned to the
3450 * voucher system in other threads.
3451 * Conditions:
3452 * Nothing locked on entry. May block.
3453 */
3454 static kern_return_t
3455 ipc_importance_release_value(
3456 ipc_voucher_attr_manager_t __assert_only manager,
3457 mach_voucher_attr_key_t __assert_only key,
3458 mach_voucher_attr_value_handle_t value,
3459 mach_voucher_attr_value_reference_t sync)
3460 {
3461 ipc_importance_elem_t elem;
3462
3463 IMPORTANCE_ASSERT_MANAGER(manager);
3464 IMPORTANCE_ASSERT_KEY(key);
3465 assert(0 < sync);
3466
3467 elem = (ipc_importance_elem_t)value;
3468
3469 ipc_importance_lock();
3470
3471 /* Any outstanding made refs? */
3472 if (sync != elem->iie_made) {
3473 assert(sync < elem->iie_made);
3474 ipc_importance_unlock();
3475 return KERN_FAILURE;
3476 }
3477
3478 /* clear made */
3479 elem->iie_made = 0;
3480
3481 /*
3482 * If there are pending external boosts represented by this attribute,
3483 * drop them from the appropriate task.
3484 */
3485 if (IIE_TYPE_INHERIT == IIE_TYPE(elem)) {
3486 ipc_importance_inherit_t inherit = (ipc_importance_inherit_t)elem;
3487
3488 assert(inherit->iii_externcnt >= inherit->iii_externdrop);
3489
3490 if (inherit->iii_donating) {
3491 ipc_importance_task_t imp_task = inherit->iii_to_task;
3492 uint32_t assertcnt = III_EXTERN(inherit);
3493
3494 assert(ipc_importance_task_is_any_receiver_type(imp_task));
3495 assert(imp_task->iit_externcnt >= inherit->iii_externcnt);
3496 assert(imp_task->iit_externdrop >= inherit->iii_externdrop);
3497 imp_task->iit_externcnt -= inherit->iii_externcnt;
3498 imp_task->iit_externdrop -= inherit->iii_externdrop;
3499 inherit->iii_externcnt = 0;
3500 inherit->iii_externdrop = 0;
3501 inherit->iii_donating = FALSE;
3502
3503 /* adjust the internal assertions - and propagate if needed */
3504 if (ipc_importance_task_check_transition(imp_task, IIT_UPDATE_DROP, assertcnt)) {
3505 ipc_importance_task_propagate_assertion_locked(imp_task, IIT_UPDATE_DROP, TRUE);
3506 }
3507 } else {
3508 inherit->iii_externcnt = 0;
3509 inherit->iii_externdrop = 0;
3510 }
3511 }
3512
3513 /* drop the made reference on elem */
3514 ipc_importance_release_locked(elem);
3515 /* returns unlocked */
3516
3517 return KERN_SUCCESS;
3518 }
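
/*
 * Illustrative sketch (hypothetical voucher-layer caller, for exposition
 * only): the voucher system is assumed to pass the count of "made"
 * references it handed out as 'sync'; a KERN_FAILURE return means new
 * made references appeared concurrently, so the value handle must be
 * kept alive and released again later.
 */
#if 0 /* example only; not compiled */
static void
example_try_release_importance_value(
	mach_voucher_attr_value_handle_t    handle,
	mach_voucher_attr_value_reference_t made_refs)
{
	kern_return_t kr;

	kr = ipc_importance_release_value(&ipc_importance_manager,
	    MACH_VOUCHER_ATTR_KEY_IMPORTANCE, handle, made_refs);
	if (KERN_FAILURE == kr) {
		/* lost the race with a concurrent made reference; retry later */
	}
}
#endif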
3519
3520
3521 /*
3522 * Routine: ipc_importance_get_value [Voucher Attribute Manager Interface]
3523 * Purpose:
3524 * Convert command and content data into a reference on a [potentially new]
3525 * attribute value. The importance attribute manager will only allow the
3526 * caller to get a value for the current task's importance, or to redeem
3527 * an importance attribute from an existing voucher.
3528 * Conditions:
3529 * Nothing locked on entry. May block.
3530 */
3531 static kern_return_t
3532 ipc_importance_get_value(
3533 ipc_voucher_attr_manager_t __assert_only manager,
3534 mach_voucher_attr_key_t __assert_only key,
3535 mach_voucher_attr_recipe_command_t command,
3536 mach_voucher_attr_value_handle_array_t prev_values,
3537 mach_voucher_attr_value_handle_array_size_t prev_value_count,
3538 mach_voucher_attr_content_t __unused content,
3539 mach_voucher_attr_content_size_t content_size,
3540 mach_voucher_attr_value_handle_t *out_value,
3541 mach_voucher_attr_value_flags_t *out_flags,
3542 ipc_voucher_t *out_value_voucher)
3543 {
3544 ipc_importance_elem_t elem;
3545 task_t self;
3546
3547 IMPORTANCE_ASSERT_MANAGER(manager);
3548 IMPORTANCE_ASSERT_KEY(key);
3549
3550 if (0 != content_size) {
3551 return KERN_INVALID_ARGUMENT;
3552 }
3553
3554 *out_flags = MACH_VOUCHER_ATTR_VALUE_FLAGS_NONE;
3555 /* never an out voucher */
3556
3557 switch (command) {
3558 case MACH_VOUCHER_ATTR_REDEEM:
3559
3560 /* redeem of previous values is the value */
3561 if (0 < prev_value_count) {
3562 elem = (ipc_importance_elem_t)prev_values[0];
3563 assert(IIE_NULL != elem);
3564
3565 ipc_importance_lock();
3566 assert(0 < elem->iie_made);
3567 elem->iie_made++;
3568 ipc_importance_unlock();
3569
3570 *out_value = prev_values[0];
3571 return KERN_SUCCESS;
3572 }
3573
3574 /* redeem of default is default */
3575 *out_value = 0;
3576 *out_value_voucher = IPC_VOUCHER_NULL;
3577 return KERN_SUCCESS;
3578
3579 case MACH_VOUCHER_ATTR_IMPORTANCE_SELF:
3580 self = current_task();
3581
3582 elem = (ipc_importance_elem_t)ipc_importance_for_task(self, TRUE);
3583 /* made reference added (or IIE_NULL which isn't referenced) */
3584
3585 *out_value = (mach_voucher_attr_value_handle_t)elem;
3586 *out_value_voucher = IPC_VOUCHER_NULL;
3587 return KERN_SUCCESS;
3588
3589 default:
3590 /*
3591 * every other command is unknown
3592 *
3593 * Specifically, there is no mechanism provided to construct an
3594 * importance attribute for a task/process from just a pid or
3595 * task port. It has to be copied (or redeemed) from a previous
3596 * voucher that has it.
3597 */
3598 return KERN_INVALID_ARGUMENT;
3599 }
3600 }
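
/*
 * Illustrative user-space sketch (not kernel code; assumes the standard
 * Mach voucher interfaces reachable via <mach/mach.h>): the only way to
 * mint an importance value from scratch is the ..._IMPORTANCE_SELF
 * recipe, and the recipe content must be empty or the routine above
 * returns KERN_INVALID_ARGUMENT.
 */
#if 0 /* example only; not compiled */
#include <mach/mach.h>

static mach_voucher_t
example_make_self_importance_voucher(void)
{
	mach_voucher_t voucher = MACH_VOUCHER_NULL;
	mach_voucher_attr_recipe_data_t recipe = {
		.key              = MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
		.command          = MACH_VOUCHER_ATTR_IMPORTANCE_SELF,
		.previous_voucher = MACH_VOUCHER_NULL,
		.content_size     = 0,
	};
	kern_return_t kr;

	kr = host_create_mach_voucher(mach_host_self(),
	    (mach_voucher_attr_raw_recipe_array_t)&recipe,
	    sizeof(recipe), &voucher);
	return (KERN_SUCCESS == kr) ? voucher : MACH_VOUCHER_NULL;
}
#endif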
3601
3602 /*
3603 * Routine: ipc_importance_extract_content [Voucher Attribute Manager Interface]
3604 * Purpose:
3605 * Extract meaning from the attribute value present in a voucher. While
3606 * the real goal is to provide commands and data that can reproduce the
3607 * voucher's value "out of thin air", this isn't possible with importance
3608 * attribute values. Instead, return debug info to help track down dependencies.
3609 * Conditions:
3610 * Nothing locked on entry. May block.
3611 */
3612 static kern_return_t
3613 ipc_importance_extract_content(
3614 ipc_voucher_attr_manager_t __assert_only manager,
3615 mach_voucher_attr_key_t __assert_only key,
3616 mach_voucher_attr_value_handle_array_t values,
3617 mach_voucher_attr_value_handle_array_size_t value_count,
3618 mach_voucher_attr_recipe_command_t *out_command,
3619 mach_voucher_attr_content_t out_content,
3620 mach_voucher_attr_content_size_t *in_out_content_size)
3621 {
3622 mach_voucher_attr_content_size_t size = 0;
3623 ipc_importance_elem_t elem;
3624 unsigned int i;
3625
3626 IMPORTANCE_ASSERT_MANAGER(manager);
3627 IMPORTANCE_ASSERT_KEY(key);
3628
3629 /* the first non-default value provides the data */
3630 for (i = 0; i < value_count && *in_out_content_size > 0; i++) {
3631 elem = (ipc_importance_elem_t)values[i];
3632 if (IIE_NULL == elem) {
3633 continue;
3634 }
3635
3636 snprintf((char *)out_content, *in_out_content_size, "Importance for pid ");
3637 size = (mach_voucher_attr_content_size_t)strlen((char *)out_content);
3638
3639 for (;;) {
3640 ipc_importance_inherit_t inherit = III_NULL;
3641 ipc_importance_task_t task_imp;
3642 task_t task;
3643 int t_pid;
3644
3645 if (IIE_TYPE_TASK == IIE_TYPE(elem)) {
3646 task_imp = (ipc_importance_task_t)elem;
3647 task = task_imp->iit_task;
3648 t_pid = (TASK_NULL != task) ?
3649 task_pid(task) : -1;
3650 snprintf((char *)out_content + size, *in_out_content_size - size, "%d", t_pid);
3651 } else {
3652 inherit = (ipc_importance_inherit_t)elem;
3653 task_imp = inherit->iii_to_task;
3654 task = task_imp->iit_task;
3655 t_pid = (TASK_NULL != task) ?
3656 task_pid(task) : -1;
3657 snprintf((char *)out_content + size, *in_out_content_size - size,
3658 "%d (%d of %d boosts) %s from pid ", t_pid,
3659 III_EXTERN(inherit), inherit->iii_externcnt,
3660 (inherit->iii_donating) ? "donated" : "linked");
3661 }
3662
3663 size = (mach_voucher_attr_content_size_t)strlen((char *)out_content);
3664
3665 if (III_NULL == inherit) {
3666 break;
3667 }
3668
3669 elem = inherit->iii_from_elem;
3670 }
3671 size++; /* account for the terminating NUL */
3672 }
3673 *out_command = MACH_VOUCHER_ATTR_NOOP; /* cannot be used to regenerate value */
3674 *in_out_content_size = size;
3675 return KERN_SUCCESS;
3676 }
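
/*
 * Illustrative user-space sketch (not kernel code): the debug string
 * built above can be read back with mach_voucher_extract_attr_content();
 * the buffer size and sample output shown are examples only.
 */
#if 0 /* example only; not compiled */
#include <stdio.h>
#include <mach/mach.h>
#include <mach/mach_voucher.h>

static void
example_dump_importance_chain(mach_voucher_t voucher)
{
	char buf[256];
	mach_voucher_attr_content_size_t size = sizeof(buf);
	kern_return_t kr;

	kr = mach_voucher_extract_attr_content(voucher,
	    MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
	    (mach_voucher_attr_content_t)buf, &size);
	if (KERN_SUCCESS == kr && size > 0) {
		/* e.g. "Importance for pid 123 (1 of 1 boosts) donated from pid 456" */
		printf("%.*s\n", (int)size, buf);
	}
}
#endif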
3677
3678 /*
3679 * Routine: ipc_importance_command [Voucher Attribute Manager Interface]
3680 * Purpose:
3681 * Run commands against the importance attribute value found in a voucher.
3682 * No such commands are currently supported.
3683 * Conditions:
3684 * Nothing locked on entry. May block.
3685 */
3686 static kern_return_t
3687 ipc_importance_command(
3688 ipc_voucher_attr_manager_t __assert_only manager,
3689 mach_voucher_attr_key_t __assert_only key,
3690 mach_voucher_attr_value_handle_array_t values,
3691 mach_msg_type_number_t value_count,
3692 mach_voucher_attr_command_t command,
3693 mach_voucher_attr_content_t in_content,
3694 mach_voucher_attr_content_size_t in_content_size,
3695 mach_voucher_attr_content_t out_content,
3696 mach_voucher_attr_content_size_t *out_content_size)
3697 {
3698 ipc_importance_inherit_t inherit;
3699 ipc_importance_task_t to_task;
3700 uint32_t refs, *outrefsp;
3701 mach_msg_type_number_t i;
3702 uint32_t externcnt;
3703
3704 IMPORTANCE_ASSERT_MANAGER(manager);
3705 IMPORTANCE_ASSERT_KEY(key);
3706
3707 if (in_content_size != sizeof(refs) ||
3708 (*out_content_size != 0 && *out_content_size != sizeof(refs))) {
3709 return KERN_INVALID_ARGUMENT;
3710 }
3711 refs = *(uint32_t *)(void *)in_content;
3712 outrefsp = (*out_content_size != 0) ? (uint32_t *)(void *)out_content : NULL;
3713
3714 if (MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL != command) {
3715 return KERN_NOT_SUPPORTED;
3716 }
3717
3718 /* the first non-default value of the apropos type provides the data */
3719 inherit = III_NULL;
3720 for (i = 0; i < value_count; i++) {
3721 ipc_importance_elem_t elem = (ipc_importance_elem_t)values[i];
3722
3723 if (IIE_NULL != elem && IIE_TYPE_INHERIT == IIE_TYPE(elem)) {
3724 inherit = (ipc_importance_inherit_t)elem;
3725 break;
3726 }
3727 }
3728 if (III_NULL == inherit) {
3729 return KERN_INVALID_ARGUMENT;
3730 }
3731
3732 ipc_importance_lock();
3733
3734 if (0 == refs) {
3735 if (NULL != outrefsp) {
3736 *outrefsp = III_EXTERN(inherit);
3737 }
3738 ipc_importance_unlock();
3739 return KERN_SUCCESS;
3740 }
3741
3742 to_task = inherit->iii_to_task;
3743 assert(ipc_importance_task_is_any_receiver_type(to_task));
3744
3745 /* if not donating to a denap receiver, it was called incorrectly */
3746 if (!ipc_importance_task_is_marked_denap_receiver(to_task)) {
3747 ipc_importance_unlock();
3748 return KERN_INVALID_TASK; /* keeps dispatch happy */
3749 }
3750
3751 /* Enough external references left to drop? */
3752 if (III_EXTERN(inherit) < refs) {
3753 ipc_importance_unlock();
3754 return KERN_FAILURE;
3755 }
3756
3757 /* re-base external and internal counters at the inherit and the to-task (if apropos) */
3758 if (inherit->iii_donating) {
3759 assert(IIT_EXTERN(to_task) >= III_EXTERN(inherit));
3760 assert(to_task->iit_externcnt >= inherit->iii_externcnt);
3761 assert(to_task->iit_externdrop >= inherit->iii_externdrop);
3762 inherit->iii_externdrop += refs;
3763 to_task->iit_externdrop += refs;
3764 externcnt = III_EXTERN(inherit);
3765 if (0 == externcnt) {
3766 inherit->iii_donating = FALSE;
3767 to_task->iit_externcnt -= inherit->iii_externcnt;
3768 to_task->iit_externdrop -= inherit->iii_externdrop;
3769
3770
3771 /* Start AppNap delay hysteresis - even if not the last boost for the task. */
3772 if (ipc_importance_delayed_drop_call != NULL &&
3773 ipc_importance_task_is_marked_denap_receiver(to_task)) {
3774 ipc_importance_task_delayed_drop(to_task);
3775 }
3776
3777 /* drop task assertions associated with the dropped boosts */
3778 if (ipc_importance_task_check_transition(to_task, IIT_UPDATE_DROP, refs)) {
3779 ipc_importance_task_propagate_assertion_locked(to_task, IIT_UPDATE_DROP, TRUE);
3780 /* may have dropped and retaken importance lock */
3781 }
3782 } else {
3783 /* assert(to_task->iit_assertcnt >= refs + externcnt); */
3784 /* defensive deduction in case of assertcnt underflow */
3785 if (to_task->iit_assertcnt > refs + externcnt) {
3786 to_task->iit_assertcnt -= refs;
3787 } else {
3788 to_task->iit_assertcnt = externcnt;
3789 }
3790 }
3791 } else {
3792 inherit->iii_externdrop += refs;
3793 externcnt = III_EXTERN(inherit);
3794 }
3795
3796 /* capture result (if requested) */
3797 if (NULL != outrefsp) {
3798 *outrefsp = externcnt;
3799 }
3800
3801 ipc_importance_unlock();
3802 return KERN_SUCCESS;
3803 }
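
/*
 * Illustrative user-space sketch (not kernel code): the one supported
 * command can be driven with mach_voucher_attr_command(). Per the checks
 * above, the input content must be a single uint32_t count of boosts to
 * drop, and the optional output content returns the remaining external
 * boost count.
 */
#if 0 /* example only; not compiled */
#include <mach/mach.h>
#include <mach/mach_voucher.h>

static kern_return_t
example_drop_external_boosts(mach_voucher_t voucher, uint32_t refs,
    uint32_t *remaining)
{
	mach_voucher_attr_content_size_t out_size = sizeof(*remaining);

	return mach_voucher_attr_command(voucher,
	    MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
	    MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL,
	    (mach_voucher_attr_content_t)&refs, sizeof(refs),
	    (mach_voucher_attr_content_t)remaining, &out_size);
}
#endif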
3804
3805 /*
3806 * Routine: ipc_importance_manager_release [Voucher Attribute Manager Interface]
3807 * Purpose:
3808 * Release the Voucher system's reference on the IPC importance attribute
3809 * manager.
3810 * Conditions:
3811 * As this can only occur after the manager drops the Attribute control
3812 * reference granted back at registration time, and that reference is never
3813 * dropped, this should never be called.
3814 */
3815 __abortlike
3816 static void
3817 ipc_importance_manager_release(
3818 ipc_voucher_attr_manager_t __assert_only manager)
3819 {
3820 IMPORTANCE_ASSERT_MANAGER(manager);
3821 panic("Voucher importance manager released");
3822 }
3823
3824 /*
3825 * Routine: ipc_importance_init
3826 * Purpose:
3827 * Initialize the IPC importance manager.
3828 * Conditions:
3829 * Zones and Vouchers are already initialized.
3830 */
3831 void
3832 ipc_importance_init(void)
3833 {
3834 natural_t ipc_importance_max = (task_max + thread_max) * 2;
3835 char temp_buf[26];
3836 kern_return_t kr;
3837
3838 if (PE_parse_boot_argn("imp_interactive_receiver", temp_buf, sizeof(temp_buf))) {
3839 ipc_importance_interactive_receiver = TRUE;
3840 }
3841
3842 ipc_importance_task_zone = zinit(sizeof(struct ipc_importance_task),
3843 ipc_importance_max * sizeof(struct ipc_importance_task),
3844 sizeof(struct ipc_importance_task),
3845 "ipc task importance");
3846 zone_change(ipc_importance_task_zone, Z_NOENCRYPT, TRUE);
3847
3848 ipc_importance_inherit_zone = zinit(sizeof(struct ipc_importance_inherit),
3849 ipc_importance_max * sizeof(struct ipc_importance_inherit),
3850 sizeof(struct ipc_importance_inherit),
3851 "ipc importance inherit");
3852 zone_change(ipc_importance_inherit_zone, Z_NOENCRYPT, TRUE);
3853
3854
3855 #if DEVELOPMENT || DEBUG
3856 queue_init(&global_iit_alloc_queue);
3857 #endif
3858
3859 /* initialize global locking */
3860 ipc_importance_lock_init();
3861
3862 kr = ipc_register_well_known_mach_voucher_attr_manager(&ipc_importance_manager,
3863 (mach_voucher_attr_value_handle_t)0,
3864 MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
3865 &ipc_importance_control);
3866 if (KERN_SUCCESS != kr) {
3867 printf("Voucher importance manager register returned %d\n", kr);
3868 }
3869 }
3870
3871 /*
3872 * Routine: ipc_importance_thread_call_init
3873 * Purpose:
3874 * Initialize the IPC importance code dependent upon
3875 * thread-call support being available.
3876 * Conditions:
3877 * Thread-call mechanism is already initialized.
3878 */
3879 void
3880 ipc_importance_thread_call_init(void)
3881 {
3882 /* initialize delayed drop queue and thread-call */
3883 queue_init(&ipc_importance_delayed_drop_queue);
3884 ipc_importance_delayed_drop_call =
3885 thread_call_allocate(ipc_importance_task_delayed_drop_scan, NULL);
3886 if (NULL == ipc_importance_delayed_drop_call) {
3887 panic("ipc_importance_thread_call_init");
3888 }
3889 }
3890
3891 /*
3892 * Routine: task_importance_list_pids
3893 * Purpose: list pids to which this task is donating importance.
3894 * Conditions: To be called only from kdp stackshot code.
3895 * Will panic the system otherwise.
3896 */
3897 extern int
3898 task_importance_list_pids(task_t task, int flags, char *pid_list, unsigned int max_count)
3899 {
3900 if (kdp_lck_spin_is_acquired(&ipc_importance_lock_data) ||
3901 max_count < 1 ||
3902 task->task_imp_base == IIT_NULL ||
3903 pid_list == NULL ||
3904 flags != TASK_IMP_LIST_DONATING_PIDS) {
3905 return 0;
3906 }
3907 unsigned int pidcount = 0;
3908 task_t temp_task;
3909 ipc_importance_task_t task_imp = task->task_imp_base;
3910 ipc_kmsg_t temp_kmsg;
3911 ipc_importance_inherit_t temp_inherit;
3912 ipc_importance_elem_t elem;
3913 int target_pid = 0, previous_pid;
3914
3915 queue_iterate(&task_imp->iit_inherits, temp_inherit, ipc_importance_inherit_t, iii_inheritance) {
3916 /* check space in buffer */
3917 if (pidcount >= max_count) {
3918 break;
3919 }
3920 previous_pid = target_pid;
3921 target_pid = -1;
3922
3923 if (temp_inherit->iii_donating) {
3924 #if DEVELOPMENT || DEBUG
3925 target_pid = temp_inherit->iii_to_task->iit_bsd_pid;
3926 #else
3927 temp_task = temp_inherit->iii_to_task->iit_task;
3928 if (temp_task != TASK_NULL) {
3929 target_pid = task_pid(temp_task);
3930 }
3931 #endif
3932 }
3933
3934 if (target_pid != -1 && previous_pid != target_pid) {
3935 memcpy(pid_list, &target_pid, sizeof(target_pid));
3936 pid_list += sizeof(target_pid);
3937 pidcount++;
3938 }
3939 }
3940
3941 target_pid = 0;
3942 queue_iterate(&task_imp->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance) {
3943 if (pidcount >= max_count) {
3944 break;
3945 }
3946 previous_pid = target_pid;
3947 target_pid = -1;
3948 elem = temp_kmsg->ikm_importance;
3949 temp_task = TASK_NULL;
3950
3951 if (elem == IIE_NULL) {
3952 continue;
3953 }
3954
3955 if (!(temp_kmsg->ikm_header && MACH_MSGH_BITS_RAISED_IMPORTANCE(temp_kmsg->ikm_header->msgh_bits))) {
3956 continue;
3957 }
3958
3959 if (IIE_TYPE_TASK == IIE_TYPE(elem) &&
3960 (((ipc_importance_task_t)elem)->iit_task != TASK_NULL)) {
3961 target_pid = task_pid(((ipc_importance_task_t)elem)->iit_task);
3962 } else {
3963 temp_inherit = (ipc_importance_inherit_t)elem;
3964 #if DEVELOPMENT || DEBUG
3965 target_pid = temp_inherit->iii_to_task->iit_bsd_pid;
3966 #else
3967 temp_task = temp_inherit->iii_to_task->iit_task;
3968 if (temp_task != TASK_NULL) {
3969 target_pid = task_pid(temp_task);
3970 }
3971 #endif
3972 }
3973
3974 if (target_pid != -1 && previous_pid != target_pid) {
3975 memcpy(pid_list, &target_pid, sizeof(target_pid));
3976 pid_list += sizeof(target_pid);
3977 pidcount++;
3978 }
3979 }
3980
3981 return pidcount;
3982 }
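
/*
 * Illustrative sketch (hypothetical stackshot-side consumer): max_count
 * is a count of pids, not bytes, and the pids are written back-to-back
 * as ints, so an int array can be handed in and read out directly.
 */
#if 0 /* example only; not compiled */
static void
example_collect_donating_pids(task_t task)
{
	int pids[16];
	int count;

	count = task_importance_list_pids(task, TASK_IMP_LIST_DONATING_PIDS,
	    (char *)pids, sizeof(pids) / sizeof(pids[0]));
	for (int i = 0; i < count; i++) {
		/* pids[i] is a pid this task is currently donating importance to */
	}
}
#endif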