1 /*
2 * Copyright (c) 2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach/mach_types.h>
30 #include <mach/notify.h>
31 #include <ipc/ipc_types.h>
32 #include <ipc/ipc_importance.h>
33 #include <ipc/ipc_port.h>
34 #include <ipc/ipc_voucher.h>
35 #include <kern/ipc_kobject.h>
36 #include <kern/ipc_tt.h>
37 #include <kern/mach_param.h>
38 #include <kern/misc_protos.h>
39 #include <kern/kalloc.h>
40 #include <kern/zalloc.h>
41 #include <kern/queue.h>
42 #include <kern/task.h>
43 #include <kern/policy_internal.h>
44
45 #include <sys/kdebug.h>
46
47 #include <mach/mach_voucher_attr_control.h>
48 #include <mach/machine/sdt.h>
49
50 extern int proc_pid(void *);
51 extern int proc_selfpid(void);
52 extern uint64_t proc_uniqueid(void *p);
53 extern char *proc_name_address(void *p);
54
55 /*
56 * Globals for delayed boost drop processing.
57 */
58 static queue_head_t ipc_importance_delayed_drop_queue;
59 static thread_call_t ipc_importance_delayed_drop_call;
60 static uint64_t ipc_importance_delayed_drop_timestamp;
61 static boolean_t ipc_importance_delayed_drop_call_requested = FALSE;
62
63 #define DENAP_DROP_TARGET (1000 * NSEC_PER_MSEC) /* optimum denap delay */
64 #define DENAP_DROP_SKEW (100 * NSEC_PER_MSEC) /* request skew for wakeup */
65 #define DENAP_DROP_LEEWAY (2 * DENAP_DROP_SKEW) /* specified wakeup leeway */
66
67 #define DENAP_DROP_DELAY (DENAP_DROP_TARGET + DENAP_DROP_SKEW)
68 #define DENAP_DROP_FLAGS (THREAD_CALL_DELAY_SYS_NORMAL | THREAD_CALL_DELAY_LEEWAY)
69
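/*
 * Editor's note: the block below is an illustrative sketch added for this
 * edit, not part of the original file.  It shows how the constants above
 * combine into the delayed-drop deadline and leeway, mirroring the arming
 * code in ipc_importance_task_delayed_drop() later in this file.  The
 * function name and the "request_time" parameter are assumptions.
 */
#if 0	/* illustrative sketch only */
static void
example_arm_delayed_drop(thread_call_t call, uint64_t request_time)
{
	uint64_t deadline;
	uint64_t leeway;

	/* convert the nanosecond policy constants into absolute-time units */
	nanoseconds_to_absolutetime(DENAP_DROP_DELAY, &deadline);
	deadline += request_time;

	nanoseconds_to_absolutetime(DENAP_DROP_LEEWAY, &leeway);

	/* one wakeup covers every drop that lands within the skew/leeway window */
	thread_call_enter_delayed_with_leeway(call, NULL, deadline, leeway,
	    DENAP_DROP_FLAGS);
}
#endif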
70 /*
71 * Importance Voucher Attribute Manager
72 */
73
74 static lck_spin_t ipc_importance_lock_data; /* single lock for now */
75
76
77 #define ipc_importance_lock_init() \
78 lck_spin_init(&ipc_importance_lock_data, &ipc_lck_grp, &ipc_lck_attr)
79 #define ipc_importance_lock_destroy() \
80 lck_spin_destroy(&ipc_importance_lock_data, &ipc_lck_grp)
81 #define ipc_importance_lock() \
82 lck_spin_lock(&ipc_importance_lock_data)
83 #define ipc_importance_lock_try() \
84 lck_spin_try_lock(&ipc_importance_lock_data)
85 #define ipc_importance_unlock() \
86 lck_spin_unlock(&ipc_importance_lock_data)
87 #define ipc_importance_sleep(elem) lck_spin_sleep(&ipc_importance_lock_data, \
88 LCK_SLEEP_DEFAULT, \
89 (event_t)(elem), \
90 THREAD_UNINT)
91 #define ipc_importance_wakeup(elem) thread_wakeup((event_t)(elem))
92
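/*
 * Editor's note: illustrative sketch added for this edit, not part of the
 * original file.  All importance state is guarded by the single spinlock
 * wrapped by the macros above; a typical critical section looks like the
 * hypothetical helper below ("example_elem" is an assumed parameter).
 */
#if 0	/* illustrative sketch only */
static uint32_t
example_read_refs(ipc_importance_elem_t example_elem)
{
	uint32_t refs;

	ipc_importance_lock();		/* take the global importance spinlock */
	refs = IIE_REFS(example_elem);	/* sample state while it cannot change */
	ipc_importance_unlock();	/* drop it before anything that may block */

	return refs;
}
#endif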
93 #if IIE_REF_DEBUG
94 #define incr_ref_counter(x) (hw_atomic_add(&(x), 1))
95
96 static inline
97 uint32_t ipc_importance_reference_internal(ipc_importance_elem_t elem)
98 {
99 incr_ref_counter(elem->iie_refs_added);
100 return (hw_atomic_add(&elem->iie_bits, 1) & IIE_REFS_MASK);
101 }
102
103 static inline
104 uint32_t ipc_importance_release_internal(ipc_importance_elem_t elem)
105 {
106 incr_ref_counter(elem->iie_refs_dropped);
107 return (hw_atomic_sub(&elem->iie_bits, 1) & IIE_REFS_MASK);
108 }
109
110 static inline
111 uint32_t ipc_importance_task_reference_internal(ipc_importance_task_t task_imp)
112 {
113 uint32_t out;
114 out = ipc_importance_reference_internal(&task_imp->iit_elem);
115 incr_ref_counter(task_imp->iit_elem.iie_task_refs_added);
116 return out;
117 }
118
119 static inline
120 uint32_t ipc_importance_task_release_internal(ipc_importance_task_t task_imp)
121 {
122 uint32_t out;
123
124 assert(1 < IIT_REFS(task_imp));
125 incr_ref_counter(task_imp->iit_elem.iie_task_refs_dropped);
126 out = ipc_importance_release_internal(&task_imp->iit_elem);
127 return out;
128 }
129
130 static inline
131 void ipc_importance_counter_init(ipc_importance_elem_t elem)
132 {
133
134 elem->iie_refs_added = 0;
135 elem->iie_refs_dropped = 0;
136 elem->iie_kmsg_refs_added = 0;
137 elem->iie_kmsg_refs_inherited = 0;
138 elem->iie_kmsg_refs_coalesced = 0;
139 elem->iie_kmsg_refs_dropped = 0;
140 elem->iie_task_refs_added = 0;
141 elem->iie_task_refs_added_inherit_from = 0;
142 elem->iie_task_refs_added_transition = 0;
143 elem->iie_task_refs_self_added = 0;
144 elem->iie_task_refs_inherited = 0;
145 elem->iie_task_refs_coalesced = 0;
146 elem->iie_task_refs_dropped = 0;
147 }
148 #else
149 #define incr_ref_counter(x)
150 #endif
151
152 #if DEVELOPMENT || DEBUG
153 static queue_head_t global_iit_alloc_queue;
154 #endif
155
156 /* TODO: remove this variable when interactive daemon audit is complete */
157 boolean_t ipc_importance_interactive_receiver = FALSE;
158
159 static zone_t ipc_importance_task_zone;
160 static zone_t ipc_importance_inherit_zone;
161
162 static ipc_voucher_attr_control_t ipc_importance_control;
163
164 /*
165 * Routine: ipc_importance_kmsg_link
166 * Purpose:
167 * Link the kmsg onto the appropriate propagation chain.
168 * If the element is a task importance, we link directly
169 * on its propagation chain. Otherwise, we link onto the
170 * destination task of the inherit.
171 * Conditions:
172 * Importance lock held.
173 * Caller is donating an importance elem reference to the kmsg.
174 */
175 static void
176 ipc_importance_kmsg_link(
177 ipc_kmsg_t kmsg,
178 ipc_importance_elem_t elem)
179 {
180 ipc_importance_elem_t link_elem;
181
182 assert(IIE_NULL == kmsg->ikm_importance);
183
184 link_elem = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
185 (ipc_importance_elem_t)((ipc_importance_inherit_t)elem)->iii_to_task :
186 elem;
187
188 queue_enter(&link_elem->iie_kmsgs, kmsg, ipc_kmsg_t, ikm_inheritance);
189 kmsg->ikm_importance = elem;
190 }
191
192 /*
193 * Routine: ipc_importance_kmsg_unlink
194 * Purpose:
195 * Unlink the kmsg from its current propagation chain.
196 * If the element is a task importance, we unlink directly
197 * from its propagation chain. Otherwise, we unlink from the
198 * destination task of the inherit.
199 * Returns:
200 * The reference to the importance element it was linked on.
201 * Conditions:
202 * Importance lock held.
203 * Caller is responsible for dropping reference on returned elem.
204 */
205 static ipc_importance_elem_t
206 ipc_importance_kmsg_unlink(
207 ipc_kmsg_t kmsg)
208 {
209 ipc_importance_elem_t elem = kmsg->ikm_importance;
210
211 if (IIE_NULL != elem) {
212 ipc_importance_elem_t unlink_elem;
213
214 unlink_elem = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
215 (ipc_importance_elem_t)((ipc_importance_inherit_t)elem)->iii_to_task :
216 elem;
217
218 queue_remove(&unlink_elem->iie_kmsgs, kmsg, ipc_kmsg_t, ikm_inheritance);
219 kmsg->ikm_importance = IIE_NULL;
220 }
221 return elem;
222 }
223
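/*
 * Editor's note: illustrative sketch added for this edit, not part of the
 * original file.  ipc_importance_kmsg_link() consumes the caller's element
 * reference and ipc_importance_kmsg_unlink() hands it back, so a balanced
 * (hypothetical) caller looks roughly like this.
 */
#if 0	/* illustrative sketch only */
static void
example_kmsg_link_unlink(ipc_kmsg_t kmsg, ipc_importance_elem_t elem)
{
	ipc_importance_elem_t returned;

	ipc_importance_lock();

	/* take the reference we are about to donate to the kmsg */
	ipc_importance_reference(elem);
	ipc_importance_kmsg_link(kmsg, elem);

	/* ... later: unlink gives that reference back to us ... */
	returned = ipc_importance_kmsg_unlink(kmsg);
	assert(returned == elem);

	/* drop it; this also drops the importance lock */
	ipc_importance_release_locked(returned);
}
#endif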
224 /*
225 * Routine: ipc_importance_inherit_link
226 * Purpose:
227 * Link the inherit onto the appropriate propagation chain.
228 * If the element is a task importance, we link directly
229 * on its propagation chain. Otherwise, we link onto the
230 * destination task of the inherit.
231 * Conditions:
232 * Importance lock held.
233 * Caller is donating an elem importance reference to the inherit.
234 */
235 static void
236 ipc_importance_inherit_link(
237 ipc_importance_inherit_t inherit,
238 ipc_importance_elem_t elem)
239 {
240 ipc_importance_task_t link_task;
241
242 assert(IIE_NULL == inherit->iii_from_elem);
243 link_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
244 ((ipc_importance_inherit_t)elem)->iii_to_task :
245 (ipc_importance_task_t)elem;
246
247 queue_enter(&link_task->iit_inherits, inherit,
248 ipc_importance_inherit_t, iii_inheritance);
249 inherit->iii_from_elem = elem;
250 }
251
252 /*
253 * Routine: ipc_importance_inherit_find
254 * Purpose:
255 * Find an existing inherit that links the from element to the
256 * to_task at a given nesting depth. As inherits from other
257 * inherits are actually linked off the original inherit's donation
258 * receiving task, we have to conduct our search from there if
259 * the from element is an inherit.
260 * Returns:
261 * A pointer (not a reference) to the matching inherit.
262 * Conditions:
263 * Importance lock held.
264 */
265 static ipc_importance_inherit_t
266 ipc_importance_inherit_find(
267 ipc_importance_elem_t from,
268 ipc_importance_task_t to_task,
269 unsigned int depth)
270 {
271 ipc_importance_task_t link_task;
272 ipc_importance_inherit_t inherit;
273
274 link_task = (IIE_TYPE_INHERIT == IIE_TYPE(from)) ?
275 ((ipc_importance_inherit_t)from)->iii_to_task :
276 (ipc_importance_task_t)from;
277
278 queue_iterate(&link_task->iit_inherits, inherit,
279 ipc_importance_inherit_t, iii_inheritance) {
280 if (inherit->iii_to_task == to_task && inherit->iii_depth == depth)
281 return inherit;
282 }
283 return III_NULL;
284 }
285
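/*
 * Editor's note: illustrative sketch added for this edit, not part of the
 * original file.  Because ipc_importance_inherit_find() returns a pointer
 * rather than a reference, a (hypothetical) caller that needs the inherit
 * after dropping the importance lock must pin it while the lock is held.
 */
#if 0	/* illustrative sketch only */
static ipc_importance_inherit_t
example_find_and_pin(
	ipc_importance_elem_t	from,
	ipc_importance_task_t	to_task,
	unsigned int		depth)
{
	ipc_importance_inherit_t inherit;

	ipc_importance_lock();
	inherit = ipc_importance_inherit_find(from, to_task, depth);
	if (III_NULL != inherit) {
		/* take our own reference before the lock is dropped */
		ipc_importance_reference((ipc_importance_elem_t)inherit);
	}
	ipc_importance_unlock();

	return inherit;
}
#endif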
286 /*
287 * Routine: ipc_importance_inherit_unlink
288 * Purpose:
289 * Unlink the inherit from its current propagation chain.
290 * If the element is a task importance, we unlink directly
291 * from its propagation chain. Otherwise, we unlink from the
292 * destination task of the inherit.
293 * Returns:
294 * The reference to the importance element it was linked on.
295 * Conditions:
296 * Importance lock held.
297 * Caller is responsible for dropping reference on returned elem.
298 */
299 static ipc_importance_elem_t
300 ipc_importance_inherit_unlink(
301 ipc_importance_inherit_t inherit)
302 {
303 ipc_importance_elem_t elem = inherit->iii_from_elem;
304
305 if (IIE_NULL != elem) {
306 ipc_importance_task_t unlink_task;
307
308 unlink_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
309 ((ipc_importance_inherit_t)elem)->iii_to_task :
310 (ipc_importance_task_t)elem;
311
312 queue_remove(&unlink_task->iit_inherits, inherit,
313 ipc_importance_inherit_t, iii_inheritance);
314 inherit->iii_from_elem = IIE_NULL;
315 }
316 return elem;
317 }
318
319 /*
320 * Routine: ipc_importance_reference
321 * Purpose:
322 * Add a reference to the importance element.
323 * Conditions:
324 * Caller must hold a reference on the element.
325 */
326 void
327 ipc_importance_reference(ipc_importance_elem_t elem)
328 {
329 assert(0 < IIE_REFS(elem));
330 ipc_importance_reference_internal(elem);
331 }
332
333 /*
334 * Routine: ipc_importance_release_locked
335 * Purpose:
336 * Release a reference on an importance attribute value,
337 * unlinking and deallocating the attribute if the last reference.
338 * Conditions:
339 * Entered with importance lock held, leaves with it unlocked.
340 */
341 static void
342 ipc_importance_release_locked(ipc_importance_elem_t elem)
343 {
344 assert(0 < IIE_REFS(elem));
345
346 #if DEVELOPMENT || DEBUG
347 ipc_importance_inherit_t temp_inherit;
348 ipc_importance_task_t link_task;
349 ipc_kmsg_t temp_kmsg;
350 uint32_t expected = 0;
351
352 if (0 < elem->iie_made)
353 expected++;
354
355 link_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
356 ((ipc_importance_inherit_t)elem)->iii_to_task :
357 (ipc_importance_task_t)elem;
358
359 queue_iterate(&link_task->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance)
360 if (temp_kmsg->ikm_importance == elem)
361 expected++;
362 queue_iterate(&link_task->iit_inherits, temp_inherit,
363 ipc_importance_inherit_t, iii_inheritance)
364 if (temp_inherit->iii_from_elem == elem)
365 expected++;
366 if (IIE_REFS(elem) < expected + 1)
367 panic("ipc_importance_release_locked (%p)", elem);
368 #endif
369
370 if (0 < ipc_importance_release_internal(elem)) {
371 ipc_importance_unlock();
372 return;
373 }
374
375 /* last ref */
376
377 switch (IIE_TYPE(elem)) {
378
379 /* just a "from" task reference to drop */
380 case IIE_TYPE_TASK:
381 {
382 ipc_importance_task_t task_elem;
383
384 task_elem = (ipc_importance_task_t)elem;
385
386 /* the task can't still hold a reference on the task importance */
387 assert(TASK_NULL == task_elem->iit_task);
388
389 #if DEVELOPMENT || DEBUG
390 queue_remove(&global_iit_alloc_queue, task_elem, ipc_importance_task_t, iit_allocation);
391 #endif
392
393 ipc_importance_unlock();
394
395 zfree(ipc_importance_task_zone, task_elem);
396 break;
397 }
398
399 /* dropping an inherit element */
400 case IIE_TYPE_INHERIT:
401 {
402 ipc_importance_inherit_t inherit;
403 ipc_importance_elem_t from_elem;
404 ipc_importance_task_t to_task;
405
406
407 inherit = (ipc_importance_inherit_t)elem;
408 to_task = inherit->iii_to_task;
409 assert(IIT_NULL != to_task);
410 assert(!inherit->iii_donating);
411
412 /* unlink and release the inherit */
413 assert(ipc_importance_task_is_any_receiver_type(to_task));
414 from_elem = ipc_importance_inherit_unlink(inherit);
415 assert(IIE_NULL != from_elem);
416 ipc_importance_release_locked(from_elem);
417 /* unlocked on return */
418
419 ipc_importance_task_release(to_task);
420
421 zfree(ipc_importance_inherit_zone, inherit);
422 break;
423 }
424 }
425 }
426
427 /*
428 * Routine: ipc_importance_release
429 * Purpose:
430 * Release a reference on an importance attribute value,
431 * unlinking and deallocating the attribute if the last reference.
432 * Conditions:
433 * nothing locked on entrance, nothing locked on exit.
434 * May block.
435 */
436 void
437 ipc_importance_release(ipc_importance_elem_t elem)
438 {
439 if (IIE_NULL == elem)
440 return;
441
442 ipc_importance_lock();
443 ipc_importance_release_locked(elem);
444 /* unlocked */
445 }
446
447 /*
448 * Routine: ipc_importance_task_reference
451 * Purpose:
452 * Retain a reference on a task importance attribute value.
453 * Conditions:
454 * nothing locked on entrance, nothing locked on exit.
455 * caller holds a reference already.
456 */
457 void
458 ipc_importance_task_reference(ipc_importance_task_t task_elem)
459 {
460 if (IIT_NULL == task_elem)
461 return;
462 #if IIE_REF_DEBUG
463 incr_ref_counter(task_elem->iit_elem.iie_task_refs_added);
464 #endif
465 ipc_importance_reference(&task_elem->iit_elem);
466 }
467
468 /*
469 * Routine: ipc_importance_task_release
470 * Purpose:
471 * Release a reference on a task importance attribute value,
472 * unlinking and deallocating the attribute if the last reference.
473 * Conditions:
474 * nothing locked on entrance, nothing locked on exit.
475 * May block.
476 */
477 void
478 ipc_importance_task_release(ipc_importance_task_t task_elem)
479 {
480 if (IIT_NULL == task_elem)
481 return;
482
483 ipc_importance_lock();
484 #if IIE_REF_DEBUG
485 incr_ref_counter(task_elem->iit_elem.iie_task_refs_dropped);
486 #endif
487 ipc_importance_release_locked(&task_elem->iit_elem);
488 /* unlocked */
489 }
490
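/*
 * Editor's note: illustrative sketch added for this edit, not part of the
 * original file.  The reference/release pair above is how a (hypothetical)
 * caller pins a task importance structure it has merely borrowed; both
 * calls tolerate IIT_NULL.
 */
#if 0	/* illustrative sketch only */
static boolean_t
example_pin_and_query(ipc_importance_task_t borrowed)
{
	boolean_t donating;

	ipc_importance_task_reference(borrowed);	/* +1 ref (no-op for IIT_NULL) */
	donating = ipc_importance_task_is_donor(borrowed);
	ipc_importance_task_release(borrowed);		/* -1 ref, frees on last release */

	return donating;
}
#endif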
491 /*
492 * Routine: ipc_importance_task_release_locked
493 * Purpose:
494 * Release a reference on a task importance attribute value,
495 * unlinking and deallocating the attribute if the last reference.
496 * Conditions:
497 * importance lock held on entry, nothing locked on exit.
498 * May block.
499 */
500 static void
501 ipc_importance_task_release_locked(ipc_importance_task_t task_elem)
502 {
503 if (IIT_NULL == task_elem) {
504 ipc_importance_unlock();
505 return;
506 }
507 #if IIE_REF_DEBUG
508 incr_ref_counter(task_elem->iit_elem.iie_task_refs_dropped);
509 #endif
510 ipc_importance_release_locked(&task_elem->iit_elem);
511 /* unlocked */
512 }
513
514 /*
515 * Routines for importance donation/inheritance/boosting
516 */
517
518
519 /*
520 * External importance assertions are managed by the process in userspace
521 * Internal importance assertions are the responsibility of the kernel
522 * Assertions are changed from internal to external via task_importance_externalize_assertion
523 */
524
525 /*
526 * Routine: ipc_importance_task_check_transition
527 * Purpose:
528 * Increment or decrement the internal task importance counter of the
529 * specified task and determine if propagation and a task policy
530 * update is required.
531 *
532 * If it is already enqueued for a policy update, steal it from that queue
533 * (as we are reversing that update before it happens).
534 *
535 * Conditions:
536 * Called with the importance lock held.
537 * It is the caller's responsibility to perform the propagation of the
538 * transition and/or policy changes by checking the return value.
539 */
540 static boolean_t
541 ipc_importance_task_check_transition(
542 ipc_importance_task_t task_imp,
543 iit_update_type_t type,
544 uint32_t delta)
545 {
546
547 task_t target_task = task_imp->iit_task;
548 boolean_t boost = (IIT_UPDATE_HOLD == type);
549 boolean_t before_boosted, after_boosted;
550
551 if (!ipc_importance_task_is_any_receiver_type(task_imp))
552 return FALSE;
553
554 #if IMPORTANCE_DEBUG
555 int target_pid = task_pid(target_task);
556
557 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (((boost) ? IMP_HOLD : IMP_DROP) | TASK_POLICY_INTERNAL))) | DBG_FUNC_START,
558 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
559 #endif
560
561 /* snapshot the effective boosting status before making any changes */
562 before_boosted = (task_imp->iit_assertcnt > 0);
563
564 /* Adjust the assertcnt appropriately */
565 if (boost) {
566 task_imp->iit_assertcnt += delta;
567 #if IMPORTANCE_DEBUG
568 DTRACE_BOOST6(send_boost, task_t, target_task, int, target_pid,
569 task_t, current_task(), int, proc_selfpid(), int, delta, int, task_imp->iit_assertcnt);
570 #endif
571 } else {
572 // assert(delta <= task_imp->iit_assertcnt);
573 if (task_imp->iit_assertcnt < delta + IIT_EXTERN(task_imp)) {
574 /* TODO: Turn this back into a panic <rdar://problem/12592649> */
575 if (target_task != TASK_NULL) {
576 printf("Over-release of kernel-internal importance assertions for pid %d (%s), "
577 "dropping %d assertion(s) but task only has %d remaining (%d external).\n",
578 task_pid(target_task),
579 (target_task->bsd_info == NULL) ? "" : proc_name_address(target_task->bsd_info),
580 delta,
581 task_imp->iit_assertcnt,
582 IIT_EXTERN(task_imp));
583 }
584 task_imp->iit_assertcnt = IIT_EXTERN(task_imp);
585 } else {
586 task_imp->iit_assertcnt -= delta;
587 }
588 #if IMPORTANCE_DEBUG
589 // This covers both legacy and voucher-based importance.
590 DTRACE_BOOST4(drop_boost, task_t, target_task, int, target_pid, int, delta, int, task_imp->iit_assertcnt);
591 #endif
592 }
593
594 #if IMPORTANCE_DEBUG
595 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (((boost) ? IMP_HOLD : IMP_DROP) | TASK_POLICY_INTERNAL))) | DBG_FUNC_END,
596 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
597 #endif
598
599 /* did the change result in an effective donor status change? */
600 after_boosted = (task_imp->iit_assertcnt > 0);
601
602 if (after_boosted != before_boosted) {
603
604 /*
605 * If the task importance is already on an update queue, we just reversed the need for a
606 * pending policy update. If the queue is any other than the delayed-drop-queue, pull it
607 * off that queue and release the reference it got going onto the update queue. If it is
608 * the delayed-drop-queue we leave it in place in case it comes back into the drop state
609 * before its time delay is up.
610 *
611 * We still need to propagate the change downstream to reverse the assertcnt effects,
612 * but we no longer need to update this task's boost policy state.
613 *
614 * Otherwise, mark it as needing a policy update.
615 */
616 assert(0 == task_imp->iit_updatepolicy);
617 if (NULL != task_imp->iit_updateq) {
618 if (&ipc_importance_delayed_drop_queue != task_imp->iit_updateq) {
619 queue_remove(task_imp->iit_updateq, task_imp, ipc_importance_task_t, iit_updates);
620 task_imp->iit_updateq = NULL;
621 ipc_importance_task_release_internal(task_imp); /* can't be last ref */
622 }
623 } else {
624 task_imp->iit_updatepolicy = 1;
625 }
626 return TRUE;
627 }
628
629 return FALSE;
630 }
631
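/*
 * Editor's note: illustrative sketch added for this edit, not part of the
 * original file.  ipc_importance_task_check_transition() only adjusts the
 * assertion count and reports whether the boosted/unboosted edge was
 * crossed; the caller must react to the return value while still holding
 * the importance lock, roughly as below (compare
 * ipc_importance_task_hold_internal_assertion_locked() later in the file).
 */
#if 0	/* illustrative sketch only */
static void
example_hold_and_propagate_locked(ipc_importance_task_t task_imp, uint32_t count)
{
	/* bump the internal assertion count by "count" */
	if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_HOLD, count)) {
		/* crossed the 0 -> boosted edge: queue this task and downstream for policy updates */
		ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_HOLD, TRUE);
	}
}
#endif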
632
633 /*
634 * Routine: ipc_importance_task_propagate_helper
635 * Purpose:
636 * Increment or decrement the internal task importance counter of all
637 * importance tasks inheriting from the specified one. If this causes
638 * that importance task to change state, add it to the list of tasks
639 * to do a policy update against.
640 * Conditions:
641 * Called with the importance lock held.
642 * It is the caller's responsibility to iterate down the generated list
643 * and propagate any subsequent assertion changes from there.
644 */
645 static void
646 ipc_importance_task_propagate_helper(
647 ipc_importance_task_t task_imp,
648 iit_update_type_t type,
649 queue_t propagation)
650 {
651 ipc_importance_task_t temp_task_imp;
652
653 /*
654 * iterate the downstream kmsgs, adjust their boosts,
655 * and capture the next task to adjust for each message
656 */
657
658 ipc_kmsg_t temp_kmsg;
659
660 queue_iterate(&task_imp->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance) {
661 mach_msg_header_t *hdr = temp_kmsg->ikm_header;
662 mach_port_delta_t delta;
663 ipc_port_t port;
664
665 /* toggle the kmsg importance bit as a barrier to parallel adjusts */
666 if (IIT_UPDATE_HOLD == type) {
667 if (MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
668 continue;
669 }
670
671 /* mark the message as now carrying importance */
672 hdr->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;
673 delta = 1;
674 } else {
675 if (!MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
676 continue;
677 }
678
679 /* clear the message as now carrying importance */
680 hdr->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
681 delta = -1;
682 }
683
684 /* determine the task importance to adjust as result (if any) */
685 port = (ipc_port_t) hdr->msgh_remote_port;
686 assert(IP_VALID(port));
687 ip_lock(port);
688 temp_task_imp = IIT_NULL;
689 if (!ipc_port_importance_delta_internal(port, IPID_OPTION_NORMAL, &delta, &temp_task_imp)) {
690 ip_unlock(port);
691 }
692
693 /* no task importance to adjust associated with the port? */
694 if (IIT_NULL == temp_task_imp) {
695 continue;
696 }
697
698 /* hold a reference on temp_task_imp */
699
700 /* Adjust the task assertions and determine if an edge was crossed */
701 if (ipc_importance_task_check_transition(temp_task_imp, type, 1)) {
702 incr_ref_counter(task_imp->iit_elem.iie_task_refs_added_transition);
703 queue_enter(propagation, temp_task_imp, ipc_importance_task_t, iit_props);
704 /* reference donated */
705 } else {
706 ipc_importance_task_release_internal(temp_task_imp);
707 }
708 }
709
710 /*
711 * iterate the downstream importance inherits
712 * and capture the next task importance to boost for each
713 */
714 ipc_importance_inherit_t temp_inherit;
715
716 queue_iterate(&task_imp->iit_inherits, temp_inherit, ipc_importance_inherit_t, iii_inheritance) {
717 uint32_t assertcnt = III_EXTERN(temp_inherit);
718
719 temp_task_imp = temp_inherit->iii_to_task;
720 assert(IIT_NULL != temp_task_imp);
721
722 if (IIT_UPDATE_HOLD == type) {
723 /* if no undropped externcnts in the inherit, nothing to do */
724 if (0 == assertcnt) {
725 assert(temp_inherit->iii_donating == FALSE);
726 continue;
727 }
728
729 /* nothing to do if the inherit is already donating (forced donation) */
730 if (temp_inherit->iii_donating) {
731 continue;
732 }
733
734 /* mark it donating and contribute to the task externcnts */
735 temp_inherit->iii_donating = TRUE;
736 temp_task_imp->iit_externcnt += temp_inherit->iii_externcnt;
737 temp_task_imp->iit_externdrop += temp_inherit->iii_externdrop;
738
739 } else {
740 /* if no contributing assertions, move on */
741 if (0 == assertcnt) {
742 assert(temp_inherit->iii_donating == FALSE);
743 continue;
744 }
745
746 /* nothing to do if the inherit is not donating */
747 if (!temp_inherit->iii_donating) {
748 continue;
749 }
750
751 /* mark it no longer donating */
752 temp_inherit->iii_donating = FALSE;
753
754 /* remove the contribution the inherit made to the to-task */
755 assert(IIT_EXTERN(temp_task_imp) >= III_EXTERN(temp_inherit));
756 assert(temp_task_imp->iit_externcnt >= temp_inherit->iii_externcnt);
757 assert(temp_task_imp->iit_externdrop >= temp_inherit->iii_externdrop);
758 temp_task_imp->iit_externcnt -= temp_inherit->iii_externcnt;
759 temp_task_imp->iit_externdrop -= temp_inherit->iii_externdrop;
760
761 }
762
763 /* Adjust the task assertions and determine if an edge was crossed */
764 assert(ipc_importance_task_is_any_receiver_type(temp_task_imp));
765 if (ipc_importance_task_check_transition(temp_task_imp, type, assertcnt)) {
766 ipc_importance_task_reference(temp_task_imp);
767 incr_ref_counter(task_imp->iit_elem.iie_task_refs_added_transition);
768 queue_enter(propagation, temp_task_imp, ipc_importance_task_t, iit_props);
769 }
770 }
771 }
772
773 /*
774 * Routine: ipc_importance_task_process_updates
775 * Purpose:
776 * Process the queue of task importances and apply the policy
777 * update called for. Only process tasks in the queue with an
778 * update timestamp less than the supplied max.
779 * Conditions:
780 * Called and returns with importance locked.
781 * May drop importance lock and block temporarily.
782 */
783 static void
784 ipc_importance_task_process_updates(
785 queue_t supplied_queue,
786 boolean_t boost,
787 uint64_t max_timestamp)
788 {
789 ipc_importance_task_t task_imp;
790 queue_head_t second_chance;
791 queue_t queue = supplied_queue;
792
793 /*
794 * This queue will hold the tasks we couldn't trylock on first pass.
795 * By using a second (private) queue, we guarantee all tasks that get
796 * entered on this queue have a timestamp under the maximum.
797 */
798 queue_init(&second_chance);
799
800 /* process any resulting policy updates */
801 retry:
802 while(!queue_empty(queue)) {
803 task_t target_task;
804 struct task_pend_token pend_token = {};
805
806 task_imp = (ipc_importance_task_t)queue_first(queue);
807 assert(0 == task_imp->iit_updatepolicy);
808 assert(queue == task_imp->iit_updateq);
809
810 /* if timestamp is too big, we're done */
811 if (task_imp->iit_updatetime > max_timestamp) {
812 break;
813 }
814
815 /* we were given a reference on each task in the queue */
816
817 /* remove it from the supplied queue */
818 queue_remove(queue, task_imp, ipc_importance_task_t, iit_updates);
819 task_imp->iit_updateq = NULL;
820
821 target_task = task_imp->iit_task;
822
823 /* Is it well on the way to exiting? */
824 if (TASK_NULL == target_task) {
825 ipc_importance_task_release_locked(task_imp);
826 /* importance unlocked */
827 ipc_importance_lock();
828 continue;
829 }
830
831 /* Has the update been reversed on the hysteresis queue? */
832 if (0 < task_imp->iit_assertcnt &&
833 queue == &ipc_importance_delayed_drop_queue) {
834 ipc_importance_task_release_locked(task_imp);
835 /* importance unlocked */
836 ipc_importance_lock();
837 continue;
838 }
839
840 /*
841 * Can we get the task lock out-of-order?
842 * If not, stick this back on the second-chance queue.
843 */
844 if (!task_lock_try(target_task)) {
845 boolean_t should_wait_lock = (queue == &second_chance);
846 task_imp->iit_updateq = &second_chance;
847
848 /*
849 * If we're already processing second-chances on
850 * tasks, keep this task on the front of the queue.
851 * We will wait for the task lock before coming
852 * back and trying again, and we have a better
853 * chance of re-acquiring the lock if we come back
854 * to it right away.
855 */
856 if (should_wait_lock){
857 task_reference(target_task);
858 queue_enter_first(&second_chance, task_imp,
859 ipc_importance_task_t, iit_updates);
860 } else {
861 queue_enter(&second_chance, task_imp,
862 ipc_importance_task_t, iit_updates);
863 }
864 ipc_importance_unlock();
865
866 if (should_wait_lock) {
867 task_lock(target_task);
868 task_unlock(target_task);
869 task_deallocate(target_task);
870 }
871
872 ipc_importance_lock();
873 continue;
874 }
875
876 /* is it going away? */
877 if (!target_task->active) {
878 task_unlock(target_task);
879 ipc_importance_task_release_locked(task_imp);
880 /* importance unlocked */
881 ipc_importance_lock();
882 continue;
883 }
884
885 /* take a task reference for while we don't have the importance lock */
886 task_reference(target_task);
887
888 /* count the transition */
889 if (boost)
890 task_imp->iit_transitions++;
891
892 ipc_importance_unlock();
893
894 /* apply the policy adjust to the target task (while it is still locked) */
895 task_update_boost_locked(target_task, boost, &pend_token);
896
897 /* complete the policy update with the task unlocked */
898 ipc_importance_task_release(task_imp);
899 task_unlock(target_task);
900 task_policy_update_complete_unlocked(target_task, &pend_token);
901 task_deallocate(target_task);
902
903 ipc_importance_lock();
904 }
905
906 /* If there are tasks we couldn't update the first time, try again */
907 if (!queue_empty(&second_chance)) {
908 queue = &second_chance;
909 goto retry;
910 }
911 }
912
913
914 /*
915 * Routine: ipc_importance_task_delayed_drop_scan
916 * Purpose:
917 * The thread call routine to scan the delayed drop queue,
918 * requesting all updates with a deadline up to the last target
919 * for the thread-call (which is DENAP_DROP_SKEW beyond the first
920 * thread's optimum delay).
922 * Conditions:
923 * Nothing locked
924 */
925 static void
926 ipc_importance_task_delayed_drop_scan(
927 __unused void *arg1,
928 __unused void *arg2)
929 {
930 ipc_importance_lock();
931
932 /* process all queued task drops with timestamps up to TARGET(first)+SKEW */
933 ipc_importance_task_process_updates(&ipc_importance_delayed_drop_queue,
934 FALSE,
935 ipc_importance_delayed_drop_timestamp);
936
937 /* importance lock may have been temporarily dropped */
938
939 /* If there are any entries left in the queue, re-arm the call here */
940 if (!queue_empty(&ipc_importance_delayed_drop_queue)) {
941 ipc_importance_task_t task_imp;
942 uint64_t deadline;
943 uint64_t leeway;
944
945 task_imp = (ipc_importance_task_t)queue_first(&ipc_importance_delayed_drop_queue);
946
947 nanoseconds_to_absolutetime(DENAP_DROP_DELAY, &deadline);
948 deadline += task_imp->iit_updatetime;
949 ipc_importance_delayed_drop_timestamp = deadline;
950
951 nanoseconds_to_absolutetime(DENAP_DROP_LEEWAY, &leeway);
952
953 thread_call_enter_delayed_with_leeway(
954 ipc_importance_delayed_drop_call,
955 NULL,
956 deadline,
957 leeway,
958 DENAP_DROP_FLAGS);
959 } else {
960 ipc_importance_delayed_drop_call_requested = FALSE;
961 }
962 ipc_importance_unlock();
963 }
964
965 /*
966 * Routine: ipc_importance_task_delayed_drop
967 * Purpose:
968 * Queue the specified task importance for delayed policy
969 * update to drop its boost.
970 * Conditions:
971 * Called with the importance lock held.
972 */
973 static void
974 ipc_importance_task_delayed_drop(ipc_importance_task_t task_imp)
975 {
976 uint64_t timestamp = mach_absolute_time(); /* no mach_approximate_time() in kernel */
977
978 assert(ipc_importance_delayed_drop_call != NULL);
979
980 /*
981 * If still on an update queue from a previous change,
982 * remove it first (and use that reference). Otherwise, take
983 * a new reference for the delay drop update queue.
984 */
985 if (NULL != task_imp->iit_updateq) {
986 queue_remove(task_imp->iit_updateq, task_imp,
987 ipc_importance_task_t, iit_updates);
988 } else {
989 ipc_importance_task_reference_internal(task_imp);
990 }
991
992 task_imp->iit_updateq = &ipc_importance_delayed_drop_queue;
993 task_imp->iit_updatetime = timestamp;
994
995 queue_enter(&ipc_importance_delayed_drop_queue, task_imp,
996 ipc_importance_task_t, iit_updates);
997
998 /* request the delayed thread-call if not already requested */
999 if (!ipc_importance_delayed_drop_call_requested) {
1000 uint64_t deadline;
1001 uint64_t leeway;
1002
1003 nanoseconds_to_absolutetime(DENAP_DROP_DELAY, &deadline);
1004 deadline += task_imp->iit_updatetime;
1005 ipc_importance_delayed_drop_timestamp = deadline;
1006
1007 nanoseconds_to_absolutetime(DENAP_DROP_LEEWAY, &leeway);
1008
1009 ipc_importance_delayed_drop_call_requested = TRUE;
1010 thread_call_enter_delayed_with_leeway(
1011 ipc_importance_delayed_drop_call,
1012 NULL,
1013 deadline,
1014 leeway,
1015 DENAP_DROP_FLAGS);
1016 }
1017 }
1018
1019
1020 /*
1021 * Routine: ipc_importance_task_propagate_assertion_locked
1022 * Purpose:
1023 * Propagate the importance transition type to every downstream item.
1024 * If this causes a boost to be applied, determine if that
1025 * boost should propagate downstream.
1026 * Conditions:
1027 * Called with the importance lock held.
1028 */
1029 static void
1030 ipc_importance_task_propagate_assertion_locked(
1031 ipc_importance_task_t task_imp,
1032 iit_update_type_t type,
1033 boolean_t update_task_imp)
1034 {
1035 boolean_t boost = (IIT_UPDATE_HOLD == type);
1036 ipc_importance_task_t temp_task_imp;
1037 queue_head_t propagate;
1038 queue_head_t updates;
1039
1040 queue_init(&updates);
1041 queue_init(&propagate);
1042
1043 /*
1044 * If we're going to update the policy for the provided task,
1045 * enqueue it on the propagate queue itself. Otherwise, only
1046 * enqueue downstream things.
1047 */
1048 if (update_task_imp) {
1049 queue_enter(&propagate, task_imp, ipc_importance_task_t, iit_props);
1050 } else {
1051 ipc_importance_task_propagate_helper(task_imp, type, &propagate);
1052 }
1053
1054 /*
1055 * for each item on the propagation list, propagate any change downstream,
1056 * adding new tasks to propagate further if they transitioned as well.
1057 */
1058 while (!queue_empty(&propagate)) {
1059 boolean_t need_update;
1060
1061 queue_remove_first(&propagate, temp_task_imp, ipc_importance_task_t, iit_props);
1062 assert(IIT_NULL != temp_task_imp);
1063
1064 /* only propagate for receivers not already marked as a donor */
1065 if (!ipc_importance_task_is_marked_donor(temp_task_imp) &&
1066 ipc_importance_task_is_marked_receiver(temp_task_imp)) {
1067 ipc_importance_task_propagate_helper(temp_task_imp, type, &propagate);
1068 }
1069
1070 /* if we have a policy update to apply, enqueue a reference for later processing */
1071 need_update = (0 != temp_task_imp->iit_updatepolicy);
1072 temp_task_imp->iit_updatepolicy = 0;
1073 if (need_update && TASK_NULL != temp_task_imp->iit_task) {
1074 if (NULL == temp_task_imp->iit_updateq) {
1075
1076 /*
1077 * If a downstream task that needs an update is subject to AppNap,
1078 * drop boosts according to the delay hysteresis. Otherwise,
1079 * update it immediately.
1080 */
1081 if (!boost && temp_task_imp != task_imp &&
1082 ipc_importance_delayed_drop_call != NULL &&
1083 ipc_importance_task_is_marked_denap_receiver(temp_task_imp)) {
1084 ipc_importance_task_delayed_drop(temp_task_imp);
1085 } else {
1086 temp_task_imp->iit_updatetime = 0;
1087 temp_task_imp->iit_updateq = &updates;
1088 ipc_importance_task_reference_internal(temp_task_imp);
1089 if (boost) {
1090 queue_enter(&updates, temp_task_imp,
1091 ipc_importance_task_t, iit_updates);
1092 } else {
1093 queue_enter_first(&updates, temp_task_imp,
1094 ipc_importance_task_t, iit_updates);
1095 }
1096 }
1097 } else {
1098 /* Must already be on the AppNap hysteresis queue */
1099 assert(ipc_importance_delayed_drop_call != NULL);
1100 assert(ipc_importance_task_is_marked_denap_receiver(temp_task_imp));
1101 }
1102 }
1103 }
1104
1105 /* apply updates to task (may drop importance lock) */
1106 if (!queue_empty(&updates)) {
1107 ipc_importance_task_process_updates(&updates, boost, 0);
1108 }
1109 }
1110
1111 /*
1112 * Routine: ipc_importance_task_hold_internal_assertion_locked
1113 * Purpose:
1114 * Increment the assertion count on the task importance.
1115 * If this results in a boost state change in that task,
1116 * prepare to update task policy for this task AND,
1117 * if not just waking out of App Nap, all down-stream
1118 * tasks that have a similar transition through inheriting
1119 * this update.
1120 * Conditions:
1121 * importance locked on entry and exit.
1122 * May temporarily drop importance lock and block.
1123 */
1124 static kern_return_t
1125 ipc_importance_task_hold_internal_assertion_locked(ipc_importance_task_t task_imp, uint32_t count)
1126 {
1127 if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_HOLD, count)) {
1128 ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_HOLD, TRUE);
1129 }
1130 return KERN_SUCCESS;
1131 }
1132
1133 /*
1134 * Routine: ipc_importance_task_drop_internal_assertion_locked
1135 * Purpose:
1136 * Decrement the assertion count on the task importance.
1137 * If this results in a boost state change in that task,
1138 * prepare to update task policy for this task AND,
1139 * if not just waking out of App Nap, all down-stream
1140 * tasks that have a similar transition through inheriting
1141 * this update.
1142 * Conditions:
1143 * importance locked on entry and exit.
1144 * May temporarily drop importance lock and block.
1145 */
1146 static kern_return_t
1147 ipc_importance_task_drop_internal_assertion_locked(ipc_importance_task_t task_imp, uint32_t count)
1148 {
1149 if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_DROP, count)) {
1150 ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, TRUE);
1151 }
1152 return KERN_SUCCESS;
1153 }
1154
1155 /*
1156 * Routine: ipc_importance_task_hold_internal_assertion
1157 * Purpose:
1158 * Increment the assertion count on the task importance.
1159 * If this results in a 0->1 change in that count,
1160 * prepare to update task policy for this task AND
1161 * (potentially) all down-stream tasks that have a
1162 * similar transition through inheriting this update.
1163 * Conditions:
1164 * Nothing locked
1165 * May block after dropping importance lock.
1166 */
1167 int
1168 ipc_importance_task_hold_internal_assertion(ipc_importance_task_t task_imp, uint32_t count)
1169 {
1170 int ret = KERN_SUCCESS;
1171
1172 if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1173 ipc_importance_lock();
1174 ret = ipc_importance_task_hold_internal_assertion_locked(task_imp, count);
1175 ipc_importance_unlock();
1176 }
1177 return ret;
1178 }
1179
1180 /*
1181 * Routine: ipc_importance_task_drop_internal_assertion
1182 * Purpose:
1183 * Decrement the assertion count on the task importance.
1184 * If this results in a X->0 change in that count,
1185 * prepare to update task policy for this task AND
1186 * all down-stream tasks that have a similar transition
1187 * through inheriting this drop update.
1188 * Conditions:
1189 * Nothing locked on entry.
1190 * May block after dropping importance lock.
1191 */
1192 kern_return_t
1193 ipc_importance_task_drop_internal_assertion(ipc_importance_task_t task_imp, uint32_t count)
1194 {
1195 kern_return_t ret = KERN_SUCCESS;
1196
1197 if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1198 ipc_importance_lock();
1199 ret = ipc_importance_task_drop_internal_assertion_locked(task_imp, count);
1200 ipc_importance_unlock();
1201 }
1202 return ret;
1203 }
1204
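/*
 * Editor's note: illustrative sketch added for this edit, not part of the
 * original file.  Kernel code that wants a temporary boost pairs the hold
 * and drop calls above around the boosted work, as in this hypothetical
 * sequence; both calls are no-ops for tasks that are not importance or
 * de-nap receivers.
 */
#if 0	/* illustrative sketch only */
static void
example_temporary_internal_boost(ipc_importance_task_t task_imp)
{
	/* raise the internal assertion count (may propagate downstream) */
	if (KERN_SUCCESS != ipc_importance_task_hold_internal_assertion(task_imp, 1))
		return;

	/* ... perform the work that should run boosted ... */

	/* balance the hold so the boost (and any propagation) is reversed */
	ipc_importance_task_drop_internal_assertion(task_imp, 1);
}
#endif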
1205 /*
1206 * Routine: ipc_importance_task_hold_file_lock_assertion
1207 * Purpose:
1208 * Increment the file lock assertion count on the task importance.
1209 * If this results in a 0->1 change in that count,
1210 * prepare to update task policy for this task AND
1211 * (potentially) all down-stream tasks that have a
1212 * similar transition through inheriting this update.
1213 * Conditions:
1214 * Nothing locked
1215 * May block after dropping importance lock.
1216 */
1217 kern_return_t
1218 ipc_importance_task_hold_file_lock_assertion(ipc_importance_task_t task_imp, uint32_t count)
1219 {
1220 kern_return_t ret = KERN_SUCCESS;
1221
1222 if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1223 ipc_importance_lock();
1224 ret = ipc_importance_task_hold_internal_assertion_locked(task_imp, count);
1225 if (KERN_SUCCESS == ret) {
1226 task_imp->iit_filelocks += count;
1227 }
1228 ipc_importance_unlock();
1229 }
1230 return ret;
1231 }
1232
1233 /*
1234 * Routine: ipc_importance_task_drop_file_lock_assertion
1235 * Purpose:
1236 * Decrement the assertion count on the task importance.
1237 * If this results in a X->0 change in that count,
1238 * prepare to update task policy for this task AND
1239 * all down-stream tasks that have a similar transition
1240 * through inheriting this drop update.
1241 * Conditions:
1242 * Nothing locked on entry.
1243 * May block after dropping importance lock.
1244 */
1245 kern_return_t
1246 ipc_importance_task_drop_file_lock_assertion(ipc_importance_task_t task_imp, uint32_t count)
1247 {
1248 kern_return_t ret = KERN_SUCCESS;
1249
1250 if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1251 ipc_importance_lock();
1252 if (count <= task_imp->iit_filelocks) {
1253 task_imp->iit_filelocks -= count;
1254 ret = ipc_importance_task_drop_internal_assertion_locked(task_imp, count);
1255 } else {
1256 ret = KERN_INVALID_ARGUMENT;
1257 }
1258 ipc_importance_unlock();
1259 }
1260 return ret;
1261 }
1262
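/*
 * Editor's note: illustrative sketch added for this edit, not part of the
 * original file.  The file-lock variants also track the outstanding count
 * in iit_filelocks, so an unbalanced drop is rejected with
 * KERN_INVALID_ARGUMENT.  A balanced (hypothetical) caller:
 */
#if 0	/* illustrative sketch only */
static void
example_boost_across_file_lock(ipc_importance_task_t task_imp)
{
	if (KERN_SUCCESS != ipc_importance_task_hold_file_lock_assertion(task_imp, 1))
		return;

	/* ... hold the file lock and do the boosted work ... */

	/* must be balanced, or later drops will fail the iit_filelocks check */
	(void)ipc_importance_task_drop_file_lock_assertion(task_imp, 1);
}
#endif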
1263 /*
1264 * Routine: ipc_importance_task_hold_legacy_external_assertion
1265 * Purpose:
1266 * Increment the external assertion count on the task importance.
1267 * This cannot result in an 0->1 transition, as the caller must
1268 * already hold an external boost.
1269 * Conditions:
1270 * Nothing locked on entry.
1271 * May block after dropping importance lock.
1272 * A queue of task importance structures is returned
1273 * by ipc_importance_task_hold_assertion_locked(). Each
1274 * needs to be updated (outside the importance lock hold).
1275 */
1276 kern_return_t
1277 ipc_importance_task_hold_legacy_external_assertion(ipc_importance_task_t task_imp, uint32_t count)
1278 {
1279 task_t target_task;
1280 uint32_t target_assertcnt;
1281 uint32_t target_externcnt;
1282 uint32_t target_legacycnt;
1283
1284 kern_return_t ret;
1285
1286 ipc_importance_lock();
1287 target_task = task_imp->iit_task;
1288
1289 #if IMPORTANCE_DEBUG
1290 int target_pid = task_pid(target_task);
1291
1292 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_HOLD | TASK_POLICY_EXTERNAL))) | DBG_FUNC_START,
1293 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
1294 #endif
1295
1296 if (IIT_LEGACY_EXTERN(task_imp) == 0) {
1297 /* Only allowed to take a new boost assertion when holding an external boost */
1298 /* save data for diagnostic printf below */
1299 target_assertcnt = task_imp->iit_assertcnt;
1300 target_externcnt = IIT_EXTERN(task_imp);
1301 target_legacycnt = IIT_LEGACY_EXTERN(task_imp);
1302 ret = KERN_FAILURE;
1303 count = 0;
1304 } else {
1305 assert(ipc_importance_task_is_any_receiver_type(task_imp));
1306 assert(0 < task_imp->iit_assertcnt);
1307 assert(0 < IIT_EXTERN(task_imp));
1308 task_imp->iit_assertcnt += count;
1309 task_imp->iit_externcnt += count;
1310 task_imp->iit_legacy_externcnt += count;
1311 ret = KERN_SUCCESS;
1312 }
1313 ipc_importance_unlock();
1314
1315 #if IMPORTANCE_DEBUG
1316 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_HOLD | TASK_POLICY_EXTERNAL))) | DBG_FUNC_END,
1317 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
1318 // This covers the legacy case where a task takes an extra boost.
1319 DTRACE_BOOST5(receive_boost, task_t, target_task, int, target_pid, int, proc_selfpid(), int, count, int, task_imp->iit_assertcnt);
1320 #endif
1321
1322 if (KERN_FAILURE == ret && target_task != TASK_NULL) {
1323 printf("BUG in process %s[%d]: "
1324 "attempt to acquire an additional legacy external boost assertion without holding an existing legacy external assertion. "
1325 "(%d total, %d external, %d legacy-external)\n",
1326 proc_name_address(target_task->bsd_info), task_pid(target_task),
1327 target_assertcnt, target_externcnt, target_legacycnt);
1328 }
1329
1330 return(ret);
1331 }
1332
1333 /*
1334 * Routine: ipc_importance_task_drop_legacy_external_assertion
1335 * Purpose:
1336 * Drop the legacy external assertion count on the task and
1337 * reflect that change to total external assertion count and
1338 * then onto the internal importance count.
1339 *
1340 * If this results in a X->0 change in the internal,
1341 * count, prepare to update task policy for this task AND
1342 * all down-stream tasks that have a similar transition
1343 * through inheriting this update.
1344 * Conditions:
1345 * Nothing locked on entry.
1346 */
1347 kern_return_t
1348 ipc_importance_task_drop_legacy_external_assertion(ipc_importance_task_t task_imp, uint32_t count)
1349 {
1350 int ret = KERN_SUCCESS;
1351 task_t target_task;
1352 uint32_t target_assertcnt;
1353 uint32_t target_externcnt;
1354 uint32_t target_legacycnt;
1355
1356 if (count > 1) {
1357 return KERN_INVALID_ARGUMENT;
1358 }
1359
1360 ipc_importance_lock();
1361 target_task = task_imp->iit_task;
1362
1363 #if IMPORTANCE_DEBUG
1364 int target_pid = task_pid(target_task);
1365
1366 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_DROP | TASK_POLICY_EXTERNAL))) | DBG_FUNC_START,
1367 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
1368 #endif
1369
1370 if (count > IIT_LEGACY_EXTERN(task_imp)) {
1371 /* Process over-released its boost count - save data for diagnostic printf */
1372 /* TODO: If count > 1, we should clear out as many external assertions as there are left. */
1373 target_assertcnt = task_imp->iit_assertcnt;
1374 target_externcnt = IIT_EXTERN(task_imp);
1375 target_legacycnt = IIT_LEGACY_EXTERN(task_imp);
1376 ret = KERN_FAILURE;
1377 } else {
1378 /*
1379 * decrement legacy external count from the top level and reflect
1380 * into internal for this and all subsequent updates.
1381 */
1382 assert(ipc_importance_task_is_any_receiver_type(task_imp));
1383 assert(IIT_EXTERN(task_imp) >= count);
1384
1385 task_imp->iit_legacy_externdrop += count;
1386 task_imp->iit_externdrop += count;
1387
1388 /* reset extern counters (if appropriate) */
1389 if (IIT_LEGACY_EXTERN(task_imp) == 0) {
1390 if (IIT_EXTERN(task_imp) != 0) {
1391 task_imp->iit_externcnt -= task_imp->iit_legacy_externcnt;
1392 task_imp->iit_externdrop -= task_imp->iit_legacy_externdrop;
1393 } else {
1394 task_imp->iit_externcnt = 0;
1395 task_imp->iit_externdrop = 0;
1396 }
1397 task_imp->iit_legacy_externcnt = 0;
1398 task_imp->iit_legacy_externdrop = 0;
1399 }
1400
1401 /* reflect the drop to the internal assertion count (and effect any importance change) */
1402 if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_DROP, count)) {
1403 ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, TRUE);
1404 }
1405 ret = KERN_SUCCESS;
1406 }
1407
1408 #if IMPORTANCE_DEBUG
1409 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_DROP | TASK_POLICY_EXTERNAL))) | DBG_FUNC_END,
1410 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
1411 #endif
1412
1413 ipc_importance_unlock();
1414
1415 /* delayed printf for user-supplied data failures */
1416 if (KERN_FAILURE == ret && TASK_NULL != target_task) {
1417 printf("BUG in process %s[%d]: over-released legacy external boost assertions (%d total, %d external, %d legacy-external)\n",
1418 proc_name_address(target_task->bsd_info), task_pid(target_task),
1419 target_assertcnt, target_externcnt, target_legacycnt);
1420 }
1421
1422 return(ret);
1423 }
1424
1425
1426
1427 /* Transfer an assertion to legacy userspace responsibility */
1428 static kern_return_t
1429 ipc_importance_task_externalize_legacy_assertion(ipc_importance_task_t task_imp, uint32_t count, __unused int sender_pid)
1430 {
1431 task_t target_task;
1432
1433 assert(IIT_NULL != task_imp);
1434 target_task = task_imp->iit_task;
1435
1436 if (TASK_NULL == target_task ||
1437 !ipc_importance_task_is_any_receiver_type(task_imp)) {
1438 return KERN_FAILURE;
1439 }
1440
1441 #if IMPORTANCE_DEBUG
1442 int target_pid = task_pid(target_task);
1443
1444 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, IMP_EXTERN)) | DBG_FUNC_START,
1445 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
1446 #endif
1447
1448 ipc_importance_lock();
1449 /* assert(task_imp->iit_assertcnt >= IIT_EXTERN(task_imp) + count); */
1450 assert(IIT_EXTERN(task_imp) >= IIT_LEGACY_EXTERN(task_imp));
1451 task_imp->iit_legacy_externcnt += count;
1452 task_imp->iit_externcnt += count;
1453 ipc_importance_unlock();
1454
1455 #if IMPORTANCE_DEBUG
1456 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, IMP_EXTERN)) | DBG_FUNC_END,
1457 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
1458 // This is the legacy boosting path
1459 DTRACE_BOOST5(receive_boost, task_t, target_task, int, target_pid, int, sender_pid, int, count, int, IIT_LEGACY_EXTERN(task_imp));
1460 #endif /* IMPORTANCE_DEBUG */
1461
1462 return(KERN_SUCCESS);
1463 }
1464
1465 /*
1466 * Routine: ipc_importance_task_update_live_donor
1467 * Purpose:
1468 * Read the live donor status and update the live_donor bit/propagate the change in importance.
1469 * Conditions:
1470 * Nothing locked on entrance, nothing locked on exit.
1471 *
1472 * TODO: Need tracepoints around this function...
1473 */
1474 void
1475 ipc_importance_task_update_live_donor(ipc_importance_task_t task_imp)
1476 {
1477 uint32_t task_live_donor;
1478 boolean_t before_donor;
1479 boolean_t after_donor;
1480 task_t target_task;
1481
1482 assert(task_imp != NULL);
1483
1484 /*
1485 * Nothing to do if the task is not marked as expecting
1486 * live donor updates.
1487 */
1488 if (!ipc_importance_task_is_marked_live_donor(task_imp)) {
1489 return;
1490 }
1491
1492 ipc_importance_lock();
1493
1494 /* If the task got disconnected on the way here, no use (or ability) adjusting live donor status */
1495 target_task = task_imp->iit_task;
1496 if (TASK_NULL == target_task) {
1497 ipc_importance_unlock();
1498 return;
1499 }
1500 before_donor = ipc_importance_task_is_marked_donor(task_imp);
1501
1502 /* snapshot task live donor status - may change, but another call will accompany the change */
1503 task_live_donor = target_task->effective_policy.tep_live_donor;
1504
1505 #if IMPORTANCE_DEBUG
1506 int target_pid = task_pid(target_task);
1507
1508 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1509 (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_UPDATE_LIVE_DONOR_STATE)) | DBG_FUNC_START,
1510 target_pid, task_imp->iit_donor, task_live_donor, before_donor, 0);
1511 #endif
1512
1513 /* update the task importance live donor status based on the task's value */
1514 task_imp->iit_donor = task_live_donor;
1515
1516 after_donor = ipc_importance_task_is_marked_donor(task_imp);
1517
1518 /* Has the effectiveness of being a donor changed as a result of this update? */
1519 if (before_donor != after_donor) {
1520 iit_update_type_t type;
1521
1522 /* propagate assertions without updating the current task policy (already handled) */
1523 if (0 == before_donor) {
1524 task_imp->iit_transitions++;
1525 type = IIT_UPDATE_HOLD;
1526 } else {
1527 type = IIT_UPDATE_DROP;
1528 }
1529 ipc_importance_task_propagate_assertion_locked(task_imp, type, FALSE);
1530 }
1531
1532 #if IMPORTANCE_DEBUG
1533 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1534 (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_UPDATE_LIVE_DONOR_STATE)) | DBG_FUNC_END,
1535 target_pid, task_imp->iit_donor, task_live_donor, after_donor, 0);
1536 #endif
1537
1538 ipc_importance_unlock();
1539 }
1540
1541
1542 /*
1543 * Routine: ipc_importance_task_mark_donor
1544 * Purpose:
1545 * Set the task importance donor flag.
1546 * Conditions:
1547 * Nothing locked on entrance, nothing locked on exit.
1548 *
1549 * This is only called while the task is being constructed,
1550 * so no need to update task policy or propagate downstream.
1551 */
1552 void
1553 ipc_importance_task_mark_donor(ipc_importance_task_t task_imp, boolean_t donating)
1554 {
1555 assert(task_imp != NULL);
1556
1557 ipc_importance_lock();
1558
1559 int old_donor = task_imp->iit_donor;
1560
1561 task_imp->iit_donor = (donating ? 1 : 0);
1562
1563 if (task_imp->iit_donor > 0 && old_donor == 0)
1564 task_imp->iit_transitions++;
1565
1566 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1567 (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_INIT_DONOR_STATE)) | DBG_FUNC_NONE,
1568 task_pid(task_imp->iit_task), donating,
1569 old_donor, task_imp->iit_donor, 0);
1570
1571 ipc_importance_unlock();
1572 }
1573
1574 /*
1575 * Routine: ipc_importance_task_marked_donor
1576 * Purpose:
1577 * Query the donor flag for the given task importance.
1578 * Conditions:
1579 * May be called without taking the importance lock.
1580 * In that case, donor status can change so you must
1581 * check only once for each donation event.
1582 */
1583 boolean_t
1584 ipc_importance_task_is_marked_donor(ipc_importance_task_t task_imp)
1585 {
1586 if (IIT_NULL == task_imp) {
1587 return FALSE;
1588 }
1589 return (0 != task_imp->iit_donor);
1590 }
1591
1592 /*
1593 * Routine: ipc_importance_task_mark_live_donor
1594 * Purpose:
1595 * Indicate that the task is eligible for live donor updates.
1596 * Conditions:
1597 * Nothing locked on entrance, nothing locked on exit.
1598 *
1599 * This is only called while the task is being constructed.
1600 */
1601 void
1602 ipc_importance_task_mark_live_donor(ipc_importance_task_t task_imp, boolean_t live_donating)
1603 {
1604 assert(task_imp != NULL);
1605
1606 ipc_importance_lock();
1607 task_imp->iit_live_donor = (live_donating ? 1 : 0);
1608 ipc_importance_unlock();
1609 }
1610
1611 /*
1612 * Routine: ipc_importance_task_marked_live_donor
1613 * Purpose:
1614 * Query the live donor and donor flags for the given task importance.
1615 * Conditions:
1616 * May be called without taking the importance lock.
1617 * In that case, donor status can change so you must
1618 * check only once for each donation event.
1619 */
1620 boolean_t
1621 ipc_importance_task_is_marked_live_donor(ipc_importance_task_t task_imp)
1622 {
1623 if (IIT_NULL == task_imp) {
1624 return FALSE;
1625 }
1626 return (0 != task_imp->iit_live_donor);
1627 }
1628
1629 /*
1630 * Routine: ipc_importance_task_is_donor
1631 * Purpose:
1632 * Query the full donor status for the given task importance.
1633 * Conditions:
1634 * May be called without taking the importance lock.
1635 * In that case, donor status can change so you must
1636 * check only once for each donation event.
1637 */
1638 boolean_t
1639 ipc_importance_task_is_donor(ipc_importance_task_t task_imp)
1640 {
1641 if (IIT_NULL == task_imp) {
1642 return FALSE;
1643 }
1644 return (ipc_importance_task_is_marked_donor(task_imp) ||
1645 (ipc_importance_task_is_marked_receiver(task_imp) &&
1646 task_imp->iit_assertcnt > 0));
1647 }
1648
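/*
 * Editor's note: illustrative sketch added for this edit, not part of the
 * original file.  Since donor status can change at any moment, a
 * (hypothetical) send-side decision samples ipc_importance_task_is_donor()
 * exactly once and acts on that snapshot.
 */
#if 0	/* illustrative sketch only */
static void
example_donation_decision(ipc_importance_task_t sender_imp)
{
	/* one snapshot per donation event; do not re-read mid-decision */
	boolean_t donate = ipc_importance_task_is_donor(sender_imp);

	if (donate) {
		/* ... arrange for the outgoing message to carry importance ... */
	}
}
#endif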
1649 /*
1650 * Routine: ipc_importance_task_is_never_donor
1651 * Purpose:
1652 * Query if a given task can ever donate importance.
1653 * Conditions:
1654 * May be called without taking the importance lock.
1655 * Condition is permanent for a given task.
1656 */
1657 boolean_t
1658 ipc_importance_task_is_never_donor(ipc_importance_task_t task_imp)
1659 {
1660 if (IIT_NULL == task_imp) {
1661 return FALSE;
1662 }
1663 return (!ipc_importance_task_is_marked_donor(task_imp) &&
1664 !ipc_importance_task_is_marked_live_donor(task_imp) &&
1665 !ipc_importance_task_is_marked_receiver(task_imp));
1666 }
1667
1668 /*
1669 * Routine: ipc_importance_task_mark_receiver
1670 * Purpose:
1671 * Update the task importance receiver flag.
1672 * Conditions:
1673 * Nothing locked on entrance, nothing locked on exit.
1674 * This can only be invoked before the task is discoverable,
1675 * so no worries about atomicity(?)
1676 */
1677 void
1678 ipc_importance_task_mark_receiver(ipc_importance_task_t task_imp, boolean_t receiving)
1679 {
1680 assert(task_imp != NULL);
1681
1682 ipc_importance_lock();
1683 if (receiving) {
1684 assert(task_imp->iit_assertcnt == 0);
1685 assert(task_imp->iit_externcnt == 0);
1686 assert(task_imp->iit_externdrop == 0);
1687 assert(task_imp->iit_denap == 0);
1688 task_imp->iit_receiver = 1; /* task can receive importance boost */
1689 } else if (task_imp->iit_receiver) {
1690 assert(task_imp->iit_denap == 0);
1691 if (task_imp->iit_assertcnt != 0 || IIT_EXTERN(task_imp) != 0) {
1692 panic("disabling imp_receiver on task with pending importance boosts!");
1693 }
1694 task_imp->iit_receiver = 0;
1695 }
1696 ipc_importance_unlock();
1697 }
1698
1699
1700 /*
1701 * Routine: ipc_importance_task_is_marked_receiver
1702 * Purpose:
1703 * Query the receiver flag for the given task importance.
1704 * Conditions:
1705 * May be called without taking the importance lock as
1706 * the importance flag can never change after task init.
1707 */
1708 boolean_t
1709 ipc_importance_task_is_marked_receiver(ipc_importance_task_t task_imp)
1710 {
1711 return (IIT_NULL != task_imp && 0 != task_imp->iit_receiver);
1712 }
1713
1714
1715 /*
1716 * Routine: ipc_importance_task_mark_denap_receiver
1717 * Purpose:
1718 * Update the task importance de-nap receiver flag.
1719 * Conditions:
1720 * Nothing locked on entrance, nothing locked on exit.
1721 * This can only be invoked before the task is discoverable,
1722 * so atomicity should not be a concern.
1723 */
1724 void
1725 ipc_importance_task_mark_denap_receiver(ipc_importance_task_t task_imp, boolean_t denap)
1726 {
1727 assert(task_imp != NULL);
1728
1729 ipc_importance_lock();
1730 if (denap) {
1731 assert(task_imp->iit_assertcnt == 0);
1732 assert(task_imp->iit_externcnt == 0);
1733 assert(task_imp->iit_receiver == 0);
1734 task_imp->iit_denap = 1; /* task can receive de-nap boost */
1735 } else if (task_imp->iit_denap) {
1736 assert(task_imp->iit_receiver == 0);
1737 if (0 < task_imp->iit_assertcnt || 0 < IIT_EXTERN(task_imp)) {
1738 panic("disabling de-nap on task with pending de-nap boosts!");
1739 }
1740 task_imp->iit_denap = 0;
1741 }
1742 ipc_importance_unlock();
1743 }
1744
1745
1746 /*
1747 * Routine: ipc_importance_task_is_marked_denap_receiver
1748 * Purpose:
1749 * Query the de-nap receiver flag for the given task importance.
1750 * Conditions:
1751 * May be called without taking the importance lock as
1752 * the de-nap flag can never change after task init.
1753 */
1754 boolean_t
1755 ipc_importance_task_is_marked_denap_receiver(ipc_importance_task_t task_imp)
1756 {
1757 return (IIT_NULL != task_imp && 0 != task_imp->iit_denap);
1758 }
1759
1760 /*
1761 * Routine: ipc_importance_task_is_denap_receiver
1762 * Purpose:
1763 * Query the full de-nap receiver status for the given task importance.
1764 * For now, that is simply whether the receiver flag is set.
1765 * Conditions:
1766 * May be called without taking the importance lock as
1767 * the de-nap receiver flag can never change after task init.
1768 */
1769 boolean_t
1770 ipc_importance_task_is_denap_receiver(ipc_importance_task_t task_imp)
1771 {
1772 return (ipc_importance_task_is_marked_denap_receiver(task_imp));
1773 }
1774
1775 /*
1776 * Routine: ipc_importance_task_is_any_receiver_type
1777 * Purpose:
1778 * Query if the task is marked to receive boosts - either
1779 * importance or denap.
1780 * Conditions:
1781 * May be called without taking the importance lock as both
1782 * the importance and de-nap receiver flags can never change
1783 * after task init.
1784 */
1785 boolean_t
1786 ipc_importance_task_is_any_receiver_type(ipc_importance_task_t task_imp)
1787 {
1788 return (ipc_importance_task_is_marked_receiver(task_imp) ||
1789 ipc_importance_task_is_marked_denap_receiver(task_imp));
1790 }
1791
1792 #if 0 /* currently unused */
1793
1794 /*
1795 * Routine: ipc_importance_inherit_reference
1796 * Purpose:
1797 * Add a reference to the inherit importance element.
1798 * Conditions:
1799 * Caller must hold a reference on the inherit element.
1800 */
1801 static inline void
1802 ipc_importance_inherit_reference(ipc_importance_inherit_t inherit)
1803 {
1804 ipc_importance_reference(&inherit->iii_elem);
1805 }
1806 #endif /* currently unused */
1807
1808 /*
1809 * Routine: ipc_importance_inherit_release_locked
1810 * Purpose:
1811 * Release a reference on an inherit importance attribute value,
1812 * unlinking and deallocating the attribute if the last reference.
1813 * Conditions:
1814 * Entered with importance lock held, leaves with it unlocked.
1815 */
1816 static inline void
1817 ipc_importance_inherit_release_locked(ipc_importance_inherit_t inherit)
1818 {
1819 ipc_importance_release_locked(&inherit->iii_elem);
1820 }
1821
1822 #if 0 /* currently unused */
1823 /*
1824 * Routine: ipc_importance_inherit_release
1825 * Purpose:
1826 * Release a reference on an inherit importance attribute value,
1827 * unlinking and deallocating the attribute if the last reference.
1828 * Conditions:
1829 * nothing locked on entrance, nothing locked on exit.
1830 * May block.
1831 */
1832 void
1833 ipc_importance_inherit_release(ipc_importance_inherit_t inherit)
1834 {
1835 if (III_NULL != inherit)
1836 ipc_importance_release(&inherit->iii_elem);
1837 }
1838 #endif /* 0 currently unused */
1839
1840 /*
1841 * Routine: ipc_importance_for_task
1842 * Purpose:
1843 * Create a reference for the specified task's base importance
1844 * element. If the base importance element doesn't exist, make it and
1845 * bind it to the active task. If the task is inactive, there isn't
1846 * any need to return a new reference.
1847 * Conditions:
1848 * If made is true, a "made" reference is returned (for donating to
1849 * the voucher system). Otherwise an internal reference is returned.
1850 *
1851 * Nothing locked on entry. May block.
1852 */
1853 ipc_importance_task_t
1854 ipc_importance_for_task(task_t task, boolean_t made)
1855 {
1856 ipc_importance_task_t task_elem;
1857 boolean_t first_pass = TRUE;
1858
1859 assert(TASK_NULL != task);
1860
1861 retry:
1862 /* No use returning anything for inactive task */
1863 if (!task->active)
1864 return IIT_NULL;
1865
1866 ipc_importance_lock();
1867 task_elem = task->task_imp_base;
1868 if (IIT_NULL != task_elem) {
1869 /* Add a made reference (borrowing active task ref to do it) */
1870 if (made) {
1871 if (0 == task_elem->iit_made++) {
1872 assert(IIT_REFS_MAX > IIT_REFS(task_elem));
1873 ipc_importance_task_reference_internal(task_elem);
1874 }
1875 } else {
1876 assert(IIT_REFS_MAX > IIT_REFS(task_elem));
1877 ipc_importance_task_reference_internal(task_elem);
1878 }
1879 ipc_importance_unlock();
1880 return task_elem;
1881 }
1882 ipc_importance_unlock();
1883
1884 if (!first_pass)
1885 return IIT_NULL;
1886 first_pass = FALSE;
1887
1888 /* Need to make one - may race with others (be prepared to drop) */
1889 task_elem = (ipc_importance_task_t)zalloc(ipc_importance_task_zone);
1890 if (IIT_NULL == task_elem)
1891 goto retry;
1892
1893 task_elem->iit_bits = IIE_TYPE_TASK | 2; /* one for task, one for return/made */
1894 task_elem->iit_made = (made) ? 1 : 0;
1895 task_elem->iit_task = task; /* take actual ref when we're sure */
1896 task_elem->iit_updateq = NULL;
1897 task_elem->iit_receiver = 0;
1898 task_elem->iit_denap = 0;
1899 task_elem->iit_donor = 0;
1900 task_elem->iit_live_donor = 0;
1901 task_elem->iit_updatepolicy = 0;
1902 task_elem->iit_reserved = 0;
1903 task_elem->iit_filelocks = 0;
1904 task_elem->iit_updatetime = 0;
1905 task_elem->iit_transitions = 0;
1906 task_elem->iit_assertcnt = 0;
1907 task_elem->iit_externcnt = 0;
1908 task_elem->iit_externdrop = 0;
1909 task_elem->iit_legacy_externcnt = 0;
1910 task_elem->iit_legacy_externdrop = 0;
1911 #if IIE_REF_DEBUG
1912 ipc_importance_counter_init(&task_elem->iit_elem);
1913 #endif
1914 queue_init(&task_elem->iit_kmsgs);
1915 queue_init(&task_elem->iit_inherits);
1916
1917 ipc_importance_lock();
1918 if (!task->active) {
1919 ipc_importance_unlock();
1920 zfree(ipc_importance_task_zone, task_elem);
1921 return IIT_NULL;
1922 }
1923
1924 /* did we lose the race? */
1925 if (IIT_NULL != task->task_imp_base) {
1926 ipc_importance_unlock();
1927 zfree(ipc_importance_task_zone, task_elem);
1928 goto retry;
1929 }
1930
1931 /* we won the race */
1932 task->task_imp_base = task_elem;
1933 task_reference(task);
1934 #if DEVELOPMENT || DEBUG
1935 queue_enter(&global_iit_alloc_queue, task_elem, ipc_importance_task_t, iit_allocation);
1936 task_importance_update_owner_info(task);
1937 #endif
1938 ipc_importance_unlock();
1939
1940 return task_elem;
1941 }
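/*
 * Note on the allocation pattern above: the new task importance element
 * is filled in before re-taking the importance lock, so the function may
 * race with other creators. If the task went inactive in the meantime the
 * fresh element is zfree'd and IIT_NULL is returned; if another thread
 * installed task->task_imp_base first, the fresh element is zfree'd and
 * the lookup is retried via the "retry" label, rather than holding the
 * importance lock across zalloc().
 */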
1942
1943 #if DEVELOPMENT || DEBUG
1944 void task_importance_update_owner_info(task_t task) {
1945
1946 if (task != TASK_NULL && task->task_imp_base != IIT_NULL) {
1947 ipc_importance_task_t task_elem = task->task_imp_base;
1948
1949 task_elem->iit_bsd_pid = task_pid(task);
1950 if (task->bsd_info) {
1951 strncpy(&task_elem->iit_procname[0], proc_name_address(task->bsd_info), 16);
1952 task_elem->iit_procname[16] = '\0';
1953 } else {
1954 strncpy(&task_elem->iit_procname[0], "unknown", 16);
1955 }
1956 }
1957 }
1958 #endif
1959
1960 /*
1961 * Routine: ipc_importance_reset_locked
1962 * Purpose:
1963 * Reset a task's IPC importance (the task is going away or exec'ing)
1964 *
1965 * Remove the donor bit and legacy externalized assertions from the
1966 * current task importance and see if that wipes out downstream donations.
1967 * Conditions:
1968 * importance lock held.
1969 */
1970
1971 static void
1972 ipc_importance_reset_locked(ipc_importance_task_t task_imp, boolean_t donor)
1973 {
1974 boolean_t before_donor, after_donor;
1975
1976 /* remove the donor bit, live-donor bit and externalized boosts */
1977 before_donor = ipc_importance_task_is_donor(task_imp);
1978 if (donor) {
1979 task_imp->iit_donor = 0;
1980 }
1981 assert(IIT_LEGACY_EXTERN(task_imp) <= IIT_EXTERN(task_imp));
1982 assert(task_imp->iit_legacy_externcnt <= task_imp->iit_externcnt);
1983 assert(task_imp->iit_legacy_externdrop <= task_imp->iit_externdrop);
1984 task_imp->iit_externcnt -= task_imp->iit_legacy_externcnt;
1985 task_imp->iit_externdrop -= task_imp->iit_legacy_externdrop;
1986
1987 /* assert(IIT_LEGACY_EXTERN(task_imp) <= task_imp->iit_assertcnt); */
1988 if (IIT_EXTERN(task_imp) < task_imp->iit_assertcnt) {
1989 task_imp->iit_assertcnt -= IIT_LEGACY_EXTERN(task_imp);
1990 } else {
1991 task_imp->iit_assertcnt = IIT_EXTERN(task_imp);
1992 }
1993 task_imp->iit_legacy_externcnt = 0;
1994 task_imp->iit_legacy_externdrop = 0;
1995 after_donor = ipc_importance_task_is_donor(task_imp);
1996
1997 #if DEVELOPMENT || DEBUG
1998 if (task_imp->iit_assertcnt > 0 && task_imp->iit_live_donor) {
1999 printf("Live donor task %s[%d] still has %d importance assertions after reset\n",
2000 task_imp->iit_procname, task_imp->iit_bsd_pid, task_imp->iit_assertcnt);
2001 }
2002 #endif
2003
2004 /* propagate a downstream drop if there was a change in donor status */
2005 if (after_donor != before_donor) {
2006 ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, FALSE);
2007 }
2008 }
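/*
 * Worked example of the legacy accounting above (illustrative values,
 * assuming IIT_EXTERN()/IIT_LEGACY_EXTERN() are the respective
 * count-minus-drop deltas): with externcnt=5, externdrop=1,
 * legacy_externcnt=3, legacy_externdrop=1 and assertcnt=4, the reset
 * leaves externcnt=2, externdrop=0 (so IIT_EXTERN()==2); since that is
 * still below assertcnt, assertcnt is reduced by the legacy delta (2)
 * down to 2, and the legacy counters are then zeroed.
 */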
2009
2010 /*
2011 * Routine: ipc_importance_reset
2012 * Purpose:
2013 * Reset a task's IPC importance
2014 *
2015 * The task is being reset, although staying around. Arrange to have the
2016 * external state of the task reset from the importance.
2017 * Conditions:
2018 * importance lock not held.
2019 */
2020
2021 void
2022 ipc_importance_reset(ipc_importance_task_t task_imp, boolean_t donor)
2023 {
2024 if (IIT_NULL == task_imp) {
2025 return;
2026 }
2027 ipc_importance_lock();
2028 ipc_importance_reset_locked(task_imp, donor);
2029 ipc_importance_unlock();
2030 }
2031
2032 /*
2033 * Routine: ipc_importance_disconnect_task
2034 * Purpose:
2035 * Disconnect a task from its importance.
2036 *
2037 * Clear the task pointer from the importance and drop the
2038 * reference the task held on the importance object. Before
2039 * doing that, reset the effects the current task holds on
2040 * the importance and see if that wipes out downstream donations.
2041 *
2042 * We allow the upstream boosts to continue to affect downstream
2043 * even though the local task is being effectively pulled from
2044 * the chain.
2045 * Conditions:
2046 * Nothing locked.
2047 */
2048 void
2049 ipc_importance_disconnect_task(task_t task)
2050 {
2051 ipc_importance_task_t task_imp;
2052
2053 task_lock(task);
2054 ipc_importance_lock();
2055 task_imp = task->task_imp_base;
2056
2057 /* did somebody beat us to it? */
2058 if (IIT_NULL == task_imp) {
2059 ipc_importance_unlock();
2060 task_unlock(task);
2061 return;
2062 }
2063
2064 /* disconnect the task from this importance */
2065 assert(task_imp->iit_task == task);
2066 task_imp->iit_task = TASK_NULL;
2067 task->task_imp_base = IIT_NULL;
2068 task_unlock(task);
2069
2070 /* reset the effects the current task holds on the importance */
2071 ipc_importance_reset_locked(task_imp, TRUE);
2072
2073 ipc_importance_task_release_locked(task_imp);
2074 /* importance unlocked */
2075
2076 /* deallocate the task now that the importance is unlocked */
2077 task_deallocate(task);
2078 }
2079
2080 /*
2081 * Routine: ipc_importance_check_circularity
2082 * Purpose:
2083 * Check if queueing "port" in a message for "dest"
2084 * would create a circular group of ports and messages.
2085 *
2086 * If no circularity (FALSE returned), then "port"
2087 * is changed from "in limbo" to "in transit".
2088 *
2089 * That is, we want to set port->ip_destination == dest,
2090 * but guaranteeing that this doesn't create a circle
2091 * port->ip_destination->ip_destination->... == port
2092 *
2093 * Additionally, if port was successfully changed to "in transit",
2094 * propagate boost assertions from the "in limbo" port to all
2095 * the ports in the chain, and, if the destination task accepts
2096 * boosts, to the destination task.
2097 *
2098 * Conditions:
2099 * No ports locked. References held for "port" and "dest".
2100 */
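/*
 * Informal outline of the check below: after an optimistic attempt that
 * only locks "port" and "dest", the code serializes with
 * ipc_port_multiple_lock(), walks dest->ip_destination->... to the end
 * of the chain ("base"), and declares circularity if that walk ends at
 * "port" itself. On the non-circular path the port's pending boost count
 * (ip_impcount) is pushed down every port in the chain and, if the chain
 * ends at a boost-accepting receiver, converted into task assertions.
 */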
2101
2102 boolean_t
2103 ipc_importance_check_circularity(
2104 ipc_port_t port,
2105 ipc_port_t dest)
2106 {
2107 ipc_importance_task_t imp_task = IIT_NULL;
2108 ipc_importance_task_t release_imp_task = IIT_NULL;
2109 boolean_t imp_lock_held = FALSE;
2110 int assertcnt = 0;
2111 ipc_port_t base;
2112
2113 assert(port != IP_NULL);
2114 assert(dest != IP_NULL);
2115
2116 if (port == dest)
2117 return TRUE;
2118 base = dest;
2119
2120 /* port is in limbo, so donation status is safe to latch */
2121 if (port->ip_impdonation != 0) {
2122 imp_lock_held = TRUE;
2123 ipc_importance_lock();
2124 }
2125
2126 /*
2127 * First try a quick check that can run in parallel.
2128 * No circularity if dest is not in transit.
2129 */
2130 ip_lock(port);
2131
2132 /*
2133 * Even if port is just carrying assertions for others,
2134 * we need the importance lock.
2135 */
2136 if (port->ip_impcount > 0 && !imp_lock_held) {
2137 if (!ipc_importance_lock_try()) {
2138 ip_unlock(port);
2139 ipc_importance_lock();
2140 ip_lock(port);
2141 }
2142 imp_lock_held = TRUE;
2143 }
2144
2145 if (ip_lock_try(dest)) {
2146 if (!ip_active(dest) ||
2147 (dest->ip_receiver_name != MACH_PORT_NULL) ||
2148 (dest->ip_destination == IP_NULL))
2149 goto not_circular;
2150
2151 /* dest is in transit; further checking necessary */
2152
2153 ip_unlock(dest);
2154 }
2155 ip_unlock(port);
2156
2157 /*
2158 * We're about to pay the cost to serialize,
2159 * just go ahead and grab importance lock.
2160 */
2161 if (!imp_lock_held) {
2162 ipc_importance_lock();
2163 imp_lock_held = TRUE;
2164 }
2165
2166 ipc_port_multiple_lock(); /* massive serialization */
2167
2168 /*
2169 * Search for the end of the chain (a port not in transit),
2170 * acquiring locks along the way.
2171 */
2172
2173 for (;;) {
2174 ip_lock(base);
2175
2176 if (!ip_active(base) ||
2177 (base->ip_receiver_name != MACH_PORT_NULL) ||
2178 (base->ip_destination == IP_NULL))
2179 break;
2180
2181 base = base->ip_destination;
2182 }
2183
2184 /* all ports in chain from dest to base, inclusive, are locked */
2185
2186 if (port == base) {
2187 /* circularity detected! */
2188
2189 ipc_port_multiple_unlock();
2190
2191 /* port (== base) is in limbo */
2192
2193 assert(ip_active(port));
2194 assert(port->ip_receiver_name == MACH_PORT_NULL);
2195 assert(port->ip_destination == IP_NULL);
2196
2197 while (dest != IP_NULL) {
2198 ipc_port_t next;
2199
2200 /* dest is in transit or in limbo */
2201
2202 assert(ip_active(dest));
2203 assert(dest->ip_receiver_name == MACH_PORT_NULL);
2204
2205 next = dest->ip_destination;
2206 ip_unlock(dest);
2207 dest = next;
2208 }
2209
2210 if (imp_lock_held)
2211 ipc_importance_unlock();
2212
2213 return TRUE;
2214 }
2215
2216 /*
2217 * The guarantee: lock port while the entire chain is locked.
2218 * Once port is locked, we can take a reference to dest,
2219 * add port to the chain, and unlock everything.
2220 */
2221
2222 ip_lock(port);
2223 ipc_port_multiple_unlock();
2224
2225 not_circular:
2226
2227 /* port is in limbo */
2228
2229 assert(ip_active(port));
2230 assert(port->ip_receiver_name == MACH_PORT_NULL);
2231 assert(port->ip_destination == IP_NULL);
2232
2233 ip_reference(dest);
2234 port->ip_destination = dest;
2235
2236 /* must have been in limbo or still bound to a task */
2237 assert(port->ip_tempowner != 0);
2238
2239 /*
2240 * We delayed dropping assertions from a specific task.
2241 * Cache that info now (we'll drop assertions and the
2242 * task reference below).
2243 */
2244 release_imp_task = port->ip_imp_task;
2245 if (IIT_NULL != release_imp_task) {
2246 port->ip_imp_task = IIT_NULL;
2247 }
2248 assertcnt = port->ip_impcount;
2249
2250 /* take the port out of limbo w.r.t. assertions */
2251 port->ip_tempowner = 0;
2252
2253 /* now unlock chain */
2254
2255 ip_unlock(port);
2256
2257 for (;;) {
2258
2259 /* every port along the chain tracks assertions behind it */
2260 ipc_port_impcount_delta(dest, assertcnt, base);
2261
2262 if (dest == base)
2263 break;
2264
2265 /* port is in transit */
2266
2267 assert(ip_active(dest));
2268 assert(dest->ip_receiver_name == MACH_PORT_NULL);
2269 assert(dest->ip_destination != IP_NULL);
2270 assert(dest->ip_tempowner == 0);
2271
2272 port = dest->ip_destination;
2273 ip_unlock(dest);
2274 dest = port;
2275 }
2276
2277 /* base is not in transit */
2278 assert(!ip_active(base) ||
2279 (base->ip_receiver_name != MACH_PORT_NULL) ||
2280 (base->ip_destination == IP_NULL));
2281
2282 /*
2283 * Find the task to boost (if any).
2284 * We will boost "through" ports that don't know
2285 * about inheritance to deliver receive rights that
2286 * do.
2287 */
2288 if (ip_active(base) && (assertcnt > 0)) {
2289 assert(imp_lock_held);
2290 if (base->ip_tempowner != 0) {
2291 if (IIT_NULL != base->ip_imp_task) {
2292 /* specified tempowner task */
2293 imp_task = base->ip_imp_task;
2294 assert(ipc_importance_task_is_any_receiver_type(imp_task));
2295 }
2296 /* otherwise don't boost current task */
2297
2298 } else if (base->ip_receiver_name != MACH_PORT_NULL) {
2299 ipc_space_t space = base->ip_receiver;
2300
2301 /* only spaces with boost-accepting tasks */
2302 if (space->is_task != TASK_NULL &&
2303 ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base))
2304 imp_task = space->is_task->task_imp_base;
2305 }
2306
2307 /* take reference before unlocking base */
2308 if (imp_task != IIT_NULL) {
2309 ipc_importance_task_reference(imp_task);
2310 }
2311 }
2312
2313 ip_unlock(base);
2314
2315 /*
2316 * Transfer assertions now that the ports are unlocked.
2317 * Avoid extra overhead if transferring to/from the same task.
2318 *
2319 * NOTE: If a transfer is occurring, the new assertions will
2320 * be added to imp_task BEFORE the importance lock is unlocked.
2321 * This is critical - to avoid decrements coming from the kmsgs
2322 * beating the increment to the task.
2323 */
2324 boolean_t transfer_assertions = (imp_task != release_imp_task);
2325
2326 if (imp_task != IIT_NULL) {
2327 assert(imp_lock_held);
2328 if (transfer_assertions)
2329 ipc_importance_task_hold_internal_assertion_locked(imp_task, assertcnt);
2330 }
2331
2332 if (release_imp_task != IIT_NULL) {
2333 assert(imp_lock_held);
2334 if (transfer_assertions)
2335 ipc_importance_task_drop_internal_assertion_locked(release_imp_task, assertcnt);
2336 }
2337
2338 if (imp_lock_held)
2339 ipc_importance_unlock();
2340
2341 if (imp_task != IIT_NULL)
2342 ipc_importance_task_release(imp_task);
2343
2344 if (release_imp_task != IIT_NULL)
2345 ipc_importance_task_release(release_imp_task);
2346
2347 return FALSE;
2348 }
2349
2350 /*
2351 * Routine: ipc_importance_send
2352 * Purpose:
2353 * Post the importance voucher attribute [if sent] or a static
2354 * importance boost depending upon options and conditions.
2355 * Conditions:
2356 * Destination port locked on entry and exit, may be dropped during the call.
2357 * Returns:
2358 * A boolean identifying if the port lock was temporarily dropped.
2359 */
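/*
 * Informal outline of the send path below: messages sent with
 * MACH_SEND_NOIMPORTANCE, to a port not accepting donation, or from a
 * task that can never donate are left untouched; MACH_SEND_IMPORTANCE
 * forces a static boost. Otherwise the kmsg is linked to an importance
 * element - preferably one carried in the message's voucher that is
 * associated with the sending task, else the sender's own task element -
 * and, if the sender is currently a donor, the boost is applied to the
 * destination port/task. The port lock may be dropped and re-taken while
 * acquiring the importance lock, which is what the return value reports.
 */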
2360 boolean_t
2361 ipc_importance_send(
2362 ipc_kmsg_t kmsg,
2363 mach_msg_option_t option)
2364 {
2365 ipc_port_t port = (ipc_port_t) kmsg->ikm_header->msgh_remote_port;
2366 boolean_t port_lock_dropped = FALSE;
2367 ipc_importance_elem_t elem;
2368 task_t task;
2369 ipc_importance_task_t task_imp;
2370 kern_return_t kr;
2371
2372 assert(IP_VALID(port));
2373
2374 /* If no donation to be made, return quickly */
2375 if ((port->ip_impdonation == 0) ||
2376 (option & MACH_SEND_NOIMPORTANCE) != 0) {
2377 return port_lock_dropped;
2378 }
2379
2380 task = current_task();
2381
2382 /* If forced sending a static boost, go update the port */
2383 if ((option & MACH_SEND_IMPORTANCE) != 0) {
2384 kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;
2385 goto portupdate;
2386 }
2387
2388 task_imp = task->task_imp_base;
2389 assert(IIT_NULL != task_imp);
2390
2391 /* If the sender can never donate importance, nothing to do */
2392 if (ipc_importance_task_is_never_donor(task_imp)) {
2393 return port_lock_dropped;
2394 }
2395
2396 elem = IIE_NULL;
2397
2398 /* If importance receiver and passing a voucher, look for importance in there */
2399 if (IP_VALID(kmsg->ikm_voucher) &&
2400 ipc_importance_task_is_marked_receiver(task_imp)) {
2401 mach_voucher_attr_value_handle_t vals[MACH_VOUCHER_ATTR_VALUE_MAX_NESTED];
2402 mach_voucher_attr_value_handle_array_size_t val_count;
2403 ipc_voucher_t voucher;
2404
2405 assert(ip_kotype(kmsg->ikm_voucher) == IKOT_VOUCHER);
2406 voucher = (ipc_voucher_t)kmsg->ikm_voucher->ip_kobject;
2407
2408 /* check to see if the voucher has an importance attribute */
2409 val_count = MACH_VOUCHER_ATTR_VALUE_MAX_NESTED;
2410 kr = mach_voucher_attr_control_get_values(ipc_importance_control, voucher,
2411 vals, &val_count);
2412 assert(KERN_SUCCESS == kr);
2413
2414 /*
2415 * Only use importance associated with our task (either directly
2416 * or through an inherit that donates to our task).
2417 */
2418 if (0 < val_count) {
2419 ipc_importance_elem_t check_elem;
2420
2421 check_elem = (ipc_importance_elem_t)vals[0];
2422 assert(IIE_NULL != check_elem);
2423 if (IIE_TYPE_INHERIT == IIE_TYPE(check_elem)) {
2424 ipc_importance_inherit_t inherit;
2425 inherit = (ipc_importance_inherit_t) check_elem;
2426 if (inherit->iii_to_task == task_imp) {
2427 elem = check_elem;
2428 }
2429 } else if (check_elem == (ipc_importance_elem_t)task_imp) {
2430 elem = check_elem;
2431 }
2432 }
2433 }
2434
2435 /* If we haven't found an importance attribute to send yet, use the task's */
2436 if (IIE_NULL == elem) {
2437 elem = (ipc_importance_elem_t)task_imp;
2438 }
2439
2440 /* take a reference for the message to hold */
2441 ipc_importance_reference_internal(elem);
2442
2443 /* acquire the importance lock while trying to hang on to port lock */
2444 if (!ipc_importance_lock_try()) {
2445 port_lock_dropped = TRUE;
2446 ip_unlock(port);
2447 ipc_importance_lock();
2448 }
2449
2450 /* link kmsg onto the donor element propagation chain */
2451 ipc_importance_kmsg_link(kmsg, elem);
2452 /* elem reference transferred to kmsg */
2453
2454 incr_ref_counter(elem->iie_kmsg_refs_added);
2455
2456 /* If the sender isn't currently a donor, no need to apply boost */
2457 if (!ipc_importance_task_is_donor(task_imp)) {
2458 ipc_importance_unlock();
2459
2460 /* re-acquire port lock, if needed */
2461 if (TRUE == port_lock_dropped)
2462 ip_lock(port);
2463
2464 return port_lock_dropped;
2465 }
2466
2467 /* Mark the fact that we are (currently) donating through this message */
2468 kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;
2469
2470 /*
2471 * If we need to relock the port, do it with the importance still locked.
2472 * This assures we get to add the importance boost through the port to
2473 * the task BEFORE anyone else can attempt to undo that operation if
2474 * the sender lost donor status.
2475 */
2476 if (TRUE == port_lock_dropped) {
2477 ip_lock(port);
2478 }
2479
2480 portupdate:
2481
2482 #if IMPORTANCE_DEBUG
2483 if (kdebug_enable) {
2484 mach_msg_max_trailer_t *dbgtrailer = (mach_msg_max_trailer_t *)
2485 ((vm_offset_t)kmsg->ikm_header + round_msg(kmsg->ikm_header->msgh_size));
2486 unsigned int sender_pid = dbgtrailer->msgh_audit.val[5];
2487 mach_msg_id_t imp_msgh_id = kmsg->ikm_header->msgh_id;
2488 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_SEND)) | DBG_FUNC_START,
2489 task_pid(task), sender_pid, imp_msgh_id, 0, 0);
2490 }
2491 #endif /* IMPORTANCE_DEBUG */
2492
2493 mach_port_delta_t delta = 1;
2494 boolean_t need_port_lock;
2495 task_imp = IIT_NULL;
2496
2497 /* adjust port boost count (with importance and port locked) */
2498 need_port_lock = ipc_port_importance_delta_internal(port, IPID_OPTION_NORMAL, &delta, &task_imp);
2499
2500 /* if we need to adjust a task importance as a result, apply that here */
2501 if (IIT_NULL != task_imp && delta != 0) {
2502 assert(delta == 1);
2503
2504 /* if this results in a change of state, propagate the transition */
2505 if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_HOLD, delta)) {
2506
2507 /* can't hold the port lock during task transition(s) */
2508 if (!need_port_lock) {
2509 need_port_lock = TRUE;
2510 ip_unlock(port);
2511 }
2512 ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_HOLD, TRUE);
2513 }
2514 }
2515
2516 ipc_importance_unlock();
2517
2518 if (need_port_lock) {
2519 port_lock_dropped = TRUE;
2520 ip_lock(port);
2521 }
2522
2523 return port_lock_dropped;
2524 }
2525
2526 /*
2527 * Routine: ipc_importance_inherit_from
2528 * Purpose:
2529 * Create a "made" reference for an importance attribute representing
2530 * an inheritance between the sender of a message (if linked) and the
2531 * current task importance. If the message is not linked, a static
2532 * boost may be created, based on the boost state of the message.
2533 *
2534 * Any transfer from kmsg linkage to inherit linkage must be atomic.
2535 *
2536 * If the task is inactive, there isn't any need to return a new reference.
2537 * Conditions:
2538 * Nothing locked on entry. May block.
2539 */
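/*
 * Informal outline of inherit construction below: the kmsg's linked
 * donor element (or the receiving task itself, for a kernel static
 * boost) becomes the "from" element; an existing inherit for the same
 * (from, to-task, depth) pairing is reused when found, otherwise a new
 * one is allocated with the importance lock temporarily dropped. Chains
 * that reach III_DEPTH_MAX are collapsed by inheriting directly from the
 * deep inherit's to-task. Donating inherits are reflected in the
 * receiving task's external boost counts before the port boost is
 * dropped at the end.
 */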
2540 static ipc_importance_inherit_t
2541 ipc_importance_inherit_from(ipc_kmsg_t kmsg)
2542 {
2543 ipc_importance_task_t task_imp = IIT_NULL;
2544 ipc_importance_elem_t from_elem = kmsg->ikm_importance;
2545 ipc_importance_elem_t elem;
2546 task_t task_self = current_task();
2547
2548 ipc_port_t port = kmsg->ikm_header->msgh_remote_port;
2549 ipc_importance_inherit_t inherit = III_NULL;
2550 ipc_importance_inherit_t alloc = III_NULL;
2551 boolean_t cleared_self_donation = FALSE;
2552 boolean_t donating;
2553 uint32_t depth = 1;
2554
2555 /* The kmsg must have an importance donor or static boost to proceed */
2556 if (IIE_NULL == kmsg->ikm_importance &&
2557 !MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) {
2558 return III_NULL;
2559 }
2560
2561 /*
2562 * No need to set up an inherit linkage if the dest isn't a receiver
2563 * of one type or the other.
2564 */
2565 if (!ipc_importance_task_is_any_receiver_type(task_self->task_imp_base)) {
2566 ipc_importance_lock();
2567 goto out_locked;
2568 }
2569
2570 /* Grab a reference on the importance of the destination */
2571 task_imp = ipc_importance_for_task(task_self, FALSE);
2572
2573 ipc_importance_lock();
2574
2575 if (IIT_NULL == task_imp) {
2576 goto out_locked;
2577 }
2578
2579 incr_ref_counter(task_imp->iit_elem.iie_task_refs_added_inherit_from);
2580
2581 /* If message is already associated with an inherit... */
2582 if (IIE_TYPE_INHERIT == IIE_TYPE(from_elem)) {
2583 ipc_importance_inherit_t from_inherit = (ipc_importance_inherit_t)from_elem;
2584
2585 /* already targeting our task? - just use it */
2586 if (from_inherit->iii_to_task == task_imp) {
2587 /* clear self-donation if not also present in inherit */
2588 if (!from_inherit->iii_donating &&
2589 MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) {
2590 kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
2591 cleared_self_donation = TRUE;
2592 }
2593 inherit = from_inherit;
2594
2595 } else if (III_DEPTH_MAX == III_DEPTH(from_inherit)) {
2596 ipc_importance_task_t to_task;
2597 ipc_importance_elem_t unlinked_from;
2598
2599 /*
2600 * Chain too long. Switch to looking
2601 * directly at the from_inherit's to-task
2602 * as our source of importance.
2603 */
2604 to_task = from_inherit->iii_to_task;
2605 ipc_importance_task_reference(to_task);
2606 from_elem = (ipc_importance_elem_t)to_task;
2607 depth = III_DEPTH_RESET | 1;
2608
2609 /* Fixup the kmsg linkage to reflect change */
2610 unlinked_from = ipc_importance_kmsg_unlink(kmsg);
2611 assert(unlinked_from == (ipc_importance_elem_t)from_inherit);
2612 ipc_importance_kmsg_link(kmsg, from_elem);
2613 ipc_importance_inherit_release_locked(from_inherit);
2614 /* importance unlocked */
2615 ipc_importance_lock();
2616
2617 } else {
2618 /* inheriting from an inherit */
2619 depth = from_inherit->iii_depth + 1;
2620 }
2621 }
2622
2623 /*
2624 * Don't allow a task to inherit from itself (would keep it permanently
2625 * boosted even if all other donors to the task went away).
2626 */
2627
2628 if (from_elem == (ipc_importance_elem_t)task_imp) {
2629 goto out_locked;
2630 }
2631
2632 /*
2633 * But if the message isn't associated with any linked source, it is
2634 * intended to be permanently boosting (static boost from kernel).
2635 * In that case DO let the process permanently boost itself.
2636 */
2637 if (IIE_NULL == from_elem) {
2638 assert(MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits));
2639 ipc_importance_task_reference_internal(task_imp);
2640 from_elem = (ipc_importance_elem_t)task_imp;
2641 }
2642
2643 /*
2644 * Now that we have the from_elem figured out,
2645 * check to see if we already have an inherit for this pairing
2646 */
2647 while (III_NULL == inherit) {
2648 inherit = ipc_importance_inherit_find(from_elem, task_imp, depth);
2649
2650 /* Do we have to allocate a new inherit */
2651 if (III_NULL == inherit) {
2652 if (III_NULL != alloc) {
2653 break;
2654 }
2655
2656 /* allocate space */
2657 ipc_importance_unlock();
2658 alloc = (ipc_importance_inherit_t)
2659 zalloc(ipc_importance_inherit_zone);
2660 ipc_importance_lock();
2661 }
2662 }
2663
2664 /* snapshot the donating status while we have importance locked */
2665 donating = MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits);
2666
2667 if (III_NULL != inherit) {
2668 /* We found one, piggyback on that */
2669 assert(0 < III_REFS(inherit));
2670 assert(0 < IIE_REFS(inherit->iii_from_elem));
2671 assert(inherit->iii_externcnt >= inherit->iii_made);
2672
2673 /* add in a made reference */
2674 if (0 == inherit->iii_made++) {
2675 assert(III_REFS_MAX > III_REFS(inherit));
2676 ipc_importance_inherit_reference_internal(inherit);
2677 }
2678
2679 /* Reflect the inherit's change of status into the task boosts */
2680 if (0 == III_EXTERN(inherit)) {
2681 assert(!inherit->iii_donating);
2682 inherit->iii_donating = donating;
2683 if (donating) {
2684 task_imp->iit_externcnt += inherit->iii_externcnt;
2685 task_imp->iit_externdrop += inherit->iii_externdrop;
2686 }
2687 } else {
2688 assert(donating == inherit->iii_donating);
2689 }
2690
2691 /* add in a external reference for this use of the inherit */
2692 inherit->iii_externcnt++;
2693 } else {
2694 /* initialize the previously allocated space */
2695 inherit = alloc;
2696 inherit->iii_bits = IIE_TYPE_INHERIT | 1;
2697 inherit->iii_made = 1;
2698 inherit->iii_externcnt = 1;
2699 inherit->iii_externdrop = 0;
2700 inherit->iii_depth = depth;
2701 inherit->iii_to_task = task_imp;
2702 inherit->iii_from_elem = IIE_NULL;
2703 queue_init(&inherit->iii_kmsgs);
2704
2705 if (donating) {
2706 inherit->iii_donating = TRUE;
2707 } else {
2708 inherit->iii_donating = FALSE;
2709 }
2710
2711 /*
2712 * Chain our new inherit on the element it inherits from.
2713 * The new inherit takes our reference on from_elem.
2714 */
2715 ipc_importance_inherit_link(inherit, from_elem);
2716
2717 #if IIE_REF_DEBUG
2718 ipc_importance_counter_init(&inherit->iii_elem);
2719 from_elem->iie_kmsg_refs_inherited++;
2720 task_imp->iit_elem.iie_task_refs_inherited++;
2721 #endif
2722 }
2723
2724 out_locked:
2725 /*
2726 * for those paths that came straight here: snapshot the donating status
2727 * (this should match previous snapshot for other paths).
2728 */
2729 donating = MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits);
2730
2731 /* unlink the kmsg inheritance (if any) */
2732 elem = ipc_importance_kmsg_unlink(kmsg);
2733 assert(elem == from_elem);
2734
2735 /* If found inherit and donating, reflect that in the task externcnt */
2736 if (III_NULL != inherit && donating) {
2737 task_imp->iit_externcnt++;
2738 /* The owner of the receive right might have changed; take the internal assertion */
2739 ipc_importance_task_hold_internal_assertion_locked(task_imp, 1);
2740 /* may have dropped and retaken importance lock */
2741 }
2742
2743 /* If we didn't create a new inherit, we have some resources to release */
2744 if (III_NULL == inherit || inherit != alloc) {
2745 if (IIE_NULL != from_elem) {
2746 if (III_NULL != inherit) {
2747 incr_ref_counter(from_elem->iie_kmsg_refs_coalesced);
2748 } else {
2749 incr_ref_counter(from_elem->iie_kmsg_refs_dropped);
2750 }
2751 ipc_importance_release_locked(from_elem);
2752 /* importance unlocked */
2753 } else {
2754 ipc_importance_unlock();
2755 }
2756
2757 if (IIT_NULL != task_imp) {
2758 if (III_NULL != inherit) {
2759 incr_ref_counter(task_imp->iit_elem.iie_task_refs_coalesced);
2760 }
2761 ipc_importance_task_release(task_imp);
2762 }
2763
2764 if (III_NULL != alloc)
2765 zfree(ipc_importance_inherit_zone, alloc);
2766 } else {
2767 /* from_elem and task_imp references transferred to new inherit */
2768 ipc_importance_unlock();
2769 }
2770
2771 /*
2772 * decrement port boost count
2773 * This is OK to do without the importance lock as we atomically
2774 * unlinked the kmsg and snapshotted the donating state while holding
2775 * the importance lock.
2776 */
2777 if (donating || cleared_self_donation) {
2778 ip_lock(port);
2779 /* drop importance from port and destination task */
2780 if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == FALSE) {
2781 ip_unlock(port);
2782 }
2783 }
2784
2785 if (III_NULL != inherit) {
2786 /* have an associated importance attr, even if currently not donating */
2787 kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;
2788 } else {
2789 /* we won't have an importance attribute associated with our message */
2790 kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
2791 }
2792
2793 return inherit;
2794 }
2795
2796 /*
2797 * Routine: ipc_importance_receive
2798 * Purpose:
2799 * Process importance attributes in a received message.
2800 *
2801 * If an importance voucher attribute was sent, transform
2802 * that into an attribute value reflecting the inheritance
2803 * from the sender to the receiver.
2804 *
2805 * If a static boost is received (or the voucher isn't on
2806 * a voucher-based boost), export a static boost.
2807 * Conditions:
2808 * Nothing locked.
2809 */
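/*
 * Informal outline of the receive path below: with MACH_RCV_VOUCHER the
 * sent voucher (if any) is copied and any importance inherit derived
 * from the kmsg is attached to the copy under
 * MACH_VOUCHER_ATTR_KEY_IMPORTANCE, so the boost follows the voucher
 * (no new voucher is built when there is neither). Without
 * MACH_RCV_VOUCHER any kmsg linkage is dropped and a legacy static
 * boost, if present, is converted into an externalized assertion on the
 * receiving task (or cleared if that fails).
 */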
2810 void
2811 ipc_importance_receive(
2812 ipc_kmsg_t kmsg,
2813 mach_msg_option_t option)
2814 {
2815 unsigned int sender_pid = ((mach_msg_max_trailer_t *)
2816 ((vm_offset_t)kmsg->ikm_header +
2817 round_msg(kmsg->ikm_header->msgh_size)))->msgh_audit.val[5];
2818 task_t task_self = current_task();
2819 int impresult = -1;
2820
2821 /* convert to a voucher with an inherit importance attribute? */
2822 if ((option & MACH_RCV_VOUCHER) != 0) {
2823 uint8_t recipes[2 * sizeof(ipc_voucher_attr_recipe_data_t) +
2824 sizeof(mach_voucher_attr_value_handle_t)];
2825 ipc_voucher_attr_raw_recipe_array_size_t recipe_size = 0;
2826 ipc_voucher_attr_recipe_t recipe = (ipc_voucher_attr_recipe_t)recipes;
2827 ipc_voucher_t recv_voucher;
2828 mach_voucher_attr_value_handle_t handle;
2829 ipc_importance_inherit_t inherit;
2830 kern_return_t kr;
2831
2832 /* set up recipe to copy the old voucher */
2833 if (IP_VALID(kmsg->ikm_voucher)) {
2834 ipc_voucher_t sent_voucher = (ipc_voucher_t)kmsg->ikm_voucher->ip_kobject;
2835
2836 recipe->key = MACH_VOUCHER_ATTR_KEY_ALL;
2837 recipe->command = MACH_VOUCHER_ATTR_COPY;
2838 recipe->previous_voucher = sent_voucher;
2839 recipe->content_size = 0;
2840 recipe_size += sizeof(*recipe);
2841 }
2842
2843 /*
2844 * create an inheritance attribute from the kmsg (may be NULL)
2845 * transferring any boosts from the kmsg linkage through the
2846 * port directly to the new inheritance object.
2847 */
2848 inherit = ipc_importance_inherit_from(kmsg);
2849 handle = (mach_voucher_attr_value_handle_t)inherit;
2850
2851 assert(IIE_NULL == kmsg->ikm_importance);
2852
2853 /*
2854 * Only create a new voucher if we have an inherit object
2855 * (from the ikm_importance field of the incoming message), OR
2856 * we have a valid incoming voucher. If we have neither of
2857 * these things then there is no need to create a new voucher.
2858 */
2859 if (IP_VALID(kmsg->ikm_voucher) || inherit != III_NULL) {
2860 /* replace the importance attribute with the handle we created */
2861 /* our made reference on the inherit is donated to the voucher */
2862 recipe = (ipc_voucher_attr_recipe_t)&recipes[recipe_size];
2863 recipe->key = MACH_VOUCHER_ATTR_KEY_IMPORTANCE;
2864 recipe->command = MACH_VOUCHER_ATTR_SET_VALUE_HANDLE;
2865 recipe->previous_voucher = IPC_VOUCHER_NULL;
2866 recipe->content_size = sizeof(mach_voucher_attr_value_handle_t);
2867 *(mach_voucher_attr_value_handle_t *)(void *)recipe->content = handle;
2868 recipe_size += sizeof(*recipe) + sizeof(mach_voucher_attr_value_handle_t);
2869
2870 kr = ipc_voucher_attr_control_create_mach_voucher(ipc_importance_control,
2871 recipes,
2872 recipe_size,
2873 &recv_voucher);
2874 assert(KERN_SUCCESS == kr);
2875
2876 /* swap the voucher port (and set voucher bits in case it didn't already exist) */
2877 kmsg->ikm_header->msgh_bits |= (MACH_MSG_TYPE_MOVE_SEND << 16);
2878 ipc_port_release_send(kmsg->ikm_voucher);
2879 kmsg->ikm_voucher = convert_voucher_to_port(recv_voucher);
2880 if (III_NULL != inherit)
2881 impresult = 2;
2882 }
2883 } else { /* Don't want a voucher */
2884
2885 /* got linked importance? have to drop */
2886 if (IIE_NULL != kmsg->ikm_importance) {
2887 ipc_importance_elem_t elem;
2888
2889 ipc_importance_lock();
2890 elem = ipc_importance_kmsg_unlink(kmsg);
2891 #if IIE_REF_DEBUG
2892 elem->iie_kmsg_refs_dropped++;
2893 #endif
2894 ipc_importance_release_locked(elem);
2895 /* importance unlocked */
2896 }
2897
2898 /* With kmsg unlinked, can safely examine message importance attribute. */
2899 if (MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) {
2900 ipc_importance_task_t task_imp = task_self->task_imp_base;
2901 ipc_port_t port = kmsg->ikm_header->msgh_remote_port;
2902
2903 /* The owner of the receive right might have changed; take the internal assertion */
2904 if (KERN_SUCCESS == ipc_importance_task_hold_internal_assertion(task_imp, 1)) {
2905 ipc_importance_task_externalize_legacy_assertion(task_imp, 1, sender_pid);
2906 impresult = 1;
2907 } else {
2908 /* The importance boost never applied to task (clear the bit) */
2909 kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
2910 impresult = 0;
2911 }
2912
2913 /* Drop the boost on the port and the owner of the receive right */
2914 ip_lock(port);
2915 if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == FALSE) {
2916 ip_unlock(port);
2917 }
2918 }
2919 }
2920
2921 #if IMPORTANCE_DEBUG
2922 if (-1 < impresult)
2923 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_DELV)) | DBG_FUNC_NONE,
2924 sender_pid, task_pid(task_self),
2925 kmsg->ikm_header->msgh_id, impresult, 0);
2926 if (impresult == 2) {
2927 /*
2928 * This probe only covers the new voucher-based path. Legacy importance
2929 * will trigger the probe in ipc_importance_task_externalize_legacy_assertion()
2930 * above and have impresult==1 here.
2931 */
2932 DTRACE_BOOST5(receive_boost, task_t, task_self, int, task_pid(task_self), int, sender_pid, int, 1, int, task_self->task_imp_base->iit_assertcnt);
2933 }
2934 #endif /* IMPORTANCE_DEBUG */
2935 }
2936
2937 /*
2938 * Routine: ipc_importance_unreceive
2939 * Purpose:
2940 * Undo receive of importance attributes in a message.
2941 *
2942 * Conditions:
2943 * Nothing locked.
2944 */
2945 void
2946 ipc_importance_unreceive(
2947 ipc_kmsg_t kmsg,
2948 mach_msg_option_t __unused option)
2949 {
2950 /* importance should already be in the voucher and out of the kmsg */
2951 assert(IIE_NULL == kmsg->ikm_importance);
2952
2953 /* See if there is a legacy boost to be dropped from receiver */
2954 if (MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) {
2955 ipc_importance_task_t task_imp;
2956
2957 kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
2958 task_imp = current_task()->task_imp_base;
2959 if (!IP_VALID(kmsg->ikm_voucher) && IIT_NULL != task_imp) {
2960 ipc_importance_task_drop_legacy_external_assertion(task_imp, 1);
2961 }
2962 /*
2963 * ipc_kmsg_copyout_dest() will consume the voucher
2964 * and any contained importance.
2965 */
2966 }
2967 }
2968
2969 /*
2970 * Routine: ipc_importance_clean
2971 * Purpose:
2972 * Clean up importance state in a kmsg that is being cleaned.
2973 * Unlink the importance chain if one was set up, and drop
2974 * the reference this kmsg held on the donor. Then check to see
2975 * if importance was carried to the port, and remove that if
2976 * needed.
2977 * Conditions:
2978 * Nothing locked.
2979 */
2980 void
2981 ipc_importance_clean(
2982 ipc_kmsg_t kmsg)
2983 {
2984 ipc_port_t port;
2985
2986 /* Is the kmsg still linked? If so, remove that first */
2987 if (IIE_NULL != kmsg->ikm_importance) {
2988 ipc_importance_elem_t elem;
2989
2990 ipc_importance_lock();
2991 elem = ipc_importance_kmsg_unlink(kmsg);
2992 assert(IIE_NULL != elem);
2993 ipc_importance_release_locked(elem);
2994 /* importance unlocked */
2995 }
2996
2997 /* See if there is a legacy importance boost to be dropped from port */
2998 if (MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) {
2999 kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
3000 port = kmsg->ikm_header->msgh_remote_port;
3001 if (IP_VALID(port)) {
3002 ip_lock(port);
3003 /* inactive ports already had their importance boosts dropped */
3004 if (!ip_active(port) ||
3005 ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == FALSE) {
3006 ip_unlock(port);
3007 }
3008 }
3009 }
3010 }
3011
3012 void
3013 ipc_importance_assert_clean(__assert_only ipc_kmsg_t kmsg)
3014 {
3015 assert(IIE_NULL == kmsg->ikm_importance);
3016 assert(!MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits));
3017 }
3018
3019 /*
3020 * IPC Importance Attribute Manager definition
3021 */
3022
3023 static kern_return_t
3024 ipc_importance_release_value(
3025 ipc_voucher_attr_manager_t manager,
3026 mach_voucher_attr_key_t key,
3027 mach_voucher_attr_value_handle_t value,
3028 mach_voucher_attr_value_reference_t sync);
3029
3030 static kern_return_t
3031 ipc_importance_get_value(
3032 ipc_voucher_attr_manager_t manager,
3033 mach_voucher_attr_key_t key,
3034 mach_voucher_attr_recipe_command_t command,
3035 mach_voucher_attr_value_handle_array_t prev_values,
3036 mach_voucher_attr_value_handle_array_size_t prev_value_count,
3037 mach_voucher_attr_content_t content,
3038 mach_voucher_attr_content_size_t content_size,
3039 mach_voucher_attr_value_handle_t *out_value,
3040 mach_voucher_attr_value_flags_t *out_flags,
3041 ipc_voucher_t *out_value_voucher);
3042
3043 static kern_return_t
3044 ipc_importance_extract_content(
3045 ipc_voucher_attr_manager_t manager,
3046 mach_voucher_attr_key_t key,
3047 mach_voucher_attr_value_handle_array_t values,
3048 mach_voucher_attr_value_handle_array_size_t value_count,
3049 mach_voucher_attr_recipe_command_t *out_command,
3050 mach_voucher_attr_content_t out_content,
3051 mach_voucher_attr_content_size_t *in_out_content_size);
3052
3053 static kern_return_t
3054 ipc_importance_command(
3055 ipc_voucher_attr_manager_t manager,
3056 mach_voucher_attr_key_t key,
3057 mach_voucher_attr_value_handle_array_t values,
3058 mach_msg_type_number_t value_count,
3059 mach_voucher_attr_command_t command,
3060 mach_voucher_attr_content_t in_content,
3061 mach_voucher_attr_content_size_t in_content_size,
3062 mach_voucher_attr_content_t out_content,
3063 mach_voucher_attr_content_size_t *out_content_size);
3064
3065 static void
3066 ipc_importance_manager_release(
3067 ipc_voucher_attr_manager_t manager);
3068
3069 struct ipc_voucher_attr_manager ipc_importance_manager = {
3070 .ivam_release_value = ipc_importance_release_value,
3071 .ivam_get_value = ipc_importance_get_value,
3072 .ivam_extract_content = ipc_importance_extract_content,
3073 .ivam_command = ipc_importance_command,
3074 .ivam_release = ipc_importance_manager_release,
3075 .ivam_flags = IVAM_FLAGS_NONE,
3076 };
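/*
 * The voucher layer calls back through this vtable whenever a voucher
 * carrying MACH_VOUCHER_ATTR_KEY_IMPORTANCE is created, redeemed,
 * inspected, commanded or has its last made reference released; the
 * manager itself is registered with the voucher system in
 * ipc_importance_init() below.
 */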
3077
3078 #define IMPORTANCE_ASSERT_KEY(key) assert(MACH_VOUCHER_ATTR_KEY_IMPORTANCE == (key))
3079 #define IMPORTANCE_ASSERT_MANAGER(manager) assert(&ipc_importance_manager == (manager))
3080
3081 /*
3082 * Routine: ipc_importance_release_value [Voucher Attribute Manager Interface]
3083 * Purpose:
3084 * Release what the voucher system believes is the last "made" reference
3085 * on an importance attribute value handle. The sync parameter is used to
3086 * avoid races with new made references concurrently being returned to the
3087 * voucher system in other threads.
3088 * Conditions:
3089 * Nothing locked on entry. May block.
3090 */
3091 static kern_return_t
3092 ipc_importance_release_value(
3093 ipc_voucher_attr_manager_t __assert_only manager,
3094 mach_voucher_attr_key_t __assert_only key,
3095 mach_voucher_attr_value_handle_t value,
3096 mach_voucher_attr_value_reference_t sync)
3097 {
3098 ipc_importance_elem_t elem;
3099
3100 IMPORTANCE_ASSERT_MANAGER(manager);
3101 IMPORTANCE_ASSERT_KEY(key);
3102 assert(0 < sync);
3103
3104 elem = (ipc_importance_elem_t)value;
3105
3106 ipc_importance_lock();
3107
3108 /* Any outstanding made refs? */
3109 if (sync != elem->iie_made) {
3110 assert(sync < elem->iie_made);
3111 ipc_importance_unlock();
3112 return KERN_FAILURE;
3113 }
3114
3115 /* clear made */
3116 elem->iie_made = 0;
3117
3118 /*
3119 * If there are pending external boosts represented by this attribute,
3120 * drop them from the appropriate task
3121 */
3122 if (IIE_TYPE_INHERIT == IIE_TYPE(elem)) {
3123 ipc_importance_inherit_t inherit = (ipc_importance_inherit_t)elem;
3124
3125 assert(inherit->iii_externcnt >= inherit->iii_externdrop);
3126
3127 if (inherit->iii_donating) {
3128 ipc_importance_task_t imp_task = inherit->iii_to_task;
3129 uint32_t assertcnt = III_EXTERN(inherit);
3130
3131 assert(ipc_importance_task_is_any_receiver_type(imp_task));
3132 assert(imp_task->iit_externcnt >= inherit->iii_externcnt);
3133 assert(imp_task->iit_externdrop >= inherit->iii_externdrop);
3134 imp_task->iit_externcnt -= inherit->iii_externcnt;
3135 imp_task->iit_externdrop -= inherit->iii_externdrop;
3136 inherit->iii_externcnt = 0;
3137 inherit->iii_externdrop = 0;
3138 inherit->iii_donating = FALSE;
3139
3140 /* adjust the internal assertions - and propagate if needed */
3141 if (ipc_importance_task_check_transition(imp_task, IIT_UPDATE_DROP, assertcnt)) {
3142 ipc_importance_task_propagate_assertion_locked(imp_task, IIT_UPDATE_DROP, TRUE);
3143 }
3144 } else {
3145 inherit->iii_externcnt = 0;
3146 inherit->iii_externdrop = 0;
3147 }
3148 }
3149
3150 /* drop the made reference on elem */
3151 ipc_importance_release_locked(elem);
3152 /* returns unlocked */
3153
3154 return KERN_SUCCESS;
3155 }
3156
3157
3158 /*
3159 * Routine: ipc_importance_get_value [Voucher Attribute Manager Interface]
3160 * Purpose:
3161 * Convert command and content data into a reference on a [potentially new]
3162 * attribute value. The importance attribute manager will only allow the
3163 * caller to get a value for the current task's importance, or to redeem
3164 * an importance attribute from an existing voucher.
3165 * Conditions:
3166 * Nothing locked on entry. May block.
3167 */
3168 static kern_return_t
3169 ipc_importance_get_value(
3170 ipc_voucher_attr_manager_t __assert_only manager,
3171 mach_voucher_attr_key_t __assert_only key,
3172 mach_voucher_attr_recipe_command_t command,
3173 mach_voucher_attr_value_handle_array_t prev_values,
3174 mach_voucher_attr_value_handle_array_size_t prev_value_count,
3175 mach_voucher_attr_content_t __unused content,
3176 mach_voucher_attr_content_size_t content_size,
3177 mach_voucher_attr_value_handle_t *out_value,
3178 mach_voucher_attr_value_flags_t *out_flags,
3179 ipc_voucher_t *out_value_voucher)
3180 {
3181 ipc_importance_elem_t elem;
3182 task_t self;
3183
3184 IMPORTANCE_ASSERT_MANAGER(manager);
3185 IMPORTANCE_ASSERT_KEY(key);
3186
3187 if (0 != content_size)
3188 return KERN_INVALID_ARGUMENT;
3189
3190 *out_flags = MACH_VOUCHER_ATTR_VALUE_FLAGS_NONE;
3191 /* never an out voucher */
3192
3193 switch (command) {
3194
3195 case MACH_VOUCHER_ATTR_REDEEM:
3196
3197 /* redeem of previous values is the value */
3198 if (0 < prev_value_count) {
3199 elem = (ipc_importance_elem_t)prev_values[0];
3200 assert(IIE_NULL != elem);
3201
3202 ipc_importance_lock();
3203 assert(0 < elem->iie_made);
3204 elem->iie_made++;
3205 ipc_importance_unlock();
3206
3207 *out_value = prev_values[0];
3208 return KERN_SUCCESS;
3209 }
3210
3211 /* redeem of default is default */
3212 *out_value = 0;
3213 *out_value_voucher = IPC_VOUCHER_NULL;
3214 return KERN_SUCCESS;
3215
3216 case MACH_VOUCHER_ATTR_IMPORTANCE_SELF:
3217 self = current_task();
3218
3219 elem = (ipc_importance_elem_t)ipc_importance_for_task(self, TRUE);
3220 /* made reference added (or IIE_NULL which isn't referenced) */
3221
3222 *out_value = (mach_voucher_attr_value_handle_t)elem;
3223 *out_value_voucher = IPC_VOUCHER_NULL;
3224 return KERN_SUCCESS;
3225
3226 default:
3227 /*
3228 * every other command is unknown
3229 *
3230 * Specifically, there is no mechanism provided to construct an
3231 * importance attribute for a task/process from just a pid or
3232 * task port. It has to be copied (or redeemed) from a previous
3233 * voucher that has it.
3234 */
3235 return KERN_INVALID_ARGUMENT;
3236 }
3237 }
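/*
 * A minimal userspace sketch of how the MACH_VOUCHER_ATTR_IMPORTANCE_SELF
 * case above is typically reached (illustrative only; not part of this
 * file, and exact userspace usage may differ):
 *
 *	kern_return_t kr;
 *	mach_voucher_t voucher = MACH_VOUCHER_NULL;
 *	mach_voucher_attr_recipe_data_t recipe = {
 *		.key     = MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
 *		.command = MACH_VOUCHER_ATTR_IMPORTANCE_SELF,
 *	};
 *
 *	kr = host_create_mach_voucher(mach_host_self(),
 *	    (mach_voucher_attr_raw_recipe_array_t)&recipe,
 *	    sizeof(recipe), &voucher);
 *
 * which lands in the SELF case above and takes a made reference on the
 * calling task's importance element.
 */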
3238
3239 /*
3240 * Routine: ipc_importance_extract_content [Voucher Attribute Manager Interface]
3241 * Purpose:
3242 * Extract meaning from the attribute value present in a voucher. While
3243 * the real goal is to provide commands and data that can reproduce the
3244 * voucher's value "out of thin air", this isn't possible with importance
3245 * attribute values. Instead, return debug info to help track down dependencies.
3246 * Conditions:
3247 * Nothing locked on entry. May block.
3248 */
3249 static kern_return_t
3250 ipc_importance_extract_content(
3251 ipc_voucher_attr_manager_t __assert_only manager,
3252 mach_voucher_attr_key_t __assert_only key,
3253 mach_voucher_attr_value_handle_array_t values,
3254 mach_voucher_attr_value_handle_array_size_t value_count,
3255 mach_voucher_attr_recipe_command_t *out_command,
3256 mach_voucher_attr_content_t out_content,
3257 mach_voucher_attr_content_size_t *in_out_content_size)
3258 {
3259 mach_voucher_attr_content_size_t size = 0;
3260 ipc_importance_elem_t elem;
3261 unsigned int i;
3262
3263 IMPORTANCE_ASSERT_MANAGER(manager);
3264 IMPORTANCE_ASSERT_KEY(key);
3265
3266 /* the first non-default value provides the data */
3267 for (i = 0; i < value_count ; i++) {
3268 elem = (ipc_importance_elem_t)values[i];
3269 if (IIE_NULL == elem)
3270 continue;
3271
3272 snprintf((char *)out_content, *in_out_content_size, "Importance for pid ");
3273 size = (mach_voucher_attr_content_size_t)strlen((char *)out_content);
3274
3275 for(;;) {
3276 ipc_importance_inherit_t inherit = III_NULL;
3277 ipc_importance_task_t task_imp;
3278 task_t task;
3279 int t_pid;
3280
3281 if (IIE_TYPE_TASK == IIE_TYPE(elem)) {
3282 task_imp = (ipc_importance_task_t)elem;
3283 task = task_imp->iit_task;
3284 t_pid = (TASK_NULL != task) ?
3285 task_pid(task) : -1;
3286 snprintf((char *)out_content + size, *in_out_content_size - size, "%d", t_pid);
3287 } else {
3288 inherit = (ipc_importance_inherit_t)elem;
3289 task_imp = inherit->iii_to_task;
3290 task = task_imp->iit_task;
3291 t_pid = (TASK_NULL != task) ?
3292 task_pid(task) : -1;
3293 snprintf((char *)out_content + size, *in_out_content_size - size,
3294 "%d (%d of %d boosts) %s from pid ", t_pid,
3295 III_EXTERN(inherit), inherit->iii_externcnt,
3296 (inherit->iii_donating) ? "donated" : "linked");
3297 }
3298
3299 size = (mach_voucher_attr_content_size_t)strlen((char *)out_content);
3300
3301 if (III_NULL == inherit)
3302 break;
3303
3304 elem = inherit->iii_from_elem;
3305 }
3306 size++; /* account for the terminating NUL */
3307 }
3308 *out_command = MACH_VOUCHER_ATTR_NOOP; /* cannot be used to regenerate value */
3309 *in_out_content_size = size;
3310 return KERN_SUCCESS;
3311 }
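/*
 * The resulting debug string looks roughly like:
 *	"Importance for pid 123 (1 of 2 boosts) donated from pid 456"
 * for an inherit chain, or just "Importance for pid 123" when the value
 * is a plain task element (pids illustrative).
 */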
3312
3313 /*
3314 * Routine: ipc_importance_command [Voucher Attribute Manager Interface]
3315 * Purpose:
3316 * Run commands against the importance attribute value found in a voucher.
3317 * No such commands are currently supported.
3318 * Conditions:
3319 * Nothing locked on entry. May block.
3320 */
3321 static kern_return_t
3322 ipc_importance_command(
3323 ipc_voucher_attr_manager_t __assert_only manager,
3324 mach_voucher_attr_key_t __assert_only key,
3325 mach_voucher_attr_value_handle_array_t values,
3326 mach_msg_type_number_t value_count,
3327 mach_voucher_attr_command_t command,
3328 mach_voucher_attr_content_t in_content,
3329 mach_voucher_attr_content_size_t in_content_size,
3330 mach_voucher_attr_content_t out_content,
3331 mach_voucher_attr_content_size_t *out_content_size)
3332 {
3333 ipc_importance_inherit_t inherit;
3334 ipc_importance_task_t to_task;
3335 uint32_t refs, *outrefsp;
3336 mach_msg_type_number_t i;
3337 uint32_t externcnt;
3338
3339 IMPORTANCE_ASSERT_MANAGER(manager);
3340 IMPORTANCE_ASSERT_KEY(key);
3341
3342 if (in_content_size != sizeof(refs) ||
3343 (*out_content_size != 0 && *out_content_size != sizeof(refs))) {
3344 return KERN_INVALID_ARGUMENT;
3345 }
3346 refs = *(uint32_t *)(void *)in_content;
3347 outrefsp = (*out_content_size != 0) ? (uint32_t *)(void *)out_content : NULL;
3348
3349 if (MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL != command) {
3350 return KERN_NOT_SUPPORTED;
3351 }
3352
3353 /* the first non-default value of the apropos type provides the data */
3354 inherit = III_NULL;
3355 for (i = 0; i < value_count; i++) {
3356 ipc_importance_elem_t elem = (ipc_importance_elem_t)values[i];
3357
3358 if (IIE_NULL != elem && IIE_TYPE_INHERIT == IIE_TYPE(elem)) {
3359 inherit = (ipc_importance_inherit_t)elem;
3360 break;
3361 }
3362 }
3363 if (III_NULL == inherit) {
3364 return KERN_INVALID_ARGUMENT;
3365 }
3366
3367 ipc_importance_lock();
3368
3369 if (0 == refs) {
3370 if (NULL != outrefsp) {
3371 *outrefsp = III_EXTERN(inherit);
3372 }
3373 ipc_importance_unlock();
3374 return KERN_SUCCESS;
3375 }
3376
3377 to_task = inherit->iii_to_task;
3378 assert(ipc_importance_task_is_any_receiver_type(to_task));
3379
3380 /* if not donating to a denap receiver, it was called incorrectly */
3381 if (!ipc_importance_task_is_marked_denap_receiver(to_task)) {
3382 ipc_importance_unlock();
3383 return KERN_INVALID_TASK; /* keeps dispatch happy */
3384 }
3385
3386 /* Enough external references left to drop? */
3387 if (III_EXTERN(inherit) < refs) {
3388 ipc_importance_unlock();
3389 return KERN_FAILURE;
3390 }
3391
3392 /* re-base external and internal counters at the inherit and the to-task (if apropos) */
3393 if (inherit->iii_donating) {
3394 assert(IIT_EXTERN(to_task) >= III_EXTERN(inherit));
3395 assert(to_task->iit_externcnt >= inherit->iii_externcnt);
3396 assert(to_task->iit_externdrop >= inherit->iii_externdrop);
3397 inherit->iii_externdrop += refs;
3398 to_task->iit_externdrop += refs;
3399 externcnt = III_EXTERN(inherit);
3400 if (0 == externcnt) {
3401 inherit->iii_donating = FALSE;
3402 to_task->iit_externcnt -= inherit->iii_externcnt;
3403 to_task->iit_externdrop -= inherit->iii_externdrop;
3404
3405
3406 /* Start AppNap delay hysteresis - even if not the last boost for the task. */
3407 if (ipc_importance_delayed_drop_call != NULL &&
3408 ipc_importance_task_is_marked_denap_receiver(to_task)) {
3409 ipc_importance_task_delayed_drop(to_task);
3410 }
3411
3412 /* drop task assertions associated with the dropped boosts */
3413 if (ipc_importance_task_check_transition(to_task, IIT_UPDATE_DROP, refs)) {
3414 ipc_importance_task_propagate_assertion_locked(to_task, IIT_UPDATE_DROP, TRUE);
3415 /* may have dropped and retaken importance lock */
3416 }
3417 } else {
3418 /* assert(to_task->iit_assertcnt >= refs + externcnt); */
3419 /* defensive deduction in case of assertcnt underflow */
3420 if (to_task->iit_assertcnt > refs + externcnt) {
3421 to_task->iit_assertcnt -= refs;
3422 } else {
3423 to_task->iit_assertcnt = externcnt;
3424 }
3425 }
3426 } else {
3427 inherit->iii_externdrop += refs;
3428 externcnt = III_EXTERN(inherit);
3429 }
3430
3431 /* capture result (if requested) */
3432 if (NULL != outrefsp) {
3433 *outrefsp = externcnt;
3434 }
3435
3436 ipc_importance_unlock();
3437 return KERN_SUCCESS;
3438 }
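
/*
 * For illustration only: a minimal userspace sketch of driving the
 * MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL command handled above.  It
 * assumes the mach_voucher_attr_command() MIG wrapper; the helper name
 * drop_external_boosts() is hypothetical.  Note that the handler requires
 * the input to be exactly sizeof(uint32_t) and the optional output buffer
 * to be the same size.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_voucher.h>

static kern_return_t
drop_external_boosts(mach_voucher_t voucher, uint32_t refs, uint32_t *remaining)
{
	mach_voucher_attr_content_size_t out_size = sizeof(*remaining);

	return mach_voucher_attr_command(voucher,
	    MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
	    MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL,
	    (mach_voucher_attr_content_t)&refs, sizeof(refs),
	    (mach_voucher_attr_content_t)remaining, &out_size);
}
#endif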
3439
3440 /*
3441 * Routine: ipc_importance_manager_release [Voucher Attribute Manager Interface]
3442 * Purpose:
3443 * Release the Voucher system's reference on the IPC importance attribute
3444 * manager.
3445 * Conditions:
3446 * As this can only occur after the manager drops the Attribute control
3447 * reference granted back at registration time, and that reference is never
3448 * dropped, this should never be called.
3449 */
3450 static void
3451 ipc_importance_manager_release(
3452 ipc_voucher_attr_manager_t __assert_only manager)
3453 {
3454 IMPORTANCE_ASSERT_MANAGER(manager);
3455 panic("Voucher importance manager released");
3456 }
3457
3458 /*
3459 * Routine: ipc_importance_init
3460 * Purpose:
3461 * Initialize the IPC importance manager.
3462 * Conditions:
3463 * Zones and Vouchers are already initialized.
3464 */
3465 void
3466 ipc_importance_init(void)
3467 {
3468 natural_t ipc_importance_max = (task_max + thread_max) * 2;
3469 char temp_buf[26];
3470 kern_return_t kr;
3471
3472 if (PE_parse_boot_argn("imp_interactive_receiver", temp_buf, sizeof(temp_buf))) {
3473 ipc_importance_interactive_receiver = TRUE;
3474 }
3475
3476 ipc_importance_task_zone = zinit(sizeof(struct ipc_importance_task),
3477 ipc_importance_max * sizeof(struct ipc_importance_task),
3478 sizeof(struct ipc_importance_task),
3479 "ipc task importance");
3480 zone_change(ipc_importance_task_zone, Z_NOENCRYPT, TRUE);
3481
3482 ipc_importance_inherit_zone = zinit(sizeof(struct ipc_importance_inherit),
3483 ipc_importance_max * sizeof(struct ipc_importance_inherit),
3484 sizeof(struct ipc_importance_inherit),
3485 "ipc importance inherit");
3486 zone_change(ipc_importance_inherit_zone, Z_NOENCRYPT, TRUE);
3487
3488
3489 #if DEVELOPMENT || DEBUG
3490 queue_init(&global_iit_alloc_queue);
3491 #endif
3492
3493 /* initialize global locking */
3494 ipc_importance_lock_init();
3495
3496 kr = ipc_register_well_known_mach_voucher_attr_manager(&ipc_importance_manager,
3497 (mach_voucher_attr_value_handle_t)0,
3498 MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
3499 &ipc_importance_control);
3500 if (KERN_SUCCESS != kr)
3501 printf("Voucher importance manager register returned %d\n", kr);
3502 }
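
/*
 * For illustration only: the zone setup above follows the common
 * zinit(elem_size, max_bytes, alloc_size, name) pattern, sized so that at
 * most ipc_importance_max elements fit in each zone, with Z_NOENCRYPT
 * marking the zone's memory as exempt from encryption.  The struct foo and
 * foo_zone names below are hypothetical; this is a sketch of the same
 * pattern, not code that is actually compiled.
 */
#if 0
struct foo {
	int f_field;
};

static zone_t foo_zone;

static void
foo_zone_init(natural_t max_elements)
{
	foo_zone = zinit(sizeof(struct foo),
	    max_elements * sizeof(struct foo),
	    sizeof(struct foo),
	    "foo zone");
	zone_change(foo_zone, Z_NOENCRYPT, TRUE);
}
#endif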
3503
3504 /*
3505 * Routine: ipc_importance_thread_call_init
3506 * Purpose:
3507 * Initialize the IPC importance code dependent upon
3508 * thread-call support being available.
3509 * Conditions:
3510 * Thread-call mechanism is already initialized.
3511 */
3512 void
3513 ipc_importance_thread_call_init(void)
3514 {
3515 /* initialize delayed drop queue and thread-call */
3516 queue_init(&ipc_importance_delayed_drop_queue);
3517 ipc_importance_delayed_drop_call =
3518 thread_call_allocate(ipc_importance_task_delayed_drop_scan, NULL);
3519 if (NULL == ipc_importance_delayed_drop_call) {
3520 panic("ipc_importance_thread_call_init");
3521 }
3522 }
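
/*
 * For illustration only: a sketch of how the delayed-drop thread call
 * allocated above might be armed using the DENAP_DROP_* constants defined
 * at the top of this file.  The helper name arm_delayed_drop() is
 * hypothetical; the values come straight from the DENAP_DROP_* definitions.
 */
#if 0
static void
arm_delayed_drop(void)
{
	uint64_t deadline, leeway;

	/* convert the nanosecond delay and leeway into absolute-time units */
	nanoseconds_to_absolutetime(DENAP_DROP_DELAY, &deadline);
	deadline += mach_absolute_time();
	nanoseconds_to_absolutetime(DENAP_DROP_LEEWAY, &leeway);

	thread_call_enter_delayed_with_leeway(ipc_importance_delayed_drop_call,
	    NULL, deadline, leeway, DENAP_DROP_FLAGS);
}
#endif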
3523
3524 /*
3525 * Routine: task_importance_list_pids
3526 * Purpose: list the pids to which this task is donating importance.
3527 * Conditions: To be called only from kdp stackshot code.
3528 * Will panic the system otherwise.
3529 */
3530 extern int
3531 task_importance_list_pids(task_t task, int flags, char *pid_list, unsigned int max_count)
3532 {
3533 if (kdp_lck_spin_is_acquired(&ipc_importance_lock_data) ||
3534 max_count < 1 ||
3535 task->task_imp_base == IIT_NULL ||
3536 pid_list == NULL ||
3537 flags != TASK_IMP_LIST_DONATING_PIDS) {
3538 return 0;
3539 }
3540 unsigned int pidcount = 0;
3541 task_t temp_task;
3542 ipc_importance_task_t task_imp = task->task_imp_base;
3543 ipc_kmsg_t temp_kmsg;
3544 ipc_importance_inherit_t temp_inherit;
3545 ipc_importance_elem_t elem;
3546 int target_pid = 0, previous_pid;
3547
3548 queue_iterate(&task_imp->iit_inherits, temp_inherit, ipc_importance_inherit_t, iii_inheritance) {
3549 /* check space in buffer */
3550 if (pidcount >= max_count)
3551 break;
3552 previous_pid = target_pid;
3553 target_pid = -1;
3554
3555 if (temp_inherit->iii_donating) {
3556
3557 #if DEVELOPMENT || DEBUG
3558 target_pid = temp_inherit->iii_to_task->iit_bsd_pid;
3559 #else
3560 temp_task = temp_inherit->iii_to_task->iit_task;
3561 if (temp_task != TASK_NULL) {
3562 target_pid = task_pid(temp_task);
3563 }
3564 #endif
3565 }
3566
3567 if (target_pid != -1 && previous_pid != target_pid) {
3568 memcpy(pid_list, &target_pid, sizeof(target_pid));
3569 pid_list += sizeof(target_pid);
3570 pidcount++;
3571 }
3572
3573 }
3574
3575 target_pid = 0;
3576 queue_iterate(&task_imp->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance) {
3577 if (pidcount >= max_count)
3578 break;
3579 previous_pid = target_pid;
3580 target_pid = -1;
3581 elem = temp_kmsg->ikm_importance;
3582 temp_task = TASK_NULL;
3583
3584 if (elem == IIE_NULL) {
3585 continue;
3586 }
3587
3588 if (!(temp_kmsg->ikm_header && MACH_MSGH_BITS_RAISED_IMPORTANCE(temp_kmsg->ikm_header->msgh_bits))) {
3589 continue;
3590 }
3591
3592 if (IIE_TYPE_TASK == IIE_TYPE(elem) &&
3593 (((ipc_importance_task_t)elem)->iit_task != TASK_NULL)) {
3594 target_pid = task_pid(((ipc_importance_task_t)elem)->iit_task);
3595 } else {
3596 temp_inherit = (ipc_importance_inherit_t)elem;
3597 #if DEVELOPMENT || DEBUG
3598 target_pid = temp_inherit->iii_to_task->iit_bsd_pid;
3599 #else
3600 temp_task = temp_inherit->iii_to_task->iit_task;
3601 if (temp_task != TASK_NULL) {
3602 target_pid = task_pid(temp_task);
3603 }
3604 #endif
3605 }
3606
3607 if (target_pid != -1 && previous_pid != target_pid) {
3608 memcpy(pid_list, &target_pid, sizeof(target_pid));
3609 pid_list += sizeof(target_pid);
3610 pidcount++;
3611 }
3612 }
3613
3614 return pidcount;
3615 }
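
/*
 * For illustration only: a sketch of consuming the packed pid buffer filled
 * in by task_importance_list_pids().  The buffer receives up to max_count
 * ints back to back and the return value is the number written.  The helper
 * name print_donating_pids() and the 32-entry buffer are hypothetical, and
 * like the routine above this could only run from kdp stackshot context.
 */
#if 0
static void
print_donating_pids(task_t task)
{
	int pids[32];	/* arbitrary example capacity (entries, not bytes) */
	int i, count;

	count = task_importance_list_pids(task, TASK_IMP_LIST_DONATING_PIDS,
	    (char *)pids, sizeof(pids) / sizeof(pids[0]));
	for (i = 0; i < count; i++) {
		printf("donating to pid %d\n", pids[i]);
	}
}
#endif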
3616