/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/notify.h>
#include <ipc/ipc_types.h>
#include <ipc/ipc_importance.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_voucher.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_tt.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/queue.h>
#include <kern/task.h>

#include <sys/kdebug.h>

#include <mach/mach_voucher_attr_control.h>
#include <mach/machine/sdt.h>

extern int proc_pid(void *);
extern int proc_selfpid(void);
extern uint64_t proc_uniqueid(void *p);
extern char *proc_name_address(void *p);

/*
 * Globals for delayed boost drop processing.
 */
static queue_head_t ipc_importance_delayed_drop_queue;
static thread_call_t ipc_importance_delayed_drop_call;
static uint64_t ipc_importance_delayed_drop_timestamp;
static boolean_t ipc_importance_delayed_drop_call_requested = FALSE;

#define DENAP_DROP_TARGET	(1000 * NSEC_PER_MSEC)	/* optimum denap delay */
#define DENAP_DROP_SKEW		(100 * NSEC_PER_MSEC)	/* request skew for wakeup */
#define DENAP_DROP_LEEWAY	(2 * DENAP_DROP_SKEW)	/* specified wakeup leeway */

#define DENAP_DROP_DELAY	(DENAP_DROP_TARGET + DENAP_DROP_SKEW)
#define DENAP_DROP_FLAGS	(THREAD_CALL_DELAY_SYS_NORMAL | THREAD_CALL_DELAY_LEEWAY)

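/*
 * For illustration (derived from the constants above): a queued de-nap
 * boost drop is scheduled DENAP_DROP_DELAY = 1000 ms + 100 ms = 1100 ms
 * after the drop request, and the thread-call wakeup is allowed
 * DENAP_DROP_LEEWAY = 2 * 100 ms = 200 ms of leeway.
 */
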
/*
 * Importance Voucher Attribute Manager
 */

static lck_spin_t ipc_importance_lock_data;	/* single lock for now */


#define ipc_importance_lock_init() \
	lck_spin_init(&ipc_importance_lock_data, &ipc_lck_grp, &ipc_lck_attr)
#define ipc_importance_lock_destroy() \
	lck_spin_destroy(&ipc_importance_lock_data, &ipc_lck_grp)
#define ipc_importance_lock() \
	lck_spin_lock(&ipc_importance_lock_data)
#define ipc_importance_lock_try() \
	lck_spin_try_lock(&ipc_importance_lock_data)
#define ipc_importance_unlock() \
	lck_spin_unlock(&ipc_importance_lock_data)
#define ipc_importance_sleep(elem)	lck_spin_sleep(&ipc_importance_lock_data, \
						LCK_SLEEP_DEFAULT, \
						(event_t)(elem), \
						THREAD_UNINT)
#define ipc_importance_wakeup(elem)	thread_wakeup((event_t)(elem))

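/*
 * Usage sketch (illustrative): all importance state below is protected by
 * this single spin lock. Callers bracket examination or mutation of
 * importance elements with ipc_importance_lock()/ipc_importance_unlock();
 * a thread that must wait for an element to stabilize pairs
 * ipc_importance_sleep(elem) with an ipc_importance_wakeup(elem) issued by
 * the thread making the change.
 */
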
#if IIE_REF_DEBUG
#define incr_ref_counter(x) (hw_atomic_add(&(x), 1))

static inline
uint32_t ipc_importance_reference_internal(ipc_importance_elem_t elem)
{
	incr_ref_counter(elem->iie_refs_added);
	return (hw_atomic_add(&elem->iie_bits, 1) & IIE_REFS_MASK);
}

static inline
uint32_t ipc_importance_release_internal(ipc_importance_elem_t elem)
{
	incr_ref_counter(elem->iie_refs_dropped);
	return (hw_atomic_sub(&elem->iie_bits, 1) & IIE_REFS_MASK);
}

static inline
uint32_t ipc_importance_task_reference_internal(ipc_importance_task_t task_imp)
{
	uint32_t out;
	out = ipc_importance_reference_internal(&task_imp->iit_elem);
	incr_ref_counter(task_imp->iit_elem.iie_task_refs_added);
	return out;
}

static inline
uint32_t ipc_importance_task_release_internal(ipc_importance_task_t task_imp)
{
	uint32_t out;

	assert(1 < IIT_REFS(task_imp));
	incr_ref_counter(task_imp->iit_elem.iie_task_refs_dropped);
	out = ipc_importance_release_internal(&task_imp->iit_elem);
	return out;
}

static inline
void ipc_importance_counter_init(ipc_importance_elem_t elem)
{
	elem->iie_refs_added = 0;
	elem->iie_refs_dropped = 0;
	elem->iie_kmsg_refs_added = 0;
	elem->iie_kmsg_refs_inherited = 0;
	elem->iie_kmsg_refs_coalesced = 0;
	elem->iie_kmsg_refs_dropped = 0;
	elem->iie_task_refs_added = 0;
	elem->iie_task_refs_added_inherit_from = 0;
	elem->iie_task_refs_added_transition = 0;
	elem->iie_task_refs_self_added = 0;
	elem->iie_task_refs_inherited = 0;
	elem->iie_task_refs_coalesced = 0;
	elem->iie_task_refs_dropped = 0;
}
#else
#define incr_ref_counter(x)
#endif

#if DEVELOPMENT || DEBUG
static queue_head_t global_iit_alloc_queue;
#endif

/* TODO: remove this variable when interactive daemon audit is complete */
boolean_t ipc_importance_interactive_receiver = FALSE;

static zone_t ipc_importance_task_zone;
static zone_t ipc_importance_inherit_zone;

static ipc_voucher_attr_control_t ipc_importance_control;

/*
 * Routine:	ipc_importance_kmsg_link
 * Purpose:
 *	Link the kmsg onto the appropriate propagation chain.
 *	If the element is a task importance, we link directly
 *	on its propagation chain. Otherwise, we link onto the
 *	destination task of the inherit.
 * Conditions:
 *	Importance lock held.
 *	Caller is donating an importance elem reference to the kmsg.
 */
static void
ipc_importance_kmsg_link(
	ipc_kmsg_t		kmsg,
	ipc_importance_elem_t	elem)
{
	ipc_importance_elem_t link_elem;

	assert(IIE_NULL == kmsg->ikm_importance);

	link_elem = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
		(ipc_importance_elem_t)((ipc_importance_inherit_t)elem)->iii_to_task :
		elem;

	queue_enter(&link_elem->iie_kmsgs, kmsg, ipc_kmsg_t, ikm_inheritance);
	kmsg->ikm_importance = elem;
}

/*
 * Routine:	ipc_importance_kmsg_unlink
 * Purpose:
 *	Unlink the kmsg from its current propagation chain.
 *	If the element is a task importance, we unlink directly
 *	from its propagation chain. Otherwise, we unlink from the
 *	destination task of the inherit.
 * Returns:
 *	The reference to the importance element it was linked on.
 * Conditions:
 *	Importance lock held.
 *	Caller is responsible for dropping reference on returned elem.
 */
static ipc_importance_elem_t
ipc_importance_kmsg_unlink(
	ipc_kmsg_t		kmsg)
{
	ipc_importance_elem_t elem = kmsg->ikm_importance;

	if (IIE_NULL != elem) {
		ipc_importance_elem_t unlink_elem;

		unlink_elem = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
			(ipc_importance_elem_t)((ipc_importance_inherit_t)elem)->iii_to_task :
			elem;

		queue_remove(&unlink_elem->iie_kmsgs, kmsg, ipc_kmsg_t, ikm_inheritance);
		kmsg->ikm_importance = IIE_NULL;
	}
	return elem;
}

/*
 * Routine:	ipc_importance_inherit_link
 * Purpose:
 *	Link the inherit onto the appropriate propagation chain.
 *	If the element is a task importance, we link directly
 *	on its propagation chain. Otherwise, we link onto the
 *	destination task of the inherit.
 * Conditions:
 *	Importance lock held.
 *	Caller is donating an elem importance reference to the inherit.
 */
static void
ipc_importance_inherit_link(
	ipc_importance_inherit_t inherit,
	ipc_importance_elem_t	elem)
{
	ipc_importance_elem_t link_elem;

	assert(IIE_NULL == inherit->iii_from_elem);
	link_elem = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
		(ipc_importance_elem_t)((ipc_importance_inherit_t)elem)->iii_to_task :
		elem;

	queue_enter(&link_elem->iie_inherits, inherit,
		    ipc_importance_inherit_t, iii_inheritance);
	inherit->iii_from_elem = elem;
}

/*
 * Routine:	ipc_importance_inherit_unlink
 * Purpose:
 *	Unlink the inherit from its current propagation chain.
 *	If the element is a task importance, we unlink directly
 *	from its propagation chain. Otherwise, we unlink from the
 *	destination task of the inherit.
 * Returns:
 *	The reference to the importance element it was linked on.
 * Conditions:
 *	Importance lock held.
 *	Caller is responsible for dropping reference on returned elem.
 */
static ipc_importance_elem_t
ipc_importance_inherit_unlink(
	ipc_importance_inherit_t inherit)
{
	ipc_importance_elem_t elem = inherit->iii_from_elem;

	if (IIE_NULL != elem) {
		ipc_importance_elem_t unlink_elem;

		unlink_elem = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
			(ipc_importance_elem_t)((ipc_importance_inherit_t)elem)->iii_to_task :
			elem;

		queue_remove(&unlink_elem->iie_inherits, inherit,
			     ipc_importance_inherit_t, iii_inheritance);
		inherit->iii_from_elem = IIE_NULL;
	}
	return elem;
}

/*
 * Routine:	ipc_importance_reference
 * Purpose:
 *	Add a reference to the importance element.
 * Conditions:
 *	Caller must hold a reference on the element.
 */
void
ipc_importance_reference(ipc_importance_elem_t elem)
{
	assert(0 < IIE_REFS(elem));
	ipc_importance_reference_internal(elem);
}

/*
 * Routine:	ipc_importance_release_locked
 * Purpose:
 *	Release a reference on an importance attribute value,
 *	unlinking and deallocating the attribute if the last reference.
 * Conditions:
 *	Entered with importance lock held, leaves with it unlocked.
 */
static void
ipc_importance_release_locked(ipc_importance_elem_t elem)
{
	assert(0 < IIE_REFS(elem));

	if (0 < ipc_importance_release_internal(elem)) {

#if DEVELOPMENT || DEBUG
		ipc_importance_inherit_t temp_inherit;
		ipc_importance_task_t link_task;
		ipc_kmsg_t temp_kmsg;
		uint32_t expected = 0;

		if (0 < elem->iie_made)
			expected++;

		link_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
			((ipc_importance_inherit_t)elem)->iii_to_task :
			(ipc_importance_task_t)elem;

		queue_iterate(&link_task->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance)
			if (temp_kmsg->ikm_importance == elem)
				expected++;
		queue_iterate(&link_task->iit_inherits, temp_inherit,
			      ipc_importance_inherit_t, iii_inheritance)
			if (temp_inherit->iii_from_elem == elem)
				expected++;

		if (IIE_REFS(elem) < expected)
			panic("ipc_importance_release_locked (%p)", elem);
#endif
		ipc_importance_unlock();
		return;
	}

	/* last ref */
	/* can't get to no refs if we contribute to something else's importance */
	assert(queue_empty(&elem->iie_kmsgs));
	assert(queue_empty(&elem->iie_inherits));

	switch (IIE_TYPE(elem)) {

	/* just a "from" task reference to drop */
	case IIE_TYPE_TASK:
	{
		ipc_importance_task_t task_elem;

		task_elem = (ipc_importance_task_t)elem;
		assert(TASK_NULL == task_elem->iit_task);

#if DEVELOPMENT || DEBUG
		queue_remove(&global_iit_alloc_queue, task_elem, ipc_importance_task_t, iit_allocation);
#endif

		ipc_importance_unlock();

		zfree(ipc_importance_task_zone, task_elem);
		break;
	}

	/* dropping an inherit element */
	case IIE_TYPE_INHERIT:
	{
		ipc_importance_inherit_t inherit;
		ipc_importance_elem_t from_elem;
		ipc_importance_task_t to_task;

		inherit = (ipc_importance_inherit_t)elem;
		to_task = inherit->iii_to_task;
		assert(IIT_NULL != to_task);
		assert(!inherit->iii_donating);

		/* unlink and release the inherit */
		assert(ipc_importance_task_is_any_receiver_type(to_task));
		from_elem = ipc_importance_inherit_unlink(inherit);
		assert(IIE_NULL != from_elem);
		ipc_importance_release_locked(from_elem);
		/* unlocked on return */

		ipc_importance_task_release(to_task);

		zfree(ipc_importance_inherit_zone, inherit);
		break;
	}
	}
}

/*
 * Routine:	ipc_importance_release
 * Purpose:
 *	Release a reference on an importance attribute value,
 *	unlinking and deallocating the attribute if the last reference.
 * Conditions:
 *	nothing locked on entrance, nothing locked on exit.
 *	May block.
 */
void
ipc_importance_release(ipc_importance_elem_t elem)
{
	if (IIE_NULL == elem)
		return;

	ipc_importance_lock();
	ipc_importance_release_locked(elem);
	/* unlocked */
}

/*
 * Routine:	ipc_importance_task_reference
 * Purpose:
 *	Retain a reference on a task importance attribute value.
 * Conditions:
 *	nothing locked on entrance, nothing locked on exit.
 *	caller holds a reference already.
 */
void
ipc_importance_task_reference(ipc_importance_task_t task_elem)
{
	if (IIT_NULL == task_elem)
		return;
#if IIE_REF_DEBUG
	incr_ref_counter(task_elem->iit_elem.iie_task_refs_added);
#endif
	ipc_importance_reference(&task_elem->iit_elem);
}

/*
 * Routine:	ipc_importance_task_release
 * Purpose:
 *	Release a reference on a task importance attribute value,
 *	unlinking and deallocating the attribute if the last reference.
 * Conditions:
 *	nothing locked on entrance, nothing locked on exit.
 *	May block.
 */
void
ipc_importance_task_release(ipc_importance_task_t task_elem)
{
	if (IIT_NULL == task_elem)
		return;

	ipc_importance_lock();
#if IIE_REF_DEBUG
	incr_ref_counter(task_elem->iit_elem.iie_task_refs_dropped);
#endif
	ipc_importance_release_locked(&task_elem->iit_elem);
	/* unlocked */
}

/*
 * Routine:	ipc_importance_task_release_locked
 * Purpose:
 *	Release a reference on a task importance attribute value,
 *	unlinking and deallocating the attribute if the last reference.
 * Conditions:
 *	importance lock held on entry, nothing locked on exit.
 *	May block.
 */
static void
ipc_importance_task_release_locked(ipc_importance_task_t task_elem)
{
	if (IIT_NULL == task_elem) {
		ipc_importance_unlock();
		return;
	}
#if IIE_REF_DEBUG
	incr_ref_counter(task_elem->iit_elem.iie_task_refs_dropped);
#endif
	ipc_importance_release_locked(&task_elem->iit_elem);
	/* unlocked */
}

/*
 * Routines for importance donation/inheritance/boosting
 */


/*
 * External importance assertions are managed by the process in userspace
 * Internal importance assertions are the responsibility of the kernel
 * Assertions are changed from internal to external via task_importance_externalize_assertion
 */
/*
 * Routine:	ipc_importance_task_check_transition
 * Purpose:
 *	Increment or decrement the internal task importance counter of the
 *	specified task and determine if propagation and a task policy
 *	update is required.
 *
 *	If it is already enqueued for a policy update, steal it from that queue
 *	(as we are reversing that update before it happens).
 *
 * Conditions:
 *	Called with the importance lock held.
 *	It is the caller's responsibility to perform the propagation of the
 *	transition and/or policy changes by checking the return value.
 */
static boolean_t
ipc_importance_task_check_transition(
	ipc_importance_task_t	task_imp,
	iit_update_type_t	type,
	uint32_t		delta)
{
	task_t target_task = task_imp->iit_task;
	boolean_t boost = (IIT_UPDATE_HOLD == type);
	boolean_t before_boosted, after_boosted;

	if (!ipc_importance_task_is_any_receiver_type(task_imp))
		return FALSE;

#if IMPORTANCE_DEBUG
	int target_pid = (TASK_NULL != target_task) ? audit_token_pid_from_task(target_task) : -1;

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (((boost) ? IMP_HOLD : IMP_DROP) | TASK_POLICY_INTERNAL))) | DBG_FUNC_START,
	        proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
#endif

	/* snapshot the effective boosting status before making any changes */
	before_boosted = (task_imp->iit_assertcnt > 0);

	/* Adjust the assertcnt appropriately */
	if (boost) {
		task_imp->iit_assertcnt += delta;
#if IMPORTANCE_DEBUG
		DTRACE_BOOST6(send_boost, task_t, target_task, int, target_pid,
		        task_t, current_task(), int, proc_selfpid(), int, delta, int, task_imp->iit_assertcnt);
#endif
	} else {
		// assert(delta <= task_imp->iit_assertcnt);
		if (delta > task_imp->iit_assertcnt - IIT_EXTERN(task_imp)) {
			/* TODO: Turn this back into a panic <rdar://problem/12592649> */
			if (target_task != TASK_NULL) {
				printf("Over-release of kernel-internal importance assertions for pid %d (%s), "
				       "dropping %d assertion(s) but task only has %d remaining (%d external).\n",
				       audit_token_pid_from_task(target_task),
				       (target_task->bsd_info == NULL) ? "" : proc_name_address(target_task->bsd_info),
				       delta,
				       task_imp->iit_assertcnt,
				       IIT_EXTERN(task_imp));
			}
			task_imp->iit_assertcnt = IIT_EXTERN(task_imp);
		} else {
			task_imp->iit_assertcnt -= delta;
		}
#if IMPORTANCE_DEBUG
		// This covers both legacy and voucher-based importance.
		DTRACE_BOOST4(drop_boost, task_t, target_task, int, target_pid, int, delta, int, task_imp->iit_assertcnt);
#endif
	}

#if IMPORTANCE_DEBUG
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (((boost) ? IMP_HOLD : IMP_DROP) | TASK_POLICY_INTERNAL))) | DBG_FUNC_END,
	        proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
#endif

	/* did the change result in an effective donor status change? */
	after_boosted = (task_imp->iit_assertcnt > 0);

	if (after_boosted != before_boosted) {

		/*
		 * If the task importance is already on an update queue, we just reversed the need for a
		 * pending policy update. If the queue is any other than the delayed-drop-queue, pull it
		 * off that queue and release the reference it got going onto the update queue. If it is
		 * the delayed-drop-queue we leave it in place in case it comes back into the drop state
		 * before its time delay is up.
		 *
		 * We still need to propagate the change downstream to reverse the assertcnt effects,
		 * but we no longer need to update this task's boost policy state.
		 *
		 * Otherwise, mark it as needing a policy update.
		 */
		assert(0 == task_imp->iit_updatepolicy);
		if (NULL != task_imp->iit_updateq) {
			if (&ipc_importance_delayed_drop_queue != task_imp->iit_updateq) {
				queue_remove(task_imp->iit_updateq, task_imp, ipc_importance_task_t, iit_updates);
				task_imp->iit_updateq = NULL;
				ipc_importance_task_release_internal(task_imp); /* can't be last ref */
			}
		} else {
			task_imp->iit_updatepolicy = 1;
		}
		return TRUE;
	}

	return FALSE;
}
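
/*
 * Worked example (illustrative): if iit_assertcnt is 0 and an IIT_UPDATE_HOLD
 * of delta 1 arrives, before_boosted is FALSE and after_boosted becomes TRUE,
 * so the routine returns TRUE and the caller must propagate the transition
 * and apply a policy update. A later IIT_UPDATE_DROP of 1 crosses back
 * (1 -> 0) and again returns TRUE.
 */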


/*
 * Routine:	ipc_importance_task_propagate_helper
 * Purpose:
 *	Increment or decrement the internal task importance counter of all
 *	importance tasks inheriting from the specified one. If this causes
 *	that importance task to change state, add it to the list of tasks
 *	to do a policy update against.
 * Conditions:
 *	Called with the importance lock held.
 *	It is the caller's responsibility to iterate down the generated list
 *	and propagate any subsequent assertion changes from there.
 */
static void
ipc_importance_task_propagate_helper(
	ipc_importance_task_t	task_imp,
	iit_update_type_t	type,
	queue_t			propagation)
{
	ipc_importance_task_t temp_task_imp;

	/*
	 * iterate the downstream kmsgs, adjust their boosts,
	 * and capture the next task to adjust for each message
	 */

	ipc_kmsg_t temp_kmsg;

	queue_iterate(&task_imp->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance) {
		mach_msg_header_t *hdr = temp_kmsg->ikm_header;
		mach_port_delta_t delta;
		ipc_port_t port;

		/* toggle the kmsg importance bit as a barrier to parallel adjusts */
		if (IIT_UPDATE_HOLD == type) {
			if (MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
				continue;
			}

			/* mark the message as now carrying importance */
			hdr->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;
			delta = 1;
		} else {
			if (!MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
				continue;
			}

			/* clear the message as now carrying importance */
			hdr->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
			delta = -1;
		}

		/* determine the task importance to adjust as result (if any) */
		port = (ipc_port_t) hdr->msgh_remote_port;
		assert(IP_VALID(port));
		ip_lock(port);
		temp_task_imp = IIT_NULL;
		if (!ipc_port_importance_delta_internal(port, &delta, &temp_task_imp)) {
			ip_unlock(port);
		}

		/* no task importance to adjust associated with the port? */
		if (IIT_NULL == temp_task_imp) {
			continue;
		}

		/* hold a reference on temp_task_imp */

		/* Adjust the task assertions and determine if an edge was crossed */
		if (ipc_importance_task_check_transition(temp_task_imp, type, 1)) {
			incr_ref_counter(task_imp->iit_elem.iie_task_refs_added_transition);
			queue_enter(propagation, temp_task_imp, ipc_importance_task_t, iit_props);
			/* reference donated */
		} else {
			ipc_importance_task_release_internal(temp_task_imp);
		}
	}

	/*
	 * iterate the downstream importance inherits
	 * and capture the next task importance to boost for each
	 */
	ipc_importance_inherit_t temp_inherit;

	queue_iterate(&task_imp->iit_inherits, temp_inherit, ipc_importance_inherit_t, iii_inheritance) {
		uint32_t assertcnt = III_EXTERN(temp_inherit);

		temp_task_imp = temp_inherit->iii_to_task;
		assert(IIT_NULL != temp_task_imp);

		if (IIT_UPDATE_HOLD == type) {
			/* if no undropped externcnts in the inherit, nothing to do */
			if (0 == assertcnt) {
				assert(temp_inherit->iii_donating == FALSE);
				continue;
			}

			/* nothing to do if the inherit is already donating (forced donation) */
			if (temp_inherit->iii_donating) {
				continue;
			}

			/* mark it donating and contribute to the task externcnts */
			temp_inherit->iii_donating = TRUE;
			temp_task_imp->iit_externcnt += temp_inherit->iii_externcnt;
			temp_task_imp->iit_externdrop += temp_inherit->iii_externdrop;

		} else {
			/* if no contributing assertions, move on */
			if (0 == assertcnt) {
				assert(temp_inherit->iii_donating == FALSE);
				continue;
			}

			/* nothing to do if the inherit is not donating */
			if (!temp_inherit->iii_donating) {
				continue;
			}

			/* mark it no longer donating */
			temp_inherit->iii_donating = FALSE;

			/* remove the contribution the inherit made to the to-task */
			assert(IIT_EXTERN(temp_task_imp) >= III_EXTERN(temp_inherit));
			assert(temp_task_imp->iit_externcnt >= temp_inherit->iii_externcnt);
			assert(temp_task_imp->iit_externdrop >= temp_inherit->iii_externdrop);
			temp_task_imp->iit_externcnt -= temp_inherit->iii_externcnt;
			temp_task_imp->iit_externdrop -= temp_inherit->iii_externdrop;

		}

		/* Adjust the task assertions and determine if an edge was crossed */
		assert(ipc_importance_task_is_any_receiver_type(temp_task_imp));
		if (ipc_importance_task_check_transition(temp_task_imp, type, assertcnt)) {
			ipc_importance_task_reference(temp_task_imp);
			incr_ref_counter(task_imp->iit_elem.iie_task_refs_added_transition);
			queue_enter(propagation, temp_task_imp, ipc_importance_task_t, iit_props);
		}
	}
}

/*
 * Routine:	ipc_importance_task_process_updates
 * Purpose:
 *	Process the queue of task importances and apply the policy
 *	update called for. Only process tasks in the queue with an
 *	update timestamp less than the supplied max.
 * Conditions:
 *	Called and returns with importance locked.
 *	May drop importance lock and block temporarily.
 */
static void
ipc_importance_task_process_updates(
	queue_t		supplied_queue,
	boolean_t	boost,
	uint64_t	max_timestamp)
{
	ipc_importance_task_t task_imp;
	queue_head_t second_chance;
	queue_t queue = supplied_queue;

	/*
	 * This queue will hold the tasks we couldn't trylock on the first pass.
	 * By using a second (private) queue, we guarantee all tasks that get
	 * entered on this queue have a timestamp under the maximum.
	 */
	queue_init(&second_chance);

	/* process any resulting policy updates */
retry:
	while (!queue_empty(queue)) {
		task_t target_task;
		struct task_pend_token pend_token = {};

		task_imp = (ipc_importance_task_t)queue_first(queue);
		assert(0 == task_imp->iit_updatepolicy);
		assert(queue == task_imp->iit_updateq);

		/* if timestamp is too big, we're done */
		if (task_imp->iit_updatetime > max_timestamp) {
			break;
		}

		/* we were given a reference on each task in the queue */

		/* remove it from the supplied queue */
		queue_remove(queue, task_imp, ipc_importance_task_t, iit_updates);
		task_imp->iit_updateq = NULL;

		target_task = task_imp->iit_task;

		/* Is it well on the way to exiting? */
		if (TASK_NULL == target_task) {
			ipc_importance_task_release_locked(task_imp);
			/* importance unlocked */
			ipc_importance_lock();
			continue;
		}

		/* Has the update been reversed on the hysteresis queue? */
		if (0 < task_imp->iit_assertcnt &&
		    queue == &ipc_importance_delayed_drop_queue) {
			ipc_importance_task_release_locked(task_imp);
			/* importance unlocked */
			ipc_importance_lock();
			continue;
		}

		/*
		 * Can we get the task lock out-of-order?
		 * If not, stick this back on the second-chance queue.
		 */
		if (!task_lock_try(target_task)) {
			boolean_t should_wait_lock = (queue == &second_chance);
			task_imp->iit_updateq = &second_chance;

			/*
			 * If we're already processing second-chances on
			 * tasks, keep this task on the front of the queue.
			 * We will wait for the task lock before coming
			 * back and trying again, and we have a better
			 * chance of re-acquiring the lock if we come back
			 * to it right away.
			 */
			if (should_wait_lock) {
				task_reference(target_task);
				queue_enter_first(&second_chance, task_imp,
				                  ipc_importance_task_t, iit_updates);
			} else {
				queue_enter(&second_chance, task_imp,
				            ipc_importance_task_t, iit_updates);
			}
			ipc_importance_unlock();

			if (should_wait_lock) {
				task_lock(target_task);
				task_unlock(target_task);
				task_deallocate(target_task);
			}

			ipc_importance_lock();
			continue;
		}

		/* is it going away? */
		if (!target_task->active) {
			task_unlock(target_task);
			ipc_importance_task_release_locked(task_imp);
			/* importance unlocked */
			ipc_importance_lock();
			continue;
		}

		/* take a task reference for while we don't have the importance lock */
		task_reference(target_task);

		/* count the transition */
		if (boost)
			task_imp->iit_transitions++;

		ipc_importance_unlock();

		/* apply the policy adjust to the target task (while it is still locked) */
		task_update_boost_locked(target_task, boost, &pend_token);

		/* complete the policy update with the task unlocked */
		ipc_importance_task_release(task_imp);
		task_unlock(target_task);
		task_policy_update_complete_unlocked(target_task, THREAD_NULL, &pend_token);
		task_deallocate(target_task);

		ipc_importance_lock();
	}

	/* If there are tasks we couldn't update the first time, try again */
	if (!queue_empty(&second_chance)) {
		queue = &second_chance;
		goto retry;
	}
}


/*
 * Routine:	ipc_importance_task_delayed_drop_scan
 * Purpose:
 *	The thread call routine to scan the delayed drop queue,
 *	requesting all updates with a deadline up to the last target
 *	for the thread-call (which is DENAP_DROP_SKEW beyond the first
 *	thread's optimum delay).
 * Conditions:
 *	Nothing locked
 */
static void
ipc_importance_task_delayed_drop_scan(
	__unused void *arg1,
	__unused void *arg2)
{
	ipc_importance_lock();

	/* process all queued task drops with timestamps up to TARGET(first)+SKEW */
	ipc_importance_task_process_updates(&ipc_importance_delayed_drop_queue,
	                                    FALSE,
	                                    ipc_importance_delayed_drop_timestamp);

	/* importance lock may have been temporarily dropped */

	/* If there are any entries left in the queue, re-arm the call here */
	if (!queue_empty(&ipc_importance_delayed_drop_queue)) {
		ipc_importance_task_t task_imp;
		uint64_t deadline;
		uint64_t leeway;

		task_imp = (ipc_importance_task_t)queue_first(&ipc_importance_delayed_drop_queue);

		nanoseconds_to_absolutetime(DENAP_DROP_DELAY, &deadline);
		deadline += task_imp->iit_updatetime;
		ipc_importance_delayed_drop_timestamp = deadline;

		nanoseconds_to_absolutetime(DENAP_DROP_LEEWAY, &leeway);

		thread_call_enter_delayed_with_leeway(
			ipc_importance_delayed_drop_call,
			NULL,
			deadline,
			leeway,
			DENAP_DROP_FLAGS);
	} else {
		ipc_importance_delayed_drop_call_requested = FALSE;
	}
	ipc_importance_unlock();
}

/*
 * Routine:	ipc_importance_task_delayed_drop
 * Purpose:
 *	Queue the specified task importance for delayed policy
 *	update to drop its boost.
 * Conditions:
 *	Called with the importance lock held.
 */
static void
ipc_importance_task_delayed_drop(ipc_importance_task_t task_imp)
{
	uint64_t timestamp = mach_absolute_time(); /* no mach_approximate_time() in kernel */

	assert(ipc_importance_delayed_drop_call != NULL);

	/*
	 * If still on an update queue from a previous change,
	 * remove it first (and use that reference). Otherwise, take
	 * a new reference for the delay drop update queue.
	 */
	if (NULL != task_imp->iit_updateq) {
		queue_remove(task_imp->iit_updateq, task_imp,
		             ipc_importance_task_t, iit_updates);
	} else {
		ipc_importance_task_reference_internal(task_imp);
	}

	task_imp->iit_updateq = &ipc_importance_delayed_drop_queue;
	task_imp->iit_updatetime = timestamp;

	queue_enter(&ipc_importance_delayed_drop_queue, task_imp,
	            ipc_importance_task_t, iit_updates);

	/* request the delayed thread-call if not already requested */
	if (!ipc_importance_delayed_drop_call_requested) {
		uint64_t deadline;
		uint64_t leeway;

		nanoseconds_to_absolutetime(DENAP_DROP_DELAY, &deadline);
		deadline += task_imp->iit_updatetime;
		ipc_importance_delayed_drop_timestamp = deadline;

		nanoseconds_to_absolutetime(DENAP_DROP_LEEWAY, &leeway);

		ipc_importance_delayed_drop_call_requested = TRUE;
		thread_call_enter_delayed_with_leeway(
			ipc_importance_delayed_drop_call,
			NULL,
			deadline,
			leeway,
			DENAP_DROP_FLAGS);
	}
}
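
/*
 * Timing note (derived from the constants above): the deadline programmed
 * here is iit_updatetime + DENAP_DROP_DELAY (about 1100 ms), so a queued
 * boost drop is applied roughly a second after it was requested, with up to
 * DENAP_DROP_LEEWAY (200 ms) of additional thread-call wakeup leeway.
 */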


/*
 * Routine:	ipc_importance_task_propagate_assertion_locked
 * Purpose:
 *	Propagate the importance transition type to every item downstream
 *	of the given task importance. If this causes a boost to be applied,
 *	determine if that boost should propagate downstream.
 * Conditions:
 *	Called with the importance lock held.
 */
static void
ipc_importance_task_propagate_assertion_locked(
	ipc_importance_task_t	task_imp,
	iit_update_type_t	type,
	boolean_t		update_task_imp)
{
	boolean_t boost = (IIT_UPDATE_HOLD == type);
	ipc_importance_task_t temp_task_imp;
	queue_head_t propagate;
	queue_head_t updates;

	queue_init(&updates);
	queue_init(&propagate);

	/*
	 * If we're going to update the policy for the provided task,
	 * enqueue it on the propagate queue itself. Otherwise, only
	 * enqueue downstream things.
	 */
	if (update_task_imp) {
		queue_enter(&propagate, task_imp, ipc_importance_task_t, iit_props);
	} else {
		ipc_importance_task_propagate_helper(task_imp, type, &propagate);
	}

	/*
	 * for each item on the propagation list, propagate any change downstream,
	 * adding new tasks to propagate further if they transitioned as well.
	 */
	while (!queue_empty(&propagate)) {
		boolean_t need_update;

		queue_remove_first(&propagate, temp_task_imp, ipc_importance_task_t, iit_props);
		assert(IIT_NULL != temp_task_imp);

		/* only propagate for receivers not already marked as a donor */
		if (!ipc_importance_task_is_marked_donor(temp_task_imp) &&
		    ipc_importance_task_is_marked_receiver(temp_task_imp)) {
			ipc_importance_task_propagate_helper(temp_task_imp, type, &propagate);
		}

		/* if we have a policy update to apply, enqueue a reference for later processing */
		need_update = (0 != temp_task_imp->iit_updatepolicy);
		temp_task_imp->iit_updatepolicy = 0;
		if (need_update && TASK_NULL != temp_task_imp->iit_task) {
			if (NULL == temp_task_imp->iit_updateq) {
				temp_task_imp->iit_updatetime = 0;
				temp_task_imp->iit_updateq = &updates;
				ipc_importance_task_reference_internal(temp_task_imp);
				if (boost) {
					queue_enter(&updates, temp_task_imp,
					            ipc_importance_task_t, iit_updates);
				} else {
					queue_enter_first(&updates, temp_task_imp,
					                  ipc_importance_task_t, iit_updates);
				}
			} else {
				/* Must already be on the AppNap hysteresis queue */
				assert(&ipc_importance_delayed_drop_queue == temp_task_imp->iit_updateq);
				assert(ipc_importance_task_is_marked_denap_receiver(temp_task_imp));
			}
		}
	}

	/* apply updates to task (may drop importance lock) */
	if (!queue_empty(&updates)) {
		ipc_importance_task_process_updates(&updates, boost, 0);
	}
}

/*
 * Routine:	ipc_importance_task_hold_internal_assertion_locked
 * Purpose:
 *	Increment the assertion count on the task importance.
 *	If this results in a boost state change in that task,
 *	prepare to update task policy for this task AND, if
 *	not just waking out of App Nap, all down-stream
 *	tasks that have a similar transition through inheriting
 *	this update.
 * Conditions:
 *	importance locked on entry and exit.
 *	May temporarily drop importance lock and block.
 */
static kern_return_t
ipc_importance_task_hold_internal_assertion_locked(ipc_importance_task_t task_imp, uint32_t count)
{
	if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_HOLD, count)) {
		ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_HOLD, TRUE);
	}
	return KERN_SUCCESS;
}

/*
 * Routine:	ipc_importance_task_drop_internal_assertion_locked
 * Purpose:
 *	Decrement the assertion count on the task importance.
 *	If this results in a boost state change in that task,
 *	prepare to update task policy for this task AND, if
 *	not just waking out of App Nap, all down-stream
 *	tasks that have a similar transition through inheriting
 *	this update.
 * Conditions:
 *	importance locked on entry and exit.
 *	May temporarily drop importance lock and block.
 */
static kern_return_t
ipc_importance_task_drop_internal_assertion_locked(ipc_importance_task_t task_imp, uint32_t count)
{
	if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_DROP, count)) {
		ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, TRUE);
	}
	return KERN_SUCCESS;
}

/*
 * Routine:	ipc_importance_task_hold_internal_assertion
 * Purpose:
 *	Increment the assertion count on the task importance.
 *	If this results in a 0->1 change in that count,
 *	prepare to update task policy for this task AND
 *	(potentially) all down-stream tasks that have a
 *	similar transition through inheriting this update.
 * Conditions:
 *	Nothing locked
 *	May block after dropping importance lock.
 */
int
ipc_importance_task_hold_internal_assertion(ipc_importance_task_t task_imp, uint32_t count)
{
	int ret = KERN_SUCCESS;

	if (ipc_importance_task_is_any_receiver_type(task_imp)) {
		ipc_importance_lock();
		ret = ipc_importance_task_hold_internal_assertion_locked(task_imp, count);
		ipc_importance_unlock();
	}
	return ret;
}

/*
 * Routine:	ipc_importance_task_drop_internal_assertion
 * Purpose:
 *	Decrement the assertion count on the task importance.
 *	If this results in a X->0 change in that count,
 *	prepare to update task policy for this task AND
 *	all down-stream tasks that have a similar transition
 *	through inheriting this drop update.
 * Conditions:
 *	Nothing locked on entry.
 *	May block after dropping importance lock.
 */
kern_return_t
ipc_importance_task_drop_internal_assertion(ipc_importance_task_t task_imp, uint32_t count)
{
	kern_return_t ret = KERN_SUCCESS;

	if (ipc_importance_task_is_any_receiver_type(task_imp)) {
		ipc_importance_lock();
		ret = ipc_importance_task_drop_internal_assertion_locked(task_imp, count);
		ipc_importance_unlock();
	}
	return ret;
}

/*
 * Routine:	ipc_importance_task_hold_file_lock_assertion
 * Purpose:
 *	Increment the file lock assertion count on the task importance.
 *	If this results in a 0->1 change in that count,
 *	prepare to update task policy for this task AND
 *	(potentially) all down-stream tasks that have a
 *	similar transition through inheriting this update.
 * Conditions:
 *	Nothing locked
 *	May block after dropping importance lock.
 */
kern_return_t
ipc_importance_task_hold_file_lock_assertion(ipc_importance_task_t task_imp, uint32_t count)
{
	kern_return_t ret = KERN_SUCCESS;

	if (ipc_importance_task_is_any_receiver_type(task_imp)) {
		ipc_importance_lock();
		ret = ipc_importance_task_hold_internal_assertion_locked(task_imp, count);
		if (KERN_SUCCESS == ret) {
			task_imp->iit_filelocks += count;
		}
		ipc_importance_unlock();
	}
	return ret;
}

/*
 * Routine:	ipc_importance_task_drop_file_lock_assertion
 * Purpose:
 *	Decrement the assertion count on the task importance.
 *	If this results in a X->0 change in that count,
 *	prepare to update task policy for this task AND
 *	all down-stream tasks that have a similar transition
 *	through inheriting this drop update.
 * Conditions:
 *	Nothing locked on entry.
 *	May block after dropping importance lock.
 */
kern_return_t
ipc_importance_task_drop_file_lock_assertion(ipc_importance_task_t task_imp, uint32_t count)
{
	kern_return_t ret = KERN_SUCCESS;

	if (ipc_importance_task_is_any_receiver_type(task_imp)) {
		ipc_importance_lock();
		if (count <= task_imp->iit_filelocks) {
			task_imp->iit_filelocks -= count;
			ret = ipc_importance_task_drop_internal_assertion_locked(task_imp, count);
		} else {
			ret = KERN_INVALID_ARGUMENT;
		}
		ipc_importance_unlock();
	}
	return ret;
}
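
/*
 * Usage sketch (illustrative): callers pair
 * ipc_importance_task_hold_file_lock_assertion(task_imp, 1) with a matching
 * ipc_importance_task_drop_file_lock_assertion(task_imp, 1); the hold is
 * counted both as an internal assertion and in iit_filelocks, and a drop of
 * more than iit_filelocks fails with KERN_INVALID_ARGUMENT.
 */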

/*
 * Routine:	ipc_importance_task_hold_legacy_external_assertion
 * Purpose:
 *	Increment the external assertion count on the task importance.
 *	This cannot result in an 0->1 transition, as the caller must
 *	already hold an external boost.
 * Conditions:
 *	Nothing locked on entry.
 *	May block after dropping importance lock.
 *	A queue of task importance structures is returned
 *	by ipc_importance_task_hold_assertion_locked(). Each
 *	needs to be updated (outside the importance lock hold).
 */
kern_return_t
ipc_importance_task_hold_legacy_external_assertion(ipc_importance_task_t task_imp, uint32_t count)
{
	task_t target_task;
	uint32_t target_assertcnt;
	uint32_t target_externcnt;
	uint32_t target_legacycnt;

	kern_return_t ret;

	ipc_importance_lock();
	target_task = task_imp->iit_task;

#if IMPORTANCE_DEBUG
	int target_pid = (TASK_NULL != target_task) ? audit_token_pid_from_task(target_task) : -1;

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_HOLD | TASK_POLICY_EXTERNAL))) | DBG_FUNC_START,
	        proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
#endif

	if (IIT_LEGACY_EXTERN(task_imp) == 0) {
		/* Only allowed to take a new boost assertion when holding an external boost */
		/* save data for diagnostic printf below */
		target_assertcnt = task_imp->iit_assertcnt;
		target_externcnt = IIT_EXTERN(task_imp);
		target_legacycnt = IIT_LEGACY_EXTERN(task_imp);
		ret = KERN_FAILURE;
		count = 0;
	} else {
		assert(ipc_importance_task_is_any_receiver_type(task_imp));
		assert(0 < task_imp->iit_assertcnt);
		assert(0 < IIT_EXTERN(task_imp));
		task_imp->iit_assertcnt += count;
		task_imp->iit_externcnt += count;
		task_imp->iit_legacy_externcnt += count;
		ret = KERN_SUCCESS;
	}
	ipc_importance_unlock();

#if IMPORTANCE_DEBUG
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_HOLD | TASK_POLICY_EXTERNAL))) | DBG_FUNC_END,
	        proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
	// This covers the legacy case where a task takes an extra boost.
	DTRACE_BOOST5(receive_boost, task_t, target_task, int, target_pid, int, proc_selfpid(), int, count, int, task_imp->iit_assertcnt);
#endif

	if (KERN_FAILURE == ret && target_task != TASK_NULL) {
		printf("BUG in process %s[%d]: "
		       "attempt to acquire an additional legacy external boost assertion without holding an existing legacy external assertion. "
		       "(%d total, %d external, %d legacy-external)\n",
		       proc_name_address(target_task->bsd_info), audit_token_pid_from_task(target_task),
		       target_assertcnt, target_externcnt, target_legacycnt);
	}

	return(ret);
}

/*
 * Routine:	ipc_importance_task_drop_legacy_external_assertion
 * Purpose:
 *	Drop the legacy external assertion count on the task and
 *	reflect that change to total external assertion count and
 *	then onto the internal importance count.
 *
 *	If this results in a X->0 change in the internal
 *	count, prepare to update task policy for this task AND
 *	all down-stream tasks that have a similar transition
 *	through inheriting this update.
 * Conditions:
 *	Nothing locked on entry.
 */
kern_return_t
ipc_importance_task_drop_legacy_external_assertion(ipc_importance_task_t task_imp, uint32_t count)
{
	int ret = KERN_SUCCESS;
	task_t target_task;
	uint32_t target_assertcnt;
	uint32_t target_externcnt;
	uint32_t target_legacycnt;

	if (count > 1) {
		return KERN_INVALID_ARGUMENT;
	}

	ipc_importance_lock();
	target_task = task_imp->iit_task;

#if IMPORTANCE_DEBUG
	int target_pid = (TASK_NULL != target_task) ? audit_token_pid_from_task(target_task) : -1;

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_DROP | TASK_POLICY_EXTERNAL))) | DBG_FUNC_START,
	        proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
#endif

	if (count > IIT_LEGACY_EXTERN(task_imp)) {
		/* Process over-released its boost count - save data for diagnostic printf */
		/* TODO: If count > 1, we should clear out as many external assertions as there are left. */
		target_assertcnt = task_imp->iit_assertcnt;
		target_externcnt = IIT_EXTERN(task_imp);
		target_legacycnt = IIT_LEGACY_EXTERN(task_imp);
		ret = KERN_FAILURE;
	} else {
		/*
		 * decrement legacy external count from the top level and reflect
		 * into internal for this and all subsequent updates.
		 */
		assert(ipc_importance_task_is_any_receiver_type(task_imp));
		assert(IIT_EXTERN(task_imp) >= count);

		task_imp->iit_legacy_externdrop += count;
		task_imp->iit_externdrop += count;

		/* reset extern counters (if appropriate) */
		if (IIT_LEGACY_EXTERN(task_imp) == 0) {
			if (IIT_EXTERN(task_imp) != 0) {
				task_imp->iit_externcnt -= task_imp->iit_legacy_externcnt;
				task_imp->iit_externdrop -= task_imp->iit_legacy_externdrop;
			} else {
				task_imp->iit_externcnt = 0;
				task_imp->iit_externdrop = 0;
			}
			task_imp->iit_legacy_externcnt = 0;
			task_imp->iit_legacy_externdrop = 0;
		}

		/* reflect the drop to the internal assertion count (and effect any importance change) */
		if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_DROP, count)) {
			ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, TRUE);
		}
		ret = KERN_SUCCESS;
	}

#if IMPORTANCE_DEBUG
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_DROP | TASK_POLICY_EXTERNAL))) | DBG_FUNC_END,
	        proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
#endif

	ipc_importance_unlock();

	/* delayed printf for user-supplied data failures */
	if (KERN_FAILURE == ret && TASK_NULL != target_task) {
		printf("BUG in process %s[%d]: over-released legacy external boost assertions (%d total, %d external, %d legacy-external)\n",
		       proc_name_address(target_task->bsd_info), audit_token_pid_from_task(target_task),
		       target_assertcnt, target_externcnt, target_legacycnt);
	}

	return(ret);
}


/* Transfer an assertion to legacy userspace responsibility */
static kern_return_t
ipc_importance_task_externalize_legacy_assertion(ipc_importance_task_t task_imp, uint32_t count, __unused int sender_pid)
{
	task_t target_task;

	assert(IIT_NULL != task_imp);
	target_task = task_imp->iit_task;

	if (TASK_NULL == target_task ||
	    !ipc_importance_task_is_any_receiver_type(task_imp)) {
		return KERN_FAILURE;
	}

#if IMPORTANCE_DEBUG
	int target_pid = audit_token_pid_from_task(target_task);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, IMP_EXTERN)) | DBG_FUNC_START,
	        proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
#endif

	ipc_importance_lock();
	/* assert(task_imp->iit_assertcnt >= IIT_EXTERN(task_imp) + count); */
	assert(IIT_EXTERN(task_imp) >= IIT_LEGACY_EXTERN(task_imp));
	task_imp->iit_legacy_externcnt += count;
	task_imp->iit_externcnt += count;
	ipc_importance_unlock();

#if IMPORTANCE_DEBUG
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, IMP_EXTERN)) | DBG_FUNC_END,
	        proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
	// This is the legacy boosting path
	DTRACE_BOOST5(receive_boost, task_t, target_task, int, target_pid, int, sender_pid, int, count, int, IIT_LEGACY_EXTERN(task_imp));
#endif /* IMPORTANCE_DEBUG */

	return(KERN_SUCCESS);
}

/*
 * Routine:	ipc_importance_task_update_live_donor
 * Purpose:
 *	Read the live donor status and update the live_donor bit/propagate the change in importance.
 * Conditions:
 *	Nothing locked on entrance, nothing locked on exit.
 *
 *	TODO: Need tracepoints around this function...
 */
void
ipc_importance_task_update_live_donor(ipc_importance_task_t task_imp)
{
	uint32_t task_live_donor;
	boolean_t before_donor;
	boolean_t after_donor;
	task_t target_task;

	assert(task_imp != NULL);

	/*
	 * Nothing to do if the task is not marked as expecting
	 * live donor updates.
	 */
	if (!ipc_importance_task_is_marked_live_donor(task_imp)) {
		return;
	}

	ipc_importance_lock();

	/* If the task got disconnected on the way here, no use (or ability) adjusting live donor status */
	target_task = task_imp->iit_task;
	if (TASK_NULL == target_task) {
		ipc_importance_unlock();
		return;
	}
	before_donor = ipc_importance_task_is_marked_donor(task_imp);

	/* snapshot task live donor status - may change, but another call will accompany the change */
	task_live_donor = target_task->effective_policy.t_live_donor;

#if IMPORTANCE_DEBUG
	int target_pid = audit_token_pid_from_task(target_task);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	        (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_UPDATE_LIVE_DONOR_STATE)) | DBG_FUNC_START,
	        target_pid, task_imp->iit_donor, task_live_donor, before_donor, 0);
#endif

	/* update the task importance live donor status based on the task's value */
	task_imp->iit_donor = task_live_donor;

	after_donor = ipc_importance_task_is_marked_donor(task_imp);

	/* Has the effectiveness of being a donor changed as a result of this update? */
	if (before_donor != after_donor) {
		iit_update_type_t type;

		/* propagate assertions without updating the current task policy (already handled) */
		if (0 == before_donor) {
			task_imp->iit_transitions++;
			type = IIT_UPDATE_HOLD;
		} else {
			type = IIT_UPDATE_DROP;
		}
		ipc_importance_task_propagate_assertion_locked(task_imp, type, FALSE);
	}

#if IMPORTANCE_DEBUG
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	        (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_UPDATE_LIVE_DONOR_STATE)) | DBG_FUNC_END,
	        target_pid, task_imp->iit_donor, task_live_donor, after_donor, 0);
#endif

	ipc_importance_unlock();
}


/*
 * Routine:	ipc_importance_task_mark_donor
 * Purpose:
 *	Set the task importance donor flag.
 * Conditions:
 *	Nothing locked on entrance, nothing locked on exit.
 *
 *	This is only called while the task is being constructed,
 *	so no need to update task policy or propagate downstream.
 */
void
ipc_importance_task_mark_donor(ipc_importance_task_t task_imp, boolean_t donating)
{
	assert(task_imp != NULL);

	ipc_importance_lock();

	int old_donor = task_imp->iit_donor;

	task_imp->iit_donor = (donating ? 1 : 0);

	if (task_imp->iit_donor > 0 && old_donor == 0)
		task_imp->iit_transitions++;

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	        (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_INIT_DONOR_STATE)) | DBG_FUNC_NONE,
1523 | audit_token_pid_from_task(task_imp->iit_task), donating, | |
1524 | old_donor, task_imp->iit_donor, 0); | |
1525 | ||
1526 | ipc_importance_unlock(); | |
1527 | } | |
1528 | ||
1529 | /* | |
1530 | * Routine: ipc_importance_task_is_marked_donor | |
1531 | * Purpose: | |
1532 | * Query the donor flag for the given task importance. | |
1533 | * Conditions: | |
1534 | * May be called without taking the importance lock. | |
1535 | * In that case, donor status can change so you must | |
1536 | * check only once for each donation event. | |
1537 | */ | |
1538 | boolean_t | |
1539 | ipc_importance_task_is_marked_donor(ipc_importance_task_t task_imp) | |
1540 | { | |
1541 | if (IIT_NULL == task_imp) { | |
1542 | return FALSE; | |
1543 | } | |
1544 | return (0 != task_imp->iit_donor); | |
1545 | } | |
1546 | ||
1547 | /* | |
1548 | * Routine: ipc_importance_task_mark_live_donor | |
1549 | * Purpose: | |
1550 | * Indicate that the task is eligible for live donor updates. | |
1551 | * Conditions: | |
1552 | * Nothing locked on entrance, nothing locked on exit. | |
1553 | * | |
1554 | * This is only called while the task is being constructed. | |
1555 | */ | |
1556 | void | |
1557 | ipc_importance_task_mark_live_donor(ipc_importance_task_t task_imp, boolean_t live_donating) | |
1558 | { | |
1559 | assert(task_imp != NULL); | |
1560 | ||
1561 | ipc_importance_lock(); | |
1562 | task_imp->iit_live_donor = (live_donating ? 1 : 0); | |
1563 | ipc_importance_unlock(); | |
1564 | } | |
1565 | ||
1566 | /* | |
1567 | * Routine: ipc_importance_task_is_marked_live_donor | |
1568 | * Purpose: | |
1569 | * Query the live donor and donor flags for the given task importance. | |
1570 | * Conditions: | |
1571 | * May be called without taking the importance lock. | |
1572 | * In that case, donor status can change so you must | |
1573 | * check only once for each donation event. | |
1574 | */ | |
1575 | boolean_t | |
1576 | ipc_importance_task_is_marked_live_donor(ipc_importance_task_t task_imp) | |
1577 | { | |
1578 | if (IIT_NULL == task_imp) { | |
1579 | return FALSE; | |
1580 | } | |
1581 | return (0 != task_imp->iit_live_donor); | |
1582 | } | |
1583 | ||
1584 | /* | |
1585 | * Routine: ipc_importance_task_is_donor | |
1586 | * Purpose: | |
1587 | * Query the full donor status for the given task importance. | |
1588 | * Conditions: | |
1589 | * May be called without taking the importance lock. | |
1590 | * In that case, donor status can change so you must | |
1591 | * check only once for each donation event. | |
1592 | */ | |
1593 | boolean_t | |
1594 | ipc_importance_task_is_donor(ipc_importance_task_t task_imp) | |
1595 | { | |
1596 | if (IIT_NULL == task_imp) { | |
1597 | return FALSE; | |
1598 | } | |
1599 | return (ipc_importance_task_is_marked_donor(task_imp) || | |
1600 | (ipc_importance_task_is_marked_receiver(task_imp) && | |
1601 | task_imp->iit_assertcnt > 0)); | |
1602 | } | |
1603 | ||
1604 | /* | |
1605 | * Routine: ipc_importance_task_is_never_donor | |
1606 | * Purpose: | |
1607 | * Query if a given task can ever donate importance. | |
1608 | * Conditions: | |
1609 | * May be called without taking the importance lock. | |
1610 | * Condition is permanent for a given task. | |
1611 | */ | |
1612 | boolean_t | |
1613 | ipc_importance_task_is_never_donor(ipc_importance_task_t task_imp) | |
1614 | { | |
1615 | if (IIT_NULL == task_imp) { | |
1616 | return FALSE; | |
1617 | } | |
1618 | return (!ipc_importance_task_is_marked_donor(task_imp) && | |
1619 | !ipc_importance_task_is_marked_live_donor(task_imp) && | |
1620 | !ipc_importance_task_is_marked_receiver(task_imp)); | |
1621 | } | |
1622 | ||
1623 | /* | |
1624 | * Routine: ipc_importance_task_mark_receiver | |
1625 | * Purpose: | |
1626 | * Update the task importance receiver flag. | |
1627 | * Conditions: | |
1628 | * Nothing locked on entrance, nothing locked on exit. | |
1629 | * This can only be invoked before the task is discoverable, | |
1630 | * so no worries about atomicity(?) | |
1631 | */ | |
1632 | void | |
1633 | ipc_importance_task_mark_receiver(ipc_importance_task_t task_imp, boolean_t receiving) | |
1634 | { | |
1635 | assert(task_imp != NULL); | |
1636 | ||
1637 | ipc_importance_lock(); | |
1638 | if (receiving) { | |
1639 | assert(task_imp->iit_assertcnt == 0); | |
1640 | assert(task_imp->iit_externcnt == 0); | |
1641 | assert(task_imp->iit_externdrop == 0); | |
1642 | assert(task_imp->iit_denap == 0); | |
1643 | task_imp->iit_receiver = 1; /* task can receive importance boost */ | |
1644 | } else if (task_imp->iit_receiver) { | |
1645 | assert(task_imp->iit_denap == 0); | |
1646 | if (task_imp->iit_assertcnt != 0 || IIT_EXTERN(task_imp) != 0) { | |
1647 | panic("disabling imp_receiver on task with pending importance boosts!"); | |
1648 | } | |
1649 | task_imp->iit_receiver = 0; | |
1650 | } | |
1651 | ipc_importance_unlock(); | |
1652 | } | |
1653 | ||
1654 | ||
1655 | /* | |
1656 | * Routine: ipc_importance_task_is_marked_receiver | |
1657 | * Purpose: | |
1658 | * Query the receiver flag for the given task importance. | |
1659 | * Conditions: | |
1660 | * May be called without taking the importance lock as | |
1661 | * the receiver flag can never change after task init. | |
1662 | */ | |
1663 | boolean_t | |
1664 | ipc_importance_task_is_marked_receiver(ipc_importance_task_t task_imp) | |
1665 | { | |
1666 | return (IIT_NULL != task_imp && 0 != task_imp->iit_receiver); | |
1667 | } | |
1668 | ||
1669 | ||
1670 | /* | |
1671 | * Routine: ipc_importance_task_mark_denap_receiver | |
1672 | * Purpose: | |
1673 | * Update the task importance de-nap receiver flag. | |
1674 | * Conditions: | |
1675 | * Nothing locked on entrance, nothing locked on exit. | |
1676 | * This can only be invoked before the task is discoverable, | |
1677 | * so no worries about atomicity(?) | |
1678 | */ | |
1679 | void | |
1680 | ipc_importance_task_mark_denap_receiver(ipc_importance_task_t task_imp, boolean_t denap) | |
1681 | { | |
1682 | assert(task_imp != NULL); | |
1683 | ||
1684 | ipc_importance_lock(); | |
1685 | if (denap) { | |
1686 | assert(task_imp->iit_assertcnt == 0); | |
1687 | assert(task_imp->iit_externcnt == 0); | |
1688 | assert(task_imp->iit_receiver == 0); | |
1689 | task_imp->iit_denap = 1; /* task can receive de-nap boost */ | |
1690 | } else if (task_imp->iit_denap) { | |
1691 | assert(task_imp->iit_receiver == 0); | |
1692 | if (0 < task_imp->iit_assertcnt || 0 < IIT_EXTERN(task_imp)) { | |
1693 | panic("disabling de-nap on task with pending de-nap boosts!"); | |
1694 | } | |
1695 | task_imp->iit_denap = 0; | |
1696 | } | |
1697 | ipc_importance_unlock(); | |
1698 | } | |
1699 | ||
1700 | ||
1701 | /* | |
1702 | * Routine: ipc_importance_task_is_marked_denap_receiver | |
1703 | * Purpose: | |
1704 | * Query the de-nap receiver flag for the given task importance. | |
1705 | * Conditions: | |
1706 | * May be called without taking the importance lock as | |
1707 | * the de-nap flag can never change after task init. | |
1708 | */ | |
1709 | boolean_t | |
1710 | ipc_importance_task_is_marked_denap_receiver(ipc_importance_task_t task_imp) | |
1711 | { | |
1712 | return (IIT_NULL != task_imp && 0 != task_imp->iit_denap); | |
1713 | } | |
1714 | ||
1715 | /* | |
1716 | * Routine: ipc_importance_task_is_denap_receiver | |
1717 | * Purpose: | |
1718 | * Query the full de-nap receiver status for the given task importance. | |
1719 | * For now, that is simply whether the receiver flag is set. | |
1720 | * Conditions: | |
1721 | * May be called without taking the importance lock as | |
1722 | * the de-nap receiver flag can never change after task init. | |
1723 | */ | |
1724 | boolean_t | |
1725 | ipc_importance_task_is_denap_receiver(ipc_importance_task_t task_imp) | |
1726 | { | |
1727 | return (ipc_importance_task_is_marked_denap_receiver(task_imp)); | |
1728 | } | |
1729 | ||
1730 | /* | |
1731 | * Routine: ipc_importance_task_is_any_receiver_type | |
1732 | * Purpose: | |
1733 | * Query if the task is marked to receive boosts - either | |
1734 | * importance or denap. | |
1735 | * Conditions: | |
1736 | * May be called without taking the importance lock as both | |
1737 | * the importance and de-nap receiver flags can never change | |
1738 | * after task init. | |
1739 | */ | |
1740 | boolean_t | |
1741 | ipc_importance_task_is_any_receiver_type(ipc_importance_task_t task_imp) | |
1742 | { | |
1743 | return (ipc_importance_task_is_marked_receiver(task_imp) || | |
1744 | ipc_importance_task_is_marked_denap_receiver(task_imp)); | |
1745 | } | |
1746 | ||
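#if 0 /* illustrative sketch - not part of the original source */
/*
 * A hypothetical task-construction fragment showing how the receiver
 * flags above are meant to be combined: a task is configured either as
 * an importance receiver or as a de-nap receiver (the asserts in the
 * mark routines treat the two as mutually exclusive), after which
 * ipc_importance_task_is_any_receiver_type() reports TRUE. The helper
 * name and the "adaptive" parameter are made up for illustration.
 */
static void
example_configure_receiver(ipc_importance_task_t task_imp, boolean_t adaptive)
{
	if (adaptive) {
		ipc_importance_task_mark_receiver(task_imp, TRUE);
	} else {
		ipc_importance_task_mark_denap_receiver(task_imp, TRUE);
	}
	assert(ipc_importance_task_is_any_receiver_type(task_imp));
}
#endif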
1747 | #if 0 /* currently unused */ | |
1748 | ||
1749 | /* | |
1750 | * Routine: ipc_importance_inherit_reference | |
1751 | * Purpose: | |
1752 | * Add a reference to the inherit importance element. | |
1753 | * Conditions: | |
1754 | * Caller must hold a reference on the inherit element. | |
1755 | */ | |
1756 | static inline void | |
1757 | ipc_importance_inherit_reference(ipc_importance_inherit_t inherit) | |
1758 | { | |
1759 | ipc_importance_reference(&inherit->iii_elem); | |
1760 | } | |
1761 | #endif /* currently unused */ | |
1762 | ||
1763 | /* | |
1764 | * Routine: ipc_importance_inherit_release_locked | |
1765 | * Purpose: | |
1766 | * Release a reference on an inherit importance attribute value, | |
1767 | * unlinking and deallocating the attribute if the last reference. | |
1768 | * Conditions: | |
1769 | * Entered with importance lock held, leaves with it unlocked. | |
1770 | */ | |
1771 | static inline void | |
1772 | ipc_importance_inherit_release_locked(ipc_importance_inherit_t inherit) | |
1773 | { | |
1774 | ipc_importance_release_locked(&inherit->iii_elem); | |
1775 | } | |
1776 | ||
1777 | #if 0 /* currently unused */ | |
1778 | /* | |
1779 | * Routine: ipc_importance_inherit_release | |
1780 | * Purpose: | |
1781 | * Release a reference on an inherit importance attribute value, | |
1782 | * unlinking and deallocating the attribute if the last reference. | |
1783 | * Conditions: | |
1784 | * nothing locked on entrance, nothing locked on exit. | |
1785 | * May block. | |
1786 | */ | |
1787 | void | |
1788 | ipc_importance_inherit_release(ipc_importance_inherit_t inherit) | |
1789 | { | |
1790 | if (III_NULL != inherit) | |
1791 | ipc_importance_release(&inherit->iii_elem); | |
1792 | } | |
1793 | #endif /* 0 currently unused */ | |
1794 | ||
1795 | /* | |
1796 | * Routine: ipc_importance_for_task | |
1797 | * Purpose: | |
1798 | * Create a reference for the specified task's base importance | |
1799 | * element. If the base importance element doesn't exist, make it and | |
1800 | * bind it to the active task. If the task is inactive, there isn't | |
1801 | * any need to return a new reference. | |
1802 | * Conditions: | |
1803 | * If made is true, a "made" reference is returned (for donating to | |
1804 | * the voucher system). Otherwise an internal reference is returned. | |
1805 | * | |
1806 | * Nothing locked on entry. May block. | |
1807 | */ | |
1808 | ipc_importance_task_t | |
1809 | ipc_importance_for_task(task_t task, boolean_t made) | |
1810 | { | |
1811 | ipc_importance_task_t task_elem; | |
1812 | boolean_t first_pass = TRUE; | |
1813 | ||
1814 | assert(TASK_NULL != task); | |
1815 | ||
1816 | retry: | |
1817 | /* No use returning anything for inactive task */ | |
1818 | if (!task->active) | |
1819 | return IIT_NULL; | |
1820 | ||
1821 | ipc_importance_lock(); | |
1822 | task_elem = task->task_imp_base; | |
1823 | if (IIT_NULL != task_elem) { | |
1824 | /* Add a made reference (borrowing active task ref to do it) */ | |
1825 | if (made) { | |
1826 | if (0 == task_elem->iit_made++) { | |
1827 | assert(IIT_REFS_MAX > IIT_REFS(task_elem)); | |
1828 | ipc_importance_task_reference_internal(task_elem); | |
1829 | } | |
1830 | } else { | |
1831 | assert(IIT_REFS_MAX > IIT_REFS(task_elem)); | |
1832 | ipc_importance_task_reference_internal(task_elem); | |
1833 | } | |
1834 | ipc_importance_unlock(); | |
1835 | return task_elem; | |
1836 | } | |
1837 | ipc_importance_unlock(); | |
1838 | ||
1839 | if (!first_pass) | |
1840 | return IIT_NULL; | |
1841 | first_pass = FALSE; | |
1842 | ||
1843 | /* Need to make one - may race with others (be prepared to drop) */ | |
1844 | task_elem = (ipc_importance_task_t)zalloc(ipc_importance_task_zone); | |
1845 | if (IIT_NULL == task_elem) | |
1846 | goto retry; | |
1847 | ||
1848 | task_elem->iit_bits = IIE_TYPE_TASK | 2; /* one for task, one for return/made */ | |
1849 | task_elem->iit_made = (made) ? 1 : 0; | |
1850 | task_elem->iit_task = task; /* take actual ref when we're sure */ | |
1851 | task_elem->iit_updateq = NULL; | |
1852 | task_elem->iit_receiver = 0; | |
1853 | task_elem->iit_denap = 0; | |
1854 | task_elem->iit_donor = 0; | |
1855 | task_elem->iit_live_donor = 0; | |
1856 | task_elem->iit_updatepolicy = 0; | |
1857 | task_elem->iit_reserved = 0; | |
1858 | task_elem->iit_filelocks = 0; | |
1859 | task_elem->iit_updatetime = 0; | |
1860 | task_elem->iit_transitions = 0; | |
1861 | task_elem->iit_assertcnt = 0; | |
1862 | task_elem->iit_externcnt = 0; | |
1863 | task_elem->iit_externdrop = 0; | |
1864 | task_elem->iit_legacy_externcnt = 0; | |
1865 | task_elem->iit_legacy_externdrop = 0; | |
1866 | #if IIE_REF_DEBUG | |
1867 | ipc_importance_counter_init(&task_elem->iit_elem); | |
1868 | #endif | |
1869 | queue_init(&task_elem->iit_kmsgs); | |
1870 | queue_init(&task_elem->iit_inherits); | |
1871 | ||
1872 | ipc_importance_lock(); | |
1873 | if (!task->active) { | |
1874 | ipc_importance_unlock(); | |
1875 | zfree(ipc_importance_task_zone, task_elem); | |
1876 | return IIT_NULL; | |
1877 | } | |
1878 | ||
1879 | /* did we lose the race? */ | |
1880 | if (IIT_NULL != task->task_imp_base) { | |
1881 | ipc_importance_unlock(); | |
1882 | zfree(ipc_importance_task_zone, task_elem); | |
1883 | goto retry; | |
1884 | } | |
1885 | ||
1886 | /* we won the race */ | |
1887 | task->task_imp_base = task_elem; | |
1888 | task_reference(task); | |
1889 | #if DEVELOPMENT || DEBUG | |
1890 | queue_enter(&global_iit_alloc_queue, task_elem, ipc_importance_task_t, iit_allocation); | |
1891 | task_importance_update_owner_info(task); | |
1892 | #endif | |
1893 | ipc_importance_unlock(); | |
1894 | ||
1895 | return task_elem; | |
1896 | } | |
1897 | ||
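#if 0 /* illustrative sketch - not part of the original source */
/*
 * A minimal sketch of the reference discipline around
 * ipc_importance_for_task(): an internal reference (made == FALSE) is
 * returned for kernel-internal use and must be balanced with
 * ipc_importance_task_release(); a made reference (made == TRUE) is
 * what gets handed to the voucher system instead. The helper name is
 * hypothetical.
 */
static void
example_query_task_importance(task_t task)
{
	ipc_importance_task_t task_imp;

	task_imp = ipc_importance_for_task(task, FALSE);
	if (IIT_NULL == task_imp) {
		return;		/* task inactive - nothing to reference */
	}

	(void)ipc_importance_task_is_donor(task_imp);

	/* balance the internal reference taken above */
	ipc_importance_task_release(task_imp);
}
#endif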
1898 | #if DEVELOPMENT || DEBUG | |
1899 | void task_importance_update_owner_info(task_t task) { | |
1900 | ||
1901 | if (task != TASK_NULL && task->task_imp_base != IIT_NULL) { | |
1902 | ipc_importance_task_t task_elem = task->task_imp_base; | |
1903 | ||
1904 | task_elem->iit_bsd_pid = audit_token_pid_from_task(task); | |
1905 | if (task->bsd_info) { | |
1906 | strncpy(&task_elem->iit_procname[0], proc_name_address(task->bsd_info), 16); | |
1907 | task_elem->iit_procname[16] = '\0'; | |
1908 | } else { | |
1909 | strncpy(&task_elem->iit_procname[0], "unknown", 16); | |
1910 | } | |
1911 | } | |
1912 | } | |
1913 | #endif | |
1914 | ||
1915 | /* | |
1916 | * Routine: ipc_importance_reset_locked | |
1917 | * Purpose: | |
1918 | * Reset a task's IPC importance (the task is going away or exec'ing) | |
1919 | * | |
1920 | * Remove the donor bit and legacy externalized assertions from the | |
1921 | * current task importance and see if that wipes out downstream donations. | |
1922 | * Conditions: | |
1923 | * importance lock held. | |
1924 | */ | |
1925 | ||
1926 | static void | |
1927 | ipc_importance_reset_locked(ipc_importance_task_t task_imp, boolean_t donor) | |
1928 | { | |
1929 | boolean_t before_donor, after_donor; | |
1930 | ||
1931 | /* remove the donor bit, live-donor bit and externalized boosts */ | |
1932 | before_donor = ipc_importance_task_is_donor(task_imp); | |
1933 | if (donor) { | |
1934 | task_imp->iit_donor = 0; | |
1935 | } | |
1936 | assert(IIT_LEGACY_EXTERN(task_imp) <= IIT_EXTERN(task_imp)); | |
1937 | assert(task_imp->iit_legacy_externcnt <= task_imp->iit_externcnt); | |
1938 | assert(task_imp->iit_legacy_externdrop <= task_imp->iit_externdrop); | |
1939 | task_imp->iit_externcnt -= task_imp->iit_legacy_externcnt; | |
1940 | task_imp->iit_externdrop -= task_imp->iit_legacy_externdrop; | |
1941 | ||
1942 | /* assert(IIT_LEGACY_EXTERN(task_imp) <= task_imp->iit_assertcnt); */ | |
1943 | if (IIT_LEGACY_EXTERN(task_imp) < task_imp->iit_assertcnt) { | |
1944 | task_imp->iit_assertcnt -= IIT_LEGACY_EXTERN(task_imp); | |
1945 | } else { | |
1946 | assert(IIT_LEGACY_EXTERN(task_imp) == task_imp->iit_assertcnt); | |
1947 | task_imp->iit_assertcnt = 0; | |
1948 | } | |
1949 | task_imp->iit_legacy_externcnt = 0; | |
1950 | task_imp->iit_legacy_externdrop = 0; | |
1951 | after_donor = ipc_importance_task_is_donor(task_imp); | |
1952 | ||
1953 | #if DEVELOPMENT || DEBUG | |
1954 | if (task_imp->iit_assertcnt > 0 && task_imp->iit_live_donor) { | |
1955 | printf("Live donor task %s[%d] still has %d importance assertions after reset\n", | |
1956 | task_imp->iit_procname, task_imp->iit_bsd_pid, task_imp->iit_assertcnt); | |
1957 | } | |
1958 | #endif | |
1959 | ||
1960 | /* propagate a downstream drop if there was a change in donor status */ | |
1961 | if (after_donor != before_donor) { | |
1962 | ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, FALSE); | |
1963 | } | |
1964 | } | |
1965 | ||
1966 | /* | |
1967 | * Routine: ipc_importance_reset | |
1968 | * Purpose: | |
1969 | * Reset a task's IPC importance | |
1970 | * | |
1971 | * The task is being reset, although staying around. Arrange to have the | |
1972 | * external state of the task reset from the importance. | |
1973 | * Conditions: | |
1974 | * importance lock not held. | |
1975 | */ | |
1976 | ||
1977 | void | |
1978 | ipc_importance_reset(ipc_importance_task_t task_imp, boolean_t donor) | |
1979 | { | |
1980 | if (IIT_NULL == task_imp) { | |
1981 | return; | |
1982 | } | |
1983 | ipc_importance_lock(); | |
1984 | ipc_importance_reset_locked(task_imp, donor); | |
1985 | ipc_importance_unlock(); | |
1986 | } | |
1987 | ||
1988 | /* | |
1989 | * Routine: ipc_importance_disconnect_task | |
1990 | * Purpose: | |
1991 | * Disconnect a task from its importance. | |
1992 | * | |
1993 | * Clear the task pointer from the importance and drop the | |
1994 | * reference the task held on the importance object. Before | |
1995 | * doing that, reset the effects the current task holds on | |
1996 | * the importance and see if that wipes out downstream donations. | |
1997 | * | |
1998 | * We allow the upstream boosts to continue to affect downstream | |
1999 | * even though the local task is being effectively pulled from | |
2000 | * the chain. | |
2001 | * Conditions: | |
2002 | * Nothing locked. | |
2003 | */ | |
2004 | void | |
2005 | ipc_importance_disconnect_task(task_t task) | |
2006 | { | |
2007 | ipc_importance_task_t task_imp; | |
2008 | ||
2009 | task_lock(task); | |
2010 | ipc_importance_lock(); | |
2011 | task_imp = task->task_imp_base; | |
2012 | ||
2013 | /* did somebody beat us to it? */ | |
2014 | if (IIT_NULL == task_imp) { | |
2015 | ipc_importance_unlock(); | |
2016 | task_unlock(task); | |
2017 | return; | |
2018 | } | |
2019 | ||
2020 | /* disconnect the task from this importance */ | |
2021 | assert(task_imp->iit_task == task); | |
2022 | task_imp->iit_task = TASK_NULL; | |
2023 | task->task_imp_base = IIT_NULL; | |
2024 | task_unlock(task); | |
2025 | ||
2026 | /* reset the effects the current task holds on the importance */ | |
2027 | ipc_importance_reset_locked(task_imp, TRUE); | |
2028 | ||
2029 | ipc_importance_task_release_locked(task_imp); | |
2030 | /* importance unlocked */ | |
2031 | ||
2032 | /* deallocate the task now that the importance is unlocked */ | |
2033 | task_deallocate(task); | |
2034 | } | |
2035 | ||
2036 | /* | |
2037 | * Routine: ipc_importance_send | |
2038 | * Purpose: | |
2039 | * Post the importance voucher attribute [if sent] or a static | |
2040 | * importance boost depending upon options and conditions. | |
2041 | * Conditions: | |
2042 | * Destination port locked on entry and exit, may be dropped during the call. | |
2043 | * Returns: | |
2044 | * A boolean identifying if the port lock was temporarily dropped. | |
2045 | */ | |
2046 | boolean_t | |
2047 | ipc_importance_send( | |
2048 | ipc_kmsg_t kmsg, | |
2049 | mach_msg_option_t option) | |
2050 | { | |
2051 | ipc_port_t port = (ipc_port_t) kmsg->ikm_header->msgh_remote_port; | |
2052 | boolean_t port_lock_dropped = FALSE; | |
2053 | ipc_importance_elem_t elem; | |
2054 | task_t task; | |
2055 | ipc_importance_task_t task_imp; | |
2056 | kern_return_t kr; | |
2057 | ||
2058 | ||
2059 | assert(IP_VALID(port)); | |
2060 | ||
2061 | /* If no donation to be made, return quickly */ | |
2062 | if ((port->ip_impdonation == 0) || | |
2063 | (option & MACH_SEND_NOIMPORTANCE) != 0) { | |
2064 | return port_lock_dropped; | |
2065 | } | |
2066 | ||
2067 | task = current_task(); | |
2068 | ||
2069 | /* If forced sending a static boost, go update the port */ | |
2070 | if ((option & MACH_SEND_IMPORTANCE) != 0) { | |
2071 | kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_RAISEIMP; | |
2072 | goto portupdate; | |
2073 | } | |
2074 | ||
2075 | task_imp = task->task_imp_base; | |
2076 | assert(IIT_NULL != task_imp); | |
2077 | ||
2078 | /* If the sender can never donate importance, nothing to do */ | |
2079 | if (ipc_importance_task_is_never_donor(task_imp)) { | |
2080 | return port_lock_dropped; | |
2081 | } | |
2082 | ||
2083 | elem = IIE_NULL; | |
2084 | ||
2085 | /* If importance receiver and passing a voucher, look for importance in there */ | |
2086 | if (IP_VALID(kmsg->ikm_voucher) && | |
2087 | ipc_importance_task_is_marked_receiver(task_imp)) { | |
2088 | mach_voucher_attr_value_handle_t vals[MACH_VOUCHER_ATTR_VALUE_MAX_NESTED]; | |
2089 | mach_voucher_attr_value_handle_array_size_t val_count; | |
2090 | ipc_voucher_t voucher; | |
2091 | ||
2092 | assert(ip_kotype(kmsg->ikm_voucher) == IKOT_VOUCHER); | |
2093 | voucher = (ipc_voucher_t)kmsg->ikm_voucher->ip_kobject; | |
2094 | ||
2095 | /* check to see if the voucher has an importance attribute */ | |
2096 | val_count = MACH_VOUCHER_ATTR_VALUE_MAX_NESTED; | |
2097 | kr = mach_voucher_attr_control_get_values(ipc_importance_control, voucher, | |
2098 | vals, &val_count); | |
2099 | assert(KERN_SUCCESS == kr); | |
2100 | ||
2101 | /* | |
2102 | * Only use importance associated with our task (either directly | |
2103 | * or through an inherit that donates to our task). | |
2104 | */ | |
2105 | if (0 < val_count) { | |
2106 | ipc_importance_elem_t check_elem; | |
2107 | ||
2108 | check_elem = (ipc_importance_elem_t)vals[0]; | |
2109 | assert(IIE_NULL != check_elem); | |
2110 | if (IIE_TYPE_INHERIT == IIE_TYPE(check_elem)) { | |
2111 | ipc_importance_inherit_t inherit; | |
2112 | inherit = (ipc_importance_inherit_t) check_elem; | |
2113 | if (inherit->iii_to_task == task_imp) { | |
2114 | elem = check_elem; | |
2115 | } | |
2116 | } else if (check_elem == (ipc_importance_elem_t)task_imp) { | |
2117 | elem = check_elem; | |
2118 | } | |
2119 | } | |
2120 | } | |
2121 | ||
2122 | /* If we haven't found an importance attribute to send yet, use the task's */ | |
2123 | if (IIE_NULL == elem) { | |
2124 | elem = (ipc_importance_elem_t)task_imp; | |
2125 | } | |
2126 | ||
2127 | /* take a reference for the message to hold */ | |
2128 | ipc_importance_reference_internal(elem); | |
2129 | ||
2130 | /* acquire the importance lock while trying to hang on to port lock */ | |
2131 | if (!ipc_importance_lock_try()) { | |
2132 | port_lock_dropped = TRUE; | |
2133 | ip_unlock(port); | |
2134 | ipc_importance_lock(); | |
2135 | } | |
2136 | ||
2137 | /* link kmsg onto the donor element propagation chain */ | |
2138 | ipc_importance_kmsg_link(kmsg, elem); | |
2139 | /* elem reference transferred to kmsg */ | |
2140 | ||
2141 | incr_ref_counter(elem->iie_kmsg_refs_added); | |
2142 | ||
2143 | /* If the sender isn't currently a donor, no need to apply boost */ | |
2144 | if (!ipc_importance_task_is_donor(task_imp)) { | |
2145 | ipc_importance_unlock(); | |
2146 | ||
2147 | /* re-acquire port lock, if needed */ | |
2148 | if (TRUE == port_lock_dropped) | |
2149 | ip_lock(port); | |
2150 | ||
2151 | return port_lock_dropped; | |
2152 | } | |
2153 | ||
2154 | /* Mark the fact that we are (currently) donating through this message */ | |
2155 | kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_RAISEIMP; | |
2156 | ||
2157 | /* | |
2158 | * If we need to relock the port, do it with the importance still locked. | |
2159 | * This ensures we get to add the importance boost through the port to | |
2160 | * the task BEFORE anyone else can attempt to undo that operation because | |
2161 | * the sender lost donor status. | |
2162 | */ | |
2163 | if (TRUE == port_lock_dropped) { | |
2164 | ip_lock(port); | |
2165 | } | |
2166 | ipc_importance_unlock(); | |
2167 | ||
2168 | portupdate: | |
2169 | ||
2170 | #if IMPORTANCE_DEBUG | |
2171 | if (kdebug_enable) { | |
2172 | mach_msg_max_trailer_t *dbgtrailer = (mach_msg_max_trailer_t *) | |
2173 | ((vm_offset_t)kmsg->ikm_header + round_msg(kmsg->ikm_header->msgh_size)); | |
2174 | unsigned int sender_pid = dbgtrailer->msgh_audit.val[5]; | |
2175 | mach_msg_id_t imp_msgh_id = kmsg->ikm_header->msgh_id; | |
2176 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_SEND)) | DBG_FUNC_START, | |
2177 | audit_token_pid_from_task(task), sender_pid, imp_msgh_id, 0, 0); | |
2178 | } | |
2179 | #endif /* IMPORTANCE_DEBUG */ | |
2180 | ||
2181 | /* adjust port boost count (with port locked) */ | |
2182 | if (TRUE == ipc_port_importance_delta(port, 1)) { | |
2183 | port_lock_dropped = TRUE; | |
2184 | ip_lock(port); | |
2185 | } | |
2186 | return port_lock_dropped; | |
2187 | } | |
2188 | ||
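#if 0 /* illustrative sketch - not part of the original source */
/*
 * A sketch of the caller-side contract for ipc_importance_send(): the
 * destination port is locked on entry and on return, but the boolean
 * result records whether the lock was temporarily dropped, so any port
 * state the caller cached before the call must be revalidated. The
 * surrounding send-path details are omitted and hypothetical.
 */
static void
example_send_path_fragment(ipc_kmsg_t kmsg, mach_msg_option_t option)
{
	ipc_port_t port = (ipc_port_t)kmsg->ikm_header->msgh_remote_port;
	boolean_t lock_dropped;

	ip_lock(port);
	lock_dropped = ipc_importance_send(kmsg, option);
	if (lock_dropped) {
		/* port was unlocked and relocked - recheck cached port state */
	}
	/* ... continue the send with the port still locked ... */
	ip_unlock(port);
}
#endif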
2189 | /* | |
2190 | * Routine: ipc_importance_inherit_from | |
2191 | * Purpose: | |
2192 | * Create a "made" reference for an importance attribute representing | |
2193 | * an inheritance between the sender of a message (if linked) and the | |
2194 | * current task importance. If the message is not linked, a static | |
2195 | * boost may be created, based on the boost state of the message. | |
2196 | * | |
2197 | * Any transfer from kmsg linkage to inherit linkage must be atomic. | |
2198 | * | |
2199 | * If the task is inactive, there isn't any need to return a new reference. | |
2200 | * Conditions: | |
2201 | * Nothing locked on entry. May block. | |
2202 | */ | |
2203 | static ipc_importance_inherit_t | |
2204 | ipc_importance_inherit_from(ipc_kmsg_t kmsg) | |
2205 | { | |
2206 | ipc_importance_task_t task_imp = IIT_NULL; | |
2207 | ipc_importance_elem_t from_elem = kmsg->ikm_importance; | |
2208 | ipc_importance_elem_t elem; | |
2209 | task_t task_self = current_task(); | |
2210 | ||
2211 | ipc_port_t port = kmsg->ikm_header->msgh_remote_port; | |
2212 | ipc_importance_inherit_t inherit = III_NULL; | |
2213 | ipc_importance_inherit_t alloc = III_NULL; | |
2214 | ipc_importance_inherit_t temp_inherit; | |
2215 | boolean_t cleared_self_donation = FALSE; | |
2216 | boolean_t donating; | |
2217 | uint32_t depth = 1; | |
2218 | ||
2219 | /* The kmsg must have an importance donor or static boost to proceed */ | |
2220 | if (IIE_NULL == kmsg->ikm_importance && | |
2221 | !MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) { | |
2222 | return III_NULL; | |
2223 | } | |
2224 | ||
2225 | /* | |
2226 | * No need to set up an inherit linkage if the dest isn't a receiver | |
2227 | * of one type or the other. | |
2228 | */ | |
2229 | if (!ipc_importance_task_is_any_receiver_type(task_self->task_imp_base)) { | |
2230 | ipc_importance_lock(); | |
2231 | goto out_locked; | |
2232 | } | |
2233 | ||
2234 | /* Grab a reference on the importance of the destination */ | |
2235 | task_imp = ipc_importance_for_task(task_self, FALSE); | |
2236 | ||
2237 | ipc_importance_lock(); | |
2238 | ||
2239 | if (IIT_NULL == task_imp) { | |
2240 | goto out_locked; | |
2241 | } | |
2242 | ||
2243 | incr_ref_counter(task_imp->iit_elem.iie_task_refs_added_inherit_from); | |
2244 | ||
2245 | /* If message is already associated with an inherit... */ | |
2246 | if (IIE_TYPE_INHERIT == IIE_TYPE(from_elem)) { | |
2247 | ipc_importance_inherit_t from_inherit = (ipc_importance_inherit_t)from_elem; | |
2248 | ||
2249 | /* already targeting our task? - just use it */ | |
2250 | if (from_inherit->iii_to_task == task_imp) { | |
2251 | /* clear self-donation if not also present in inherit */ | |
2252 | if (!from_inherit->iii_donating && | |
2253 | MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) { | |
2254 | kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP; | |
2255 | cleared_self_donation = TRUE; | |
2256 | } | |
2257 | inherit = from_inherit; | |
2258 | ||
2259 | } else if (III_DEPTH_MAX == III_DEPTH(from_inherit)) { | |
2260 | ipc_importance_task_t to_task; | |
2261 | ipc_importance_elem_t unlinked_from; | |
2262 | ||
2263 | /* | |
2264 | * Chain too long. Switch to looking | |
2265 | * directly at the from_inherit's to-task | |
2266 | * as our source of importance. | |
2267 | */ | |
2268 | to_task = from_inherit->iii_to_task; | |
2269 | ipc_importance_task_reference(to_task); | |
2270 | from_elem = (ipc_importance_elem_t)to_task; | |
2271 | depth = III_DEPTH_RESET | 1; | |
2272 | ||
2273 | /* Fixup the kmsg linkage to reflect change */ | |
2274 | unlinked_from = ipc_importance_kmsg_unlink(kmsg); | |
2275 | assert(unlinked_from == (ipc_importance_elem_t)from_inherit); | |
2276 | ipc_importance_kmsg_link(kmsg, from_elem); | |
2277 | ipc_importance_inherit_release_locked(from_inherit); | |
2278 | /* importance unlocked */ | |
2279 | ipc_importance_lock(); | |
2280 | ||
2281 | } else { | |
2282 | /* inheriting from an inherit */ | |
2283 | depth = from_inherit->iii_depth + 1; | |
2284 | } | |
2285 | } | |
2286 | ||
2287 | /* | |
2288 | * Don't allow a task to inherit from itself (would keep it permanently | |
2289 | * boosted even if all other donors to the task went away). | |
2290 | */ | |
2291 | ||
2292 | if (from_elem == (ipc_importance_elem_t)task_imp) { | |
2293 | goto out_locked; | |
2294 | } | |
2295 | ||
2296 | /* | |
2297 | * But if the message isn't associated with any linked source, it is | |
2298 | * intended to be permanently boosting (static boost from kernel). | |
2299 | * In that case DO let the process permanently boost itself. | |
2300 | */ | |
2301 | if (IIE_NULL == from_elem) { | |
2302 | assert(MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)); | |
2303 | ipc_importance_task_reference_internal(task_imp); | |
2304 | from_elem = (ipc_importance_elem_t)task_imp; | |
2305 | } | |
2306 | ||
2307 | /* | |
2308 | * Now that we have the from_elem figured out, | |
2309 | * check to see if we already have an inherit for this pairing | |
2310 | */ | |
2311 | while (III_NULL == inherit) { | |
2312 | queue_iterate(&from_elem->iie_inherits, temp_inherit, | |
2313 | ipc_importance_inherit_t, iii_inheritance) { | |
2314 | if (temp_inherit->iii_to_task == task_imp && | |
2315 | temp_inherit->iii_depth == depth) { | |
2316 | inherit = temp_inherit; | |
2317 | break; | |
2318 | } | |
2319 | } | |
2320 | ||
2321 | /* Do we have to allocate a new inherit */ | |
2322 | if (III_NULL == inherit) { | |
2323 | if (III_NULL != alloc) { | |
2324 | break; | |
2325 | } | |
2326 | ||
2327 | /* allocate space */ | |
2328 | ipc_importance_unlock(); | |
2329 | alloc = (ipc_importance_inherit_t) | |
2330 | zalloc(ipc_importance_inherit_zone); | |
2331 | ipc_importance_lock(); | |
2332 | } | |
2333 | } | |
2334 | ||
2335 | /* snapshot the donating status while we have importance locked */ | |
2336 | donating = MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits); | |
2337 | ||
2338 | if (III_NULL != inherit) { | |
2339 | /* We found one, piggyback on that */ | |
2340 | assert(0 < III_REFS(inherit)); | |
2341 | assert(0 < IIE_REFS(inherit->iii_from_elem)); | |
2342 | assert(inherit->iii_externcnt >= inherit->iii_made); | |
2343 | ||
2344 | /* add in a made reference */ | |
2345 | if (0 == inherit->iii_made++) { | |
2346 | assert(III_REFS_MAX > III_REFS(inherit)); | |
2347 | ipc_importance_inherit_reference_internal(inherit); | |
2348 | } | |
2349 | ||
2350 | /* Reflect the inherit's change of status into the task boosts */ | |
2351 | if (0 == III_EXTERN(inherit)) { | |
2352 | assert(!inherit->iii_donating); | |
2353 | inherit->iii_donating = donating; | |
2354 | if (donating) { | |
2355 | task_imp->iit_externcnt += inherit->iii_externcnt; | |
2356 | task_imp->iit_externdrop += inherit->iii_externdrop; | |
2357 | } | |
2358 | } else { | |
2359 | assert(donating == inherit->iii_donating); | |
2360 | } | |
2361 | ||
2362 | /* add in an external reference for this use of the inherit */ | |
2363 | inherit->iii_externcnt++; | |
2364 | if (donating) { | |
2365 | task_imp->iit_externcnt++; | |
2366 | } | |
2367 | } else { | |
2368 | /* initialize the previously allocated space */ | |
2369 | inherit = alloc; | |
2370 | inherit->iii_bits = IIE_TYPE_INHERIT | 1; | |
2371 | inherit->iii_made = 1; | |
2372 | inherit->iii_externcnt = 1; | |
2373 | inherit->iii_externdrop = 0; | |
2374 | inherit->iii_depth = depth; | |
2375 | inherit->iii_to_task = task_imp; | |
2376 | inherit->iii_from_elem = IIE_NULL; | |
2377 | queue_init(&inherit->iii_kmsgs); | |
2378 | queue_init(&inherit->iii_inherits); | |
2379 | ||
2380 | /* If donating, reflect that in the task externcnt */ | |
2381 | if (donating) { | |
2382 | inherit->iii_donating = TRUE; | |
2383 | task_imp->iit_externcnt++; | |
2384 | } else { | |
2385 | inherit->iii_donating = FALSE; | |
2386 | } | |
2387 | ||
2388 | /* | |
2389 | * Chain our new inherit on the element it inherits from. | |
2390 | * The new inherit takes our reference on from_elem. | |
2391 | */ | |
2392 | ipc_importance_inherit_link(inherit, from_elem); | |
2393 | ||
2394 | #if IIE_REF_DEBUG | |
2395 | ipc_importance_counter_init(&inherit->iii_elem); | |
2396 | from_elem->iie_kmsg_refs_inherited++; | |
2397 | task_imp->iit_elem.iie_task_refs_inherited++; | |
2398 | #endif | |
2399 | } | |
2400 | ||
2401 | out_locked: | |
2402 | /* | |
2403 | * for those paths that came straight here: snapshot the donating status | |
2404 | * (this should match previous snapshot for other paths). | |
2405 | */ | |
2406 | donating = MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits); | |
2407 | ||
2408 | /* unlink the kmsg inheritance (if any) */ | |
2409 | elem = ipc_importance_kmsg_unlink(kmsg); | |
2410 | assert(elem == from_elem); | |
2411 | ||
2412 | /* If we didn't create a new inherit, we have some resources to release */ | |
2413 | if (III_NULL == inherit || inherit != alloc) { | |
2414 | if (IIE_NULL != from_elem) { | |
2415 | if (III_NULL != inherit) { | |
2416 | incr_ref_counter(from_elem->iie_kmsg_refs_coalesced); | |
2417 | } else { | |
2418 | incr_ref_counter(from_elem->iie_kmsg_refs_dropped); | |
2419 | } | |
2420 | ipc_importance_release_locked(from_elem); | |
2421 | /* importance unlocked */ | |
2422 | } else { | |
2423 | ipc_importance_unlock(); | |
2424 | } | |
2425 | ||
2426 | if (IIT_NULL != task_imp) { | |
2427 | if (III_NULL != inherit) { | |
2428 | incr_ref_counter(task_imp->iit_elem.iie_task_refs_coalesced); | |
2429 | } | |
2430 | ipc_importance_task_release(task_imp); | |
2431 | } | |
2432 | ||
2433 | if (III_NULL != alloc) | |
2434 | zfree(ipc_importance_inherit_zone, alloc); | |
2435 | } else { | |
2436 | /* from_elem and task_imp references transferred to new inherit */ | |
2437 | ipc_importance_unlock(); | |
2438 | } | |
2439 | ||
2440 | /* decrement port boost count */ | |
2441 | if (donating) { | |
2442 | ip_lock(port); | |
2443 | if (III_NULL != inherit) { | |
2444 | /* task assertions transferred to inherit, just adjust port count */ | |
2445 | ipc_port_impcount_delta(port, -1, IP_NULL); | |
2446 | ip_unlock(port); | |
2447 | } else { | |
2448 | /* drop importance from port and destination task */ | |
2449 | if (ipc_port_importance_delta(port, -1) == FALSE) { | |
2450 | ip_unlock(port); | |
2451 | } | |
2452 | } | |
2453 | } else if (cleared_self_donation) { | |
2454 | ip_lock(port); | |
2455 | /* drop cleared donation from port and destination task */ | |
2456 | if (ipc_port_importance_delta(port, -1) == FALSE) { | |
2457 | ip_unlock(port); | |
2458 | } | |
2459 | } | |
2460 | ||
2461 | if (III_NULL != inherit) { | |
2462 | /* have an associated importance attr, even if currently not donating */ | |
2463 | kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_RAISEIMP; | |
2464 | } else { | |
2465 | /* we won't have an importance attribute associated with our message */ | |
2466 | kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP; | |
2467 | } | |
2468 | ||
2469 | return inherit; | |
2470 | } | |
2471 | ||
2472 | /* | |
2473 | * Routine: ipc_importance_receive | |
2474 | * Purpose: | |
2475 | * Process importance attributes in a received message. | |
2476 | * | |
2477 | * If an importance voucher attribute was sent, transform | |
2478 | * that into an attribute value reflecting the inheritance | |
2479 | * from the sender to the receiver. | |
2480 | * | |
2481 | * If a static boost is received (or the receiver isn't taking | |
2482 | * a voucher-based boost), export a static boost. | |
2483 | * Conditions: | |
2484 | * Nothing locked. | |
2485 | */ | |
2486 | void | |
2487 | ipc_importance_receive( | |
2488 | ipc_kmsg_t kmsg, | |
2489 | mach_msg_option_t option) | |
2490 | { | |
2491 | unsigned int sender_pid = ((mach_msg_max_trailer_t *) | |
2492 | ((vm_offset_t)kmsg->ikm_header + | |
2493 | round_msg(kmsg->ikm_header->msgh_size)))->msgh_audit.val[5]; | |
2494 | task_t task_self = current_task(); | |
2495 | int impresult = -1; | |
2496 | ||
2497 | /* convert to a voucher with an inherit importance attribute? */ | |
2498 | if ((option & MACH_RCV_VOUCHER) != 0) { | |
2499 | uint8_t recipes[2 * sizeof(ipc_voucher_attr_recipe_data_t) + | |
2500 | sizeof(mach_voucher_attr_value_handle_t)]; | |
2501 | ipc_voucher_attr_raw_recipe_array_size_t recipe_size = 0; | |
2502 | ipc_voucher_attr_recipe_t recipe = (ipc_voucher_attr_recipe_t)recipes; | |
2503 | ipc_voucher_t recv_voucher; | |
2504 | mach_voucher_attr_value_handle_t handle; | |
2505 | ipc_importance_inherit_t inherit; | |
2506 | kern_return_t kr; | |
2507 | ||
2508 | /* set up recipe to copy the old voucher */ | |
2509 | if (IP_VALID(kmsg->ikm_voucher)) { | |
2510 | ipc_voucher_t sent_voucher = (ipc_voucher_t)kmsg->ikm_voucher->ip_kobject; | |
2511 | ||
2512 | recipe->key = MACH_VOUCHER_ATTR_KEY_ALL; | |
2513 | recipe->command = MACH_VOUCHER_ATTR_COPY; | |
2514 | recipe->previous_voucher = sent_voucher; | |
2515 | recipe->content_size = 0; | |
2516 | recipe_size += sizeof(*recipe); | |
2517 | } | |
2518 | ||
2519 | /* | |
2520 | * create an inheritance attribute from the kmsg (may be NULL) | |
2521 | * transferring any boosts from the kmsg linkage through the | |
2522 | * port directly to the new inheritance object. | |
2523 | */ | |
2524 | inherit = ipc_importance_inherit_from(kmsg); | |
2525 | handle = (mach_voucher_attr_value_handle_t)inherit; | |
2526 | ||
2527 | assert(IIE_NULL == kmsg->ikm_importance); | |
2528 | ||
2529 | /* replace the importance attribute with the handle we created */ | |
2530 | /* our made reference on the inherit is donated to the voucher */ | |
2531 | recipe = (ipc_voucher_attr_recipe_t)&recipes[recipe_size]; | |
2532 | recipe->key = MACH_VOUCHER_ATTR_KEY_IMPORTANCE; | |
2533 | recipe->command = MACH_VOUCHER_ATTR_SET_VALUE_HANDLE; | |
2534 | recipe->previous_voucher = IPC_VOUCHER_NULL; | |
2535 | recipe->content_size = sizeof(mach_voucher_attr_value_handle_t); | |
2536 | *(mach_voucher_attr_value_handle_t *)(void *)recipe->content = handle; | |
2537 | recipe_size += sizeof(*recipe) + sizeof(mach_voucher_attr_value_handle_t); | |
2538 | ||
2539 | kr = ipc_voucher_attr_control_create_mach_voucher(ipc_importance_control, | |
2540 | recipes, | |
2541 | recipe_size, | |
2542 | &recv_voucher); | |
2543 | assert(KERN_SUCCESS == kr); | |
2544 | ||
2545 | /* swap the voucher port (and set voucher bits in case it didn't already exist) */ | |
2546 | kmsg->ikm_header->msgh_bits |= (MACH_MSG_TYPE_MOVE_SEND << 16); | |
2547 | ipc_port_release_send(kmsg->ikm_voucher); | |
2548 | kmsg->ikm_voucher = convert_voucher_to_port(recv_voucher); | |
2549 | if (III_NULL != inherit) | |
2550 | impresult = 2; | |
2551 | ||
2552 | } else { /* Don't want a voucher */ | |
2553 | ||
2554 | /* got linked importance? have to drop */ | |
2555 | if (IIE_NULL != kmsg->ikm_importance) { | |
2556 | ipc_importance_elem_t elem; | |
2557 | ||
2558 | ipc_importance_lock(); | |
2559 | elem = ipc_importance_kmsg_unlink(kmsg); | |
2560 | #if IIE_REF_DEBUG | |
2561 | elem->iie_kmsg_refs_dropped++; | |
2562 | #endif | |
2563 | ipc_importance_release_locked(elem); | |
2564 | /* importance unlocked */ | |
2565 | } | |
2566 | ||
2567 | /* With kmsg unlinked, can safely examine message importance attribute. */ | |
2568 | if (MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) { | |
2569 | ipc_importance_task_t task_imp = task_self->task_imp_base; | |
2570 | ipc_port_t port = kmsg->ikm_header->msgh_remote_port; | |
2571 | ||
2572 | /* defensive deduction for release builds lacking the assert */ | |
2573 | ip_lock(port); | |
2574 | ipc_port_impcount_delta(port, -1, IP_NULL); | |
2575 | ip_unlock(port); | |
2576 | ||
2577 | /* will user accept legacy responsibility for the importance boost */ | |
2578 | if (KERN_SUCCESS == ipc_importance_task_externalize_legacy_assertion(task_imp, 1, sender_pid)) { | |
2579 | impresult = 1; | |
2580 | } else { | |
2581 | /* The importance boost never applied to task (clear the bit) */ | |
2582 | kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP; | |
2583 | impresult = 0; | |
2584 | } | |
2585 | } | |
2586 | } | |
2587 | ||
2588 | #if IMPORTANCE_DEBUG | |
2589 | if (-1 < impresult) | |
2590 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_DELV)) | DBG_FUNC_NONE, | |
2591 | sender_pid, audit_token_pid_from_task(task_self), | |
2592 | kmsg->ikm_header->msgh_id, impresult, 0); | |
2593 | if (impresult == 2) { | |
2594 | /* | |
2595 | * This probe only covers the new voucher-based path. Legacy importance | |
2596 | * will trigger the probe in ipc_importance_task_externalize_legacy_assertion() | |
2597 | * above and have impresult==1 here. | |
2598 | */ | |
2599 | DTRACE_BOOST5(receive_boost, task_t, task_self, int, audit_token_pid_from_task(task_self), int, sender_pid, int, 1, int, task_self->task_imp_base->iit_assertcnt); | |
2600 | } | |
2601 | #endif /* IMPORTANCE_DEBUG */ | |
2602 | } | |
2603 | ||
2604 | /* | |
2605 | * Routine: ipc_importance_unreceive | |
2606 | * Purpose: | |
2607 | * Undo receive of importance attributes in a message. | |
2608 | * | |
2609 | * Conditions: | |
2610 | * Nothing locked. | |
2611 | */ | |
2612 | void | |
2613 | ipc_importance_unreceive( | |
2614 | ipc_kmsg_t kmsg, | |
2615 | mach_msg_option_t __unused option) | |
2616 | { | |
2617 | /* importance should already be in the voucher and out of the kmsg */ | |
2618 | assert(IIE_NULL == kmsg->ikm_importance); | |
2619 | ||
2620 | /* See if there is a legacy boost to be dropped from receiver */ | |
2621 | if (MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) { | |
2622 | ipc_importance_task_t task_imp; | |
2623 | ||
2624 | kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP; | |
2625 | task_imp = current_task()->task_imp_base; | |
2626 | if (!IP_VALID(kmsg->ikm_voucher) && IIT_NULL != task_imp) { | |
2627 | ipc_importance_task_drop_legacy_external_assertion(task_imp, 1); | |
2628 | } | |
2629 | /* | |
2630 | * ipc_kmsg_copyout_dest() will consume the voucher | |
2631 | * and any contained importance. | |
2632 | */ | |
2633 | } | |
2634 | } | |
2635 | ||
2636 | /* | |
2637 | * Routine: ipc_importance_clean | |
2638 | * Purpose: | |
2639 | * Clean up importance state in a kmsg that is being cleaned. | |
2640 | * Unlink the importance chain if one was set up, and drop | |
2641 | * the reference this kmsg held on the donor. Then check to | |
2642 | * see if importance was carried to the port, and remove that if | |
2643 | * needed. | |
2644 | * Conditions: | |
2645 | * Nothing locked. | |
2646 | */ | |
2647 | void | |
2648 | ipc_importance_clean( | |
2649 | ipc_kmsg_t kmsg) | |
2650 | { | |
2651 | ipc_port_t port; | |
2652 | ||
2653 | /* Is the kmsg still linked? If so, remove that first */ | |
2654 | if (IIE_NULL != kmsg->ikm_importance) { | |
2655 | ipc_importance_elem_t elem; | |
2656 | ||
2657 | ipc_importance_lock(); | |
2658 | elem = ipc_importance_kmsg_unlink(kmsg); | |
2659 | assert(IIE_NULL != elem); | |
2660 | ipc_importance_release_locked(elem); | |
2661 | /* importance unlocked */ | |
2662 | } | |
2663 | ||
2664 | /* See if there is a legacy importance boost to be dropped from port */ | |
2665 | if (MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) { | |
2666 | kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP; | |
2667 | port = kmsg->ikm_header->msgh_remote_port; | |
2668 | if (IP_VALID(port)) { | |
2669 | ip_lock(port); | |
2670 | /* inactive ports already had their importance boosts dropped */ | |
2671 | if (!ip_active(port) || | |
2672 | ipc_port_importance_delta(port, -1) == FALSE) { | |
2673 | ip_unlock(port); | |
2674 | } | |
2675 | } | |
2676 | } | |
2677 | } | |
2678 | ||
2679 | void | |
2680 | ipc_importance_assert_clean(__assert_only ipc_kmsg_t kmsg) | |
2681 | { | |
2682 | assert(IIE_NULL == kmsg->ikm_importance); | |
2683 | assert(!MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)); | |
2684 | } | |
2685 | ||
2686 | /* | |
2687 | * IPC Importance Attribute Manager definition | |
2688 | */ | |
2689 | ||
2690 | static kern_return_t | |
2691 | ipc_importance_release_value( | |
2692 | ipc_voucher_attr_manager_t manager, | |
2693 | mach_voucher_attr_key_t key, | |
2694 | mach_voucher_attr_value_handle_t value, | |
2695 | mach_voucher_attr_value_reference_t sync); | |
2696 | ||
2697 | static kern_return_t | |
2698 | ipc_importance_get_value( | |
2699 | ipc_voucher_attr_manager_t manager, | |
2700 | mach_voucher_attr_key_t key, | |
2701 | mach_voucher_attr_recipe_command_t command, | |
2702 | mach_voucher_attr_value_handle_array_t prev_values, | |
2703 | mach_voucher_attr_value_handle_array_size_t prev_value_count, | |
2704 | mach_voucher_attr_content_t content, | |
2705 | mach_voucher_attr_content_size_t content_size, | |
2706 | mach_voucher_attr_value_handle_t *out_value, | |
2707 | ipc_voucher_t *out_value_voucher); | |
2708 | ||
2709 | static kern_return_t | |
2710 | ipc_importance_extract_content( | |
2711 | ipc_voucher_attr_manager_t manager, | |
2712 | mach_voucher_attr_key_t key, | |
2713 | mach_voucher_attr_value_handle_array_t values, | |
2714 | mach_voucher_attr_value_handle_array_size_t value_count, | |
2715 | mach_voucher_attr_recipe_command_t *out_command, | |
2716 | mach_voucher_attr_content_t out_content, | |
2717 | mach_voucher_attr_content_size_t *in_out_content_size); | |
2718 | ||
2719 | static kern_return_t | |
2720 | ipc_importance_command( | |
2721 | ipc_voucher_attr_manager_t manager, | |
2722 | mach_voucher_attr_key_t key, | |
2723 | mach_voucher_attr_value_handle_array_t values, | |
2724 | mach_msg_type_number_t value_count, | |
2725 | mach_voucher_attr_command_t command, | |
2726 | mach_voucher_attr_content_t in_content, | |
2727 | mach_voucher_attr_content_size_t in_content_size, | |
2728 | mach_voucher_attr_content_t out_content, | |
2729 | mach_voucher_attr_content_size_t *out_content_size); | |
2730 | ||
2731 | static void | |
2732 | ipc_importance_manager_release( | |
2733 | ipc_voucher_attr_manager_t manager); | |
2734 | ||
2735 | struct ipc_voucher_attr_manager ipc_importance_manager = { | |
2736 | .ivam_release_value = ipc_importance_release_value, | |
2737 | .ivam_get_value = ipc_importance_get_value, | |
2738 | .ivam_extract_content = ipc_importance_extract_content, | |
2739 | .ivam_command = ipc_importance_command, | |
2740 | .ivam_release = ipc_importance_manager_release, | |
2741 | }; | |
2742 | ||
2743 | #define IMPORTANCE_ASSERT_KEY(key) assert(MACH_VOUCHER_ATTR_KEY_IMPORTANCE == (key)) | |
2744 | #define IMPORTANCE_ASSERT_MANAGER(manager) assert(&ipc_importance_manager == (manager)) | |
2745 | ||
2746 | /* | |
2747 | * Routine: ipc_importance_release_value [Voucher Attribute Manager Interface] | |
2748 | * Purpose: | |
2749 | * Release what the voucher system believes is the last "made" reference | |
2750 | * on an importance attribute value handle. The sync parameter is used to | |
2751 | * avoid races with new made references concurrently being returned to the | |
2752 | * voucher system in other threads. | |
2753 | * Conditions: | |
2754 | * Nothing locked on entry. May block. | |
2755 | */ | |
2756 | static kern_return_t | |
2757 | ipc_importance_release_value( | |
2758 | ipc_voucher_attr_manager_t __assert_only manager, | |
2759 | mach_voucher_attr_key_t __assert_only key, | |
2760 | mach_voucher_attr_value_handle_t value, | |
2761 | mach_voucher_attr_value_reference_t sync) | |
2762 | { | |
2763 | ipc_importance_elem_t elem; | |
2764 | ||
2765 | IMPORTANCE_ASSERT_MANAGER(manager); | |
2766 | IMPORTANCE_ASSERT_KEY(key); | |
2767 | assert(0 < sync); | |
2768 | ||
2769 | elem = (ipc_importance_elem_t)value; | |
2770 | ||
2771 | ipc_importance_lock(); | |
2772 | ||
2773 | /* Any outstanding made refs? */ | |
2774 | if (sync != elem->iie_made) { | |
2775 | assert(sync < elem->iie_made); | |
2776 | ipc_importance_unlock(); | |
2777 | return KERN_FAILURE; | |
2778 | } | |
2779 | ||
2780 | /* clear made */ | |
2781 | elem->iie_made = 0; | |
2782 | ||
2783 | /* | |
2784 | * If there are pending external boosts represented by this attribute, | |
2785 | * drop them from the appropriate task. | |
2786 | */ | |
2787 | if (IIE_TYPE_INHERIT == IIE_TYPE(elem)) { | |
2788 | ipc_importance_inherit_t inherit = (ipc_importance_inherit_t)elem; | |
2789 | ||
2790 | assert(inherit->iii_externcnt >= inherit->iii_externdrop); | |
2791 | ||
2792 | if (inherit->iii_donating) { | |
2793 | ipc_importance_task_t imp_task = inherit->iii_to_task; | |
2794 | uint32_t assertcnt = III_EXTERN(inherit); | |
2795 | ||
2796 | assert(ipc_importance_task_is_any_receiver_type(imp_task)); | |
2797 | assert(imp_task->iit_externcnt >= inherit->iii_externcnt); | |
2798 | assert(imp_task->iit_externdrop >= inherit->iii_externdrop); | |
2799 | imp_task->iit_externcnt -= inherit->iii_externcnt; | |
2800 | imp_task->iit_externdrop -= inherit->iii_externdrop; | |
2801 | inherit->iii_externcnt = 0; | |
2802 | inherit->iii_externdrop = 0; | |
2803 | inherit->iii_donating = FALSE; | |
2804 | ||
2805 | /* adjust the internal assertions - and propagate if needed */ | |
2806 | if (ipc_importance_task_check_transition(imp_task, IIT_UPDATE_DROP, assertcnt)) { | |
2807 | ipc_importance_task_propagate_assertion_locked(imp_task, IIT_UPDATE_DROP, TRUE); | |
2808 | } | |
2809 | } else { | |
2810 | inherit->iii_externcnt = 0; | |
2811 | inherit->iii_externdrop = 0; | |
2812 | } | |
2813 | } | |
2814 | ||
2815 | /* drop the made reference on elem */ | |
2816 | ipc_importance_release_locked(elem); | |
2817 | /* returns unlocked */ | |
2818 | ||
2819 | return KERN_SUCCESS; | |
2820 | } | |
2821 | ||
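#if 0 /* illustrative sketch - not part of the original source */
/*
 * A sketch of how the sync parameter is intended to be used by the
 * voucher layer: it passes the count of made references it believes
 * are outstanding, and a KERN_FAILURE return means new made references
 * raced in, so the value must be kept alive for now. The caller shown
 * here is hypothetical; the real one lives in the voucher code.
 */
static void
example_release_importance_value(mach_voucher_attr_value_handle_t value,
    mach_voucher_attr_value_reference_t made_refs)
{
	kern_return_t kr;

	kr = ipc_importance_release_value(&ipc_importance_manager,
	    MACH_VOUCHER_ATTR_KEY_IMPORTANCE, value, made_refs);
	if (KERN_FAILURE == kr) {
		/* outstanding made refs remain - the value stays alive */
	}
}
#endif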
2822 | ||
2823 | /* | |
2824 | * Routine: ipc_importance_get_value [Voucher Attribute Manager Interface] | |
2825 | * Purpose: | |
2826 | * Convert command and content data into a reference on a [potentially new] | |
2827 | * attribute value. The importance attribute manager will only allow the | |
2828 | * caller to get a value for the current task's importance, or to redeem | |
2829 | * an importance attribute from an existing voucher. | |
2830 | * Conditions: | |
2831 | * Nothing locked on entry. May block. | |
2832 | */ | |
2833 | static kern_return_t | |
2834 | ipc_importance_get_value( | |
2835 | ipc_voucher_attr_manager_t __assert_only manager, | |
2836 | mach_voucher_attr_key_t __assert_only key, | |
2837 | mach_voucher_attr_recipe_command_t command, | |
2838 | mach_voucher_attr_value_handle_array_t prev_values, | |
2839 | mach_voucher_attr_value_handle_array_size_t prev_value_count, | |
2840 | mach_voucher_attr_content_t __unused content, | |
2841 | mach_voucher_attr_content_size_t content_size, | |
2842 | mach_voucher_attr_value_handle_t *out_value, | |
2843 | ipc_voucher_t *out_value_voucher) | |
2844 | { | |
2845 | ipc_importance_elem_t elem; | |
2846 | task_t self; | |
2847 | ||
2848 | IMPORTANCE_ASSERT_MANAGER(manager); | |
2849 | IMPORTANCE_ASSERT_KEY(key); | |
2850 | ||
2851 | if (0 != content_size) | |
2852 | return KERN_INVALID_ARGUMENT; | |
2853 | ||
2854 | /* never an out voucher */ | |
2855 | ||
2856 | switch (command) { | |
2857 | ||
2858 | case MACH_VOUCHER_ATTR_REDEEM: | |
2859 | ||
2860 | /* redeem of previous values is the value */ | |
2861 | if (0 < prev_value_count) { | |
2862 | elem = (ipc_importance_elem_t)prev_values[0]; | |
2863 | assert(IIE_NULL != elem); | |
2864 | ||
2865 | ipc_importance_lock(); | |
2866 | assert(0 < elem->iie_made); | |
2867 | elem->iie_made++; | |
2868 | ipc_importance_unlock(); | |
2869 | ||
2870 | *out_value = prev_values[0]; | |
2871 | return KERN_SUCCESS; | |
2872 | } | |
2873 | ||
2874 | /* redeem of default is default */ | |
2875 | *out_value = 0; | |
2876 | *out_value_voucher = IPC_VOUCHER_NULL; | |
2877 | return KERN_SUCCESS; | |
2878 | ||
2879 | case MACH_VOUCHER_ATTR_IMPORTANCE_SELF: | |
2880 | self = current_task(); | |
2881 | ||
2882 | elem = (ipc_importance_elem_t)ipc_importance_for_task(self, TRUE); | |
2883 | /* a made reference was added (or IIE_NULL was returned, which carries no reference) */ | |
2884 | ||
2885 | *out_value = (mach_voucher_attr_value_handle_t)elem; | |
2886 | *out_value_voucher = IPC_VOUCHER_NULL; | |
2887 | return KERN_SUCCESS; | |
2888 | ||
2889 | default: | |
2890 | /* | |
2891 | * every other command is unknown | |
2892 | * | |
2893 | * Specifically, there is no mechanism provided to construct an | |
2894 | * importance attribute for a task/process from just a pid or | |
2895 | * task port. It has to be copied (or redeemed) from a previous | |
2896 | * voucher that has it. | |
2897 | */ | |
2898 | return KERN_INVALID_ARGUMENT; | |
2899 | } | |
2900 | } | |
2901 | ||
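/*
 * Illustrative user-space sketch (hypothetical, untested) of how this
 * routine is reached: a MACH_VOUCHER_ATTR_IMPORTANCE_SELF recipe passed to
 * host_create_mach_voucher() arrives here with that command, while redeeming
 * from an existing voucher arrives as MACH_VOUCHER_ATTR_REDEEM.  Field names
 * follow <mach/mach_voucher_types.h>; content_size must be zero, per the
 * check above.
 *
 *	mach_voucher_attr_recipe_data_t recipe = {
 *		.key              = MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
 *		.command          = MACH_VOUCHER_ATTR_IMPORTANCE_SELF,
 *		.previous_voucher = MACH_VOUCHER_NULL,
 *		.content_size     = 0,
 *	};
 *	mach_voucher_t voucher = MACH_VOUCHER_NULL;
 *	kern_return_t kr = host_create_mach_voucher(mach_host_self(),
 *	    (mach_voucher_attr_raw_recipe_array_t)&recipe,
 *	    sizeof(recipe), &voucher);
 */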
2902 | /* | |
2903 | * Routine: ipc_importance_extract_content [Voucher Attribute Manager Interface] | |
2904 | * Purpose: | |
2905 | * Extract meaning from the attribute value present in a voucher. While | |
2906 | * the real goal is to provide commands and data that can reproduce the | |
2907 | * voucher's value "out of thin air", this isn't possible with importance | |
2908 | * attribute values. Instead, return debug info to help track down dependencies. | |
2909 | * Conditions: | |
2910 | * Nothing locked on entry. May block. | |
2911 | */ | |
2912 | static kern_return_t | |
2913 | ipc_importance_extract_content( | |
2914 | ipc_voucher_attr_manager_t __assert_only manager, | |
2915 | mach_voucher_attr_key_t __assert_only key, | |
2916 | mach_voucher_attr_value_handle_array_t values, | |
2917 | mach_voucher_attr_value_handle_array_size_t value_count, | |
2918 | mach_voucher_attr_recipe_command_t *out_command, | |
2919 | mach_voucher_attr_content_t out_content, | |
2920 | mach_voucher_attr_content_size_t *in_out_content_size) | |
2921 | { | |
2922 | mach_voucher_attr_content_size_t size = 0; | |
2923 | ipc_importance_elem_t elem; | |
2924 | unsigned int i; | |
2925 | ||
2926 | IMPORTANCE_ASSERT_MANAGER(manager); | |
2927 | IMPORTANCE_ASSERT_KEY(key); | |
2928 | ||
2929 | /* the first non-default value provides the data */ | |
2930 | for (i = 0; i < value_count ; i++) { | |
2931 | elem = (ipc_importance_elem_t)values[i]; | |
2932 | if (IIE_NULL == elem) | |
2933 | continue; | |
2934 | ||
2935 | snprintf((char *)out_content, *in_out_content_size, "Importance for pid "); | |
2936 | size = (mach_voucher_attr_content_size_t)strlen((char *)out_content); | |
2937 | ||
2938 | for (;;) { | |
2939 | ipc_importance_inherit_t inherit = III_NULL; | |
2940 | ipc_importance_task_t task_imp; | |
2941 | task_t task; | |
2942 | int task_pid; | |
2943 | ||
2944 | if (IIE_TYPE_TASK == IIE_TYPE(elem)) { | |
2945 | task_imp = (ipc_importance_task_t)elem; | |
2946 | task = task_imp->iit_task; | |
2947 | task_pid = (TASK_NULL != task) ? | |
2948 | audit_token_pid_from_task(task) : -1; | |
2949 | snprintf((char *)out_content + size, *in_out_content_size - size, "%d", task_pid); | |
2950 | } else { | |
2951 | inherit = (ipc_importance_inherit_t)elem; | |
2952 | task_imp = inherit->iii_to_task; | |
2953 | task = task_imp->iit_task; | |
2954 | task_pid = (TASK_NULL != task) ? | |
2955 | audit_token_pid_from_task(task) : -1; | |
2956 | snprintf((char *)out_content + size, *in_out_content_size - size, | |
2957 | "%d (%d of %d boosts) %s from pid ", task_pid, | |
2958 | III_EXTERN(inherit), inherit->iii_externcnt, | |
2959 | (inherit->iii_donating) ? "donated" : "linked"); | |
2960 | } | |
2961 | ||
2962 | size = (mach_voucher_attr_content_size_t)strlen((char *)out_content); | |
2963 | ||
2964 | if (III_NULL == inherit) | |
2965 | break; | |
2966 | ||
2967 | elem = inherit->iii_from_elem; | |
2968 | } | |
2969 | size++; /* account for the terminating NUL byte */ | |
2970 | } | |
2971 | *out_command = MACH_VOUCHER_ATTR_NOOP; /* cannot be used to regenerate value */ | |
2972 | *in_out_content_size = size; | |
2973 | return KERN_SUCCESS; | |
2974 | } | |
2975 | ||
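/*
 * The string assembled above is purely diagnostic.  For an inherit chain it
 * reads roughly like:
 *
 *	"Importance for pid 100 (1 of 2 boosts) donated from pid 200"
 *
 * A hypothetical user-space caller could retrieve it with the generic
 * voucher content call (sketch only, untested):
 *
 *	char buf[256];
 *	mach_voucher_attr_content_size_t len = sizeof(buf);
 *	kern_return_t kr = mach_voucher_extract_attr_content(voucher,
 *	    MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
 *	    (mach_voucher_attr_content_t)buf, &len);
 */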
2976 | /* | |
2977 | * Routine: ipc_importance_command [Voucher Attribute Manager Interface] | |
2978 | * Purpose: | |
2979 | * Run commands against the importance attribute value found in a voucher. | |
2980 | * No such commands are currently supported. | |
2981 | * Conditions: | |
2982 | * Nothing locked on entry. May block. | |
2983 | */ | |
2984 | static kern_return_t | |
2985 | ipc_importance_command( | |
2986 | ipc_voucher_attr_manager_t __assert_only manager, | |
2987 | mach_voucher_attr_key_t __assert_only key, | |
2988 | mach_voucher_attr_value_handle_array_t values, | |
2989 | mach_msg_type_number_t value_count, | |
2990 | mach_voucher_attr_command_t command, | |
2991 | mach_voucher_attr_content_t in_content, | |
2992 | mach_voucher_attr_content_size_t in_content_size, | |
2993 | mach_voucher_attr_content_t out_content, | |
2994 | mach_voucher_attr_content_size_t *out_content_size) | |
2995 | { | |
2996 | ipc_importance_inherit_t inherit; | |
2997 | ipc_importance_task_t to_task; | |
2998 | uint32_t refs, *outrefsp; | |
2999 | mach_msg_type_number_t i; | |
3000 | uint32_t externcnt; | |
3001 | ||
3002 | IMPORTANCE_ASSERT_MANAGER(manager); | |
3003 | IMPORTANCE_ASSERT_KEY(key); | |
3004 | ||
3005 | if (in_content_size != sizeof(refs) || | |
3006 | (*out_content_size != 0 && *out_content_size != sizeof(refs))) { | |
3007 | return KERN_INVALID_ARGUMENT; | |
3008 | } | |
3009 | refs = *(uint32_t *)(void *)in_content; | |
3010 | outrefsp = (*out_content_size != 0) ? (uint32_t *)(void *)out_content : NULL; | |
3011 | ||
3012 | if (MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL != command) { | |
3013 | return KERN_NOT_SUPPORTED; | |
3014 | } | |
3015 | ||
3016 | /* the first non-default value of the apropos type provides the data */ | |
3017 | inherit = III_NULL; | |
3018 | for (i = 0; i < value_count; i++) { | |
3019 | ipc_importance_elem_t elem = (ipc_importance_elem_t)values[i]; | |
3020 | ||
3021 | if (IIE_NULL != elem && IIE_TYPE_INHERIT == IIE_TYPE(elem)) { | |
3022 | inherit = (ipc_importance_inherit_t)elem; | |
3023 | break; | |
3024 | } | |
3025 | } | |
3026 | if (III_NULL == inherit) { | |
3027 | return KERN_INVALID_ARGUMENT; | |
3028 | } | |
3029 | ||
3030 | ipc_importance_lock(); | |
3031 | ||
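/*
 * A zero refs argument is treated as a query: report the current
 * external boost count on the inherit without dropping anything.
 */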
3032 | if (0 == refs) { | |
3033 | if (NULL != outrefsp) { | |
3034 | *outrefsp = III_EXTERN(inherit); | |
3035 | } | |
3036 | ipc_importance_unlock(); | |
3037 | return KERN_SUCCESS; | |
3038 | } | |
3039 | ||
3040 | /* Enough external references left to drop? */ | |
3041 | if (III_EXTERN(inherit) < refs) { | |
3042 | ipc_importance_unlock(); | |
3043 | return KERN_FAILURE; | |
3044 | } | |
3045 | ||
3046 | to_task = inherit->iii_to_task; | |
3047 | assert(ipc_importance_task_is_any_receiver_type(to_task)); | |
3048 | ||
3049 | /* re-base external and internal counters at the inherit and the to-task (if apropos) */ | |
3050 | if (inherit->iii_donating) { | |
3051 | assert(IIT_EXTERN(to_task) >= III_EXTERN(inherit)); | |
3052 | assert(to_task->iit_externcnt >= inherit->iii_externcnt); | |
3053 | assert(to_task->iit_externdrop >= inherit->iii_externdrop); | |
3054 | inherit->iii_externdrop += refs; | |
3055 | to_task->iit_externdrop += refs; | |
3056 | externcnt = III_EXTERN(inherit); | |
3057 | if (0 == externcnt) { | |
3058 | inherit->iii_donating = FALSE; | |
3059 | to_task->iit_externcnt -= inherit->iii_externcnt; | |
3060 | to_task->iit_externdrop -= inherit->iii_externdrop; | |
3061 | ||
3062 | ||
3063 | /* Start AppNap delay hysteresis - even if not the last boost for the task. */ | |
3064 | if (ipc_importance_delayed_drop_call != NULL && | |
3065 | ipc_importance_task_is_marked_denap_receiver(to_task)) { | |
3066 | ipc_importance_task_delayed_drop(to_task); | |
3067 | } | |
3068 | ||
3069 | /* drop task assertions associated with the dropped boosts */ | |
3070 | if (ipc_importance_task_check_transition(to_task, IIT_UPDATE_DROP, refs)) { | |
3071 | ipc_importance_task_propagate_assertion_locked(to_task, IIT_UPDATE_DROP, TRUE); | |
3072 | /* may have dropped and retaken importance lock */ | |
3073 | } | |
3074 | } else { | |
3075 | /* assert(to_task->iit_assertcnt >= refs + externcnt); */ | |
3076 | /* defensive deduction in case of assertcnt underflow */ | |
3077 | if (to_task->iit_assertcnt > refs + externcnt) { | |
3078 | to_task->iit_assertcnt -= refs; | |
3079 | } else { | |
3080 | to_task->iit_assertcnt = externcnt; | |
3081 | } | |
3082 | } | |
3083 | } else { | |
3084 | inherit->iii_externdrop += refs; | |
3085 | externcnt = III_EXTERN(inherit); | |
3086 | } | |
3087 | ||
3088 | /* capture result (if requested) */ | |
3089 | if (NULL != outrefsp) { | |
3090 | *outrefsp = externcnt; | |
3091 | } | |
3092 | ||
3093 | ipc_importance_unlock(); | |
3094 | return KERN_SUCCESS; | |
3095 | } | |
3096 | ||
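/*
 * Hypothetical user-space sketch (untested) of driving the DROP_EXTERNAL
 * command handled above; names follow <mach/mach_voucher_types.h>.
 * Passing refs == 0 simply queries the remaining external boost count:
 *
 *	uint32_t drop = 1, remaining = 0;
 *	mach_voucher_attr_content_size_t out_size = sizeof(remaining);
 *	kern_return_t kr = mach_voucher_attr_command(voucher,
 *	    MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
 *	    MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL,
 *	    (mach_voucher_attr_content_t)&drop, sizeof(drop),
 *	    (mach_voucher_attr_content_t)&remaining, &out_size);
 */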
3097 | /* | |
3098 | * Routine: ipc_importance_manager_release [Voucher Attribute Manager Interface] | |
3099 | * Purpose: | |
3100 | * Release the Voucher system's reference on the IPC importance attribute | |
3101 | * manager. | |
3102 | * Conditions: | |
3103 | * This could only occur after the manager drops the attribute control | |
3104 | * reference granted back at registration time. Since that reference is | |
3105 | * never dropped, this routine should never be called. | |
3106 | */ | |
3107 | static void | |
3108 | ipc_importance_manager_release( | |
3109 | ipc_voucher_attr_manager_t __assert_only manager) | |
3110 | { | |
3111 | IMPORTANCE_ASSERT_MANAGER(manager); | |
3112 | panic("Voucher importance manager released"); | |
3113 | } | |
3114 | ||
3115 | /* | |
3116 | * Routine: ipc_importance_init | |
3117 | * Purpose: | |
3118 | * Initialize the IPC importance manager. | |
3119 | * Conditions: | |
3120 | * Zones and Vouchers are already initialized. | |
3121 | */ | |
3122 | void | |
3123 | ipc_importance_init(void) | |
3124 | { | |
3125 | natural_t ipc_importance_max = (task_max + thread_max) * 2; | |
3126 | char temp_buf[26]; | |
3127 | kern_return_t kr; | |
3128 | ||
3129 | if (PE_parse_boot_argn("imp_interactive_receiver", temp_buf, sizeof(temp_buf))) { | |
3130 | ipc_importance_interactive_receiver = TRUE; | |
3131 | } | |
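/*
 * "imp_interactive_receiver" is a boot-arg; on a test system it would
 * typically be enabled with something like
 *     sudo nvram boot-args="imp_interactive_receiver"
 * (illustrative only; the exact invocation depends on the platform).
 */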
3132 | ||
3133 | ipc_importance_task_zone = zinit(sizeof(struct ipc_importance_task), | |
3134 | ipc_importance_max * sizeof(struct ipc_importance_task), | |
3135 | sizeof(struct ipc_importance_task), | |
3136 | "ipc task importance"); | |
3137 | zone_change(ipc_importance_task_zone, Z_NOENCRYPT, TRUE); | |
3138 | ||
3139 | ipc_importance_inherit_zone = zinit(sizeof(struct ipc_importance_inherit), | |
3140 | ipc_importance_max * sizeof(struct ipc_importance_inherit), | |
3141 | sizeof(struct ipc_importance_inherit), | |
3142 | "ipc importance inherit"); | |
3143 | zone_change(ipc_importance_inherit_zone, Z_NOENCRYPT, TRUE); | |
3144 | ||
3145 | ||
3146 | #if DEVELOPMENT || DEBUG | |
3147 | queue_init(&global_iit_alloc_queue); | |
3148 | #endif | |
3149 | ||
3150 | /* initialize global locking */ | |
3151 | ipc_importance_lock_init(); | |
3152 | ||
3153 | kr = ipc_register_well_known_mach_voucher_attr_manager(&ipc_importance_manager, | |
3154 | (mach_voucher_attr_value_handle_t)0, | |
3155 | MACH_VOUCHER_ATTR_KEY_IMPORTANCE, | |
3156 | &ipc_importance_control); | |
3157 | if (KERN_SUCCESS != kr) | |
3158 | printf("Voucher importance manager register returned %d\n", kr); | |
3159 | } | |
3160 | ||
3161 | /* | |
3162 | * Routine: ipc_importance_thread_call_init | |
3163 | * Purpose: | |
3164 | * Initialize the IPC importance code dependent upon | |
3165 | * thread-call support being available. | |
3166 | * Conditions: | |
3167 | * Thread-call mechanism is already initialized. | |
3168 | */ | |
3169 | void | |
3170 | ipc_importance_thread_call_init(void) | |
3171 | { | |
3172 | /* initialize delayed drop queue and thread-call */ | |
3173 | queue_init(&ipc_importance_delayed_drop_queue); | |
3174 | ipc_importance_delayed_drop_call = | |
3175 | thread_call_allocate(ipc_importance_task_delayed_drop_scan, NULL); | |
3176 | if (NULL == ipc_importance_delayed_drop_call) { | |
3177 | panic("ipc_importance_thread_call_init: thread_call_allocate failed"); | |
3178 | } | |
3179 | } | |
3180 | ||
3181 | /* | |
3182 | * Routine: task_importance_list_pids | |
3183 | * Purpose: List the pids to which this task is donating importance. | |
3184 | * Conditions: To be called only from kdp stackshot code. | |
3185 | * Will panic the system otherwise. | |
3186 | */ | |
3187 | extern int | |
3188 | task_importance_list_pids(task_t task, int flags, int *pid_list, unsigned int max_count) | |
3189 | { | |
3190 | if (lck_spin_is_acquired(&ipc_importance_lock_data) || | |
3191 | max_count < 1 || | |
3192 | task->task_imp_base == IIT_NULL || | |
3193 | pid_list == NULL || | |
3194 | flags != TASK_IMP_LIST_DONATING_PIDS) { | |
3195 | return 0; | |
3196 | } | |
3197 | unsigned int pidcount = 0; | |
3198 | task_t temp_task; | |
3199 | ipc_importance_task_t task_imp = task->task_imp_base; | |
3200 | ipc_kmsg_t temp_kmsg; | |
3201 | ipc_importance_inherit_t temp_inherit; | |
3202 | ipc_importance_elem_t elem; | |
3203 | int target_pid; | |
3204 | ||
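/*
 * Walk the inherits hanging off this task's importance, recording the
 * pid of the to-task for each inherit that is actively donating.
 */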
3205 | queue_iterate(&task_imp->iit_inherits, temp_inherit, ipc_importance_inherit_t, iii_inheritance) { | |
3206 | /* check space in buffer */ | |
3207 | if (pidcount >= max_count) | |
3208 | break; | |
3209 | target_pid = -1; | |
3210 | ||
3211 | if (temp_inherit->iii_donating) { | |
3212 | ||
3213 | #if DEVELOPMENT || DEBUG | |
3214 | target_pid = temp_inherit->iii_to_task->iit_bsd_pid; | |
3215 | #else | |
3216 | temp_task = temp_inherit->iii_to_task->iit_task; | |
3217 | if (temp_task != TASK_NULL) { | |
3218 | target_pid = audit_token_pid_from_task(temp_task); | |
3219 | } | |
3220 | #endif | |
3221 | } | |
3222 | ||
3223 | if (target_pid != -1) { | |
3224 | pid_list[pidcount++] = target_pid; | |
3225 | } | |
3226 | ||
3227 | } | |
3228 | ||
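/*
 * Also scan the in-flight boosting kmsgs linked to this task's
 * importance.  For each one still marked as carrying a raised
 * importance, record the pid named by its importance element.
 */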
3229 | queue_iterate(&task_imp->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance) { | |
3230 | if (pidcount >= max_count) | |
3231 | break; | |
3232 | target_pid = -1; | |
3233 | elem = temp_kmsg->ikm_importance; | |
3234 | temp_task = TASK_NULL; | |
3235 | ||
3236 | if (elem == IIE_NULL) { | |
3237 | continue; | |
3238 | } | |
3239 | ||
3240 | if (!(temp_kmsg->ikm_header && MACH_MSGH_BITS_RAISED_IMPORTANCE(temp_kmsg->ikm_header->msgh_bits))) { | |
3241 | continue; | |
3242 | } | |
3243 | ||
3244 | if (IIE_TYPE_TASK == IIE_TYPE(elem) && | |
3245 | (((ipc_importance_task_t)elem)->iit_task != TASK_NULL)) { | |
3246 | target_pid = audit_token_pid_from_task(((ipc_importance_task_t)elem)->iit_task); | |
3247 | } else { | |
3248 | temp_inherit = (ipc_importance_inherit_t)elem; | |
3249 | #if DEVELOPMENT || DEBUG | |
3250 | target_pid = temp_inherit->iii_to_task->iit_bsd_pid; | |
3251 | #else | |
3252 | temp_task = temp_inherit->iii_to_task->iit_task; | |
3253 | if (temp_task != TASK_NULL) { | |
3254 | target_pid = audit_token_pid_from_task(temp_task); | |
3255 | } | |
3256 | #endif | |
3257 | } | |
3258 | ||
3259 | if (target_pid != -1) { | |
3260 | pid_list[pidcount++] = target_pid; | |
3261 | } | |
3262 | } | |
3263 | ||
3264 | return pidcount; | |
3265 | } | |
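/*
 * Hypothetical in-kernel usage sketch (stackshot-style caller), illustrating
 * the interface only:
 *
 *	int pids[32];
 *	int count;
 *
 *	count = task_importance_list_pids(task, TASK_IMP_LIST_DONATING_PIDS,
 *	    pids, sizeof(pids) / sizeof(pids[0]));
 *
 * On return, the first "count" entries of pids[] are valid; 0 means either
 * nothing is being donated or the preconditions above were not met.
 */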
3266 |