lck_spin_try_lock(&ipc_importance_lock_data)
#define ipc_importance_unlock() \
lck_spin_unlock(&ipc_importance_lock_data)
-#define ipc_importance_sleep(elem) lck_spin_sleep(&ipc_importance_lock_data, \
- LCK_SLEEP_DEFAULT, \
- (event_t)(elem), \
- THREAD_UNINT)
-#define ipc_importance_wakeup(elem) thread_wakeup((event_t)(elem))
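+/* Assert that the importance lock is held by the caller; used in the paths below. */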
+#define ipc_importance_assert_held() \
+ lck_spin_assert(&ipc_importance_lock_data, LCK_ASSERT_OWNED)
#if IIE_REF_DEBUG
#define incr_ref_counter(x) (hw_atomic_add(&(x), 1))
{
assert(0 < IIE_REFS(elem));
-#if DEVELOPMENT || DEBUG
+#if IMPORTANCE_DEBUG
ipc_importance_inherit_t temp_inherit;
ipc_importance_task_t link_task;
ipc_kmsg_t temp_kmsg;
expected++;
if (IIE_REFS(elem) < expected + 1)
panic("ipc_importance_release_locked (%p)", elem);
-#endif
+#endif /* IMPORTANCE_DEBUG */
if (0 < ipc_importance_release_internal(elem)) {
ipc_importance_unlock();
iit_update_type_t type,
uint32_t delta)
{
-
+#if IMPORTANCE_TRACE
task_t target_task = task_imp->iit_task;
+#endif
boolean_t boost = (IIT_UPDATE_HOLD == type);
boolean_t before_boosted, after_boosted;
+ ipc_importance_assert_held();
+
if (!ipc_importance_task_is_any_receiver_type(task_imp))
return FALSE;
-#if IMPORTANCE_DEBUG
+#if IMPORTANCE_TRACE
int target_pid = task_pid(target_task);
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (((boost) ? IMP_HOLD : IMP_DROP) | TASK_POLICY_INTERNAL))) | DBG_FUNC_START,
/* Adjust the assertcnt appropriately */
if (boost) {
task_imp->iit_assertcnt += delta;
-#if IMPORTANCE_DEBUG
+#if IMPORTANCE_TRACE
DTRACE_BOOST6(send_boost, task_t, target_task, int, target_pid,
task_t, current_task(), int, proc_selfpid(), int, delta, int, task_imp->iit_assertcnt);
#endif
// assert(delta <= task_imp->iit_assertcnt);
if (task_imp->iit_assertcnt < delta + IIT_EXTERN(task_imp)) {
/* TODO: Turn this back into a panic <rdar://problem/12592649> */
- if (target_task != TASK_NULL) {
- printf("Over-release of kernel-internal importance assertions for pid %d (%s), "
- "dropping %d assertion(s) but task only has %d remaining (%d external).\n",
- task_pid(target_task),
- (target_task->bsd_info == NULL) ? "" : proc_name_address(target_task->bsd_info),
- delta,
- task_imp->iit_assertcnt,
- IIT_EXTERN(task_imp));
- }
task_imp->iit_assertcnt = IIT_EXTERN(task_imp);
} else {
task_imp->iit_assertcnt -= delta;
}
-#if IMPORTANCE_DEBUG
+#if IMPORTANCE_TRACE
// This covers both legacy and voucher-based importance.
DTRACE_BOOST4(drop_boost, task_t, target_task, int, target_pid, int, delta, int, task_imp->iit_assertcnt);
#endif
}
-#if IMPORTANCE_DEBUG
+#if IMPORTANCE_TRACE
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (((boost) ? IMP_HOLD : IMP_DROP) | TASK_POLICY_INTERNAL))) | DBG_FUNC_END,
proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
#endif
/* Adjust the task assertions and determine if an edge was crossed */
if (ipc_importance_task_check_transition(temp_task_imp, type, 1)) {
- incr_ref_counter(task_imp->iit_elem.iie_task_refs_added_transition);
+ incr_ref_counter(temp_task_imp->iit_elem.iie_task_refs_added_transition);
queue_enter(propagation, temp_task_imp, ipc_importance_task_t, iit_props);
/* reference donated */
} else {
assert(ipc_importance_task_is_any_receiver_type(temp_task_imp));
if (ipc_importance_task_check_transition(temp_task_imp, type, assertcnt)) {
ipc_importance_task_reference(temp_task_imp);
- incr_ref_counter(task_imp->iit_elem.iie_task_refs_added_transition);
+ incr_ref_counter(temp_task_imp->iit_elem.iie_task_refs_added_transition);
queue_enter(propagation, temp_task_imp, ipc_importance_task_t, iit_props);
}
}
queue_init(&updates);
queue_init(&propagate);
+ ipc_importance_assert_held();
+
/*
* If we're going to update the policy for the provided task,
* enqueue it on the propagate queue itself. Otherwise, only
* enqueue downstream things.
*/
if (update_task_imp) {
+ ipc_importance_task_reference(task_imp);
+ incr_ref_counter(task_imp->iit_elem.iie_task_refs_added_transition);
queue_enter(&propagate, task_imp, ipc_importance_task_t, iit_props);
} else {
ipc_importance_task_propagate_helper(task_imp, type, &propagate);
boolean_t need_update;
queue_remove_first(&propagate, temp_task_imp, ipc_importance_task_t, iit_props);
+ /* hold a reference on temp_task_imp */
+
assert(IIT_NULL != temp_task_imp);
/* only propagate for receivers not already marked as a donor */
assert(ipc_importance_task_is_marked_denap_receiver(temp_task_imp));
}
}
+
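+ /* release the reference that was donated when temp_task_imp was enqueued for propagation */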
+ ipc_importance_task_release_internal(temp_task_imp);
}
/* apply updates to task (may drop importance lock) */
ipc_importance_lock();
target_task = task_imp->iit_task;
-#if IMPORTANCE_DEBUG
+#if IMPORTANCE_TRACE
int target_pid = task_pid(target_task);
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_HOLD | TASK_POLICY_EXTERNAL))) | DBG_FUNC_START,
}
ipc_importance_unlock();
-#if IMPORTANCE_DEBUG
+#if IMPORTANCE_TRACE
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_HOLD | TASK_POLICY_EXTERNAL))) | DBG_FUNC_END,
proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
// This covers the legacy case where a task takes an extra boost.
ipc_importance_lock();
target_task = task_imp->iit_task;
-#if IMPORTANCE_DEBUG
+#if IMPORTANCE_TRACE
int target_pid = task_pid(target_task);
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_DROP | TASK_POLICY_EXTERNAL))) | DBG_FUNC_START,
ret = KERN_SUCCESS;
}
-#if IMPORTANCE_DEBUG
+#if IMPORTANCE_TRACE
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_DROP | TASK_POLICY_EXTERNAL))) | DBG_FUNC_END,
proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
#endif
return KERN_FAILURE;
}
-#if IMPORTANCE_DEBUG
+#if IMPORTANCE_TRACE
int target_pid = task_pid(target_task);
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, IMP_EXTERN)) | DBG_FUNC_START,
task_imp->iit_externcnt += count;
ipc_importance_unlock();
-#if IMPORTANCE_DEBUG
+#if IMPORTANCE_TRACE
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, IMP_EXTERN)) | DBG_FUNC_END,
proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
// This is the legacy boosting path
DTRACE_BOOST5(receive_boost, task_t, target_task, int, target_pid, int, sender_pid, int, count, int, IIT_LEGACY_EXTERN(task_imp));
-#endif /* IMPORTANCE_DEBUG */
+#endif /* IMPORTANCE_TRACE */
return(KERN_SUCCESS);
}
/* snapshot task live donor status - may change, but another call will accompany the change */
task_live_donor = target_task->effective_policy.tep_live_donor;
-#if IMPORTANCE_DEBUG
+#if IMPORTANCE_TRACE
int target_pid = task_pid(target_task);
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
ipc_importance_task_propagate_assertion_locked(task_imp, type, FALSE);
}
-#if IMPORTANCE_DEBUG
+#if IMPORTANCE_TRACE
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
(IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_UPDATE_LIVE_DONOR_STATE)) | DBG_FUNC_END,
target_pid, task_imp->iit_donor, task_live_donor, after_donor, 0);
}
/*
- * Routine: ipc_importance_task_marked_live_donor
+ * Routine: ipc_importance_task_is_marked_live_donor
* Purpose:
* Query the live donor and donor flags for the given task importance.
* Conditions:
/* Create an importance linkage from old_task to new_task */
inherit = ipc_importance_inherit_from_task(old_task, new_task);
- if (inherit == III_NULL) {
- return inherit;
- }
/* Switch task importance base from old task to new task */
ipc_importance_lock();
boolean_t imp_lock_held = FALSE;
int assertcnt = 0;
ipc_port_t base;
+ struct turnstile *send_turnstile = TURNSTILE_NULL;
assert(port != IP_NULL);
assert(dest != IP_NULL);
return TRUE;
base = dest;
+ /* Check if destination needs a turnstile */
+ ipc_port_send_turnstile_prepare(dest);
+
/* port is in limbo, so donation status is safe to latch */
if (port->ip_impdonation != 0) {
imp_lock_held = TRUE;
assert(port->ip_receiver_name == MACH_PORT_NULL);
assert(port->ip_destination == IP_NULL);
- while (dest != IP_NULL) {
+ base = dest;
+ while (base != IP_NULL) {
ipc_port_t next;
- /* dest is in transit or in limbo */
+ /* base is in transit or in limbo */
- assert(ip_active(dest));
- assert(dest->ip_receiver_name == MACH_PORT_NULL);
+ assert(ip_active(base));
+ assert(base->ip_receiver_name == MACH_PORT_NULL);
- next = dest->ip_destination;
- ip_unlock(dest);
- dest = next;
+ next = base->ip_destination;
+ ip_unlock(base);
+ base = next;
}
if (imp_lock_held)
ipc_importance_unlock();
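+ /* balance the ipc_port_send_turnstile_prepare(dest) done above before returning */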
+ ipc_port_send_turnstile_complete(dest);
return TRUE;
}
ip_lock(port);
ipc_port_multiple_unlock();
- not_circular:
-
+not_circular:
/* port is in limbo */
+ imq_lock(&port->ip_messages);
assert(ip_active(port));
assert(port->ip_receiver_name == MACH_PORT_NULL);
/* take the port out of limbo w.r.t. assertions */
port->ip_tempowner = 0;
+ /*
+ * Set up linkage for the source port if it has a send turnstile, i.e. it
+ * has a thread waiting in send, has a port enqueued in it, or has a sync
+ * IPC push from a special reply port.
+ */
+ if (port_send_turnstile(port)) {
+ send_turnstile = turnstile_prepare((uintptr_t)port,
+ port_send_turnstile_address(port),
+ TURNSTILE_NULL, TURNSTILE_SYNC_IPC);
+
+ turnstile_update_inheritor(send_turnstile, port_send_turnstile(dest),
+ (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE));
+
+ /* turnstile_update_inheritor_complete and turnstile_complete are called below, after the chain locks are dropped */
+ }
+ imq_unlock(&port->ip_messages);
+
/* now unlock chain */
ip_unlock(port);
for (;;) {
+ ipc_port_t next;
/* every port along the chain tracks assertions behind it */
ipc_port_impcount_delta(dest, assertcnt, base);
assert(dest->ip_destination != IP_NULL);
assert(dest->ip_tempowner == 0);
- port = dest->ip_destination;
+ next = dest->ip_destination;
ip_unlock(dest);
- dest = port;
+ dest = next;
}
/* base is not in transit */
if (imp_lock_held)
ipc_importance_unlock();
+ /* All locks dropped, call turnstile_update_inheritor_complete for source port's turnstile */
+ if (send_turnstile) {
+ turnstile_update_inheritor_complete(send_turnstile, TURNSTILE_INTERLOCK_NOT_HELD);
+
+ /* Take the mq lock to call turnstile complete */
+ imq_lock(&port->ip_messages);
+ turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), NULL);
+ send_turnstile = TURNSTILE_NULL;
+ imq_unlock(&port->ip_messages);
+ turnstile_cleanup();
+ }
+
if (imp_task != IIT_NULL)
ipc_importance_task_release(imp_task);
/* If forced sending a static boost, go update the port */
if ((option & MACH_SEND_IMPORTANCE) != 0) {
- kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;
+ /* acquire the importance lock while trying to hang on to port lock */
+ if (!ipc_importance_lock_try()) {
+ port_lock_dropped = TRUE;
+ ip_unlock(port);
+ ipc_importance_lock();
+ }
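+ /* importance lock is held either way; port_lock_dropped records whether the port lock was released */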
goto portupdate;
}
return port_lock_dropped;
}
+portupdate:
/* Mark the fact that we are (currently) donating through this message */
kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;
ip_lock(port);
}
- portupdate:
-
-#if IMPORTANCE_DEBUG
+ ipc_importance_assert_held();
+
+#if IMPORTANCE_TRACE
if (kdebug_enable) {
mach_msg_max_trailer_t *dbgtrailer = (mach_msg_max_trailer_t *)
((vm_offset_t)kmsg->ikm_header + round_msg(kmsg->ikm_header->msgh_size));
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_SEND)) | DBG_FUNC_START,
task_pid(task), sender_pid, imp_msgh_id, 0, 0);
}
-#endif /* IMPORTANCE_DEBUG */
+#endif /* IMPORTANCE_TRACE */
mach_port_delta_t delta = 1;
boolean_t need_port_lock;
/* adjust port boost count (with importance and port locked) */
need_port_lock = ipc_port_importance_delta_internal(port, IPID_OPTION_NORMAL, &delta, &task_imp);
+ /* hold a reference on task_imp */
/* if we need to adjust a task importance as a result, apply that here */
if (IIT_NULL != task_imp && delta != 0) {
}
}
- ipc_importance_unlock();
+ if (task_imp) {
+ ipc_importance_task_release_locked(task_imp);
+ /* importance unlocked */
+ } else {
+ ipc_importance_unlock();
+ }
if (need_port_lock) {
port_lock_dropped = TRUE;
}
}
-#if IMPORTANCE_DEBUG
+#if IMPORTANCE_TRACE
if (-1 < impresult)
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_DELV)) | DBG_FUNC_NONE,
sender_pid, task_pid(task_self),
*/
DTRACE_BOOST5(receive_boost, task_t, task_self, int, task_pid(task_self), int, sender_pid, int, 1, int, task_self->task_imp_base->iit_assertcnt);
}
-#endif /* IMPORTANCE_DEBUG */
+#endif /* IMPORTANCE_TRACE */
}
/*