#include <kern/thread.h>
#include <kern/misc_protos.h>
#include <kern/waitq.h>
+#include <kern/policy_internal.h>
+#include <kern/debug.h>
+#include <kern/kcdata.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_notify.h>
#include <ipc/ipc_table.h>
#include <ipc/ipc_importance.h>
+#include <machine/machlimits.h>
#include <security/mac_mach_internal.h>
if (port->ip_impdonation != 0 &&
port->ip_spimportant == 0 &&
(task_is_importance_donor(current_task()))) {
- port->ip_spimportant = 1;
*importantp = TRUE;
}
#endif /* IMPORTANCE_INHERITANCE */
* (or armed with importance in that version).
*/
-#if IMPORTANCE_INHERITANCE
boolean_t
ipc_port_request_sparm(
ipc_port_t port,
__assert_only mach_port_name_t name,
ipc_port_request_index_t index,
- mach_msg_option_t option)
-#else
-boolean_t
-ipc_port_request_sparm(
- ipc_port_t port,
- __assert_only mach_port_name_t name,
- ipc_port_request_index_t index)
-#endif /* IMPORTANCE_INHERITANCE */
+ mach_msg_option_t option,
+ mach_msg_priority_t override)
{
if (index != IE_REQ_NONE) {
ipc_port_request_t ipr, table;

table = port->ip_requests;
ipr = &table[index];
assert(ipr->ipr_name == name);
+ /* Is this a send-possible request? */
if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
ipr->ipr_soright = IPR_SOR_MAKE(ipr->ipr_soright, IPR_SOR_SPARM_MASK);
port->ip_sprequests = 1;
+
+ if (option & MACH_SEND_OVERRIDE) {
+ /* apply override to message queue */
+ ipc_mqueue_override_send(&port->ip_messages, override);
+ }
+
#if IMPORTANCE_INHERITANCE
if (((option & MACH_SEND_NOIMPORTANCE) == 0) &&
(port->ip_impdonation != 0) &&
(port->ip_spimportant == 0) &&
(((option & MACH_SEND_IMPORTANCE) != 0) ||
(task_is_importance_donor(current_task())))) {
- port->ip_spimportant = 1;
return TRUE;
}
#else
/*
* Routine: ipc_port_clear_receiver
* Purpose:
- * Prepares a receive right for transmission/destruction.
+ * Prepares a receive right for transmission/destruction,
+ * optionally performs mqueue destruction (with port lock held)
+ *
* Conditions:
* The port is locked and active.
+ * Returns:
+ * If should_destroy is TRUE, then the return value indicates
+ * whether the caller needs to reap kmsg structures that should
+ * be destroyed (by calling ipc_kmsg_reap_delayed)
+ *
+ * If should_destroy is FALSE, this always returns FALSE
*/
-void
+boolean_t
ipc_port_clear_receiver(
- ipc_port_t port)
+ ipc_port_t port,
+ boolean_t should_destroy)
{
- spl_t s;
-
- assert(ip_active(port));
+ ipc_mqueue_t mqueue = &port->ip_messages;
+ boolean_t reap_messages = FALSE;
/*
- * pull ourselves from any sets.
+ * Pull ourselves out of any sets to which we belong.
+ * We hold the port locked, so even though this acquires and releases
+ * the mqueue lock, we know we won't be added to any other sets.
*/
if (port->ip_in_pset != 0) {
ipc_pset_remove_from_all(port);
* Send anyone waiting on the port's queue directly away.
* Also clear the mscount and seqno.
*/
- s = splsched();
- imq_lock(&port->ip_messages);
- ipc_mqueue_changed(&port->ip_messages);
- ipc_port_set_mscount(port, 0);
- port->ip_messages.imq_seqno = 0;
+ imq_lock(mqueue);
+ ipc_mqueue_changed(mqueue);
+ port->ip_mscount = 0;
+ mqueue->imq_seqno = 0;
port->ip_context = port->ip_guarded = port->ip_strict_guard = 0;
+
+ if (should_destroy) {
+ /*
+ * Mark the mqueue invalid, preventing further send/receive
+ * operations from succeeding. It's important for this to be
+ * done under the same lock hold as the ipc_mqueue_changed
+ * call to avoid additional threads blocking on an mqueue
+ * that's being destroyed.
+ */
+ reap_messages = ipc_mqueue_destroy_locked(mqueue);
+ }
+
- imq_unlock(&port->ip_messages);
+ imq_unlock(mqueue);
- splx(s);
+
+ return reap_messages;
}
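
A minimal sketch of the intended caller pattern for the new should_destroy
path (illustrative only, not a verbatim excerpt): destroy the mqueue while
the port lock is held, then reap deferred kmsgs only after dropping it,
as ipc_port_destroy does below.

	boolean_t reap = ipc_port_clear_receiver(port, TRUE);
	/* ... finish tearing down port state ... */
	ip_unlock(port);
	if (reap)
		ipc_kmsg_reap_delayed();	/* no locks held */
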
/*
port->ip_strict_guard = 0;
port->ip_impcount = 0;
- port->ip_reserved = 0;
+ port->ip_specialreply = 0;
+ port->ip_link_sync_qos = 0;
ipc_mqueue_init(&port->ip_messages,
FALSE /* !set */, NULL /* no reserved link */);
{
ipc_port_request_index_t index = 0;
ipc_table_elems_t size = 0;
-#if IMPORTANCE_INHERITANCE
- boolean_t dropassert = FALSE;
-#endif /* IMPORTANCE_INHERITANCE */
/*
* If the port has no send-possible request
#if IMPORTANCE_INHERITANCE
if (port->ip_spimportant != 0) {
port->ip_spimportant = 0;
- if (ipc_port_impcount_delta(port, -1, IP_NULL) == -1) {
- dropassert = TRUE;
+ if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == TRUE) {
+ ip_lock(port);
}
}
#endif /* IMPORTANCE_INHERITANCE */
if (port->ip_sprequests == 0) {
ip_unlock(port);
- goto out;
+ return;
}
port->ip_sprequests = 0;
}
}
ip_unlock(port);
-out:
-#if IMPORTANCE_INHERITANCE
- if (dropassert == TRUE && ipc_importance_task_is_any_receiver_type(current_task()->task_imp_base)) {
- /* drop internal assertion */
- ipc_importance_task_drop_internal_assertion(current_task()->task_imp_base, 1);
- }
-#endif /* IMPORTANCE_INHERITANCE */
return;
}
*/
void
-ipc_port_destroy(
- ipc_port_t port)
+ipc_port_destroy(ipc_port_t port)
{
ipc_port_t pdrequest, nsrequest;
ipc_mqueue_t mqueue;
ipc_kmsg_t kmsg;
+ boolean_t special_reply = port->ip_specialreply;
#if IMPORTANCE_INHERITANCE
ipc_importance_task_t release_imp_task = IIT_NULL;
assert(ip_active(port));
/* port->ip_receiver_name is garbage */
/* port->ip_receiver/port->ip_destination is garbage */
- assert(port->ip_in_pset == 0);
- assert(port->ip_mscount == 0);
/* check for a backup port */
pdrequest = port->ip_pdrequest;
#endif /* IMPORTANCE_INHERITANCE */
if (pdrequest != IP_NULL) {
+ /* clear receiver, don't destroy the port */
+ (void)ipc_port_clear_receiver(port, FALSE);
+ assert(port->ip_in_pset == 0);
+ assert(port->ip_mscount == 0);
+
/* we assume the ref for pdrequest */
port->ip_pdrequest = IP_NULL;
port->ip_destination = IP_NULL;
ip_unlock(port);
+ if (special_reply) {
+ ipc_port_unlink_special_reply_port(port,
+ IPC_PORT_UNLINK_SR_ALLOW_SYNC_QOS_LINKAGE);
+ }
/* consumes our refs for port and pdrequest */
ipc_notify_port_destroyed(pdrequest, port);
goto drop_assertions;
}
- /* once port is dead, we don't need to keep it locked */
-
port->ip_object.io_bits &= ~IO_BITS_ACTIVE;
port->ip_timestamp = ipc_port_timestamp();
nsrequest = port->ip_nsrequest;
+ /*
+ * The mach_msg_* paths don't hold a port lock, they only hold a
+ * reference to the port object. If a thread raced us and is now
+ * blocked waiting for message reception on this mqueue (or waiting
+ * for ipc_mqueue_full), it will never be woken up. We call
+ * ipc_port_clear_receiver() here, _after_ the port has been marked
+ * inactive, to wakeup any threads which may be blocked and ensure
+ * that no other thread can get lost waiting for a wake up on a
+ * port/mqueue that's been destroyed.
+ */
+ boolean_t reap_msgs = ipc_port_clear_receiver(port, TRUE); /* marks mqueue inactive */
+ assert(port->ip_in_pset == 0);
+ assert(port->ip_mscount == 0);
+
/*
* If the port has a preallocated message buffer and that buffer
* is not inuse, free it. If it has an inuse one, then the kmsg
* free will detect that we freed the association and it can free it
* like a normal buffer.
+ *
+ * Once the port is marked inactive we don't need to keep it locked.
*/
if (IP_PREALLOC(port)) {
ipc_port_t inuse_port;
ip_unlock(port);
}
+ /* unlink the special reply port from its destination port */
+ if (special_reply) {
+ ipc_port_unlink_special_reply_port(port,
+ IPC_PORT_UNLINK_SR_ALLOW_SYNC_QOS_LINKAGE);
+ }
+
/* throw away no-senders request */
if (nsrequest != IP_NULL)
ipc_notify_send_once(nsrequest); /* consumes ref */
- /* destroy any queued messages */
+ /*
+ * Reap any kmsg objects waiting to be destroyed.
+ * This must be done after we've released the port lock.
+ */
+ if (reap_msgs)
+ ipc_kmsg_reap_delayed();
+
mqueue = &port->ip_messages;
- ipc_mqueue_destroy(mqueue);
/* cleanup waitq related resources */
ipc_mqueue_deinit(mqueue);
* but guaranteeing that this doesn't create a circle
* port->ip_destination->ip_destination->... == port
*
- * Additionally, if port was successfully changed to "in transit",
- * propagate boost assertions from the "in limbo" port to all
- * the ports in the chain, and, if the destination task accepts
- * boosts, to the destination task.
- *
* Conditions:
* No ports locked. References held for "port" and "dest".
*/
ipc_port_t port,
ipc_port_t dest)
{
- ipc_port_t base;
-
#if IMPORTANCE_INHERITANCE
- ipc_importance_task_t imp_task = IIT_NULL;
- ipc_importance_task_t release_imp_task = IIT_NULL;
- int assertcnt = 0;
-#endif /* IMPORTANCE_INHERITANCE */
+ /* adjust importance counts at the same time */
+ return ipc_importance_check_circularity(port, dest);
+#else
+ ipc_port_t base;
+ sync_qos_count_t sync_qos_delta_add[THREAD_QOS_LAST] = {0};
+ sync_qos_count_t sync_qos_delta_sub[THREAD_QOS_LAST] = {0};
+ boolean_t update_knote = FALSE;
assert(port != IP_NULL);
assert(dest != IP_NULL);
* First try a quick check that can run in parallel.
* No circularity if dest is not in transit.
*/
-
ip_lock(port);
if (ip_lock_try(dest)) {
if (!ip_active(dest) ||
ip_lock(port);
ipc_port_multiple_unlock();
- not_circular:
+not_circular:
+ imq_lock(&base->ip_messages);
/* port is in limbo */
ip_reference(dest);
port->ip_destination = dest;
-#if IMPORTANCE_INHERITANCE
- /* must have been in limbo or still bound to a task */
- assert(port->ip_tempowner != 0);
-
- /*
- * We delayed dropping assertions from a specific task.
- * Cache that info now (we'll drop assertions and the
- * task reference below).
- */
- release_imp_task = port->ip_imp_task;
- if (IIT_NULL != release_imp_task) {
- port->ip_imp_task = IIT_NULL;
+ /* Capture the sync qos count delta */
+ for (int i = 0; i < THREAD_QOS_LAST; i++) {
+ sync_qos_delta_add[i] = port_sync_qos(port, i);
}
- assertcnt = port->ip_impcount;
-
- /* take the port out of limbo w.r.t. assertions */
- port->ip_tempowner = 0;
-
-#endif /* IMPORTANCE_INHERITANCE */
/* now unlock chain */
ip_unlock(port);
for (;;) {
-
-#if IMPORTANCE_INHERITANCE
- /* every port along chain track assertions behind it */
- dest->ip_impcount += assertcnt;
-#endif /* IMPORTANCE_INHERITANCE */
-
+ /* every port along chain tracks override behind it */
+ update_knote = ipc_port_sync_qos_delta(dest, sync_qos_delta_add, sync_qos_delta_sub);
if (dest == base)
break;
assert(dest->ip_receiver_name == MACH_PORT_NULL);
assert(dest->ip_destination != IP_NULL);
-#if IMPORTANCE_INHERITANCE
- assert(dest->ip_tempowner == 0);
-#endif /* IMPORTANCE_INHERITANCE */
-
port = dest->ip_destination;
ip_unlock(dest);
dest = port;
(base->ip_receiver_name != MACH_PORT_NULL) ||
(base->ip_destination == IP_NULL));
-#if IMPORTANCE_INHERITANCE
- /*
- * Find the task to boost (if any).
- * We will boost "through" ports that don't know
- * about inheritance to deliver receive rights that
- * do.
- */
- if (ip_active(base) && (assertcnt > 0)) {
- if (base->ip_tempowner != 0) {
- if (IIT_NULL != base->ip_imp_task) {
- /* specified tempowner task */
- imp_task = base->ip_imp_task;
- assert(ipc_importance_task_is_any_receiver_type(imp_task));
+ if (update_knote) {
+ KNOTE(&base->ip_messages.imq_klist, 0);
+ }
+ imq_unlock(&base->ip_messages);
+
+ ip_unlock(base);
+
+ return FALSE;
+#endif /* !IMPORTANCE_INHERITANCE */
+}
+
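
As a hedged illustration of the contract above (no ports locked, references
held for both "port" and "dest"), a copyin-style caller might look like the
sketch below; the error value shown is hypothetical, not the actual
message-copyin behavior.

	/* moving the receive right for "port" into a message bound for "dest" */
	if (ipc_port_check_circularity(port, dest)) {
		/* dest is reachable from port: moving the right
		 * would create a cycle, so it must not enter transit */
		return MACH_SEND_INVALID_DEST;	/* hypothetical choice */
	}
	/* port->ip_destination now points at dest; port is in transit */
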
+/*
+ * Routine: ipc_port_link_special_reply_port_with_qos
+ * Purpose:
+ * Link the special reply port with the destination port.
+ * Update the sync qos count of special reply port,
+ * destination port.
+ *
+ * Conditions:
+ * Nothing is locked.
+ */
+kern_return_t
+ipc_port_link_special_reply_port_with_qos(
+ ipc_port_t special_reply_port,
+ ipc_port_t dest_port,
+ int qos)
+{
+ ipc_port_t next, base;
+ sync_qos_count_t sync_qos_delta_add[THREAD_QOS_LAST] = {0};
+ sync_qos_count_t sync_qos_delta_sub[THREAD_QOS_LAST] = {0};
+ boolean_t update_knote = FALSE;
+ boolean_t multiple_lock = FALSE;
+
+ ip_lock(dest_port);
+
+ /* Check if dest is active */
+ if (!ip_active(dest_port)) {
+ ip_unlock(dest_port);
+ return KERN_FAILURE;
+ }
+
+ if ((dest_port->ip_receiver_name == MACH_PORT_NULL) &&
+ (dest_port->ip_destination != IP_NULL)) {
+ /* dest_port is in transit; need to take the serialize lock */
+ ip_unlock(dest_port);
+ goto take_multiple_lock;
+ }
+
+ /* Check if the port is a special reply port */
+ if (ip_lock_try(special_reply_port)) {
+ if (!special_reply_port->ip_specialreply ||
+ !special_reply_port->ip_link_sync_qos ||
+ (special_reply_port->ip_sync_qos_override_port != IP_NULL &&
+ special_reply_port->ip_sync_qos_override_port != dest_port)) {
+
+ boolean_t link_sync_qos = special_reply_port->ip_link_sync_qos;
+ ip_unlock(special_reply_port);
+ ip_unlock(dest_port);
+ /* return KERN_SUCCESS when link_sync_qos is not set */
+ if (!link_sync_qos) {
+ return KERN_SUCCESS;
}
- /* otherwise don't boost current task */
+ return KERN_FAILURE;
+ } else {
+ goto both_ports_locked;
+ }
+ }
- } else if (base->ip_receiver_name != MACH_PORT_NULL) {
- ipc_space_t space = base->ip_receiver;
+ ip_unlock(dest_port);
- /* only spaces with boost-accepting tasks */
- if (space->is_task != TASK_NULL &&
- ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base))
- imp_task = space->is_task->task_imp_base;
+take_multiple_lock:
+
+ ipc_port_multiple_lock(); /* massive serialization */
+ multiple_lock = TRUE;
+
+ ip_lock(special_reply_port);
+
+ /* Check if the special reply port is marked regular */
+ if (!special_reply_port->ip_specialreply ||
+ !special_reply_port->ip_link_sync_qos ||
+ (special_reply_port->ip_sync_qos_override_port != IP_NULL &&
+ special_reply_port->ip_sync_qos_override_port != dest_port)) {
+
+ boolean_t link_sync_qos = special_reply_port->ip_link_sync_qos;
+ ip_unlock(special_reply_port);
+ ipc_port_multiple_unlock();
+ /* return KERN_SUCCESS when link_sync_qos is not set */
+ if (!link_sync_qos) {
+ return KERN_SUCCESS;
}
+ return KERN_FAILURE;
+ }
+
+ ip_lock(dest_port);
+
+both_ports_locked:
+ next = dest_port;
- /* take reference before unlocking base */
- if (imp_task != IIT_NULL) {
- ipc_importance_task_reference(imp_task);
+ /* Apply the qos to special reply port, capture the old qos */
+ if (special_reply_port->ip_sync_qos_override_port != IP_NULL) {
+ /* Check if qos needs to be updated */
+ if ((sync_qos_count_t)qos <= port_special_qos(special_reply_port)) {
+ imq_lock(&dest_port->ip_messages);
+ goto done_update;
}
+ sync_qos_delta_sub[port_special_qos(special_reply_port)]++;
}
-#endif /* IMPORTANCE_INHERITANCE */
+ set_port_special_qos(special_reply_port, (sync_qos_count_t)qos);
+ sync_qos_delta_add[qos]++;
+
+ /* Link the special reply port to dest port */
+ if (special_reply_port->ip_sync_qos_override_port == IP_NULL) {
+ /* take a reference on dest_port */
+ ip_reference(dest_port);
+ special_reply_port->ip_sync_qos_override_port = dest_port;
+ }
+
+ /* Apply the sync qos delta to all in-transit ports */
+ for (;;) {
+ boolean_t port_not_in_transit = FALSE;
+ if (!ip_active(next) ||
+ (next->ip_receiver_name != MACH_PORT_NULL) ||
+ (next->ip_destination == IP_NULL)) {
+ /* Get the mqueue lock for destination port to update knotes */
+ imq_lock(&next->ip_messages);
+ port_not_in_transit = TRUE;
+ }
+ /* Apply the sync qos delta */
+ update_knote = ipc_port_sync_qos_delta(next, sync_qos_delta_add, sync_qos_delta_sub);
+
+ if (port_not_in_transit)
+ break;
+
+ next = next->ip_destination;
+ ip_lock(next);
+ }
+done_update:
+
+ if (multiple_lock) {
+ ipc_port_multiple_unlock();
+ }
+
+ ip_unlock(special_reply_port);
+ base = next;
+ next = dest_port;
+
+ while (next != base) {
+ ipc_port_t prev = next;
+ next = next->ip_destination;
+
+ ip_unlock(prev);
+ }
+
+ if (update_knote) {
+ KNOTE(&base->ip_messages.imq_klist, 0);
+ }
+ imq_unlock(&base->ip_messages);
ip_unlock(base);
+ return KERN_SUCCESS;
+}
+
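A hedged sketch of how a synchronous-IPC send path might use this routine;
reply_port and dest_port are assumed to be valid referenced ports, and
THREAD_QOS_USER_INITIATED is just an example tier.

	/* push the sender's sync-IPC QoS onto the destination before blocking */
	kern_return_t kr = ipc_port_link_special_reply_port_with_qos(
		reply_port, dest_port, THREAD_QOS_USER_INITIATED);
	if (kr != KERN_SUCCESS) {
		/* dest died or the reply port is not linkable
		 * (e.g. already linked to another port):
		 * continue without the override */
	}
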
+/*
+ * Routine: ipc_port_unlink_special_reply_port_locked
+ * Purpose:
+ * If the special reply port is linked to a port, adjust its sync qos override and unlink the port.
+ * Conditions:
+ * Special reply port locked on entry.
+ * Special reply port unlocked on return.
+ * Returns:
+ * None.
+ */
+void
+ipc_port_unlink_special_reply_port_locked(
+ ipc_port_t special_reply_port,
+ struct knote *kn,
+ uint8_t flags)
+{
+ ipc_port_t dest_port;
+ sync_qos_count_t sync_qos;
+ sync_qos_count_t sync_qos_delta_add[THREAD_QOS_LAST] = {0};
+ sync_qos_count_t sync_qos_delta_sub[THREAD_QOS_LAST] = {0};
+
+ /* Return if called from copy out in pseudo receive */
+ if (kn == ITH_KNOTE_PSEUDO) {
+ ip_unlock(special_reply_port);
+ return;
+ }
+
+ /* check if special port has a port linked to it */
+ if (special_reply_port->ip_specialreply == 0 ||
+ special_reply_port->ip_sync_qos_override_port == IP_NULL) {
+ set_port_special_qos(special_reply_port, 0);
+ if (flags & IPC_PORT_UNLINK_SR_CLEAR_SPECIAL_REPLY) {
+ special_reply_port->ip_specialreply = 0;
+ }
+ if (flags & IPC_PORT_UNLINK_SR_ALLOW_SYNC_QOS_LINKAGE) {
+ special_reply_port->ip_link_sync_qos = 1;
+ }
+ ip_unlock(special_reply_port);
+ return;
+ }
-#if IMPORTANCE_INHERITANCE
/*
- * Transfer assertions now that the ports are unlocked.
- * Avoid extra overhead if transferring to/from the same task.
+ * port->ip_sync_qos_override_port is not null and it is safe
+ * to access it since ip_specialreply is set.
*/
- boolean_t transfer_assertions = (imp_task != release_imp_task) ? TRUE : FALSE;
+ dest_port = special_reply_port->ip_sync_qos_override_port;
+ sync_qos_delta_sub[port_special_qos(special_reply_port)]++;
+ sync_qos = port_special_qos(special_reply_port);
+
+ /* Clear qos delta for special reply port */
+ set_port_special_qos(special_reply_port, 0);
+ special_reply_port->ip_sync_qos_override_port = IP_NULL;
+ if (flags & IPC_PORT_UNLINK_SR_CLEAR_SPECIAL_REPLY) {
+ special_reply_port->ip_specialreply = 0;
+ }
- if (imp_task != IIT_NULL) {
- if (transfer_assertions)
- ipc_importance_task_hold_internal_assertion(imp_task, assertcnt);
- ipc_importance_task_release(imp_task);
- imp_task = IIT_NULL;
+ if (flags & IPC_PORT_UNLINK_SR_ALLOW_SYNC_QOS_LINKAGE) {
+ special_reply_port->ip_link_sync_qos = 1;
+ } else {
+ special_reply_port->ip_link_sync_qos = 0;
}
- if (release_imp_task != IIT_NULL) {
- if (transfer_assertions)
- ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
- ipc_importance_task_release(release_imp_task);
- release_imp_task = IIT_NULL;
+ ip_unlock(special_reply_port);
+
+ /* Add the sync qos on knote */
+ if (ITH_KNOTE_VALID(kn)) {
+ knote_adjust_sync_qos(kn, sync_qos, TRUE);
}
-#endif /* IMPORTANCE_INHERITANCE */
- return FALSE;
+ /* Adjust the sync qos of destination */
+ ipc_port_adjust_sync_qos(dest_port, sync_qos_delta_add, sync_qos_delta_sub);
+ ip_release(dest_port);
+}
+
+/*
+ * Routine: ipc_port_unlink_special_reply_port
+ * Purpose:
+ * If the special reply port is linked to a port, adjust its sync qos override and unlink the port.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * None.
+ */
+void
+ipc_port_unlink_special_reply_port(
+ ipc_port_t special_reply_port,
+ uint8_t flags)
+{
+ ip_lock(special_reply_port);
+ ipc_port_unlink_special_reply_port_locked(special_reply_port, NULL, flags);
+ /* special_reply_port unlocked */
+}
+
+/*
+ * Routine: ipc_port_sync_qos_delta
+ * Purpose:
+ * Adjust the sync qos count associated with a port.
+ *
+ * For now, be defensive during deductions to make sure the
+ * sync_qos count for the port doesn't underflow below zero.
+ * Returns:
+ * TRUE: if max sync qos of the port changes.
+ * FALSE: otherwise.
+ * Conditions:
+ * The port is referenced and locked.
+ * The mqueue is locked if port is not in-transit.
+ */
+boolean_t
+ipc_port_sync_qos_delta(
+ ipc_port_t port,
+ sync_qos_count_t *sync_qos_delta_add,
+ sync_qos_count_t *sync_qos_delta_sub)
+{
+ sync_qos_count_t max_sync_qos_index;
+
+ if (!ip_active(port)) {
+ return FALSE;
+ }
+
+ max_sync_qos_index = ipc_port_get_max_sync_qos_index(port);
+
+ for (int i = 0; i < THREAD_QOS_LAST; i++) {
+ sync_qos_count_t port_sync_qos_count = port_sync_qos(port, i);
+ /* Do not let the sync qos underflow */
+ if (sync_qos_delta_sub[i] > port_sync_qos_count) {
+ KDBG_FILTERED(IMPORTANCE_CODE(IMP_SYNC_IPC_QOS, IMP_SYNC_IPC_QOS_UNDERFLOW),
+ i, VM_KERNEL_UNSLIDE_OR_PERM(port),
+ port_sync_qos_count, sync_qos_delta_sub[i]);
+
+ set_port_sync_qos(port, i, 0);
+ } else if (sync_qos_delta_sub[i] != 0) {
+ KDBG_FILTERED(IMPORTANCE_CODE(IMP_SYNC_IPC_QOS, IMP_SYNC_IPC_QOS_REMOVED),
+ i, VM_KERNEL_UNSLIDE_OR_PERM(port),
+ port_sync_qos_count, sync_qos_delta_sub[i]);
+
+ set_port_sync_qos(port, i, (port_sync_qos_count - sync_qos_delta_sub[i]));
+ }
+
+ port_sync_qos_count = port_sync_qos(port, i);
+ /* Do not let the sync qos overflow */
+ if (UCHAR_MAX - sync_qos_delta_add[i] < port_sync_qos_count) {
+ KDBG_FILTERED(IMPORTANCE_CODE(IMP_SYNC_IPC_QOS, IMP_SYNC_IPC_QOS_OVERFLOW),
+ i, VM_KERNEL_UNSLIDE_OR_PERM(port),
+ port_sync_qos_count, sync_qos_delta_add[i]);
+
+ set_port_sync_qos(port, i, UCHAR_MAX);
+ } else if (sync_qos_delta_add[i] != 0) {
+ KDBG_FILTERED(IMPORTANCE_CODE(IMP_SYNC_IPC_QOS, IMP_SYNC_IPC_QOS_APPLIED),
+ i, VM_KERNEL_UNSLIDE_OR_PERM(port),
+ port_sync_qos_count, sync_qos_delta_add[i]);
+
+ set_port_sync_qos(port, i, (port_sync_qos_count + sync_qos_delta_add[i]));
+ }
+ }
+ return (ipc_port_get_max_sync_qos_index(port) != max_sync_qos_index);
+}
+
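The two delta arrays are per-QoS-tier counters, so a caller stages a move of
a count between tiers and applies both sides in one call, as the link and
unlink paths above do. A minimal sketch, assuming old_qos/new_qos are
hypothetical tier values and the locking conditions above are met:

	sync_qos_count_t add[THREAD_QOS_LAST] = {0};
	sync_qos_count_t sub[THREAD_QOS_LAST] = {0};

	sub[old_qos]++;		/* retract the old override */
	add[new_qos]++;		/* stage the new one */

	if (ipc_port_sync_qos_delta(port, add, sub)) {
		/* max tier changed: kick the knote so kqueue
		 * re-evaluates the port's effective override */
		KNOTE(&port->ip_messages.imq_klist, 0);
	}
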
+/*
+ * Routine: ipc_port_get_max_sync_qos_index
+ * Purpose:
+ * Return the max sync qos of the port.
+ *
+ * Conditions:
+ */
+sync_qos_count_t
+ipc_port_get_max_sync_qos_index(
+ ipc_port_t port)
+{
+ int i;
+ for (i = THREAD_QOS_LAST - 1; i >= 0; i--) {
+ if (port_sync_qos(port, i) != 0) {
+ return i;
+ }
+ }
+ return THREAD_QOS_UNSPECIFIED;
+}
+
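A worked example of the descending scan (illustrative counts only):

	/*
	 * per-tier counts, indexed by thread QoS:
	 *   index:  0  1  2  3  4  5  6
	 *   count:  0  2  0  0  1  0  0
	 * the scan returns 4, the highest tier with a nonzero
	 * count; all-zero counts yield THREAD_QOS_UNSPECIFIED.
	 */
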
+/*
+ * Routine: ipc_port_adjust_sync_qos
+ * Purpose:
+ * Adjust the sync qos of the port, and of its destination
+ * port if the port is in transit.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * None.
+ */
+void
+ipc_port_adjust_sync_qos(
+ ipc_port_t port,
+ sync_qos_count_t *sync_qos_delta_add,
+ sync_qos_count_t *sync_qos_delta_sub)
+{
+ boolean_t update_knote;
+ boolean_t multiple_lock = FALSE;
+ ipc_port_t dest, base, next;
+
+ ip_lock(port);
+
+ /* Check if the port is in transit */
+ if (!ip_active(port) ||
+ (port->ip_receiver_name != MACH_PORT_NULL) ||
+ (port->ip_destination == IP_NULL)) {
+ /* lock the mqueue since port is not in-transit */
+ imq_lock(&port->ip_messages);
+ update_knote = ipc_port_sync_qos_delta(port, sync_qos_delta_add, sync_qos_delta_sub);
+ if (update_knote) {
+ KNOTE(&port->ip_messages.imq_klist, 0);
+ }
+ imq_unlock(&port->ip_messages);
+ ip_unlock(port);
+ return;
+ }
+
+ dest = port->ip_destination;
+ assert(dest != IP_NULL);
+
+ if (ip_lock_try(dest)) {
+ if (!ip_active(dest) ||
+ (dest->ip_receiver_name != MACH_PORT_NULL) ||
+ (dest->ip_destination == IP_NULL)) {
+ update_knote = ipc_port_sync_qos_delta(port, sync_qos_delta_add, sync_qos_delta_sub);
+ ip_unlock(port);
+
+ /* lock the mqueue since dest is not in-transit */
+ imq_lock(&dest->ip_messages);
+ update_knote = ipc_port_sync_qos_delta(dest, sync_qos_delta_add, sync_qos_delta_sub);
+ if (update_knote) {
+ KNOTE(&dest->ip_messages.imq_klist, 0);
+ }
+ imq_unlock(&dest->ip_messages);
+ ip_unlock(dest);
+ return;
+ }
+
+ /* dest is in transit; need to take the serialize lock */
+ ip_unlock(dest);
+ }
+
+ ip_unlock(port);
+
+ ipc_port_multiple_lock(); /* massive serialization */
+ multiple_lock = TRUE;
+
+ ip_lock(port);
+ next = port;
+
+ /* Apply the sync qos delta to all in-transit ports */
+ for (;;) {
+ boolean_t port_not_in_transit = FALSE;
+
+ if (!ip_active(next) ||
+ (next->ip_receiver_name != MACH_PORT_NULL) ||
+ (next->ip_destination == IP_NULL)) {
+ /* Get the mqueue lock for destination port to update knotes */
+ imq_lock(&next->ip_messages);
+ port_not_in_transit = TRUE;
+ }
+
+ /* Apply the sync qos delta */
+ update_knote = ipc_port_sync_qos_delta(next, sync_qos_delta_add, sync_qos_delta_sub);
+
+ if (port_not_in_transit)
+ break;
+
+ next = next->ip_destination;
+ ip_lock(next);
+ }
+
+ if (multiple_lock) {
+ ipc_port_multiple_unlock();
+ }
+
+ base = next;
+ next = port;
+
+ while (next != base) {
+ ipc_port_t prev = next;
+ next = next->ip_destination;
+
+ ip_unlock(prev);
+ }
+
+ if (update_knote) {
+ KNOTE(&base->ip_messages.imq_klist, 0);
+ }
+ imq_unlock(&base->ip_messages);
+ ip_unlock(base);
}
/*
}
absdelta = 0 - delta;
- //assert(port->ip_impcount >= absdelta);
- /* if we have enough to deduct, we're done */
if (port->ip_impcount >= absdelta) {
port->ip_impcount -= absdelta;
return delta;
}
-#if DEVELOPMENT || DEBUG
+#if (DEVELOPMENT || DEBUG)
if (port->ip_receiver_name != MACH_PORT_NULL) {
task_t target_task = port->ip_receiver->is_task;
ipc_importance_task_t target_imp = target_task->task_imp_base;
printf("Over-release of importance assertions for port 0x%x receiver pid %d (%s), "
"dropping %d assertion(s) but port only has %d remaining.\n",
port->ip_receiver_name,
- target_imp->iit_bsd_pid, target_imp->iit_procname,
+ target_pid, target_procname,
absdelta, port->ip_impcount);
} else if (base != IP_NULL) {
target_procname = "unknown";
target_pid = -1;
}
- printf("Over-release of importance assertions for port %p "
+ printf("Over-release of importance assertions for port 0x%lx "
"enqueued on port 0x%x with receiver pid %d (%s), "
"dropping %d assertion(s) but port only has %d remaining.\n",
- port, base->ip_receiver_name,
- target_imp->iit_bsd_pid, target_imp->iit_procname,
+ (unsigned long)VM_KERNEL_UNSLIDE_OR_PERM((uintptr_t)port),
+ base->ip_receiver_name,
+ target_pid, target_procname,
absdelta, port->ip_impcount);
}
#endif
+
delta = 0 - port->ip_impcount;
port->ip_impcount = 0;
return delta;
* and if so, apply the delta.
* Conditions:
* The port is referenced and locked on entry.
+ * Importance may be locked.
* Nothing else is locked.
* The lock may be dropped on exit.
* Returns TRUE if lock was dropped.
boolean_t
ipc_port_importance_delta_internal(
ipc_port_t port,
+ natural_t options,
mach_port_delta_t *deltap,
ipc_importance_task_t *imp_task)
{
if (*deltap == 0)
return FALSE;
+ assert(options == IPID_OPTION_NORMAL || options == IPID_OPTION_SENDPOSSIBLE);
+
base = port;
/* if port is in transit, have to search for end of chain */
ipc_port_multiple_unlock();
}
- /* unlock down to the base, adding a boost at each level */
+ /*
+ * If the port lock is dropped because the port is in transit, there is a
+ * race window where another thread can drain messages and/or fire a
+ * send possible notification before we get here.
+ *
+ * We solve this race by checking to see if our caller armed the send
+ * possible notification, whether or not it's been fired yet, and
+ * whether or not we've already set the port's ip_spimportant bit. If
+ * we don't need a send-possible boost, then we'll just apply a
+ * harmless 0-boost to the port.
+ */
+ if (options & IPID_OPTION_SENDPOSSIBLE) {
+ assert(*deltap == 1);
+ if (port->ip_sprequests && port->ip_spimportant == 0)
+ port->ip_spimportant = 1;
+ else
+ *deltap = 0;
+ }
+
+ /* unlock down to the base, adjusting boost(s) at each level */
for (;;) {
- /*
- * JMM TODO - because of the port unlock to grab the multiple lock
- * above, a subsequent drop of importance could race and beat
- * the "previous" increase - causing the port impcount to go
- * negative briefly. The defensive deduction performed by
- * ipc_port_impcount_delta() defeats that, and therefore can
- * cause an importance leak once the increase finally arrives.
- *
- * Need to rework the importance delta logic to be more like
- * ipc_importance_inherit_from() where it locks all it needs in
- * one pass to avoid any lock drops - to keep that race from
- * ever occuring.
- */
*deltap = ipc_port_impcount_delta(port, *deltap, base);
if (port == base) {
boolean_t
ipc_port_importance_delta(
ipc_port_t port,
+ natural_t options,
mach_port_delta_t delta)
{
ipc_importance_task_t imp_task = IIT_NULL;
boolean_t dropped;
- dropped = ipc_port_importance_delta_internal(port, &delta, &imp_task);
+ dropped = ipc_port_importance_delta_internal(port, options, &delta, &imp_task);
- if (IIT_NULL == imp_task)
+ if (IIT_NULL == imp_task || delta == 0)
return dropped;
- if (!dropped) {
- dropped = TRUE;
+ if (!dropped)
ip_unlock(port);
- }
assert(ipc_importance_task_is_any_receiver_type(imp_task));
ipc_importance_task_drop_internal_assertion(imp_task, -delta);
ipc_importance_task_release(imp_task);
- return dropped;
+ return TRUE;
}
#endif /* IMPORTANCE_INHERITANCE */
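
A hedged reminder of the locking contract, mirroring the send-possible
notification path earlier in this change: the port is locked on entry, and
a TRUE return means the lock was dropped to apply the assertion delta, so
a caller that still needs the lock must re-take it.

	ip_lock(port);
	/* ... */
	if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == TRUE) {
		ip_lock(port);	/* lock was dropped; re-acquire */
	}
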
ip_lock(port);
assert(port->ip_srights > 0);
+ if (port->ip_srights == 0) {
+ panic("Over-release of port %p send right!", port);
+ }
+
port->ip_srights--;
if (!ip_active(port)) {
if (!IP_VALID(port))
return;
+ ipc_port_unlink_special_reply_port(port, IPC_PORT_UNLINK_SR_NONE);
+
ip_lock(port);
assert(port->ip_sorights > 0);
+ if (port->ip_sorights == 0) {
+ panic("Over-release of port %p send-once right!", port);
+ }
port->ip_sorights--;
#endif /* MACH_ASSERT */
}
+/*
+ * Routine: kdp_mqueue_send_find_owner
+ * Purpose:
+ * Discover the owner of the ipc_mqueue that contains the input
+ * waitq object. The thread blocked on the waitq should be
+ * waiting for an IPC_MQUEUE_FULL event.
+ * Conditions:
+ * The 'waitinfo->wait_type' value should already be set to
+ * kThreadWaitPortSend.
+ * Note:
+ * If we find out that the containing port is actually in
+ * transit, we reset the wait_type field to reflect this.
+ */
+void
+kdp_mqueue_send_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo)
+{
+ assert(waitinfo->wait_type == kThreadWaitPortSend);
+ assert(event == IPC_MQUEUE_FULL);
+
+ ipc_mqueue_t mqueue = imq_from_waitq(waitq);
+ ipc_port_t port = ip_from_mq(mqueue); /* we are blocking on send */
+ assert(kdp_is_in_zone(port, "ipc ports"));
+
+ waitinfo->owner = 0;
+ waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port);
+ if (ip_lock_held_kdp(port)) {
+ /*
+ * someone holds the port lock, so the port may be in an
+ * inconsistent state; bail
+ */
+ waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED;
+ return;
+ }
+
+ if (ip_active(port)) {
+ if (port->ip_tempowner) {
+ if (port->ip_imp_task != IIT_NULL && port->ip_imp_task->iit_task != NULL) {
+ /* port is held by a tempowner */
+ waitinfo->owner = pid_from_task(port->ip_imp_task->iit_task);
+ } else {
+ waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT;
+ }
+ } else if (port->ip_receiver_name) {
+ /* port in a space */
+ if (port->ip_receiver == ipc_space_kernel) {
+ /*
+ * The kernel pid is 0, make this
+ * distinguishable from no-owner and
+ * inconsistent port state.
+ */
+ waitinfo->owner = STACKSHOT_WAITOWNER_KERNEL;
+ } else {
+ waitinfo->owner = pid_from_task(port->ip_receiver->is_task);
+ }
+ } else if (port->ip_destination != IP_NULL) {
+ /* port in transit */
+ waitinfo->wait_type = kThreadWaitPortSendInTransit;
+ waitinfo->owner = VM_KERNEL_UNSLIDE_OR_PERM(port->ip_destination);
+ }
+ }
+}
+
+/*
+ * Routine: kdp_mqueue_recv_find_owner
+ * Purpose:
+ * Discover the "owner" of the ipc_mqueue that contains the input
+ * waitq object. The thread blocked on the waitq is trying to
+ * receive on the mqueue.
+ * Conditions:
+ * The 'waitinfo->wait_type' value should already be set to
+ * kThreadWaitPortReceive.
+ * Note:
+ * If we find that we are actually waiting on a port set, we reset
+ * the wait_type field to reflect this.
+ */
+void
+kdp_mqueue_recv_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo)
+{
+ assert(waitinfo->wait_type == kThreadWaitPortReceive);
+ assert(event == IPC_MQUEUE_RECEIVE);
+
+ ipc_mqueue_t mqueue = imq_from_waitq(waitq);
+ waitinfo->owner = 0;
+ if (imq_is_set(mqueue)) { /* we are waiting on a port set */
+ ipc_pset_t set = ips_from_mq(mqueue);
+ assert(kdp_is_in_zone(set, "ipc port sets"));
+
+ /* Reset wait type to specify waiting on port set receive */
+ waitinfo->wait_type = kThreadWaitPortSetReceive;
+ waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(set);
+ if (ips_lock_held_kdp(set)) {
+ waitinfo->owner = STACKSHOT_WAITOWNER_PSET_LOCKED;
+ }
+ /* There is no specific owner "at the other end" of a port set, so leave unset. */
+ } else {
+ ipc_port_t port = ip_from_mq(mqueue);
+ assert(kdp_is_in_zone(port, "ipc ports"));
+
+ waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port);
+ if (ip_lock_held_kdp(port)) {
+ waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED;
+ return;
+ }
+
+ if (ip_active(port)) {
+ if (port->ip_receiver_name != MACH_PORT_NULL) {
+ waitinfo->owner = port->ip_receiver_name;
+ } else {
+ waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT;
+ }
+ }
+ }
+}
+
#if MACH_ASSERT
#include <kern/machine.h>