+ boolean_t link_sync_qos = special_reply_port->ip_link_sync_qos;
+ ip_unlock(special_reply_port);
+ ipc_port_multiple_unlock();
+ /* return KERN_SUCCESS when link_sync_qos is not set */
+ if (!link_sync_qos) {
+ return KERN_SUCCESS;
+ }
+ return KERN_FAILURE;
+ }
+
+ ip_lock(dest_port);
+
+both_ports_locked:
+ next = dest_port;
+
+ /* Apply the qos to special reply port, capture the old qos */
+ if (special_reply_port->ip_sync_qos_override_port != IP_NULL) {
+ /* Check if qos needs to be updated */
+ if ((sync_qos_count_t)qos <= port_special_qos(special_reply_port)) {
+ imq_lock(&dest_port->ip_messages);
+ goto done_update;
+ }
+ sync_qos_delta_sub[port_special_qos(special_reply_port)]++;
+ }
+
+ set_port_special_qos(special_reply_port, (sync_qos_count_t)qos);
+ sync_qos_delta_add[qos]++;
+
+ /* Link the special reply port to dest port */
+ if (special_reply_port->ip_sync_qos_override_port == IP_NULL) {
+ /* take a reference on dest_port */
+ ip_reference(dest_port);
+ special_reply_port->ip_sync_qos_override_port = dest_port;
+ }
+
+ /* Apply the sync qos delta to all in-transit ports */
+ for (;;) {
+ boolean_t port_not_in_transit = FALSE;
+ if (!ip_active(next) ||
+ (next->ip_receiver_name != MACH_PORT_NULL) ||
+ (next->ip_destination == IP_NULL)) {
+ /* Get the mqueue lock for destination port to update knotes */
+ imq_lock(&next->ip_messages);
+ port_not_in_transit = TRUE;
+ }
+ /* Apply the sync qos delta */
+ update_knote = ipc_port_sync_qos_delta(next, sync_qos_delta_add, sync_qos_delta_sub);
+
+ if (port_not_in_transit)
+ break;
+
+ next = next->ip_destination;
+ ip_lock(next);
+ }
+done_update:
+
+ if (multiple_lock) {
+ ipc_port_multiple_unlock();
+ }
+
+ ip_unlock(special_reply_port);
+ base = next;
+ next = dest_port;
+
+ while (next != base) {
+ ipc_port_t prev = next;
+ next = next->ip_destination;
+
+ ip_unlock(prev);
+ }
+
+ if (update_knote) {
+ KNOTE(&base->ip_messages.imq_klist, 0);
+ }
+ imq_unlock(&base->ip_messages);
+ ip_unlock(base);
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_port_unlink_special_reply_port_locked
+ * Purpose:
+ * If the special port is linked to a port, adjust its sync qos override and unlink the port.
+ * Conditions:
+ * Special reply port locked on entry.
+ * Special reply port unlocked on return.
+ * Returns:
+ * None.
+ */
+void
+ipc_port_unlink_special_reply_port_locked(
+ ipc_port_t special_reply_port,
+ struct knote *kn,
+ uint8_t flags)
+{
+ ipc_port_t dest_port;
+ sync_qos_count_t sync_qos;
+ sync_qos_count_t sync_qos_delta_add[THREAD_QOS_LAST] = {0};
+ sync_qos_count_t sync_qos_delta_sub[THREAD_QOS_LAST] = {0};
+
+ /* Return if called from copy out in pseudo receive */
+ if (kn == ITH_KNOTE_PSEUDO) {
+ ip_unlock(special_reply_port);
+ return;
+ }
+
+ /* check if special port has a port linked to it */
+ if (special_reply_port->ip_specialreply == 0 ||
+ special_reply_port->ip_sync_qos_override_port == IP_NULL) {
+ set_port_special_qos(special_reply_port, 0);
+ if (flags & IPC_PORT_UNLINK_SR_CLEAR_SPECIAL_REPLY) {
+ special_reply_port->ip_specialreply = 0;
+ }
+ if (flags & IPC_PORT_UNLINK_SR_ALLOW_SYNC_QOS_LINKAGE) {
+ special_reply_port->ip_link_sync_qos = 1;
+ }
+ ip_unlock(special_reply_port);
+ return;
+ }
+
+ /*
+ * port->ip_sync_qos_override_port is not null and it is safe
+ * to access it since ip_specialreply is set.
+ */
+ dest_port = special_reply_port->ip_sync_qos_override_port;
+ sync_qos_delta_sub[port_special_qos(special_reply_port)]++;
+ sync_qos = port_special_qos(special_reply_port);
+
+ /* Clear qos delta for special reply port */
+ set_port_special_qos(special_reply_port, 0);
+ special_reply_port->ip_sync_qos_override_port = IP_NULL;
+ if (flags & IPC_PORT_UNLINK_SR_CLEAR_SPECIAL_REPLY) {
+ special_reply_port->ip_specialreply = 0;
+ }
+
+ if (flags & IPC_PORT_UNLINK_SR_ALLOW_SYNC_QOS_LINKAGE) {
+ special_reply_port->ip_link_sync_qos = 1;
+ } else {
+ special_reply_port->ip_link_sync_qos = 0;
+ }
+
+ ip_unlock(special_reply_port);
+
+ /* Add the sync qos on knote */
+ if (ITH_KNOTE_VALID(kn)) {
+ knote_adjust_sync_qos(kn, sync_qos, TRUE);
+ }
+
+ /* Adjust the sync qos of destination */
+ ipc_port_adjust_sync_qos(dest_port, sync_qos_delta_add, sync_qos_delta_sub);
+ ip_release(dest_port);
+}
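+
+/*
+ * Editor's sketch (not part of the original change): the delta arrays
+ * built above encode "remove the old override from the destination".
+ * Only the slot for the old qos is decremented, mirroring the tail of
+ * ipc_port_unlink_special_reply_port_locked(); old_qos is hypothetical.
+ */
+#if 0 /* illustrative only */
+ sync_qos_count_t add[THREAD_QOS_LAST] = {0};
+ sync_qos_count_t sub[THREAD_QOS_LAST] = {0};
+
+ sub[old_qos]++; /* old_qos == the override being removed */
+ ipc_port_adjust_sync_qos(dest_port, add, sub);
+ ip_release(dest_port); /* drop the linkage reference */
+#endif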
+
+/*
+ * Routine: ipc_port_unlink_special_reply_port
+ * Purpose:
+ * If the special port is linked to a port, adjust its sync qos override and unlink the port.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * None.
+ */
+void
+ipc_port_unlink_special_reply_port(
+ ipc_port_t special_reply_port,
+ uint8_t flags)
+{
+ ip_lock(special_reply_port);
+ ipc_port_unlink_special_reply_port_locked(special_reply_port, NULL, flags);
+ /* special_reply_port unlocked */
+}
+
+/*
+ * Routine: ipc_port_sync_qos_delta
+ * Purpose:
+ * Adjust the sync qos count associated with a port.
+ *
+ * For now, be defensive during deductions to make sure the
+ * sync_qos count for the port doesn't underflow zero.
+ * Returns:
+ * TRUE: if max sync qos of the port changes.
+ * FALSE: otherwise.
+ * Conditions:
+ * The port is referenced and locked.
+ * The mqueue is locked if port is not in-transit.
+ */
+boolean_t
+ipc_port_sync_qos_delta(
+ ipc_port_t port,
+ sync_qos_count_t *sync_qos_delta_add,
+ sync_qos_count_t *sync_qos_delta_sub)
+{
+ sync_qos_count_t max_sync_qos_index;
+
+ if (!ip_active(port)) {
+ return FALSE;
+ }
+
+ max_sync_qos_index = ipc_port_get_max_sync_qos_index(port);
+
+ for (int i = 0; i < THREAD_QOS_LAST; i++) {
+ sync_qos_count_t port_sync_qos_count = port_sync_qos(port, i);
+ /* Do not let the sync qos underflow */
+ if (sync_qos_delta_sub[i] > port_sync_qos_count) {
+ KDBG_FILTERED(IMPORTANCE_CODE(IMP_SYNC_IPC_QOS, IMP_SYNC_IPC_QOS_UNDERFLOW),
+ i, VM_KERNEL_UNSLIDE_OR_PERM(port),
+ port_sync_qos_count, sync_qos_delta_sub[i]);
+
+ set_port_sync_qos(port, i, 0);
+ } else if (sync_qos_delta_sub[i] != 0) {
+ KDBG_FILTERED(IMPORTANCE_CODE(IMP_SYNC_IPC_QOS, IMP_SYNC_IPC_QOS_REMOVED),
+ i, VM_KERNEL_UNSLIDE_OR_PERM(port),
+ port_sync_qos_count, sync_qos_delta_sub[i]);
+
+ set_port_sync_qos(port, i, (port_sync_qos_count - sync_qos_delta_sub[i]));
+ }
+
+ port_sync_qos_count = port_sync_qos(port, i);
+ /* Do not let the sync qos overflow */
+ if (UCHAR_MAX - sync_qos_delta_add[i] < port_sync_qos_count) {
+ KDBG_FILTERED(IMPORTANCE_CODE(IMP_SYNC_IPC_QOS, IMP_SYNC_IPC_QOS_OVERFLOW),
+ i, VM_KERNEL_UNSLIDE_OR_PERM(port),
+ port_sync_qos_count, sync_qos_delta_add[i]);
+
+ set_port_sync_qos(port, i, UCHAR_MAX);
+ } else if (sync_qos_delta_add[i] != 0) {
+ KDBG_FILTERED(IMPORTANCE_CODE(IMP_SYNC_IPC_QOS, IMP_SYNC_IPC_QOS_APPLIED),
+ i, VM_KERNEL_UNSLIDE_OR_PERM(port),
+ port_sync_qos_count, sync_qos_delta_add[i]);
+
+ set_port_sync_qos(port, i, (port_sync_qos_count + sync_qos_delta_add[i]));
+ }
+ }
+ return (ipc_port_get_max_sync_qos_index(port) != max_sync_qos_index);
+}
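+
+/*
+ * Editor's sketch (not part of the original change): the clamped
+ * per-level arithmetic above, reduced to plain arrays. The names
+ * SKETCH_QOS_LEVELS, sketch_max_index and sketch_apply_delta are
+ * hypothetical stand-ins for THREAD_QOS_LAST,
+ * ipc_port_get_max_sync_qos_index() and the port accessors.
+ */
+#if 0 /* illustrative only */
+#define SKETCH_QOS_LEVELS THREAD_QOS_LAST
+
+static sync_qos_count_t
+sketch_max_index(const uint8_t counts[])
+{
+ for (int i = SKETCH_QOS_LEVELS - 1; i >= 0; i--) {
+ if (counts[i] != 0)
+ return (sync_qos_count_t)i;
+ }
+ return THREAD_QOS_UNSPECIFIED;
+}
+
+static boolean_t
+sketch_apply_delta(uint8_t counts[], const uint8_t add[], const uint8_t sub[])
+{
+ sync_qos_count_t old_max = sketch_max_index(counts);
+
+ for (int i = 0; i < SKETCH_QOS_LEVELS; i++) {
+ /* clamp the subtraction at zero */
+ counts[i] = (sub[i] > counts[i]) ? 0 : (uint8_t)(counts[i] - sub[i]);
+ /* clamp the addition at UCHAR_MAX */
+ counts[i] = (UCHAR_MAX - add[i] < counts[i]) ?
+ UCHAR_MAX : (uint8_t)(counts[i] + add[i]);
+ }
+ /* report whether the highest occupied level moved */
+ return sketch_max_index(counts) != old_max;
+}
+#endif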
+
+/*
+ * Routine: ipc_port_get_max_sync_qos_index
+ * Purpose:
+ * Return the max sync qos of the port.
+ *
+ * Conditions:
+ * The port is referenced and locked.
+ */
+sync_qos_count_t
+ipc_port_get_max_sync_qos_index(
+ ipc_port_t port)
+{
+ int i;
+ for (i = THREAD_QOS_LAST - 1; i >= 0; i--) {
+ if (port_sync_qos(port, i) != 0) {
+ return i;
+ }
+ }
+ return THREAD_QOS_UNSPECIFIED;
+}
+
+/*
+ * Routine: ipc_port_adjust_sync_qos
+ * Purpose:
+ * Adjust sync qos of the port and its destination
+ * port if the port is in transit.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * None.
+ */
+void
+ipc_port_adjust_sync_qos(
+ ipc_port_t port,
+ sync_qos_count_t *sync_qos_delta_add,
+ sync_qos_count_t *sync_qos_delta_sub)
+{
+ boolean_t update_knote;
+ boolean_t multiple_lock = FALSE;
+ ipc_port_t dest, base, next;
+
+ ip_lock(port);
+
+ /* Check if the port is in transit */
+ if (!ip_active(port) ||
+ (port->ip_receiver_name != MACH_PORT_NULL) ||
+ (port->ip_destination == IP_NULL)) {
+ /* lock the mqueue since port is not in-transit */
+ imq_lock(&port->ip_messages);
+ update_knote = ipc_port_sync_qos_delta(port, sync_qos_delta_add, sync_qos_delta_sub);
+ if (update_knote) {
+ KNOTE(&port->ip_messages.imq_klist, 0);
+ }
+ imq_unlock(&port->ip_messages);
+ ip_unlock(port);
+ return;
+ }
+
+ dest = port->ip_destination;
+ assert(dest != IP_NULL);
+
+ if (ip_lock_try(dest)) {
+ if (!ip_active(dest) ||
+ (dest->ip_receiver_name != MACH_PORT_NULL) ||
+ (dest->ip_destination == IP_NULL)) {
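+ /* port is in-transit here: apply its delta first; an in-transit port has no knotes to update */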
+ update_knote = ipc_port_sync_qos_delta(port, sync_qos_delta_add, sync_qos_delta_sub);
+ ip_unlock(port);
+
+ /* lock the mqueue since dest is not in-transit */
+ imq_lock(&dest->ip_messages);
+ update_knote = ipc_port_sync_qos_delta(dest, sync_qos_delta_add, sync_qos_delta_sub);
+ if (update_knote) {
+ KNOTE(&dest->ip_messages.imq_klist, 0);
+ }
+ imq_unlock(&dest->ip_messages);
+ ip_unlock(dest);
+ return;
+ }
+
+ /* dest is in transit; need to take the serialize lock */
+ ip_unlock(dest);
+ }
+
+ ip_unlock(port);
+
+ ipc_port_multiple_lock(); /* massive serialization */
+ multiple_lock = TRUE;
+
+ ip_lock(port);
+ next = port;
+
+ /* Apply the sync qos delta to all in-transit ports */
+ for (;;) {
+ boolean_t port_not_in_transit = FALSE;
+
+ if (!ip_active(next) ||
+ (next->ip_receiver_name != MACH_PORT_NULL) ||
+ (next->ip_destination == IP_NULL)) {
+ /* Get the mqueue lock for destination port to update knotes */
+ imq_lock(&next->ip_messages);
+ port_not_in_transit = TRUE;
+ }
+
+ /* Apply the sync qos delta */
+ update_knote = ipc_port_sync_qos_delta(next, sync_qos_delta_add, sync_qos_delta_sub);
+
+ if (port_not_in_transit)
+ break;
+
+ next = next->ip_destination;
+ ip_lock(next);
+ }
+
+ if (multiple_lock) {
+ ipc_port_multiple_unlock();
+ }
+
+ base = next;
+ next = port;
+
+ while (next != base) {
+ ipc_port_t prev = next;
+ next = next->ip_destination;
+
+ ip_unlock(prev);
+ }
+
+ if (update_knote) {
+ KNOTE(&base->ip_messages.imq_klist, 0);
+ }
+ imq_unlock(&base->ip_messages);
+ ip_unlock(base);
+}
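+
+/*
+ * Editor's sketch (not part of the original change): the lock-chain
+ * discipline used above. Under the global "multiple" lock the chain of
+ * in-transit ports is locked front to back; the walk stops at the
+ * first port that is not in-transit (the base), and every lock except
+ * the base's is then released head-first. sketch_in_transit() is a
+ * hypothetical helper for the three-part test repeated above.
+ */
+#if 0 /* illustrative only */
+static boolean_t
+sketch_in_transit(ipc_port_t port)
+{
+ return ip_active(port) &&
+ port->ip_receiver_name == MACH_PORT_NULL &&
+ port->ip_destination != IP_NULL;
+}
+
+static void
+sketch_walk_chain(ipc_port_t head)
+{
+ ipc_port_t base, next = head;
+
+ ipc_port_multiple_lock(); /* serialize chain mutations */
+ ip_lock(head);
+ while (sketch_in_transit(next)) {
+ next = next->ip_destination;
+ ip_lock(next);
+ }
+ ipc_port_multiple_unlock();
+ base = next; /* base stays locked */
+
+ /* release every lock except the base's, head first */
+ for (next = head; next != base; ) {
+ ipc_port_t prev = next;
+ next = next->ip_destination;
+ ip_unlock(prev);
+ }
+ ip_unlock(base);
+}
+#endif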
+
+/*
+ * Routine: ipc_port_impcount_delta
+ * Purpose:
+ * Adjust only the importance count associated with a port.
+ * If there are any adjustments to be made to the receiver task,
+ * those are handled elsewhere.
+ *
+ * For now, be defensive during deductions to make sure the
+ * impcount for the port doesn't underflow zero. This will
+ * go away when the port boost addition is made atomic (see
+ * note in ipc_port_importance_delta()).
+ * Conditions:
+ * The port is referenced and locked.
+ * Nothing else is locked.
+ */
+mach_port_delta_t
+ipc_port_impcount_delta(
+ ipc_port_t port,
+ mach_port_delta_t delta,
+ ipc_port_t __unused base)
+{
+ mach_port_delta_t absdelta;
+
+ if (!ip_active(port)) {
+ return 0;
+ }
+
+ /* adding/doing nothing is easy */
+ if (delta >= 0) {
+ port->ip_impcount += delta;
+ return delta;
+ }
+
+ absdelta = 0 - delta;
+ if (port->ip_impcount >= absdelta) {
+ port->ip_impcount -= absdelta;
+ return delta;
+ }
+
+#if (DEVELOPMENT || DEBUG)
+ if (port->ip_receiver_name != MACH_PORT_NULL) {
+ task_t target_task = port->ip_receiver->is_task;
+ ipc_importance_task_t target_imp = target_task->task_imp_base;
+ const char *target_procname;
+ int target_pid;
+
+ if (target_imp != IIT_NULL) {
+ target_procname = target_imp->iit_procname;
+ target_pid = target_imp->iit_bsd_pid;
+ } else {
+ target_procname = "unknown";
+ target_pid = -1;
+ }
+ printf("Over-release of importance assertions for port 0x%x receiver pid %d (%s), "
+ "dropping %d assertion(s) but port only has %d remaining.\n",
+ port->ip_receiver_name,
+ target_pid, target_procname,
+ absdelta, port->ip_impcount);
+
+ } else if (base != IP_NULL) {
+ task_t target_task = base->ip_receiver->is_task;
+ ipc_importance_task_t target_imp = target_task->task_imp_base;
+ const char *target_procname;
+ int target_pid;
+
+ if (target_imp != IIT_NULL) {
+ target_procname = target_imp->iit_procname;
+ target_pid = target_imp->iit_bsd_pid;
+ } else {
+ target_procname = "unknown";
+ target_pid = -1;
+ }
+ printf("Over-release of importance assertions for port 0x%lx "
+ "enqueued on port 0x%x with receiver pid %d (%s), "
+ "dropping %d assertion(s) but port only has %d remaining.\n",
+ (unsigned long)VM_KERNEL_UNSLIDE_OR_PERM((uintptr_t)port),
+ base->ip_receiver_name,
+ target_pid, target_procname,
+ absdelta, port->ip_impcount);
+ }
+#endif
+
+ delta = 0 - port->ip_impcount;
+ port->ip_impcount = 0;
+ return delta;
+}
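+
+/*
+ * Editor's note (not part of the original change): a worked example of
+ * the defensive clamp above. With ip_impcount == 3, a caller asking to
+ * drop 5 assertions gets -3 back and the count pins at zero, so only
+ * the assertions that actually existed are dropped.
+ */
+#if 0 /* illustrative only */
+ port->ip_impcount = 3;
+ delta = ipc_port_impcount_delta(port, -5, IP_NULL);
+ assert(delta == -3 && port->ip_impcount == 0);
+#endif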
+
+/*
+ * Routine: ipc_port_importance_delta_internal
+ * Purpose:
+ * Adjust the importance count through the given port.
+ * If the port is in transit, apply the delta throughout
+ * the chain. Determine if there is a task at the
+ * base of the chain that wants/needs to be adjusted,
+ * and if so, apply the delta.
+ * Conditions:
+ * The port is referenced and locked on entry.
+ * Importance may be locked.
+ * Nothing else is locked.
+ * The lock may be dropped on exit.
+ * Returns TRUE if lock was dropped.
+ */
+#if IMPORTANCE_INHERITANCE
+
+boolean_t
+ipc_port_importance_delta_internal(
+ ipc_port_t port,
+ natural_t options,
+ mach_port_delta_t *deltap,
+ ipc_importance_task_t *imp_task)
+{
+ ipc_port_t next, base;
+ boolean_t dropped = FALSE;
+
+ *imp_task = IIT_NULL;
+
+ if (*deltap == 0)
+ return FALSE;
+
+ assert(options == IPID_OPTION_NORMAL || options == IPID_OPTION_SENDPOSSIBLE);
+
+ base = port;
+
+ /* if port is in transit, have to search for end of chain */
+ if (ip_active(port) &&
+ port->ip_destination != IP_NULL &&
+ port->ip_receiver_name == MACH_PORT_NULL) {
+
+ dropped = TRUE;
+
+ ip_unlock(port);
+ ipc_port_multiple_lock(); /* massive serialization */
+ ip_lock(base);
+
+ while (ip_active(base) &&
+ base->ip_destination != IP_NULL &&
+ base->ip_receiver_name == MACH_PORT_NULL) {
+
+ base = base->ip_destination;
+ ip_lock(base);
+ }
+ ipc_port_multiple_unlock();
+ }
+
+ /*
+ * If the port lock is dropped because the port is in transit, there is a
+ * race window where another thread can drain messages and/or fire a
+ * send possible notification before we get here.
+ *
+ * We solve this race by checking to see if our caller armed the send
+ * possible notification, whether or not it's been fired yet, and
+ * whether or not we've already set the port's ip_spimportant bit. If
+ * we don't need a send-possible boost, then we'll just apply a
+ * harmless 0-boost to the port.
+ */
+ if (options & IPID_OPTION_SENDPOSSIBLE) {
+ assert(*deltap == 1);
+ if (port->ip_sprequests && port->ip_spimportant == 0)
+ port->ip_spimportant = 1;
+ else
+ *deltap = 0;
+ }
+
+ /* unlock down to the base, adjusting boost(s) at each level */
+ for (;;) {
+ *deltap = ipc_port_impcount_delta(port, *deltap, base);
+
+ if (port == base) {
+ break;
+ }
+
+ /* port is in transit */
+ assert(port->ip_tempowner == 0);
+ next = port->ip_destination;
+ ip_unlock(port);
+ port = next;
+ }
+
+ /* find the task (if any) to boost according to the base */
+ if (ip_active(base)) {
+ if (base->ip_tempowner != 0) {
+ if (IIT_NULL != base->ip_imp_task)
+ *imp_task = base->ip_imp_task;
+ /* otherwise don't boost */
+
+ } else if (base->ip_receiver_name != MACH_PORT_NULL) {
+ ipc_space_t space = base->ip_receiver;
+
+ /* only spaces with boost-accepting tasks */
+ if (space->is_task != TASK_NULL &&
+ ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) {
+ *imp_task = space->is_task->task_imp_base;
+ }
+ }
+ }
+
+ /*
+ * Only the base is locked. If we have to hold or drop task
+ * importance assertions, we'll have to drop that lock as well.
+ */
+ if (*imp_task != IIT_NULL) {
+ /* take a reference before unlocking base */
+ ipc_importance_task_reference(*imp_task);
+ }
+
+ if (dropped == TRUE) {
+ ip_unlock(base);
+ }
+
+ return dropped;
+}
+#endif /* IMPORTANCE_INHERITANCE */
+
+/*
+ * Routine: ipc_port_importance_delta
+ * Purpose:
+ * Adjust the importance count through the given port.
+ * If the port is in transit, apply the delta throughout
+ * the chain.
+ *
+ * If there is a task at the base of the chain that wants/needs
+ * to be adjusted, apply the delta.
+ * Conditions:
+ * The port is referenced and locked on entry.
+ * Nothing else is locked.
+ * The lock may be dropped on exit.
+ * Returns TRUE if lock was dropped.
+ */
+#if IMPORTANCE_INHERITANCE
+
+boolean_t
+ipc_port_importance_delta(
+ ipc_port_t port,
+ natural_t options,
+ mach_port_delta_t delta)
+{
+ ipc_importance_task_t imp_task = IIT_NULL;
+ boolean_t dropped;
+
+ dropped = ipc_port_importance_delta_internal(port, options, &delta, &imp_task);
+
+ if (IIT_NULL == imp_task || delta == 0)
+ return dropped;
+
+ if (!dropped)
+ ip_unlock(port);
+
+ assert(ipc_importance_task_is_any_receiver_type(imp_task));
+
+ if (delta > 0)
+ ipc_importance_task_hold_internal_assertion(imp_task, delta);
+ else
+ ipc_importance_task_drop_internal_assertion(imp_task, -delta);
+
+ ipc_importance_task_release(imp_task);
+ return TRUE;
+}
+#endif /* IMPORTANCE_INHERITANCE */
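+
+/*
+ * Editor's sketch (not part of the original change): the caller
+ * contract for ipc_port_importance_delta(). The port is entered locked
+ * and referenced; a TRUE return means the lock was dropped along the
+ * way and must not be assumed held afterwards.
+ */
+#if 0 /* illustrative only */
+ ip_lock(port);
+ if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, 1) == FALSE) {
+ /* lock survived; port may still be touched */
+ ip_unlock(port);
+ }
+ /* on TRUE, the lock is already gone */
+#endif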
+
+/*
+ * Routine: ipc_port_lookup_notify
+ * Purpose:
+ * Make a send-once notify port from a receive right.
+ * Returns IP_NULL if name doesn't denote a receive right.
+ * Conditions:
+ * The space must be locked (read or write) and active.
+ * Being the active space, we can rely on thread server_id
+ * context to give us the proper server level sub-order
+ * within the space.
+ */
+
+ipc_port_t
+ipc_port_lookup_notify(
+ ipc_space_t space,
+ mach_port_name_t name)
+{
+ ipc_port_t port;
+ ipc_entry_t entry;
+
+ assert(is_active(space));
+
+ entry = ipc_entry_lookup(space, name);
+ if (entry == IE_NULL)
+ return IP_NULL;
+ if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0)
+ return IP_NULL;
+
+ __IGNORE_WCASTALIGN(port = (ipc_port_t) entry->ie_object);
+ assert(port != IP_NULL);
+
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_receiver_name == name);
+ assert(port->ip_receiver == space);
+
+ ip_reference(port);
+ port->ip_sorights++;
+ ip_unlock(port);
+
+ return port;
+}
+
+/*
+ * Routine: ipc_port_make_send_locked
+ * Purpose:
+ * Make a naked send right from a receive right.
+ *
+ * Conditions:
+ * port locked and active.
+ */
+ipc_port_t
+ipc_port_make_send_locked(
+ ipc_port_t port)
+{
+ assert(ip_active(port));
+ port->ip_mscount++;
+ port->ip_srights++;
+ ip_reference(port);
+ return port;
+}
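+
+/*
+ * Editor's sketch (not part of the original change): typical use of
+ * ipc_port_make_send_locked(). The caller supplies the lock and the
+ * activity check; the routine itself bumps mscount, srights and the
+ * reference count.
+ */
+#if 0 /* illustrative only */
+ ip_lock(port);
+ if (ip_active(port)) {
+ sright = ipc_port_make_send_locked(port); /* +1 sright, +1 ref */
+ }
+ ip_unlock(port);
+#endif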
+
+/*