+ return kr;
+}
+
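+/* Reference-count group used for task_watchports structures */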
+os_refgrp_decl(static, task_watchports_refgrp, "task_watchports", NULL);
+
+/*
+ * task_add_turnstile_watchports:
+ * Set up watchports to boost the main thread of the task.
+ *
+ * Arguments:
+ * task: task being spawned
+ * thread: main thread of task
+ * portwatch_ports: array of watchports
+ * portwatch_count: number of watchports
+ *
+ * Conditions:
+ * Nothing locked.
+ */
+void
+task_add_turnstile_watchports(
+ task_t task,
+ thread_t thread,
+ ipc_port_t *portwatch_ports,
+ uint32_t portwatch_count)
+{
+ struct task_watchports *watchports = NULL;
+ struct task_watchport_elem *previous_elem_array[TASK_MAX_WATCHPORT_COUNT] = {};
+ os_ref_count_t refs;
+
+ /* Check if the task has terminated */
+ if (!task->active) {
+ return;
+ }
+
+ assert(portwatch_count <= TASK_MAX_WATCHPORT_COUNT);
+
+ watchports = task_watchports_alloc_init(task, thread, portwatch_count);
+
+ /* Lock the ipc space */
+ is_write_lock(task->itk_space);
+
+ /* Set up watchports to boost the main thread */
+ refs = task_add_turnstile_watchports_locked(task,
+ watchports, previous_elem_array, portwatch_ports,
+ portwatch_count);
+
+ /* Drop the space lock */
+ is_write_unlock(task->itk_space);
+
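+ /*
+  * refs is the task_watchports refcount left after the locked setup;
+  * zero means nothing holds the structure any longer, so free it.
+  */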
+ if (refs == 0) {
+ task_watchports_deallocate(watchports);
+ }
+
+ /* Drop the refs held by the previous_elem_array entries */
+ for (uint32_t i = 0; i < portwatch_count && previous_elem_array[i] != NULL; i++) {
+ task_watchport_elem_deallocate(previous_elem_array[i]);
+ }
+}
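+
+/*
+ * Illustrative sketch only (not part of this change): a spawn path that
+ * has gathered the watchports named by its port actions might call this
+ * before the main thread runs; the helper name below is hypothetical:
+ *
+ *	ipc_port_t ports[TASK_MAX_WATCHPORT_COUNT];
+ *	uint32_t count = gather_portwatch_ports(ports);
+ *	task_add_turnstile_watchports(new_task, main_thread, ports, count);
+ */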
+
+/*
+ * task_remove_turnstile_watchports:
+ * Clear all turnstile boosts on the task from its watchports.
+ *
+ * Arguments:
+ * task: task being terminated
+ *
+ * Conditions:
+ * Nothing locked.
+ */
+void
+task_remove_turnstile_watchports(
+ task_t task)
+{
+ os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
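+ /* Initialized non-zero; only the refcount returned by the locked remove below is meaningful */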
+ struct task_watchports *watchports = NULL;
+ ipc_port_t port_freelist[TASK_MAX_WATCHPORT_COUNT] = {};
+ uint32_t portwatch_count;
+
+ /* Lock the ipc space */
+ is_write_lock(task->itk_space);
+
+ /* Check if any watchport boost exists */
+ if (task->watchports == NULL) {
+ is_write_unlock(task->itk_space);
+ return;
+ }
+ watchports = task->watchports;
+ portwatch_count = watchports->tw_elem_array_count;
+
+ refs = task_remove_turnstile_watchports_locked(task, watchports,
+ port_freelist);
+
+ is_write_unlock(task->itk_space);
+
+ /* Drop all the port references */
+ for (uint32_t i = 0; i < portwatch_count && port_freelist[i] != NULL; i++) {
+ ip_release(port_freelist[i]);
+ }
+
+ /* Clear the task and thread references for the task_watchports struct */
+ if (refs == 0) {
+ task_watchports_deallocate(watchports);
+ }
+}
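+
+/*
+ * Illustrative sketch only: a termination path would call this once the
+ * task is shutting down, dropping any watchport boost on the main thread:
+ *
+ *	task_remove_turnstile_watchports(terminating_task);
+ */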
+
+/*
+ * task_transfer_turnstile_watchports:
+ * Transfer all watchport turnstile boosts from the old task to the new task.
+ *
+ * Arguments:
+ * old_task: task calling exec
+ * new_task: new exec'ed task
+ * new_thread: main thread of the new task
+ *
+ * Conditions:
+ * Nothing locked.
+ */
+void
+task_transfer_turnstile_watchports(
+ task_t old_task,
+ task_t new_task,
+ thread_t new_thread)
+{
+ struct task_watchports *old_watchports = NULL;
+ struct task_watchports *new_watchports = NULL;
+ os_ref_count_t old_refs = TASK_MAX_WATCHPORT_COUNT;
+ os_ref_count_t new_refs = TASK_MAX_WATCHPORT_COUNT;
+ uint32_t portwatch_count;
+
+ if (old_task->watchports == NULL || !new_task->active) {
+ return;
+ }
+
+ /* Get the watchport count from the old task */
+ is_write_lock(old_task->itk_space);
+ if (old_task->watchports == NULL) {
+ is_write_unlock(old_task->itk_space);
+ return;
+ }
+
+ portwatch_count = old_task->watchports->tw_elem_array_count;
+ is_write_unlock(old_task->itk_space);
+
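+ /*
+  * The count read above only sizes the allocation; the watchports
+  * pointer is re-checked (and the count asserted unchanged) once both
+  * spaces are locked below.
+  */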
+ new_watchports = task_watchports_alloc_init(new_task, new_thread, portwatch_count);
+
+ /* Lock the ipc space for old task */
+ is_write_lock(old_task->itk_space);
+
+ /* Lock the ipc space for new task */
+ is_write_lock(new_task->itk_space);
+
+ /* Check if any watchport boost exists */
+ if (old_task->watchports == NULL || !new_task->active) {
+ is_write_unlock(new_task->itk_space);
+ is_write_unlock(old_task->itk_space);
+ (void)task_watchports_release(new_watchports);
+ task_watchports_deallocate(new_watchports);
+ return;
+ }
+
+ old_watchports = old_task->watchports;
+ assert(portwatch_count == old_task->watchports->tw_elem_array_count);
+
+ /* Set up the new task's watchports */
+ new_task->watchports = new_watchports;
+
+ for (uint32_t i = 0; i < portwatch_count; i++) {
+ ipc_port_t port = old_watchports->tw_elem[i].twe_port;
+
+ if (port == NULL) {
+ task_watchport_elem_clear(&new_watchports->tw_elem[i]);
+ continue;
+ }
+
+ /* Lock the port and check if it has the entry */
+ ip_lock(port);
+ imq_lock(&port->ip_messages);
+
+ task_watchport_elem_init(&new_watchports->tw_elem[i], new_task, port);
+
+ if (ipc_port_replace_watchport_elem_conditional_locked(port,
+ &old_watchports->tw_elem[i], &new_watchports->tw_elem[i]) == KERN_SUCCESS) {
+ task_watchport_elem_clear(&old_watchports->tw_elem[i]);
+
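+ /* Move one structure reference from the old watchports to the new for this element */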
+ task_watchports_retain(new_watchports);
+ old_refs = task_watchports_release(old_watchports);
+
+ /* Check if all the ports have been cleaned */
+ if (old_refs == 0) {
+ old_task->watchports = NULL;
+ }
+ } else {
+ task_watchport_elem_clear(&new_watchports->tw_elem[i]);
+ }
+ /* mqueue and port unlocked by ipc_port_replace_watchport_elem_conditional_locked */
+ }
+
+ /* Drop the reference on the new task_watchports struct returned by task_watchports_alloc_init */
+ new_refs = task_watchports_release(new_watchports);
+ if (new_refs == 0) {
+ new_task->watchports = NULL;
+ }
+
+ is_write_unlock(new_task->itk_space);
+ is_write_unlock(old_task->itk_space);
+
+ /* Clear the task and thread references for old_watchports */
+ if (old_refs == 0) {
+ task_watchports_deallocate(old_watchports);
+ }
+
+ /* Clear the task and thread references for new_watchports */
+ if (new_refs == 0) {
+ task_watchports_deallocate(new_watchports);
+ }