+ return kr;
+}
+
+os_refgrp_decl(static, task_watchports_refgrp, "task_watchports", NULL);
+
+/*
+ * task_add_turnstile_watchports:
+ * Set up watchports to boost the main thread of the task.
+ *
+ * Arguments:
+ * task: task being spawned
+ * thread: main thread of task
+ * portwatch_ports: array of watchports
+ * portwatch_count: number of watchports
+ *
+ * Conditions:
+ * Nothing locked.
+ */
+void
+task_add_turnstile_watchports(
+ task_t task,
+ thread_t thread,
+ ipc_port_t *portwatch_ports,
+ uint32_t portwatch_count)
+{
+ struct task_watchports *watchports = NULL;
+ struct task_watchport_elem *previous_elem_array[TASK_MAX_WATCHPORT_COUNT] = {};
+ os_ref_count_t refs;
+
+ /* Check if the task has terminated */
+ if (!task->active) {
+ return;
+ }
+
+ assert(portwatch_count <= TASK_MAX_WATCHPORT_COUNT);
+
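+ /*
+ * Allocate the watchports struct before taking the space lock:
+ * task_watchports_alloc_init calls kalloc, which may block.
+ */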
+ watchports = task_watchports_alloc_init(task, thread, portwatch_count);
+
+ /* Lock the ipc space */
+ is_write_lock(task->itk_space);
+
+ /* Set up watchports to boost the main thread */
+ refs = task_add_turnstile_watchports_locked(task,
+ watchports, previous_elem_array, portwatch_ports,
+ portwatch_count);
+
+ /* Drop the space lock */
+ is_write_unlock(task->itk_space);
+
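+ /*
+ * A zero refcount means no watchport was armed (the task went
+ * inactive or every port was in an invalid state) and the locked
+ * helper detached the struct; free it here.
+ */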
+ if (refs == 0) {
+ task_watchports_deallocate(watchports);
+ }
+
+ /* Drop the refs on the displaced elements in previous_elem_array */
+ for (uint32_t i = 0; i < portwatch_count && previous_elem_array[i] != NULL; i++) {
+ task_watchport_elem_deallocate(previous_elem_array[i]);
+ }
+}
+
+/*
+ * task_remove_turnstile_watchports:
+ * Clear all turnstile boosts on the task from watchports.
+ *
+ * Arguments:
+ * task: task being terminated
+ *
+ * Conditions:
+ * Nothing locked.
+ */
+void
+task_remove_turnstile_watchports(
+ task_t task)
+{
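+ /*
+ * refs defaults to a non-zero sentinel; watchports is only
+ * deallocated below when the locked helper reports that the
+ * last reference was dropped.
+ */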
+ os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
+ struct task_watchports *watchports = NULL;
+ ipc_port_t port_freelist[TASK_MAX_WATCHPORT_COUNT] = {};
+ uint32_t portwatch_count;
+
+ /* Lock the ipc space */
+ is_write_lock(task->itk_space);
+
+ /* Check if a watchport boost exists */
+ if (task->watchports == NULL) {
+ is_write_unlock(task->itk_space);
+ return;
+ }
+ watchports = task->watchports;
+ portwatch_count = watchports->tw_elem_array_count;
+
+ refs = task_remove_turnstile_watchports_locked(task, watchports,
+ port_freelist);
+
+ is_write_unlock(task->itk_space);
+
+ /* Drop the collected port references now that the space lock is released */
+ for (uint32_t i = 0; i < portwatch_count && port_freelist[i] != NULL; i++) {
+ ip_release(port_freelist[i]);
+ }
+
+ /* Clear the task and thread references for the task_watchports struct */
+ if (refs == 0) {
+ task_watchports_deallocate(watchports);
+ }
+}
+
+/*
+ * task_transfer_turnstile_watchports:
+ * Transfer all watchport turnstile boost from old task to new task.
+ *
+ * Arguments:
+ * old_task: task calling exec
+ * new_task: new exec'ed task
+ * new_thread: main thread of the new task
+ *
+ * Conditions:
+ * Nothing locked.
+ */
+void
+task_transfer_turnstile_watchports(
+ task_t old_task,
+ task_t new_task,
+ thread_t new_thread)
+{
+ struct task_watchports *old_watchports = NULL;
+ struct task_watchports *new_watchports = NULL;
+ os_ref_count_t old_refs = TASK_MAX_WATCHPORT_COUNT;
+ os_ref_count_t new_refs = TASK_MAX_WATCHPORT_COUNT;
+ uint32_t portwatch_count;
+
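+ /*
+ * Unlocked fast-path check; this is racy, so both conditions are
+ * re-checked under the space locks below.
+ */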
+ if (old_task->watchports == NULL || !new_task->active) {
+ return;
+ }
+
+ /* Read the watchport count from the old task */
+ is_write_lock(old_task->itk_space);
+ if (old_task->watchports == NULL) {
+ is_write_unlock(old_task->itk_space);
+ return;
+ }
+
+ portwatch_count = old_task->watchports->tw_elem_array_count;
+ is_write_unlock(old_task->itk_space);
+
+ new_watchports = task_watchports_alloc_init(new_task, new_thread, portwatch_count);
+
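+ /*
+ * Take both space locks, old task first. A consistent ordering
+ * here presumably avoids lock inversion with other paths that
+ * lock both spaces.
+ */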
+ /* Lock the ipc space for old task */
+ is_write_lock(old_task->itk_space);
+
+ /* Lock the ipc space for new task */
+ is_write_lock(new_task->itk_space);
+
+ /* Check if the watchport boost still exists and the new task is still active */
+ if (old_task->watchports == NULL || !new_task->active) {
+ is_write_unlock(new_task->itk_space);
+ is_write_unlock(old_task->itk_space);
+ (void)task_watchports_release(new_watchports);
+ task_watchports_deallocate(new_watchports);
+ return;
+ }
+
+ old_watchports = old_task->watchports;
+ assert(portwatch_count == old_task->watchports->tw_elem_array_count);
+
+ /* Set up the new task's watchports */
+ new_task->watchports = new_watchports;
+
+ for (uint32_t i = 0; i < portwatch_count; i++) {
+ ipc_port_t port = old_watchports->tw_elem[i].twe_port;
+
+ if (port == NULL) {
+ task_watchport_elem_clear(&new_watchports->tw_elem[i]);
+ continue;
+ }
+
+ /* Lock the port and check if it has the entry */
+ ip_lock(port);
+ imq_lock(&port->ip_messages);
+
+ task_watchport_elem_init(&new_watchports->tw_elem[i], new_task, port);
+
+ if (ipc_port_replace_watchport_elem_conditional_locked(port,
+ &old_watchports->tw_elem[i], &new_watchports->tw_elem[i]) == KERN_SUCCESS) {
+ task_watchport_elem_clear(&old_watchports->tw_elem[i]);
+
+ task_watchports_retain(new_watchports);
+ old_refs = task_watchports_release(old_watchports);
+
+ /* Check if all the old elements have been cleared */
+ if (old_refs == 0) {
+ old_task->watchports = NULL;
+ }
+ } else {
+ task_watchport_elem_clear(&new_watchports->tw_elem[i]);
+ }
+ /* mqueue and port unlocked by ipc_port_replace_watchport_elem_conditional_locked */
+ }
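+ /*
+ * Each successful swap above moved one reference from
+ * old_watchports to new_watchports; elements that failed the
+ * conditional replace were cleared on the new struct instead.
+ */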
+
+ /* Drop the initial reference on the new task_watchports struct, taken by task_watchports_alloc_init */
+ new_refs = task_watchports_release(new_watchports);
+ if (new_refs == 0) {
+ new_task->watchports = NULL;
+ }
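+ /* If every element failed the swap, new_refs hits zero here and the empty struct is freed below */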
+
+ is_write_unlock(new_task->itk_space);
+ is_write_unlock(old_task->itk_space);
+
+ /* Clear the task and thread references for old_watchports */
+ if (old_refs == 0) {
+ task_watchports_deallocate(old_watchports);
+ }
+
+ /* Clear the task and thread references for new_watchports */
+ if (new_refs == 0) {
+ task_watchports_deallocate(new_watchports);
+ }
+}
+
+/*
+ * task_add_turnstile_watchports_locked:
+ * Set up watchports to boost the main thread of the task.
+ *
+ * Arguments:
+ * task: task to boost
+ * watchports: watchport structure to be attached to the task
+ * previous_elem_array: an array of old watchport_elem to be returned to caller
+ * portwatch_ports: array of watchports
+ * portwatch_count: number of watchports
+ *
+ * Conditions:
+ * ipc space of the task locked.
+ * Returns the displaced watchport_elems in previous_elem_array.
+ */
+static os_ref_count_t
+task_add_turnstile_watchports_locked(
+ task_t task,
+ struct task_watchports *watchports,
+ struct task_watchport_elem **previous_elem_array,
+ ipc_port_t *portwatch_ports,
+ uint32_t portwatch_count)
+{
+ os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
+
+ /* Check if the task is still active */
+ if (!task->active) {
+ refs = task_watchports_release(watchports);
+ return refs;
+ }
+
+ assert(task->watchports == NULL);
+ task->watchports = watchports;
+
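+ /*
+ * i walks the caller's port array; j advances only when a
+ * previous element was displaced, keeping previous_elem_array
+ * dense for the NULL-terminated walk in the caller.
+ */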
+ for (uint32_t i = 0, j = 0; i < portwatch_count; i++) {
+ ipc_port_t port = portwatch_ports[i];
+
+ task_watchport_elem_init(&watchports->tw_elem[i], task, port);
+ if (port == NULL) {
+ task_watchport_elem_clear(&watchports->tw_elem[i]);
+ continue;
+ }
+
+ ip_lock(port);
+ imq_lock(&port->ip_messages);
+
+ /* Check if the port is in a valid state to be set up as a watchport */
+ if (ipc_port_add_watchport_elem_locked(port, &watchports->tw_elem[i],
+ &previous_elem_array[j]) != KERN_SUCCESS) {
+ task_watchport_elem_clear(&watchports->tw_elem[i]);
+ continue;
+ }
+ /* port and mqueue are unlocked on return, on both the success and failure paths */
+
+ ip_reference(port);
+ task_watchports_retain(watchports);
+ if (previous_elem_array[j] != NULL) {
+ j++;
+ }
+ }
+
+ /* Drop the initial reference on the task_watchports struct taken in os_ref_init */
+ refs = task_watchports_release(watchports);
+ if (refs == 0) {
+ task->watchports = NULL;
+ }
+
+ return refs;
+}
+
+/*
+ * task_remove_turnstile_watchports_locked:
+ * Clear all turnstile boosts on the task from watchports.
+ *
+ * Arguments:
+ * task: task to remove watchports from
+ * watchports: watchports structure for the task
+ * port_freelist: array of ports returned with ref to caller
+ *
+ * Conditions:
+ * ipc space of the task locked.
+ * Ports (one reference each) are returned in port_freelist.
+ */
+static os_ref_count_t
+task_remove_turnstile_watchports_locked(
+ task_t task,
+ struct task_watchports *watchports,
+ ipc_port_t *port_freelist)
+{
+ os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
+
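+ /*
+ * Each cleared element hands its port reference over to
+ * port_freelist; the caller releases those refs once the space
+ * lock has been dropped.
+ */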
+ for (uint32_t i = 0, j = 0; i < watchports->tw_elem_array_count; i++) {
+ ipc_port_t port = watchports->tw_elem[i].twe_port;
+ if (port == NULL) {
+ continue;
+ }
+
+ /* Lock the port and check if it has the entry */
+ ip_lock(port);
+ imq_lock(&port->ip_messages);
+ if (ipc_port_clear_watchport_elem_internal_conditional_locked(port,
+ &watchports->tw_elem[i]) == KERN_SUCCESS) {
+ task_watchport_elem_clear(&watchports->tw_elem[i]);
+ port_freelist[j++] = port;
+ refs = task_watchports_release(watchports);
+
+ /* Check if all ports have been cleared */
+ if (refs == 0) {
+ task->watchports = NULL;
+ break;
+ }
+ }
+ /* mqueue and port unlocked by ipc_port_clear_watchport_elem_internal_conditional_locked */
+ }
+ return refs;
+}
+
+/*
+ * task_watchports_alloc_init:
+ * Allocate and initialize task watchport struct.
+ *
+ * Conditions:
+ * Nothing locked.
+ */
+static struct task_watchports *
+task_watchports_alloc_init(
+ task_t task,
+ thread_t thread,
+ uint32_t count)
+{
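+ /*
+ * tw_elem is presumably a trailing (flexible) array: allocate the
+ * header plus count elements in a single block.
+ */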
+ struct task_watchports *watchports = kalloc(sizeof(struct task_watchports) +
+ count * sizeof(struct task_watchport_elem));
+
+ task_reference(task);
+ thread_reference(thread);
+ watchports->tw_task = task;
+ watchports->tw_thread = thread;
+ watchports->tw_elem_array_count = count;
+ os_ref_init(&watchports->tw_refcount, &task_watchports_refgrp);
+
+ return watchports;