+ /* Get the watchport count from the old task */
+ is_write_lock(old_task->itk_space);
+ if (old_task->watchports == NULL) {
+ is_write_unlock(old_task->itk_space);
+ return;
+ }
+
+ portwatch_count = old_task->watchports->tw_elem_array_count;
+ is_write_unlock(old_task->itk_space);
+
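+ /*
+ * Allocate the new watchports struct with no locks held, since the
+ * allocation may block; the old task's watchports are re-validated
+ * below once both ipc spaces are locked again.
+ */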
+ new_watchports = task_watchports_alloc_init(new_task, new_thread, portwatch_count);
+
+ /* Lock the ipc space for old task */
+ is_write_lock(old_task->itk_space);
+
+ /* Lock the ipc space for new task */
+ is_write_lock(new_task->itk_space);
+
+ /* Check if the watchport boosts still exist and the new task is still active */
+ if (old_task->watchports == NULL || !new_task->active) {
+ is_write_unlock(new_task->itk_space);
+ is_write_unlock(old_task->itk_space);
+ (void)task_watchports_release(new_watchports);
+ task_watchports_deallocate(new_watchports);
+ return;
+ }
+
+ old_watchports = old_task->watchports;
+ assert(portwatch_count == old_task->watchports->tw_elem_array_count);
+
+ /* Setup new task watchports */
+ new_task->watchports = new_watchports;
+
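+ /*
+ * For each port, swap the old task's watchport element for the new
+ * task's element under the port and mqueue locks; on success, move one
+ * reference on the watchports struct from the old struct to the new one.
+ */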
+ for (uint32_t i = 0; i < portwatch_count; i++) {
+ ipc_port_t port = old_watchports->tw_elem[i].twe_port;
+
+ if (port == NULL) {
+ task_watchport_elem_clear(&new_watchports->tw_elem[i]);
+ continue;
+ }
+
+ /* Lock the port and check if it has the entry */
+ ip_lock(port);
+ imq_lock(&port->ip_messages);
+
+ task_watchport_elem_init(&new_watchports->tw_elem[i], new_task, port);
+
+ if (ipc_port_replace_watchport_elem_conditional_locked(port,
+ &old_watchports->tw_elem[i], &new_watchports->tw_elem[i]) == KERN_SUCCESS) {
+ task_watchport_elem_clear(&old_watchports->tw_elem[i]);
+
+ task_watchports_retain(new_watchports);
+ old_refs = task_watchports_release(old_watchports);
+
+ /* Check if all ports are cleaned */
+ if (old_refs == 0) {
+ old_task->watchports = NULL;
+ }
+ } else {
+ task_watchport_elem_clear(&new_watchports->tw_elem[i]);
+ }
+ /* mqueue and port unlocked by ipc_port_replace_watchport_elem_conditional_locked */
+ }
+
+ /* Drop the reference on new task_watchports struct returned by task_watchports_alloc_init */
+ new_refs = task_watchports_release(new_watchports);
+ if (new_refs == 0) {
+ new_task->watchports = NULL;
+ }
+
+ is_write_unlock(new_task->itk_space);
+ is_write_unlock(old_task->itk_space);
+
+ /* Clear the task and thread references for old_watchports */
+ if (old_refs == 0) {
+ task_watchports_deallocate(old_watchports);
+ }
+
+ /* Clear the task and thread references for new_watchports */
+ if (new_refs == 0) {
+ task_watchports_deallocate(new_watchports);
+ }
+}
+
+/*
+ * task_add_turnstile_watchports_locked:
+ * Set up watchports to boost the main thread of the task.
+ *
+ * Arguments:
+ * task: task to boost
+ * watchports: watchport structure to be attached to the task
+ * previous_elem_array: an array of old watchport_elem to be returned to caller
+ * portwatch_ports: array of ports to be set up as watchports
+ * portwatch_count: number of watchports
+ *
+ * Conditions:
+ * ipc space of the task locked.
+ * returns array of old watchport_elem in previous_elem_array
+ */
+static os_ref_count_t
+task_add_turnstile_watchports_locked(
+ task_t task,
+ struct task_watchports *watchports,
+ struct task_watchport_elem **previous_elem_array,
+ ipc_port_t *portwatch_ports,
+ uint32_t portwatch_count)
+{
+ os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
+
+ /* Check if the task is still active */
+ if (!task->active) {
+ refs = task_watchports_release(watchports);
+ return refs;
+ }
+
+ assert(task->watchports == NULL);
+ task->watchports = watchports;
+
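+ /*
+ * Each port successfully set up as a watchport takes a reference on the
+ * port and on the watchports struct, so the struct's refcount tracks the
+ * number of armed watchports.
+ */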
+ for (uint32_t i = 0, j = 0; i < portwatch_count; i++) {
+ ipc_port_t port = portwatch_ports[i];
+
+ task_watchport_elem_init(&watchports->tw_elem[i], task, port);
+ if (port == NULL) {
+ task_watchport_elem_clear(&watchports->tw_elem[i]);
+ continue;
+ }
+
+ ip_lock(port);
+ imq_lock(&port->ip_messages);
+
+ /* Check if port is in valid state to be setup as watchport */
+ if (ipc_port_add_watchport_elem_locked(port, &watchports->tw_elem[i],
+ &previous_elem_array[j]) != KERN_SUCCESS) {
+ task_watchport_elem_clear(&watchports->tw_elem[i]);
+ continue;
+ }
+ /* port and mqueue unlocked on return */
+
+ ip_reference(port);
+ task_watchports_retain(watchports);
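+ /* Only advance j when an existing element was displaced, keeping the returned array packed */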
+ if (previous_elem_array[j] != NULL) {
+ j++;
+ }
+ }
+
+ /* Drop the reference on the task_watchports struct taken by os_ref_init */
+ refs = task_watchports_release(watchports);
+ if (refs == 0) {
+ task->watchports = NULL;
+ }
+
+ return refs;
+}
+
+/*
+ * task_remove_turnstile_watchports_locked:
+ * Clear all turnstile boosts on the task from its watchports.
+ *
+ * Arguments:
+ * task: task to remove watchports from
+ * watchports: watchports structure for the task
+ * port_freelist: array of ports returned with ref to caller
+ *
+ * Conditions:
+ * ipc space of the task locked.
+ * an array of ports, each with a reference, is returned in port_freelist
+ */
+static os_ref_count_t
+task_remove_turnstile_watchports_locked(
+ task_t task,
+ struct task_watchports *watchports,
+ ipc_port_t *port_freelist)
+{
+ os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
+
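+ /*
+ * Detach each element from its port under the port lock and hand the
+ * port, still holding its reference, back to the caller in port_freelist.
+ */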
+ for (uint32_t i = 0, j = 0; i < watchports->tw_elem_array_count; i++) {
+ ipc_port_t port = watchports->tw_elem[i].twe_port;
+ if (port == NULL) {
+ continue;
+ }
+
+ /* Lock the port and check if it has the entry */
+ ip_lock(port);
+ imq_lock(&port->ip_messages);
+ if (ipc_port_clear_watchport_elem_internal_conditional_locked(port,
+ &watchports->tw_elem[i]) == KERN_SUCCESS) {
+ task_watchport_elem_clear(&watchports->tw_elem[i]);
+ port_freelist[j++] = port;
+ refs = task_watchports_release(watchports);
+
+ /* Check if all ports are cleaned */
+ if (refs == 0) {
+ task->watchports = NULL;
+ break;
+ }
+ }
+ /* mqueue and port unlocked by ipc_port_clear_watchport_elem_internal_conditional_locked */
+ }
+ return refs;
+}
+
+/*
+ * task_watchports_alloc_init:
+ * Allocate and initialize task watchport struct.
+ *
+ * Conditions:
+ * Nothing locked.
+ */
+static struct task_watchports *
+task_watchports_alloc_init(
+ task_t task,
+ thread_t thread,
+ uint32_t count)
+{
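+ /* The watchport element array is allocated inline, immediately after the struct */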
+ struct task_watchports *watchports = kalloc(sizeof(struct task_watchports) +
+ count * sizeof(struct task_watchport_elem));
+
+ task_reference(task);
+ thread_reference(thread);
+ watchports->tw_task = task;
+ watchports->tw_thread = thread;
+ watchports->tw_elem_array_count = count;
+ os_ref_init(&watchports->tw_refcount, &task_watchports_refgrp);
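+ /* The caller drops this initial reference once setup is complete */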
+
+ return watchports;
+}
+
+/*
+ * task_watchports_deallocate:
+ * Deallocate task watchport struct.
+ *
+ * Conditions:
+ * Nothing locked.
+ */
+static void
+task_watchports_deallocate(
+ struct task_watchports *watchports)
+{
+ uint32_t portwatch_count = watchports->tw_elem_array_count;
+
+ task_deallocate(watchports->tw_task);
+ thread_deallocate(watchports->tw_thread);
+ kfree(watchports, sizeof(struct task_watchports) + portwatch_count * sizeof(struct task_watchport_elem));
+}
+
+/*
+ * task_watchport_elem_deallocate:
+ * Deallocate a task watchport element and release its ref on the task_watchports struct.
+ *
+ * Conditions:
+ * Nothing locked.
+ */
+void
+task_watchport_elem_deallocate(
+ struct task_watchport_elem *watchport_elem)
+{
+ os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
+ task_t task = watchport_elem->twe_task;
+ struct task_watchports *watchports = NULL;
+ ipc_port_t port = NULL;
+
+ assert(task != NULL);
+
+ /* Take the space lock to modify the element */
+ is_write_lock(task->itk_space);
+
+ watchports = task->watchports;
+ assert(watchports != NULL);
+
+ port = watchport_elem->twe_port;
+ assert(port != NULL);
+
+ task_watchport_elem_clear(watchport_elem);
+ refs = task_watchports_release(watchports);
+
+ if (refs == 0) {
+ task->watchports = NULL;
+ }
+
+ is_write_unlock(task->itk_space);
+
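+ /* Release the port and, on the last reference, the watchports struct outside the space lock */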
+ ip_release(port);
+ if (refs == 0) {
+ task_watchports_deallocate(watchports);
+ }
+}
+
+/*
+ * task_has_watchports:
+ * Return TRUE if task has watchport boosts.
+ *
+ * Conditions:
+ * Nothing locked.
+ */
+boolean_t
+task_has_watchports(task_t task)
+{
+ return task->watchports != NULL;
+}
+
+#if DEVELOPMENT || DEBUG
+
+extern void IOSleep(int);
+
+kern_return_t
+task_disconnect_page_mappings(task_t task)
+{
+ int n;
+
+ if (task == TASK_NULL || task == kernel_task) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ /*
+ * This function strips all of the mappings from the pmap for the
+ * specified task, forcing the task to re-fault every page it is
+ * actively using.  This lets us approximate the task's true working
+ * set.  We only engage if at least one thread in the task is runnable,
+ * but we keep sweeping for a while (the limit of 100 sweeps is
+ * arbitrary and should be revisited as we gain experience) to get a
+ * better view of which areas within a page are being visited, as
+ * opposed to only seeing the first fault on a page after the task
+ * becomes runnable.  In the future we may try to block until awakened
+ * by a thread in this task being made runnable; for now we
+ * periodically poll from the user-level debug tool driving the sysctl.
+ */
+ for (n = 0; n < 100; n++) {
+ thread_t thread;
+ boolean_t runnable;
+ boolean_t do_unnest;
+ int page_count;
+
+ runnable = FALSE;
+ do_unnest = FALSE;
+
+ task_lock(task);
+
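+ /* Check whether any thread in the task is currently runnable */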
+ queue_iterate(&task->threads, thread, thread_t, task_threads) {
+ if (thread->state & TH_RUN) {
+ runnable = TRUE;
+ break;
+ }
+ }
+ if (n == 0) {
+ task->task_disconnected_count++;
+ }
+
+ if (task->task_unnested == FALSE) {
+ if (runnable == TRUE) {
+ task->task_unnested = TRUE;
+ do_unnest = TRUE;
+ }
+ }
+ task_unlock(task);
+
+ if (runnable == FALSE) {
+ break;
+ }
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_START,
+ task, do_unnest, task->task_disconnected_count, 0, 0);
+
+ page_count = vm_map_disconnect_page_mappings(task->map, do_unnest);
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_END,
+ task, page_count, 0, 0, 0);
+
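+ /* Pause briefly every five sweeps */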
+ if ((n % 5) == 4) {
+ IOSleep(1);
+ }
+ }
+ return KERN_SUCCESS;
+}
+
+#endif
+
+
+#if CONFIG_FREEZE
+
+/*
+ * task_freeze:
+ *
+ * Freeze a task.
+ *
+ * Conditions:
+ * The caller holds a reference to the task
+ */
+extern void vm_wake_compactor_swapper(void);
+extern queue_head_t c_swapout_list_head;
+extern struct freezer_context freezer_context_global;
+
+kern_return_t