+/*
+ * task_resume2:
+ *
+ * Resume a task using the suspension token obtained when it was
+ * suspended.  The token's reference is consumed here regardless of
+ * whether the underlying resume succeeds.
+ */
+kern_return_t
+task_resume2(
+	register task_suspension_token_t task)
+{
+	kern_return_t result;
+
+	result = task_resume_internal(task);
+	task_suspension_token_deallocate(task);
+	return (result);
+}
+
+/*
+ * task_suspension_notify:
+ *
+ * IPC notification handler for a task suspension token port.  Converts
+ * the notification's remote port back into its suspension token,
+ * releases whatever hold the notification implies, and finally drops
+ * the token reference taken by the conversion.  Always returns TRUE to
+ * indicate the message was consumed.
+ */
+boolean_t
+task_suspension_notify(mach_msg_header_t *request_header)
+{
+ ipc_port_t port = (ipc_port_t) request_header->msgh_remote_port;
+ task_t task = convert_port_to_task_suspension_token(port);
+ mach_msg_type_number_t not_count;
+
+ /* No token behind this port, or it is the kernel task: consume the message. */
+ if (task == TASK_NULL || task == kernel_task)
+ return TRUE; /* nothing to do */
+
+ switch (request_header->msgh_id) {
+
+ case MACH_NOTIFY_SEND_ONCE:
+ /* release the hold held by this specific send-once right */
+ task_lock(task);
+ release_task_hold(task, TASK_HOLD_NORMAL);
+ task_unlock(task);
+ break;
+
+ case MACH_NOTIFY_NO_SENDERS:
+ /*
+ * Compare the make-send count recorded in the notification with the
+ * port's current count: if they match, no new send rights were created
+ * since the notification was armed, so all remaining legacy holds can
+ * be released; otherwise the notification must be re-armed.
+ */
+ not_count = ((mach_no_senders_notification_t *)request_header)->not_count;
+
+ task_lock(task);
+ ip_lock(port);
+ if (port->ip_mscount == not_count) {
+
+ /* release all the [remaining] outstanding legacy holds */
+ assert(port->ip_nsrequest == IP_NULL);
+ ip_unlock(port);
+ release_task_hold(task, TASK_HOLD_LEGACY_ALL);
+ task_unlock(task);
+
+ } else if (port->ip_nsrequest == IP_NULL) {
+ ipc_port_t old_notify;
+
+ task_unlock(task);
+ /* new send rights, re-arm notification at current make-send count */
+ /* ipc_port_nsrequest() consumes the port lock on return */
+ ipc_port_nsrequest(port, port->ip_mscount, ipc_port_make_sonce_locked(port), &old_notify);
+ assert(old_notify == IP_NULL);
+ /* port unlocked */
+ } else {
+ /* a no-senders request is already armed; nothing to release or re-arm */
+ ip_unlock(port);
+ task_unlock(task);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ task_suspension_token_deallocate(task); /* drop token reference */
+ return TRUE;
+}
+
+/*
+ * task_pidsuspend_locked:
+ *
+ * Place a pidsuspend hold on a task.  Called with the task locked.
+ * Fails with KERN_FAILURE if the task is already pidsuspended; on any
+ * hold-placement failure the pidsuspended flag is rolled back.
+ */
+kern_return_t
+task_pidsuspend_locked(task_t task)
+{
+	kern_return_t result;
+
+	if (task->pidsuspended)
+		return (KERN_FAILURE);
+
+	task->pidsuspended = TRUE;
+
+	result = place_task_hold(task, TASK_HOLD_PIDSUSPEND);
+	if (result != KERN_SUCCESS)
+		task->pidsuspended = FALSE;
+
+	return (result);
+}
+
+
+/*
+ * task_pidsuspend:
+ *
+ * Suspends a task by placing a hold on its threads.
+ *
+ * Conditions:
+ * 	The caller holds a reference to the task
+ */
+kern_return_t
+task_pidsuspend(
+	register task_t	task)
+{
+	kern_return_t result;
+
+	if (task == TASK_NULL || task == kernel_task)
+		return (KERN_INVALID_ARGUMENT);
+
+	task_lock(task);
+	result = task_pidsuspend_locked(task);
+	task_unlock(task);
+
+	return (result);
+}
+
+/* If enabled, we bring all the frozen pages back in prior to resumption; otherwise, they're faulted back in on demand */
+#define THAW_ON_RESUME 1
+
+/*
+ * task_pidresume:
+ *	Resumes a previously pid-suspended task, and (when freezing is
+ *	configured with THAW_ON_RESUME) thaws it if it was frozen.
+ *
+ * Conditions:
+ *	The caller holds a reference to the task
+ */
+kern_return_t
+task_pidresume(
+ register task_t task)
+{
+ kern_return_t kr;
+
+ if (task == TASK_NULL || task == kernel_task)
+ return (KERN_INVALID_ARGUMENT);
+
+ task_lock(task);
+
+#if (CONFIG_FREEZE && THAW_ON_RESUME)
+
+ /*
+ * Wait out any in-flight freeze/thaw transition, then claim the
+ * transition flag ourselves so freeze/thaw cannot run concurrently.
+ */
+ while (task->changing_freeze_state) {
+
+ assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
+ task_unlock(task);
+ thread_block(THREAD_CONTINUE_NULL);
+
+ task_lock(task);
+ }
+ task->changing_freeze_state = TRUE;
+#endif
+
+ kr = release_task_hold(task, TASK_HOLD_PIDSUSPEND);
+
+ task_unlock(task);
+
+#if (CONFIG_FREEZE && THAW_ON_RESUME)
+ /*
+ * NOTE(review): task->frozen is read here after the task lock has been
+ * dropped — presumably safe because changing_freeze_state is still held
+ * by this thread, excluding concurrent freeze/thaw; confirm.
+ */
+ if ((kr == KERN_SUCCESS) && (task->frozen == TRUE)) {
+
+ /* with a compressed pager, pages are faulted back in on demand — no explicit thaw */
+ if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
+
+ kr = KERN_SUCCESS;
+ } else {
+
+ kr = vm_map_thaw(task->map);
+ }
+ }
+ task_lock(task);
+
+ if (kr == KERN_SUCCESS)
+ task->frozen = FALSE;
+ /* release the transition flag and wake any waiters in freeze/thaw/resume */
+ task->changing_freeze_state = FALSE;
+ thread_wakeup(&task->changing_freeze_state);
+
+ task_unlock(task);
+#endif
+
+ return (kr);
+}
+
+#if CONFIG_FREEZE
+
+/*
+ * task_freeze:
+ *
+ * Freeze a task: push (or, with walk_only, just account) the task's
+ * pages out via the freezer.  Page/statistics counts are returned
+ * through the out parameters.  Fails with KERN_FAILURE if the task is
+ * already frozen.
+ *
+ * Conditions:
+ *	The caller holds a reference to the task
+ */
+kern_return_t
+task_freeze(
+ register task_t task,
+ uint32_t *purgeable_count,
+ uint32_t *wired_count,
+ uint32_t *clean_count,
+ uint32_t *dirty_count,
+ uint32_t dirty_budget,
+ boolean_t *shared,
+ boolean_t walk_only)
+{
+ kern_return_t kr;
+
+ if (task == TASK_NULL || task == kernel_task)
+ return (KERN_INVALID_ARGUMENT);
+
+ task_lock(task);
+
+ /*
+ * Wait out any in-flight freeze/thaw transition, then claim the
+ * transition flag so only one thread manipulates freeze state.
+ */
+ while (task->changing_freeze_state) {
+
+ assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
+ task_unlock(task);
+ thread_block(THREAD_CONTINUE_NULL);
+
+ task_lock(task);
+ }
+ if (task->frozen) {
+ task_unlock(task);
+ return (KERN_FAILURE);
+ }
+ task->changing_freeze_state = TRUE;
+
+ /* drop the lock for the (potentially long) VM operation; the flag guards us */
+ task_unlock(task);
+
+ if (walk_only) {
+ kr = vm_map_freeze_walk(task->map, purgeable_count, wired_count, clean_count, dirty_count, dirty_budget, shared);
+ } else {
+ kr = vm_map_freeze(task->map, purgeable_count, wired_count, clean_count, dirty_count, dirty_budget, shared);
+ }
+
+ task_lock(task);
+
+ /* only a real (non-walk) successful freeze marks the task frozen */
+ if (walk_only == FALSE && kr == KERN_SUCCESS)
+ task->frozen = TRUE;
+ /* release the transition flag and wake any waiters */
+ task->changing_freeze_state = FALSE;
+ thread_wakeup(&task->changing_freeze_state);
+
+ task_unlock(task);
+
+ return (kr);
+}
+
+/*
+ * task_thaw:
+ *
+ * Thaw a currently frozen task: bring its pages back via the default
+ * freezer, or simply clear the frozen flag when a compressed pager is
+ * in use.  Fails with KERN_FAILURE if the task is not frozen.
+ *
+ * Conditions:
+ *	The caller holds a reference to the task
+ */
+extern void
+vm_consider_waking_compactor_swapper(void);
+
+kern_return_t
+task_thaw(
+ register task_t task)
+{
+ kern_return_t kr;
+
+ if (task == TASK_NULL || task == kernel_task)
+ return (KERN_INVALID_ARGUMENT);
+
+ task_lock(task);
+
+ /*
+ * Wait out any in-flight freeze/thaw transition, then claim the
+ * transition flag so only one thread manipulates freeze state.
+ */
+ while (task->changing_freeze_state) {
+
+ assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
+ task_unlock(task);
+ thread_block(THREAD_CONTINUE_NULL);
+
+ task_lock(task);
+ }
+ if (!task->frozen) {
+ task_unlock(task);
+ return (KERN_FAILURE);
+ }
+ task->changing_freeze_state = TRUE;
+
+ if (DEFAULT_PAGER_IS_ACTIVE || DEFAULT_FREEZER_IS_ACTIVE) {
+ /* drop the lock for the (potentially long) VM thaw; the flag guards us */
+ task_unlock(task);
+
+ kr = vm_map_thaw(task->map);
+
+ task_lock(task);
+
+ if (kr == KERN_SUCCESS)
+ task->frozen = FALSE;
+ } else {
+ /* compressed pager: pages fault back in on demand, just clear the flag */
+ task->frozen = FALSE;
+ kr = KERN_SUCCESS;
+ }
+
+ /* release the transition flag and wake any waiters */
+ task->changing_freeze_state = FALSE;
+ thread_wakeup(&task->changing_freeze_state);
+
+ task_unlock(task);
+
+ if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
+ vm_consider_waking_compactor_swapper();
+ }
+
+ return (kr);
+}
+
+#endif /* CONFIG_FREEZE */
+
+/*
+ * host_security_set_task_token:
+ *
+ * Install new security and audit tokens on a task (requires the host
+ * security port), then point the task's host special port at either
+ * the supplied privileged host port or the unprivileged host port.
+ */
+kern_return_t
+host_security_set_task_token(
+	host_security_t host_security,
+	task_t task,
+	security_token_t sec_token,
+	audit_token_t audit_token,
+	host_priv_t host_priv)
+{
+	ipc_port_t new_host_port;
+	kern_return_t result;
+
+	if (task == TASK_NULL)
+		return (KERN_INVALID_ARGUMENT);
+	if (host_security == HOST_NULL)
+		return (KERN_INVALID_SECURITY);
+
+	/* swap in the new tokens under the task lock */
+	task_lock(task);
+	task->sec_token = sec_token;
+	task->audit_token = audit_token;
+	task_unlock(task);
+
+	/* choose the privileged or unprivileged host port for the task */
+	if (host_priv != HOST_PRIV_NULL)
+		result = host_get_host_priv_port(host_priv, &new_host_port);
+	else
+		result = host_get_host_port(host_priv_self(), &new_host_port);
+	assert(result == KERN_SUCCESS);
+
+	result = task_set_special_port(task, TASK_HOST_PORT, new_host_port);
+	return (result);
+}
+
+/*
+ * task_send_trace_memory:
+ *
+ * Forward a trace-memory inspection notification for the target task.
+ * Without CONFIG_ATM this is a stub that reports KERN_INVALID_ARGUMENT.
+ */
+kern_return_t
+task_send_trace_memory(
+	task_t target_task,
+	__unused uint32_t pid,
+	__unused uint64_t uniqueid)
+{
+	if (target_task == TASK_NULL)
+		return (KERN_INVALID_ARGUMENT);
+
+#if CONFIG_ATM
+	return (atm_send_proc_inspect_notification(target_task,
+						   pid,
+						   uniqueid));
+#else
+	return (KERN_INVALID_ARGUMENT);
+#endif
+}
+/*
+ * This routine was added, pretty much exclusively, for registering the
+ * RPC glue vector for in-kernel short circuited tasks. Rather than
+ * removing it completely, I have only disabled that feature (which was
+ * the only feature at the time). It just appears that we are going to
+ * want to add some user data to tasks in the future (i.e. bsd info,
+ * task names, etc...), so I left it in the formal task interface.
+ */
+/*
+ * task_set_info:
+ *
+ * Set task information of the given flavor.  Currently only
+ * TASK_TRACE_MEMORY_INFO (when CONFIG_ATM is enabled) is supported;
+ * every other flavor returns KERN_INVALID_ARGUMENT.
+ */
+kern_return_t
+task_set_info(
+	task_t task,
+	task_flavor_t flavor,
+	__unused task_info_t task_info_in,		/* pointer to IN array */
+	__unused mach_msg_type_number_t task_info_count)
+{
+	if (task == TASK_NULL)
+		return(KERN_INVALID_ARGUMENT);
+
+	switch (flavor) {
+
+#if CONFIG_ATM
+	case TASK_TRACE_MEMORY_INFO:
+	{
+		if (task_info_count != TASK_TRACE_MEMORY_INFO_COUNT)
+			return (KERN_INVALID_ARGUMENT);
+
+		assert(task_info_in != NULL);
+		task_trace_memory_info_t mem_info;
+		mem_info = (task_trace_memory_info_t) task_info_in;
+		kern_return_t kr = atm_register_trace_memory(task,
+				mem_info->user_memory_address,
+				mem_info->buffer_size,
+				mem_info->mailbox_array_size);
+		/* the unreachable "break" that followed this return was removed */
+		return kr;
+	}
+
+#endif
+	default:
+		return (KERN_INVALID_ARGUMENT);
+	}
+	/* not reached: every switch arm returns; kept to satisfy compilers */
+	return (KERN_SUCCESS);
+}
+
+kern_return_t
+task_info(
+ task_t task,
+ task_flavor_t flavor,
+ task_info_t task_info_out,
+ mach_msg_type_number_t *task_info_count)
+{
+ kern_return_t error = KERN_SUCCESS;
+
+ if (task == TASK_NULL)
+ return (KERN_INVALID_ARGUMENT);
+
+ task_lock(task);
+
+ if ((task != current_task()) && (!task->active)) {
+ task_unlock(task);
+ return (KERN_INVALID_ARGUMENT);
+ }
+
+ switch (flavor) {