+
+out:
+#if CONFIG_MACF
+ mac_exc_free_label(crash_label);
+#endif
+ return kr;
+}
+
+/*
+ * task_clear_corpse
+ *
+ * Clears the corpse pending bit on the task.
+ * Removes the inspection bit from its threads.
+ */
+void
+task_clear_corpse(task_t task)
+{
+ thread_t th_iter = NULL;
+
+ task_lock(task);
+ queue_iterate(&task->threads, th_iter, thread_t, task_threads)
+ {
+ thread_mtx_lock(th_iter);
+ th_iter->inspection = FALSE;
+ thread_mtx_unlock(th_iter);
+ }
+
+ thread_terminate_crashed_threads();
+ /* remove the pending corpse report flag */
+ task_clear_corpse_pending_report(task);
+
+ task_unlock(task);
+}
+
+/*
+ * task_port_notify
+ *
+ * Called whenever the Mach port system detects no-senders on
+ * the task port of a corpse.
+ * Each notification that comes in should terminate the task (corpse).
+ */
+void
+task_port_notify(mach_msg_header_t *msg)
+{
+ mach_no_senders_notification_t *notification = (void *)msg;
+ ipc_port_t port = notification->not_header.msgh_remote_port;
+ task_t task;
+
+ assert(ip_active(port));
+ assert(IKOT_TASK == ip_kotype(port));
+ task = (task_t) port->ip_kobject;
+
+ assert(task_is_a_corpse(task));
+
+ /* Remove the task from global corpse task list */
+ task_remove_from_corpse_task_list(task);
+
+ task_clear_corpse(task);
+ task_terminate_internal(task);
+}
+
+/*
+ * task_wait_till_threads_terminate_locked
+ *
+ * Wait until all the threads in the task have terminated.
+ * May release the task lock and re-acquire it.
+ */
+void
+task_wait_till_threads_terminate_locked(task_t task)
+{
+ /* wait for all the threads in the task to terminate */
+ while (task->active_thread_count != 0) {
+ assert_wait((event_t)&task->active_thread_count, THREAD_UNINT);
+ task_unlock(task);
+ thread_block(THREAD_CONTINUE_NULL);
+
+ task_lock(task);
+ }
+}
+
+/*
+ * task_duplicate_map_and_threads
+ *
+ * Copy vmmap of source task.
+ * Copy active threads from source task to destination task.
+ * The source task is suspended for the duration of the copy.
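+ * On success, *thread_ret holds the corpse thread corresponding to the
+ * caller (or the first corpse thread if no equivalent was found), and
+ * *udata_buffer, *size and *num_udata describe the kqueue udata
+ * pointers copied from the source process.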
+ */
+kern_return_t
+task_duplicate_map_and_threads(
+ task_t task,
+ void *p,
+ task_t new_task,
+ thread_t *thread_ret,
+ uint64_t **udata_buffer,
+ int *size,
+ int *num_udata)
+{
+ kern_return_t kr = KERN_SUCCESS;
+ int active;
+ thread_t thread, self, thread_return = THREAD_NULL;
+ thread_t new_thread = THREAD_NULL, first_thread = THREAD_NULL;
+ thread_t *thread_array;
+ uint32_t active_thread_count = 0, array_count = 0, i;
+ vm_map_t oldmap;
+ uint64_t *buffer = NULL;
+ int buf_size = 0;
+ int est_knotes = 0, num_knotes = 0;
+
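+ /* Remember the calling thread so its corpse counterpart can be returned */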
+ self = current_thread();
+
+ /*
+ * Suspend the task to copy thread state; use the internal
+ * variant so that no user-space process can resume
+ * the task from under us.
+ */
+ kr = task_suspend_internal(task);
+ if (kr != KERN_SUCCESS) {
+ return kr;
+ }
+
+ if (task->map->disable_vmentry_reuse == TRUE) {
+ /*
+ * Quite likely GuardMalloc (or some other debugging tool)
+ * is being used on this task, and it has exceeded
+ * its limit. Making a corpse would likely run into
+ * a lot of VM entries that need copy-on-write.
+ *
+ * Skip it.
+ */
+#if DEVELOPMENT || DEBUG
+ memorystatus_abort_vm_map_fork(task);
+#endif
+ task_resume_internal(task);
+ return KERN_FAILURE;
+ }
+
+ /* Check with VM if vm_map_fork is allowed for this task */
+ if (memorystatus_allowed_vm_map_fork(task)) {
+ /* Set up the new task's vmmap, switching from the parent task's map to its COW map */
+ oldmap = new_task->map;
+ new_task->map = vm_map_fork(new_task->ledger,
+ task->map,
+ (VM_MAP_FORK_SHARE_IF_INHERIT_NONE |
+ VM_MAP_FORK_PRESERVE_PURGEABLE |
+ VM_MAP_FORK_CORPSE_FOOTPRINT));
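+ /* Drop the reference on the map the corpse task was originally created with */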
+ vm_map_deallocate(oldmap);
+
+ /* copy ledgers that impact the memory footprint */
+ vm_map_copy_footprint_ledgers(task, new_task);
+
+ /* Get all the udata pointers from kqueue */
+ est_knotes = kevent_proc_copy_uptrs(p, NULL, 0);
+ if (est_knotes > 0) {
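+ /* Size the buffer with 32 extra slots; the copied count is clamped to that below */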
+ buf_size = (est_knotes + 32) * sizeof(uint64_t);
+ buffer = (uint64_t *) kalloc(buf_size);
+ num_knotes = kevent_proc_copy_uptrs(p, buffer, buf_size);
+ if (num_knotes > est_knotes + 32) {
+ num_knotes = est_knotes + 32;
+ }
+ }
+ }
+
+ active_thread_count = task->active_thread_count;
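+ /* Bail out if the task has no active threads left to duplicate */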
+ if (active_thread_count == 0) {
+ if (buffer != NULL) {
+ kfree(buffer, buf_size);
+ }
+ task_resume_internal(task);
+ return KERN_FAILURE;
+ }
+
+ thread_array = (thread_t *) kalloc(sizeof(thread_t) * active_thread_count);
+
+ /* Grab a reference to each active thread under the task lock, then drop the lock before calling thread_create_with_continuation */
+ task_lock(task);
+ queue_iterate(&task->threads, thread, thread_t, task_threads) {
+ /* Skip inactive threads */
+ active = thread->active;
+ if (!active) {
+ continue;
+ }
+
+ if (array_count >= active_thread_count) {
+ break;
+ }
+
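+ /* Take a reference so the thread stays valid once the task lock is dropped */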
+ thread_array[array_count++] = thread;
+ thread_reference(thread);
+ }
+ task_unlock(task);
+
+ for (i = 0; i < array_count; i++) {
+ kr = thread_create_with_continuation(new_task, &new_thread, (thread_continue_t)thread_corpse_continue);
+ if (kr != KERN_SUCCESS) {
+ break;
+ }
+
+ /* Equivalent of current thread in corpse */
+ if (thread_array[i] == self) {
+ thread_return = new_thread;
+ new_task->crashed_thread_id = thread_tid(new_thread);
+ } else if (first_thread == NULL) {
+ first_thread = new_thread;
+ } else {
+ /* drop the extra ref returned by thread_create_with_continuation */
+ thread_deallocate(new_thread);
+ }
+
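+ /* Duplicate the source thread's state into the corpse thread */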
+ kr = thread_dup2(thread_array[i], new_thread);
+ if (kr != KERN_SUCCESS) {
+ thread_mtx_lock(new_thread);
+ new_thread->corpse_dup = TRUE;
+ thread_mtx_unlock(new_thread);
+ continue;
+ }
+
+ /* Copy thread name */
+ bsd_copythreadname(new_thread->uthread, thread_array[i]->uthread);
+ new_thread->thread_tag = thread_array[i]->thread_tag;
+ thread_copy_resource_info(new_thread, thread_array[i]);
+ }
+
+ /* return the first thread if we couldn't find the equivalent of current */
+ if (thread_return == THREAD_NULL) {
+ thread_return = first_thread;
+ } else if (first_thread != THREAD_NULL) {
+ /* drop the extra ref returned by thread_create_with_continuation */
+ thread_deallocate(first_thread);
+ }
+
+ task_resume_internal(task);
+
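+ /* Drop the references taken on the source threads above */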
+ for (i = 0; i < array_count; i++) {
+ thread_deallocate(thread_array[i]);
+ }
+ kfree(thread_array, sizeof(thread_t) * active_thread_count);
+
+ if (kr == KERN_SUCCESS) {
+ *thread_ret = thread_return;
+ *udata_buffer = buffer;
+ *size = buf_size;
+ *num_udata = num_knotes;
+ } else {
+ if (thread_return != THREAD_NULL) {
+ thread_deallocate(thread_return);
+ }
+ if (buffer != NULL) {
+ kfree(buffer, buf_size);
+ }
+ }
+