+/*
+ * thread_terminate_enqueue:
+ *
+ * Enqueue a terminating thread for final disposition.
+ *
+ * Called at splsched.
+ */
+void
+thread_terminate_enqueue(
+ thread_t thread)
+{
+ KERNEL_DEBUG_CONSTANT(TRACE_DATA_THREAD_TERMINATE | DBG_FUNC_NONE, thread->thread_id, 0, 0, 0, 0);
+
+ simple_lock(&thread_terminate_lock);
+ enqueue_tail(&thread_terminate_queue, (queue_entry_t)thread);
+ simple_unlock(&thread_terminate_lock);
+
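+ /* Wake the terminate daemon after dropping the lock so it does not immediately contend on it. */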
+ thread_wakeup((event_t)&thread_terminate_queue);
+}
+
+/*
+ * thread_terminate_crashed_threads:
+ *
+ * Walk the list of crashed threads and move any that are no
+ * longer being inspected back onto the terminate queue.
+ */
+void
+thread_terminate_crashed_threads(void)
+{
+ thread_t th_iter, th_remove;
+ boolean_t should_wake_terminate_queue = FALSE;
+
+ simple_lock(&thread_terminate_lock);
+ /*
+ * Loop through the crashed threads queue and move any
+ * threads that are no longer being inspected back onto
+ * the terminate queue.
+ */
+ th_iter = (thread_t)queue_first(&crashed_threads_queue);
+ while (!queue_end(&crashed_threads_queue, (queue_entry_t)th_iter)) {
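+ /* Advance the iterator before th_remove may be unlinked from the queue below. */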
+ th_remove = th_iter;
+ th_iter = (thread_t)queue_next(&th_iter->links);
+
+ /* make sure current_thread is never in crashed queue */
+ assert(th_remove != current_thread());
+ if (th_remove->inspection != TRUE) {
+ remque((queue_entry_t)th_remove);
+ enqueue_tail(&thread_terminate_queue, (queue_entry_t)th_remove);
+ should_wake_terminate_queue = TRUE;
+ }
+ }
+
+ simple_unlock(&thread_terminate_lock);
+ if (should_wake_terminate_queue == TRUE) {
+ thread_wakeup((event_t)&thread_terminate_queue);
+ }
+}
+
+/*
+ * thread_stack_daemon:
+ *
+ * Perform stack allocation as required due to
+ * invoke failures.
+ */
+static void
+thread_stack_daemon(void)
+{
+ thread_t thread;
+ spl_t s;
+
+ s = splsched();
+ simple_lock(&thread_stack_lock);
+
+ while ((thread = (thread_t)dequeue_head(&thread_stack_queue)) != THREAD_NULL) {
+ simple_unlock(&thread_stack_lock);
+ splx(s);
+
+ /* allocate stack with interrupts enabled so that we can call into VM */
+ stack_alloc(thread);
+
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_WAIT) | DBG_FUNC_END, thread_tid(thread), 0, 0, 0, 0);
+
+ s = splsched();
+ thread_lock(thread);
+ thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
+ thread_unlock(thread);
+
+ simple_lock(&thread_stack_lock);
+ }
+
+ assert_wait((event_t)&thread_stack_queue, THREAD_UNINT);
+ simple_unlock(&thread_stack_lock);
+ splx(s);
+
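+ /*
+ * Block until more work arrives, restarting the daemon from the
+ * top via the continuation so no kernel stack state needs to be
+ * preserved across the block.
+ */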
+ thread_block((thread_continue_t)thread_stack_daemon);
+ /*NOTREACHED*/
+}
+
+/*
+ * thread_stack_enqueue:
+ *
+ * Enqueue a thread for stack allocation.
+ *
+ * Called at splsched.
+ */
+void
+thread_stack_enqueue(
+ thread_t thread)
+{
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_WAIT) | DBG_FUNC_START, thread_tid(thread), 0, 0, 0, 0);
+
+ simple_lock(&thread_stack_lock);
+ enqueue_tail(&thread_stack_queue, (queue_entry_t)thread);
+ simple_unlock(&thread_stack_lock);
+
+ thread_wakeup((event_t)&thread_stack_queue);
+}
+
+void
+thread_daemon_init(void)
+{
+ kern_return_t result;
+ thread_t thread = NULL;
+
+ simple_lock_init(&thread_terminate_lock, 0);
+ queue_init(&thread_terminate_queue);
+ queue_init(&crashed_threads_queue);
+
+ result = kernel_thread_start_priority((thread_continue_t)thread_terminate_daemon, NULL, MINPRI_KERNEL, &thread);
+ if (result != KERN_SUCCESS)
+ panic("thread_daemon_init: thread_terminate_daemon");
+
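+ /* The daemon runs independently; drop the reference returned by kernel_thread_start_priority(). */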
+ thread_deallocate(thread);
+
+ simple_lock_init(&thread_stack_lock, 0);
+ queue_init(&thread_stack_queue);
+
+ result = kernel_thread_start_priority((thread_continue_t)thread_stack_daemon, NULL, BASEPRI_PREEMPT, &thread);
+ if (result != KERN_SUCCESS)
+ panic("thread_daemon_init: thread_stack_daemon");
+
+ thread_deallocate(thread);
+}
+
+#define TH_OPTION_NONE 0x00
+#define TH_OPTION_NOCRED 0x01
+#define TH_OPTION_NOSUSP 0x02
+/*
+ * thread_create_internal:
+ *
+ * Create a new thread.  Doesn't start the thread running.
+ *
+ * Task and tasks_threads_lock are returned locked on success.
+ */
+static kern_return_t
+thread_create_internal(
+ task_t parent_task,
+ integer_t priority,
+ thread_continue_t continuation,
+ int options,
+ thread_t *out_thread)
+{
+ thread_t new_thread;
+ static thread_t first_thread;
+
+ /*
+ * Allocate a thread and initialize static fields
+ */
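+ /*
+ * The first thread ever created is the already-running bootstrap
+ * thread, which is not zone-allocated and was initialized from the
+ * template at bootstrap; the template copy below is skipped for it.
+ */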
+ if (first_thread == THREAD_NULL)
+ new_thread = first_thread = current_thread();
+ else
+ new_thread = (thread_t)zalloc(thread_zone);
+ if (new_thread == THREAD_NULL)
+ return (KERN_RESOURCE_SHORTAGE);
+
+ if (new_thread != first_thread)
+ *new_thread = thread_template;
+
+#ifdef MACH_BSD
+ new_thread->uthread = uthread_alloc(parent_task, new_thread, (options & TH_OPTION_NOCRED) != 0);
+ if (new_thread->uthread == NULL) {
+ zfree(thread_zone, new_thread);
+ return (KERN_RESOURCE_SHORTAGE);
+ }
+#endif /* MACH_BSD */
+
+ if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
+#ifdef MACH_BSD
+ void *ut = new_thread->uthread;
+
+ new_thread->uthread = NULL;
+ /* cred free may not be necessary */
+ uthread_cleanup(parent_task, ut, parent_task->bsd_info, FALSE);
+ uthread_cred_free(ut);
+ uthread_zone_free(ut);
+#endif /* MACH_BSD */
+
+ zfree(thread_zone, new_thread);
+ return (KERN_FAILURE);
+ }
+
+ new_thread->task = parent_task;
+
+ thread_lock_init(new_thread);
+ wake_lock_init(new_thread);
+
+ lck_mtx_init(&new_thread->mutex, &thread_lck_grp, &thread_lck_attr);
+
+ ipc_thread_init(new_thread);
+
+ new_thread->continuation = continuation;
+
+ /* Allocate I/O Statistics structure */
+ new_thread->thread_io_stats = (io_stat_info_t)kalloc(sizeof(struct io_stat_info));
+ assert(new_thread->thread_io_stats != NULL);
+ bzero(new_thread->thread_io_stats, sizeof(struct io_stat_info));
+
+#if CONFIG_IOSCHED
+ /* Clear out the I/O Scheduling info for AppleFSCompression */
+ new_thread->decmp_upl = NULL;
+#endif /* CONFIG_IOSCHED */
+
+ lck_mtx_lock(&tasks_threads_lock);
+ task_lock(parent_task);
+
+ if ( !parent_task->active || parent_task->halting ||
+ ((options & TH_OPTION_NOSUSP) != 0 &&
+ parent_task->suspend_count > 0) ||
+ (parent_task->thread_count >= task_threadmax &&
+ parent_task != kernel_task) ) {
+ task_unlock(parent_task);
+ lck_mtx_unlock(&tasks_threads_lock);
+
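+ /* Creation raced with task termination, suspension, or the thread limit: unwind the partially constructed thread. */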
+#ifdef MACH_BSD
+ {
+ void *ut = new_thread->uthread;
+
+ new_thread->uthread = NULL;
+ uthread_cleanup(parent_task, ut, parent_task->bsd_info, FALSE);
+ /* cred free may not be necessary */
+ uthread_cred_free(ut);
+ uthread_zone_free(ut);
+ }
+#endif /* MACH_BSD */
+ ipc_thread_disable(new_thread);
+ ipc_thread_terminate(new_thread);
+ kfree(new_thread->thread_io_stats, sizeof(struct io_stat_info));
+ lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp);
+ machine_thread_destroy(new_thread);
+ zfree(thread_zone, new_thread);
+ return (KERN_FAILURE);
+ }
+
+ /* New threads inherit any default state on the task */
+ machine_thread_inherit_taskwide(new_thread, parent_task);
+
+ task_reference_internal(parent_task);
+
+ if (new_thread->task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
+ /*
+ * This task has a per-thread CPU limit; make sure this new thread
+ * gets its limit set too, before it gets out of the kernel.
+ */
+ set_astledger(new_thread);
+ }
+
+ /* Instantiate a thread ledger. Do not fail thread creation if ledger creation fails. */
+ if ((new_thread->t_threadledger = ledger_instantiate(thread_ledger_template,
+ LEDGER_CREATE_INACTIVE_ENTRIES)) != LEDGER_NULL) {
+
+ ledger_entry_setactive(new_thread->t_threadledger, thread_ledgers.cpu_time);
+ }
+
+ new_thread->cpu_time_last_qos = 0;
+#ifdef CONFIG_BANK
+ new_thread->t_bankledger = LEDGER_NULL;
+ new_thread->t_deduct_bank_ledger_time = 0;
+#endif
+
+ new_thread->t_ledger = new_thread->task->ledger;
+ if (new_thread->t_ledger)
+ ledger_reference(new_thread->t_ledger);
+
+#if defined(CONFIG_SCHED_MULTIQ)
+ /* Cache the task's sched_group */
+ new_thread->sched_group = parent_task->sched_group;
+#endif /* defined(CONFIG_SCHED_MULTIQ) */
+
+ /* Cache the task's map */
+ new_thread->map = parent_task->map;
+
+ timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
+ timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);
+
+#if KPC
+ kpc_thread_create(new_thread);
+#endif
+
+ /* Only need to update policies pushed from task to thread */
+ new_thread->requested_policy.bg_iotier = parent_task->effective_policy.bg_iotier;
+ new_thread->requested_policy.terminated = parent_task->effective_policy.terminated;
+
+ /* Set the thread's scheduling parameters */
+#if defined(CONFIG_SCHED_TIMESHARE_CORE)
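+ /* Stamp the thread with the current scheduler tick and usage-aging shift for timeshare decay. */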
+ new_thread->sched_stamp = sched_tick;
+ new_thread->pri_shift = sched_pri_shift;
+#endif /* defined(CONFIG_SCHED_TIMESHARE_CORE) */
+
+ new_thread->sched_mode = SCHED(initial_thread_sched_mode)(parent_task);
+ new_thread->sched_flags = 0;
+ new_thread->max_priority = parent_task->max_priority;
+ new_thread->task_priority = parent_task->priority;
+
+ int new_priority = (priority < 0) ? parent_task->priority : priority;
+ if (new_priority > new_thread->max_priority)
+ new_priority = new_thread->max_priority;
+
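+ /* Importance is the thread's priority offset relative to the task's base priority. */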
+ new_thread->importance = new_priority - new_thread->task_priority;
+ new_thread->saved_importance = new_thread->importance;
+
+ if (parent_task->max_priority <= MAXPRI_THROTTLE) {
+ sched_set_thread_throttled(new_thread, TRUE);
+ }
+
+ sched_set_thread_base_priority(new_thread, new_priority);
+
+ thread_policy_create(new_thread);
+
+ /* Chain the thread onto the task's list */
+ queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
+ parent_task->thread_count++;
+
+ /* So terminating threads don't need to take the task lock to decrement */
+ hw_atomic_add(&parent_task->active_thread_count, 1);
+
+ /* Protected by the tasks_threads_lock */
+ new_thread->thread_id = ++thread_unique_id;
+
+ queue_enter(&threads, new_thread, thread_t, threads);
+ threads_count++;
+
+ new_thread->active = TRUE;
+ new_thread->inspection = FALSE;
+ *out_thread = new_thread;
+
+ {
+ long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;
+
+ kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
+ (vm_address_t)(uintptr_t)thread_tid(new_thread), dbg_arg2, 0, 0, 0);
+
+ kdbg_trace_string(parent_task->bsd_info,
+ &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
+ dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
+ }
+
+ DTRACE_PROC1(lwp__create, thread_t, *out_thread);
+
+ return (KERN_SUCCESS);
+}
+
+static kern_return_t
+thread_create_internal2(
+ task_t task,
+ thread_t *new_thread,
+ boolean_t from_user,
+ thread_continue_t continuation)
+{
+ kern_return_t result;
+ thread_t thread;
+
+ if (task == TASK_NULL || task == kernel_task)
+ return (KERN_INVALID_ARGUMENT);
+
+ result = thread_create_internal(task, -1, continuation, TH_OPTION_NONE, &thread);
+ if (result != KERN_SUCCESS)
+ return (result);
+
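+ /* New threads start suspended (one user stop); take an additional hold if the task itself is suspended. */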
+ thread->user_stop_count = 1;
+ thread_hold(thread);
+ if (task->suspend_count > 0)
+ thread_hold(thread);
+
+ if (from_user)
+ extmod_statistics_incr_thread_create(task);
+
+ task_unlock(task);
+ lck_mtx_unlock(&tasks_threads_lock);
+
+ *new_thread = thread;
+
+ return (KERN_SUCCESS);
+}
+
+/* No prototype, since task_server.h has the _from_user version if KERNEL_SERVER */
+kern_return_t
+thread_create(
+ task_t task,
+ thread_t *new_thread);
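+
+/*
+ * A minimal sketch of the corresponding definition, assuming
+ * thread_bootstrap_return as the initial continuation for new
+ * user threads (an assumption, not part of this change):
+ */
+kern_return_t
+thread_create(
+ task_t task,
+ thread_t *new_thread)
+{
+ return thread_create_internal2(task, new_thread, FALSE, (thread_continue_t)thread_bootstrap_return);
+}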