+/*
+ * thread_deallocate_queue_invoke:
+ *
+ * MPSC daemon-queue callback for the thread deallocate queue:
+ * recovers the thread from its embedded queue linkage and hands it
+ * to thread_deallocate_complete().  'dq' is consumed only by the
+ * assert that we were invoked on the expected queue.
+ */
+static void
+thread_deallocate_queue_invoke(mpsc_queue_chain_t e,
+ __assert_only mpsc_daemon_queue_t dq)
+{
+	/* Recover the owning thread from its mpsc_links element. */
+ thread_t thread = mpsc_queue_element(e, struct thread, mpsc_links);
+
+ assert(dq == &thread_deallocate_queue);
+
+ thread_deallocate_complete(thread);
+}
+
+/*
+ * thread_terminate_enqueue:
+ *
+ * Enqueue a terminating thread for final disposition.
+ *
+ * Called at splsched.
+ */
+void
+thread_terminate_enqueue(
+ thread_t thread)
+{
+	/* Emit the termination tracepoint before handing the thread off. */
+ KDBG_RELEASE(TRACE_DATA_THREAD_TERMINATE, thread->thread_id);
+
+	/*
+	 * Hand the thread to the terminate queue's daemon; preemption is
+	 * disabled around the enqueue per the flag below.
+	 */
+ mpsc_daemon_enqueue(&thread_terminate_queue, &thread->mpsc_links,
+ MPSC_QUEUE_DISABLE_PREEMPTION);
+}
+
+/*
+ * thread_deallocate_enqueue:
+ *
+ * Enqueue a thread for final deallocation; the work is performed
+ * asynchronously by thread_deallocate_queue_invoke() in daemon
+ * context.
+ */
+static void
+thread_deallocate_enqueue(
+ thread_t thread)
+{
+ mpsc_daemon_enqueue(&thread_deallocate_queue, &thread->mpsc_links,
+ MPSC_QUEUE_DISABLE_PREEMPTION);
+}
+
+/*
+ * thread_terminate_crashed_threads:
+ *
+ * Walk the list of crashed threads and move any that are no longer
+ * being inspected onto the terminate queue for final disposition.
+ */
+void
+thread_terminate_crashed_threads(void)
+{
+ thread_t th_remove;
+
+ simple_lock(&crashed_threads_lock, &thread_lck_grp);
+ /*
+  * Loop through the crashed-threads queue and re-queue for
+  * termination every thread whose inspection has completed.
+  * The _safe iterator is required because we unlink as we go.
+  */
+
+ qe_foreach_element_safe(th_remove, &crashed_threads_queue, runq_links) {
+ /* make sure current_thread is never in crashed queue */
+ assert(th_remove != current_thread());
+
+ if (th_remove->inspection == FALSE) {
+ remqueue(&th_remove->runq_links);
+	/*
+	 * NOTE(review): this path uses MPSC_QUEUE_NONE while the
+	 * other enqueue paths use MPSC_QUEUE_DISABLE_PREEMPTION —
+	 * presumably safe because the simple lock held above already
+	 * constrains preemption; confirm against the
+	 * mpsc_daemon_enqueue() contract.
+	 */
+ mpsc_daemon_enqueue(&thread_terminate_queue, &th_remove->mpsc_links,
+ MPSC_QUEUE_NONE);
+ }
+ }
+
+ simple_unlock(&crashed_threads_lock);
+}
+
+/*
+ * thread_stack_queue_invoke:
+ *
+ * Perform stack allocation as required due to
+ * invoke failures.  Runs in daemon context so the allocation can
+ * block; once the stack is attached, the thread is made runnable
+ * again.
+ */
+static void
+thread_stack_queue_invoke(mpsc_queue_chain_t elm,
+ __assert_only mpsc_daemon_queue_t dq)
+{
+	/* Recover the waiting thread from its mpsc_links element. */
+ thread_t thread = mpsc_queue_element(elm, struct thread, mpsc_links);
+
+ assert(dq == &thread_stack_queue);
+
+ /* allocate stack with interrupts enabled so that we can call into VM */
+ stack_alloc(thread);
+
+	/* Matches the DBG_FUNC_START emitted by thread_stack_enqueue(). */
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_WAIT) | DBG_FUNC_END, thread_tid(thread), 0, 0, 0, 0);
+
+	/* Re-dispatch the thread now that it has a kernel stack. */
+ spl_t s = splsched();
+ thread_lock(thread);
+ thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
+ thread_unlock(thread);
+ splx(s);
+}
+
+/*
+ * thread_stack_enqueue:
+ *
+ * Enqueue a thread for stack allocation, handled asynchronously by
+ * thread_stack_queue_invoke().
+ *
+ * Called at splsched.
+ */
+void
+thread_stack_enqueue(
+ thread_t thread)
+{
+	/* Paired with the DBG_FUNC_END in thread_stack_queue_invoke(). */
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_WAIT) | DBG_FUNC_START, thread_tid(thread), 0, 0, 0, 0);
+ assert_thread_magic(thread);
+
+ mpsc_daemon_enqueue(&thread_stack_queue, &thread->mpsc_links,
+ MPSC_QUEUE_DISABLE_PREEMPTION);
+}
+
+/*
+ * thread_daemon_init:
+ *
+ * Initialize the daemons and MPSC queues backing asynchronous
+ * thread work: termination, deallocation, stack allocation, and
+ * exception delivery.  Panics if a daemon thread cannot be created.
+ */
+void
+thread_daemon_init(void)
+{
+ kern_return_t result;
+
+ thread_deallocate_daemon_init();
+
+	/* Terminate and deallocate queues share the deallocate daemon. */
+ thread_deallocate_daemon_register_queue(&thread_terminate_queue,
+ thread_terminate_queue_invoke);
+
+ thread_deallocate_daemon_register_queue(&thread_deallocate_queue,
+ thread_deallocate_queue_invoke);
+
+ simple_lock_init(&crashed_threads_lock, 0);
+ queue_init(&crashed_threads_queue);
+
+	/* Stack daemon gets its own dedicated high-priority thread. */
+ result = mpsc_daemon_queue_init_with_thread(&thread_stack_queue,
+ thread_stack_queue_invoke, BASEPRI_PREEMPT_HIGH,
+ "daemon.thread-stack");
+ if (result != KERN_SUCCESS) {
+ panic("thread_daemon_init: thread_stack_daemon");
+ }
+
+ result = mpsc_daemon_queue_init_with_thread(&thread_exception_queue,
+ thread_exception_queue_invoke, MINPRI_KERNEL,
+ "daemon.thread-exception");
+ if (result != KERN_SUCCESS) {
+ panic("thread_daemon_init: thread_exception_daemon");
+ }
+}
+
+/*
+ * Option flags for internal thread creation.
+ * NOTE(review): individual flag semantics are not visible in this
+ * chunk — confirm against the consumers of these options elsewhere
+ * in the file.
+ */
+__options_decl(thread_create_internal_options_t, uint32_t, {
+ TH_OPTION_NONE = 0x00,
+ TH_OPTION_NOCRED = 0x01,
+ TH_OPTION_NOSUSP = 0x02,
+ TH_OPTION_WORKQ = 0x04,
+ TH_OPTION_IMMOVABLE = 0x08,
+ TH_OPTION_PINNED = 0x10,
+});
+