+static void __attribute__((noinline))
+thread_call_invoke(thread_call_func_t func,
+ thread_call_param_t param0,
+ thread_call_param_t param1,
+ __unused thread_call_t call)
+{
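+ /* 'call' is only dereferenced by the DTrace probes below, so it may go unused in some configurations. */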
+#if DEVELOPMENT || DEBUG
+ KERNEL_DEBUG_CONSTANT(
+ MACHDBG_CODE(DBG_MACH_SCHED, MACH_CALLOUT) | DBG_FUNC_START,
+ VM_KERNEL_UNSLIDE(func), VM_KERNEL_ADDRHIDE(param0), VM_KERNEL_ADDRHIDE(param1), 0, 0);
+#endif /* DEVELOPMENT || DEBUG */
+
+#if CONFIG_DTRACE
+ uint64_t tc_ttd = call->tc_ttd;
+ boolean_t is_delayed = call->tc_flags & THREAD_CALL_DELAYED;
+ DTRACE_TMR6(thread_callout__start, thread_call_func_t, func, int, 0, int, (tc_ttd >> 32),
+ (unsigned) (tc_ttd & 0xFFFFFFFF), is_delayed, call);
+#endif /* CONFIG_DTRACE */
+
+ (*func)(param0, param1);
+
+#if CONFIG_DTRACE
+ DTRACE_TMR6(thread_callout__end, thread_call_func_t, func, int, 0, int, (tc_ttd >> 32),
+ (unsigned) (tc_ttd & 0xFFFFFFFF), is_delayed, call);
+#endif /* CONFIG_DTRACE */
+
+#if DEVELOPMENT || DEBUG
+ KERNEL_DEBUG_CONSTANT(
+ MACHDBG_CODE(DBG_MACH_SCHED, MACH_CALLOUT) | DBG_FUNC_END,
+ VM_KERNEL_UNSLIDE(func), 0, 0, 0, 0);
+#endif /* DEVELOPMENT || DEBUG */
+}
+
+/*
+ * thread_call_thread: worker continuation for a thread call group.
+ * Drains the group's pending queue, invoking each callout with the
+ * group lock dropped and interrupts enabled, then parks on the
+ * group's idle waitq (or terminates) when no work remains.
+ */
+static void
+thread_call_thread(
+ thread_call_group_t group,
+ wait_result_t wres)
+{
+ thread_t self = current_thread();
+
+ if ((thread_get_tag_internal(self) & THREAD_TAG_CALLOUT) == 0) {
+ (void)thread_set_tag_internal(self, THREAD_TAG_CALLOUT);
+ }
+
+ /*
+ * A wakeup with THREAD_INTERRUPTED indicates that
+ * we should terminate.
+ */
+ if (wres == THREAD_INTERRUPTED) {
+ thread_terminate(self);
+
+ /* NOTREACHED */
+ panic("thread_terminate() returned?");
+ }
+
+ spl_t s = disable_ints_and_lock(group);
+
+ struct thread_call_thread_state thc_state = { .thc_group = group };
+ self->thc_state = &thc_state;
+
+ thread_sched_call(self, sched_call_thread);
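+ /* Install sched_call_thread as this worker's scheduler callout, invoked when the thread blocks or unblocks. */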
+
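+ /* Drain the pending queue; each callout runs with the group lock dropped and interrupts enabled. */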
+ while (group->pending_count > 0) {
+ thread_call_t call = qe_dequeue_head(&group->pending_queue,
+ struct thread_call, tc_qlink);
+ assert(call != NULL);
+
+ group->pending_count--;
+ if (group->pending_count == 0) {
+ assert(queue_empty(&group->pending_queue));
+ }
+
+ thread_call_func_t func = call->tc_func;
+ thread_call_param_t param0 = call->tc_param0;
+ thread_call_param_t param1 = call->tc_param1;
+
+ call->tc_queue = NULL;
+
+ if (_is_internal_call(call)) {
+ _internal_call_release(call);
+ }
+
+ /*
+ * Can only do wakeups for thread calls whose storage
+ * we control.
+ */
+ bool needs_finish = false;
+ if (call->tc_flags & THREAD_CALL_ALLOC) {
+ call->tc_refs++; /* Delay free until we're done */
+ }
+ if (call->tc_flags & (THREAD_CALL_ALLOC | THREAD_CALL_ONCE)) {
+ /*
+ * If THREAD_CALL_ONCE is set but the call was not allocated
+ * with THREAD_CALL_ALLOC, clients promise to use
+ * thread_call_cancel_wait() before destroying
+ * the thread call.
+ *
+ * Else, the storage for the thread call might have
+ * disappeared when thread_call_invoke() ran.
+ */
+ needs_finish = true;
+ call->tc_flags |= THREAD_CALL_RUNNING;
+ }
+
+ thc_state.thc_call = call;
+ thc_state.thc_call_pending_timestamp = call->tc_pending_timestamp;
+ thc_state.thc_call_soft_deadline = call->tc_soft_deadline;
+ thc_state.thc_call_hard_deadline = call->tc_pqlink.deadline;
+ thc_state.thc_func = func;
+ thc_state.thc_param0 = param0;
+ thc_state.thc_param1 = param1;
+ thc_state.thc_IOTES_invocation_timestamp = 0;
+
+ enable_ints_and_unlock(group, s);
+
+ thc_state.thc_call_start = mach_absolute_time();
+
+ thread_call_invoke(func, param0, param1, call);
+
+ thc_state.thc_call = NULL;
+
+ if (get_preemption_level() != 0) {
+ int pl = get_preemption_level();
+ panic("thread_call_thread: preemption_level %d, last callout %p(%p, %p)",
+ pl, (void *)VM_KERNEL_UNSLIDE(func), param0, param1);
+ }
+
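+ /* Retake the group lock before dropping our reference and scanning for more work. */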
+ s = disable_ints_and_lock(group);
+
+ if (needs_finish) {
+ /* Release refcount, may free, may temporarily drop lock */
+ thread_call_finish(call, group, &s);
+ }
+ }
+
+ thread_sched_call(self, NULL);
+ group->active_count--;
+
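+ /*
+ * Credit an interrupt wakeup to the task ledger if this callout was
+ * woken from interrupt context and did not itself wake another thread.
+ */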
+ if (self->callout_woken_from_icontext && !self->callout_woke_thread) {
+ ledger_credit(self->t_ledger, task_ledgers.interrupt_wakeups, 1);
+ if (self->callout_woken_from_platform_idle) {
+ ledger_credit(self->t_ledger, task_ledgers.platform_idle_wakeups, 1);
+ }
+ }
+
+ self->callout_woken_from_icontext = FALSE;
+ self->callout_woken_from_platform_idle = FALSE;
+ self->callout_woke_thread = FALSE;
+
+ self->thc_state = NULL;
+
+ if (group_isparallel(group)) {
+ /*
+ * For the new style of thread call group, the thread always
+ * blocks when it runs out of work. Record when the group first
+ * went idle, and if the group has more threads than its target
+ * and the deallocation timer isn't already armed, arm it so a
+ * surplus thread can be reclaimed if the surplus persists.
+ */
+ group->idle_count++;
+
+ if (group->idle_count == 1) {
+ group->idle_timestamp = mach_absolute_time();
+ }
+
+ if (((group->tcg_flags & TCG_DEALLOC_ACTIVE) == 0) &&
+ ((group->active_count + group->idle_count) > group->target_thread_count)) {
+ thread_call_start_deallocate_timer(group);
+ }
+
+ /* Wait for more work (or termination) */
+ wres = waitq_assert_wait64(&group->idle_waitq, CAST_EVENT64_T(group), THREAD_INTERRUPTIBLE, 0);
+ if (wres != THREAD_WAITING) {
+ panic("kcall worker unable to assert wait?");
+ }
+
+ enable_ints_and_unlock(group, s);
+
+ thread_block_parameter((thread_continue_t)thread_call_thread, group);
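+ /* NOTREACHED */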
+ } else {
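+ /*
+ * Old-style group: park at most target_thread_count idle
+ * threads; a surplus worker falls through and terminates.
+ */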
+ if (group->idle_count < group->target_thread_count) {
+ group->idle_count++;
+
+ waitq_assert_wait64(&group->idle_waitq, CAST_EVENT64_T(group), THREAD_UNINT, 0); /* Interrupted means to exit */
+
+ enable_ints_and_unlock(group, s);
+
+ thread_block_parameter((thread_continue_t)thread_call_thread, group);
+ /* NOTREACHED */
+ }
+ }
+
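+ /* Surplus worker for an old-style group: unlock and terminate. */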
+ enable_ints_and_unlock(group, s);
+
+ thread_terminate(self);
+ /* NOTREACHED */
+}