+void
+thread_call_delayed_timer_rescan_all(void)
+{
+ for (int i = 0; i < THREAD_CALL_INDEX_MAX; i++) {
+ for (thread_call_flavor_t flavor = 0; flavor < TCF_COUNT; flavor++) {
+ thread_call_delayed_timer_rescan(&thread_call_groups[i], flavor);
+ }
+ }
+}
+
+/*
+ * Timer callback to tell a thread to terminate if
+ * we have an excess of threads and at least one has been
+ * idle for a long time.
+ */
+static void
+thread_call_dealloc_timer(
+ timer_call_param_t p0,
+ __unused timer_call_param_t p1)
+{
+ thread_call_group_t group = (thread_call_group_t)p0;
+ uint64_t now;
+ kern_return_t res;
+ bool terminated = false;
+
+ thread_call_lock_spin(group);
+
+ assert(group->tcg_flags & TCG_DEALLOC_ACTIVE);
+
+ now = mach_absolute_time();
+
+ if (group->idle_count > 0) {
+ if (now > group->idle_timestamp + thread_call_dealloc_interval_abs) {
+ terminated = true;
+ group->idle_count--;
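+ /*
+ * The woken idle thread sees a wait result of THREAD_INTERRUPTED,
+ * which it takes as the signal to terminate itself rather than to
+ * look for more work.
+ */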
+ res = waitq_wakeup64_one(&group->idle_waitq, CAST_EVENT64_T(group),
+ THREAD_INTERRUPTED, WAITQ_ALL_PRIORITIES);
+ if (res != KERN_SUCCESS) {
+ panic("Unable to wake up idle thread for termination?");
+ }
+ }
+ }
+
+ group->tcg_flags &= ~TCG_DEALLOC_ACTIVE;
+
+ /*
+ * If we still have an excess of threads, schedule another
+ * invocation of this function.
+ */
+ if (group->idle_count > 0 && (group->idle_count + group->active_count > group->target_thread_count)) {
+ /*
+ * If we killed someone just now, push out the
+ * next deadline.
+ */
+ if (terminated) {
+ group->idle_timestamp = now;
+ }
+
+ thread_call_start_deallocate_timer(group);
+ }
+
+ thread_call_unlock(group);
+}
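+
+/*
+ * Note: thread_call_dealloc_interval_abs above is an idle-timeout threshold in
+ * mach absolute time units.  A sketch of how a nanosecond interval would be
+ * converted at initialization (the '_ns' name here is hypothetical):
+ *
+ *	uint64_t abstime;
+ *	nanoseconds_to_absolutetime(thread_call_dealloc_interval_ns, &abstime);
+ *	thread_call_dealloc_interval_abs = abstime;
+ */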
+
+/*
+ * Wait for the invocation of the thread call to complete.
+ * We know there's only one in flight because of the 'once' flag.
+ *
+ * If a subsequent invocation comes in before we wake up, that's OK.
+ *
+ * TODO: Here is where we will add priority inheritance to the thread executing
+ * the thread call in case it's lower priority than the current thread
+ * <rdar://problem/30321792> Priority inheritance for thread_call_wait_once
+ *
+ * Called with the thread call lock held; returns with it unlocked.
+ * This lets us avoid a spurious lock take/drop after waking up from thread_block.
+ *
+ * This thread could be a thread call thread itself, blocking and therefore making a
+ * sched_call upcall into the thread call subsystem, needing the group lock.
+ * However, we're saved from deadlock because the 'block' upcall is made in
+ * thread_block, not in assert_wait.
+ */
+static bool
+thread_call_wait_once_locked(thread_call_t call, spl_t s)
+{
+ assert(call->tc_flags & THREAD_CALL_ALLOC);
+ assert(call->tc_flags & THREAD_CALL_ONCE);
+
+ thread_call_group_t group = thread_call_get_group(call);
+
+ if ((call->tc_flags & THREAD_CALL_RUNNING) == 0) {
+ enable_ints_and_unlock(group, s);
+ return false;
+ }
+
+ /* call is running, so we have to wait for it */
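+ /* THREAD_CALL_WAIT tells the thread that finishes this invocation to wake any waiters */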
+ call->tc_flags |= THREAD_CALL_WAIT;
+
+ wait_result_t res = waitq_assert_wait64(&group->waiters_waitq, CAST_EVENT64_T(call), THREAD_UNINT, 0);
+ if (res != THREAD_WAITING) {
+ panic("Unable to assert wait: %d", res);
+ }
+
+ enable_ints_and_unlock(group, s);
+
+ res = thread_block(THREAD_CONTINUE_NULL);
+ if (res != THREAD_AWAKENED) {
+ panic("Awoken with %d?", res);
+ }
+
+ /* returns unlocked */
+ return true;
+}
+
+/*
+ * Wait for an in-flight invocation to complete.
+ * Does NOT try to cancel, so the client doesn't need to hold their
+ * own lock while calling this function.
+ *
+ * Returns whether or not it had to wait.
+ *
+ * Only works for THREAD_CALL_ONCE calls.
+ */
+boolean_t
+thread_call_wait_once(thread_call_t call)
+{
+ if ((call->tc_flags & THREAD_CALL_ALLOC) == 0) {
+ panic("thread_call_wait_once: can't wait on thread call whose storage I don't own");
+ }
+
+ if ((call->tc_flags & THREAD_CALL_ONCE) == 0) {
+ panic("thread_call_wait_once: can't wait_once on a non-once call");
+ }
+
+ if (!ml_get_interrupts_enabled()) {
+ panic("unsafe thread_call_wait_once");
+ }
+
+ thread_t self = current_thread();
+
+ if ((thread_get_tag_internal(self) & THREAD_TAG_CALLOUT) &&
+ self->thc_state && self->thc_state->thc_call == call) {
+ panic("thread_call_wait_once: deadlock waiting on self from inside call: %p to function %p",
+ call, call->tc_func);
+ }
+
+ thread_call_group_t group = thread_call_get_group(call);
+
+ spl_t s = disable_ints_and_lock(group);
+
+ bool waited = thread_call_wait_once_locked(call, s);
+ /* thread call lock unlocked */
+
+ return waited;
+}
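+
+/*
+ * Sketch of typical client usage of a ONCE thread call (assumes the KPI declared
+ * in kern/thread_call.h; my_func and my_ctx are placeholder names):
+ *
+ *	thread_call_t tc = thread_call_allocate_with_options(my_func, my_ctx,
+ *	    THREAD_CALL_PRIORITY_KERNEL, THREAD_CALL_OPTIONS_ONCE);
+ *	thread_call_enter(tc);
+ *	...
+ *	(void) thread_call_wait_once(tc);	// blocks only while an invocation is in flight
+ *	thread_call_free(tc);
+ */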
+
+
+/*
+ * Wait for all requested invocations of a thread call prior to now
+ * to finish. Can only be invoked on thread calls whose storage we manage.
+ * Just waits for the finish count to catch up to the submit count we find
+ * at the beginning of our wait.
+ *
+ * Called with thread_call_lock held. Returns with lock released.
+ */
+static void
+thread_call_wait_locked(thread_call_t call, spl_t s)
+{
+ thread_call_group_t group = thread_call_get_group(call);
+
+ assert(call->tc_flags & THREAD_CALL_ALLOC);
+
+ uint64_t submit_count = call->tc_submit_count;
+
+ while (call->tc_finish_count < submit_count) {
+ call->tc_flags |= THREAD_CALL_WAIT;
+
+ wait_result_t res = waitq_assert_wait64(&group->waiters_waitq,
+ CAST_EVENT64_T(call), THREAD_UNINT, 0);
+
+ if (res != THREAD_WAITING) {
+ panic("Unable to assert wait: %d", res);
+ }
+
+ enable_ints_and_unlock(group, s);
+
+ res = thread_block(THREAD_CONTINUE_NULL);
+ if (res != THREAD_AWAKENED) {
+ panic("Awoken with %d?", res);
+ }
+
+ s = disable_ints_and_lock(group);
+ }
+
+ enable_ints_and_unlock(group, s);
+}
+
+/*
+ * Determine whether a thread call is either on a queue or
+ * currently being executed.
+ */
+boolean_t
+thread_call_isactive(thread_call_t call)
+{
+ thread_call_group_t group = thread_call_get_group(call);
+
+ spl_t s = disable_ints_and_lock(group);
+ boolean_t active = (call->tc_submit_count > call->tc_finish_count);
+ enable_ints_and_unlock(group, s);
+
+ return active;
+}
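+
+/*
+ * For example (sketch), a teardown path that owns the call's storage might use
+ * this together with thread_call_cancel_wait():
+ *
+ *	thread_call_cancel_wait(tc);		// cancel pending, wait out any running invocation
+ *	if (!thread_call_isactive(tc)) {	// nothing re-entered it in the meantime
+ *		thread_call_free(tc);
+ *	}
+ */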
+
+/*
+ * adjust_cont_time_thread_calls
+ * On wake, re-arm the delayed call timer for the continuous-time flavor of each thread call group.
+ */
+void
+adjust_cont_time_thread_calls(void)
+{
+ for (int i = 0; i < THREAD_CALL_INDEX_MAX; i++) {
+ thread_call_group_t group = &thread_call_groups[i];
+ spl_t s = disable_ints_and_lock(group);
+
+ /* only the continuous timers need to be re-armed */
+
+ _arm_delayed_call_timer(NULL, group, TCF_CONTINUOUS);
+ enable_ints_and_unlock(group, s);
+ }