+static void
+thread_call_delayed_timer_rescan(timer_call_param_t p0, __unused timer_call_param_t p1)
+{
+	thread_call_t call;
+	thread_call_group_t group = p0;
+	uint64_t timestamp;
+	boolean_t istate;
+
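+	/* Rescan the delayed queue with interrupts disabled and the thread-call lock held. */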
+	istate = ml_set_interrupts_enabled(FALSE);
+	thread_call_lock_spin();
+
+	assert(ml_timer_forced_evaluation() == TRUE);
+	timestamp = mach_absolute_time();
+
+	call = TC(queue_first(&group->delayed_queue));
+
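+	/*
+	 * Walk the delayed queue: calls whose soft deadline has already
+	 * passed are promoted to the pending queue; the remainder are
+	 * checked below for re-sorting.
+	 */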
+	while (!queue_end(&group->delayed_queue, qe(call))) {
+		if (call->tc_soft_deadline <= timestamp) {
+			_pending_call_enqueue(call, group);
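+			/* The call has moved to the pending queue; resume from the delayed queue's new head. */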
+			call = TC(queue_first(&group->delayed_queue));
+		} else {
+			assert(call->tc_call.deadline >= call->tc_soft_deadline);
+			uint64_t skew = call->tc_call.deadline - call->tc_soft_deadline;
+			/*
+			 * On a latency quality-of-service level change,
+			 * re-sort potentially rate-limited callouts. The
+			 * platform layer determines which timers require this.
+			 */
+			if (timer_resort_threshold(skew)) {
+				_call_dequeue(call, group);
+				_delayed_call_enqueue(call, group, call->tc_soft_deadline);
+			}
+			call = TC(queue_next(qe(call)));
+		}
+	}
+
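+	/* Re-arm the delayed timer for the new head of the queue, if any. */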
+	if (!queue_empty(&group->delayed_queue))
+		_set_delayed_call_timer(TC(queue_first(&group->delayed_queue)), group);
+	thread_call_unlock();
+	ml_set_interrupts_enabled(istate);
+}
+
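+/*
+ * Rescan the delayed queue of every thread-call group (one per priority
+ * level). Used on the forced timer re-evaluation path, per the assert in
+ * thread_call_delayed_timer_rescan().
+ */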
+void
+thread_call_delayed_timer_rescan_all(void)
+{
+	thread_call_delayed_timer_rescan((timer_call_param_t)&thread_call_groups[THREAD_CALL_PRIORITY_LOW], NULL);
+	thread_call_delayed_timer_rescan((timer_call_param_t)&thread_call_groups[THREAD_CALL_PRIORITY_USER], NULL);
+	thread_call_delayed_timer_rescan((timer_call_param_t)&thread_call_groups[THREAD_CALL_PRIORITY_KERNEL], NULL);
+	thread_call_delayed_timer_rescan((timer_call_param_t)&thread_call_groups[THREAD_CALL_PRIORITY_HIGH], NULL);
+}
+