+}
+
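+/* Count of timer calls whose entry lock could not be taken during
+ * queue expiration (lock inversion case); such calls are dequeued
+ * asynchronously and skipped.
+ */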
+uint32_t timer_queue_expire_lock_skips;
+uint64_t
+timer_queue_expire_with_options(
+ mpqueue_head_t *queue,
+ uint64_t deadline,
+ boolean_t rescan)
+{
+ timer_call_t call = NULL;
+ uint32_t tc_iterations = 0;
+ DBG("timer_queue_expire(%p,)\n", queue);
+
+ uint64_t cur_deadline = deadline;
+ timer_queue_lock_spin(queue);
+
+ while (!queue_empty(&queue->head)) {
+ /* Upon processing one or more timer calls, refresh the
+ * deadline to account for time elapsed in the callout
+ */
+ if (++tc_iterations > 1)
+ cur_deadline = mach_absolute_time();
+
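+ /* Start from the head of the queue unless a rescan pass has
+ * already advanced to a later entry.
+ */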
+ if (call == NULL)
+ call = TIMER_CALL(queue_first(&queue->head));
+
+ if (call->soft_deadline <= cur_deadline) {
+ timer_call_func_t func;
+ timer_call_param_t param0, param1;
+
+ TCOAL_DEBUG(0xDDDD0000, queue->earliest_soft_deadline, call->soft_deadline, 0, 0, 0);
+ TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
+ DECR_TIMER_EXPIRE | DBG_FUNC_NONE,
+ call,
+ call->soft_deadline,
+ CE(call)->deadline,
+ CE(call)->entry_time, 0);
+
+ /* Bit 0 of the "soft" deadline indicates that
+ * this particular timer call is rate-limited
+ * and hence shouldn't be processed before its
+ * hard deadline. When rescanning, the rate limit
+ * is not honored and the call is eligible to
+ * expire once its soft deadline has passed.
+ */
+ if ((call->soft_deadline & 0x1) &&
+ (CE(call)->deadline > cur_deadline)) {
+ if (rescan == FALSE)
+ break;
+ }
+
+ if (!simple_lock_try(&call->lock)) {
+ /* case (2b) lock inversion, dequeue and skip */
+ timer_queue_expire_lock_skips++;
+ timer_call_entry_dequeue_async(call);
+ call = NULL;
+ continue;
+ }
+
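+ /* The call is due: dequeue it and capture its function and
+ * parameters before dropping both locks, since the entry may
+ * be reused by its owner once unlocked.
+ */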
+ timer_call_entry_dequeue(call);
+
+ func = CE(call)->func;
+ param0 = CE(call)->param0;
+ param1 = CE(call)->param1;
+
+ simple_unlock(&call->lock);
+ timer_queue_unlock(queue);
+
+ TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
+ DECR_TIMER_CALLOUT | DBG_FUNC_START,
+ call, VM_KERNEL_UNSLIDE(func), param0, param1, 0);
+
+#if CONFIG_DTRACE
+ DTRACE_TMR7(callout__start, timer_call_func_t, func,
+ timer_call_param_t, param0, unsigned, call->flags,
+ 0, (call->ttd >> 32),
+ (unsigned) (call->ttd & 0xFFFFFFFF), call);
+#endif
+ /* Maintain time-to-deadline in per-processor data
+ * structure for thread wakeup deadline statistics.
+ */
+ uint64_t *ttdp = &(PROCESSOR_DATA(current_processor(), timer_call_ttd));
+ *ttdp = call->ttd;
+ (*func)(param0, param1);
+ *ttdp = 0;
+#if CONFIG_DTRACE
+ DTRACE_TMR4(callout__end, timer_call_func_t, func,
+ param0, param1, call);
+#endif
+
+ TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
+ DECR_TIMER_CALLOUT | DBG_FUNC_END,
+ call, VM_KERNEL_UNSLIDE(func), param0, param1, 0);
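+ /* The callout ran with no locks held and may have re-armed or
+ * released the entry; forget it and retake the queue lock
+ * before examining the next timer.
+ */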
+ call = NULL;
+ timer_queue_lock_spin(queue);
+ } else {
+ if (__probable(rescan == FALSE)) {
+ break;
+ } else {
+ int64_t skew = CE(call)->deadline - call->soft_deadline;
+ assert(CE(call)->deadline >= call->soft_deadline);
+
+ /* DRK: On a latency quality-of-service level change,
+ * re-sort potentially rate-limited timers. The platform
+ * layer determines which timers require
+ * this. In the absence of the per-callout
+ * synchronization requirement, a global resort could
+ * be more efficient. The re-sort effectively
+ * annuls all timer adjustments, i.e. the "soft
+ * deadline" is the sort key.
+ */
+
+ if (timer_resort_threshold(skew)) {
+ if (__probable(simple_lock_try(&call->lock))) {
+ timer_call_entry_dequeue(call);
+ timer_call_entry_enqueue_deadline(call, queue, call->soft_deadline);
+ simple_unlock(&call->lock);
+ call = NULL;
+ }
+ }
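+ /* If the entry was not re-sorted above, advance to the next
+ * timer in the queue; stop once the end is reached.
+ */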
+ if (call) {
+ call = TIMER_CALL(queue_next(qe(call)));
+ if (queue_end(&queue->head, qe(call)))
+ break;
+ }
+ }
+ }
+ }
+
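+ /* Refresh the queue's earliest soft deadline from the new head,
+ * or mark it empty (UINT64_MAX) if no timers remain.
+ */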
+ if (!queue_empty(&queue->head)) {
+ call = TIMER_CALL(queue_first(&queue->head));
+ cur_deadline = CE(call)->deadline;
+ queue->earliest_soft_deadline = call->soft_deadline;
+ } else {
+ queue->earliest_soft_deadline = cur_deadline = UINT64_MAX;
+ }
+
+ timer_queue_unlock(queue);