+ if (!simple_lock_try(&call->lock)) {
+ /* case (2b) lock inversion, dequeue and skip */
+ timer_queue_expire_lock_skips++;
+ (void) remque(qe(call));
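+ /* signal the racing holder of call->lock that the entry has already been unlinked */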
+ call->async_dequeue = TRUE;
+ continue;
+ }
+
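+ /* Both the queue lock and the entry lock are held here: unlink the
+  * entry and snapshot its callout function and parameters before the
+  * locks are dropped.
+  */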
+ timer_call_entry_dequeue(call);
+
+ func = CE(call)->func;
+ param0 = CE(call)->param0;
+ param1 = CE(call)->param1;
+
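+ /* Drop both locks before invoking the callout so the handler can
+  * re-enter timer_call routines (e.g. to rearm or cancel) without
+  * deadlocking.
+  */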
+ simple_unlock(&call->lock);
+ timer_call_unlock(queue);
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ DECR_TIMER_CALLOUT | DBG_FUNC_START,
+ VM_KERNEL_UNSLIDE(func), param0, param1, 0, 0);
+
+#if CONFIG_DTRACE
+ DTRACE_TMR6(callout__start, timer_call_func_t, func,
+ timer_call_param_t, param0, unsigned, call->flags,
+ 0, (call->ttd >> 32),
+ (unsigned) (call->ttd & 0xFFFFFFFF));
+#endif
+
+ /* Maintain time-to-deadline in per-processor data
+ * structure for thread wakeup deadline statistics.
+ */
+ uint64_t *ttdp = &(PROCESSOR_DATA(current_processor(), timer_call_ttd));
+ *ttdp = call->ttd;
+ (*func)(param0, param1);
+ *ttdp = 0;
+
+#if CONFIG_DTRACE
+ DTRACE_TMR3(callout__end, timer_call_func_t, func,
+ timer_call_param_t, param0, timer_call_param_t,
+ param1);
+#endif