#include <sys/kdebug.h>
-#if CONFIG_DTRACE && (DEVELOPMENT || DEBUG )
+#if CONFIG_DTRACE
#include <mach/sdt.h>
#endif
#define MPQUEUE(x) ((mpqueue_head_t *)(x))
#define TIMER_CALL(x) ((timer_call_t)(x))
+
+/* Statistics for timers armed with a deadline that had already passed
+ * by the time they were entered (see the past-deadline clamp in the
+ * enter path below). All deltas are in absolute-time units.
+ */
+uint64_t past_deadline_timers;
+uint64_t past_deadline_deltas;
+uint64_t past_deadline_longest;
+/* Seeded to the maximum value so the first observed delta wins. */
+uint64_t past_deadline_shortest = ~0ULL;
+/* Late timers are re-armed this far past "now": 10 microseconds. */
+enum {PAST_DEADLINE_TIMER_ADJUSTMENT_NS = 10 * 1000};
+
+/* PAST_DEADLINE_TIMER_ADJUSTMENT_NS converted to absolutetime at init. */
+uint64_t past_deadline_timer_adjustment;
+
+/* Forward declaration of the shared enter path. */
static boolean_t timer_call_enter_internal(timer_call_t call, timer_call_param_t param1, uint64_t deadline, uint32_t flags);
+/* NOTE(review): presumably gates deadline coalescing ("slop") — confirm
+ * against the slop computation, which is outside this hunk.
+ */
boolean_t mach_timer_coalescing_enabled = TRUE;
lck_attr_setdefault(&timer_call_lck_attr);
lck_grp_attr_setdefault(&timer_call_lck_grp_attr);
lck_grp_init(&timer_call_lck_grp, "timer_call", &timer_call_lck_grp_attr);
+ nanotime_to_absolutetime(0, PAST_DEADLINE_TIMER_ADJUSTMENT_NS, &past_deadline_timer_adjustment);
}
deadline += slop;
}
+#if defined(__i386__) || defined(__x86_64__)
+	uint64_t ctime = mach_absolute_time();
+	if (__improbable(deadline < ctime)) {
+		uint64_t delta = (ctime - deadline);
+
+		/* The requested deadline is already in the past: record
+		 * statistics, then nudge the deadline slightly into the
+		 * future so the timer still fires promptly.
+		 */
+		past_deadline_timers++;
+		past_deadline_deltas += delta;
+		/* BUGFIX: record the longest *lateness* (delta), not the raw
+		 * absolute deadline, so longest/shortest use the same units
+		 * as past_deadline_deltas.
+		 */
+		if (delta > past_deadline_longest)
+			past_deadline_longest = delta;
+		if (delta < past_deadline_shortest)
+			past_deadline_shortest = delta;
+
+		deadline = ctime + past_deadline_timer_adjustment;
+		call->soft_deadline = deadline;
+	}
+#endif
+	/* Cache time-to-deadline for the execute path and DTrace probes.
+	 * NOTE(review): `ctime` is declared only inside the x86 #if block
+	 * above, yet used unconditionally here — confirm this file builds
+	 * x86-only, or hoist the declaration out of the #if.
+	 */
+	call->ttd = call->soft_deadline - ctime;
+
+#if CONFIG_DTRACE
+	/* Probe args 4-6: coalescing slop applied, and ttd split into
+	 * high/low 32-bit halves.
+	 */
+	DTRACE_TMR6(callout__create, timer_call_func_t, CE(call)->func,
+	timer_call_param_t, CE(call)->param0, uint32_t, call->flags,
+	(deadline - call->soft_deadline),
+	(call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF));
+#endif
+
+	/* Pick the timer queue owning this deadline and enqueue the call. */
	queue = timer_queue_assign(deadline);
	old_queue = timer_call_enqueue_deadline_unlocked(call, queue, deadline);
	}
	splx(s);
+#if CONFIG_DTRACE
+	/* Fire the cancel probe; call->ttd still holds the time-to-deadline
+	 * captured when the timer was armed.
+	 */
+	DTRACE_TMR6(callout__cancel, timer_call_func_t, CE(call)->func,
+	timer_call_param_t, CE(call)->param0, uint32_t, call->flags, 0,
+	(call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF));
+#endif
+
+	/* Presumably non-NULL old_queue means the call was found queued and
+	 * removed — i.e. the cancel succeeded. TODO confirm against the
+	 * dequeue helper, which is outside this hunk.
+	 */
	return (old_queue != NULL);
}
	simple_unlock(&call->lock);
	timer_call_unlock(queue);
+	/* Switch to the IST kdebug macro and unslide the function pointer
+	 * so the tracepoint does not leak the kernel slide.
+	 */
-	KERNEL_DEBUG_CONSTANT(DECR_TIMER_CALLOUT | DBG_FUNC_START,
-		func,
-		param0,
-		param1, 0, 0);
+	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+		DECR_TIMER_CALLOUT | DBG_FUNC_START,
+		VM_KERNEL_UNSLIDE(func), param0, param1, 0, 0);
-#if CONFIG_DTRACE && (DEVELOPMENT || DEBUG )
-	DTRACE_TMR3(callout__start, timer_call_func_t, func,
-		timer_call_param_t, param0,
-		timer_call_param_t, param1);
+#if CONFIG_DTRACE
+	/* Probe widened from TMR3 to TMR6 and no longer restricted to
+	 * DEVELOPMENT/DEBUG builds; extra args carry flags and ttd halves.
+	 */
+	DTRACE_TMR6(callout__start, timer_call_func_t, func,
+	timer_call_param_t, param0, unsigned, call->flags,
+	0, (call->ttd >> 32),
+	(unsigned) (call->ttd & 0xFFFFFFFF));
#endif
+	/* Maintain time-to-deadline in per-processor data
+	 * structure for thread wakeup deadline statistics.
+	 */
+	uint64_t *ttdp = &(PROCESSOR_DATA(current_processor(), timer_call_ttd));
+	*ttdp = call->ttd;
+	/* Invoke the callout with locks dropped, then clear the stat. */
	(*func)(param0, param1);
+	*ttdp = 0;
-#if CONFIG_DTRACE && (DEVELOPMENT || DEBUG )
-	DTRACE_TMR3(callout__end, timer_call_func_t, func,
-		timer_call_param_t, param0,
-		timer_call_param_t, param1);
+#if CONFIG_DTRACE
+	DTRACE_TMR3(callout__end, timer_call_func_t, func,
+	timer_call_param_t, param0, timer_call_param_t,
+	param1);
#endif
-	KERNEL_DEBUG_CONSTANT(DECR_TIMER_CALLOUT | DBG_FUNC_END,
-		func,
-		param0,
-		param1, 0, 0);
+	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+		DECR_TIMER_CALLOUT | DBG_FUNC_END,
+		VM_KERNEL_UNSLIDE(func), param0, param1, 0, 0);
	timer_call_lock_spin(queue);
}