+/*
+ * Arm the per-processor quantum timer to fire `call` at `deadline`.
+ *
+ * Must be called with interrupts disabled (asserted below); the call is
+ * tagged TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL unconditionally.
+ * If `deadline` has already passed relative to `ctime`, it is pushed
+ * forward by past_deadline_timer_handle() before being programmed.
+ *
+ * Parameters:
+ *   call     - timer call to arm; call->call_entry.func must be set.
+ *   param1   - opaque argument stored into the call entry for the handler.
+ *   deadline - absolute-time deadline to program.
+ *   ctime    - caller-supplied current absolute time ("now").
+ *
+ * Returns true unconditionally.
+ */
+boolean_t
+timer_call_quantum_timer_enter(
+ timer_call_t call,
+ timer_call_param_t param1,
+ uint64_t deadline,
+ uint64_t ctime)
+{
+ assert(call->call_entry.func != NULL);
+ assert(ml_get_interrupts_enabled() == FALSE);
+
+ uint32_t flags = TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL;
+
+ TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_ENTER | DBG_FUNC_START,
+ VM_KERNEL_UNSLIDE_OR_PERM(call),
+ VM_KERNEL_ADDRHIDE(param1), deadline,
+ flags, 0);
+
+ /* Deadline already in the past: let the handler pick a new one. */
+ if (__improbable(deadline < ctime)) {
+ deadline = past_deadline_timer_handle(deadline, ctime);
+ }
+
+ /* Time-to-deadline, recorded for tracing/DTrace diagnostics. */
+ uint64_t ttd = deadline - ctime;
+#if CONFIG_DTRACE
+ DTRACE_TMR7(callout__create, timer_call_func_t, TCE(call)->func,
+ timer_call_param_t, TCE(call)->param0, uint32_t, flags, 0,
+ (ttd >> 32), (unsigned) (ttd & 0xFFFFFFFF), call);
+#endif
+
+ /* Program the hardware quantum timer, then record the soft state. */
+ quantum_timer_set_deadline(deadline);
+ TCE(call)->deadline = deadline;
+ TCE(call)->param1 = param1;
+ call->ttd = ttd;
+ call->flags = flags;
+
+#if TIMER_TRACE
+ TCE(call)->entry_time = ctime;
+#endif
+
+ TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_ENTER | DBG_FUNC_END,
+ VM_KERNEL_UNSLIDE_OR_PERM(call),
+ 1, deadline, 0, 0);
+
+ return true;
+}
+
+
+/*
+ * Disarm the per-processor quantum timer associated with `call`.
+ *
+ * Must be called with interrupts disabled. Clears the call entry's
+ * soft deadline and programs the hardware quantum deadline to 0.
+ *
+ * Returns true unconditionally.
+ */
+boolean_t
+timer_call_quantum_timer_cancel(
+ timer_call_t call)
+{
+ assert(ml_get_interrupts_enabled() == FALSE);
+
+ TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
+ DECR_TIMER_CANCEL | DBG_FUNC_START,
+ VM_KERNEL_UNSLIDE_OR_PERM(call), TCE(call)->deadline,
+ 0, call->flags, 0);
+
+ /*
+ * Snapshot the armed deadline before clearing it: the DBG_FUNC_END
+ * trace below reports time-to-deadline and deadline-since-entry
+ * deltas, which would underflow to bogus values if computed from
+ * the already-zeroed field.
+ */
+ uint64_t cancel_deadline = TCE(call)->deadline;
+
+ TCE(call)->deadline = 0;
+ quantum_timer_set_deadline(0);
+
+ TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
+ DECR_TIMER_CANCEL | DBG_FUNC_END,
+ VM_KERNEL_UNSLIDE_OR_PERM(call), 0,
+ cancel_deadline - mach_absolute_time(),
+ cancel_deadline - TCE(call)->entry_time, 0);
+
+#if CONFIG_DTRACE
+ DTRACE_TMR6(callout__cancel, timer_call_func_t, TCE(call)->func,
+ timer_call_param_t, TCE(call)->param0, uint32_t, call->flags, 0,
+ (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF));
+#endif
+
+ return true;
+}
+