+ mpqueue_head_t *old_queue = MPQUEUE(TCE(entry)->queue);
+ if (old_queue) {
+ old_queue->count--;
+ (void) remque(qe(entry));
+ entry->async_dequeue = TRUE;
+ }
+ return;
+}
+
+#if TIMER_ASSERT
+unsigned timer_call_enqueue_deadline_unlocked_async1;
+unsigned timer_call_enqueue_deadline_unlocked_async2;
+#endif
+/*
+ * Enqueue (or re-queue) a timer call with the given deadline on the
+ * specified queue and return its previous queue (NULL if the call was
+ * not queued).
+ * Assumes call_entry and queues unlocked, interrupts disabled.
+ */
+__inline__ mpqueue_head_t *
+timer_call_enqueue_deadline_unlocked(
+ timer_call_t call,
+ mpqueue_head_t *queue,
+ uint64_t deadline,
+ uint64_t soft_deadline,
+ uint64_t ttd,
+ timer_call_param_t param1,
+ uint32_t callout_flags)
+{
+ call_entry_t entry = TCE(call);
+ mpqueue_head_t *old_queue;
+
+ DBG("timer_call_enqueue_deadline_unlocked(%p,%p,)\n", call, queue);
+
+ simple_lock(&call->lock);
+
+ old_queue = MPQUEUE(entry->queue);
+
+ if (old_queue != NULL) {
+ timer_queue_lock_spin(old_queue);
+ if (call->async_dequeue) {
+ /* collision (1c): timer already dequeued, clear flag */
+#if TIMER_ASSERT
+ TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
+ DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
+ VM_KERNEL_UNSLIDE_OR_PERM(call),
+ call->async_dequeue,
+ VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue),
+ 0x1c, 0);
+ timer_call_enqueue_deadline_unlocked_async1++;
+#endif
+ call->async_dequeue = FALSE;
+ entry->queue = NULL;
+ } else if (old_queue != queue) {
+ timer_call_entry_dequeue(call);
+#if TIMER_ASSERT
+ timer_call_enqueue_deadline_unlocked_async2++;
+#endif
+ }
+ if (old_queue == timer_longterm_queue)
+ timer_longterm_dequeued_locked(call);
+ if (old_queue != queue) {
+ timer_queue_unlock(old_queue);
+ timer_queue_lock_spin(queue);
+ }
+ } else {
+ timer_queue_lock_spin(queue);
+ }
+
+ call->soft_deadline = soft_deadline;
+ call->flags = callout_flags;
+ TCE(call)->param1 = param1;
+ call->ttd = ttd;
+
+ timer_call_entry_enqueue_deadline(call, queue, deadline);
+ timer_queue_unlock(queue);
+ simple_unlock(&call->lock);
+
+ return (old_queue);
+}
+
+#if TIMER_ASSERT
+unsigned timer_call_dequeue_unlocked_async1;
+unsigned timer_call_dequeue_unlocked_async2;
+#endif
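+/*
+ * Remove a timer call from whichever queue it is on (if any), resolving
+ * any pending async dequeue, and return that queue (NULL if the call was
+ * not queued).  Locking preconditions mirror
+ * timer_call_enqueue_deadline_unlocked() above.
+ */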
+mpqueue_head_t *
+timer_call_dequeue_unlocked(
+ timer_call_t call)
+{
+ call_entry_t entry = TCE(call);
+ mpqueue_head_t *old_queue;
+
+ DBG("timer_call_dequeue_unlocked(%p)\n", call);
+
+ simple_lock(&call->lock);
+ old_queue = MPQUEUE(entry->queue);
+#if TIMER_ASSERT
+ TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
+ DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
+ VM_KERNEL_UNSLIDE_OR_PERM(call),
+ call->async_dequeue,
+ VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue),
+ 0, 0);
+#endif
+ if (old_queue != NULL) {
+ timer_queue_lock_spin(old_queue);
+ if (call->async_dequeue) {
+ /* collision (1c): timer already dequeued, clear flag */
+#if TIMER_ASSERT
+ TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
+ DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
+ VM_KERNEL_UNSLIDE_OR_PERM(call),
+ call->async_dequeue,
+ VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue),
+ 0x1c, 0);
+ timer_call_dequeue_unlocked_async1++;
+#endif
+ call->async_dequeue = FALSE;
+ entry->queue = NULL;
+ } else {
+ timer_call_entry_dequeue(call);
+ }
+ if (old_queue == timer_longterm_queue)
+ timer_longterm_dequeued_locked(call);
+ timer_queue_unlock(old_queue);
+ }
+ simple_unlock(&call->lock);
+ return (old_queue);
+}
+
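+/*
+ * Account for a timer whose deadline has already passed: update the
+ * past-deadline statistics and return a replacement deadline pushed
+ * slightly into the future by past_deadline_timer_adjustment.
+ */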
+static uint64_t
+past_deadline_timer_handle(uint64_t deadline, uint64_t ctime)
+{
+ uint64_t delta = (ctime - deadline);
+
+ past_deadline_timers++;
+ past_deadline_deltas += delta;
+ if (delta > past_deadline_longest)
+ past_deadline_longest = delta;
+ if (delta < past_deadline_shortest)
+ past_deadline_shortest = delta;
+
+ return (ctime + past_deadline_timer_adjustment);
+}
+
+/*
+ * Timer call entry locking model
+ * ==============================
+ *
+ * Timer call entries are linked on per-cpu timer queues which are protected
+ * by the queue lock and the call entry lock. The locking protocol is:
+ *
+ * 0) The canonical locking order is timer call entry followed by queue.
+ *
+ * 1) With only the entry lock held, entry.queue is valid:
+ * 1a) NULL: the entry is not queued, or
+ * 1b) non-NULL: this queue must be locked before the entry is modified.
+ * After locking the queue, the call.async_dequeue flag must be checked:
+ * 1c) TRUE: the entry was removed from the queue by another thread
+ * and we must NULL the entry.queue and reset this flag, or
+ * 1d) FALSE (i.e. still queued): the entry can be manipulated.
+ *
+ * 2) If a queue lock is obtained first, the queue is stable:
+ * 2a) If a try-lock of a queued entry succeeds, the call can be operated on
+ * and dequeued.
+ * 2b) If a try-lock fails, it indicates that another thread is attempting
+ * to change the entry and move it to a different position in this queue
+ * or to a different queue. The entry can be dequeued, but it should not
+ * otherwise be operated upon since it is being changed. Furthermore, we
+ * don't NULL the entry.queue pointer (it is protected by the entry lock
+ * we don't own). Instead, we set the async_dequeue flag -- see (1c) and
+ * the sketch following this comment.
+ * 2c) Same as 2b but occurring when a longterm timer is matured.
+ * 3) A callout's parameters (deadline, flags, param1, soft deadline, &c.)
+ * should be manipulated with the appropriate timer queue lock held,
+ * so that queue traversals cannot observe inconsistent updates to an
+ * in-flight callout.
+ */
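+
+/*
+ * Illustrative sketch (comment only, not compiled) of the queue-lock-first
+ * path described in cases (2a)/(2b) above, for a single entry `call' found
+ * on a locked `queue'.  The canonical order (entry lock, then queue lock)
+ * cannot be followed here, so the entry lock is only try-locked.  This
+ * mirrors the async-dequeue fragment earlier in this file and is a
+ * schematic of the protocol, not a drop-in implementation.
+ *
+ *	if (simple_lock_try(&call->lock)) {
+ *		// (2a) entry lock acquired: safe to operate on and dequeue.
+ *		timer_call_entry_dequeue(call);
+ *		simple_unlock(&call->lock);
+ *	} else {
+ *		// (2b) another thread owns the entry lock: unlink the entry
+ *		// from this queue, but leave entry.queue untouched and set
+ *		// async_dequeue so the owner resolves the collision per (1c).
+ *		queue->count--;
+ *		(void) remque(qe(call));
+ *		call->async_dequeue = TRUE;
+ *	}
+ */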
+
+/*
+ * Inlines timer_call_entry_dequeue() and timer_call_entry_enqueue_deadline()
+ * cast between pointer types (mpqueue_head_t *) and (queue_t) so that
+ * we can use the call_entry_dequeue() and call_entry_enqueue_deadline()
+ * methods to operate on timer_call structs as if they are call_entry structs.
+ * These structures are identical except for their queue head pointer fields.
+ *
+ * In the debug case, we assert that the timer call locking protocol
+ * is being obeyed.
+ */
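+
+/*
+ * A minimal sketch (comment only, not compiled) illustrating the casts
+ * described above for the dequeue inline; the enqueue-deadline variant is
+ * analogous, and the debug case additionally asserts that the locking
+ * protocol is obeyed.
+ *
+ *	static __inline__ mpqueue_head_t *
+ *	timer_call_entry_dequeue(timer_call_t entry)
+ *	{
+ *		mpqueue_head_t *old_mpqueue = MPQUEUE(TCE(entry)->queue);
+ *
+ *		// call_entry_dequeue() sees only the embedded call_entry;
+ *		// MPQUEUE() casts the stored queue pointer back to the
+ *		// mpqueue head so its count can be maintained here.
+ *		// Callers only invoke this on a queued entry, as in the
+ *		// functions above, so old_mpqueue is non-NULL.
+ *		call_entry_dequeue(TCE(entry));
+ *		old_mpqueue->count--;
+ *
+ *		return (old_mpqueue);
+ *	}
+ */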
+
+static boolean_t
+timer_call_enter_internal(
+ timer_call_t call,
+ timer_call_param_t param1,
+ uint64_t deadline,
+ uint64_t leeway,
+ uint32_t flags,
+ boolean_t ratelimited)
+{
+ mpqueue_head_t *queue = NULL;
+ mpqueue_head_t *old_queue;