apple/xnu.git: osfmk/kern/timer_call.c (xnu-1699.24.23)

diff --git a/osfmk/kern/timer_call.c b/osfmk/kern/timer_call.c
index e1e4bb649f3d43cb0f8afdc576e75498d278ee5e..83eb0e43ac25286868ce3c3eef04c2a7e8b79b2e 100644
--- a/osfmk/kern/timer_call.c
+++ b/osfmk/kern/timer_call.c
 /*
- * Copyright (c) 1993-1995, 1999-2000 Apple Computer, Inc.
- * All rights reserved.
+ * Copyright (c) 1993-2008 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  * 
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  * 
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * Timer interrupt callout module.
- *
- * HISTORY
- *
- * 20 December 2000 (debo)
- *     Created.
  */
 
 #include <mach/mach_types.h>
 
 #include <kern/clock.h>
-
+#include <kern/processor.h>
+#include <kern/etimer.h>
 #include <kern/timer_call.h>
+#include <kern/timer_queue.h>
 #include <kern/call_entry.h>
 
-decl_simple_lock_data(static,timer_call_lock)
+#include <sys/kdebug.h>
 
-static
-queue_head_t
-       delayed_call_queues[NCPUS];
+#if CONFIG_DTRACE && (DEVELOPMENT || DEBUG)
+#include <mach/sdt.h>
+#endif
 
-static struct {
-       int             pending_num,
-                       pending_hiwat;
-       int             delayed_num,
-                       delayed_hiwat;
-} timer_calls;
 
-static boolean_t
-       timer_call_initialized = FALSE;
+#if DEBUG
+#define TIMER_ASSERT   1
+#endif
 
-static void
-timer_call_interrupt(
-       AbsoluteTime            timestamp);
+//#define TIMER_ASSERT 1
+//#define TIMER_DBG    1
 
-#define qe(x)          ((queue_entry_t)(x))
-#define TC(x)          ((timer_call_t)(x))
+#if TIMER_DBG
+#define DBG(x...) kprintf("DBG: " x);
+#else
+#define DBG(x...)
+#endif
 
-void
-timer_call_initialize(void)
-{
-       spl_t                           s;
-       int                                     i;
+lck_grp_t               timer_call_lck_grp;
+lck_attr_t              timer_call_lck_attr;
+lck_grp_attr_t          timer_call_lck_grp_attr;
 
-       if (timer_call_initialized)
-               panic("timer_call_initialize");
 
-       simple_lock_init(&timer_call_lock, ETAP_MISC_TIMER);
+#define timer_call_lock_spin(queue)            \
+       lck_mtx_lock_spin_always(&queue->lock_data)
 
-       s = splclock();
-       simple_lock(&timer_call_lock);
+#define timer_call_unlock(queue)               \
+       lck_mtx_unlock_always(&queue->lock_data)
 
-       for (i = 0; i < NCPUS; i++)
-               queue_init(&delayed_call_queues[i]);
 
-       clock_set_timer_func((clock_timer_func_t)timer_call_interrupt);
+#define QUEUE(x)       ((queue_t)(x))
+#define MPQUEUE(x)     ((mpqueue_head_t *)(x))
+#define TIMER_CALL(x)  ((timer_call_t)(x))
 
-       timer_call_initialized = TRUE;
+static boolean_t timer_call_enter_internal(timer_call_t call, timer_call_param_t param1, uint64_t deadline, uint32_t flags);
+boolean_t      mach_timer_coalescing_enabled = TRUE;
+
+mpqueue_head_t *timer_call_enqueue_deadline_unlocked(
+                       timer_call_t            call,
+                       mpqueue_head_t          *queue,
+                       uint64_t                deadline);
+
+mpqueue_head_t *timer_call_dequeue_unlocked(
+                       timer_call_t            call);
 
-       simple_unlock(&timer_call_lock);
-       splx(s);
-}
 
 void
-timer_call_setup(
-       timer_call_t                    call,
-       timer_call_func_t               func,
-       timer_call_param_t              param0)
+timer_call_initialize(void)
 {
-       call_entry_setup(call, func, param0);
+       lck_attr_setdefault(&timer_call_lck_attr);
+       lck_grp_attr_setdefault(&timer_call_lck_grp_attr);
+       lck_grp_init(&timer_call_lck_grp, "timer_call", &timer_call_lck_grp_attr);
 }
 
-static __inline__
+
 void
-_delayed_call_enqueue(
-       queue_t                                 queue,
-       timer_call_t                    call)
+timer_call_initialize_queue(mpqueue_head_t *queue)
 {
-       timer_call_t    current;
-
-       current = TC(queue_first(queue));
+       DBG("timer_call_initialize_queue(%p)\n", queue);
+       mpqueue_init(queue, &timer_call_lck_grp, &timer_call_lck_attr);
+}
 
-       while (TRUE) {
-               if (    queue_end(queue, qe(current))                                           ||
-                                       CMP_ABSOLUTETIME(&call->deadline,
-                                                                                       &current->deadline) < 0         ) {
-                       current = TC(queue_prev(qe(current)));
-                       break;
-               }
 
-               current = TC(queue_next(qe(current)));
-       }
+void
+timer_call_setup(
+       timer_call_t                    call,
+       timer_call_func_t               func,
+       timer_call_param_t              param0)
+{
+       DBG("timer_call_setup(%p,%p,%p)\n", call, func, param0);
+       call_entry_setup(CE(call), func, param0);
+       simple_lock_init(&(call)->lock, 0);
+       call->async_dequeue = FALSE;
+}
 
-       insque(qe(call), qe(current));
-       if (++timer_calls.delayed_num > timer_calls.delayed_hiwat)
-               timer_calls.delayed_hiwat = timer_calls.delayed_num;
+/*
+ * Timer call entry locking model
+ * ==============================
+ *
+ * Timer call entries are linked on per-cpu timer queues which are protected
+ * by the queue lock and the call entry lock. The locking protocol is:
+ *
+ *  0) The canonical locking order is timer call entry followed by queue.
+ *
+ *  1) With only the entry lock held, entry.queue is valid:
+ *    1a) NULL: the entry is not queued, or
+ *    1b) non-NULL: this queue must be locked before the entry is modified.
+ *        After locking the queue, the call.async_dequeue flag must be checked:
+ *    1c) TRUE: the entry was removed from the queue by another thread
+ *             and we must NULL the entry.queue and reset this flag, or
+ *    1d) FALSE: (ie. queued), the entry can be manipulated.
+ *
+ *  2) If a queue lock is obtained first, the queue is stable:
+ *    2a) If a try-lock of a queued entry succeeds, the call can be operated on
+ *       and dequeued.
+ *    2b) If a try-lock fails, it indicates that another thread is attempting
+ *        to change the entry and move it to a different position in this
+ *        queue or to a different queue. The entry can be dequeued but it
+ *        should not be operated upon since it is being changed. Furthermore,
+ *        we don't null the entry.queue pointer (protected by the entry lock
+ *        we don't own). Instead, we set the async_dequeue flag -- see (1c).
+ */
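
A condensed sketch of the two collision cases, distilled from the routines
below (illustrative only; the real code paths follow):

        /* Scanner side, case (2b): queue lock held, entry try-lock failed.
         * Unlink the element, but leave call->queue untouched and flag the
         * asynchronous dequeue for the entry's owner to reconcile.
         */
        if (!simple_lock_try(&call->lock)) {
                (void) remque(qe(call));
                call->async_dequeue = TRUE;
                continue;
        }

        /* Owner side, case (1c): entry lock taken, then queue lock taken.
         * Observe a dequeue performed behind our back and clean up.
         */
        if (call->async_dequeue) {
                call->async_dequeue = FALSE;
                CE(call)->queue = NULL;
        }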
 
-       call->state = DELAYED;
+/*
+ * Inlines timer_call_entry_dequeue() and timer_call_entry_enqueue_deadline()
+ * cast between pointer types (mpqueue_head_t *) and (queue_t) so that
+ * we can use the call_entry_dequeue() and call_entry_enqueue_deadline()
+ * methods to operate on timer_call structs as if they are call_entry structs.
+ * These structures are identical except for their queue head pointer fields.
+ *
+ * In the debug case, we assert that the timer call locking protocol 
+ * is being obeyed.
+ */
+#if TIMER_ASSERT
+static __inline__ mpqueue_head_t *
+timer_call_entry_dequeue(
+       timer_call_t            entry)
+{
+        mpqueue_head_t *old_queue = MPQUEUE(CE(entry)->queue);
+
+       if (!hw_lock_held((hw_lock_t)&entry->lock))
+               panic("_call_entry_dequeue() "
+                       "entry %p is not locked\n", entry);
+       /*
+        * XXX The queue lock is actually a mutex in spin mode
+        *     but there's no way to test for it being held
+        *     so we pretend it's a spinlock!
+        */
+       if (!hw_lock_held((hw_lock_t)&old_queue->lock_data))
+               panic("_call_entry_dequeue() "
+                       "queue %p is not locked\n", old_queue);
+
+       call_entry_dequeue(CE(entry));
+
+       return (old_queue);
 }
 
-static __inline__
-void
-_delayed_call_dequeue(
-       timer_call_t                    call)
+static __inline__ mpqueue_head_t *
+timer_call_entry_enqueue_deadline(
+       timer_call_t            entry,
+       mpqueue_head_t          *queue,
+       uint64_t                deadline)
 {
-       (void)remque(qe(call));
-       timer_calls.delayed_num--;
-
-       call->state = IDLE;
+       mpqueue_head_t  *old_queue = MPQUEUE(CE(entry)->queue);
+
+       if (!hw_lock_held((hw_lock_t)&entry->lock))
+               panic("_call_entry_enqueue_deadline() "
+                       "entry %p is not locked\n", entry);
+       /* XXX More lock pretense:  */
+       if (!hw_lock_held((hw_lock_t)&queue->lock_data))
+               panic("_call_entry_enqueue_deadline() "
+                       "queue %p is not locked\n", queue);
+       if (old_queue != NULL && old_queue != queue)
+               panic("_call_entry_enqueue_deadline() "
+                       "old_queue %p != queue", old_queue);
+
+       call_entry_enqueue_deadline(CE(entry), QUEUE(queue), deadline);
+
+       return (old_queue);
 }
 
-static __inline__
-void
-_pending_call_enqueue(
-       queue_t                                 queue,
-       timer_call_t                    call)
+#else
+
+static __inline__ mpqueue_head_t *
+timer_call_entry_dequeue(
+       timer_call_t            entry)
 {
-       enqueue_tail(queue, qe(call));
-       if (++timer_calls.pending_num > timer_calls.pending_hiwat)
-               timer_calls.pending_hiwat = timer_calls.pending_num;
+       return MPQUEUE(call_entry_dequeue(CE(entry)));
+}
 
-       call->state = PENDING;
+static __inline__ mpqueue_head_t *
+timer_call_entry_enqueue_deadline(
+       timer_call_t                    entry,
+       mpqueue_head_t                  *queue,
+       uint64_t                        deadline)
+{
+       return MPQUEUE(call_entry_enqueue_deadline(CE(entry),
+                                                  QUEUE(queue), deadline));
 }
 
-static __inline__
-void
-_pending_call_dequeue(
-       timer_call_t                    call)
+#endif
+
+#if TIMER_ASSERT
+unsigned timer_call_enqueue_deadline_unlocked_async1;
+unsigned timer_call_enqueue_deadline_unlocked_async2;
+#endif
+/*
+ * Assumes call_entry and queues unlocked, interrupts disabled.
+ */
+__inline__ mpqueue_head_t *
+timer_call_enqueue_deadline_unlocked(
+       timer_call_t                    call,
+       mpqueue_head_t                  *queue,
+       uint64_t                        deadline)
 {
-       (void)remque(qe(call));
-       timer_calls.pending_num--;
+       call_entry_t    entry = CE(call);
+       mpqueue_head_t  *old_queue;
+
+       DBG("timer_call_enqueue_deadline_unlocked(%p,%p,)\n", call, queue);
+
+       simple_lock(&call->lock);
+       old_queue = MPQUEUE(entry->queue);
+       if (old_queue != NULL) {
+               timer_call_lock_spin(old_queue);
+               if (call->async_dequeue) {
+                       /* collision (1c): null queue pointer and reset flag */
+                       call->async_dequeue = FALSE;
+                       entry->queue = NULL;
+#if TIMER_ASSERT
+                       timer_call_enqueue_deadline_unlocked_async1++;
+#endif
+               } else if (old_queue != queue) {
+                       (void)remque(qe(entry));
+                       entry->queue = NULL;
+#if TIMER_ASSERT
+                       timer_call_enqueue_deadline_unlocked_async2++;
+#endif
+               }
+               if (old_queue != queue) {
+                       timer_call_unlock(old_queue);
+                       timer_call_lock_spin(queue);
+               }
+       } else {
+               timer_call_lock_spin(queue);
+       }
 
-       call->state = IDLE;
+       timer_call_entry_enqueue_deadline(call, queue, deadline);
+       timer_call_unlock(queue);
+       simple_unlock(&call->lock);
+
+       return (old_queue);
 }
 
-static __inline__
-void
-_set_delayed_call_timer(
-       timer_call_t                    call)
+#if TIMER_ASSERT
+unsigned timer_call_dequeue_unlocked_async1;
+unsigned timer_call_dequeue_unlocked_async2;
+#endif
+mpqueue_head_t *
+timer_call_dequeue_unlocked(
+       timer_call_t            call)
 {
-       clock_set_timer_deadline(call->deadline);
+       call_entry_t    entry = CE(call);
+       mpqueue_head_t  *old_queue;
+
+       DBG("timer_call_dequeue_unlocked(%p)\n", call);
+
+       simple_lock(&call->lock);
+       old_queue = MPQUEUE(entry->queue);
+       if (old_queue != NULL) {
+               timer_call_lock_spin(old_queue);
+               if (call->async_dequeue) {
+                       /* collision (1c): null queue pointer and reset flag */
+                       call->async_dequeue = FALSE;
+#if TIMER_ASSERT
+                       timer_call_dequeue_unlocked_async1++;
+#endif
+               } else {
+                       (void)remque(qe(entry));
+#if TIMER_ASSERT
+                       timer_call_dequeue_unlocked_async2++;
+#endif
+               }
+               entry->queue = NULL;
+               timer_call_unlock(old_queue);
+       }
+       simple_unlock(&call->lock);
+       return (old_queue);
 }
 
-boolean_t
-timer_call_enter(
-       timer_call_t                    call,
-       AbsoluteTime                    deadline)
+static boolean_t 
+timer_call_enter_internal(
+       timer_call_t            call,
+       timer_call_param_t      param1,
+       uint64_t                deadline,
+       uint32_t                flags)
 {
-       boolean_t               result = TRUE;
-       queue_t                 delayed;
+       mpqueue_head_t          *queue;
+       mpqueue_head_t          *old_queue;
        spl_t                   s;
+       uint64_t                slop = 0;
 
        s = splclock();
-       simple_lock(&timer_call_lock);
 
-       if (call->state == PENDING)
-               _pending_call_dequeue(call);
-       else if (call->state == DELAYED)
-               _delayed_call_dequeue(call);
-       else if (call->state == IDLE)
-               result = FALSE;
+       call->soft_deadline = deadline;
+       call->flags = flags;
 
-       call->param1    = 0;
-       call->deadline  = deadline;
+       if ((flags & TIMER_CALL_CRITICAL) == 0 &&
+            mach_timer_coalescing_enabled) {
+               slop = timer_call_slop(deadline);
+               deadline += slop;
+       }
 
-       delayed = &delayed_call_queues[cpu_number()];
+       queue = timer_queue_assign(deadline);
 
-       _delayed_call_enqueue(delayed, call);
+       old_queue = timer_call_enqueue_deadline_unlocked(call, queue, deadline);
 
-       if (queue_first(delayed) == qe(call))
-               _set_delayed_call_timer(call);
+       CE(call)->param1 = param1;
 
-       simple_unlock(&timer_call_lock);
        splx(s);
 
-       return (result);
+       return (old_queue != NULL);
 }
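
Note the ordering in timer_call_enter_internal() above: call->soft_deadline
records the caller's requested time before any slop is added, while the entry
is sorted on the padded deadline. Since timer_queue_expire() fires on
soft_deadline, a coalesced (non-TIMER_CALL_CRITICAL) timer may run roughly
anywhere in [soft_deadline, soft_deadline + slop].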
 
 boolean_t
-timer_call_enter1(
-       timer_call_t                    call,
-       timer_call_param_t              param1,
-       AbsoluteTime                    deadline)
+timer_call_enter(
+       timer_call_t            call,
+       uint64_t                deadline,
+       uint32_t                flags)
 {
-       boolean_t               result = TRUE;
-       queue_t                 delayed;
-       spl_t                   s;
-
-       s = splclock();
-       simple_lock(&timer_call_lock);
-
-       if (call->state == PENDING)
-               _pending_call_dequeue(call);
-       else if (call->state == DELAYED)
-               _delayed_call_dequeue(call);
-       else if (call->state == IDLE)
-               result = FALSE;
-
-       call->param1    = param1;
-       call->deadline  = deadline;
-
-       delayed = &delayed_call_queues[cpu_number()];
-
-       _delayed_call_enqueue(delayed, call);
-
-       if (queue_first(delayed) == qe(call))
-               _set_delayed_call_timer(call);
-
-       simple_unlock(&timer_call_lock);
-       splx(s);
+       return timer_call_enter_internal(call, NULL, deadline, flags);
+}
 
-       return (result);
+boolean_t
+timer_call_enter1(
+       timer_call_t            call,
+       timer_call_param_t      param1,
+       uint64_t                deadline,
+       uint32_t                flags)
+{
+       return timer_call_enter_internal(call, param1, deadline, flags);
 }
 
 boolean_t
 timer_call_cancel(
-       timer_call_t                    call)
+       timer_call_t            call)
 {
-       boolean_t               result = TRUE;
+       mpqueue_head_t          *old_queue;
        spl_t                   s;
 
        s = splclock();
-       simple_lock(&timer_call_lock);
 
-       if (call->state == PENDING)
-               _pending_call_dequeue(call);
-       else if (call->state == DELAYED)
-               _delayed_call_dequeue(call);
-       else
-               result = FALSE;
+       old_queue = timer_call_dequeue_unlocked(call);
 
-       simple_unlock(&timer_call_lock);
+       if (old_queue != NULL) {
+               timer_call_lock_spin(old_queue);
+               if (!queue_empty(&old_queue->head))
+                       timer_queue_cancel(old_queue, CE(call)->deadline, CE(queue_first(&old_queue->head))->deadline);
+               else
+                       timer_queue_cancel(old_queue, CE(call)->deadline, UINT64_MAX);
+               timer_call_unlock(old_queue);
+       }
        splx(s);
 
-       return (result);
+       return (old_queue != NULL);
 }
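
For reference, a minimal usage sketch of the API above. The caller, its
callback, and the 10ms interval are hypothetical; clock_interval_to_deadline()
and kMillisecondScale come from the clock subsystem, not this file:

        static timer_call_data_t my_timer;

        static void
        my_timeout(timer_call_param_t p0, timer_call_param_t p1)
        {
                /* Runs in interrupt context when the deadline expires. */
        }

        ...
        uint64_t deadline;

        timer_call_setup(&my_timer, my_timeout, NULL);  /* param0 == NULL */
        clock_interval_to_deadline(10, kMillisecondScale, &deadline);

        /* TIMER_CALL_CRITICAL opts out of coalescing; returns TRUE if the
         * call was already armed and has been re-queued.
         */
        (void) timer_call_enter(&my_timer, deadline, TIMER_CALL_CRITICAL);

        /* Returns TRUE if the call was still pending when cancelled. */
        (void) timer_call_cancel(&my_timer);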
 
-boolean_t
-timer_call_is_delayed(
-       timer_call_t                    call,
-       AbsoluteTime                    *deadline)
+uint32_t       timer_queue_shutdown_lock_skips;
+void
+timer_queue_shutdown(
+       mpqueue_head_t          *queue)
 {
-       boolean_t               result = FALSE;
+       timer_call_t            call;
+       mpqueue_head_t          *new_queue;
        spl_t                   s;
 
+       DBG("timer_queue_shutdown(%p)\n", queue);
+
        s = splclock();
-       simple_lock(&timer_call_lock);
 
-       if (call->state == DELAYED) {
-               if (deadline != NULL)
-                       *deadline = call->deadline;
-               result = TRUE;
+       /* Note the comma operator in the while expression: the queue lock is re-taken on each iteration */
+       while (timer_call_lock_spin(queue), !queue_empty(&queue->head)) {
+               call = TIMER_CALL(queue_first(&queue->head));
+               if (!simple_lock_try(&call->lock)) {
+                       /*
+                        * case (2b) lock order inversion, dequeue and skip
+                        * Don't change the call_entry queue back-pointer
+                        * but set the async_dequeue field.
+                        */
+                       timer_queue_shutdown_lock_skips++;
+                       (void) remque(qe(call));
+                       call->async_dequeue = TRUE;
+                       timer_call_unlock(queue);
+                       continue;
+               }
+
+               /* remove entry from old queue */
+               timer_call_entry_dequeue(call);
+               timer_call_unlock(queue);
+
+               /* and queue it on new */
+               new_queue = timer_queue_assign(CE(call)->deadline);
+               timer_call_lock_spin(new_queue);
+               timer_call_entry_enqueue_deadline(
+                       call, new_queue, CE(call)->deadline);
+               timer_call_unlock(new_queue);
+
+               simple_unlock(&call->lock);
        }
 
-       simple_unlock(&timer_call_lock);
+       timer_call_unlock(queue);
        splx(s);
-
-       return (result);
 }
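
The comma expression in the while condition above re-takes the queue lock at
the top of every pass, since both the skip path and the requeue path drop it
inside the body. An equivalent shape without the comma operator:

        for (;;) {
                timer_call_lock_spin(queue);
                if (queue_empty(&queue->head))
                        break;
                /* ... dequeue or skip, dropping the queue lock ... */
        }
        timer_call_unlock(queue);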
 
-static
-void
-timer_call_interrupt(
-       AbsoluteTime                    timestamp)
+uint32_t       timer_queue_expire_lock_skips;
+uint64_t
+timer_queue_expire(
+       mpqueue_head_t          *queue,
+       uint64_t                deadline)
 {
-       timer_call_t            call;
-       queue_t                         delayed = &delayed_call_queues[cpu_number()];
+       timer_call_t    call;
+
+       DBG("timer_queue_expire(%p,)\n", queue);
 
-       simple_lock(&timer_call_lock);
+       timer_call_lock_spin(queue);
 
-       call = TC(queue_first(delayed));
+       while (!queue_empty(&queue->head)) {
+               call = TIMER_CALL(queue_first(&queue->head));
 
-       while (!queue_end(delayed, qe(call))) {
-               if (CMP_ABSOLUTETIME(&call->deadline, &timestamp) <= 0) {
+               if (call->soft_deadline <= deadline) {
                        timer_call_func_t               func;
                        timer_call_param_t              param0, param1;
 
-                       _delayed_call_dequeue(call);
+                       if (!simple_lock_try(&call->lock)) {
+                               /* case (2b) lock inversion, dequeue and skip */
+                               timer_queue_expire_lock_skips++;
+                               (void) remque(qe(call));
+                               call->async_dequeue = TRUE;
+                               continue;
+                       }
 
-                       func = call->func;
-                       param0 = call->param0;
-                       param1 = call->param1;
+                       timer_call_entry_dequeue(call);
 
-                       simple_unlock(&timer_call_lock);
+                       func = CE(call)->func;
+                       param0 = CE(call)->param0;
+                       param1 = CE(call)->param1;
+
+                       simple_unlock(&call->lock);
+                       timer_call_unlock(queue);
+
+                       KERNEL_DEBUG_CONSTANT(DECR_TIMER_CALLOUT | DBG_FUNC_START,
+                                             func,
+                                             param0,
+                                             param1, 0, 0);
+
+#if CONFIG_DTRACE && (DEVELOPMENT || DEBUG)
+                       DTRACE_TMR3(callout__start, timer_call_func_t, func, 
+                                                                               timer_call_param_t, param0, 
+                                                                               timer_call_param_t, param1);
+#endif
 
                        (*func)(param0, param1);
 
-                       simple_lock(&timer_call_lock);
+#if CONFIG_DTRACE && (DEVELOPMENT || DEBUG)
+                       DTRACE_TMR3(callout__end, timer_call_func_t, func, 
+                                                                               timer_call_param_t, param0, 
+                                                                               timer_call_param_t, param1);
+#endif
+
+                       KERNEL_DEBUG_CONSTANT(DECR_TIMER_CALLOUT | DBG_FUNC_END,
+                                             func,
+                                             param0,
+                                             param1, 0, 0);
+
+                       timer_call_lock_spin(queue);
                }
                else
                        break;
+       }
+
+       if (!queue_empty(&queue->head))
+               deadline = CE(call)->deadline;
+       else
+               deadline = UINT64_MAX;
+
+       timer_call_unlock(queue);
+
+       return (deadline);
+}
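
The value returned is the earliest remaining hard deadline in the queue, or
UINT64_MAX when the queue has drained. A sketch of the expected caller, the
per-CPU timer interrupt path (assuming the etimer entry points; the actual
plumbing lives in the machine-dependent etimer code):

        uint64_t next = timer_queue_expire(queue, mach_absolute_time());
        if (next != UINT64_MAX)
                etimer_set_deadline(next);      /* re-arm the hardware timer */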
 
-               call = TC(queue_first(delayed));
+
+extern int serverperfmode;
+uint32_t       timer_queue_migrate_lock_skips;
+/*
+ * timer_queue_migrate() is called by etimer_queue_migrate()
+ * to move timer requests from the local processor (queue_from)
+ * to a target processor's queue (queue_to).
+ */
+int
+timer_queue_migrate(mpqueue_head_t *queue_from, mpqueue_head_t *queue_to)
+{
+       timer_call_t    call;
+       timer_call_t    head_to;
+       int             timers_migrated = 0;
+
+       DBG("timer_queue_migrate(%p,%p)\n", queue_from, queue_to);
+
+       assert(!ml_get_interrupts_enabled());
+       assert(queue_from != queue_to);
+
+       if (serverperfmode) {
+               /*
+                * If we're running a high-end server,
+                * avoid migrations: they add latency
+                * and don't save us power under typical
+                * server workloads.
+                */
+               return -4;
+       }
+
+       /*
+        * Take both local (from) and target (to) timer queue locks while
+        * moving the timers from the local queue to the target processor.
+        * We assume that the target is always the boot processor.
+        * But only move if all of the following are true:
+        *  - the target queue is non-empty
+        *  - the local queue is non-empty
+        *  - the local queue's first deadline is later than the target's
+        *  - the local queue contains no non-migratable "local" call
+        * so that the target need not resync.
+        */
+
+        timer_call_lock_spin(queue_to);
+
+       head_to = TIMER_CALL(queue_first(&queue_to->head));
+       if (queue_empty(&queue_to->head)) {
+               timers_migrated = -1;
+               goto abort1;
+       }
+
+        timer_call_lock_spin(queue_from);
+
+       if (queue_empty(&queue_from->head)) {
+               timers_migrated = -2;
+               goto abort2;
+       }
+
+       call = TIMER_CALL(queue_first(&queue_from->head));
+       if (CE(call)->deadline < CE(head_to)->deadline) {
+               timers_migrated = 0;
+               goto abort2;
+       }
+
+       /* perform scan for non-migratable timers */
+       do {
+               if (call->flags & TIMER_CALL_LOCAL) {
+                       timers_migrated = -3;
+                       goto abort2;
+               }
+               call = TIMER_CALL(queue_next(qe(call)));
+       } while (!queue_end(&queue_from->head, qe(call)));
+
+       /* migration loop itself -- both queues are locked */
+       while (!queue_empty(&queue_from->head)) {
+               call = TIMER_CALL(queue_first(&queue_from->head));
+               if (!simple_lock_try(&call->lock)) {
+                       /* case (2b) lock order inversion, dequeue only */
+                       timer_queue_migrate_lock_skips++;
+                       (void) remque(qe(call));
+                       call->async_dequeue = TRUE;
+                       continue;
+               }
+               timer_call_entry_dequeue(call);
+               timer_call_entry_enqueue_deadline(
+                       call, queue_to, CE(call)->deadline);
+               timers_migrated++;
+               simple_unlock(&call->lock);
        }
 
-       if (!queue_end(delayed, qe(call)))
-               _set_delayed_call_timer(call);
+abort2:
+       timer_call_unlock(queue_from);
+abort1:
+       timer_call_unlock(queue_to);
 
-       simple_unlock(&timer_call_lock);
+       return timers_migrated;
 }
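
For the record, the return value encodes the outcome: a positive count of
timers migrated; 0 if the local queue's first deadline precedes the target's;
-1 for an empty target queue; -2 for an empty local queue; -3 if a
TIMER_CALL_LOCAL timer pins the local queue; and -4 when serverperfmode
disables migration altogether.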