apple/xnu.git blobdiff: osfmk/kern/thread_call.c (xnu-1228.3.13)

diff --git a/osfmk/kern/thread_call.c b/osfmk/kern/thread_call.c
index b69dcdd30efcae91807a5df3e261aeefecf72495..d10c7b4bbffe6678e89b4efd851f9ab5d415e66c 100644
--- a/osfmk/kern/thread_call.c
+++ b/osfmk/kern/thread_call.c
@@ -1,49 +1,51 @@
 /*
- * Copyright (c) 1993-1995, 1999-2000 Apple Computer, Inc.
- * All rights reserved.
+ * Copyright (c) 1993-1995, 1999-2007 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  * 
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  * 
- * @APPLE_LICENSE_HEADER_END@
- */
-/*
- * Thread-based callout module.
- *
- * HISTORY
- *
- * 10 July 1999 (debo)
- *  Pulled into Mac OS X (microkernel).
- *
- * 3 July 1993 (debo)
- *     Created.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
  
 #include <mach/mach_types.h>
+#include <mach/thread_act.h>
 
+#include <kern/kern_types.h>
+#include <kern/kalloc.h>
 #include <kern/sched_prim.h>
 #include <kern/clock.h>
 #include <kern/task.h>
 #include <kern/thread.h>
+#include <kern/wait_queue.h>
+
+#include <vm/vm_pageout.h>
 
 #include <kern/thread_call.h>
 #include <kern/call_entry.h>
 
 #include <kern/timer_call.h>
 
+#include <sys/kdebug.h>
+
 #define internal_call_num      768
 
 #define thread_call_thread_min 4
@@ -56,20 +58,16 @@ decl_simple_lock_data(static,thread_call_lock)
 
 static
 timer_call_data_t
-       thread_call_delayed_timer;
+       thread_call_delaytimer;
 
 static
 queue_head_t
-       internal_call_free_queue,
-       pending_call_queue, delayed_call_queue;
+       thread_call_xxx_queue,
+       thread_call_pending_queue, thread_call_delayed_queue;
 
 static
 struct wait_queue
-       call_thread_idle_queue;
-
-static
-thread_t
-       activate_thread;
+       call_thread_waitqueue;
 
 static
 boolean_t
@@ -87,10 +85,7 @@ static struct {
        int             thread_num,
                        thread_hiwat,
                        thread_lowat;
-} thread_calls;
-
-static boolean_t
-       thread_call_initialized = FALSE;
+} thread_call_vars;
 
 static __inline__ thread_call_t
        _internal_call_allocate(void);
@@ -114,7 +109,7 @@ _delayed_call_dequeue(
        thread_call_t           call
 );
 
-static void __inline__
+static __inline__ void
 _set_delayed_call_timer(
        thread_call_t           call
 );
@@ -131,7 +126,7 @@ _remove_from_delayed_queue(
        boolean_t                       remove_all
 );
 
-static __inline__ void
+static inline void
        _call_thread_wake(void);
 
 static void
@@ -161,43 +156,43 @@ _delayed_call_timer(
 void
 thread_call_initialize(void)
 {
-    thread_call_t              call;
-       spl_t                           s;
+       kern_return_t   result;
+       thread_t                thread;
+    thread_call_t      call;
+       spl_t                   s;
 
-    if (thread_call_initialized)
-       panic("thread_call_initialize");
-
-    simple_lock_init(&thread_call_lock, ETAP_MISC_TIMER);
+    simple_lock_init(&thread_call_lock, 0);
 
        s = splsched();
        simple_lock(&thread_call_lock);
 
-    queue_init(&pending_call_queue);
-    queue_init(&delayed_call_queue);
+    queue_init(&thread_call_pending_queue);
+    queue_init(&thread_call_delayed_queue);
 
-    queue_init(&internal_call_free_queue);
+    queue_init(&thread_call_xxx_queue);
     for (
                call = internal_call_storage;
                        call < &internal_call_storage[internal_call_num];
                        call++) {
 
-               enqueue_tail(&internal_call_free_queue, qe(call));
+               enqueue_tail(&thread_call_xxx_queue, qe(call));
     }
 
-       timer_call_setup(&thread_call_delayed_timer, _delayed_call_timer, NULL);
+       timer_call_setup(&thread_call_delaytimer, _delayed_call_timer, NULL);
 
-       wait_queue_init(&call_thread_idle_queue, SYNC_POLICY_FIFO);
-       thread_calls.thread_lowat = thread_call_thread_min;
+       wait_queue_init(&call_thread_waitqueue, SYNC_POLICY_FIFO);
+       thread_call_vars.thread_lowat = thread_call_thread_min;
 
        activate_thread_awake = TRUE;
-    thread_call_initialized = TRUE;
 
        simple_unlock(&thread_call_lock);
        splx(s);
 
-    activate_thread = kernel_thread_with_priority(
-                                                                               kernel_task, MAXPRI_KERNEL - 2,
-                                                                                               _activate_thread, TRUE, TRUE);
+       result = kernel_thread_start_priority((thread_continue_t)_activate_thread, NULL, MAXPRI_KERNEL - 2, &thread);
+       if (result != KERN_SUCCESS)
+               panic("thread_call_initialize");
+
+       thread_deallocate(thread);
 }
 
 void
@@ -225,10 +220,10 @@ _internal_call_allocate(void)
 {
     thread_call_t              call;
     
-    if (queue_empty(&internal_call_free_queue))
+    if (queue_empty(&thread_call_xxx_queue))
        panic("_internal_call_allocate");
        
-    call = TC(dequeue_head(&internal_call_free_queue));
+    call = TC(dequeue_head(&thread_call_xxx_queue));
     
     return (call);
 }
@@ -252,7 +247,7 @@ _internal_call_release(
 {
     if (    call >= internal_call_storage                                              &&
                    call < &internal_call_storage[internal_call_num]            )
-               enqueue_tail(&internal_call_free_queue, qe(call));
+               enqueue_head(&thread_call_xxx_queue, qe(call));
 }
 
 /*
@@ -272,9 +267,9 @@ _pending_call_enqueue(
     thread_call_t              call
 )
 {
-    enqueue_tail(&pending_call_queue, qe(call));
-       if (++thread_calls.pending_num > thread_calls.pending_hiwat)
-               thread_calls.pending_hiwat = thread_calls.pending_num;
+    enqueue_tail(&thread_call_pending_queue, qe(call));
+       if (++thread_call_vars.pending_num > thread_call_vars.pending_hiwat)
+               thread_call_vars.pending_hiwat = thread_call_vars.pending_num;
 
     call->state = PENDING;
 }
@@ -297,7 +292,7 @@ _pending_call_dequeue(
 )
 {
     (void)remque(qe(call));
-       thread_calls.pending_num--;
+       thread_call_vars.pending_num--;
     
     call->state = IDLE;
 }
@@ -322,10 +317,10 @@ _delayed_call_enqueue(
 {
     thread_call_t              current;
     
-    current = TC(queue_first(&delayed_call_queue));
+    current = TC(queue_first(&thread_call_delayed_queue));
     
     while (TRUE) {
-       if (    queue_end(&delayed_call_queue, qe(current))             ||
+       if (    queue_end(&thread_call_delayed_queue, qe(current))              ||
                                        call->deadline < current->deadline                      ) {
                        current = TC(queue_prev(qe(current)));
                        break;
@@ -335,8 +330,8 @@ _delayed_call_enqueue(
     }
 
     insque(qe(call), qe(current));
-       if (++thread_calls.delayed_num > thread_calls.delayed_hiwat)
-               thread_calls.delayed_hiwat = thread_calls.delayed_num;
+       if (++thread_call_vars.delayed_num > thread_call_vars.delayed_hiwat)
+               thread_call_vars.delayed_hiwat = thread_call_vars.delayed_num;
     
     call->state = DELAYED;
 }
@@ -359,7 +354,7 @@ _delayed_call_dequeue(
 )
 {
     (void)remque(qe(call));
-       thread_calls.delayed_num--;
+       thread_call_vars.delayed_num--;
     
     call->state = IDLE;
 }
@@ -380,7 +375,7 @@ _set_delayed_call_timer(
     thread_call_t              call
 )
 {
-    timer_call_enter(&thread_call_delayed_timer, call->deadline);
+    timer_call_enter(&thread_call_delaytimer, call->deadline);
 }
 
 /*
@@ -408,9 +403,9 @@ _remove_from_pending_queue(
        boolean_t                       call_removed = FALSE;
        thread_call_t           call;
     
-    call = TC(queue_first(&pending_call_queue));
+    call = TC(queue_first(&thread_call_pending_queue));
     
-    while (!queue_end(&pending_call_queue, qe(call))) {
+    while (!queue_end(&thread_call_pending_queue, qe(call))) {
        if (    call->func == func                      &&
                                call->param0 == param0                  ) {
                        thread_call_t   next = TC(queue_next(qe(call)));
@@ -457,9 +452,9 @@ _remove_from_delayed_queue(
     boolean_t                  call_removed = FALSE;
     thread_call_t              call;
     
-    call = TC(queue_first(&delayed_call_queue));
+    call = TC(queue_first(&thread_call_delayed_queue));
     
-    while (!queue_end(&delayed_call_queue, qe(call))) {
+    while (!queue_end(&thread_call_delayed_queue, qe(call))) {
        if (    call->func == func                      &&
                                call->param0 == param0                  ) {
                        thread_call_t   next = TC(queue_next(qe(call)));
@@ -502,17 +497,14 @@ thread_call_func(
 )
 {
     thread_call_t              call;
-    int                                        s;
+    spl_t                              s;
     
-    if (!thread_call_initialized)
-       panic("thread_call_func");
-       
     s = splsched();
     simple_lock(&thread_call_lock);
     
-    call = TC(queue_first(&pending_call_queue));
+    call = TC(queue_first(&thread_call_pending_queue));
     
-       while (unique_call && !queue_end(&pending_call_queue, qe(call))) {
+       while (unique_call && !queue_end(&thread_call_pending_queue, qe(call))) {
        if (    call->func == func                      &&
                                call->param0 == param                   ) {
                        break;
@@ -521,15 +513,15 @@ thread_call_func(
                call = TC(queue_next(qe(call)));
     }
     
-    if (!unique_call || queue_end(&pending_call_queue, qe(call))) {
+    if (!unique_call || queue_end(&thread_call_pending_queue, qe(call))) {
                call = _internal_call_allocate();
                call->func                      = func;
                call->param0            = param;
-               call->param1            = 0;
+               call->param1            = NULL;
        
                _pending_call_enqueue(call);
                
-               if (thread_calls.active_num <= 0)
+               if (thread_call_vars.active_num <= 0)
                        _call_thread_wake();
     }
 
@@ -557,11 +549,8 @@ thread_call_func_delayed(
 )
 {
     thread_call_t              call;
-    int                                        s;
+    spl_t                              s;
     
-    if (!thread_call_initialized)
-       panic("thread_call_func_delayed");
-
     s = splsched();
     simple_lock(&thread_call_lock);
     
@@ -573,7 +562,7 @@ thread_call_func_delayed(
     
     _delayed_call_enqueue(call);
     
-    if (queue_first(&delayed_call_queue) == qe(call))
+    if (queue_first(&thread_call_delayed_queue) == qe(call))
        _set_delayed_call_timer(call);
     
     simple_unlock(&thread_call_lock);
@@ -606,7 +595,7 @@ thread_call_func_cancel(
 )
 {
        boolean_t                       result;
-    int                                        s;
+    spl_t                              s;
     
     s = splsched();
     simple_lock(&thread_call_lock);
@@ -666,7 +655,7 @@ thread_call_free(
     thread_call_t              call
 )
 {
-    int                        s;
+    spl_t              s;
     
     s = splsched();
     simple_lock(&thread_call_lock);
@@ -681,7 +670,7 @@ thread_call_free(
     simple_unlock(&thread_call_lock);
     splx(s);
     
-    kfree((vm_offset_t)call, sizeof (thread_call_data_t));
+    kfree(call, sizeof (thread_call_data_t));
 
        return (TRUE);
 }
@@ -706,7 +695,7 @@ thread_call_enter(
 )
 {
        boolean_t               result = TRUE;
-    int                                s;
+    spl_t                      s;
     
     s = splsched();
     simple_lock(&thread_call_lock);
@@ -719,7 +708,7 @@ thread_call_enter(
 
        _pending_call_enqueue(call);
                
-               if (thread_calls.active_num <= 0)
+               if (thread_call_vars.active_num <= 0)
                        _call_thread_wake();
        }
 
@@ -738,7 +727,7 @@ thread_call_enter1(
 )
 {
        boolean_t                       result = TRUE;
-    int                                        s;
+    spl_t                              s;
     
     s = splsched();
     simple_lock(&thread_call_lock);
@@ -751,7 +740,7 @@ thread_call_enter1(
 
        _pending_call_enqueue(call);
 
-               if (thread_calls.active_num <= 0)
+               if (thread_call_vars.active_num <= 0)
                        _call_thread_wake();
     }
 
@@ -784,7 +773,7 @@ thread_call_enter_delayed(
 )
 {
        boolean_t               result = TRUE;
-    int                                s;
+    spl_t                      s;
 
     s = splsched();
     simple_lock(&thread_call_lock);
@@ -801,7 +790,7 @@ thread_call_enter_delayed(
 
        _delayed_call_enqueue(call);
 
-       if (queue_first(&delayed_call_queue) == qe(call))
+       if (queue_first(&thread_call_delayed_queue) == qe(call))
                _set_delayed_call_timer(call);
 
     simple_unlock(&thread_call_lock);
@@ -818,7 +807,7 @@ thread_call_enter1_delayed(
 )
 {
        boolean_t                       result = TRUE;
-    int                                        s;
+    spl_t                              s;
 
     s = splsched();
     simple_lock(&thread_call_lock);
@@ -835,7 +824,7 @@ thread_call_enter1_delayed(
 
        _delayed_call_enqueue(call);
 
-       if (queue_first(&delayed_call_queue) == qe(call))
+       if (queue_first(&thread_call_delayed_queue) == qe(call))
                _set_delayed_call_timer(call);
 
     simple_unlock(&thread_call_lock);
@@ -864,7 +853,7 @@ thread_call_cancel(
 )
 {
        boolean_t               result = TRUE;
-    int                                s;
+    spl_t                      s;
     
     s = splsched();
     simple_lock(&thread_call_lock);
@@ -902,7 +891,7 @@ thread_call_is_delayed(
        uint64_t                        *deadline)
 {
        boolean_t               result = FALSE;
-       int                             s;
+       spl_t                   s;
 
        s = splsched();
        simple_lock(&thread_call_lock);
@@ -932,69 +921,51 @@ thread_call_is_delayed(
  * Postconditions:     None.
  */
 
-static __inline__
-void
+static inline void
 _call_thread_wake(void)
 {
-       if (wait_queue_wakeup_one(
-                                       &call_thread_idle_queue, &call_thread_idle_queue,
-                                                                               THREAD_AWAKENED) == KERN_SUCCESS) {
-               thread_calls.idle_thread_num--;
+       if (wait_queue_wakeup_one(&call_thread_waitqueue, NULL, THREAD_AWAKENED) == KERN_SUCCESS) {
+               thread_call_vars.idle_thread_num--;
 
-               if (++thread_calls.active_num > thread_calls.active_hiwat)
-                       thread_calls.active_hiwat = thread_calls.active_num;
+               if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
+                       thread_call_vars.active_hiwat = thread_call_vars.active_num;
        }
        else
        if (!activate_thread_awake) {
-               clear_wait(activate_thread, THREAD_AWAKENED);
+               thread_wakeup_one(&activate_thread_awake);
                activate_thread_awake = TRUE;
        }
 }
 
 /*
- * Routine:    call_thread_block [private]
- *
- * Purpose:    Hook via thread dispatch on
- *             the occasion of a callout blocking.
- *
- * Preconditions:      splsched.
+ *     sched_call_thread:
  *
- * Postconditions:     None.
+ *     Call out invoked by the scheduler.
  */
 
-void
-call_thread_block(void)
+static void
+sched_call_thread(
+                       int                     type,
+__unused       thread_t        thread)
 {
        simple_lock(&thread_call_lock);
 
-       if (--thread_calls.active_num < thread_calls.active_lowat)
-               thread_calls.active_lowat = thread_calls.active_num;
+       switch (type) {
 
-       if (    thread_calls.active_num <= 0    &&
-                       thread_calls.pending_num > 0            )
-               _call_thread_wake();
-
-       simple_unlock(&thread_call_lock);
-}
+       case SCHED_CALL_BLOCK:
+               if (--thread_call_vars.active_num < thread_call_vars.active_lowat)
+                       thread_call_vars.active_lowat = thread_call_vars.active_num;
 
-/*
- * Routine:    call_thread_unblock [private]
- *
- * Purpose:    Hook via thread wakeup on
- *             the occasion of a callout unblocking.
- *
- * Preconditions:      splsched.
- *
- * Postconditions:     None.
- */
-
-void
-call_thread_unblock(void)
-{
-       simple_lock(&thread_call_lock);
+               if (    thread_call_vars.active_num <= 0        &&
+                               thread_call_vars.pending_num > 0                )
+                       _call_thread_wake();
+               break;
 
-       if (++thread_calls.active_num > thread_calls.active_hiwat)
-               thread_calls.active_hiwat = thread_calls.active_num;
+       case SCHED_CALL_UNBLOCK:
+               if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
+                       thread_call_vars.active_hiwat = thread_call_vars.active_num;
+               break;
+       }
 
        simple_unlock(&thread_call_lock);
 }
@@ -1018,15 +989,15 @@ _call_thread_continue(void)
     (void) splsched();
     simple_lock(&thread_call_lock);
 
-       self->active_callout = TRUE;
+       thread_sched_call(self, sched_call_thread);
 
-    while (thread_calls.pending_num > 0) {
+    while (thread_call_vars.pending_num > 0) {
                thread_call_t                   call;
                thread_call_func_t              func;
                thread_call_param_t             param0, param1;
 
-               call = TC(dequeue_head(&pending_call_queue));
-               thread_calls.pending_num--;
+               call = TC(dequeue_head(&thread_call_pending_queue));
+               thread_call_vars.pending_num--;
 
                func = call->func;
                param0 = call->param0;
@@ -1039,6 +1010,10 @@ _call_thread_continue(void)
                simple_unlock(&thread_call_lock);
                (void) spllo();
 
+               KERNEL_DEBUG_CONSTANT(
+                       MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
+                               (int)func, (int)param0, (int)param1, 0, 0);
+
                (*func)(param0, param1);
 
                (void)thread_funnel_set(self->funnel_lock, FALSE);
@@ -1047,31 +1022,29 @@ _call_thread_continue(void)
                simple_lock(&thread_call_lock);
     }
 
-       self->active_callout = FALSE;
+       thread_sched_call(self, NULL);
 
-       if (--thread_calls.active_num < thread_calls.active_lowat)
-               thread_calls.active_lowat = thread_calls.active_num;
+       if (--thread_call_vars.active_num < thread_call_vars.active_lowat)
+               thread_call_vars.active_lowat = thread_call_vars.active_num;
        
-    if (thread_calls.idle_thread_num < thread_calls.thread_lowat) {
-               thread_calls.idle_thread_num++;
+    if (thread_call_vars.idle_thread_num < thread_call_vars.thread_lowat) {
+               thread_call_vars.idle_thread_num++;
 
-               wait_queue_assert_wait(
-                                       &call_thread_idle_queue, &call_thread_idle_queue,
-                                                                                                               THREAD_INTERRUPTIBLE);
+               wait_queue_assert_wait(&call_thread_waitqueue, NULL, THREAD_UNINT, 0);
        
                simple_unlock(&thread_call_lock);
                (void) spllo();
 
-               thread_block(_call_thread_continue);
+               thread_block((thread_continue_t)_call_thread_continue);
                /* NOTREACHED */
     }
     
-    thread_calls.thread_num--;
+    thread_call_vars.thread_num--;
     
     simple_unlock(&thread_call_lock);
     (void) spllo();
     
-    (void) thread_terminate(self->top_act);
+    thread_terminate(self);
        /* NOTREACHED */
 }
 
@@ -1079,10 +1052,6 @@ static
 void
 _call_thread(void)
 {
-       thread_t                                        self = current_thread();
-
-    stack_privilege(self);
-
     _call_thread_continue();
     /* NOTREACHED */
 }
@@ -1101,24 +1070,30 @@ static
 void
 _activate_thread_continue(void)
 {
+       kern_return_t   result;
+       thread_t                thread;
+
     (void) splsched();
     simple_lock(&thread_call_lock);
         
-       while (         thread_calls.active_num <= 0    &&
-                               thread_calls.pending_num > 0            ) {
+       while (         thread_call_vars.active_num <= 0        &&
+                               thread_call_vars.pending_num > 0                ) {
 
-               if (++thread_calls.active_num > thread_calls.active_hiwat)
-                       thread_calls.active_hiwat = thread_calls.active_num;
+               if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
+                       thread_call_vars.active_hiwat = thread_call_vars.active_num;
 
-               if (++thread_calls.thread_num > thread_calls.thread_hiwat)
-                       thread_calls.thread_hiwat = thread_calls.thread_num;
+               if (++thread_call_vars.thread_num > thread_call_vars.thread_hiwat)
+                       thread_call_vars.thread_hiwat = thread_call_vars.thread_num;
 
                simple_unlock(&thread_call_lock);
                (void) spllo();
        
-               (void) kernel_thread_with_priority(
-                                                                       kernel_task, MAXPRI_KERNEL - 1,
-                                                                                                       _call_thread, TRUE, TRUE);
+               result = kernel_thread_start_priority((thread_continue_t)_call_thread, NULL, MAXPRI_KERNEL - 1, &thread);
+               if (result != KERN_SUCCESS)
+                       panic("activate_thread");
+
+               thread_deallocate(thread);
+
                (void) splsched();
                simple_lock(&thread_call_lock);
     }
@@ -1129,7 +1104,7 @@ _activate_thread_continue(void)
     simple_unlock(&thread_call_lock);
        (void) spllo();
     
-       thread_block(_activate_thread_continue);
+       thread_block((thread_continue_t)_activate_thread_continue);
        /* NOTREACHED */
 }
 
@@ -1137,11 +1112,10 @@ static
 void
 _activate_thread(void)
 {
-       thread_t                self = current_thread();
+       thread_t        self = current_thread();
 
-       self->vm_privilege = TRUE;
+       self->options |= TH_OPT_VMPRIV;
        vm_page_free_reserve(2);        /* XXX */
-    stack_privilege(self);
     
     _activate_thread_continue();
     /* NOTREACHED */
@@ -1150,23 +1124,23 @@ _activate_thread(void)
 static
 void
 _delayed_call_timer(
-       timer_call_param_t              p0,
-       timer_call_param_t              p1
+       __unused timer_call_param_t             p0,
+       __unused timer_call_param_t             p1
 )
 {
        uint64_t                        timestamp;
     thread_call_t              call;
        boolean_t                       new_pending = FALSE;
-    int                                        s;
+    spl_t                              s;
 
     s = splsched();
     simple_lock(&thread_call_lock);
 
        clock_get_uptime(&timestamp);
     
-    call = TC(queue_first(&delayed_call_queue));
+    call = TC(queue_first(&thread_call_delayed_queue));
     
-    while (!queue_end(&delayed_call_queue, qe(call))) {
+    while (!queue_end(&thread_call_delayed_queue, qe(call))) {
        if (call->deadline <= timestamp) {
                        _delayed_call_dequeue(call);
 
@@ -1176,13 +1150,13 @@ _delayed_call_timer(
                else
                        break;
            
-               call = TC(queue_first(&delayed_call_queue));
+               call = TC(queue_first(&thread_call_delayed_queue));
     }
 
-       if (!queue_end(&delayed_call_queue, qe(call)))
+       if (!queue_end(&thread_call_delayed_queue, qe(call)))
                _set_delayed_call_timer(call);
 
-    if (new_pending && thread_calls.active_num <= 0)
+    if (new_pending && thread_call_vars.active_num <= 0)
                _call_thread_wake();
 
     simple_unlock(&thread_call_lock);
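
Usage sketch (not part of the patch above): a minimal, hedged example of how a kernel subsystem might drive the thread_call interfaces whose internals this diff reworks. The names my_call, my_callout, my_subsystem_start/stop and the one-second interval are illustrative assumptions; thread_call_allocate, thread_call_enter_delayed, thread_call_cancel and thread_call_free are declared in kern/thread_call.h alongside the routines shown in the diff, and clock_interval_to_deadline comes from kern/clock.h.

#include <kern/thread_call.h>
#include <kern/clock.h>
#include <mach/clock_types.h>

static thread_call_t    my_call;        /* hypothetical callout handle */

static void
my_callout(
        __unused thread_call_param_t    param0,
        __unused thread_call_param_t    param1)
{
        /* Runs on one of the callout threads spawned by _activate_thread. */
}

static void
my_subsystem_start(void)
{
        uint64_t        deadline;

        /* Allocate a callout bound to my_callout with param0 == NULL. */
        my_call = thread_call_allocate(my_callout, NULL);

        /*
         * Arm it roughly one second out.  The entry sits on
         * thread_call_delayed_queue until _delayed_call_timer expires it
         * onto thread_call_pending_queue.
         */
        clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);
        thread_call_enter_delayed(my_call, deadline);
}

static void
my_subsystem_stop(void)
{
        /* Returns TRUE if the pending/delayed invocation was dequeued in time. */
        (void) thread_call_cancel(my_call);
        (void) thread_call_free(my_call);
}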