diff --git a/osfmk/kern/thread_call.c b/osfmk/kern/thread_call.c
index 15cd7e8a4eb190a940f5aec43464c4fa81a0c890..92f0b642b9d15503020e79d1846da479ef3be081 100644
--- a/osfmk/kern/thread_call.c
+++ b/osfmk/kern/thread_call.c
@@ -1,17 +1,19 @@
 /*
- * Copyright (c) 1993-1995, 1999-2000 Apple Computer, Inc.
- * All rights reserved.
+ * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
- * 
- * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ * 
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
  * 
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * Please see the License for the specific language governing rights and
  * limitations under the License.
  * 
- * @APPLE_LICENSE_HEADER_END@
- */
-/*
- * Thread-based callout module.
- *
- * HISTORY
- *
- * 10 July 1999 (debo)
- *  Pulled into Mac OS X (microkernel).
- *
- * 3 July 1993 (debo)
- *     Created.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
  
 #include <mach/mach_types.h>
+#include <mach/thread_act.h>
 
+#include <kern/kern_types.h>
+#include <kern/zalloc.h>
 #include <kern/sched_prim.h>
 #include <kern/clock.h>
 #include <kern/task.h>
 #include <kern/thread.h>
+#include <kern/wait_queue.h>
+
+#include <vm/vm_pageout.h>
 
 #include <kern/thread_call.h>
 #include <kern/call_entry.h>
 
 #include <kern/timer_call.h>
 
-#define internal_call_num      768
+#include <sys/kdebug.h>
 
-#define thread_call_thread_min 4
+decl_simple_lock_data(static,thread_call_lock)
 
-static
-thread_call_data_t
-       internal_call_storage[internal_call_num];
+static zone_t          thread_call_zone;
 
-decl_simple_lock_data(static,thread_call_lock)
+struct thread_call_group {
+       queue_head_t            pending_queue;
+       uint32_t                        pending_count;
 
-static
-timer_call_data_t
-       thread_call_delayed_timer;
+       queue_head_t            delayed_queue;
 
-static
-queue_head_t
-       internal_call_free_queue,
-       pending_call_queue, delayed_call_queue;
+       timer_call_data_t       delayed_timer;
 
-static
-struct wait_queue
-       call_thread_idle_queue;
+       struct wait_queue       idle_wqueue;
+       struct wait_queue       daemon_wqueue;
+       uint32_t                        idle_count, active_count;
+};
 
-static
-thread_t
-       activate_thread;
+typedef struct thread_call_group       *thread_call_group_t;
 
-static
-boolean_t
-       activate_thread_awake;
-
-static struct {
-       int             pending_num,
-                       pending_hiwat;
-       int             active_num,
-                       active_hiwat,
-                       active_lowat;
-       int             delayed_num,
-                       delayed_hiwat;
-       int             idle_thread_num;
-       int             thread_num,
-                       thread_hiwat,
-                       thread_lowat;
-} thread_calls;
+static struct thread_call_group                thread_call_group0;
 
-static boolean_t
-       thread_call_initialized = FALSE;
+static boolean_t                       thread_call_daemon_awake;
 
-static __inline__ thread_call_t
-       _internal_call_allocate(void);
+#define thread_call_thread_min 4
 
-static __inline__ void
-_internal_call_release(
-       thread_call_t           call
-);
+#define internal_call_count    768
 
-static __inline__ void
-_pending_call_enqueue(
-       thread_call_t           call
-),
-_pending_call_dequeue(
-       thread_call_t           call
-),
-_delayed_call_enqueue(
-       thread_call_t           call
-),
-_delayed_call_dequeue(
-       thread_call_t           call
-);
+static thread_call_data_t      internal_call_storage[internal_call_count];
+static queue_head_t                    thread_call_internal_queue;
 
-static void __inline__
-_set_delayed_call_timer(
-       thread_call_t           call
-);
-                                       
-static boolean_t
-_remove_from_pending_queue(
-       thread_call_func_t      func,
-       thread_call_param_t     param0,
-       boolean_t                       remove_all
-),
-_remove_from_delayed_queue(
-       thread_call_func_t      func,
-       thread_call_param_t     param0,
-       boolean_t                       remove_all
-);
+static __inline__ thread_call_t                _internal_call_allocate(void);
 
-static __inline__ void
-       _call_thread_wake(void);
+static __inline__ void _internal_call_release(
+                                                       thread_call_t           call);
 
-static void
-       _call_thread(void),
-       _activate_thread(void);
+static __inline__ boolean_t    _pending_call_enqueue(
+                                                               thread_call_t           call,
+                                                               thread_call_group_t     group),
+                                                       _delayed_call_enqueue(
+                                                               thread_call_t           call,
+                                                               thread_call_group_t     group,
+                                                               uint64_t                        deadline),
+                                                       _call_dequeue(
+                                                               thread_call_t           call,
+                                                               thread_call_group_t     group);
 
-static void
-_delayed_call_timer(
-       timer_call_param_t              p0,
-       timer_call_param_t              p1
-);
+static __inline__ void thread_call_wake(
+                                                       thread_call_group_t     group);
+
+static __inline__ void _set_delayed_call_timer(
+                                                       thread_call_t           call,
+                                                       thread_call_group_t     group);
+                                       
+static boolean_t       _remove_from_pending_queue(
+                                               thread_call_func_t              func,
+                                               thread_call_param_t             param0,
+                                               boolean_t                               remove_all),
+                                       _remove_from_delayed_queue(
+                                               thread_call_func_t              func,
+                                               thread_call_param_t             param0,
+                                               boolean_t                               remove_all);
+
+static void            thread_call_daemon(
+                                       thread_call_group_t             group),
+                               thread_call_thread(
+                                       thread_call_group_t             group);
+
+static void            thread_call_delayed_timer(
+                                       timer_call_param_t              p0,
+                                       timer_call_param_t              p1);
 
 #define qe(x)          ((queue_entry_t)(x))
 #define TC(x)          ((thread_call_t)(x))
 
 /*
- * Routine:    thread_call_initialize [public]
- *
- * Description:        Initialize this module, called
- *             early during system initialization.
+ *     thread_call_initialize:
  *
- * Preconditions:      None.
- *
- * Postconditions:     None.
+ *     Initialize this module, called
+ *     early during system initialization.
  */
-
 void
 thread_call_initialize(void)
 {
-    thread_call_t              call;
-       spl_t                           s;
+    thread_call_t                      call;
+       thread_call_group_t             group = &thread_call_group0;
+       kern_return_t                   result;
+       thread_t                                thread;
+       int                                             i;
+       spl_t                                   s;
 
-    if (thread_call_initialized)
-       panic("thread_call_initialize");
+       i = sizeof (thread_call_data_t);
+       thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
 
-    simple_lock_init(&thread_call_lock, ETAP_MISC_TIMER);
+    simple_lock_init(&thread_call_lock, 0);
 
        s = splsched();
        simple_lock(&thread_call_lock);
 
-    queue_init(&pending_call_queue);
-    queue_init(&delayed_call_queue);
+    queue_init(&group->pending_queue);
+    queue_init(&group->delayed_queue);
 
-    queue_init(&internal_call_free_queue);
+       timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);
+
+       wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);
+       wait_queue_init(&group->daemon_wqueue, SYNC_POLICY_FIFO);
+
+    queue_init(&thread_call_internal_queue);
     for (
                call = internal_call_storage;
-                       call < &internal_call_storage[internal_call_num];
+                       call < &internal_call_storage[internal_call_count];
                        call++) {
 
-               enqueue_tail(&internal_call_free_queue, qe(call));
+               enqueue_tail(&thread_call_internal_queue, qe(call));
     }
 
-       timer_call_setup(&thread_call_delayed_timer, _delayed_call_timer, NULL);
-
-       wait_queue_init(&call_thread_idle_queue, SYNC_POLICY_FIFO);
-       thread_calls.thread_lowat = thread_call_thread_min;
-
-       activate_thread_awake = TRUE;
-    thread_call_initialized = TRUE;
+       thread_call_daemon_awake = TRUE;
 
        simple_unlock(&thread_call_lock);
        splx(s);
 
-    activate_thread = kernel_thread_with_priority(
-                                                                               kernel_task, MAXPRI_KERNEL - 2,
-                                                                                               _activate_thread, TRUE, TRUE);
+       result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, group, BASEPRI_PREEMPT + 1, &thread);
+       if (result != KERN_SUCCESS)
+               panic("thread_call_initialize");
+
+       thread_deallocate(thread);
 }
 
 void
 thread_call_setup(
        thread_call_t                   call,
        thread_call_func_t              func,
-       thread_call_param_t             param0
-)
+       thread_call_param_t             param0)
 {
        call_entry_setup(call, func, param0);
 }
 
 /*
- * Routine:    _internal_call_allocate [private, inline]
- *
- * Purpose:    Allocate an internal callout entry.
+ *     _internal_call_allocate:
  *
- * Preconditions:      thread_call_lock held.
+ *     Allocate an internal callout entry.
  *
- * Postconditions:     None.
+ *     Called with thread_call_lock held.
  */
-
 static __inline__ thread_call_t
 _internal_call_allocate(void)
 {
     thread_call_t              call;
     
-    if (queue_empty(&internal_call_free_queue))
+    if (queue_empty(&thread_call_internal_queue))
        panic("_internal_call_allocate");
        
-    call = TC(dequeue_head(&internal_call_free_queue));
+    call = TC(dequeue_head(&thread_call_internal_queue));
     
     return (call);
 }
 
 /*
- * Routine:    _internal_call_release [private, inline]
- *
- * Purpose:    Release an internal callout entry which
- *             is no longer pending (or delayed).
+ *     _internal_call_release:
  *
- * Preconditions:      thread_call_lock held.
+ *     Release an internal callout entry which
+ *     is no longer pending (or delayed).
  *
- * Postconditions:     None.
+ *     Called with thread_call_lock held.
  */
-
-static __inline__
-void
+static __inline__ void
 _internal_call_release(
-    thread_call_t              call
-)
+    thread_call_t              call)
 {
     if (    call >= internal_call_storage                                              &&
-                   call < &internal_call_storage[internal_call_num]            )
-               enqueue_tail(&internal_call_free_queue, qe(call));
+                   call < &internal_call_storage[internal_call_count]          )
+               enqueue_head(&thread_call_internal_queue, qe(call));
 }
 
 /*
- * Routine:    _pending_call_enqueue [private, inline]
+ *     _pending_call_enqueue:
  *
- * Purpose:    Place an entry at the end of the
- *             pending queue, to be executed soon.
+ *     Place an entry at the end of the
+ *     pending queue, to be executed soon.
  *
- * Preconditions:      thread_call_lock held.
+ *     Returns TRUE if the entry was already
+ *     on a queue.
  *
- * Postconditions:     None.
+ *     Called with thread_call_lock held.
  */
-
-static __inline__
-void
+static __inline__ boolean_t
 _pending_call_enqueue(
-    thread_call_t              call
-)
+    thread_call_t              call,
+       thread_call_group_t     group)
 {
-    enqueue_tail(&pending_call_queue, qe(call));
-       if (++thread_calls.pending_num > thread_calls.pending_hiwat)
-               thread_calls.pending_hiwat = thread_calls.pending_num;
+       queue_t         old_queue;
 
-    call->state = PENDING;
-}
+       old_queue = call_entry_enqueue_tail(call, &group->pending_queue);
 
-/*
- * Routine:    _pending_call_dequeue [private, inline]
- *
- * Purpose:    Remove an entry from the pending queue,
- *             effectively unscheduling it.
- *
- * Preconditions:      thread_call_lock held.
- *
- * Postconditions:     None.
- */
+       group->pending_count++;
 
-static __inline__
-void
-_pending_call_dequeue(
-    thread_call_t              call
-)
-{
-    (void)remque(qe(call));
-       thread_calls.pending_num--;
-    
-    call->state = IDLE;
+       return (old_queue != NULL);
 }
 
 /*
- * Routine:    _delayed_call_enqueue [private, inline]
+ *     _delayed_call_enqueue:
  *
- * Purpose:    Place an entry on the delayed queue,
- *             after existing entries with an earlier
- *             (or identical) deadline.
+ *     Place an entry on the delayed queue,
+ *     after existing entries with an earlier
+ *     (or identical) deadline.
  *
- * Preconditions:      thread_call_lock held.
+ *     Returns TRUE if the entry was already
+ *     on a queue.
  *
- * Postconditions:     None.
+ *     Called with thread_call_lock held.
  */
-
-static __inline__
-void
+static __inline__ boolean_t
 _delayed_call_enqueue(
-    thread_call_t              call
-)
+    thread_call_t              call,
+       thread_call_group_t     group,
+       uint64_t                        deadline)
 {
-    thread_call_t              current;
-    
-    current = TC(queue_first(&delayed_call_queue));
-    
-    while (TRUE) {
-       if (    queue_end(&delayed_call_queue, qe(current))             ||
-                                       call->deadline < current->deadline                      ) {
-                       current = TC(queue_prev(qe(current)));
-                       break;
-               }
-           
-               current = TC(queue_next(qe(current)));
-    }
+       queue_t                 old_queue;
 
-    insque(qe(call), qe(current));
-       if (++thread_calls.delayed_num > thread_calls.delayed_hiwat)
-               thread_calls.delayed_hiwat = thread_calls.delayed_num;
-    
-    call->state = DELAYED;
+       old_queue = call_entry_enqueue_deadline(call, &group->delayed_queue, deadline);
+
+       if (old_queue == &group->pending_queue)
+               group->pending_count--;
+
+       return (old_queue != NULL);
 }
 
 /*
- * Routine:    _delayed_call_dequeue [private, inline]
+ *     _call_dequeue:
  *
- * Purpose:    Remove an entry from the delayed queue,
- *             effectively unscheduling it.
+ *     Remove an entry from a queue.
  *
- * Preconditions:      thread_call_lock held.
+ *     Returns TRUE if the entry was on a queue.
  *
- * Postconditions:     None.
+ *     Called with thread_call_lock held.
  */
-
-static __inline__
-void
-_delayed_call_dequeue(
-    thread_call_t              call
-)
+static __inline__ boolean_t
+_call_dequeue(
+       thread_call_t           call,
+       thread_call_group_t     group)
 {
-    (void)remque(qe(call));
-       thread_calls.delayed_num--;
-    
-    call->state = IDLE;
+       queue_t                 old_queue;
+
+       old_queue = call_entry_dequeue(call);
+
+       if (old_queue == &group->pending_queue)
+               group->pending_count--;
+
+       return (old_queue != NULL);
 }
 
 /*
- * Routine:    _set_delayed_call_timer [private]
- *
- * Purpose:    Reset the timer so that it
- *             next expires when the entry is due.
+ *     _set_delayed_call_timer:
  *
- * Preconditions:      thread_call_lock held.
+ *     Reset the timer so that it
+ *     next expires when the entry is due.
  *
- * Postconditions:     None.
+ *     Called with thread_call_lock held.
  */
-
 static __inline__ void
 _set_delayed_call_timer(
-    thread_call_t              call
-)
+    thread_call_t              call,
+       thread_call_group_t     group)
 {
-    timer_call_enter(&thread_call_delayed_timer, call->deadline);
+    timer_call_enter(&group->delayed_timer, call->deadline);
 }
 
 /*
- * Routine:    _remove_from_pending_queue [private]
+ *     _remove_from_pending_queue:
  *
- * Purpose:    Remove the first (or all) matching
- *             entries from the pending queue,
- *             effectively unscheduling them.
- *             Returns whether any matching entries
- *             were found.
+ *     Remove the first (or all) matching
+ *     entries from the pending queue.
  *
- * Preconditions:      thread_call_lock held.
+ *     Returns TRUE if any matching entries
+ *     were found.
  *
- * Postconditions:     None.
+ *     Called with thread_call_lock held.
  */
-
-static
-boolean_t
+static boolean_t
 _remove_from_pending_queue(
     thread_call_func_t         func,
     thread_call_param_t                param0,
-    boolean_t                          remove_all
-)
+    boolean_t                          remove_all)
 {
-       boolean_t                       call_removed = FALSE;
-       thread_call_t           call;
+       boolean_t                               call_removed = FALSE;
+       thread_call_t                   call;
+       thread_call_group_t             group = &thread_call_group0;
     
-    call = TC(queue_first(&pending_call_queue));
+    call = TC(queue_first(&group->pending_queue));
     
-    while (!queue_end(&pending_call_queue, qe(call))) {
+    while (!queue_end(&group->pending_queue, qe(call))) {
        if (    call->func == func                      &&
                                call->param0 == param0                  ) {
                        thread_call_t   next = TC(queue_next(qe(call)));
                
-                       _pending_call_dequeue(call);
+                       _call_dequeue(call, group);
 
                        _internal_call_release(call);
            
@@ -436,38 +358,34 @@ _remove_from_pending_queue(
 }
 
 /*
- * Routine:    _remove_from_delayed_queue [private]
+ *     _remove_from_delayed_queue:
  *
- * Purpose:    Remove the first (or all) matching
- *             entries from the delayed queue,
- *             effectively unscheduling them.
- *             Returns whether any matching entries
- *             were found.
+ *     Remove the first (or all) matching
+ *     entries from the delayed queue.
  *
- * Preconditions:      thread_call_lock held.
+ *     Returns TRUE if any matching entries
+ *     were found.
  *
- * Postconditions:     None.
+ *     Called with thread_call_lock held.
  */
-
-static
-boolean_t
+static boolean_t
 _remove_from_delayed_queue(
     thread_call_func_t         func,
     thread_call_param_t                param0,
-    boolean_t                          remove_all
-)
+    boolean_t                          remove_all)
 {
-    boolean_t                  call_removed = FALSE;
-    thread_call_t              call;
+    boolean_t                          call_removed = FALSE;
+    thread_call_t                      call;
+       thread_call_group_t             group = &thread_call_group0;
     
-    call = TC(queue_first(&delayed_call_queue));
+    call = TC(queue_first(&group->delayed_queue));
     
-    while (!queue_end(&delayed_call_queue, qe(call))) {
+    while (!queue_end(&group->delayed_queue, qe(call))) {
        if (    call->func == func                      &&
                                call->param0 == param0                  ) {
                        thread_call_t   next = TC(queue_next(qe(call)));
                
-                       _delayed_call_dequeue(call);
+                       _call_dequeue(call, group);
            
                        _internal_call_release(call);
            
@@ -484,38 +402,32 @@ _remove_from_delayed_queue(
     return (call_removed);
 }
 
+#ifndef        __LP64__
+
 /*
- * Routine:    thread_call_func [public]
- *
- * Purpose:    Schedule a function callout.
- *             Guarantees { function, argument }
- *             uniqueness if unique_call is TRUE.
+ *     thread_call_func:
  *
- * Preconditions:      Callable from an interrupt context
- *                                     below splsched.
+ *     Enqueue a function callout.
  *
- * Postconditions:     None.
+ *     Guarantees { function, argument }
+ *     uniqueness if unique_call is TRUE.
  */
-
 void
 thread_call_func(
     thread_call_func_t         func,
     thread_call_param_t                param,
-    boolean_t                          unique_call
-)
+    boolean_t                          unique_call)
 {
-    thread_call_t              call;
-    int                                        s;
+    thread_call_t                      call;
+       thread_call_group_t             group = &thread_call_group0;
+    spl_t                                      s;
     
-    if (!thread_call_initialized)
-       panic("thread_call_func");
-       
     s = splsched();
     simple_lock(&thread_call_lock);
     
-    call = TC(queue_first(&pending_call_queue));
+    call = TC(queue_first(&group->pending_queue));
     
-       while (unique_call && !queue_end(&pending_call_queue, qe(call))) {
+       while (unique_call && !queue_end(&group->pending_queue, qe(call))) {
        if (    call->func == func                      &&
                                call->param0 == param                   ) {
                        break;
@@ -524,47 +436,40 @@ thread_call_func(
                call = TC(queue_next(qe(call)));
     }
     
-    if (!unique_call || queue_end(&pending_call_queue, qe(call))) {
+    if (!unique_call || queue_end(&group->pending_queue, qe(call))) {
                call = _internal_call_allocate();
                call->func                      = func;
                call->param0            = param;
-               call->param1            = 0;
+               call->param1            = NULL;
        
-               _pending_call_enqueue(call);
+               _pending_call_enqueue(call, group);
                
-               if (thread_calls.active_num <= 0)
-                       _call_thread_wake();
+               if (group->active_count == 0)
+                       thread_call_wake(group);
     }
 
        simple_unlock(&thread_call_lock);
     splx(s);
 }
 
+#endif /* __LP64__ */
+
 /*
- * Routine:    thread_call_func_delayed [public]
+ *     thread_call_func_delayed:
  *
- * Purpose:    Schedule a function callout to
- *             occur at the stated time.
- *
- * Preconditions:      Callable from an interrupt context
- *                                     below splsched.
- *
- * Postconditions:     None.
+ *     Enqueue a function callout to
+ *     occur at the stated time.
  */
-
 void
 thread_call_func_delayed(
     thread_call_func_t         func,
     thread_call_param_t                param,
-    uint64_t                           deadline
-)
+    uint64_t                           deadline)
 {
-    thread_call_t              call;
-    int                                        s;
+    thread_call_t                      call;
+       thread_call_group_t             group = &thread_call_group0;
+    spl_t                                      s;
     
-    if (!thread_call_initialized)
-       panic("thread_call_func_delayed");
-
     s = splsched();
     simple_lock(&thread_call_lock);
     
@@ -572,44 +477,36 @@ thread_call_func_delayed(
     call->func                 = func;
     call->param0               = param;
     call->param1               = 0;
-    call->deadline             = deadline;
     
-    _delayed_call_enqueue(call);
+    _delayed_call_enqueue(call, group, deadline);
     
-    if (queue_first(&delayed_call_queue) == qe(call))
-       _set_delayed_call_timer(call);
+    if (queue_first(&group->delayed_queue) == qe(call))
+       _set_delayed_call_timer(call, group);
     
     simple_unlock(&thread_call_lock);
     splx(s);
 }
 
 /*
- * Routine:    thread_call_func_cancel [public]
+ *     thread_call_func_cancel:
  *
- * Purpose:    Unschedule a function callout.
- *             Removes one (or all)
- *             { function, argument }
- *             instance(s) from either (or both)
- *             the pending and the delayed queue,
- *             in that order.  Returns a boolean
- *             indicating whether any calls were
- *             cancelled.
+ *     Dequeue a function callout.
  *
- * Preconditions:      Callable from an interrupt context
- *                                     below splsched.
+ *     Removes one (or all) { function, argument }
+ *     instance(s) from either (or both)
+ *     the pending and the delayed queue,
+ *     in that order.
  *
- * Postconditions:     None.
+ *     Returns TRUE if any calls were cancelled.
  */
-
 boolean_t
 thread_call_func_cancel(
     thread_call_func_t         func,
     thread_call_param_t                param,
-    boolean_t                          cancel_all
-)
+    boolean_t                          cancel_all)
 {
        boolean_t                       result;
-    int                                        s;
+    spl_t                              s;
     
     s = splsched();
     simple_lock(&thread_call_lock);
@@ -628,53 +525,37 @@ thread_call_func_cancel(
 }
 
 /*
- * Routine:    thread_call_allocate [public]
- *
- * Purpose:    Allocate an external callout
- *             entry.
- *
- * Preconditions:      None.
+ *     thread_call_allocate:
  *
- * Postconditions:     None.
+ *     Allocate a callout entry.
  */
-
 thread_call_t
 thread_call_allocate(
     thread_call_func_t         func,
-    thread_call_param_t                param0
-)
+    thread_call_param_t                param0)
 {
-    thread_call_t              call = (void *)kalloc(sizeof (thread_call_data_t));
-    
-    call->func                 = func;
-    call->param0               = param0;
-    call->state                        = IDLE;
-    
+    thread_call_t              call = zalloc(thread_call_zone);
+
+       call_entry_setup(call, func, param0);
+
     return (call);
 }
 
 /*
- * Routine:    thread_call_free [public]
- *
- * Purpose:    Free an external callout
- *             entry.
- *
- * Preconditions:      None.
+ *     thread_call_free:
  *
- * Postconditions:     None.
+ *     Free a callout entry.
  */
-
 boolean_t
 thread_call_free(
-    thread_call_t              call
-)
+    thread_call_t              call)
 {
-    int                        s;
+    spl_t              s;
     
     s = splsched();
     simple_lock(&thread_call_lock);
     
-    if (call->state != IDLE) {
+    if (call->queue != NULL) {
        simple_unlock(&thread_call_lock);
                splx(s);
 
@@ -684,46 +565,35 @@ thread_call_free(
     simple_unlock(&thread_call_lock);
     splx(s);
     
-    kfree((vm_offset_t)call, sizeof (thread_call_data_t));
+       zfree(thread_call_zone, call);
 
        return (TRUE);
 }
 
 /*
- * Routine:    thread_call_enter [public]
+ *     thread_call_enter:
  *
  *
- * Purpose:    Schedule an external callout 
- *             entry to occur "soon".  Returns a
- *             boolean indicating whether the call
- *             had been already scheduled.
+ *     Enqueue a callout entry to occur "soon".
  *
- * Preconditions:      Callable from an interrupt context
- *                                     below splsched.
- *
- * Postconditions:     None.
+ *     Returns TRUE if the call was
+ *     already on a queue.
  */
-
 boolean_t
 thread_call_enter(
-    thread_call_t              call
-)
+    thread_call_t              call)
 {
-       boolean_t               result = TRUE;
-    int                                s;
+       boolean_t                               result = TRUE;
+       thread_call_group_t             group = &thread_call_group0;
+    spl_t                                      s;
     
     s = splsched();
     simple_lock(&thread_call_lock);
     
-    if (call->state != PENDING) {
-               if (call->state == DELAYED)
-                       _delayed_call_dequeue(call);
-               else if (call->state == IDLE)
-                       result = FALSE;
-
-       _pending_call_enqueue(call);
+    if (call->queue != &group->pending_queue) {
+       result = _pending_call_enqueue(call, group);
                
-               if (thread_calls.active_num <= 0)
-                       _call_thread_wake();
+               if (group->active_count == 0)
+                       thread_call_wake(group);
        }
 
        call->param1 = 0;
@@ -737,26 +607,21 @@ thread_call_enter(
 boolean_t
 thread_call_enter1(
     thread_call_t                      call,
-    thread_call_param_t                param1
-)
+    thread_call_param_t                param1)
 {
-       boolean_t                       result = TRUE;
-    int                                        s;
+       boolean_t                               result = TRUE;
+       thread_call_group_t             group = &thread_call_group0;
+    spl_t                                      s;
     
     s = splsched();
     simple_lock(&thread_call_lock);
     
-    if (call->state != PENDING) {
-               if (call->state == DELAYED)
-                       _delayed_call_dequeue(call);
-               else if (call->state == IDLE)
-                       result = FALSE;
-
-       _pending_call_enqueue(call);
-
-               if (thread_calls.active_num <= 0)
-                       _call_thread_wake();
-    }
+    if (call->queue != &group->pending_queue) {
+       result = _pending_call_enqueue(call, group);
+               
+               if (group->active_count == 0)
+                       thread_call_wake(group);
+       }
 
        call->param1 = param1;
 
@@ -767,45 +632,32 @@ thread_call_enter1(
 }
 
 /*
- * Routine:    thread_call_enter_delayed [public]
- *
- * Purpose:    Schedule an external callout 
- *             entry to occur at the stated time.
- *             Returns a boolean indicating whether
- *             the call had been already scheduled.
+ *     thread_call_enter_delayed:
  *
- * Preconditions:      Callable from an interrupt context
- *                                     below splsched.
+ *     Enqueue a callout entry to occur
+ *     at the stated time.
  *
- * Postconditions:     None.
+ *     Returns TRUE if the call was
+ *     already on a queue.
  */
-
 boolean_t
 thread_call_enter_delayed(
     thread_call_t              call,
-    uint64_t                   deadline
-)
+    uint64_t                   deadline)
 {
-       boolean_t               result = TRUE;
-    int                                s;
+       boolean_t                               result = TRUE;
+       thread_call_group_t             group = &thread_call_group0;
+    spl_t                                      s;
 
     s = splsched();
     simple_lock(&thread_call_lock);
 
-       if (call->state == PENDING)
-               _pending_call_dequeue(call);
-       else if (call->state == DELAYED)
-               _delayed_call_dequeue(call);
-       else if (call->state == IDLE)
-               result = FALSE;
+       result = _delayed_call_enqueue(call, group, deadline);
 
-       call->param1    = 0;
-       call->deadline  = deadline;
+       if (queue_first(&group->delayed_queue) == qe(call))
+               _set_delayed_call_timer(call, group);
 
-       _delayed_call_enqueue(call);
-
-       if (queue_first(&delayed_call_queue) == qe(call))
-               _set_delayed_call_timer(call);
+       call->param1 = 0;
 
     simple_unlock(&thread_call_lock);
     splx(s);
@@ -817,29 +669,21 @@ boolean_t
 thread_call_enter1_delayed(
     thread_call_t                      call,
     thread_call_param_t                param1,
-    uint64_t                           deadline
-)
+    uint64_t                           deadline)
 {
-       boolean_t                       result = TRUE;
-    int                                        s;
+       boolean_t                               result = TRUE;
+       thread_call_group_t             group = &thread_call_group0;
+    spl_t                                      s;
 
     s = splsched();
     simple_lock(&thread_call_lock);
 
-       if (call->state == PENDING)
-               _pending_call_dequeue(call);
-       else if (call->state == DELAYED)
-               _delayed_call_dequeue(call);
-       else if (call->state == IDLE)
-               result = FALSE;
-
-       call->param1    = param1;
-       call->deadline  = deadline;
+       result = _delayed_call_enqueue(call, group, deadline);
 
-       _delayed_call_enqueue(call);
+       if (queue_first(&group->delayed_queue) == qe(call))
+               _set_delayed_call_timer(call, group);
 
-       if (queue_first(&delayed_call_queue) == qe(call))
-               _set_delayed_call_timer(call);
+       call->param1 = param1;
 
     simple_unlock(&thread_call_lock);
     splx(s);
@@ -848,36 +692,25 @@ thread_call_enter1_delayed(
 }
 
 /*
- * Routine:    thread_call_cancel [public]
+ *     thread_call_cancel:
  *
  *
- * Purpose:    Unschedule a callout entry.
- *             Returns a boolean indicating
- *             whether the call had actually
- *             been scheduled.
+ *     Dequeue a callout entry.
  *
- * Preconditions:      Callable from an interrupt context
- *                                     below splsched.
- *
- * Postconditions:     None.
+ *     Returns TRUE if the call was
+ *     on a queue.
  */
-
 boolean_t
 thread_call_cancel(
-    thread_call_t              call
-)
+    thread_call_t              call)
 {
-       boolean_t               result = TRUE;
-    int                                s;
+       boolean_t                               result;
+       thread_call_group_t             group = &thread_call_group0;
+    spl_t                                      s;
     
     s = splsched();
     simple_lock(&thread_call_lock);
-    
-    if (call->state == PENDING)
-       _pending_call_dequeue(call);
-    else if (call->state == DELAYED)
-       _delayed_call_dequeue(call);
-    else
-       result = FALSE;
+
+       result = _call_dequeue(call, group);
        
     simple_unlock(&thread_call_lock);
     splx(s);
@@ -885,32 +718,29 @@ thread_call_cancel(
        return (result);
 }
 
+#ifndef        __LP64__
+
 /*
- * Routine:    thread_call_is_delayed [public]
- *
- * Purpose:    Returns a boolean indicating
- *             whether a call is currently scheduled
- *             to occur at a later time.  Optionally
- *             returns the expiration time.
+ *     thread_call_is_delayed:
  *
- * Preconditions:      Callable from an interrupt context
- *                                     below splsched.
+ *     Returns TRUE if the call is
+ *     currently on a delayed queue.
  *
- * Postconditions:     None.
+ *     Optionally returns the expiration time.
  */
-
 boolean_t
 thread_call_is_delayed(
        thread_call_t           call,
        uint64_t                        *deadline)
 {
-       boolean_t               result = FALSE;
-       int                             s;
+       boolean_t                               result = FALSE;
+       thread_call_group_t             group = &thread_call_group0;
+       spl_t                                   s;
 
        s = splsched();
        simple_lock(&thread_call_lock);
 
-       if (call->state == DELAYED) {
+       if (call->queue == &group->delayed_queue) {
                if (deadline != NULL)
                        *deadline = call->deadline;
                result = TRUE;
@@ -922,272 +752,213 @@ thread_call_is_delayed(
        return (result);
 }
 
+#endif /* __LP64__ */
+
 /*
- * Routine:    _call_thread_wake [private, inline]
+ *     thread_call_wake:
  *
- * Purpose:    Wake a callout thread to service
- *             pending callout entries.  May wake
- *             the activate thread in order to
- *             create additional callout threads.
+ *     Wake a call thread to service
+ *     pending call entries.  May wake
+ *     the daemon thread in order to
+ *     create additional call threads.
  *
- * Preconditions:      thread_call_lock held.
- *
- * Postconditions:     None.
+ *     Called with thread_call_lock held.
  */
-
-static __inline__
-void
-_call_thread_wake(void)
+static __inline__ void
+thread_call_wake(
+       thread_call_group_t             group)
 {
-       if (wait_queue_wakeup_one(
-                                       &call_thread_idle_queue, &call_thread_idle_queue,
-                                                                               THREAD_AWAKENED) == KERN_SUCCESS) {
-               thread_calls.idle_thread_num--;
-
-               if (++thread_calls.active_num > thread_calls.active_hiwat)
-                       thread_calls.active_hiwat = thread_calls.active_num;
+       if (group->idle_count > 0 && wait_queue_wakeup_one(&group->idle_wqueue, NULL, THREAD_AWAKENED) == KERN_SUCCESS) {
+               group->idle_count--; group->active_count++;
        }
        else
-       if (!activate_thread_awake) {
-               clear_wait(activate_thread, THREAD_AWAKENED);
-               activate_thread_awake = TRUE;
+       if (!thread_call_daemon_awake) {
+               thread_call_daemon_awake = TRUE;
+               wait_queue_wakeup_one(&group->daemon_wqueue, NULL, THREAD_AWAKENED);
        }
 }
 
 /*
- * Routine:    call_thread_block [private]
+ *     sched_call_thread:
  *
- * Purpose:    Hook via thread dispatch on
- *             the occasion of a callout blocking.
- *
- * Preconditions:      splsched.
- *
- * Postconditions:     None.
+ *     Call out invoked by the scheduler.
  */
-
-void
-call_thread_block(void)
+static void
+sched_call_thread(
+       int                             type,
+__unused       thread_t                thread)
 {
-       simple_lock(&thread_call_lock);
+       thread_call_group_t             group = &thread_call_group0;
 
-       if (--thread_calls.active_num < thread_calls.active_lowat)
-               thread_calls.active_lowat = thread_calls.active_num;
-
-       if (    thread_calls.active_num <= 0    &&
-                       thread_calls.pending_num > 0            )
-               _call_thread_wake();
-
-       simple_unlock(&thread_call_lock);
-}
+       simple_lock(&thread_call_lock);
 
-/*
- * Routine:    call_thread_unblock [private]
- *
- * Purpose:    Hook via thread wakeup on
- *             the occasion of a callout unblocking.
- *
- * Preconditions:      splsched.
- *
- * Postconditions:     None.
- */
+       switch (type) {
 
-void
-call_thread_unblock(void)
-{
-       simple_lock(&thread_call_lock);
+       case SCHED_CALL_BLOCK:
+               if (--group->active_count == 0 && group->pending_count > 0)
+                       thread_call_wake(group);
+               break;
 
-       if (++thread_calls.active_num > thread_calls.active_hiwat)
-               thread_calls.active_hiwat = thread_calls.active_num;
+       case SCHED_CALL_UNBLOCK:
+               group->active_count++;
+               break;
+       }
 
        simple_unlock(&thread_call_lock);
 }
 
 /*
- * Routine:    _call_thread [private]
- *
- * Purpose:    Executed by a callout thread.
- *
- * Preconditions:      None.
- *
- * Postconditions:     None.
+ *     thread_call_thread:
  */
-
-static
-void
-_call_thread_continue(void)
+static void
+thread_call_thread(
+       thread_call_group_t             group)
 {
        thread_t                self = current_thread();
 
     (void) splsched();
     simple_lock(&thread_call_lock);
 
-       self->active_callout = TRUE;
+       thread_sched_call(self, sched_call_thread);
 
-    while (thread_calls.pending_num > 0) {
+    while (group->pending_count > 0) {
                thread_call_t                   call;
                thread_call_func_t              func;
                thread_call_param_t             param0, param1;
 
-               call = TC(dequeue_head(&pending_call_queue));
-               thread_calls.pending_num--;
+               call = TC(dequeue_head(&group->pending_queue));
+               group->pending_count--;
 
                func = call->func;
                param0 = call->param0;
                param1 = call->param1;
        
-               call->state = IDLE;
+               call->queue = NULL;
 
                _internal_call_release(call);
 
                simple_unlock(&thread_call_lock);
                (void) spllo();
 
+               KERNEL_DEBUG_CONSTANT(
+                       MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
+                               func, param0, param1, 0, 0);
+
                (*func)(param0, param1);
 
-               (void)thread_funnel_set(self->funnel_lock, FALSE);
+               (void)thread_funnel_set(self->funnel_lock, FALSE);              /* XXX */
 
                (void) splsched();
                simple_lock(&thread_call_lock);
     }
 
-       self->active_callout = FALSE;
+       thread_sched_call(self, NULL);
+       group->active_count--;
 
-       if (--thread_calls.active_num < thread_calls.active_lowat)
-               thread_calls.active_lowat = thread_calls.active_num;
-       
-    if (thread_calls.idle_thread_num < thread_calls.thread_lowat) {
-               thread_calls.idle_thread_num++;
+    if (group->idle_count < thread_call_thread_min) {
+               group->idle_count++;
 
-               wait_queue_assert_wait(
-                                       &call_thread_idle_queue, &call_thread_idle_queue,
-                                                                                                               THREAD_INTERRUPTIBLE);
+               wait_queue_assert_wait(&group->idle_wqueue, NULL, THREAD_UNINT, 0);
        
                simple_unlock(&thread_call_lock);
                (void) spllo();
 
-               thread_block(_call_thread_continue);
+               thread_block_parameter((thread_continue_t)thread_call_thread, group);
                /* NOTREACHED */
     }
-    
-    thread_calls.thread_num--;
-    
+
     simple_unlock(&thread_call_lock);
     (void) spllo();
     
-    (void) thread_terminate(self->top_act);
+    thread_terminate(self);
        /* NOTREACHED */
 }
 
-static
-void
-_call_thread(void)
-{
-       thread_t                                        self = current_thread();
-
-    stack_privilege(self);
-
-    _call_thread_continue();
-    /* NOTREACHED */
-}
-
 /*
- * Routine:    _activate_thread [private]
- *
- * Purpose:    Executed by the activate thread.
- *
- * Preconditions:      None.
- *
- * Postconditions:     Never terminates.
+ *     thread_call_daemon:
  */
-
-static
-void
-_activate_thread_continue(void)
+static void
+thread_call_daemon_continue(
+       thread_call_group_t             group)
 {
+       kern_return_t   result;
+       thread_t                thread;
+
     (void) splsched();
     simple_lock(&thread_call_lock);
         
-       while (         thread_calls.active_num <= 0    &&
-                               thread_calls.pending_num > 0            ) {
-
-               if (++thread_calls.active_num > thread_calls.active_hiwat)
-                       thread_calls.active_hiwat = thread_calls.active_num;
-
-               if (++thread_calls.thread_num > thread_calls.thread_hiwat)
-                       thread_calls.thread_hiwat = thread_calls.thread_num;
+       while (group->active_count == 0 && group->pending_count > 0) {
+               group->active_count++;
 
                simple_unlock(&thread_call_lock);
                (void) spllo();
        
-               (void) kernel_thread_with_priority(
-                                                                       kernel_task, MAXPRI_KERNEL - 1,
-                                                                                                       _call_thread, TRUE, TRUE);
+               result = kernel_thread_start_priority((thread_continue_t)thread_call_thread, group, BASEPRI_PREEMPT, &thread);
+               if (result != KERN_SUCCESS)
+                       panic("thread_call_daemon");
+
+               thread_deallocate(thread);
+
                (void) splsched();
                simple_lock(&thread_call_lock);
     }
-               
-    assert_wait(&activate_thread_awake, THREAD_INTERRUPTIBLE);
-       activate_thread_awake = FALSE;
+
+    thread_call_daemon_awake = FALSE;
+    wait_queue_assert_wait(&group->daemon_wqueue, NULL, THREAD_UNINT, 0);
     
     simple_unlock(&thread_call_lock);
        (void) spllo();
     
-       thread_block(_activate_thread_continue);
+       thread_block_parameter((thread_continue_t)thread_call_daemon_continue, group);
        /* NOTREACHED */
 }
 
-static
-void
-_activate_thread(void)
+static void
+thread_call_daemon(
+       thread_call_group_t             group)
 {
-       thread_t                self = current_thread();
+       thread_t        self = current_thread();
 
-       self->vm_privilege = TRUE;
+       self->options |= TH_OPT_VMPRIV;
        vm_page_free_reserve(2);        /* XXX */
-    stack_privilege(self);
     
-    _activate_thread_continue();
+    thread_call_daemon_continue(group);
     /* NOTREACHED */
 }
 
-static
-void
-_delayed_call_timer(
-       timer_call_param_t              p0,
-       timer_call_param_t              p1
+static void
+thread_call_delayed_timer(
+       timer_call_param_t                              p0,
+       __unused timer_call_param_t             p1
 )
 {
-       uint64_t                        timestamp;
-    thread_call_t              call;
-       boolean_t                       new_pending = FALSE;
-    int                                        s;
+    thread_call_t                      call;
+       thread_call_group_t             group = p0;
+       boolean_t                               new_pending = FALSE;
+       uint64_t                                timestamp;
 
-    s = splsched();
     simple_lock(&thread_call_lock);
 
-       clock_get_uptime(&timestamp);
+       timestamp = mach_absolute_time();
     
-    call = TC(queue_first(&delayed_call_queue));
+    call = TC(queue_first(&group->delayed_queue));
     
-    while (!queue_end(&delayed_call_queue, qe(call))) {
+    while (!queue_end(&group->delayed_queue, qe(call))) {
        if (call->deadline <= timestamp) {
-                       _delayed_call_dequeue(call);
-
-                       _pending_call_enqueue(call);
+                       _pending_call_enqueue(call, group);
                        new_pending = TRUE;
                }
                else
                        break;
            
-               call = TC(queue_first(&delayed_call_queue));
+               call = TC(queue_first(&group->delayed_queue));
     }
 
-       if (!queue_end(&delayed_call_queue, qe(call)))
-               _set_delayed_call_timer(call);
+       if (!queue_end(&group->delayed_queue, qe(call)))
+               _set_delayed_call_timer(call, group);
 
-    if (new_pending && thread_calls.active_num <= 0)
-               _call_thread_wake();
+    if (new_pending && group->active_count == 0)
+               thread_call_wake(group);
 
     simple_unlock(&thread_call_lock);
-    splx(s);
 }
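
For reference, a kernel client would drive the reworked interface above roughly as follows. This is a minimal sketch, not part of the change: the example_* names are invented, and the deadline conversion via clock_interval_to_deadline()/NSEC_PER_SEC is assumed to be available from <kern/clock.h> and <mach/clock_types.h>; only thread_call_allocate(), thread_call_enter_delayed(), thread_call_cancel() and thread_call_free() are taken from the diff itself.

/*
 * Illustrative sketch only -- a hypothetical client of the thread_call
 * interface shown in the diff above.
 */
#include <mach/mach_types.h>
#include <kern/clock.h>
#include <kern/thread_call.h>

static thread_call_t	example_call;

static void
example_func(
	__unused thread_call_param_t	param0,
	__unused thread_call_param_t	param1)
{
	/* runs on a kernel callout thread at spllo, outside interrupt context */
}

static void
example_arm(void)
{
	uint64_t	deadline;

	/* allocate a callout bound to { example_func, NULL } */
	example_call = thread_call_allocate(example_func, NULL);

	/*
	 * Schedule it roughly one second out.  The enter routines return
	 * TRUE if the call was already on a queue, so re-arming simply
	 * moves the existing entry to the new deadline.
	 */
	clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);
	thread_call_enter_delayed(example_call, deadline);
}

static void
example_disarm(void)
{
	/*
	 * thread_call_free() declines to free a call that is still queued
	 * (call->queue != NULL), so cancel before freeing.
	 */
	if (example_call != NULL) {
		thread_call_cancel(example_call);
		thread_call_free(example_call);
		example_call = NULL;
	}
}

Note that this revision wraps the non-allocating thread_call_func() and the query routine thread_call_is_delayed() in #ifndef __LP64__, steering new code toward the allocated thread_call_t style sketched here.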