diff --git a/osfmk/kern/thread.h b/osfmk/kern/thread.h
index 80127b04dba7973b3d0d85bc2be46cf12dcf1d70..cde616313efd9a750dad3a68533792e6cda339ac 100644
--- a/osfmk/kern/thread.h
+++ b/osfmk/kern/thread.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
  * 
 #include <mach/mach_types.h>
 #include <mach/message.h>
 #include <mach/boolean.h>
-#include <mach/vm_types.h>
-#include <mach/vm_prot.h>
+#include <mach/vm_param.h>
 #include <mach/thread_info.h>
 #include <mach/thread_status.h>
-#include <kern/cpu_data.h>             /* for current_thread */
+#include <mach/exception_types.h>
+
 #include <kern/kern_types.h>
 
-/*
- * Logically, a thread of control consists of two parts:
- *     a thread_shuttle, which may migrate during an RPC, and
- *     a thread_activation, which remains attached to a task.
- * The thread_shuttle is the larger portion of the two-part thread,
- * and contains scheduling info, messaging support, accounting info,
- * and links to the thread_activation within which the shuttle is
- * currently operating.
- *
- * It might make sense to have the thread_shuttle be a proper sub-structure
- * of the thread, with the thread containing links to both the shuttle and
- * activation.  In order to reduce the scope and complexity of source
- * changes and the overhead of maintaining these linkages, we have subsumed
- * the shuttle into the thread, calling it a thread_shuttle.
- *
- * User accesses to threads always come in via the user's thread port,
- * which gets translated to a pointer to the target thread_activation.
- * Kernel accesses intended to effect the entire thread, typically use
- * a pointer to the thread_shuttle (current_thread()) as the target of
- * their operations.  This makes sense given that we have subsumed the
- * shuttle into the thread_shuttle, eliminating one set of linkages.
- * Operations effecting only the shuttle may use a thread_shuttle_t
- * to indicate this.
- *
- * The current_act() macro returns a pointer to the current thread_act, while
- * the current_thread() macro returns a pointer to the currently active
- * thread_shuttle (representing the thread in its entirety).
- */
+#include <sys/cdefs.h>
 
-/*
- *     Possible results of thread_block - returned in
- *     current_thread()->wait_result.
- */
-#define THREAD_AWAKENED                0               /* normal wakeup */
-#define THREAD_TIMED_OUT       1               /* timeout expired */
-#define THREAD_INTERRUPTED     2               /* interrupted by clear_wait */
-#define THREAD_RESTART         3               /* restart operation entirely */
+#ifdef MACH_KERNEL_PRIVATE
 
-/*
- * Interruptible flags for assert_wait
- *
- */
-#define THREAD_UNINT           0               /* not interruptible      */
-#define THREAD_INTERRUPTIBLE   1               /* may not be restartable */
-#define THREAD_ABORTSAFE       2               /* abortable safely       */
+#include <cputypes.h>
 
-#ifdef MACH_KERNEL_PRIVATE
-#include <cpus.h>
-#include <hw_footprint.h>
+#include <mach_assert.h>
 #include <mach_host.h>
 #include <mach_prof.h>
-#include <mach_lock_mon.h>
 #include <mach_ldebug.h>
 
+#include <ipc/ipc_types.h>
+
 #include <mach/port.h>
-#include <kern/ast.h>
 #include <kern/cpu_number.h>
 #include <kern/queue.h>
-#include <kern/time_out.h>
 #include <kern/timer.h>
 #include <kern/lock.h>
+#include <kern/locks.h>
 #include <kern/sched.h>
 #include <kern/sched_prim.h>
-#include <kern/thread_pool.h>
 #include <kern/thread_call.h>
 #include <kern/timer_call.h>
 #include <kern/task.h>
-#include <ipc/ipc_kmsg.h>
-#include <machine/thread.h>
+#include <kern/exception.h>
 
-typedef struct {
-       int                     fnl_type;       /* funnel type */
-       mutex_t *       fnl_mutex;      /* underlying mutex for the funnel */
-       void *          fnl_mtxholder; /* thread (last)holdng mutex */
-       void *          fnl_mtxrelease; /* thread (last)releasing mutex */
-       mutex_t *       fnl_oldmutex;   /* Mutex before collapsing split funnel */
-} funnel_t;
+#include <ipc/ipc_kmsg.h>
 
+#include <machine/cpu_data.h>
+#include <machine/thread.h>
 
-typedef struct thread_shuttle {
+struct thread {
        /*
-        * Beginning of thread_shuttle proper.  When the thread is on
-        * a wait queue, these three fields are in treated as an un-
-        * official union with a wait_queue_element.  If you change
-        * these, you must change that definition as well.
+        *      NOTE:   The runq field in the thread structure has an unusual
+        *      locking protocol.  If its value is RUN_QUEUE_NULL, then it is
+        *      locked by the thread_lock, but if its value is something else
+        *      (i.e. a run_queue) then it is locked by that run_queue's lock.
+        *
+        *      When the thread is on a wait queue, these first three fields
+        *      are treated as an unofficial union with a wait_queue_element.
+        *      If you change these, you must change that definition as well
+        *      (kern/wait_queue.h).
         */
-       queue_chain_t   links;          /* current run/wait queue links */
-       run_queue_t     runq;                   /* run queue p is on SEE BELOW */
-       int             whichq;                         /* which queue level p is on */
-
-/*
- *     NOTE:   The runq field in the thread structure has an unusual
- *     locking protocol.  If its value is RUN_QUEUE_NULL, then it is
- *     locked by the thread_lock, but if its value is something else
- *     (i.e. a run_queue) then it is locked by that run_queue's lock.
- */
-
-       /* Thread bookkeeping */
-       queue_chain_t   pset_threads;   /* list of all shuttles in proc set */
-
-       /* Self-preservation */
-       decl_simple_lock_data(,lock)    /* scheduling lock (thread_lock()) */
-       decl_simple_lock_data(,wake_lock) /* covers wake_active (wake_lock())*/
-       decl_mutex_data(,rpc_lock)      /* RPC lock (rpc_lock()) */
-       int             ref_count;      /* number of references to me */
-        
-        vm_offset_t     kernel_stack;   /* accurate only if the thread is 
-                                           not swapped and not executing */
-
-       vm_offset_t     stack_privilege;/* reserved kernel stack */
-
-       /* Blocking information */
-       int             reason;         /* why we blocked */
-       event_t         wait_event;     /* event we are waiting on */
-       kern_return_t   wait_result;    /* outcome of wait -
-                                          may be examined by this thread
-                                          WITHOUT locking */
-       wait_queue_t    wait_queue;     /* wait queue we are currently on */
-       queue_chain_t   wait_link;      /* event's wait queue link */
-       boolean_t       wake_active;    /* Someone is waiting for this
-                                          thread to become suspended */
-       int             state;          /* Thread state: */
-       boolean_t       preempt;        /* Thread is undergoing preemption */
-       boolean_t       interruptible;  /* Thread is interruptible */
-
-#if    ETAP_EVENT_MONITOR
-       int             etap_reason;    /* real reason why we blocked */
-       boolean_t       etap_trace;     /* ETAP trace status */
-#endif /* ETAP_EVENT_MONITOR */
-
+       /* Items examined often, modified infrequently */
+       queue_chain_t   links;                          /* run/wait queue links */
+       run_queue_t             runq;                           /* run queue thread is on SEE BELOW */
+       wait_queue_t    wait_queue;                     /* wait queue we are currently on */
+       event64_t               wait_event;                     /* wait queue event */
+       integer_t               options;                        /* options set by thread itself */
+#define TH_OPT_INTMASK         0x03            /* interrupt / abort level */
+#define TH_OPT_VMPRIV          0x04            /* may allocate reserved memory */
+#define TH_OPT_DELAYIDLE       0x08            /* performing delayed idle */
+#define TH_OPT_CALLOUT         0x10            /* executing as callout */
+
+       /* Data updated during assert_wait/thread_wakeup */
+       decl_simple_lock_data(,sched_lock)      /* scheduling lock (thread_lock()) */
+       decl_simple_lock_data(,wake_lock)       /* covers wake_active (wake_lock())*/
+       boolean_t                       wake_active;    /* Someone is waiting for this */
+       int                                     at_safe_point;  /* thread_abort_safely allowed */
+       ast_t                           reason;                 /* why we blocked */
+       wait_result_t           wait_result;    /* outcome of wait -
+                                                                                * may be examined by this thread
+                                                                                * WITHOUT locking */
+       thread_continue_t       continuation;   /* continue here next dispatch */
+       void                            *parameter;             /* continuation parameter */
+
+       /* Data updated/used in thread_invoke */
+    struct funnel_lock *funnel_lock;           /* Non-reentrancy funnel */
+    int                                    funnel_state;
+#define TH_FN_OWNED                    0x1                             /* we own the funnel */
+#define TH_FN_REFUNNEL         0x2                             /* re-acquire funnel on dispatch */
+
+       vm_offset_t             kernel_stack;           /* current kernel stack */
+       vm_offset_t                     reserved_stack;         /* reserved kernel stack */
+
+       /* Thread state: */
+       int                                     state;
 /*
  *     Thread states [bits or'ed]
  */
-#define TH_WAIT                        0x01    /* thread is queued for waiting */
-#define TH_SUSP                        0x02    /* thread has been asked to stop */
-#define TH_RUN                 0x04    /* thread is running or on runq */
-#define TH_UNINT               0x08    /* thread is waiting uninteruptibly */
-#define        TH_HALTED               0x10    /* thread is halted at clean point ? */
+#define TH_WAIT                        0x01                    /* queued for waiting */
+#define TH_SUSP                        0x02                    /* stopped or requested to stop */
+#define TH_RUN                 0x04                    /* running or on runq */
+#define TH_UNINT               0x08                    /* waiting uninterruptibly */
+#define        TH_TERMINATE    0x10                    /* halted at termination */
 
-#define TH_ABORT               0x20    /* abort interruptible waits */
-#define TH_SWAPPED_OUT 0x40    /* thread is swapped out */
+#define TH_ABORT               0x20                    /* abort interruptible waits */
+#define TH_ABORT_SAFELY        0x40                    /* ... but only those at safe point */
 
-#define TH_IDLE                        0x80    /* thread is an idle thread */
+#define TH_IDLE                        0x80                    /* processor idle thread */
 
 #define        TH_SCHED_STATE  (TH_WAIT|TH_SUSP|TH_RUN|TH_UNINT)
 
-#define        TH_STACK_HANDOFF        0x0100  /* thread has no kernel stack */
-#define        TH_STACK_COMING_IN      0x0200  /* thread is waiting for kernel stack */
-#define        TH_STACK_STATE  (TH_STACK_HANDOFF | TH_STACK_COMING_IN)
+       /* Scheduling information */
+       integer_t                       sched_mode;                     /* scheduling mode bits */
+#define TH_MODE_REALTIME               0x0001          /* time constraints supplied */
+#define TH_MODE_TIMESHARE              0x0002          /* use timesharing algorithm */
+#define TH_MODE_PREEMPT                        0x0004          /* can preempt kernel contexts */
+#define TH_MODE_FAILSAFE               0x0008          /* fail-safe has tripped */
+#define        TH_MODE_PROMOTED                0x0010          /* sched pri has been promoted */
+#define        TH_MODE_DEPRESS                 0x0020          /* normal depress yield */
+#define TH_MODE_POLLDEPRESS            0x0040          /* polled depress yield */
+#define TH_MODE_ISDEPRESSED            (TH_MODE_DEPRESS | TH_MODE_POLLDEPRESS)
 
-#define        TH_TERMINATE            0x0400  /* thread is terminating */
+       integer_t                       sched_pri;                      /* scheduled (current) priority */
+       integer_t                       priority;                       /* base priority */
+       integer_t                       max_priority;           /* max base priority */
+       integer_t                       task_priority;          /* copy of task base priority */
 
-       /* Stack handoff information */
-       void            (*continuation)(void);  /* start here next time dispatched */
-       int                     cont_arg;                               /* XXX continuation argument */
+       integer_t                       promotions;                     /* level of promotion */
+       integer_t                       pending_promoter_index;
+       void                            *pending_promoter[2];
 
-       /* Scheduling information */
-       integer_t                       importance;             /* task-relative importance */
-       integer_t                       sched_mode;             /* scheduling mode bits */
-#define TH_MODE_REALTIME       0x0001
-       struct {                                                        /* see mach/thread_policy.h */
-               natural_t                       period;
-               natural_t                       computation;
-               natural_t                       constraint;
+       integer_t                       importance;                     /* task-relative importance */
+
+                                                                                       /* real-time parameters */
+       struct {                                                                /* see mach/thread_policy.h */
+               uint32_t                        period;
+               uint32_t                        computation;
+               uint32_t                        constraint;
                boolean_t                       preemptible;
+
+               uint64_t                        deadline;
        }                                       realtime;
 
-       integer_t                       priority;                       /* base priority */
-       integer_t                       sched_pri;                      /* scheduled (current) priority */
-       integer_t                       depress_priority;       /* priority to restore */
-       integer_t                       max_priority;
+       uint32_t                        current_quantum;        /* duration of current quantum */
 
-       natural_t                       cpu_usage;              /* exp. decaying cpu usage [%cpu] */
-       natural_t                       sched_usage;    /* load-weighted cpu usage [sched] */
-       natural_t                       sched_stamp;    /* last time priority was updated */
-       natural_t                       sleep_stamp;    /* last time in TH_WAIT state */
+  /* Data used during setrun/dispatch */
+       timer_data_t            system_timer;           /* system mode timer */
+       processor_set_t         processor_set;          /* assigned processor set */
+       processor_t                     bound_processor;        /* bound to a processor? */
+       processor_t                     last_processor;         /* processor last dispatched on */
+       uint64_t                        last_switch;            /* time of last context switch */
 
-       /* 'Obsolete' stuff that cannot be removed yet */
-       integer_t                       policy;
-       integer_t                       sp_state;
-       integer_t                       unconsumed_quantum;
+       /* Fail-safe computation since last unblock or qualifying yield */
+       uint64_t                        computation_metered;
+       uint64_t                        computation_epoch;
+       integer_t                       safe_mode;              /* saved mode during fail-safe */
+       natural_t                       safe_release;   /* when to release fail-safe */
 
-       /* VM global variables */
-       boolean_t       vm_privilege;   /* can use reserved memory? */
-       vm_offset_t     recover;        /* page fault recovery (copyin/out) */
+       /* Statistics and timesharing calculations */
+       natural_t                       sched_stamp;    /* last scheduler tick */
+       natural_t                       sched_usage;    /* timesharing cpu usage [sched] */
+       natural_t                       pri_shift;              /* usage -> priority from pset */
+       natural_t                       cpu_usage;              /* instrumented cpu usage [%cpu] */
+       natural_t                       cpu_delta;              /* accumulated cpu_usage delta */
 
-       /* IPC data structures */
+       /* Timing data structures */
+       timer_data_t            user_timer;                     /* user mode timer */
+       uint64_t                        system_timer_save;      /* saved system timer value */
+       uint64_t                        user_timer_save;        /* saved user timer value */
 
-       struct ipc_kmsg_queue ith_messages;
+       /* Timed wait expiration */
+       timer_call_data_t       wait_timer;
+       integer_t                       wait_timer_active;
+       boolean_t                       wait_timer_is_set;
 
-       mach_port_t ith_mig_reply;      /* reply port for mig */
-       mach_port_t ith_rpc_reply;      /* reply port for kernel RPCs */
+       /* Priority depression expiration */
+       timer_call_data_t       depress_timer;
+       integer_t                       depress_timer_active;
 
        /* Various bits of stashed state */
        union {
                struct {
-                       mach_msg_return_t state;        /* receive state */
-                       ipc_object_t      object;       /* object received on */
-                       mach_msg_header_t *msg;         /* receive buffer pointer */
-                       mach_msg_size_t   msize;        /* max size for recvd msg */
-                       mach_msg_option_t option;       /* options for receive */
-                       mach_msg_size_t   slist_size;   /* scatter list size */
-                       struct ipc_kmsg   *kmsg;        /* received message */
-                       mach_port_seqno_t seqno;        /* seqno of recvd message */
-                       void                      (*continuation)(mach_msg_return_t);
+                       mach_msg_return_t       state;          /* receive state */
+                       ipc_object_t            object;         /* object received on */
+                       mach_vm_address_t       msg_addr;       /* receive buffer pointer */
+                       mach_msg_size_t         msize;          /* max size for recvd msg */
+                       mach_msg_option_t       option;         /* options for receive */
+                       mach_msg_size_t         slist_size;     /* scatter list size */
+                       struct ipc_kmsg         *kmsg;          /* received message */
+                       mach_port_seqno_t       seqno;          /* seqno of recvd message */
+                       mach_msg_continue_t     continuation;
                } receive;
                struct {
-                       struct semaphore  *waitsemaphore;   /* semaphore ref */
-                       struct semaphore  *signalsemaphore; /* semaphore ref */
-                       int               options;      /* semaphore options */
-                       kern_return_t     result;       /* primary result */
-                       void              (*continuation)(kern_return_t);
+                       struct semaphore        *waitsemaphore;         /* semaphore ref */
+                       struct semaphore        *signalsemaphore;       /* semaphore ref */
+                       int                                     options;                        /* semaphore options */
+                       kern_return_t           result;                         /* primary result */
+                       mach_msg_continue_t continuation;
                } sema;
                struct {
-                       struct sf_policy  *policy;      /* scheduling policy */
-                       int               option;       /* switch option */
+                       int                                     option;         /* switch option */
                } swtch;
-               char *other;            /* catch-all for other state */
+               int                                             misc;           /* catch-all for other state */
        } saved;
 
-       /* Timing data structures */
-       timer_data_t    user_timer;     /* user mode timer */
-       timer_data_t    system_timer;   /* system mode timer */
-       timer_data_t    depressed_timer;/* depressed priority timer */
-       timer_save_data_t user_timer_save;  /* saved user timer value */
-       timer_save_data_t system_timer_save;  /* saved sys timer val. */
-       /*** ??? should the next two fields be moved to SP-specific struct?***/
-       unsigned int    cpu_delta;      /* cpu usage since last update */
-       unsigned int    sched_delta;    /* weighted cpu usage since update */
-
-       /* Timed wait expiration */
-       timer_call_data_t               wait_timer;
-       integer_t                               wait_timer_active;
-       boolean_t                               wait_timer_is_set;
-
-       /* Priority depression expiration */
-       thread_call_data_t              depress_timer;
+       /* IPC data structures */
+       struct ipc_kmsg_queue ith_messages;
+       mach_port_t ith_rpc_reply;                      /* reply port for kernel RPCs */
 
        /* Ast/Halt data structures */
-       boolean_t       active;         /* how alive is the thread */
+       vm_offset_t                     recover;                /* page fault recovery (copyin/out) */
+       int                                     ref_count;              /* number of references to me */
 
-       /* Processor data structures */
-       processor_set_t processor_set;  /* assigned processor set */
-#if    NCPUS > 1
-       processor_t     bound_processor;        /* bound to processor ?*/
-#endif /* NCPUS > 1 */
+       /* Processor set info */
+       queue_chain_t           pset_threads;   /* list of all threads in pset */
 #if    MACH_HOST
-       boolean_t       may_assign;     /* may assignment change? */
-       boolean_t       assign_active;  /* someone waiting for may_assign */
+       boolean_t                       may_assign;             /* may assignment change? */
+       boolean_t                       assign_active;  /* waiting for may_assign */
 #endif /* MACH_HOST */
 
-#if    XKMACHKERNEL
-       int             xk_type;
-#endif /* XKMACHKERNEL */
-
-#if    NCPUS > 1
-       processor_t     last_processor; /* processor this last ran on */
-#if    MACH_LOCK_MON
-       unsigned        lock_stack;     /* number of locks held */
-#endif  /* MACH_LOCK_MON */
-#endif /* NCPUS > 1 */
-
-       int             at_safe_point;  /* thread_abort_safely allowed */
-    int            funnel_state;
-#define TH_FN_OWNED    0x1  /* we own the funnel lock */
-#define TH_FN_REFUNNEL 0x2  /* must reaquire funnel lock when unblocking */
-    funnel_t   *funnel_lock;
-#if    MACH_LDEBUG
-       /*
-        *      Debugging:  track acquired mutexes and locks.
-        *      Because a thread can block while holding such
-        *      synchronizers, we think of the thread as
-        *      "owning" them.
-        */
-#define        MUTEX_STACK_DEPTH       20
-#define        LOCK_STACK_DEPTH        20
-       mutex_t         *mutex_stack[MUTEX_STACK_DEPTH];
-       lock_t          *lock_stack[LOCK_STACK_DEPTH];
-       unsigned int    mutex_stack_index;
-       unsigned int    lock_stack_index;
-       unsigned        mutex_count;    /* XXX to be deleted XXX */
-       boolean_t       kthread;        /* thread is a kernel thread */
-#endif /* MACH_LDEBUG */
+       /* Activation */
+               queue_chain_t                   task_threads;
 
-       /*
-        * End of thread_shuttle proper
-        */
+               /*** Machine-dependent state ***/
+               struct machine_thread   machine;
 
-       /*
-        * Migration and thread_activation linkage information
-        */
-       struct thread_activation *top_act; /* "current" thr_act */
+               /* Task membership */
+               struct task                             *task;
+               vm_map_t                                map;
+
+               decl_mutex_data(,mutex)
+
+               /* Kernel holds on this thread  */
+               int                                             suspend_count;
+
+               /* User level suspensions */
+               int                                             user_stop_count;
+
+               /* Pending thread ast(s) */
+               ast_t                                   ast;
 
-} Thread_Shuttle;
+               /* Miscellaneous bits guarded by mutex */
+               uint32_t
+               /* Indicates that the thread has not been terminated */
+                                               active:1,
 
-#define THREAD_SHUTTLE_NULL    ((thread_shuttle_t)0)
+          /* Indicates that the thread has been started after creation */
+                                               started:1,
+                                               :0;
+
+               /* Return Handlers */
+               struct ReturnHandler {
+                       struct ReturnHandler    *next;
+                       void            (*handler)(
+                                                       struct ReturnHandler            *rh,
+                                                       struct thread                           *thread);
+               } *handlers, special_handler;
+
+               /* Ports associated with this thread */
+               struct ipc_port                 *ith_self;              /* not a right, doesn't hold ref */
+               struct ipc_port                 *ith_sself;             /* a send right */
+               struct exception_action exc_actions[EXC_TYPES_COUNT];
+
+               /* Owned ulocks (a lock set element) */
+               queue_head_t                    held_ulocks;
+
+#if    MACH_PROF
+               /* Profiling */
+               boolean_t                               profiled;
+               boolean_t                               profiled_own;
+               struct prof_data                *profil_buffer;
+#endif /* MACH_PROF */
+
+#ifdef MACH_BSD
+               void                                    *uthread;
+#endif
+};
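
The state field just above is a bit mask; a minimal sketch of testing it with only the TH_* bits defined in this structure (taken under the thread lock):

	/* sketch: purely runnable -- not waiting, not suspended, not uninterruptible */
	if ((thread->state & TH_SCHED_STATE) == TH_RUN) {
		/* thread is running or sitting on a run queue */
	}
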
 
 #define ith_state              saved.receive.state
 #define ith_object             saved.receive.object
-#define ith_msg                        saved.receive.msg
+#define ith_msg_addr                   saved.receive.msg_addr
 #define ith_msize              saved.receive.msize
 #define        ith_option              saved.receive.option
 #define ith_scatter_list_size  saved.receive.slist_size
@@ -402,233 +365,310 @@ typedef struct thread_shuttle {
 #define sth_result             saved.sema.result
 #define sth_continuation       saved.sema.continuation
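
These ith_*/sth_* aliases let the IPC and semaphore paths stash per-wait state in the saved union; a hedged sketch of the receive-side pattern (option and max_size are hypothetical locals, MACH_RCV_IN_PROGRESS comes from mach/message.h):

	thread_t self = current_thread();
	self->ith_state  = MACH_RCV_IN_PROGRESS;	/* saved.receive.state */
	self->ith_option = option;			/* saved.receive.option */
	self->ith_msize  = max_size;			/* saved.receive.msize */
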
 
-extern thread_act_t active_kloaded[NCPUS];     /* "" kernel-loaded acts */
-extern vm_offset_t active_stacks[NCPUS];       /* active kernel stacks */
-extern vm_offset_t kernel_stack[NCPUS];
+extern void                    thread_bootstrap(void);
 
-#ifndef MACHINE_STACK_STASH
-/*
- * MD Macro to fill up global stack state,
- * keeping the MD structure sizes + games private
- */
-#define MACHINE_STACK_STASH(stack)                                                             \
-MACRO_BEGIN                                                                                                            \
-       mp_disable_preemption();                                                                        \
-       active_stacks[cpu_number()] = (stack);                                          \
-       kernel_stack[cpu_number()] = (stack) + KERNEL_STACK_SIZE;       \
-       mp_enable_preemption();                                                                         \
-MACRO_END
-#endif /* MACHINE_STACK_STASH */
+extern void                    thread_init(void);
 
-/*
- *     Kernel-only routines
- */
+extern void                    thread_daemon_init(void);
 
-/* Initialize thread module */
-extern void            thread_init(void);
+#define        thread_reference_internal(thread)       \
+                       hw_atomic_add(&(thread)->ref_count, 1)
 
-/* Take reference on thread (make sure it doesn't go away) */
-extern void            thread_reference(
-                                       thread_t                thread);
+#define thread_deallocate_internal(thread)     \
+                       hw_atomic_sub(&(thread)->ref_count, 1)
 
-/* Release reference on thread */
-extern void            thread_deallocate(
-                                       thread_t                thread);
+#define thread_reference(thread)                                       \
+MACRO_BEGIN                                                                                    \
+       if ((thread) != THREAD_NULL)                                    \
+               thread_reference_internal(thread);                      \
+MACRO_END
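
thread_reference_internal()/thread_deallocate_internal() are thin wrappers over hw_atomic_add/hw_atomic_sub on ref_count; a minimal sketch of the take/drop pairing, using the checked macro above and the function declared just below:

	thread_t thread = current_thread();
	thread_reference(thread);	/* no-op on THREAD_NULL, otherwise ref_count++ */
	/* ... use thread across a point where the last reference could otherwise drop ... */
	thread_deallocate(thread);	/* releases the reference; the final release reaps the thread */
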
 
-/* Set priority of calling thread */
-extern void            thread_set_own_priority(
-                                       int                             priority);
+extern void                    thread_deallocate(
+                                               thread_t                thread);
 
-/* Start a thread at specified routine */
-#define thread_start(thread, start)                                            \
-                                       (thread)->continuation = (start)
+extern void                    thread_terminate_self(void);
 
+extern kern_return_t   thread_terminate_internal(
+                                                       thread_t                thread);
 
-/* Reaps threads waiting to be destroyed */
-extern void            thread_reaper(void);
+extern void                    thread_terminate_enqueue(
+                                               thread_t                thread);
 
+extern void                    thread_stack_enqueue(
+                                               thread_t                thread);
 
-#if    MACH_HOST
-/* Preclude thread processor set assignement */
-extern void            thread_freeze(
-                                       thread_t                thread);
+extern void                    thread_hold(
+                                               thread_t        thread);
 
-/* Assign thread to a processor set */
-extern void            thread_doassign(
-                                       thread_t                thread,
-                                       processor_set_t new_pset,
-                                       boolean_t               release_freeze);
+extern void                    thread_release(
+                                               thread_t        thread);
 
-/* Allow thread processor set assignement */
-extern void            thread_unfreeze(
-                                       thread_t                thread);
+#define        thread_lock_init(th)    simple_lock_init(&(th)->sched_lock, 0)
+#define thread_lock(th)                        simple_lock(&(th)->sched_lock)
+#define thread_unlock(th)              simple_unlock(&(th)->sched_lock)
+#define thread_lock_try(th)            simple_lock_try(&(th)->sched_lock)
 
-#endif /* MACH_HOST */
+#define thread_should_halt_fast(thread)                (!(thread)->active)
 
-/* Insure thread always has a kernel stack */
-extern void            stack_privilege(
-                                       thread_t                thread);
+#define wake_lock_init(th)             simple_lock_init(&(th)->wake_lock, 0)
+#define wake_lock(th)                  simple_lock(&(th)->wake_lock)
+#define wake_unlock(th)                        simple_unlock(&(th)->wake_lock)
+#define wake_lock_try(th)              simple_lock_try(&(th)->wake_lock)
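
Per the lock-ordering comment removed later in this diff, wake_lock() covers only wake_active, is taken at splsched(), and precedes thread_lock(); a minimal sketch of that ordering:

	spl_t	s = splsched();
	wake_lock(thread);
	thread_lock(thread);
	/* ... examine or update scheduling state ... */
	thread_unlock(thread);
	wake_unlock(thread);
	splx(s);
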
 
-extern void            consider_thread_collect(void);
+extern void                            stack_alloc(
+                                                       thread_t                thread);
 
-/*
- *     Arguments to specify aggressiveness to thread halt.
- *     Can't have MUST_HALT and SAFELY at the same time.
- */
-#define        THREAD_HALT_NORMAL      0
-#define        THREAD_HALT_MUST_HALT   1       /* no deadlock checks */
-#define        THREAD_HALT_SAFELY      2       /* result must be restartable */
+extern void                            stack_free(
+                                                       thread_t                thread);
 
-/*
- *     Macro-defined routines
- */
+extern void                            stack_free_stack(
+                                                       vm_offset_t             stack);
 
-#define thread_pcb(th)         ((th)->pcb)
+extern boolean_t               stack_alloc_try(
+                                                       thread_t            thread);
 
-#define        thread_lock_init(th)                                                                                    \
-                               simple_lock_init(&(th)->lock, ETAP_THREAD_LOCK)
-#define thread_lock(th)                simple_lock(&(th)->lock)
-#define thread_unlock(th)      simple_unlock(&(th)->lock)
+extern void                            stack_collect(void);
 
-#define thread_should_halt_fast(thread)        \
-       (!(thread)->top_act || \
-       !(thread)->top_act->active || \
-       (thread)->top_act->ast & (AST_HALT|AST_TERMINATE))
+extern void                            stack_init(void);
 
-#define thread_should_halt(thread) thread_should_halt_fast(thread)
+extern kern_return_t    thread_state_initialize(
+                                                       thread_t                                thread);
 
-#define rpc_lock_init(th)      mutex_init(&(th)->rpc_lock, ETAP_THREAD_RPC)
-#define rpc_lock(th)           mutex_lock(&(th)->rpc_lock)
-#define rpc_lock_try(th)       mutex_try(&(th)->rpc_lock)
-#define rpc_unlock(th)         mutex_unlock(&(th)->rpc_lock)
+extern kern_return_t   thread_setstatus(
+                                                       thread_t                                thread,
+                                                       int                                             flavor,
+                                                       thread_state_t                  tstate,
+                                                       mach_msg_type_number_t  count);
 
-/*
- * Lock to cover wake_active only; like thread_lock(), is taken
- * at splsched().  Used to avoid calling into scheduler with a
- * thread_lock() held.  Precedes thread_lock() (and other scheduling-
- * related locks) in the system lock ordering.
- */
-#define wake_lock_init(th)                                     \
-                       simple_lock_init(&(th)->wake_lock, ETAP_THREAD_WAKE)
-#define wake_lock(th)          simple_lock(&(th)->wake_lock)
-#define wake_unlock(th)                simple_unlock(&(th)->wake_lock)
+extern kern_return_t   thread_getstatus(
+                                                       thread_t                                thread,
+                                                       int                                             flavor,
+                                                       thread_state_t                  tstate,
+                                                       mach_msg_type_number_t  *count);
 
-static __inline__ vm_offset_t current_stack(void);
-static __inline__ vm_offset_t
-current_stack(void)
-{
-       vm_offset_t     ret;
+extern kern_return_t   thread_info_internal(
+                                                       thread_t                                thread,
+                                                       thread_flavor_t                 flavor,
+                                                       thread_info_t                   thread_info_out,
+                                                       mach_msg_type_number_t  *thread_info_count);
 
-       mp_disable_preemption();
-       ret = active_stacks[cpu_number()];
-       mp_enable_preemption();
-       return ret;
-}
+extern void                            thread_task_priority(
+                                                       thread_t                thread,
+                                                       integer_t               priority,
+                                                       integer_t               max_priority);
 
+extern void                            thread_policy_reset(
+                                                       thread_t                thread);
 
-extern void            pcb_module_init(void);
+extern kern_return_t   kernel_thread_create(
+                                                       thread_continue_t       continuation,
+                                                       void                            *parameter,
+                                                       integer_t                       priority,
+                                                       thread_t                        *new_thread);
 
-extern void            pcb_init(
-                                       thread_act_t    thr_act);
+extern kern_return_t   kernel_thread_start_priority(
+                                                       thread_continue_t       continuation,
+                                                       void                            *parameter,
+                                                       integer_t                       priority,
+                                                       thread_t                        *new_thread);
 
-extern void            pcb_terminate(
-                                       thread_act_t    thr_act);
+extern void                            machine_stack_attach(
+                                                       thread_t                thread,
+                                                       vm_offset_t             stack);
 
-extern void            pcb_collect(
-                                       thread_act_t    thr_act);
+extern vm_offset_t             machine_stack_detach(
+                                                       thread_t                thread);
 
-extern void            pcb_user_to_kernel(
-                                       thread_act_t    thr_act);
+extern void                            machine_stack_handoff(
+                                                       thread_t                old,
+                                                       thread_t                new);
 
-extern kern_return_t   thread_setstatus(
-                                                       thread_act_t                    thr_act,
-                                                       int                                             flavor,
-                                                       thread_state_t                  tstate,
+extern thread_t                        machine_switch_context(
+                                                       thread_t                        old_thread,
+                                                       thread_continue_t       continuation,
+                                                       thread_t                        new_thread);
+
+extern void                            machine_load_context(
+                                                       thread_t                thread);
+
+extern kern_return_t   machine_thread_state_initialize(
+                                                       thread_t                                thread);
+
+extern kern_return_t   machine_thread_set_state(
+                                                       thread_t                                thread,
+                                                       thread_flavor_t                 flavor,
+                                                       thread_state_t                  state,
                                                        mach_msg_type_number_t  count);
 
-extern kern_return_t   thread_getstatus(
-                                                       thread_act_t                    thr_act,
-                                                       int                                             flavor,
-                                                       thread_state_t                  tstate,
+extern kern_return_t   machine_thread_get_state(
+                                                       thread_t                                thread,
+                                                       thread_flavor_t                 flavor,
+                                                       thread_state_t                  state,
                                                        mach_msg_type_number_t  *count);
 
-extern boolean_t               stack_alloc_try(
-                                                       thread_t                            thread,
-                                                       void                                    (*start_pos)(thread_t));
+extern kern_return_t   machine_thread_dup(
+                                                       thread_t                self,
+                                                       thread_t                target);
+
+extern void                            machine_thread_init(void);
+
+extern kern_return_t   machine_thread_create(
+                                                       thread_t                thread,
+                                                       task_t                  task);
 
-/* This routine now used only internally */
-extern kern_return_t   thread_info_shuttle(
-                                                       thread_act_t                    thr_act,
+extern void                machine_thread_destroy(
+                                                       thread_t                thread);
+
+extern void                            machine_set_current_thread(
+                                                       thread_t                        thread);
+
+extern void                    machine_thread_terminate_self(void);
+
+extern kern_return_t   machine_thread_get_kern_state(
+                                                       thread_t                                thread,
                                                        thread_flavor_t                 flavor,
-                                                       thread_info_t                   thread_info_out,
-                                                       mach_msg_type_number_t  *thread_info_count);
+                                                       thread_state_t                  tstate,
+                                                       mach_msg_type_number_t  *count);
 
-extern void            thread_user_to_kernel(
-                                       thread_t                thread);
 
-/* Machine-dependent routines */
-extern void            thread_machine_init(void);
+/*
+ * XXX Funnel locks XXX
+ */
 
-extern void            thread_machine_set_current(
-                                       thread_t                thread );
+struct funnel_lock {
+       int                     fnl_type;                       /* funnel type */
+       lck_mtx_t       *fnl_mutex;                     /* underlying mutex for the funnel */
+       void *          fnl_mtxholder;          /* thread (last) holding mutex */
+       void *          fnl_mtxrelease;         /* thread (last) releasing mutex */
+       lck_mtx_t       *fnl_oldmutex;          /* Mutex before collapsing split funnel */
+};
 
-extern kern_return_t   thread_machine_create(
-                                                       thread_t                        thread,
-                                                       thread_act_t            thr_act,
-                                                       void                            (*start_pos)(thread_t));
+typedef struct ReturnHandler           ReturnHandler;
 
-extern void            thread_set_syscall_return(
-                                       thread_t                thread,
-                                       kern_return_t   retval);
+#define        thread_mtx_lock(thread)                 mutex_lock(&(thread)->mutex)
+#define        thread_mtx_try(thread)                  mutex_try(&(thread)->mutex)
+#define        thread_mtx_unlock(thread)               mutex_unlock(&(thread)->mutex)
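
A minimal sketch of the thread mutex guarding the bit-field flags in struct thread (active, started):

	thread_mtx_lock(thread);
	boolean_t active = thread->active;	/* guarded by the thread mutex */
	thread_mtx_unlock(thread);
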
 
-extern void            thread_machine_destroy(
-                                       thread_t                thread );
+extern void                    act_execute_returnhandlers(void);
 
-extern void            thread_machine_flush(
-                               thread_act_t thr_act);
+extern void                    install_special_handler(
+                                               thread_t                thread);
 
-extern thread_t     kernel_thread_with_priority(
-                    task_t          task,
-                                       integer_t               priority,
-                    void            (*start)(void),
-                    boolean_t       start_running);
+extern void                    special_handler(
+                                               ReturnHandler   *rh,
+                                               thread_t                thread);
 
-extern void            funnel_lock(funnel_t *);
+#else  /* MACH_KERNEL_PRIVATE */
 
-extern void            funnel_unlock(funnel_t *);
+__BEGIN_DECLS
 
-#else /* !MACH_KERNEL_PRIVATE */
+extern thread_t                current_thread(void);
 
-typedef struct __funnel__ funnel_t;
+extern void                    thread_reference(
+                                               thread_t        thread);
 
-extern boolean_t thread_should_halt(thread_t);
+extern void                    thread_deallocate(
+                                               thread_t        thread);
 
-#endif /* !MACH_KERNEL_PRIVATE */
+__END_DECLS
 
-#define THR_FUNNEL_NULL (funnel_t *)0
+#endif /* MACH_KERNEL_PRIVATE */
+
+#ifdef KERNEL_PRIVATE
+
+typedef struct funnel_lock             funnel_t;
+
+#ifdef MACH_KERNEL_PRIVATE
+
+extern void            funnel_lock(
+                                               funnel_t        *lock);
+
+extern void            funnel_unlock(
+                                               funnel_t        *lock);
+
+vm_offset_t                    min_valid_stack_address(void);
+vm_offset_t                    max_valid_stack_address(void);
+
+#endif /* MACH_KERNEL_PRIVATE */
+
+__BEGIN_DECLS
+
+extern funnel_t                *thread_funnel_get(void);
+
+extern boolean_t       thread_funnel_set(
+                                               funnel_t        *lock,
+                                               boolean_t        funneled);
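
A hedged sketch of the usual funnel idiom; kernel_flock stands in for whichever funnel the caller uses and is not declared in this header:

	boolean_t funnel_state;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);	/* take the funnel, remember prior state */
	/* ... funnel-protected work ... */
	(void) thread_funnel_set(kernel_flock, funnel_state);	/* restore */
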
 
 extern thread_t                kernel_thread(
-                                       task_t  task,
-                                       void    (*start)(void));
+                                               task_t          task,
+                                               void            (*start)(void));
 
-extern void                    thread_terminate_self(void);
+__END_DECLS
 
-extern funnel_t *      funnel_alloc(int);
+#endif /* KERNEL_PRIVATE */
 
-extern funnel_t *      thread_funnel_get(void);
+__BEGIN_DECLS
 
-extern boolean_t       thread_funnel_set(funnel_t * fnl, boolean_t funneled);
+#ifdef XNU_KERNEL_PRIVATE
 
-extern boolean_t       thread_funnel_merge(funnel_t * fnl, funnel_t * otherfnl);
+/*
+ * XXX Funnel locks XXX
+ */
+
+#define THR_FUNNEL_NULL (funnel_t *)0
+
+extern funnel_t                 *funnel_alloc(
+                                               int                     type);
+
+extern void                    funnel_free(
+                                               funnel_t        *lock);
+
+extern void                    thread_read_times(
+                                               thread_t                thread,
+                                               time_value_t    *user_time,
+                                               time_value_t    *system_time);
+
+extern void                    thread_setuserstack(
+                                               thread_t                thread,
+                                               mach_vm_offset_t        user_stack);
 
-extern void         thread_set_cont_arg(int);
+extern uint64_t                thread_adjuserstack(
+                                               thread_t                thread,
+                                               int                             adjust);
 
-extern int          thread_get_cont_arg(void);
+extern void                    thread_setentrypoint(
+                                               thread_t                thread,
+                                               mach_vm_offset_t        entry);
+
+extern kern_return_t   thread_wire_internal(
+                                                       host_priv_t             host_priv,
+                                                       thread_t                thread,
+                                                       boolean_t               wired,
+                                                       boolean_t               *prev_state);
 
 /* JMM - These are only temporary */
 extern boolean_t       is_thread_running(thread_t); /* True if TH_RUN */
 extern boolean_t       is_thread_idle(thread_t); /* True if TH_IDLE */
-extern event_t         get_thread_waitevent(thread_t);
-extern kern_return_t   get_thread_waitresult(thread_t);
+
+extern kern_return_t   thread_dup(thread_t);
+
+extern task_t  get_threadtask(thread_t);
+
+extern void            *get_bsdthread_info(thread_t);
+extern void            set_bsdthread_info(thread_t, void *);
+extern void            *uthread_alloc(task_t, thread_t);
+extern void            uthread_free(task_t, void *, void *); 
+
+extern boolean_t       thread_should_halt(
+                                               thread_t                thread);
+
+#endif /* XNU_KERNEL_PRIVATE */
+
+extern kern_return_t   kernel_thread_start(
+                                                       thread_continue_t       continuation,
+                                                       void                            *parameter,
+                                                       thread_t                        *new_thread);
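
kernel_thread_start() returns a reference on the new thread that the caller must drop; a minimal sketch of a caller (the worker routine and its argument are illustrative only):

	static void
	my_worker(void *parameter, wait_result_t wresult)
	{
		/* ... do work, then block or terminate as appropriate ... */
	}

	thread_t	thread;

	if (kernel_thread_start(my_worker, NULL, &thread) == KERN_SUCCESS)
		thread_deallocate(thread);	/* drop the creation reference */
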
+
+__END_DECLS
 
 #endif /* _KERN_THREAD_H_ */