apple/xnu.git - osfmk/kern/thread.h (xnu-2422.1.72)

diff --git a/osfmk/kern/thread.h b/osfmk/kern/thread.h
index 559a640e04d0d2956929a4ff2fcf040ac1b95766..47ffc95ff34c9d222382ad9231e7622778d40c70 100644
--- a/osfmk/kern/thread.h
+++ b/osfmk/kern/thread.h
@@ -1,23 +1,29 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  * 
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  * 
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_FREE_COPYRIGHT@
 #include <mach/mach_types.h>
 #include <mach/message.h>
 #include <mach/boolean.h>
-#include <mach/vm_types.h>
-#include <mach/vm_prot.h>
+#include <mach/vm_param.h>
 #include <mach/thread_info.h>
 #include <mach/thread_status.h>
-#include <kern/cpu_data.h>             /* for current_thread */
+#include <mach/exception_types.h>
+
 #include <kern/kern_types.h>
 
-/*
- * Logically, a thread of control consists of two parts:
- *     a thread_shuttle, which may migrate during an RPC, and
- *     a thread_activation, which remains attached to a task.
- * The thread_shuttle is the larger portion of the two-part thread,
- * and contains scheduling info, messaging support, accounting info,
- * and links to the thread_activation within which the shuttle is
- * currently operating.
- *
- * It might make sense to have the thread_shuttle be a proper sub-structure
- * of the thread, with the thread containing links to both the shuttle and
- * activation.  In order to reduce the scope and complexity of source
- * changes and the overhead of maintaining these linkages, we have subsumed
- * the shuttle into the thread, calling it a thread_shuttle.
- *
- * User accesses to threads always come in via the user's thread port,
- * which gets translated to a pointer to the target thread_activation.
- * Kernel accesses intended to effect the entire thread, typically use
- * a pointer to the thread_shuttle (current_thread()) as the target of
- * their operations.  This makes sense given that we have subsumed the
- * shuttle into the thread_shuttle, eliminating one set of linkages.
- * Operations effecting only the shuttle may use a thread_shuttle_t
- * to indicate this.
- *
- * The current_act() macro returns a pointer to the current thread_act, while
- * the current_thread() macro returns a pointer to the currently active
- * thread_shuttle (representing the thread in its entirety).
- */
+#include <sys/cdefs.h>
 
-/*
- *     Possible results of thread_block - returned in
- *     current_thread()->wait_result.
- */
-#define THREAD_AWAKENED                0               /* normal wakeup */
-#define THREAD_TIMED_OUT       1               /* timeout expired */
-#define THREAD_INTERRUPTED     2               /* interrupted by clear_wait */
-#define THREAD_RESTART         3               /* restart operation entirely */
+#ifdef MACH_KERNEL_PRIVATE
 
-/*
- * Interruptible flags for assert_wait
- *
- */
-#define THREAD_UNINT                   0               /* not interruptible      */
-#define THREAD_INTERRUPTIBLE   1               /* may not be restartable */
-#define THREAD_ABORTSAFE               2               /* abortable safely       */
-
-#ifdef MACH_KERNEL_PRIVATE
-#include <cpus.h>
-#include <hw_footprint.h>
-#include <mach_host.h>
-#include <mach_prof.h>
-#include <mach_lock_mon.h>
+#include <cputypes.h>
+
+#include <mach_assert.h>
 #include <mach_ldebug.h>
 
+#include <ipc/ipc_types.h>
+
 #include <mach/port.h>
-#include <kern/ast.h>
 #include <kern/cpu_number.h>
 #include <kern/queue.h>
-#include <kern/time_out.h>
 #include <kern/timer.h>
 #include <kern/lock.h>
+#include <kern/locks.h>
 #include <kern/sched.h>
 #include <kern/sched_prim.h>
-#include <kern/thread_pool.h>
 #include <kern/thread_call.h>
 #include <kern/timer_call.h>
 #include <kern/task.h>
+#include <kern/exception.h>
+#include <kern/affinity.h>
+
 #include <ipc/ipc_kmsg.h>
+
+#include <machine/cpu_data.h>
 #include <machine/thread.h>
 
-struct thread_shuttle {
+struct thread {
        /*
-        * Beginning of thread_shuttle proper.  When the thread is on
-        * a wait queue, these three fields are in treated as an un-
-        * official union with a wait_queue_element.  If you change
-        * these, you must change that definition as well.
+        *      NOTE:   The runq field in the thread structure has an unusual
+        *      locking protocol.  If its value is PROCESSOR_NULL, then it is
+        *      locked by the thread_lock, but if its value is something else
+        *      then it is locked by the associated run queue lock.
+        *
+        *      When the thread is on a wait queue, these first three fields
+        *      are treated as an unofficial union with a wait_queue_element.
+        *      If you change these, you must change that definition as well
+        *      (kern/wait_queue.h).
         */
-       queue_chain_t   links;                          /* current run/wait queue links */
-       run_queue_t             runq;                           /* run queue p is on SEE BELOW */
-       int                             whichq;                         /* which queue level p is on */
-/*
- *     NOTE:   The runq field in the thread structure has an unusual
- *     locking protocol.  If its value is RUN_QUEUE_NULL, then it is
- *     locked by the thread_lock, but if its value is something else
- *     (i.e. a run_queue) then it is locked by that run_queue's lock.
- */
-
-       /* Thread bookkeeping */
-       queue_chain_t   pset_threads;           /* list of all shuttles in proc set */
-
-       /* Synchronization */
-       decl_simple_lock_data(,lock)            /* scheduling lock (thread_lock()) */
-       decl_simple_lock_data(,wake_lock)       /* covers wake_active (wake_lock())*/
-       decl_mutex_data(,rpc_lock)                      /* RPC lock (rpc_lock()) */
-       int                             ref_count;                      /* number of references to me */
-        
-       vm_offset_t     kernel_stack;
-       vm_offset_t             stack_privilege;        /* reserved kernel stack */
-
-       /* Blocking information */
-       int                             reason;                         /* why we blocked */
-
-       event_t                 wait_event;                     /* event we are waiting on */
-       kern_return_t   wait_result;            /* outcome of wait -
+       /* Items examined often, modified infrequently */
+       queue_chain_t   links;                          /* run/wait queue links */
+       processor_t             runq;                           /* run queue assignment */
+       wait_queue_t    wait_queue;                     /* wait queue we are currently on */
+       event64_t               wait_event;                     /* wait queue event */
+       /* Data updated during assert_wait/thread_wakeup */
+       decl_simple_lock_data(,sched_lock)      /* scheduling lock (thread_lock()) */
+       decl_simple_lock_data(,wake_lock)       /* for thread stop / wait (wake_lock()) */
+       integer_t               options;                        /* options set by thread itself */
+#define TH_OPT_INTMASK         0x0003          /* interrupt / abort level */
+#define TH_OPT_VMPRIV          0x0004          /* may allocate reserved memory */
+#define TH_OPT_DTRACE          0x0008          /* executing under dtrace_probe */
+#define TH_OPT_SYSTEM_CRITICAL 0x0010          /* Thread must always be allowed to run - even under heavy load */
+#define TH_OPT_PROC_CPULIMIT   0x0020          /* Thread has a task-wide CPU limit applied to it */
+#define TH_OPT_PRVT_CPULIMIT   0x0040          /* Thread has a thread-private CPU limit applied to it */
+#define TH_OPT_IDLE_THREAD             0x0080          /* Thread is a per-processor idle thread */
+
+       boolean_t                       wake_active;    /* wake event on stop */
+       int                                     at_safe_point;  /* thread_abort_safely allowed */
+       ast_t                           reason;                 /* why we blocked */
+       thread_continue_t       continuation;   /* continue here next dispatch */
+       void                            *parameter;             /* continuation parameter */
+       wait_result_t           wait_result;    /* outcome of wait -
                                                                                 * may be examined by this thread
                                                                                 * WITHOUT locking */
 
-       wait_queue_t    wait_queue;                     /* wait queue we are currently on */
-       queue_chain_t   wait_link;                      /* event's wait queue link */
+       /* Data updated/used in thread_invoke */
+    int                                    funnel_state;
+    struct funnel_lock *funnel_lock;           /* Non-reentrancy funnel */
+#define TH_FN_OWNED                    0x1                             /* we own the funnel */
+#define TH_FN_REFUNNEL         0x2                             /* re-acquire funnel on dispatch */
 
-       boolean_t               wake_active;            /* Someone is waiting for this
-                                                                                * thread to become suspended */
-
-       boolean_t               interruptible;          /* Thread is "interruptible" */
+       vm_offset_t             kernel_stack;           /* current kernel stack */
+       vm_offset_t                     reserved_stack;         /* reserved kernel stack */
 
        /* Thread state: */
-       int                             state;
+       int                                     state;
 /*
  *     Thread states [bits or'ed]
  */
-#define TH_WAIT                        0x01    /* thread is queued for waiting */
-#define TH_SUSP                        0x02    /* thread has been asked to stop */
-#define TH_RUN                 0x04    /* thread is running or on runq */
-#define TH_UNINT               0x08    /* thread is waiting uninteruptibly */
-#define        TH_HALTED               0x10    /* thread is halted at clean point ? */
-
-#define TH_ABORT               0x20    /* abort interruptible waits */
+#define TH_WAIT                        0x01                    /* queued for waiting */
+#define TH_SUSP                        0x02                    /* stopped or requested to stop */
+#define TH_RUN                 0x04                    /* running or on runq */
+#define TH_UNINT               0x08                    /* waiting uninterruptibly */
+#define        TH_TERMINATE    0x10                    /* halted at termination */
+#define        TH_TERMINATE2   0x20                    /* added to termination queue */
 
-#define TH_IDLE                        0x80    /* thread is an idle thread */
-
-#define        TH_SCHED_STATE  (TH_WAIT|TH_SUSP|TH_RUN|TH_UNINT)
-
-#define        TH_STACK_HANDOFF        0x0100  /* thread has no kernel stack */
-#define        TH_STACK_ALLOC          0x0200  /* thread is waiting for kernel stack */
-#define        TH_STACK_STATE  (TH_STACK_HANDOFF | TH_STACK_ALLOC)
-
-#define        TH_TERMINATE            0x0400  /* thread is terminating */
-
-       /* Stack handoff information */
-       void            (*continuation)(void);  /* re-start here next dispatch */
+#define TH_IDLE                        0x80                    /* idling processor */
 
        /* Scheduling information */
-       integer_t                       importance;             /* task-relative importance */
-       integer_t                       sched_mode;             /* scheduling mode bits */
-#define TH_MODE_REALTIME               0x0001
-#define TH_MODE_TIMESHARE              0x0002
-#define TH_MODE_FAILSAFE               0x0004
-#define TH_MODE_POLLDEPRESS            0x0008
-       integer_t                       safe_mode;              /* saved mode during fail-safe */
-       struct {                                                        /* see mach/thread_policy.h */
+       sched_mode_t                    sched_mode;             /* scheduling mode */
+       sched_mode_t                    saved_mode;             /* saved mode during forced mode demotion */
+       
+       unsigned int                    sched_flags;            /* current flag bits */
+#define TH_SFLAG_FAIRSHARE_TRIPPED     0x0001          /* fairshare scheduling activated */
+#define TH_SFLAG_FAILSAFE              0x0002          /* fail-safe has tripped */
+#define TH_SFLAG_THROTTLED             0x0004      /* owner task in throttled state */
+#define TH_SFLAG_DEMOTED_MASK      (TH_SFLAG_THROTTLED | TH_SFLAG_FAILSAFE | TH_SFLAG_FAIRSHARE_TRIPPED)
+
+#define        TH_SFLAG_PROMOTED               0x0008          /* sched pri has been promoted */
+#define TH_SFLAG_ABORT                 0x0010          /* abort interruptible waits */
+#define TH_SFLAG_ABORTSAFELY           0x0020          /* ... but only those at safe point */
+#define TH_SFLAG_ABORTED_MASK          (TH_SFLAG_ABORT | TH_SFLAG_ABORTSAFELY)
+#define        TH_SFLAG_DEPRESS                0x0040          /* normal depress yield */
+#define TH_SFLAG_POLLDEPRESS           0x0080          /* polled depress yield */
+#define TH_SFLAG_DEPRESSED_MASK                (TH_SFLAG_DEPRESS | TH_SFLAG_POLLDEPRESS)
+#define TH_SFLAG_PRI_UPDATE            0x0100          /* Updating priority */
+#define TH_SFLAG_EAGERPREEMPT          0x0200          /* Any preemption of this thread should be treated as if AST_URGENT applied */
+#define TH_SFLAG_RW_PROMOTED           0x0400          /* sched pri has been promoted due to blocking with RW lock held */
+#define TH_SFLAG_PROMOTED_MASK         (TH_SFLAG_PROMOTED | TH_SFLAG_RW_PROMOTED)
+
+#define TH_SFLAG_RW_PROMOTED_BIT       (10)    /* 0x400 */
+
+/*
+ * A thread can either be completely unthrottled, about to be throttled,
+ * throttled (TH_SFLAG_THROTTLED), or about to be unthrottled
+ */
+#define        TH_SFLAG_PENDING_THROTTLE_DEMOTION      0x1000  /* Pending sched_mode demotion */
+#define        TH_SFLAG_PENDING_THROTTLE_PROMOTION     0x2000  /* Pending sched_mode promotion */
+#define        TH_SFLAG_PENDING_THROTTLE_MASK          (TH_SFLAG_PENDING_THROTTLE_DEMOTION | TH_SFLAG_PENDING_THROTTLE_PROMOTION)
+
+       int16_t                         sched_pri;                      /* scheduled (current) priority */
+       int16_t                         priority;                       /* base priority */
+       int16_t                         max_priority;           /* max base priority */
+       int16_t                         task_priority;          /* copy of task base priority */
+#if defined(CONFIG_SCHED_GRRR)
+#if 0
+       uint16_t                        grrr_deficit;           /* fixed point (1/1000th quantum) fractional deficit */
+#endif
+#endif
+       
+       int16_t                         promotions;                     /* level of promotion */
+       int16_t                         pending_promoter_index;
+       uint32_t                        ref_count;              /* number of references to me */
+       void                            *pending_promoter[2];
+
+       uint32_t                        rwlock_count;   /* Number of lck_rw_t locks held by thread */
+
+       integer_t                       importance;                     /* task-relative importance */
+       /* Priority depression expiration */
+       integer_t                       depress_timer_active;
+       timer_call_data_t       depress_timer;
+                                                                               /* real-time parameters */
+       struct {                                                                /* see mach/thread_policy.h */
                uint32_t                        period;
                uint32_t                        computation;
                uint32_t                        constraint;
                boolean_t                       preemptible;
+               uint64_t                        deadline;
        }                                       realtime;
 
-       integer_t                       priority;                       /* base priority */
-       integer_t                       sched_pri;                      /* scheduled (current) priority */
-       integer_t                       depress_priority;       /* priority to restore */
-       integer_t                       max_priority;
-       integer_t                       task_priority;          /* copy of task base priority */
-
+       uint32_t                        was_promoted_on_wakeup;
        uint32_t                        current_quantum;        /* duration of current quantum */
+       uint64_t                        last_run_time;          /* time when thread was switched away from */
+       uint64_t                        last_quantum_refill_time;       /* time when current_quantum was refilled after expiration */
+
+  /* Data used during setrun/dispatch */
+       timer_data_t            system_timer;           /* system mode timer */
+       processor_t                     bound_processor;        /* bound to a processor? */
+       processor_t                     last_processor;         /* processor last dispatched on */
+       processor_t                     chosen_processor;       /* Where we want to run this thread */
 
        /* Fail-safe computation since last unblock or qualifying yield */
-       uint64_t                        metered_computation;
+       uint64_t                        computation_metered;
        uint64_t                        computation_epoch;
+       uint64_t                        safe_release;   /* when to release fail-safe */
+
+       /* Call out from scheduler */
+       void                            (*sched_call)(
+                                                       int                     type,
+                                                       thread_t        thread);
+#if defined(CONFIG_SCHED_PROTO)
+       uint32_t                        runqueue_generation;    /* last time runqueue was drained */
+#endif
+       
+       /* Statistics and timesharing calculations */
+#if defined(CONFIG_SCHED_TRADITIONAL)
+       natural_t                       sched_stamp;    /* last scheduler tick */
+       natural_t                       sched_usage;    /* timesharing cpu usage [sched] */
+       natural_t                       pri_shift;              /* usage -> priority from pset */
+       natural_t                       cpu_usage;              /* instrumented cpu usage [%cpu] */
+       natural_t                       cpu_delta;              /* accumulated cpu_usage delta */
+#endif
+       uint32_t                        c_switch;               /* total context switches */
+       uint32_t                        p_switch;               /* total processor switches */
+       uint32_t                        ps_switch;              /* total pset switches */
 
-       natural_t                       cpu_usage;              /* exp. decaying cpu usage [%cpu] */
-       natural_t                       cpu_delta;              /* cpu usage since last update */
-       natural_t                       sched_usage;    /* load-weighted cpu usage [sched] */
-       natural_t                       sched_delta;    /* weighted cpu usage since update */
-       natural_t                       sched_stamp;    /* when priority was updated */
-       natural_t                       sleep_stamp;    /* when entered TH_WAIT state */
-       natural_t                       safe_release;   /* when to release fail-safe */
-
-       /* VM global variables */
-       boolean_t                       vm_privilege;   /* can use reserved memory? */
-       vm_offset_t                     recover;                /* page fault recovery (copyin/out) */
+       /* Timing data structures */
+       int                                     precise_user_kernel_time; /* precise user/kernel enabled for this thread */
+       timer_data_t            user_timer;                     /* user mode timer */
+       uint64_t                        user_timer_save;        /* saved user timer value */
+       uint64_t                        system_timer_save;      /* saved system timer value */
+       uint64_t                        vtimer_user_save;       /* saved values for vtimers */
+       uint64_t                        vtimer_prof_save;
+       uint64_t                        vtimer_rlim_save;
 
-       /* IPC data structures */
+       /* Timed wait expiration */
+       timer_call_data_t       wait_timer;
+       integer_t                       wait_timer_active;
+       boolean_t                       wait_timer_is_set;
 
-       struct ipc_kmsg_queue ith_messages;
 
-       mach_port_t ith_mig_reply;      /* reply port for mig */
-       mach_port_t ith_rpc_reply;      /* reply port for kernel RPCs */
+       /*
+        * Processor/cache affinity
+        * - affinity_threads links task threads with the same affinity set
+        */
+       affinity_set_t                  affinity_set;
+       queue_chain_t                   affinity_threads;
 
        /* Various bits of stashed state */
        union {
                struct {
                        mach_msg_return_t       state;          /* receive state */
+                       mach_port_seqno_t       seqno;          /* seqno of recvd message */
                        ipc_object_t            object;         /* object received on */
-                       mach_msg_header_t       *msg;           /* receive buffer pointer */
+                       mach_vm_address_t       msg_addr;       /* receive buffer pointer */
                        mach_msg_size_t         msize;          /* max size for recvd msg */
                        mach_msg_option_t       option;         /* options for receive */
                        mach_msg_size_t         slist_size;     /* scatter list size */
+                       mach_port_name_t        receiver_name;  /* the receive port name */
                        struct ipc_kmsg         *kmsg;          /* received message */
-                       mach_port_seqno_t       seqno;          /* seqno of recvd message */
-                       void                    (*continuation)(mach_msg_return_t);
-               }                       receive;
+                       mach_msg_continue_t     continuation;
+               } receive;
                struct {
                        struct semaphore        *waitsemaphore;         /* semaphore ref */
                        struct semaphore        *signalsemaphore;       /* semaphore ref */
                        int                                     options;                        /* semaphore options */
                        kern_return_t           result;                         /* primary result */
-                       void                    (*continuation)(kern_return_t);
-               }                       sema;
+                       mach_msg_continue_t continuation;
+               } sema;
                struct {
-                       int                             option;         /* switch option */
-               }                       swtch;
-               int                                     misc;           /* catch-all for other state */
-       }               saved;
+                       int                                     option;         /* switch option */
+                       boolean_t                               reenable_workq_callback;        /* on entry, callbacks were suspended */
+               } swtch;
+               int                                             misc;           /* catch-all for other state */
+       } saved;
 
-       /* Timing data structures */
-       timer_data_t                    user_timer;                     /* user mode timer */
-       timer_data_t                    system_timer;           /* system mode timer */
-       timer_save_data_t               user_timer_save;        /* saved user timer value */
-       timer_save_data_t               system_timer_save;      /* saved system timer value */
+       /* Structure to save information about guard exception */
+       struct {
+               unsigned                                type;           /* EXC_GUARD reason/type */
+               mach_exception_data_type_t              code;           /* Exception code */
+               mach_exception_data_type_t              subcode;        /* Exception sub-code */
+       } guard_exc_info;
 
-       /* Timed wait expiration */
-       timer_call_data_t               wait_timer;
-       integer_t                               wait_timer_active;
-       boolean_t                               wait_timer_is_set;
 
-       /* Priority depression expiration */
-       timer_call_data_t               depress_timer;
-       integer_t                               depress_timer_active;
+       /* IPC data structures */
+#if IMPORTANCE_INHERITANCE
+       natural_t ith_assertions;                       /* assertions pending drop */
+#endif
+       struct ipc_kmsg_queue ith_messages;             /* messages to reap */
+       mach_port_t ith_rpc_reply;                      /* reply port for kernel RPCs */
 
        /* Ast/Halt data structures */
-       boolean_t                       active;                         /* thread is active */
-
-       int                                     at_safe_point;          /* thread_abort_safely allowed */
-
-       /* Processor data structures */
-       processor_set_t         processor_set;          /* assigned processor set */
-       processor_t                     bound_processor;        /* bound to processor ?*/
-#if    MACH_HOST
-       boolean_t                       may_assign;                     /* may assignment change? */
-       boolean_t                       assign_active;          /* waiting for may_assign */
-#endif /* MACH_HOST */
-
-       processor_t                     last_processor;         /* processor last ran on */
+       vm_offset_t                                     recover;                /* page fault recover(copyin/out) */
 
-       /* Non-reentrancy funnel */
-    struct funnel_lock         *funnel_lock;
-    int                                        funnel_state;
-#define TH_FN_OWNED                            0x1                     /* we own the funnel */
-#define TH_FN_REFUNNEL                 0x2                     /* re-acquire funnel on dispatch */
-
-/* BEGIN TRACING/DEBUG */
-
-#if    MACH_LOCK_MON
-       unsigned                        lock_stack;                     /* number of locks held */
-#endif  /* MACH_LOCK_MON */
-
-#if    ETAP_EVENT_MONITOR
-       int                                     etap_reason;            /* real reason why we blocked */
-       boolean_t                       etap_trace;                     /* ETAP trace status */
-#endif /* ETAP_EVENT_MONITOR */
-
-#if    MACH_LDEBUG
-       /*
-        *      Debugging:  track acquired mutexes and locks.
-        *      Because a thread can block while holding such
-        *      synchronizers, we think of the thread as
-        *      "owning" them.
-        */
-#define        MUTEX_STACK_DEPTH       20
-#define        LOCK_STACK_DEPTH        20
-       mutex_t                         *mutex_stack[MUTEX_STACK_DEPTH];
-       lock_t                          *lock_stack[LOCK_STACK_DEPTH];
-       unsigned int            mutex_stack_index;
-       unsigned int            lock_stack_index;
-       unsigned                        mutex_count;            /* XXX to be deleted XXX */
-       boolean_t                       kthread;                        /* thread is a kernel thread */
-#endif /* MACH_LDEBUG */
-
-/* END TRACING/DEBUG */
-
-       /* Migration and thread_activation linkage */
-       struct thread_activation        *top_act;               /* "current" thr_act */
+       queue_chain_t                           threads;                /* global list of all threads */
+
+       /* Activation */
+               queue_chain_t                   task_threads;
+
+               /*** Machine-dependent state ***/
+               struct machine_thread   machine;
+
+               /* Task membership */
+               struct task                             *task;
+               vm_map_t                                map;
+
+               decl_lck_mtx_data(,mutex)
+
+
+               /* Pending thread ast(s) */
+               ast_t                                   ast;
+
+               /* Miscellaneous bits guarded by mutex */
+               uint32_t
+                       active:1,                               /* Thread is active and has not been terminated */
+                       started:1,                              /* Thread has been started after creation */
+                       static_param:1,                 /* Disallow policy parameter changes */
+                       :0;
+
+               /* Return Handlers */
+               struct ReturnHandler {
+                       struct ReturnHandler    *next;
+                       void            (*handler)(
+                                                       struct ReturnHandler            *rh,
+                                                       struct thread                           *thread);
+               } *handlers, special_handler;
+
+               /* Ports associated with this thread */
+               struct ipc_port                 *ith_self;              /* not a right, doesn't hold ref */
+               struct ipc_port                 *ith_sself;             /* a send right */
+               struct exception_action *exc_actions;
+
+#ifdef MACH_BSD
+               void                                    *uthread;
+#endif
+
+#if CONFIG_DTRACE
+               uint32_t t_dtrace_flags;        /* DTrace thread states */
+#define        TH_DTRACE_EXECSUCCESS   0x01
+               uint32_t t_dtrace_predcache;/* DTrace per thread predicate value hint */
+               int64_t t_dtrace_tracing;       /* Thread time under dtrace_probe() */
+               int64_t t_dtrace_vtime;
+#endif
+
+               clock_sec_t t_page_creation_time;
+               uint32_t    t_page_creation_count;
+
+#define T_CHUD_MARKED           0x01          /* this thread is marked by CHUD */
+#define T_IN_CHUD               0x02          /* this thread is already in a CHUD handler */
+#define THREAD_PMC_FLAG         0x04          /* Bit in "t_chud" signifying PMC interest */    
+#define T_AST_CALLSTACK         0x08          /* Thread scheduled to dump a
+                                              * callstack on its next
+                                              * AST */
+#define T_AST_NAME              0x10          /* Thread scheduled to dump
+                                              * its name on its next
+                                              * AST */
+#define T_NAME_DONE             0x20          /* Thread has previously
+                                              * recorded its name */
+#define T_KPC_ALLOC             0x40          /* Thread needs a kpc_buf */
+
+               uint32_t t_chud;        /* CHUD flags, used for Shark */
+               uint32_t chud_c_switch; /* last dispatch detection */
+
+               integer_t mutex_count;  /* total count of locks held */
+
+#ifdef KPC
+       /* accumulated performance counters for this thread */
+       uint64_t *kpc_buf;
+#endif
+
+#ifdef KPERF
+       /* count of how many times a thread has been sampled since it was last scheduled */
+       uint64_t kperf_pet_cnt;
+#endif
+
+               uint64_t thread_id;     /*system wide unique thread-id*/
+
+       /* Statistics accumulated per-thread and aggregated per-task */
+       uint32_t                syscalls_unix;
+       uint32_t                syscalls_mach;
+       ledger_t                t_ledger;
+       ledger_t                t_threadledger; /* per thread ledger */
+
+       /* policy is protected by the task lock */
+       struct task_requested_policy     requested_policy;
+       struct task_effective_policy     effective_policy;
+       struct task_pended_policy        pended_policy;
+
+       int     iotier_override; /* atomic operations to set, cleared on ret to user */
+
+
+       integer_t               saved_importance;               /* saved task-relative importance */
+
+       uint32_t                        thread_callout_interrupt_wakeups;
+       uint32_t                        thread_callout_platform_idle_wakeups;
+       uint32_t                        thread_timer_wakeups_bin_1;
+       uint32_t                        thread_timer_wakeups_bin_2;
+       uint16_t                        thread_tag;
+       uint16_t                        callout_woken_from_icontext:1,
+                                       callout_woken_from_platform_idle:1,
+                                       callout_woke_thread:1,
+                                       thread_bitfield_unused:13;
+       /* Kernel holds on this thread  */
+       int16_t                                         suspend_count;
+       /* User level suspensions */
+       int16_t                                         user_stop_count;
 };
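
A hedged illustration of the runq locking protocol described in the NOTE at the top of the structure; thread_runq_check() is hypothetical, not part of this header, and relies on the thread_lock()/thread_unlock() macros defined further down.

static boolean_t
thread_runq_check(thread_t thread)
{
	boolean_t queued;

	/* caller is assumed to be at splsched() */
	thread_lock(thread);
	/* while runq is PROCESSOR_NULL, the field is protected by the thread lock */
	queued = (thread->runq != PROCESSOR_NULL);
	thread_unlock(thread);

	/*
	 * When runq is not PROCESSOR_NULL, the field is protected by that
	 * run queue's lock, so actually dequeueing the thread requires taking
	 * the run queue lock rather than just the thread lock.
	 */
	return queued;
}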
 
-#define THREAD_SHUTTLE_NULL    ((thread_shuttle_t)0)
-
 #define ith_state              saved.receive.state
 #define ith_object             saved.receive.object
-#define ith_msg                        saved.receive.msg
+#define ith_msg_addr                   saved.receive.msg_addr
 #define ith_msize              saved.receive.msize
 #define        ith_option              saved.receive.option
 #define ith_scatter_list_size  saved.receive.slist_size
+#define ith_receiver_name      saved.receive.receiver_name
 #define ith_continuation       saved.receive.continuation
 #define ith_kmsg               saved.receive.kmsg
 #define ith_seqno              saved.receive.seqno
@@ -390,246 +483,466 @@ struct thread_shuttle {
 #define sth_result             saved.sema.result
 #define sth_continuation       saved.sema.continuation
 
+extern void                    thread_bootstrap(void);
+
+extern void                    thread_init(void);
+
+extern void                    thread_daemon_init(void);
+
+#define        thread_reference_internal(thread)       \
+                       (void)hw_atomic_add(&(thread)->ref_count, 1)
+
+#define thread_deallocate_internal(thread)     \
+                       hw_atomic_sub(&(thread)->ref_count, 1)
+
+#define thread_reference(thread)                                       \
+MACRO_BEGIN                                                                                    \
+       if ((thread) != THREAD_NULL)                                    \
+               thread_reference_internal(thread);              \
+MACRO_END
+
+extern void                    thread_deallocate(
+                                               thread_t                thread);
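
A hedged usage sketch of the reference-counting interfaces above; example_use_thread() is illustrative only and not part of xnu.

static void
example_use_thread(thread_t thread)
{
	thread_reference(thread);	/* atomic increment of ref_count */

	/* ... safe to use the thread while the reference is held ... */

	thread_deallocate(thread);	/* drop the reference; reclaimed on last release */
}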
+
+extern void                    thread_terminate_self(void);
+
+extern kern_return_t   thread_terminate_internal(
+                                                       thread_t                thread);
+
+extern void                    thread_start_internal(
+                                                       thread_t                        thread) __attribute__ ((noinline));
+
+extern void                    thread_terminate_enqueue(
+                                               thread_t                thread);
+
+extern void                    thread_stack_enqueue(
+                                               thread_t                thread);
+
+extern void                    thread_hold(
+                                               thread_t        thread);
+
+extern void                    thread_release(
+                                               thread_t        thread);
+
+
+#define        thread_lock_init(th)    simple_lock_init(&(th)->sched_lock, 0)
+#define thread_lock(th)                        simple_lock(&(th)->sched_lock)
+#define thread_unlock(th)              simple_unlock(&(th)->sched_lock)
+
+#define wake_lock_init(th)             simple_lock_init(&(th)->wake_lock, 0)
+#define wake_lock(th)                  simple_lock(&(th)->wake_lock)
+#define wake_unlock(th)                        simple_unlock(&(th)->wake_lock)
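
A hedged sketch of the conventional use of these macros: both locks are taken at splsched(), with wake_lock ordered before thread_lock, per the lock-ordering comment that a later hunk removes. example_inspect_thread() is illustrative only.

static void
example_inspect_thread(thread_t thread)
{
	spl_t s;

	s = splsched();
	wake_lock(thread);		/* protects wake_active */
	thread_lock(thread);		/* protects scheduling state */

	/* ... examine thread->state and thread->wake_active ... */

	thread_unlock(thread);
	wake_unlock(thread);
	splx(s);
}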
+
+#define thread_should_halt_fast(thread)                (!(thread)->active)
+
+extern void                            stack_alloc(
+                                                       thread_t                thread);
+
+extern void                    stack_handoff(
+                                                       thread_t                from,
+                                                       thread_t                to);
+
+extern void                            stack_free(
+                                                       thread_t                thread);
+
+extern void                            stack_free_reserved(
+                                                       thread_t                thread);
+
+extern boolean_t               stack_alloc_try(
+                                                       thread_t            thread);
+
+extern void                            stack_collect(void);
+
+extern void                            stack_init(void);
+
+
+extern kern_return_t   thread_info_internal(
+                                                       thread_t                                thread,
+                                                       thread_flavor_t                 flavor,
+                                                       thread_info_t                   thread_info_out,
+                                                       mach_msg_type_number_t  *thread_info_count);
+
+extern void                            thread_task_priority(
+                                                       thread_t                thread,
+                                                       integer_t               priority,
+                                                       integer_t               max_priority);
+
+extern void                            thread_policy_reset(
+                                                       thread_t                thread);
+
+extern kern_return_t   kernel_thread_create(
+                                                       thread_continue_t       continuation,
+                                                       void                            *parameter,
+                                                       integer_t                       priority,
+                                                       thread_t                        *new_thread);
+
+extern kern_return_t   kernel_thread_start_priority(
+                                                       thread_continue_t       continuation,
+                                                       void                            *parameter,
+                                                       integer_t                       priority,
+                                                       thread_t                        *new_thread);
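
A hedged sketch of spinning up a kernel thread with the routine above; example_worker(), example_spawn_worker() and the MINPRI_KERNEL priority choice are illustrative assumptions, not part of this header.

static void
example_worker(void *parameter __unused, wait_result_t wr __unused)
{
	for (;;) {
		/* wait for and process work; a continuation never returns */
	}
}

static kern_return_t
example_spawn_worker(void)
{
	thread_t		thread;
	kern_return_t	result;

	result = kernel_thread_start_priority(example_worker, NULL,
	    MINPRI_KERNEL, &thread);
	if (result != KERN_SUCCESS)
		return result;

	/* creation returns a reference on the new thread; drop it once started */
	thread_deallocate(thread);
	return KERN_SUCCESS;
}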
+
+extern void                            machine_stack_attach(
+                                                       thread_t                thread,
+                                                       vm_offset_t             stack);
+
+extern vm_offset_t             machine_stack_detach(
+                                                       thread_t                thread);
+
+extern void                            machine_stack_handoff(
+                                                       thread_t                old,
+                                                       thread_t                new);
+
+extern thread_t                        machine_switch_context(
+                                                       thread_t                        old_thread,
+                                                       thread_continue_t       continuation,
+                                                       thread_t                        new_thread);
+
+extern void                            machine_load_context(
+                                                       thread_t                thread);
+
+extern kern_return_t   machine_thread_state_initialize(
+                                                       thread_t                                thread);
+
+extern kern_return_t   machine_thread_set_state(
+                                                       thread_t                                thread,
+                                                       thread_flavor_t                 flavor,
+                                                       thread_state_t                  state,
+                                                       mach_msg_type_number_t  count);
+
+extern kern_return_t   machine_thread_get_state(
+                                                       thread_t                                thread,
+                                                       thread_flavor_t                 flavor,
+                                                       thread_state_t                  state,
+                                                       mach_msg_type_number_t  *count);
+
+extern kern_return_t   machine_thread_dup(
+                                                       thread_t                self,
+                                                       thread_t                target);
+
+extern void                            machine_thread_init(void);
+
+extern kern_return_t   machine_thread_create(
+                                                       thread_t                thread,
+                                                       task_t                  task);
+extern void            machine_thread_switch_addrmode(
+                                                      thread_t                 thread);
+
+extern void                machine_thread_destroy(
+                                                       thread_t                thread);
+
+extern void                            machine_set_current_thread(
+                                                       thread_t                        thread);
+
+extern kern_return_t   machine_thread_get_kern_state(
+                                                       thread_t                                thread,
+                                                       thread_flavor_t                 flavor,
+                                                       thread_state_t                  tstate,
+                                                       mach_msg_type_number_t  *count);
+
+extern kern_return_t   machine_thread_inherit_taskwide(
+                                                       thread_t                thread,
+                                                       task_t                  parent_task);
+
+/*
+ * XXX Funnel locks XXX
+ */
+
 struct funnel_lock {
        int                     fnl_type;                       /* funnel type */
-       mutex_t         *fnl_mutex;                     /* underlying mutex for the funnel */
+       lck_mtx_t       *fnl_mutex;                     /* underlying mutex for the funnel */
        void *          fnl_mtxholder;          /* thread (last)holdng mutex */
        void *          fnl_mtxrelease;         /* thread (last)releasing mutex */
-       mutex_t         *fnl_oldmutex;          /* Mutex before collapsing split funnel */
+       lck_mtx_t       *fnl_oldmutex;          /* Mutex before collapsing split funnel */
 };
 
-typedef struct funnel_lock             funnel_t;
+typedef struct ReturnHandler           ReturnHandler;
 
-extern thread_act_t active_kloaded[NCPUS];     /* "" kernel-loaded acts */
-extern vm_offset_t active_stacks[NCPUS];       /* active kernel stacks */
-extern vm_offset_t kernel_stack[NCPUS];
+#define        thread_mtx_lock(thread)                 lck_mtx_lock(&(thread)->mutex)
+#define        thread_mtx_try(thread)                  lck_mtx_try_lock(&(thread)->mutex)
+#define        thread_mtx_unlock(thread)               lck_mtx_unlock(&(thread)->mutex)
 
-#ifndef MACHINE_STACK_STASH
-/*
- * MD Macro to fill up global stack state,
- * keeping the MD structure sizes + games private
- */
-#define MACHINE_STACK_STASH(stack)                                                             \
-MACRO_BEGIN                                                                                                            \
-       mp_disable_preemption();                                                                        \
-       active_stacks[cpu_number()] = (stack);                                          \
-       kernel_stack[cpu_number()] = (stack) + KERNEL_STACK_SIZE;       \
-       mp_enable_preemption();                                                                         \
-MACRO_END
-#endif /* MACHINE_STACK_STASH */
+extern void                    act_execute_returnhandlers(void);
 
-/*
- *     Kernel-only routines
- */
+extern void                    install_special_handler(
+                                               thread_t                thread);
 
-/* Initialize thread module */
-extern void            thread_init(void);
+extern void                    special_handler(
+                                               ReturnHandler   *rh,
+                                               thread_t                thread);
 
-/* Take reference on thread (make sure it doesn't go away) */
-extern void            thread_reference(
-                                       thread_t                thread);
+void act_machine_sv_free(thread_t, int);
 
-/* Release reference on thread */
-extern void            thread_deallocate(
-                                       thread_t                thread);
+vm_offset_t                    min_valid_stack_address(void);
+vm_offset_t                    max_valid_stack_address(void);
 
-/* Set task priority of member thread */
-extern void            thread_task_priority(
-                                       thread_t                thread,
-                                       integer_t               priority,
-                                       integer_t               max_priority);
+extern void            funnel_lock(
+                                               struct funnel_lock      *lock);
 
-/* Start a thread at specified routine */
-#define thread_start(thread, start)                                            \
-                                       (thread)->continuation = (start)
+extern void            funnel_unlock(
+                                               struct funnel_lock      *lock);
 
 
-/* Reaps threads waiting to be destroyed */
-extern void            thread_reaper(void);
+static inline uint16_t thread_set_tag_internal(thread_t        thread, uint16_t tag) {
+       return __sync_fetch_and_or(&thread->thread_tag, tag);
+}
 
+static inline uint16_t thread_get_tag_internal(thread_t        thread) {
+       return thread->thread_tag;
+}
 
-#if    MACH_HOST
-/* Preclude thread processor set assignement */
-extern void            thread_freeze(
-                                       thread_t                thread);
+#else  /* MACH_KERNEL_PRIVATE */
 
-/* Assign thread to a processor set */
-extern void            thread_doassign(
-                                       thread_t                thread,
-                                       processor_set_t new_pset,
-                                       boolean_t               release_freeze);
+__BEGIN_DECLS
 
-/* Allow thread processor set assignement */
-extern void            thread_unfreeze(
-                                       thread_t                thread);
+extern thread_t                current_thread(void);
 
-#endif /* MACH_HOST */
+extern void                    thread_reference(
+                                               thread_t        thread);
 
-/* Insure thread always has a kernel stack */
-extern void            stack_privilege(
-                                       thread_t                thread);
+extern void                    thread_deallocate(
+                                               thread_t        thread);
 
-extern void            consider_thread_collect(void);
+__END_DECLS
 
-/*
- *     Arguments to specify aggressiveness to thread halt.
- *     Can't have MUST_HALT and SAFELY at the same time.
- */
-#define        THREAD_HALT_NORMAL      0
-#define        THREAD_HALT_MUST_HALT   1       /* no deadlock checks */
-#define        THREAD_HALT_SAFELY      2       /* result must be restartable */
+#endif /* MACH_KERNEL_PRIVATE */
 
-/*
- *     Macro-defined routines
- */
+#ifdef KERNEL_PRIVATE
 
-#define thread_pcb(th)         ((th)->pcb)
+__BEGIN_DECLS
 
-#define        thread_lock_init(th)                                                                                    \
-                               simple_lock_init(&(th)->lock, ETAP_THREAD_LOCK)
-#define thread_lock(th)                simple_lock(&(th)->lock)
-#define thread_unlock(th)      simple_unlock(&(th)->lock)
 
-#define thread_should_halt_fast(thread)        \
-       (!(thread)->top_act || \
-       !(thread)->top_act->active || \
-       (thread)->top_act->ast & (AST_HALT|AST_TERMINATE))
+extern uint64_t                        thread_tid(
+                                               thread_t thread);
 
-#define thread_should_halt(thread) thread_should_halt_fast(thread)
+extern uint64_t                        thread_dispatchqaddr(
+                                               thread_t thread);
 
-#define rpc_lock_init(th)      mutex_init(&(th)->rpc_lock, ETAP_THREAD_RPC)
-#define rpc_lock(th)           mutex_lock(&(th)->rpc_lock)
-#define rpc_lock_try(th)       mutex_try(&(th)->rpc_lock)
-#define rpc_unlock(th)         mutex_unlock(&(th)->rpc_lock)
+__END_DECLS
 
-/*
- * Lock to cover wake_active only; like thread_lock(), is taken
- * at splsched().  Used to avoid calling into scheduler with a
- * thread_lock() held.  Precedes thread_lock() (and other scheduling-
- * related locks) in the system lock ordering.
- */
-#define wake_lock_init(th)                                     \
-                       simple_lock_init(&(th)->wake_lock, ETAP_THREAD_WAKE)
-#define wake_lock(th)          simple_lock(&(th)->wake_lock)
-#define wake_unlock(th)                simple_unlock(&(th)->wake_lock)
-
-static __inline__ vm_offset_t current_stack(void);
-static __inline__ vm_offset_t
-current_stack(void)
-{
-       vm_offset_t     ret;
-
-       mp_disable_preemption();
-       ret = active_stacks[cpu_number()];
-       mp_enable_preemption();
-       return ret;
-}
+#endif /* KERNEL_PRIVATE */
 
+__BEGIN_DECLS
 
-extern void            pcb_module_init(void);
+#ifdef XNU_KERNEL_PRIVATE
 
-extern void            pcb_init(
-                                       thread_act_t    thr_act);
+/*
+ * Thread tags; for easy identification.
+ */
+#define        THREAD_TAG_MAINTHREAD 0x1
+#define        THREAD_TAG_CALLOUT 0x2
+#define        THREAD_TAG_IOWORKLOOP 0x4
 
-extern void            pcb_terminate(
-                                       thread_act_t    thr_act);
+uint16_t       thread_set_tag(thread_t, uint16_t);
+uint16_t       thread_get_tag(thread_t);
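
A hedged example of the tag accessors declared above; example_tag_current_thread() is illustrative only.

static void
example_tag_current_thread(void)
{
	thread_t self = current_thread();

	/* tags accumulate; the previous tag bits are returned */
	(void)thread_set_tag(self, THREAD_TAG_IOWORKLOOP);

	if (thread_get_tag(self) & THREAD_TAG_IOWORKLOOP) {
		/* thread participates in an IOKit work loop */
	}
}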
 
-extern void            pcb_collect(
-                                       thread_act_t    thr_act);
 
-extern void            pcb_user_to_kernel(
-                                       thread_act_t    thr_act);
+extern kern_return_t    thread_state_initialize(
+                                                       thread_t                                thread);
 
 extern kern_return_t   thread_setstatus(
-                                                       thread_act_t                    thr_act,
+                                                       thread_t                                thread,
                                                        int                                             flavor,
                                                        thread_state_t                  tstate,
                                                        mach_msg_type_number_t  count);
 
 extern kern_return_t   thread_getstatus(
-                                                       thread_act_t                    thr_act,
+                                                       thread_t                                thread,
                                                        int                                             flavor,
                                                        thread_state_t                  tstate,
                                                        mach_msg_type_number_t  *count);
 
-extern boolean_t               stack_alloc_try(
-                                                       thread_t                            thread,
-                                                       void                                    (*start_pos)(thread_t));
+extern kern_return_t   thread_create_workq(
+                                                       task_t                  task,
+                                                       thread_continue_t       thread_return,
+                                                       thread_t                *new_thread);
 
-/* This routine now used only internally */
-extern kern_return_t   thread_info_shuttle(
-                                                       thread_act_t                    thr_act,
-                                                       thread_flavor_t                 flavor,
-                                                       thread_info_t                   thread_info_out,
-                                                       mach_msg_type_number_t  *thread_info_count);
+extern void    thread_yield_internal(
+       mach_msg_timeout_t      interval);
 
-extern void            thread_user_to_kernel(
-                                       thread_t                thread);
+/*
+ * Thread-private CPU limits: apply a private CPU limit to this thread only. Available actions are:
+ * 
+ * 1) Block. Prevent CPU consumption of the thread from exceeding the limit.
+ * 2) Exception. Generate a resource consumption exception when the limit is exceeded.
+ * 3) Disable. Remove any existing CPU limit.
+ */
+#define THREAD_CPULIMIT_BLOCK          0x1
+#define THREAD_CPULIMIT_EXCEPTION      0x2
+#define        THREAD_CPULIMIT_DISABLE         0x3
 
-/* Machine-dependent routines */
-extern void            thread_machine_init(void);
+struct _thread_ledger_indices {
+       int cpu_time;
+};
 
-extern void            thread_machine_set_current(
-                                       thread_t                thread );
+extern struct _thread_ledger_indices thread_ledgers;
 
-extern kern_return_t   thread_machine_create(
-                                                       thread_t                        thread,
-                                                       thread_act_t            thr_act,
-                                                       void                            (*start_pos)(thread_t));
+extern int thread_get_cpulimit(int *action, uint8_t *percentage, uint64_t *interval_ns);
+extern int thread_set_cpulimit(int action, uint8_t percentage, uint64_t interval_ns);
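A hedged sketch of how a kernel-internal caller might use the get/set pair declared above. The 100 ms window, the assumption that a zero return means success, and the assumption that the disable action ignores its percentage/interval arguments are illustrative only and not taken from this header.

    #include <kern/thread.h>        /* thread_set_cpulimit, THREAD_CPULIMIT_* */
    #include <mach/clock_types.h>   /* NSEC_PER_MSEC */

    /* Sketch: throttle the calling thread to 50% of a 100 ms window,
     * blocking it whenever the budget is exhausted, then remove the limit. */
    static void
    example_limit_current_thread(void)
    {
            uint64_t interval_ns = 100 * NSEC_PER_MSEC;     /* 100 ms window */

            /* Assumption: a zero return indicates success. */
            if (thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, 50, interval_ns) != 0)
                    return;

            /* ... CPU-intensive work runs under the limit here ... */

            /* Assumption: percentage and interval are ignored for the
             * disable action, so zeros are passed. */
            (void)thread_set_cpulimit(THREAD_CPULIMIT_DISABLE, 0, 0);
    }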
 
-extern void            thread_set_syscall_return(
-                                       thread_t                thread,
-                                       kern_return_t   retval);
+typedef struct funnel_lock             funnel_t;
 
-extern void            thread_machine_destroy(
-                                       thread_t                thread );
+#define THR_FUNNEL_NULL (funnel_t *)0
 
-extern void            thread_machine_flush(
-                               thread_act_t thr_act);
+extern funnel_t                 *funnel_alloc(
+                                               int                     type);
 
-extern thread_t     kernel_thread_with_priority(
-                    task_t          task,
-                                       integer_t               priority,
-                    void            (*start)(void),
-                                       boolean_t               alloc_stack,
-                    boolean_t       start_running);
+extern void                    funnel_free(
+                                               funnel_t        *lock);
 
-extern void            funnel_lock(funnel_t *);
+extern funnel_t                *thread_funnel_get(void);
 
-extern void            funnel_unlock(funnel_t *);
+extern boolean_t       thread_funnel_set(
+                                               funnel_t        *lock,
+                                               boolean_t        funneled);
 
-#else /* !MACH_KERNEL_PRIVATE */
+extern void                    thread_read_times(
+                                               thread_t                thread,
+                                               time_value_t    *user_time,
+                                               time_value_t    *system_time);
 
-typedef struct funnel_lock             funnel_t;
+extern void                    thread_setuserstack(
+                                               thread_t                thread,
+                                               mach_vm_offset_t        user_stack);
 
-extern boolean_t thread_should_halt(thread_t);
+extern uint64_t                thread_adjuserstack(
+                                               thread_t                thread,
+                                               int                             adjust);
 
-#endif /* !MACH_KERNEL_PRIVATE */
+extern void                    thread_setentrypoint(
+                                               thread_t                thread,
+                                               mach_vm_offset_t        entry);
 
-#define THR_FUNNEL_NULL (funnel_t *)0
+extern kern_return_t   thread_setsinglestep(
+                                               thread_t                thread,
+                                               int                     on);
 
-extern thread_t                kernel_thread(
-                                       task_t  task,
-                                       void    (*start)(void));
+extern kern_return_t   thread_userstack(
+                                               thread_t,
+                                               int,
+                                               thread_state_t,
+                                               unsigned int,
+                                               mach_vm_offset_t *,
+                                               int *);
 
-extern void                    thread_terminate_self(void);
+extern kern_return_t   thread_entrypoint(
+                                               thread_t,
+                                               int,
+                                               thread_state_t,
+                                               unsigned int,
+                                               mach_vm_offset_t *); 
 
-extern funnel_t *      funnel_alloc(int);
+extern kern_return_t   thread_userstackdefault(
+                                               thread_t,
+                                               mach_vm_offset_t *);
 
-extern funnel_t *      thread_funnel_get(void);
+extern kern_return_t   thread_wire_internal(
+                                                       host_priv_t             host_priv,
+                                                       thread_t                thread,
+                                                       boolean_t               wired,
+                                                       boolean_t               *prev_state);
 
-extern boolean_t       thread_funnel_set(funnel_t * fnl, boolean_t funneled);
+extern kern_return_t   thread_dup(thread_t);
 
-extern boolean_t       thread_funnel_merge(funnel_t * fnl, funnel_t * otherfnl);
+typedef void   (*sched_call_t)(
+                                       int                             type,
+                                       thread_t                thread);
 
-extern void         thread_set_cont_arg(int);
+#define SCHED_CALL_BLOCK               0x1
+#define SCHED_CALL_UNBLOCK             0x2
 
-extern int          thread_get_cont_arg(void);
+extern void            thread_sched_call(
+                                       thread_t                thread,
+                                       sched_call_t    call);
 
-/* JMM - These are only temporary */
-extern boolean_t       is_thread_running(thread_t); /* True is TH_RUN */
-extern boolean_t       is_thread_idle(thread_t); /* True is TH_IDLE */
-extern event_t         get_thread_waitevent(thread_t);
-extern kern_return_t   get_thread_waitresult(thread_t);
+extern void            thread_static_param(
+                                       thread_t                thread,
+                                       boolean_t               state);
+
+extern kern_return_t   thread_policy_set_internal(
+                                       thread_t                thread,
+                                       thread_policy_flavor_t  flavor,
+                                       thread_policy_t         policy_info,
+                                       mach_msg_type_number_t  count);
+
+
+extern task_t  get_threadtask(thread_t);
+#define thread_is_64bit(thd)   \
+       task_has_64BitAddr(get_threadtask(thd))
+
+
+extern void            *get_bsdthread_info(thread_t);
+extern void            set_bsdthread_info(thread_t, void *);
+extern void            *uthread_alloc(task_t, thread_t, int);
+extern void            uthread_cleanup(task_t, void *, void *); 
+extern void            uthread_zone_free(void *); 
+extern void            uthread_cred_free(void *); 
+
+extern boolean_t       thread_should_halt(
+                                               thread_t                thread);
+
+extern boolean_t       thread_should_abort(
+                                               thread_t);
+
+extern int is_64signalregset(void);
+
+void act_set_apc(thread_t);
+void act_set_kperf(thread_t);
+
+extern uint32_t dtrace_get_thread_predcache(thread_t);
+extern int64_t dtrace_get_thread_vtime(thread_t);
+extern int64_t dtrace_get_thread_tracing(thread_t);
+extern boolean_t dtrace_get_thread_reentering(thread_t);
+extern vm_offset_t dtrace_get_kernel_stack(thread_t);
+extern void dtrace_set_thread_predcache(thread_t, uint32_t);
+extern void dtrace_set_thread_vtime(thread_t, int64_t);
+extern void dtrace_set_thread_tracing(thread_t, int64_t);
+extern void dtrace_set_thread_reentering(thread_t, boolean_t);
+extern vm_offset_t dtrace_set_thread_recover(thread_t, vm_offset_t);
+extern void dtrace_thread_bootstrap(void);
+extern void dtrace_thread_didexec(thread_t);
+
+extern int64_t dtrace_calc_thread_recent_vtime(thread_t);
+
+
+extern kern_return_t   thread_set_wq_state32(
+                                             thread_t          thread,
+                                             thread_state_t    tstate);
+
+extern kern_return_t   thread_set_wq_state64(
+                                             thread_t          thread,
+                                             thread_state_t    tstate);
+
+extern vm_offset_t     kernel_stack_mask;
+extern vm_offset_t     kernel_stack_size;
+extern vm_offset_t     kernel_stack_depth_max;
+
+void guard_ast(thread_t thread);
+extern void fd_guard_ast(thread_t thread);
+extern void mach_port_guard_ast(thread_t thread);
+extern void thread_guard_violation(thread_t thread, unsigned type);
+
+#endif /* XNU_KERNEL_PRIVATE */
+
+/*! @function kernel_thread_start
+    @abstract Create a kernel thread.
+    @discussion Creates a kernel thread that begins execution at the given continuation function, passing it the caller-specified data. A reference to the newly created thread is returned in new_thread; the caller is responsible for releasing this reference with thread_deallocate(new_thread) when it is no longer needed.
+    @param continuation A C-function pointer where the thread will begin execution.
+    @param parameter Caller-specified data to be passed to the new thread.
+    @param new_thread A reference to the new thread is returned in this parameter.
+    @result Returns KERN_SUCCESS on success or an appropriate kernel code type.
+*/
+
+extern kern_return_t   kernel_thread_start(
+                                                       thread_continue_t       continuation,
+                                                       void                            *parameter,
+                                                       thread_t                        *new_thread);
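A minimal sketch of the pattern the discussion above describes; the worker function, its body, and the calling convention around it are illustrative assumptions, not part of this header.

    #include <kern/thread.h>        /* kernel_thread_start, thread_deallocate */

    /* Entry point for the new thread; the signature matches thread_continue_t. */
    static void
    example_worker(void *parameter, __unused wait_result_t wait_result)
    {
            /* ... worker body; a real continuation would loop or terminate
             * itself rather than simply return (omitted in this sketch) ... */
    }

    static kern_return_t
    example_spawn_worker(void *arg)
    {
            thread_t        new_thread;
            kern_return_t   kr;

            kr = kernel_thread_start(example_worker, arg, &new_thread);
            if (kr != KERN_SUCCESS)
                    return kr;

            /* Per the discussion above, drop the reference returned by
             * kernel_thread_start() once the handle is no longer needed. */
            thread_deallocate(new_thread);
            return KERN_SUCCESS;
    }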
+#ifdef KERNEL_PRIVATE
+void thread_set_eager_preempt(thread_t thread);
+void thread_clear_eager_preempt(thread_t thread);
+extern ipc_port_t convert_thread_to_port(thread_t);
+#endif /* KERNEL_PRIVATE */
+
+__END_DECLS
 
 #endif /* _KERN_THREAD_H_ */