diff --git a/osfmk/kern/thread.h b/osfmk/kern/thread.h
index 46ce011ca16ea1d94b5d97ce19080b90867f9bfc..b497fa3facfd3e7999dd50c42d37bc9e5726bf38 100644
--- a/osfmk/kern/thread.h
+++ b/osfmk/kern/thread.h
@@ -1,31 +1,29 @@
 /*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
- * This file contains Original Code and/or Modifications of Original Code 
- * as defined in and that are subject to the Apple Public Source License 
- * Version 2.0 (the 'License'). You may not use this file except in 
- * compliance with the License.  The rights granted to you under the 
- * License may not be used to create, or enable the creation or 
- * redistribution of, unlawful or unlicensed copies of an Apple operating 
- * system, or to circumvent, violate, or enable the circumvention or 
- * violation of, any terms of an Apple operating system software license 
- * agreement.
- *
- * Please obtain a copy of the License at 
- * http://www.opensource.apple.com/apsl/ and read it before using this 
- * file.
- *
- * The Original Code and all software distributed under the License are 
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
- * Please see the License for the specific language governing rights and 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ * 
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
  * limitations under the License.
- *
- * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
+ * 
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_FREE_COPYRIGHT@
 #include <cputypes.h>
 
 #include <mach_assert.h>
-#include <mach_host.h>
-#include <mach_prof.h>
 #include <mach_ldebug.h>
 
 #include <ipc/ipc_types.h>
 #include <kern/timer_call.h>
 #include <kern/task.h>
 #include <kern/exception.h>
+#include <kern/affinity.h>
 
 #include <ipc/ipc_kmsg.h>
 
 struct thread {
        /*
         *      NOTE:   The runq field in the thread structure has an unusual
-        *      locking protocol.  If its value is RUN_QUEUE_NULL, then it is
+        *      locking protocol.  If its value is PROCESSOR_NULL, then it is
         *      locked by the thread_lock, but if its value is something else
-        *      (i.e. a run_queue) then it is locked by that run_queue's lock.
+        *      then it is locked by the associated run queue lock.
         *
         *      When the thread is on a wait queue, these first three fields
         *      are treated as an unofficial union with a wait_queue_element.
@@ -142,19 +139,21 @@ struct thread {
         */
        /* Items examined often, modified infrequently */
        queue_chain_t   links;                          /* run/wait queue links */
-       run_queue_t             runq;                           /* run queue thread is on SEE BELOW */
+       processor_t             runq;                           /* run queue assignment */
        wait_queue_t    wait_queue;                     /* wait queue we are currently on */
        event64_t               wait_event;                     /* wait queue event */
        integer_t               options;                        /* options set by thread itself */
 #define TH_OPT_INTMASK         0x03            /* interrupt / abort level */
 #define TH_OPT_VMPRIV          0x04            /* may allocate reserved memory */
-#define TH_OPT_DELAYIDLE       0x08            /* performing delayed idle */
-#define TH_OPT_CALLOUT         0x10            /* executing as callout */
+#define TH_OPT_DTRACE          0x08            /* executing under dtrace_probe */
+#define TH_OPT_SYSTEM_CRITICAL 0x10            /* Thread must always be allowed to run - even under heavy load */
+#define TH_OPT_PROC_CPULIMIT   0x20            /* Thread has a task-wide CPU limit applied to it */
+#define TH_OPT_PRVT_CPULIMIT   0x40            /* Thread has a thread-private CPU limit applied to it */
 
        /* Data updated during assert_wait/thread_wakeup */
        decl_simple_lock_data(,sched_lock)      /* scheduling lock (thread_lock()) */
-       decl_simple_lock_data(,wake_lock)       /* covers wake_active (wake_lock())*/
-       boolean_t                       wake_active;    /* Someone is waiting for this */
+       decl_simple_lock_data(,wake_lock)       /* for thread stop / wait (wake_lock()) */
+       boolean_t                       wake_active;    /* wake event on stop */
        int                                     at_safe_point;  /* thread_abort_safely allowed */
        ast_t                           reason;                 /* why we blocked */
        wait_result_t           wait_result;    /* outcome of wait -
@@ -182,30 +181,49 @@ struct thread {
 #define TH_RUN                 0x04                    /* running or on runq */
 #define TH_UNINT               0x08                    /* waiting uninterruptibly */
 #define        TH_TERMINATE    0x10                    /* halted at termination */
+#define        TH_TERMINATE2   0x20                    /* added to termination queue */
 
-#define TH_ABORT               0x20                    /* abort interruptible waits */
-#define TH_ABORT_SAFELY        0x40                    /* ... but only those at safe point */
-
-#define TH_IDLE                        0x80                    /* processor idle thread */
-
-#define        TH_SCHED_STATE  (TH_WAIT|TH_SUSP|TH_RUN|TH_UNINT)
+#define TH_IDLE                        0x80                    /* idling processor */
 
        /* Scheduling information */
-       integer_t                       sched_mode;                     /* scheduling mode bits */
-#define TH_MODE_REALTIME               0x0001          /* time constraints supplied */
-#define TH_MODE_TIMESHARE              0x0002          /* use timesharing algorithm */
-#define TH_MODE_PREEMPT                        0x0004          /* can preempt kernel contexts */
-#define TH_MODE_FAILSAFE               0x0008          /* fail-safe has tripped */
-#define        TH_MODE_PROMOTED                0x0010          /* sched pri has been promoted */
-#define        TH_MODE_DEPRESS                 0x0020          /* normal depress yield */
-#define TH_MODE_POLLDEPRESS            0x0040          /* polled depress yield */
-#define TH_MODE_ISDEPRESSED            (TH_MODE_DEPRESS | TH_MODE_POLLDEPRESS)
+       sched_mode_t                    sched_mode;             /* scheduling mode */
+       sched_mode_t                    saved_mode;             /* saved mode during forced mode demotion */
+       
+       unsigned int                    sched_flags;            /* current flag bits */
+#define TH_SFLAG_FAIRSHARE_TRIPPED     0x0001          /* fairshare scheduling activated */
+#define TH_SFLAG_FAILSAFE              0x0002          /* fail-safe has tripped */
+#define TH_SFLAG_THROTTLED             0x0004      /* owner task in throttled state */
+#define TH_SFLAG_DEMOTED_MASK      (TH_SFLAG_THROTTLED | TH_SFLAG_FAILSAFE | TH_SFLAG_FAIRSHARE_TRIPPED)
+
+#define        TH_SFLAG_PROMOTED               0x0008          /* sched pri has been promoted */
+#define TH_SFLAG_ABORT                 0x0010          /* abort interruptible waits */
+#define TH_SFLAG_ABORTSAFELY           0x0020          /* ... but only those at safe point */
+#define TH_SFLAG_ABORTED_MASK          (TH_SFLAG_ABORT | TH_SFLAG_ABORTSAFELY)
+#define        TH_SFLAG_DEPRESS                0x0040          /* normal depress yield */
+#define TH_SFLAG_POLLDEPRESS           0x0080          /* polled depress yield */
+#define TH_SFLAG_DEPRESSED_MASK                (TH_SFLAG_DEPRESS | TH_SFLAG_POLLDEPRESS)
+#define TH_SFLAG_PRI_UPDATE            0x0100          /* Updating priority */
+#define TH_SFLAG_EAGERPREEMPT          0x0200          /* Any preemption of this thread should be treated as if AST_URGENT applied */
+
+/*
+ * A thread can either be completely unthrottled, about to be throttled,
+ * throttled (TH_SFLAG_THROTTLED), or about to be unthrottled
+ */
+#define        TH_SFLAG_PENDING_THROTTLE_DEMOTION      0x1000  /* Pending sched_mode demotion */
+#define        TH_SFLAG_PENDING_THROTTLE_PROMOTION     0x2000  /* Pending sched_mode promotion */
+#define        TH_SFLAG_PENDING_THROTTLE_MASK          (TH_SFLAG_PENDING_THROTTLE_DEMOTION | TH_SFLAG_PENDING_THROTTLE_PROMOTION)
 
        integer_t                       sched_pri;                      /* scheduled (current) priority */
        integer_t                       priority;                       /* base priority */
        integer_t                       max_priority;           /* max base priority */
        integer_t                       task_priority;          /* copy of task base priority */
 
+#if defined(CONFIG_SCHED_GRRR)
+#if 0
+       uint16_t                        grrr_deficit;           /* fixed point (1/1000th quantum) fractional deficit */
+#endif
+#endif
+       
        integer_t                       promotions;                     /* level of promotion */
        integer_t                       pending_promoter_index;
        void                            *pending_promoter[2];
@@ -222,32 +240,50 @@ struct thread {
                uint64_t                        deadline;
        }                                       realtime;
 
+       uint32_t                        was_promoted_on_wakeup;
        uint32_t                        current_quantum;        /* duration of current quantum */
+       uint64_t                        last_run_time;          /* time when thread was switched away from */
+       uint64_t                        last_quantum_refill_time;       /* time when current_quantum was refilled after expiration */
 
   /* Data used during setrun/dispatch */
        timer_data_t            system_timer;           /* system mode timer */
-       processor_set_t         processor_set;          /* assigned processor set */
        processor_t                     bound_processor;        /* bound to a processor? */
        processor_t                     last_processor;         /* processor last dispatched on */
-       uint64_t                        last_switch;            /* time of last context switch */
+       processor_t                     chosen_processor;       /* Where we want to run this thread */
 
        /* Fail-safe computation since last unblock or qualifying yield */
        uint64_t                        computation_metered;
        uint64_t                        computation_epoch;
-       integer_t                       safe_mode;              /* saved mode during fail-safe */
-       natural_t                       safe_release;   /* when to release fail-safe */
-
+       uint64_t                        safe_release;   /* when to release fail-safe */
+
+       /* Call out from scheduler */
+       void                            (*sched_call)(
+                                                       int                     type,
+                                                       thread_t        thread);
+#if defined(CONFIG_SCHED_PROTO)
+       uint32_t                        runqueue_generation;    /* last time runqueue was drained */
+#endif
+       
        /* Statistics and timesharing calculations */
+#if defined(CONFIG_SCHED_TRADITIONAL)
        natural_t                       sched_stamp;    /* last scheduler tick */
        natural_t                       sched_usage;    /* timesharing cpu usage [sched] */
        natural_t                       pri_shift;              /* usage -> priority from pset */
        natural_t                       cpu_usage;              /* instrumented cpu usage [%cpu] */
        natural_t                       cpu_delta;              /* accumulated cpu_usage delta */
+#endif
+       uint32_t                        c_switch;               /* total context switches */
+       uint32_t                        p_switch;               /* total processor switches */
+       uint32_t                        ps_switch;              /* total pset switches */
 
        /* Timing data structures */
+       int                                     precise_user_kernel_time; /* precise user/kernel enabled for this thread */
        timer_data_t            user_timer;                     /* user mode timer */
-       uint64_t                        system_timer_save;      /* saved system timer value */
        uint64_t                        user_timer_save;        /* saved user timer value */
+       uint64_t                        system_timer_save;      /* saved system timer value */
+       uint64_t                        vtimer_user_save;       /* saved values for vtimers */
+       uint64_t                        vtimer_prof_save;
+       uint64_t                        vtimer_rlim_save;
 
        /* Timed wait expiration */
        timer_call_data_t       wait_timer;
@@ -258,6 +294,13 @@ struct thread {
        timer_call_data_t       depress_timer;
        integer_t                       depress_timer_active;
 
+       /*
+        * Processor/cache affinity
+        * - affinity_threads links task threads with the same affinity set
+        */
+       affinity_set_t                  affinity_set;
+       queue_chain_t                   affinity_threads;
+
        /* Various bits of stashed state */
        union {
                struct {
@@ -267,6 +310,7 @@ struct thread {
                        mach_msg_size_t         msize;          /* max size for recvd msg */
                        mach_msg_option_t       option;         /* options for receive */
                        mach_msg_size_t         slist_size;     /* scatter list size */
+                       mach_port_name_t        receiver_name;  /* the receive port name */
                        struct ipc_kmsg         *kmsg;          /* received message */
                        mach_port_seqno_t       seqno;          /* seqno of recvd message */
                        mach_msg_continue_t     continuation;
@@ -289,15 +333,10 @@ struct thread {
        mach_port_t ith_rpc_reply;                      /* reply port for kernel RPCs */
 
        /* Ast/Halt data structures */
-       vm_offset_t                     recover;                /* page fault recover(copyin/out) */
-       int                                     ref_count;              /* number of references to me */
+       vm_offset_t                                     recover;                /* page fault recover(copyin/out) */
+       uint32_t                                        ref_count;              /* number of references to me */
 
-       /* Processor set info */
-       queue_chain_t           pset_threads;   /* list of all threads in pset */
-#if    MACH_HOST
-       boolean_t                       may_assign;             /* may assignment change? */
-       boolean_t                       assign_active;  /* waiting for may_assign */
-#endif /* MACH_HOST */
+       queue_chain_t                           threads;                /* global list of all threads */
 
        /* Activation */
                queue_chain_t                   task_threads;
@@ -309,7 +348,7 @@ struct thread {
                struct task                             *task;
                vm_map_t                                map;
 
-               decl_mutex_data(,mutex)
+               decl_lck_mtx_data(,mutex)
 
                /* Kernel holds on this thread  */
                int                                             suspend_count;
@@ -322,12 +361,10 @@ struct thread {
 
                /* Miscellaneous bits guarded by mutex */
                uint32_t
-               /* Indicates that the thread has not been terminated */
-                                               active:1,
-
-          /* Indicates that the thread has been started after creation */
-                                               started:1,
-                                               :0;
+                       active:1,                               /* Thread is active and has not been terminated */
+                       started:1,                              /* Thread has been started after creation */
+                       static_param:1,                 /* Disallow policy parameter changes */
+                       :0;
 
                /* Return Handlers */
                struct ReturnHandler {
@@ -345,16 +382,51 @@ struct thread {
                /* Owned ulocks (a lock set element) */
                queue_head_t                    held_ulocks;
 
-#if    MACH_PROF
-               /* Profiling */
-               boolean_t                               profiled;
-               boolean_t                               profiled_own;
-               struct prof_data                *profil_buffer;
-#endif /* MACH_PROF */
-
 #ifdef MACH_BSD
                void                                    *uthread;
 #endif
+
+#if CONFIG_DTRACE
+               uint32_t t_dtrace_predcache;/* DTrace per thread predicate value hint */
+               int64_t t_dtrace_tracing;       /* Thread time under dtrace_probe() */
+               int64_t t_dtrace_vtime;
+#endif
+
+               uint32_t    t_page_creation_count;
+               clock_sec_t t_page_creation_time;
+
+#define T_CHUD_MARKED           0x01          /* this thread is marked by CHUD */
+#define T_IN_CHUD               0x02          /* this thread is already in a CHUD handler */
+#define THREAD_PMC_FLAG         0x04          /* Bit in "t_chud" signifying PMC interest */    
+#define T_AST_CALLSTACK         0x08          /* Thread scheduled to dump a
+                                              * callstack on its next
+                                              * AST */
+#define T_AST_NAME              0x10          /* Thread scheduled to dump
+                                              * its name on its next
+                                              * AST */
+#define T_NAME_DONE             0x20          /* Thread has previously
+                                              * recorded its name */
+
+               uint32_t t_chud;        /* CHUD flags, used for Shark */
+               uint32_t chud_c_switch; /* last dispatch detection */
+
+               integer_t mutex_count;  /* total count of locks held */
+
+               uint64_t thread_id;     /*system wide unique thread-id*/
+
+       /* Statistics accumulated per-thread and aggregated per-task */
+       uint32_t                syscalls_unix;
+       uint32_t                syscalls_mach;
+       ledger_t                t_ledger;
+       ledger_t                t_threadledger; /* per thread ledger */
+       struct process_policy ext_appliedstate; /* externally applied actions */
+       struct process_policy ext_policystate;  /* externally defined process policy states*/
+       struct process_policy appliedstate;             /* self applied actions */
+       struct process_policy policystate;              /* process wide policy states */
+#if CONFIG_EMBEDDED
+       task_watch_t *  taskwatch;              /* task watch */
+       integer_t               saved_importance;               /* saved task-relative importance */
+#endif /* CONFIG_EMBEDDED */
 };
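
A minimal sketch of the runq locking protocol described in the NOTE at the top of the structure (the helper name thread_is_enqueued is invented for illustration; splsched()/splx() bracket the thread lock as usual in osfmk):

/* Illustrative only: the runq field is stable under the thread lock only
 * while it is PROCESSOR_NULL; once the thread is enqueued, the owning run
 * queue's lock covers it instead. */
static boolean_t
thread_is_enqueued(thread_t thread)
{
	spl_t		s;
	boolean_t	on_runq;

	s = splsched();
	thread_lock(thread);
	on_runq = (thread->runq != PROCESSOR_NULL);
	thread_unlock(thread);
	splx(s);

	return (on_runq);
}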
 
 #define ith_state              saved.receive.state
@@ -363,6 +435,7 @@ struct thread {
 #define ith_msize              saved.receive.msize
 #define        ith_option              saved.receive.option
 #define ith_scatter_list_size  saved.receive.slist_size
+#define ith_receiver_name      saved.receive.receiver_name
 #define ith_continuation       saved.receive.continuation
 #define ith_kmsg               saved.receive.kmsg
 #define ith_seqno              saved.receive.seqno
@@ -373,14 +446,14 @@ struct thread {
 #define sth_result             saved.sema.result
 #define sth_continuation       saved.sema.continuation
 
-extern void                    thread_bootstrap(void);
+extern void                    thread_bootstrap(void) __attribute__((section("__TEXT, initcode")));
 
-extern void                    thread_init(void);
+extern void                    thread_init(void) __attribute__((section("__TEXT, initcode")));
 
 extern void                    thread_daemon_init(void);
 
 #define        thread_reference_internal(thread)       \
-                       hw_atomic_add(&(thread)->ref_count, 1)
+                       (void)hw_atomic_add(&(thread)->ref_count, 1)
 
 #define thread_deallocate_internal(thread)     \
                        hw_atomic_sub(&(thread)->ref_count, 1)
@@ -388,7 +461,7 @@ extern void                 thread_daemon_init(void);
 #define thread_reference(thread)                                       \
 MACRO_BEGIN                                                                                    \
        if ((thread) != THREAD_NULL)                                    \
-               thread_reference_internal(thread);                      \
+               thread_reference_internal(thread);              \
 MACRO_END
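
For context, a hedged sketch of the take/drop pairing these atomic reference macros back (the wrapper function is illustrative, not part of this header):

/* Illustrative only: hold a reference while operating on a thread outside
 * the lock that produced the pointer; the thread is freed on the last drop. */
static void
use_thread_safely(thread_t thread)
{
	thread_reference(thread);	/* hw_atomic_add(&thread->ref_count, 1) */
	/* ... operate on the thread ... */
	thread_deallocate(thread);	/* released on the final deallocate */
}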
 
 extern void                    thread_deallocate(
@@ -399,6 +472,9 @@ extern void                 thread_terminate_self(void);
 extern kern_return_t   thread_terminate_internal(
                                                        thread_t                thread);
 
+extern void                    thread_start_internal(
+                                                       thread_t                        thread) __attribute__ ((noinline));
+
 extern void                    thread_terminate_enqueue(
                                                thread_t                thread);
 
@@ -411,48 +487,37 @@ extern void                       thread_hold(
 extern void                    thread_release(
                                                thread_t        thread);
 
+
 #define        thread_lock_init(th)    simple_lock_init(&(th)->sched_lock, 0)
 #define thread_lock(th)                        simple_lock(&(th)->sched_lock)
 #define thread_unlock(th)              simple_unlock(&(th)->sched_lock)
-#define thread_lock_try(th)            simple_lock_try(&(th)->sched_lock)
-
-#define thread_should_halt_fast(thread)                (!(thread)->active)
 
 #define wake_lock_init(th)             simple_lock_init(&(th)->wake_lock, 0)
 #define wake_lock(th)                  simple_lock(&(th)->wake_lock)
 #define wake_unlock(th)                        simple_unlock(&(th)->wake_lock)
-#define wake_lock_try(th)              simple_lock_try(&(th)->wake_lock)
+
+#define thread_should_halt_fast(thread)                (!(thread)->active)
 
 extern void                            stack_alloc(
                                                        thread_t                thread);
 
+extern void                    stack_handoff(
+                                                       thread_t                from,
+                                                       thread_t                to);
+
 extern void                            stack_free(
                                                        thread_t                thread);
 
-extern void                            stack_free_stack(
-                                                       vm_offset_t             stack);
+extern void                            stack_free_reserved(
+                                                       thread_t                thread);
 
 extern boolean_t               stack_alloc_try(
                                                        thread_t            thread);
 
 extern void                            stack_collect(void);
 
-extern void                            stack_init(void);
+extern void                            stack_init(void) __attribute__((section("__TEXT, initcode")));
 
-extern kern_return_t    thread_state_initialize(
-                                                       thread_t                                thread);
-
-extern kern_return_t   thread_setstatus(
-                                                       thread_t                                thread,
-                                                       int                                             flavor,
-                                                       thread_state_t                  tstate,
-                                                       mach_msg_type_number_t  count);
-
-extern kern_return_t   thread_getstatus(
-                                                       thread_t                                thread,
-                                                       int                                             flavor,
-                                                       thread_state_t                  tstate,
-                                                       mach_msg_type_number_t  *count);
 
 extern kern_return_t   thread_info_internal(
                                                        thread_t                                thread,
@@ -523,6 +588,8 @@ extern void                         machine_thread_init(void);
 extern kern_return_t   machine_thread_create(
                                                        thread_t                thread,
                                                        task_t                  task);
+extern void            machine_thread_switch_addrmode(
+                                                      thread_t                 thread);
 
 extern void                machine_thread_destroy(
                                                        thread_t                thread);
@@ -530,14 +597,15 @@ extern void                   machine_thread_destroy(
 extern void                            machine_set_current_thread(
                                                        thread_t                        thread);
 
-extern void                    machine_thread_terminate_self(void);
-
 extern kern_return_t   machine_thread_get_kern_state(
                                                        thread_t                                thread,
                                                        thread_flavor_t                 flavor,
                                                        thread_state_t                  tstate,
                                                        mach_msg_type_number_t  *count);
 
+extern kern_return_t   machine_thread_inherit_taskwide(
+                                                       thread_t                thread,
+                                                       task_t                  parent_task);
 
 /*
  * XXX Funnel locks XXX
@@ -553,9 +621,9 @@ struct funnel_lock {
 
 typedef struct ReturnHandler           ReturnHandler;
 
-#define        thread_mtx_lock(thread)                 mutex_lock(&(thread)->mutex)
-#define        thread_mtx_try(thread)                  mutex_try(&(thread)->mutex)
-#define        thread_mtx_unlock(thread)               mutex_unlock(&(thread)->mutex)
+#define        thread_mtx_lock(thread)                 lck_mtx_lock(&(thread)->mutex)
+#define        thread_mtx_try(thread)                  lck_mtx_try_lock(&(thread)->mutex)
+#define        thread_mtx_unlock(thread)               lck_mtx_unlock(&(thread)->mutex)
 
 extern void                    act_execute_returnhandlers(void);
 
@@ -566,6 +634,17 @@ extern void                        special_handler(
                                                ReturnHandler   *rh,
                                                thread_t                thread);
 
+void act_machine_sv_free(thread_t, int);
+
+vm_offset_t                    min_valid_stack_address(void);
+vm_offset_t                    max_valid_stack_address(void);
+
+extern void            funnel_lock(
+                                               struct funnel_lock      *lock);
+
+extern void            funnel_unlock(
+                                               struct funnel_lock      *lock);
+
 #else  /* MACH_KERNEL_PRIVATE */
 
 __BEGIN_DECLS
@@ -584,33 +663,22 @@ __END_DECLS
 
 #ifdef KERNEL_PRIVATE
 
-typedef struct funnel_lock             funnel_t;
-
-#ifdef MACH_KERNEL_PRIVATE
-
-extern void            funnel_lock(
-                                               funnel_t        *lock);
-
-extern void            funnel_unlock(
-                                               funnel_t        *lock);
-
-vm_offset_t                    min_valid_stack_address(void);
-vm_offset_t                    max_valid_stack_address(void);
-
-#endif /* MACH_KERNEL_PRIVATE */
-
 __BEGIN_DECLS
 
-extern funnel_t                *thread_funnel_get(void);
-
-extern boolean_t       thread_funnel_set(
-                                               funnel_t        *lock,
-                                               boolean_t        funneled);
+#if defined(__i386__)
 
 extern thread_t                kernel_thread(
                                                task_t          task,
                                                void            (*start)(void));
 
+#endif /* defined(__i386__) */
+
+extern uint64_t                        thread_tid(
+                                               thread_t thread);
+
+extern uint64_t                        thread_dispatchqaddr(
+                                               thread_t thread);
+
 __END_DECLS
 
 #endif /* KERNEL_PRIVATE */
@@ -619,9 +687,47 @@ __BEGIN_DECLS
 
 #ifdef XNU_KERNEL_PRIVATE
 
+extern kern_return_t    thread_state_initialize(
+                                                       thread_t                                thread);
+
+extern kern_return_t   thread_setstatus(
+                                                       thread_t                                thread,
+                                                       int                                             flavor,
+                                                       thread_state_t                  tstate,
+                                                       mach_msg_type_number_t  count);
+
+extern kern_return_t   thread_getstatus(
+                                                       thread_t                                thread,
+                                                       int                                             flavor,
+                                                       thread_state_t                  tstate,
+                                                       mach_msg_type_number_t  *count);
+
+extern kern_return_t   thread_create_workq(
+                                                       task_t                  task,
+                                                       thread_continue_t       thread_return,
+                                                       thread_t                *new_thread);
+
+extern void    thread_yield_internal(
+       mach_msg_timeout_t      interval);
+
 /*
- * XXX Funnel locks XXX
+ * Thread-private CPU limits: apply a private CPU limit to this thread only. Available actions are:
+ * 
+ * 1) Block. Prevent CPU consumption of the thread from exceeding the limit.
+ * 2) Exception. Generate a resource consumption exception when the limit is exceeded.
  */
+#define THREAD_CPULIMIT_BLOCK          0x1
+#define THREAD_CPULIMIT_EXCEPTION      0x2
+
+struct _thread_ledger_indices {
+       int cpu_time;
+};
+
+extern struct _thread_ledger_indices thread_ledgers;
+
+extern int thread_set_cpulimit(int action, uint8_t percentage, uint64_t interval_ns);
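
A hedged usage sketch of the new interface (the 25% and 10 ms values are arbitrary examples, and NSEC_PER_MSEC comes from mach/clock_types.h):

/* Illustrative only: throttle the calling thread to 25% of a CPU over a
 * 10 ms window, blocking it whenever the limit is exceeded. */
if (thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, 25, 10 * NSEC_PER_MSEC) != 0)
	;	/* handle failure */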
+
+typedef struct funnel_lock             funnel_t;
 
 #define THR_FUNNEL_NULL (funnel_t *)0
 
@@ -631,6 +737,12 @@ extern funnel_t             *funnel_alloc(
 extern void                    funnel_free(
                                                funnel_t        *lock);
 
+extern funnel_t                *thread_funnel_get(void);
+
+extern boolean_t       thread_funnel_set(
+                                               funnel_t        *lock,
+                                               boolean_t        funneled);
+
 extern void                    thread_read_times(
                                                thread_t                thread,
                                                time_value_t    *user_time,
@@ -648,34 +760,129 @@ extern void                      thread_setentrypoint(
                                                thread_t                thread,
                                                mach_vm_offset_t        entry);
 
+extern kern_return_t   thread_setsinglestep(
+                                               thread_t                thread,
+                                               int                     on);
+
+extern kern_return_t   thread_userstack(
+                                               thread_t,
+                                               int,
+                                               thread_state_t,
+                                               unsigned int,
+                                               mach_vm_offset_t *,
+                                               int *);
+
+extern kern_return_t   thread_entrypoint(
+                                               thread_t,
+                                               int,
+                                               thread_state_t,
+                                               unsigned int,
+                                               mach_vm_offset_t *); 
+
+extern kern_return_t   thread_userstackdefault(
+                                               thread_t,
+                                               mach_vm_offset_t *);
+
 extern kern_return_t   thread_wire_internal(
                                                        host_priv_t             host_priv,
                                                        thread_t                thread,
                                                        boolean_t               wired,
                                                        boolean_t               *prev_state);
 
-/* JMM - These are only temporary */
-extern boolean_t       is_thread_running(thread_t); /* True is TH_RUN */
-extern boolean_t       is_thread_idle(thread_t); /* True is TH_IDLE */
-
 extern kern_return_t   thread_dup(thread_t);
 
+typedef void   (*sched_call_t)(
+                                       int                             type,
+                                       thread_t                thread);
+
+#define SCHED_CALL_BLOCK               0x1
+#define SCHED_CALL_UNBLOCK             0x2
+
+extern void            thread_sched_call(
+                                       thread_t                thread,
+                                       sched_call_t    call);
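
A minimal sketch of the callback shape (the counter and function name are invented for illustration): the scheduler invokes the registered call with SCHED_CALL_BLOCK / SCHED_CALL_UNBLOCK as the thread blocks and unblocks.

/* Illustrative only: count how often a thread blocks. */
static volatile uint32_t my_block_count;

static void
my_sched_call(int type, thread_t thread)
{
	(void)thread;			/* unused in this sketch */
	if (type == SCHED_CALL_BLOCK)
		(void)hw_atomic_add(&my_block_count, 1);
}

/* Registered with:	thread_sched_call(thread, my_sched_call);	*/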
+
+extern void            thread_static_param(
+                                       thread_t                thread,
+                                       boolean_t               state);
+
+extern kern_return_t   thread_policy_set_internal(
+                                       thread_t                thread,
+                                       thread_policy_flavor_t  flavor,
+                                       thread_policy_t         policy_info,
+                                       mach_msg_type_number_t  count);
+
+
 extern task_t  get_threadtask(thread_t);
+#define thread_is_64bit(thd)   \
+       task_has_64BitAddr(get_threadtask(thd))
+
 
 extern void            *get_bsdthread_info(thread_t);
 extern void            set_bsdthread_info(thread_t, void *);
-extern void            *uthread_alloc(task_t, thread_t);
-extern void            uthread_free(task_t, void *, void *); 
+extern void            *uthread_alloc(task_t, thread_t, int);
+extern void            uthread_cleanup(task_t, void *, void *); 
+extern void            uthread_zone_free(void *); 
+extern void            uthread_cred_free(void *); 
 
 extern boolean_t       thread_should_halt(
                                                thread_t                thread);
 
+extern boolean_t       thread_should_abort(
+                                               thread_t);
+
+extern int is_64signalregset(void);
+
+void act_set_apc(thread_t);
+void act_set_kperf(thread_t);
+
+extern uint32_t dtrace_get_thread_predcache(thread_t);
+extern int64_t dtrace_get_thread_vtime(thread_t);
+extern int64_t dtrace_get_thread_tracing(thread_t);
+extern boolean_t dtrace_get_thread_reentering(thread_t);
+extern vm_offset_t dtrace_get_kernel_stack(thread_t);
+extern void dtrace_set_thread_predcache(thread_t, uint32_t);
+extern void dtrace_set_thread_vtime(thread_t, int64_t);
+extern void dtrace_set_thread_tracing(thread_t, int64_t);
+extern void dtrace_set_thread_reentering(thread_t, boolean_t);
+extern vm_offset_t dtrace_set_thread_recover(thread_t, vm_offset_t);
+extern void dtrace_thread_bootstrap(void);
+
+extern int64_t dtrace_calc_thread_recent_vtime(thread_t);
+
+
+extern void            thread_set_wq_state32(
+                                             thread_t          thread,
+                                             thread_state_t    tstate);
+
+extern void            thread_set_wq_state64(
+                                             thread_t          thread,
+                                             thread_state_t    tstate);
+
+extern vm_offset_t     kernel_stack_mask;
+extern vm_offset_t     kernel_stack_size;
+extern vm_offset_t     kernel_stack_depth_max;
+
 #endif /* XNU_KERNEL_PRIVATE */
 
+/*! @function kernel_thread_start
+    @abstract Create a kernel thread.
+    @discussion Creates a new kernel thread. The function takes three parameters: a pointer to the function the thread should begin executing, caller-specified data to pass to it, and a reference through which the newly created thread is returned. It returns KERN_SUCCESS on success or an appropriate error code on failure. The caller is responsible for explicitly releasing the reference to the created thread when it is no longer needed, by calling thread_deallocate(new_thread).
+    @param continuation A C-function pointer where the thread will begin execution.
+    @param parameter Caller specified data to be passed to the new thread.
+    @param new_thread Reference to the new thread is returned in this parameter.
+    @result Returns KERN_SUCCESS on success or an appropriate kernel code type.
+*/
+
 extern kern_return_t   kernel_thread_start(
                                                        thread_continue_t       continuation,
                                                        void                            *parameter,
                                                        thread_t                        *new_thread);
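
A hedged usage sketch of the pattern the discussion describes (worker_main and spawn_worker are illustrative names, not part of this interface): the creating context drops the reference returned in new_thread once it no longer needs it.

/* Illustrative only. */
static void
worker_main(void *parameter, wait_result_t wait_result)
{
	/* ... do the work, then exit the thread ... */
	thread_terminate(current_thread());
}

static kern_return_t
spawn_worker(void *arg)
{
	thread_t	new_thread;
	kern_return_t	kr;

	kr = kernel_thread_start(worker_main, arg, &new_thread);
	if (kr != KERN_SUCCESS)
		return (kr);

	thread_deallocate(new_thread);	/* release the creation reference */
	return (KERN_SUCCESS);
}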
+#ifdef KERNEL_PRIVATE
+void thread_set_eager_preempt(thread_t thread);
+void thread_clear_eager_preempt(thread_t thread);
+extern ipc_port_t convert_thread_to_port(thread_t);
+#endif /* KERNEL_PRIVATE */
 
 __END_DECLS