/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
 * @OSF_FREE_COPYRIGHT@
 */

#ifndef _KERN_THREAD_H_
#define _KERN_THREAD_H_

#include <mach/mach_types.h>
#include <mach/message.h>
#include <mach/boolean.h>
-#include <mach/vm_types.h>
-#include <mach/vm_prot.h>
+#include <mach/vm_param.h>
#include <mach/thread_info.h>
#include <mach/thread_status.h>
-#include <kern/cpu_data.h> /* for current_thread */
+#include <mach/exception_types.h>
+
#include <kern/kern_types.h>
-/*
- * Logically, a thread of control consists of two parts:
- * a thread_shuttle, which may migrate during an RPC, and
- * a thread_activation, which remains attached to a task.
- * The thread_shuttle is the larger portion of the two-part thread,
- * and contains scheduling info, messaging support, accounting info,
- * and links to the thread_activation within which the shuttle is
- * currently operating.
- *
- * It might make sense to have the thread_shuttle be a proper sub-structure
- * of the thread, with the thread containing links to both the shuttle and
- * activation. In order to reduce the scope and complexity of source
- * changes and the overhead of maintaining these linkages, we have subsumed
- * the shuttle into the thread, calling it a thread_shuttle.
- *
- * User accesses to threads always come in via the user's thread port,
- * which gets translated to a pointer to the target thread_activation.
- * Kernel accesses intended to effect the entire thread, typically use
- * a pointer to the thread_shuttle (current_thread()) as the target of
- * their operations. This makes sense given that we have subsumed the
- * shuttle into the thread_shuttle, eliminating one set of linkages.
- * Operations effecting only the shuttle may use a thread_shuttle_t
- * to indicate this.
- *
- * The current_act() macro returns a pointer to the current thread_act, while
- * the current_thread() macro returns a pointer to the currently active
- * thread_shuttle (representing the thread in its entirety).
- */
+#include <sys/cdefs.h>
-/*
- * Possible results of thread_block - returned in
- * current_thread()->wait_result.
- */
-#define THREAD_AWAKENED 0 /* normal wakeup */
-#define THREAD_TIMED_OUT 1 /* timeout expired */
-#define THREAD_INTERRUPTED 2 /* interrupted by clear_wait */
-#define THREAD_RESTART 3 /* restart operation entirely */
+#ifdef MACH_KERNEL_PRIVATE
-/*
- * Interruptible flags for assert_wait
- *
- */
-#define THREAD_UNINT 0 /* not interruptible */
-#define THREAD_INTERRUPTIBLE 1 /* may not be restartable */
-#define THREAD_ABORTSAFE 2 /* abortable safely */
-
-#ifdef MACH_KERNEL_PRIVATE
-#include <cpus.h>
-#include <hw_footprint.h>
-#include <mach_host.h>
-#include <mach_prof.h>
-#include <mach_lock_mon.h>
+#include <mach_assert.h>
#include <mach_ldebug.h>
+#include <ipc/ipc_types.h>
+
#include <mach/port.h>
-#include <kern/ast.h>
#include <kern/cpu_number.h>
+#include <kern/smp.h>
#include <kern/queue.h>
-#include <kern/time_out.h>
#include <kern/timer.h>
-#include <kern/lock.h>
+#include <kern/simple_lock.h>
+#include <kern/locks.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
-#include <kern/thread_pool.h>
+#include <mach/sfi_class.h>
#include <kern/thread_call.h>
#include <kern/timer_call.h>
#include <kern/task.h>
+#include <kern/exception.h>
+#include <kern/affinity.h>
+
+#include <kern/waitq.h>
+
#include <ipc/ipc_kmsg.h>
+
+#include <machine/cpu_data.h>
#include <machine/thread.h>
-typedef struct {
- int fnl_type; /* funnel type */
- mutex_t * fnl_mutex; /* underlying mutex for the funnel */
- void * fnl_mtxholder; /* thread (last)holdng mutex */
- void * fnl_mtxrelease; /* thread (last)releasing mutex */
- mutex_t * fnl_oldmutex; /* Mutex before collapsing split funnel */
-} funnel_t;
+struct thread {
+
+#if MACH_ASSERT
+#define THREAD_MAGIC 0x1234ABCDDCBA4321ULL
+ /* Ensure nothing uses &thread as a queue entry */
+ uint64_t thread_magic;
+#endif /* MACH_ASSERT */
-typedef struct thread_shuttle {
/*
- * Beginning of thread_shuttle proper. When the thread is on
- * a wait queue, these three fields are in treated as an un-
- * official union with a wait_queue_element. If you change
- * these, you must change that definition as well.
+ * NOTE: The runq field in the thread structure has an unusual
+ * locking protocol. If its value is PROCESSOR_NULL, then it is
+ * locked by the thread_lock, but if its value is something else
+ * then it is locked by the associated run queue lock. It is
+ * set to PROCESSOR_NULL without holding the thread lock, but the
+ * transition from PROCESSOR_NULL to non-null must be done
+ * under the thread lock and the run queue lock.
+ *
+ * New waitq APIs allow the 'links' and 'runq' fields to be
+ * anywhere in the thread structure.
*/
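/*
 * A sketch of that discipline (illustrative only; rq_lock()/rq_unlock()
 * are hypothetical stand-ins for the associated run queue lock, and the
 * real transitions live in the scheduler):
 *
 *	// PROCESSOR_NULL -> processor: both locks required.
 *	thread_lock(thread);			// at splsched()
 *	rq_lock(rq);
 *	thread->runq = processor;
 *	rq_unlock(rq);
 *	thread_unlock(thread);
 *
 *	// processor -> PROCESSOR_NULL: run queue lock alone; the
 *	// thread lock need not be held for this transition.
 *	rq_lock(rq);
 *	thread->runq = PROCESSOR_NULL;
 *	rq_unlock(rq);
 */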
- queue_chain_t links; /* current run/wait queue links */
- run_queue_t runq; /* run queue p is on SEE BELOW */
- int whichq; /* which queue level p is on */
-
-/*
- * NOTE: The runq field in the thread structure has an unusual
- * locking protocol. If its value is RUN_QUEUE_NULL, then it is
- * locked by the thread_lock, but if its value is something else
- * (i.e. a run_queue) then it is locked by that run_queue's lock.
- */
-
- /* Thread bookkeeping */
- queue_chain_t pset_threads; /* list of all shuttles in proc set */
-
- /* Self-preservation */
- decl_simple_lock_data(,lock) /* scheduling lock (thread_lock()) */
- decl_simple_lock_data(,wake_lock) /* covers wake_active (wake_lock())*/
- decl_mutex_data(,rpc_lock) /* RPC lock (rpc_lock()) */
- int ref_count; /* number of references to me */
-
- vm_offset_t kernel_stack; /* accurate only if the thread is
- not swapped and not executing */
-
- vm_offset_t stack_privilege;/* reserved kernel stack */
-
- /* Blocking information */
- int reason; /* why we blocked */
- event_t wait_event; /* event we are waiting on */
- kern_return_t wait_result; /* outcome of wait -
- may be examined by this thread
- WITHOUT locking */
- wait_queue_t wait_queue; /* wait queue we are currently on */
- queue_chain_t wait_link; /* event's wait queue link */
- boolean_t wake_active; /* Someone is waiting for this
- thread to become suspended */
- int state; /* Thread state: */
- boolean_t preempt; /* Thread is undergoing preemption */
- boolean_t interruptible; /* Thread is interruptible */
-
-#if ETAP_EVENT_MONITOR
- int etap_reason; /* real reason why we blocked */
- boolean_t etap_trace; /* ETAP trace status */
-#endif /* ETAP_EVENT_MONITOR */
-
+ union {
+ queue_chain_t runq_links; /* run queue links */
+ queue_chain_t wait_links; /* wait queue links */
+ };
+
+ processor_t runq; /* run queue assignment */
+
+ event64_t wait_event; /* wait queue event */
+ struct waitq *waitq; /* wait queue this thread is enqueued on */
+
+ /* Data updated during assert_wait/thread_wakeup */
+#if __SMP__
+ decl_simple_lock_data(,sched_lock) /* scheduling lock (thread_lock()) */
+ decl_simple_lock_data(,wake_lock) /* for thread stop / wait (wake_lock()) */
+#endif
+ integer_t options; /* options set by thread itself */
+#define TH_OPT_INTMASK 0x0003 /* interrupt / abort level */
+#define TH_OPT_VMPRIV 0x0004 /* may allocate reserved memory */
+#define TH_OPT_DTRACE 0x0008 /* executing under dtrace_probe */
+#define TH_OPT_SYSTEM_CRITICAL 0x0010 /* Thread must always be allowed to run - even under heavy load */
+#define TH_OPT_PROC_CPULIMIT 0x0020 /* Thread has a task-wide CPU limit applied to it */
+#define TH_OPT_PRVT_CPULIMIT 0x0040 /* Thread has a thread-private CPU limit applied to it */
+#define TH_OPT_IDLE_THREAD 0x0080 /* Thread is a per-processor idle thread */
+#define TH_OPT_GLOBAL_FORCED_IDLE 0x0100 /* Thread performs forced idle for thermal control */
+#define TH_OPT_SCHED_VM_GROUP 0x0200 /* Thread belongs to special scheduler VM group */
+#define TH_OPT_HONOR_QLIMIT 0x0400 /* Thread will honor qlimit while sending mach_msg, regardless of MACH_SEND_ALWAYS */
+#define TH_OPT_SEND_IMPORTANCE 0x0800 /* Thread will allow importance donation from kernel rpc */
+
+ boolean_t wake_active; /* wake event on stop */
+ int at_safe_point; /* thread_abort_safely allowed */
+ ast_t reason; /* why we blocked */
+ uint32_t quantum_remaining;
+ wait_result_t wait_result; /* outcome of wait -
+ * may be examined by this thread
+ * WITHOUT locking */
+ thread_continue_t continuation; /* continue here next dispatch */
+ void *parameter; /* continuation parameter */
+
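/*
 * Continuations let a waiting thread give up its kernel stack and resume
 * at a function on the next dispatch instead of returning through the
 * blocked call chain. A minimal sketch (the event and my_continue are
 * hypothetical; assert_wait()/thread_block_parameter() are the standard
 * sched_prim interfaces):
 *
 *	static void
 *	my_continue(void *param, wait_result_t wresult)
 *	{
 *		// resumes here on a fresh stack after wakeup
 *	}
 *
 *	assert_wait((event_t)&some_event, THREAD_UNINT);
 *	thread_block_parameter(my_continue, param);	// does not return
 */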
+ /* Data updated/used in thread_invoke */
+ vm_offset_t kernel_stack; /* current kernel stack */
+ vm_offset_t reserved_stack; /* reserved kernel stack */
+
+ /* Thread state: */
+ int state;
/*
* Thread states [bits or'ed]
*/
-#define TH_WAIT 0x01 /* thread is queued for waiting */
-#define TH_SUSP 0x02 /* thread has been asked to stop */
-#define TH_RUN 0x04 /* thread is running or on runq */
-#define TH_UNINT 0x08 /* thread is waiting uninteruptibly */
-#define TH_HALTED 0x10 /* thread is halted at clean point ? */
-
-#define TH_ABORT 0x20 /* abort interruptible waits */
-#define TH_SWAPPED_OUT 0x40 /* thread is swapped out */
+#define TH_WAIT 0x01 /* queued for waiting */
+#define TH_SUSP 0x02 /* stopped or requested to stop */
+#define TH_RUN 0x04 /* running or on runq */
+#define TH_UNINT 0x08 /* waiting uninterruptibly */
+#define TH_TERMINATE 0x10 /* halted at termination */
+#define TH_TERMINATE2 0x20 /* added to termination queue */
-#define TH_IDLE 0x80 /* thread is an idle thread */
-
-#define TH_SCHED_STATE (TH_WAIT|TH_SUSP|TH_RUN|TH_UNINT)
-
-#define TH_STACK_HANDOFF 0x0100 /* thread has no kernel stack */
-#define TH_STACK_COMING_IN 0x0200 /* thread is waiting for kernel stack */
-#define TH_STACK_STATE (TH_STACK_HANDOFF | TH_STACK_COMING_IN)
-
-#define TH_TERMINATE 0x0400 /* thread is terminating */
-
- /* Stack handoff information */
- void (*continuation)(void); /* start here next time dispatched */
- int cont_arg; /* XXX continuation argument */
+#define TH_IDLE 0x80 /* idling processor */
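/*
 * The state bits combine; for example, a thread blocked in an
 * uninterruptible wait can be recognized (under the thread lock)
 * with a sketch like:
 *
 *	if ((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT)) {
 *		// waiting, and not interruptible
 *	}
 */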
/* Scheduling information */
- integer_t importance; /* task-relative importance */
- integer_t sched_mode; /* scheduling mode bits */
-#define TH_MODE_REALTIME 0x0001
- struct { /* see mach/thread_policy.h */
- natural_t period;
- natural_t computation;
- natural_t constraint;
+ sched_mode_t sched_mode; /* scheduling mode */
+ sched_mode_t saved_mode; /* saved mode during forced mode demotion */
+
+ /* This thread's contribution to global sched counters */
+ sched_bucket_t th_sched_bucket;
+
+ sfi_class_id_t sfi_class; /* SFI class (XXX Updated on CSW/QE/AST) */
+ sfi_class_id_t sfi_wait_class; /* Currently in SFI wait for this class, protected by sfi_lock */
+
+
+ uint32_t sched_flags; /* current flag bits */
+/* TH_SFLAG_FAIRSHARE_TRIPPED (unused) 0x0001 */
+#define TH_SFLAG_FAILSAFE 0x0002 /* fail-safe has tripped */
+#define TH_SFLAG_THROTTLED 0x0004 /* throttled thread forced to timeshare mode (may be applied in addition to failsafe) */
+#define TH_SFLAG_DEMOTED_MASK (TH_SFLAG_THROTTLED | TH_SFLAG_FAILSAFE) /* saved_mode contains previous sched_mode */
+
+#define TH_SFLAG_PROMOTED 0x0008 /* sched pri has been promoted */
+#define TH_SFLAG_ABORT 0x0010 /* abort interruptible waits */
+#define TH_SFLAG_ABORTSAFELY 0x0020 /* ... but only those at safe point */
+#define TH_SFLAG_ABORTED_MASK (TH_SFLAG_ABORT | TH_SFLAG_ABORTSAFELY)
+#define TH_SFLAG_DEPRESS 0x0040 /* normal depress yield */
+#define TH_SFLAG_POLLDEPRESS 0x0080 /* polled depress yield */
+#define TH_SFLAG_DEPRESSED_MASK (TH_SFLAG_DEPRESS | TH_SFLAG_POLLDEPRESS)
+/* unused TH_SFLAG_PRI_UPDATE 0x0100 */
+#define TH_SFLAG_EAGERPREEMPT 0x0200 /* Any preemption of this thread should be treated as if AST_URGENT applied */
+#define TH_SFLAG_RW_PROMOTED 0x0400 /* sched pri has been promoted due to blocking with RW lock held */
+/* unused TH_SFLAG_THROTTLE_DEMOTED 0x0800 */
+#define TH_SFLAG_WAITQ_PROMOTED 0x1000 /* sched pri promoted from waitq wakeup (generally for IPC receive) */
+#define TH_SFLAG_PROMOTED_MASK (TH_SFLAG_PROMOTED | TH_SFLAG_RW_PROMOTED | TH_SFLAG_WAITQ_PROMOTED)
+
+#define TH_SFLAG_RW_PROMOTED_BIT (10) /* 0x400 */
+
+ int16_t sched_pri; /* scheduled (current) priority */
+ int16_t base_pri; /* base priority */
+ int16_t max_priority; /* copy of max base priority */
+ int16_t task_priority; /* copy of task base priority */
+
+#if defined(CONFIG_SCHED_GRRR)
+#if 0
+ uint16_t grrr_deficit; /* fixed point (1/1000th quantum) fractional deficit */
+#endif
+#endif
+
+ int16_t promotions; /* level of promotion */
+ int16_t pending_promoter_index;
+ uint32_t ref_count; /* number of references to me */
+ void *pending_promoter[2];
+
+ uint32_t rwlock_count; /* Number of lck_rw_t locks held by thread */
+
+ integer_t importance; /* task-relative importance */
+ uint32_t was_promoted_on_wakeup;
+
+ /* Priority depression expiration */
+ integer_t depress_timer_active;
+ timer_call_data_t depress_timer;
+ /* real-time parameters */
+ struct { /* see mach/thread_policy.h */
+ uint32_t period;
+ uint32_t computation;
+ uint32_t constraint;
boolean_t preemptible;
+ uint64_t deadline;
} realtime;
- integer_t priority; /* base priority */
- integer_t sched_pri; /* scheduled (current) priority */
- integer_t depress_priority; /* priority to restore */
- integer_t max_priority;
-
- natural_t cpu_usage; /* exp. decaying cpu usage [%cpu] */
- natural_t sched_usage; /* load-weighted cpu usage [sched] */
- natural_t sched_stamp; /* last time priority was updated */
- natural_t sleep_stamp; /* last time in TH_WAIT state */
-
- /* 'Obsolete' stuff that cannot be removed yet */
- integer_t policy;
- integer_t sp_state;
- integer_t unconsumed_quantum;
-
- /* VM global variables */
- boolean_t vm_privilege; /* can use reserved memory? */
- vm_offset_t recover; /* page fault recovery (copyin/out) */
+ uint64_t last_run_time; /* time when thread was switched away from */
+ uint64_t last_made_runnable_time; /* time when thread was unblocked or preempted */
+
+#if defined(CONFIG_SCHED_MULTIQ)
+ sched_group_t sched_group;
+#endif /* defined(CONFIG_SCHED_MULTIQ) */
+
+ /* Data used during setrun/dispatch */
+ timer_data_t system_timer; /* system mode timer */
+ processor_t bound_processor; /* bound to a processor? */
+ processor_t last_processor; /* processor last dispatched on */
+ processor_t chosen_processor; /* Where we want to run this thread */
+
+ /* Fail-safe computation since last unblock or qualifying yield */
+ uint64_t computation_metered;
+ uint64_t computation_epoch;
+ uint64_t safe_release; /* when to release fail-safe */
+
+ /* Call out from scheduler */
+ void (*sched_call)(
+ int type,
+ thread_t thread);
+#if defined(CONFIG_SCHED_PROTO)
+ uint32_t runqueue_generation; /* last time runqueue was drained */
+#endif
+
+ /* Statistics and timesharing calculations */
+#if defined(CONFIG_SCHED_TIMESHARE_CORE)
+ natural_t sched_stamp; /* last scheduler tick */
+ natural_t sched_usage; /* timesharing cpu usage [sched] */
+ natural_t pri_shift; /* usage -> priority from pset */
+ natural_t cpu_usage; /* instrumented cpu usage [%cpu] */
+ natural_t cpu_delta; /* accumulated cpu_usage delta */
+#endif /* CONFIG_SCHED_TIMESHARE_CORE */
+
+ uint32_t c_switch; /* total context switches */
+ uint32_t p_switch; /* total processor switches */
+ uint32_t ps_switch; /* total pset switches */
+
+ integer_t mutex_count; /* total count of locks held */
+ /* Timing data structures */
+ int precise_user_kernel_time; /* precise user/kernel enabled for this thread */
+ timer_data_t user_timer; /* user mode timer */
+ uint64_t user_timer_save; /* saved user timer value */
+ uint64_t system_timer_save; /* saved system timer value */
+ uint64_t vtimer_user_save; /* saved values for vtimers */
+ uint64_t vtimer_prof_save;
+ uint64_t vtimer_rlim_save;
+ uint64_t vtimer_qos_save;
+
+#if CONFIG_SCHED_SFI
+ /* Timing for wait state */
+ uint64_t wait_sfi_begin_time; /* start time for thread waiting in SFI */
+#endif
- /* IPC data structures */
+ /* Timed wait expiration */
+ timer_call_data_t wait_timer;
+ integer_t wait_timer_active;
+ boolean_t wait_timer_is_set;
- struct ipc_kmsg_queue ith_messages;
- mach_port_t ith_mig_reply; /* reply port for mig */
- mach_port_t ith_rpc_reply; /* reply port for kernel RPCs */
+ /*
+ * Processor/cache affinity
+ * - affinity_threads links task threads with the same affinity set
+ */
+ affinity_set_t affinity_set;
+ queue_chain_t affinity_threads;
/* Various bits of stashed state */
union {
struct {
- mach_msg_return_t state; /* receive state */
- ipc_object_t object; /* object received on */
- mach_msg_header_t *msg; /* receive buffer pointer */
- mach_msg_size_t msize; /* max size for recvd msg */
- mach_msg_option_t option; /* options for receive */
- mach_msg_size_t slist_size; /* scatter list size */
- struct ipc_kmsg *kmsg; /* received message */
- mach_port_seqno_t seqno; /* seqno of recvd message */
- void (*continuation)(mach_msg_return_t);
+ mach_msg_return_t state; /* receive state */
+ mach_port_seqno_t seqno; /* seqno of recvd message */
+ ipc_object_t object; /* object received on */
+ mach_vm_address_t msg_addr; /* receive buffer pointer */
+ mach_msg_size_t rsize; /* max size for recvd msg */
+ mach_msg_size_t msize; /* actual size for recvd msg */
+ mach_msg_option_t option; /* options for receive */
+ mach_port_name_t receiver_name; /* the receive port name */
+ union {
+ struct ipc_kmsg *kmsg; /* received message */
+ struct ipc_mqueue *peekq; /* mqueue to peek at */
+ struct {
+ mach_msg_priority_t qos; /* received message qos */
+ mach_msg_priority_t oqos; /* override qos for message */
+ } received_qos;
+ };
+ mach_msg_continue_t continuation;
} receive;
struct {
- struct semaphore *waitsemaphore; /* semaphore ref */
- struct semaphore *signalsemaphore; /* semaphore ref */
- int options; /* semaphore options */
- kern_return_t result; /* primary result */
- void (*continuation)(kern_return_t);
+ struct semaphore *waitsemaphore; /* semaphore ref */
+ struct semaphore *signalsemaphore; /* semaphore ref */
+ int options; /* semaphore options */
+ kern_return_t result; /* primary result */
+ mach_msg_continue_t continuation;
} sema;
struct {
- struct sf_policy *policy; /* scheduling policy */
- int option; /* switch option */
+ int option; /* switch option */
+ boolean_t reenable_workq_callback; /* on entry, callbacks were suspended */
} swtch;
- char *other; /* catch-all for other state */
+ int misc; /* catch-all for other state */
} saved;
- /* Timing data structures */
- timer_data_t user_timer; /* user mode timer */
- timer_data_t system_timer; /* system mode timer */
- timer_data_t depressed_timer;/* depressed priority timer */
- timer_save_data_t user_timer_save; /* saved user timer value */
- timer_save_data_t system_timer_save; /* saved sys timer val. */
- /*** ??? should the next two fields be moved to SP-specific struct?***/
- unsigned int cpu_delta; /* cpu usage since last update */
- unsigned int sched_delta; /* weighted cpu usage since update */
+ /* Structure to save information about guard exception */
+ struct {
+ unsigned type; /* EXC_GUARD reason/type */
+ mach_exception_data_type_t code; /* Exception code */
+ mach_exception_data_type_t subcode; /* Exception sub-code */
+ } guard_exc_info;
- /* Timed wait expiration */
- timer_call_data_t wait_timer;
- integer_t wait_timer_active;
- boolean_t wait_timer_is_set;
+ /* Kernel holds on this thread */
+ int16_t suspend_count;
+ /* User level suspensions */
+ int16_t user_stop_count;
- /* Priority depression expiration */
- thread_call_data_t depress_timer;
+ /* IPC data structures */
+#if IMPORTANCE_INHERITANCE
+ natural_t ith_assertions; /* assertions pending drop */
+#endif
+ struct ipc_kmsg_queue ith_messages; /* messages to reap */
+ mach_port_t ith_rpc_reply; /* reply port for kernel RPCs */
/* Ast/Halt data structures */
- boolean_t active; /* how alive is the thread */
-
- /* Processor data structures */
- processor_set_t processor_set; /* assigned processor set */
-#if NCPUS > 1
- processor_t bound_processor; /* bound to processor ?*/
-#endif /* NCPUS > 1 */
-#if MACH_HOST
- boolean_t may_assign; /* may assignment change? */
- boolean_t assign_active; /* someone waiting for may_assign */
-#endif /* MACH_HOST */
-
-#if XKMACHKERNEL
- int xk_type;
-#endif /* XKMACHKERNEL */
-
-#if NCPUS > 1
- processor_t last_processor; /* processor this last ran on */
-#if MACH_LOCK_MON
- unsigned lock_stack; /* number of locks held */
-#endif /* MACH_LOCK_MON */
-#endif /* NCPUS > 1 */
-
- int at_safe_point; /* thread_abort_safely allowed */
- int funnel_state;
-#define TH_FN_OWNED 0x1 /* we own the funnel lock */
-#define TH_FN_REFUNNEL 0x2 /* must reaquire funnel lock when unblocking */
- funnel_t *funnel_lock;
-#if MACH_LDEBUG
- /*
- * Debugging: track acquired mutexes and locks.
- * Because a thread can block while holding such
- * synchronizers, we think of the thread as
- * "owning" them.
- */
-#define MUTEX_STACK_DEPTH 20
-#define LOCK_STACK_DEPTH 20
- mutex_t *mutex_stack[MUTEX_STACK_DEPTH];
- lock_t *lock_stack[LOCK_STACK_DEPTH];
- unsigned int mutex_stack_index;
- unsigned int lock_stack_index;
- unsigned mutex_count; /* XXX to be deleted XXX */
- boolean_t kthread; /* thread is a kernel thread */
-#endif /* MACH_LDEBUG */
+ vm_offset_t recover; /* page fault recovery (copyin/out) */
+
+ queue_chain_t threads; /* global list of all threads */
+
+ /* Activation */
+ queue_chain_t task_threads;
+
+ /* Task membership */
+ struct task *task;
+ vm_map_t map;
+
+ decl_lck_mtx_data(,mutex)
+
+
+ /* Pending thread ast(s) */
+ ast_t ast;
+
+ /* Miscellaneous bits guarded by mutex */
+ uint32_t
+ active:1, /* Thread is active and has not been terminated */
+ started:1, /* Thread has been started after creation */
+ static_param:1, /* Disallow policy parameter changes */
+ inspection:1, /* TRUE when task is being inspected by crash reporter */
+ policy_reset:1, /* Disallow policy parameter changes on terminating threads */
+ suspend_parked:1, /* thread parked in thread_suspended */
+ corpse_dup:1, /* TRUE when thread is an inactive duplicate in a corpse */
+ :0;
+
+ /* Ports associated with this thread */
+ struct ipc_port *ith_self; /* not a right, doesn't hold ref */
+ struct ipc_port *ith_sself; /* a send right */
+ struct exception_action *exc_actions;
+
+#ifdef MACH_BSD
+ void *uthread;
+#endif
+
+#if CONFIG_DTRACE
+ uint32_t t_dtrace_flags; /* DTrace thread states */
+#define TH_DTRACE_EXECSUCCESS 0x01
+ uint32_t t_dtrace_predcache;/* DTrace per thread predicate value hint */
+ int64_t t_dtrace_tracing; /* Thread time under dtrace_probe() */
+ int64_t t_dtrace_vtime;
+#endif
+
+ clock_sec_t t_page_creation_time;
+ uint32_t t_page_creation_count;
+ uint32_t t_page_creation_throttled;
+#if (DEVELOPMENT || DEBUG)
+ uint64_t t_page_creation_throttled_hard;
+ uint64_t t_page_creation_throttled_soft;
+#endif /* DEVELOPMENT || DEBUG */
+
+#ifdef KPERF
+/* The high 7 bits are the number of frames to sample of a user callstack. */
+#define T_KPERF_CALLSTACK_DEPTH_OFFSET (25)
+#define T_KPERF_SET_CALLSTACK_DEPTH(DEPTH) (((uint32_t)(DEPTH)) << T_KPERF_CALLSTACK_DEPTH_OFFSET)
+#define T_KPERF_GET_CALLSTACK_DEPTH(FLAGS) ((FLAGS) >> T_KPERF_CALLSTACK_DEPTH_OFFSET)
+#endif
+
+#define T_KPERF_AST_CALLSTACK (1U << 0) /* dump a callstack on thread's next AST */
+#define T_KPERF_AST_DISPATCH (1U << 1) /* dump a name on thread's next AST */
+#define T_KPC_ALLOC (1U << 2) /* thread needs a kpc_buf allocated */
+/* only go up to T_KPERF_CALLSTACK_DEPTH_OFFSET - 1 */
+
+#ifdef KPERF
+ uint32_t kperf_flags;
+ uint32_t kperf_pet_gen; /* last generation of PET that sampled this thread */
+ uint32_t kperf_c_switch; /* last dispatch detection */
+ uint32_t kperf_pet_cnt; /* how many times a thread has been sampled by PET */
+#endif
+
+#ifdef KPC
+ /* accumulated performance counters for this thread */
+ uint64_t *kpc_buf;
+#endif
+
+#if HYPERVISOR
+ /* hypervisor virtual CPU object associated with this thread */
+ void *hv_thread_target;
+#endif /* HYPERVISOR */
+
+ uint64_t thread_id; /* system-wide unique thread ID */
+
+ /* Statistics accumulated per-thread and aggregated per-task */
+ uint32_t syscalls_unix;
+ uint32_t syscalls_mach;
+ ledger_t t_ledger;
+ ledger_t t_threadledger; /* per thread ledger */
+#ifdef CONFIG_BANK
+ ledger_t t_bankledger; /* ledger to charge someone */
+ uint64_t t_deduct_bank_ledger_time; /* cpu time to be deducted from bank ledger */
+#endif
+
+ /* policy is protected by the thread mutex */
+ struct thread_requested_policy requested_policy;
+ struct thread_effective_policy effective_policy;
+
+ /* usynch override is protected by the task lock, eventually will be thread mutex */
+ struct thread_qos_override {
+ struct thread_qos_override *override_next;
+ uint32_t override_contended_resource_count;
+ int16_t override_qos;
+ int16_t override_resource_type;
+ user_addr_t override_resource;
+ } *overrides;
+
+ uint32_t ipc_overrides;
+ uint32_t user_promotions;
+ uint16_t user_promotion_basepri;
+
+ int iotier_override; /* atomic operations to set, cleared on ret to user */
+ io_stat_info_t thread_io_stats; /* per-thread I/O statistics */
+
+
+ uint32_t thread_callout_interrupt_wakeups;
+ uint32_t thread_callout_platform_idle_wakeups;
+ uint32_t thread_timer_wakeups_bin_1;
+ uint32_t thread_timer_wakeups_bin_2;
+ uint16_t thread_tag;
+ uint16_t callout_woken_from_icontext:1,
+ callout_woken_from_platform_idle:1,
+ callout_woke_thread:1,
+ thread_bitfield_unused:13;
+
+ mach_port_name_t ith_voucher_name;
+ ipc_voucher_t ith_voucher;
+#if CONFIG_IOSCHED
+ void *decmp_upl;
+#endif /* CONFIG_IOSCHED */
+
+ /* work interval ID (if any) associated with the thread. Uses thread mutex */
+ uint64_t work_interval_id;
+
+ /*** Machine-dependent state ***/
+ struct machine_thread machine;
+
+#if SCHED_TRACE_THREAD_WAKEUPS
+ uintptr_t thread_wakeup_bt[64];
+#endif
+};
+
+#define ith_state saved.receive.state
+#define ith_object saved.receive.object
+#define ith_msg_addr saved.receive.msg_addr
+#define ith_rsize saved.receive.rsize
+#define ith_msize saved.receive.msize
+#define ith_option saved.receive.option
+#define ith_receiver_name saved.receive.receiver_name
+#define ith_continuation saved.receive.continuation
+#define ith_kmsg saved.receive.kmsg
+#define ith_peekq saved.receive.peekq
+#define ith_qos saved.receive.received_qos.qos
+#define ith_qos_override saved.receive.received_qos.oqos
+#define ith_seqno saved.receive.seqno
+
+#define sth_waitsemaphore saved.sema.waitsemaphore
+#define sth_signalsemaphore saved.sema.signalsemaphore
+#define sth_options saved.sema.options
+#define sth_result saved.sema.result
+#define sth_continuation saved.sema.continuation
+
+#if MACH_ASSERT
+#define assert_thread_magic(thread) assertf((thread)->thread_magic == THREAD_MAGIC, \
+ "bad thread magic 0x%llx for thread %p, expected 0x%llx", \
+ (thread)->thread_magic, (thread), THREAD_MAGIC)
+#else
+#define assert_thread_magic(thread) do { (void)(thread); } while (0)
+#endif
+
+extern void thread_bootstrap(void);
+
+extern void thread_init(void);
+
+extern void thread_daemon_init(void);
+
+#define thread_reference_internal(thread) \
+ (void)hw_atomic_add(&(thread)->ref_count, 1)
+
+#define thread_reference(thread) \
+MACRO_BEGIN \
+ if ((thread) != THREAD_NULL) \
+ thread_reference_internal(thread); \
+MACRO_END
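/*
 * References taken here pair with thread_deallocate(); the usual
 * pattern (sketch, surrounding context hypothetical):
 *
 *	thread_reference(thread);	// keep the thread from being freed
 *	// ... use the thread, possibly after dropping other locks ...
 *	thread_deallocate(thread);	// drop the reference when done
 */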
- /*
- * End of thread_shuttle proper
- */
+extern void thread_deallocate(
+ thread_t thread);
- /*
- * Migration and thread_activation linkage information
- */
- struct thread_activation *top_act; /* "current" thr_act */
+extern void thread_deallocate_safe(
+ thread_t thread);
-} Thread_Shuttle;
+extern void thread_terminate_self(void);
-#define THREAD_SHUTTLE_NULL ((thread_shuttle_t)0)
+extern kern_return_t thread_terminate_internal(
+ thread_t thread);
-#define ith_state saved.receive.state
-#define ith_object saved.receive.object
-#define ith_msg saved.receive.msg
-#define ith_msize saved.receive.msize
-#define ith_option saved.receive.option
-#define ith_scatter_list_size saved.receive.slist_size
-#define ith_continuation saved.receive.continuation
-#define ith_kmsg saved.receive.kmsg
-#define ith_seqno saved.receive.seqno
+extern void thread_start(
+ thread_t thread) __attribute__ ((noinline));
-#define sth_waitsemaphore saved.sema.waitsemaphore
-#define sth_signalsemaphore saved.sema.signalsemaphore
-#define sth_options saved.sema.options
-#define sth_result saved.sema.result
-#define sth_continuation saved.sema.continuation
+extern void thread_start_in_assert_wait(
+ thread_t thread,
+ event_t event,
+ wait_interrupt_t interruptible) __attribute__ ((noinline));
-extern thread_act_t active_kloaded[NCPUS]; /* "" kernel-loaded acts */
-extern vm_offset_t active_stacks[NCPUS]; /* active kernel stacks */
-extern vm_offset_t kernel_stack[NCPUS];
+extern void thread_terminate_enqueue(
+ thread_t thread);
-#ifndef MACHINE_STACK_STASH
-/*
- * MD Macro to fill up global stack state,
- * keeping the MD structure sizes + games private
- */
-#define MACHINE_STACK_STASH(stack) \
-MACRO_BEGIN \
- mp_disable_preemption(); \
- active_stacks[cpu_number()] = (stack); \
- kernel_stack[cpu_number()] = (stack) + KERNEL_STACK_SIZE; \
- mp_enable_preemption(); \
-MACRO_END
-#endif /* MACHINE_STACK_STASH */
+extern void thread_exception_enqueue(
+ task_t task,
+ thread_t thread);
-/*
- * Kernel-only routines
- */
+extern void thread_copy_resource_info(
+ thread_t dst_thread,
+ thread_t src_thread);
-/* Initialize thread module */
-extern void thread_init(void);
+extern void thread_terminate_crashed_threads(void);
-/* Take reference on thread (make sure it doesn't go away) */
-extern void thread_reference(
- thread_t thread);
+extern void thread_stack_enqueue(
+ thread_t thread);
-/* Release reference on thread */
-extern void thread_deallocate(
- thread_t thread);
+extern void thread_hold(
+ thread_t thread);
-/* Set priority of calling thread */
-extern void thread_set_own_priority(
- int priority);
+extern void thread_release(
+ thread_t thread);
-/* Start a thread at specified routine */
-#define thread_start(thread, start) \
- (thread)->continuation = (start)
+extern void thread_corpse_continue(void);
+extern boolean_t thread_is_active(thread_t thread);
-/* Reaps threads waiting to be destroyed */
-extern void thread_reaper(void);
+/* Locking for scheduler state, always acquired with interrupts disabled (splsched()) */
+#if __SMP__
+#define thread_lock_init(th) simple_lock_init(&(th)->sched_lock, 0)
+#define thread_lock(th) simple_lock(&(th)->sched_lock)
+#define thread_unlock(th) simple_unlock(&(th)->sched_lock)
+#define wake_lock_init(th) simple_lock_init(&(th)->wake_lock, 0)
+#define wake_lock(th) simple_lock(&(th)->wake_lock)
+#define wake_unlock(th) simple_unlock(&(th)->wake_lock)
+#else
+#define thread_lock_init(th) do { (void)th; } while(0)
+#define thread_lock(th) do { (void)th; } while(0)
+#define thread_unlock(th) do { (void)th; } while(0)
-#if MACH_HOST
-/* Preclude thread processor set assignement */
-extern void thread_freeze(
- thread_t thread);
+#define wake_lock_init(th) do { (void)th; } while(0)
+#define wake_lock(th) do { (void)th; } while(0)
+#define wake_unlock(th) do { (void)th; } while(0)
+#endif
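/*
 * A typical critical section under these spin locks (sketch): interrupts
 * must be disabled via splsched() before taking the lock, and wake_lock
 * follows the same discipline.
 *
 *	spl_t s = splsched();
 *	thread_lock(thread);
 *	// ... examine or update scheduling state ...
 *	thread_unlock(thread);
 *	splx(s);
 */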
-/* Assign thread to a processor set */
-extern void thread_doassign(
- thread_t thread,
- processor_set_t new_pset,
- boolean_t release_freeze);
+#define thread_should_halt_fast(thread) (!(thread)->active)
-/* Allow thread processor set assignement */
-extern void thread_unfreeze(
- thread_t thread);
+extern void stack_alloc(
+ thread_t thread);
-#endif /* MACH_HOST */
+extern void stack_handoff(
+ thread_t from,
+ thread_t to);
-/* Insure thread always has a kernel stack */
-extern void stack_privilege(
- thread_t thread);
+extern void stack_free(
+ thread_t thread);
-extern void consider_thread_collect(void);
+extern void stack_free_reserved(
+ thread_t thread);
-/*
- * Arguments to specify aggressiveness to thread halt.
- * Can't have MUST_HALT and SAFELY at the same time.
- */
-#define THREAD_HALT_NORMAL 0
-#define THREAD_HALT_MUST_HALT 1 /* no deadlock checks */
-#define THREAD_HALT_SAFELY 2 /* result must be restartable */
+extern boolean_t stack_alloc_try(
+ thread_t thread);
-/*
- * Macro-defined routines
- */
+extern void stack_collect(void);
-#define thread_pcb(th) ((th)->pcb)
+extern void stack_init(void);
-#define thread_lock_init(th) \
- simple_lock_init(&(th)->lock, ETAP_THREAD_LOCK)
-#define thread_lock(th) simple_lock(&(th)->lock)
-#define thread_unlock(th) simple_unlock(&(th)->lock)
-#define thread_should_halt_fast(thread) \
- (!(thread)->top_act || \
- !(thread)->top_act->active || \
- (thread)->top_act->ast & (AST_HALT|AST_TERMINATE))
+extern kern_return_t thread_info_internal(
+ thread_t thread,
+ thread_flavor_t flavor,
+ thread_info_t thread_info_out,
+ mach_msg_type_number_t *thread_info_count);
-#define thread_should_halt(thread) thread_should_halt_fast(thread)
-#define rpc_lock_init(th) mutex_init(&(th)->rpc_lock, ETAP_THREAD_RPC)
-#define rpc_lock(th) mutex_lock(&(th)->rpc_lock)
-#define rpc_lock_try(th) mutex_try(&(th)->rpc_lock)
-#define rpc_unlock(th) mutex_unlock(&(th)->rpc_lock)
-/*
- * Lock to cover wake_active only; like thread_lock(), is taken
- * at splsched(). Used to avoid calling into scheduler with a
- * thread_lock() held. Precedes thread_lock() (and other scheduling-
- * related locks) in the system lock ordering.
- */
-#define wake_lock_init(th) \
- simple_lock_init(&(th)->wake_lock, ETAP_THREAD_WAKE)
-#define wake_lock(th) simple_lock(&(th)->wake_lock)
-#define wake_unlock(th) simple_unlock(&(th)->wake_lock)
-
-static __inline__ vm_offset_t current_stack(void);
-static __inline__ vm_offset_t
-current_stack(void)
-{
- vm_offset_t ret;
-
- mp_disable_preemption();
- ret = active_stacks[cpu_number()];
- mp_enable_preemption();
- return ret;
+extern kern_return_t kernel_thread_create(
+ thread_continue_t continuation,
+ void *parameter,
+ integer_t priority,
+ thread_t *new_thread);
+
+extern kern_return_t kernel_thread_start_priority(
+ thread_continue_t continuation,
+ void *parameter,
+ integer_t priority,
+ thread_t *new_thread);
+
+extern void machine_stack_attach(
+ thread_t thread,
+ vm_offset_t stack);
+
+extern vm_offset_t machine_stack_detach(
+ thread_t thread);
+
+extern void machine_stack_handoff(
+ thread_t old,
+ thread_t new);
+
+extern thread_t machine_switch_context(
+ thread_t old_thread,
+ thread_continue_t continuation,
+ thread_t new_thread);
+
+extern void machine_load_context(
+ thread_t thread);
+
+extern kern_return_t machine_thread_state_initialize(
+ thread_t thread);
+
+extern kern_return_t machine_thread_set_state(
+ thread_t thread,
+ thread_flavor_t flavor,
+ thread_state_t state,
+ mach_msg_type_number_t count);
+
+extern kern_return_t machine_thread_get_state(
+ thread_t thread,
+ thread_flavor_t flavor,
+ thread_state_t state,
+ mach_msg_type_number_t *count);
+
+extern kern_return_t machine_thread_dup(
+ thread_t self,
+ thread_t target);
+
+extern void machine_thread_init(void);
+
+extern kern_return_t machine_thread_create(
+ thread_t thread,
+ task_t task);
+extern void machine_thread_switch_addrmode(
+ thread_t thread);
+
+extern void machine_thread_destroy(
+ thread_t thread);
+
+extern void machine_set_current_thread(
+ thread_t thread);
+
+extern kern_return_t machine_thread_get_kern_state(
+ thread_t thread,
+ thread_flavor_t flavor,
+ thread_state_t tstate,
+ mach_msg_type_number_t *count);
+
+extern kern_return_t machine_thread_inherit_taskwide(
+ thread_t thread,
+ task_t parent_task);
+
+extern kern_return_t machine_thread_set_tsd_base(
+ thread_t thread,
+ mach_vm_offset_t tsd_base);
+
+#define thread_mtx_lock(thread) lck_mtx_lock(&(thread)->mutex)
+#define thread_mtx_try(thread) lck_mtx_try_lock(&(thread)->mutex)
+#define thread_mtx_unlock(thread) lck_mtx_unlock(&(thread)->mutex)
+
+extern void thread_apc_ast(thread_t thread);
+
+extern void thread_update_qos_cpu_time(thread_t thread);
+
+void act_machine_sv_free(thread_t, int);
+
+vm_offset_t min_valid_stack_address(void);
+vm_offset_t max_valid_stack_address(void);
+
+static inline uint16_t thread_set_tag_internal(thread_t thread, uint16_t tag) {
+	/* atomically OR the new tag bits in; returns the prior tag value */
+	return __sync_fetch_and_or(&thread->thread_tag, tag);
+}
+
+static inline uint16_t thread_get_tag_internal(thread_t thread) {
+ return thread->thread_tag;
}
-extern void pcb_module_init(void);
+extern void thread_set_options(uint32_t thopt);
+
+#else /* MACH_KERNEL_PRIVATE */
+
+__BEGIN_DECLS
+
+extern thread_t current_thread(void);
+
+extern void thread_reference(
+ thread_t thread);
+
+extern void thread_deallocate(
+ thread_t thread);
+
+__END_DECLS
+
+#endif /* MACH_KERNEL_PRIVATE */
+
+#ifdef KERNEL_PRIVATE
-extern void pcb_init(
- thread_act_t thr_act);
+__BEGIN_DECLS
-extern void pcb_terminate(
- thread_act_t thr_act);
+extern uint64_t thread_dispatchqaddr(
+ thread_t thread);
-extern void pcb_collect(
- thread_act_t thr_act);
+__END_DECLS
-extern void pcb_user_to_kernel(
- thread_act_t thr_act);
+#endif /* KERNEL_PRIVATE */
+
+#ifdef KERNEL
+__BEGIN_DECLS
+
+extern uint64_t thread_tid(thread_t thread);
+
+__END_DECLS
+
+#endif /* KERNEL */
+
+__BEGIN_DECLS
+
+#ifdef XNU_KERNEL_PRIVATE
+
+/*
+ * Thread tags; for easy identification.
+ */
+#define THREAD_TAG_MAINTHREAD 0x1
+#define THREAD_TAG_CALLOUT 0x2
+#define THREAD_TAG_IOWORKLOOP 0x4
+
+#define THREAD_TAG_PTHREAD 0x10
+#define THREAD_TAG_WORKQUEUE 0x20
+
+uint16_t thread_set_tag(thread_t, uint16_t);
+uint16_t thread_get_tag(thread_t);
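/*
 * Tag bits accumulate; thread_set_tag() ORs the new bits in and returns
 * the prior value. Sketch:
 *
 *	thread_set_tag(thread, THREAD_TAG_IOWORKLOOP);
 *	if (thread_get_tag(thread) & THREAD_TAG_IOWORKLOOP) {
 *		// identified as an IOWorkLoop thread
 *	}
 */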
+
+extern kern_return_t thread_state_initialize(
+ thread_t thread);
extern kern_return_t thread_setstatus(
- thread_act_t thr_act,
+ thread_t thread,
int flavor,
thread_state_t tstate,
mach_msg_type_number_t count);
extern kern_return_t thread_getstatus(
- thread_act_t thr_act,
+ thread_t thread,
int flavor,
thread_state_t tstate,
mach_msg_type_number_t *count);
-extern boolean_t stack_alloc_try(
- thread_t thread,
- void (*start_pos)(thread_t));
+extern kern_return_t thread_create_with_continuation(
+ task_t task,
+ thread_t *new_thread,
+ thread_continue_t continuation);
-/* This routine now used only internally */
-extern kern_return_t thread_info_shuttle(
- thread_act_t thr_act,
- thread_flavor_t flavor,
- thread_info_t thread_info_out,
- mach_msg_type_number_t *thread_info_count);
+extern kern_return_t thread_create_waiting(task_t task,
+ thread_continue_t continuation,
+ event_t event,
+ thread_t *new_thread);
-extern void thread_user_to_kernel(
- thread_t thread);
+extern kern_return_t thread_create_workq(
+ task_t task,
+ thread_continue_t thread_return,
+ thread_t *new_thread);
-/* Machine-dependent routines */
-extern void thread_machine_init(void);
+extern kern_return_t thread_create_workq_waiting(
+ task_t task,
+ thread_continue_t thread_return,
+ event_t event,
+ thread_t *new_thread);
-extern void thread_machine_set_current(
- thread_t thread );
+extern void thread_yield_internal(
+ mach_msg_timeout_t interval);
-extern kern_return_t thread_machine_create(
- thread_t thread,
- thread_act_t thr_act,
- void (*start_pos)(thread_t));
+extern void thread_yield_to_preemption(void);
-extern void thread_set_syscall_return(
- thread_t thread,
- kern_return_t retval);
+/*
+ * Thread-private CPU limits: apply a private CPU limit to this thread only. Available actions are:
+ *
+ * 1) Block. Prevent CPU consumption of the thread from exceeding the limit.
+ * 2) Exception. Generate a resource consumption exception when the limit is exceeded.
+ * 3) Disable. Remove any existing CPU limit.
+ */
+#define THREAD_CPULIMIT_BLOCK 0x1
+#define THREAD_CPULIMIT_EXCEPTION 0x2
+#define THREAD_CPULIMIT_DISABLE 0x3
-extern void thread_machine_destroy(
- thread_t thread );
+struct _thread_ledger_indices {
+ int cpu_time;
+};
-extern void thread_machine_flush(
- thread_act_t thr_act);
+extern struct _thread_ledger_indices thread_ledgers;
-extern thread_t kernel_thread_with_priority(
- task_t task,
- integer_t priority,
- void (*start)(void),
- boolean_t start_running);
+extern int thread_get_cpulimit(int *action, uint8_t *percentage, uint64_t *interval_ns);
+extern int thread_set_cpulimit(int action, uint8_t percentage, uint64_t interval_ns);
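/*
 * A sketch of applying and clearing a private CPU limit (this assumes the
 * limit applies to the calling thread and that the interval is expressed
 * in nanoseconds):
 *
 *	// Block once 50% of a CPU is consumed over a 100ms interval.
 *	thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, 50, 100 * NSEC_PER_MSEC);
 *	// ...
 *	thread_set_cpulimit(THREAD_CPULIMIT_DISABLE, 0, 0);
 */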
-extern void funnel_lock(funnel_t *);
+extern void thread_read_times(
+ thread_t thread,
+ time_value_t *user_time,
+ time_value_t *system_time);
-extern void funnel_unlock(funnel_t *);
+extern uint64_t thread_get_runtime_self(void);
-#else /* !MACH_KERNEL_PRIVATE */
+extern void thread_setuserstack(
+ thread_t thread,
+ mach_vm_offset_t user_stack);
-typedef struct __funnel__ funnel_t;
+extern uint64_t thread_adjuserstack(
+ thread_t thread,
+ int adjust);
-extern boolean_t thread_should_halt(thread_t);
+extern void thread_setentrypoint(
+ thread_t thread,
+ mach_vm_offset_t entry);
-#endif /* !MACH_KERNEL_PRIVATE */
+extern kern_return_t thread_set_tsd_base(
+ thread_t thread,
+ mach_vm_offset_t tsd_base);
-#define THR_FUNNEL_NULL (funnel_t *)0
+extern kern_return_t thread_setsinglestep(
+ thread_t thread,
+ int on);
-extern thread_t kernel_thread(
- task_t task,
- void (*start)(void));
+extern kern_return_t thread_userstack(
+ thread_t,
+ int,
+ thread_state_t,
+ unsigned int,
+ mach_vm_offset_t *,
+ int *,
+ boolean_t);
-extern void thread_terminate_self(void);
+extern kern_return_t thread_entrypoint(
+ thread_t,
+ int,
+ thread_state_t,
+ unsigned int,
+ mach_vm_offset_t *);
+
+extern kern_return_t thread_userstackdefault(
+ mach_vm_offset_t *,
+ boolean_t);
+
+extern kern_return_t thread_wire_internal(
+ host_priv_t host_priv,
+ thread_t thread,
+ boolean_t wired,
+ boolean_t *prev_state);
+
+
+extern kern_return_t thread_dup(thread_t);
+
+extern kern_return_t thread_dup2(thread_t, thread_t);
-extern funnel_t * funnel_alloc(int);
+typedef void (*sched_call_t)(
+ int type,
+ thread_t thread);
-extern funnel_t * thread_funnel_get(void);
+#define SCHED_CALL_BLOCK 0x1
+#define SCHED_CALL_UNBLOCK 0x2
-extern boolean_t thread_funnel_set(funnel_t * fnl, boolean_t funneled);
+extern void thread_sched_call(
+ thread_t thread,
+ sched_call_t call);
-extern boolean_t thread_funnel_merge(funnel_t * fnl, funnel_t * otherfnl);
+extern sched_call_t thread_disable_sched_call(
+ thread_t thread,
+ sched_call_t call);
-extern void thread_set_cont_arg(int);
+extern void thread_reenable_sched_call(
+ thread_t thread,
+ sched_call_t call);
-extern int thread_get_cont_arg(void);
+extern void thread_static_param(
+ thread_t thread,
+ boolean_t state);
+
+extern boolean_t thread_is_static_param(
+ thread_t thread);
+
+extern task_t get_threadtask(thread_t);
+#define thread_is_64bit(thd) \
+ task_has_64BitAddr(get_threadtask(thd))
+
+
+extern void *get_bsdthread_info(thread_t);
+extern void set_bsdthread_info(thread_t, void *);
+extern void *uthread_alloc(task_t, thread_t, int);
+extern void uthread_cleanup_name(void *uthread);
+extern void uthread_cleanup(task_t, void *, void *);
+extern void uthread_zone_free(void *);
+extern void uthread_cred_free(void *);
+
+#if PROC_REF_DEBUG
+extern int uthread_get_proc_refcount(void *);
+extern void uthread_reset_proc_refcount(void *);
+extern int proc_ref_tracking_disabled;
+#endif
+
+extern boolean_t thread_should_halt(
+ thread_t thread);
+
+extern boolean_t thread_should_abort(
+ thread_t);
+
+extern int is_64signalregset(void);
+
+extern void act_set_kperf(thread_t);
+extern void set_astledger(thread_t thread);
+extern void act_set_io_telemetry_ast(thread_t);
+
+extern uint32_t dtrace_get_thread_predcache(thread_t);
+extern int64_t dtrace_get_thread_vtime(thread_t);
+extern int64_t dtrace_get_thread_tracing(thread_t);
+extern boolean_t dtrace_get_thread_reentering(thread_t);
+extern int dtrace_get_thread_last_cpu_id(thread_t);
+extern vm_offset_t dtrace_get_kernel_stack(thread_t);
+extern void dtrace_set_thread_predcache(thread_t, uint32_t);
+extern void dtrace_set_thread_vtime(thread_t, int64_t);
+extern void dtrace_set_thread_tracing(thread_t, int64_t);
+extern void dtrace_set_thread_reentering(thread_t, boolean_t);
+extern vm_offset_t dtrace_set_thread_recover(thread_t, vm_offset_t);
+extern void dtrace_thread_bootstrap(void);
+extern void dtrace_thread_didexec(thread_t);
+
+extern int64_t dtrace_calc_thread_recent_vtime(thread_t);
+
+
+extern kern_return_t thread_set_wq_state32(
+ thread_t thread,
+ thread_state_t tstate);
+
+extern kern_return_t thread_set_wq_state64(
+ thread_t thread,
+ thread_state_t tstate);
+
+extern vm_offset_t kernel_stack_mask;
+extern vm_offset_t kernel_stack_size;
+extern vm_offset_t kernel_stack_depth_max;
+
+void guard_ast(thread_t thread);
+extern void fd_guard_ast(thread_t thread);
+extern void mach_port_guard_ast(thread_t thread);
+extern void thread_guard_violation(thread_t thread, unsigned type);
+extern void thread_update_io_stats(thread_t thread, int size, int io_flags);
+
+extern kern_return_t thread_set_voucher_name(mach_port_name_t name);
+extern kern_return_t thread_get_current_voucher_origin_pid(int32_t *pid);
+
+extern void set_thread_rwlock_boost(void);
+extern void clear_thread_rwlock_boost(void);
+
+/*! @function thread_has_thread_name
+ @abstract Checks if a thread has a name.
+ @discussion This function takes one input, a thread, and returns a boolean value indicating if that thread already has a name associated with it.
+ @param th The thread to inspect.
+ @result TRUE if the thread has a name, FALSE otherwise.
+*/
+extern boolean_t thread_has_thread_name(thread_t th);
+
+/*! @function thread_set_thread_name
+ @abstract Set a thread's name.
+ @discussion This function takes two input parameters: a thread to name, and the name to apply to the thread. The name will be attached to the thread in order to better identify the thread.
+ @param th The thread to be named.
+ @param name The name to apply to the thread.
+*/
+extern void thread_set_thread_name(thread_t th, const char* name);
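/*
 * Sketch (the name string is illustrative):
 *
 *	if (!thread_has_thread_name(th))
 *		thread_set_thread_name(th, "com.example.worker");
 */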
+
+extern void thread_enable_send_importance(thread_t thread, boolean_t enable);
+
+/* Get a backtrace for a thread's kernel or user stack (user_p), with pc and optionally
+ * frame pointer (getfp). Returns bytes added to buffer, and kThreadTruncatedBT in
+ * thread_trace_flags if a user page is not present after kdp_lightweight_fault() is
+ * called.
+ */
-/* JMM - These are only temporary */
-extern boolean_t is_thread_running(thread_t); /* True is TH_RUN */
-extern boolean_t is_thread_idle(thread_t); /* True is TH_IDLE */
-extern event_t get_thread_waitevent(thread_t);
-extern kern_return_t get_thread_waitresult(thread_t);
+extern int machine_trace_thread(
+ thread_t thread,
+ char *tracepos,
+ char *tracebound,
+ int nframes,
+ boolean_t user_p,
+ boolean_t getfp,
+ uint32_t *thread_trace_flags);
+
+extern int machine_trace_thread64(thread_t thread,
+ char *tracepos,
+ char *tracebound,
+ int nframes,
+ boolean_t user_p,
+ boolean_t getfp,
+ uint32_t *thread_trace_flags);
+
+#endif /* XNU_KERNEL_PRIVATE */
+
+
+/*! @function kernel_thread_start
+ @abstract Create a kernel thread.
+ @discussion This function takes three input parameters: the function the thread should execute, caller-specified data, and a reference through which the newly created kernel thread is returned. It returns KERN_SUCCESS on success, or an appropriate error code. Note that the caller is responsible for explicitly releasing the reference to the created thread when it is no longer needed, by calling thread_deallocate(new_thread).
+ @param continuation A C-function pointer where the thread will begin execution.
+ @param parameter Caller specified data to be passed to the new thread.
+ @param new_thread Reference to the new thread is returned in this parameter.
+ @result Returns KERN_SUCCESS on success or an appropriate kernel code type.
+*/
+
+extern kern_return_t kernel_thread_start(
+ thread_continue_t continuation,
+ void *parameter,
+ thread_t *new_thread);
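/*
 * A minimal usage sketch (the worker function and its parameter are
 * hypothetical; the thread_deallocate() follows the rule stated above):
 *
 *	static void
 *	worker(void *param, wait_result_t wresult)
 *	{
 *		// ... do work, then block or terminate ...
 *	}
 *
 *	thread_t thread;
 *	kern_return_t kr = kernel_thread_start(worker, my_param, &thread);
 *	if (kr == KERN_SUCCESS)
 *		thread_deallocate(thread);	// release the creation reference
 */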
+#ifdef KERNEL_PRIVATE
+void thread_set_eager_preempt(thread_t thread);
+void thread_clear_eager_preempt(thread_t thread);
+extern ipc_port_t convert_thread_to_port(thread_t);
+extern boolean_t is_vm_privileged(void);
+extern boolean_t set_vm_privilege(boolean_t);
+#endif /* KERNEL_PRIVATE */
+
+__END_DECLS
#endif /* _KERN_THREAD_H_ */