#define TH_OPT_INTMASK 0x03 /* interrupt / abort level */
#define TH_OPT_VMPRIV 0x04 /* may allocate reserved memory */
#define TH_OPT_DTRACE 0x08 /* executing under dtrace_probe */
+#define TH_OPT_SYSTEM_CRITICAL 0x10 /* Thread must always be allowed to run - even under heavy load */
+#define TH_OPT_PROC_CPULIMIT 0x20 /* Thread has a task-wide CPU limit applied to it */
+#define TH_OPT_PRVT_CPULIMIT 0x40 /* Thread has a thread-private CPU limit applied to it */
/* Data updated during assert_wait/thread_wakeup */
decl_simple_lock_data(,sched_lock) /* scheduling lock (thread_lock()) */
#define TH_IDLE 0x80 /* idling processor */
/* Scheduling information */
- integer_t sched_mode; /* scheduling mode bits */
-#define TH_MODE_REALTIME 0x0001 /* time constraints supplied */
-#define TH_MODE_TIMESHARE 0x0002 /* use timesharing algorithm */
-#define TH_MODE_FAILSAFE 0x0004 /* fail-safe has tripped */
-#define TH_MODE_PROMOTED 0x0008 /* sched pri has been promoted */
-#define TH_MODE_ABORT 0x0010 /* abort interruptible waits */
-#define TH_MODE_ABORTSAFELY 0x0020 /* ... but only those at safe point */
-#define TH_MODE_ISABORTED (TH_MODE_ABORT | TH_MODE_ABORTSAFELY)
-#define TH_MODE_DEPRESS 0x0040 /* normal depress yield */
-#define TH_MODE_POLLDEPRESS 0x0080 /* polled depress yield */
-#define TH_MODE_ISDEPRESSED (TH_MODE_DEPRESS | TH_MODE_POLLDEPRESS)
+ sched_mode_t sched_mode; /* scheduling mode */
+ sched_mode_t saved_mode; /* saved mode during forced mode demotion */
+
+ unsigned int sched_flags; /* current flag bits */
+#define TH_SFLAG_FAIRSHARE_TRIPPED 0x0001 /* fairshare scheduling activated */
+#define TH_SFLAG_FAILSAFE 0x0002 /* fail-safe has tripped */
+#define TH_SFLAG_THROTTLED 0x0004 /* owner task in throttled state */
+#define TH_SFLAG_DEMOTED_MASK (TH_SFLAG_THROTTLED | TH_SFLAG_FAILSAFE | TH_SFLAG_FAIRSHARE_TRIPPED)
+
+#define TH_SFLAG_PROMOTED 0x0008 /* sched pri has been promoted */
+#define TH_SFLAG_ABORT 0x0010 /* abort interruptible waits */
+#define TH_SFLAG_ABORTSAFELY 0x0020 /* ... but only those at safe point */
+#define TH_SFLAG_ABORTED_MASK (TH_SFLAG_ABORT | TH_SFLAG_ABORTSAFELY)
+#define TH_SFLAG_DEPRESS 0x0040 /* normal depress yield */
+#define TH_SFLAG_POLLDEPRESS 0x0080 /* polled depress yield */
+#define TH_SFLAG_DEPRESSED_MASK (TH_SFLAG_DEPRESS | TH_SFLAG_POLLDEPRESS)
+#define TH_SFLAG_PRI_UPDATE 0x0100 /* Updating priority */
+#define TH_SFLAG_EAGERPREEMPT 0x0200 /* Any preemption of this thread should be treated as if AST_URGENT applied */
+
+/*
+ * A thread can be completely unthrottled, about to be throttled,
+ * throttled (TH_SFLAG_THROTTLED), or about to be unthrottled.
+ */
+#define TH_SFLAG_PENDING_THROTTLE_DEMOTION 0x1000 /* Pending sched_mode demotion */
+#define TH_SFLAG_PENDING_THROTTLE_PROMOTION 0x2000 /* Pending sched_mode promotion */
+#define TH_SFLAG_PENDING_THROTTLE_MASK (TH_SFLAG_PENDING_THROTTLE_DEMOTION | TH_SFLAG_PENDING_THROTTLE_PROMOTION)
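
/*
 * Illustrative sketch, not part of the diff: one plausible way the
 * pending-throttle bits and saved_mode could be consumed at a safe point,
 * e.g. when the thread is next dispatched. The helper name and the choice
 * of TH_MODE_FAIRSHARE as the demoted mode are assumptions made for
 * illustration, not the actual xnu routine.
 */
static void
sched_resolve_pending_throttle_sketch(thread_t thread)
{
	/* caller is assumed to hold the thread lock */
	if (thread->sched_flags & TH_SFLAG_PENDING_THROTTLE_DEMOTION) {
		thread->sched_flags &= ~TH_SFLAG_PENDING_THROTTLE_DEMOTION;
		thread->sched_flags |= TH_SFLAG_THROTTLED;
		thread->saved_mode = thread->sched_mode;	/* remember the undemoted mode */
		thread->sched_mode = TH_MODE_FAIRSHARE;		/* assumed demoted mode */
	} else if (thread->sched_flags & TH_SFLAG_PENDING_THROTTLE_PROMOTION) {
		thread->sched_flags &= ~TH_SFLAG_PENDING_THROTTLE_PROMOTION;
		thread->sched_flags &= ~TH_SFLAG_THROTTLED;
		thread->sched_mode = thread->saved_mode;	/* restore the original mode */
	}
}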
integer_t sched_pri; /* scheduled (current) priority */
integer_t priority; /* base priority */
integer_t max_priority; /* max base priority */
integer_t task_priority; /* copy of task base priority */
+#if defined(CONFIG_SCHED_GRRR)
+#if 0
+ uint16_t grrr_deficit; /* fixed point (1/1000th quantum) fractional deficit */
+#endif
+#endif
+
integer_t promotions; /* level of promotion */
integer_t pending_promoter_index;
void *pending_promoter[2];
uint64_t deadline;
} realtime;
+ uint32_t was_promoted_on_wakeup;
uint32_t current_quantum; /* duration of current quantum */
+ uint64_t last_run_time; /* time when thread was switched away from */
+ uint64_t last_quantum_refill_time; /* time when current_quantum was refilled after expiration */
/* Data used during setrun/dispatch */
timer_data_t system_timer; /* system mode timer */
processor_t bound_processor; /* bound to a processor? */
processor_t last_processor; /* processor last dispatched on */
+ processor_t chosen_processor; /* Where we want to run this thread */
/* Fail-safe computation since last unblock or qualifying yield */
uint64_t computation_metered;
uint64_t computation_epoch;
- integer_t safe_mode; /* saved mode during fail-safe */
- natural_t safe_release; /* when to release fail-safe */
+ uint64_t safe_release; /* when to release fail-safe */
/* Call out from scheduler */
void (*sched_call)(
int type,
thread_t thread);
-
+#if defined(CONFIG_SCHED_PROTO)
+ uint32_t runqueue_generation; /* last time runqueue was drained */
+#endif
+
/* Statistics and timesharing calculations */
+#if defined(CONFIG_SCHED_TRADITIONAL)
natural_t sched_stamp; /* last scheduler tick */
natural_t sched_usage; /* timesharing cpu usage [sched] */
natural_t pri_shift; /* usage -> priority from pset */
natural_t cpu_usage; /* instrumented cpu usage [%cpu] */
natural_t cpu_delta; /* accumulated cpu_usage delta */
+#endif
uint32_t c_switch; /* total context switches */
uint32_t p_switch; /* total processor switches */
uint32_t ps_switch; /* total pset switches */
/* Timing data structures */
+ int precise_user_kernel_time; /* precise user/kernel time enabled for this thread */
timer_data_t user_timer; /* user mode timer */
uint64_t user_timer_save; /* saved user timer value */
uint64_t system_timer_save; /* saved system timer value */
int64_t t_dtrace_vtime;
#endif
-#define T_CHUD_MARKED 0x1 /* this thread is marked by CHUD */
-#define T_IN_CHUD 0x2 /* this thread is already in a CHUD handler */
-#define THREAD_PMC_FLAG 0x4 /* Bit in "t_chud" signifying PMC interest */
uint32_t t_page_creation_count;
clock_sec_t t_page_creation_time;
+#define T_CHUD_MARKED 0x01 /* this thread is marked by CHUD */
+#define T_IN_CHUD 0x02 /* this thread is already in a CHUD handler */
+#define THREAD_PMC_FLAG 0x04 /* Bit in "t_chud" signifying PMC interest */
+#define T_AST_CALLSTACK 0x08 /* Thread scheduled to dump a callstack on its next AST */
+#define T_AST_NAME 0x10 /* Thread scheduled to dump its name on its next AST */
+#define T_NAME_DONE 0x20 /* Thread has previously recorded its name */
+
uint32_t t_chud; /* CHUD flags, used for Shark */
+ uint32_t chud_c_switch; /* last dispatch detection */
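
/*
 * Illustrative sketch, not part of the diff: how an AST-time consumer of
 * the t_chud bits above might behave. The handler and the two dump
 * routines are hypothetical names used for illustration; only the flag
 * tests mirror the definitions above.
 */
static void
chud_ast_sketch(thread_t thread)
{
	uint32_t flags = thread->t_chud;

	if (flags & T_AST_CALLSTACK) {
		hypothetical_dump_callstack(thread);	/* dump a callstack once */
		thread->t_chud &= ~T_AST_CALLSTACK;
	}
	if ((flags & T_AST_NAME) && !(flags & T_NAME_DONE)) {
		hypothetical_dump_thread_name(thread);	/* record the name at most once */
		thread->t_chud |= T_NAME_DONE;
		thread->t_chud &= ~T_AST_NAME;
	}
}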
+
+ integer_t mutex_count; /* total count of locks held */
+
 uint64_t thread_id; /* system wide unique thread-id */
+
+ /* Statistics accumulated per-thread and aggregated per-task */
+ uint32_t syscalls_unix;
+ uint32_t syscalls_mach;
+ ledger_t t_ledger;
+ ledger_t t_threadledger; /* per thread ledger */
+ struct process_policy ext_appliedstate; /* externally applied actions */
+ struct process_policy ext_policystate; /* externally defined process policy states */
+ struct process_policy appliedstate; /* self applied actions */
+ struct process_policy policystate; /* process wide policy states */
+#if CONFIG_EMBEDDED
+ task_watch_t * taskwatch; /* task watch */
+ integer_t saved_importance; /* saved task-relative importance */
+#endif /* CONFIG_EMBEDDED */
};
#define ith_state saved.receive.state
extern void stack_alloc(
thread_t thread);
+extern void stack_handoff(
+ thread_t from,
+ thread_t to);
+
extern void stack_free(
thread_t thread);
-extern void stack_free_stack(
- vm_offset_t stack);
+extern void stack_free_reserved(
+ thread_t thread);
extern boolean_t stack_alloc_try(
thread_t thread);
extern void stack_init(void) __attribute__((section("__TEXT, initcode")));
-extern kern_return_t thread_state_initialize(
- thread_t thread);
-
-extern kern_return_t thread_setstatus(
- thread_t thread,
- int flavor,
- thread_state_t tstate,
- mach_msg_type_number_t count);
-
-extern kern_return_t thread_getstatus(
- thread_t thread,
- int flavor,
- thread_state_t tstate,
- mach_msg_type_number_t *count);
extern kern_return_t thread_info_internal(
thread_t thread,
extern void machine_set_current_thread(
thread_t thread);
-extern void machine_thread_terminate_self(void);
-
extern kern_return_t machine_thread_get_kern_state(
thread_t thread,
thread_flavor_t flavor,
__BEGIN_DECLS
-#ifndef __LP64__
+#if defined(__i386__)
extern thread_t kernel_thread(
task_t task,
void (*start)(void));
-#endif /* __LP64__ */
+#endif /* defined(__i386__) */
extern uint64_t thread_tid(
thread_t thread);
#ifdef XNU_KERNEL_PRIVATE
+extern kern_return_t thread_state_initialize(
+ thread_t thread);
+
+extern kern_return_t thread_setstatus(
+ thread_t thread,
+ int flavor,
+ thread_state_t tstate,
+ mach_msg_type_number_t count);
+
+extern kern_return_t thread_getstatus(
+ thread_t thread,
+ int flavor,
+ thread_state_t tstate,
+ mach_msg_type_number_t *count);
+
extern kern_return_t thread_create_workq(
task_t task,
thread_continue_t thread_return,
extern void thread_yield_internal(
mach_msg_timeout_t interval);
+/*
+ * Thread-private CPU limits: apply a private CPU limit to this thread only. Available actions are:
+ *
+ * 1) Block. Prevent CPU consumption of the thread from exceeding the limit.
+ * 2) Exception. Generate a resource consumption exception when the limit is exceeded.
+ */
+#define THREAD_CPULIMIT_BLOCK 0x1
+#define THREAD_CPULIMIT_EXCEPTION 0x2
+
+struct _thread_ledger_indices {
+ int cpu_time;
+};
+
+extern struct _thread_ledger_indices thread_ledgers;
+
+extern int thread_set_cpulimit(int action, uint8_t percentage, uint64_t interval_ns);
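
/*
 * Usage sketch, not part of the diff: cap the calling thread at 50% of a
 * CPU over a 100 ms interval, blocking it once the limit is reached; pass
 * THREAD_CPULIMIT_EXCEPTION instead to raise a resource-consumption
 * exception rather than block. The declaration takes no thread argument,
 * so it is assumed to act on the current thread.
 */
static int
limit_current_thread_sketch(void)
{
	return thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, 50,
	    100 * 1000 * 1000ULL /* 100 ms in nanoseconds */);
}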
+
typedef struct funnel_lock funnel_t;
#define THR_FUNNEL_NULL (funnel_t *)0
thread_t thread,
int on);
+extern kern_return_t thread_userstack(
+ thread_t,
+ int,
+ thread_state_t,
+ unsigned int,
+ mach_vm_offset_t *,
+ int *);
+
+extern kern_return_t thread_entrypoint(
+ thread_t,
+ int,
+ thread_state_t,
+ unsigned int,
+ mach_vm_offset_t *);
+
+extern kern_return_t thread_userstackdefault(
+ thread_t,
+ mach_vm_offset_t *);
+
extern kern_return_t thread_wire_internal(
host_priv_t host_priv,
thread_t thread,
extern boolean_t thread_should_halt(
thread_t thread);
+extern boolean_t thread_should_abort(
+ thread_t);
+
extern int is_64signalregset(void);
void act_set_apc(thread_t);
+void act_set_kperf(thread_t);
extern uint32_t dtrace_get_thread_predcache(thread_t);
extern int64_t dtrace_get_thread_vtime(thread_t);
thread_continue_t continuation,
void *parameter,
thread_t *new_thread);
+#ifdef KERNEL_PRIVATE
+void thread_set_eager_preempt(thread_t thread);
+void thread_clear_eager_preempt(thread_t thread);
+extern ipc_port_t convert_thread_to_port(thread_t);
+#endif /* KERNEL_PRIVATE */
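
/*
 * Usage sketch, not part of the diff: while the eager-preempt hint is set,
 * any pending preemption of the marked thread is treated as urgent (see
 * TH_SFLAG_EAGERPREEMPT above), so a long-running worker flagged this way
 * gives up the CPU promptly. The bracketed work is a placeholder.
 */
static void
eager_preempt_window_sketch(thread_t worker)
{
	thread_set_eager_preempt(worker);
	/* ... long-running, preemption-friendly work executes here ... */
	thread_clear_eager_preempt(worker);
}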
__END_DECLS