/*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_FREE_COPYRIGHT@
*/
#include <cputypes.h>
#include <mach_assert.h>
-#include <mach_host.h>
-#include <mach_prof.h>
#include <mach_ldebug.h>
#include <ipc/ipc_types.h>
#include <kern/timer_call.h>
#include <kern/task.h>
#include <kern/exception.h>
+#include <kern/affinity.h>
#include <ipc/ipc_kmsg.h>
struct thread {
/*
* NOTE: The runq field in the thread structure has an unusual
- * locking protocol. If its value is RUN_QUEUE_NULL, then it is
+ * locking protocol. If its value is PROCESSOR_NULL, then it is
* locked by the thread_lock, but if its value is something else
- * (i.e. a run_queue) then it is locked by that run_queue's lock.
+ * then it is locked by the associated run queue lock.
*
* When the thread is on a wait queue, these first three fields
* are treated as an unofficial union with a wait_queue_element.
*/
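/*
 * Editor's note (illustrative sketch only, not part of this change): under
 * the protocol above, a caller removing a thread from its run queue takes
 * the thread lock first, reads runq, and only then takes the lock of the
 * run queue it points at, revalidating runq before dequeueing:
 *
 *	processor_t processor;
 *
 *	thread_lock(thread);
 *	processor = thread->runq;
 *	if (processor != PROCESSOR_NULL) {
 *		(lock the run queue belonging to 'processor',
 *		 re-check thread->runq, then dequeue)
 *	}
 *	thread_unlock(thread);
 */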
/* Items examined often, modified infrequently */
queue_chain_t links; /* run/wait queue links */
- run_queue_t runq; /* run queue thread is on SEE BELOW */
+ processor_t runq; /* run queue assignment */
wait_queue_t wait_queue; /* wait queue we are currently on */
event64_t wait_event; /* wait queue event */
integer_t options; /* options set by thread itself */
#define TH_OPT_INTMASK 0x03 /* interrupt / abort level */
#define TH_OPT_VMPRIV 0x04 /* may allocate reserved memory */
-#define TH_OPT_DELAYIDLE 0x08 /* performing delayed idle */
-#define TH_OPT_CALLOUT 0x10 /* executing as callout */
+#define TH_OPT_DTRACE 0x08 /* executing under dtrace_probe */
/* Data updated during assert_wait/thread_wakeup */
decl_simple_lock_data(,sched_lock) /* scheduling lock (thread_lock()) */
- decl_simple_lock_data(,wake_lock) /* covers wake_active (wake_lock())*/
- boolean_t wake_active; /* Someone is waiting for this */
+ decl_simple_lock_data(,wake_lock) /* for thread stop / wait (wake_lock()) */
+ boolean_t wake_active; /* wake event on stop */
int at_safe_point; /* thread_abort_safely allowed */
ast_t reason; /* why we blocked */
wait_result_t wait_result; /* outcome of wait - may be examined by this thread WITHOUT locking */
#define TH_UNINT 0x08 /* waiting uninterruptibly */
#define TH_TERMINATE 0x10 /* halted at termination */
-#define TH_ABORT 0x20 /* abort interruptible waits */
-#define TH_ABORT_SAFELY 0x40 /* ... but only those at safe point */
-
-#define TH_IDLE 0x80 /* processor idle thread */
-
-#define TH_SCHED_STATE (TH_WAIT|TH_SUSP|TH_RUN|TH_UNINT)
+#define TH_IDLE 0x80 /* idling processor */
/* Scheduling information */
integer_t sched_mode; /* scheduling mode bits */
#define TH_MODE_REALTIME 0x0001 /* time constraints supplied */
#define TH_MODE_TIMESHARE 0x0002 /* use timesharing algorithm */
-#define TH_MODE_PREEMPT 0x0004 /* can preempt kernel contexts */
-#define TH_MODE_FAILSAFE 0x0008 /* fail-safe has tripped */
-#define TH_MODE_PROMOTED 0x0010 /* sched pri has been promoted */
-#define TH_MODE_DEPRESS 0x0020 /* normal depress yield */
-#define TH_MODE_POLLDEPRESS 0x0040 /* polled depress yield */
+#define TH_MODE_FAILSAFE 0x0004 /* fail-safe has tripped */
+#define TH_MODE_PROMOTED 0x0008 /* sched pri has been promoted */
+#define TH_MODE_ABORT 0x0010 /* abort interruptible waits */
+#define TH_MODE_ABORTSAFELY 0x0020 /* ... but only those at safe point */
+#define TH_MODE_ISABORTED (TH_MODE_ABORT | TH_MODE_ABORTSAFELY)
+#define TH_MODE_DEPRESS 0x0040 /* normal depress yield */
+#define TH_MODE_POLLDEPRESS 0x0080 /* polled depress yield */
#define TH_MODE_ISDEPRESSED (TH_MODE_DEPRESS | TH_MODE_POLLDEPRESS)
integer_t sched_pri; /* scheduled (current) priority */
/* Data used during setrun/dispatch */
timer_data_t system_timer; /* system mode timer */
- processor_set_t processor_set; /* assigned processor set */
processor_t bound_processor; /* bound to a processor? */
processor_t last_processor; /* processor last dispatched on */
- uint64_t last_switch; /* time of last context switch */
/* Fail-safe computation since last unblock or qualifying yield */
uint64_t computation_metered;
integer_t safe_mode; /* saved mode during fail-safe */
natural_t safe_release; /* when to release fail-safe */
+ /* Call out from scheduler */
+ void (*sched_call)(
+ int type,
+ thread_t thread);
+
/* Statistics and timesharing calculations */
natural_t sched_stamp; /* last scheduler tick */
natural_t sched_usage; /* timesharing cpu usage [sched] */
natural_t pri_shift; /* usage -> priority from pset */
natural_t cpu_usage; /* instrumented cpu usage [%cpu] */
natural_t cpu_delta; /* accumulated cpu_usage delta */
+ uint32_t c_switch; /* total context switches */
+ uint32_t p_switch; /* total processor switches */
+ uint32_t ps_switch; /* total pset switches */
/* Timing data structures */
timer_data_t user_timer; /* user mode timer */
- uint64_t system_timer_save; /* saved system timer value */
uint64_t user_timer_save; /* saved user timer value */
+ uint64_t system_timer_save; /* saved system timer value */
+ uint64_t vtimer_user_save; /* saved values for vtimers */
+ uint64_t vtimer_prof_save;
+ uint64_t vtimer_rlim_save;
/* Timed wait expiration */
timer_call_data_t wait_timer;
timer_call_data_t depress_timer;
integer_t depress_timer_active;
+ /*
+ * Processor/cache affinity
+ * - affinity_threads links task threads with the same affinity set
+ */
+ affinity_set_t affinity_set;
+ queue_chain_t affinity_threads;
+
/* Various bits of stashed state */
union {
struct {
mach_msg_size_t msize; /* max size for recvd msg */
mach_msg_option_t option; /* options for receive */
mach_msg_size_t slist_size; /* scatter list size */
+ mach_port_name_t receiver_name; /* the receive port name */
struct ipc_kmsg *kmsg; /* received message */
mach_port_seqno_t seqno; /* seqno of recvd message */
mach_msg_continue_t continuation;
mach_port_t ith_rpc_reply; /* reply port for kernel RPCs */
/* Ast/Halt data structures */
- vm_offset_t recover; /* page fault recover(copyin/out) */
- int ref_count; /* number of references to me */
+ vm_offset_t recover; /* page fault recover(copyin/out) */
+ uint32_t ref_count; /* number of references to me */
- /* Processor set info */
- queue_chain_t pset_threads; /* list of all threads in pset */
-#if MACH_HOST
- boolean_t may_assign; /* may assignment change? */
- boolean_t assign_active; /* waiting for may_assign */
-#endif /* MACH_HOST */
+ queue_chain_t threads; /* global list of all threads */
/* Activation */
queue_chain_t task_threads;
struct task *task;
vm_map_t map;
- decl_mutex_data(,mutex)
+ decl_lck_mtx_data(,mutex)
/* Kernel holds on this thread */
int suspend_count;
/* Miscellaneous bits guarded by mutex */
uint32_t
- /* Indicates that the thread has not been terminated */
- active:1,
-
- /* Indicates that the thread has been started after creation */
- started:1,
- :0;
+ active:1, /* Thread is active and has not been terminated */
+ started:1, /* Thread has been started after creation */
+ static_param:1, /* Disallow policy parameter changes */
+ :0;
/* Return Handlers */
struct ReturnHandler {
/* Owned ulocks (a lock set element) */
queue_head_t held_ulocks;
-#if MACH_PROF
- /* Profiling */
- boolean_t profiled;
- boolean_t profiled_own;
- struct prof_data *profil_buffer;
-#endif /* MACH_PROF */
-
#ifdef MACH_BSD
void *uthread;
#endif
+
+#if CONFIG_DTRACE
+ uint32_t t_dtrace_predcache;/* DTrace per thread predicate value hint */
+ int64_t t_dtrace_tracing; /* Thread time under dtrace_probe() */
+ int64_t t_dtrace_vtime;
+#endif
+
+#define T_CHUD_MARKED 0x1 /* this thread is marked by CHUD */
+#define T_IN_CHUD 0x2 /* this thread is already in a CHUD handler */
+#define THREAD_PMC_FLAG 0x4 /* Bit in "t_chud" signifying PMC interest */
+ uint32_t t_page_creation_count;
+ clock_sec_t t_page_creation_time;
+
+ uint32_t t_chud; /* CHUD flags, used for Shark */
+ uint64_t thread_id; /* system-wide unique thread id */
};
#define ith_state saved.receive.state
#define ith_msize saved.receive.msize
#define ith_option saved.receive.option
#define ith_scatter_list_size saved.receive.slist_size
+#define ith_receiver_name saved.receive.receiver_name
#define ith_continuation saved.receive.continuation
#define ith_kmsg saved.receive.kmsg
#define ith_seqno saved.receive.seqno
#define sth_result saved.sema.result
#define sth_continuation saved.sema.continuation
-extern void thread_bootstrap(void);
+extern void thread_bootstrap(void) __attribute__((section("__TEXT, initcode")));
-extern void thread_init(void);
+extern void thread_init(void) __attribute__((section("__TEXT, initcode")));
extern void thread_daemon_init(void);
#define thread_reference_internal(thread) \
- hw_atomic_add(&(thread)->ref_count, 1)
+ (void)hw_atomic_add(&(thread)->ref_count, 1)
#define thread_deallocate_internal(thread) \
hw_atomic_sub(&(thread)->ref_count, 1)
#define thread_reference(thread) \
MACRO_BEGIN \
if ((thread) != THREAD_NULL) \
- thread_reference_internal(thread); \
+ thread_reference_internal(thread); \
MACRO_END
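/*
 * Editor's note (illustrative usage, not part of this change): a caller
 * that stashes a thread pointer beyond the scope of the lock it found it
 * under takes a reference first and pairs it with thread_deallocate():
 *
 *	thread_reference(thread);
 *	(use the thread after dropping other locks)
 *	thread_deallocate(thread);
 */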
extern void thread_deallocate(
extern kern_return_t thread_terminate_internal(
thread_t thread);
+extern void thread_start_internal(
+ thread_t thread) __attribute__ ((noinline));
+
extern void thread_terminate_enqueue(
thread_t thread);
extern void thread_release(
thread_t thread);
+
#define thread_lock_init(th) simple_lock_init(&(th)->sched_lock, 0)
#define thread_lock(th) simple_lock(&(th)->sched_lock)
#define thread_unlock(th) simple_unlock(&(th)->sched_lock)
-#define thread_lock_try(th) simple_lock_try(&(th)->sched_lock)
-
-#define thread_should_halt_fast(thread) (!(thread)->active)
#define wake_lock_init(th) simple_lock_init(&(th)->wake_lock, 0)
#define wake_lock(th) simple_lock(&(th)->wake_lock)
#define wake_unlock(th) simple_unlock(&(th)->wake_lock)
-#define wake_lock_try(th) simple_lock_try(&(th)->wake_lock)
+
+#define thread_should_halt_fast(thread) (!(thread)->active)
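/*
 * Editor's note (illustrative sketch, assuming the usual osfmk convention
 * that these simple locks are taken with interrupts disabled via splsched()):
 *
 *	spl_t s = splsched();
 *	thread_lock(thread);
 *	(examine or update scheduling state)
 *	thread_unlock(thread);
 *	splx(s);
 */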
extern void stack_alloc(
thread_t thread);
extern void stack_collect(void);
-extern void stack_init(void);
+extern void stack_init(void) __attribute__((section("__TEXT, initcode")));
extern kern_return_t thread_state_initialize(
thread_t thread);
extern kern_return_t machine_thread_create(
thread_t thread,
task_t task);
+extern void machine_thread_switch_addrmode(
+ thread_t thread);
extern void machine_thread_destroy(
thread_t thread);
thread_state_t tstate,
mach_msg_type_number_t *count);
+extern kern_return_t machine_thread_inherit_taskwide(
+ thread_t thread,
+ task_t parent_task);
/*
* XXX Funnel locks XXX
*/
typedef struct ReturnHandler ReturnHandler;
-#define thread_mtx_lock(thread) mutex_lock(&(thread)->mutex)
-#define thread_mtx_try(thread) mutex_try(&(thread)->mutex)
-#define thread_mtx_unlock(thread) mutex_unlock(&(thread)->mutex)
+#define thread_mtx_lock(thread) lck_mtx_lock(&(thread)->mutex)
+#define thread_mtx_try(thread) lck_mtx_try_lock(&(thread)->mutex)
+#define thread_mtx_unlock(thread) lck_mtx_unlock(&(thread)->mutex)
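/*
 * Editor's note (illustrative usage, not part of this change): fields
 * guarded by the thread mutex are manipulated under thread_mtx_lock(),
 * which may block, unlike the sched/wake simple locks above:
 *
 *	thread_mtx_lock(thread);
 *	(inspect or modify mutex-guarded state, e.g. suspend_count)
 *	thread_mtx_unlock(thread);
 */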
extern void act_execute_returnhandlers(void);
ReturnHandler *rh,
thread_t thread);
+void act_machine_sv_free(thread_t, int);
+
+vm_offset_t min_valid_stack_address(void);
+vm_offset_t max_valid_stack_address(void);
+
+extern void funnel_lock(
+ struct funnel_lock *lock);
+
+extern void funnel_unlock(
+ struct funnel_lock *lock);
+
#else /* MACH_KERNEL_PRIVATE */
__BEGIN_DECLS
#ifdef KERNEL_PRIVATE
-typedef struct funnel_lock funnel_t;
-
-#ifdef MACH_KERNEL_PRIVATE
-
-extern void funnel_lock(
- funnel_t *lock);
-
-extern void funnel_unlock(
- funnel_t *lock);
-
-vm_offset_t min_valid_stack_address(void);
-vm_offset_t max_valid_stack_address(void);
-
-#endif /* MACH_KERNEL_PRIVATE */
-
__BEGIN_DECLS
-extern funnel_t *thread_funnel_get(void);
-
-extern boolean_t thread_funnel_set(
- funnel_t *lock,
- boolean_t funneled);
+#ifndef __LP64__
extern thread_t kernel_thread(
task_t task,
void (*start)(void));
+#endif /* __LP64__ */
+
+extern uint64_t thread_tid(
+ thread_t thread);
+
+extern uint64_t thread_dispatchqaddr(
+ thread_t thread);
+
__END_DECLS
#endif /* KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE
-/*
- * XXX Funnel locks XXX
- */
+extern kern_return_t thread_create_workq(
+ task_t task,
+ thread_t *new_thread);
+
+extern void thread_yield_internal(
+ mach_msg_timeout_t interval);
+
+typedef struct funnel_lock funnel_t;
#define THR_FUNNEL_NULL (funnel_t *)0
extern void funnel_free(
funnel_t *lock);
+extern funnel_t *thread_funnel_get(void);
+
+extern boolean_t thread_funnel_set(
+ funnel_t *lock,
+ boolean_t funneled);
+
extern void thread_read_times(
thread_t thread,
time_value_t *user_time,
thread_t thread,
mach_vm_offset_t entry);
+extern kern_return_t thread_setsinglestep(
+ thread_t thread,
+ int on);
+
extern kern_return_t thread_wire_internal(
host_priv_t host_priv,
thread_t thread,
boolean_t wired,
boolean_t *prev_state);
-/* JMM - These are only temporary */
-extern boolean_t is_thread_running(thread_t); /* True is TH_RUN */
-extern boolean_t is_thread_idle(thread_t); /* True is TH_IDLE */
-
extern kern_return_t thread_dup(thread_t);
+typedef void (*sched_call_t)(
+ int type,
+ thread_t thread);
+
+#define SCHED_CALL_BLOCK 0x1
+#define SCHED_CALL_UNBLOCK 0x2
+
+extern void thread_sched_call(
+ thread_t thread,
+ sched_call_t call);
+
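/*
 * Editor's note (hypothetical example, not part of this change): a
 * subsystem interested in block/unblock transitions supplies a sched_call_t
 * for the thread; 'my_sched_call' below is a made-up name:
 *
 *	static void
 *	my_sched_call(int type, thread_t thread)
 *	{
 *		if (type == SCHED_CALL_BLOCK)
 *			(note that 'thread' is blocking)
 *		else if (type == SCHED_CALL_UNBLOCK)
 *			(note that 'thread' is runnable again)
 *	}
 *
 *	thread_sched_call(thread, my_sched_call);
 */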
+extern void thread_static_param(
+ thread_t thread,
+ boolean_t state);
+
+extern kern_return_t thread_policy_set_internal(
+ thread_t thread,
+ thread_policy_flavor_t flavor,
+ thread_policy_t policy_info,
+ mach_msg_type_number_t count);
+
+
extern task_t get_threadtask(thread_t);
+#define thread_is_64bit(thd) \
+ task_has_64BitAddr(get_threadtask(thd))
+
extern void *get_bsdthread_info(thread_t);
extern void set_bsdthread_info(thread_t, void *);
-extern void *uthread_alloc(task_t, thread_t);
-extern void uthread_free(task_t, void *, void *);
+extern void *uthread_alloc(task_t, thread_t, int);
+extern void uthread_cleanup(task_t, void *, void *);
+extern void uthread_zone_free(void *);
+extern void uthread_cred_free(void *);
extern boolean_t thread_should_halt(
thread_t thread);
+extern int is_64signalregset(void);
+
+void act_set_apc(thread_t);
+
+extern uint32_t dtrace_get_thread_predcache(thread_t);
+extern int64_t dtrace_get_thread_vtime(thread_t);
+extern int64_t dtrace_get_thread_tracing(thread_t);
+extern boolean_t dtrace_get_thread_reentering(thread_t);
+extern vm_offset_t dtrace_get_kernel_stack(thread_t);
+extern void dtrace_set_thread_predcache(thread_t, uint32_t);
+extern void dtrace_set_thread_vtime(thread_t, int64_t);
+extern void dtrace_set_thread_tracing(thread_t, int64_t);
+extern void dtrace_set_thread_reentering(thread_t, boolean_t);
+extern vm_offset_t dtrace_set_thread_recover(thread_t, vm_offset_t);
+extern void dtrace_thread_bootstrap(void);
+
+extern int64_t dtrace_calc_thread_recent_vtime(thread_t);
+
+
+extern void thread_set_wq_state32(
+ thread_t thread,
+ thread_state_t tstate);
+
+extern void thread_set_wq_state64(
+ thread_t thread,
+ thread_state_t tstate);
+
+extern vm_offset_t kernel_stack_mask;
+extern vm_offset_t kernel_stack_size;
+extern vm_offset_t kernel_stack_depth_max;
+
#endif /* XNU_KERNEL_PRIVATE */
+/*! @function kernel_thread_start
+ @abstract Create a kernel thread.
+ @discussion This function takes three input parameters: a pointer to the function that the thread should execute, caller-specified data, and a reference through which the newly created kernel thread is returned. It returns KERN_SUCCESS on success or an appropriate kernel return code indicating the error. The caller is responsible for explicitly releasing the reference to the created thread when it is no longer needed; this is done by calling thread_deallocate(new_thread).
+ @param continuation A C-function pointer where the thread will begin execution.
+ @param parameter Caller specified data to be passed to the new thread.
+ @param new_thread Reference to the new thread is returned in this parameter.
+ @result Returns KERN_SUCCESS on success or an appropriate kernel code type.
+*/
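/*
 * Editor's note (illustrative usage, not part of this change; the names
 * my_thread_func and my_data are hypothetical):
 *
 *	static void
 *	my_thread_func(void *param, wait_result_t wr)
 *	{
 *		(do work with param, then terminate or continue)
 *	}
 *
 *	thread_t new_thread;
 *	if (kernel_thread_start(my_thread_func, my_data, &new_thread) == KERN_SUCCESS)
 *		thread_deallocate(new_thread);
 */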
+
extern kern_return_t kernel_thread_start(
thread_continue_t continuation,
void *parameter,