X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/d7e50217d7adf6e52786a38bcaa4cd698cb9a79e..91447636331957f3d9b5ca5b508f07c526b0074d:/osfmk/kern/thread.h?ds=sidebyside

diff --git a/osfmk/kern/thread.h b/osfmk/kern/thread.h
index 13c981605..cde616313 100644
--- a/osfmk/kern/thread.h
+++ b/osfmk/kern/thread.h
@@ -1,24 +1,21 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
  *
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). You may not use this file except in compliance with the
+ * License. Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
  *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
  *
  * @APPLE_LICENSE_HEADER_END@
  */
@@ -85,103 +82,69 @@
 #include
 #include
 #include
-#include
-#include
+#include
 #include
 #include
+#include
-#include		/* for current_thread */
 #include
-#include
-
-/*
- * Logically, a thread of control consists of two parts:
- *
- *	a thread_shuttle, which may migrate due to resource contention
- * and
- *	a thread_activation, which remains attached to a task.
- *
- * The thread_shuttle contains scheduling info, accounting info,
- * and links to the thread_activation within which the shuttle is
- * currently operating.
- *
- * It might make sense to have the thread_shuttle be a proper sub-structure
- * of the thread, with the thread containing links to both the shuttle and
- * activation.  In order to reduce the scope and complexity of source
- * changes and the overhead of maintaining these linkages, we have subsumed
- * the shuttle into the thread, calling it a thread_shuttle.
- *
- * User accesses to threads always come in via the user's thread port,
- * which gets translated to a pointer to the target thread_activation.
- */
-#include
-
-#ifdef __APPLE_API_PRIVATE
+#include
 
 #ifdef MACH_KERNEL_PRIVATE
 
-#include
-#include
+#include
+
+#include
 #include
 #include
-#include
 #include
+#include
+
 #include
-#include
 #include
 #include
-#include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
 #include
+#include
+
 #include
+
+#include
 #include
 
-/*
- * Kernel accesses intended to effect the entire thread, typically use
- * a pointer to the thread_shuttle (current_thread()) as the target of
- * their operations.  This makes sense given that we have subsumed the
- * shuttle into the thread_shuttle, eliminating one set of linkages.
- * Operations effecting only the shuttle may use a thread_shuttle_t
- * to indicate this.
- *
- * The current_act() macro returns a pointer to the current thread_act, while
- * the current_thread() macro returns a pointer to the currently active
- * thread_shuttle (representing the thread in its entirety).
- */
-struct thread_shuttle {
+struct thread {
 	/*
 	 * NOTE:	The runq field in the thread structure has an unusual
 	 *	locking protocol.  If its value is RUN_QUEUE_NULL, then it is
 	 *	locked by the thread_lock, but if its value is something else
 	 *	(i.e. a run_queue) then it is locked by that run_queue's lock.
 	 *
-	 *	Beginning of thread_shuttle proper.  When the thread is on
-	 *	a wait queue, these first three fields are treated as an un-
-	 *	official union with a wait_queue_element.  If you change
-	 *	these, you must change that definition as well (wait_queue.h).
+	 *	When the thread is on a wait queue, these first three fields
+	 *	are treated as an unofficial union with a wait_queue_element.
+	 *	If you change these, you must change that definition as well
+	 *	(kern/wait_queue.h).
 	 */
 	/* Items examined often, modified infrequently */
 	queue_chain_t	links;		/* run/wait queue links */
 	run_queue_t	runq;		/* run queue thread is on SEE BELOW */
 	wait_queue_t	wait_queue;	/* wait queue we are currently on */
 	event64_t	wait_event;	/* wait queue event */
-	thread_act_t	top_act;	/* "current" thr_act */
-	uint32_t			/* Only set by thread itself */
-		interrupt_level:2,	/* interrupts/aborts allowed */
-		vm_privilege:1,		/* can use reserved memory? */
-		active_callout:1,	/* an active callout */
-		:0;
-
+	integer_t	options;	/* options set by thread itself */
+#define TH_OPT_INTMASK		0x03	/* interrupt / abort level */
+#define TH_OPT_VMPRIV		0x04	/* may allocate reserved memory */
+#define TH_OPT_DELAYIDLE	0x08	/* performing delayed idle */
+#define TH_OPT_CALLOUT		0x10	/* executing as callout */
 
 	/* Data updated during assert_wait/thread_wakeup */
-	decl_simple_lock_data(,lock)		/* scheduling lock (thread_lock()) */
+	decl_simple_lock_data(,sched_lock)	/* scheduling lock (thread_lock()) */
 	decl_simple_lock_data(,wake_lock)	/* covers wake_active (wake_lock())*/
 	boolean_t	wake_active;	/* Someone is waiting for this */
 	int		at_safe_point;	/* thread_abort_safely allowed */
@@ -189,8 +152,8 @@ struct thread_shuttle {
 	wait_result_t	wait_result;	/* outcome of wait -
 					 * may be examined by this thread
 					 * WITHOUT locking */
-	thread_roust_t	roust;		/* routine to roust it after wait */
-	thread_continue_t continuation;	/* resume here next dispatch */
+	thread_continue_t continuation;	/* continue here next dispatch */
+	void		*parameter;	/* continuation parameter */
 
 	/* Data updated/used in thread_invoke */
 	struct funnel_lock	*funnel_lock;	/* Non-reentrancy funnel */
@@ -199,30 +162,26 @@ struct thread_shuttle {
 #define TH_FN_REFUNNEL		0x2	/* re-acquire funnel on dispatch */
 
 	vm_offset_t	kernel_stack;		/* current kernel stack */
-	vm_offset_t	stack_privilege;	/* reserved kernel stack */
+	vm_offset_t	reserved_stack;		/* reserved kernel stack */
 
 	/* Thread state: */
 	int		state;
 /*
  *	Thread states [bits or'ed]
  */
-#define TH_WAIT			0x01	/* thread is queued for waiting */
-#define TH_SUSP			0x02	/* thread has been asked to stop */
-#define TH_RUN			0x04	/* thread is running or on runq */
-#define TH_UNINT		0x08	/* thread is waiting uninteruptibly */
-#define TH_TERMINATE		0x10	/* thread is halting at termination */
+#define TH_WAIT			0x01	/* queued for waiting */
+#define TH_SUSP			0x02	/* stopped or requested to stop */
+#define TH_RUN			0x04	/* running or on runq */
+#define TH_UNINT		0x08	/* waiting uninterruptibly */
+#define TH_TERMINATE		0x10	/* halted at termination */
 
-#define TH_ABORT		0x20	/* abort interruptible waits */
-#define TH_ABORT_SAFELY		0x40	/* ... but only those at safe point */
+#define TH_ABORT		0x20	/* abort interruptible waits */
+#define TH_ABORT_SAFELY		0x40	/* ... but only those at safe point */
-#define TH_IDLE			0x80	/* thread is an idle thread */
+#define TH_IDLE			0x80	/* processor idle thread */
 
 #define TH_SCHED_STATE	(TH_WAIT|TH_SUSP|TH_RUN|TH_UNINT)
 
-#define TH_STACK_HANDOFF	0x0100	/* thread has no kernel stack */
-#define TH_STACK_ALLOC		0x0200	/* waiting for stack allocation */
-#define TH_STACK_STATE	(TH_STACK_HANDOFF | TH_STACK_ALLOC)
-
 	/* Scheduling information */
 	integer_t	sched_mode;	/* scheduling mode bits */
 #define TH_MODE_REALTIME	0x0001	/* time constraints supplied */
@@ -230,9 +189,8 @@ struct thread_shuttle {
 #define TH_MODE_PREEMPT		0x0004	/* can preempt kernel contexts */
 #define TH_MODE_FAILSAFE	0x0008	/* fail-safe has tripped */
 #define TH_MODE_PROMOTED	0x0010	/* sched pri has been promoted */
-#define TH_MODE_FORCEDPREEMPT	0x0020	/* force setting of mode PREEMPT */
-#define TH_MODE_DEPRESS		0x0040	/* normal depress yield */
-#define TH_MODE_POLLDEPRESS	0x0080	/* polled depress yield */
+#define TH_MODE_DEPRESS		0x0020	/* normal depress yield */
+#define TH_MODE_POLLDEPRESS	0x0040	/* polled depress yield */
 #define TH_MODE_ISDEPRESSED	(TH_MODE_DEPRESS | TH_MODE_POLLDEPRESS)
 
 	integer_t	sched_pri;	/* scheduled (current) priority */
@@ -246,12 +204,14 @@ struct thread_shuttle {
 	integer_t	importance;	/* task-relative importance */
 
-	/* time constraint parameters */
+	/* real-time parameters */
 	struct {	/* see mach/thread_policy.h */
 		uint32_t	period;
 		uint32_t	computation;
 		uint32_t	constraint;
 		boolean_t	preemptible;
+
+		uint64_t	deadline;
 	}	realtime;
 
 	uint32_t	current_quantum;	/* duration of current quantum */
@@ -269,34 +229,33 @@ struct thread_shuttle {
 	integer_t	safe_mode;	/* saved mode during fail-safe */
 	natural_t	safe_release;	/* when to release fail-safe */
 
-	/* Used in priority computations */
-	natural_t	sched_stamp;	/* when priority was updated */
-	natural_t	cpu_usage;	/* exp. decaying cpu usage [%cpu] */
-	natural_t	cpu_delta;	/* cpu usage since last update */
-	natural_t	sched_usage;	/* load-weighted cpu usage [sched] */
-	natural_t	sched_delta;	/* weighted cpu usage since update */
-	natural_t	sleep_stamp;	/* when entered TH_WAIT state */
+	/* Statistics and timesharing calculations */
+	natural_t	sched_stamp;	/* last scheduler tick */
+	natural_t	sched_usage;	/* timesharing cpu usage [sched] */
+	natural_t	pri_shift;	/* usage -> priority from pset */
+	natural_t	cpu_usage;	/* instrumented cpu usage [%cpu] */
+	natural_t	cpu_delta;	/* accumulated cpu_usage delta */
 
 	/* Timing data structures */
-	timer_data_t	user_timer;		/* user mode timer */
-	timer_save_data_t	system_timer_save;	/* saved system timer value */
-	timer_save_data_t	user_timer_save;	/* saved user timer value */
+	timer_data_t	user_timer;		/* user mode timer */
+	uint64_t	system_timer_save;	/* saved system timer value */
+	uint64_t	user_timer_save;	/* saved user timer value */
 
 	/* Timed wait expiration */
-	timer_call_data_t	wait_timer;
-	integer_t	wait_timer_active;
-	boolean_t	wait_timer_is_set;
+	timer_call_data_t	wait_timer;
+	integer_t		wait_timer_active;
+	boolean_t		wait_timer_is_set;
 
 	/* Priority depression expiration */
-	timer_call_data_t	depress_timer;
-	integer_t	depress_timer_active;
+	timer_call_data_t	depress_timer;
+	integer_t		depress_timer_active;
 
 	/* Various bits of stashed state */
 	union {
 		struct {
 			mach_msg_return_t	state;		/* receive state */
 			ipc_object_t		object;		/* object received on */
-			mach_msg_header_t	*msg;		/* receive buffer pointer */
+			mach_vm_address_t	msg_addr;	/* receive buffer pointer */
 			mach_msg_size_t		msize;		/* max size for recvd msg */
 			mach_msg_option_t	option;		/* options for receive */
 			mach_msg_size_t		slist_size;	/* scatter list size */
@@ -319,54 +278,80 @@ struct thread_shuttle {
 	/* IPC data structures */
 	struct ipc_kmsg_queue ith_messages;
-	mach_port_t ith_mig_reply;	/* reply port for mig */
 	mach_port_t ith_rpc_reply;	/* reply port for kernel RPCs */
 
 	/* Ast/Halt data structures */
-	boolean_t	active;		/* thread is active */
 	vm_offset_t	recover;	/* page fault recover(copyin/out) */
 	int		ref_count;	/* number of references to me */
 
 	/* Processor set info */
-	queue_chain_t	pset_threads;	/* list of all shuttles in pset */
+	queue_chain_t	pset_threads;	/* list of all threads in pset */
 #if	MACH_HOST
 	boolean_t	may_assign;	/* may assignment change? */
 	boolean_t	assign_active;	/* waiting for may_assign */
#endif	/* MACH_HOST */
 
-/* BEGIN TRACING/DEBUG */
+	/* Activation */
+	queue_chain_t	task_threads;
 
-#if	MACH_LOCK_MON
-	unsigned	lock_stack;	/* number of locks held */
-#endif	/* MACH_LOCK_MON */
+	/*** Machine-dependent state ***/
+	struct machine_thread	machine;
 
-#if	ETAP_EVENT_MONITOR
-	int		etap_reason;	/* real reason why we blocked */
-	boolean_t	etap_trace;	/* ETAP trace status */
-#endif	/* ETAP_EVENT_MONITOR */
+	/* Task membership */
+	struct task	*task;
+	vm_map_t	map;
 
-#if	MACH_LDEBUG
-	/*
-	 * Debugging: track acquired mutexes and locks.
-	 * Because a thread can block while holding such
-	 * synchronizers, we think of the thread as
-	 * "owning" them.
-	 */
-#define	MUTEX_STACK_DEPTH	20
-#define	LOCK_STACK_DEPTH	20
-	mutex_t		*mutex_stack[MUTEX_STACK_DEPTH];
-	lock_t		*lock_stack[LOCK_STACK_DEPTH];
-	unsigned int	mutex_stack_index;
-	unsigned int	lock_stack_index;
-	unsigned	mutex_count;	/* XXX to be deleted XXX */
-#endif	/* MACH_LDEBUG */
-/* END TRACING/DEBUG */
+	decl_mutex_data(,mutex)
 
+	/* Kernel holds on this thread */
+	int		suspend_count;
+
+	/* User level suspensions */
+	int		user_stop_count;
+
+	/* Pending thread ast(s) */
+	ast_t		ast;
+
+	/* Miscellaneous bits guarded by mutex */
+	uint32_t
+	/* Indicates that the thread has not been terminated */
+		active:1,
+
+	/* Indicates that the thread has been started after creation */
+		started:1,
+		:0;
+
+	/* Return Handlers */
+	struct ReturnHandler {
+		struct ReturnHandler	*next;
+		void	(*handler)(
+				struct ReturnHandler	*rh,
+				struct thread		*thread);
+	} *handlers, special_handler;
+
+	/* Ports associated with this thread */
+	struct ipc_port		*ith_self;	/* not a right, doesn't hold ref */
+	struct ipc_port		*ith_sself;	/* a send right */
+	struct exception_action	exc_actions[EXC_TYPES_COUNT];
+
+	/* Owned ulocks (a lock set element) */
+	queue_head_t	held_ulocks;
+
+#if	MACH_PROF
+	/* Profiling */
+	boolean_t	profiled;
+	boolean_t	profiled_own;
+	struct prof_data	*profil_buffer;
+#endif	/* MACH_PROF */
+
+#ifdef	MACH_BSD
+	void	*uthread;
+#endif
 };
 
 #define ith_state	saved.receive.state
 #define ith_object	saved.receive.object
-#define ith_msg		saved.receive.msg
+#define ith_msg_addr	saved.receive.msg_addr
 #define ith_msize	saved.receive.msize
 #define ith_option	saved.receive.option
 #define ith_scatter_list_size	saved.receive.slist_size
@@ -380,227 +365,310 @@ struct thread_shuttle {
 #define sth_result	saved.sema.result
 #define sth_continuation	saved.sema.continuation
 
-struct funnel_lock {
-	int		fnl_type;	/* funnel type */
-	mutex_t		*fnl_mutex;	/* underlying mutex for the funnel */
-	void *		fnl_mtxholder;	/* thread (last)holdng mutex */
-	void *		fnl_mtxrelease;	/* thread (last)releasing mutex */
-	mutex_t		*fnl_oldmutex;	/* Mutex before collapsing split funnel */
-};
-
-typedef struct funnel_lock funnel_t;
-
-extern thread_act_t	active_kloaded[NCPUS];	/* "" kernel-loaded acts */
-extern vm_offset_t	active_stacks[NCPUS];	/* active kernel stacks */
-extern vm_offset_t	kernel_stack[NCPUS];
-
-extern struct thread_shuttle	pageout_thread;
-
-#ifndef	MACHINE_STACK_STASH
-/*
- * MD Macro to fill up global stack state,
- * keeping the MD structure sizes + games private
- */
-#define MACHINE_STACK_STASH(stack)					\
-MACRO_BEGIN								\
-	mp_disable_preemption();					\
-	active_stacks[cpu_number()] = (stack);				\
-	kernel_stack[cpu_number()] = (stack) + KERNEL_STACK_SIZE;	\
-	mp_enable_preemption();						\
-MACRO_END
-#endif	/* MACHINE_STACK_STASH */
-
-/*
- * Kernel-only routines
- */
+extern void	thread_bootstrap(void);
 
-/* Initialize thread module */
-extern void	thread_init(void);
+extern void	thread_init(void);
 
-/* Take reference on thread (make sure it doesn't go away) */
-extern void	thread_reference(
-			thread_t	thread);
+extern void	thread_daemon_init(void);
 
-/* Release reference on thread */
-extern void	thread_deallocate(
-			thread_t	thread);
+#define	thread_reference_internal(thread)	\
+			hw_atomic_add(&(thread)->ref_count, 1)
 
-/* Set task priority of member thread */
-extern void	thread_task_priority(
-			thread_t	thread,
-			integer_t	priority,
-			integer_t	max_priority);
-
-/* Start a thread at specified routine */
-#define thread_start(thread, start)	\
-	(thread)->continuation = (start)
-
-/* Reaps threads waiting to be destroyed */
-extern void	thread_reaper_init(void);
+#define thread_deallocate_internal(thread)	\
+			hw_atomic_sub(&(thread)->ref_count, 1)
 
+#define thread_reference(thread)			\
+MACRO_BEGIN						\
+	if ((thread) != THREAD_NULL)			\
+		thread_reference_internal(thread);	\
+MACRO_END
 
-/* Insure thread always has a kernel stack */
-extern void	stack_privilege(
-			thread_t	thread);
+extern void	thread_deallocate(
+			thread_t	thread);
 
-extern void	consider_thread_collect(void);
+extern void	thread_terminate_self(void);
 
-/*
- * Arguments to specify aggressiveness to thread halt.
- * Can't have MUST_HALT and SAFELY at the same time.
- */
-#define	THREAD_HALT_NORMAL	0
-#define	THREAD_HALT_MUST_HALT	1	/* no deadlock checks */
-#define	THREAD_HALT_SAFELY	2	/* result must be restartable */
+extern kern_return_t	thread_terminate_internal(
+				thread_t	thread);
 
-/*
- *	Macro-defined routines
- */
+extern void	thread_terminate_enqueue(
+			thread_t	thread);
 
-#define thread_pcb(th)		((th)->pcb)
+extern void	thread_stack_enqueue(
+			thread_t	thread);
 
-#define thread_lock_init(th)	simple_lock_init(&(th)->lock, ETAP_THREAD_LOCK)
-#define thread_lock(th)		simple_lock(&(th)->lock)
-#define thread_unlock(th)	simple_unlock(&(th)->lock)
-#define thread_lock_try(th)	simple_lock_try(&(th)->lock)
+extern void	thread_hold(
+			thread_t	thread);
 
-#define thread_should_halt_fast(thread)	\
-	(!(thread)->top_act || !(thread)->top_act->active)
+extern void	thread_release(
+			thread_t	thread);
 
-#define thread_should_halt(thread)	thread_should_halt_fast(thread)
+#define	thread_lock_init(th)	simple_lock_init(&(th)->sched_lock, 0)
+#define thread_lock(th)		simple_lock(&(th)->sched_lock)
+#define thread_unlock(th)	simple_unlock(&(th)->sched_lock)
+#define thread_lock_try(th)	simple_lock_try(&(th)->sched_lock)
 
-#define thread_reference_locked(thread)	((thread)->ref_count++)
+#define thread_should_halt_fast(thread)	(!(thread)->active)
 
-/*
- * Lock to cover wake_active only; like thread_lock(), is taken
- * at splsched().  Used to avoid calling into scheduler with a
- * thread_lock() held.  Precedes thread_lock() (and other scheduling-
- * related locks) in the system lock ordering.
- */
-#define wake_lock_init(th)	\
-	simple_lock_init(&(th)->wake_lock, ETAP_THREAD_WAKE)
-#define wake_lock(th)		simple_lock(&(th)->wake_lock)
-#define wake_unlock(th)		simple_unlock(&(th)->wake_lock)
+#define wake_lock_init(th)	simple_lock_init(&(th)->wake_lock, 0)
+#define wake_lock(th)		simple_lock(&(th)->wake_lock)
+#define wake_unlock(th)		simple_unlock(&(th)->wake_lock)
 #define wake_lock_try(th)	simple_lock_try(&(th)->wake_lock)
 
-static __inline__ vm_offset_t current_stack(void);
-static __inline__ vm_offset_t
-current_stack(void)
-{
-	vm_offset_t	ret;
+extern void	stack_alloc(
+			thread_t	thread);
 
-	mp_disable_preemption();
-	ret = active_stacks[cpu_number()];
-	mp_enable_preemption();
-	return ret;
-}
+extern void	stack_free(
+			thread_t	thread);
 
-extern void	pcb_module_init(void);
+extern void	stack_free_stack(
+			vm_offset_t	stack);
 
-extern void	pcb_init(
-			thread_act_t	thr_act);
+extern boolean_t	stack_alloc_try(
+				thread_t	thread);
 
-extern void	pcb_terminate(
-			thread_act_t	thr_act);
+extern void	stack_collect(void);
 
-extern void	pcb_collect(
-			thread_act_t	thr_act);
+extern void	stack_init(void);
 
-extern void	pcb_user_to_kernel(
-			thread_act_t	thr_act);
+extern kern_return_t	thread_state_initialize(
+				thread_t	thread);
 
 extern kern_return_t	thread_setstatus(
-				thread_act_t	thr_act,
+				thread_t	thread,
 				int		flavor,
 				thread_state_t	tstate,
 				mach_msg_type_number_t	count);
 
 extern kern_return_t	thread_getstatus(
-				thread_act_t	thr_act,
+				thread_t	thread,
 				int		flavor,
 				thread_state_t	tstate,
 				mach_msg_type_number_t	*count);
 
-extern boolean_t	stack_alloc_try(
-				thread_t	thread,
-				void		(*start_pos)(thread_t));
-
-/* This routine now used only internally */
-extern kern_return_t	thread_info_shuttle(
-				thread_act_t	thr_act,
+extern kern_return_t	thread_info_internal(
+				thread_t	thread,
 				thread_flavor_t	flavor,
 				thread_info_t	thread_info_out,
 				mach_msg_type_number_t	*thread_info_count);
 
-/* Machine-dependent routines */
-extern void	thread_machine_init(void);
+extern void	thread_task_priority(
+			thread_t	thread,
+			integer_t	priority,
+			integer_t	max_priority);
 
-extern void	thread_machine_set_current(
-			thread_t	thread );
+extern void	thread_policy_reset(
+			thread_t	thread);
 
-extern kern_return_t	thread_machine_create(
-				thread_t	thread,
-				thread_act_t	thr_act,
-				void		(*start_pos)(thread_t));
+extern kern_return_t	kernel_thread_create(
+				thread_continue_t	continuation,
+				void			*parameter,
+				integer_t		priority,
+				thread_t		*new_thread);
 
-extern void	thread_set_syscall_return(
-			thread_t	thread,
-			kern_return_t	retval);
+extern kern_return_t	kernel_thread_start_priority(
+				thread_continue_t	continuation,
+				void			*parameter,
+				integer_t		priority,
+				thread_t		*new_thread);
 
-extern void	thread_machine_destroy(
-			thread_t	thread );
+extern void	machine_stack_attach(
+			thread_t	thread,
+			vm_offset_t	stack);
 
-extern void	thread_machine_flush(
-			thread_act_t	thr_act);
+extern vm_offset_t	machine_stack_detach(
+				thread_t	thread);
 
-extern thread_t	kernel_thread_with_priority(
-			task_t		task,
-			integer_t	priority,
-			void		(*start)(void),
-			boolean_t	alloc_stack,
-			boolean_t	start_running);
+extern void	machine_stack_handoff(
+			thread_t	old,
+			thread_t	new);
 
-extern void	thread_terminate_self(void);
+extern thread_t	machine_switch_context(
+			thread_t		old_thread,
+			thread_continue_t	continuation,
+			thread_t		new_thread);
+
+extern void	machine_load_context(
+			thread_t	thread);
 
-extern void	funnel_lock(funnel_t *);
+extern kern_return_t	machine_thread_state_initialize(
+				thread_t	thread);
+
+extern kern_return_t	machine_thread_set_state(
+				thread_t	thread,
+				thread_flavor_t	flavor,
+				thread_state_t	state,
+				mach_msg_type_number_t	count);
+
+extern kern_return_t	machine_thread_get_state(
+				thread_t	thread,
+				thread_flavor_t	flavor,
+				thread_state_t	state,
+				mach_msg_type_number_t	*count);
+
+extern kern_return_t	machine_thread_dup(
+				thread_t	self,
+				thread_t	target);
+
+extern void	machine_thread_init(void);
+
+extern kern_return_t	machine_thread_create(
+				thread_t	thread,
+				task_t		task);
+
+extern void	machine_thread_destroy(
+			thread_t	thread);
+
+extern void	machine_set_current_thread(
+			thread_t	thread);
+
+extern void	machine_thread_terminate_self(void);
+
+extern kern_return_t	machine_thread_get_kern_state(
+				thread_t	thread,
+				thread_flavor_t	flavor,
+				thread_state_t	tstate,
+				mach_msg_type_number_t	*count);
-
+/*
+ * XXX Funnel locks XXX
+ */
+
+struct funnel_lock {
+	int		fnl_type;	/* funnel type */
+	lck_mtx_t	*fnl_mutex;	/* underlying mutex for the funnel */
+	void *		fnl_mtxholder;	/* thread (last)holding mutex */
+	void *		fnl_mtxrelease;	/* thread (last)releasing mutex */
+	lck_mtx_t	*fnl_oldmutex;	/* Mutex before collapsing split funnel */
+};
+
+typedef struct ReturnHandler ReturnHandler;
+
+#define thread_mtx_lock(thread)		mutex_lock(&(thread)->mutex)
+#define thread_mtx_try(thread)		mutex_try(&(thread)->mutex)
+#define thread_mtx_unlock(thread)	mutex_unlock(&(thread)->mutex)
+
+extern void	act_execute_returnhandlers(void);
+
+extern void	install_special_handler(
+			thread_t	thread);
+
+extern void	special_handler(
+			ReturnHandler	*rh,
+			thread_t	thread);
 
 #else	/* MACH_KERNEL_PRIVATE */
 
+__BEGIN_DECLS
+
+extern thread_t	current_thread(void);
+
+extern void	thread_reference(
+			thread_t	thread);
+
+extern void	thread_deallocate(
+			thread_t	thread);
+
+__END_DECLS
+
+#endif	/* MACH_KERNEL_PRIVATE */
+
+#ifdef	KERNEL_PRIVATE
+
 typedef struct funnel_lock funnel_t;
 
-extern boolean_t	thread_should_halt(thread_t);
+#ifdef	MACH_KERNEL_PRIVATE
+
+extern void	funnel_lock(
+			funnel_t	*lock);
+
+extern void	funnel_unlock(
+			funnel_t	*lock);
+
+vm_offset_t	min_valid_stack_address(void);
+vm_offset_t	max_valid_stack_address(void);
 
 #endif	/* MACH_KERNEL_PRIVATE */
 
+__BEGIN_DECLS
+
+extern funnel_t	*thread_funnel_get(void);
+
+extern boolean_t	thread_funnel_set(
+				funnel_t	*lock,
+				boolean_t	funneled);
+
 extern thread_t	kernel_thread(
 			task_t		task,
 			void		(*start)(void));
 
-extern void	thread_set_cont_arg(int);
+__END_DECLS
 
-extern int	thread_get_cont_arg(void);
+#endif	/* KERNEL_PRIVATE */
 
-/* JMM - These are only temporary */
-extern boolean_t	is_thread_running(thread_act_t);	/* True is TH_RUN */
-extern boolean_t	is_thread_idle(thread_t);		/* True is TH_IDLE */
-extern kern_return_t	get_thread_waitresult(thread_t);
+__BEGIN_DECLS
 
-#endif	/* __APPLE_API_PRIVATE */
+#ifdef	XNU_KERNEL_PRIVATE
 
-#ifdef	__APPLE_API_EVOLVING
+/*
+ * XXX Funnel locks XXX
+ */
 
 #define THR_FUNNEL_NULL	(funnel_t *)0
 
-extern funnel_t *	funnel_alloc(int);
+extern funnel_t	*funnel_alloc(
+			int	type);
+
+extern void	funnel_free(
+			funnel_t	*lock);
+
+extern void	thread_read_times(
+			thread_t	thread,
+			time_value_t	*user_time,
+			time_value_t	*system_time);
+
+extern void	thread_setuserstack(
+			thread_t	thread,
+			mach_vm_offset_t	user_stack);
+
+extern uint64_t	thread_adjuserstack(
+			thread_t	thread,
+			int		adjust);
+
+extern void	thread_setentrypoint(
+			thread_t	thread,
+			mach_vm_offset_t	entry);
+
+extern kern_return_t	thread_wire_internal(
+				host_priv_t	host_priv,
+				thread_t	thread,
+				boolean_t	wired,
+				boolean_t	*prev_state);
+
+/* JMM - These are only temporary */
+extern boolean_t	is_thread_running(thread_t);	/* True if TH_RUN */
+extern boolean_t	is_thread_idle(thread_t);	/* True if TH_IDLE */
+
+extern kern_return_t	thread_dup(thread_t);
+
+extern task_t	get_threadtask(thread_t);
+
+extern void	*get_bsdthread_info(thread_t);
+extern void	set_bsdthread_info(thread_t, void *);
+extern void	*uthread_alloc(task_t, thread_t);
+extern void	uthread_free(task_t, void *, void *);
 
-extern funnel_t *	thread_funnel_get(void);
+extern boolean_t	thread_should_halt(
+				thread_t	thread);
 
-extern boolean_t	thread_funnel_set(funnel_t * fnl, boolean_t funneled);
+#endif	/* XNU_KERNEL_PRIVATE */
 
-extern boolean_t	thread_funnel_merge(funnel_t * fnl, funnel_t * otherfnl);
+extern kern_return_t	kernel_thread_start(
+				thread_continue_t	continuation,
+				void			*parameter,
+				thread_t		*new_thread);
 
-#endif	/* __APPLE_API_EVOLVING */
+__END_DECLS
 
 #endif	/* _KERN_THREAD_H_ */
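
Note on the reference-counting change visible above: the new thread_reference_internal() / thread_deallocate_internal() macros replace the old lock-protected thread_reference_locked() ((thread)->ref_count++) with lock-free atomic arithmetic on ref_count via hw_atomic_add()/hw_atomic_sub(). A minimal user-space sketch of the same pattern, using the GCC/Clang __atomic builtins in place of the kernel's hw_atomic primitives (struct fake_thread and the function names here are hypothetical, for illustration only):

#include <assert.h>
#include <stdlib.h>

/* Stand-in for struct thread; only the refcount field is modeled. */
struct fake_thread {
	int	ref_count;
};

/* Analogue of thread_reference_internal(): atomically increment and
 * return the updated count, as hw_atomic_add() does. */
static int
fake_reference_internal(struct fake_thread *t)
{
	return __atomic_add_fetch(&t->ref_count, 1, __ATOMIC_SEQ_CST);
}

/* Analogue of thread_deallocate_internal(): atomically decrement and
 * return the updated count; zero means the last reference just went. */
static int
fake_deallocate_internal(struct fake_thread *t)
{
	return __atomic_sub_fetch(&t->ref_count, 1, __ATOMIC_SEQ_CST);
}

int
main(void)
{
	struct fake_thread *t = calloc(1, sizeof(*t));

	t->ref_count = 1;			/* creation reference */
	fake_reference_internal(t);		/* a second holder */
	assert(t->ref_count == 2);

	fake_deallocate_internal(t);		/* second holder drops */
	if (fake_deallocate_internal(t) == 0)	/* final reference gone */
		free(t);
	return 0;
}

The reason the primitives return the updated value is that the release path can then decide, without taking any lock, whether it just dropped the final reference and must reclaim the thread.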
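Note on thread creation: kernel_thread_with_priority() is removed above in favor of the kernel_thread_create() / kernel_thread_start() / kernel_thread_start_priority() family, which takes a continuation plus an opaque parameter instead of a bare start routine. A sketch of how an in-kernel client might use the new interface, assuming only the declarations in this header (example_worker and example_start are hypothetical names, and this compiles only inside a kernel build):

#include <kern/thread.h>

/* A continuation: it receives the parameter passed at creation plus a
 * wait result, and it must never return (here it self-terminates). */
static void
example_worker(void *parameter, wait_result_t wresult)
{
	(void)wresult;
	/* ... consume the work described by parameter ... */
	thread_terminate_self();
}

static kern_return_t
example_start(void *context)
{
	thread_t	thread;
	kern_return_t	result;

	/* kernel_thread_start() creates the thread, sets it running, and
	 * returns holding a reference the caller is expected to drop. */
	result = kernel_thread_start(example_worker, context, &thread);
	if (result != KERN_SUCCESS)
		return result;

	thread_deallocate(thread);	/* we keep no pointer to it */
	return KERN_SUCCESS;
}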