/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	thread.h
 *	Author:	Avadis Tevanian, Jr.
 *
 *	This file contains the structure definitions for threads.
 *
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */
#ifndef	_KERN_THREAD_H_
#define	_KERN_THREAD_H_

#include <mach/kern_return.h>
#include <mach/mach_types.h>
#include <mach/message.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/thread_info.h>
#include <mach/thread_status.h>

#include <kern/cpu_data.h>	/* for current_thread */
#include <kern/kern_types.h>

#include <ipc/ipc_types.h>
/*
 * Logically, a thread of control consists of two parts:
 *
 *	a thread_shuttle, which may migrate due to resource contention
 *	and
 *	a thread_activation, which remains attached to a task.
 *
 * The thread_shuttle contains scheduling info, accounting info,
 * and links to the thread_activation within which the shuttle is
 * currently operating.
 *
 * It might make sense to have the thread_shuttle be a proper sub-structure
 * of the thread, with the thread containing links to both the shuttle and
 * activation.  In order to reduce the scope and complexity of source
 * changes and the overhead of maintaining these linkages, we have subsumed
 * the shuttle into the thread, calling it a thread_shuttle.
 *
 * User accesses to threads always come in via the user's thread port,
 * which gets translated to a pointer to the target thread_activation.
 */
#include <sys/appleapiopts.h>

#ifdef	__APPLE_API_PRIVATE

#ifdef	MACH_KERNEL_PRIVATE

#include <cpus.h>
#include <hw_footprint.h>
#include <mach_host.h>
#include <mach_prof.h>
#include <mach_lock_mon.h>
#include <mach_ldebug.h>

#include <mach/port.h>
#include <kern/ast.h>
#include <kern/cpu_number.h>
#include <kern/queue.h>
#include <kern/time_out.h>
#include <kern/timer.h>
#include <kern/lock.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/thread_call.h>
#include <kern/timer_call.h>
#include <kern/task.h>
#include <ipc/ipc_kmsg.h>
#include <machine/thread.h>
/*
 * Kernel accesses intended to affect the entire thread typically use
 * a pointer to the thread_shuttle (current_thread()) as the target of
 * their operations.  This makes sense given that we have subsumed the
 * shuttle into the thread_shuttle, eliminating one set of linkages.
 * Operations affecting only the shuttle may use a thread_shuttle_t
 * to indicate this.
 *
 * The current_act() macro returns a pointer to the current thread_act, while
 * the current_thread() macro returns a pointer to the currently active
 * thread_shuttle (representing the thread in its entirety).
 */
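/*
 * Illustrative sketch (not compiled): one way the two views described
 * above relate at run time.  "example_dump_self" is a hypothetical
 * helper used only for illustration; it is not part of this interface.
 */
#if 0
static void
example_dump_self(void)
{
	thread_t	self = current_thread();	/* whole thread (shuttle) */
	thread_act_t	act  = current_act();		/* its top activation */

	assert(act == self->top_act);
	printf("thread 0x%x act 0x%x sched_pri %d\n",
	       (unsigned int) self, (unsigned int) act, self->sched_pri);
}
#endif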
struct thread_shuttle {
	/*
	 *	NOTE:	The runq field in the thread structure has an unusual
	 *	locking protocol.  If its value is RUN_QUEUE_NULL, then it is
	 *	locked by the thread_lock, but if its value is something else
	 *	(i.e. a run_queue) then it is locked by that run_queue's lock.
	 *	(A sketch of this protocol follows the structure definition.)
	 *
	 *	Beginning of thread_shuttle proper.  When the thread is on
	 *	a wait queue, these first three fields are treated as an
	 *	unofficial union with a wait_queue_element.  If you change
	 *	these, you must change that definition as well (wait_queue.h).
	 */
	/* Items examined often, modified infrequently */
	queue_chain_t	links;			/* run/wait queue links */
	run_queue_t	runq;			/* run queue thread is on SEE BELOW */
	wait_queue_t	wait_queue;		/* wait queue we are currently on */
	event64_t	wait_event;		/* wait queue event */
	thread_act_t	top_act;		/* "current" thr_act */
	uint32_t				/* Only set by thread itself */
			interrupt_level:2,	/* interrupts/aborts allowed */
			vm_privilege:1,		/* can use reserved memory? */
			active_callout:1,	/* an active callout */
			:0;

	/* Data updated during assert_wait/thread_wakeup */
	decl_simple_lock_data(,lock)		/* scheduling lock (thread_lock()) */
	decl_simple_lock_data(,wake_lock)	/* covers wake_active (wake_lock()) */
	boolean_t	wake_active;		/* Someone is waiting for this */
	int		at_safe_point;		/* thread_abort_safely allowed */
	ast_t		reason;			/* why we blocked */
	wait_result_t	wait_result;		/* outcome of wait -
						 * may be examined by this thread
						 * WITHOUT locking */
	thread_roust_t	roust;			/* routine to roust it after wait */
	thread_continue_t continuation;		/* resume here next dispatch */

	/* Data updated/used in thread_invoke */
	struct funnel_lock *funnel_lock;	/* Non-reentrancy funnel */
	int		funnel_state;
#define TH_FN_OWNED		0x1		/* we own the funnel */
#define TH_FN_REFUNNEL		0x2		/* re-acquire funnel on dispatch */

	vm_offset_t	kernel_stack;		/* current kernel stack */
	vm_offset_t	stack_privilege;	/* reserved kernel stack */

	/* Thread state: */
	int		state;
/*
 *	Thread states [bits or'ed]
 */
#define TH_WAIT			0x01	/* thread is queued for waiting */
#define TH_SUSP			0x02	/* thread has been asked to stop */
#define TH_RUN			0x04	/* thread is running or on runq */
#define TH_UNINT		0x08	/* thread is waiting uninterruptibly */
#define TH_TERMINATE		0x10	/* thread is halting at termination */

#define TH_ABORT		0x20	/* abort interruptible waits */
#define TH_ABORT_SAFELY		0x40	/* ... but only those at safe point */

#define TH_IDLE			0x80	/* thread is an idle thread */

#define	TH_SCHED_STATE		(TH_WAIT|TH_SUSP|TH_RUN|TH_UNINT)

#define TH_STACK_HANDOFF	0x0100	/* thread has no kernel stack */
#define TH_STACK_ALLOC		0x0200	/* waiting for stack allocation */
#define TH_STACK_STATE		(TH_STACK_HANDOFF | TH_STACK_ALLOC)
	/* Scheduling information */
	integer_t	sched_mode;		/* scheduling mode bits */
#define TH_MODE_REALTIME	0x0001	/* time constraints supplied */
#define TH_MODE_TIMESHARE	0x0002	/* use timesharing algorithm */
#define TH_MODE_PREEMPT		0x0004	/* can preempt kernel contexts */
#define TH_MODE_FAILSAFE	0x0008	/* fail-safe has tripped */
#define	TH_MODE_PROMOTED	0x0010	/* sched pri has been promoted */
#define TH_MODE_FORCEDPREEMPT	0x0020	/* force setting of mode PREEMPT */
#define TH_MODE_DEPRESS		0x0040	/* normal depress yield */
#define TH_MODE_POLLDEPRESS	0x0080	/* polled depress yield */
#define TH_MODE_ISDEPRESSED	(TH_MODE_DEPRESS | TH_MODE_POLLDEPRESS)

	integer_t	sched_pri;		/* scheduled (current) priority */
	integer_t	priority;		/* base priority */
	integer_t	max_priority;		/* max base priority */
	integer_t	task_priority;		/* copy of task base priority */

	integer_t	promotions;		/* level of promotion */
	integer_t	pending_promoter_index;
	void		*pending_promoter[2];

	integer_t	importance;		/* task-relative importance */
	/* time constraint parameters */
	struct {				/* see mach/thread_policy.h */
		uint32_t	period;
		uint32_t	computation;
		uint32_t	constraint;
		boolean_t	preemptible;
	}		realtime;

	uint32_t	current_quantum;	/* duration of current quantum */
	/* Data used during setrun/dispatch */
	timer_data_t	system_timer;		/* system mode timer */
	processor_set_t	processor_set;		/* assigned processor set */
	processor_t	bound_processor;	/* bound to a processor? */
	processor_t	last_processor;		/* processor last dispatched on */
	uint64_t	last_switch;		/* time of last context switch */

	/* Fail-safe computation since last unblock or qualifying yield */
	uint64_t	computation_metered;
	uint64_t	computation_epoch;
	integer_t	safe_mode;		/* saved mode during fail-safe */
	natural_t	safe_release;		/* when to release fail-safe */
	/* Used in priority computations */
	natural_t	sched_stamp;		/* when priority was updated */
	natural_t	cpu_usage;		/* exp. decaying cpu usage [%cpu] */
	natural_t	cpu_delta;		/* cpu usage since last update */
	natural_t	sched_usage;		/* load-weighted cpu usage [sched] */
	natural_t	sched_delta;		/* weighted cpu usage since update */
	natural_t	sleep_stamp;		/* when entered TH_WAIT state */

	/* Timing data structures */
	timer_data_t	user_timer;		/* user mode timer */
	timer_save_data_t system_timer_save;	/* saved system timer value */
	timer_save_data_t user_timer_save;	/* saved user timer value */
	/* Timed wait expiration */
	timer_call_data_t wait_timer;
	integer_t	wait_timer_active;
	boolean_t	wait_timer_is_set;

	/* Priority depression expiration */
	timer_call_data_t depress_timer;
	integer_t	depress_timer_active;
	/* Various bits of stashed state */
	union {
		struct {
			mach_msg_return_t	state;		/* receive state */
			ipc_object_t		object;		/* object received on */
			mach_msg_header_t	*msg;		/* receive buffer pointer */
			mach_msg_size_t		msize;		/* max size for recvd msg */
			mach_msg_option_t	option;		/* options for receive */
			mach_msg_size_t		slist_size;	/* scatter list size */
			struct ipc_kmsg		*kmsg;		/* received message */
			mach_port_seqno_t	seqno;		/* seqno of recvd message */
			mach_msg_continue_t	continuation;
		} receive;
		struct {
			struct semaphore	*waitsemaphore;		/* semaphore ref */
			struct semaphore	*signalsemaphore;	/* semaphore ref */
			int			options;		/* semaphore options */
			kern_return_t		result;			/* primary result */
			mach_msg_continue_t	continuation;
		} sema;
		struct {
			int			option;		/* switch option */
		} swtch;
		int			misc;		/* catch-all for other state */
	} saved;
	/* IPC data structures */
	struct ipc_kmsg_queue ith_messages;
	mach_port_t	ith_mig_reply;		/* reply port for mig */
	mach_port_t	ith_rpc_reply;		/* reply port for kernel RPCs */

	/* Ast/Halt data structures */
	boolean_t	active;			/* thread is active */
	vm_offset_t	recover;		/* page fault recover (copyin/out) */
	int		ref_count;		/* number of references to me */
	/* Processor set info */
	queue_chain_t	pset_threads;		/* list of all shuttles in pset */
#if	MACH_HOST
	boolean_t	may_assign;		/* may assignment change? */
	boolean_t	assign_active;		/* waiting for may_assign */
#endif	/* MACH_HOST */

/* BEGIN TRACING/DEBUG */

#if	MACH_LOCK_MON
	unsigned	lock_stack;		/* number of locks held */
#endif	/* MACH_LOCK_MON */

#if	ETAP_EVENT_MONITOR
	int		etap_reason;		/* real reason why we blocked */
	boolean_t	etap_trace;		/* ETAP trace status */
#endif	/* ETAP_EVENT_MONITOR */

#if	MACH_LDEBUG
	/*
	 * Debugging: track acquired mutexes and locks.
	 * Because a thread can block while holding such
	 * synchronizers, we think of the thread as
	 * "owning" them.
	 */
#define	MUTEX_STACK_DEPTH	20
#define	LOCK_STACK_DEPTH	20
	mutex_t		*mutex_stack[MUTEX_STACK_DEPTH];
	lock_t		*lock_stack[LOCK_STACK_DEPTH];
	unsigned int	mutex_stack_index;
	unsigned int	lock_stack_index;
	unsigned	mutex_count;		/* XXX to be deleted XXX */
#endif	/* MACH_LDEBUG */
/* END TRACING/DEBUG */

};
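/*
 * Illustrative sketch (not compiled) of the runq locking protocol noted
 * at the top of the structure: while runq is RUN_QUEUE_NULL the field is
 * protected by the thread lock; otherwise it is protected by the lock of
 * the run queue it names.  "example_thread_on_runq" is a hypothetical
 * helper used only for illustration.
 */
#if 0
static boolean_t
example_thread_on_runq(
	thread_t	thread)
{
	boolean_t	on_runq;
	spl_t		s;

	s = splsched();
	thread_lock(thread);
	/*
	 * The thread lock is enough to *observe* the field; actually
	 * removing the thread from a run queue would also require that
	 * run queue's own lock.
	 */
	on_runq = (thread->runq != RUN_QUEUE_NULL);
	thread_unlock(thread);
	splx(s);

	return (on_runq);
}
#endif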
#define ith_state		saved.receive.state
#define ith_object		saved.receive.object
#define ith_msg			saved.receive.msg
#define ith_msize		saved.receive.msize
#define	ith_option		saved.receive.option
#define ith_scatter_list_size	saved.receive.slist_size
#define ith_continuation	saved.receive.continuation
#define ith_kmsg		saved.receive.kmsg
#define ith_seqno		saved.receive.seqno

#define sth_waitsemaphore	saved.sema.waitsemaphore
#define sth_signalsemaphore	saved.sema.signalsemaphore
#define sth_options		saved.sema.options
#define sth_result		saved.sema.result
#define sth_continuation	saved.sema.continuation
struct funnel_lock {
	int		fnl_type;		/* funnel type */
	mutex_t		*fnl_mutex;		/* underlying mutex for the funnel */
	void		*fnl_mtxholder;		/* thread (last) holding mutex */
	void		*fnl_mtxrelease;	/* thread (last) releasing mutex */
	mutex_t		*fnl_oldmutex;		/* mutex before collapsing split funnel */
};

typedef struct funnel_lock funnel_t;
extern thread_act_t	active_kloaded[NCPUS];	/* "" kernel-loaded acts */
extern vm_offset_t	active_stacks[NCPUS];	/* active kernel stacks */
extern vm_offset_t	kernel_stack[NCPUS];

extern struct thread_shuttle	pageout_thread;
#ifndef	MACHINE_STACK_STASH
/*
 * MD Macro to fill up global stack state,
 * keeping the MD structure sizes + games private
 */
#define MACHINE_STACK_STASH(stack)					\
MACRO_BEGIN								\
	mp_disable_preemption();					\
	active_stacks[cpu_number()] = (stack);				\
	kernel_stack[cpu_number()] = (stack) + KERNEL_STACK_SIZE;	\
	mp_enable_preemption();						\
MACRO_END
#endif	/* MACHINE_STACK_STASH */
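/*
 * Illustrative sketch (not compiled): a machine-dependent stack-switch
 * path would use the macro above to publish the stack it has just
 * switched to, so that current_stack() (defined below) reflects it.
 * "example_publish_stack" and its argument are hypothetical and used
 * only for illustration.
 */
#if 0
static void
example_publish_stack(
	vm_offset_t	new_stack)
{
	MACHINE_STACK_STASH(new_stack);
	assert(current_stack() == new_stack);
}
#endif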
/*
 *	Kernel-only routines
 */

/* Initialize thread module */
extern void		thread_init(void);

/* Take reference on thread (make sure it doesn't go away) */
extern void		thread_reference(
				thread_t	thread);

/* Release reference on thread */
extern void		thread_deallocate(
				thread_t	thread);

/* Set task priority of member thread */
extern void		thread_task_priority(
				thread_t	thread,
				integer_t	priority,
				integer_t	max_priority);

/* Start a thread at specified routine */
#define thread_start(thread, start)			\
	(thread)->continuation = (start)
/* Reaps threads waiting to be destroyed */
extern void		thread_reaper_init(void);

/* Ensure thread always has a kernel stack */
extern void		stack_privilege(
				thread_t	thread);

extern void		consider_thread_collect(void);

/*
 *	Arguments to specify aggressiveness to thread halt.
 *	Can't have MUST_HALT and SAFELY at the same time.
 */
#define	THREAD_HALT_NORMAL	0
#define THREAD_HALT_MUST_HALT	1	/* no deadlock checks */
#define THREAD_HALT_SAFELY	2	/* result must be restartable */
/*
 *	Macro-defined routines
 */

#define thread_pcb(th)		((th)->pcb)

#define thread_lock_init(th)	simple_lock_init(&(th)->lock, ETAP_THREAD_LOCK)
#define thread_lock(th)		simple_lock(&(th)->lock)
#define thread_unlock(th)	simple_unlock(&(th)->lock)
#define thread_lock_try(th)	simple_lock_try(&(th)->lock)
#define thread_should_halt_fast(thread)	\
	(!(thread)->top_act || !(thread)->top_act->active)

#define thread_should_halt(thread)	thread_should_halt_fast(thread)

#define thread_reference_locked(thread)	((thread)->ref_count++)
/*
 * Lock to cover wake_active only; like thread_lock(), is taken
 * at splsched().  Used to avoid calling into scheduler with a
 * thread_lock() held.  Precedes thread_lock() (and other scheduling-
 * related locks) in the system lock ordering.
 */
#define wake_lock_init(th)	\
			simple_lock_init(&(th)->wake_lock, ETAP_THREAD_WAKE)
#define wake_lock(th)		simple_lock(&(th)->wake_lock)
#define wake_unlock(th)		simple_unlock(&(th)->wake_lock)
#define wake_lock_try(th)	simple_lock_try(&(th)->wake_lock)
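/*
 * Illustrative sketch (not compiled) of the ordering described above:
 * wake_lock is taken first, thread_lock second, both at splsched().
 * "example_wake_waiter" is a hypothetical helper used only for
 * illustration; it is not the actual wakeup path.
 */
#if 0
static void
example_wake_waiter(
	thread_t	thread)
{
	spl_t		s;

	s = splsched();
	wake_lock(thread);			/* covers wake_active */
	if (thread->wake_active) {
		thread->wake_active = FALSE;
		thread_lock(thread);		/* legal: follows wake_lock */
		/* ... examine or adjust scheduling state here ... */
		thread_unlock(thread);
	}
	wake_unlock(thread);
	splx(s);
}
#endif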
/* Return the active kernel stack of the current CPU; preemption is
 * disabled around the per-CPU lookup. */
static __inline__ vm_offset_t	current_stack(void);
static __inline__ vm_offset_t
current_stack(void)
{
	vm_offset_t	ret;

	mp_disable_preemption();
	ret = active_stacks[cpu_number()];
	mp_enable_preemption();
	return ret;
}
extern void		pcb_module_init(void);

extern void		pcb_init(
				thread_act_t	thr_act);

extern void		pcb_terminate(
				thread_act_t	thr_act);

extern void		pcb_collect(
				thread_act_t	thr_act);

extern void		pcb_user_to_kernel(
				thread_act_t	thr_act);

extern kern_return_t	thread_setstatus(
				thread_act_t	thr_act,
				int		flavor,
				thread_state_t	tstate,
				mach_msg_type_number_t	count);

extern kern_return_t	thread_getstatus(
				thread_act_t	thr_act,
				int		flavor,
				thread_state_t	tstate,
				mach_msg_type_number_t	*count);

extern boolean_t	stack_alloc_try(
				thread_t	thread,
				void		(*start_pos)(thread_t));

/* This routine now used only internally */
extern kern_return_t	thread_info_shuttle(
				thread_act_t	thr_act,
				thread_flavor_t	flavor,
				thread_info_t	thread_info_out,
				mach_msg_type_number_t	*thread_info_count);
/* Machine-dependent routines */
extern void		thread_machine_init(void);

extern void		thread_machine_set_current(
				thread_t	thread);

extern kern_return_t	thread_machine_create(
				thread_t	thread,
				thread_act_t	thr_act,
				void		(*start_pos)(thread_t));

extern void		thread_set_syscall_return(
				thread_t	thread,
				kern_return_t	retval);

extern void		thread_machine_destroy(
				thread_t	thread);

extern void		thread_machine_flush(
				thread_act_t	thr_act);

extern thread_t		kernel_thread_with_priority(
				task_t		task,
				integer_t	priority,
				void		(*start)(void),
				boolean_t	alloc_stack,
				boolean_t	start_running);

extern void		thread_terminate_self(void);

extern void		funnel_lock(funnel_t *);

extern void		funnel_unlock(funnel_t *);
#else	/* MACH_KERNEL_PRIVATE */

typedef struct funnel_lock funnel_t;

extern boolean_t	thread_should_halt(thread_t);

#endif	/* MACH_KERNEL_PRIVATE */

extern thread_t		kernel_thread(
				task_t		task,
				void		(*start)(void));
extern void		thread_set_cont_arg(int);

extern int		thread_get_cont_arg(void);

/* JMM - These are only temporary */
extern boolean_t	is_thread_running(thread_act_t);	/* True if TH_RUN */
extern boolean_t	is_thread_idle(thread_t);		/* True if TH_IDLE */
extern kern_return_t	get_thread_waitresult(thread_t);

#endif	/* __APPLE_API_PRIVATE */
#ifdef	__APPLE_API_EVOLVING

#define THR_FUNNEL_NULL	(funnel_t *)0

extern funnel_t		*funnel_alloc(int);

extern funnel_t		*thread_funnel_get(void);

extern boolean_t	thread_funnel_set(funnel_t *fnl, boolean_t funneled);

extern boolean_t	thread_funnel_merge(funnel_t *fnl, funnel_t *otherfnl);
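/*
 * Illustrative sketch (not compiled): the usual funnel idiom built on
 * thread_funnel_set().  "example_funneled_section" is a hypothetical
 * helper, and "kernel_flock" stands for a funnel obtained elsewhere
 * (e.g. from funnel_alloc()); neither is declared in this header.
 */
#if 0
static void
example_funneled_section(void)
{
	boolean_t	funnel_state;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);	/* acquire */
	/* ... code that must run under the funnel ... */
	(void) thread_funnel_set(kernel_flock, funnel_state);	/* restore */
}
#endif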
#endif	/* __APPLE_API_EVOLVING */

#endif	/* _KERN_THREAD_H_ */