/* apple/xnu (xnu-517): osfmk/kern/thread.h */
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	thread.h
 *	Author:	Avadis Tevanian, Jr.
 *
 *	This file contains the structure definitions for threads.
 *
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */

#ifndef	_KERN_THREAD_H_
#define	_KERN_THREAD_H_

#include <mach/kern_return.h>
#include <mach/mach_types.h>
#include <mach/message.h>
#include <mach/boolean.h>
#include <mach/vm_param.h>
#include <mach/thread_info.h>
#include <mach/thread_status.h>
#include <mach/exception_types.h>

#include <kern/cpu_data.h>	/* for current_thread */
#include <kern/kern_types.h>

#include <ipc/ipc_types.h>

/*
 * Logically, a thread of control consists of two parts:
 *
 * + A thread_shuttle, which may migrate due to resource contention
 *
 * + A thread_activation, which remains attached to a task.
 *
 * The thread_shuttle contains scheduling info, accounting info,
 * and links to the thread_activation within which the shuttle is
 * currently operating.
 *
 * An activation always has a valid task pointer, and it is always constant.
 * The activation is only linked onto the task's activation list until
 * the activation is terminated.
 *
 * The thread holds a reference on the activation while using it.
 */
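
/*
 * Illustrative sketch (not part of this interface): with the fields
 * declared under MACH_KERNEL_PRIVATE below, the shuttle/activation
 * relationship described above can be walked roughly as follows.  Only
 * names declared in this file and in task.h are assumed.
 *
 *	thread_t     thread = current_thread();	// the shuttle
 *	thread_act_t act    = thread->top_act;	// "current" activation
 *	task_t       task   = act->task;	// constant for the activation's life
 */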

#include <sys/appleapiopts.h>

#ifdef	__APPLE_API_PRIVATE

#ifdef	MACH_KERNEL_PRIVATE

#include <cpus.h>
#include <cputypes.h>

#include <mach_assert.h>
#include <mach_host.h>
#include <mach_prof.h>
#include <mach_lock_mon.h>
#include <mach_ldebug.h>

#include <mach/port.h>
#include <kern/ast.h>
#include <kern/cpu_number.h>
#include <kern/queue.h>
#include <kern/time_out.h>
#include <kern/timer.h>
#include <kern/lock.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/thread_call.h>
#include <kern/timer_call.h>
#include <kern/task.h>
#include <kern/exception.h>
#include <kern/etap_macros.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_port.h>

#include <machine/thread.h>
#include <machine/thread_act.h>

struct thread {
	/*
	 *	NOTE:	The runq field in the thread structure has an unusual
	 *	locking protocol.  If its value is RUN_QUEUE_NULL, then it is
	 *	locked by the thread_lock, but if its value is something else
	 *	(i.e. a run_queue) then it is locked by that run_queue's lock.
	 *
	 *	Beginning of thread_shuttle proper.  When the thread is on
	 *	a wait queue, these first three fields are treated as an un-
	 *	official union with a wait_queue_element.  If you change
	 *	these, you must change that definition as well (wait_queue.h).
	 */
	/* Items examined often, modified infrequently */
	queue_chain_t		links;			/* run/wait queue links */
	run_queue_t		runq;			/* run queue thread is on SEE BELOW */
	wait_queue_t		wait_queue;		/* wait queue we are currently on */
	event64_t		wait_event;		/* wait queue event */
	thread_act_t		top_act;		/* "current" thr_act */
	uint32_t	/* Only set by thread itself */
			interrupt_level:2,		/* interrupts/aborts allowed */
			vm_privilege:1,			/* can use reserved memory? */
			active_callout:1,		/* an active callout */
			:0;


	/* Data updated during assert_wait/thread_wakeup */
	decl_simple_lock_data(,sched_lock)	/* scheduling lock (thread_lock()) */
	decl_simple_lock_data(,wake_lock)	/* covers wake_active (wake_lock()) */
	boolean_t		wake_active;		/* Someone is waiting for this */
	int			at_safe_point;		/* thread_abort_safely allowed */
	ast_t			reason;			/* why we blocked */
	wait_result_t		wait_result;		/* outcome of wait -
							 * may be examined by this thread
							 * WITHOUT locking */
	thread_roust_t		roust;			/* routine to roust it after wait */
	thread_continue_t	continuation;		/* resume here next dispatch */

	/* Data updated/used in thread_invoke */
	struct funnel_lock	*funnel_lock;		/* Non-reentrancy funnel */
	int			funnel_state;
#define	TH_FN_OWNED		0x1			/* we own the funnel */
#define	TH_FN_REFUNNEL		0x2			/* re-acquire funnel on dispatch */

	vm_offset_t		kernel_stack;		/* current kernel stack */
	vm_offset_t		reserved_stack;		/* reserved kernel stack */

	/* Thread state: */
	int			state;
/*
 *	Thread states [bits or'ed]
 */
#define	TH_WAIT			0x01			/* queued for waiting */
#define	TH_SUSP			0x02			/* stopped or requested to stop */
#define	TH_RUN			0x04			/* running or on runq */
#define	TH_UNINT		0x08			/* waiting uninterruptibly */
#define	TH_TERMINATE		0x10			/* halted at termination */

#define	TH_ABORT		0x20			/* abort interruptible waits */
#define	TH_ABORT_SAFELY		0x40			/* ... but only those at safe point */

#define	TH_IDLE			0x80			/* processor idle thread */

#define	TH_SCHED_STATE		(TH_WAIT|TH_SUSP|TH_RUN|TH_UNINT)

#define	TH_STACK_HANDOFF	0x0100			/* thread has no kernel stack */
#define	TH_STACK_ALLOC		0x0200			/* waiting for stack allocation */
#define	TH_STACK_STATE		(TH_STACK_HANDOFF | TH_STACK_ALLOC)
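
/*
 * Example (illustrative only): a thread that is running, or sitting on a
 * run queue, with no suspend/wait bits set satisfies
 *
 *	(thread->state & TH_SCHED_STATE) == TH_RUN
 *
 * while an uninterruptible sleep shows up as (TH_WAIT | TH_UNINT), and a
 * blocked thread whose stack was handed off additionally carries
 * TH_STACK_HANDOFF.
 */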

	/* Scheduling information */
	integer_t		sched_mode;		/* scheduling mode bits */
#define	TH_MODE_REALTIME	0x0001			/* time constraints supplied */
#define	TH_MODE_TIMESHARE	0x0002			/* use timesharing algorithm */
#define	TH_MODE_PREEMPT		0x0004			/* can preempt kernel contexts */
#define	TH_MODE_FAILSAFE	0x0008			/* fail-safe has tripped */
#define	TH_MODE_PROMOTED	0x0010			/* sched pri has been promoted */
#define	TH_MODE_FORCEDPREEMPT	0x0020			/* force setting of mode PREEMPT */
#define	TH_MODE_DEPRESS		0x0040			/* normal depress yield */
#define	TH_MODE_POLLDEPRESS	0x0080			/* polled depress yield */
#define	TH_MODE_ISDEPRESSED	(TH_MODE_DEPRESS | TH_MODE_POLLDEPRESS)

	integer_t		sched_pri;		/* scheduled (current) priority */
	integer_t		priority;		/* base priority */
	integer_t		max_priority;		/* max base priority */
	integer_t		task_priority;		/* copy of task base priority */

	integer_t		promotions;		/* level of promotion */
	integer_t		pending_promoter_index;
	void			*pending_promoter[2];

	integer_t		importance;		/* task-relative importance */

	/* real-time parameters */
	struct {					/* see mach/thread_policy.h */
		uint32_t		period;
		uint32_t		computation;
		uint32_t		constraint;
		boolean_t		preemptible;

		uint64_t		deadline;
	}			realtime;

	uint32_t		current_quantum;	/* duration of current quantum */

	/* Data used during setrun/dispatch */
	timer_data_t		system_timer;		/* system mode timer */
	processor_set_t		processor_set;		/* assigned processor set */
	processor_t		bound_processor;	/* bound to a processor? */
	processor_t		last_processor;		/* processor last dispatched on */
	uint64_t		last_switch;		/* time of last context switch */

	/* Fail-safe computation since last unblock or qualifying yield */
	uint64_t		computation_metered;
	uint64_t		computation_epoch;
	integer_t		safe_mode;		/* saved mode during fail-safe */
	natural_t		safe_release;		/* when to release fail-safe */

	/* Statistics and timesharing calculations */
	natural_t		sched_stamp;		/* when priority was updated */
	natural_t		cpu_usage;		/* exp. decaying cpu usage [%cpu] */
	natural_t		cpu_delta;		/* cpu usage since last update */
	natural_t		sched_usage;		/* load-weighted cpu usage [sched] */
	natural_t		sched_delta;		/* weighted cpu usage since update */
	natural_t		sleep_stamp;		/* when entered TH_WAIT state */

	/* Timing data structures */
	timer_data_t		user_timer;		/* user mode timer */
	timer_save_data_t	system_timer_save;	/* saved system timer value */
	timer_save_data_t	user_timer_save;	/* saved user timer value */

	/* Timed wait expiration */
	timer_call_data_t	wait_timer;
	integer_t		wait_timer_active;
	boolean_t		wait_timer_is_set;

	/* Priority depression expiration */
	timer_call_data_t	depress_timer;
	integer_t		depress_timer_active;

	/* Various bits of stashed state */
	union {
		struct {
			mach_msg_return_t	state;		/* receive state */
			ipc_object_t		object;		/* object received on */
			mach_msg_header_t	*msg;		/* receive buffer pointer */
			mach_msg_size_t		msize;		/* max size for recvd msg */
			mach_msg_option_t	option;		/* options for receive */
			mach_msg_size_t		slist_size;	/* scatter list size */
			struct ipc_kmsg		*kmsg;		/* received message */
			mach_port_seqno_t	seqno;		/* seqno of recvd message */
			mach_msg_continue_t	continuation;
		} receive;
		struct {
			struct semaphore	*waitsemaphore;		/* semaphore ref */
			struct semaphore	*signalsemaphore;	/* semaphore ref */
			int			options;		/* semaphore options */
			kern_return_t		result;			/* primary result */
			mach_msg_continue_t	continuation;
		} sema;
		struct {
			int			option;		/* switch option */
		} swtch;
		int			misc;			/* catch-all for other state */
	} saved;

	/* IPC data structures */
	struct ipc_kmsg_queue	ith_messages;
	mach_port_t		ith_mig_reply;		/* reply port for mig */
	mach_port_t		ith_rpc_reply;		/* reply port for kernel RPCs */

	/* Ast/Halt data structures */
	vm_offset_t		recover;		/* page fault recover(copyin/out) */
	int			ref_count;		/* number of references to me */

	/* Processor set info */
	queue_chain_t		pset_threads;		/* list of all threads in pset */
#if	MACH_HOST
	boolean_t		may_assign;		/* may assignment change? */
	boolean_t		assign_active;		/* waiting for may_assign */
#endif	/* MACH_HOST */

	/* Activation */
	queue_chain_t		task_threads;

	/*** Machine-dependent state ***/
	struct MachineThrAct	mact;

	/* Task membership */
	struct task		*task;
	vm_map_t		map;

	decl_mutex_data(,lock)
	int			act_ref_count;

	/* Associated shuttle */
	struct thread		*thread;

	/*
	 *	Next higher and next lower activation on
	 *	the thread's activation stack.
	 */
	struct thread		*higher, *lower;

	/* Kernel holds on this thread */
	int			suspend_count;

	/* User level suspensions */
	int			user_stop_count;

	/* Pending thread ast(s) */
	ast_t			ast;

	/* Miscellaneous bits guarded by lock mutex */
	uint32_t
		/* Indicates that the thread has not been terminated */
		active:1,

		/* Indicates that the thread has been started after creation */
		started:1,
		:0;

	/* Return Handlers */
	struct ReturnHandler {
		struct ReturnHandler	*next;
		void			(*handler)(
						struct ReturnHandler	*rh,
						struct thread		*act);
	} *handlers, special_handler;

	/* Ports associated with this thread */
	struct ipc_port		*ith_self;		/* not a right, doesn't hold ref */
	struct ipc_port		*ith_sself;		/* a send right */
	struct exception_action	exc_actions[EXC_TYPES_COUNT];

	/* Owned ulocks (a lock set element) */
	queue_head_t		held_ulocks;

#if	MACH_PROF
	/* Profiling */
	boolean_t		profiled;
	boolean_t		profiled_own;
	struct prof_data	*profil_buffer;
#endif	/* MACH_PROF */

#ifdef	MACH_BSD
	void			*uthread;
#endif

/* BEGIN TRACING/DEBUG */

#if	MACH_LOCK_MON
	unsigned		lock_stack;		/* number of locks held */
#endif	/* MACH_LOCK_MON */

#if	ETAP_EVENT_MONITOR
	int			etap_reason;		/* real reason why we blocked */
	boolean_t		etap_trace;		/* ETAP trace status */
#endif	/* ETAP_EVENT_MONITOR */

#if	MACH_LDEBUG
	/*
	 *	Debugging:  track acquired mutexes and locks.
	 *	Because a thread can block while holding such
	 *	synchronizers, we think of the thread as
	 *	"owning" them.
	 */
#define	MUTEX_STACK_DEPTH	20
#define	LOCK_STACK_DEPTH	20
	mutex_t			*mutex_stack[MUTEX_STACK_DEPTH];
	lock_t			*lock_stack[LOCK_STACK_DEPTH];
	unsigned int		mutex_stack_index;
	unsigned int		lock_stack_index;
	unsigned		mutex_count;		/* XXX to be deleted XXX */
#endif	/* MACH_LDEBUG */
/* END TRACING/DEBUG */

};

#define	ith_state		saved.receive.state
#define	ith_object		saved.receive.object
#define	ith_msg			saved.receive.msg
#define	ith_msize		saved.receive.msize
#define	ith_option		saved.receive.option
#define	ith_scatter_list_size	saved.receive.slist_size
#define	ith_continuation	saved.receive.continuation
#define	ith_kmsg		saved.receive.kmsg
#define	ith_seqno		saved.receive.seqno

#define	sth_waitsemaphore	saved.sema.waitsemaphore
#define	sth_signalsemaphore	saved.sema.signalsemaphore
#define	sth_options		saved.sema.options
#define	sth_result		saved.sema.result
#define	sth_continuation	saved.sema.continuation

extern void		thread_bootstrap(void);

extern void		thread_init(void);

extern void		thread_reaper_init(void);

extern void		thread_reference(
				thread_t	thread);

extern void		thread_deallocate(
				thread_t	thread);

extern void		thread_terminate_self(void);

extern void		thread_hold(
				thread_act_t	thread);

extern void		thread_release(
				thread_act_t	thread);

#define	thread_lock_init(th)	simple_lock_init(&(th)->sched_lock, ETAP_THREAD_LOCK)
#define	thread_lock(th)		simple_lock(&(th)->sched_lock)
#define	thread_unlock(th)	simple_unlock(&(th)->sched_lock)
#define	thread_lock_try(th)	simple_lock_try(&(th)->sched_lock)
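
/*
 * Hedged sketch of the runq locking protocol noted in struct thread above:
 * the thread lock is enough to see whether runq is RUN_QUEUE_NULL, but a
 * non-NULL value is owned by that run queue's lock.  The real manipulation
 * lives in the scheduler; this only illustrates the ordering.
 *
 *	thread_lock(thread);
 *	if (thread->runq == RUN_QUEUE_NULL) {
 *		// not on a run queue; field is protected by the thread lock
 *	} else {
 *		// on a run queue; that run queue's lock must be taken
 *		// before the field may be changed
 *	}
 *	thread_unlock(thread);
 */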

#define	thread_should_halt_fast(thread)		\
	(!(thread)->top_act || !(thread)->top_act->active)

#define	thread_reference_locked(thread)	((thread)->ref_count++)

#define	wake_lock_init(th)	\
			simple_lock_init(&(th)->wake_lock, ETAP_THREAD_WAKE)
#define	wake_lock(th)		simple_lock(&(th)->wake_lock)
#define	wake_unlock(th)		simple_unlock(&(th)->wake_lock)
#define	wake_lock_try(th)	simple_lock_try(&(th)->wake_lock)

extern vm_offset_t	stack_alloc(
				thread_t	thread,
				void		(*start)(thread_t));

extern boolean_t	stack_alloc_try(
				thread_t	thread,
				void		(*start)(thread_t));

extern void		stack_free(
				thread_t	thread);

extern void		stack_free_stack(
				vm_offset_t	stack);

extern void		stack_collect(void);

extern kern_return_t	thread_setstatus(
				thread_act_t		thread,
				int			flavor,
				thread_state_t		tstate,
				mach_msg_type_number_t	count);

extern kern_return_t	thread_getstatus(
				thread_act_t		thread,
				int			flavor,
				thread_state_t		tstate,
				mach_msg_type_number_t	*count);

extern kern_return_t	thread_info_shuttle(
				thread_act_t		thread,
				thread_flavor_t		flavor,
				thread_info_t		thread_info_out,
				mach_msg_type_number_t	*thread_info_count);

extern void		thread_task_priority(
				thread_t	thread,
				integer_t	priority,
				integer_t	max_priority);

extern kern_return_t	thread_get_special_port(
				thread_act_t	thread,
				int		which,
				ipc_port_t	*port);

extern kern_return_t	thread_set_special_port(
				thread_act_t	thread,
				int		which,
				ipc_port_t	port);

extern thread_act_t	switch_act(
				thread_act_t	act);

extern thread_t		kernel_thread_create(
				void		(*start)(void),
				integer_t	priority);

extern thread_t		kernel_thread_with_priority(
				void		(*start)(void),
				integer_t	priority);

extern void		machine_stack_attach(
				thread_t	thread,
				vm_offset_t	stack,
				void		(*start)(thread_t));

extern vm_offset_t	machine_stack_detach(
				thread_t	thread);

extern void		machine_stack_handoff(
				thread_t	old,
				thread_t	new);

extern thread_t		machine_switch_context(
				thread_t		old_thread,
				thread_continue_t	continuation,
				thread_t		new_thread);

extern void		machine_load_context(
				thread_t	thread);

extern void		machine_switch_act(
				thread_t	thread,
				thread_act_t	old,
				thread_act_t	new);

extern kern_return_t	machine_thread_set_state(
				thread_act_t		act,
				thread_flavor_t		flavor,
				thread_state_t		state,
				mach_msg_type_number_t	count);

extern kern_return_t	machine_thread_get_state(
				thread_act_t		act,
				thread_flavor_t		flavor,
				thread_state_t		state,
				mach_msg_type_number_t	*count);

extern kern_return_t	machine_thread_dup(
				thread_act_t	self,
				thread_act_t	target);

extern void		machine_thread_init(void);

extern kern_return_t	machine_thread_create(
				thread_t	thread,
				task_t		task);

extern void		machine_thread_destroy(
				thread_t	thread);

extern void		machine_thread_set_current(
				thread_t	thread);

extern void		machine_thread_terminate_self(void);

/*
 * XXX Funnel locks XXX
 */

struct funnel_lock {
	int		fnl_type;		/* funnel type */
	mutex_t		*fnl_mutex;		/* underlying mutex for the funnel */
	void		*fnl_mtxholder;		/* thread (last) holding mutex */
	void		*fnl_mtxrelease;	/* thread (last) releasing mutex */
	mutex_t		*fnl_oldmutex;		/* Mutex before collapsing split funnel */
};

typedef struct funnel_lock	funnel_t;

extern void		funnel_lock(
				funnel_t	*lock);

extern void		funnel_unlock(
				funnel_t	*lock);

typedef struct ReturnHandler	ReturnHandler;

#define	act_lock(act)		mutex_lock(&(act)->lock)
#define	act_lock_try(act)	mutex_try(&(act)->lock)
#define	act_unlock(act)		mutex_unlock(&(act)->lock)

#define	act_reference_locked(act)	\
	MACRO_BEGIN			\
	(act)->act_ref_count++;		\
	MACRO_END

#define	act_deallocate_locked(act)		\
	MACRO_BEGIN				\
	if (--(act)->act_ref_count == 0)	\
		panic("act_deallocate_locked");	\
	MACRO_END
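
/*
 * Illustrative sketch: the *_locked() forms above assume the activation's
 * lock mutex is already held, e.g.
 *
 *	act_lock(act);
 *	act_reference_locked(act);	// bump act_ref_count under the lock
 *	act_unlock(act);
 *
 * The act_reference()/act_deallocate() declarations below are the forms
 * to use when the lock is not already held.
 */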

extern void		act_reference(
				thread_act_t	act);

extern void		act_deallocate(
				thread_act_t	act);

extern void		act_attach(
				thread_act_t	act,
				thread_t	thread);

extern void		act_detach(
				thread_act_t	act);

extern thread_t		act_lock_thread(
				thread_act_t	act);

extern void		act_unlock_thread(
				thread_act_t	act);

extern thread_act_t	thread_lock_act(
				thread_t	thread);

extern void		thread_unlock_act(
				thread_t	thread);

extern void		act_execute_returnhandlers(void);

extern void		install_special_handler(
				thread_act_t	thread);

extern void		special_handler(
				ReturnHandler	*rh,
				thread_act_t	act);

#else	/* MACH_KERNEL_PRIVATE */

typedef struct funnel_lock	funnel_t;

extern boolean_t	thread_should_halt(
				thread_t	thread);

extern void		act_reference(
				thread_act_t	act);

extern void		act_deallocate(
				thread_act_t	act);

#endif	/* MACH_KERNEL_PRIVATE */

extern thread_t		kernel_thread(
				task_t		task,
				void		(*start)(void));

extern void		thread_set_cont_arg(
				int		arg);

extern int		thread_get_cont_arg(void);

/* JMM - These are only temporary */
extern boolean_t	is_thread_running(thread_act_t);	/* True if TH_RUN */
extern boolean_t	is_thread_idle(thread_t);		/* True if TH_IDLE */
extern kern_return_t	get_thread_waitresult(thread_t);

typedef void	(thread_apc_handler_t)(thread_act_t);

extern kern_return_t	thread_apc_set(thread_act_t, thread_apc_handler_t);
extern kern_return_t	thread_apc_clear(thread_act_t, thread_apc_handler_t);

extern vm_map_t		swap_act_map(thread_act_t, vm_map_t);

extern void		*get_bsdthread_info(thread_act_t);
extern void		set_bsdthread_info(thread_act_t, void *);
extern task_t		get_threadtask(thread_act_t);

#endif	/* __APPLE_API_PRIVATE */

#ifdef	__APPLE_API_UNSTABLE

#if	!defined(MACH_KERNEL_PRIVATE)

extern thread_act_t	current_act(void);

#endif	/* MACH_KERNEL_PRIVATE */

#endif	/* __APPLE_API_UNSTABLE */

#ifdef	__APPLE_API_EVOLVING

/*
 * XXX Funnel locks XXX
 */

#define	THR_FUNNEL_NULL	(funnel_t *)0

extern funnel_t		*funnel_alloc(
				int	type);

extern funnel_t		*thread_funnel_get(void);

extern boolean_t	thread_funnel_set(
				funnel_t	*lock,
				boolean_t	funneled);

extern boolean_t	thread_funnel_merge(
				funnel_t	*lock,
				funnel_t	*other);
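
/*
 * Typical drop/re-acquire pattern (illustrative sketch; uses only the
 * declarations above and is not a prescribed sequence):
 *
 *	funnel_t *f = thread_funnel_get();	// THR_FUNNEL_NULL if none held
 *	if (f != THR_FUNNEL_NULL)
 *		(void) thread_funnel_set(f, FALSE);	// drop before blocking
 *	// ... funnel-free work or a long block ...
 *	if (f != THR_FUNNEL_NULL)
 *		(void) thread_funnel_set(f, TRUE);	// re-acquire on the way out
 */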

#endif	/* __APPLE_API_EVOLVING */

#ifdef	__APPLE_API_PRIVATE

extern boolean_t	refunnel_hint(
				thread_t	thread,
				wait_result_t	wresult);

/* For use by CHUD */
vm_offset_t min_valid_stack_address(void);
vm_offset_t max_valid_stack_address(void);

#endif	/* __APPLE_API_PRIVATE */

#endif	/* _KERN_THREAD_H_ */