/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr.
 *
 *	This file contains the structure definitions for threads.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 */
78 #ifndef _KERN_THREAD_H_
79 #define _KERN_THREAD_H_
81 #include <mach/kern_return.h>
82 #include <mach/mach_types.h>
83 #include <mach/message.h>
84 #include <mach/boolean.h>
85 #include <mach/vm_types.h>
86 #include <mach/vm_prot.h>
87 #include <mach/thread_info.h>
88 #include <mach/thread_status.h>
90 #include <kern/cpu_data.h> /* for current_thread */
91 #include <kern/kern_types.h>
93 #include <ipc/ipc_types.h>
/*
 *	Logically, a thread of control consists of two parts:
 *
 *	a thread_shuttle, which may migrate due to resource contention
 *
 *	a thread_activation, which remains attached to a task.
 *
 *	The thread_shuttle contains scheduling info, accounting info,
 *	and links to the thread_activation within which the shuttle is
 *	currently operating.
 *
 *	It might make sense to have the thread_shuttle be a proper sub-structure
 *	of the thread, with the thread containing links to both the shuttle and
 *	activation.  In order to reduce the scope and complexity of source
 *	changes and the overhead of maintaining these linkages, we have subsumed
 *	the shuttle into the thread, calling it a thread_shuttle.
 *
 *	User accesses to threads always come in via the user's thread port,
 *	which gets translated to a pointer to the target thread_activation.
 */
115 #include <sys/appleapiopts.h>
117 #ifdef __APPLE_API_PRIVATE
119 #ifdef MACH_KERNEL_PRIVATE
122 #include <hw_footprint.h>
123 #include <mach_host.h>
124 #include <mach_prof.h>
125 #include <mach_lock_mon.h>
126 #include <mach_ldebug.h>
128 #include <mach/port.h>
129 #include <kern/ast.h>
130 #include <kern/cpu_number.h>
131 #include <kern/queue.h>
132 #include <kern/time_out.h>
133 #include <kern/timer.h>
134 #include <kern/lock.h>
135 #include <kern/sched.h>
136 #include <kern/sched_prim.h>
137 #include <kern/thread_call.h>
138 #include <kern/timer_call.h>
139 #include <kern/task.h>
140 #include <ipc/ipc_kmsg.h>
141 #include <machine/thread.h>
/*
 *	Kernel accesses intended to effect the entire thread, typically use
 *	a pointer to the thread_shuttle (current_thread()) as the target of
 *	their operations.  This makes sense given that we have subsumed the
 *	shuttle into the thread_shuttle, eliminating one set of linkages.
 *	Operations effecting only the shuttle may use a thread_shuttle_t
 *	to indicate this.
 *
 *	The current_act() macro returns a pointer to the current thread_act, while
 *	the current_thread() macro returns a pointer to the currently active
 *	thread_shuttle (representing the thread in its entirety).
 */
155 struct thread_shuttle
{
157 * NOTE: The runq field in the thread structure has an unusual
158 * locking protocol. If its value is RUN_QUEUE_NULL, then it is
159 * locked by the thread_lock, but if its value is something else
160 * (i.e. a run_queue) then it is locked by that run_queue's lock.
162 * Beginning of thread_shuttle proper. When the thread is on
163 * a wait queue, these first three fields are treated as an un-
164 * official union with a wait_queue_element. If you change
165 * these, you must change that definition as well (wait_queue.h).
167 /* Items examined often, modified infrequently */
168 queue_chain_t links
; /* run/wait queue links */
169 run_queue_t runq
; /* run queue thread is on SEE BELOW */
170 wait_queue_t wait_queue
; /* wait queue we are currently on */
171 event64_t wait_event
; /* wait queue event */
172 thread_act_t top_act
; /* "current" thr_act */
173 uint32_t /* Only set by thread itself */
174 interrupt_level
:2, /* interrupts/aborts allowed */
175 vm_privilege
:1, /* can use reserved memory? */
176 active_callout
:1, /* an active callout */
180 /* Data updated during assert_wait/thread_wakeup */
181 decl_simple_lock_data(,lock
) /* scheduling lock (thread_lock()) */
182 decl_simple_lock_data(,wake_lock
) /* covers wake_active (wake_lock())*/
183 boolean_t wake_active
; /* Someone is waiting for this */
184 int at_safe_point
; /* thread_abort_safely allowed */
185 ast_t reason
; /* why we blocked */
186 wait_result_t wait_result
; /* outcome of wait -
187 * may be examined by this thread
189 thread_roust_t roust
; /* routine to roust it after wait */
190 thread_continue_t continuation
; /* resume here next dispatch */
192 /* Data updated/used in thread_invoke */
193 struct funnel_lock
*funnel_lock
; /* Non-reentrancy funnel */
195 #define TH_FN_OWNED 0x1 /* we own the funnel */
196 #define TH_FN_REFUNNEL 0x2 /* re-acquire funnel on dispatch */
198 vm_offset_t kernel_stack
; /* current kernel stack */
199 vm_offset_t stack_privilege
; /* reserved kernel stack */
204 * Thread states [bits or'ed]
206 #define TH_WAIT 0x01 /* thread is queued for waiting */
207 #define TH_SUSP 0x02 /* thread has been asked to stop */
208 #define TH_RUN 0x04 /* thread is running or on runq */
209 #define TH_UNINT 0x08 /* thread is waiting uninteruptibly */
210 #define TH_TERMINATE 0x10 /* thread is halting at termination */
212 #define TH_ABORT 0x20 /* abort interruptible waits */
213 #define TH_ABORT_SAFELY 0x40 /* ... but only those at safe point */
215 #define TH_IDLE 0x80 /* thread is an idle thread */
217 #define TH_SCHED_STATE (TH_WAIT|TH_SUSP|TH_RUN|TH_UNINT)
219 #define TH_STACK_HANDOFF 0x0100 /* thread has no kernel stack */
220 #define TH_STACK_ALLOC 0x0200 /* waiting for stack allocation */
221 #define TH_STACK_STATE (TH_STACK_HANDOFF | TH_STACK_ALLOC)
223 /* Scheduling information */
224 integer_t sched_mode
; /* scheduling mode bits */
225 #define TH_MODE_REALTIME 0x0001 /* time constraints supplied */
226 #define TH_MODE_TIMESHARE 0x0002 /* use timesharing algorithm */
227 #define TH_MODE_PREEMPT 0x0004 /* can preempt kernel contexts */
228 #define TH_MODE_FAILSAFE 0x0008 /* fail-safe has tripped */
229 #define TH_MODE_PROMOTED 0x0010 /* sched pri has been promoted */
230 #define TH_MODE_FORCEDPREEMPT 0x0020 /* force setting of mode PREEMPT */
231 #define TH_MODE_DEPRESS 0x0040 /* normal depress yield */
232 #define TH_MODE_POLLDEPRESS 0x0080 /* polled depress yield */
233 #define TH_MODE_ISDEPRESSED (TH_MODE_DEPRESS | TH_MODE_POLLDEPRESS)
235 integer_t sched_pri
; /* scheduled (current) priority */
236 integer_t priority
; /* base priority */
237 integer_t max_priority
; /* max base priority */
238 integer_t task_priority
; /* copy of task base priority */
240 integer_t promotions
; /* level of promotion */
241 integer_t pending_promoter_index
;
242 void *pending_promoter
[2];
244 integer_t importance
; /* task-relative importance */
246 /* time constraint parameters */
247 struct { /* see mach/thread_policy.h */
249 uint32_t computation
;
251 boolean_t preemptible
;
254 uint32_t current_quantum
; /* duration of current quantum */
256 /* Data used during setrun/dispatch */
257 timer_data_t system_timer
; /* system mode timer */
258 processor_set_t processor_set
; /* assigned processor set */
259 processor_t bound_processor
; /* bound to a processor? */
260 processor_t last_processor
; /* processor last dispatched on */
261 uint64_t last_switch
; /* time of last context switch */
263 /* Fail-safe computation since last unblock or qualifying yield */
264 uint64_t computation_metered
;
265 uint64_t computation_epoch
;
266 integer_t safe_mode
; /* saved mode during fail-safe */
267 natural_t safe_release
; /* when to release fail-safe */
269 /* Used in priority computations */
270 natural_t sched_stamp
; /* when priority was updated */
271 natural_t cpu_usage
; /* exp. decaying cpu usage [%cpu] */
272 natural_t cpu_delta
; /* cpu usage since last update */
273 natural_t sched_usage
; /* load-weighted cpu usage [sched] */
274 natural_t sched_delta
; /* weighted cpu usage since update */
275 natural_t sleep_stamp
; /* when entered TH_WAIT state */
277 /* Timing data structures */
278 timer_data_t user_timer
; /* user mode timer */
279 timer_save_data_t system_timer_save
; /* saved system timer value */
280 timer_save_data_t user_timer_save
; /* saved user timer value */
282 /* Timed wait expiration */
283 timer_call_data_t wait_timer
;
284 integer_t wait_timer_active
;
285 boolean_t wait_timer_is_set
;
287 /* Priority depression expiration */
288 timer_call_data_t depress_timer
;
289 integer_t depress_timer_active
;
291 /* Various bits of stashed state */
294 mach_msg_return_t state
; /* receive state */
295 ipc_object_t object
; /* object received on */
296 mach_msg_header_t
*msg
; /* receive buffer pointer */
297 mach_msg_size_t msize
; /* max size for recvd msg */
298 mach_msg_option_t option
; /* options for receive */
299 mach_msg_size_t slist_size
; /* scatter list size */
300 struct ipc_kmsg
*kmsg
; /* received message */
301 mach_port_seqno_t seqno
; /* seqno of recvd message */
302 mach_msg_continue_t continuation
;
305 struct semaphore
*waitsemaphore
; /* semaphore ref */
306 struct semaphore
*signalsemaphore
; /* semaphore ref */
307 int options
; /* semaphore options */
308 kern_return_t result
; /* primary result */
309 mach_msg_continue_t continuation
;
312 int option
; /* switch option */
314 int misc
; /* catch-all for other state */
317 /* IPC data structures */
318 struct ipc_kmsg_queue ith_messages
;
319 mach_port_t ith_mig_reply
; /* reply port for mig */
320 mach_port_t ith_rpc_reply
; /* reply port for kernel RPCs */
322 /* Ast/Halt data structures */
323 boolean_t active
; /* thread is active */
324 vm_offset_t recover
; /* page fault recover(copyin/out) */
325 int ref_count
; /* number of references to me */
327 /* Processor set info */
328 queue_chain_t pset_threads
; /* list of all shuttles in pset */
330 boolean_t may_assign
; /* may assignment change? */
331 boolean_t assign_active
; /* waiting for may_assign */
332 #endif /* MACH_HOST */
334 /* BEGIN TRACING/DEBUG */
337 unsigned lock_stack
; /* number of locks held */
338 #endif /* MACH_LOCK_MON */
340 #if ETAP_EVENT_MONITOR
341 int etap_reason
; /* real reason why we blocked */
342 boolean_t etap_trace
; /* ETAP trace status */
343 #endif /* ETAP_EVENT_MONITOR */
347 * Debugging: track acquired mutexes and locks.
348 * Because a thread can block while holding such
349 * synchronizers, we think of the thread as
352 #define MUTEX_STACK_DEPTH 20
353 #define LOCK_STACK_DEPTH 20
354 mutex_t
*mutex_stack
[MUTEX_STACK_DEPTH
];
355 lock_t
*lock_stack
[LOCK_STACK_DEPTH
];
356 unsigned int mutex_stack_index
;
357 unsigned int lock_stack_index
;
358 unsigned mutex_count
; /* XXX to be deleted XXX */
359 #endif /* MACH_LDEBUG */
360 /* END TRACING/DEBUG */
/* Shorthand accessors for the thread's stashed receive state (saved.receive). */
#define ith_state		saved.receive.state
#define ith_object		saved.receive.object
#define ith_msg			saved.receive.msg
#define ith_msize		saved.receive.msize
#define ith_option		saved.receive.option
#define ith_scatter_list_size	saved.receive.slist_size
#define ith_continuation	saved.receive.continuation
#define ith_kmsg		saved.receive.kmsg
#define ith_seqno		saved.receive.seqno

/* Shorthand accessors for the thread's stashed semaphore state (saved.sema). */
#define sth_waitsemaphore	saved.sema.waitsemaphore
#define sth_signalsemaphore	saved.sema.signalsemaphore
#define sth_options		saved.sema.options
#define sth_result		saved.sema.result
#define sth_continuation	saved.sema.continuation
381 int fnl_type
; /* funnel type */
382 mutex_t
*fnl_mutex
; /* underlying mutex for the funnel */
383 void * fnl_mtxholder
; /* thread (last)holdng mutex */
384 void * fnl_mtxrelease
; /* thread (last)releasing mutex */
385 mutex_t
*fnl_oldmutex
; /* Mutex before collapsing split funnel */
388 typedef struct funnel_lock funnel_t
;
390 extern thread_act_t active_kloaded
[NCPUS
]; /* "" kernel-loaded acts */
391 extern vm_offset_t active_stacks
[NCPUS
]; /* active kernel stacks */
392 extern vm_offset_t kernel_stack
[NCPUS
];
394 extern struct thread_shuttle pageout_thread
;
#ifndef	MACHINE_STACK_STASH
/*
 *	MD Macro to fill up global stack state,
 *	keeping the MD structure sizes + games private.
 *
 *	NOTE(review): the statement-wrapper lines of this macro were lost;
 *	restored as do { } while (0) so the final line carries no trailing
 *	backslash (which previously swallowed the #endif below).
 */
#define MACHINE_STACK_STASH(stack)					\
do {									\
	mp_disable_preemption();					\
	active_stacks[cpu_number()] = (stack);				\
	kernel_stack[cpu_number()] = (stack) + KERNEL_STACK_SIZE;	\
	mp_enable_preemption();						\
} while (0)
#endif	/* MACHINE_STACK_STASH */
411 * Kernel-only routines
414 /* Initialize thread module */
415 extern void thread_init(void);
417 /* Take reference on thread (make sure it doesn't go away) */
418 extern void thread_reference(
421 /* Release reference on thread */
422 extern void thread_deallocate(
425 /* Set task priority of member thread */
426 extern void thread_task_priority(
429 integer_t max_priority
);
431 /* Start a thread at specified routine */
432 #define thread_start(thread, start) \
433 (thread)->continuation = (start)
435 /* Reaps threads waiting to be destroyed */
436 extern void thread_reaper_init(void);
439 /* Insure thread always has a kernel stack */
440 extern void stack_privilege(
443 extern void consider_thread_collect(void);
446 * Arguments to specify aggressiveness to thread halt.
447 * Can't have MUST_HALT and SAFELY at the same time.
449 #define THREAD_HALT_NORMAL 0
450 #define THREAD_HALT_MUST_HALT 1 /* no deadlock checks */
451 #define THREAD_HALT_SAFELY 2 /* result must be restartable */
/*
 *	Macro-defined routines
 */
#define thread_pcb(th)			((th)->pcb)

#define thread_lock_init(th)	simple_lock_init(&(th)->lock, ETAP_THREAD_LOCK)
#define thread_lock(th)			simple_lock(&(th)->lock)
#define thread_unlock(th)		simple_unlock(&(th)->lock)
#define thread_lock_try(th)		simple_lock_try(&(th)->lock)

#define thread_should_halt_fast(thread)	\
	(!(thread)->top_act || !(thread)->top_act->active)

#define thread_should_halt(thread)	thread_should_halt_fast(thread)

#define thread_reference_locked(thread)	((thread)->ref_count++)

/*
 *	Lock to cover wake_active only; like thread_lock(), is taken
 *	at splsched().  Used to avoid calling into scheduler with a
 *	thread_lock() held.  Precedes thread_lock() (and other scheduling-
 *	related locks) in the system lock ordering.
 */
#define wake_lock_init(th)		\
			simple_lock_init(&(th)->wake_lock, ETAP_THREAD_WAKE)
#define wake_lock(th)			simple_lock(&(th)->wake_lock)
#define wake_unlock(th)			simple_unlock(&(th)->wake_lock)
#define wake_lock_try(th)		simple_lock_try(&(th)->wake_lock)
483 static __inline__ vm_offset_t
current_stack(void);
484 static __inline__ vm_offset_t
489 mp_disable_preemption();
490 ret
= active_stacks
[cpu_number()];
491 mp_enable_preemption();
495 extern void pcb_module_init(void);
497 extern void pcb_init(
498 thread_act_t thr_act
);
500 extern void pcb_terminate(
501 thread_act_t thr_act
);
503 extern void pcb_collect(
504 thread_act_t thr_act
);
506 extern void pcb_user_to_kernel(
507 thread_act_t thr_act
);
509 extern kern_return_t
thread_setstatus(
510 thread_act_t thr_act
,
512 thread_state_t tstate
,
513 mach_msg_type_number_t count
);
515 extern kern_return_t
thread_getstatus(
516 thread_act_t thr_act
,
518 thread_state_t tstate
,
519 mach_msg_type_number_t
*count
);
521 extern boolean_t
stack_alloc_try(
523 void (*start_pos
)(thread_t
));
525 /* This routine now used only internally */
526 extern kern_return_t
thread_info_shuttle(
527 thread_act_t thr_act
,
528 thread_flavor_t flavor
,
529 thread_info_t thread_info_out
,
530 mach_msg_type_number_t
*thread_info_count
);
532 /* Machine-dependent routines */
533 extern void thread_machine_init(void);
535 extern void thread_machine_set_current(
538 extern kern_return_t
thread_machine_create(
540 thread_act_t thr_act
,
541 void (*start_pos
)(thread_t
));
543 extern void thread_set_syscall_return(
545 kern_return_t retval
);
547 extern void thread_machine_destroy(
550 extern void thread_machine_flush(
551 thread_act_t thr_act
);
553 extern thread_t
kernel_thread_with_priority(
557 boolean_t alloc_stack
,
558 boolean_t start_running
);
560 extern void thread_terminate_self(void);
562 extern void funnel_lock(funnel_t
*);
564 extern void funnel_unlock(funnel_t
*);
566 #else /* MACH_KERNEL_PRIVATE */
568 typedef struct funnel_lock funnel_t
;
570 extern boolean_t
thread_should_halt(thread_t
);
572 #endif /* MACH_KERNEL_PRIVATE */
574 extern thread_t
kernel_thread(
576 void (*start
)(void));
578 extern void thread_set_cont_arg(int);
580 extern int thread_get_cont_arg(void);
582 /* JMM - These are only temporary */
583 extern boolean_t
is_thread_running(thread_act_t
); /* True is TH_RUN */
584 extern boolean_t
is_thread_idle(thread_t
); /* True is TH_IDLE */
585 extern kern_return_t
get_thread_waitresult(thread_t
);
587 #endif /* __APPLE_API_PRIVATE */
589 #ifdef __APPLE_API_EVOLVING
591 #define THR_FUNNEL_NULL (funnel_t *)0
593 extern funnel_t
* funnel_alloc(int);
595 extern funnel_t
* thread_funnel_get(void);
597 extern boolean_t
thread_funnel_set(funnel_t
* fnl
, boolean_t funneled
);
599 extern boolean_t
thread_funnel_merge(funnel_t
* fnl
, funnel_t
* otherfnl
);
601 #endif /* __APPLE_API_EVOLVING */
603 #endif /* _KERN_THREAD_H_ */