/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr.
 *
 *	This file contains the structure definitions for threads.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 */
78 #ifndef _KERN_THREAD_H_
79 #define _KERN_THREAD_H_
81 #include <mach/kern_return.h>
82 #include <mach/mach_types.h>
83 #include <mach/message.h>
84 #include <mach/boolean.h>
85 #include <mach/vm_types.h>
86 #include <mach/vm_prot.h>
87 #include <mach/thread_info.h>
88 #include <mach/thread_status.h>
89 #include <kern/cpu_data.h> /* for current_thread */
90 #include <kern/kern_types.h>
/*
 * Logically, a thread of control consists of two parts:
 * a thread_shuttle, which may migrate during an RPC, and
 * a thread_activation, which remains attached to a task.
 * The thread_shuttle is the larger portion of the two-part thread,
 * and contains scheduling info, messaging support, accounting info,
 * and links to the thread_activation within which the shuttle is
 * currently operating.
 *
 * It might make sense to have the thread_shuttle be a proper sub-structure
 * of the thread, with the thread containing links to both the shuttle and
 * activation.  In order to reduce the scope and complexity of source
 * changes and the overhead of maintaining these linkages, we have subsumed
 * the shuttle into the thread, calling it a thread_shuttle.
 *
 * User accesses to threads always come in via the user's thread port,
 * which gets translated to a pointer to the target thread_activation.
 * Kernel accesses intended to effect the entire thread, typically use
 * a pointer to the thread_shuttle (current_thread()) as the target of
 * their operations.  This makes sense given that we have subsumed the
 * shuttle into the thread_shuttle, eliminating one set of linkages.
 * Operations effecting only the shuttle may use a thread_shuttle_t
 * to indicate this.
 *
 * The current_act() macro returns a pointer to the current thread_act, while
 * the current_thread() macro returns a pointer to the currently active
 * thread_shuttle (representing the thread in its entirety).
 */
/*
 * Possible results of thread_block - returned in
 * current_thread()->wait_result.
 */
#define THREAD_AWAKENED		0	/* normal wakeup */
#define THREAD_TIMED_OUT	1	/* timeout expired */
#define THREAD_INTERRUPTED	2	/* interrupted by clear_wait */
#define THREAD_RESTART		3	/* restart operation entirely */

/*
 * Interruptible flags for assert_wait
 */
#define THREAD_UNINT		0	/* not interruptible */
#define THREAD_INTERRUPTIBLE	1	/* may not be restartable */
#define THREAD_ABORTSAFE	2	/* abortable safely */
138 #ifdef MACH_KERNEL_PRIVATE
140 #include <hw_footprint.h>
141 #include <mach_host.h>
142 #include <mach_prof.h>
143 #include <mach_lock_mon.h>
144 #include <mach_ldebug.h>
146 #include <mach/port.h>
147 #include <kern/ast.h>
148 #include <kern/cpu_number.h>
149 #include <kern/queue.h>
150 #include <kern/time_out.h>
151 #include <kern/timer.h>
152 #include <kern/lock.h>
153 #include <kern/sched.h>
154 #include <kern/sched_prim.h>
155 #include <kern/thread_pool.h>
156 #include <kern/thread_call.h>
157 #include <kern/timer_call.h>
158 #include <kern/task.h>
159 #include <ipc/ipc_kmsg.h>
160 #include <machine/thread.h>
163 int fnl_type
; /* funnel type */
164 mutex_t
* fnl_mutex
; /* underlying mutex for the funnel */
165 void * fnl_mtxholder
; /* thread (last)holdng mutex */
166 void * fnl_mtxrelease
; /* thread (last)releasing mutex */
167 mutex_t
* fnl_oldmutex
; /* Mutex before collapsing split funnel */
171 typedef struct thread_shuttle
{
173 * Beginning of thread_shuttle proper. When the thread is on
174 * a wait queue, these three fields are in treated as an un-
175 * official union with a wait_queue_element. If you change
176 * these, you must change that definition as well.
178 queue_chain_t links
; /* current run/wait queue links */
179 run_queue_t runq
; /* run queue p is on SEE BELOW */
180 int whichq
; /* which queue level p is on */
183 * NOTE: The runq field in the thread structure has an unusual
184 * locking protocol. If its value is RUN_QUEUE_NULL, then it is
185 * locked by the thread_lock, but if its value is something else
186 * (i.e. a run_queue) then it is locked by that run_queue's lock.
189 /* Thread bookkeeping */
190 queue_chain_t pset_threads
; /* list of all shuttles in proc set */
192 /* Self-preservation */
193 decl_simple_lock_data(,lock
) /* scheduling lock (thread_lock()) */
194 decl_simple_lock_data(,wake_lock
) /* covers wake_active (wake_lock())*/
195 decl_mutex_data(,rpc_lock
) /* RPC lock (rpc_lock()) */
196 int ref_count
; /* number of references to me */
198 vm_offset_t kernel_stack
; /* accurate only if the thread is
199 not swapped and not executing */
201 vm_offset_t stack_privilege
;/* reserved kernel stack */
203 /* Blocking information */
204 int reason
; /* why we blocked */
205 event_t wait_event
; /* event we are waiting on */
206 kern_return_t wait_result
; /* outcome of wait -
207 may be examined by this thread
209 wait_queue_t wait_queue
; /* wait queue we are currently on */
210 queue_chain_t wait_link
; /* event's wait queue link */
211 boolean_t wake_active
; /* Someone is waiting for this
212 thread to become suspended */
213 int state
; /* Thread state: */
214 boolean_t preempt
; /* Thread is undergoing preemption */
215 boolean_t interruptible
; /* Thread is interruptible */
217 #if ETAP_EVENT_MONITOR
218 int etap_reason
; /* real reason why we blocked */
219 boolean_t etap_trace
; /* ETAP trace status */
220 #endif /* ETAP_EVENT_MONITOR */
223 * Thread states [bits or'ed]
225 #define TH_WAIT 0x01 /* thread is queued for waiting */
226 #define TH_SUSP 0x02 /* thread has been asked to stop */
227 #define TH_RUN 0x04 /* thread is running or on runq */
228 #define TH_UNINT 0x08 /* thread is waiting uninteruptibly */
229 #define TH_HALTED 0x10 /* thread is halted at clean point ? */
231 #define TH_ABORT 0x20 /* abort interruptible waits */
232 #define TH_SWAPPED_OUT 0x40 /* thread is swapped out */
234 #define TH_IDLE 0x80 /* thread is an idle thread */
236 #define TH_SCHED_STATE (TH_WAIT|TH_SUSP|TH_RUN|TH_UNINT)
238 #define TH_STACK_HANDOFF 0x0100 /* thread has no kernel stack */
239 #define TH_STACK_COMING_IN 0x0200 /* thread is waiting for kernel stack */
240 #define TH_STACK_STATE (TH_STACK_HANDOFF | TH_STACK_COMING_IN)
242 #define TH_TERMINATE 0x0400 /* thread is terminating */
244 /* Stack handoff information */
245 void (*continuation
)(void); /* start here next time dispatched */
246 int cont_arg
; /* XXX continuation argument */
248 /* Scheduling information */
249 integer_t importance
; /* task-relative importance */
250 integer_t sched_mode
; /* scheduling mode bits */
251 #define TH_MODE_REALTIME 0x0001
252 struct { /* see mach/thread_policy.h */
254 natural_t computation
;
255 natural_t constraint
;
256 boolean_t preemptible
;
259 integer_t priority
; /* base priority */
260 integer_t sched_pri
; /* scheduled (current) priority */
261 integer_t depress_priority
; /* priority to restore */
262 integer_t max_priority
;
264 natural_t cpu_usage
; /* exp. decaying cpu usage [%cpu] */
265 natural_t sched_usage
; /* load-weighted cpu usage [sched] */
266 natural_t sched_stamp
; /* last time priority was updated */
267 natural_t sleep_stamp
; /* last time in TH_WAIT state */
269 /* 'Obsolete' stuff that cannot be removed yet */
272 integer_t unconsumed_quantum
;
274 /* VM global variables */
275 boolean_t vm_privilege
; /* can use reserved memory? */
276 vm_offset_t recover
; /* page fault recovery (copyin/out) */
278 /* IPC data structures */
280 struct ipc_kmsg_queue ith_messages
;
282 mach_port_t ith_mig_reply
; /* reply port for mig */
283 mach_port_t ith_rpc_reply
; /* reply port for kernel RPCs */
285 /* Various bits of stashed state */
288 mach_msg_return_t state
; /* receive state */
289 ipc_object_t object
; /* object received on */
290 mach_msg_header_t
*msg
; /* receive buffer pointer */
291 mach_msg_size_t msize
; /* max size for recvd msg */
292 mach_msg_option_t option
; /* options for receive */
293 mach_msg_size_t slist_size
; /* scatter list size */
294 struct ipc_kmsg
*kmsg
; /* received message */
295 mach_port_seqno_t seqno
; /* seqno of recvd message */
296 void (*continuation
)(mach_msg_return_t
);
299 struct semaphore
*waitsemaphore
; /* semaphore ref */
300 struct semaphore
*signalsemaphore
; /* semaphore ref */
301 int options
; /* semaphore options */
302 kern_return_t result
; /* primary result */
303 void (*continuation
)(kern_return_t
);
306 struct sf_policy
*policy
; /* scheduling policy */
307 int option
; /* switch option */
309 char *other
; /* catch-all for other state */
312 /* Timing data structures */
313 timer_data_t user_timer
; /* user mode timer */
314 timer_data_t system_timer
; /* system mode timer */
315 timer_data_t depressed_timer
;/* depressed priority timer */
316 timer_save_data_t user_timer_save
; /* saved user timer value */
317 timer_save_data_t system_timer_save
; /* saved sys timer val. */
318 /*** ??? should the next two fields be moved to SP-specific struct?***/
319 unsigned int cpu_delta
; /* cpu usage since last update */
320 unsigned int sched_delta
; /* weighted cpu usage since update */
322 /* Timed wait expiration */
323 timer_call_data_t wait_timer
;
324 integer_t wait_timer_active
;
325 boolean_t wait_timer_is_set
;
327 /* Priority depression expiration */
328 thread_call_data_t depress_timer
;
330 /* Ast/Halt data structures */
331 boolean_t active
; /* how alive is the thread */
333 /* Processor data structures */
334 processor_set_t processor_set
; /* assigned processor set */
336 processor_t bound_processor
; /* bound to processor ?*/
337 #endif /* NCPUS > 1 */
339 boolean_t may_assign
; /* may assignment change? */
340 boolean_t assign_active
; /* someone waiting for may_assign */
341 #endif /* MACH_HOST */
345 #endif /* XKMACHKERNEL */
348 processor_t last_processor
; /* processor this last ran on */
350 unsigned lock_stack
; /* number of locks held */
351 #endif /* MACH_LOCK_MON */
352 #endif /* NCPUS > 1 */
354 int at_safe_point
; /* thread_abort_safely allowed */
356 #define TH_FN_OWNED 0x1 /* we own the funnel lock */
357 #define TH_FN_REFUNNEL 0x2 /* must reaquire funnel lock when unblocking */
358 funnel_t
*funnel_lock
;
361 * Debugging: track acquired mutexes and locks.
362 * Because a thread can block while holding such
363 * synchronizers, we think of the thread as
366 #define MUTEX_STACK_DEPTH 20
367 #define LOCK_STACK_DEPTH 20
368 mutex_t
*mutex_stack
[MUTEX_STACK_DEPTH
];
369 lock_t
*lock_stack
[LOCK_STACK_DEPTH
];
370 unsigned int mutex_stack_index
;
371 unsigned int lock_stack_index
;
372 unsigned mutex_count
; /* XXX to be deleted XXX */
373 boolean_t kthread
; /* thread is a kernel thread */
374 #endif /* MACH_LDEBUG */
377 * End of thread_shuttle proper
381 * Migration and thread_activation linkage information
383 struct thread_activation
*top_act
; /* "current" thr_act */
#define THREAD_SHUTTLE_NULL	((thread_shuttle_t)0)

/* Accessors for the message-receive state stashed in saved.receive */
#define ith_state		saved.receive.state
#define ith_object		saved.receive.object
#define ith_msg			saved.receive.msg
#define ith_msize		saved.receive.msize
#define ith_option		saved.receive.option
#define ith_scatter_list_size	saved.receive.slist_size
#define ith_continuation	saved.receive.continuation
#define ith_kmsg		saved.receive.kmsg
#define ith_seqno		saved.receive.seqno

/* Accessors for the semaphore-wait state stashed in saved.sema */
#define sth_waitsemaphore	saved.sema.waitsemaphore
#define sth_signalsemaphore	saved.sema.signalsemaphore
#define sth_options		saved.sema.options
#define sth_result		saved.sema.result
#define sth_continuation	saved.sema.continuation
405 extern thread_act_t active_kloaded
[NCPUS
]; /* "" kernel-loaded acts */
406 extern vm_offset_t active_stacks
[NCPUS
]; /* active kernel stacks */
407 extern vm_offset_t kernel_stack
[NCPUS
];
409 #ifndef MACHINE_STACK_STASH
411 * MD Macro to fill up global stack state,
412 * keeping the MD structure sizes + games private
414 #define MACHINE_STACK_STASH(stack) \
416 mp_disable_preemption(); \
417 active_stacks[cpu_number()] = (stack); \
418 kernel_stack[cpu_number()] = (stack) + KERNEL_STACK_SIZE; \
419 mp_enable_preemption(); \
421 #endif /* MACHINE_STACK_STASH */
424 * Kernel-only routines
427 /* Initialize thread module */
428 extern void thread_init(void);
430 /* Take reference on thread (make sure it doesn't go away) */
431 extern void thread_reference(
434 /* Release reference on thread */
435 extern void thread_deallocate(
438 /* Set priority of calling thread */
439 extern void thread_set_own_priority(
442 /* Start a thread at specified routine */
443 #define thread_start(thread, start) \
444 (thread)->continuation = (start)
447 /* Reaps threads waiting to be destroyed */
448 extern void thread_reaper(void);
452 /* Preclude thread processor set assignement */
453 extern void thread_freeze(
456 /* Assign thread to a processor set */
457 extern void thread_doassign(
459 processor_set_t new_pset
,
460 boolean_t release_freeze
);
462 /* Allow thread processor set assignement */
463 extern void thread_unfreeze(
466 #endif /* MACH_HOST */
468 /* Insure thread always has a kernel stack */
469 extern void stack_privilege(
472 extern void consider_thread_collect(void);
475 * Arguments to specify aggressiveness to thread halt.
476 * Can't have MUST_HALT and SAFELY at the same time.
478 #define THREAD_HALT_NORMAL 0
479 #define THREAD_HALT_MUST_HALT 1 /* no deadlock checks */
480 #define THREAD_HALT_SAFELY 2 /* result must be restartable */
/*
 * Macro-defined routines
 */

#define thread_pcb(th)		((th)->pcb)

#define thread_lock_init(th)	\
	simple_lock_init(&(th)->lock, ETAP_THREAD_LOCK)
#define thread_lock(th)		simple_lock(&(th)->lock)
#define thread_unlock(th)	simple_unlock(&(th)->lock)

/* Cheap inline check: no activation, dead activation, or a pending
   halt/terminate AST all mean the thread should halt. */
#define thread_should_halt_fast(thread)	\
	(!(thread)->top_act ||		\
	 !(thread)->top_act->active ||	\
	 (thread)->top_act->ast & (AST_HALT|AST_TERMINATE))

#define thread_should_halt(thread)	thread_should_halt_fast(thread)

#define rpc_lock_init(th)	mutex_init(&(th)->rpc_lock, ETAP_THREAD_RPC)
#define rpc_lock(th)		mutex_lock(&(th)->rpc_lock)
#define rpc_lock_try(th)	mutex_try(&(th)->rpc_lock)
#define rpc_unlock(th)		mutex_unlock(&(th)->rpc_lock)

/*
 * Lock to cover wake_active only; like thread_lock(), is taken
 * at splsched().  Used to avoid calling into scheduler with a
 * thread_lock() held.  Precedes thread_lock() (and other scheduling-
 * related locks) in the system lock ordering.
 */
#define wake_lock_init(th)	\
	simple_lock_init(&(th)->wake_lock, ETAP_THREAD_WAKE)
#define wake_lock(th)		simple_lock(&(th)->wake_lock)
#define wake_unlock(th)		simple_unlock(&(th)->wake_lock)
516 static __inline__ vm_offset_t
current_stack(void);
517 static __inline__ vm_offset_t
522 mp_disable_preemption();
523 ret
= active_stacks
[cpu_number()];
524 mp_enable_preemption();
529 extern void pcb_module_init(void);
531 extern void pcb_init(
532 thread_act_t thr_act
);
534 extern void pcb_terminate(
535 thread_act_t thr_act
);
537 extern void pcb_collect(
538 thread_act_t thr_act
);
540 extern void pcb_user_to_kernel(
541 thread_act_t thr_act
);
543 extern kern_return_t
thread_setstatus(
544 thread_act_t thr_act
,
546 thread_state_t tstate
,
547 mach_msg_type_number_t count
);
549 extern kern_return_t
thread_getstatus(
550 thread_act_t thr_act
,
552 thread_state_t tstate
,
553 mach_msg_type_number_t
*count
);
555 extern boolean_t
stack_alloc_try(
557 void (*start_pos
)(thread_t
));
559 /* This routine now used only internally */
560 extern kern_return_t
thread_info_shuttle(
561 thread_act_t thr_act
,
562 thread_flavor_t flavor
,
563 thread_info_t thread_info_out
,
564 mach_msg_type_number_t
*thread_info_count
);
566 extern void thread_user_to_kernel(
569 /* Machine-dependent routines */
570 extern void thread_machine_init(void);
572 extern void thread_machine_set_current(
575 extern kern_return_t
thread_machine_create(
577 thread_act_t thr_act
,
578 void (*start_pos
)(thread_t
));
580 extern void thread_set_syscall_return(
582 kern_return_t retval
);
584 extern void thread_machine_destroy(
587 extern void thread_machine_flush(
588 thread_act_t thr_act
);
590 extern thread_t
kernel_thread_with_priority(
594 boolean_t start_running
);
596 extern void funnel_lock(funnel_t
*);
598 extern void funnel_unlock(funnel_t
*);
600 #else /* !MACH_KERNEL_PRIVATE */
602 typedef struct __funnel__ funnel_t
;
604 extern boolean_t
thread_should_halt(thread_t
);
606 #endif /* !MACH_KERNEL_PRIVATE */
608 #define THR_FUNNEL_NULL (funnel_t *)0
610 extern thread_t
kernel_thread(
612 void (*start
)(void));
614 extern void thread_terminate_self(void);
616 extern funnel_t
* funnel_alloc(int);
618 extern funnel_t
* thread_funnel_get(void);
620 extern boolean_t
thread_funnel_set(funnel_t
* fnl
, boolean_t funneled
);
622 extern boolean_t
thread_funnel_merge(funnel_t
* fnl
, funnel_t
* otherfnl
);
624 extern void thread_set_cont_arg(int);
626 extern int thread_get_cont_arg(void);
628 /* JMM - These are only temporary */
629 extern boolean_t
is_thread_running(thread_t
); /* True is TH_RUN */
630 extern boolean_t
is_thread_idle(thread_t
); /* True is TH_IDLE */
631 extern event_t
get_thread_waitevent(thread_t
);
632 extern kern_return_t
get_thread_waitresult(thread_t
);
634 #endif /* _KERN_THREAD_H_ */