/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 *	Author:	Bryan Ford, University of Utah CSL
 *
 *	Thread activation definitions.
 */

#ifndef	_KERN_THREAD_ACT_H_
#define	_KERN_THREAD_ACT_H_

#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/thread_info.h>
#include <mach/exception_types.h>

#ifdef	MACH_KERNEL_PRIVATE
#include <mach_assert.h>
#include <thread_swapper.h>

#include <kern/lock.h>
#include <kern/queue.h>
#include <kern/etap_macros.h>
#include <kern/exception.h>
#include <kern/thread.h>
#include <kern/thread_pool.h>
#include <ipc/ipc_port.h>
#include <machine/thread_act.h>
/* Here is a description of the states a thread_activation may be in.
 *
 * An activation always has a valid task pointer, and that pointer is
 * constant for its lifetime.  The activation remains linked onto the
 * task's activation list until the activation is terminated.
 *
 * An activation is in use or not, depending on whether its thread
 * pointer is nonzero.  If it is not in use, it is just sitting idly
 * waiting to be used by a thread.  The thread holds a reference on
 * the activation while using it.
 *
 * An activation lives on a thread_pool if its pool_port pointer is nonzero.
 * When in use, it can still live on a thread_pool, but it is not actually
 * linked onto the thread_pool's list of available activations.  In this case,
 * the act will return to its thread_pool as soon as it becomes unused.
 *
 * An activation is active until thread_terminate is called on it;
 * then it is inactive, waiting for all references to be dropped.
 * Future control operations on the terminated activation will fail,
 * with the exception that act_yank still works if the activation is
 * still on an RPC chain.  A terminated activation always has null
 * thread and pool_port pointers.
 *
 * An activation is suspended when suspend_count > 0.
 * A suspended activation can live on a thread_pool, but it is not
 * actually linked onto the thread_pool while suspended.
 *
 * Locking note:  access to data relevant to scheduling state (user_stop_count,
 * suspend_count, handlers, special_handler) is controlled by the combination
 * of locks acquired by act_lock_thread().  That is, not only must act_lock()
 * be held, but RPC through the activation must be frozen (so that the
 * thread pointer doesn't change).  If a shuttle is associated with the
 * activation, then its thread_lock() must also be acquired to change these
 * data.  Regardless of whether a shuttle is present, the data must be
 * altered at splsched().
 */
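
/*
 * Illustrative sketch only (not part of the interface):  the locking note
 * above translates into roughly the following pattern for code that needs
 * to change an activation's scheduling-related state.  The helper name
 * example_stop_one() is hypothetical; act_lock_thread(), act_unlock_thread(),
 * thread_lock(), thread_unlock(), splsched() and splx() are the real
 * primitives referred to above.
 *
 *	void example_stop_one(thread_act_t thr_act)
 *	{
 *		thread_t	thread;
 *		spl_t		s;
 *
 *		thread = act_lock_thread(thr_act);  // act_lock + freeze RPC
 *		s = splsched();                     // data altered at splsched()
 *		if (thread != THREAD_NULL)
 *			thread_lock(thread);        // shuttle present: lock it too
 *
 *		thr_act->user_stop_count++;         // now safe to touch these fields
 *
 *		if (thread != THREAD_NULL)
 *			thread_unlock(thread);
 *		splx(s);
 *		act_unlock_thread(thr_act);
 *	}
 */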
typedef struct ReturnHandler {
	struct ReturnHandler	*next;
	void			(*handler)(struct ReturnHandler *rh,
					   struct thread_activation *thr_act);
} ReturnHandler;
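
/*
 * Illustrative sketch only:  a ReturnHandler is a node on an activation's
 * `handlers' chain (see below); its callback runs on the activation's
 * return path (see act_execute_returnhandlers() below).  A hypothetical
 * handler (example_return_handler is not part of the kernel) would look
 * like:
 *
 *	void example_return_handler(ReturnHandler *rh,
 *				    struct thread_activation *thr_act)
 *	{
 *		// perform whatever deferred work this handler was queued for
 *	}
 *
 *	ReturnHandler example_rh = { 0, example_return_handler };
 */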
typedef struct thread_activation {

	/*** task linkage ***/

	/* Links for the task's circular list of activations.  The activation
	 * is only on the task's activation list while active.  Must be first.
	 */
	queue_chain_t	thr_acts;

	/* Indicators for whether this activation is in the midst of
	 * resuming or has already been resumed in a kernel-loaded
	 * task -- these flags are basically for quick access to
	 * that information.
	 */
	boolean_t	kernel_loaded;	/* running in kernel-loaded task */
	boolean_t	kernel_loading;	/* about to run kernel-loaded */

	/*** Machine-dependent state ***/
	struct MachineThrAct	mact;

	/*** Consistency ***/
	decl_mutex_data(,lock)
	decl_simple_lock_data(,sched_lock)
	int		ref_count;

	/* Reference to the task this activation is in.
	 * Constant for the life of the activation.
	 */
	struct task	*task;
	vm_map_t	map;		/* cached current map */

	/*** thread_pool-related stuff ***/

	/* Port containing the thread_pool this activation normally lives
	 * on, zero if none.  The port (really the thread_pool) holds a
	 * reference to the activation as long as this is nonzero (even when
	 * the activation isn't actually on the thread_pool's list).
	 */
	struct ipc_port	*pool_port;

	/* Link on the thread_pool's list of activations.
	 * The activation is only actually on the thread_pool's list
	 * (and hence this is valid) when not in use (thread == 0) and
	 * not suspended (suspend_count == 0).
	 */
	struct thread_activation	*thread_pool_next;

	/*** RPC state (fields accessed via the r_* defines below) ***/
	union {
		struct {
			rpc_subsystem_t		r_subsystem;
#if 0	/* alternate declarations */
			mach_rpc_id_t		r_routine_num;
			mach_rpc_signature_t	r_sig_ptr;
			mach_rpc_size_t		r_sig_size;
#else
			rpc_id_t		r_routine_num;
			rpc_signature_t		r_sig_ptr;	/* Stored Client Sig Ptr */
			rpc_size_t		r_sig_size;	/* Size of Sig stored */
			struct rpc_signature	r_sigbuf;	/* Static Reservation of Sig Mem */
			routine_descriptor_t	r_sigbufp;	/* For dynamic storage of Sig */
			vm_size_t		r_sigbuf_size;	/* Size of buffer allocated for sig */
#endif
			vm_offset_t		r_new_argv;
			vm_offset_t		*r_arg_buf;
			vm_offset_t		r_arg_buf_data[RPC_KBUF_SIZE];
			rpc_copy_state_t	r_state;
			rpc_copy_state_data_t	r_state_data[RPC_DESC_COUNT];
			unsigned int		r_port_flags;
			ipc_port_t		r_local_port;
		} regular;

		struct {
			ipc_port_t		r_exc_port;
			mach_msg_type_number_t	r_ostate_cnt;
			exception_data_type_t	r_code[EXCEPTION_CODE_MAX];
#if	ETAP_EVENT_MONITOR
			exception_type_t	r_exception;
#endif	/* ETAP_EVENT_MONITOR */
		} exception;
	} rpc_state;

	/*** Thread linkage ***/

	/* Shuttle using this activation, zero if not in use.  The shuttle
	 * holds a reference on the activation while this is nonzero.
	 */
	struct thread_shuttle	*thread;

	/* The rest of this section is only valid when thread is nonzero. */

	/* Next higher and next lower activation on the thread's activation
	 * stack.  For a topmost activation or the null_act, higher is
	 * undefined.  The bottommost activation is always the null_act.
	 */
	struct thread_activation	*higher, *lower;

	/* Alert bits pending at this activation; some of them may have
	 * propagated from lower activations.
	 */
	unsigned	alerts;

	/* Mask of alert bits to be allowed to pass through from lower levels.
	 */
	unsigned	alert_mask;

	/* Saved policy and priority of shuttle if changed to migrate into
	 * a higher-priority or more real-time task.  Only valid if
	 * saved_sched_stamp is nonzero and equal to the sched_change_stamp
	 * in the thread_shuttle.  (Otherwise, the policy or priority has
	 * been explicitly changed in the meantime, and the saved values
	 * are no longer valid.)
	 */
	policy_t	saved_policy;
	integer_t	saved_base_priority;
	unsigned int	saved_sched_change_stamp;

	/*** Control information ***/

	/* Number of outstanding suspensions on this activation. */
	int		suspend_count;

	/* User-visible scheduling state */
	int		user_stop_count;	/* outstanding stops */

	/* ast is needed - see ast.h */
	int		ast;

#if	THREAD_SWAPPER
	int		swap_state;	/* swap state (or unswappable flag) */
	queue_chain_t	swap_queue;	/* links on swap queues */
#if	MACH_ASSERT
	boolean_t	kernel_stack_swapped_in;
					/* debug for thread swapping */
#endif	/* MACH_ASSERT */
#endif	/* THREAD_SWAPPER */

	/* This is normally true, but is set to false when the
	 * activation is terminated.
	 */
	int		active;

	/* Chain of return handlers to be called before the thread is
	 * allowed to return to this invocation.
	 */
	ReturnHandler	*handlers;

	/* A special ReturnHandler attached to the above chain to
	 * handle suspension and such.
	 */
	ReturnHandler	special_handler;

	/* Special ports attached to this activation */
	struct ipc_port	*ith_self;	/* not a right, doesn't hold ref */
	struct ipc_port	*ith_sself;	/* a send right */
	struct exception_action	exc_actions[EXC_TYPES_COUNT];

	/* A list of ulocks (a lock set element) currently held by the thread.
	 */
	queue_head_t	held_ulocks;

#if	MACH_PROF
	/* Profiling data structures */
	boolean_t	act_profiled;	/* is activation being profiled? */
	boolean_t	act_profiled_own;
					/* is activation being profiled on its own? */
	struct prof_data *profil_buffer;/* prof struct if either is so */
#endif	/* MACH_PROF */

} Thread_Activation;

/* RPC state fields */
#define	r_subsystem	rpc_state.regular.r_subsystem
#define	r_routine_num	rpc_state.regular.r_routine_num
#define	r_sig_ptr	rpc_state.regular.r_sig_ptr
#define	r_sig_size	rpc_state.regular.r_sig_size
#define	r_sigbuf	rpc_state.regular.r_sigbuf
#define	r_sigbufp	rpc_state.regular.r_sigbufp
#define	r_sigbuf_size	rpc_state.regular.r_sigbuf_size
#define	r_new_argv	rpc_state.regular.r_new_argv
#define	r_arg_buf	rpc_state.regular.r_arg_buf
#define	r_arg_buf_data	rpc_state.regular.r_arg_buf_data
#define	r_state		rpc_state.regular.r_state
#define	r_state_data	rpc_state.regular.r_state_data
#define	r_port_flags	rpc_state.regular.r_port_flags
#define	r_local_port	rpc_state.regular.r_local_port
#define	r_kkt_args	rpc_state.regular.r_kkt_args
#define	r_port		rpc_state.exception.r_port
#define	r_exc_port	rpc_state.exception.r_exc_port
#define	r_exc_flavor	rpc_state.exception.r_exc_flavor
#define	r_ostate_cnt	rpc_state.exception.r_ostate_cnt
#define	r_code		rpc_state.exception.r_code
#define	r_exception	rpc_state.exception.r_exception
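
/*
 * Illustrative note:  because of the defines above, code elsewhere in the
 * kernel can name the RPC fields directly on an activation; for example
 *
 *	thr_act->r_exc_port
 *
 * expands to
 *
 *	thr_act->rpc_state.exception.r_exc_port
 */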

#define	SERVER_TERMINATED		0x01
#define	ORPHANED			0x02
#define	CLIENT_TERMINATED		0x04
#define	TIME_CONSTRAINT_UNSATISFIED	0x08

#if	THREAD_SWAPPER
/*
 * Encapsulate the actions needed to ensure that the next lower act on
 * the RPC chain is swapped in.  Used at base spl; assumes rpc_lock()
 * of thread is held; if port is non-null, assumes its ip_lock()
 * is also held.
 */
#define act_switch_swapcheck(thread, port)			\
MACRO_BEGIN							\
	thread_act_t __act__ = thread->top_act;			\
								\
	while (__act__->lower) {				\
		thread_act_t __l__ = __act__->lower;		\
								\
		if (__l__->swap_state == TH_SW_IN ||		\
		    __l__->swap_state == TH_SW_UNSWAPPABLE)	\
			break;					\
		/*						\
		 * XXX - Do we need to reference __l__?		\
		 */						\
		if (!thread_swapin_blocking(__l__))		\
			panic("act_switch_swapcheck: !active");	\
		if (__act__->lower == __l__)			\
			break;					\
	}							\
MACRO_END

#else	/* !THREAD_SWAPPER */

#define act_switch_swapcheck(thread, port)

#endif	/* !THREAD_SWAPPER */

#define	act_lock_init(thr_act)	mutex_init(&(thr_act)->lock, ETAP_THREAD_ACT)
#define	act_lock(thr_act)	mutex_lock(&(thr_act)->lock)
#define	act_lock_try(thr_act)	mutex_try(&(thr_act)->lock)
#define	act_unlock(thr_act)	mutex_unlock(&(thr_act)->lock)
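
/*
 * Illustrative sketch only:  typical use of the act_lock()/act_unlock()
 * pair to examine per-activation state (example_act_in_use() is a
 * hypothetical helper, not part of this interface):
 *
 *	boolean_t example_act_in_use(thread_act_t thr_act)
 *	{
 *		boolean_t in_use;
 *
 *		act_lock(thr_act);			// take the activation mutex
 *		in_use = (thr_act->thread != 0);	// "in use" == thread pointer nonzero
 *		act_unlock(thr_act);			// drop it before returning
 *		return in_use;
 *	}
 */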

/* Sanity check the ref count.  If it is 0, we may be doubly zfreeing.
 * If it is larger than max int, it has been corrupted, probably by being
 * modified into an address (this is architecture dependent, but it's
 * safe to assume there cannot really be max int references).
 */
#define ACT_MAX_REFERENCES					\
	(unsigned)(~0 ^ (1 << (sizeof(int)*BYTE_SIZE - 1)))

#define act_reference_fast(thr_act)				\
	MACRO_BEGIN						\
		act_lock(thr_act);				\
		assert((thr_act)->ref_count < ACT_MAX_REFERENCES); \
		(thr_act)->ref_count++;				\
		act_unlock(thr_act);				\
	MACRO_END

#define act_reference(thr_act)	act_reference_fast(thr_act)

#define act_locked_act_reference(thr_act)			\
	MACRO_BEGIN						\
		assert((thr_act)->ref_count < ACT_MAX_REFERENCES); \
		(thr_act)->ref_count++;				\
	MACRO_END

#define sigbuf_dealloc(thr_act)					\
	if ((thr_act->r_sigbufp) && (thr_act->r_sigbuf_size >	\
				sizeof(thr_act->r_sigbuf))) {	\
		kfree((vm_offset_t)thr_act->r_sigbufp,		\
				thr_act->r_sigbuf_size);	\
		thr_act->r_sigbuf_size = 0;			\
	}

#define act_deallocate_fast(thr_act)				\
	MACRO_BEGIN						\
		int new_value;					\
								\
		act_lock(thr_act);				\
		assert((thr_act)->ref_count > 0 &&		\
		       (thr_act)->ref_count <= ACT_MAX_REFERENCES); \
		new_value = --(thr_act)->ref_count;		\
		act_unlock(thr_act);				\
		if (new_value == 0)				\
			act_free(thr_act);			\
	MACRO_END

#define act_deallocate(thr_act)	act_deallocate_fast(thr_act)

#define act_locked_act_deallocate(thr_act)			\
	MACRO_BEGIN						\
		int new_value;					\
								\
		assert((thr_act)->ref_count > 0 &&		\
		       (thr_act)->ref_count <= ACT_MAX_REFERENCES); \
		new_value = --(thr_act)->ref_count;		\
		if (new_value == 0) {				\
			panic("a_l_act_deallocate: would free act"); \
		}						\
	MACRO_END
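
/*
 * Illustrative sketch only:  the reference-counting discipline the macros
 * above implement.  A holder takes a reference before caching a pointer to
 * an activation and drops it when done; the last act_deallocate() frees
 * the activation via act_free().  (example_use_act() is a hypothetical
 * caller, not part of the kernel.)
 *
 *	void example_use_act(thread_act_t thr_act)
 *	{
 *		act_reference(thr_act);		// pin the activation
 *		// ... thr_act may be dereferenced safely here ...
 *		act_deallocate(thr_act);	// last reference calls act_free()
 *	}
 *
 * The *_locked_* variants do the same bookkeeping but assume the caller
 * already holds act_lock(thr_act).
 */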

extern void		act_init(void);
extern kern_return_t	act_disable_task_locked(thread_act_t);
extern void		thread_release(thread_act_t);
extern kern_return_t	thread_dowait(thread_act_t, boolean_t);
extern void		thread_hold(thread_act_t);
extern void		nudge(thread_act_t);

extern kern_return_t	act_set_thread_pool(thread_act_t, ipc_port_t);
extern kern_return_t	act_locked_act_set_thread_pool(thread_act_t, ipc_port_t);
extern kern_return_t	thread_get_special_port(thread_act_t, int,
					ipc_port_t *);
extern kern_return_t	thread_set_special_port(thread_act_t, int,
					ipc_port_t);
extern thread_t		act_lock_thread(thread_act_t);
extern void		act_unlock_thread(thread_act_t);
extern void		install_special_handler(thread_act_t);
extern thread_act_t	thread_lock_act(thread_t);
extern void		thread_unlock_act(thread_t);
extern void		act_attach(thread_act_t, thread_t, unsigned);
extern void		act_execute_returnhandlers(void);
extern void		act_detach(thread_act_t);
extern void		act_free(thread_act_t);

/* machine-dependent functions */
extern void		act_machine_return(kern_return_t);
extern void		act_machine_init(void);
extern kern_return_t	act_machine_create(struct task *, thread_act_t);
extern void		act_machine_destroy(thread_act_t);
extern kern_return_t	act_machine_set_state(thread_act_t,
					thread_flavor_t, thread_state_t,
					mach_msg_type_number_t);
extern kern_return_t	act_machine_get_state(thread_act_t,
					thread_flavor_t, thread_state_t,
					mach_msg_type_number_t *);
extern void		act_machine_switch_pcb(thread_act_t);
extern void		act_virtual_machine_destroy(thread_act_t);

extern kern_return_t	act_create(task_t, thread_act_t *);
extern kern_return_t	act_get_state(thread_act_t, int, thread_state_t,
					mach_msg_type_number_t *);
extern kern_return_t	act_set_state(thread_act_t, int, thread_state_t,
					mach_msg_type_number_t);

extern int		dump_act(thread_act_t);	/* debugging */

#define	current_act_fast()	(current_thread()->top_act)
#define	current_act_slow()	((current_thread()) ?		\
				 current_act_fast() :		\
				 THR_ACT_NULL)

#define	current_act()	current_act_slow()	/* JMM - til we find the culprit */

#else	/* !MACH_KERNEL_PRIVATE */

extern thread_act_t	current_act(void);
extern void		act_reference(thread_act_t);
extern void		act_deallocate(thread_act_t);

#endif	/* !MACH_KERNEL_PRIVATE */

/* Exported to world */
extern kern_return_t	act_alert(thread_act_t, unsigned);
extern kern_return_t	act_alert_mask(thread_act_t, unsigned);
extern kern_return_t	post_alert(thread_act_t, unsigned);

extern kern_return_t	thread_abort(thread_act_t);
extern kern_return_t	thread_abort_safely(thread_act_t);
extern kern_return_t	thread_resume(thread_act_t);
extern kern_return_t	thread_suspend(thread_act_t);
extern kern_return_t	thread_terminate(thread_act_t);
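
/*
 * Illustrative sketch only:  thread_suspend() and thread_resume() nest, so
 * a temporary stop is normally written as a balanced pair
 * (example_with_act_stopped() is a hypothetical helper):
 *
 *	kern_return_t example_with_act_stopped(thread_act_t thr_act)
 *	{
 *		kern_return_t kr;
 *
 *		kr = thread_suspend(thr_act);	// one more outstanding stop
 *		if (kr != KERN_SUCCESS)
 *			return kr;
 *		// ... inspect or manipulate the stopped activation ...
 *		return thread_resume(thr_act);	// balance the suspend
 *	}
 */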

typedef void	(thread_apc_handler_t)(thread_act_t);

extern kern_return_t	thread_apc_set(thread_act_t, thread_apc_handler_t);
extern kern_return_t	thread_apc_clear(thread_act_t, thread_apc_handler_t);
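
/*
 * Illustrative sketch only:  an APC (asynchronous procedure call) handler
 * must match thread_apc_handler_t; it is installed with thread_apc_set()
 * and removed with thread_apc_clear().  (example_apc is hypothetical.)
 *
 *	void example_apc(thread_act_t thr_act)
 *	{
 *		// presumably runs in the context of thr_act when delivered
 *	}
 *
 *	// install:	thread_apc_set(thr_act, example_apc);
 *	// remove:	thread_apc_clear(thr_act, example_apc);
 */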

extern vm_map_t	swap_act_map(thread_act_t, vm_map_t);

extern void	*get_bsdthread_info(thread_act_t);
extern void	set_bsdthread_info(thread_act_t, void *);
extern task_t	get_threadtask(thread_act_t);

#endif	/* _KERN_THREAD_ACT_H_ */