/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL). All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 * Author: Bryan Ford, University of Utah CSL
 *
 * File: thread_act.h
 *
 * thread activation definitions
 */
#ifndef _KERN_THREAD_ACT_H_
#define _KERN_THREAD_ACT_H_

#include <mach/mach_types.h>
#include <mach/rpc.h>
#include <mach/vm_param.h>
#include <mach/thread_info.h>
#include <mach/exception_types.h>


#ifdef MACH_KERNEL_PRIVATE
#include <mach_assert.h>
#include <thread_swapper.h>
#include <cputypes.h>

#include <kern/lock.h>
#include <kern/queue.h>
#include <kern/etap_macros.h>
#include <kern/exception.h>
#include <kern/thread.h>
#include <kern/thread_pool.h>
#include <ipc/ipc_port.h>
#include <machine/thread_act.h>

/* Here is a description of the states a thread_activation may be in.
 *
 * An activation always has a valid task pointer, and it is always constant.
 * The activation is only linked onto the task's activation list until
 * the activation is terminated.
 *
 * An activation is in use or not, depending on whether its thread
 * pointer is nonzero. If it is not in use, it is just sitting idly
 * waiting to be used by a thread. The thread holds a reference on
 * the activation while using it.
 *
 * An activation lives on a thread_pool if its pool_port pointer is nonzero.
 * When in use, it can still live on a thread_pool, but it is not actually
 * linked onto the thread_pool's list of available activations. In this case,
 * the act will return to its thread_pool as soon as it becomes unused.
 *
 * An activation is active until thread_terminate is called on it;
 * then it is inactive, waiting for all references to be dropped.
 * Future control operations on the terminated activation will fail,
 * with the exception that act_yank still works if the activation is
 * still on an RPC chain. A terminated activation always has null
 * thread and pool_port pointers.
 *
 * An activation is suspended when suspend_count > 0.
 * A suspended activation can live on a thread_pool, but it is not
 * actually linked onto the thread_pool while suspended.
 *
 * Locking note: access to data relevant to scheduling state (user_stop_count,
 * suspend_count, handlers, special_handler) is controlled by the combination
 * of locks acquired by act_lock_thread(). That is, not only must act_lock()
 * be held, but RPC through the activation must be frozen (so that the
 * thread pointer doesn't change). If a shuttle is associated with the
 * activation, then its thread_lock() must also be acquired to change this
 * data. Regardless of whether a shuttle is present, the data must be
 * altered at splsched().
 */
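
/*
 * Illustrative sketch of the locking protocol described above, as it
 * would typically be followed when changing an activation's scheduling
 * state. This is only an example: splsched()/splx(), thread_lock()/
 * thread_unlock() and THREAD_NULL are assumed to be available from
 * kern/thread.h and the machine layer; the fields named here belong to
 * the structure declared below.
 *
 *	spl_t    s;
 *	thread_t thread;
 *
 *	thread = act_lock_thread(thr_act);      (act_lock held, RPC frozen)
 *	s = splsched();
 *	if (thread != THREAD_NULL)
 *		thread_lock(thread);            (shuttle present)
 *	thr_act->suspend_count++;               (scheduling state may change)
 *	if (thread != THREAD_NULL)
 *		thread_unlock(thread);
 *	splx(s);
 *	act_unlock_thread(thr_act);
 */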

typedef struct ReturnHandler {
	struct ReturnHandler *next;
	void (*handler)(struct ReturnHandler *rh,
			struct thread_activation *thr_act);
} ReturnHandler;

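/*
 * Illustrative sketch only: a ReturnHandler supplies a callback that runs
 * before the thread is allowed to return to an activation; handlers are
 * chained through the next field (see the handlers member of struct
 * thread_activation below). The queueing itself is done by the kernel,
 * e.g. install_special_handler() for the built-in special_handler; this
 * only shows the shape such a handler takes.
 *
 *	void
 *	my_return_handler(ReturnHandler *rh, struct thread_activation *thr_act)
 *	{
 *		(per-return work for thr_act goes here)
 *	}
 *
 *	ReturnHandler my_rh = { 0, my_return_handler };
 */
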
typedef struct thread_activation {

	/*** task linkage ***/

	/* Links for task's circular list of activations. The activation
	 * is only on the task's activation list while active. Must be
	 * first.
	 */
	queue_chain_t	thr_acts;

	/* Indicators for whether this activation is in the midst of
	 * resuming or has already been resumed in a kernel-loaded
	 * task -- these flags are basically for quick access to
	 * this information.
	 */
	boolean_t	kernel_loaded;	/* running in kernel-loaded task */
	boolean_t	kernel_loading;	/* about to run kernel-loaded */

	/*** Machine-dependent state ***/
	struct MachineThrAct	mact;

	/*** Consistency ***/
	decl_mutex_data(,lock)
	decl_simple_lock_data(,sched_lock)
	int		ref_count;

	/* Reference to the task this activation is in.
	 * Constant for the life of the activation
	 */
	struct task	*task;
	vm_map_t	map;		/* cached current map */

	/*** thread_pool-related stuff ***/
	/* Port containing the thread_pool this activation normally lives
	 * on, zero if none. The port (really the thread_pool) holds a
	 * reference to the activation as long as this is nonzero (even when
	 * the activation isn't actually on the thread_pool's list).
	 */
	struct ipc_port	*pool_port;

	/* Link on the thread_pool's list of activations.
	 * The activation is only actually on the thread_pool's list
	 * (and hence this is valid) when not in use (thread == 0) and
	 * not suspended (suspend_count == 0).
	 */
	struct thread_activation *thread_pool_next;

	/* RPC state */
	union {
		struct {
			rpc_subsystem_t		r_subsystem;
#if 0	/* Grenoble */
			mach_rpc_id_t		r_routine_num;
			mach_rpc_signature_t	r_sig_ptr;
			mach_rpc_size_t		r_sig_size;
#else
			rpc_id_t		r_routine_num;
			rpc_signature_t		r_sig_ptr;	/* Stored Client Sig Ptr */
			rpc_size_t		r_sig_size;	/* Size of Sig stored */
			struct rpc_signature	r_sigbuf;	/* Static Reservation of Sig Mem */
			routine_descriptor_t	r_sigbufp;	/* For dynamic storage of Sig */
			vm_size_t		r_sigbuf_size;	/* Size of buffer allocated for sig */
#endif
			vm_offset_t		r_new_argv;
			vm_offset_t		*r_arg_buf;
			vm_offset_t		r_arg_buf_data[RPC_KBUF_SIZE];
			rpc_copy_state_t	r_state;
			rpc_copy_state_data_t	r_state_data[RPC_DESC_COUNT];
			unsigned int		r_port_flags;
			ipc_port_t		r_local_port;
			void			*r_kkt_args;
		} regular;
		struct {
			ipc_port_t		r_port;
			ipc_port_t		r_exc_port;
			int			r_exc_flavor;
			mach_msg_type_number_t	r_ostate_cnt;
			exception_data_type_t	r_code[EXCEPTION_CODE_MAX];
#if ETAP_EVENT_MONITOR
			exception_type_t	r_exception;
#endif
		} exception;
	} rpc_state;

	/*** Thread linkage ***/
	/* Shuttle using this activation, zero if not in use. The shuttle
	 * holds a reference on the activation while this is nonzero.
	 */
	struct thread_shuttle	*thread;

	/* The rest in this section is only valid when thread is nonzero. */

	/* Next higher and next lower activation on the thread's activation
	 * stack. For a topmost activation or the null_act, higher is
	 * undefined. The bottommost activation is always the null_act.
	 */
	struct thread_activation *higher, *lower;

	/* Alert bits pending at this activation; some of them may have
	 * propagated from lower activations.
	 */
	unsigned	alerts;

	/* Mask of alert bits to be allowed to pass through from lower levels.
	 */
	unsigned	alert_mask;

#if 0	/* Grenoble */
	/* Saved policy and priority of shuttle if changed to migrate into
	 * higher-priority or more real-time task. Only valid if
	 * saved_sched_stamp is nonzero and equal to the sched_change_stamp
	 * in the thread_shuttle. (Otherwise, the policy or priority has
	 * been explicitly changed in the meantime, and the saved values
	 * are invalid.)
	 */
	policy_t	saved_policy;
	integer_t	saved_base_priority;
	unsigned int	saved_sched_change_stamp;
#endif
	/*** Control information ***/

	/* Number of outstanding suspensions on this activation. */
	int		suspend_count;

	/* User-visible scheduling state */
	int		user_stop_count;	/* outstanding stops */

	/* ast is needed - see ast.h */
	int		ast;

#if THREAD_SWAPPER
	/* task swapper */
	int		swap_state;	/* swap state (or unswappable flag) */
	queue_chain_t	swap_queue;	/* links on swap queues */
#if MACH_ASSERT
	boolean_t	kernel_stack_swapped_in;
					/* debug for thread swapping */
#endif	/* MACH_ASSERT */
#endif	/* THREAD_SWAPPER */

	/* This is normally true, but is set to false when the
	 * activation is terminated.
	 */
	int		active;

	/* Chain of return handlers to be called before the thread is
	 * allowed to return to this invocation
	 */
	ReturnHandler	*handlers;

	/* A special ReturnHandler attached to the above chain to
	 * handle suspension and such
	 */
	ReturnHandler	special_handler;

	/* Special ports attached to this activation */
	struct ipc_port	*ith_self;	/* not a right, doesn't hold ref */
	struct ipc_port	*ith_sself;	/* a send right */
	struct exception_action exc_actions[EXC_TYPES_COUNT];

	/* A list of ulocks (a lock set element) currently held by the thread
	 */
	queue_head_t	held_ulocks;

#if MACH_PROF
	/* Profiling data structures */
	boolean_t	act_profiled;	/* is activation being profiled? */
	boolean_t	act_profiled_own;
					/* is activation being profiled
					 * on its own ? */
	struct prof_data *profil_buffer;/* prof struct if either is so */
#endif	/* MACH_PROF */

#ifdef MACH_BSD
	void	*uthread;
#endif

} Thread_Activation;

/* RPC state fields */
#define	r_subsystem	rpc_state.regular.r_subsystem
#define	r_routine_num	rpc_state.regular.r_routine_num
#define	r_sig_ptr	rpc_state.regular.r_sig_ptr
#define	r_sig_size	rpc_state.regular.r_sig_size
#define	r_sigbuf	rpc_state.regular.r_sigbuf
#define	r_sigbufp	rpc_state.regular.r_sigbufp
#define	r_sigbuf_size	rpc_state.regular.r_sigbuf_size
#define	r_new_argv	rpc_state.regular.r_new_argv
#define	r_arg_buf	rpc_state.regular.r_arg_buf
#define	r_arg_buf_data	rpc_state.regular.r_arg_buf_data
#define	r_state		rpc_state.regular.r_state
#define	r_state_data	rpc_state.regular.r_state_data
#define	r_port_flags	rpc_state.regular.r_port_flags
#define	r_local_port	rpc_state.regular.r_local_port
#define	r_kkt_args	rpc_state.regular.r_kkt_args
#define	r_port		rpc_state.exception.r_port
#define	r_exc_port	rpc_state.exception.r_exc_port
#define	r_exc_flavor	rpc_state.exception.r_exc_flavor
#define	r_ostate_cnt	rpc_state.exception.r_ostate_cnt
#define	r_code		rpc_state.exception.r_code
#define	r_exception	rpc_state.exception.r_exception

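/*
 * The shorthands above let the rest of the kernel name RPC state fields
 * directly off a thread_act_t. For example (illustrative only),
 *
 *	thr_act->r_exc_port
 *
 * expands to
 *
 *	thr_act->rpc_state.exception.r_exc_port
 */
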
/* Alert bits */
#define SERVER_TERMINATED		0x01
#define ORPHANED			0x02
#define CLIENT_TERMINATED		0x04
#define TIME_CONSTRAINT_UNSATISFIED	0x08

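/*
 * The alert bits are OR-able flags; they populate the alerts and
 * alert_mask fields of an activation and the second argument of
 * act_alert()/act_alert_mask() declared below. Illustrative example:
 * a mask that lets termination-related alerts pass up from lower
 * activations while filtering time-constraint alerts might be built as
 *
 *	unsigned mask = SERVER_TERMINATED | CLIENT_TERMINATED | ORPHANED;
 *
 *	act_alert_mask(thr_act, mask);
 */
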
#if THREAD_SWAPPER
/*
 * Encapsulate the actions needed to ensure that the next lower act on
 * the RPC chain is swapped in. Used at base spl; assumes rpc_lock()
 * of thread is held; if port is non-null, assumes its ip_lock()
 * is also held.
 */
#define act_switch_swapcheck(thread, port)				\
MACRO_BEGIN								\
	thread_act_t __act__ = thread->top_act;				\
									\
	while (__act__->lower) {					\
		thread_act_t __l__ = __act__->lower;			\
									\
		if (__l__->swap_state == TH_SW_IN ||			\
		    __l__->swap_state == TH_SW_UNSWAPPABLE)		\
			break;						\
		/*							\
		 * XXX - Do we need to reference __l__?			\
		 */							\
		if (port)						\
			ip_unlock(port);				\
		if (!thread_swapin_blocking(__l__))			\
			panic("act_switch_swapcheck: !active");		\
		if (port)						\
			ip_lock(port);					\
		if (__act__->lower == __l__)				\
			break;						\
	}								\
MACRO_END

#else	/* !THREAD_SWAPPER */

#define act_switch_swapcheck(thread, port)

#endif	/* !THREAD_SWAPPER */

#define act_lock_init(thr_act)	mutex_init(&(thr_act)->lock, ETAP_THREAD_ACT)
#define act_lock(thr_act)	mutex_lock(&(thr_act)->lock)
#define act_lock_try(thr_act)	mutex_try(&(thr_act)->lock)
#define act_unlock(thr_act)	mutex_unlock(&(thr_act)->lock)

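/*
 * Typical usage of the act lock macros (illustrative only): act_lock()
 * blocks, act_lock_try() returns a boolean without blocking, and every
 * successful acquisition is paired with act_unlock().
 *
 *	act_lock(thr_act);
 *	(examine or modify the activation)
 *	act_unlock(thr_act);
 *
 *	if (act_lock_try(thr_act)) {
 *		(got the lock without blocking)
 *		act_unlock(thr_act);
 *	}
 */
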
/* Sanity check the ref count. If it is 0, we may be doubly zfreeing.
 * If it is larger than max int, it has been corrupted, probably by being
 * modified into an address (this is architecture dependent, but it's
 * safe to assume there cannot really be max int references).
 */
#define ACT_MAX_REFERENCES						\
	(unsigned)(~0 ^ (1 << (sizeof(int)*BYTE_SIZE - 1)))

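/*
 * Worked example (assuming a 32-bit int and BYTE_SIZE of 8): ~0 is
 * 0xffffffff and 1 << 31 is 0x80000000, so ACT_MAX_REFERENCES evaluates
 * to 0x7fffffff, the largest positive value ref_count can hold.
 */
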
#define act_reference_fast(thr_act)					\
	MACRO_BEGIN							\
	    if (thr_act) {						\
		act_lock(thr_act);					\
		assert((thr_act)->ref_count < ACT_MAX_REFERENCES);	\
		(thr_act)->ref_count++;					\
		act_unlock(thr_act);					\
	    }								\
	MACRO_END

#define act_reference(thr_act) act_reference_fast(thr_act)

#define act_locked_act_reference(thr_act)				\
	MACRO_BEGIN							\
	    if (thr_act) {						\
		assert((thr_act)->ref_count < ACT_MAX_REFERENCES);	\
		(thr_act)->ref_count++;					\
	    }								\
	MACRO_END

/* Release a dynamically allocated RPC signature buffer, i.e. one larger
 * than the static r_sigbuf reservation; a no-op otherwise.
 */
#define sigbuf_dealloc(thr_act)						\
	if ((thr_act->r_sigbufp) && (thr_act->r_sigbuf_size >		\
				sizeof(thr_act->r_sigbuf)))		\
	{								\
		kfree((vm_offset_t)thr_act->r_sigbufp,			\
				thr_act->r_sigbuf_size);		\
		thr_act->r_sigbuf_size = 0;				\
	}

#define act_deallocate_fast(thr_act)					\
	MACRO_BEGIN							\
	    if (thr_act) {						\
		int new_value;						\
		act_lock(thr_act);					\
		assert((thr_act)->ref_count > 0 &&			\
		       (thr_act)->ref_count <= ACT_MAX_REFERENCES);	\
		new_value = --(thr_act)->ref_count;			\
		act_unlock(thr_act);					\
		if (new_value == 0)					\
		    act_free(thr_act);					\
	    }								\
	MACRO_END

#define act_deallocate(thr_act) act_deallocate_fast(thr_act)

#define act_locked_act_deallocate(thr_act)				\
	MACRO_BEGIN							\
	    if (thr_act) {						\
		int new_value;						\
		assert((thr_act)->ref_count > 0 &&			\
		       (thr_act)->ref_count <= ACT_MAX_REFERENCES);	\
		new_value = --(thr_act)->ref_count;			\
		if (new_value == 0) {					\
		    panic("a_l_act_deallocate: would free act");	\
		}							\
	    }								\
	MACRO_END


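/*
 * Illustrative sketch only: a caller that needs an activation to stay
 * valid across a blocking operation takes a reference first and drops
 * it when done; act_free() runs when the last reference goes away.
 *
 *	act_reference(thr_act);
 *	(operate on thr_act, possibly blocking)
 *	act_deallocate(thr_act);
 *
 * The *_locked_* variants do the same bookkeeping when the caller
 * already holds act_lock(); note that act_locked_act_deallocate()
 * panics rather than freeing, since freeing with the lock held would
 * be unsafe.
 */
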
extern void		act_init(void);
extern kern_return_t	act_disable_task_locked(thread_act_t);
extern void		thread_release(thread_act_t);
extern kern_return_t	thread_dowait(thread_act_t, boolean_t);
extern void		thread_hold(thread_act_t);
extern void		nudge(thread_act_t);

extern kern_return_t	act_set_thread_pool(thread_act_t, ipc_port_t);
extern kern_return_t	act_locked_act_set_thread_pool(thread_act_t, ipc_port_t);
extern kern_return_t	thread_get_special_port(thread_act_t, int,
					ipc_port_t *);
extern kern_return_t	thread_set_special_port(thread_act_t, int,
					ipc_port_t);
extern thread_t		act_lock_thread(thread_act_t);
extern void		act_unlock_thread(thread_act_t);
extern void		install_special_handler(thread_act_t);
extern thread_act_t	thread_lock_act(thread_t);
extern void		thread_unlock_act(thread_t);
extern void		act_attach(thread_act_t, thread_t, unsigned);
extern void		act_execute_returnhandlers(void);
extern void		act_detach(thread_act_t);
extern void		act_free(thread_act_t);

/* machine-dependent functions */
extern void		act_machine_return(kern_return_t);
extern void		act_machine_init(void);
extern kern_return_t	act_machine_create(struct task *, thread_act_t);
extern void		act_machine_destroy(thread_act_t);
extern kern_return_t	act_machine_set_state(thread_act_t,
					thread_flavor_t, thread_state_t,
					mach_msg_type_number_t);
extern kern_return_t	act_machine_get_state(thread_act_t,
					thread_flavor_t, thread_state_t,
					mach_msg_type_number_t *);
extern void		act_machine_switch_pcb(thread_act_t);
extern void		act_virtual_machine_destroy(thread_act_t);

extern kern_return_t	act_create(task_t, thread_act_t *);
extern kern_return_t	act_get_state(thread_act_t, int, thread_state_t,
					mach_msg_type_number_t *);
extern kern_return_t	act_set_state(thread_act_t, int, thread_state_t,
					mach_msg_type_number_t);

extern int		dump_act(thread_act_t);	/* debugging */

#define current_act_fast()	(current_thread()->top_act)
#define current_act_slow()	((current_thread()) ?			\
				 current_act_fast() :			\
				 THR_ACT_NULL)

#define current_act()	current_act_slow()	/* JMM - til we find the culprit */

#else	/* !MACH_KERNEL_PRIVATE */

extern thread_act_t	current_act(void);
extern void		act_reference(thread_act_t);
extern void		act_deallocate(thread_act_t);

#endif	/* !MACH_KERNEL_PRIVATE */

/* Exported to world */
extern kern_return_t	act_alert(thread_act_t, unsigned);
extern kern_return_t	act_alert_mask(thread_act_t, unsigned);
extern kern_return_t	post_alert(thread_act_t, unsigned);

extern kern_return_t	thread_abort(thread_act_t);
extern kern_return_t	thread_abort_safely(thread_act_t);
extern kern_return_t	thread_resume(thread_act_t);
extern kern_return_t	thread_suspend(thread_act_t);
extern kern_return_t	thread_terminate(thread_act_t);

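/*
 * Illustrative sketch only: suspensions are counted (see suspend_count
 * and user_stop_count above), so each successful thread_suspend() should
 * be balanced by a thread_resume() before the target runs again.
 *
 *	kern_return_t kr;
 *
 *	kr = thread_suspend(thr_act);
 *	if (kr == KERN_SUCCESS) {
 *		(inspect or modify the stopped activation)
 *		(void) thread_resume(thr_act);
 *	}
 */
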
typedef void	(thread_apc_handler_t)(thread_act_t);

extern kern_return_t	thread_apc_set(thread_act_t, thread_apc_handler_t);
extern kern_return_t	thread_apc_clear(thread_act_t, thread_apc_handler_t);

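/*
 * Illustrative sketch only: an APC handler has the thread_apc_handler_t
 * signature above and is installed and removed with the matching calls.
 *
 *	void
 *	my_apc(thread_act_t thr_act)
 *	{
 *		(asynchronous procedure call runs against thr_act)
 *	}
 *
 *	thread_apc_set(thr_act, my_apc);
 *	thread_apc_clear(thr_act, my_apc);
 */
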
extern vm_map_t	swap_act_map(thread_act_t, vm_map_t);

extern void	*get_bsdthread_info(thread_act_t);
extern void	set_bsdthread_info(thread_act_t, void *);
extern task_t	get_threadtask(thread_act_t);

#endif	/* _KERN_THREAD_ACT_H_ */