/*
 * Source: apple/xnu.git, osfmk/kern/thread.h
 * (blob 80127b04dba7973b3d0d85bc2be46cf12dcf1d70, via git.saurik.com)
 */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_FREE_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52 /*
53 * File: thread.h
54 * Author: Avadis Tevanian, Jr.
55 *
56 * This file contains the structure definitions for threads.
57 *
58 */
59 /*
60 * Copyright (c) 1993 The University of Utah and
61 * the Computer Systems Laboratory (CSL). All rights reserved.
62 *
63 * Permission to use, copy, modify and distribute this software and its
64 * documentation is hereby granted, provided that both the copyright
65 * notice and this permission notice appear in all copies of the
66 * software, derivative works or modified versions, and any portions
67 * thereof, and that both notices appear in supporting documentation.
68 *
69 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
70 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
71 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
72 *
73 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
74 * improvements that they make and grant CSL redistribution rights.
75 *
76 */
77
78 #ifndef _KERN_THREAD_H_
79 #define _KERN_THREAD_H_
80
81 #include <mach/kern_return.h>
82 #include <mach/mach_types.h>
83 #include <mach/message.h>
84 #include <mach/boolean.h>
85 #include <mach/vm_types.h>
86 #include <mach/vm_prot.h>
87 #include <mach/thread_info.h>
88 #include <mach/thread_status.h>
89 #include <kern/cpu_data.h> /* for current_thread */
90 #include <kern/kern_types.h>
91
92 /*
93 * Logically, a thread of control consists of two parts:
94 * a thread_shuttle, which may migrate during an RPC, and
95 * a thread_activation, which remains attached to a task.
96 * The thread_shuttle is the larger portion of the two-part thread,
97 * and contains scheduling info, messaging support, accounting info,
98 * and links to the thread_activation within which the shuttle is
99 * currently operating.
100 *
101 * It might make sense to have the thread_shuttle be a proper sub-structure
102 * of the thread, with the thread containing links to both the shuttle and
103 * activation. In order to reduce the scope and complexity of source
104 * changes and the overhead of maintaining these linkages, we have subsumed
105 * the shuttle into the thread, calling it a thread_shuttle.
106 *
107 * User accesses to threads always come in via the user's thread port,
108 * which gets translated to a pointer to the target thread_activation.
 * Kernel accesses intended to affect the entire thread typically use
 * a pointer to the thread_shuttle (current_thread()) as the target of
 * their operations. This makes sense given that we have subsumed the
 * shuttle into the thread_shuttle, eliminating one set of linkages.
 * Operations affecting only the shuttle may use a thread_shuttle_t
 * to indicate this.
115 *
116 * The current_act() macro returns a pointer to the current thread_act, while
117 * the current_thread() macro returns a pointer to the currently active
118 * thread_shuttle (representing the thread in its entirety).
119 */
120
121 /*
122 * Possible results of thread_block - returned in
123 * current_thread()->wait_result.
124 */
125 #define THREAD_AWAKENED 0 /* normal wakeup */
126 #define THREAD_TIMED_OUT 1 /* timeout expired */
127 #define THREAD_INTERRUPTED 2 /* interrupted by clear_wait */
128 #define THREAD_RESTART 3 /* restart operation entirely */
129
130 /*
131 * Interruptible flags for assert_wait
132 *
133 */
134 #define THREAD_UNINT 0 /* not interruptible */
135 #define THREAD_INTERRUPTIBLE 1 /* may not be restartable */
136 #define THREAD_ABORTSAFE 2 /* abortable safely */
137
138 #ifdef MACH_KERNEL_PRIVATE
139 #include <cpus.h>
140 #include <hw_footprint.h>
141 #include <mach_host.h>
142 #include <mach_prof.h>
143 #include <mach_lock_mon.h>
144 #include <mach_ldebug.h>
145
146 #include <mach/port.h>
147 #include <kern/ast.h>
148 #include <kern/cpu_number.h>
149 #include <kern/queue.h>
150 #include <kern/time_out.h>
151 #include <kern/timer.h>
152 #include <kern/lock.h>
153 #include <kern/sched.h>
154 #include <kern/sched_prim.h>
155 #include <kern/thread_pool.h>
156 #include <kern/thread_call.h>
157 #include <kern/timer_call.h>
158 #include <kern/task.h>
159 #include <ipc/ipc_kmsg.h>
160 #include <machine/thread.h>
161
162 typedef struct {
163 int fnl_type; /* funnel type */
164 mutex_t * fnl_mutex; /* underlying mutex for the funnel */
165 void * fnl_mtxholder; /* thread (last)holdng mutex */
166 void * fnl_mtxrelease; /* thread (last)releasing mutex */
167 mutex_t * fnl_oldmutex; /* Mutex before collapsing split funnel */
168 } funnel_t;
169
170
171 typedef struct thread_shuttle {
172 /*
173 * Beginning of thread_shuttle proper. When the thread is on
174 * a wait queue, these three fields are in treated as an un-
175 * official union with a wait_queue_element. If you change
176 * these, you must change that definition as well.
177 */
178 queue_chain_t links; /* current run/wait queue links */
179 run_queue_t runq; /* run queue p is on SEE BELOW */
180 int whichq; /* which queue level p is on */
181
182 /*
183 * NOTE: The runq field in the thread structure has an unusual
184 * locking protocol. If its value is RUN_QUEUE_NULL, then it is
185 * locked by the thread_lock, but if its value is something else
186 * (i.e. a run_queue) then it is locked by that run_queue's lock.
187 */
188
189 /* Thread bookkeeping */
190 queue_chain_t pset_threads; /* list of all shuttles in proc set */
191
192 /* Self-preservation */
193 decl_simple_lock_data(,lock) /* scheduling lock (thread_lock()) */
194 decl_simple_lock_data(,wake_lock) /* covers wake_active (wake_lock())*/
195 decl_mutex_data(,rpc_lock) /* RPC lock (rpc_lock()) */
196 int ref_count; /* number of references to me */
197
198 vm_offset_t kernel_stack; /* accurate only if the thread is
199 not swapped and not executing */
200
201 vm_offset_t stack_privilege;/* reserved kernel stack */
202
203 /* Blocking information */
204 int reason; /* why we blocked */
205 event_t wait_event; /* event we are waiting on */
206 kern_return_t wait_result; /* outcome of wait -
207 may be examined by this thread
208 WITHOUT locking */
209 wait_queue_t wait_queue; /* wait queue we are currently on */
210 queue_chain_t wait_link; /* event's wait queue link */
211 boolean_t wake_active; /* Someone is waiting for this
212 thread to become suspended */
213 int state; /* Thread state: */
214 boolean_t preempt; /* Thread is undergoing preemption */
215 boolean_t interruptible; /* Thread is interruptible */
216
217 #if ETAP_EVENT_MONITOR
218 int etap_reason; /* real reason why we blocked */
219 boolean_t etap_trace; /* ETAP trace status */
220 #endif /* ETAP_EVENT_MONITOR */
221
222 /*
223 * Thread states [bits or'ed]
224 */
225 #define TH_WAIT 0x01 /* thread is queued for waiting */
226 #define TH_SUSP 0x02 /* thread has been asked to stop */
227 #define TH_RUN 0x04 /* thread is running or on runq */
228 #define TH_UNINT 0x08 /* thread is waiting uninteruptibly */
229 #define TH_HALTED 0x10 /* thread is halted at clean point ? */
230
231 #define TH_ABORT 0x20 /* abort interruptible waits */
232 #define TH_SWAPPED_OUT 0x40 /* thread is swapped out */
233
234 #define TH_IDLE 0x80 /* thread is an idle thread */
235
236 #define TH_SCHED_STATE (TH_WAIT|TH_SUSP|TH_RUN|TH_UNINT)
237
238 #define TH_STACK_HANDOFF 0x0100 /* thread has no kernel stack */
239 #define TH_STACK_COMING_IN 0x0200 /* thread is waiting for kernel stack */
240 #define TH_STACK_STATE (TH_STACK_HANDOFF | TH_STACK_COMING_IN)
241
242 #define TH_TERMINATE 0x0400 /* thread is terminating */
243
244 /* Stack handoff information */
245 void (*continuation)(void); /* start here next time dispatched */
246 int cont_arg; /* XXX continuation argument */
247
248 /* Scheduling information */
249 integer_t importance; /* task-relative importance */
250 integer_t sched_mode; /* scheduling mode bits */
251 #define TH_MODE_REALTIME 0x0001
252 struct { /* see mach/thread_policy.h */
253 natural_t period;
254 natural_t computation;
255 natural_t constraint;
256 boolean_t preemptible;
257 } realtime;
258
259 integer_t priority; /* base priority */
260 integer_t sched_pri; /* scheduled (current) priority */
261 integer_t depress_priority; /* priority to restore */
262 integer_t max_priority;
263
264 natural_t cpu_usage; /* exp. decaying cpu usage [%cpu] */
265 natural_t sched_usage; /* load-weighted cpu usage [sched] */
266 natural_t sched_stamp; /* last time priority was updated */
267 natural_t sleep_stamp; /* last time in TH_WAIT state */
268
269 /* 'Obsolete' stuff that cannot be removed yet */
270 integer_t policy;
271 integer_t sp_state;
272 integer_t unconsumed_quantum;
273
274 /* VM global variables */
275 boolean_t vm_privilege; /* can use reserved memory? */
276 vm_offset_t recover; /* page fault recovery (copyin/out) */
277
278 /* IPC data structures */
279
280 struct ipc_kmsg_queue ith_messages;
281
282 mach_port_t ith_mig_reply; /* reply port for mig */
283 mach_port_t ith_rpc_reply; /* reply port for kernel RPCs */
284
285 /* Various bits of stashed state */
286 union {
287 struct {
288 mach_msg_return_t state; /* receive state */
289 ipc_object_t object; /* object received on */
290 mach_msg_header_t *msg; /* receive buffer pointer */
291 mach_msg_size_t msize; /* max size for recvd msg */
292 mach_msg_option_t option; /* options for receive */
293 mach_msg_size_t slist_size; /* scatter list size */
294 struct ipc_kmsg *kmsg; /* received message */
295 mach_port_seqno_t seqno; /* seqno of recvd message */
296 void (*continuation)(mach_msg_return_t);
297 } receive;
298 struct {
299 struct semaphore *waitsemaphore; /* semaphore ref */
300 struct semaphore *signalsemaphore; /* semaphore ref */
301 int options; /* semaphore options */
302 kern_return_t result; /* primary result */
303 void (*continuation)(kern_return_t);
304 } sema;
305 struct {
306 struct sf_policy *policy; /* scheduling policy */
307 int option; /* switch option */
308 } swtch;
309 char *other; /* catch-all for other state */
310 } saved;
311
312 /* Timing data structures */
313 timer_data_t user_timer; /* user mode timer */
314 timer_data_t system_timer; /* system mode timer */
315 timer_data_t depressed_timer;/* depressed priority timer */
316 timer_save_data_t user_timer_save; /* saved user timer value */
317 timer_save_data_t system_timer_save; /* saved sys timer val. */
318 /*** ??? should the next two fields be moved to SP-specific struct?***/
319 unsigned int cpu_delta; /* cpu usage since last update */
320 unsigned int sched_delta; /* weighted cpu usage since update */
321
322 /* Timed wait expiration */
323 timer_call_data_t wait_timer;
324 integer_t wait_timer_active;
325 boolean_t wait_timer_is_set;
326
327 /* Priority depression expiration */
328 thread_call_data_t depress_timer;
329
330 /* Ast/Halt data structures */
331 boolean_t active; /* how alive is the thread */
332
333 /* Processor data structures */
334 processor_set_t processor_set; /* assigned processor set */
335 #if NCPUS > 1
336 processor_t bound_processor; /* bound to processor ?*/
337 #endif /* NCPUS > 1 */
338 #if MACH_HOST
339 boolean_t may_assign; /* may assignment change? */
340 boolean_t assign_active; /* someone waiting for may_assign */
341 #endif /* MACH_HOST */
342
343 #if XKMACHKERNEL
344 int xk_type;
345 #endif /* XKMACHKERNEL */
346
347 #if NCPUS > 1
348 processor_t last_processor; /* processor this last ran on */
349 #if MACH_LOCK_MON
350 unsigned lock_stack; /* number of locks held */
351 #endif /* MACH_LOCK_MON */
352 #endif /* NCPUS > 1 */
353
354 int at_safe_point; /* thread_abort_safely allowed */
355 int funnel_state;
356 #define TH_FN_OWNED 0x1 /* we own the funnel lock */
357 #define TH_FN_REFUNNEL 0x2 /* must reaquire funnel lock when unblocking */
358 funnel_t *funnel_lock;
359 #if MACH_LDEBUG
360 /*
361 * Debugging: track acquired mutexes and locks.
362 * Because a thread can block while holding such
363 * synchronizers, we think of the thread as
364 * "owning" them.
365 */
366 #define MUTEX_STACK_DEPTH 20
367 #define LOCK_STACK_DEPTH 20
368 mutex_t *mutex_stack[MUTEX_STACK_DEPTH];
369 lock_t *lock_stack[LOCK_STACK_DEPTH];
370 unsigned int mutex_stack_index;
371 unsigned int lock_stack_index;
372 unsigned mutex_count; /* XXX to be deleted XXX */
373 boolean_t kthread; /* thread is a kernel thread */
374 #endif /* MACH_LDEBUG */
375
376 /*
377 * End of thread_shuttle proper
378 */
379
380 /*
381 * Migration and thread_activation linkage information
382 */
383 struct thread_activation *top_act; /* "current" thr_act */
384
385 } Thread_Shuttle;
386
387 #define THREAD_SHUTTLE_NULL ((thread_shuttle_t)0)
388
389 #define ith_state saved.receive.state
390 #define ith_object saved.receive.object
391 #define ith_msg saved.receive.msg
392 #define ith_msize saved.receive.msize
393 #define ith_option saved.receive.option
394 #define ith_scatter_list_size saved.receive.slist_size
395 #define ith_continuation saved.receive.continuation
396 #define ith_kmsg saved.receive.kmsg
397 #define ith_seqno saved.receive.seqno
398
399 #define sth_waitsemaphore saved.sema.waitsemaphore
400 #define sth_signalsemaphore saved.sema.signalsemaphore
401 #define sth_options saved.sema.options
402 #define sth_result saved.sema.result
403 #define sth_continuation saved.sema.continuation
404
405 extern thread_act_t active_kloaded[NCPUS]; /* "" kernel-loaded acts */
406 extern vm_offset_t active_stacks[NCPUS]; /* active kernel stacks */
407 extern vm_offset_t kernel_stack[NCPUS];
408
409 #ifndef MACHINE_STACK_STASH
410 /*
411 * MD Macro to fill up global stack state,
412 * keeping the MD structure sizes + games private
413 */
414 #define MACHINE_STACK_STASH(stack) \
415 MACRO_BEGIN \
416 mp_disable_preemption(); \
417 active_stacks[cpu_number()] = (stack); \
418 kernel_stack[cpu_number()] = (stack) + KERNEL_STACK_SIZE; \
419 mp_enable_preemption(); \
420 MACRO_END
421 #endif /* MACHINE_STACK_STASH */
422
423 /*
424 * Kernel-only routines
425 */
426
427 /* Initialize thread module */
428 extern void thread_init(void);
429
430 /* Take reference on thread (make sure it doesn't go away) */
431 extern void thread_reference(
432 thread_t thread);
433
434 /* Release reference on thread */
435 extern void thread_deallocate(
436 thread_t thread);
437
438 /* Set priority of calling thread */
439 extern void thread_set_own_priority(
440 int priority);
441
442 /* Start a thread at specified routine */
443 #define thread_start(thread, start) \
444 (thread)->continuation = (start)
445
446
447 /* Reaps threads waiting to be destroyed */
448 extern void thread_reaper(void);
449
450
451 #if MACH_HOST
452 /* Preclude thread processor set assignement */
453 extern void thread_freeze(
454 thread_t thread);
455
456 /* Assign thread to a processor set */
457 extern void thread_doassign(
458 thread_t thread,
459 processor_set_t new_pset,
460 boolean_t release_freeze);
461
462 /* Allow thread processor set assignement */
463 extern void thread_unfreeze(
464 thread_t thread);
465
466 #endif /* MACH_HOST */
467
468 /* Insure thread always has a kernel stack */
469 extern void stack_privilege(
470 thread_t thread);
471
472 extern void consider_thread_collect(void);
473
474 /*
475 * Arguments to specify aggressiveness to thread halt.
476 * Can't have MUST_HALT and SAFELY at the same time.
477 */
478 #define THREAD_HALT_NORMAL 0
479 #define THREAD_HALT_MUST_HALT 1 /* no deadlock checks */
480 #define THREAD_HALT_SAFELY 2 /* result must be restartable */
481
482 /*
483 * Macro-defined routines
484 */
485
486 #define thread_pcb(th) ((th)->pcb)
487
488 #define thread_lock_init(th) \
489 simple_lock_init(&(th)->lock, ETAP_THREAD_LOCK)
490 #define thread_lock(th) simple_lock(&(th)->lock)
491 #define thread_unlock(th) simple_unlock(&(th)->lock)
492
493 #define thread_should_halt_fast(thread) \
494 (!(thread)->top_act || \
495 !(thread)->top_act->active || \
496 (thread)->top_act->ast & (AST_HALT|AST_TERMINATE))
497
498 #define thread_should_halt(thread) thread_should_halt_fast(thread)
499
500 #define rpc_lock_init(th) mutex_init(&(th)->rpc_lock, ETAP_THREAD_RPC)
501 #define rpc_lock(th) mutex_lock(&(th)->rpc_lock)
502 #define rpc_lock_try(th) mutex_try(&(th)->rpc_lock)
503 #define rpc_unlock(th) mutex_unlock(&(th)->rpc_lock)
504
505 /*
506 * Lock to cover wake_active only; like thread_lock(), is taken
507 * at splsched(). Used to avoid calling into scheduler with a
508 * thread_lock() held. Precedes thread_lock() (and other scheduling-
509 * related locks) in the system lock ordering.
510 */
511 #define wake_lock_init(th) \
512 simple_lock_init(&(th)->wake_lock, ETAP_THREAD_WAKE)
513 #define wake_lock(th) simple_lock(&(th)->wake_lock)
514 #define wake_unlock(th) simple_unlock(&(th)->wake_lock)
515
516 static __inline__ vm_offset_t current_stack(void);
517 static __inline__ vm_offset_t
518 current_stack(void)
519 {
520 vm_offset_t ret;
521
522 mp_disable_preemption();
523 ret = active_stacks[cpu_number()];
524 mp_enable_preemption();
525 return ret;
526 }
527
528
529 extern void pcb_module_init(void);
530
531 extern void pcb_init(
532 thread_act_t thr_act);
533
534 extern void pcb_terminate(
535 thread_act_t thr_act);
536
537 extern void pcb_collect(
538 thread_act_t thr_act);
539
540 extern void pcb_user_to_kernel(
541 thread_act_t thr_act);
542
543 extern kern_return_t thread_setstatus(
544 thread_act_t thr_act,
545 int flavor,
546 thread_state_t tstate,
547 mach_msg_type_number_t count);
548
549 extern kern_return_t thread_getstatus(
550 thread_act_t thr_act,
551 int flavor,
552 thread_state_t tstate,
553 mach_msg_type_number_t *count);
554
555 extern boolean_t stack_alloc_try(
556 thread_t thread,
557 void (*start_pos)(thread_t));
558
559 /* This routine now used only internally */
560 extern kern_return_t thread_info_shuttle(
561 thread_act_t thr_act,
562 thread_flavor_t flavor,
563 thread_info_t thread_info_out,
564 mach_msg_type_number_t *thread_info_count);
565
566 extern void thread_user_to_kernel(
567 thread_t thread);
568
569 /* Machine-dependent routines */
570 extern void thread_machine_init(void);
571
572 extern void thread_machine_set_current(
573 thread_t thread );
574
575 extern kern_return_t thread_machine_create(
576 thread_t thread,
577 thread_act_t thr_act,
578 void (*start_pos)(thread_t));
579
580 extern void thread_set_syscall_return(
581 thread_t thread,
582 kern_return_t retval);
583
584 extern void thread_machine_destroy(
585 thread_t thread );
586
587 extern void thread_machine_flush(
588 thread_act_t thr_act);
589
590 extern thread_t kernel_thread_with_priority(
591 task_t task,
592 integer_t priority,
593 void (*start)(void),
594 boolean_t start_running);
595
596 extern void funnel_lock(funnel_t *);
597
598 extern void funnel_unlock(funnel_t *);
599
600 #else /* !MACH_KERNEL_PRIVATE */
601
602 typedef struct __funnel__ funnel_t;
603
604 extern boolean_t thread_should_halt(thread_t);
605
606 #endif /* !MACH_KERNEL_PRIVATE */
607
608 #define THR_FUNNEL_NULL (funnel_t *)0
609
610 extern thread_t kernel_thread(
611 task_t task,
612 void (*start)(void));
613
614 extern void thread_terminate_self(void);
615
616 extern funnel_t * funnel_alloc(int);
617
618 extern funnel_t * thread_funnel_get(void);
619
620 extern boolean_t thread_funnel_set(funnel_t * fnl, boolean_t funneled);
621
622 extern boolean_t thread_funnel_merge(funnel_t * fnl, funnel_t * otherfnl);
623
624 extern void thread_set_cont_arg(int);
625
626 extern int thread_get_cont_arg(void);
627
628 /* JMM - These are only temporary */
629 extern boolean_t is_thread_running(thread_t); /* True is TH_RUN */
630 extern boolean_t is_thread_idle(thread_t); /* True is TH_IDLE */
631 extern event_t get_thread_waitevent(thread_t);
632 extern kern_return_t get_thread_waitresult(thread_t);
633
634 #endif /* _KERN_THREAD_H_ */