/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	thread.h
 *	Author:	Avadis Tevanian, Jr.
 *
 *	This file contains the structure definitions for threads.
 *
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */

#ifndef	_KERN_THREAD_H_
#define	_KERN_THREAD_H_

#include <mach/kern_return.h>
#include <mach/mach_types.h>
#include <mach/message.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/thread_info.h>
#include <mach/thread_status.h>

#include <kern/cpu_data.h>	/* for current_thread */
#include <kern/kern_types.h>

#include <ipc/ipc_types.h>

/*
 * Logically, a thread of control consists of two parts:
 *
 *	a thread_shuttle, which may migrate due to resource contention
 * and
 *	a thread_activation, which remains attached to a task.
 *
 * The thread_shuttle contains scheduling info, accounting info,
 * and links to the thread_activation within which the shuttle is
 * currently operating.
 *
 * It might make sense to have the thread_shuttle be a proper sub-structure
 * of the thread, with the thread containing links to both the shuttle and
 * activation.  In order to reduce the scope and complexity of source
 * changes and the overhead of maintaining these linkages, we have subsumed
 * the shuttle into the thread, calling it a thread_shuttle.
 *
 * User accesses to threads always come in via the user's thread port,
 * which gets translated to a pointer to the target thread_activation.
 */
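
/*
 * A minimal sketch of how the two halves reference each other.  The
 * activation's back-pointer to its shuttle is assumed here to be the
 * `thread' field declared in kern/thread_act.h; verify against that
 * header before relying on it:
 *
 *	thread_act_t	act     = current_act();	// task-attached half
 *	thread_t	shuttle = act->thread;		// schedulable half
 *	assert(shuttle->top_act == act);		// for the current
 *							// thread, the shuttle's
 *							// top_act points back
 */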
#include <sys/appleapiopts.h>

#ifdef	__APPLE_API_PRIVATE

#ifdef	MACH_KERNEL_PRIVATE

#include <cpus.h>
#include <hw_footprint.h>
#include <mach_host.h>
#include <mach_prof.h>
#include <mach_lock_mon.h>
#include <mach_ldebug.h>

#include <mach/port.h>
#include <kern/ast.h>
#include <kern/cpu_number.h>
#include <kern/queue.h>
#include <kern/time_out.h>
#include <kern/timer.h>
#include <kern/lock.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/thread_call.h>
#include <kern/timer_call.h>
#include <kern/task.h>
#include <ipc/ipc_kmsg.h>
#include <machine/thread.h>
/*
 * Kernel accesses intended to affect the entire thread typically use
 * a pointer to the thread_shuttle (current_thread()) as the target of
 * their operations.  This makes sense given that we have subsumed the
 * shuttle into the thread, eliminating one set of linkages.  Operations
 * affecting only the shuttle may use a thread_shuttle_t to indicate this.
 *
 * The current_act() macro returns a pointer to the current thread_act, while
 * the current_thread() macro returns a pointer to the currently active
 * thread_shuttle (representing the thread in its entirety).
 */
struct thread_shuttle {
	/*
	 * NOTE:	The runq field in the thread structure has an unusual
	 *		locking protocol.  If its value is RUN_QUEUE_NULL,
	 *		then it is locked by the thread_lock, but if its value
	 *		is something else (i.e. a run_queue) then it is locked
	 *		by that run_queue's lock.
	 *
	 * Beginning of thread_shuttle proper.  When the thread is on
	 * a wait queue, these first three fields are treated as an
	 * unofficial union with a wait_queue_element.  If you change
	 * these, you must change that definition as well (wait_queue.h).
	 */
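	/*
	 * Hedged sketch of the runq protocol described above (illustrative
	 * only; the authoritative handling lives in the scheduler sources):
	 *
	 *	thread_lock(thread);
	 *	if (thread->runq == RUN_QUEUE_NULL) {
	 *		// not enqueued: runq is stable under the thread lock
	 *	} else {
	 *		// enqueued: lock that run queue, re-check that
	 *		// thread->runq still names it, then manipulate
	 *	}
	 *	thread_unlock(thread);
	 */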
	/* Items examined often, modified infrequently */
	queue_chain_t	links;			/* run/wait queue links */
	run_queue_t	runq;			/* run queue thread is on (see NOTE above) */
	wait_queue_t	wait_queue;		/* wait queue we are currently on */
	event64_t	wait_event;		/* wait queue event */
	thread_act_t	top_act;		/* "current" thr_act */
	uint32_t				/* Only set by thread itself */
			interrupt_level:2,	/* interrupts/aborts allowed */
			vm_privilege:1,		/* can use reserved memory? */
			active_callout:1,	/* an active callout */
			:0;

	/* Data updated during assert_wait/thread_wakeup */
	decl_simple_lock_data(,lock)		/* scheduling lock (thread_lock()) */
	decl_simple_lock_data(,wake_lock)	/* covers wake_active (wake_lock()) */
	boolean_t	wake_active;		/* Someone is waiting for this */
	int		at_safe_point;		/* thread_abort_safely allowed */
	ast_t		reason;			/* why we blocked */
	wait_result_t	wait_result;		/* outcome of wait -
						 * may be examined by this
						 * thread WITHOUT locking */
	thread_roust_t	roust;			/* routine to roust it after wait */
	thread_continue_t continuation;		/* resume here next dispatch */

	/* Data updated/used in thread_invoke */
	struct funnel_lock *funnel_lock;	/* Non-reentrancy funnel */
	int		funnel_state;
#define TH_FN_OWNED		0x1		/* we own the funnel */
#define TH_FN_REFUNNEL		0x2		/* re-acquire funnel on dispatch */

	vm_offset_t	kernel_stack;		/* current kernel stack */
	vm_offset_t	stack_privilege;	/* reserved kernel stack */

	/* Thread state: */
	int		state;
/*
 * Thread states [bits or'ed]
 */
#define TH_WAIT			0x01	/* thread is queued for waiting */
#define TH_SUSP			0x02	/* thread has been asked to stop */
#define TH_RUN			0x04	/* thread is running or on runq */
#define TH_UNINT		0x08	/* thread is waiting uninterruptibly */
#define TH_TERMINATE		0x10	/* thread is halting at termination */

#define TH_ABORT		0x20	/* abort interruptible waits */
#define TH_ABORT_SAFELY		0x40	/* ... but only those at safe point */

#define TH_IDLE			0x80	/* thread is an idle thread */

#define	TH_SCHED_STATE	(TH_WAIT|TH_SUSP|TH_RUN|TH_UNINT)

#define	TH_STACK_HANDOFF	0x0100	/* thread has no kernel stack */
#define	TH_STACK_ALLOC		0x0200	/* waiting for stack allocation */
#define	TH_STACK_STATE	(TH_STACK_HANDOFF | TH_STACK_ALLOC)
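
	/*
	 * Illustrative composition of these bits (a sketch, not a quote
	 * from the scheduler): a thread blocked uninterruptibly would
	 * typically satisfy
	 *
	 *	(thread->state & TH_SCHED_STATE) == (TH_WAIT | TH_UNINT)
	 *
	 * while a runnable thread still waiting for a kernel stack would
	 * show TH_RUN together with TH_STACK_ALLOC.
	 */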

	/* Scheduling information */
	integer_t	sched_mode;		/* scheduling mode bits */
#define TH_MODE_REALTIME	0x0001	/* time constraints supplied */
#define TH_MODE_TIMESHARE	0x0002	/* use timesharing algorithm */
#define TH_MODE_PREEMPT		0x0004	/* can preempt kernel contexts */
#define TH_MODE_FAILSAFE	0x0008	/* fail-safe has tripped */
#define TH_MODE_PROMOTED	0x0010	/* sched pri has been promoted */
#define TH_MODE_FORCEDPREEMPT	0x0020	/* force setting of mode PREEMPT */
#define TH_MODE_DEPRESS		0x0040	/* normal depress yield */
#define TH_MODE_POLLDEPRESS	0x0080	/* polled depress yield */
#define TH_MODE_ISDEPRESSED	(TH_MODE_DEPRESS | TH_MODE_POLLDEPRESS)

	integer_t	sched_pri;		/* scheduled (current) priority */
	integer_t	priority;		/* base priority */
	integer_t	max_priority;		/* max base priority */
	integer_t	task_priority;		/* copy of task base priority */

	integer_t	promotions;		/* level of promotion */
	integer_t	pending_promoter_index;
	void		*pending_promoter[2];

	integer_t	importance;		/* task-relative importance */

	/* time constraint parameters */
	struct {				/* see mach/thread_policy.h */
		uint32_t	period;
		uint32_t	computation;
		uint32_t	constraint;
		boolean_t	preemptible;
	} realtime;
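
	/*
	 * Worked example (values illustrative only): a thread asking for
	 * 5 ms of computation out of every 10 ms period, finished within
	 * a 7 ms constraint, would carry period = 10 ms, computation =
	 * 5 ms, constraint = 7 ms here, each converted to absolute-time
	 * units (e.g. via clock_interval_to_absolutetime_interval())
	 * when set through THREAD_TIME_CONSTRAINT_POLICY.
	 */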

	uint32_t	current_quantum;	/* duration of current quantum */

	/* Data used during setrun/dispatch */
	timer_data_t	system_timer;		/* system mode timer */
	processor_set_t	processor_set;		/* assigned processor set */
	processor_t	bound_processor;	/* bound to a processor? */
	processor_t	last_processor;		/* processor last dispatched on */
	uint64_t	last_switch;		/* time of last context switch */

	/* Fail-safe computation since last unblock or qualifying yield */
	uint64_t	computation_metered;
	uint64_t	computation_epoch;
	integer_t	safe_mode;		/* saved mode during fail-safe */
	natural_t	safe_release;		/* when to release fail-safe */

	/* Used in priority computations */
	natural_t	sched_stamp;		/* when priority was updated */
	natural_t	cpu_usage;		/* exp. decaying cpu usage [%cpu] */
	natural_t	cpu_delta;		/* cpu usage since last update */
	natural_t	sched_usage;		/* load-weighted cpu usage [sched] */
	natural_t	sched_delta;		/* weighted cpu usage since update */
	natural_t	sleep_stamp;		/* when entered TH_WAIT state */

	/* Timing data structures */
	timer_data_t	user_timer;		/* user mode timer */
	timer_save_data_t system_timer_save;	/* saved system timer value */
	timer_save_data_t user_timer_save;	/* saved user timer value */

	/* Timed wait expiration */
	timer_call_data_t wait_timer;
	integer_t	wait_timer_active;
	boolean_t	wait_timer_is_set;

	/* Priority depression expiration */
	timer_call_data_t depress_timer;
	integer_t	depress_timer_active;

	/* Various bits of stashed state */
	union {
		struct {
			mach_msg_return_t state;	/* receive state */
			ipc_object_t	object;		/* object received on */
			mach_msg_header_t *msg;		/* receive buffer pointer */
			mach_msg_size_t	msize;		/* max size for recvd msg */
			mach_msg_option_t option;	/* options for receive */
			mach_msg_size_t	slist_size;	/* scatter list size */
			struct ipc_kmsg	*kmsg;		/* received message */
			mach_port_seqno_t seqno;	/* seqno of recvd message */
			mach_msg_continue_t continuation;
		} receive;
		struct {
			struct semaphore *waitsemaphore;	/* semaphore ref */
			struct semaphore *signalsemaphore;	/* semaphore ref */
			int		options;	/* semaphore options */
			kern_return_t	result;		/* primary result */
			mach_msg_continue_t continuation;
		} sema;
		struct {
			int		option;		/* switch option */
		} swtch;
		int		misc;		/* catch-all for other state */
	} saved;

	/* IPC data structures */
	struct ipc_kmsg_queue ith_messages;
	mach_port_t	ith_mig_reply;		/* reply port for mig */
	mach_port_t	ith_rpc_reply;		/* reply port for kernel RPCs */

	/* Ast/Halt data structures */
	boolean_t	active;			/* thread is active */
	vm_offset_t	recover;		/* page fault recovery (copyin/out) */
	int		ref_count;		/* number of references to me */

	/* Processor set info */
	queue_chain_t	pset_threads;		/* list of all shuttles in pset */
#if	MACH_HOST
	boolean_t	may_assign;		/* may assignment change? */
	boolean_t	assign_active;		/* waiting for may_assign */
#endif	/* MACH_HOST */

	/* BEGIN TRACING/DEBUG */

#if	MACH_LOCK_MON
	unsigned	lock_stack;		/* number of locks held */
#endif	/* MACH_LOCK_MON */

#if	ETAP_EVENT_MONITOR
	int		etap_reason;		/* real reason why we blocked */
	boolean_t	etap_trace;		/* ETAP trace status */
#endif	/* ETAP_EVENT_MONITOR */

#if	MACH_LDEBUG
	/*
	 * Debugging:  track acquired mutexes and locks.
	 * Because a thread can block while holding such
	 * synchronizers, we think of the thread as
	 * "owning" them.
	 */
#define	MUTEX_STACK_DEPTH	20
#define	LOCK_STACK_DEPTH	20
	mutex_t		*mutex_stack[MUTEX_STACK_DEPTH];
	lock_t		*lock_stack[LOCK_STACK_DEPTH];
	unsigned int	mutex_stack_index;
	unsigned int	lock_stack_index;
	unsigned	mutex_count;		/* XXX to be deleted XXX */
#endif	/* MACH_LDEBUG */
	/* END TRACING/DEBUG */

};

#define ith_state		saved.receive.state
#define ith_object		saved.receive.object
#define ith_msg			saved.receive.msg
#define ith_msize		saved.receive.msize
#define ith_option		saved.receive.option
#define ith_scatter_list_size	saved.receive.slist_size
#define ith_continuation	saved.receive.continuation
#define ith_kmsg		saved.receive.kmsg
#define ith_seqno		saved.receive.seqno

#define sth_waitsemaphore	saved.sema.waitsemaphore
#define sth_signalsemaphore	saved.sema.signalsemaphore
#define sth_options		saved.sema.options
#define sth_result		saved.sema.result
#define sth_continuation	saved.sema.continuation
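
/*
 * These shorthands let blocked-state code read naturally.  A hedged
 * sketch of the IPC receive path's usage (not a quote from the actual
 * sources; max_size is a hypothetical local):
 *
 *	self->ith_state = MACH_RCV_IN_PROGRESS;
 *	self->ith_msize = max_size;
 *
 * rather than spelling out self->saved.receive.state and so on.
 */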

struct funnel_lock {
	int		fnl_type;		/* funnel type */
	mutex_t		*fnl_mutex;		/* underlying mutex for the funnel */
	void		*fnl_mtxholder;		/* thread (last) holding mutex */
	void		*fnl_mtxrelease;	/* thread (last) releasing mutex */
	mutex_t		*fnl_oldmutex;		/* mutex before collapsing split funnel */
};

typedef struct funnel_lock funnel_t;

extern thread_act_t	active_kloaded[NCPUS];	/* "" kernel-loaded acts */
extern vm_offset_t	active_stacks[NCPUS];	/* active kernel stacks */
extern vm_offset_t	kernel_stack[NCPUS];

extern struct thread_shuttle	pageout_thread;

#ifndef MACHINE_STACK_STASH
/*
 * MD Macro to fill up global stack state,
 * keeping the MD structure sizes + games private
 */
#define MACHINE_STACK_STASH(stack)					\
MACRO_BEGIN								\
	mp_disable_preemption();					\
	active_stacks[cpu_number()] = (stack);				\
	kernel_stack[cpu_number()] = (stack) + KERNEL_STACK_SIZE;	\
	mp_enable_preemption();						\
MACRO_END
#endif	/* MACHINE_STACK_STASH */

/*
 * Kernel-only routines
 */

/* Initialize thread module */
extern void		thread_init(void);

/* Take reference on thread (make sure it doesn't go away) */
extern void		thread_reference(
				thread_t	thread);

/* Release reference on thread */
extern void		thread_deallocate(
				thread_t	thread);
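
/*
 * Typical pairing (sketch): take a reference before using a thread
 * pointer beyond the scope of whatever lock made it valid, and drop
 * the reference when done with it.
 *
 *	thread_reference(thread);
 *	...use thread without fear of it being freed...
 *	thread_deallocate(thread);
 */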

/* Set task priority of member thread */
extern void		thread_task_priority(
				thread_t	thread,
				integer_t	priority,
				integer_t	max_priority);

/* Start a thread at specified routine */
#define thread_start(thread, start)			\
	(thread)->continuation = (start)

/* Reaps threads waiting to be destroyed */
extern void		thread_reaper_init(void);

/* Ensure thread always has a kernel stack */
extern void		stack_privilege(
				thread_t	thread);

extern void		consider_thread_collect(void);

/*
 * Arguments to specify aggressiveness to thread halt.
 * Can't have MUST_HALT and SAFELY at the same time.
 */
#define THREAD_HALT_NORMAL	0
#define THREAD_HALT_MUST_HALT	1	/* no deadlock checks */
#define THREAD_HALT_SAFELY	2	/* result must be restartable */

/*
 *	Macro-defined routines
 */

#define thread_pcb(th)		((th)->pcb)

#define thread_lock_init(th)	simple_lock_init(&(th)->lock, ETAP_THREAD_LOCK)
#define thread_lock(th)		simple_lock(&(th)->lock)
#define thread_unlock(th)	simple_unlock(&(th)->lock)
#define thread_lock_try(th)	simple_lock_try(&(th)->lock)

#define thread_should_halt_fast(thread)	\
	(!(thread)->top_act || !(thread)->top_act->active)

#define thread_should_halt(thread)	thread_should_halt_fast(thread)

#define thread_reference_locked(thread)	((thread)->ref_count++)

/*
 * Lock to cover wake_active only; like thread_lock(), is taken
 * at splsched().  Used to avoid calling into scheduler with a
 * thread_lock() held.  Precedes thread_lock() (and other scheduling-
 * related locks) in the system lock ordering.
 */
#define wake_lock_init(th)	\
	simple_lock_init(&(th)->wake_lock, ETAP_THREAD_WAKE)
#define wake_lock(th)		simple_lock(&(th)->wake_lock)
#define wake_unlock(th)		simple_unlock(&(th)->wake_lock)
#define wake_lock_try(th)	simple_lock_try(&(th)->wake_lock)
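
/*
 * Because wake_lock precedes thread_lock in the lock ordering, code
 * needing both takes them in that order (a sketch, at splsched()):
 *
 *	s = splsched();
 *	wake_lock(thread);
 *	thread_lock(thread);
 *	...examine or modify scheduling state...
 *	thread_unlock(thread);
 *	wake_unlock(thread);
 *	splx(s);
 */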

static __inline__ vm_offset_t current_stack(void);
static __inline__ vm_offset_t
current_stack(void)
{
	vm_offset_t	ret;

	mp_disable_preemption();
	ret = active_stacks[cpu_number()];
	mp_enable_preemption();
	return ret;
}

extern void		pcb_module_init(void);

extern void		pcb_init(
				thread_act_t	thr_act);

extern void		pcb_terminate(
				thread_act_t	thr_act);

extern void		pcb_collect(
				thread_act_t	thr_act);

extern void		pcb_user_to_kernel(
				thread_act_t	thr_act);

extern kern_return_t	thread_setstatus(
				thread_act_t	thr_act,
				int		flavor,
				thread_state_t	tstate,
				mach_msg_type_number_t	count);

extern kern_return_t	thread_getstatus(
				thread_act_t	thr_act,
				int		flavor,
				thread_state_t	tstate,
				mach_msg_type_number_t	*count);

extern boolean_t	stack_alloc_try(
				thread_t	thread,
				void		(*start_pos)(thread_t));

/* This routine is now used only internally */
extern kern_return_t	thread_info_shuttle(
				thread_act_t	thr_act,
				thread_flavor_t	flavor,
				thread_info_t	thread_info_out,
				mach_msg_type_number_t	*thread_info_count);

/* Machine-dependent routines */
extern void		thread_machine_init(void);

extern void		thread_machine_set_current(
				thread_t	thread);

extern kern_return_t	thread_machine_create(
				thread_t	thread,
				thread_act_t	thr_act,
				void		(*start_pos)(thread_t));

extern void		thread_set_syscall_return(
				thread_t	thread,
				kern_return_t	retval);

extern void		thread_machine_destroy(
				thread_t	thread);

extern void		thread_machine_flush(
				thread_act_t	thr_act);

extern thread_t		kernel_thread_with_priority(
				task_t		task,
				integer_t	priority,
				void		(*start)(void),
				boolean_t	alloc_stack,
				boolean_t	start_running);

extern void		thread_terminate_self(void);

extern void		funnel_lock(funnel_t *);

extern void		funnel_unlock(funnel_t *);

#else	/* MACH_KERNEL_PRIVATE */

typedef struct funnel_lock funnel_t;

extern boolean_t	thread_should_halt(thread_t);

#endif	/* MACH_KERNEL_PRIVATE */

extern thread_t		kernel_thread(
				task_t		task,
				void		(*start)(void));

extern void		thread_set_cont_arg(int);

extern int		thread_get_cont_arg(void);

/* JMM - These are only temporary */
extern boolean_t	is_thread_running(thread_act_t);	/* TRUE if TH_RUN */
extern boolean_t	is_thread_idle(thread_t);		/* TRUE if TH_IDLE */
extern kern_return_t	get_thread_waitresult(thread_t);

#endif	/* __APPLE_API_PRIVATE */

#ifdef	__APPLE_API_EVOLVING

#define THR_FUNNEL_NULL	(funnel_t *)0

extern funnel_t *	funnel_alloc(int);

extern funnel_t *	thread_funnel_get(void);

extern boolean_t	thread_funnel_set(funnel_t *fnl, boolean_t funneled);

extern boolean_t	thread_funnel_merge(funnel_t *fnl, funnel_t *otherfnl);
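
/*
 * Typical use follows the historical kernel-funnel idiom (a sketch;
 * kernel_flock is the global funnel declared in the BSD headers, and
 * the surrounding code is illustrative, not quoted from any caller):
 *
 *	boolean_t funnel_state;
 *
 *	funnel_state = thread_funnel_set(kernel_flock, TRUE);
 *	... code serialized by the funnel ...
 *	(void) thread_funnel_set(kernel_flock, funnel_state);
 *
 * The return value records whether the funnel was already held, so the
 * second call restores the caller's original funnel state.
 */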

#endif	/* __APPLE_API_EVOLVING */

#endif	/* _KERN_THREAD_H_ */