/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	thread.h
 *	Author:	Avadis Tevanian, Jr.
 *
 *	This file contains the structure definitions for threads.
 *
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */

#ifndef	_KERN_THREAD_H_
#define	_KERN_THREAD_H_

#include <mach/kern_return.h>
#include <mach/mach_types.h>
#include <mach/message.h>
#include <mach/boolean.h>
#include <mach/vm_param.h>
#include <mach/thread_info.h>
#include <mach/thread_status.h>
#include <mach/exception_types.h>

#include <kern/cpu_data.h>	/* for current_thread */
#include <kern/kern_types.h>

#include <ipc/ipc_types.h>

/*
 * Logically, a thread of control consists of two parts:
 *
 *	+ A thread_shuttle, which may migrate due to resource contention
 *
 *	+ A thread_activation, which remains attached to a task.
 *
 * The thread_shuttle contains scheduling info, accounting info,
 * and links to the thread_activation within which the shuttle is
 * currently operating.
 *
 * An activation always has a valid task pointer, and it is always constant.
 * The activation is linked onto the task's activation list only until
 * the activation is terminated.
 *
 * The thread holds a reference on the activation while using it.
 */
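
/*
 * A minimal, purely illustrative sketch of the two-part model described
 * above (disabled; not part of this header): the running shuttle's
 * top_act names the activation currently in use, and that activation's
 * task pointer is always valid.  It assumes the MACH_KERNEL_PRIVATE
 * definitions below are visible and that THR_ACT_NULL/TASK_NULL and
 * assert() come from neighboring headers.
 */
#if 0
static void
example_shuttle_vs_activation(void)
{
	thread_t	shuttle = current_thread();	/* from kern/cpu_data.h */
	thread_act_t	act = shuttle->top_act;		/* activation in use */

	assert(act != THR_ACT_NULL);		/* a shuttle always runs within an activation */
	assert(act->task != TASK_NULL);		/* activation's task pointer is constant and valid */
}
#endif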

#include <sys/appleapiopts.h>

#ifdef	__APPLE_API_PRIVATE

#ifdef	MACH_KERNEL_PRIVATE

#include <cpus.h>
#include <cputypes.h>

#include <mach_assert.h>
#include <mach_host.h>
#include <mach_prof.h>
#include <mach_lock_mon.h>
#include <mach_ldebug.h>

#include <mach/port.h>
#include <kern/ast.h>
#include <kern/cpu_number.h>
#include <kern/queue.h>
#include <kern/time_out.h>
#include <kern/timer.h>
#include <kern/lock.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/thread_call.h>
#include <kern/timer_call.h>
#include <kern/task.h>
#include <kern/exception.h>
#include <kern/etap_macros.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_port.h>

#include <machine/thread.h>
#include <machine/thread_act.h>

struct thread {
	/*
	 *	NOTE:	The runq field in the thread structure has an unusual
	 *	locking protocol.  If its value is RUN_QUEUE_NULL, then it is
	 *	locked by the thread_lock, but if its value is something else
	 *	(i.e. a run_queue) then it is locked by that run_queue's lock.
	 *	(An illustrative sketch of this protocol follows the structure
	 *	definition below.)
	 *
	 *	Beginning of thread_shuttle proper.  When the thread is on
	 *	a wait queue, these first three fields are treated as an un-
	 *	official union with a wait_queue_element.  If you change
	 *	these, you must change that definition as well (wait_queue.h).
	 */
	/* Items examined often, modified infrequently */
	queue_chain_t	links;			/* run/wait queue links */
	run_queue_t	runq;			/* run queue thread is on SEE BELOW */
	wait_queue_t	wait_queue;		/* wait queue we are currently on */
	event64_t	wait_event;		/* wait queue event */
	thread_act_t	top_act;		/* "current" thr_act */
	uint32_t	/* Only set by thread itself */
			interrupt_level:2,	/* interrupts/aborts allowed */
			vm_privilege:1,		/* can use reserved memory? */
			active_callout:1,	/* an active callout */
			:0;


	/* Data updated during assert_wait/thread_wakeup */
	decl_simple_lock_data(,sched_lock)	/* scheduling lock (thread_lock()) */
	decl_simple_lock_data(,wake_lock)	/* covers wake_active (wake_lock()) */
	boolean_t	wake_active;		/* someone is waiting for this thread */
	int		at_safe_point;		/* thread_abort_safely allowed */
	ast_t		reason;			/* why we blocked */
	wait_result_t	wait_result;		/* outcome of wait -
						 * may be examined by this thread
						 * WITHOUT locking */
	thread_roust_t	roust;			/* routine to roust it after wait */
	thread_continue_t continuation;		/* resume here next dispatch */

	/* Data updated/used in thread_invoke */
	struct funnel_lock *funnel_lock;	/* Non-reentrancy funnel */
	int		funnel_state;
#define TH_FN_OWNED		0x1		/* we own the funnel */
#define TH_FN_REFUNNEL		0x2		/* re-acquire funnel on dispatch */

	vm_offset_t	kernel_stack;		/* current kernel stack */
	vm_offset_t	reserved_stack;		/* reserved kernel stack */

	/* Thread state: */
	int		state;
/*
 *	Thread states [bits or'ed]
 */
#define TH_WAIT			0x01		/* queued for waiting */
#define TH_SUSP			0x02		/* stopped or requested to stop */
#define TH_RUN			0x04		/* running or on runq */
#define TH_UNINT		0x08		/* waiting uninterruptibly */
#define TH_TERMINATE		0x10		/* halted at termination */

#define TH_ABORT		0x20		/* abort interruptible waits */
#define TH_ABORT_SAFELY		0x40		/* ... but only those at safe point */

#define TH_IDLE			0x80		/* processor idle thread */

#define TH_SCHED_STATE		(TH_WAIT|TH_SUSP|TH_RUN|TH_UNINT)

#define TH_STACK_HANDOFF	0x0100		/* thread has no kernel stack */
#define TH_STACK_ALLOC		0x0200		/* waiting for stack allocation */
#define TH_STACK_STATE		(TH_STACK_HANDOFF | TH_STACK_ALLOC)

	/* Scheduling information */
	integer_t	sched_mode;		/* scheduling mode bits */
#define TH_MODE_REALTIME	0x0001		/* time constraints supplied */
#define TH_MODE_TIMESHARE	0x0002		/* use timesharing algorithm */
#define TH_MODE_PREEMPT		0x0004		/* can preempt kernel contexts */
#define TH_MODE_FAILSAFE	0x0008		/* fail-safe has tripped */
#define TH_MODE_PROMOTED	0x0010		/* sched pri has been promoted */
#define TH_MODE_FORCEDPREEMPT	0x0020		/* force setting of mode PREEMPT */
#define TH_MODE_DEPRESS		0x0040		/* normal depress yield */
#define TH_MODE_POLLDEPRESS	0x0080		/* polled depress yield */
#define TH_MODE_ISDEPRESSED	(TH_MODE_DEPRESS | TH_MODE_POLLDEPRESS)

	integer_t	sched_pri;		/* scheduled (current) priority */
	integer_t	priority;		/* base priority */
	integer_t	max_priority;		/* max base priority */
	integer_t	task_priority;		/* copy of task base priority */

	integer_t	promotions;		/* level of promotion */
	integer_t	pending_promoter_index;
	void		*pending_promoter[2];

	integer_t	importance;		/* task-relative importance */

	/* Real-time parameters */
	struct {				/* see mach/thread_policy.h */
		uint32_t	period;
		uint32_t	computation;
		uint32_t	constraint;
		boolean_t	preemptible;

		uint64_t	deadline;
	}		realtime;

	uint32_t	current_quantum;	/* duration of current quantum */

	/* Data used during setrun/dispatch */
	timer_data_t	system_timer;		/* system mode timer */
	processor_set_t	processor_set;		/* assigned processor set */
	processor_t	bound_processor;	/* bound to a processor? */
	processor_t	last_processor;		/* processor last dispatched on */
	uint64_t	last_switch;		/* time of last context switch */

	/* Fail-safe computation since last unblock or qualifying yield */
	uint64_t	computation_metered;
	uint64_t	computation_epoch;
	integer_t	safe_mode;		/* saved mode during fail-safe */
	natural_t	safe_release;		/* when to release fail-safe */

	/* Statistics and timesharing calculations */
	natural_t	sched_stamp;		/* when priority was updated */
	natural_t	cpu_usage;		/* exp. decaying cpu usage [%cpu] */
	natural_t	cpu_delta;		/* cpu usage since last update */
	natural_t	sched_usage;		/* load-weighted cpu usage [sched] */
	natural_t	sched_delta;		/* weighted cpu usage since update */
	natural_t	sleep_stamp;		/* when entered TH_WAIT state */

	/* Timing data structures */
	timer_data_t	user_timer;		/* user mode timer */
	timer_save_data_t system_timer_save;	/* saved system timer value */
	timer_save_data_t user_timer_save;	/* saved user timer value */

	/* Timed wait expiration */
	timer_call_data_t wait_timer;
	integer_t	wait_timer_active;
	boolean_t	wait_timer_is_set;

	/* Priority depression expiration */
	timer_call_data_t depress_timer;
	integer_t	depress_timer_active;

	/* Various bits of stashed state */
	union {
		struct {
			mach_msg_return_t	state;		/* receive state */
			ipc_object_t		object;		/* object received on */
			mach_msg_header_t	*msg;		/* receive buffer pointer */
			mach_msg_size_t		msize;		/* max size for recvd msg */
			mach_msg_option_t	option;		/* options for receive */
			mach_msg_size_t		slist_size;	/* scatter list size */
			struct ipc_kmsg		*kmsg;		/* received message */
			mach_port_seqno_t	seqno;		/* seqno of recvd message */
			mach_msg_continue_t	continuation;
		} receive;
		struct {
			struct semaphore	*waitsemaphore;		/* semaphore ref */
			struct semaphore	*signalsemaphore;	/* semaphore ref */
			int			options;		/* semaphore options */
			kern_return_t		result;			/* primary result */
			mach_msg_continue_t	continuation;
		} sema;
		struct {
			int			option;		/* switch option */
		} swtch;
		int		misc;		/* catch-all for other state */
	} saved;

	/* IPC data structures */
	struct ipc_kmsg_queue ith_messages;
	mach_port_t	ith_mig_reply;		/* reply port for mig */
	mach_port_t	ith_rpc_reply;		/* reply port for kernel RPCs */

	/* Ast/Halt data structures */
	vm_offset_t	recover;		/* page fault recover (copyin/out) */
	int		ref_count;		/* number of references to me */

	/* Processor set info */
	queue_chain_t	pset_threads;		/* list of all threads in pset */
#if	MACH_HOST
	boolean_t	may_assign;		/* may assignment change? */
	boolean_t	assign_active;		/* waiting for may_assign */
#endif	/* MACH_HOST */

	/* Activation */
	queue_chain_t	task_threads;

	/*** Machine-dependent state ***/
	struct MachineThrAct mact;

	/* Task membership */
	struct task	*task;
	vm_map_t	map;

	decl_mutex_data(,lock)
	int		act_ref_count;

	/* Associated shuttle */
	struct thread	*thread;

	/*
	 *	Next higher and next lower activation on
	 *	the thread's activation stack.
	 */
	struct thread	*higher, *lower;

	/* Kernel holds on this thread */
	int		suspend_count;

	/* User level suspensions */
	int		user_stop_count;

	/* Pending thread ast(s) */
	ast_t		ast;

	/* Miscellaneous bits guarded by lock mutex */
	uint32_t
		/* Indicates that the thread has not been terminated */
		active:1,

		/* Indicates that the thread has been started after creation */
		started:1,
		:0;

	/* Return Handlers */
	struct ReturnHandler {
		struct ReturnHandler	*next;
		void			(*handler)(
						struct ReturnHandler	*rh,
						struct thread		*act);
	} *handlers, special_handler;

	/* Ports associated with this thread */
	struct ipc_port	*ith_self;		/* not a right, doesn't hold ref */
	struct ipc_port	*ith_sself;		/* a send right */
	struct exception_action exc_actions[EXC_TYPES_COUNT];

	/* Owned ulocks (a lock set element) */
	queue_head_t	held_ulocks;

#if	MACH_PROF
	/* Profiling */
	boolean_t	profiled;
	boolean_t	profiled_own;
	struct prof_data *profil_buffer;
#endif	/* MACH_PROF */

#ifdef	MACH_BSD
	void		*uthread;
#endif

	/* BEGIN TRACING/DEBUG */

#if	MACH_LOCK_MON
	unsigned	lock_stack;		/* number of locks held */
#endif	/* MACH_LOCK_MON */

#if	ETAP_EVENT_MONITOR
	int		etap_reason;		/* real reason why we blocked */
	boolean_t	etap_trace;		/* ETAP trace status */
#endif	/* ETAP_EVENT_MONITOR */

#if	MACH_LDEBUG
	/*
	 *	Debugging:  track acquired mutexes and locks.
	 *	Because a thread can block while holding such
	 *	synchronizers, we think of the thread as
	 *	"owning" them.
	 */
#define	MUTEX_STACK_DEPTH	20
#define	LOCK_STACK_DEPTH	20
	mutex_t		*mutex_stack[MUTEX_STACK_DEPTH];
	lock_t		*lock_stack[LOCK_STACK_DEPTH];
	unsigned int	mutex_stack_index;
	unsigned int	lock_stack_index;
	unsigned	mutex_count;		/* XXX to be deleted XXX */
#endif	/* MACH_LDEBUG */
	/* END TRACING/DEBUG */

};
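
/*
 * Illustrative sketch (disabled) of the runq locking protocol noted at
 * the top of the structure above: RUN_QUEUE_NULL in th->runq is
 * protected by the thread lock, while a non-null value is protected by
 * that run queue's own lock, so a removal takes both before clearing
 * the field.  The run_queue member names (lock, queues) and the
 * two-argument remqueue() are assumptions about neighboring headers,
 * not declarations made here.
 */
#if 0
static void
example_runq_remove(thread_t th)
{
	run_queue_t	rq;
	spl_t		s;

	s = splsched();
	thread_lock(th);
	rq = th->runq;
	if (rq != RUN_QUEUE_NULL) {
		simple_lock(&rq->lock);			/* assumed field name */
		if (th->runq == rq) {			/* re-check under the runq lock */
			remqueue(&rq->queues[th->sched_pri], (queue_entry_t)th);
			th->runq = RUN_QUEUE_NULL;
		}
		simple_unlock(&rq->lock);
	}
	thread_unlock(th);
	splx(s);
}
#endif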

#define ith_state		saved.receive.state
#define ith_object		saved.receive.object
#define ith_msg			saved.receive.msg
#define ith_msize		saved.receive.msize
#define ith_option		saved.receive.option
#define ith_scatter_list_size	saved.receive.slist_size
#define ith_continuation	saved.receive.continuation
#define ith_kmsg		saved.receive.kmsg
#define ith_seqno		saved.receive.seqno

#define sth_waitsemaphore	saved.sema.waitsemaphore
#define sth_signalsemaphore	saved.sema.signalsemaphore
#define sth_options		saved.sema.options
#define sth_result		saved.sema.result
#define sth_continuation	saved.sema.continuation
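
/*
 * Illustrative sketch (disabled) of how the aliases above are used:
 * IPC code stashes per-wait receive state in the thread's "saved"
 * union before blocking with a continuation.  The helper name and the
 * exact set of fields stashed here are assumptions for illustration
 * only.
 */
#if 0
static void
example_stash_receive_state(
	thread_t		self,
	mach_msg_option_t	option,
	mach_msg_size_t		max_size)
{
	self->ith_option = option;		/* expands to self->saved.receive.option */
	self->ith_msize = max_size;		/* expands to self->saved.receive.msize */
	self->ith_state = MACH_RCV_IN_PROGRESS;	/* from <mach/message.h> */
}
#endif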

extern void		thread_bootstrap(void);

extern void		thread_init(void);

extern void		thread_reaper_init(void);

extern void		thread_reference(
				thread_t	thread);

extern void		thread_deallocate(
				thread_t	thread);

extern void		thread_terminate_self(void);

extern void		thread_hold(
				thread_act_t	thread);

extern void		thread_release(
				thread_act_t	thread);

#define thread_lock_init(th)	simple_lock_init(&(th)->sched_lock, ETAP_THREAD_LOCK)
#define thread_lock(th)		simple_lock(&(th)->sched_lock)
#define thread_unlock(th)	simple_unlock(&(th)->sched_lock)
#define thread_lock_try(th)	simple_lock_try(&(th)->sched_lock)

#define thread_should_halt_fast(thread)		\
	(!(thread)->top_act || !(thread)->top_act->active)

#define thread_reference_locked(thread)	((thread)->ref_count++)
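
/*
 * Illustrative sketch (disabled) of the usual pattern for the
 * scheduling-lock macros above: the lock is a simple lock taken at
 * splsched(), so interrupts stay blocked while it is held.  It assumes
 * that ref_count is protected by the thread lock (as the name of
 * thread_reference_locked() suggests) and that splsched()/splx() are
 * available from the machine spl interfaces.
 */
#if 0
static void
example_take_thread_reference(thread_t th)
{
	spl_t	s;

	s = splsched();			/* block interrupts */
	thread_lock(th);		/* take the scheduling lock */
	thread_reference_locked(th);	/* safe: lock is held */
	thread_unlock(th);
	splx(s);
}
#endif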

#define wake_lock_init(th)	\
	simple_lock_init(&(th)->wake_lock, ETAP_THREAD_WAKE)
#define wake_lock(th)		simple_lock(&(th)->wake_lock)
#define wake_unlock(th)		simple_unlock(&(th)->wake_lock)
#define wake_lock_try(th)	simple_lock_try(&(th)->wake_lock)

extern vm_offset_t	stack_alloc(
				thread_t	thread,
				void		(*start)(thread_t));

extern boolean_t	stack_alloc_try(
				thread_t	thread,
				void		(*start)(thread_t));

extern void		stack_free(
				thread_t	thread);

extern void		stack_free_stack(
				vm_offset_t	stack);

extern void		stack_collect(void);

extern kern_return_t	thread_setstatus(
				thread_act_t		thread,
				int			flavor,
				thread_state_t		tstate,
				mach_msg_type_number_t	count);

extern kern_return_t	thread_getstatus(
				thread_act_t		thread,
				int			flavor,
				thread_state_t		tstate,
				mach_msg_type_number_t	*count);

extern kern_return_t	thread_info_shuttle(
				thread_act_t		thread,
				thread_flavor_t		flavor,
				thread_info_t		thread_info_out,
				mach_msg_type_number_t	*thread_info_count);

extern void		thread_task_priority(
				thread_t	thread,
				integer_t	priority,
				integer_t	max_priority);

extern kern_return_t	thread_get_special_port(
				thread_act_t	thread,
				int		which,
				ipc_port_t	*port);

extern kern_return_t	thread_set_special_port(
				thread_act_t	thread,
				int		which,
				ipc_port_t	port);

extern thread_act_t	switch_act(
				thread_act_t	act);

extern thread_t		kernel_thread_create(
				void		(*start)(void),
				integer_t	priority);

extern thread_t		kernel_thread_with_priority(
				void		(*start)(void),
				integer_t	priority);

extern void		machine_stack_attach(
				thread_t	thread,
				vm_offset_t	stack,
				void		(*start)(thread_t));

extern vm_offset_t	machine_stack_detach(
				thread_t	thread);

extern void		machine_stack_handoff(
				thread_t	old,
				thread_t	new);

extern thread_t		machine_switch_context(
				thread_t		old_thread,
				thread_continue_t	continuation,
				thread_t		new_thread);

extern void		machine_load_context(
				thread_t	thread);

extern void		machine_switch_act(
				thread_t	thread,
				thread_act_t	old,
				thread_act_t	new);

extern kern_return_t	machine_thread_set_state(
				thread_act_t		act,
				thread_flavor_t		flavor,
				thread_state_t		state,
				mach_msg_type_number_t	count);

extern kern_return_t	machine_thread_get_state(
				thread_act_t		act,
				thread_flavor_t		flavor,
				thread_state_t		state,
				mach_msg_type_number_t	*count);

extern kern_return_t	machine_thread_dup(
				thread_act_t	self,
				thread_act_t	target);

extern void		machine_thread_init(void);

extern kern_return_t	machine_thread_create(
				thread_t	thread,
				task_t		task);

extern void		machine_thread_destroy(
				thread_t	thread);

extern void		machine_thread_set_current(
				thread_t	thread);

extern void		machine_thread_terminate_self(void);

/*
 * XXX Funnel locks XXX
 */

struct funnel_lock {
	int		fnl_type;		/* funnel type */
	mutex_t		*fnl_mutex;		/* underlying mutex for the funnel */
	void		*fnl_mtxholder;		/* thread (last) holding mutex */
	void		*fnl_mtxrelease;	/* thread (last) releasing mutex */
	mutex_t		*fnl_oldmutex;		/* mutex before collapsing split funnel */
};

typedef struct funnel_lock funnel_t;

extern void		funnel_lock(
				funnel_t	*lock);

extern void		funnel_unlock(
				funnel_t	*lock);

typedef struct ReturnHandler ReturnHandler;

#define act_lock(act)		mutex_lock(&(act)->lock)
#define act_lock_try(act)	mutex_try(&(act)->lock)
#define act_unlock(act)		mutex_unlock(&(act)->lock)

#define act_reference_locked(act)		\
MACRO_BEGIN					\
	(act)->act_ref_count++;			\
MACRO_END

#define act_deallocate_locked(act)		\
MACRO_BEGIN					\
	if (--(act)->act_ref_count == 0)	\
		panic("act_deallocate_locked");	\
MACRO_END
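
/*
 * Illustrative sketch (disabled) of the locked reference macros above:
 * an extra reference can be taken while the activation mutex is held,
 * but the final reference is expected to be dropped through the
 * unlocked act_deallocate() path, hence the panic() if the count
 * reaches zero inside act_deallocate_locked().  The helper name is an
 * assumption for illustration only.
 */
#if 0
static void
example_act_extra_reference(thread_act_t act)
{
	act_lock(act);
	act_reference_locked(act);	/* bump the ref count under the lock */
	act_unlock(act);

	/* ... use the activation ... */

	act_deallocate(act);		/* drop the extra reference later */
}
#endif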

extern void		act_reference(
				thread_act_t	act);

extern void		act_deallocate(
				thread_act_t	act);

extern void		act_attach(
				thread_act_t	act,
				thread_t	thread);

extern void		act_detach(
				thread_act_t	act);

extern thread_t		act_lock_thread(
				thread_act_t	act);

extern void		act_unlock_thread(
				thread_act_t	act);

extern thread_act_t	thread_lock_act(
				thread_t	thread);

extern void		thread_unlock_act(
				thread_t	thread);

extern void		act_execute_returnhandlers(void);

extern void		install_special_handler(
				thread_act_t	thread);

extern void		special_handler(
				ReturnHandler	*rh,
				thread_act_t	act);

#else	/* MACH_KERNEL_PRIVATE */

typedef struct funnel_lock funnel_t;

extern boolean_t	thread_should_halt(
				thread_t	thread);

extern void		act_reference(
				thread_act_t	act);

extern void		act_deallocate(
				thread_act_t	act);

#endif	/* MACH_KERNEL_PRIVATE */

extern thread_t		kernel_thread(
				task_t		task,
				void		(*start)(void));

extern void		thread_set_cont_arg(
				int		arg);

extern int		thread_get_cont_arg(void);

/* JMM - These are only temporary */
extern boolean_t	is_thread_running(thread_act_t);	/* True if TH_RUN */
extern boolean_t	is_thread_idle(thread_t);		/* True if TH_IDLE */
extern kern_return_t	get_thread_waitresult(thread_t);

typedef void	(thread_apc_handler_t)(thread_act_t);

extern kern_return_t	thread_apc_set(thread_act_t, thread_apc_handler_t);
extern kern_return_t	thread_apc_clear(thread_act_t, thread_apc_handler_t);

extern vm_map_t		swap_act_map(thread_act_t, vm_map_t);

extern void		*get_bsdthread_info(thread_act_t);
extern void		set_bsdthread_info(thread_act_t, void *);
extern task_t		get_threadtask(thread_act_t);

#endif	/* __APPLE_API_PRIVATE */

#ifdef	__APPLE_API_UNSTABLE

#if	!defined(MACH_KERNEL_PRIVATE)

extern thread_act_t	current_act(void);

#endif	/* MACH_KERNEL_PRIVATE */

#endif	/* __APPLE_API_UNSTABLE */

#ifdef	__APPLE_API_EVOLVING

/*
 * XXX Funnel locks XXX
 */

#define THR_FUNNEL_NULL	(funnel_t *)0

extern funnel_t		*funnel_alloc(
				int	type);

extern funnel_t		*thread_funnel_get(void);

extern boolean_t	thread_funnel_set(
				funnel_t	*lock,
				boolean_t	funneled);

extern boolean_t	thread_funnel_merge(
				funnel_t	*lock,
				funnel_t	*other);
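
/*
 * Illustrative sketch (disabled) of typical funnel usage with the
 * interfaces above: take the funnel, do the serialized work, then
 * restore the previous funnel state that thread_funnel_set() returned.
 * The kernel_flock extern is an assumption made for this sketch; it is
 * not declared by this header.
 */
#if 0
extern funnel_t	*kernel_flock;		/* assumed to exist elsewhere */

static void
example_funneled_work(void)
{
	boolean_t	funnel_state;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	/* ... code that relies on funnel serialization ... */

	(void) thread_funnel_set(kernel_flock, funnel_state);
}
#endif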

#endif	/* __APPLE_API_EVOLVING */

#ifdef	__APPLE_API_PRIVATE

extern boolean_t	refunnel_hint(
				thread_t	thread,
				wait_result_t	wresult);

/* For use by CHUD */
vm_offset_t	min_valid_stack_address(void);
vm_offset_t	max_valid_stack_address(void);

#endif	/* __APPLE_API_PRIVATE */

#endif	/* _KERN_THREAD_H_ */