/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <kern/queue.h>
#include <kern/ast.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/task.h>
#include <kern/lock.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>
#undef thread_should_halt
#undef ipc_port_release
#undef thread_ast_set
#undef thread_ast_clear
decl_simple_lock_data(extern,reaper_lock)
extern queue_head_t	reaper_queue;
/* BSD KERN COMPONENT INTERFACE */
vm_address_t bsd_init_task = 0;
char	init_task_failure_data[1024];
thread_act_t get_firstthread(task_t);
vm_map_t get_task_map(task_t);
ipc_space_t get_task_ipcspace(task_t);
boolean_t is_kerneltask(task_t);
boolean_t is_thread_idle(thread_t);
boolean_t is_thread_running(thread_t);
thread_shuttle_t getshuttle_thread( thread_act_t);
thread_act_t getact_thread( thread_shuttle_t);
vm_offset_t get_map_min( vm_map_t);
vm_offset_t get_map_max( vm_map_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_act_t);
int inc_task_userstop(task_t);
boolean_t thread_should_abort(thread_shuttle_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void(*)(thread_act_t, void *), void *);
void ipc_port_release(ipc_port_t);
void thread_ast_set(thread_act_t, ast_t);
void thread_ast_clear(thread_act_t, ast_t);
boolean_t is_thread_active(thread_t);
event_t get_thread_waitevent(thread_t);
kern_return_t get_thread_waitresult(thread_t);
vm_size_t get_vmmap_size(vm_map_t);
int get_vmmap_entries(vm_map_t);
int get_task_numacts(task_t);
thread_act_t get_firstthread(task_t task);
kern_return_t get_signalact(task_t, thread_act_t *, thread_t *, int);
void *get_bsdtask_info(task_t t)
{
	return(t->bsd_info);
}

void set_bsdtask_info(task_t t, void * v)
{
	t->bsd_info = v;
}

void *get_bsdthread_info(thread_act_t th)
{
	return(th->uthread);
}
/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here.  We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_act_t get_firstthread(task_t task)
{
	thread_act_t	thr_act;

	thr_act = (thread_act_t)queue_first(&task->thr_acts);
	if (thr_act == (thread_act_t)&task->thr_acts)
		thr_act = THR_ACT_NULL;
	if (!task->active)
		return(THR_ACT_NULL);
	return(thr_act);
}
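/*
 * Caller-side sketch (not from this file): since the task is only
 * re-checked after the queue walk, a THR_ACT_NULL result can mean
 * either "no activations" or "task went inactive", so callers are
 * assumed to handle both, e.g.
 *
 *	thread_act_t act = get_firstthread(task);
 *	if (act == THR_ACT_NULL)
 *		<bail out: no usable activation>;
 */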
kern_return_t get_signalact(task_t task, thread_act_t * thact, thread_t * thshut, int setast)
{
	thread_act_t	inc;
	thread_act_t	ninc;
	thread_act_t	thr_act;
	thread_t	th;

	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return(KERN_FAILURE);
	}

	/* find the first active activation that is not already aborting */
	thr_act = THR_ACT_NULL;
	for (inc  = (thread_act_t)queue_first(&task->thr_acts);
	     inc != (thread_act_t)&task->thr_acts;
	     inc  = ninc) {
		th = act_lock_thread(inc);
		if ((inc->active) && ((th->state & TH_ABORT) != TH_ABORT)) {
			thr_act = inc;
			break;
		}
		act_unlock_thread(inc);
		ninc = (thread_act_t)queue_next(&inc->thr_acts);
	}

	if (thact)
		*thact = thr_act;
	if (thshut)
		*thshut = thr_act ? thr_act->thread : THREAD_NULL;
	if (thr_act) {
		if (setast) {
			thread_ast_set(thr_act, AST_BSD);
			if (current_act() == thr_act)
				ast_on(AST_BSD);
		}
		act_unlock_thread(thr_act);
	}
	task_unlock(task);

	if (thr_act)
		return(KERN_SUCCESS);
	else
		return(KERN_FAILURE);
}
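/*
 * Usage sketch (assumption; the real callers live in the BSD signal
 * code, not in this file): a sender would typically do
 *
 *	thread_act_t act;
 *	thread_t th;
 *
 *	if (get_signalact(task, &act, &th, 1) == KERN_SUCCESS)
 *		<the chosen act notices the signal at its next AST check>;
 *
 * Passing setast == 1 marks the chosen activation with AST_BSD so it
 * traps into the BSD AST handler on its way back to user space.
 */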
kern_return_t check_actforsig(task_t task, thread_act_t * thact, thread_t * thshut, int setast)
{
	thread_act_t	inc;
	thread_act_t	ninc;
	thread_act_t	thr_act;
	thread_t	th;

	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return(KERN_FAILURE);
	}

	thr_act = THR_ACT_NULL;
	for (inc  = (thread_act_t)queue_first(&task->thr_acts);
	     inc != (thread_act_t)&task->thr_acts;
	     inc  = ninc) {
		/* advance before taking the act lock, unlike get_signalact() */
		ninc = (thread_act_t)queue_next(&inc->thr_acts);
		th = act_lock_thread(inc);
		if ((inc->active) && ((th->state & TH_ABORT) != TH_ABORT)) {
			thr_act = inc;
			break;
		}
		act_unlock_thread(inc);
		/* ninc = (thread_act_t)queue_next(&inc->thr_acts); */
	}

	if (thact)
		*thact = thr_act;
	if (thshut)
		*thshut = thr_act ? thr_act->thread : THREAD_NULL;
	if (thr_act) {
		if (setast) {
			thread_ast_set(thr_act, AST_BSD);
			if (current_act() == thr_act)
				ast_on(AST_BSD);
		}
		act_unlock_thread(thr_act);
	}
	task_unlock(task);

	if (thr_act)
		return(KERN_SUCCESS);
	else
		return(KERN_FAILURE);
}
vm_map_t get_task_map(task_t t)
{
	return(t->map);
}

ipc_space_t get_task_ipcspace(task_t t)
{
	return(t->itk_space);
}

int get_task_numacts(task_t t)
{
	return(t->thr_act_count);
}
/*
 * Reset the current task's map by taking a reference
 * on the new map.  The old map reference is returned.
 */
vm_map_t
swap_task_map(task_t task, vm_map_t map)
{
	vm_map_t old_map;

	vm_map_reference(map);
	old_map = task->map;
	task->map = map;
	return old_map;
}
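/*
 * Usage sketch (assumption, not from this file): a caller replacing a
 * task's address space and dropping the displaced reference might do
 *
 *	vm_map_t old_map = swap_task_map(task, new_map);
 *	vm_map_deallocate(old_map);
 */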
/*
 * Reset the current act map.
 * The caller donates us a reference to the new map
 * and we donate our reference to the old map to him.
 */
vm_map_t
swap_act_map(thread_act_t thr_act, vm_map_t map)
{
	vm_map_t old_map;

	old_map = thr_act->map;
	thr_act->map = map;
	return old_map;
}
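/*
 * Sketch of the reference handoff described above (assumption): unlike
 * swap_task_map(), no vm_map_reference() is taken here; the caller is
 * expected to hand in an already-referenced map and to release the
 * returned one, e.g.
 *
 *	old = swap_act_map(act, referenced_map);
 *	vm_map_deallocate(old);
 */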
pmap_t get_task_pmap(task_t t)
{
	return(t->map->pmap);
}

pmap_t get_map_pmap(vm_map_t map)
{
	return(map->pmap);
}

task_t get_threadtask(thread_act_t th)
{
	return(th->task);
}
boolean_t is_thread_idle(thread_t th)
{
	return((th->state & TH_IDLE) == TH_IDLE);
}

boolean_t is_thread_running(thread_t th)
{
	return((th->state & TH_RUN) == TH_RUN);
}
vm_offset_t
get_map_min(
	vm_map_t	map)
{
	return(vm_map_min(map));
}

vm_offset_t
get_map_max(
	vm_map_t	map)
{
	return(vm_map_max(map));
}
int
get_vmsubmap_entries(
	vm_map_t	map,
	vm_object_offset_t	start,
	vm_object_offset_t	end)
{
	int	total_entries = 0;
	vm_map_entry_t	entry;

	/* advance to the first entry that reaches the requested range */
	entry = vm_map_first_entry(map);
	while((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	/* count entries up to end, descending into submaps */
	while((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if(entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	return(total_entries);
}
int
get_vmmap_entries(
	vm_map_t	map)
{
	int	total_entries = 0;
	vm_map_entry_t	entry;

	entry = vm_map_first_entry(map);
	while(entry != vm_map_to_entry(map)) {
		if(entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	return(total_entries);
}
int
get_task_userstop(
	task_t	task)
{
	return(task->user_stop_count);
}

int
get_thread_userstop(
	thread_act_t th)
{
	return(th->user_stop_count);
}

int
inc_task_userstop(
	task_t	task)
{
	int i = 0;

	i = task->user_stop_count;
	task->user_stop_count++;
	return(i);
}
boolean_t
thread_should_abort(
	thread_shuttle_t th)
{
	return( (!th->top_act || !th->top_act->active ||
		 th->state & TH_ABORT));
}
boolean_t
current_thread_aborted (
		void)
{
	thread_t th = current_thread();

	return(!th->top_act ||
	       ((th->state & TH_ABORT) && (th->interruptible)));
}
void
task_act_iterate_wth_args(
	task_t task,
	void (*func_callback)(thread_act_t, void *),
	void *func_arg)
{
	thread_act_t	inc, ninc;

	for (inc  = (thread_act_t)queue_first(&task->thr_acts);
	     inc != (thread_act_t)&task->thr_acts;
	     inc  = ninc) {
		ninc = (thread_act_t)queue_next(&inc->thr_acts);
		(void) (*func_callback)(inc, func_arg);
	}
}
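/*
 * Usage sketch (hypothetical callback, not part of this file): count
 * the activations in a task by passing an accumulator through func_arg:
 *
 *	static void
 *	count_act(thread_act_t act, void *arg)
 *	{
 *		(*(int *)arg)++;
 *	}
 *
 *	int nacts = 0;
 *	task_act_iterate_wth_args(task, count_act, &nacts);
 */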
void
ipc_port_release(
	ipc_port_t port)
{
	ipc_object_release(&(port)->ip_object);
}
void
thread_ast_clear(
	thread_act_t act,
	ast_t  reason)
{
	act->ast &= ~(reason);
}
event_t
get_thread_waitevent(
	thread_t th)
{
	return(th->wait_event);
}
kern_return_t
get_thread_waitresult(
	thread_t th)
{
	return(th->wait_result);
}