/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <kern/queue.h>
#include <kern/ast.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/task.h>
#include <kern/spl.h>
#include <kern/lock.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>

#undef thread_should_halt
#undef ipc_port_release
decl_simple_lock_data(extern,reaper_lock)
extern queue_head_t	reaper_queue;
/* BSD KERN COMPONENT INTERFACE */

task_t	bsd_init_task = TASK_NULL;
char	init_task_failure_data[1024];
thread_act_t get_firstthread(task_t);
vm_map_t get_task_map(task_t);
ipc_space_t get_task_ipcspace(task_t);
boolean_t is_kerneltask(task_t);
boolean_t is_thread_idle(thread_t);
boolean_t is_thread_running(thread_act_t);
thread_shuttle_t getshuttle_thread(thread_act_t);
thread_act_t getact_thread(thread_shuttle_t);
vm_offset_t get_map_min(vm_map_t);
vm_offset_t get_map_max(vm_map_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_act_t);
boolean_t thread_should_abort(thread_shuttle_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_act_t, void *), void *);
void ipc_port_release(ipc_port_t);
boolean_t is_thread_active(thread_t);
kern_return_t get_thread_waitresult(thread_t);
vm_size_t get_vmmap_size(vm_map_t);
int get_vmmap_entries(vm_map_t);
int get_task_numacts(task_t);
thread_act_t get_firstthread(task_t task);
kern_return_t get_signalact(task_t, thread_act_t *, thread_t *, int);
void *get_bsdtask_info(task_t t)
{
	return(t->bsd_info);
}

void set_bsdtask_info(task_t t, void * v)
{
	t->bsd_info = v;
}

void *get_bsdthread_info(thread_act_t th)
{
	return(th->uthread);
}
/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here.  We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_act_t get_firstthread(task_t task)
{
	thread_act_t	thr_act;

	thr_act = (thread_act_t)queue_first(&task->thr_acts);
	if (thr_act == (thread_act_t)&task->thr_acts)
		thr_act = THR_ACT_NULL;
	if (!task->active)
		return(THR_ACT_NULL);
	return(thr_act);
}
kern_return_t get_signalact(task_t task, thread_act_t * thact, thread_t * thshut, int setast)
{
	thread_act_t	inc;
	thread_act_t	ninc;
	thread_act_t	thr_act;
	thread_t	th;

	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return(KERN_FAILURE);
	}

	thr_act = THR_ACT_NULL;
	for (inc  = (thread_act_t)queue_first(&task->thr_acts);
	     inc != (thread_act_t)&task->thr_acts;
	     inc  = ninc) {
		th = act_lock_thread(inc);
		if ((inc->active) &&
		    ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT)) {
			thr_act = inc;
			break;
		}
		act_unlock_thread(inc);
		ninc = (thread_act_t)queue_next(&inc->thr_acts);
	}

	if (thact)
		*thact = thr_act;
	if (thshut)
		*thshut = thr_act ? thr_act->thread : THREAD_NULL;
	if (thr_act) {
		if (setast)
			act_set_astbsd(thr_act);
		act_unlock_thread(thr_act);
	}
	task_unlock(task);

	if (thr_act)
		return(KERN_SUCCESS);
	return(KERN_FAILURE);
}
kern_return_t check_actforsig(task_t task, thread_act_t thact, thread_t * thshut, int setast)
{
	thread_act_t	inc;
	thread_act_t	ninc;
	thread_act_t	thr_act;
	thread_t	th;
	int		found = 0;

	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return(KERN_FAILURE);
	}

	thr_act = THR_ACT_NULL;
	for (inc  = (thread_act_t)queue_first(&task->thr_acts);
	     inc != (thread_act_t)&task->thr_acts;
	     inc  = ninc) {
		if (inc != thact) {
			ninc = (thread_act_t)queue_next(&inc->thr_acts);
			continue;
		}
		th = act_lock_thread(inc);
		if ((inc->active) &&
		    ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT)) {
			found = 1;
			thr_act = inc;
			break;
		}
		act_unlock_thread(inc);
		/* ninc = (thread_act_t)queue_next(&inc->thr_acts); */
		break;
	}

	if (found) {
		if (thshut)
			*thshut = thr_act ? thr_act->thread : THREAD_NULL;
		if (setast)
			act_set_astbsd(thr_act);
		act_unlock_thread(thr_act);
	}
	task_unlock(task);

	if (found)
		return(KERN_SUCCESS);
	return(KERN_FAILURE);
}
vm_map_t get_task_map(task_t t)
{
	return(t->map);
}

ipc_space_t get_task_ipcspace(task_t t)
{
	return(t->itk_space);
}

int get_task_numacts(task_t t)
{
	return(t->thr_act_count);
}
/* does this machine need 64bit register set for signal handler */
int is_64signalregset(void)
{
	task_t t = current_task();

	if (t->taskFeatures[0] & tf64BitData)
		return(1);
	else
		return(0);
}
/*
 * Reset the current task's map by taking a reference
 * on the new map.  The old map reference is returned.
 */
vm_map_t swap_task_map(task_t task, vm_map_t map)
{
	vm_map_t old_map;

	vm_map_reference(map);
	task_lock(task);
	old_map = task->map;
	task->map = map;
	task_unlock(task);
	return(old_map);
}
/*
 * Reset the current act map.
 * The caller donates us a reference to the new map
 * and we donate our reference to the old map to him.
 */
vm_map_t swap_act_map(thread_act_t thr_act, vm_map_t map)
{
	vm_map_t old_map;

	act_lock(thr_act);
	old_map = thr_act->map;
	thr_act->map = map;
	act_unlock(thr_act);
	return(old_map);
}
pmap_t get_task_pmap(task_t t)
{
	return(t->map->pmap);
}

pmap_t get_map_pmap(vm_map_t map)
{
	return(map->pmap);
}

task_t get_threadtask(thread_act_t th)
{
	return(th->task);
}
boolean_t is_thread_idle(thread_t th)
{
	return((th->state & TH_IDLE) == TH_IDLE);
}

boolean_t is_thread_running(thread_act_t thact)
{
	thread_t th = thact->thread;

	return((th->state & TH_RUN) == TH_RUN);
}
vm_offset_t
get_map_min(
	vm_map_t	map)
{
	return(vm_map_min(map));
}

vm_offset_t
get_map_max(
	vm_map_t	map)
{
	return(vm_map_max(map));
}
int
get_vmsubmap_entries(
	vm_map_t		map,
	vm_object_offset_t	start,
	vm_object_offset_t	end)
{
	int		total_entries = 0;
	vm_map_entry_t	entry;

	vm_map_lock(map);
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	vm_map_unlock(map);
	return(total_entries);
}
int
get_vmmap_entries(
	vm_map_t	map)
{
	int		total_entries = 0;
	vm_map_entry_t	entry;

	vm_map_lock(map);
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	vm_map_unlock(map);
	return(total_entries);
}
int
get_task_userstop(
	task_t	task)
{
	return(task->user_stop_count);
}

int
get_thread_userstop(
	thread_act_t	th)
{
	return(th->user_stop_count);
}
boolean_t
thread_should_abort(
	thread_shuttle_t	th)
{
	return(!th->top_act || !th->top_act->active ||
	       (th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT);
}
/*
 * This routine is like thread_should_abort() above.  It checks to
 * see if the current thread is aborted.  But unlike above, it also
 * checks to see if thread is safely aborted.  If so, it returns
 * that fact, and clears the condition (safe aborts only should
 * have a single effect, and a poll of the abort status
 * qualifies).
 */
boolean_t
current_thread_aborted(
	void)
{
	thread_t	th = current_thread();
	spl_t		s;

	if ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT &&
	    th->interrupt_level != THREAD_UNINT)
		return (TRUE);
	if (th->state & TH_ABORT_SAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->state & TH_ABORT_SAFELY)
			th->state &= ~(TH_ABORT|TH_ABORT_SAFELY);
		thread_unlock(th);
		splx(s);
	}
	return (FALSE);
}
void
task_act_iterate_wth_args(
	task_t	task,
	void	(*func_callback)(thread_act_t, void *),
	void	*func_arg)
{
	thread_act_t	inc, ninc;

	task_lock(task);
	for (inc  = (thread_act_t)queue_first(&task->thr_acts);
	     inc != (thread_act_t)&task->thr_acts;
	     inc  = ninc) {
		ninc = (thread_act_t)queue_next(&inc->thr_acts);
		(void) (*func_callback)(inc, func_arg);
	}
	task_unlock(task);
}
void
ipc_port_release(
	ipc_port_t	port)
{
	ipc_object_release(&(port)->ip_object);
}
kern_return_t
get_thread_waitresult(
	thread_t	th)
{
	return(th->wait_result);
}
void
astbsd_on(void)
{
	boolean_t	reenable;

	reenable = ml_set_interrupts_enabled(FALSE);
	ast_on_fast(AST_BSD);
	(void)ml_set_interrupts_enabled(reenable);
}