/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>

#include <kern/lock.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <vm/vm_protos.h> /* last */

#undef thread_should_halt
#undef ipc_port_release
/* BSD KERN COMPONENT INTERFACE */

task_t bsd_init_task = TASK_NULL;
char init_task_failure_data[1024];
extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */

thread_t get_firstthread(task_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_t);
boolean_t thread_should_abort(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
void ipc_port_release(ipc_port_t);
boolean_t is_thread_active(thread_t);
kern_return_t get_signalact(task_t, thread_t *, int);
int get_vmsubmap_entries(vm_map_t, vm_object_offset_t, vm_object_offset_t);
void *get_bsdtask_info(task_t t)
{
    return(t->bsd_info);
}

void set_bsdtask_info(task_t t, void *v)
{
    t->bsd_info = v;
}

void *get_bsdthread_info(thread_t th)
{
    return(th->uthread);
}
/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here.  We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_t get_firstthread(task_t task)
{
    thread_t thread = (thread_t)queue_first(&task->threads);

    if (queue_end(&task->threads, (queue_entry_t)thread))
        thread = THREAD_NULL;

    if (!task->active)
        return (THREAD_NULL);

    return (thread);
}
kern_return_t
get_signalact(
    task_t      task,
    thread_t    *result_out,
    int         setast)
{
    kern_return_t   result = KERN_SUCCESS;
    thread_t        inc, thread = THREAD_NULL;

    task_lock(task);

    if (!task->active) {
        task_unlock(task);

        return (KERN_FAILURE);
    }

    for (inc = (thread_t)queue_first(&task->threads);
            !queue_end(&task->threads, (queue_entry_t)inc); ) {
        thread_mtx_lock(inc);

        if (inc->active &&
                (inc->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT) {
            thread = inc;
            break;
        }

        thread_mtx_unlock(inc);

        inc = (thread_t)queue_next(&inc->task_threads);
    }

    if (result_out)
        *result_out = thread;

    if (thread) {
        if (setast)
            act_set_astbsd(thread);

        thread_mtx_unlock(thread);
    }
    else
        result = KERN_FAILURE;

    task_unlock(task);

    return (result);
}
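/*
 * Illustrative usage sketch (not part of the original file): BSD signal
 * delivery might use get_signalact() to pick an eligible thread in the
 * task and, with setast != 0, post an AST_BSD to it.  The helper name
 * example_post_signal_ast is hypothetical.
 */
static boolean_t
example_post_signal_ast(task_t task)
{
    thread_t thread;

    if (get_signalact(task, &thread, 1) != KERN_SUCCESS)
        return FALSE;   /* no active, non-aborting thread found */

    /*
     * The chosen thread's mutex was dropped before get_signalact()
     * returned; the BSD AST is now pending on that thread.
     */
    return TRUE;
}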
kern_return_t
check_actforsig(
    task_t      task,
    thread_t    thread,
    int         setast)
{
    kern_return_t   result = KERN_FAILURE;
    thread_t        inc;

    task_lock(task);

    if (!task->active) {
        task_unlock(task);

        return (KERN_FAILURE);
    }

    for (inc = (thread_t)queue_first(&task->threads);
            !queue_end(&task->threads, (queue_entry_t)inc); ) {
        if (inc == thread) {
            thread_mtx_lock(inc);

            if (inc->active &&
                    (inc->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT) {
                result = KERN_SUCCESS;
                break;
            }

            thread_mtx_unlock(inc);
            break;
        }

        inc = (thread_t)queue_next(&inc->task_threads);
    }

    if (result == KERN_SUCCESS) {
        if (setast)
            act_set_astbsd(thread);

        thread_mtx_unlock(thread);
    }

    task_unlock(task);

    return (result);
}
/*
 * This is only safe to call from a thread executing in
 * the task's context or if the task is locked.  Otherwise,
 * the map could be switched for the task (and freed) before
 * we return it here.
 */
vm_map_t get_task_map(task_t t)
{
    return(t->map);
}

vm_map_t get_task_map_reference(task_t t)
{
    vm_map_t m;

    if (t == NULL)
        return VM_MAP_NULL;

    task_lock(t);
    if (!t->active) {
        task_unlock(t);
        return VM_MAP_NULL;
    }
    m = t->map;
    vm_map_reference_swap(m);
    task_unlock(t);
    return m;
}
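/*
 * Illustrative usage sketch (not part of the original file): a caller not
 * running in the target task's context should take a reference with
 * get_task_map_reference() and drop it with vm_map_deallocate() when
 * done; example_task_virtual_size is a hypothetical helper.
 */
static vm_map_size_t
example_task_virtual_size(task_t t)
{
    vm_map_t map = get_task_map_reference(t);
    vm_map_size_t size;

    if (map == VM_MAP_NULL)
        return 0;

    size = map->size;
    vm_map_deallocate(map);     /* release the reference taken above */
    return size;
}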
ipc_space_t get_task_ipcspace(task_t t)
{
    return(t->itk_space);
}

int get_task_numacts(task_t t)
{
    return(t->thread_count);
}
/* does this machine need a 64bit register set for the signal handler */
int is_64signalregset(void)
{
    task_t t = current_task();

    if (t->taskFeatures[0] & tf64BitData)
        return(1);
    else
        return(0);
}
/*
 * The old map reference is returned.
 */
vm_map_t
swap_task_map(task_t task, vm_map_t map)
{
    thread_t thread = current_thread();
    vm_map_t old_map;

    if (task != thread->task)
        panic("swap_task_map");

    task_lock(task);
    old_map = task->map;
    thread->map = task->map = map;
    task_unlock(task);

    inval_copy_windows(thread);

    return old_map;
}
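/*
 * Illustrative usage sketch (not part of the original file): since
 * swap_task_map() hands back the old map's reference, the caller is
 * responsible for disposing of it.  new_map is a map the caller already
 * holds a reference on, and example_replace_task_map is hypothetical.
 */
static void
example_replace_task_map(task_t task, vm_map_t new_map)
{
    vm_map_t old_map = swap_task_map(task, new_map);

    vm_map_deallocate(old_map); /* drop the returned old reference */
}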
pmap_t get_task_pmap(task_t t)
{
    return(t->map->pmap);
}

pmap_t get_map_pmap(vm_map_t map)
{
    return(map->pmap);
}

task_t get_threadtask(thread_t th)
{
    return(th->task);
}
boolean_t is_thread_idle(thread_t th)
{
    return((th->state & TH_IDLE) == TH_IDLE);
}

boolean_t is_thread_running(thread_t th)
{
    return((th->state & TH_RUN) == TH_RUN);
}
vm_map_offset_t
get_map_min(vm_map_t map)
{
    return(vm_map_min(map));
}

vm_map_offset_t
get_map_max(vm_map_t map)
{
    return(vm_map_max(map));
}
int
get_vmsubmap_entries(
    vm_map_t            map,
    vm_object_offset_t  start,
    vm_object_offset_t  end)
{
    int             total_entries = 0;
    vm_map_entry_t  entry;

    if (not_in_kdp)
        vm_map_lock(map);
    entry = vm_map_first_entry(map);
    while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
        entry = entry->vme_next;
    }

    while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
        if (entry->is_sub_map) {
            total_entries +=
                get_vmsubmap_entries(entry->object.sub_map,
                    entry->offset,
                    entry->offset +
                    (entry->vme_end - entry->vme_start));
        } else {
            total_entries += 1;
        }
        entry = entry->vme_next;
    }
    if (not_in_kdp)
        vm_map_unlock(map);
    return(total_entries);
}
int
get_vmmap_entries(
    vm_map_t    map)
{
    int             total_entries = 0;
    vm_map_entry_t  entry;

    if (not_in_kdp)
        vm_map_lock(map);
    entry = vm_map_first_entry(map);

    while (entry != vm_map_to_entry(map)) {
        if (entry->is_sub_map) {
            total_entries +=
                get_vmsubmap_entries(entry->object.sub_map,
                    entry->offset,
                    entry->offset +
                    (entry->vme_end - entry->vme_start));
        } else {
            total_entries += 1;
        }
        entry = entry->vme_next;
    }
    if (not_in_kdp)
        vm_map_unlock(map);
    return(total_entries);
}
int
get_task_userstop(task_t task)
{
    return(task->user_stop_count);
}

int
get_thread_userstop(thread_t th)
{
    return(th->user_stop_count);
}
boolean_t
thread_should_abort(thread_t th)
{
    return ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT);
}
/*
 * This routine is like thread_should_abort() above.  It checks to
 * see if the current thread is aborted.  But unlike above, it also
 * checks to see if the thread is safely aborted.  If so, it returns
 * that fact, and clears the condition (safe aborts only should
 * have a single effect, and a poll of the abort status
 * qualifies).
 */
boolean_t
current_thread_aborted(void)
{
    thread_t th = current_thread();
    spl_t s;

    if ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT &&
            (th->options & TH_OPT_INTMASK) != THREAD_UNINT) {
        if (th->state & TH_ABORT_SAFELY) {
            s = splsched();
            thread_lock(th);
            if (th->state & TH_ABORT_SAFELY)
                th->state &= ~(TH_ABORT|TH_ABORT_SAFELY);
            thread_unlock(th);
            splx(s);
        }
        return TRUE;
    }
    return FALSE;
}
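/*
 * Illustrative usage sketch (not part of the original file): a
 * long-running kernel loop can poll current_thread_aborted() so that a
 * safe abort terminates it promptly; per the comment above, the poll
 * itself consumes the TH_ABORT_SAFELY condition.  The helper name
 * example_interruptible_work is hypothetical.
 */
static boolean_t
example_interruptible_work(int iterations)
{
    int i;

    for (i = 0; i < iterations; i++) {
        if (current_thread_aborted())
            return FALSE;   /* aborted; caller unwinds, e.g. with EINTR */

        /* ... one bounded unit of work per iteration ... */
    }
    return TRUE;
}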
void
task_act_iterate_wth_args(
    task_t  task,
    void    (*func_callback)(thread_t, void *),
    void    *func_arg)
{
    thread_t inc;

    task_lock(task);

    for (inc = (thread_t)queue_first(&task->threads);
            !queue_end(&task->threads, (queue_entry_t)inc); ) {
        (void) (*func_callback)(inc, func_arg);

        inc = (thread_t)queue_next(&inc->task_threads);
    }

    task_unlock(task);
}
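/*
 * Illustrative usage sketch (not part of the original file): counting a
 * task's threads via task_act_iterate_wth_args(); both helper names are
 * hypothetical.  The callback runs with the task lock held, so it must
 * not block.
 */
static void
example_count_one(__unused thread_t thread, void *arg)
{
    (*(int *)arg)++;    /* bump the caller's counter */
}

static int
example_count_threads(task_t task)
{
    int count = 0;

    task_act_iterate_wth_args(task, example_count_one, &count);
    return count;
}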
void
ipc_port_release(
    ipc_port_t port)
{
    ipc_object_release(&(port)->ip_object);
}
void
astbsd_on(void)
{
    boolean_t reenable;

    reenable = ml_set_interrupts_enabled(FALSE);
    ast_on_fast(AST_BSD);
    (void)ml_set_interrupts_enabled(reenable);
}
#include <sys/bsdtask_info.h>
void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal *ptinfo)
{
    vm_map_t map;
    task_absolutetime_info_data_t tinfo;
    thread_t thread;
    int numrunning = 0;

    map = (task == kernel_task)? kernel_map: task->map;

    ptinfo->pti_virtual_size = map->size;
    ptinfo->pti_resident_size = (mach_vm_size_t)(pmap_resident_count(map->pmap)
                                                   * PAGE_SIZE_64);

    task_lock(task);

    ptinfo->pti_policy = ((task != kernel_task)?
                                      POLICY_TIMESHARE: POLICY_RR);

    tinfo.threads_user = tinfo.threads_system = 0;
    tinfo.total_user = task->total_user_time;
    tinfo.total_system = task->total_system_time;

    queue_iterate(&task->threads, thread, thread_t, task_threads) {
        uint64_t tval;

        if ((thread->state & TH_RUN) == TH_RUN)
            numrunning++;

        tval = timer_grab(&thread->user_timer);
        tinfo.threads_user += tval;
        tinfo.total_user += tval;

        tval = timer_grab(&thread->system_timer);
        tinfo.threads_system += tval;
        tinfo.total_system += tval;
    }

    ptinfo->pti_total_system = tinfo.total_system;
    ptinfo->pti_total_user = tinfo.total_user;
    ptinfo->pti_threads_system = tinfo.threads_system;
    ptinfo->pti_threads_user = tinfo.threads_user;

    ptinfo->pti_faults = task->faults;
    ptinfo->pti_pageins = task->pageins;
    ptinfo->pti_cow_faults = task->cow_faults;
    ptinfo->pti_messages_sent = task->messages_sent;
    ptinfo->pti_messages_received = task->messages_received;
    ptinfo->pti_syscalls_mach = task->syscalls_mach;
    ptinfo->pti_syscalls_unix = task->syscalls_unix;
    ptinfo->pti_csw = task->csw;
    ptinfo->pti_threadnum = task->thread_count;
    ptinfo->pti_numrunning = numrunning;
    ptinfo->pti_priority = task->priority;

    task_unlock(task);
}
int
fill_taskthreadinfo(task_t task, uint64_t thaddr, struct proc_threadinfo_internal *ptinfo)
{
    thread_t thact;
    int err = 0, count;
    thread_basic_info_data_t basic_info;
    kern_return_t kret;

    task_lock(task);

    for (thact = (thread_t)queue_first(&task->threads);
            !queue_end(&task->threads, (queue_entry_t)thact); ) {
#if defined(__ppc__)
        if (thact->machine.cthread_self == thaddr)
#elif defined (__i386__)
        if (thact->machine.pcb->cthread_self == thaddr)
#else
#error architecture not supported
#endif
        {
            count = THREAD_BASIC_INFO_COUNT;
            if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO,
                    (thread_info_t)&basic_info,
                    (mach_msg_type_number_t *)&count)) != KERN_SUCCESS) {
                err = 1;
                goto out;
            }
#if 0
            ptinfo->pth_user_time = timer_grab(&basic_info.user_time);
            ptinfo->pth_system_time = timer_grab(&basic_info.system_time);
#else
            ptinfo->pth_user_time = ((basic_info.user_time.seconds * NSEC_PER_SEC) + (basic_info.user_time.microseconds * NSEC_PER_USEC));
            ptinfo->pth_system_time = ((basic_info.system_time.seconds * NSEC_PER_SEC) + (basic_info.system_time.microseconds * NSEC_PER_USEC));
#endif
            ptinfo->pth_cpu_usage = basic_info.cpu_usage;
            ptinfo->pth_policy = basic_info.policy;
            ptinfo->pth_run_state = basic_info.run_state;
            ptinfo->pth_flags = basic_info.flags;
            ptinfo->pth_sleep_time = basic_info.sleep_time;
            ptinfo->pth_curpri = thact->sched_pri;
            ptinfo->pth_priority = thact->priority;
            ptinfo->pth_maxpriority = thact->max_priority;

            err = 0;
            goto out;
        }
        thact = (thread_t)queue_next(&thact->task_threads);
    }
    err = 1;
out:
    task_unlock(task);
    return(err);
}
int
fill_taskthreadlist(task_t task, void *buffer, int thcount)
{
    int numthr = 0;
    thread_t thact;
    uint64_t *uptr;
    uint64_t thaddr;

    task_lock(task);

    uptr = (uint64_t *)buffer;

    for (thact = (thread_t)queue_first(&task->threads);
            !queue_end(&task->threads, (queue_entry_t)thact); ) {
#if defined(__ppc__)
        thaddr = thact->machine.cthread_self;
#elif defined (__i386__)
        thaddr = thact->machine.pcb->cthread_self;
#else
#error architecture not supported
#endif
        *uptr++ = thaddr;
        numthr++;
        if (numthr >= thcount)
            break;
        thact = (thread_t)queue_next(&thact->task_threads);
    }

    task_unlock(task);

    return(numthr * sizeof(uint64_t));
}
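/*
 * Illustrative usage sketch (not part of the original file):
 * fill_taskthreadlist() treats the buffer as an array of thcount
 * uint64_t thread "addresses" (cthread_self values) and returns the
 * number of bytes it filled in; example_thread_count_filled is
 * hypothetical.
 */
static int
example_thread_count_filled(task_t task, uint64_t *buf, int maxthreads)
{
    int used_bytes = fill_taskthreadlist(task, (void *)buf, maxthreads);

    return (int)(used_bytes / sizeof(uint64_t));    /* entries written */
}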
int
get_numthreads(task_t task)
{
    return(task->thread_count);
}