/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/machine/vm_param.h>
#include <mach/task.h>

#include <kern/kern_types.h>
#include <kern/ledger.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h> /* last */
#include <sys/resource.h>
#undef thread_should_halt

/* BSD KERN COMPONENT INTERFACE */

task_t	bsd_init_task = TASK_NULL;
boolean_t init_task_died;
char	init_task_failure_data[1024];
extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */
thread_t get_firstthread(task_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
kern_return_t get_signalact(task_t, thread_t *, int);
int get_vmsubmap_entries(vm_map_t, vm_object_offset_t, vm_object_offset_t);
int fill_task_rusage(task_t task, rusage_info_current *ri);
int fill_task_io_rusage(task_t task, rusage_info_current *ri);
int fill_task_qos_rusage(task_t task, rusage_info_current *ri);
void fill_task_billed_usage(task_t task, rusage_info_current *ri);
void *get_bsdtask_info(task_t t)
{
	return(t->bsd_info);
}

void *get_bsdthreadtask_info(thread_t th)
{
	return(th->task != TASK_NULL ? th->task->bsd_info : NULL);
}

void set_bsdtask_info(task_t t, void * v)
{
	t->bsd_info = v;
}

void *get_bsdthread_info(thread_t th)
{
	return(th->uthread);
}

int get_thread_lock_count(thread_t th); /* forced forward */
int get_thread_lock_count(thread_t th)
{
	return(th->mutex_count);
}
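
/*
 * Editorial note on the accessors above: bsd_info is an opaque back-pointer
 * from a Mach task to its BSD proc structure, and uthread is the per-thread
 * BSD state.  Keeping them as void * avoids pulling BSD types into Mach
 * headers; a BSD-side caller casts the result back, e.g. (illustrative
 * sketch only, not part of the original file):
 *
 *	struct proc *p = (struct proc *)get_bsdtask_info(current_task());
 */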
/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here.  We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_t get_firstthread(task_t task)
{
	thread_t thread = (thread_t)(void *)queue_first(&task->threads);

	if (queue_end(&task->threads, (queue_entry_t)thread))
		thread = THREAD_NULL;

	if (!task->active)
		return (THREAD_NULL);

	return (thread);
}
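
/*
 * Illustrative sketch (not part of the original file): the task->active
 * re-check above is what makes the non-blocking contract in the comment
 * work -- an exiting task may have an empty or stale thread queue, and the
 * caller must treat THREAD_NULL as "no usable thread":
 *
 *	thread_t first = get_firstthread(task);
 *	if (first == THREAD_NULL)
 *		return (ESRCH);		hypothetical caller: task inactive/empty
 */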
kern_return_t
get_signalact(
	task_t		task,
	thread_t	*result_out,
	int		setast)
{
	kern_return_t	result = KERN_SUCCESS;
	thread_t	inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)(void *)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		thread_mtx_lock(inc);
		if (inc->active &&
				(inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result_out)
		*result_out = thread;

	if (thread) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}
	else
		result = KERN_FAILURE;

	task_unlock(task);

	return (result);
}
kern_return_t
check_actforsig(
	task_t		task,
	thread_t	thread,
	int		setast)
{
	kern_return_t	result = KERN_FAILURE;
	thread_t	inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)(void *)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			if (inc->active &&
					(inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return (result);
}
ledger_t get_task_ledger(task_t t)
{
	return(t->ledger);
}

/*
 * This is only safe to call from a thread executing in
 * the task's context, or if the task is locked.  Otherwise,
 * the map could be switched for the task (and freed) before
 * we return it here.
 */
vm_map_t get_task_map(task_t t)
{
	return(t->map);
}

vm_map_t get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL)
		return VM_MAP_NULL;

	task_lock(t);
	if (!t->active) {
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	vm_map_reference_swap(m);
	task_unlock(t);
	return m;
}

ipc_space_t get_task_ipcspace(task_t t)
{
	return(t->itk_space);
}
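
/*
 * Usage sketch (illustrative, not from the original source): callers that
 * cannot satisfy the locking rule documented above get_task_map() should
 * take a reference instead, so the map cannot be freed out from under
 * them, and drop it when done:
 *
 *	vm_map_t m = get_task_map_reference(t);
 *	if (m != VM_MAP_NULL) {
 *		... inspect the map ...
 *		vm_map_deallocate(m);
 *	}
 */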
int get_task_numactivethreads(task_t task)
{
	thread_t	inc;
	int num_active_thr = 0;

	task_lock(task);

	for (inc = (thread_t)(void *)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc);
			inc = (thread_t)(void *)queue_next(&inc->task_threads)) {
		if (inc->active)
			num_active_thr++;
	}

	task_unlock(task);
	return num_active_thr;
}

int get_task_numacts(task_t t)
{
	return(t->thread_count);
}
/* does this machine need 64bit register set for signal handler */
int is_64signalregset(void)
{
	if (task_has_64BitData(current_task())) {
		return(1);
	}

	return(0);
}
/*
 * Swap in a new map for the task/thread pair; the old map reference is
 * returned.  Also does a pmap switch if the thread provided is the
 * current thread.
 */
vm_map_t
swap_task_map(task_t task, thread_t thread, vm_map_t map, boolean_t doswitch)
{
	vm_map_t old_map;

	if (task != thread->task)
		panic("swap_task_map");

	task_lock(task);
	mp_disable_preemption();
	old_map = task->map;
	thread->map = task->map = map;
	if (doswitch) {
		pmap_switch(map->pmap);
	}
	mp_enable_preemption();
	task_unlock(task);

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
	inval_copy_windows(thread);
#endif

	return old_map;
}
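
/*
 * Editorial note: preemption is disabled around the map/pmap update above
 * so the thread cannot be rescheduled between publishing the new map
 * pointers and switching the hardware pmap; on x86 configurations with
 * copy windows, cached windows are invalidated afterwards because they
 * referenced the old map's translations.
 */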
pmap_t get_task_pmap(task_t t)
{
	return(t->map->pmap);
}
uint64_t get_task_resident_size(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map : task->map;
	return((uint64_t)pmap_resident_count(map->pmap) * PAGE_SIZE_64);
}
uint64_t get_task_compressed(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map : task->map;
	return((uint64_t)pmap_compressed(map->pmap) * PAGE_SIZE_64);
}
uint64_t get_task_resident_max(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map : task->map;
	return((uint64_t)pmap_resident_max(map->pmap) * PAGE_SIZE_64);
}
uint64_t get_task_purgeable_size(task_t task)
{
	vm_map_t map;
	mach_vm_size_t	volatile_virtual_size;
	mach_vm_size_t	volatile_resident_size;
	mach_vm_size_t	volatile_pmap_size;

	map = (task == kernel_task) ? kernel_map : task->map;
	vm_map_query_volatile(map, &volatile_virtual_size,
		&volatile_resident_size, &volatile_pmap_size);

	return((uint64_t)volatile_resident_size);
}
uint64_t get_task_phys_footprint(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.phys_footprint,
		&credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}
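
/*
 * Editorial note: a ledger entry keeps separate running credit and debit
 * totals, so the current balance is (credit - debit); e.g. a footprint
 * ledger credited 150 pages' worth of bytes and debited 30 yields a
 * balance of 120 pages' worth.  The same pattern is used for cpu_time
 * below, and ledger_get_balance() (used later in fill_task_rusage)
 * returns this difference directly.
 */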
uint64_t get_task_phys_footprint_max(task_t task)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_maximum(task->ledger, task_ledgers.phys_footprint, &max);
	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}
uint64_t get_task_cpu_time(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.cpu_time,
		&credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}
pmap_t get_map_pmap(vm_map_t map)
{
	return(map->pmap);
}

task_t get_threadtask(thread_t th)
{
	return(th->task);
}

vm_map_offset_t
get_map_min(vm_map_t map)
{
	return(vm_map_min(map));
}

vm_map_offset_t
get_map_max(vm_map_t map)
{
	return(vm_map_max(map));
}
int
get_vmsubmap_entries(
	vm_map_t		map,
	vm_object_offset_t	start,
	vm_object_offset_t	end)
{
	int total_entries = 0;
	vm_map_entry_t entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}
int
get_vmmap_entries(
	vm_map_t	map)
{
	int total_entries = 0;
	vm_map_entry_t entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}
int
get_task_userstop(
	task_t	task)
{
	return(task->user_stop_count);
}

int
get_thread_userstop(
	thread_t th)
{
	return(th->user_stop_count);
}

boolean_t
get_task_pidsuspended(
	task_t	task)
{
	return (task->pidsuspended);
}

boolean_t
get_task_frozen(
	task_t	task)
{
	return (task->frozen);
}
boolean_t
thread_should_abort(
	thread_t th)
{
	return ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT);
}
/*
 * This routine is like thread_should_abort() above.  It checks to
 * see if the current thread is aborted.  But unlike above, it also
 * checks to see if thread is safely aborted.  If so, it returns
 * that fact, and clears the condition (safe aborts only should
 * have a single effect, and a poll of the abort status
 * cleared).
 */
boolean_t
current_thread_aborted (
		void)
{
	thread_t th = current_thread();
	spl_t s;

	if ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT &&
			(th->options & TH_OPT_INTMASK) != THREAD_UNINT)
		return (TRUE);
	if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->sched_flags & TH_SFLAG_ABORTSAFELY)
			th->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}
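
/*
 * Editorial sketch: the double check of TH_SFLAG_ABORTSAFELY (once
 * unlocked, once under the thread lock) is the usual test/retest pattern:
 * the unlocked read keeps splsched()/thread_lock() off the common path,
 * while the locked read makes the clear race-free.  A hypothetical
 * interruptible wait loop would poll it like:
 *
 *	while (!done) {
 *		if (current_thread_aborted())
 *			return (EINTR);		abort seen; safe-abort cleared
 *		... block ...
 *	}
 */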
void
task_act_iterate_wth_args(
	task_t	task,
	void	(*func_callback)(thread_t, void *),
	void	*func_arg)
{
	thread_t	inc;

	task_lock(task);

	for (inc = (thread_t)(void *)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		(void) (*func_callback)(inc, func_arg);
		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	task_unlock(task);
}
void
astbsd_on(void)
{
	boolean_t	reenable;

	reenable = ml_set_interrupts_enabled(FALSE);
	ast_on_fast(AST_BSD);
	(void)ml_set_interrupts_enabled(reenable);
}
#include <sys/bsdtask_info.h>
void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo)
{
	vm_map_t map;
	task_absolutetime_info_data_t tinfo;
	thread_t thread;
	uint32_t cswitch = 0, numrunning = 0;
	uint32_t syscalls_unix = 0;
	uint32_t syscalls_mach = 0;

	map = (task == kernel_task)? kernel_map: task->map;

	ptinfo->pti_virtual_size = map->size;
	ptinfo->pti_resident_size =
		(mach_vm_size_t)(pmap_resident_count(map->pmap))
		* PAGE_SIZE_64;

	task_lock(task);

	ptinfo->pti_policy = ((task != kernel_task)?
		POLICY_TIMESHARE: POLICY_RR);

	tinfo.threads_user = tinfo.threads_system = 0;
	tinfo.total_user = task->total_user_time;
	tinfo.total_system = task->total_system_time;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		uint64_t tval;
		spl_t x;

		if (thread->options & TH_OPT_IDLE_THREAD)
			continue;

		x = splsched();
		thread_lock(thread);

		if ((thread->state & TH_RUN) == TH_RUN)
			numrunning++;
		cswitch += thread->c_switch;
		tval = timer_grab(&thread->user_timer);
		tinfo.threads_user += tval;
		tinfo.total_user += tval;

		tval = timer_grab(&thread->system_timer);

		if (thread->precise_user_kernel_time) {
			tinfo.threads_system += tval;
			tinfo.total_system += tval;
		} else {
			/* system_timer may represent either sys or user */
			tinfo.threads_user += tval;
			tinfo.total_user += tval;
		}

		syscalls_unix += thread->syscalls_unix;
		syscalls_mach += thread->syscalls_mach;

		thread_unlock(thread);
		splx(x);
	}

	ptinfo->pti_total_system = tinfo.total_system;
	ptinfo->pti_total_user = tinfo.total_user;
	ptinfo->pti_threads_system = tinfo.threads_system;
	ptinfo->pti_threads_user = tinfo.threads_user;

	ptinfo->pti_faults = task->faults;
	ptinfo->pti_pageins = task->pageins;
	ptinfo->pti_cow_faults = task->cow_faults;
	ptinfo->pti_messages_sent = task->messages_sent;
	ptinfo->pti_messages_received = task->messages_received;
	ptinfo->pti_syscalls_mach = task->syscalls_mach + syscalls_mach;
	ptinfo->pti_syscalls_unix = task->syscalls_unix + syscalls_unix;
	ptinfo->pti_csw = task->c_switch + cswitch;
	ptinfo->pti_threadnum = task->thread_count;
	ptinfo->pti_numrunning = numrunning;
	ptinfo->pti_priority = task->priority;

	task_unlock(task);
}
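
/*
 * Editorial note: when precise_user_kernel_time is off, the kernel does
 * not split timer credit between user and system at every boundary
 * crossing, so system_timer may contain time of either kind; charging it
 * to the user buckets above keeps the accounting consistent for such
 * threads.  The live per-thread counters are added to the task-level
 * fields (task->syscalls_unix etc.) because the task fields only
 * accumulate totals rolled up from threads that have already terminated.
 */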
int
fill_taskthreadinfo(task_t task, uint64_t thaddr, int thuniqueid, struct proc_threadinfo_internal * ptinfo, void * vpp, int *vidp)
{
	thread_t thact;
	int err = 0;
	mach_msg_type_number_t count;
	thread_basic_info_data_t basic_info;
	kern_return_t kret;
	uint64_t addr = 0;

	task_lock(task);

	for (thact = (thread_t)(void *)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)thact); ) {
		addr = (thuniqueid == 0) ? thact->machine.cthread_self : thact->thread_id;
		if (addr == thaddr)
		{
			count = THREAD_BASIC_INFO_COUNT;
			if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
				err = 1;
				goto out;
			}
			ptinfo->pth_user_time = ((basic_info.user_time.seconds * (integer_t)NSEC_PER_SEC) + (basic_info.user_time.microseconds * (integer_t)NSEC_PER_USEC));
			ptinfo->pth_system_time = ((basic_info.system_time.seconds * (integer_t)NSEC_PER_SEC) + (basic_info.system_time.microseconds * (integer_t)NSEC_PER_USEC));

			ptinfo->pth_cpu_usage = basic_info.cpu_usage;
			ptinfo->pth_policy = basic_info.policy;
			ptinfo->pth_run_state = basic_info.run_state;
			ptinfo->pth_flags = basic_info.flags;
			ptinfo->pth_sleep_time = basic_info.sleep_time;
			ptinfo->pth_curpri = thact->sched_pri;
			ptinfo->pth_priority = thact->priority;
			ptinfo->pth_maxpriority = thact->max_priority;

			if ((vpp != NULL) && (thact->uthread != NULL))
				bsd_threadcdir(thact->uthread, vpp, vidp);
			bsd_getthreadname(thact->uthread, ptinfo->pth_name);
			err = 0;
			goto out;
		}
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}
	err = 1;

out:
	task_unlock(task);
	return(err);
}
int
fill_taskthreadlist(task_t task, void * buffer, int thcount)
{
	int numthr = 0;
	uint64_t * uptr;
	uint64_t thaddr;
	thread_t thact;

	uptr = (uint64_t *)buffer;

	task_lock(task);

	for (thact = (thread_t)(void *)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)thact); ) {
		thaddr = thact->machine.cthread_self;
		*uptr++ = thaddr;
		numthr++;
		if (numthr >= thcount)
			goto out;
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}

out:
	task_unlock(task);
	return (int)(numthr * sizeof(uint64_t));
}
int
get_numthreads(task_t task)
{
	return(task->thread_count);
}
/*
 * Gather the various pieces of info about the designated task,
 * and collect it all into a single rusage_info.
 */
int
fill_task_rusage(task_t task, rusage_info_current *ri)
{
	struct task_power_info powerinfo;

	assert(task != TASK_NULL);
	task_lock(task);

	task_power_info_locked(task, &powerinfo, NULL);
	ri->ri_pkg_idle_wkups = powerinfo.task_platform_idle_wakeups;
	ri->ri_interrupt_wkups = powerinfo.task_interrupt_wakeups;
	ri->ri_user_time = powerinfo.total_user;
	ri->ri_system_time = powerinfo.total_system;

	ledger_get_balance(task->ledger, task_ledgers.phys_footprint,
		(ledger_amount_t *)&ri->ri_phys_footprint);
	ledger_get_balance(task->ledger, task_ledgers.phys_mem,
		(ledger_amount_t *)&ri->ri_resident_size);
	ledger_get_balance(task->ledger, task_ledgers.wired_mem,
		(ledger_amount_t *)&ri->ri_wired_size);

	ri->ri_pageins = task->pageins;

	task_unlock(task);
	return (0);
}
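
/*
 * Usage sketch (illustrative only): the BSD side fills a
 * rusage_info_current for proc_pid_rusage()-style reporting roughly as:
 *
 *	rusage_info_current ri;
 *	bzero(&ri, sizeof(ri));
 *	fill_task_rusage(task, &ri);
 *	fill_task_io_rusage(task, &ri);
 *	fill_task_qos_rusage(task, &ri);
 *	fill_task_billed_usage(task, &ri);
 */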
void
fill_task_billed_usage(task_t task __unused, rusage_info_current *ri)
{
#if CONFIG_BANK
	ri->ri_billed_system_time = bank_billed_time(task->bank_context);
	ri->ri_serviced_system_time = bank_serviced_time(task->bank_context);
#else
	ri->ri_billed_system_time = 0;
	ri->ri_serviced_system_time = 0;
#endif
}
int
fill_task_io_rusage(task_t task, rusage_info_current *ri)
{
	assert(task != TASK_NULL);
	task_lock(task);

	if (task->task_io_stats) {
		ri->ri_diskio_bytesread = task->task_io_stats->disk_reads.size;
		ri->ri_diskio_byteswritten = (task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size);
	} else {
		/* I/O Stats unavailable */
		ri->ri_diskio_bytesread = 0;
		ri->ri_diskio_byteswritten = 0;
	}
	task_unlock(task);
	return (0);
}
int
fill_task_qos_rusage(task_t task, rusage_info_current *ri)
{
	thread_t thread;

	assert(task != TASK_NULL);
	task_lock(task);

	/* Roll up QoS time of all the threads to the task */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		if (thread->options & TH_OPT_IDLE_THREAD)
			continue;

		thread_mtx_lock(thread);
		thread_update_qos_cpu_time(thread, TRUE);
		thread_mtx_unlock(thread);
	}

	ri->ri_cpu_time_qos_default = task->cpu_time_qos_stats.cpu_time_qos_default;
	ri->ri_cpu_time_qos_maintenance = task->cpu_time_qos_stats.cpu_time_qos_maintenance;
	ri->ri_cpu_time_qos_background = task->cpu_time_qos_stats.cpu_time_qos_background;
	ri->ri_cpu_time_qos_utility = task->cpu_time_qos_stats.cpu_time_qos_utility;
	ri->ri_cpu_time_qos_legacy = task->cpu_time_qos_stats.cpu_time_qos_legacy;
	ri->ri_cpu_time_qos_user_initiated = task->cpu_time_qos_stats.cpu_time_qos_user_initiated;
	ri->ri_cpu_time_qos_user_interactive = task->cpu_time_qos_stats.cpu_time_qos_user_interactive;

	task_unlock(task);
	return (0);
}