/*
 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/machine/vm_param.h>
#include <mach/task.h>

#include <kern/kern_types.h>
#include <kern/ledger.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h> /* last */
#include <sys/resource.h>
#include <sys/signal.h>
#include <sys/errno.h>
#include <sys/proc_require.h>

#if MONOTONIC
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* MONOTONIC */

#include <machine/limits.h>
#include <sys/codesign.h> /* CS_CDHASH_LEN */
#undef thread_should_halt

/* BSD KERN COMPONENT INTERFACE */

extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */

thread_t get_firstthread(task_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
kern_return_t get_signalact(task_t, thread_t *, int);
int fill_task_rusage(task_t task, rusage_info_current *ri);
int fill_task_io_rusage(task_t task, rusage_info_current *ri);
int fill_task_qos_rusage(task_t task, rusage_info_current *ri);
void fill_task_monotonic_rusage(task_t task, rusage_info_current *ri);
uint64_t get_task_logical_writes(task_t task, boolean_t external);
void fill_task_billed_usage(task_t task, rusage_info_current *ri);
void task_bsdtask_kill(task_t);

extern uint64_t get_dispatchqueue_serialno_offset_from_proc(void *p);
extern uint64_t get_dispatchqueue_label_offset_from_proc(void *p);
extern uint64_t proc_uniqueid(void *p);
extern int proc_pidversion(void *p);
extern int proc_getcdhash(void *p, char *cdhash);

extern void psignal(void *, int);

void *
get_bsdtask_info(task_t t)
{
	proc_require(t->bsd_info, PROC_REQUIRE_ALLOW_NULL | PROC_REQUIRE_ALLOW_KERNPROC);
	return t->bsd_info;
}

void
task_bsdtask_kill(task_t t)
{
	void * bsd_info = get_bsdtask_info(t);

	if (bsd_info != NULL) {
		psignal(bsd_info, SIGKILL);
	}
}

void *
get_bsdthreadtask_info(thread_t th)
{
	void *bsd_info = NULL;

	if (th->task) {
		bsd_info = get_bsdtask_info(th->task);
	}
	return bsd_info;
}

void
set_bsdtask_info(task_t t, void * v)
{
	t->bsd_info = v;
}

void *
get_bsdthread_info(thread_t th)
{
	return th->uthread;
}

/*
 * This is used to remember any FS error from VNOP_PAGEIN code when
 * invoked under vm_fault(). The value is an errno style value. It can
 * be retrieved by exception handlers using thread_get_state().
 */
void
set_thread_pagein_error(thread_t th, int error)
{
	assert(th == current_thread());
	if (error == 0 || th->t_pagein_error == 0) {
		th->t_pagein_error = error;
	}
}

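/*
 * Illustrative sketch only (not part of the original file): a hypothetical
 * pager error path recording an errno-style value on the faulting thread.
 * The name example_note_pagein_failure is an assumption made for
 * illustration; only set_thread_pagein_error() above is real.
 */
#if 0
static void
example_note_pagein_failure(int vnop_error)
{
	/*
	 * Only the faulting thread may record its own pagein error;
	 * set_thread_pagein_error() asserts th == current_thread().
	 */
	set_thread_pagein_error(current_thread(), vnop_error);
}
#endif
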
#if defined(__x86_64__)
/*
 * Returns non-zero if the thread has a non-NULL task
 * and that task has an LDT.
 */
int
thread_task_has_ldt(thread_t th)
{
	return th->task && th->task->i386_ldt != 0;
}
#endif /* __x86_64__ */

int get_thread_lock_count(thread_t th); /* forced forward */
int
get_thread_lock_count(thread_t th)
{
	return th->mutex_count;
}

/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here. We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_t
get_firstthread(task_t task)
{
	thread_t thread = (thread_t)(void *)queue_first(&task->threads);

	if (queue_end(&task->threads, (queue_entry_t)thread)) {
		thread = THREAD_NULL;
	}

	if (!task->active) {
		return THREAD_NULL;
	}

	return thread;
}

kern_return_t
get_signalact(
	task_t          task,
	thread_t        *result_out,
	int             setast)
{
	kern_return_t   result = KERN_SUCCESS;
	thread_t        inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return KERN_FAILURE;
	}

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		thread_mtx_lock(inc);
		if (inc->active &&
		    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result_out) {
		*result_out = thread;
	}

	if (thread) {
		if (setast) {
			act_set_astbsd(thread);
		}

		thread_mtx_unlock(thread);
	} else {
		result = KERN_FAILURE;
	}

	task_unlock(task);

	return result;
}

kern_return_t
check_actforsig(
	task_t          task,
	thread_t        thread,
	int             setast)
{
	kern_return_t   result = KERN_FAILURE;
	thread_t        inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return KERN_FAILURE;
	}

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			if (inc->active &&
			    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast) {
			act_set_astbsd(thread);
		}

		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return result;
}

ledger_t
get_task_ledger(task_t t)
{
	return t->ledger;
}

/*
 * This is only safe to call from a thread executing in
 * the task's context or if the task is locked. Otherwise,
 * the map could be switched for the task (and freed) before
 * we go to return it here.
 */
vm_map_t
get_task_map(task_t t)
{
	return t->map;
}

vm_map_t
get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL) {
		return VM_MAP_NULL;
	}

	task_lock(t);
	if (!t->active) {
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	vm_map_reference(m);
	task_unlock(t);
	return m;
}

ipc_space_t
get_task_ipcspace(task_t t)
{
	return t->itk_space;
}

int
get_task_numacts(task_t t)
{
	return t->thread_count;
}

/* does this machine need 64bit register set for signal handler */
int
is_64signalregset(void)
{
	if (task_has_64Bit_data(current_task())) {
		return 1;
	}

	return 0;
}

/*
 * Swap in a new map for the task/thread pair; the old map reference is
 * returned. Also does a pmap switch if the thread provided is the current
 * thread.
 */
vm_map_t
swap_task_map(task_t task, thread_t thread, vm_map_t map)
{
	vm_map_t old_map;
	boolean_t doswitch = (thread == current_thread()) ? TRUE : FALSE;

	if (task != thread->task) {
		panic("swap_task_map");
	}

	task_lock(task);
	mp_disable_preemption();

	old_map = task->map;
	thread->map = task->map = map;
	vm_commit_pagezero_status(map);

	if (doswitch) {
		PMAP_SWITCH_USER(thread, map, cpu_number());
	}
	mp_enable_preemption();
	task_unlock(task);

	return old_map;
}

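/*
 * Illustrative sketch only: a hypothetical exec-like caller of
 * swap_task_map(). The old map reference is handed back to the caller,
 * which is then responsible for dropping it. example_replace_map is an
 * assumed name, not part of this file.
 */
#if 0
static void
example_replace_map(task_t task, thread_t thread, vm_map_t new_map)
{
	vm_map_t old_map = swap_task_map(task, thread, new_map);

	/* The returned reference on the prior map now belongs to us. */
	vm_map_deallocate(old_map);
}
#endif
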
/*
 * This is only safe to call from a thread executing in
 * the task's context or if the task is locked. Otherwise,
 * the map could be switched for the task (and freed) before
 * we go to return it here.
 */
pmap_t
get_task_pmap(task_t t)
{
	return t->map->pmap;
}

uint64_t
get_task_resident_size(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map : task->map;
	return (uint64_t)pmap_resident_count(map->pmap) * PAGE_SIZE_64;
}

uint64_t
get_task_compressed(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map : task->map;
	return (uint64_t)pmap_compressed(map->pmap) * PAGE_SIZE_64;
}

uint64_t
get_task_resident_max(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map : task->map;
	return (uint64_t)pmap_resident_max(map->pmap) * PAGE_SIZE_64;
}

/*
 * Get the balance for a given field in the task ledger.
 * Returns 0 if the entry is invalid.
 */
static uint64_t
get_task_ledger_balance(task_t task, int entry)
{
	ledger_amount_t balance = 0;

	ledger_get_balance(task->ledger, entry, &balance);
	return balance;
}

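/*
 * Illustrative sketch only: the getters below each wrap
 * get_task_ledger_balance() with a fixed task_ledgers index. A hypothetical
 * combined query (assumed name example_task_purgeable_nonvolatile_total)
 * would compose them the same way:
 */
#if 0
static uint64_t
example_task_purgeable_nonvolatile_total(task_t task)
{
	/* Each call returns 0 if the ledger entry is invalid. */
	return get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile) +
	       get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile_compressed);
}
#endif
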
uint64_t
get_task_purgeable_size(task_t task)
{
	kern_return_t ret;
	ledger_amount_t balance = 0;
	uint64_t volatile_size = 0;

	ret = ledger_get_balance(task->ledger, task_ledgers.purgeable_volatile, &balance);
	if (ret != KERN_SUCCESS) {
		return 0;
	}

	volatile_size += balance;

	ret = ledger_get_balance(task->ledger, task_ledgers.purgeable_volatile_compressed, &balance);
	if (ret != KERN_SUCCESS) {
		return 0;
	}

	volatile_size += balance;

	return volatile_size;
}

uint64_t
get_task_phys_footprint(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.phys_footprint);
}

#if CONFIG_LEDGER_INTERVAL_MAX
uint64_t
get_task_phys_footprint_interval_max(task_t task, int reset)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_interval_max(task->ledger, task_ledgers.phys_footprint, &max, reset);

	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}
#endif /* CONFIG_LEDGER_INTERVAL_MAX */

uint64_t
get_task_phys_footprint_lifetime_max(task_t task)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_lifetime_max(task->ledger, task_ledgers.phys_footprint, &max);

	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}

uint64_t
get_task_phys_footprint_limit(task_t task)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max);
	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}

uint64_t
get_task_internal(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.internal);
}

uint64_t
get_task_internal_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.internal_compressed);
}

uint64_t
get_task_purgeable_nonvolatile(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile);
}

uint64_t
get_task_purgeable_nonvolatile_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile_compressed);
}

uint64_t
get_task_alternate_accounting(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.alternate_accounting);
}

uint64_t
get_task_alternate_accounting_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.alternate_accounting_compressed);
}

uint64_t
get_task_page_table(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.page_table);
}

#if CONFIG_FREEZE
uint64_t
get_task_frozen_to_swap(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.frozen_to_swap);
}
#endif /* CONFIG_FREEZE */

uint64_t
get_task_iokit_mapped(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.iokit_mapped);
}

uint64_t
get_task_network_nonvolatile(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.network_nonvolatile);
}

uint64_t
get_task_network_nonvolatile_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.network_nonvolatile_compressed);
}

uint64_t
get_task_wired_mem(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.wired_mem);
}

uint64_t
get_task_tagged_footprint(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.tagged_footprint, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_tagged_footprint_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.tagged_footprint_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_media_footprint(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.media_footprint, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_media_footprint_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.media_footprint_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_graphics_footprint(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.graphics_footprint, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_graphics_footprint_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.graphics_footprint_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_neural_footprint(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.neural_footprint, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_neural_footprint_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.neural_footprint_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_cpu_time(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.cpu_time);
}

uint32_t
get_task_loadTag(task_t task)
{
	return os_atomic_load(&task->loadTag, relaxed);
}

uint32_t
set_task_loadTag(task_t task, uint32_t loadTag)
{
	return os_atomic_xchg(&task->loadTag, loadTag, relaxed);
}

task_t
get_threadtask(thread_t th)
{
	return th->task;
}

vm_map_offset_t
get_map_min(vm_map_t map)
{
	return vm_map_min(map);
}

vm_map_offset_t
get_map_max(vm_map_t map)
{
	return vm_map_max(map);
}

vm_map_size_t
get_vmmap_size(vm_map_t map)
{
	return vm_map_adjusted_size(map);
}

int
get_task_page_size(task_t task)
{
	return vm_map_page_size(task->map);
}

#if CONFIG_COREDUMP

static int
get_vmsubmap_entries(
	vm_map_t                map,
	vm_object_offset_t      start,
	vm_object_offset_t      end)
{
	int             total_entries = 0;
	vm_map_entry_t  entry;

	if (not_in_kdp) {
		vm_map_lock(map);
	}
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
			    get_vmsubmap_entries(VME_SUBMAP(entry),
			    VME_OFFSET(entry),
			    (VME_OFFSET(entry) +
			    entry->vme_end -
			    entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp) {
		vm_map_unlock(map);
	}
	return total_entries;
}

int
get_vmmap_entries(
	vm_map_t        map)
{
	int             total_entries = 0;
	vm_map_entry_t  entry;

	if (not_in_kdp) {
		vm_map_lock(map);
	}
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
			    get_vmsubmap_entries(VME_SUBMAP(entry),
			    VME_OFFSET(entry),
			    (VME_OFFSET(entry) +
			    entry->vme_end -
			    entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp) {
		vm_map_unlock(map);
	}
	return total_entries;
}
#endif /* CONFIG_COREDUMP */

int
get_task_userstop(
	task_t task)
{
	return task->user_stop_count;
}

int
get_thread_userstop(
	thread_t th)
{
	return th->user_stop_count;
}

boolean_t
get_task_pidsuspended(
	task_t task)
{
	return task->pidsuspended;
}

boolean_t
thread_should_abort(
	thread_t th)
{
	return (th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT;
}

/*
 * This routine is like thread_should_abort() above. It checks to
 * see if the current thread is aborted. But unlike above, it also
 * checks to see if the thread is safely aborted. If so, it returns
 * that fact, and clears the condition (safe aborts only should
 * have a single effect, and a poll of the abort status qualifies).
 */
boolean_t
current_thread_aborted(
	void)
{
	thread_t th = current_thread();
	spl_t s;

	if ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT &&
	    (th->options & TH_OPT_INTMASK) != THREAD_UNINT) {
		return TRUE;
	}
	if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
			th->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
		}
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}

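/*
 * Illustrative sketch only (hypothetical caller): because a safe abort is
 * cleared by the poll itself, each safe abort is reported to exactly one
 * caller of current_thread_aborted().
 */
#if 0
static int
example_interruptible_step(void)
{
	if (current_thread_aborted()) {
		return EINTR;   /* abort observed (and, if safe, cleared) */
	}
	return 0;
}
#endif
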
void
task_act_iterate_wth_args(
	task_t          task,
	void            (*func_callback)(thread_t, void *),
	void            *func_arg)
{
	thread_t        inc;

	task_lock(task);

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		(void) (*func_callback)(inc, func_arg);
		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	task_unlock(task);
}

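/*
 * Illustrative sketch only: a hypothetical callback pair showing how
 * task_act_iterate_wth_args() walks every thread in the task with the
 * task lock held. Both example_* names are assumptions.
 */
#if 0
static void
example_count_thread(thread_t thread __unused, void *arg)
{
	(*(int *)arg)++;
}

static int
example_count_threads(task_t task)
{
	int count = 0;

	task_act_iterate_wth_args(task, example_count_thread, &count);
	return count;
}
#endif
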
#include <sys/bsdtask_info.h>

void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo)
{
	vm_map_t map;
	task_absolutetime_info_data_t   tinfo;
	thread_t thread;
	uint32_t cswitch = 0, numrunning = 0;
	uint32_t syscalls_unix = 0;
	uint32_t syscalls_mach = 0;

	task_lock(task);

	map = (task == kernel_task)? kernel_map : task->map;

	ptinfo->pti_virtual_size = vm_map_adjusted_size(map);
	ptinfo->pti_resident_size =
	    (mach_vm_size_t)(pmap_resident_count(map->pmap))
	    * PAGE_SIZE_64;

	ptinfo->pti_policy = ((task != kernel_task)?
	    POLICY_TIMESHARE : POLICY_RR);

	tinfo.threads_user = tinfo.threads_system = 0;
	tinfo.total_user = task->total_user_time;
	tinfo.total_system = task->total_system_time;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		uint64_t    tval;
		spl_t x;

		if (thread->options & TH_OPT_IDLE_THREAD) {
			continue;
		}

		x = splsched();
		thread_lock(thread);

		if ((thread->state & TH_RUN) == TH_RUN) {
			numrunning++;
		}
		cswitch += thread->c_switch;
		tval = timer_grab(&thread->user_timer);
		tinfo.threads_user += tval;
		tinfo.total_user += tval;

		tval = timer_grab(&thread->system_timer);

		if (thread->precise_user_kernel_time) {
			tinfo.threads_system += tval;
			tinfo.total_system += tval;
		} else {
			/* system_timer may represent either sys or user */
			tinfo.threads_user += tval;
			tinfo.total_user += tval;
		}

		syscalls_unix += thread->syscalls_unix;
		syscalls_mach += thread->syscalls_mach;

		thread_unlock(thread);
		splx(x);
	}

	ptinfo->pti_total_system = tinfo.total_system;
	ptinfo->pti_total_user = tinfo.total_user;
	ptinfo->pti_threads_system = tinfo.threads_system;
	ptinfo->pti_threads_user = tinfo.threads_user;

	ptinfo->pti_faults = (int32_t) MIN(counter_load(&task->faults), INT32_MAX);
	ptinfo->pti_pageins = task->pageins;
	ptinfo->pti_cow_faults = task->cow_faults;
	ptinfo->pti_messages_sent = task->messages_sent;
	ptinfo->pti_messages_received = task->messages_received;
	ptinfo->pti_syscalls_mach = task->syscalls_mach + syscalls_mach;
	ptinfo->pti_syscalls_unix = task->syscalls_unix + syscalls_unix;
	ptinfo->pti_csw = task->c_switch + cswitch;
	ptinfo->pti_threadnum = task->thread_count;
	ptinfo->pti_numrunning = numrunning;
	ptinfo->pti_priority = task->priority;

	task_unlock(task);
}

int
fill_taskthreadinfo(task_t task, uint64_t thaddr, bool thuniqueid, struct proc_threadinfo_internal * ptinfo, void * vpp, int *vidp)
{
	thread_t thact;
	int err = 0;
	mach_msg_type_number_t count;
	thread_basic_info_data_t basic_info;
	kern_return_t kret;
	uint64_t addr = 0;

	task_lock(task);

	for (thact = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)thact);) {
		addr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
		if (addr == thaddr) {
			count = THREAD_BASIC_INFO_COUNT;
			if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
				err = 1;
				goto out;
			}
			ptinfo->pth_user_time = (((uint64_t)basic_info.user_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.user_time.microseconds * NSEC_PER_USEC));
			ptinfo->pth_system_time = (((uint64_t)basic_info.system_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.system_time.microseconds * NSEC_PER_USEC));

			ptinfo->pth_cpu_usage = basic_info.cpu_usage;
			ptinfo->pth_policy = basic_info.policy;
			ptinfo->pth_run_state = basic_info.run_state;
			ptinfo->pth_flags = basic_info.flags;
			ptinfo->pth_sleep_time = basic_info.sleep_time;
			ptinfo->pth_curpri = thact->sched_pri;
			ptinfo->pth_priority = thact->base_pri;
			ptinfo->pth_maxpriority = thact->max_priority;

			if ((vpp != NULL) && (thact->uthread != NULL)) {
				bsd_threadcdir(thact->uthread, vpp, vidp);
			}
			bsd_getthreadname(thact->uthread, ptinfo->pth_name);
			err = 0;
			goto out;
		}
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}
	err = 1;

out:
	task_unlock(task);
	return err;
}

int
fill_taskthreadlist(task_t task, void * buffer, int thcount, bool thuniqueid)
{
	int numthr = 0;
	thread_t thact;
	uint64_t * uptr;
	uint64_t  thaddr;

	uptr = (uint64_t *)buffer;

	task_lock(task);

	for (thact = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)thact);) {
		thaddr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
		*uptr++ = thaddr;
		numthr++;
		if (numthr >= thcount) {
			goto out;
		}
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}

out:
	task_unlock(task);
	return (int)(numthr * sizeof(uint64_t));
}

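/*
 * Illustrative sketch only (hypothetical caller): fill_taskthreadlist()
 * returns a byte count, so a buffer of thcount uint64_t slots suffices
 * and the entry count falls out by division.
 */
#if 0
static int
example_copy_thread_ids(task_t task, uint64_t *ids, int thcount)
{
	int bytes = fill_taskthreadlist(task, ids, thcount, true);

	return bytes / (int)sizeof(uint64_t);   /* entries actually filled */
}
#endif
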
int
get_numthreads(task_t task)
{
	return task->thread_count;
}

/*
 * Gather the various pieces of info about the designated task,
 * and collect it all into a single rusage_info.
 */
int
fill_task_rusage(task_t task, rusage_info_current *ri)
{
	struct task_power_info powerinfo;

	uint64_t runnable_time = 0;

	assert(task != TASK_NULL);
	task_lock(task);

	task_power_info_locked(task, &powerinfo, NULL, NULL, &runnable_time);
	ri->ri_pkg_idle_wkups = powerinfo.task_platform_idle_wakeups;
	ri->ri_interrupt_wkups = powerinfo.task_interrupt_wakeups;
	ri->ri_user_time = powerinfo.total_user;
	ri->ri_system_time = powerinfo.total_system;
	ri->ri_runnable_time = runnable_time;

	ri->ri_phys_footprint = get_task_phys_footprint(task);
	ledger_get_balance(task->ledger, task_ledgers.phys_mem,
	    (ledger_amount_t *)&ri->ri_resident_size);
	ri->ri_wired_size = get_task_wired_mem(task);

	ri->ri_pageins = task->pageins;

	task_unlock(task);
	return 0;
}

void
fill_task_billed_usage(task_t task __unused, rusage_info_current *ri)
{
	bank_billed_balance_safe(task, &ri->ri_billed_system_time, &ri->ri_billed_energy);
	bank_serviced_balance_safe(task, &ri->ri_serviced_system_time, &ri->ri_serviced_energy);
}

int
fill_task_io_rusage(task_t task, rusage_info_current *ri)
{
	assert(task != TASK_NULL);
	task_lock(task);

	if (task->task_io_stats) {
		ri->ri_diskio_bytesread = task->task_io_stats->disk_reads.size;
		ri->ri_diskio_byteswritten = (task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size);
	} else {
		/* I/O Stats unavailable */
		ri->ri_diskio_bytesread = 0;
		ri->ri_diskio_byteswritten = 0;
	}
	task_unlock(task);
	return 0;
}

int
fill_task_qos_rusage(task_t task, rusage_info_current *ri)
{
	thread_t thread;

	assert(task != TASK_NULL);
	task_lock(task);

	/* Rollup the QoS time of all the threads to the task */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		if (thread->options & TH_OPT_IDLE_THREAD) {
			continue;
		}

		thread_update_qos_cpu_time(thread);
	}
	ri->ri_cpu_time_qos_default = task->cpu_time_eqos_stats.cpu_time_qos_default;
	ri->ri_cpu_time_qos_maintenance = task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
	ri->ri_cpu_time_qos_background = task->cpu_time_eqos_stats.cpu_time_qos_background;
	ri->ri_cpu_time_qos_utility = task->cpu_time_eqos_stats.cpu_time_qos_utility;
	ri->ri_cpu_time_qos_legacy = task->cpu_time_eqos_stats.cpu_time_qos_legacy;
	ri->ri_cpu_time_qos_user_initiated = task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
	ri->ri_cpu_time_qos_user_interactive = task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;

	task_unlock(task);
	return 0;
}

void
fill_task_monotonic_rusage(task_t task, rusage_info_current *ri)
{
#if MONOTONIC
	if (!mt_core_supported) {
		return;
	}

	assert(task != TASK_NULL);

	uint64_t counts[MT_CORE_NFIXED] = { 0 };
	mt_fixed_task_counts(task, counts);
#ifdef MT_CORE_INSTRS
	ri->ri_instructions = counts[MT_CORE_INSTRS];
#endif /* defined(MT_CORE_INSTRS) */
	ri->ri_cycles = counts[MT_CORE_CYCLES];
#else /* MONOTONIC */
#pragma unused(task, ri)
#endif /* !MONOTONIC */
}

uint64_t
get_task_logical_writes(task_t task, boolean_t external)
{
	assert(task != TASK_NULL);
	struct ledger_entry_info lei;

	task_lock(task);

	if (external == FALSE) {
		ledger_get_entry_info(task->ledger, task_ledgers.logical_writes, &lei);
	} else {
		ledger_get_entry_info(task->ledger, task_ledgers.logical_writes_to_external, &lei);
	}

	task_unlock(task);
	return lei.lei_balance;
}

uint64_t
get_task_dispatchqueue_serialno_offset(task_t task)
{
	uint64_t dq_serialno_offset = 0;

	if (task->bsd_info) {
		dq_serialno_offset = get_dispatchqueue_serialno_offset_from_proc(task->bsd_info);
	}

	return dq_serialno_offset;
}

uint64_t
get_task_dispatchqueue_label_offset(task_t task)
{
	uint64_t dq_label_offset = 0;

	if (task->bsd_info) {
		dq_label_offset = get_dispatchqueue_label_offset_from_proc(task->bsd_info);
	}

	return dq_label_offset;
}

uint64_t
get_task_uniqueid(task_t task)
{
	if (task->bsd_info) {
		return proc_uniqueid(task->bsd_info);
	} else {
		return UINT64_MAX;
	}
}

int
get_task_version(task_t task)
{
	if (task->bsd_info) {
		return proc_pidversion(task->bsd_info);
	} else {
		return INT_MAX;
	}
}

#if CONFIG_MACF
struct label *
get_task_crash_label(task_t task)
{
	return task->crash_label;
}
#endif /* CONFIG_MACF */

int
fill_taskipctableinfo(task_t task, uint32_t *table_size, uint32_t *table_free)
{
	ipc_space_t space = task->itk_space;
	if (space == NULL) {
		return -1;
	}

	is_read_lock(space);
	if (!is_active(space)) {
		is_read_unlock(space);
		return -1;
	}

	*table_size = space->is_table_size;
	*table_free = space->is_table_free;

	is_read_unlock(space);

	return 0;
}

int
get_task_cdhash(task_t task, char cdhash[static CS_CDHASH_LEN])
{
	int result = 0;

	task_lock(task);
	result = task->bsd_info ? proc_getcdhash(task->bsd_info, cdhash) : ESRCH;
	task_unlock(task);

	return result;
}