/*
 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/machine/vm_param.h>
#include <mach/task.h>

#include <kern/kern_types.h>
#include <kern/ledger.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/spl.h>
#include <kern/ast.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h> /* last */
#include <sys/resource.h>
#include <sys/signal.h>

#if MONOTONIC
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* MONOTONIC */

#include <machine/limits.h>

#undef thread_should_halt

/* BSD KERN COMPONENT INTERFACE */
extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */

thread_t get_firstthread(task_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
kern_return_t get_signalact(task_t, thread_t *, int);
int fill_task_rusage(task_t task, rusage_info_current *ri);
int fill_task_io_rusage(task_t task, rusage_info_current *ri);
int fill_task_qos_rusage(task_t task, rusage_info_current *ri);
void fill_task_monotonic_rusage(task_t task, rusage_info_current *ri);
uint64_t get_task_logical_writes(task_t task);
void fill_task_billed_usage(task_t task, rusage_info_current *ri);
void task_bsdtask_kill(task_t);

extern uint64_t get_dispatchqueue_serialno_offset_from_proc(void *p);
extern uint64_t proc_uniqueid(void *p);
extern int proc_pidversion(void *p);

extern void psignal(void *, int);
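/*
 * Note: the BSD proc pointers crossing this interface are opaque (void *)
 * by design; osfmk cannot see BSD's struct proc, so the externs above are
 * the only typed contract between the Mach and BSD layers.
 */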
void *
get_bsdtask_info(task_t t)
{
	return(t->bsd_info);
}

void
task_bsdtask_kill(task_t t)
{
	void *bsd_info = get_bsdtask_info(t);
	if (bsd_info != NULL) {
		psignal(bsd_info, SIGKILL);
	}
}

void *
get_bsdthreadtask_info(thread_t th)
{
	return(th->task != TASK_NULL ? th->task->bsd_info : NULL);
}

void
set_bsdtask_info(task_t t, void *v)
{
	t->bsd_info = v;
}

void *
get_bsdthread_info(thread_t th)
{
	return(th->uthread);
}

int get_thread_lock_count(thread_t th); /* forced forward */
int
get_thread_lock_count(thread_t th)
{
	return(th->mutex_count);
}
/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here.  We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_t
get_firstthread(task_t task)
{
	thread_t thread = (thread_t)(void *)queue_first(&task->threads);

	if (queue_end(&task->threads, (queue_entry_t)thread))
		thread = THREAD_NULL;

	if (!task->active)
		return (THREAD_NULL);

	return (thread);
}
kern_return_t
get_signalact(
	task_t		task,
	thread_t	*result_out,
	int		setast)
{
	kern_return_t	result = KERN_SUCCESS;
	thread_t	inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	     !queue_end(&task->threads, (queue_entry_t)inc); ) {
		thread_mtx_lock(inc);
		if (inc->active &&
		    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result_out)
		*result_out = thread;

	if (thread) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	} else
		result = KERN_FAILURE;

	task_unlock(task);

	return (result);
}
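/*
 * Illustrative caller sketch (hypothetical, not part of this file): the BSD
 * signal delivery path would typically pick a recipient thread like this:
 *
 *	thread_t sig_thread;
 *	if (get_signalact(task, &sig_thread, 1) == KERN_SUCCESS) {
 *		// sig_thread now has AST_BSD set and will check for
 *		// pending signals on its way back to user space.
 *	}
 */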
kern_return_t
check_actforsig(
	task_t		task,
	thread_t	thread,
	int		setast)
{
	kern_return_t	result = KERN_FAILURE;
	thread_t	inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	     !queue_end(&task->threads, (queue_entry_t)inc); ) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			if (inc->active &&
			    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return (result);
}
ledger_t
get_task_ledger(task_t t)
{
	return(t->ledger);
}

/*
 * This is only safe to call from a thread executing in
 * the task's context or if the task is locked. Otherwise,
 * the map could be switched for the task (and freed) before
 * we go to return it here.
 */
vm_map_t
get_task_map(task_t t)
{
	return(t->map);
}
vm_map_t
get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL)
		return VM_MAP_NULL;

	task_lock(t);
	if (!t->active) {
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	vm_map_reference_swap(m);
	task_unlock(t);
	return m;
}
ipc_space_t
get_task_ipcspace(task_t t)
{
	return(t->itk_space);
}

int
get_task_numactivethreads(task_t task)
{
	thread_t inc;
	int num_active_thr = 0;

	task_lock(task);

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	     !queue_end(&task->threads, (queue_entry_t)inc);
	     inc = (thread_t)(void *)queue_next(&inc->task_threads)) {
		if (inc->active)
			num_active_thr++;
	}
	task_unlock(task);
	return num_active_thr;
}

int
get_task_numacts(task_t t)
{
	return(t->thread_count);
}
/* does this machine need 64bit register set for signal handler */
int
is_64signalregset(void)
{
	if (task_has_64Bit_data(current_task())) {
		return 1;
	}

	return 0;
}
/*
 * Swap in a new map for the task/thread pair; the old map reference is
 * returned. Also does a pmap switch if the thread provided is the current
 * thread.
 */
vm_map_t
swap_task_map(task_t task, thread_t thread, vm_map_t map)
{
	vm_map_t old_map;
	boolean_t doswitch = (thread == current_thread()) ? TRUE : FALSE;

	if (task != thread->task)
		panic("swap_task_map");

	task_lock(task);
	mp_disable_preemption();

	old_map = task->map;
	thread->map = task->map = map;
	vm_commit_pagezero_status(map);

	if (doswitch) {
#if defined(__arm__) || defined(__arm64__)
		PMAP_SWITCH_USER(thread, map, cpu_number())
#else
		pmap_switch(map->pmap);
#endif
	}
	mp_enable_preemption();
	task_unlock(task);

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
	inval_copy_windows(thread);
#endif

	return old_map;
}
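/*
 * Preemption stays disabled across the map/pmap switch above so the thread
 * cannot be migrated between updating thread->map and switching the hardware
 * pmap; otherwise it could briefly run on a stale address space.
 */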
/*
 * This is only safe to call from a thread executing in
 * the task's context or if the task is locked. Otherwise,
 * the map could be switched for the task (and freed) before
 * we go to return it here.
 */
pmap_t
get_task_pmap(task_t t)
{
	return(t->map->pmap);
}
uint64_t
get_task_resident_size(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map : task->map;
	return((uint64_t)pmap_resident_count(map->pmap) * PAGE_SIZE_64);
}

uint64_t
get_task_compressed(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map : task->map;
	return((uint64_t)pmap_compressed(map->pmap) * PAGE_SIZE_64);
}

uint64_t
get_task_resident_max(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map : task->map;
	return((uint64_t)pmap_resident_max(map->pmap) * PAGE_SIZE_64);
}
uint64_t
get_task_purgeable_size(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;
	uint64_t volatile_size = 0;

	ret = ledger_get_entries(task->ledger, task_ledgers.purgeable_volatile, &credit, &debit);
	if (ret != KERN_SUCCESS) {
		return 0;
	}

	volatile_size += (credit - debit);

	ret = ledger_get_entries(task->ledger, task_ledgers.purgeable_volatile_compressed, &credit, &debit);
	if (ret != KERN_SUCCESS) {
		return 0;
	}

	volatile_size += (credit - debit);

	return volatile_size;
}

uint64_t
get_task_phys_footprint(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.phys_footprint, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}
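/*
 * The get_task_* accessors below all follow the ledger pattern seen above:
 * fetch the (credit, debit) pair for one ledger entry and report the balance
 * as credit - debit, returning 0 when the ledger read fails. An illustrative
 * (hypothetical) caller:
 *
 *	uint64_t footprint = get_task_phys_footprint(current_task());
 */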
#if CONFIG_LEDGER_INTERVAL_MAX
uint64_t
get_task_phys_footprint_interval_max(task_t task, int reset)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_interval_max(task->ledger, task_ledgers.phys_footprint, &max, reset);
	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}
#endif /* CONFIG_LEDGER_INTERVAL_MAX */

uint64_t
get_task_phys_footprint_lifetime_max(task_t task)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_lifetime_max(task->ledger, task_ledgers.phys_footprint, &max);
	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}

uint64_t
get_task_phys_footprint_limit(task_t task)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max);
	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}
uint64_t
get_task_internal(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.internal, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

uint64_t
get_task_internal_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.internal_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

uint64_t
get_task_purgeable_nonvolatile(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.purgeable_nonvolatile, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

uint64_t
get_task_purgeable_nonvolatile_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.purgeable_nonvolatile_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}
uint64_t
get_task_alternate_accounting(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.alternate_accounting, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

uint64_t
get_task_alternate_accounting_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.alternate_accounting_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

uint64_t
get_task_page_table(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.page_table, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

uint64_t
get_task_iokit_mapped(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.iokit_mapped, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}
uint64_t
get_task_network_nonvolatile(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.network_nonvolatile, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

uint64_t
get_task_network_nonvolatile_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.network_nonvolatile_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

uint64_t
get_task_wired_mem(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.wired_mem, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

uint64_t
get_task_cpu_time(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.cpu_time, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}
task_t
get_threadtask(thread_t th)
{
	return(th->task);
}

vm_map_offset_t
get_map_min(vm_map_t map)
{
	return(vm_map_min(map));
}

vm_map_offset_t
get_map_max(vm_map_t map)
{
	return(vm_map_max(map));
}
#if CONFIG_COREDUMP

static int
get_vmsubmap_entries(
	vm_map_t map,
	vm_object_offset_t start,
	vm_object_offset_t end)
{
	int total_entries = 0;
	vm_map_entry_t entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(VME_SUBMAP(entry),
					VME_OFFSET(entry),
					(VME_OFFSET(entry) +
					entry->vme_end -
					entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}

int
get_vmmap_entries(
	vm_map_t map)
{
	int total_entries = 0;
	vm_map_entry_t entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(VME_SUBMAP(entry),
					VME_OFFSET(entry),
					(VME_OFFSET(entry) +
					entry->vme_end -
					entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}
#endif /* CONFIG_COREDUMP */
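/*
 * The two entry counters above appear to exist for the core dump writer
 * (hence the CONFIG_COREDUMP guard); submaps are counted recursively so a
 * caller sees one total across the whole map.
 */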
int
get_task_userstop(task_t task)
{
	return(task->user_stop_count);
}

int
get_thread_userstop(thread_t th)
{
	return(th->user_stop_count);
}

boolean_t
get_task_pidsuspended(task_t task)
{
	return (task->pidsuspended);
}

boolean_t
get_task_frozen(task_t task)
{
	return (task->frozen);
}

boolean_t
thread_should_abort(thread_t th)
{
	return ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT);
}
/*
 * This routine is like thread_should_abort() above.  It checks to
 * see if the current thread is aborted.  But unlike above, it also
 * checks to see if the thread is safely aborted.  If so, it returns
 * that fact, and clears the condition (safe aborts only should
 * have a single effect, and a poll of the abort status qualifies).
 */
boolean_t
current_thread_aborted(void)
{
	thread_t th = current_thread();
	spl_t s;

	if ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT &&
	    (th->options & TH_OPT_INTMASK) != THREAD_UNINT)
		return (TRUE);
	if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->sched_flags & TH_SFLAG_ABORTSAFELY)
			th->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}
void
task_act_iterate_wth_args(
	task_t		task,
	void		(*func_callback)(thread_t, void *),
	void		*func_arg)
{
	thread_t	inc;

	task_lock(task);

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	     !queue_end(&task->threads, (queue_entry_t)inc); ) {
		(void) (*func_callback)(inc, func_arg);
		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	task_unlock(task);
}
#include <sys/bsdtask_info.h>

void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo)
{
	vm_map_t map;
	task_absolutetime_info_data_t tinfo;
	thread_t thread;
	uint32_t cswitch = 0, numrunning = 0;
	uint32_t syscalls_unix = 0;
	uint32_t syscalls_mach = 0;

	task_lock(task);

	map = (task == kernel_task) ? kernel_map : task->map;

	ptinfo->pti_virtual_size = map->size;
	ptinfo->pti_resident_size =
		(mach_vm_size_t)(pmap_resident_count(map->pmap))
		* PAGE_SIZE_64;

	ptinfo->pti_policy = ((task != kernel_task) ?
		POLICY_TIMESHARE : POLICY_RR);

	tinfo.threads_user = tinfo.threads_system = 0;
	tinfo.total_user = task->total_user_time;
	tinfo.total_system = task->total_system_time;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		uint64_t tval;
		spl_t x;

		if (thread->options & TH_OPT_IDLE_THREAD)
			continue;

		x = splsched();
		thread_lock(thread);

		if ((thread->state & TH_RUN) == TH_RUN)
			numrunning++;
		cswitch += thread->c_switch;
		tval = timer_grab(&thread->user_timer);
		tinfo.threads_user += tval;
		tinfo.total_user += tval;

		tval = timer_grab(&thread->system_timer);

		if (thread->precise_user_kernel_time) {
			tinfo.threads_system += tval;
			tinfo.total_system += tval;
		} else {
			/* system_timer may represent either sys or user */
			tinfo.threads_user += tval;
			tinfo.total_user += tval;
		}

		syscalls_unix += thread->syscalls_unix;
		syscalls_mach += thread->syscalls_mach;

		thread_unlock(thread);
		splx(x);
	}

	ptinfo->pti_total_system = tinfo.total_system;
	ptinfo->pti_total_user = tinfo.total_user;
	ptinfo->pti_threads_system = tinfo.threads_system;
	ptinfo->pti_threads_user = tinfo.threads_user;

	ptinfo->pti_faults = task->faults;
	ptinfo->pti_pageins = task->pageins;
	ptinfo->pti_cow_faults = task->cow_faults;
	ptinfo->pti_messages_sent = task->messages_sent;
	ptinfo->pti_messages_received = task->messages_received;
	ptinfo->pti_syscalls_mach = task->syscalls_mach + syscalls_mach;
	ptinfo->pti_syscalls_unix = task->syscalls_unix + syscalls_unix;
	ptinfo->pti_csw = task->c_switch + cswitch;
	ptinfo->pti_threadnum = task->thread_count;
	ptinfo->pti_numrunning = numrunning;
	ptinfo->pti_priority = task->priority;

	task_unlock(task);
}
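/*
 * Note on the precise_user_kernel_time test above: on configurations that do
 * not keep separate user/kernel timers for a thread, system_timer accumulates
 * both, so its value is credited to user time rather than system time (see
 * the comment inside the loop).
 */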
int
fill_taskthreadinfo(task_t task, uint64_t thaddr, bool thuniqueid, struct proc_threadinfo_internal * ptinfo, void * vpp, int *vidp)
{
	thread_t thact;
	int err = 0;
	mach_msg_type_number_t count;
	thread_basic_info_data_t basic_info;
	kern_return_t kret;
	uint64_t addr = 0;

	task_lock(task);

	for (thact = (thread_t)(void *)queue_first(&task->threads);
	     !queue_end(&task->threads, (queue_entry_t)thact); ) {
		addr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
		if (addr == thaddr) {
			count = THREAD_BASIC_INFO_COUNT;
			if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
				err = 1;
				goto out;
			}
			ptinfo->pth_user_time = ((basic_info.user_time.seconds * (integer_t)NSEC_PER_SEC) + (basic_info.user_time.microseconds * (integer_t)NSEC_PER_USEC));
			ptinfo->pth_system_time = ((basic_info.system_time.seconds * (integer_t)NSEC_PER_SEC) + (basic_info.system_time.microseconds * (integer_t)NSEC_PER_USEC));

			ptinfo->pth_cpu_usage = basic_info.cpu_usage;
			ptinfo->pth_policy = basic_info.policy;
			ptinfo->pth_run_state = basic_info.run_state;
			ptinfo->pth_flags = basic_info.flags;
			ptinfo->pth_sleep_time = basic_info.sleep_time;
			ptinfo->pth_curpri = thact->sched_pri;
			ptinfo->pth_priority = thact->base_pri;
			ptinfo->pth_maxpriority = thact->max_priority;

			if ((vpp != NULL) && (thact->uthread != NULL))
				bsd_threadcdir(thact->uthread, vpp, vidp);
			bsd_getthreadname(thact->uthread, ptinfo->pth_name);
			err = 0;
			goto out;
		}
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}
	err = 1;

out:
	task_unlock(task);
	return (err);
}
int
fill_taskthreadlist(task_t task, void * buffer, int thcount, bool thuniqueid)
{
	int numthr = 0;
	uint64_t *uptr;
	uint64_t thaddr;
	thread_t thact;

	uptr = (uint64_t *)buffer;

	task_lock(task);

	for (thact = (thread_t)(void *)queue_first(&task->threads);
	     !queue_end(&task->threads, (queue_entry_t)thact); ) {
		thaddr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
		*uptr++ = thaddr;
		numthr++;
		if (numthr >= thcount)
			break;
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}

	task_unlock(task);
	return (int)(numthr * sizeof(uint64_t));
}

int
get_numthreads(task_t task)
{
	return(task->thread_count);
}
/*
 * Gather the various pieces of info about the designated task,
 * and collect it all into a single rusage_info.
 */
int
fill_task_rusage(task_t task, rusage_info_current *ri)
{
	struct task_power_info powerinfo;

	assert(task != TASK_NULL);
	task_lock(task);

	task_power_info_locked(task, &powerinfo, NULL, NULL);
	ri->ri_pkg_idle_wkups = powerinfo.task_platform_idle_wakeups;
	ri->ri_interrupt_wkups = powerinfo.task_interrupt_wakeups;
	ri->ri_user_time = powerinfo.total_user;
	ri->ri_system_time = powerinfo.total_system;

	ledger_get_balance(task->ledger, task_ledgers.phys_footprint,
	    (ledger_amount_t *)&ri->ri_phys_footprint);
	ledger_get_balance(task->ledger, task_ledgers.phys_mem,
	    (ledger_amount_t *)&ri->ri_resident_size);
	ledger_get_balance(task->ledger, task_ledgers.wired_mem,
	    (ledger_amount_t *)&ri->ri_wired_size);

	ri->ri_pageins = task->pageins;

	task_unlock(task);
	return (0);
}
void
fill_task_billed_usage(task_t task __unused, rusage_info_current *ri)
{
	bank_billed_balance_safe(task, &ri->ri_billed_system_time, &ri->ri_billed_energy);
	bank_serviced_balance_safe(task, &ri->ri_serviced_system_time, &ri->ri_serviced_energy);
}
int
fill_task_io_rusage(task_t task, rusage_info_current *ri)
{
	assert(task != TASK_NULL);
	task_lock(task);

	if (task->task_io_stats) {
		ri->ri_diskio_bytesread = task->task_io_stats->disk_reads.size;
		ri->ri_diskio_byteswritten = (task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size);
	} else {
		/* I/O Stats unavailable */
		ri->ri_diskio_bytesread = 0;
		ri->ri_diskio_byteswritten = 0;
	}
	task_unlock(task);
	return (0);
}
int
fill_task_qos_rusage(task_t task, rusage_info_current *ri)
{
	thread_t thread;

	assert(task != TASK_NULL);
	task_lock(task);

	/* Rollup QoS time of all the threads to task */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		if (thread->options & TH_OPT_IDLE_THREAD)
			continue;

		thread_update_qos_cpu_time(thread);
	}
	ri->ri_cpu_time_qos_default = task->cpu_time_eqos_stats.cpu_time_qos_default;
	ri->ri_cpu_time_qos_maintenance = task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
	ri->ri_cpu_time_qos_background = task->cpu_time_eqos_stats.cpu_time_qos_background;
	ri->ri_cpu_time_qos_utility = task->cpu_time_eqos_stats.cpu_time_qos_utility;
	ri->ri_cpu_time_qos_legacy = task->cpu_time_eqos_stats.cpu_time_qos_legacy;
	ri->ri_cpu_time_qos_user_initiated = task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
	ri->ri_cpu_time_qos_user_interactive = task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;

	task_unlock(task);
	return (0);
}
void
fill_task_monotonic_rusage(task_t task, rusage_info_current *ri)
{
#if MONOTONIC
	if (!mt_core_supported) {
		return;
	}

	assert(task != TASK_NULL);

	uint64_t counts[MT_CORE_NFIXED] = {};
	mt_fixed_task_counts(task, counts);
#ifdef MT_CORE_INSTRS
	ri->ri_instructions = counts[MT_CORE_INSTRS];
#endif /* defined(MT_CORE_INSTRS) */
	ri->ri_cycles = counts[MT_CORE_CYCLES];
#else /* MONOTONIC */
#pragma unused(task, ri)
#endif /* !MONOTONIC */
}
uint64_t
get_task_logical_writes(task_t task)
{
	assert(task != TASK_NULL);
	struct ledger_entry_info lei;

	task_lock(task);
	ledger_get_entry_info(task->ledger, task_ledgers.logical_writes, &lei);
	task_unlock(task);

	return lei.lei_balance;
}
uint64_t
get_task_dispatchqueue_serialno_offset(task_t task)
{
	uint64_t dq_serialno_offset = 0;

	if (task->bsd_info) {
		dq_serialno_offset = get_dispatchqueue_serialno_offset_from_proc(task->bsd_info);
	}

	return dq_serialno_offset;
}
uint64_t
get_task_uniqueid(task_t task)
{
	if (task->bsd_info) {
		return proc_uniqueid(task->bsd_info);
	} else {
		return UINT64_MAX;
	}
}

int
get_task_version(task_t task)
{
	if (task->bsd_info) {
		return proc_pidversion(task->bsd_info);
	} else {
		return INT_MAX;
	}
}

struct label *
get_task_crash_label(task_t task)
{
	return task->crash_label;
}