+int
+fill_taskthreadinfo(task_t task, uint64_t thaddr, bool thuniqueid, struct proc_threadinfo_internal * ptinfo, void * vpp, int *vidp)
+{
+	thread_t thact;
+	int err = 0;
+	mach_msg_type_number_t count;
+	thread_basic_info_data_t basic_info;
+	kern_return_t kret;
+	uint64_t addr = 0;
+
+	task_lock(task);
+
+	for (thact = (thread_t)(void *)queue_first(&task->threads);
+	    !queue_end(&task->threads, (queue_entry_t)thact);) {
+		/*
+		 * Match either the thread's unique id or its user-space
+		 * pthread handle (cthread_self), depending on thuniqueid.
+		 */
+		addr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
+		if (addr == thaddr) {
+			count = THREAD_BASIC_INFO_COUNT;
+			if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
+				err = 1;
+				goto out;
+			}
+			/* Convert the (seconds, microseconds) pairs to nanoseconds. */
+			ptinfo->pth_user_time = (((uint64_t)basic_info.user_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.user_time.microseconds * NSEC_PER_USEC));
+			ptinfo->pth_system_time = (((uint64_t)basic_info.system_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.system_time.microseconds * NSEC_PER_USEC));
+
+			ptinfo->pth_cpu_usage = basic_info.cpu_usage;
+			ptinfo->pth_policy = basic_info.policy;
+			ptinfo->pth_run_state = basic_info.run_state;
+			ptinfo->pth_flags = basic_info.flags;
+			ptinfo->pth_sleep_time = basic_info.sleep_time;
+			ptinfo->pth_curpri = thact->sched_pri;
+			ptinfo->pth_priority = thact->base_pri;
+			ptinfo->pth_maxpriority = thact->max_priority;
+
+			if ((vpp != NULL) && (thact->uthread != NULL)) {
+				bsd_threadcdir(thact->uthread, vpp, vidp);
+			}
+			bsd_getthreadname(thact->uthread, ptinfo->pth_name);
+			err = 0;
+			goto out;
+		}
+		thact = (thread_t)(void *)queue_next(&thact->task_threads);
+	}
+	/* No thread with the requested identifier was found. */
+	err = 1;
+
+out:
+	task_unlock(task);
+	return err;
+}
+
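+/*
+ * Copy one 64-bit identifier per thread into the caller-supplied buffer:
+ * the thread's unique id when thuniqueid is set, otherwise its user-space
+ * pthread handle (cthread_self).  At most thcount entries are written;
+ * the return value is the number of bytes filled in.
+ */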
+int
+fill_taskthreadlist(task_t task, void * buffer, int thcount, bool thuniqueid)
+{
+	int numthr = 0;
+	thread_t thact;
+	uint64_t * uptr;
+	uint64_t thaddr;
+
+	uptr = (uint64_t *)buffer;
+
+	task_lock(task);
+
+	for (thact = (thread_t)(void *)queue_first(&task->threads);
+	    !queue_end(&task->threads, (queue_entry_t)thact);) {
+		thaddr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
+		*uptr++ = thaddr;
+		numthr++;
+		if (numthr >= thcount) {
+			goto out;
+		}
+		thact = (thread_t)(void *)queue_next(&thact->task_threads);
+	}
+
+out:
+	task_unlock(task);
+	return (int)(numthr * sizeof(uint64_t));
+}
+
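+/* Return the number of threads currently in the task. */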
+int
+get_numthreads(task_t task)
+{
+	return task->thread_count;
+}
+
+/*
+ * Gather the various pieces of info about the designated task,
+ * and collect it all into a single rusage_info.
+ */
+int
+fill_task_rusage(task_t task, rusage_info_current *ri)
+{
+	struct task_power_info powerinfo;
+
+	uint64_t runnable_time = 0;
+
+	assert(task != TASK_NULL);
+	task_lock(task);
+
+	task_power_info_locked(task, &powerinfo, NULL, NULL, &runnable_time);
+	ri->ri_pkg_idle_wkups = powerinfo.task_platform_idle_wakeups;
+	ri->ri_interrupt_wkups = powerinfo.task_interrupt_wakeups;
+	ri->ri_user_time = powerinfo.total_user;
+	ri->ri_system_time = powerinfo.total_system;
+	ri->ri_runnable_time = runnable_time;
+
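+	/* Memory usage figures are taken from the task's ledgers. */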
+	ledger_get_balance(task->ledger, task_ledgers.phys_footprint,
+	    (ledger_amount_t *)&ri->ri_phys_footprint);
+	ledger_get_balance(task->ledger, task_ledgers.phys_mem,
+	    (ledger_amount_t *)&ri->ri_resident_size);
+	ledger_get_balance(task->ledger, task_ledgers.wired_mem,
+	    (ledger_amount_t *)&ri->ri_wired_size);
+
+	ri->ri_pageins = task->pageins;
+
+	task_unlock(task);
+	return 0;
+}
+
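+/*
+ * Fill in the billed and serviced system-time and energy balances for the
+ * task, as maintained by the bank (voucher accounting) subsystem.
+ */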
+void
+fill_task_billed_usage(task_t task, rusage_info_current *ri)
+{
+	bank_billed_balance_safe(task, &ri->ri_billed_system_time, &ri->ri_billed_energy);
+	bank_serviced_balance_safe(task, &ri->ri_serviced_system_time, &ri->ri_serviced_energy);
+}
+
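+/*
+ * Fill in the disk I/O byte counts from the task's accumulated I/O
+ * statistics, or zero them if no statistics are available.
+ */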
+int
+fill_task_io_rusage(task_t task, rusage_info_current *ri)
+{
+	assert(task != TASK_NULL);
+	task_lock(task);
+
+	if (task->task_io_stats) {
+		ri->ri_diskio_bytesread = task->task_io_stats->disk_reads.size;
+		/* Bytes written are derived as total disk I/O minus bytes read. */
+		ri->ri_diskio_byteswritten = (task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size);
+	} else {
+		/* I/O Stats unavailable */
+		ri->ri_diskio_bytesread = 0;
+		ri->ri_diskio_byteswritten = 0;
+	}
+	task_unlock(task);
+	return 0;
+}
+
+int
+fill_task_qos_rusage(task_t task, rusage_info_current *ri)