apple/xnu.git: osfmk/kern/bsd_kern.c (commit c2edb90698487d9979142a53aad9160541baf997)
1 /*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <mach/mach_types.h>
29 #include <mach/machine/vm_param.h>
30 #include <mach/task.h>
31
32 #include <kern/kern_types.h>
33 #include <kern/ledger.h>
34 #include <kern/processor.h>
35 #include <kern/thread.h>
36 #include <kern/task.h>
37 #include <kern/spl.h>
38 #include <kern/ast.h>
39 #include <ipc/ipc_port.h>
40 #include <ipc/ipc_object.h>
41 #include <vm/vm_map.h>
42 #include <vm/vm_kern.h>
43 #include <vm/pmap.h>
44 #include <vm/vm_protos.h> /* last */
45 #include <sys/resource.h>
46
47 #undef thread_should_halt
48
49 /* BSD KERN COMPONENT INTERFACE */
50
51 task_t bsd_init_task = TASK_NULL;
52 boolean_t init_task_died;
53 char init_task_failure_data[1024];
54 extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */
55
56 thread_t get_firstthread(task_t);
57 int get_task_userstop(task_t);
58 int get_thread_userstop(thread_t);
59 boolean_t current_thread_aborted(void);
60 void task_act_iterate_wth_args(task_t, void(*)(thread_t, void *), void *);
61 kern_return_t get_signalact(task_t , thread_t *, int);
62 int get_vmsubmap_entries(vm_map_t, vm_object_offset_t, vm_object_offset_t);
63 int fill_task_rusage(task_t task, rusage_info_current *ri);
64 int fill_task_io_rusage(task_t task, rusage_info_current *ri);
65 int fill_task_qos_rusage(task_t task, rusage_info_current *ri);
66 void fill_task_billed_usage(task_t task, rusage_info_current *ri);
67
68 /*
69 * Return the BSD (proc) info pointer attached to a Mach task.
70 */
71 void *get_bsdtask_info(task_t t)
72 {
73 return(t->bsd_info);
74 }
75
76 /*
77 * Return the BSD (proc) info pointer of the thread's task, or NULL if the thread has no task.
78 */
79 void *get_bsdthreadtask_info(thread_t th)
80 {
81 return(th->task != TASK_NULL ? th->task->bsd_info : NULL);
82 }
83
84 /*
85 * Attach a BSD (proc) info pointer to a Mach task.
86 */
87 void set_bsdtask_info(task_t t,void * v)
88 {
89 t->bsd_info=v;
90 }
91
92 /*
93 * Return the BSD uthread structure attached to a Mach thread.
94 */
95 void *get_bsdthread_info(thread_t th)
96 {
97 return(th->uthread);
98 }
99
100 /*
101 * XXX: returns the thread's mutex hold count (th->mutex_count).
102 */
103 int get_thread_lock_count(thread_t th); /* forced forward */
104 int get_thread_lock_count(thread_t th)
105 {
106 return(th->mutex_count);
107 }
108
109 /*
110 * XXX: wait for BSD to fix signal code
111 * Until then, we cannot block here. We know the task
112 * can't go away, so we make sure it is still active after
113 * retrieving the first thread for extra safety.
114 */
115 thread_t get_firstthread(task_t task)
116 {
117 thread_t thread = (thread_t)(void *)queue_first(&task->threads);
118
119 if (queue_end(&task->threads, (queue_entry_t)thread))
120 thread = THREAD_NULL;
121
122 if (!task->active)
123 return (THREAD_NULL);
124
125 return (thread);
126 }
127
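/*
 * Find a thread in the task to which a signal can be delivered: the first
 * active thread that has not been (unsafely) aborted.  Optionally posts the
 * BSD AST on it.  The chosen thread is returned through result_out;
 * KERN_FAILURE is returned if the task is inactive or no eligible thread
 * exists.
 */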
128 kern_return_t
129 get_signalact(
130 task_t task,
131 thread_t *result_out,
132 int setast)
133 {
134 kern_return_t result = KERN_SUCCESS;
135 thread_t inc, thread = THREAD_NULL;
136
137 task_lock(task);
138
139 if (!task->active) {
140 task_unlock(task);
141
142 return (KERN_FAILURE);
143 }
144
145 for (inc = (thread_t)(void *)queue_first(&task->threads);
146 !queue_end(&task->threads, (queue_entry_t)inc); ) {
147 thread_mtx_lock(inc);
148 if (inc->active &&
149 (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
150 thread = inc;
151 break;
152 }
153 thread_mtx_unlock(inc);
154
155 inc = (thread_t)(void *)queue_next(&inc->task_threads);
156 }
157
158 if (result_out)
159 *result_out = thread;
160
161 if (thread) {
162 if (setast)
163 act_set_astbsd(thread);
164
165 thread_mtx_unlock(thread);
166 }
167 else
168 result = KERN_FAILURE;
169
170 task_unlock(task);
171
172 return (result);
173 }
174
175
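/*
 * Check that the given thread is still an active, non-aborted member of the
 * task (and therefore still eligible for signal delivery), optionally
 * posting the BSD AST on it.
 */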
176 kern_return_t
177 check_actforsig(
178 task_t task,
179 thread_t thread,
180 int setast)
181 {
182 kern_return_t result = KERN_FAILURE;
183 thread_t inc;
184
185 task_lock(task);
186
187 if (!task->active) {
188 task_unlock(task);
189
190 return (KERN_FAILURE);
191 }
192
193 for (inc = (thread_t)(void *)queue_first(&task->threads);
194 !queue_end(&task->threads, (queue_entry_t)inc); ) {
195 if (inc == thread) {
196 thread_mtx_lock(inc);
197
198 if (inc->active &&
199 (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
200 result = KERN_SUCCESS;
201 break;
202 }
203
204 thread_mtx_unlock(inc);
205 break;
206 }
207
208 inc = (thread_t)(void *)queue_next(&inc->task_threads);
209 }
210
211 if (result == KERN_SUCCESS) {
212 if (setast)
213 act_set_astbsd(thread);
214
215 thread_mtx_unlock(thread);
216 }
217
218 task_unlock(task);
219
220 return (result);
221 }
222
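/* Return the resource ledger of a task. */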
223 ledger_t get_task_ledger(task_t t)
224 {
225 return(t->ledger);
226 }
227
228 /*
229 * This is only safe to call from a thread executing in
230 * the task's context, or if the task is locked.  Otherwise,
231 * the map could be switched for the task (and freed) before
232 * we return it here.
233 */
234 vm_map_t get_task_map(task_t t)
235 {
236 return(t->map);
237 }
238
239 vm_map_t get_task_map_reference(task_t t)
240 {
241 vm_map_t m;
242
243 if (t == NULL)
244 return VM_MAP_NULL;
245
246 task_lock(t);
247 if (!t->active) {
248 task_unlock(t);
249 return VM_MAP_NULL;
250 }
251 m = t->map;
252 vm_map_reference_swap(m);
253 task_unlock(t);
254 return m;
255 }
256
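/*
 * A minimal usage sketch (illustrative only; "size" is a hypothetical local):
 * callers that may run outside the task's context should take a reference
 * with get_task_map_reference() rather than using the bare pointer from
 * get_task_map(), and drop it with vm_map_deallocate() when done:
 *
 *	vm_map_t map = get_task_map_reference(task);
 *	if (map != VM_MAP_NULL) {
 *		vm_map_size_t size = get_vmmap_size(map);
 *		...
 *		vm_map_deallocate(map);
 *	}
 */
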
257 /*
258 * Return the IPC space of a task.
259 */
260 ipc_space_t get_task_ipcspace(task_t t)
261 {
262 return(t->itk_space);
263 }
264
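/* Count the threads of a task that are currently active. */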
265 int get_task_numactivethreads(task_t task)
266 {
267 thread_t inc;
268 int num_active_thr=0;
269 task_lock(task);
270
271 for (inc = (thread_t)(void *)queue_first(&task->threads);
272 !queue_end(&task->threads, (queue_entry_t)inc); inc = (thread_t)(void *)queue_next(&inc->task_threads))
273 {
274 if(inc->active)
275 num_active_thr++;
276 }
277 task_unlock(task);
278 return num_active_thr;
279 }
280
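/* Return the total number of threads in a task. */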
281 int get_task_numacts(task_t t)
282 {
283 return(t->thread_count);
284 }
285
286 /* does the current process need a 64-bit register set for its signal handlers? */
287 int is_64signalregset(void)
288 {
289 if (task_has_64BitData(current_task())) {
290 return(1);
291 }
292
293 return(0);
294 }
295
296 /*
297 * Swap in a new map for the task/thread pair; the old map reference is
298 * returned.
299 */
300 vm_map_t
301 swap_task_map(task_t task, thread_t thread, vm_map_t map, boolean_t doswitch)
302 {
303 vm_map_t old_map;
304
305 if (task != thread->task)
306 panic("swap_task_map");
307
308 task_lock(task);
309 mp_disable_preemption();
310 old_map = task->map;
311 thread->map = task->map = map;
312 if (doswitch) {
313 pmap_switch(map->pmap);
314 }
315 mp_enable_preemption();
316 task_unlock(task);
317
318 #if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
319 inval_copy_windows(thread);
320 #endif
321
322 return old_map;
323 }
324
325 /*
326 * Return the physical map (pmap) backing a task's VM map.
327 */
328 pmap_t get_task_pmap(task_t t)
329 {
330 return(t->map->pmap);
331 }
332
333 /*
334 * Return the resident memory size of a task, in bytes.
335 */
336 uint64_t get_task_resident_size(task_t task)
337 {
338 vm_map_t map;
339
340 map = (task == kernel_task) ? kernel_map: task->map;
341 return((uint64_t)pmap_resident_count(map->pmap) * PAGE_SIZE_64);
342 }
343
344 uint64_t get_task_compressed(task_t task)
345 {
346 vm_map_t map;
347
348 map = (task == kernel_task) ? kernel_map: task->map;
349 return((uint64_t)pmap_compressed(map->pmap) * PAGE_SIZE_64);
350 }
351
352 uint64_t get_task_resident_max(task_t task)
353 {
354 vm_map_t map;
355
356 map = (task == kernel_task) ? kernel_map: task->map;
357 return((uint64_t)pmap_resident_max(map->pmap) * PAGE_SIZE_64);
358 }
359
360 uint64_t get_task_purgeable_size(task_t task)
361 {
362 vm_map_t map;
363 mach_vm_size_t volatile_virtual_size;
364 mach_vm_size_t volatile_resident_size;
365 mach_vm_size_t volatile_pmap_size;
366
367 map = (task == kernel_task) ? kernel_map: task->map;
368 vm_map_query_volatile(map, &volatile_virtual_size, &volatile_resident_size, &volatile_pmap_size);
369
370 return((uint64_t)volatile_resident_size);
371 }
372 /*
373 * Return the physical footprint of a task, in bytes, from its ledger.
374 */
375 uint64_t get_task_phys_footprint(task_t task)
376 {
377 kern_return_t ret;
378 ledger_amount_t credit, debit;
379
380 ret = ledger_get_entries(task->ledger, task_ledgers.phys_footprint, &credit, &debit);
381 if (KERN_SUCCESS == ret) {
382 return (credit - debit);
383 }
384
385 return 0;
386 }
387
388 /*
389 * Return the maximum physical footprint the task has reached, in bytes.
390 */
391 uint64_t get_task_phys_footprint_max(task_t task)
392 {
393 kern_return_t ret;
394 ledger_amount_t max;
395
396 ret = ledger_get_maximum(task->ledger, task_ledgers.phys_footprint, &max);
397 if (KERN_SUCCESS == ret) {
398 return max;
399 }
400
401 return 0;
402 }
403
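/* Return the CPU time charged to a task, from its cpu_time ledger entry. */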
404 uint64_t get_task_cpu_time(task_t task)
405 {
406 kern_return_t ret;
407 ledger_amount_t credit, debit;
408
409 ret = ledger_get_entries(task->ledger, task_ledgers.cpu_time, &credit, &debit);
410 if (KERN_SUCCESS == ret) {
411 return (credit - debit);
412 }
413
414 return 0;
415 }
416
417 /*
418 * Return the pmap backing a VM map.
419 */
420 pmap_t get_map_pmap(vm_map_t map)
421 {
422 return(map->pmap);
423 }
424 /*
425 * Return the task a thread belongs to.
426 */
427 task_t get_threadtask(thread_t th)
428 {
429 return(th->task);
430 }
431
432 /*
433 * Return the lowest valid address of a VM map.
434 */
435 vm_map_offset_t
436 get_map_min(
437 vm_map_t map)
438 {
439 return(vm_map_min(map));
440 }
441
442 /*
443 * Return the highest valid address of a VM map.
444 */
445 vm_map_offset_t
446 get_map_max(
447 vm_map_t map)
448 {
449 return(vm_map_max(map));
450 }
451 vm_map_size_t
452 get_vmmap_size(
453 vm_map_t map)
454 {
455 return(map->size);
456 }
457
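/*
 * Count the map entries whose start addresses fall within [start, end) in
 * the given submap, descending recursively into nested submaps.
 */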
458 int
459 get_vmsubmap_entries(
460 vm_map_t map,
461 vm_object_offset_t start,
462 vm_object_offset_t end)
463 {
464 int total_entries = 0;
465 vm_map_entry_t entry;
466
467 if (not_in_kdp)
468 vm_map_lock(map);
469 entry = vm_map_first_entry(map);
470 while((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
471 entry = entry->vme_next;
472 }
473
474 while((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
475 if(entry->is_sub_map) {
476 total_entries +=
477 get_vmsubmap_entries(entry->object.sub_map,
478 entry->offset,
479 entry->offset +
480 (entry->vme_end - entry->vme_start));
481 } else {
482 total_entries += 1;
483 }
484 entry = entry->vme_next;
485 }
486 if (not_in_kdp)
487 vm_map_unlock(map);
488 return(total_entries);
489 }
490
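/*
 * Count all entries in a VM map, descending recursively into submaps.
 */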
491 int
492 get_vmmap_entries(
493 vm_map_t map)
494 {
495 int total_entries = 0;
496 vm_map_entry_t entry;
497
498 if (not_in_kdp)
499 vm_map_lock(map);
500 entry = vm_map_first_entry(map);
501
502 while(entry != vm_map_to_entry(map)) {
503 if(entry->is_sub_map) {
504 total_entries +=
505 get_vmsubmap_entries(entry->object.sub_map,
506 entry->offset,
507 entry->offset +
508 (entry->vme_end - entry->vme_start));
509 } else {
510 total_entries += 1;
511 }
512 entry = entry->vme_next;
513 }
514 if (not_in_kdp)
515 vm_map_unlock(map);
516 return(total_entries);
517 }
518
519 /*
520 * Return the user stop count of a task.
521 */
525 int
526 get_task_userstop(
527 task_t task)
528 {
529 return(task->user_stop_count);
530 }
531
532 /*
533 * Return the user stop count of a thread.
534 */
535 int
536 get_thread_userstop(
537 thread_t th)
538 {
539 return(th->user_stop_count);
540 }
541
542 /*
543 * Return whether the task is pid-suspended.
544 */
545 boolean_t
546 get_task_pidsuspended(
547 task_t task)
548 {
549 return (task->pidsuspended);
550 }
551
552 /*
553 * Return whether the task is frozen.
554 */
555 boolean_t
556 get_task_frozen(
557 task_t task)
558 {
559 return (task->frozen);
560 }
561
562 /*
563 * Return whether the thread has an abort pending that is not merely a safe abort.
564 */
565 boolean_t
566 thread_should_abort(
567 thread_t th)
568 {
569 return ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT);
570 }
571
572 /*
573 * This routine is like thread_should_abort() above. It checks to
574 * see if the current thread is aborted. But unlike above, it also
575 * checks to see if the thread is safely aborted.  If so, it returns
576 * that fact, and clears the condition (safe aborts only should
577 * have a single effect, and a poll of the abort status
578 * qualifies).
579 */
580 boolean_t
581 current_thread_aborted (
582 void)
583 {
584 thread_t th = current_thread();
585 spl_t s;
586
587 if ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT &&
588 (th->options & TH_OPT_INTMASK) != THREAD_UNINT)
589 return (TRUE);
590 if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
591 s = splsched();
592 thread_lock(th);
593 if (th->sched_flags & TH_SFLAG_ABORTSAFELY)
594 th->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
595 thread_unlock(th);
596 splx(s);
597 }
598 return FALSE;
599 }
600
601 /*
602 * Invoke func_callback on every thread in the task, with the task locked.
603 */
604 void
605 task_act_iterate_wth_args(
606 task_t task,
607 void (*func_callback)(thread_t, void *),
608 void *func_arg)
609 {
610 thread_t inc;
611
612 task_lock(task);
613
614 for (inc = (thread_t)(void *)queue_first(&task->threads);
615 !queue_end(&task->threads, (queue_entry_t)inc); ) {
616 (void) (*func_callback)(inc, func_arg);
617 inc = (thread_t)(void *)queue_next(&inc->task_threads);
618 }
619
620 task_unlock(task);
621 }
622
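/*
 * A minimal usage sketch (the callback and its argument are hypothetical):
 *
 *	static void
 *	count_thread(__unused thread_t thread, void *arg)
 *	{
 *		(*(int *)arg)++;
 *	}
 *
 *	int nthreads = 0;
 *	task_act_iterate_wth_args(task, count_thread, &nthreads);
 */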
623
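/* Arrange for the BSD AST (AST_BSD) to be posted, disabling interrupts while the bit is set. */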
624 void
625 astbsd_on(void)
626 {
627 boolean_t reenable;
628
629 reenable = ml_set_interrupts_enabled(FALSE);
630 ast_on_fast(AST_BSD);
631 (void)ml_set_interrupts_enabled(reenable);
632 }
633
634
635 #include <sys/bsdtask_info.h>
636
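/*
 * Fill a proc_taskinfo_internal structure with the task's virtual and
 * resident sizes, per-thread and task CPU times, context-switch counts, and
 * fault, message, and syscall statistics.
 */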
637 void
638 fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo)
639 {
640 vm_map_t map;
641 task_absolutetime_info_data_t tinfo;
642 thread_t thread;
643 uint32_t cswitch = 0, numrunning = 0;
644 uint32_t syscalls_unix = 0;
645 uint32_t syscalls_mach = 0;
646
647 map = (task == kernel_task)? kernel_map: task->map;
648
649 ptinfo->pti_virtual_size = map->size;
650 ptinfo->pti_resident_size =
651 (mach_vm_size_t)(pmap_resident_count(map->pmap))
652 * PAGE_SIZE_64;
653
654 task_lock(task);
655
656 ptinfo->pti_policy = ((task != kernel_task)?
657 POLICY_TIMESHARE: POLICY_RR);
658
659 tinfo.threads_user = tinfo.threads_system = 0;
660 tinfo.total_user = task->total_user_time;
661 tinfo.total_system = task->total_system_time;
662
663 queue_iterate(&task->threads, thread, thread_t, task_threads) {
664 uint64_t tval;
665 spl_t x;
666
667 if (thread->options & TH_OPT_IDLE_THREAD)
668 continue;
669
670 x = splsched();
671 thread_lock(thread);
672
673 if ((thread->state & TH_RUN) == TH_RUN)
674 numrunning++;
675 cswitch += thread->c_switch;
676 tval = timer_grab(&thread->user_timer);
677 tinfo.threads_user += tval;
678 tinfo.total_user += tval;
679
680 tval = timer_grab(&thread->system_timer);
681
682 if (thread->precise_user_kernel_time) {
683 tinfo.threads_system += tval;
684 tinfo.total_system += tval;
685 } else {
686 /* system_timer may represent either sys or user */
687 tinfo.threads_user += tval;
688 tinfo.total_user += tval;
689 }
690
691 syscalls_unix += thread->syscalls_unix;
692 syscalls_mach += thread->syscalls_mach;
693
694 thread_unlock(thread);
695 splx(x);
696 }
697
698 ptinfo->pti_total_system = tinfo.total_system;
699 ptinfo->pti_total_user = tinfo.total_user;
700 ptinfo->pti_threads_system = tinfo.threads_system;
701 ptinfo->pti_threads_user = tinfo.threads_user;
702
703 ptinfo->pti_faults = task->faults;
704 ptinfo->pti_pageins = task->pageins;
705 ptinfo->pti_cow_faults = task->cow_faults;
706 ptinfo->pti_messages_sent = task->messages_sent;
707 ptinfo->pti_messages_received = task->messages_received;
708 ptinfo->pti_syscalls_mach = task->syscalls_mach + syscalls_mach;
709 ptinfo->pti_syscalls_unix = task->syscalls_unix + syscalls_unix;
710 ptinfo->pti_csw = task->c_switch + cswitch;
711 ptinfo->pti_threadnum = task->thread_count;
712 ptinfo->pti_numrunning = numrunning;
713 ptinfo->pti_priority = task->priority;
714
715 task_unlock(task);
716 }
717
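/*
 * Find the thread identified by thaddr (its user-level thread handle, or its
 * unique id when thuniqueid is non-zero) and fill in its
 * proc_threadinfo_internal record.  Returns 0 on success, 1 if the thread
 * was not found or thread_info_internal() failed.
 */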
718 int
719 fill_taskthreadinfo(task_t task, uint64_t thaddr, int thuniqueid, struct proc_threadinfo_internal * ptinfo, void * vpp, int *vidp)
720 {
721 thread_t thact;
722 int err=0;
723 mach_msg_type_number_t count;
724 thread_basic_info_data_t basic_info;
725 kern_return_t kret;
726 uint64_t addr = 0;
727
728 task_lock(task);
729
730 for (thact = (thread_t)(void *)queue_first(&task->threads);
731 !queue_end(&task->threads, (queue_entry_t)thact); ) {
732 addr = (thuniqueid==0)?thact->machine.cthread_self: thact->thread_id;
733 if (addr == thaddr)
734 {
735
736 count = THREAD_BASIC_INFO_COUNT;
737 if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
738 err = 1;
739 goto out;
740 }
741 ptinfo->pth_user_time = ((basic_info.user_time.seconds * (integer_t)NSEC_PER_SEC) + (basic_info.user_time.microseconds * (integer_t)NSEC_PER_USEC));
742 ptinfo->pth_system_time = ((basic_info.system_time.seconds * (integer_t)NSEC_PER_SEC) + (basic_info.system_time.microseconds * (integer_t)NSEC_PER_USEC));
743
744 ptinfo->pth_cpu_usage = basic_info.cpu_usage;
745 ptinfo->pth_policy = basic_info.policy;
746 ptinfo->pth_run_state = basic_info.run_state;
747 ptinfo->pth_flags = basic_info.flags;
748 ptinfo->pth_sleep_time = basic_info.sleep_time;
749 ptinfo->pth_curpri = thact->sched_pri;
750 ptinfo->pth_priority = thact->priority;
751 ptinfo->pth_maxpriority = thact->max_priority;
752
753 if ((vpp != NULL) && (thact->uthread != NULL))
754 bsd_threadcdir(thact->uthread, vpp, vidp);
755 bsd_getthreadname(thact->uthread,ptinfo->pth_name);
756 err = 0;
757 goto out;
758 }
759 thact = (thread_t)(void *)queue_next(&thact->task_threads);
760 }
761 err = 1;
762
763 out:
764 task_unlock(task);
765 return(err);
766 }
767
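/*
 * Copy the user-level thread handles (machine.cthread_self) of up to thcount
 * of the task's threads into buffer; returns the number of bytes written.
 */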
768 int
769 fill_taskthreadlist(task_t task, void * buffer, int thcount)
770 {
771 int numthr=0;
772 thread_t thact;
773 uint64_t * uptr;
774 uint64_t thaddr;
775
776 uptr = (uint64_t *)buffer;
777
778 task_lock(task);
779
780 for (thact = (thread_t)(void *)queue_first(&task->threads);
781 !queue_end(&task->threads, (queue_entry_t)thact); ) {
782 thaddr = thact->machine.cthread_self;
783 *uptr++ = thaddr;
784 numthr++;
785 if (numthr >= thcount)
786 goto out;
787 thact = (thread_t)(void *)queue_next(&thact->task_threads);
788 }
789
790 out:
791 task_unlock(task);
792 return (int)(numthr * sizeof(uint64_t));
793
794 }
795
796 int
797 get_numthreads(task_t task)
798 {
799 return(task->thread_count);
800 }
801
802 /*
803 * Gather the various pieces of info about the designated task,
804 * and collect it all into a single rusage_info.
805 */
806 int
807 fill_task_rusage(task_t task, rusage_info_current *ri)
808 {
809 struct task_power_info powerinfo;
810
811 assert(task != TASK_NULL);
812 task_lock(task);
813
814 task_power_info_locked(task, &powerinfo, NULL);
815 ri->ri_pkg_idle_wkups = powerinfo.task_platform_idle_wakeups;
816 ri->ri_interrupt_wkups = powerinfo.task_interrupt_wakeups;
817 ri->ri_user_time = powerinfo.total_user;
818 ri->ri_system_time = powerinfo.total_system;
819
820 ledger_get_balance(task->ledger, task_ledgers.phys_footprint,
821 (ledger_amount_t *)&ri->ri_phys_footprint);
822 ledger_get_balance(task->ledger, task_ledgers.phys_mem,
823 (ledger_amount_t *)&ri->ri_resident_size);
824 ledger_get_balance(task->ledger, task_ledgers.wired_mem,
825 (ledger_amount_t *)&ri->ri_wired_size);
826
827 ri->ri_pageins = task->pageins;
828
829 task_unlock(task);
830 return (0);
831 }
832
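/*
 * Report the system time billed to and serviced by the task through the bank
 * subsystem (zero when CONFIG_BANK is not configured).
 */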
833 void
834 fill_task_billed_usage(task_t task __unused, rusage_info_current *ri)
835 {
836 #if CONFIG_BANK
837 ri->ri_billed_system_time = bank_billed_time(task->bank_context);
838 ri->ri_serviced_system_time = bank_serviced_time(task->bank_context);
839 #else
840 ri->ri_billed_system_time = 0;
841 ri->ri_serviced_system_time = 0;
842 #endif
843 }
844
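/* Report the task's disk I/O byte counts from its task_io_stats, or zeros if I/O statistics are unavailable. */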
845 int
846 fill_task_io_rusage(task_t task, rusage_info_current *ri)
847 {
848 assert(task != TASK_NULL);
849 task_lock(task);
850
851 if (task->task_io_stats) {
852 ri->ri_diskio_bytesread = task->task_io_stats->disk_reads.size;
853 ri->ri_diskio_byteswritten = (task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size);
854 } else {
855 /* I/O Stats unavailable */
856 ri->ri_diskio_bytesread = 0;
857 ri->ri_diskio_byteswritten = 0;
858 }
859 task_unlock(task);
860 return (0);
861 }
862
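/* Roll each thread's QoS CPU time up into the task and report the per-QoS-class totals. */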
863 int
864 fill_task_qos_rusage(task_t task, rusage_info_current *ri)
865 {
866 thread_t thread;
867
868 assert(task != TASK_NULL);
869 task_lock(task);
870
871 /* Roll up the QoS CPU time of all the threads into the task */
872 queue_iterate(&task->threads, thread, thread_t, task_threads) {
873 if (thread->options & TH_OPT_IDLE_THREAD)
874 continue;
875
876 thread_mtx_lock(thread);
877 thread_update_qos_cpu_time(thread, TRUE);
878 thread_mtx_unlock(thread);
879
880 }
881 ri->ri_cpu_time_qos_default = task->cpu_time_qos_stats.cpu_time_qos_default;
882 ri->ri_cpu_time_qos_maintenance = task->cpu_time_qos_stats.cpu_time_qos_maintenance;
883 ri->ri_cpu_time_qos_background = task->cpu_time_qos_stats.cpu_time_qos_background;
884 ri->ri_cpu_time_qos_utility = task->cpu_time_qos_stats.cpu_time_qos_utility;
885 ri->ri_cpu_time_qos_legacy = task->cpu_time_qos_stats.cpu_time_qos_legacy;
886 ri->ri_cpu_time_qos_user_initiated = task->cpu_time_qos_stats.cpu_time_qos_user_initiated;
887 ri->ri_cpu_time_qos_user_interactive = task->cpu_time_qos_stats.cpu_time_qos_user_interactive;
888
889 task_unlock(task);
890 return (0);
891 }