/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/machine/vm_param.h>
#include <mach/task.h>

#include <kern/kern_types.h>
#include <kern/ledger.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/spl.h>
#include <kern/ast.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h> /* last */
#include <sys/resource.h>
#include <sys/signal.h>

#undef thread_should_halt

/* BSD KERN COMPONENT INTERFACE */

task_t bsd_init_task = TASK_NULL;
extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */

thread_t get_firstthread(task_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
kern_return_t get_signalact(task_t, thread_t *, int);
int fill_task_rusage(task_t task, rusage_info_current *ri);
int fill_task_io_rusage(task_t task, rusage_info_current *ri);
int fill_task_qos_rusage(task_t task, rusage_info_current *ri);
void fill_task_billed_usage(task_t task, rusage_info_current *ri);
void task_bsdtask_kill(task_t);

extern uint64_t get_dispatchqueue_serialno_offset_from_proc(void *p);
extern uint64_t proc_uniqueid(void *p);

#if MACH_BSD
extern void psignal(void *, int);
#endif

/*
 * Return the BSD proc pointer (struct proc *) associated with a task.
 */
void *get_bsdtask_info(task_t t)
{
	return(t->bsd_info);
}

void task_bsdtask_kill(task_t t)
{
	void * bsd_info = get_bsdtask_info(t);
	if (bsd_info != NULL) {
		psignal(bsd_info, SIGKILL);
	}
}

/*
 * Return the BSD proc pointer for the task a thread belongs to,
 * or NULL if the thread has no task.
 */
void *get_bsdthreadtask_info(thread_t th)
{
	return(th->task != TASK_NULL ? th->task->bsd_info : NULL);
}

/*
 * Attach a BSD proc pointer to a task.
 */
void set_bsdtask_info(task_t t, void *v)
{
	t->bsd_info = v;
}

/*
 * Return the uthread structure associated with a thread.
 */
void *get_bsdthread_info(thread_t th)
{
	return(th->uthread);
}

/*
 * XXX: returns the number of kernel mutexes currently held by the thread.
 */
int get_thread_lock_count(thread_t th); /* forced forward */
int get_thread_lock_count(thread_t th)
{
	return(th->mutex_count);
}

/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here.  We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_t get_firstthread(task_t task)
{
	thread_t thread = (thread_t)(void *)queue_first(&task->threads);

	if (queue_end(&task->threads, (queue_entry_t)thread))
		thread = THREAD_NULL;

	if (!task->active)
		return (THREAD_NULL);

	return (thread);
}

kern_return_t
get_signalact(
	task_t		task,
	thread_t	*result_out,
	int		setast)
{
	kern_return_t	result = KERN_SUCCESS;
	thread_t	inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc); ) {
		thread_mtx_lock(inc);
		if (inc->active &&
		    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result_out)
		*result_out = thread;

	if (thread) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}
	else
		result = KERN_FAILURE;

	task_unlock(task);

	return (result);
}

kern_return_t
check_actforsig(
	task_t		task,
	thread_t	thread,
	int		setast)
{
	kern_return_t	result = KERN_FAILURE;
	thread_t	inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc); ) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			if (inc->active &&
			    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return (result);
}

ledger_t get_task_ledger(task_t t)
{
	return(t->ledger);
}

/*
 * This is only safe to call from a thread executing in
 * the task's context or if the task is locked.  Otherwise,
 * the map could be switched for the task (and freed) before
 * we go to return it here.
 */
vm_map_t get_task_map(task_t t)
{
	return(t->map);
}

vm_map_t get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL)
		return VM_MAP_NULL;

	task_lock(t);
	if (!t->active) {
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	vm_map_reference_swap(m);
	task_unlock(t);
	return m;
}
/*
 * Return the task's IPC space.
 */
ipc_space_t get_task_ipcspace(task_t t)
{
	return(t->itk_space);
}

int get_task_numactivethreads(task_t task)
{
	thread_t	inc;
	int		num_active_thr = 0;
	task_lock(task);

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);
	    inc = (thread_t)(void *)queue_next(&inc->task_threads)) {
		if (inc->active)
			num_active_thr++;
	}
	task_unlock(task);
	return num_active_thr;
}

int get_task_numacts(task_t t)
{
	return(t->thread_count);
}

/* does this process need a 64-bit register set for its signal handler? */
int is_64signalregset(void)
{
	if (task_has_64BitData(current_task())) {
		return(1);
	}

	return(0);
}

/*
 * Swap in a new map for the task/thread pair; the old map reference is
 * returned.  Also does a pmap switch if the thread provided is the
 * current thread.
 */
vm_map_t
swap_task_map(task_t task, thread_t thread, vm_map_t map)
{
	vm_map_t old_map;
	boolean_t doswitch = (thread == current_thread()) ? TRUE : FALSE;

	if (task != thread->task)
		panic("swap_task_map");

	task_lock(task);
	mp_disable_preemption();

	old_map = task->map;
	thread->map = task->map = map;
	vm_commit_pagezero_status(map);

	if (doswitch) {
		pmap_switch(map->pmap);
	}
	mp_enable_preemption();
	task_unlock(task);

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
	inval_copy_windows(thread);
#endif

	return old_map;
}
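
/*
 * Illustrative sketch (hypothetical exec-style caller): swap in a
 * freshly built map for the current thread and release the returned
 * old map; "new_map" is an assumed local constructed elsewhere:
 *
 *	vm_map_t old_map = swap_task_map(task, current_thread(), new_map);
 *	vm_map_deallocate(old_map);
 */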

/*
 * This is only safe to call from a thread executing in
 * the task's context or if the task is locked.  Otherwise,
 * the map could be switched for the task (and freed) before
 * we go to return it here.
 */
pmap_t get_task_pmap(task_t t)
{
	return(t->map->pmap);
}

/*
 * Return the task's resident memory size, in bytes.
 */
uint64_t get_task_resident_size(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map: task->map;
	return((uint64_t)pmap_resident_count(map->pmap) * PAGE_SIZE_64);
}

uint64_t get_task_compressed(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map: task->map;
	return((uint64_t)pmap_compressed(map->pmap) * PAGE_SIZE_64);
}

uint64_t get_task_resident_max(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map: task->map;
	return((uint64_t)pmap_resident_max(map->pmap) * PAGE_SIZE_64);
}

uint64_t get_task_purgeable_size(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;
	uint64_t volatile_size = 0;

	ret = ledger_get_entries(task->ledger, task_ledgers.purgeable_volatile, &credit, &debit);
	if (ret != KERN_SUCCESS) {
		return 0;
	}

	volatile_size += (credit - debit);

	ret = ledger_get_entries(task->ledger, task_ledgers.purgeable_volatile_compressed, &credit, &debit);
	if (ret != KERN_SUCCESS) {
		return 0;
	}

	volatile_size += (credit - debit);

	return volatile_size;
}

/*
 * Return the task's current physical footprint (ledger balance), in bytes.
 */
uint64_t get_task_phys_footprint(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.phys_footprint, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

/*
 * Return the task's lifetime maximum physical footprint, in bytes.
 */
uint64_t get_task_phys_footprint_max(task_t task)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_maximum(task->ledger, task_ledgers.phys_footprint, &max);
	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}

/*
 * Return the task's physical footprint limit, in bytes.
 */
uint64_t get_task_phys_footprint_limit(task_t task)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max);
	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}

uint64_t get_task_internal(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.internal, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

uint64_t get_task_internal_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.internal_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

uint64_t get_task_purgeable_nonvolatile(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.purgeable_nonvolatile, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

uint64_t get_task_purgeable_nonvolatile_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.purgeable_nonvolatile_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

uint64_t get_task_alternate_accounting(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.alternate_accounting, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

uint64_t get_task_alternate_accounting_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.alternate_accounting_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

uint64_t get_task_page_table(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.page_table, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

uint64_t get_task_iokit_mapped(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.iokit_mapped, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

uint64_t get_task_cpu_time(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.cpu_time, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

/*
 * Return the task a thread belongs to.
 */
task_t get_threadtask(thread_t th)
{
	return(th->task);
}

/*
 * Return the minimum (lowest) address of a map.
 */
vm_map_offset_t
get_map_min(
	vm_map_t	map)
{
	return(vm_map_min(map));
}

/*
 * Return the maximum (highest) address of a map.
 */
vm_map_offset_t
get_map_max(
	vm_map_t	map)
{
	return(vm_map_max(map));
}

vm_map_size_t
get_vmmap_size(
	vm_map_t	map)
{
	return(map->size);
}

#if CONFIG_COREDUMP

static int
get_vmsubmap_entries(
	vm_map_t	map,
	vm_object_offset_t	start,
	vm_object_offset_t	end)
{
	int	total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(VME_SUBMAP(entry),
					VME_OFFSET(entry),
					(VME_OFFSET(entry) +
					entry->vme_end -
					entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}

int
get_vmmap_entries(
	vm_map_t	map)
{
	int	total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(VME_SUBMAP(entry),
					VME_OFFSET(entry),
					(VME_OFFSET(entry) +
					entry->vme_end -
					entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}
#endif /* CONFIG_COREDUMP */

/*
 * Return the task's user-stop count.
 */
int
get_task_userstop(
	task_t task)
{
	return(task->user_stop_count);
}

/*
 * Return the thread's user-stop count.
 */
int
get_thread_userstop(
	thread_t th)
{
	return(th->user_stop_count);
}

/*
 * Return whether the task has been suspended via pid_suspend().
 */
boolean_t
get_task_pidsuspended(
	task_t task)
{
	return (task->pidsuspended);
}

/*
 * Return whether the task is currently frozen.
 */
boolean_t
get_task_frozen(
	task_t task)
{
	return (task->frozen);
}

/*
 * Return whether the thread has an unsafe abort pending.
 */
boolean_t
thread_should_abort(
	thread_t th)
{
	return ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT);
}

/*
 * This routine is like thread_should_abort() above.  It checks to
 * see if the current thread is aborted.  But unlike above, it also
 * checks to see if the thread is safely aborted.  If so, it returns
 * that fact, and clears the condition (safe aborts should only
 * have a single effect, and a poll of the abort status
 * qualifies).
 */
boolean_t
current_thread_aborted (
		void)
{
	thread_t th = current_thread();
	spl_t s;

	if ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT &&
	    (th->options & TH_OPT_INTMASK) != THREAD_UNINT)
		return (TRUE);
	if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->sched_flags & TH_SFLAG_ABORTSAFELY)
			th->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}
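
/*
 * Illustrative sketch (hypothetical BSD-side caller): a long-running
 * loop polling for aborts.  Because a safe abort is cleared by the
 * poll, the caller observes it at most once; "more_work" and
 * "do_next_chunk" are assumed locals/helpers:
 *
 *	while (more_work) {
 *		if (current_thread_aborted())
 *			return (EINTR);
 *		do_next_chunk();
 *	}
 */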

/*
 * Invoke a callback on every thread in the task, with the task locked.
 */
void
task_act_iterate_wth_args(
	task_t			task,
	void			(*func_callback)(thread_t, void *),
	void			*func_arg)
{
	thread_t	inc;

	task_lock(task);

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc); ) {
		(void) (*func_callback)(inc, func_arg);
		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	task_unlock(task);
}

#include <sys/bsdtask_info.h>

void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo)
{
	vm_map_t map;
	task_absolutetime_info_data_t tinfo;
	thread_t thread;
	uint32_t cswitch = 0, numrunning = 0;
	uint32_t syscalls_unix = 0;
	uint32_t syscalls_mach = 0;

	task_lock(task);

	map = (task == kernel_task)? kernel_map: task->map;

	ptinfo->pti_virtual_size = map->size;
	ptinfo->pti_resident_size =
		(mach_vm_size_t)(pmap_resident_count(map->pmap))
		* PAGE_SIZE_64;

	ptinfo->pti_policy = ((task != kernel_task)?
		POLICY_TIMESHARE: POLICY_RR);

	tinfo.threads_user = tinfo.threads_system = 0;
	tinfo.total_user = task->total_user_time;
	tinfo.total_system = task->total_system_time;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		uint64_t tval;
		spl_t x;

		if (thread->options & TH_OPT_IDLE_THREAD)
			continue;

		x = splsched();
		thread_lock(thread);

		if ((thread->state & TH_RUN) == TH_RUN)
			numrunning++;
		cswitch += thread->c_switch;
		tval = timer_grab(&thread->user_timer);
		tinfo.threads_user += tval;
		tinfo.total_user += tval;

		tval = timer_grab(&thread->system_timer);

		if (thread->precise_user_kernel_time) {
			tinfo.threads_system += tval;
			tinfo.total_system += tval;
		} else {
			/* system_timer may represent either sys or user */
			tinfo.threads_user += tval;
			tinfo.total_user += tval;
		}

		syscalls_unix += thread->syscalls_unix;
		syscalls_mach += thread->syscalls_mach;

		thread_unlock(thread);
		splx(x);
	}

	ptinfo->pti_total_system = tinfo.total_system;
	ptinfo->pti_total_user = tinfo.total_user;
	ptinfo->pti_threads_system = tinfo.threads_system;
	ptinfo->pti_threads_user = tinfo.threads_user;

	ptinfo->pti_faults = task->faults;
	ptinfo->pti_pageins = task->pageins;
	ptinfo->pti_cow_faults = task->cow_faults;
	ptinfo->pti_messages_sent = task->messages_sent;
	ptinfo->pti_messages_received = task->messages_received;
	ptinfo->pti_syscalls_mach = task->syscalls_mach + syscalls_mach;
	ptinfo->pti_syscalls_unix = task->syscalls_unix + syscalls_unix;
	ptinfo->pti_csw = task->c_switch + cswitch;
	ptinfo->pti_threadnum = task->thread_count;
	ptinfo->pti_numrunning = numrunning;
	ptinfo->pti_priority = task->priority;

	task_unlock(task);
}

int
fill_taskthreadinfo(task_t task, uint64_t thaddr, int thuniqueid, struct proc_threadinfo_internal * ptinfo, void * vpp, int *vidp)
{
	thread_t thact;
	int err = 0;
	mach_msg_type_number_t count;
	thread_basic_info_data_t basic_info;
	kern_return_t kret;
	uint64_t addr = 0;

	task_lock(task);

	for (thact = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)thact); ) {
		addr = (thuniqueid == 0) ? thact->machine.cthread_self : thact->thread_id;
		if (addr == thaddr) {
			count = THREAD_BASIC_INFO_COUNT;
			if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
				err = 1;
				goto out;
			}
			ptinfo->pth_user_time = ((basic_info.user_time.seconds * (integer_t)NSEC_PER_SEC) + (basic_info.user_time.microseconds * (integer_t)NSEC_PER_USEC));
			ptinfo->pth_system_time = ((basic_info.system_time.seconds * (integer_t)NSEC_PER_SEC) + (basic_info.system_time.microseconds * (integer_t)NSEC_PER_USEC));

			ptinfo->pth_cpu_usage = basic_info.cpu_usage;
			ptinfo->pth_policy = basic_info.policy;
			ptinfo->pth_run_state = basic_info.run_state;
			ptinfo->pth_flags = basic_info.flags;
			ptinfo->pth_sleep_time = basic_info.sleep_time;
			ptinfo->pth_curpri = thact->sched_pri;
			ptinfo->pth_priority = thact->base_pri;
			ptinfo->pth_maxpriority = thact->max_priority;

			if ((vpp != NULL) && (thact->uthread != NULL))
				bsd_threadcdir(thact->uthread, vpp, vidp);
			bsd_getthreadname(thact->uthread, ptinfo->pth_name);
			err = 0;
			goto out;
		}
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}
	err = 1;

out:
	task_unlock(task);
	return(err);
}

int
fill_taskthreadlist(task_t task, void * buffer, int thcount)
{
	int numthr = 0;
	thread_t thact;
	uint64_t * uptr;
	uint64_t thaddr;

	uptr = (uint64_t *)buffer;

	task_lock(task);

	for (thact = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)thact); ) {
		thaddr = thact->machine.cthread_self;
		*uptr++ = thaddr;
		numthr++;
		if (numthr >= thcount)
			goto out;
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}

out:
	task_unlock(task);
	return (int)(numthr * sizeof(uint64_t));
}
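
/*
 * Illustrative sketch (hypothetical caller): the buffer is an array of
 * uint64_t thread handles and the return value is the number of bytes
 * written, so the entry count falls out by division; "MAXTHREADS" is
 * an assumed bound:
 *
 *	uint64_t buf[MAXTHREADS];
 *	int used = fill_taskthreadlist(task, buf, MAXTHREADS);
 *	int nthreads = used / sizeof(uint64_t);
 */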

int
get_numthreads(task_t task)
{
	return(task->thread_count);
}

/*
 * Gather the various pieces of info about the designated task,
 * and collect it all into a single rusage_info.
 */
int
fill_task_rusage(task_t task, rusage_info_current *ri)
{
	struct task_power_info powerinfo;

	assert(task != TASK_NULL);
	task_lock(task);

	task_power_info_locked(task, &powerinfo, NULL, NULL);
	ri->ri_pkg_idle_wkups = powerinfo.task_platform_idle_wakeups;
	ri->ri_interrupt_wkups = powerinfo.task_interrupt_wakeups;
	ri->ri_user_time = powerinfo.total_user;
	ri->ri_system_time = powerinfo.total_system;

	ledger_get_balance(task->ledger, task_ledgers.phys_footprint,
		(ledger_amount_t *)&ri->ri_phys_footprint);
	ledger_get_balance(task->ledger, task_ledgers.phys_mem,
		(ledger_amount_t *)&ri->ri_resident_size);
	ledger_get_balance(task->ledger, task_ledgers.wired_mem,
		(ledger_amount_t *)&ri->ri_wired_size);

	ri->ri_pageins = task->pageins;

	task_unlock(task);
	return (0);
}
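
/*
 * Illustrative sketch (hypothetical caller): the fill_task_* routines
 * here each populate different fields of one rusage_info_current, so
 * a caller typically layers them onto a zeroed structure:
 *
 *	rusage_info_current ri;
 *	bzero(&ri, sizeof(ri));
 *	fill_task_rusage(task, &ri);
 *	fill_task_io_rusage(task, &ri);
 *	fill_task_qos_rusage(task, &ri);
 *	fill_task_billed_usage(task, &ri);
 */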

void
fill_task_billed_usage(task_t task __unused, rusage_info_current *ri)
{
#if CONFIG_BANK
	ri->ri_billed_system_time = bank_billed_time_safe(task);
	ri->ri_serviced_system_time = bank_serviced_time_safe(task);
#else
	ri->ri_billed_system_time = 0;
	ri->ri_serviced_system_time = 0;
#endif
}

int
fill_task_io_rusage(task_t task, rusage_info_current *ri)
{
	assert(task != TASK_NULL);
	task_lock(task);

	if (task->task_io_stats) {
		ri->ri_diskio_bytesread = task->task_io_stats->disk_reads.size;
		ri->ri_diskio_byteswritten = (task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size);
	} else {
		/* I/O Stats unavailable */
		ri->ri_diskio_bytesread = 0;
		ri->ri_diskio_byteswritten = 0;
	}
	task_unlock(task);
	return (0);
}

int
fill_task_qos_rusage(task_t task, rusage_info_current *ri)
{
	thread_t thread;

	assert(task != TASK_NULL);
	task_lock(task);

	/* Roll up the QoS time of all the task's threads */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		if (thread->options & TH_OPT_IDLE_THREAD)
			continue;

		thread_update_qos_cpu_time(thread);
	}
	ri->ri_cpu_time_qos_default = task->cpu_time_qos_stats.cpu_time_qos_default;
	ri->ri_cpu_time_qos_maintenance = task->cpu_time_qos_stats.cpu_time_qos_maintenance;
	ri->ri_cpu_time_qos_background = task->cpu_time_qos_stats.cpu_time_qos_background;
	ri->ri_cpu_time_qos_utility = task->cpu_time_qos_stats.cpu_time_qos_utility;
	ri->ri_cpu_time_qos_legacy = task->cpu_time_qos_stats.cpu_time_qos_legacy;
	ri->ri_cpu_time_qos_user_initiated = task->cpu_time_qos_stats.cpu_time_qos_user_initiated;
	ri->ri_cpu_time_qos_user_interactive = task->cpu_time_qos_stats.cpu_time_qos_user_interactive;

	task_unlock(task);
	return (0);
}

uint64_t
get_task_dispatchqueue_serialno_offset(task_t task)
{
	uint64_t dq_serialno_offset = 0;

	if (task->bsd_info) {
		dq_serialno_offset = get_dispatchqueue_serialno_offset_from_proc(task->bsd_info);
	}

	return dq_serialno_offset;
}

uint64_t
get_task_uniqueid(task_t task)
{
	if (task->bsd_info) {
		return proc_uniqueid(task->bsd_info);
	} else {
		return UINT64_MAX;
	}
}

#if CONFIG_MACF
struct label *
get_task_crash_label(task_t task)
{
	return task->crash_label;
}

void
set_task_crash_label(task_t task, struct label *label)
{
	task->crash_label = label;
}
#endif