/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/machine/vm_param.h>
#include <mach/task.h>

#include <kern/kern_types.h>
#include <kern/ledger.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/spl.h>
#include <kern/ast.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h> /* last */
#include <sys/resource.h>
#include <sys/signal.h>

#if MONOTONIC
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* MONOTONIC */

#include <machine/limits.h>

#undef thread_should_halt

/* BSD KERN COMPONENT INTERFACE */

extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */

thread_t get_firstthread(task_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
kern_return_t get_signalact(task_t, thread_t *, int);
int fill_task_rusage(task_t task, rusage_info_current *ri);
int fill_task_io_rusage(task_t task, rusage_info_current *ri);
int fill_task_qos_rusage(task_t task, rusage_info_current *ri);
void fill_task_monotonic_rusage(task_t task, rusage_info_current *ri);
uint64_t get_task_logical_writes(task_t task);
void fill_task_billed_usage(task_t task, rusage_info_current *ri);
void task_bsdtask_kill(task_t);

extern uint64_t get_dispatchqueue_serialno_offset_from_proc(void *p);
extern uint64_t proc_uniqueid(void *p);
extern int proc_pidversion(void *p);

#if MACH_BSD
extern void psignal(void *, int);
#endif

/*
 * Return the BSD proc structure (bsd_info) associated with the task.
 */
void *get_bsdtask_info(task_t t)
{
	return(t->bsd_info);
}

void task_bsdtask_kill(task_t t)
{
	void * bsd_info = get_bsdtask_info(t);
	if (bsd_info != NULL) {
		psignal(bsd_info, SIGKILL);
	}
}

/*
 * Return the BSD proc of the task that owns the thread, if any.
 */
void *get_bsdthreadtask_info(thread_t th)
{
	return(th->task != TASK_NULL ? th->task->bsd_info : NULL);
}

/*
 * Attach a BSD proc structure to the task.
 */
void set_bsdtask_info(task_t t, void *v)
{
	t->bsd_info = v;
}

/*
 * Return the uthread associated with the thread.
 */
void *get_bsdthread_info(thread_t th)
{
	return(th->uthread);
}

/*
 * XXX
 */
int get_thread_lock_count(thread_t th); /* forced forward */
int get_thread_lock_count(thread_t th)
{
	return(th->mutex_count);
}

/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here.  We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_t get_firstthread(task_t task)
{
	thread_t thread = (thread_t)(void *)queue_first(&task->threads);

	if (queue_end(&task->threads, (queue_entry_t)thread))
		thread = THREAD_NULL;

	if (!task->active)
		return (THREAD_NULL);

	return (thread);
}

kern_return_t
get_signalact(
	task_t		task,
	thread_t	*result_out,
	int		setast)
{
	kern_return_t	result = KERN_SUCCESS;
	thread_t	inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)(void *)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		thread_mtx_lock(inc);
		if (inc->active &&
				(inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result_out)
		*result_out = thread;

	if (thread) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}
	else
		result = KERN_FAILURE;

	task_unlock(task);

	return (result);
}


kern_return_t
check_actforsig(
	task_t		task,
	thread_t	thread,
	int		setast)
{
	kern_return_t	result = KERN_FAILURE;
	thread_t	inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)(void *)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			if (inc->active &&
					(inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return (result);
}

ledger_t get_task_ledger(task_t t)
{
	return(t->ledger);
}

/*
 * This is only safe to call from a thread executing in
 * the task's context or if the task is locked.  Otherwise,
 * the map could be switched for the task (and freed) before
 * we go to return it here.
 */
vm_map_t get_task_map(task_t t)
{
	return(t->map);
}

vm_map_t get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL)
		return VM_MAP_NULL;

	task_lock(t);
	if (!t->active) {
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	vm_map_reference_swap(m);
	task_unlock(t);
	return m;
}
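
/*
 * Hypothetical caller sketch (not part of this file): the reference
 * taken by get_task_map_reference() must be dropped by the caller
 * with vm_map_deallocate() once the map is no longer needed.
 *
 *	vm_map_t map = get_task_map_reference(task);
 *	if (map != VM_MAP_NULL) {
 *		...inspect the map...
 *		vm_map_deallocate(map);
 *	}
 */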

/*
 * Return the IPC space for the task.
 */
ipc_space_t get_task_ipcspace(task_t t)
{
	return(t->itk_space);
}

int get_task_numactivethreads(task_t task)
{
	thread_t	inc;
	int		num_active_thr = 0;

	task_lock(task);

	for (inc = (thread_t)(void *)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc);
			inc = (thread_t)(void *)queue_next(&inc->task_threads)) {
		if (inc->active)
			num_active_thr++;
	}
	task_unlock(task);
	return num_active_thr;
}

int get_task_numacts(task_t t)
{
	return(t->thread_count);
}

/* Does this machine require the 64-bit register set for signal handlers? */
int is_64signalregset(void)
{
	if (task_has_64BitData(current_task())) {
		return(1);
	}

	return(0);
}

/*
 * Swap in a new map for the task/thread pair; the old map reference is
 * returned.  Also does a pmap switch if the thread provided is the
 * current thread.
 */
vm_map_t
swap_task_map(task_t task, thread_t thread, vm_map_t map)
{
	vm_map_t old_map;
	boolean_t doswitch = (thread == current_thread()) ? TRUE : FALSE;

	if (task != thread->task)
		panic("swap_task_map");

	task_lock(task);
	mp_disable_preemption();

	old_map = task->map;
	thread->map = task->map = map;
	vm_commit_pagezero_status(map);

	if (doswitch) {
#if defined(__arm__) || defined(__arm64__)
		PMAP_SWITCH_USER(thread, map, cpu_number())
#else
		pmap_switch(map->pmap);
#endif
	}
	mp_enable_preemption();
	task_unlock(task);

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
	inval_copy_windows(thread);
#endif

	return old_map;
}

/*
 * This is only safe to call from a thread executing in
 * the task's context or if the task is locked.  Otherwise,
 * the map could be switched for the task (and freed) before
 * we go to return it here.
 */
pmap_t get_task_pmap(task_t t)
{
	return(t->map->pmap);
}

/*
 * Return the resident page footprint of the task, in bytes.
 */
uint64_t get_task_resident_size(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map : task->map;
	return((uint64_t)pmap_resident_count(map->pmap) * PAGE_SIZE_64);
}

uint64_t get_task_compressed(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map : task->map;
	return((uint64_t)pmap_compressed(map->pmap) * PAGE_SIZE_64);
}

uint64_t get_task_resident_max(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map : task->map;
	return((uint64_t)pmap_resident_max(map->pmap) * PAGE_SIZE_64);
}

uint64_t get_task_purgeable_size(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;
	uint64_t volatile_size = 0;

	ret = ledger_get_entries(task->ledger, task_ledgers.purgeable_volatile, &credit, &debit);
	if (ret != KERN_SUCCESS) {
		return 0;
	}

	volatile_size += (credit - debit);

	ret = ledger_get_entries(task->ledger, task_ledgers.purgeable_volatile_compressed, &credit, &debit);
	if (ret != KERN_SUCCESS) {
		return 0;
	}

	volatile_size += (credit - debit);

	return volatile_size;
}
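
/*
 * A note on the ledger-backed getters below (illustrative sketch, using
 * the same calls the getters use): the current balance of a ledger entry
 * is its lifetime credit minus its lifetime debit, so each getter reads
 * the two counters and returns the difference.
 *
 *	ledger_amount_t credit, debit;
 *	uint64_t balance = 0;
 *
 *	if (ledger_get_entries(task->ledger, task_ledgers.internal,
 *	        &credit, &debit) == KERN_SUCCESS)
 *		balance = (uint64_t)(credit - debit);
 */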

/*
 * Return the task's current physical footprint, from its ledger.
 */
uint64_t get_task_phys_footprint(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.phys_footprint, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

/*
 * Return the recent maximum of the task's physical footprint.
 */
uint64_t get_task_phys_footprint_recent_max(task_t task)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_recent_max(task->ledger, task_ledgers.phys_footprint, &max);
	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}

/*
 * Return the lifetime maximum of the task's physical footprint.
 */
uint64_t get_task_phys_footprint_lifetime_max(task_t task)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_lifetime_max(task->ledger, task_ledgers.phys_footprint, &max);

	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}

/*
 * Return the configured limit on the task's physical footprint.
 */
uint64_t get_task_phys_footprint_limit(task_t task)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max);
	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}

uint64_t get_task_internal(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.internal, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

uint64_t get_task_internal_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.internal_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

uint64_t get_task_purgeable_nonvolatile(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.purgeable_nonvolatile, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

uint64_t get_task_purgeable_nonvolatile_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.purgeable_nonvolatile_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

uint64_t get_task_alternate_accounting(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.alternate_accounting, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

uint64_t get_task_alternate_accounting_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.alternate_accounting_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

uint64_t get_task_page_table(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.page_table, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

uint64_t get_task_iokit_mapped(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.iokit_mapped, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

uint64_t get_task_cpu_time(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.cpu_time, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

/*
 * Return the task a thread belongs to.
 */
task_t get_threadtask(thread_t th)
{
	return(th->task);
}

/*
 * Return the lowest valid address in a VM map.
 */
vm_map_offset_t
get_map_min(
	vm_map_t	map)
{
	return(vm_map_min(map));
}

/*
 * Return the highest valid address in a VM map.
 */
vm_map_offset_t
get_map_max(
	vm_map_t	map)
{
	return(vm_map_max(map));
}

vm_map_size_t
get_vmmap_size(
	vm_map_t	map)
{
	return(map->size);
}

#if CONFIG_COREDUMP

static int
get_vmsubmap_entries(
	vm_map_t		map,
	vm_object_offset_t	start,
	vm_object_offset_t	end)
{
	int		total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(VME_SUBMAP(entry),
					VME_OFFSET(entry),
					(VME_OFFSET(entry) +
					entry->vme_end -
					entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}

int
get_vmmap_entries(
	vm_map_t	map)
{
	int		total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(VME_SUBMAP(entry),
					VME_OFFSET(entry),
					(VME_OFFSET(entry) +
					entry->vme_end -
					entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}
#endif /* CONFIG_COREDUMP */

/*
 * Return the task-wide user stop count.
 */
int
get_task_userstop(
	task_t task)
{
	return(task->user_stop_count);
}

/*
 * Return the per-thread user stop count.
 */
int
get_thread_userstop(
	thread_t th)
{
	return(th->user_stop_count);
}

/*
 * Return whether the task has been pid-suspended.
 */
boolean_t
get_task_pidsuspended(
	task_t task)
{
	return (task->pidsuspended);
}

/*
 * Return whether the task has been frozen.
 */
boolean_t
get_task_frozen(
	task_t task)
{
	return (task->frozen);
}

/*
 * Return whether the thread has a (non-safe) abort pending.
 */
boolean_t
thread_should_abort(
	thread_t th)
{
	return ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT);
}

/*
 * This routine is like thread_should_abort() above.  It checks to
 * see if the current thread is aborted.  But unlike above, it also
 * checks to see if the thread is safely aborted.  If so, it returns
 * that fact, and clears the condition (safe aborts only should
 * have a single effect, and a poll of the abort status
 * qualifies).
 */
boolean_t
current_thread_aborted (
		void)
{
	thread_t th = current_thread();
	spl_t s;

	if ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT &&
			(th->options & TH_OPT_INTMASK) != THREAD_UNINT)
		return (TRUE);
	if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->sched_flags & TH_SFLAG_ABORTSAFELY)
			th->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}

/*
 * Invoke the given callback on every thread in the task, with the
 * task locked.
 */
void
task_act_iterate_wth_args(
	task_t			task,
	void			(*func_callback)(thread_t, void *),
	void			*func_arg)
{
	thread_t	inc;

	task_lock(task);

	for (inc = (thread_t)(void *)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		(void) (*func_callback)(inc, func_arg);
		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	task_unlock(task);
}
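
/*
 * Hypothetical usage sketch (not part of this file); the callback and
 * counter names are illustrative only:
 *
 *	static void
 *	count_thread(thread_t thread __unused, void *arg)
 *	{
 *		(*(int *)arg)++;
 *	}
 *
 *	int n = 0;
 *	task_act_iterate_wth_args(task, count_thread, &n);
 */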

#include <sys/bsdtask_info.h>

void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo)
{
	vm_map_t map;
	task_absolutetime_info_data_t tinfo;
	thread_t thread;
	uint32_t cswitch = 0, numrunning = 0;
	uint32_t syscalls_unix = 0;
	uint32_t syscalls_mach = 0;

	task_lock(task);

	map = (task == kernel_task) ? kernel_map : task->map;

	ptinfo->pti_virtual_size = map->size;
	ptinfo->pti_resident_size =
		(mach_vm_size_t)(pmap_resident_count(map->pmap))
		* PAGE_SIZE_64;

	ptinfo->pti_policy = ((task != kernel_task) ?
		POLICY_TIMESHARE : POLICY_RR);

	tinfo.threads_user = tinfo.threads_system = 0;
	tinfo.total_user = task->total_user_time;
	tinfo.total_system = task->total_system_time;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		uint64_t tval;
		spl_t x;

		if (thread->options & TH_OPT_IDLE_THREAD)
			continue;

		x = splsched();
		thread_lock(thread);

		if ((thread->state & TH_RUN) == TH_RUN)
			numrunning++;
		cswitch += thread->c_switch;
		tval = timer_grab(&thread->user_timer);
		tinfo.threads_user += tval;
		tinfo.total_user += tval;

		tval = timer_grab(&thread->system_timer);

		if (thread->precise_user_kernel_time) {
			tinfo.threads_system += tval;
			tinfo.total_system += tval;
		} else {
			/* system_timer may represent either sys or user */
			tinfo.threads_user += tval;
			tinfo.total_user += tval;
		}

		syscalls_unix += thread->syscalls_unix;
		syscalls_mach += thread->syscalls_mach;

		thread_unlock(thread);
		splx(x);
	}

	ptinfo->pti_total_system = tinfo.total_system;
	ptinfo->pti_total_user = tinfo.total_user;
	ptinfo->pti_threads_system = tinfo.threads_system;
	ptinfo->pti_threads_user = tinfo.threads_user;

	ptinfo->pti_faults = task->faults;
	ptinfo->pti_pageins = task->pageins;
	ptinfo->pti_cow_faults = task->cow_faults;
	ptinfo->pti_messages_sent = task->messages_sent;
	ptinfo->pti_messages_received = task->messages_received;
	ptinfo->pti_syscalls_mach = task->syscalls_mach + syscalls_mach;
	ptinfo->pti_syscalls_unix = task->syscalls_unix + syscalls_unix;
	ptinfo->pti_csw = task->c_switch + cswitch;
	ptinfo->pti_threadnum = task->thread_count;
	ptinfo->pti_numrunning = numrunning;
	ptinfo->pti_priority = task->priority;

	task_unlock(task);
}

int
fill_taskthreadinfo(task_t task, uint64_t thaddr, int thuniqueid, struct proc_threadinfo_internal * ptinfo, void * vpp, int *vidp)
{
	thread_t thact;
	int err = 0;
	mach_msg_type_number_t count;
	thread_basic_info_data_t basic_info;
	kern_return_t kret;
	uint64_t addr = 0;

	task_lock(task);

	for (thact = (thread_t)(void *)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)thact); ) {
		addr = (thuniqueid == 0) ? thact->machine.cthread_self : thact->thread_id;
		if (addr == thaddr) {
			count = THREAD_BASIC_INFO_COUNT;
			if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
				err = 1;
				goto out;
			}
			ptinfo->pth_user_time = ((basic_info.user_time.seconds * (integer_t)NSEC_PER_SEC) + (basic_info.user_time.microseconds * (integer_t)NSEC_PER_USEC));
			ptinfo->pth_system_time = ((basic_info.system_time.seconds * (integer_t)NSEC_PER_SEC) + (basic_info.system_time.microseconds * (integer_t)NSEC_PER_USEC));

			ptinfo->pth_cpu_usage = basic_info.cpu_usage;
			ptinfo->pth_policy = basic_info.policy;
			ptinfo->pth_run_state = basic_info.run_state;
			ptinfo->pth_flags = basic_info.flags;
			ptinfo->pth_sleep_time = basic_info.sleep_time;
			ptinfo->pth_curpri = thact->sched_pri;
			ptinfo->pth_priority = thact->base_pri;
			ptinfo->pth_maxpriority = thact->max_priority;

			if ((vpp != NULL) && (thact->uthread != NULL))
				bsd_threadcdir(thact->uthread, vpp, vidp);
			bsd_getthreadname(thact->uthread, ptinfo->pth_name);
			err = 0;
			goto out;
		}
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}
	err = 1;

out:
	task_unlock(task);
	return(err);
}

int
fill_taskthreadlist(task_t task, void * buffer, int thcount)
{
	int numthr = 0;
	thread_t thact;
	uint64_t * uptr;
	uint64_t thaddr;

	uptr = (uint64_t *)buffer;

	task_lock(task);

	for (thact = (thread_t)(void *)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)thact); ) {
		thaddr = thact->machine.cthread_self;
		*uptr++ = thaddr;
		numthr++;
		if (numthr >= thcount)
			goto out;
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}

out:
	task_unlock(task);
	return (int)(numthr * sizeof(uint64_t));
}
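
/*
 * Illustrative caller sketch (not part of this file); the buffer must
 * hold at least thcount entries, and the names here are hypothetical:
 *
 *	uint64_t thaddrs[MAXTHREADS];
 *	int nbytes = fill_taskthreadlist(task, thaddrs, MAXTHREADS);
 *	int nthreads = nbytes / (int)sizeof(uint64_t);
 */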

int
get_numthreads(task_t task)
{
	return(task->thread_count);
}

/*
 * Gather the various pieces of info about the designated task,
 * and collect it all into a single rusage_info.
 */
int
fill_task_rusage(task_t task, rusage_info_current *ri)
{
	struct task_power_info powerinfo;

	assert(task != TASK_NULL);
	task_lock(task);

	task_power_info_locked(task, &powerinfo, NULL, NULL);
	ri->ri_pkg_idle_wkups = powerinfo.task_platform_idle_wakeups;
	ri->ri_interrupt_wkups = powerinfo.task_interrupt_wakeups;
	ri->ri_user_time = powerinfo.total_user;
	ri->ri_system_time = powerinfo.total_system;

	ledger_get_balance(task->ledger, task_ledgers.phys_footprint,
		(ledger_amount_t *)&ri->ri_phys_footprint);
	ledger_get_balance(task->ledger, task_ledgers.phys_mem,
		(ledger_amount_t *)&ri->ri_resident_size);
	ledger_get_balance(task->ledger, task_ledgers.wired_mem,
		(ledger_amount_t *)&ri->ri_wired_size);

	ri->ri_pageins = task->pageins;

	task_unlock(task);
	return (0);
}

void
fill_task_billed_usage(task_t task __unused, rusage_info_current *ri)
{
	bank_billed_balance_safe(task, &ri->ri_billed_system_time, &ri->ri_billed_energy);
	bank_serviced_balance_safe(task, &ri->ri_serviced_system_time, &ri->ri_serviced_energy);
}

int
fill_task_io_rusage(task_t task, rusage_info_current *ri)
{
	assert(task != TASK_NULL);
	task_lock(task);

	if (task->task_io_stats) {
		ri->ri_diskio_bytesread = task->task_io_stats->disk_reads.size;
		ri->ri_diskio_byteswritten = (task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size);
	} else {
		/* I/O stats unavailable */
		ri->ri_diskio_bytesread = 0;
		ri->ri_diskio_byteswritten = 0;
	}
	task_unlock(task);
	return (0);
}

int
fill_task_qos_rusage(task_t task, rusage_info_current *ri)
{
	thread_t thread;

	assert(task != TASK_NULL);
	task_lock(task);

	/* Roll up the QoS time of all the threads into the task */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		if (thread->options & TH_OPT_IDLE_THREAD)
			continue;

		thread_update_qos_cpu_time(thread);
	}
	ri->ri_cpu_time_qos_default = task->cpu_time_qos_stats.cpu_time_qos_default;
	ri->ri_cpu_time_qos_maintenance = task->cpu_time_qos_stats.cpu_time_qos_maintenance;
	ri->ri_cpu_time_qos_background = task->cpu_time_qos_stats.cpu_time_qos_background;
	ri->ri_cpu_time_qos_utility = task->cpu_time_qos_stats.cpu_time_qos_utility;
	ri->ri_cpu_time_qos_legacy = task->cpu_time_qos_stats.cpu_time_qos_legacy;
	ri->ri_cpu_time_qos_user_initiated = task->cpu_time_qos_stats.cpu_time_qos_user_initiated;
	ri->ri_cpu_time_qos_user_interactive = task->cpu_time_qos_stats.cpu_time_qos_user_interactive;

	task_unlock(task);
	return (0);
}

void
fill_task_monotonic_rusage(task_t task, rusage_info_current *ri)
{
#if MONOTONIC
	if (!mt_core_supported) {
		return;
	}

	assert(task != TASK_NULL);

	uint64_t counts[MT_CORE_NFIXED] = {};
	mt_fixed_task_counts(task, counts);
#ifdef MT_CORE_INSTRS
	ri->ri_instructions = counts[MT_CORE_INSTRS];
#endif /* defined(MT_CORE_INSTRS) */
	ri->ri_cycles = counts[MT_CORE_CYCLES];
#else /* MONOTONIC */
#pragma unused(task, ri)
#endif /* !MONOTONIC */
}

uint64_t
get_task_logical_writes(task_t task)
{
	assert(task != TASK_NULL);
	struct ledger_entry_info lei;

	task_lock(task);
	ledger_get_entry_info(task->ledger, task_ledgers.logical_writes, &lei);

	task_unlock(task);
	return lei.lei_balance;
}

uint64_t
get_task_dispatchqueue_serialno_offset(task_t task)
{
	uint64_t dq_serialno_offset = 0;

	if (task->bsd_info) {
		dq_serialno_offset = get_dispatchqueue_serialno_offset_from_proc(task->bsd_info);
	}

	return dq_serialno_offset;
}

uint64_t
get_task_uniqueid(task_t task)
{
	if (task->bsd_info) {
		return proc_uniqueid(task->bsd_info);
	} else {
		return UINT64_MAX;
	}
}

int
get_task_version(task_t task)
{
	if (task->bsd_info) {
		return proc_pidversion(task->bsd_info);
	} else {
		return INT_MAX;
	}
}

#if CONFIG_MACF
struct label *
get_task_crash_label(task_t task)
{
	return task->crash_label;
}
#endif