/* apple/xnu (xnu-6153.141.1): osfmk/kern/bsd_kern.c */
/*
 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/machine/vm_param.h>
#include <mach/task.h>

#include <kern/kern_types.h>
#include <kern/ledger.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/spl.h>
#include <kern/ast.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h> /* last */
#include <sys/resource.h>
#include <sys/signal.h>
#include <sys/errno.h>

#if MONOTONIC
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* MONOTONIC */

#include <machine/limits.h>
#include <sys/codesign.h> /* CS_CDHASH_LEN */

#undef thread_should_halt

/* BSD KERN COMPONENT INTERFACE */

extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */

thread_t get_firstthread(task_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
kern_return_t get_signalact(task_t, thread_t *, int);
int fill_task_rusage(task_t task, rusage_info_current *ri);
int fill_task_io_rusage(task_t task, rusage_info_current *ri);
int fill_task_qos_rusage(task_t task, rusage_info_current *ri);
void fill_task_monotonic_rusage(task_t task, rusage_info_current *ri);
uint64_t get_task_logical_writes(task_t task, boolean_t external);
void fill_task_billed_usage(task_t task, rusage_info_current *ri);
void task_bsdtask_kill(task_t);

extern uint64_t get_dispatchqueue_serialno_offset_from_proc(void *p);
extern uint64_t get_dispatchqueue_label_offset_from_proc(void *p);
extern uint64_t proc_uniqueid(void *p);
extern int proc_pidversion(void *p);
extern int proc_getcdhash(void *p, char *cdhash);

#if MACH_BSD
extern void psignal(void *, int);
#endif

/*
 *
 */
void *
get_bsdtask_info(task_t t)
{
	return t->bsd_info;
}

void
task_bsdtask_kill(task_t t)
{
	void * bsd_info = get_bsdtask_info(t);
	if (bsd_info != NULL) {
		psignal(bsd_info, SIGKILL);
	}
}
/*
 *
 */
void *
get_bsdthreadtask_info(thread_t th)
{
	return th->task != TASK_NULL ? th->task->bsd_info : NULL;
}

/*
 *
 */
void
set_bsdtask_info(task_t t, void * v)
{
	t->bsd_info = v;
}

/*
 *
 */
void *
get_bsdthread_info(thread_t th)
{
	return th->uthread;
}

/*
 * This is used to remember any FS error from VNOP_PAGEIN code when
 * invoked under vm_fault(). The value is an errno style value. It can
 * be retrieved by exception handlers using thread_get_state().
 */
void
set_thread_pagein_error(thread_t th, int error)
{
	assert(th == current_thread());
	if (error == 0 || th->t_pagein_error == 0) {
		th->t_pagein_error = error;
	}
}

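/*
 * Illustrative sketch (not part of the original file): a filesystem pagein
 * path could record a failure for the faulting thread like this. The
 * VNOP_PAGEIN call and its arguments are placeholders; only
 * set_thread_pagein_error() and current_thread() are real interfaces here.
 *
 *	error = VNOP_PAGEIN(vp, pl, pl_offset, f_offset, size, flags, ctx);
 *	if (error != 0) {
 *		// Remember the first FS error so an exception handler can
 *		// retrieve it later via thread_get_state().
 *		set_thread_pagein_error(current_thread(), error);
 *	}
 */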
#if defined(__x86_64__)
/*
 * Returns non-zero if the thread has a non-NULL task
 * and that task has an LDT.
 */
int
thread_task_has_ldt(thread_t th)
{
	return th->task && th->task->i386_ldt != 0;
}
#endif /* __x86_64__ */

/*
 * XXX
 */
int get_thread_lock_count(thread_t th); /* forced forward */
int
get_thread_lock_count(thread_t th)
{
	return th->mutex_count;
}

/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here. We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_t
get_firstthread(task_t task)
{
	thread_t thread = (thread_t)(void *)queue_first(&task->threads);

	if (queue_end(&task->threads, (queue_entry_t)thread)) {
		thread = THREAD_NULL;
	}

	if (!task->active) {
		return THREAD_NULL;
	}

	return thread;
}

kern_return_t
get_signalact(
	task_t task,
	thread_t *result_out,
	int setast)
{
	kern_return_t result = KERN_SUCCESS;
	thread_t inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return KERN_FAILURE;
	}

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		thread_mtx_lock(inc);
		if (inc->active &&
		    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result_out) {
		*result_out = thread;
	}

	if (thread) {
		if (setast) {
			act_set_astbsd(thread);
		}

		thread_mtx_unlock(thread);
	} else {
		result = KERN_FAILURE;
	}

	task_unlock(task);

	return result;
}

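/*
 * Hypothetical caller sketch: BSD signal delivery can pick a thread that is
 * able to take the signal and mark it with a BSD AST in one call. The local
 * variable name is illustrative.
 *
 *	thread_t sig_thread;
 *	if (get_signalact(task, &sig_thread, 1) == KERN_SUCCESS) {
 *		// sig_thread will notice the AST on its way back to
 *		// user space and process the pending signal there.
 *	}
 */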

kern_return_t
check_actforsig(
	task_t task,
	thread_t thread,
	int setast)
{
	kern_return_t result = KERN_FAILURE;
	thread_t inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return KERN_FAILURE;
	}

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			if (inc->active &&
			    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast) {
			act_set_astbsd(thread);
		}

		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return result;
}

ledger_t
get_task_ledger(task_t t)
{
	return t->ledger;
}

/*
 * This is only safe to call from a thread executing in
 * the task's context or if the task is locked. Otherwise,
 * the map could be switched for the task (and freed) before
 * we go to return it here.
 */
vm_map_t
get_task_map(task_t t)
{
	return t->map;
}

vm_map_t
get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL) {
		return VM_MAP_NULL;
	}

	task_lock(t);
	if (!t->active) {
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	vm_map_reference_swap(m);
	task_unlock(t);
	return m;
}

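/*
 * Intended usage sketch (illustrative, not from the original file): take a
 * reference so the map cannot be torn down underneath the caller, then drop
 * it with vm_map_deallocate() when done.
 *
 *	vm_map_t map = get_task_map_reference(task);
 *	if (map != VM_MAP_NULL) {
 *		// ... inspect the map ...
 *		vm_map_deallocate(map);
 *	}
 */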
/*
 *
 */
ipc_space_t
get_task_ipcspace(task_t t)
{
	return t->itk_space;
}

int
get_task_numacts(task_t t)
{
	return t->thread_count;
}

/* Does this machine need a 64-bit register set for signal handlers? */
int
is_64signalregset(void)
{
	if (task_has_64Bit_data(current_task())) {
		return 1;
	}

	return 0;
}

/*
 * Swap in a new map for the task/thread pair; the old map reference is
 * returned. Also does a pmap switch if the thread provided is the current
 * thread.
 */
vm_map_t
swap_task_map(task_t task, thread_t thread, vm_map_t map)
{
	vm_map_t old_map;
	boolean_t doswitch = (thread == current_thread()) ? TRUE : FALSE;

	if (task != thread->task) {
		panic("swap_task_map");
	}

	task_lock(task);
	mp_disable_preemption();

	old_map = task->map;
	thread->map = task->map = map;
	vm_commit_pagezero_status(map);

	if (doswitch) {
		PMAP_SWITCH_USER(thread, map, cpu_number());
	}
	mp_enable_preemption();
	task_unlock(task);

#if defined(__x86_64__) && NCOPY_WINDOWS > 0
	inval_copy_windows(thread);
#endif

	return old_map;
}

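/*
 * Hedged usage sketch: exec-style code might install a freshly built map
 * for the current thread/task pair and drop the old reference (the local
 * names here are illustrative):
 *
 *	vm_map_t old_map = swap_task_map(task, current_thread(), new_map);
 *	vm_map_deallocate(old_map);
 */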
/*
 *
 * This is only safe to call from a thread executing in
 * the task's context or if the task is locked. Otherwise,
 * the map could be switched for the task (and freed) before
 * we go to return it here.
 */
pmap_t
get_task_pmap(task_t t)
{
	return t->map->pmap;
}

/*
 *
 */
uint64_t
get_task_resident_size(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map : task->map;
	return (uint64_t)pmap_resident_count(map->pmap) * PAGE_SIZE_64;
}

uint64_t
get_task_compressed(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map : task->map;
	return (uint64_t)pmap_compressed(map->pmap) * PAGE_SIZE_64;
}

uint64_t
get_task_resident_max(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map : task->map;
	return (uint64_t)pmap_resident_max(map->pmap) * PAGE_SIZE_64;
}

uint64_t
get_task_purgeable_size(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;
	uint64_t volatile_size = 0;

	ret = ledger_get_entries(task->ledger, task_ledgers.purgeable_volatile, &credit, &debit);
	if (ret != KERN_SUCCESS) {
		return 0;
	}

	volatile_size += (credit - debit);

	ret = ledger_get_entries(task->ledger, task_ledgers.purgeable_volatile_compressed, &credit, &debit);
	if (ret != KERN_SUCCESS) {
		return 0;
	}

	volatile_size += (credit - debit);

	return volatile_size;
}

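/*
 * The ledger-backed getters below all follow one pattern: fetch the
 * (credit, debit) pair for a single ledger entry and report the balance,
 * treating any ledger error as zero. A minimal sketch of that pattern
 * (the helper name is invented for illustration):
 *
 *	static uint64_t
 *	task_ledger_balance(task_t task, int entry)
 *	{
 *		ledger_amount_t credit, debit;
 *
 *		if (ledger_get_entries(task->ledger, entry,
 *		    &credit, &debit) != KERN_SUCCESS) {
 *			return 0;
 *		}
 *		return (uint64_t)(credit - debit);
 *	}
 */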
/*
 *
 */
uint64_t
get_task_phys_footprint(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.phys_footprint, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

#if CONFIG_LEDGER_INTERVAL_MAX
/*
 *
 */
uint64_t
get_task_phys_footprint_interval_max(task_t task, int reset)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_interval_max(task->ledger, task_ledgers.phys_footprint, &max, reset);

	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}
#endif /* CONFIG_LEDGER_INTERVAL_MAX */

/*
 *
 */
uint64_t
get_task_phys_footprint_lifetime_max(task_t task)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_lifetime_max(task->ledger, task_ledgers.phys_footprint, &max);

	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}

/*
 *
 */
uint64_t
get_task_phys_footprint_limit(task_t task)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max);
	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}

uint64_t
get_task_internal(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.internal, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_internal_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.internal_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_purgeable_nonvolatile(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.purgeable_nonvolatile, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_purgeable_nonvolatile_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.purgeable_nonvolatile_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_alternate_accounting(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.alternate_accounting, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_alternate_accounting_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.alternate_accounting_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_page_table(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.page_table, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_iokit_mapped(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.iokit_mapped, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_network_nonvolatile(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.network_nonvolatile, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_network_nonvolatile_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.network_nonvolatile_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_wired_mem(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.wired_mem, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}


uint64_t
get_task_cpu_time(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.cpu_time, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint32_t
get_task_loadTag(task_t task)
{
	return os_atomic_load(&task->loadTag, relaxed);
}

uint32_t
set_task_loadTag(task_t task, uint32_t loadTag)
{
	return os_atomic_xchg(&task->loadTag, loadTag, relaxed);
}

/*
 *
 */
task_t
get_threadtask(thread_t th)
{
	return th->task;
}

/*
 *
 */
vm_map_offset_t
get_map_min(
	vm_map_t map)
{
	return vm_map_min(map);
}

/*
 *
 */
vm_map_offset_t
get_map_max(
	vm_map_t map)
{
	return vm_map_max(map);
}
vm_map_size_t
get_vmmap_size(
	vm_map_t map)
{
	return map->size;
}

#if CONFIG_COREDUMP

static int
get_vmsubmap_entries(
	vm_map_t map,
	vm_object_offset_t start,
	vm_object_offset_t end)
{
	int total_entries = 0;
	vm_map_entry_t entry;

	if (not_in_kdp) {
		vm_map_lock(map);
	}
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
			    get_vmsubmap_entries(VME_SUBMAP(entry),
			    VME_OFFSET(entry),
			    (VME_OFFSET(entry) +
			    entry->vme_end -
			    entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp) {
		vm_map_unlock(map);
	}
	return total_entries;
}

int
get_vmmap_entries(
	vm_map_t map)
{
	int total_entries = 0;
	vm_map_entry_t entry;

	if (not_in_kdp) {
		vm_map_lock(map);
	}
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
			    get_vmsubmap_entries(VME_SUBMAP(entry),
			    VME_OFFSET(entry),
			    (VME_OFFSET(entry) +
			    entry->vme_end -
			    entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp) {
		vm_map_unlock(map);
	}
	return total_entries;
}
#endif /* CONFIG_COREDUMP */

/*
 *
 */
int
get_task_userstop(
	task_t task)
{
	return task->user_stop_count;
}

/*
 *
 */
int
get_thread_userstop(
	thread_t th)
{
	return th->user_stop_count;
}

/*
 *
 */
boolean_t
get_task_pidsuspended(
	task_t task)
{
	return task->pidsuspended;
}

/*
 *
 */
boolean_t
get_task_frozen(
	task_t task)
{
	return task->frozen;
}

/*
 *
 */
boolean_t
thread_should_abort(
	thread_t th)
{
	return (th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT;
}

/*
 * This routine is like thread_should_abort() above. It checks to
 * see if the current thread is aborted. But unlike the above, it also
 * checks to see if the thread was safely aborted. If so, it returns
 * that fact, and clears the condition (safe aborts should only
 * have a single effect, and a poll of the abort status
 * qualifies).
 */
boolean_t
current_thread_aborted(
	void)
{
	thread_t th = current_thread();
	spl_t s;

	if ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT &&
	    (th->options & TH_OPT_INTMASK) != THREAD_UNINT) {
		return TRUE;
	}
	if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
			th->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
		}
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}

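/*
 * Hypothetical polling sketch: a long-running loop in a BSD code path
 * might check for aborts between units of work. Because a safe abort is
 * consumed by the poll, one check per iteration is the intended idiom.
 *
 *	while (resid > 0) {
 *		if (current_thread_aborted()) {
 *			return EINTR;
 *		}
 *		// ... process the next chunk ...
 *	}
 */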
/*
 *
 */
void
task_act_iterate_wth_args(
	task_t task,
	void (*func_callback)(thread_t, void *),
	void *func_arg)
{
	thread_t inc;

	task_lock(task);

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		(void) (*func_callback)(inc, func_arg);
		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	task_unlock(task);
}


#include <sys/bsdtask_info.h>

void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo)
{
	vm_map_t map;
	task_absolutetime_info_data_t tinfo;
	thread_t thread;
	uint32_t cswitch = 0, numrunning = 0;
	uint32_t syscalls_unix = 0;
	uint32_t syscalls_mach = 0;

	task_lock(task);

	map = (task == kernel_task) ? kernel_map : task->map;

	ptinfo->pti_virtual_size = map->size;
	ptinfo->pti_resident_size =
	    (mach_vm_size_t)(pmap_resident_count(map->pmap))
	    * PAGE_SIZE_64;

	ptinfo->pti_policy = ((task != kernel_task) ?
	    POLICY_TIMESHARE : POLICY_RR);

	tinfo.threads_user = tinfo.threads_system = 0;
	tinfo.total_user = task->total_user_time;
	tinfo.total_system = task->total_system_time;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		uint64_t tval;
		spl_t x;

		if (thread->options & TH_OPT_IDLE_THREAD) {
			continue;
		}

		x = splsched();
		thread_lock(thread);

		if ((thread->state & TH_RUN) == TH_RUN) {
			numrunning++;
		}
		cswitch += thread->c_switch;
		tval = timer_grab(&thread->user_timer);
		tinfo.threads_user += tval;
		tinfo.total_user += tval;

		tval = timer_grab(&thread->system_timer);

		if (thread->precise_user_kernel_time) {
			tinfo.threads_system += tval;
			tinfo.total_system += tval;
		} else {
			/* system_timer may represent either sys or user */
			tinfo.threads_user += tval;
			tinfo.total_user += tval;
		}

		syscalls_unix += thread->syscalls_unix;
		syscalls_mach += thread->syscalls_mach;

		thread_unlock(thread);
		splx(x);
	}

	ptinfo->pti_total_system = tinfo.total_system;
	ptinfo->pti_total_user = tinfo.total_user;
	ptinfo->pti_threads_system = tinfo.threads_system;
	ptinfo->pti_threads_user = tinfo.threads_user;

	ptinfo->pti_faults = task->faults;
	ptinfo->pti_pageins = task->pageins;
	ptinfo->pti_cow_faults = task->cow_faults;
	ptinfo->pti_messages_sent = task->messages_sent;
	ptinfo->pti_messages_received = task->messages_received;
	ptinfo->pti_syscalls_mach = task->syscalls_mach + syscalls_mach;
	ptinfo->pti_syscalls_unix = task->syscalls_unix + syscalls_unix;
	ptinfo->pti_csw = task->c_switch + cswitch;
	ptinfo->pti_threadnum = task->thread_count;
	ptinfo->pti_numrunning = numrunning;
	ptinfo->pti_priority = task->priority;

	task_unlock(task);
}

int
fill_taskthreadinfo(task_t task, uint64_t thaddr, bool thuniqueid, struct proc_threadinfo_internal * ptinfo, void * vpp, int *vidp)
{
	thread_t thact;
	int err = 0;
	mach_msg_type_number_t count;
	thread_basic_info_data_t basic_info;
	kern_return_t kret;
	uint64_t addr = 0;

	task_lock(task);

	for (thact = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)thact);) {
		addr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
		if (addr == thaddr) {
			count = THREAD_BASIC_INFO_COUNT;
			if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
				err = 1;
				goto out;
			}
			ptinfo->pth_user_time = (((uint64_t)basic_info.user_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.user_time.microseconds * NSEC_PER_USEC));
			ptinfo->pth_system_time = (((uint64_t)basic_info.system_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.system_time.microseconds * NSEC_PER_USEC));

			ptinfo->pth_cpu_usage = basic_info.cpu_usage;
			ptinfo->pth_policy = basic_info.policy;
			ptinfo->pth_run_state = basic_info.run_state;
			ptinfo->pth_flags = basic_info.flags;
			ptinfo->pth_sleep_time = basic_info.sleep_time;
			ptinfo->pth_curpri = thact->sched_pri;
			ptinfo->pth_priority = thact->base_pri;
			ptinfo->pth_maxpriority = thact->max_priority;

			if ((vpp != NULL) && (thact->uthread != NULL)) {
				bsd_threadcdir(thact->uthread, vpp, vidp);
			}
			bsd_getthreadname(thact->uthread, ptinfo->pth_name);
			err = 0;
			goto out;
		}
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}
	err = 1;

out:
	task_unlock(task);
	return err;
}

int
fill_taskthreadlist(task_t task, void * buffer, int thcount, bool thuniqueid)
{
	int numthr = 0;
	thread_t thact;
	uint64_t * uptr;
	uint64_t thaddr;

	uptr = (uint64_t *)buffer;

	task_lock(task);

	for (thact = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)thact);) {
		thaddr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
		*uptr++ = thaddr;
		numthr++;
		if (numthr >= thcount) {
			goto out;
		}
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}

out:
	task_unlock(task);
	return (int)(numthr * sizeof(uint64_t));
}

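/*
 * Caller-side sketch (hypothetical): the buffer must have room for thcount
 * 64-bit thread identifiers, and the return value is the number of bytes
 * actually filled in.
 *
 *	uint64_t *thlist = kalloc(thcount * sizeof(uint64_t));
 *	int nbytes = fill_taskthreadlist(task, thlist, thcount, false);
 *	int nthreads = nbytes / (int)sizeof(uint64_t);
 */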
int
get_numthreads(task_t task)
{
	return task->thread_count;
}

/*
 * Gather the various pieces of info about the designated task,
 * and collect it all into a single rusage_info.
 */
int
fill_task_rusage(task_t task, rusage_info_current *ri)
{
	struct task_power_info powerinfo;

	uint64_t runnable_time = 0;

	assert(task != TASK_NULL);
	task_lock(task);

	task_power_info_locked(task, &powerinfo, NULL, NULL, &runnable_time);
	ri->ri_pkg_idle_wkups = powerinfo.task_platform_idle_wakeups;
	ri->ri_interrupt_wkups = powerinfo.task_interrupt_wakeups;
	ri->ri_user_time = powerinfo.total_user;
	ri->ri_system_time = powerinfo.total_system;
	ri->ri_runnable_time = runnable_time;

	ledger_get_balance(task->ledger, task_ledgers.phys_footprint,
	    (ledger_amount_t *)&ri->ri_phys_footprint);
	ledger_get_balance(task->ledger, task_ledgers.phys_mem,
	    (ledger_amount_t *)&ri->ri_resident_size);
	ledger_get_balance(task->ledger, task_ledgers.wired_mem,
	    (ledger_amount_t *)&ri->ri_wired_size);

	ri->ri_pageins = task->pageins;

	task_unlock(task);
	return 0;
}

void
fill_task_billed_usage(task_t task, rusage_info_current *ri)
{
	bank_billed_balance_safe(task, &ri->ri_billed_system_time, &ri->ri_billed_energy);
	bank_serviced_balance_safe(task, &ri->ri_serviced_system_time, &ri->ri_serviced_energy);
}

int
fill_task_io_rusage(task_t task, rusage_info_current *ri)
{
	assert(task != TASK_NULL);
	task_lock(task);

	if (task->task_io_stats) {
		ri->ri_diskio_bytesread = task->task_io_stats->disk_reads.size;
		ri->ri_diskio_byteswritten = (task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size);
	} else {
		/* I/O stats unavailable */
		ri->ri_diskio_bytesread = 0;
		ri->ri_diskio_byteswritten = 0;
	}
	task_unlock(task);
	return 0;
}

int
fill_task_qos_rusage(task_t task, rusage_info_current *ri)
{
	thread_t thread;

	assert(task != TASK_NULL);
	task_lock(task);

	/* Roll up the QoS time of all the threads to the task */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		if (thread->options & TH_OPT_IDLE_THREAD) {
			continue;
		}

		thread_update_qos_cpu_time(thread);
	}
	ri->ri_cpu_time_qos_default = task->cpu_time_eqos_stats.cpu_time_qos_default;
	ri->ri_cpu_time_qos_maintenance = task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
	ri->ri_cpu_time_qos_background = task->cpu_time_eqos_stats.cpu_time_qos_background;
	ri->ri_cpu_time_qos_utility = task->cpu_time_eqos_stats.cpu_time_qos_utility;
	ri->ri_cpu_time_qos_legacy = task->cpu_time_eqos_stats.cpu_time_qos_legacy;
	ri->ri_cpu_time_qos_user_initiated = task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
	ri->ri_cpu_time_qos_user_interactive = task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;

	task_unlock(task);
	return 0;
}

void
fill_task_monotonic_rusage(task_t task, rusage_info_current *ri)
{
#if MONOTONIC
	if (!mt_core_supported) {
		return;
	}

	assert(task != TASK_NULL);

	uint64_t counts[MT_CORE_NFIXED] = { 0 };
	mt_fixed_task_counts(task, counts);
#ifdef MT_CORE_INSTRS
	ri->ri_instructions = counts[MT_CORE_INSTRS];
#endif /* defined(MT_CORE_INSTRS) */
	ri->ri_cycles = counts[MT_CORE_CYCLES];
#else /* MONOTONIC */
#pragma unused(task, ri)
#endif /* !MONOTONIC */
}

uint64_t
get_task_logical_writes(task_t task, boolean_t external)
{
	assert(task != TASK_NULL);
	struct ledger_entry_info lei;

	task_lock(task);

	if (external == FALSE) {
		ledger_get_entry_info(task->ledger, task_ledgers.logical_writes, &lei);
	} else {
		ledger_get_entry_info(task->ledger, task_ledgers.logical_writes_to_external, &lei);
	}

	task_unlock(task);
	return lei.lei_balance;
}

uint64_t
get_task_dispatchqueue_serialno_offset(task_t task)
{
	uint64_t dq_serialno_offset = 0;

	if (task->bsd_info) {
		dq_serialno_offset = get_dispatchqueue_serialno_offset_from_proc(task->bsd_info);
	}

	return dq_serialno_offset;
}

uint64_t
get_task_dispatchqueue_label_offset(task_t task)
{
	uint64_t dq_label_offset = 0;

	if (task->bsd_info) {
		dq_label_offset = get_dispatchqueue_label_offset_from_proc(task->bsd_info);
	}

	return dq_label_offset;
}

uint64_t
get_task_uniqueid(task_t task)
{
	if (task->bsd_info) {
		return proc_uniqueid(task->bsd_info);
	} else {
		return UINT64_MAX;
	}
}

int
get_task_version(task_t task)
{
	if (task->bsd_info) {
		return proc_pidversion(task->bsd_info);
	} else {
		return INT_MAX;
	}
}

#if CONFIG_MACF
struct label *
get_task_crash_label(task_t task)
{
	return task->crash_label;
}
#endif

int
fill_taskipctableinfo(task_t task, uint32_t *table_size, uint32_t *table_free)
{
	ipc_space_t space = task->itk_space;
	if (space == NULL) {
		return -1;
	}

	is_read_lock(space);
	if (!is_active(space)) {
		is_read_unlock(space);
		return -1;
	}

	*table_size = space->is_table_size;
	*table_free = space->is_table_free;

	is_read_unlock(space);

	return 0;
}

int
get_task_cdhash(task_t task, char cdhash[static CS_CDHASH_LEN])
{
	int result = 0;

	task_lock(task);
	result = task->bsd_info ? proc_getcdhash(task->bsd_info, cdhash) : ESRCH;
	task_unlock(task);

	return result;
}
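
/*
 * Usage sketch (hypothetical caller): cdhash must point at CS_CDHASH_LEN
 * bytes of storage; 0 means the code-directory hash was copied out, and
 * ESRCH means the task has no associated BSD proc.
 *
 *	char cdhash[CS_CDHASH_LEN];
 *	if (get_task_cdhash(task, cdhash) == 0) {
 *		// ... compare or log the hash ...
 *	}
 */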