/*
 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/machine/vm_param.h>
#include <mach/task.h>

#include <kern/kern_types.h>
#include <kern/ledger.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/spl.h>
#include <kern/ast.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h> /* last */
#include <sys/resource.h>
#include <sys/signal.h>

#if MONOTONIC
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* MONOTONIC */

#include <machine/limits.h>

#undef thread_should_halt

/* BSD KERN COMPONENT INTERFACE */

extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */

thread_t get_firstthread(task_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
kern_return_t get_signalact(task_t, thread_t *, int);
int fill_task_rusage(task_t task, rusage_info_current *ri);
int fill_task_io_rusage(task_t task, rusage_info_current *ri);
int fill_task_qos_rusage(task_t task, rusage_info_current *ri);
void fill_task_monotonic_rusage(task_t task, rusage_info_current *ri);
uint64_t get_task_logical_writes(task_t task);
void fill_task_billed_usage(task_t task, rusage_info_current *ri);
void task_bsdtask_kill(task_t);

extern uint64_t get_dispatchqueue_serialno_offset_from_proc(void *p);
extern uint64_t proc_uniqueid(void *p);
extern int proc_pidversion(void *p);

#if MACH_BSD
extern void psignal(void *, int);
#endif

/*
 * Return the BSD proc structure (bsd_info) associated with a Mach task.
 */
void *
get_bsdtask_info(task_t t)
{
	return t->bsd_info;
}

void
task_bsdtask_kill(task_t t)
{
	void * bsd_info = get_bsdtask_info(t);
	if (bsd_info != NULL) {
		psignal(bsd_info, SIGKILL);
	}
}

/*
 * Return the BSD proc structure of the task owning the given thread,
 * or NULL if the thread has no task.
 */
void *
get_bsdthreadtask_info(thread_t th)
{
	return th->task != TASK_NULL ? th->task->bsd_info : NULL;
}

/*
 * Attach a BSD proc structure to a Mach task.
 */
void
set_bsdtask_info(task_t t, void * v)
{
	t->bsd_info = v;
}

/*
 * Return the uthread associated with a thread.
 */
void *
get_bsdthread_info(thread_t th)
{
	return th->uthread;
}

#if defined(__x86_64__)
/*
 * Returns non-zero if the thread has a non-NULL task
 * and that task has an LDT.
 */
int
thread_task_has_ldt(thread_t th)
{
	return th->task && th->task->i386_ldt != 0;
}
#endif /* __x86_64__ */

/*
 * Return the number of mutexes currently held by the thread.
 */
int get_thread_lock_count(thread_t th); /* forced forward */
int
get_thread_lock_count(thread_t th)
{
	return th->mutex_count;
}

/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here.  We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_t
get_firstthread(task_t task)
{
	thread_t thread = (thread_t)(void *)queue_first(&task->threads);

	if (queue_end(&task->threads, (queue_entry_t)thread)) {
		thread = THREAD_NULL;
	}

	if (!task->active) {
		return THREAD_NULL;
	}

	return thread;
}

kern_return_t
get_signalact(
	task_t task,
	thread_t *result_out,
	int setast)
{
	kern_return_t result = KERN_SUCCESS;
	thread_t inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return KERN_FAILURE;
	}

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		thread_mtx_lock(inc);
		if (inc->active &&
		    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result_out) {
		*result_out = thread;
	}

	if (thread) {
		if (setast) {
			act_set_astbsd(thread);
		}

		thread_mtx_unlock(thread);
	} else {
		result = KERN_FAILURE;
	}

	task_unlock(task);

	return result;
}
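
/*
 * Illustrative sketch (not part of the original source): how a caller
 * such as the BSD signal-delivery path might use get_signalact() to
 * pick a signal-eligible thread and mark it with the BSD AST.  Guarded
 * out of the build; the function name is hypothetical.
 */
#if 0
static void
example_mark_thread_for_signal(task_t task)
{
	thread_t thread;

	/*
	 * Passing setast != 0 asks get_signalact() to call
	 * act_set_astbsd() on the chosen thread before returning.
	 */
	if (get_signalact(task, &thread, 1) == KERN_SUCCESS) {
		/*
		 * The thread will notice the AST and run the BSD signal
		 * code the next time it crosses the kernel/user boundary.
		 */
	}
}
#endif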

kern_return_t
check_actforsig(
	task_t task,
	thread_t thread,
	int setast)
{
	kern_return_t result = KERN_FAILURE;
	thread_t inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return KERN_FAILURE;
	}

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			if (inc->active &&
			    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast) {
			act_set_astbsd(thread);
		}

		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return result;
}

ledger_t
get_task_ledger(task_t t)
{
	return t->ledger;
}

/*
 * This is only safe to call from a thread executing in
 * the task's context or if the task is locked.  Otherwise,
 * the map could be switched for the task (and freed) before
 * we go to return it here.
 */
vm_map_t
get_task_map(task_t t)
{
	return t->map;
}

vm_map_t
get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL) {
		return VM_MAP_NULL;
	}

	task_lock(t);
	if (!t->active) {
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	vm_map_reference_swap(m);
	task_unlock(t);
	return m;
}
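
/*
 * Illustrative sketch (not part of the original source): the intended
 * take-reference/use/release pattern for get_task_map_reference().
 * Guarded out of the build; the function name is hypothetical.
 */
#if 0
static void
example_use_task_map(task_t task)
{
	vm_map_t map = get_task_map_reference(task);

	if (map != VM_MAP_NULL) {
		/* ... inspect the map; the reference keeps it alive ... */
		vm_map_deallocate(map); /* drop the reference we took */
	}
}
#endif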

/*
 * Return the IPC space for a task.
 */
ipc_space_t
get_task_ipcspace(task_t t)
{
	return t->itk_space;
}

int
get_task_numactivethreads(task_t task)
{
	thread_t inc;
	int num_active_thr = 0;
	task_lock(task);

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);
	    inc = (thread_t)(void *)queue_next(&inc->task_threads)) {
		if (inc->active) {
			num_active_thr++;
		}
	}
	task_unlock(task);
	return num_active_thr;
}

int
get_task_numacts(task_t t)
{
	return t->thread_count;
}

/* Does this machine need the 64-bit register set for its signal handlers? */
int
is_64signalregset(void)
{
	if (task_has_64Bit_data(current_task())) {
		return 1;
	}

	return 0;
}

/*
 * Swap in a new map for the task/thread pair; the old map reference is
 * returned.  Also does a pmap switch if the thread provided is the
 * current thread.
 */
vm_map_t
swap_task_map(task_t task, thread_t thread, vm_map_t map)
{
	vm_map_t old_map;
	boolean_t doswitch = (thread == current_thread()) ? TRUE : FALSE;

	if (task != thread->task) {
		panic("swap_task_map");
	}

	task_lock(task);
	mp_disable_preemption();

	old_map = task->map;
	thread->map = task->map = map;
	vm_commit_pagezero_status(map);

	if (doswitch) {
		PMAP_SWITCH_USER(thread, map, cpu_number());
	}
	mp_enable_preemption();
	task_unlock(task);

#if defined(__x86_64__) && NCOPY_WINDOWS > 0
	inval_copy_windows(thread);
#endif

	return old_map;
}
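
/*
 * Illustrative sketch (not part of the original source): exec-style use
 * of swap_task_map(), installing a fresh map and releasing the returned
 * reference on the old one.  Guarded out of the build; the function
 * name is hypothetical.
 */
#if 0
static void
example_install_new_map(task_t task, thread_t thread, vm_map_t new_map)
{
	vm_map_t old_map;

	/* The old map's reference is returned; release it when done. */
	old_map = swap_task_map(task, thread, new_map);
	vm_map_deallocate(old_map);
}
#endif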

/*
 * This is only safe to call from a thread executing in
 * the task's context or if the task is locked.  Otherwise,
 * the map could be switched for the task (and freed) before
 * we go to return it here.
 */
pmap_t
get_task_pmap(task_t t)
{
	return t->map->pmap;
}

/*
 * Return the task's resident memory size, in bytes.
 */
uint64_t
get_task_resident_size(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map : task->map;
	return (uint64_t)pmap_resident_count(map->pmap) * PAGE_SIZE_64;
}

uint64_t
get_task_compressed(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map : task->map;
	return (uint64_t)pmap_compressed(map->pmap) * PAGE_SIZE_64;
}

uint64_t
get_task_resident_max(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map : task->map;
	return (uint64_t)pmap_resident_max(map->pmap) * PAGE_SIZE_64;
}

uint64_t
get_task_purgeable_size(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;
	uint64_t volatile_size = 0;

	ret = ledger_get_entries(task->ledger, task_ledgers.purgeable_volatile, &credit, &debit);
	if (ret != KERN_SUCCESS) {
		return 0;
	}

	volatile_size += (credit - debit);

	ret = ledger_get_entries(task->ledger, task_ledgers.purgeable_volatile_compressed, &credit, &debit);
	if (ret != KERN_SUCCESS) {
		return 0;
	}

	volatile_size += (credit - debit);

	return volatile_size;
}

/*
 * Return the task's physical footprint (ledger balance), in bytes.
 */
uint64_t
get_task_phys_footprint(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.phys_footprint, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}
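
/*
 * Several getters below follow the same pattern as
 * get_task_phys_footprint() above: fetch the credit/debit pair for one
 * ledger entry and return the balance, or 0 on failure.  A hypothetical
 * common helper (not part of the original source, guarded out of the
 * build) would look like this:
 */
#if 0
static uint64_t
example_task_ledger_balance(task_t task, int entry)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, entry, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}
#endif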

#if CONFIG_LEDGER_INTERVAL_MAX
/*
 * Return the task's largest physical footprint over the current
 * sampling interval, optionally resetting the interval maximum.
 */
uint64_t
get_task_phys_footprint_interval_max(task_t task, int reset)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_interval_max(task->ledger, task_ledgers.phys_footprint, &max, reset);

	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}
#endif /* CONFIG_LEDGER_INTERVAL_MAX */

/*
 * Return the task's largest physical footprint over its lifetime.
 */
uint64_t
get_task_phys_footprint_lifetime_max(task_t task)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_lifetime_max(task->ledger, task_ledgers.phys_footprint, &max);

	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}

/*
 * Return the task's current physical footprint limit, in bytes.
 */
uint64_t
get_task_phys_footprint_limit(task_t task)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max);
	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}

uint64_t
get_task_internal(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.internal, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_internal_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.internal_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_purgeable_nonvolatile(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.purgeable_nonvolatile, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_purgeable_nonvolatile_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.purgeable_nonvolatile_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_alternate_accounting(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.alternate_accounting, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_alternate_accounting_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.alternate_accounting_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_page_table(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.page_table, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_iokit_mapped(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.iokit_mapped, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_network_nonvolatile(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.network_nonvolatile, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_network_nonvolatile_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.network_nonvolatile_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_wired_mem(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.wired_mem, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_cpu_time(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.cpu_time, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

/*
 * Return the task owning a thread.
 */
task_t
get_threadtask(thread_t th)
{
	return th->task;
}

/*
 * Return the lowest valid address in a VM map.
 */
vm_map_offset_t
get_map_min(
	vm_map_t map)
{
	return vm_map_min(map);
}

/*
 * Return the highest valid address in a VM map.
 */
vm_map_offset_t
get_map_max(
	vm_map_t map)
{
	return vm_map_max(map);
}

vm_map_size_t
get_vmmap_size(
	vm_map_t map)
{
	return map->size;
}

#if CONFIG_COREDUMP

static int
get_vmsubmap_entries(
	vm_map_t map,
	vm_object_offset_t start,
	vm_object_offset_t end)
{
	int total_entries = 0;
	vm_map_entry_t entry;

	if (not_in_kdp) {
		vm_map_lock(map);
	}
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
			    get_vmsubmap_entries(VME_SUBMAP(entry),
			    VME_OFFSET(entry),
			    (VME_OFFSET(entry) +
			    entry->vme_end -
			    entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp) {
		vm_map_unlock(map);
	}
	return total_entries;
}

int
get_vmmap_entries(
	vm_map_t map)
{
	int total_entries = 0;
	vm_map_entry_t entry;

	if (not_in_kdp) {
		vm_map_lock(map);
	}
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
			    get_vmsubmap_entries(VME_SUBMAP(entry),
			    VME_OFFSET(entry),
			    (VME_OFFSET(entry) +
			    entry->vme_end -
			    entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp) {
		vm_map_unlock(map);
	}
	return total_entries;
}
#endif /* CONFIG_COREDUMP */

/*
 * Return the number of outstanding stops on the task.
 */
int
get_task_userstop(
	task_t task)
{
	return task->user_stop_count;
}

/*
 * Return the number of outstanding stops on the thread.
 */
int
get_thread_userstop(
	thread_t th)
{
	return th->user_stop_count;
}

/*
 * Return TRUE if the task has been suspended via pid_suspend().
 */
boolean_t
get_task_pidsuspended(
	task_t task)
{
	return task->pidsuspended;
}

/*
 * Return TRUE if the task has been frozen.
 */
boolean_t
get_task_frozen(
	task_t task)
{
	return task->frozen;
}

/*
 * Return TRUE if the thread has an unsafe abort pending.
 */
boolean_t
thread_should_abort(
	thread_t th)
{
	return (th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT;
}

/*
 * This routine is like thread_should_abort() above.  It checks to
 * see if the current thread is aborted.  But unlike above, it also
 * checks to see if the thread is safely aborted.  If so, it returns
 * that fact, and clears the condition (safe aborts only should
 * have a single effect, and a poll of the abort status
 * qualifies).
 */
boolean_t
current_thread_aborted(
	void)
{
	thread_t th = current_thread();
	spl_t s;

	if ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT &&
	    (th->options & TH_OPT_INTMASK) != THREAD_UNINT) {
		return TRUE;
	}
	if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
			th->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
		}
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}
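
/*
 * Illustrative sketch (not part of the original source): the polling
 * pattern current_thread_aborted() supports, e.g. bailing out of a long
 * loop with EINTR.  Guarded out of the build; the function name is
 * hypothetical and EINTR is assumed to be visible here.
 */
#if 0
static int
example_interruptible_loop(void)
{
	for (;;) {
		/*
		 * A safe abort is consumed by this poll, so it has a
		 * single effect: exactly one EINTR return.
		 */
		if (current_thread_aborted()) {
			return EINTR;
		}
		/* ... one unit of work ... */
	}
}
#endif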

/*
 * Invoke a callback on every thread of a task, with the task locked.
 */
void
task_act_iterate_wth_args(
	task_t task,
	void (*func_callback)(thread_t, void *),
	void *func_arg)
{
	thread_t inc;

	task_lock(task);

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		(void) (*func_callback)(inc, func_arg);
		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	task_unlock(task);
}


#include <sys/bsdtask_info.h>

void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo)
{
	vm_map_t map;
	task_absolutetime_info_data_t tinfo;
	thread_t thread;
	uint32_t cswitch = 0, numrunning = 0;
	uint32_t syscalls_unix = 0;
	uint32_t syscalls_mach = 0;

	task_lock(task);

	map = (task == kernel_task) ? kernel_map : task->map;

	ptinfo->pti_virtual_size = map->size;
	ptinfo->pti_resident_size =
	    (mach_vm_size_t)(pmap_resident_count(map->pmap))
	    * PAGE_SIZE_64;

	ptinfo->pti_policy = ((task != kernel_task) ?
	    POLICY_TIMESHARE : POLICY_RR);

	tinfo.threads_user = tinfo.threads_system = 0;
	tinfo.total_user = task->total_user_time;
	tinfo.total_system = task->total_system_time;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		uint64_t tval;
		spl_t x;

		if (thread->options & TH_OPT_IDLE_THREAD) {
			continue;
		}

		x = splsched();
		thread_lock(thread);

		if ((thread->state & TH_RUN) == TH_RUN) {
			numrunning++;
		}
		cswitch += thread->c_switch;
		tval = timer_grab(&thread->user_timer);
		tinfo.threads_user += tval;
		tinfo.total_user += tval;

		tval = timer_grab(&thread->system_timer);

		if (thread->precise_user_kernel_time) {
			tinfo.threads_system += tval;
			tinfo.total_system += tval;
		} else {
			/* system_timer may represent either sys or user */
			tinfo.threads_user += tval;
			tinfo.total_user += tval;
		}

		syscalls_unix += thread->syscalls_unix;
		syscalls_mach += thread->syscalls_mach;

		thread_unlock(thread);
		splx(x);
	}

	ptinfo->pti_total_system = tinfo.total_system;
	ptinfo->pti_total_user = tinfo.total_user;
	ptinfo->pti_threads_system = tinfo.threads_system;
	ptinfo->pti_threads_user = tinfo.threads_user;

	ptinfo->pti_faults = task->faults;
	ptinfo->pti_pageins = task->pageins;
	ptinfo->pti_cow_faults = task->cow_faults;
	ptinfo->pti_messages_sent = task->messages_sent;
	ptinfo->pti_messages_received = task->messages_received;
	ptinfo->pti_syscalls_mach = task->syscalls_mach + syscalls_mach;
	ptinfo->pti_syscalls_unix = task->syscalls_unix + syscalls_unix;
	ptinfo->pti_csw = task->c_switch + cswitch;
	ptinfo->pti_threadnum = task->thread_count;
	ptinfo->pti_numrunning = numrunning;
	ptinfo->pti_priority = task->priority;

	task_unlock(task);
}

int
fill_taskthreadinfo(task_t task, uint64_t thaddr, bool thuniqueid, struct proc_threadinfo_internal * ptinfo, void * vpp, int *vidp)
{
	thread_t thact;
	int err = 0;
	mach_msg_type_number_t count;
	thread_basic_info_data_t basic_info;
	kern_return_t kret;
	uint64_t addr = 0;

	task_lock(task);

	for (thact = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)thact);) {
		addr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
		if (addr == thaddr) {
			count = THREAD_BASIC_INFO_COUNT;
			if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
				err = 1;
				goto out;
			}
			ptinfo->pth_user_time = ((basic_info.user_time.seconds * (integer_t)NSEC_PER_SEC) + (basic_info.user_time.microseconds * (integer_t)NSEC_PER_USEC));
			ptinfo->pth_system_time = ((basic_info.system_time.seconds * (integer_t)NSEC_PER_SEC) + (basic_info.system_time.microseconds * (integer_t)NSEC_PER_USEC));

			ptinfo->pth_cpu_usage = basic_info.cpu_usage;
			ptinfo->pth_policy = basic_info.policy;
			ptinfo->pth_run_state = basic_info.run_state;
			ptinfo->pth_flags = basic_info.flags;
			ptinfo->pth_sleep_time = basic_info.sleep_time;
			ptinfo->pth_curpri = thact->sched_pri;
			ptinfo->pth_priority = thact->base_pri;
			ptinfo->pth_maxpriority = thact->max_priority;

			if ((vpp != NULL) && (thact->uthread != NULL)) {
				bsd_threadcdir(thact->uthread, vpp, vidp);
			}
			bsd_getthreadname(thact->uthread, ptinfo->pth_name);
			err = 0;
			goto out;
		}
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}
	err = 1;

out:
	task_unlock(task);
	return err;
}

int
fill_taskthreadlist(task_t task, void * buffer, int thcount, bool thuniqueid)
{
	int numthr = 0;
	thread_t thact;
	uint64_t * uptr;
	uint64_t thaddr;

	uptr = (uint64_t *)buffer;

	task_lock(task);

	for (thact = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)thact);) {
		thaddr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
		*uptr++ = thaddr;
		numthr++;
		if (numthr >= thcount) {
			goto out;
		}
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}

out:
	task_unlock(task);
	return (int)(numthr * sizeof(uint64_t));
}
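
/*
 * Illustrative sketch (not part of the original source): calling
 * fill_taskthreadlist() with a fixed buffer and interpreting its return
 * value, which is the number of bytes written.  Guarded out of the
 * build; the function name and buffer size are hypothetical.
 */
#if 0
static void
example_list_thread_handles(task_t task)
{
	uint64_t buf[16];
	int bytes, i, n;

	/* false: return cthread self pointers rather than unique ids. */
	bytes = fill_taskthreadlist(task, buf, 16, false);
	n = bytes / (int)sizeof(uint64_t);
	for (i = 0; i < n; i++) {
		/* ... buf[i] is one thread handle ... */
	}
}
#endif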

int
get_numthreads(task_t task)
{
	return task->thread_count;
}

/*
 * Gather the various pieces of info about the designated task,
 * and collect it all into a single rusage_info.
 */
int
fill_task_rusage(task_t task, rusage_info_current *ri)
{
	struct task_power_info powerinfo;

	assert(task != TASK_NULL);
	task_lock(task);

	task_power_info_locked(task, &powerinfo, NULL, NULL);
	ri->ri_pkg_idle_wkups = powerinfo.task_platform_idle_wakeups;
	ri->ri_interrupt_wkups = powerinfo.task_interrupt_wakeups;
	ri->ri_user_time = powerinfo.total_user;
	ri->ri_system_time = powerinfo.total_system;

	ledger_get_balance(task->ledger, task_ledgers.phys_footprint,
	    (ledger_amount_t *)&ri->ri_phys_footprint);
	ledger_get_balance(task->ledger, task_ledgers.phys_mem,
	    (ledger_amount_t *)&ri->ri_resident_size);
	ledger_get_balance(task->ledger, task_ledgers.wired_mem,
	    (ledger_amount_t *)&ri->ri_wired_size);

	ri->ri_pageins = task->pageins;

	task_unlock(task);
	return 0;
}
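
/*
 * Illustrative sketch (not part of the original source): populating a
 * rusage_info_current and reading one field back.  Guarded out of the
 * build; the function name is hypothetical.
 */
#if 0
static uint64_t
example_task_footprint_via_rusage(task_t task)
{
	rusage_info_current ri = { 0 };

	fill_task_rusage(task, &ri);
	/* ri now holds CPU times, wakeups, and memory sizes. */
	return ri.ri_phys_footprint;
}
#endif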

void
fill_task_billed_usage(task_t task __unused, rusage_info_current *ri)
{
	bank_billed_balance_safe(task, &ri->ri_billed_system_time, &ri->ri_billed_energy);
	bank_serviced_balance_safe(task, &ri->ri_serviced_system_time, &ri->ri_serviced_energy);
}

int
fill_task_io_rusage(task_t task, rusage_info_current *ri)
{
	assert(task != TASK_NULL);
	task_lock(task);

	if (task->task_io_stats) {
		ri->ri_diskio_bytesread = task->task_io_stats->disk_reads.size;
		ri->ri_diskio_byteswritten = (task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size);
	} else {
		/* I/O stats unavailable */
		ri->ri_diskio_bytesread = 0;
		ri->ri_diskio_byteswritten = 0;
	}
	task_unlock(task);
	return 0;
}

int
fill_task_qos_rusage(task_t task, rusage_info_current *ri)
{
	thread_t thread;

	assert(task != TASK_NULL);
	task_lock(task);

	/* Roll up the QoS time of all the task's threads into the task. */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		if (thread->options & TH_OPT_IDLE_THREAD) {
			continue;
		}

		thread_update_qos_cpu_time(thread);
	}
	ri->ri_cpu_time_qos_default = task->cpu_time_eqos_stats.cpu_time_qos_default;
	ri->ri_cpu_time_qos_maintenance = task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
	ri->ri_cpu_time_qos_background = task->cpu_time_eqos_stats.cpu_time_qos_background;
	ri->ri_cpu_time_qos_utility = task->cpu_time_eqos_stats.cpu_time_qos_utility;
	ri->ri_cpu_time_qos_legacy = task->cpu_time_eqos_stats.cpu_time_qos_legacy;
	ri->ri_cpu_time_qos_user_initiated = task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
	ri->ri_cpu_time_qos_user_interactive = task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;

	task_unlock(task);
	return 0;
}

void
fill_task_monotonic_rusage(task_t task, rusage_info_current *ri)
{
#if MONOTONIC
	if (!mt_core_supported) {
		return;
	}

	assert(task != TASK_NULL);

	uint64_t counts[MT_CORE_NFIXED] = { 0 };
	mt_fixed_task_counts(task, counts);
#ifdef MT_CORE_INSTRS
	ri->ri_instructions = counts[MT_CORE_INSTRS];
#endif /* defined(MT_CORE_INSTRS) */
	ri->ri_cycles = counts[MT_CORE_CYCLES];
#else /* MONOTONIC */
#pragma unused(task, ri)
#endif /* !MONOTONIC */
}

uint64_t
get_task_logical_writes(task_t task)
{
	assert(task != TASK_NULL);
	struct ledger_entry_info lei;

	task_lock(task);
	ledger_get_entry_info(task->ledger, task_ledgers.logical_writes, &lei);
	task_unlock(task);

	return lei.lei_balance;
}

uint64_t
get_task_dispatchqueue_serialno_offset(task_t task)
{
	uint64_t dq_serialno_offset = 0;

	if (task->bsd_info) {
		dq_serialno_offset = get_dispatchqueue_serialno_offset_from_proc(task->bsd_info);
	}

	return dq_serialno_offset;
}

uint64_t
get_task_uniqueid(task_t task)
{
	if (task->bsd_info) {
		return proc_uniqueid(task->bsd_info);
	} else {
		return UINT64_MAX;
	}
}

int
get_task_version(task_t task)
{
	if (task->bsd_info) {
		return proc_pidversion(task->bsd_info);
	} else {
		return INT_MAX;
	}
}

#if CONFIG_MACF
struct label *
get_task_crash_label(task_t task)
{
	return task->crash_label;
}
#endif