/*
 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/machine/vm_param.h>
#include <mach/task.h>

#include <kern/kern_types.h>
#include <kern/ledger.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/spl.h>
#include <kern/ast.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h> /* last */
#include <sys/resource.h>
#include <sys/signal.h>
#include <sys/errno.h>
#include <sys/proc_require.h>

#if MONOTONIC
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* MONOTONIC */

#include <machine/limits.h>
#include <sys/codesign.h> /* CS_CDHASH_LEN */

#undef thread_should_halt

/* BSD KERN COMPONENT INTERFACE */

extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */

thread_t get_firstthread(task_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
kern_return_t get_signalact(task_t, thread_t *, int);
int fill_task_rusage(task_t task, rusage_info_current *ri);
int fill_task_io_rusage(task_t task, rusage_info_current *ri);
int fill_task_qos_rusage(task_t task, rusage_info_current *ri);
void fill_task_monotonic_rusage(task_t task, rusage_info_current *ri);
uint64_t get_task_logical_writes(task_t task, boolean_t external);
void fill_task_billed_usage(task_t task, rusage_info_current *ri);
void task_bsdtask_kill(task_t);

extern uint64_t get_dispatchqueue_serialno_offset_from_proc(void *p);
extern uint64_t get_dispatchqueue_label_offset_from_proc(void *p);
extern uint64_t proc_uniqueid(void *p);
extern int proc_pidversion(void *p);
extern int proc_getcdhash(void *p, char *cdhash);

#if MACH_BSD
extern void psignal(void *, int);
#endif

/*
 * Return the BSD proc structure (bsd_info) associated with a Mach task.
 */
void *
get_bsdtask_info(task_t t)
{
	proc_require(t->bsd_info, PROC_REQUIRE_ALLOW_NULL | PROC_REQUIRE_ALLOW_KERNPROC);
	return t->bsd_info;
}

void
task_bsdtask_kill(task_t t)
{
	void * bsd_info = get_bsdtask_info(t);
	if (bsd_info != NULL) {
		psignal(bsd_info, SIGKILL);
	}
}

/*
 * Return the BSD proc structure for the task that owns the given thread.
 */
void *
get_bsdthreadtask_info(thread_t th)
{
	void *bsd_info = NULL;

	if (th->task) {
		bsd_info = get_bsdtask_info(th->task);
	}
	return bsd_info;
}

/*
 * Attach a BSD proc structure to a Mach task.
 */
void
set_bsdtask_info(task_t t, void * v)
{
	t->bsd_info = v;
}

/*
 * Return the uthread structure associated with a thread.
 */
void *
get_bsdthread_info(thread_t th)
{
	return th->uthread;
}

/*
 * This is used to remember any FS error from VNOP_PAGEIN code when
 * invoked under vm_fault(). The value is an errno style value. It can
 * be retrieved by exception handlers using thread_get_state().
 */
void
set_thread_pagein_error(thread_t th, int error)
{
	assert(th == current_thread());
	if (error == 0 || th->t_pagein_error == 0) {
		th->t_pagein_error = error;
	}
}

#if defined(__x86_64__)
/*
 * Returns non-zero if the thread has a non-NULL task
 * and that task has an LDT.
 */
int
thread_task_has_ldt(thread_t th)
{
	return th->task && th->task->i386_ldt != 0;
}
#endif /* __x86_64__ */

/*
 * XXX
 */
int get_thread_lock_count(thread_t th); /* forced forward */
int
get_thread_lock_count(thread_t th)
{
	return th->mutex_count;
}

/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here. We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_t
get_firstthread(task_t task)
{
	thread_t thread = (thread_t)(void *)queue_first(&task->threads);

	if (queue_end(&task->threads, (queue_entry_t)thread)) {
		thread = THREAD_NULL;
	}

	if (!task->active) {
		return THREAD_NULL;
	}

	return thread;
}

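/*
 * Find the first active thread in the task that is not in the middle of
 * an unsafe abort, as a target for signal delivery. If setast is non-zero,
 * mark the chosen thread with the BSD AST so it traps into signal handling.
 * Returns the thread through result_out; returns KERN_FAILURE if the task
 * is inactive or no eligible thread exists.
 */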
kern_return_t
get_signalact(
	task_t          task,
	thread_t        *result_out,
	int             setast)
{
	kern_return_t   result = KERN_SUCCESS;
	thread_t        inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return KERN_FAILURE;
	}

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		thread_mtx_lock(inc);
		if (inc->active &&
		    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result_out) {
		*result_out = thread;
	}

	if (thread) {
		if (setast) {
			act_set_astbsd(thread);
		}

		thread_mtx_unlock(thread);
	} else {
		result = KERN_FAILURE;
	}

	task_unlock(task);

	return result;
}

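/*
 * Like get_signalact(), but checks whether one specific thread is still
 * eligible for signal delivery (active and not unsafely aborted). If so,
 * optionally sets the BSD AST on it and returns KERN_SUCCESS.
 */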
kern_return_t
check_actforsig(
	task_t          task,
	thread_t        thread,
	int             setast)
{
	kern_return_t   result = KERN_FAILURE;
	thread_t        inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return KERN_FAILURE;
	}

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			if (inc->active &&
			    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast) {
			act_set_astbsd(thread);
		}

		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return result;
}

ledger_t
get_task_ledger(task_t t)
{
	return t->ledger;
}

/*
 * This is only safe to call from a thread executing in
 * the task's context or if the task is locked. Otherwise,
 * the map could be switched for the task (and freed) before
 * we go to return it here.
 */
vm_map_t
get_task_map(task_t t)
{
	return t->map;
}

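/*
 * Take a reference on an arbitrary task's map under the task lock, so the
 * map cannot be deallocated while the caller uses it. Returns VM_MAP_NULL
 * if the task is gone or inactive; the caller is expected to release the
 * reference with vm_map_deallocate().
 */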
vm_map_t
get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL) {
		return VM_MAP_NULL;
	}

	task_lock(t);
	if (!t->active) {
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	vm_map_reference(m);
	task_unlock(t);
	return m;
}

/*
 * Return the IPC space for a task.
 */
ipc_space_t
get_task_ipcspace(task_t t)
{
	return t->itk_space;
}

int
get_task_numacts(task_t t)
{
	return t->thread_count;
}

/* Does this machine need a 64-bit register set for the signal handler? */
int
is_64signalregset(void)
{
	if (task_has_64Bit_data(current_task())) {
		return 1;
	}

	return 0;
}

/*
 * Swap in a new map for the task/thread pair; the old map reference is
 * returned. Also does a pmap switch if the thread provided is the current
 * thread.
 */
vm_map_t
swap_task_map(task_t task, thread_t thread, vm_map_t map)
{
	vm_map_t old_map;
	boolean_t doswitch = (thread == current_thread()) ? TRUE : FALSE;

	if (task != thread->task) {
		panic("swap_task_map");
	}

	task_lock(task);
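	/* Keep preemption off so the map and pmap switch appear atomic to this CPU. */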
	mp_disable_preemption();

	old_map = task->map;
	thread->map = task->map = map;
	vm_commit_pagezero_status(map);

	if (doswitch) {
		PMAP_SWITCH_USER(thread, map, cpu_number());
	}
	mp_enable_preemption();
	task_unlock(task);

	return old_map;
}

/*
 * This is only safe to call from a thread executing in
 * the task's context or if the task is locked. Otherwise,
 * the map could be switched for the task (and freed) before
 * we go to return it here.
 */
pmap_t
get_task_pmap(task_t t)
{
	return t->map->pmap;
}

/*
 * Return the task's resident memory size, in bytes.
 */
uint64_t
get_task_resident_size(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map : task->map;
	return (uint64_t)pmap_resident_count(map->pmap) * PAGE_SIZE_64;
}

uint64_t
get_task_compressed(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map : task->map;
	return (uint64_t)pmap_compressed(map->pmap) * PAGE_SIZE_64;
}

uint64_t
get_task_resident_max(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map : task->map;
	return (uint64_t)pmap_resident_max(map->pmap) * PAGE_SIZE_64;
}

/*
 * Get the balance for a given field in the task ledger.
 * Returns 0 if the entry is invalid.
 */
static uint64_t
get_task_ledger_balance(task_t task, int entry)
{
	ledger_amount_t balance = 0;

	ledger_get_balance(task->ledger, entry, &balance);
	return balance;
}

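/*
 * Total purgeable volatile memory attributed to the task: the sum of the
 * resident volatile and compressed volatile ledger balances. Returns 0 if
 * either ledger lookup fails.
 */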
uint64_t
get_task_purgeable_size(task_t task)
{
	kern_return_t ret;
	ledger_amount_t balance = 0;
	uint64_t volatile_size = 0;

	ret = ledger_get_balance(task->ledger, task_ledgers.purgeable_volatile, &balance);
	if (ret != KERN_SUCCESS) {
		return 0;
	}

	volatile_size += balance;

	ret = ledger_get_balance(task->ledger, task_ledgers.purgeable_volatile_compressed, &balance);
	if (ret != KERN_SUCCESS) {
		return 0;
	}

	volatile_size += balance;

	return volatile_size;
}

/*
 * Current physical footprint of the task, from its ledger.
 */
uint64_t
get_task_phys_footprint(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.phys_footprint);
}

#if CONFIG_LEDGER_INTERVAL_MAX
/*
 * Maximum physical footprint over the current sampling interval,
 * optionally resetting the interval maximum.
 */
uint64_t
get_task_phys_footprint_interval_max(task_t task, int reset)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_interval_max(task->ledger, task_ledgers.phys_footprint, &max, reset);

	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}
#endif /* CONFIG_LEDGER_INTERVAL_MAX */

/*
 * Maximum physical footprint over the lifetime of the task.
 */
uint64_t
get_task_phys_footprint_lifetime_max(task_t task)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_lifetime_max(task->ledger, task_ledgers.phys_footprint, &max);

	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}

/*
 * Current physical footprint limit configured for the task.
 */
uint64_t
get_task_phys_footprint_limit(task_t task)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max);
	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}

uint64_t
get_task_internal(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.internal);
}

uint64_t
get_task_internal_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.internal_compressed);
}

uint64_t
get_task_purgeable_nonvolatile(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile);
}

uint64_t
get_task_purgeable_nonvolatile_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile_compressed);
}

uint64_t
get_task_alternate_accounting(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.alternate_accounting);
}

uint64_t
get_task_alternate_accounting_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.alternate_accounting_compressed);
}

uint64_t
get_task_page_table(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.page_table);
}

#if CONFIG_FREEZE
uint64_t
get_task_frozen_to_swap(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.frozen_to_swap);
}
#endif /* CONFIG_FREEZE */

uint64_t
get_task_iokit_mapped(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.iokit_mapped);
}

uint64_t
get_task_network_nonvolatile(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.network_nonvolatile);
}

uint64_t
get_task_network_nonvolatile_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.network_nonvolatile_compressed);
}

uint64_t
get_task_wired_mem(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.wired_mem);
}

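/*
 * The tagged/media/graphics/neural footprint getters below read the ledger
 * with ledger_get_entries() and return (credit - debit) rather than the
 * cached balance; each returns 0 if the ledger entry is invalid.
 */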
uint64_t
get_task_tagged_footprint(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.tagged_footprint, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_tagged_footprint_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.tagged_footprint_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_media_footprint(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.media_footprint, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_media_footprint_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.media_footprint_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_graphics_footprint(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.graphics_footprint, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_graphics_footprint_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.graphics_footprint_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_neural_footprint(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.neural_footprint, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_neural_footprint_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.neural_footprint_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_cpu_time(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.cpu_time);
}

uint32_t
get_task_loadTag(task_t task)
{
	return os_atomic_load(&task->loadTag, relaxed);
}

uint32_t
set_task_loadTag(task_t task, uint32_t loadTag)
{
	return os_atomic_xchg(&task->loadTag, loadTag, relaxed);
}

/*
 * Return the task that owns the given thread.
 */
task_t
get_threadtask(thread_t th)
{
	return th->task;
}

/*
 * Return the lowest user address of a VM map.
 */
vm_map_offset_t
get_map_min(
	vm_map_t        map)
{
	return vm_map_min(map);
}

/*
 * Return the highest user address of a VM map.
 */
vm_map_offset_t
get_map_max(
	vm_map_t        map)
{
	return vm_map_max(map);
}

vm_map_size_t
get_vmmap_size(
	vm_map_t        map)
{
	return vm_map_adjusted_size(map);
}

int
get_task_page_size(
	task_t          task)
{
	return vm_map_page_size(task->map);
}

#if CONFIG_COREDUMP

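/*
 * Count the VM map entries that fall in [start, end), recursing into
 * submaps so nested entries are included. The map lock is skipped when
 * running under the kernel debugger (kdp), where blocking is not allowed.
 */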
static int
get_vmsubmap_entries(
	vm_map_t map,
	vm_object_offset_t start,
	vm_object_offset_t end)
{
	int total_entries = 0;
	vm_map_entry_t entry;

	if (not_in_kdp) {
		vm_map_lock(map);
	}
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
			    get_vmsubmap_entries(VME_SUBMAP(entry),
			    VME_OFFSET(entry),
			    (VME_OFFSET(entry) +
			    entry->vme_end -
			    entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp) {
		vm_map_unlock(map);
	}
	return total_entries;
}

int
get_vmmap_entries(
	vm_map_t map)
{
	int total_entries = 0;
	vm_map_entry_t entry;

	if (not_in_kdp) {
		vm_map_lock(map);
	}
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
			    get_vmsubmap_entries(VME_SUBMAP(entry),
			    VME_OFFSET(entry),
			    (VME_OFFSET(entry) +
			    entry->vme_end -
			    entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp) {
		vm_map_unlock(map);
	}
	return total_entries;
}
#endif /* CONFIG_COREDUMP */

/*
 * Return the task's user stop count (number of outstanding stops).
 */
int
get_task_userstop(
	task_t task)
{
	return task->user_stop_count;
}

/*
 * Return the thread's user stop count.
 */
int
get_thread_userstop(
	thread_t th)
{
	return th->user_stop_count;
}

/*
 * Return whether the task has been suspended via pid_suspend().
 */
boolean_t
get_task_pidsuspended(
	task_t task)
{
	return task->pidsuspended;
}

/*
 * Return whether the task is frozen.
 */
boolean_t
get_task_frozen(
	task_t task)
{
	return task->frozen;
}

/*
 * Return whether the thread has an unsafe abort pending.
 */
boolean_t
thread_should_abort(
	thread_t th)
{
	return (th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT;
}

/*
 * This routine is like thread_should_abort() above. It checks to
 * see if the current thread is aborted. But unlike above, it also
 * checks to see if the thread is safely aborted. If so, it returns
 * that fact, and clears the condition (safe aborts only should
 * have a single effect, and a poll of the abort status qualifies).
 */
boolean_t
current_thread_aborted(
	void)
{
	thread_t th = current_thread();
	spl_t s;

	if ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT &&
	    (th->options & TH_OPT_INTMASK) != THREAD_UNINT) {
		return TRUE;
	}
	if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
			th->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
		}
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}

/*
 * Invoke the callback on every thread in the task, with the task locked.
 */
void
task_act_iterate_wth_args(
	task_t          task,
	void            (*func_callback)(thread_t, void *),
	void            *func_arg)
{
	thread_t        inc;

	task_lock(task);

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		(void) (*func_callback)(inc, func_arg);
		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	task_unlock(task);
}


#include <sys/bsdtask_info.h>

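/*
 * Snapshot task-level accounting into a proc_taskinfo structure. Per-thread
 * user/system time, context switches, and syscall counts are accumulated
 * under the task lock; idle threads are skipped, and threads without
 * precise user/kernel timing have their system time credited as user time.
 */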
void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo)
{
	vm_map_t map;
	task_absolutetime_info_data_t tinfo;
	thread_t thread;
	uint32_t cswitch = 0, numrunning = 0;
	uint32_t syscalls_unix = 0;
	uint32_t syscalls_mach = 0;

	task_lock(task);

	map = (task == kernel_task)? kernel_map: task->map;

	ptinfo->pti_virtual_size = vm_map_adjusted_size(map);
	ptinfo->pti_resident_size =
	    (mach_vm_size_t)(pmap_resident_count(map->pmap))
	    * PAGE_SIZE_64;

	ptinfo->pti_policy = ((task != kernel_task)?
	    POLICY_TIMESHARE: POLICY_RR);

	tinfo.threads_user = tinfo.threads_system = 0;
	tinfo.total_user = task->total_user_time;
	tinfo.total_system = task->total_system_time;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		uint64_t tval;
		spl_t x;

		if (thread->options & TH_OPT_IDLE_THREAD) {
			continue;
		}

		x = splsched();
		thread_lock(thread);

		if ((thread->state & TH_RUN) == TH_RUN) {
			numrunning++;
		}
		cswitch += thread->c_switch;
		tval = timer_grab(&thread->user_timer);
		tinfo.threads_user += tval;
		tinfo.total_user += tval;

		tval = timer_grab(&thread->system_timer);

		if (thread->precise_user_kernel_time) {
			tinfo.threads_system += tval;
			tinfo.total_system += tval;
		} else {
			/* system_timer may represent either sys or user */
			tinfo.threads_user += tval;
			tinfo.total_user += tval;
		}

		syscalls_unix += thread->syscalls_unix;
		syscalls_mach += thread->syscalls_mach;

		thread_unlock(thread);
		splx(x);
	}

	ptinfo->pti_total_system = tinfo.total_system;
	ptinfo->pti_total_user = tinfo.total_user;
	ptinfo->pti_threads_system = tinfo.threads_system;
	ptinfo->pti_threads_user = tinfo.threads_user;

	ptinfo->pti_faults = (int32_t) MIN(counter_load(&task->faults), INT32_MAX);
	ptinfo->pti_pageins = task->pageins;
	ptinfo->pti_cow_faults = task->cow_faults;
	ptinfo->pti_messages_sent = task->messages_sent;
	ptinfo->pti_messages_received = task->messages_received;
	ptinfo->pti_syscalls_mach = task->syscalls_mach + syscalls_mach;
	ptinfo->pti_syscalls_unix = task->syscalls_unix + syscalls_unix;
	ptinfo->pti_csw = task->c_switch + cswitch;
	ptinfo->pti_threadnum = task->thread_count;
	ptinfo->pti_numrunning = numrunning;
	ptinfo->pti_priority = task->priority;

	task_unlock(task);
}

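/*
 * Look up one thread in the task by identifier (the 64-bit unique thread
 * ID when thuniqueid is set, otherwise the cthread pseudo-handle) and fill
 * in its scheduling and timing info. Returns 0 on success, 1 if the thread
 * was not found or thread_info_internal() failed.
 */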
int
fill_taskthreadinfo(task_t task, uint64_t thaddr, bool thuniqueid, struct proc_threadinfo_internal * ptinfo, void * vpp, int *vidp)
{
	thread_t thact;
	int err = 0;
	mach_msg_type_number_t count;
	thread_basic_info_data_t basic_info;
	kern_return_t kret;
	uint64_t addr = 0;

	task_lock(task);

	for (thact = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)thact);) {
		addr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
		if (addr == thaddr) {
			count = THREAD_BASIC_INFO_COUNT;
			if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
				err = 1;
				goto out;
			}
			ptinfo->pth_user_time = (((uint64_t)basic_info.user_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.user_time.microseconds * NSEC_PER_USEC));
			ptinfo->pth_system_time = (((uint64_t)basic_info.system_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.system_time.microseconds * NSEC_PER_USEC));

			ptinfo->pth_cpu_usage = basic_info.cpu_usage;
			ptinfo->pth_policy = basic_info.policy;
			ptinfo->pth_run_state = basic_info.run_state;
			ptinfo->pth_flags = basic_info.flags;
			ptinfo->pth_sleep_time = basic_info.sleep_time;
			ptinfo->pth_curpri = thact->sched_pri;
			ptinfo->pth_priority = thact->base_pri;
			ptinfo->pth_maxpriority = thact->max_priority;

			if ((vpp != NULL) && (thact->uthread != NULL)) {
				bsd_threadcdir(thact->uthread, vpp, vidp);
			}
			bsd_getthreadname(thact->uthread, ptinfo->pth_name);
			err = 0;
			goto out;
		}
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}
	err = 1;

out:
	task_unlock(task);
	return err;
}

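/*
 * Copy up to thcount thread identifiers for the task into buffer (unique
 * thread IDs when thuniqueid is set, otherwise cthread pseudo-handles).
 * Returns the number of bytes written.
 */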
int
fill_taskthreadlist(task_t task, void * buffer, int thcount, bool thuniqueid)
{
	int numthr = 0;
	thread_t thact;
	uint64_t * uptr;
	uint64_t thaddr;

	uptr = (uint64_t *)buffer;

	task_lock(task);

	for (thact = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)thact);) {
		thaddr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
		*uptr++ = thaddr;
		numthr++;
		if (numthr >= thcount) {
			goto out;
		}
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}

out:
	task_unlock(task);
	return (int)(numthr * sizeof(uint64_t));
}

int
get_numthreads(task_t task)
{
	return task->thread_count;
}

/*
 * Gather the various pieces of info about the designated task,
 * and collect it all into a single rusage_info.
 */
int
fill_task_rusage(task_t task, rusage_info_current *ri)
{
	struct task_power_info powerinfo;

	uint64_t runnable_time = 0;

	assert(task != TASK_NULL);
	task_lock(task);

	task_power_info_locked(task, &powerinfo, NULL, NULL, &runnable_time);
	ri->ri_pkg_idle_wkups = powerinfo.task_platform_idle_wakeups;
	ri->ri_interrupt_wkups = powerinfo.task_interrupt_wakeups;
	ri->ri_user_time = powerinfo.total_user;
	ri->ri_system_time = powerinfo.total_system;
	ri->ri_runnable_time = runnable_time;

	ri->ri_phys_footprint = get_task_phys_footprint(task);
	ledger_get_balance(task->ledger, task_ledgers.phys_mem,
	    (ledger_amount_t *)&ri->ri_resident_size);
	ri->ri_wired_size = get_task_wired_mem(task);

	ri->ri_pageins = task->pageins;

	task_unlock(task);
	return 0;
}

void
fill_task_billed_usage(task_t task, rusage_info_current *ri)
{
	bank_billed_balance_safe(task, &ri->ri_billed_system_time, &ri->ri_billed_energy);
	bank_serviced_balance_safe(task, &ri->ri_serviced_system_time, &ri->ri_serviced_energy);
}

int
fill_task_io_rusage(task_t task, rusage_info_current *ri)
{
	assert(task != TASK_NULL);
	task_lock(task);

	if (task->task_io_stats) {
		ri->ri_diskio_bytesread = task->task_io_stats->disk_reads.size;
		ri->ri_diskio_byteswritten = (task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size);
	} else {
		/* I/O Stats unavailable */
		ri->ri_diskio_bytesread = 0;
		ri->ri_diskio_byteswritten = 0;
	}
	task_unlock(task);
	return 0;
}

int
fill_task_qos_rusage(task_t task, rusage_info_current *ri)
{
	thread_t thread;

	assert(task != TASK_NULL);
	task_lock(task);

	/* Rollup QoS time of all the threads to task */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		if (thread->options & TH_OPT_IDLE_THREAD) {
			continue;
		}

		thread_update_qos_cpu_time(thread);
	}
	ri->ri_cpu_time_qos_default = task->cpu_time_eqos_stats.cpu_time_qos_default;
	ri->ri_cpu_time_qos_maintenance = task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
	ri->ri_cpu_time_qos_background = task->cpu_time_eqos_stats.cpu_time_qos_background;
	ri->ri_cpu_time_qos_utility = task->cpu_time_eqos_stats.cpu_time_qos_utility;
	ri->ri_cpu_time_qos_legacy = task->cpu_time_eqos_stats.cpu_time_qos_legacy;
	ri->ri_cpu_time_qos_user_initiated = task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
	ri->ri_cpu_time_qos_user_interactive = task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;

	task_unlock(task);
	return 0;
}

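/*
 * Populate instruction and cycle counts from the fixed monotonic counters,
 * when the hardware supports them; otherwise this is a no-op.
 */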
void
fill_task_monotonic_rusage(task_t task, rusage_info_current *ri)
{
#if MONOTONIC
	if (!mt_core_supported) {
		return;
	}

	assert(task != TASK_NULL);

	uint64_t counts[MT_CORE_NFIXED] = { 0 };
	mt_fixed_task_counts(task, counts);
#ifdef MT_CORE_INSTRS
	ri->ri_instructions = counts[MT_CORE_INSTRS];
#endif /* defined(MT_CORE_INSTRS) */
	ri->ri_cycles = counts[MT_CORE_CYCLES];
#else /* MONOTONIC */
#pragma unused(task, ri)
#endif /* !MONOTONIC */
}

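/*
 * Return the task's logical write volume from its ledger; when external is
 * TRUE, report writes to external storage instead of internal.
 */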
uint64_t
get_task_logical_writes(task_t task, boolean_t external)
{
	assert(task != TASK_NULL);
	struct ledger_entry_info lei;

	task_lock(task);

	if (external == FALSE) {
		ledger_get_entry_info(task->ledger, task_ledgers.logical_writes, &lei);
	} else {
		ledger_get_entry_info(task->ledger, task_ledgers.logical_writes_to_external, &lei);
	}

	task_unlock(task);
	return lei.lei_balance;
}

uint64_t
get_task_dispatchqueue_serialno_offset(task_t task)
{
	uint64_t dq_serialno_offset = 0;

	if (task->bsd_info) {
		dq_serialno_offset = get_dispatchqueue_serialno_offset_from_proc(task->bsd_info);
	}

	return dq_serialno_offset;
}

uint64_t
get_task_dispatchqueue_label_offset(task_t task)
{
	uint64_t dq_label_offset = 0;

	if (task->bsd_info) {
		dq_label_offset = get_dispatchqueue_label_offset_from_proc(task->bsd_info);
	}

	return dq_label_offset;
}

uint64_t
get_task_uniqueid(task_t task)
{
	if (task->bsd_info) {
		return proc_uniqueid(task->bsd_info);
	} else {
		return UINT64_MAX;
	}
}

int
get_task_version(task_t task)
{
	if (task->bsd_info) {
		return proc_pidversion(task->bsd_info);
	} else {
		return INT_MAX;
	}
}

#if CONFIG_MACF
struct label *
get_task_crash_label(task_t task)
{
	return task->crash_label;
}
#endif

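/*
 * Report the current size and free-entry count of the task's IPC entry
 * table. Returns -1 if the task has no IPC space or the space is no
 * longer active.
 */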
int
fill_taskipctableinfo(task_t task, uint32_t *table_size, uint32_t *table_free)
{
	ipc_space_t space = task->itk_space;
	if (space == NULL) {
		return -1;
	}

	is_read_lock(space);
	if (!is_active(space)) {
		is_read_unlock(space);
		return -1;
	}

	*table_size = space->is_table_size;
	*table_free = space->is_table_free;

	is_read_unlock(space);

	return 0;
}

int
get_task_cdhash(task_t task, char cdhash[static CS_CDHASH_LEN])
{
	int result = 0;

	task_lock(task);
	result = task->bsd_info ? proc_getcdhash(task->bsd_info, cdhash) : ESRCH;
	task_unlock(task);

	return result;
}