apple/xnu: osfmk/kern/bsd_kern.c (commit cb46e621be064755251e908b4758e1f8d15244da)
1 /*
2 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <mach/mach_types.h>
29 #include <mach/machine/vm_param.h>
30 #include <mach/task.h>
31
32 #include <kern/kern_types.h>
33 #include <kern/ledger.h>
34 #include <kern/processor.h>
35 #include <kern/thread.h>
36 #include <kern/task.h>
37 #include <kern/spl.h>
38 #include <kern/ast.h>
39 #include <ipc/ipc_port.h>
40 #include <ipc/ipc_object.h>
41 #include <vm/vm_map.h>
42 #include <vm/vm_kern.h>
43 #include <vm/pmap.h>
44 #include <vm/vm_protos.h> /* last */
45 #include <sys/resource.h>
46 #include <sys/signal.h>
47 #include <sys/errno.h>
48 #include <sys/proc_require.h>
49
50 #if MONOTONIC
51 #include <kern/monotonic.h>
52 #include <machine/monotonic.h>
53 #endif /* MONOTONIC */
54
55 #include <machine/limits.h>
56 #include <sys/codesign.h> /* CS_CDHASH_LEN */
57
58 #undef thread_should_halt
59
60 /* BSD KERN COMPONENT INTERFACE */
61
62 extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */
63
64 thread_t get_firstthread(task_t);
65 int get_task_userstop(task_t);
66 int get_thread_userstop(thread_t);
67 boolean_t current_thread_aborted(void);
68 void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
69 kern_return_t get_signalact(task_t, thread_t *, int);
70 int fill_task_rusage(task_t task, rusage_info_current *ri);
71 int fill_task_io_rusage(task_t task, rusage_info_current *ri);
72 int fill_task_qos_rusage(task_t task, rusage_info_current *ri);
73 void fill_task_monotonic_rusage(task_t task, rusage_info_current *ri);
74 uint64_t get_task_logical_writes(task_t task, boolean_t external);
75 void fill_task_billed_usage(task_t task, rusage_info_current *ri);
76 void task_bsdtask_kill(task_t);
77
78 extern uint64_t get_dispatchqueue_serialno_offset_from_proc(void *p);
79 extern uint64_t get_dispatchqueue_label_offset_from_proc(void *p);
80 extern uint64_t proc_uniqueid(void *p);
81 extern int proc_pidversion(void *p);
82 extern int proc_getcdhash(void *p, char *cdhash);
83
84 #if MACH_BSD
85 extern void psignal(void *, int);
86 #endif
87
88 /*
89 *
90 */
91 void *
92 get_bsdtask_info(task_t t)
93 {
94 proc_require(t->bsd_info, PROC_REQUIRE_ALLOW_NULL | PROC_REQUIRE_ALLOW_KERNPROC);
95 return t->bsd_info;
96 }
97
98 void
99 task_bsdtask_kill(task_t t)
100 {
101 void * bsd_info = get_bsdtask_info(t);
102 if (bsd_info != NULL) {
103 psignal(bsd_info, SIGKILL);
104 }
105 }
106 /*
107 *
108 */
109 void *
110 get_bsdthreadtask_info(thread_t th)
111 {
112 void *bsd_info = NULL;
113
114 if (th->task) {
115 bsd_info = get_bsdtask_info(th->task);
116 }
117 return bsd_info;
118 }
119
120 /*
121 *
122 */
123 void
124 set_bsdtask_info(task_t t, void * v)
125 {
126 t->bsd_info = v;
127 }
128
129 /*
130 *
131 */
132 void *
133 get_bsdthread_info(thread_t th)
134 {
135 return th->uthread;
136 }
137
138 /*
139 * This is used to remember any FS error from VNOP_PAGEIN code when
140 * invoked under vm_fault(). The value is an errno-style error code. It can
141 * be retrieved by exception handlers using thread_get_state().
142 */
143 void
144 set_thread_pagein_error(thread_t th, int error)
145 {
146 assert(th == current_thread());
147 if (error == 0 || th->t_pagein_error == 0) {
148 th->t_pagein_error = error;
149 }
150 }
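/*
 * Illustrative sketch of a hypothetical caller: VNOP_PAGEIN code running
 * under vm_fault() records the errno it hit ("pagein_error" here is a
 * hypothetical local), and only the first non-zero error is remembered
 * until a later call with 0 clears it.
 *
 *	if (pagein_error != 0) {
 *		set_thread_pagein_error(current_thread(), pagein_error);
 *	}
 */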
151
152 #if defined(__x86_64__)
153 /*
154 * Returns non-zero if the thread has a non-NULL task
155 * and that task has an LDT.
156 */
157 int
158 thread_task_has_ldt(thread_t th)
159 {
160 return th->task && th->task->i386_ldt != 0;
161 }
162 #endif /* __x86_64__ */
163
164 /*
165 * XXX
166 */
167 int get_thread_lock_count(thread_t th); /* forced forward */
168 int
169 get_thread_lock_count(thread_t th)
170 {
171 return th->mutex_count;
172 }
173
174 /*
175 * XXX: wait for BSD to fix signal code
176 * Until then, we cannot block here. We know the task
177 * can't go away, so we make sure it is still active after
178 * retrieving the first thread for extra safety.
179 */
180 thread_t
181 get_firstthread(task_t task)
182 {
183 thread_t thread = (thread_t)(void *)queue_first(&task->threads);
184
185 if (queue_end(&task->threads, (queue_entry_t)thread)) {
186 thread = THREAD_NULL;
187 }
188
189 if (!task->active) {
190 return THREAD_NULL;
191 }
192
193 return thread;
194 }
195
196 kern_return_t
197 get_signalact(
198 task_t task,
199 thread_t *result_out,
200 int setast)
201 {
202 kern_return_t result = KERN_SUCCESS;
203 thread_t inc, thread = THREAD_NULL;
204
205 task_lock(task);
206
207 if (!task->active) {
208 task_unlock(task);
209
210 return KERN_FAILURE;
211 }
212
213 for (inc = (thread_t)(void *)queue_first(&task->threads);
214 !queue_end(&task->threads, (queue_entry_t)inc);) {
215 thread_mtx_lock(inc);
216 if (inc->active &&
217 (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
218 thread = inc;
219 break;
220 }
221 thread_mtx_unlock(inc);
222
223 inc = (thread_t)(void *)queue_next(&inc->task_threads);
224 }
225
226 if (result_out) {
227 *result_out = thread;
228 }
229
230 if (thread) {
231 if (setast) {
232 act_set_astbsd(thread);
233 }
234
235 thread_mtx_unlock(thread);
236 } else {
237 result = KERN_FAILURE;
238 }
239
240 task_unlock(task);
241
242 return result;
243 }
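/*
 * Illustrative sketch of a hypothetical caller in the BSD signal path:
 * pick a thread that can take the signal and have the BSD AST posted to
 * it (setast != 0 makes get_signalact() call act_set_astbsd() itself).
 *
 *	thread_t target;
 *
 *	if (get_signalact(task, &target, 1) == KERN_SUCCESS) {
 *		... the signal is handled when target services its AST ...
 *	}
 */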
244
245
246 kern_return_t
247 check_actforsig(
248 task_t task,
249 thread_t thread,
250 int setast)
251 {
252 kern_return_t result = KERN_FAILURE;
253 thread_t inc;
254
255 task_lock(task);
256
257 if (!task->active) {
258 task_unlock(task);
259
260 return KERN_FAILURE;
261 }
262
263 for (inc = (thread_t)(void *)queue_first(&task->threads);
264 !queue_end(&task->threads, (queue_entry_t)inc);) {
265 if (inc == thread) {
266 thread_mtx_lock(inc);
267
268 if (inc->active &&
269 (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
270 result = KERN_SUCCESS;
271 break;
272 }
273
274 thread_mtx_unlock(inc);
275 break;
276 }
277
278 inc = (thread_t)(void *)queue_next(&inc->task_threads);
279 }
280
281 if (result == KERN_SUCCESS) {
282 if (setast) {
283 act_set_astbsd(thread);
284 }
285
286 thread_mtx_unlock(thread);
287 }
288
289 task_unlock(task);
290
291 return result;
292 }
293
294 ledger_t
295 get_task_ledger(task_t t)
296 {
297 return t->ledger;
298 }
299
300 /*
301 * This is only safe to call from a thread executing in
302 * the task's context or if the task is locked. Otherwise,
303 * the map could be switched for the task (and freed) before
304 * we go to return it here.
305 */
306 vm_map_t
307 get_task_map(task_t t)
308 {
309 return t->map;
310 }
311
312 vm_map_t
313 get_task_map_reference(task_t t)
314 {
315 vm_map_t m;
316
317 if (t == NULL) {
318 return VM_MAP_NULL;
319 }
320
321 task_lock(t);
322 if (!t->active) {
323 task_unlock(t);
324 return VM_MAP_NULL;
325 }
326 m = t->map;
327 vm_map_reference_swap(m);
328 task_unlock(t);
329 return m;
330 }
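/*
 * Illustrative sketch of a hypothetical caller: unlike get_task_map(), the
 * map returned here carries its own reference and stays valid even if the
 * task switches maps, so it is paired with vm_map_deallocate() when done.
 *
 *	vm_map_t map = get_task_map_reference(task);
 *
 *	if (map != VM_MAP_NULL) {
 *		... inspect map ...
 *		vm_map_deallocate(map);
 *	}
 */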
331
332 /*
333 *
334 */
335 ipc_space_t
336 get_task_ipcspace(task_t t)
337 {
338 return t->itk_space;
339 }
340
341 int
342 get_task_numacts(task_t t)
343 {
344 return t->thread_count;
345 }
346
347 /* Does this machine need a 64-bit register set for the signal handler? */
348 int
349 is_64signalregset(void)
350 {
351 if (task_has_64Bit_data(current_task())) {
352 return 1;
353 }
354
355 return 0;
356 }
357
358 /*
359 * Swap in a new map for the task/thread pair; the old map reference is
360 * returned. Also does a pmap switch if thread provided is current thread.
361 */
362 vm_map_t
363 swap_task_map(task_t task, thread_t thread, vm_map_t map)
364 {
365 vm_map_t old_map;
366 boolean_t doswitch = (thread == current_thread()) ? TRUE : FALSE;
367
368 if (task != thread->task) {
369 panic("swap_task_map");
370 }
371
372 task_lock(task);
373 mp_disable_preemption();
374
375 old_map = task->map;
376 thread->map = task->map = map;
377 vm_commit_pagezero_status(map);
378
379 if (doswitch) {
380 PMAP_SWITCH_USER(thread, map, cpu_number());
381 }
382 mp_enable_preemption();
383 task_unlock(task);
384
385 return old_map;
386 }
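/*
 * Illustrative sketch of a hypothetical caller: the old map comes back
 * still holding the reference the task had on it, so once it is no longer
 * needed the caller releases it ("new_map" is a hypothetical map).
 *
 *	vm_map_t old_map = swap_task_map(task, thread, new_map);
 *	...
 *	vm_map_deallocate(old_map);
 */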
387
388 /*
389 *
390 * This is only safe to call from a thread executing in
391 * the task's context or if the task is locked. Otherwise,
392 * the map could be switched for the task (and freed) before
393 * we go to return it here.
394 */
395 pmap_t
396 get_task_pmap(task_t t)
397 {
398 return t->map->pmap;
399 }
400
401 /*
402 *
403 */
404 uint64_t
405 get_task_resident_size(task_t task)
406 {
407 vm_map_t map;
408
409 map = (task == kernel_task) ? kernel_map: task->map;
410 return (uint64_t)pmap_resident_count(map->pmap) * PAGE_SIZE_64;
411 }
412
413 uint64_t
414 get_task_compressed(task_t task)
415 {
416 vm_map_t map;
417
418 map = (task == kernel_task) ? kernel_map: task->map;
419 return (uint64_t)pmap_compressed(map->pmap) * PAGE_SIZE_64;
420 }
421
422 uint64_t
423 get_task_resident_max(task_t task)
424 {
425 vm_map_t map;
426
427 map = (task == kernel_task) ? kernel_map: task->map;
428 return (uint64_t)pmap_resident_max(map->pmap) * PAGE_SIZE_64;
429 }
430
431 /*
432 * Get the balance for a given field in the task ledger.
433 * Returns 0 if the entry is invalid.
434 */
435 static uint64_t
436 get_task_ledger_balance(task_t task, int entry)
437 {
438 ledger_amount_t balance = 0;
439
440 ledger_get_balance(task->ledger, entry, &balance);
441 return balance;
442 }
443
444 uint64_t
445 get_task_purgeable_size(task_t task)
446 {
447 kern_return_t ret;
448 ledger_amount_t balance = 0;
449 uint64_t volatile_size = 0;
450
451 ret = ledger_get_balance(task->ledger, task_ledgers.purgeable_volatile, &balance);
452 if (ret != KERN_SUCCESS) {
453 return 0;
454 }
455
456 volatile_size += balance;
457
458 ret = ledger_get_balance(task->ledger, task_ledgers.purgeable_volatile_compressed, &balance);
459 if (ret != KERN_SUCCESS) {
460 return 0;
461 }
462
463 volatile_size += balance;
464
465 return volatile_size;
466 }
467
468 /*
469 *
470 */
471 uint64_t
472 get_task_phys_footprint(task_t task)
473 {
474 return get_task_ledger_balance(task, task_ledgers.phys_footprint);
475 }
476
477 #if CONFIG_LEDGER_INTERVAL_MAX
478 /*
479 *
480 */
481 uint64_t
482 get_task_phys_footprint_interval_max(task_t task, int reset)
483 {
484 kern_return_t ret;
485 ledger_amount_t max;
486
487 ret = ledger_get_interval_max(task->ledger, task_ledgers.phys_footprint, &max, reset);
488
489 if (KERN_SUCCESS == ret) {
490 return max;
491 }
492
493 return 0;
494 }
495 #endif /* CONFIG_LEDGER_INTERVAL_MAX */
496
497 /*
498 *
499 */
500 uint64_t
501 get_task_phys_footprint_lifetime_max(task_t task)
502 {
503 kern_return_t ret;
504 ledger_amount_t max;
505
506 ret = ledger_get_lifetime_max(task->ledger, task_ledgers.phys_footprint, &max);
507
508 if (KERN_SUCCESS == ret) {
509 return max;
510 }
511
512 return 0;
513 }
514
515 /*
516 *
517 */
518 uint64_t
519 get_task_phys_footprint_limit(task_t task)
520 {
521 kern_return_t ret;
522 ledger_amount_t max;
523
524 ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max);
525 if (KERN_SUCCESS == ret) {
526 return max;
527 }
528
529 return 0;
530 }
531
532 uint64_t
533 get_task_internal(task_t task)
534 {
535 return get_task_ledger_balance(task, task_ledgers.internal);
536 }
537
538 uint64_t
539 get_task_internal_compressed(task_t task)
540 {
541 return get_task_ledger_balance(task, task_ledgers.internal_compressed);
542 }
543
544 uint64_t
545 get_task_purgeable_nonvolatile(task_t task)
546 {
547 return get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile);
548 }
549
550 uint64_t
551 get_task_purgeable_nonvolatile_compressed(task_t task)
552 {
553 return get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile_compressed);
554 }
555
556 uint64_t
557 get_task_alternate_accounting(task_t task)
558 {
559 return get_task_ledger_balance(task, task_ledgers.alternate_accounting);
560 }
561
562 uint64_t
563 get_task_alternate_accounting_compressed(task_t task)
564 {
565 return get_task_ledger_balance(task, task_ledgers.alternate_accounting_compressed);
566 }
567
568 uint64_t
569 get_task_page_table(task_t task)
570 {
571 return get_task_ledger_balance(task, task_ledgers.page_table);
572 }
573
574 #if CONFIG_FREEZE
575 uint64_t
576 get_task_frozen_to_swap(task_t task)
577 {
578 return get_task_ledger_balance(task, task_ledgers.frozen_to_swap);
579 }
580 #endif /* CONFIG_FREEZE */
581
582 uint64_t
583 get_task_iokit_mapped(task_t task)
584 {
585 return get_task_ledger_balance(task, task_ledgers.iokit_mapped);
586 }
587
588 uint64_t
589 get_task_network_nonvolatile(task_t task)
590 {
591 return get_task_ledger_balance(task, task_ledgers.network_nonvolatile);
592 }
593
594 uint64_t
595 get_task_network_nonvolatile_compressed(task_t task)
596 {
597 return get_task_ledger_balance(task, task_ledgers.network_nonvolatile_compressed);
598 }
599
600 uint64_t
601 get_task_wired_mem(task_t task)
602 {
603 return get_task_ledger_balance(task, task_ledgers.wired_mem);
604 }
605
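/*
 * The tagged/media/graphics/neural footprint getters below read the
 * ledger entry's credit and debit and report credit - debit, which is the
 * same balance ledger_get_balance() reports for the other ledger fields
 * above.
 */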
606 uint64_t
607 get_task_tagged_footprint(task_t task)
608 {
609 kern_return_t ret;
610 ledger_amount_t credit, debit;
611
612 ret = ledger_get_entries(task->ledger, task_ledgers.tagged_footprint, &credit, &debit);
613 if (KERN_SUCCESS == ret) {
614 return credit - debit;
615 }
616
617 return 0;
618 }
619
620 uint64_t
621 get_task_tagged_footprint_compressed(task_t task)
622 {
623 kern_return_t ret;
624 ledger_amount_t credit, debit;
625
626 ret = ledger_get_entries(task->ledger, task_ledgers.tagged_footprint_compressed, &credit, &debit);
627 if (KERN_SUCCESS == ret) {
628 return credit - debit;
629 }
630
631 return 0;
632 }
633
634 uint64_t
635 get_task_media_footprint(task_t task)
636 {
637 kern_return_t ret;
638 ledger_amount_t credit, debit;
639
640 ret = ledger_get_entries(task->ledger, task_ledgers.media_footprint, &credit, &debit);
641 if (KERN_SUCCESS == ret) {
642 return credit - debit;
643 }
644
645 return 0;
646 }
647
648 uint64_t
649 get_task_media_footprint_compressed(task_t task)
650 {
651 kern_return_t ret;
652 ledger_amount_t credit, debit;
653
654 ret = ledger_get_entries(task->ledger, task_ledgers.media_footprint_compressed, &credit, &debit);
655 if (KERN_SUCCESS == ret) {
656 return credit - debit;
657 }
658
659 return 0;
660 }
661
662 uint64_t
663 get_task_graphics_footprint(task_t task)
664 {
665 kern_return_t ret;
666 ledger_amount_t credit, debit;
667
668 ret = ledger_get_entries(task->ledger, task_ledgers.graphics_footprint, &credit, &debit);
669 if (KERN_SUCCESS == ret) {
670 return credit - debit;
671 }
672
673 return 0;
674 }
675
676
677 uint64_t
678 get_task_graphics_footprint_compressed(task_t task)
679 {
680 kern_return_t ret;
681 ledger_amount_t credit, debit;
682
683 ret = ledger_get_entries(task->ledger, task_ledgers.graphics_footprint_compressed, &credit, &debit);
684 if (KERN_SUCCESS == ret) {
685 return credit - debit;
686 }
687
688 return 0;
689 }
690
691 uint64_t
692 get_task_neural_footprint(task_t task)
693 {
694 kern_return_t ret;
695 ledger_amount_t credit, debit;
696
697 ret = ledger_get_entries(task->ledger, task_ledgers.neural_footprint, &credit, &debit);
698 if (KERN_SUCCESS == ret) {
699 return credit - debit;
700 }
701
702 return 0;
703 }
704
705 uint64_t
706 get_task_neural_footprint_compressed(task_t task)
707 {
708 kern_return_t ret;
709 ledger_amount_t credit, debit;
710
711 ret = ledger_get_entries(task->ledger, task_ledgers.neural_footprint_compressed, &credit, &debit);
712 if (KERN_SUCCESS == ret) {
713 return credit - debit;
714 }
715
716 return 0;
717 }
718
719 uint64_t
720 get_task_cpu_time(task_t task)
721 {
722 return get_task_ledger_balance(task, task_ledgers.cpu_time);
723 }
724
725 uint32_t
726 get_task_loadTag(task_t task)
727 {
728 return os_atomic_load(&task->loadTag, relaxed);
729 }
730
731 uint32_t
732 set_task_loadTag(task_t task, uint32_t loadTag)
733 {
734 return os_atomic_xchg(&task->loadTag, loadTag, relaxed);
735 }
736
737 /*
738 *
739 */
740 task_t
741 get_threadtask(thread_t th)
742 {
743 return th->task;
744 }
745
746 /*
747 *
748 */
749 vm_map_offset_t
750 get_map_min(
751 vm_map_t map)
752 {
753 return vm_map_min(map);
754 }
755
756 /*
757 *
758 */
759 vm_map_offset_t
760 get_map_max(
761 vm_map_t map)
762 {
763 return vm_map_max(map);
764 }
765 vm_map_size_t
766 get_vmmap_size(
767 vm_map_t map)
768 {
769 return vm_map_adjusted_size(map);
770 }
771
772 #if CONFIG_COREDUMP
773
774 static int
775 get_vmsubmap_entries(
776 vm_map_t map,
777 vm_object_offset_t start,
778 vm_object_offset_t end)
779 {
780 int total_entries = 0;
781 vm_map_entry_t entry;
782
783 if (not_in_kdp) {
784 vm_map_lock(map);
785 }
786 entry = vm_map_first_entry(map);
787 while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
788 entry = entry->vme_next;
789 }
790
791 while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
792 if (entry->is_sub_map) {
793 total_entries +=
794 get_vmsubmap_entries(VME_SUBMAP(entry),
795 VME_OFFSET(entry),
796 (VME_OFFSET(entry) +
797 entry->vme_end -
798 entry->vme_start));
799 } else {
800 total_entries += 1;
801 }
802 entry = entry->vme_next;
803 }
804 if (not_in_kdp) {
805 vm_map_unlock(map);
806 }
807 return total_entries;
808 }
809
810 int
811 get_vmmap_entries(
812 vm_map_t map)
813 {
814 int total_entries = 0;
815 vm_map_entry_t entry;
816
817 if (not_in_kdp) {
818 vm_map_lock(map);
819 }
820 entry = vm_map_first_entry(map);
821
822 while (entry != vm_map_to_entry(map)) {
823 if (entry->is_sub_map) {
824 total_entries +=
825 get_vmsubmap_entries(VME_SUBMAP(entry),
826 VME_OFFSET(entry),
827 (VME_OFFSET(entry) +
828 entry->vme_end -
829 entry->vme_start));
830 } else {
831 total_entries += 1;
832 }
833 entry = entry->vme_next;
834 }
835 if (not_in_kdp) {
836 vm_map_unlock(map);
837 }
838 return total_entries;
839 }
840 #endif /* CONFIG_COREDUMP */
841
842 /*
843 *
844 */
848 int
849 get_task_userstop(
850 task_t task)
851 {
852 return task->user_stop_count;
853 }
854
855 /*
856 *
857 */
858 int
859 get_thread_userstop(
860 thread_t th)
861 {
862 return th->user_stop_count;
863 }
864
865 /*
866 *
867 */
868 boolean_t
869 get_task_pidsuspended(
870 task_t task)
871 {
872 return task->pidsuspended;
873 }
874
875 /*
876 *
877 */
878 boolean_t
879 get_task_frozen(
880 task_t task)
881 {
882 return task->frozen;
883 }
884
885 /*
886 *
887 */
888 boolean_t
889 thread_should_abort(
890 thread_t th)
891 {
892 return (th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT;
893 }
894
895 /*
896 * This routine is like thread_should_abort() above. It checks to
897 * see if the current thread is aborted. But unlike above, it also
898 * checks to see if the thread is safely aborted. If so, it returns
899 * that fact and clears the condition (safe aborts should only
900 * have a single effect, and a poll of the abort status
901 * qualifies).
902 */
903 boolean_t
904 current_thread_aborted(
905 void)
906 {
907 thread_t th = current_thread();
908 spl_t s;
909
910 if ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT &&
911 (th->options & TH_OPT_INTMASK) != THREAD_UNINT) {
912 return TRUE;
913 }
914 if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
915 s = splsched();
916 thread_lock(th);
917 if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
918 th->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
919 }
920 thread_unlock(th);
921 splx(s);
922 }
923 return FALSE;
924 }
925
926 /*
927 *
928 */
929 void
930 task_act_iterate_wth_args(
931 task_t task,
932 void (*func_callback)(thread_t, void *),
933 void *func_arg)
934 {
935 thread_t inc;
936
937 task_lock(task);
938
939 for (inc = (thread_t)(void *)queue_first(&task->threads);
940 !queue_end(&task->threads, (queue_entry_t)inc);) {
941 (void) (*func_callback)(inc, func_arg);
942 inc = (thread_t)(void *)queue_next(&inc->task_threads);
943 }
944
945 task_unlock(task);
946 }
947
948
949 #include <sys/bsdtask_info.h>
950
951 void
952 fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo)
953 {
954 vm_map_t map;
955 task_absolutetime_info_data_t tinfo;
956 thread_t thread;
957 uint32_t cswitch = 0, numrunning = 0;
958 uint32_t syscalls_unix = 0;
959 uint32_t syscalls_mach = 0;
960
961 task_lock(task);
962
963 map = (task == kernel_task)? kernel_map: task->map;
964
965 ptinfo->pti_virtual_size = vm_map_adjusted_size(map);
966 ptinfo->pti_resident_size =
967 (mach_vm_size_t)(pmap_resident_count(map->pmap))
968 * PAGE_SIZE_64;
969
970 ptinfo->pti_policy = ((task != kernel_task)?
971 POLICY_TIMESHARE: POLICY_RR);
972
973 tinfo.threads_user = tinfo.threads_system = 0;
974 tinfo.total_user = task->total_user_time;
975 tinfo.total_system = task->total_system_time;
976
977 queue_iterate(&task->threads, thread, thread_t, task_threads) {
978 uint64_t tval;
979 spl_t x;
980
981 if (thread->options & TH_OPT_IDLE_THREAD) {
982 continue;
983 }
984
985 x = splsched();
986 thread_lock(thread);
987
988 if ((thread->state & TH_RUN) == TH_RUN) {
989 numrunning++;
990 }
991 cswitch += thread->c_switch;
992 tval = timer_grab(&thread->user_timer);
993 tinfo.threads_user += tval;
994 tinfo.total_user += tval;
995
996 tval = timer_grab(&thread->system_timer);
997
998 if (thread->precise_user_kernel_time) {
999 tinfo.threads_system += tval;
1000 tinfo.total_system += tval;
1001 } else {
1002 /* system_timer may represent either sys or user */
1003 tinfo.threads_user += tval;
1004 tinfo.total_user += tval;
1005 }
1006
1007 syscalls_unix += thread->syscalls_unix;
1008 syscalls_mach += thread->syscalls_mach;
1009
1010 thread_unlock(thread);
1011 splx(x);
1012 }
1013
1014 ptinfo->pti_total_system = tinfo.total_system;
1015 ptinfo->pti_total_user = tinfo.total_user;
1016 ptinfo->pti_threads_system = tinfo.threads_system;
1017 ptinfo->pti_threads_user = tinfo.threads_user;
1018
1019 ptinfo->pti_faults = task->faults;
1020 ptinfo->pti_pageins = task->pageins;
1021 ptinfo->pti_cow_faults = task->cow_faults;
1022 ptinfo->pti_messages_sent = task->messages_sent;
1023 ptinfo->pti_messages_received = task->messages_received;
1024 ptinfo->pti_syscalls_mach = task->syscalls_mach + syscalls_mach;
1025 ptinfo->pti_syscalls_unix = task->syscalls_unix + syscalls_unix;
1026 ptinfo->pti_csw = task->c_switch + cswitch;
1027 ptinfo->pti_threadnum = task->thread_count;
1028 ptinfo->pti_numrunning = numrunning;
1029 ptinfo->pti_priority = task->priority;
1030
1031 task_unlock(task);
1032 }
1033
1034 int
1035 fill_taskthreadinfo(task_t task, uint64_t thaddr, bool thuniqueid, struct proc_threadinfo_internal * ptinfo, void * vpp, int *vidp)
1036 {
1037 thread_t thact;
1038 int err = 0;
1039 mach_msg_type_number_t count;
1040 thread_basic_info_data_t basic_info;
1041 kern_return_t kret;
1042 uint64_t addr = 0;
1043
1044 task_lock(task);
1045
1046 for (thact = (thread_t)(void *)queue_first(&task->threads);
1047 !queue_end(&task->threads, (queue_entry_t)thact);) {
1048 addr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
1049 if (addr == thaddr) {
1050 count = THREAD_BASIC_INFO_COUNT;
1051 if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
1052 err = 1;
1053 goto out;
1054 }
1055 ptinfo->pth_user_time = (((uint64_t)basic_info.user_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.user_time.microseconds * NSEC_PER_USEC));
1056 ptinfo->pth_system_time = (((uint64_t)basic_info.system_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.system_time.microseconds * NSEC_PER_USEC));
1057
1058 ptinfo->pth_cpu_usage = basic_info.cpu_usage;
1059 ptinfo->pth_policy = basic_info.policy;
1060 ptinfo->pth_run_state = basic_info.run_state;
1061 ptinfo->pth_flags = basic_info.flags;
1062 ptinfo->pth_sleep_time = basic_info.sleep_time;
1063 ptinfo->pth_curpri = thact->sched_pri;
1064 ptinfo->pth_priority = thact->base_pri;
1065 ptinfo->pth_maxpriority = thact->max_priority;
1066
1067 if ((vpp != NULL) && (thact->uthread != NULL)) {
1068 bsd_threadcdir(thact->uthread, vpp, vidp);
1069 }
1070 bsd_getthreadname(thact->uthread, ptinfo->pth_name);
1071 err = 0;
1072 goto out;
1073 }
1074 thact = (thread_t)(void *)queue_next(&thact->task_threads);
1075 }
1076 err = 1;
1077
1078 out:
1079 task_unlock(task);
1080 return err;
1081 }
1082
1083 int
1084 fill_taskthreadlist(task_t task, void * buffer, int thcount, bool thuniqueid)
1085 {
1086 int numthr = 0;
1087 thread_t thact;
1088 uint64_t * uptr;
1089 uint64_t thaddr;
1090
1091 uptr = (uint64_t *)buffer;
1092
1093 task_lock(task);
1094
1095 for (thact = (thread_t)(void *)queue_first(&task->threads);
1096 !queue_end(&task->threads, (queue_entry_t)thact);) {
1097 thaddr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
1098 *uptr++ = thaddr;
1099 numthr++;
1100 if (numthr >= thcount) {
1101 goto out;
1102 }
1103 thact = (thread_t)(void *)queue_next(&thact->task_threads);
1104 }
1105
1106 out:
1107 task_unlock(task);
1108 return (int)(numthr * sizeof(uint64_t));
1109 }
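/*
 * Illustrative sketch of a hypothetical caller: the buffer must have room
 * for at least thcount 64-bit entries, and the return value is the number
 * of bytes actually filled in.
 *
 *	uint64_t ids[16];
 *	int used = fill_taskthreadlist(task, ids, 16, true);
 *	int nthreads = used / (int)sizeof(uint64_t);
 */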
1110
1111 int
1112 get_numthreads(task_t task)
1113 {
1114 return task->thread_count;
1115 }
1116
1117 /*
1118 * Gather the various pieces of info about the designated task,
1119 * and collect it all into a single rusage_info.
1120 */
1121 int
1122 fill_task_rusage(task_t task, rusage_info_current *ri)
1123 {
1124 struct task_power_info powerinfo;
1125
1126 uint64_t runnable_time = 0;
1127
1128 assert(task != TASK_NULL);
1129 task_lock(task);
1130
1131 task_power_info_locked(task, &powerinfo, NULL, NULL, &runnable_time);
1132 ri->ri_pkg_idle_wkups = powerinfo.task_platform_idle_wakeups;
1133 ri->ri_interrupt_wkups = powerinfo.task_interrupt_wakeups;
1134 ri->ri_user_time = powerinfo.total_user;
1135 ri->ri_system_time = powerinfo.total_system;
1136 ri->ri_runnable_time = runnable_time;
1137
1138 ri->ri_phys_footprint = get_task_phys_footprint(task);
1139 ledger_get_balance(task->ledger, task_ledgers.phys_mem,
1140 (ledger_amount_t *)&ri->ri_resident_size);
1141 ri->ri_wired_size = get_task_wired_mem(task);
1142
1143 ri->ri_pageins = task->pageins;
1144
1145 task_unlock(task);
1146 return 0;
1147 }
1148
1149 void
1150 fill_task_billed_usage(task_t task, rusage_info_current *ri)
1151 {
1152 bank_billed_balance_safe(task, &ri->ri_billed_system_time, &ri->ri_billed_energy);
1153 bank_serviced_balance_safe(task, &ri->ri_serviced_system_time, &ri->ri_serviced_energy);
1154 }
1155
1156 int
1157 fill_task_io_rusage(task_t task, rusage_info_current *ri)
1158 {
1159 assert(task != TASK_NULL);
1160 task_lock(task);
1161
1162 if (task->task_io_stats) {
1163 ri->ri_diskio_bytesread = task->task_io_stats->disk_reads.size;
1164 ri->ri_diskio_byteswritten = (task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size);
1165 } else {
1166 /* I/O Stats unavailable */
1167 ri->ri_diskio_bytesread = 0;
1168 ri->ri_diskio_byteswritten = 0;
1169 }
1170 task_unlock(task);
1171 return 0;
1172 }
1173
1174 int
1175 fill_task_qos_rusage(task_t task, rusage_info_current *ri)
1176 {
1177 thread_t thread;
1178
1179 assert(task != TASK_NULL);
1180 task_lock(task);
1181
1182 /* Rollup QoS time of all the threads to task */
1183 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1184 if (thread->options & TH_OPT_IDLE_THREAD) {
1185 continue;
1186 }
1187
1188 thread_update_qos_cpu_time(thread);
1189 }
1190 ri->ri_cpu_time_qos_default = task->cpu_time_eqos_stats.cpu_time_qos_default;
1191 ri->ri_cpu_time_qos_maintenance = task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
1192 ri->ri_cpu_time_qos_background = task->cpu_time_eqos_stats.cpu_time_qos_background;
1193 ri->ri_cpu_time_qos_utility = task->cpu_time_eqos_stats.cpu_time_qos_utility;
1194 ri->ri_cpu_time_qos_legacy = task->cpu_time_eqos_stats.cpu_time_qos_legacy;
1195 ri->ri_cpu_time_qos_user_initiated = task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
1196 ri->ri_cpu_time_qos_user_interactive = task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;
1197
1198 task_unlock(task);
1199 return 0;
1200 }
1201
1202 void
1203 fill_task_monotonic_rusage(task_t task, rusage_info_current *ri)
1204 {
1205 #if MONOTONIC
1206 if (!mt_core_supported) {
1207 return;
1208 }
1209
1210 assert(task != TASK_NULL);
1211
1212 uint64_t counts[MT_CORE_NFIXED] = { 0 };
1213 mt_fixed_task_counts(task, counts);
1214 #ifdef MT_CORE_INSTRS
1215 ri->ri_instructions = counts[MT_CORE_INSTRS];
1216 #endif /* defined(MT_CORE_INSTRS) */
1217 ri->ri_cycles = counts[MT_CORE_CYCLES];
1218 #else /* MONOTONIC */
1219 #pragma unused(task, ri)
1220 #endif /* !MONOTONIC */
1221 }
1222
1223 uint64_t
1224 get_task_logical_writes(task_t task, boolean_t external)
1225 {
1226 assert(task != TASK_NULL);
1227 struct ledger_entry_info lei;
1228
1229 task_lock(task);
1230
1231 if (external == FALSE) {
1232 ledger_get_entry_info(task->ledger, task_ledgers.logical_writes, &lei);
1233 } else {
1234 ledger_get_entry_info(task->ledger, task_ledgers.logical_writes_to_external, &lei);
1235 }
1238
1239 task_unlock(task);
1240 return lei.lei_balance;
1241 }
1242
1243 uint64_t
1244 get_task_dispatchqueue_serialno_offset(task_t task)
1245 {
1246 uint64_t dq_serialno_offset = 0;
1247
1248 if (task->bsd_info) {
1249 dq_serialno_offset = get_dispatchqueue_serialno_offset_from_proc(task->bsd_info);
1250 }
1251
1252 return dq_serialno_offset;
1253 }
1254
1255 uint64_t
1256 get_task_dispatchqueue_label_offset(task_t task)
1257 {
1258 uint64_t dq_label_offset = 0;
1259
1260 if (task->bsd_info) {
1261 dq_label_offset = get_dispatchqueue_label_offset_from_proc(task->bsd_info);
1262 }
1263
1264 return dq_label_offset;
1265 }
1266
1267 uint64_t
1268 get_task_uniqueid(task_t task)
1269 {
1270 if (task->bsd_info) {
1271 return proc_uniqueid(task->bsd_info);
1272 } else {
1273 return UINT64_MAX;
1274 }
1275 }
1276
1277 int
1278 get_task_version(task_t task)
1279 {
1280 if (task->bsd_info) {
1281 return proc_pidversion(task->bsd_info);
1282 } else {
1283 return INT_MAX;
1284 }
1285 }
1286
1287 #if CONFIG_MACF
1288 struct label *
1289 get_task_crash_label(task_t task)
1290 {
1291 return task->crash_label;
1292 }
1293 #endif
1294
1295 int
1296 fill_taskipctableinfo(task_t task, uint32_t *table_size, uint32_t *table_free)
1297 {
1298 ipc_space_t space = task->itk_space;
1299 if (space == NULL) {
1300 return -1;
1301 }
1302
1303 is_read_lock(space);
1304 if (!is_active(space)) {
1305 is_read_unlock(space);
1306 return -1;
1307 }
1308
1309 *table_size = space->is_table_size;
1310 *table_free = space->is_table_free;
1311
1312 is_read_unlock(space);
1313
1314 return 0;
1315 }
1316
1317 int
1318 get_task_cdhash(task_t task, char cdhash[static CS_CDHASH_LEN])
1319 {
1320 int result = 0;
1321
1322 task_lock(task);
1323 result = task->bsd_info ? proc_getcdhash(task->bsd_info, cdhash) : ESRCH;
1324 task_unlock(task);
1325
1326 return result;
1327 }
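/*
 * Illustrative sketch of a hypothetical caller: cdhash must point at a
 * buffer of at least CS_CDHASH_LEN bytes; ESRCH comes back when the task
 * has no associated BSD proc.
 *
 *	char cdhash[CS_CDHASH_LEN];
 *
 *	if (get_task_cdhash(task, cdhash) == 0) {
 *		... cdhash holds the code directory hash ...
 *	}
 */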