/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_proc.c	8.4 (Berkeley) 1/4/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/* HISTORY
 *  04-Aug-97  Umesh Vaishampayan (umeshv@apple.com)
 *	Added current_proc_EXTERNAL() function for the use of kernel
 *	drivers.
 *
 *  05-Jun-95 Mac Gillon (mgillon) at NeXT
 *	New version based on 3.3NS and 4.4
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc_internal.h>
#include <sys/acct.h>
#include <sys/wait.h>
#include <sys/file_internal.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/kauth.h>
#include <sys/codesign.h>
#include <sys/kernel_types.h>
#include <sys/ubc.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/coalition.h>
#include <sys/coalition.h>
#include <kern/assert.h>
#include <vm/vm_protos.h>
#include <vm/vm_map.h>          /* vm_map_switch_protect() */
#include <vm/vm_pageout.h>
#include <mach/task.h>
#include <mach/message.h>
#include <sys/priv.h>
#include <sys/proc_info.h>
#include <sys/bsdtask_info.h>
#include <sys/persona.h>
#include <sys/sysent.h>
#include <sys/reason.h>
#include <sys/proc_require.h>
#include <IOKit/IOBSD.h>        /* IOTaskHasEntitlement() */
#include <kern/ipc_kobject.h>   /* ipc_kobject_set_kobjidx() */

#ifdef CONFIG_32BIT_TELEMETRY
#include <sys/kasl.h>
#endif /* CONFIG_32BIT_TELEMETRY */

#if CONFIG_CSR
#include <sys/csr.h>
#endif

#if CONFIG_MEMORYSTATUS
#include <sys/kern_memorystatus.h>
#endif

#if CONFIG_MACF
#include <security/mac_framework.h>
#include <security/mac_mach_internal.h>
#endif

#include <libkern/crypto/sha1.h>

#ifdef CONFIG_32BIT_TELEMETRY
#define MAX_32BIT_EXEC_SIG_SIZE 160
#endif /* CONFIG_32BIT_TELEMETRY */
/*
 * Structure associated with user caching.
 */
struct uidinfo {
	LIST_ENTRY(uidinfo) ui_hash;
	uid_t   ui_uid;
	long    ui_proccnt;
};
#define UIHASH(uid)     (&uihashtbl[(uid) & uihash])
LIST_HEAD(uihashhead, uidinfo) * uihashtbl;
u_long uihash;          /* size of hash table - 1 */
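/*
 * Worked example (illustrative only): hashinit() below sizes the table to
 * a power of two and stores (size - 1) in uihash, so UIHASH() reduces to a
 * simple mask. With a 64-bucket table, uihash == 63 and uid 1000 lands in
 * bucket 1000 & 63 == 40.
 */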
/*
 * Other process lists
 */
struct pidhashhead *pidhashtbl;
u_long pidhash;
struct pgrphashhead *pgrphashtbl;
u_long pgrphash;
struct sesshashhead *sesshashtbl;
u_long sesshash;

struct proclist allproc;
struct proclist zombproc;
extern struct tty cons;

#if DEVELOPMENT || DEBUG
int syscallfilter_disable = 0;
#endif // DEVELOPMENT || DEBUG

#if DEBUG
#define __PROC_INTERNAL_DEBUG 1
#endif

/* Name to give to core files */
#if defined(XNU_TARGET_OS_BRIDGE)
__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/private/var/internal/%N.core"};
#elif defined(XNU_TARGET_OS_OSX)
__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/cores/core.%P"};
#else
__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/private/var/cores/%N.core"};
#endif

#include <kern/backtrace.h>

static LCK_MTX_DECLARE_ATTR(proc_klist_mlock, &proc_mlock_grp, &proc_lck_attr);

ZONE_DECLARE(pgrp_zone, "pgrp",
    sizeof(struct pgrp), ZC_ZFREE_CLEARMEM);
ZONE_DECLARE(session_zone, "session",
    sizeof(struct session), ZC_ZFREE_CLEARMEM);
/*
 * If you need accounting for KM_PROC consider using
 * ZONE_VIEW_DEFINE to define a zone view.
 */
#define KM_PROC KHEAP_DEFAULT
typedef uint64_t unaligned_u64 __attribute__((aligned(1)));
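/*
 * Illustrative sketch (not part of the build): stackshot hands this file
 * pointers into packed structures, so a plain uint64_t store could fault
 * on alignment-strict targets. The aligned(1) typedef above makes the
 * compiler emit accesses that are safe at any byte offset. The struct and
 * function names here are hypothetical.
 */
#if 0
struct packed_record {
	uint8_t  tag;
	uint64_t when;          /* lives at offset 1: misaligned */
} __attribute__((packed));

static void
example_store_unaligned(struct packed_record *r, uint64_t v)
{
	unaligned_u64 *field = (unaligned_u64 *)&r->when;

	*field = v;             /* compiled as an unaligned-safe store */
}
#endif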
static void orphanpg(struct pgrp * pg);
void proc_name_kdp(task_t t, char * buf, int size);
boolean_t proc_binary_uuid_kdp(task_t task, uuid_t uuid);
int proc_threadname_kdp(void * uth, char * buf, size_t size);
void proc_starttime_kdp(void * p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unaligned_u64 *abstime);
void proc_archinfo_kdp(void* p, cpu_type_t* cputype, cpu_subtype_t* cpusubtype);
char * proc_name_address(void * p);
char * proc_longname_address(void *);

static void pgrp_add(struct pgrp * pgrp, proc_t parent, proc_t child);
static void pgrp_remove(proc_t p);
static void pgrp_replace(proc_t p, struct pgrp *pgrp);
static void pgdelete_dropref(struct pgrp *pgrp);
extern void pg_rele_dropref(struct pgrp * pgrp);
static int csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaddittoken);
static boolean_t proc_parent_is_currentproc(proc_t p);

struct fixjob_iterargs {
	struct pgrp * pg;
	struct session * mysession;
	int entering;
};

int fixjob_callback(proc_t, void *);

uint64_t
get_current_unique_pid(void)
{
	proc_t  p = current_proc();

	if (p) {
		return p->p_uniqueid;
	} else {
		return 0;
	}
}

/*
 * Initialize global process hashing structures.
 */
void
procinit(void)
{
	LIST_INIT(&allproc);
	LIST_INIT(&zombproc);
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
	sesshashtbl = hashinit(maxproc / 4, M_PROC, &sesshash);
	uihashtbl = hashinit(maxproc / 16, M_PROC, &uihash);
#if CONFIG_PERSONAS
	personas_bootstrap();
#endif
}
/*
 * Change the count associated with number of processes
 * a given user is using. This routine protects the uihash
 * lock.
 */
int
chgproccnt(uid_t uid, int diff)
{
	struct uidinfo *uip;
	struct uidinfo *newuip = NULL;
	struct uihashhead *uipp;
	int retval;

again:
	proc_list_lock();
	uipp = UIHASH(uid);
	for (uip = uipp->lh_first; uip != 0; uip = uip->ui_hash.le_next) {
		if (uip->ui_uid == uid) {
			break;
		}
	}
	if (uip) {
		uip->ui_proccnt += diff;
		if (uip->ui_proccnt > 0) {
			retval = uip->ui_proccnt;
			proc_list_unlock();
			goto out;
		}
		if (uip->ui_proccnt < 0) {
			panic("chgproccnt: procs < 0");
		}
		LIST_REMOVE(uip, ui_hash);
		retval = 0;
		proc_list_unlock();
		kheap_free(KM_PROC, uip, sizeof(struct uidinfo));
		goto out;
	}
	if (diff <= 0) {
		if (diff == 0) {
			retval = 0;
			proc_list_unlock();
			goto out;
		}
		panic("chgproccnt: lost user");
	}
	if (newuip != NULL) {
		uip = newuip;
		newuip = NULL;
		LIST_INSERT_HEAD(uipp, uip, ui_hash);
		uip->ui_uid = uid;
		uip->ui_proccnt = diff;
		retval = diff;
		proc_list_unlock();
		goto out;
	}
	proc_list_unlock();
	newuip = kheap_alloc(KM_PROC, sizeof(struct uidinfo), Z_WAITOK);
	if (newuip == NULL) {
		panic("chgproccnt: M_PROC zone depleted");
	}
	goto again;
out:
	kheap_free(KM_PROC, newuip, sizeof(struct uidinfo));
	return retval;
}
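/*
 * Illustrative sketch (hypothetical call sites, not part of this file):
 * creation paths charge one process to a user and teardown paths release
 * it, keeping the per-uid counts balanced.
 */
#if 0
static void
example_charge_and_release(uid_t uid)
{
	(void)chgproccnt(uid, 1);       /* e.g. on process creation */
	/* ... process lifetime ... */
	(void)chgproccnt(uid, -1);      /* e.g. on exit */
}
#endif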
/*
 * Is p an inferior of the current process?
 */
int
inferior(proc_t p)
{
	int retval = 0;

	proc_list_lock();
	for (; p != current_proc(); p = p->p_pptr) {
		if (p->p_pid == 0) {
			goto out;
		}
	}
	retval = 1;
out:
	proc_list_unlock();
	return retval;
}

/*
 * Is p an inferior of t ?
 */
static int
isinferior(proc_t p, proc_t t)
{
	int retval = 0;
	int nchecked = 0;
	proc_t start = p;

	/* if p == t they are not inferior */
	if (p == t) {
		return 0;
	}

	proc_list_lock();
	for (; p != t; p = p->p_pptr) {
		nchecked++;

		/* Detect here if we're in a cycle */
		if ((p->p_pid == 0) || (p->p_pptr == start) || (nchecked >= nprocs)) {
			goto out;
		}
	}
	retval = 1;
out:
	proc_list_unlock();
	return retval;
}

int
proc_isinferior(int pid1, int pid2)
{
	proc_t p = PROC_NULL;
	proc_t t = PROC_NULL;
	int retval = 0;

	if (((p = proc_find(pid1)) != (proc_t)0) && ((t = proc_find(pid2)) != (proc_t)0)) {
		retval = isinferior(p, t);
	}

	if (p != PROC_NULL) {
		proc_rele(p);
	}

	if (t != PROC_NULL) {
		proc_rele(t);
	}

	return retval;
}

proc_t
proc_find(int pid)
{
	return proc_findinternal(pid, 0);
}

proc_t
proc_findinternal(int pid, int locked)
{
	proc_t p = PROC_NULL;

	if (locked == 0) {
		proc_list_lock();
	}

	p = pfind_locked(pid);
	if ((p == PROC_NULL) || (p != proc_ref_locked(p))) {
		p = PROC_NULL;
	}

	if (locked == 0) {
		proc_list_unlock();
	}

	return p;
}

proc_t
proc_findthread(thread_t thread)
{
	proc_t p = PROC_NULL;
	struct uthread *uth;

	proc_list_lock();
	uth = get_bsdthread_info(thread);
	if (uth && (uth->uu_flag & UT_VFORK)) {
		p = uth->uu_proc;
	} else {
		p = (proc_t)(get_bsdthreadtask_info(thread));
	}
	p = proc_ref_locked(p);
	proc_list_unlock();
	return p;
}

/*
 * Returns process identity of a given process. Calling this function is not
 * racy for a current process or if a reference to the process is held.
 */
struct proc_ident
proc_ident(proc_t p)
{
	struct proc_ident ident = {
		.p_pid = proc_pid(p),
		.p_uniqueid = proc_uniqueid(p),
		.p_idversion = proc_pidversion(p),
	};

	return ident;
}

proc_t
proc_find_ident(struct proc_ident const *ident)
{
	proc_t proc = PROC_NULL;

	proc = proc_find(ident->p_pid);
	if (proc == PROC_NULL) {
		return PROC_NULL;
	}

	if (proc_uniqueid(proc) != ident->p_uniqueid ||
	    proc_pidversion(proc) != ident->p_idversion) {
		proc_rele(proc);
		return PROC_NULL;
	}

	return proc;
}
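/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * subsystem that must act on a process later can capture its identity now
 * and revalidate with proc_find_ident() before use, instead of trusting a
 * bare pid that may have been reused in the meantime.
 */
#if 0
static int
example_act_on_proc_later(struct proc_ident ident)
{
	proc_t p = proc_find_ident(&ident);     /* fails on pid reuse */

	if (p == PROC_NULL) {
		return ESRCH;
	}
	/* ... operate on p while holding the reference ... */
	proc_rele(p);
	return 0;
}
#endif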
void
uthread_reset_proc_refcount(void *uthread)
{
	uthread_t uth;

	uth = (uthread_t) uthread;
	uth->uu_proc_refcount = 0;

#if PROC_REF_DEBUG
	if (proc_ref_tracking_disabled) {
		return;
	}

	uth->uu_pindex = 0;
#endif
}

#if PROC_REF_DEBUG
int
uthread_get_proc_refcount(void *uthread)
{
	uthread_t uth;

	if (proc_ref_tracking_disabled) {
		return 0;
	}

	uth = (uthread_t) uthread;

	return uth->uu_proc_refcount;
}
#endif

static void
record_procref(proc_t p __unused, int count)
{
	uthread_t uth;

	uth = current_uthread();
	uth->uu_proc_refcount += count;

#if PROC_REF_DEBUG
	if (proc_ref_tracking_disabled) {
		return;
	}

	if (uth->uu_pindex < NUM_PROC_REFS_TO_TRACK) {
		backtrace((uintptr_t *) &uth->uu_proc_pcs[uth->uu_pindex],
		    PROC_REF_STACK_DEPTH, NULL);

		uth->uu_proc_ps[uth->uu_pindex] = p;
		uth->uu_pindex++;
	}
#endif
}

static boolean_t
uthread_needs_to_wait_in_proc_refwait(void)
{
	uthread_t uth = current_uthread();

	/*
	 * Allow threads holding no proc refs to wait
	 * in proc_refwait; allowing threads holding
	 * proc refs to wait in proc_refwait causes
	 * deadlocks and makes proc_find non-reentrant.
	 */
	if (uth->uu_proc_refcount == 0) {
		return TRUE;
	}

	return FALSE;
}
int
proc_rele(proc_t p)
{
	proc_list_lock();
	proc_rele_locked(p);
	proc_list_unlock();

	return 0;
}

proc_t
proc_self(void)
{
	struct proc * p;

	p = current_proc();

	proc_list_lock();
	if (p != proc_ref_locked(p)) {
		p = PROC_NULL;
	}
	proc_list_unlock();
	return p;
}

proc_t
proc_ref_locked(proc_t p)
{
	proc_t p1 = p;
	int pid = proc_pid(p);

retry:
	/*
	 * if process still in creation or proc got recycled
	 * during msleep then return failure.
	 */
	if ((p == PROC_NULL) || (p1 != p) || ((p->p_listflag & P_LIST_INCREATE) != 0)) {
		return PROC_NULL;
	}

	/*
	 * Do not return process marked for termination
	 * or proc_refdrain called without ref wait.
	 * Wait for proc_refdrain_with_refwait to complete if
	 * process in refdrain and refwait flag is set, unless
	 * the current thread is holding to a proc_ref
	 * for any proc.
	 */
	if ((p->p_stat != SZOMB) &&
	    ((p->p_listflag & P_LIST_EXITED) == 0) &&
	    ((p->p_listflag & P_LIST_DEAD) == 0) &&
	    (((p->p_listflag & (P_LIST_DRAIN | P_LIST_DRAINWAIT)) == 0) ||
	    ((p->p_listflag & P_LIST_REFWAIT) != 0))) {
		if ((p->p_listflag & P_LIST_REFWAIT) != 0 && uthread_needs_to_wait_in_proc_refwait()) {
			msleep(&p->p_listflag, &proc_list_mlock, 0, "proc_refwait", 0);
			/*
			 * the proc might have been recycled since we dropped
			 * the proc list lock, get the proc again.
			 */
			p = pfind_locked(pid);
			goto retry;
		}
		p->p_refcount++;
		record_procref(p, 1);
	} else {
		p1 = PROC_NULL;
	}

	return p1;
}

void
proc_rele_locked(proc_t p)
{
	if (p->p_refcount > 0) {
		p->p_refcount--;
		record_procref(p, -1);
		if ((p->p_refcount == 0) && ((p->p_listflag & P_LIST_DRAINWAIT) == P_LIST_DRAINWAIT)) {
			p->p_listflag &= ~P_LIST_DRAINWAIT;
			wakeup(&p->p_refcount);
		}
	} else {
		panic("proc_rele_locked  -ve ref\n");
	}
}
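/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * canonical lookup pattern built on these primitives. proc_find() returns
 * a referenced proc or PROC_NULL, and every successful find must be paired
 * with a proc_rele().
 */
#if 0
static int
example_with_proc(int pid)
{
	proc_t p = proc_find(pid);

	if (p == PROC_NULL) {
		return ESRCH;
	}
	/* ... use p; the reference keeps it from being reaped ... */
	proc_rele(p);
	return 0;
}
#endif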
proc_t
proc_find_zombref(int pid)
{
	proc_t p;

	proc_list_lock();

again:
	p = pfind_locked(pid);

	/* should we bail? */
	if ((p == PROC_NULL)                                    /* not found */
	    || ((p->p_listflag & P_LIST_INCREATE) != 0)         /* not created yet */
	    || ((p->p_listflag & P_LIST_EXITED) == 0)) {        /* not started exit */
		proc_list_unlock();
		return PROC_NULL;
	}

	/* If someone else is controlling the (unreaped) zombie - wait */
	if ((p->p_listflag & P_LIST_WAITING) != 0) {
		(void)msleep(&p->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
		goto again;
	}
	p->p_listflag |= P_LIST_WAITING;

	proc_list_unlock();

	return p;
}

void
proc_drop_zombref(proc_t p)
{
	proc_list_lock();
	if ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
		p->p_listflag &= ~P_LIST_WAITING;
		wakeup(&p->p_stat);
	}
	proc_list_unlock();
}
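/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * zombie-ref pair brackets inspection of an exited-but-unreaped process;
 * P_LIST_WAITING serializes would-be reapers.
 */
#if 0
static void
example_inspect_zombie(int pid)
{
	proc_t z = proc_find_zombref(pid);

	if (z != PROC_NULL) {
		/* ... read exit status etc.; no one else can reap it now ... */
		proc_drop_zombref(z);
	}
}
#endif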
void
proc_refdrain(proc_t p)
{
	proc_refdrain_with_refwait(p, FALSE);
}

proc_t
proc_refdrain_with_refwait(proc_t p, boolean_t get_ref_and_allow_wait)
{
	boolean_t initexec = FALSE;
	proc_list_lock();

	p->p_listflag |= P_LIST_DRAIN;
	if (get_ref_and_allow_wait) {
		/*
		 * All the calls to proc_ref_locked will wait
		 * for the flag to get cleared before returning a ref,
		 * unless the current thread is holding to a proc ref
		 * for any proc.
		 */
		p->p_listflag |= P_LIST_REFWAIT;
		if (p == initproc) {
			initexec = TRUE;
		}
	}

	/* Do not wait in ref drain for launchd exec */
	while (p->p_refcount && !initexec) {
		p->p_listflag |= P_LIST_DRAINWAIT;
		msleep(&p->p_refcount, &proc_list_mlock, 0, "proc_refdrain", 0);
	}

	p->p_listflag &= ~P_LIST_DRAIN;
	if (!get_ref_and_allow_wait) {
		p->p_listflag |= P_LIST_DEAD;
	} else {
		/* Return a ref to the caller */
		p->p_refcount++;
		record_procref(p, 1);
	}

	proc_list_unlock();

	if (get_ref_and_allow_wait) {
		return p;
	}
	return NULL;
}

void
proc_refwake(proc_t p)
{
	proc_list_lock();
	p->p_listflag &= ~P_LIST_REFWAIT;
	wakeup(&p->p_listflag);
	proc_list_unlock();
}

proc_t
proc_parentholdref(proc_t p)
{
	proc_t parent = PROC_NULL;
	proc_t pp;
	int loopcnt = 0;

	proc_list_lock();
loop:
	pp = p->p_pptr;
	if ((pp == PROC_NULL) || (pp->p_stat == SZOMB) || ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED))) {
		parent = PROC_NULL;
		goto out;
	}

	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == P_LIST_CHILDDRSTART) {
		pp->p_listflag |= P_LIST_CHILDDRWAIT;
		msleep(&pp->p_childrencnt, &proc_list_mlock, 0, "proc_parent", 0);
		loopcnt++;
		if (loopcnt == 5) {
			parent = PROC_NULL;
			goto out;
		}
		goto loop;
	}

	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == 0) {
		pp->p_parentref++;
		parent = pp;
		goto out;
	}

out:
	proc_list_unlock();
	return parent;
}

int
proc_parentdropref(proc_t p, int listlocked)
{
	if (listlocked == 0) {
		proc_list_lock();
	}

	if (p->p_parentref > 0) {
		p->p_parentref--;
		if ((p->p_parentref == 0) && ((p->p_listflag & P_LIST_PARENTREFWAIT) == P_LIST_PARENTREFWAIT)) {
			p->p_listflag &= ~P_LIST_PARENTREFWAIT;
			wakeup(&p->p_parentref);
		}
	} else {
		panic("proc_parentdropref  -ve ref\n");
	}
	if (listlocked == 0) {
		proc_list_unlock();
	}

	return 0;
}

void
proc_childdrainstart(proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_CHILDDRSTART) == P_LIST_CHILDDRSTART) {
		panic("proc_childdrainstart: childdrain already started\n");
	}
#endif
	p->p_listflag |= P_LIST_CHILDDRSTART;
	/* wait for all that hold parentrefs to drop */
	while (p->p_parentref > 0) {
		p->p_listflag |= P_LIST_PARENTREFWAIT;
		msleep(&p->p_parentref, &proc_list_mlock, 0, "proc_childdrainstart", 0);
	}
}

void
proc_childdrainend(proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if (p->p_childrencnt > 0) {
		panic("exiting: children still hanging around\n");
	}
#endif
	p->p_listflag |= P_LIST_CHILDDRAINED;
	if ((p->p_listflag & (P_LIST_CHILDLKWAIT | P_LIST_CHILDDRWAIT)) != 0) {
		p->p_listflag &= ~(P_LIST_CHILDLKWAIT | P_LIST_CHILDDRWAIT);
		wakeup(&p->p_childrencnt);
	}
}

void
proc_checkdeadrefs(__unused proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INHASH) != 0) {
		panic("proc being freed and still in hash %p: %u\n", p, p->p_listflag);
	}
	if (p->p_childrencnt != 0) {
		panic("proc being freed and pending children cnt %p:%d\n", p, p->p_childrencnt);
	}
	if (p->p_refcount != 0) {
		panic("proc being freed and pending refcount %p:%d\n", p, p->p_refcount);
	}
	if (p->p_parentref != 0) {
		panic("proc being freed and pending parentrefs %p:%d\n", p, p->p_parentref);
	}
#endif
}
__attribute__((always_inline, visibility("hidden")))
void
proc_require(proc_t proc, proc_require_flags_t flags)
{
	if ((flags & PROC_REQUIRE_ALLOW_NULL) && proc == PROC_NULL) {
		return;
	}
	if ((flags & PROC_REQUIRE_ALLOW_KERNPROC) && proc == &proc0) {
		return;
	}
	zone_id_require(ZONE_ID_PROC, sizeof(struct proc), proc);
}
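/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * routine handed a proc_t from a less-trusted layer can confine it to the
 * proc zone before dereferencing any field, turning a forged pointer into
 * a controlled panic. The getters that follow use exactly this pattern.
 */
#if 0
static int
example_get_pid_checked(proc_t p)
{
	proc_require(p, PROC_REQUIRE_ALLOW_NULL);
	return (p != PROC_NULL) ? p->p_pid : -1;
}
#endif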
int
proc_pid(proc_t p)
{
	if (p != NULL) {
		proc_require(p, PROC_REQUIRE_ALLOW_KERNPROC);
		return p->p_pid;
	}
	return -1;
}

int
proc_ppid(proc_t p)
{
	if (p != NULL) {
		proc_require(p, PROC_REQUIRE_ALLOW_KERNPROC);
		return p->p_ppid;
	}
	return -1;
}

int
proc_original_ppid(proc_t p)
{
	if (p != NULL) {
		proc_require(p, PROC_REQUIRE_ALLOW_KERNPROC);
		return p->p_original_ppid;
	}
	return -1;
}

void
proc_starttime(proc_t p, struct timeval *tv)
{
	if (p != NULL && tv != NULL) {
		tv->tv_sec = p->p_start.tv_sec;
		tv->tv_usec = p->p_start.tv_usec;
	}
}

int
proc_selfpid(void)
{
	return current_proc()->p_pid;
}

int
proc_selfppid(void)
{
	return current_proc()->p_ppid;
}

uint64_t
proc_selfcsflags(void)
{
	return (uint64_t)current_proc()->p_csflags;
}

int
proc_csflags(proc_t p, uint64_t *flags)
{
	if (p && flags) {
		proc_require(p, PROC_REQUIRE_ALLOW_KERNPROC);
		*flags = (uint64_t)p->p_csflags;
		return 0;
	}
	return EINVAL;
}

uint32_t
proc_platform(const proc_t p)
{
	if (p != NULL) {
		return p->p_platform;
	}
	return 0;
}

uint32_t
proc_min_sdk(proc_t p)
{
	if (p != NULL) {
		return p->p_min_sdk;
	}
	return 0;
}

#if CONFIG_DTRACE
static proc_t
dtrace_current_proc_vforking(void)
{
	thread_t th = current_thread();
	struct uthread *ut = get_bsdthread_info(th);

	if (ut &&
	    ((ut->uu_flag & (UT_VFORK | UT_VFORKING)) == (UT_VFORK | UT_VFORKING))) {
		/*
		 * Handle the narrow window where we're in the vfork syscall,
		 * but we're not quite ready to claim (in particular, to DTrace)
		 * that we're running as the child.
		 */
		return get_bsdtask_info(get_threadtask(th));
	}
	return current_proc();
}

pid_t
dtrace_proc_selfpid(void)
{
	return dtrace_current_proc_vforking()->p_pid;
}

pid_t
dtrace_proc_selfppid(void)
{
	return dtrace_current_proc_vforking()->p_ppid;
}

uid_t
dtrace_proc_selfruid(void)
{
	return dtrace_current_proc_vforking()->p_ruid;
}
#endif /* CONFIG_DTRACE */
proc_t
proc_parent(proc_t p)
{
	proc_t parent;
	proc_t pp;

	proc_list_lock();
loop:
	pp = p->p_pptr;
	parent = proc_ref_locked(pp);
	if ((parent == PROC_NULL) && (pp != PROC_NULL) && (pp->p_stat != SZOMB) && ((pp->p_listflag & P_LIST_EXITED) != 0) && ((pp->p_listflag & P_LIST_CHILDDRAINED) == 0)) {
		pp->p_listflag |= P_LIST_CHILDLKWAIT;
		msleep(&pp->p_childrencnt, &proc_list_mlock, 0, "proc_parent", 0);
		goto loop;
	}
	proc_list_unlock();
	return parent;
}

static boolean_t
proc_parent_is_currentproc(proc_t p)
{
	boolean_t ret = FALSE;

	proc_list_lock();
	if (p->p_pptr == current_proc()) {
		ret = TRUE;
	}

	proc_list_unlock();
	return ret;
}

void
proc_name(int pid, char * buf, int size)
{
	proc_t p;

	if (size <= 0) {
		return;
	}

	bzero(buf, size);

	if ((p = proc_find(pid)) != PROC_NULL) {
		strlcpy(buf, &p->p_comm[0], size);
		proc_rele(p);
	}
}

void
proc_name_kdp(task_t t, char * buf, int size)
{
	proc_t p = get_bsdtask_info(t);
	if (p == PROC_NULL) {
		return;
	}

	if ((size_t)size > sizeof(p->p_comm)) {
		strlcpy(buf, &p->p_name[0], MIN((int)sizeof(p->p_name), size));
	} else {
		strlcpy(buf, &p->p_comm[0], MIN((int)sizeof(p->p_comm), size));
	}
}

boolean_t
proc_binary_uuid_kdp(task_t task, uuid_t uuid)
{
	proc_t p = get_bsdtask_info(task);
	if (p == PROC_NULL) {
		return FALSE;
	}

	proc_getexecutableuuid(p, uuid, sizeof(uuid_t));

	return TRUE;
}

int
proc_threadname_kdp(void * uth, char * buf, size_t size)
{
	if (size < MAXTHREADNAMESIZE) {
		/* this is really just a protective measure for the future in
		 * case the thread name size in stackshot gets out of sync with
		 * the BSD max thread name size. Note that bsd_getthreadname
		 * doesn't take input buffer size into account. */
		return -1;
	}

	if (uth != NULL) {
		bsd_getthreadname(uth, buf);
	}
	return 0;
}


/* note that this function is generally going to be called from stackshot,
 * and the arguments will be coming from a struct which is declared packed,
 * thus the input arguments will in general be unaligned. We have to handle
 * that here. */
void
proc_starttime_kdp(void *p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unaligned_u64 *abstime)
{
	proc_t pp = (proc_t)p;
	if (pp != PROC_NULL) {
		if (tv_sec != NULL) {
			*tv_sec = pp->p_start.tv_sec;
		}
		if (tv_usec != NULL) {
			*tv_usec = pp->p_start.tv_usec;
		}
		if (abstime != NULL) {
			if (pp->p_stats != NULL) {
				*abstime = pp->p_stats->ps_start;
			} else {
				*abstime = 0;
			}
		}
	}
}
void
proc_archinfo_kdp(void* p, cpu_type_t* cputype, cpu_subtype_t* cpusubtype)
{
	proc_t pp = (proc_t)p;
	if (pp != PROC_NULL) {
		*cputype = pp->p_cputype;
		*cpusubtype = pp->p_cpusubtype;
	}
}

char *
proc_name_address(void *p)
{
	return &((proc_t)p)->p_comm[0];
}

char *
proc_longname_address(void *p)
{
	return &((proc_t)p)->p_name[0];
}

char *
proc_best_name(proc_t p)
{
	if (p->p_name[0] != '\0') {
		return &p->p_name[0];
	}
	return &p->p_comm[0];
}

void
proc_selfname(char * buf, int size)
{
	proc_t p;

	if ((p = current_proc()) != (proc_t)0) {
		strlcpy(buf, &p->p_comm[0], size);
	}
}

void
proc_signal(int pid, int signum)
{
	proc_t p;

	if ((p = proc_find(pid)) != PROC_NULL) {
		psignal(p, signum);
		proc_rele(p);
	}
}

int
proc_issignal(int pid, sigset_t mask)
{
	proc_t p;
	int error = 0;

	if ((p = proc_find(pid)) != PROC_NULL) {
		error = proc_pendingsignals(p, mask);
		proc_rele(p);
	}

	return error;
}

int
proc_noremotehang(proc_t p)
{
	int retval = 0;

	if (p) {
		retval = p->p_flag & P_NOREMOTEHANG;
	}
	return retval ? 1: 0;
}

int
proc_exiting(proc_t p)
{
	int retval = 0;

	if (p) {
		retval = p->p_lflag & P_LEXIT;
	}
	return retval ? 1: 0;
}

int
proc_in_teardown(proc_t p)
{
	int retval = 0;

	if (p) {
		retval = p->p_lflag & P_LPEXIT;
	}
	return retval ? 1: 0;
}

int
proc_forcequota(proc_t p)
{
	int retval = 0;

	if (p) {
		retval = p->p_flag & P_FORCEQUOTA;
	}
	return retval ? 1: 0;
}

int
proc_suser(proc_t p)
{
	kauth_cred_t my_cred;
	int error;

	my_cred = kauth_cred_proc_ref(p);
	error = suser(my_cred, &p->p_acflag);
	kauth_cred_unref(&my_cred);
	return error;
}

task_t
proc_task(proc_t proc)
{
	return (task_t)proc->task;
}

/*
 * Obtain the first thread in a process
 *
 * XXX This is a bad thing to do; it exists predominantly to support the
 * XXX use of proc_t's in places that should really be using
 * XXX thread_t's instead.  This maintains historical behaviour, but really
 * XXX needs an audit of the context (proxy vs. not) to clean up.
 */
thread_t
proc_thread(proc_t proc)
{
	LCK_MTX_ASSERT(&proc->p_mlock, LCK_MTX_ASSERT_OWNED);

	uthread_t uth = TAILQ_FIRST(&proc->p_uthlist);

	if (uth != NULL) {
		return uth->uu_context.vc_thread;
	}

	return NULL;
}
kauth_cred_t
proc_ucred(proc_t p)
{
	return p->p_ucred;
}

struct uthread *
current_uthread(void)
{
	thread_t th = current_thread();

	return (struct uthread *)get_bsdthread_info(th);
}

int
proc_is64bit(proc_t p)
{
	return IS_64BIT_PROCESS(p);
}

int
proc_is64bit_data(proc_t p)
{
	assert(p->task);
	return (int)task_get_64bit_data(p->task);
}

int
proc_isinitproc(proc_t p)
{
	if (initproc == NULL) {
		return 0;
	}
	return p == initproc;
}

int
proc_pidversion(proc_t p)
{
	return p->p_idversion;
}

uint32_t
proc_persona_id(proc_t p)
{
	return (uint32_t)persona_id_from_proc(p);
}

uint32_t
proc_getuid(proc_t p)
{
	return p->p_uid;
}

uint32_t
proc_getgid(proc_t p)
{
	return p->p_gid;
}

uint64_t
proc_uniqueid(proc_t p)
{
	return p->p_uniqueid;
}

uint64_t
proc_puniqueid(proc_t p)
{
	return p->p_puniqueid;
}

void
proc_coalitionids(__unused proc_t p, __unused uint64_t ids[COALITION_NUM_TYPES])
{
#if CONFIG_COALITIONS
	task_coalition_ids(p->task, ids);
#else
	memset(ids, 0, sizeof(uint64_t[COALITION_NUM_TYPES]));
#endif
	return;
}

uint64_t
proc_was_throttled(proc_t p)
{
	return p->was_throttled;
}

uint64_t
proc_did_throttle(proc_t p)
{
	return p->did_throttle;
}

int
proc_getcdhash(proc_t p, unsigned char *cdhash)
{
	return vn_getcdhash(p->p_textvp, p->p_textoff, cdhash);
}

int
proc_exitstatus(proc_t p)
{
	return p->p_xstat & 0xffff;
}

void
proc_getexecutableuuid(proc_t p, unsigned char *uuidbuf, unsigned long size)
{
	if (size >= sizeof(p->p_uuid)) {
		memcpy(uuidbuf, p->p_uuid, sizeof(p->p_uuid));
	}
}
/* Return vnode for executable with an iocount. Must be released with vnode_put() */
vnode_t
proc_getexecutablevnode(proc_t p)
{
	vnode_t tvp = p->p_textvp;

	if (tvp != NULLVP) {
		if (vnode_getwithref(tvp) == 0) {
			return tvp;
		}
	}

	return NULLVP;
}

int
proc_gettty(proc_t p, vnode_t *vp)
{
	if (!p || !vp) {
		return EINVAL;
	}

	struct session *procsp = proc_session(p);
	int err = EINVAL;

	if (procsp != SESSION_NULL) {
		session_lock(procsp);
		vnode_t ttyvp = procsp->s_ttyvp;
		int ttyvid = procsp->s_ttyvid;
		session_unlock(procsp);

		if (ttyvp) {
			if (vnode_getwithvid(ttyvp, ttyvid) == 0) {
				*vp = ttyvp;
				err = 0;
			}
		} else {
			err = ENOENT;
		}

		session_rele(procsp);
	}

	return err;
}

int
proc_gettty_dev(proc_t p, dev_t *dev)
{
	struct session *procsp = proc_session(p);
	boolean_t has_tty = FALSE;

	if (procsp != SESSION_NULL) {
		session_lock(procsp);

		struct tty * tp = SESSION_TP(procsp);
		if (tp != TTY_NULL) {
			*dev = tp->t_dev;
			has_tty = TRUE;
		}

		session_unlock(procsp);
		session_rele(procsp);
	}

	if (has_tty) {
		return 0;
	} else {
		return EINVAL;
	}
}
int
proc_selfexecutableargs(uint8_t *buf, size_t *buflen)
{
	proc_t p = current_proc();

	// buflen must always be provided
	if (buflen == NULL) {
		return EINVAL;
	}

	// If a buf is provided, there must be at least enough room to fit argc
	if (buf && *buflen < sizeof(p->p_argc)) {
		return EINVAL;
	}

	if (!p->user_stack) {
		return EINVAL;
	}

	if (buf == NULL) {
		*buflen = p->p_argslen + sizeof(p->p_argc);
		return 0;
	}

	// Copy in argc to the first 4 bytes
	memcpy(buf, &p->p_argc, sizeof(p->p_argc));

	if (*buflen > sizeof(p->p_argc) && p->p_argslen > 0) {
		// See memory layout comment in kern_exec.c:exec_copyout_strings()
		// We want to copy starting from `p_argslen` bytes away from top of stack
		return copyin(p->user_stack - p->p_argslen,
		           buf + sizeof(p->p_argc),
		           MIN(p->p_argslen, *buflen - sizeof(p->p_argc)));
	} else {
		return 0;
	}
}
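/*
 * Illustrative sketch (hypothetical in-kernel caller, not part of this
 * file): probe for the required size with buf == NULL, then fetch argc
 * plus the packed argv strings in a second call.
 */
#if 0
static int
example_fetch_own_args(void)
{
	size_t len = 0;
	uint8_t *buf;
	int err = proc_selfexecutableargs(NULL, &len);  /* size query */

	if (err != 0) {
		return err;
	}
	buf = kheap_alloc(KHEAP_TEMP, len, Z_WAITOK);
	if (buf == NULL) {
		return ENOMEM;
	}
	err = proc_selfexecutableargs(buf, &len);
	/* ... first 4 bytes are argc, NUL-separated strings follow ... */
	kheap_free(KHEAP_TEMP, buf, len);
	return err;
}
#endif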
off_t
proc_getexecutableoffset(proc_t p)
{
	return p->p_textoff;
}

void
bsd_set_dependency_capable(task_t task)
{
	proc_t p = get_bsdtask_info(task);

	if (p) {
		OSBitOrAtomic(P_DEPENDENCY_CAPABLE, &p->p_flag);
	}
}


int
IS_64BIT_PROCESS(proc_t p)
{
	if (p && (p->p_flag & P_LP64)) {
		return 1;
	} else {
		return 0;
	}
}

/*
 * Locate a process by number
 */
proc_t
pfind_locked(pid_t pid)
{
	proc_t p;
#if DEBUG
	proc_t q;
#endif

	if (!pid) {
		return kernproc;
	}

	for (p = PIDHASH(pid)->lh_first; p != 0; p = p->p_hash.le_next) {
		if (p->p_pid == pid) {
#if DEBUG
			for (q = p->p_hash.le_next; q != 0; q = q->p_hash.le_next) {
				if ((p != q) && (q->p_pid == pid)) {
					panic("two procs with same pid %p:%p:%d:%d\n", p, q, p->p_pid, q->p_pid);
				}
			}
#endif
			return p;
		}
	}
	return NULL;
}

/*
 * Locate a zombie by PID
 */
__private_extern__ proc_t
pzfind(pid_t pid)
{
	proc_t p;

	proc_list_lock();

	for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (p->p_pid == pid) {
			break;
		}
	}

	proc_list_unlock();

	return p;
}

/*
 * Locate a process group by number
 */
struct pgrp *
pgfind(pid_t pgid)
{
	struct pgrp * pgrp;

	proc_list_lock();
	pgrp = pgfind_internal(pgid);
	if ((pgrp == NULL) || ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) != 0)) {
		pgrp = PGRP_NULL;
	} else {
		pgrp->pg_refcount++;
	}
	proc_list_unlock();
	return pgrp;
}

struct pgrp *
pgfind_internal(pid_t pgid)
{
	struct pgrp *pgrp;

	for (pgrp = PGRPHASH(pgid)->lh_first; pgrp != 0; pgrp = pgrp->pg_hash.le_next) {
		if (pgrp->pg_id == pgid) {
			return pgrp;
		}
	}
	return NULL;
}

void
pg_rele(struct pgrp * pgrp)
{
	if (pgrp == PGRP_NULL) {
		return;
	}
	pg_rele_dropref(pgrp);
}

void
pg_rele_dropref(struct pgrp * pgrp)
{
	proc_list_lock();
	if ((pgrp->pg_refcount == 1) && ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) == PGRP_FLAG_TERMINATE)) {
		proc_list_unlock();
		pgdelete_dropref(pgrp);
		return;
	}

	pgrp->pg_refcount--;
	proc_list_unlock();
}

struct session *
session_find_internal(pid_t sessid)
{
	struct session *sess;

	for (sess = SESSHASH(sessid)->lh_first; sess != 0; sess = sess->s_hash.le_next) {
		if (sess->s_sid == sessid) {
			return sess;
		}
	}
	return NULL;
}
/*
 * Make a new process ready to become a useful member of society by making it
 * visible in all the right places and initialize its own lists to empty.
 *
 * Parameters:	parent			The parent of the process to insert
 *		child			The child process to insert
 *
 * Returns:	(void)
 *
 * Notes:	Insert a child process into the parents process group, assign
 *		the child the parent process pointer and PPID of the parent,
 *		place it on the parents p_children list as a sibling,
 *		initialize its own child list, place it in the allproc list,
 *		insert it in the proper hash bucket, and initialize its
 *		event list.
 */
void
pinsertchild(proc_t parent, proc_t child)
{
	struct pgrp * pg;

	LIST_INIT(&child->p_children);
	child->p_pptr = parent;
	child->p_ppid = parent->p_pid;
	child->p_original_ppid = parent->p_pid;
	child->p_puniqueid = parent->p_uniqueid;
	child->p_xhighbits = 0;

	pg = proc_pgrp(parent);
	pgrp_add(pg, parent, child);
	pg_rele(pg);

	proc_list_lock();

#if CONFIG_MEMORYSTATUS
	memorystatus_add(child, TRUE);
#endif

	parent->p_childrencnt++;
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);

	LIST_INSERT_HEAD(&allproc, child, p_list);
	/* mark the completion of proc creation */
	child->p_listflag &= ~P_LIST_INCREATE;

	proc_list_unlock();
}

/*
 * Move p to a new or existing process group (and session)
 *
 * Returns:	0			Success
 *		ESRCH			No such process
 */
int
enterpgrp(proc_t p, pid_t pgid, int mksess)
{
	struct pgrp *pgrp;
	struct pgrp *mypgrp;
	struct session * procsp;

	pgrp = pgfind(pgid);
	mypgrp = proc_pgrp(p);
	procsp = proc_session(p);

#if DIAGNOSTIC
	if (pgrp != NULL && mksess) {   /* firewalls */
		panic("enterpgrp: setsid into non-empty pgrp");
	}
	if (SESS_LEADER(p, procsp)) {
		panic("enterpgrp: session leader attempted setpgrp");
	}
#endif
	if (pgrp == PGRP_NULL) {
		pid_t savepid = p->p_pid;
		proc_t np = PROC_NULL;
		/*
		 * new process group
		 */
#if DIAGNOSTIC
		if (p->p_pid != pgid) {
			panic("enterpgrp: new pgrp and pid != pgid");
		}
#endif
		pgrp = zalloc_flags(pgrp_zone, Z_WAITOK | Z_ZERO);
		if ((np = proc_find(savepid)) == NULL || np != p) {
			if (np != PROC_NULL) {
				proc_rele(np);
			}
			if (mypgrp != PGRP_NULL) {
				pg_rele(mypgrp);
			}
			if (procsp != SESSION_NULL) {
				session_rele(procsp);
			}
			zfree(pgrp_zone, pgrp);
			return ESRCH;
		}
		proc_rele(np);
		if (mksess) {
			struct session *sess;

			/*
			 * new session
			 */
			sess = zalloc_flags(session_zone, Z_WAITOK | Z_ZERO);
			sess->s_leader = p;
			sess->s_sid = p->p_pid;
			sess->s_count = 1;
			sess->s_ttypgrpid = NO_PID;

			lck_mtx_init(&sess->s_mlock, &proc_mlock_grp, &proc_lck_attr);

			bcopy(procsp->s_login, sess->s_login,
			    sizeof(sess->s_login));
			OSBitAndAtomic(~((uint32_t)P_CONTROLT), &p->p_flag);
			proc_list_lock();
			LIST_INSERT_HEAD(SESSHASH(sess->s_sid), sess, s_hash);
			proc_list_unlock();
			pgrp->pg_session = sess;
			p->p_sessionid = sess->s_sid;
#if DIAGNOSTIC
			if (p != current_proc()) {
				panic("enterpgrp: mksession and p != curproc");
			}
#endif
		} else {
			proc_list_lock();
			pgrp->pg_session = procsp;
			p->p_sessionid = procsp->s_sid;

			if ((pgrp->pg_session->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
				panic("enterpgrp: providing ref to terminating session ");
			}
			pgrp->pg_session->s_count++;
			proc_list_unlock();
		}
		pgrp->pg_id = pgid;

		lck_mtx_init(&pgrp->pg_mlock, &proc_mlock_grp, &proc_lck_attr);

		LIST_INIT(&pgrp->pg_members);
		proc_list_lock();
		pgrp->pg_refcount = 1;
		LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
		proc_list_unlock();
	} else if (pgrp == mypgrp) {
		pg_rele(pgrp);
		if (mypgrp != NULL) {
			pg_rele(mypgrp);
		}
		if (procsp != SESSION_NULL) {
			session_rele(procsp);
		}
		return 0;
	}

	if (procsp != SESSION_NULL) {
		session_rele(procsp);
	}
	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	fixjobc(p, mypgrp, 0);

	if (mypgrp != PGRP_NULL) {
		pg_rele(mypgrp);
	}
	pgrp_replace(p, pgrp);
	pg_rele(pgrp);

	return 0;
}
/*
 * remove process from process group
 */
int
leavepgrp(proc_t p)
{
	pgrp_remove(p);
	return 0;
}

/*
 * delete a process group
 */
static void
pgdelete_dropref(struct pgrp *pgrp)
{
	struct tty *ttyp;
	int emptypgrp = 1;
	struct session *sessp;


	pgrp_lock(pgrp);
	if (pgrp->pg_membercnt != 0) {
		emptypgrp = 0;
	}
	pgrp_unlock(pgrp);

	proc_list_lock();
	pgrp->pg_refcount--;
	if ((emptypgrp == 0) || (pgrp->pg_membercnt != 0)) {
		proc_list_unlock();
		return;
	}

	pgrp->pg_listflags |= PGRP_FLAG_TERMINATE;

	if (pgrp->pg_refcount > 0) {
		proc_list_unlock();
		return;
	}

	pgrp->pg_listflags |= PGRP_FLAG_DEAD;
	LIST_REMOVE(pgrp, pg_hash);

	proc_list_unlock();

	ttyp = SESSION_TP(pgrp->pg_session);
	if (ttyp != TTY_NULL) {
		if (ttyp->t_pgrp == pgrp) {
			tty_lock(ttyp);
			/* Re-check after acquiring the lock */
			if (ttyp->t_pgrp == pgrp) {
				ttyp->t_pgrp = NULL;
				pgrp->pg_session->s_ttypgrpid = NO_PID;
			}
			tty_unlock(ttyp);
		}
	}

	proc_list_lock();

	sessp = pgrp->pg_session;
	if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
		panic("pg_deleteref: manipulating refs of already terminating session");
	}
	if (--sessp->s_count == 0) {
		if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
			panic("pg_deleteref: terminating already terminated session");
		}
		sessp->s_listflags |= S_LIST_TERM;
		ttyp = SESSION_TP(sessp);
		LIST_REMOVE(sessp, s_hash);
		proc_list_unlock();
		if (ttyp != TTY_NULL) {
			tty_lock(ttyp);
			if (ttyp->t_session == sessp) {
				ttyp->t_session = NULL;
			}
			tty_unlock(ttyp);
		}
		proc_list_lock();
		sessp->s_listflags |= S_LIST_DEAD;
		if (sessp->s_count != 0) {
			panic("pg_deleteref: freeing session in use");
		}
		proc_list_unlock();

		lck_mtx_destroy(&sessp->s_mlock, &proc_mlock_grp);
		zfree(session_zone, sessp);
	} else {
		proc_list_unlock();
	}
	lck_mtx_destroy(&pgrp->pg_mlock, &proc_mlock_grp);
	zfree(pgrp_zone, pgrp);
}
/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 */
int
fixjob_callback(proc_t p, void * arg)
{
	struct fixjob_iterargs *fp;
	struct pgrp * pg, *hispg;
	struct session * mysession, *hissess;
	int entering;

	fp = (struct fixjob_iterargs *)arg;
	pg = fp->pg;
	mysession = fp->mysession;
	entering = fp->entering;

	hispg = proc_pgrp(p);
	hissess = proc_session(p);

	if ((hispg != pg) &&
	    (hissess == mysession)) {
		pgrp_lock(hispg);
		if (entering) {
			hispg->pg_jobc++;
			pgrp_unlock(hispg);
		} else if (--hispg->pg_jobc == 0) {
			pgrp_unlock(hispg);
			orphanpg(hispg);
		} else {
			pgrp_unlock(hispg);
		}
	}
	if (hissess != SESSION_NULL) {
		session_rele(hissess);
	}
	if (hispg != PGRP_NULL) {
		pg_rele(hispg);
	}

	return PROC_RETURNED;
}

void
fixjobc(proc_t p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp = PGRP_NULL;
	struct session *hissess = SESSION_NULL;
	struct session *mysession = pgrp->pg_session;
	proc_t parent;
	struct fixjob_iterargs fjarg;
	boolean_t proc_parent_self;

	/*
	 * Check if p's parent is current proc, if yes then no need to take
	 * a ref; calling proc_parent with current proc as parent may
	 * deadlock if current proc is exiting.
	 */
	proc_parent_self = proc_parent_is_currentproc(p);
	if (proc_parent_self) {
		parent = current_proc();
	} else {
		parent = proc_parent(p);
	}

	if (parent != PROC_NULL) {
		hispgrp = proc_pgrp(parent);
		hissess = proc_session(parent);
		if (!proc_parent_self) {
			proc_rele(parent);
		}
	}

	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	if ((hispgrp != pgrp) &&
	    (hissess == mysession)) {
		pgrp_lock(pgrp);
		if (entering) {
			pgrp->pg_jobc++;
			pgrp_unlock(pgrp);
		} else if (--pgrp->pg_jobc == 0) {
			pgrp_unlock(pgrp);
			orphanpg(pgrp);
		} else {
			pgrp_unlock(pgrp);
		}
	}

	if (hissess != SESSION_NULL) {
		session_rele(hissess);
	}
	if (hispgrp != PGRP_NULL) {
		pg_rele(hispgrp);
	}

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	fjarg.pg = pgrp;
	fjarg.mysession = mysession;
	fjarg.entering = entering;
	proc_childrenwalk(p, fixjob_callback, &fjarg);
}
/*
 * The pidlist_* routines support the functions in this file that
 * walk lists of processes applying filters and callouts to the
 * elements of the list.
 *
 * A prior implementation used a single linear array, which can be
 * tricky to allocate on large systems. This implementation creates
 * an SLIST of modestly sized arrays of PIDS_PER_ENTRY elements.
 *
 * The array should be sized large enough to keep the overhead of
 * walking the list low, but small enough that blocking allocations of
 * pidlist_entry_t structures always succeed.
 */

#define PIDS_PER_ENTRY 1021

typedef struct pidlist_entry {
	SLIST_ENTRY(pidlist_entry) pe_link;
	u_int pe_nused;
	pid_t pe_pid[PIDS_PER_ENTRY];
} pidlist_entry_t;

typedef struct {
	SLIST_HEAD(, pidlist_entry) pl_head;
	struct pidlist_entry *pl_active;
	u_int pl_nalloc;
} pidlist_t;

static __inline__ pidlist_t *
pidlist_init(pidlist_t *pl)
{
	SLIST_INIT(&pl->pl_head);
	pl->pl_active = NULL;
	pl->pl_nalloc = 0;
	return pl;
}

static u_int
pidlist_alloc(pidlist_t *pl, u_int needed)
{
	while (pl->pl_nalloc < needed) {
		pidlist_entry_t *pe = kheap_alloc(KHEAP_TEMP, sizeof(*pe),
		    Z_WAITOK | Z_ZERO);
		if (NULL == pe) {
			panic("no space for pidlist entry");
		}
		SLIST_INSERT_HEAD(&pl->pl_head, pe, pe_link);
		pl->pl_nalloc += (sizeof(pe->pe_pid) / sizeof(pe->pe_pid[0]));
	}
	return pl->pl_nalloc;
}

static void
pidlist_free(pidlist_t *pl)
{
	pidlist_entry_t *pe;
	while (NULL != (pe = SLIST_FIRST(&pl->pl_head))) {
		SLIST_FIRST(&pl->pl_head) = SLIST_NEXT(pe, pe_link);
		kheap_free(KHEAP_TEMP, pe, sizeof(*pe));
	}
	pl->pl_nalloc = 0;
}

static __inline__ void
pidlist_set_active(pidlist_t *pl)
{
	pl->pl_active = SLIST_FIRST(&pl->pl_head);
	assert(pl->pl_active);
}

static void
pidlist_add_pid(pidlist_t *pl, pid_t pid)
{
	pidlist_entry_t *pe = pl->pl_active;
	if (pe->pe_nused >= sizeof(pe->pe_pid) / sizeof(pe->pe_pid[0])) {
		if (NULL == (pe = SLIST_NEXT(pe, pe_link))) {
			panic("pidlist allocation exhausted");
		}
		pl->pl_active = pe;
	}
	pe->pe_pid[pe->pe_nused++] = pid;
}

static __inline__ u_int
pidlist_nalloc(const pidlist_t *pl)
{
	return pl->pl_nalloc;
}
/*
 * A process group has become orphaned; if there are any stopped processes in
 * the group, hang up all processes in that group.
 */
static void
orphanpg(struct pgrp *pgrp)
{
	pidlist_t pid_list, *pl = pidlist_init(&pid_list);
	u_int pid_count_available = 0;
	proc_t p;

	/* allocate outside of the pgrp_lock */
	for (;;) {
		pgrp_lock(pgrp);

		boolean_t should_iterate = FALSE;
		pid_count_available = 0;

		PGMEMBERS_FOREACH(pgrp, p) {
			pid_count_available++;
			if (p->p_stat == SSTOP) {
				should_iterate = TRUE;
			}
		}
		if (pid_count_available == 0 || !should_iterate) {
			pgrp_unlock(pgrp);
			goto out; /* no orphaned processes OR nothing stopped */
		}
		if (pidlist_nalloc(pl) >= pid_count_available) {
			break;
		}
		pgrp_unlock(pgrp);

		pidlist_alloc(pl, pid_count_available);
	}
	pidlist_set_active(pl);

	u_int pid_count = 0;
	PGMEMBERS_FOREACH(pgrp, p) {
		pidlist_add_pid(pl, proc_pid(p));
		if (++pid_count >= pid_count_available) {
			break;
		}
	}
	pgrp_unlock(pgrp);

	const pidlist_entry_t *pe;
	SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
		for (u_int i = 0; i < pe->pe_nused; i++) {
			const pid_t pid = pe->pe_pid[i];
			if (0 == pid) {
				continue; /* skip kernproc */
			}
			p = proc_find(pid);
			if (!p) {
				continue;
			}
			proc_transwait(p, 0);
			pt_setrunnable(p);
			psignal(p, SIGHUP);
			psignal(p, SIGCONT);
			proc_rele(p);
		}
	}
out:
	pidlist_free(pl);
}

int
proc_is_translated(proc_t p __unused)
{
	return 0;
}

int
proc_is_classic(proc_t p __unused)
{
	return 0;
}

bool
proc_is_exotic(proc_t p)
{
	if (p == NULL) {
		return false;
	}
	return task_is_exotic(proc_task(p));
}

bool
proc_is_alien(proc_t p)
{
	if (p == NULL) {
		return false;
	}
	return task_is_alien(proc_task(p));
}

/* XXX Why does this function exist?  Need to kill it off... */
proc_t
current_proc_EXTERNAL(void)
{
	return current_proc();
}
int
proc_is_forcing_hfs_case_sensitivity(proc_t p)
{
	return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY) ? 1 : 0;
}

bool
proc_ignores_content_protection(proc_t p)
{
	return os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_IGNORE_CONTENT_PROTECTION;
}

bool
proc_ignores_node_permissions(proc_t p)
{
	return os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_IGNORE_NODE_PERMISSIONS;
}

bool
proc_skip_mtime_update(proc_t p)
{
	return os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_SKIP_MTIME_UPDATE;
}
#if CONFIG_COREDUMP
/*
 * proc_core_name(name, uid, pid)
 * Expand the name described in corefilename, using name, uid, and pid.
 * corefilename is a printf-like string, with three format specifiers:
 *	%N	name of process ("name")
 *	%P	process id (pid)
 *	%U	user id (uid)
 * For example, "%N.core" is the default; they can be disabled completely
 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
 * This is controlled by the sysctl variable kern.corefile (see above).
 */
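/*
 * Illustrative expansions (not part of the build): with the pattern
 * "/cores/core.%P" and pid 1234 the result is "/cores/core.1234";
 * "/private/var/cores/%N.core" with process name "Finder" yields
 * "/private/var/cores/Finder.core". A userspace sketch of reading the
 * pattern via the sysctl mentioned above:
 */
#if 0
#include <sys/sysctl.h>

char pattern[MAXPATHLEN];
size_t len = sizeof(pattern);

if (sysctlbyname("kern.corefile", pattern, &len, NULL, 0) == 0) {
	/* pattern now holds e.g. "/cores/core.%P" */
}
#endif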
__private_extern__ int
proc_core_name(const char *name, uid_t uid, pid_t pid, char *cf_name,
    size_t cf_name_len)
{
	const char *format, *appendstr;
	char id_buf[11];                /* Buffer for pid/uid -- max 4B */
	size_t i, l, n;

	if (cf_name == NULL) {
		goto toolong;
	}

	format = corefilename;
	for (i = 0, n = 0; n < cf_name_len && format[i]; i++) {
		switch (format[i]) {
		case '%':       /* Format character */
			i++;
			switch (format[i]) {
			case '%':
				appendstr = "%";
				break;
			case 'N':       /* process name */
				appendstr = name;
				break;
			case 'P':       /* process id */
				snprintf(id_buf, sizeof(id_buf), "%u", pid);
				appendstr = id_buf;
				break;
			case 'U':       /* user id */
				snprintf(id_buf, sizeof(id_buf), "%u", uid);
				appendstr = id_buf;
				break;
			case '\0':      /* format string ended in % symbol */
				goto endofstring;
			default:
				appendstr = "";
				log(LOG_ERR,
				    "Unknown format character %c in `%s'\n",
				    format[i], format);
			}
			l = strlen(appendstr);
			if ((n + l) >= cf_name_len) {
				goto toolong;
			}
			bcopy(appendstr, cf_name + n, l);
			n += l;
			break;
		default:
			cf_name[n++] = format[i];
		}
	}
	if (format[i] != '\0') {
		goto toolong;
	}
	return 0;
toolong:
	log(LOG_ERR, "pid %ld (%s), uid (%u): corename is too long\n",
	    (long)pid, name, (uint32_t)uid);
	return 1;
endofstring:
	log(LOG_ERR, "pid %ld (%s), uid (%u): unexpected end of string after %% token\n",
	    (long)pid, name, (uint32_t)uid);
	return 1;
}
#endif /* CONFIG_COREDUMP */
/* Code Signing related routines */

int
csops(__unused proc_t p, struct csops_args *uap, __unused int32_t *retval)
{
	return csops_internal(uap->pid, uap->ops, uap->useraddr,
	           uap->usersize, USER_ADDR_NULL);
}

int
csops_audittoken(__unused proc_t p, struct csops_audittoken_args *uap, __unused int32_t *retval)
{
	if (uap->uaudittoken == USER_ADDR_NULL) {
		return EINVAL;
	}
	return csops_internal(uap->pid, uap->ops, uap->useraddr,
	           uap->usersize, uap->uaudittoken);
}

static int
csops_copy_token(void *start, size_t length, user_size_t usize, user_addr_t uaddr)
{
	char fakeheader[8] = { 0 };
	int error;

	if (usize < sizeof(fakeheader)) {
		return ERANGE;
	}

	/* if no blob, fill in zero header */
	if (NULL == start) {
		start = fakeheader;
		length = sizeof(fakeheader);
	} else if (usize < length) {
		/* ... if input too short, copy out length of entitlement */
		uint32_t length32 = htonl((uint32_t)length);
		memcpy(&fakeheader[4], &length32, sizeof(length32));

		error = copyout(fakeheader, uaddr, sizeof(fakeheader));
		if (error == 0) {
			return ERANGE; /* input buffer too short, ERANGE signals that */
		}
		return error;
	}
	return copyout(start, uaddr, length);
}
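/*
 * Illustrative userspace sketch (not part of this file) of the
 * probe-then-fetch protocol csops_copy_token() implements: a short buffer
 * receives only the 8-byte fake header, whose bytes 4..7 carry the total
 * blob length in network byte order, together with ERANGE; the caller then
 * retries with a buffer of that size.
 */
#if 0
uint8_t hdr[8];

if (csops(pid, CS_OPS_ENTITLEMENTS_BLOB, hdr, sizeof(hdr)) == -1 &&
    errno == ERANGE) {
	uint32_t want;
	memcpy(&want, hdr + 4, sizeof(want));
	want = ntohl(want);
	void *blob = malloc(want);
	if (blob != NULL &&
	    csops(pid, CS_OPS_ENTITLEMENTS_BLOB, blob, want) == 0) {
		/* ... parse the entitlements blob ... */
	}
}
#endif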
static int
csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaudittoken)
{
	size_t usize = (size_t)CAST_DOWN(size_t, usersize);
	proc_t pt;
	int forself;
	int error;
	vnode_t tvp;
	off_t toff;
	unsigned char cdhash[SHA1_RESULTLEN];
	audit_token_t token;
	unsigned int upid = 0, uidversion = 0;

	forself = error = 0;

	if (pid == 0) {
		pid = proc_selfpid();
	}
	if (pid == proc_selfpid()) {
		forself = 1;
	}

	switch (ops) {
	case CS_OPS_STATUS:
	case CS_OPS_CDHASH:
	case CS_OPS_PIDOFFSET:
	case CS_OPS_ENTITLEMENTS_BLOB:
	case CS_OPS_IDENTITY:
	case CS_OPS_BLOB:
	case CS_OPS_TEAMID:
	case CS_OPS_CLEAR_LV:
		break;          /* not restricted to root */
	default:
		if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE) {
			return EPERM;
		}
		break;
	}

	pt = proc_find(pid);
	if (pt == PROC_NULL) {
		return ESRCH;
	}

	upid = pt->p_pid;
	uidversion = pt->p_idversion;
	if (uaudittoken != USER_ADDR_NULL) {
		error = copyin(uaudittoken, &token, sizeof(audit_token_t));
		if (error != 0) {
			goto out;
		}
		/* verify the audit token pid/idversion matches with proc */
		if ((token.val[5] != upid) || (token.val[7] != uidversion)) {
			error = ESRCH;
			goto out;
		}
	}

#if CONFIG_MACF
	switch (ops) {
	case CS_OPS_MARKINVALID:
	case CS_OPS_MARKHARD:
	case CS_OPS_MARKKILL:
	case CS_OPS_MARKRESTRICT:
	case CS_OPS_SET_STATUS:
	case CS_OPS_CLEARINSTALLER:
	case CS_OPS_CLEARPLATFORM:
	case CS_OPS_CLEAR_LV:
		if ((error = mac_proc_check_set_cs_info(current_proc(), pt, ops))) {
			goto out;
		}
		break;
	default:
		if ((error = mac_proc_check_get_cs_info(current_proc(), pt, ops))) {
			goto out;
		}
	}
#endif

	switch (ops) {
	case CS_OPS_STATUS: {
		uint32_t retflags;

		proc_lock(pt);
		retflags = pt->p_csflags;
		if (cs_process_enforcement(pt)) {
			retflags |= CS_ENFORCEMENT;
		}
		if (csproc_get_platform_binary(pt)) {
			retflags |= CS_PLATFORM_BINARY;
		}
		if (csproc_get_platform_path(pt)) {
			retflags |= CS_PLATFORM_PATH;
		}
		// Don't return CS_REQUIRE_LV if we turned it on with CS_FORCED_LV, but still report CS_FORCED_LV
		if ((pt->p_csflags & CS_FORCED_LV) == CS_FORCED_LV) {
			retflags &= (~CS_REQUIRE_LV);
		}
		proc_unlock(pt);

		if (uaddr != USER_ADDR_NULL) {
			error = copyout(&retflags, uaddr, sizeof(uint32_t));
		}
		break;
	}
	case CS_OPS_MARKINVALID:
		proc_lock(pt);
		if ((pt->p_csflags & CS_VALID) == CS_VALID) {   /* is currently valid */
			pt->p_csflags &= ~CS_VALID;             /* set invalid */
			cs_process_invalidated(pt);
			if ((pt->p_csflags & CS_KILL) == CS_KILL) {
				pt->p_csflags |= CS_KILLED;
				proc_unlock(pt);
				if (cs_debug) {
					printf("CODE SIGNING: marked invalid by pid %d: "
					    "p=%d[%s] honoring CS_KILL, final status 0x%x\n",
					    proc_selfpid(), pt->p_pid, pt->p_comm, pt->p_csflags);
				}
				psignal(pt, SIGKILL);
			} else {
				proc_unlock(pt);
			}
		} else {
			proc_unlock(pt);
		}

		break;

	case CS_OPS_MARKHARD:
		proc_lock(pt);
		pt->p_csflags |= CS_HARD;
		if ((pt->p_csflags & CS_VALID) == 0) {
			/* @@@ allow? reject? kill? @@@ */
			proc_unlock(pt);
			error = EINVAL;
			goto out;
		} else {
			proc_unlock(pt);
		}
		break;

	case CS_OPS_MARKKILL:
		proc_lock(pt);
		pt->p_csflags |= CS_KILL;
		if ((pt->p_csflags & CS_VALID) == 0) {
			proc_unlock(pt);
			psignal(pt, SIGKILL);
		} else {
			proc_unlock(pt);
		}
		break;

	case CS_OPS_PIDOFFSET:
		toff = pt->p_textoff;
		proc_rele(pt);
		error = copyout(&toff, uaddr, sizeof(toff));
		return error;

	case CS_OPS_CDHASH:

		/* pt already holds a reference on its p_textvp */
		tvp = pt->p_textvp;
		toff = pt->p_textoff;

		if (tvp == NULLVP || usize != SHA1_RESULTLEN) {
			proc_rele(pt);
			return EINVAL;
		}

		error = vn_getcdhash(tvp, toff, cdhash);
		proc_rele(pt);

		if (error == 0) {
			error = copyout(cdhash, uaddr, sizeof(cdhash));
		}

		return error;

	case CS_OPS_ENTITLEMENTS_BLOB: {
		void *start;
		size_t length;

		proc_lock(pt);

		if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
			proc_unlock(pt);
			error = EINVAL;
			break;
		}

		error = cs_entitlements_blob_get(pt, &start, &length);
		proc_unlock(pt);
		if (error) {
			break;
		}

		error = csops_copy_token(start, length, usize, uaddr);
		break;
	}
	case CS_OPS_MARKRESTRICT:
		proc_lock(pt);
		pt->p_csflags |= CS_RESTRICT;
		proc_unlock(pt);
		break;

	case CS_OPS_SET_STATUS: {
		uint32_t flags;

		if (usize < sizeof(flags)) {
			error = ERANGE;
			break;
		}

		error = copyin(uaddr, &flags, sizeof(flags));
		if (error) {
			break;
		}

		/* only allow setting a subset of all code sign flags */
		flags &=
		    CS_HARD | CS_EXEC_SET_HARD |
		    CS_KILL | CS_EXEC_SET_KILL |
		    CS_RESTRICT |
		    CS_REQUIRE_LV |
		    CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT;

		proc_lock(pt);
		if (pt->p_csflags & CS_VALID) {
			if ((flags & CS_ENFORCEMENT) &&
			    !(pt->p_csflags & CS_ENFORCEMENT)) {
				vm_map_cs_enforcement_set(get_task_map(pt->task), TRUE);
			}
			pt->p_csflags |= flags;
		} else {
			error = EINVAL;
		}
		proc_unlock(pt);

		break;
	}
	case CS_OPS_CLEAR_LV: {
		/*
		 * This option is used to remove library validation from
		 * a running process. This is used in plugin architectures
		 * when a program needs to load untrusted libraries. This
		 * allows the process to maintain library validation as
		 * long as possible, then drop it only when required.
		 * Once a process has loaded the untrusted library,
		 * relying on library validation in the future will
		 * not be effective. An alternative is to re-exec
		 * your application without library validation, or
		 * fork an untrusted child.
		 */
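		/*
		 * Illustrative userspace sketch (not part of this file,
		 * macOS only): a suitably entitled process dropping library
		 * validation on itself before loading an untrusted plugin.
		 *
		 *	if (csops(getpid(), CS_OPS_CLEAR_LV, NULL, 0) != 0) {
		 *		// not entitled, or unsupported on this platform
		 *	}
		 *	// dlopen() the plugin only after the call succeeds
		 */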
#if !defined(XNU_TARGET_OS_OSX)
		// We only support dropping library validation on macOS
		error = ENOTSUP;
#else
		/*
		 * if we have the flag set, and the caller wants
		 * to remove it, and they're entitled to, then
		 * we remove it from the csflags
		 *
		 * NOTE: We are fine to poke into the task because
		 * we get a ref to pt when we do the proc_find
		 * at the beginning of this function.
		 *
		 * We also only allow altering ourselves.
		 */
		if (forself == 1 && IOTaskHasEntitlement(pt->task, CLEAR_LV_ENTITLEMENT)) {
			proc_lock(pt);
			pt->p_csflags &= (~(CS_REQUIRE_LV | CS_FORCED_LV));
			proc_unlock(pt);
			error = 0;
		} else {
			error = EPERM;
		}
#endif
		break;
	}
	case CS_OPS_BLOB: {
		void *start;
		size_t length;

		proc_lock(pt);
		if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
			proc_unlock(pt);
			error = EINVAL;
			break;
		}

		error = cs_blob_get(pt, &start, &length);
		proc_unlock(pt);
		if (error) {
			break;
		}

		error = csops_copy_token(start, length, usize, uaddr);
		break;
	}
	case CS_OPS_IDENTITY:
	case CS_OPS_TEAMID: {
		const char *identity;
		uint8_t fakeheader[8];
		uint32_t idlen;
		size_t length;

		/*
		 * Make identity have a blob header to make it
		 * easier on userland to guess the identity
		 * length.
		 */
		if (usize < sizeof(fakeheader)) {
			error = ERANGE;
			break;
		}
		memset(fakeheader, 0, sizeof(fakeheader));

		proc_lock(pt);
		if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
			proc_unlock(pt);
			error = EINVAL;
			break;
		}

		identity = ops == CS_OPS_TEAMID ? csproc_get_teamid(pt) : cs_identity_get(pt);
		proc_unlock(pt);
		if (identity == NULL) {
			error = ENOENT;
			break;
		}

		length = strlen(identity) + 1;          /* include NUL */
		idlen = htonl((uint32_t)(length + sizeof(fakeheader)));
		memcpy(&fakeheader[4], &idlen, sizeof(idlen));

		error = copyout(fakeheader, uaddr, sizeof(fakeheader));
		if (error) {
			break;
		}

		if (usize < sizeof(fakeheader) + length) {
			error = ERANGE;
		} else if (usize > sizeof(fakeheader)) {
			error = copyout(identity, uaddr + sizeof(fakeheader), length);
		}

		break;
	}
	case CS_OPS_CLEARINSTALLER:
		proc_lock(pt);
		pt->p_csflags &= ~(CS_INSTALLER | CS_DATAVAULT_CONTROLLER | CS_EXEC_INHERIT_SIP);
		proc_unlock(pt);
		break;

	case CS_OPS_CLEARPLATFORM:
#if DEVELOPMENT || DEBUG
		if (cs_process_global_enforcement()) {
			error = ENOTSUP;
			break;
		}

#if CONFIG_CSR
		if (csr_check(CSR_ALLOW_APPLE_INTERNAL) != 0) {
			error = ENOTSUP;
			break;
		}
#endif

		proc_lock(pt);
		pt->p_csflags &= ~(CS_PLATFORM_BINARY | CS_PLATFORM_PATH);
		csproc_clear_platform_binary(pt);
		proc_unlock(pt);
		break;
#else
		error = ENOTSUP;
		break;
#endif /* !DEVELOPMENT || DEBUG */

	default:
		error = EINVAL;
		break;
	}
out:
	proc_rele(pt);
	return error;
}
void
proc_iterate(
	unsigned int flags,
	proc_iterate_fn_t callout,
	void *arg,
	proc_iterate_fn_t filterfn,
	void *filterarg)
{
	pidlist_t pid_list, *pl = pidlist_init(&pid_list);
	u_int pid_count_available = 0;

	assert(callout != NULL);

	/* allocate outside of the proc_list_lock */
	for (;;) {
		proc_list_lock();
		pid_count_available = nprocs + 1; /* kernel_task not counted in nprocs */
		assert(pid_count_available > 0);
		if (pidlist_nalloc(pl) >= pid_count_available) {
			break;
		}
		proc_list_unlock();

		pidlist_alloc(pl, pid_count_available);
	}
	pidlist_set_active(pl);

	/* filter pids into the pid_list */

	u_int pid_count = 0;
	if (flags & PROC_ALLPROCLIST) {
		proc_t p;
		ALLPROC_FOREACH(p) {
			/* ignore processes that are being forked */
			if (p->p_stat == SIDL) {
				continue;
			}
			if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
				continue;
			}
			pidlist_add_pid(pl, proc_pid(p));
			if (++pid_count >= pid_count_available) {
				break;
			}
		}
	}

	if ((pid_count < pid_count_available) &&
	    (flags & PROC_ZOMBPROCLIST)) {
		proc_t p;
		ZOMBPROC_FOREACH(p) {
			if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
				continue;
			}
			pidlist_add_pid(pl, proc_pid(p));
			if (++pid_count >= pid_count_available) {
				break;
			}
		}
	}

	proc_list_unlock();

	/* call callout on processes in the pid_list */

	const pidlist_entry_t *pe;
	SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
		for (u_int i = 0; i < pe->pe_nused; i++) {
			const pid_t pid = pe->pe_pid[i];
			proc_t p = proc_find(pid);
			if (p) {
				if ((flags & PROC_NOWAITTRANS) == 0) {
					proc_transwait(p, 0);
				}
				const int callout_ret = callout(p, arg);

				switch (callout_ret) {
				case PROC_RETURNED_DONE:
					proc_rele(p);
					OS_FALLTHROUGH;
				case PROC_CLAIMED_DONE:
					goto out;

				case PROC_RETURNED:
					proc_rele(p);
					OS_FALLTHROUGH;
				case PROC_CLAIMED:
					break;
				default:
					panic("%s: callout =%d for pid %d",
					    __func__, callout_ret, pid);
					break;
				}
			} else if (flags & PROC_ZOMBPROCLIST) {
				p = proc_find_zombref(pid);
				if (!p) {
					continue;
				}
				const int callout_ret = callout(p, arg);

				switch (callout_ret) {
				case PROC_RETURNED_DONE:
					proc_drop_zombref(p);
					OS_FALLTHROUGH;
				case PROC_CLAIMED_DONE:
					goto out;

				case PROC_RETURNED:
					proc_drop_zombref(p);
					OS_FALLTHROUGH;
				case PROC_CLAIMED:
					break;
				default:
					panic("%s: callout =%d for zombie %d",
					    __func__, callout_ret, pid);
					break;
				}
			}
		}
	}
out:
	pidlist_free(pl);
}
void
proc_rebootscan(
	proc_iterate_fn_t callout,
	void *arg,
	proc_iterate_fn_t filterfn,
	void *filterarg)
{
	proc_t p;

	assert(callout != NULL);

	proc_shutdown_exitcount = 0;

restart_foreach:

	proc_list_lock();

	ALLPROC_FOREACH(p) {
		if ((filterfn != NULL) && filterfn(p, filterarg) == 0) {
			continue;
		}
		p = proc_ref_locked(p);
		if (!p) {
			continue;
		}

		proc_list_unlock();

		proc_transwait(p, 0);
		(void)callout(p, arg);
		proc_rele(p);

		goto restart_foreach;
	}

	proc_list_unlock();
}

void
proc_childrenwalk(
	proc_t parent,
	proc_iterate_fn_t callout,
	void *arg)
{
	pidlist_t pid_list, *pl = pidlist_init(&pid_list);
	u_int pid_count_available = 0;

	assert(parent != NULL);
	assert(callout != NULL);

	for (;;) {
		proc_list_lock();
		pid_count_available = parent->p_childrencnt;
		if (pid_count_available == 0) {
			proc_list_unlock();
			goto out;
		}
		if (pidlist_nalloc(pl) >= pid_count_available) {
			break;
		}
		proc_list_unlock();

		pidlist_alloc(pl, pid_count_available);
	}
	pidlist_set_active(pl);

	u_int pid_count = 0;
	proc_t p;
	PCHILDREN_FOREACH(parent, p) {
		if (p->p_stat == SIDL) {
			continue;
		}
		pidlist_add_pid(pl, proc_pid(p));
		if (++pid_count >= pid_count_available) {
			break;
		}
	}

	proc_list_unlock();

	const pidlist_entry_t *pe;
	SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
		for (u_int i = 0; i < pe->pe_nused; i++) {
			const pid_t pid = pe->pe_pid[i];
			p = proc_find(pid);
			if (!p) {
				continue;
			}
			const int callout_ret = callout(p, arg);

			switch (callout_ret) {
			case PROC_RETURNED_DONE:
				proc_rele(p);
				OS_FALLTHROUGH;
			case PROC_CLAIMED_DONE:
				goto out;

			case PROC_RETURNED:
				proc_rele(p);
				OS_FALLTHROUGH;
			case PROC_CLAIMED:
				break;
			default:
				panic("%s: callout =%d for pid %d",
				    __func__, callout_ret, pid);
				break;
			}
		}
	}
out:
	pidlist_free(pl);
}
void
pgrp_iterate(
	struct pgrp *pgrp,
	unsigned int flags,
	proc_iterate_fn_t callout,
	void * arg,
	proc_iterate_fn_t filterfn,
	void * filterarg)
{
	pidlist_t pid_list, *pl = pidlist_init(&pid_list);
	u_int pid_count_available = 0;

	assert(pgrp != NULL);
	assert(callout != NULL);

	for (;;) {
		pgrp_lock(pgrp);
		pid_count_available = pgrp->pg_membercnt;
		if (pid_count_available == 0) {
			pgrp_unlock(pgrp);
			if (flags & PGRP_DROPREF) {
				pg_rele(pgrp);
			}
			goto out;
		}
		if (pidlist_nalloc(pl) >= pid_count_available) {
			break;
		}
		pgrp_unlock(pgrp);

		pidlist_alloc(pl, pid_count_available);
	}
	pidlist_set_active(pl);

	const pid_t pgid = pgrp->pg_id;
	u_int pid_count = 0;
	proc_t p;
	PGMEMBERS_FOREACH(pgrp, p) {
		if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
			continue;
		}
		pidlist_add_pid(pl, proc_pid(p));
		if (++pid_count >= pid_count_available) {
			break;
		}
	}

	pgrp_unlock(pgrp);

	if (flags & PGRP_DROPREF) {
		pg_rele(pgrp);
	}

	const pidlist_entry_t *pe;
	SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
		for (u_int i = 0; i < pe->pe_nused; i++) {
			const pid_t pid = pe->pe_pid[i];
			if (0 == pid) {
				continue; /* skip kernproc */
			}
			p = proc_find(pid);
			if (!p) {
				continue;
			}
			if (p->p_pgrpid != pgid) {
				proc_rele(p);
				continue;
			}
			const int callout_ret = callout(p, arg);

			switch (callout_ret) {
			case PROC_RETURNED:
				proc_rele(p);
				OS_FALLTHROUGH;
			case PROC_CLAIMED:
				break;
			case PROC_RETURNED_DONE:
				proc_rele(p);
				OS_FALLTHROUGH;
			case PROC_CLAIMED_DONE:
				goto out;

			default:
				panic("%s: callout =%d for pid %d",
				    __func__, callout_ret, pid);
			}
		}
	}

out:
	pidlist_free(pl);
}
3101 pgrp_add(struct pgrp
* pgrp
, struct proc
* parent
, struct proc
* child
)
3104 child
->p_pgrp
= pgrp
;
3105 child
->p_pgrpid
= pgrp
->pg_id
;
3106 child
->p_sessionid
= pgrp
->pg_session
->s_sid
;
3107 child
->p_listflag
|= P_LIST_INPGRP
;
3109 * When pgrp is being freed , a process can still
3110 * request addition using setpgid from bash when
3111 * login is terminated (login cycler) return ESRCH
3112 * Safe to hold lock due to refcount on pgrp
3114 if ((pgrp
->pg_listflags
& (PGRP_FLAG_TERMINATE
| PGRP_FLAG_DEAD
)) == PGRP_FLAG_TERMINATE
) {
3115 pgrp
->pg_listflags
&= ~PGRP_FLAG_TERMINATE
;
3118 if ((pgrp
->pg_listflags
& PGRP_FLAG_DEAD
) == PGRP_FLAG_DEAD
) {
3119 panic("pgrp_add : pgrp is dead adding process");
3124 pgrp
->pg_membercnt
++;
3125 if (parent
!= PROC_NULL
) {
3126 LIST_INSERT_AFTER(parent
, child
, p_pglist
);
3128 LIST_INSERT_HEAD(&pgrp
->pg_members
, child
, p_pglist
);
3133 if (((pgrp
->pg_listflags
& (PGRP_FLAG_TERMINATE
| PGRP_FLAG_DEAD
)) == PGRP_FLAG_TERMINATE
) && (pgrp
->pg_membercnt
!= 0)) {
3134 pgrp
->pg_listflags
&= ~PGRP_FLAG_TERMINATE
;
3140 pgrp_remove(struct proc
* p
)
3147 #if __PROC_INTERNAL_DEBUG
3148 if ((p
->p_listflag
& P_LIST_INPGRP
) == 0) {
3149 panic("removing from pglist but no named ref\n");
3152 p
->p_pgrpid
= PGRPID_DEAD
;
3153 p
->p_listflag
&= ~P_LIST_INPGRP
;
3157 if (pg
== PGRP_NULL
) {
3158 panic("pgrp_remove: pg is NULL");
3163 if (pg
->pg_membercnt
< 0) {
3164 panic("pgprp: -ve membercnt pgprp:%p p:%p\n", pg
, p
);
3167 LIST_REMOVE(p
, p_pglist
);
3168 if (pg
->pg_members
.lh_first
== 0) {
3170 pgdelete_dropref(pg
);
3178 /* cannot use proc_pgrp as it maybe stalled */
void
pgrp_replace(struct proc * p, struct pgrp * newpg)
{
	struct pgrp * oldpg;

	while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
		p->p_listflag |= P_LIST_PGRPTRWAIT;
		(void)msleep(&p->p_pgrpid, &proc_list_mlock, 0, "proc_pgrp", 0);
	}

	p->p_listflag |= P_LIST_PGRPTRANS;

	oldpg = p->p_pgrp;
	if (oldpg == PGRP_NULL) {
		panic("pgrp_replace: oldpg NULL");
	}
	oldpg->pg_refcount++;
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INPGRP) == 0) {
		panic("removing from pglist but no named ref\n");
	}
#endif
	p->p_pgrpid = PGRPID_DEAD;
	p->p_listflag &= ~P_LIST_INPGRP;

	oldpg->pg_membercnt--;
	if (oldpg->pg_membercnt < 0) {
		panic("pgrp_replace: -ve membercnt pgrp:%p p:%p\n", oldpg, p);
	}
	LIST_REMOVE(p, p_pglist);
	if (oldpg->pg_members.lh_first == 0) {
		pgdelete_dropref(oldpg);
	}

	p->p_pgrp = newpg;
	p->p_pgrpid = newpg->pg_id;
	p->p_sessionid = newpg->pg_session->s_sid;
	p->p_listflag |= P_LIST_INPGRP;
	/*
	 * When the pgrp is being freed, a process can still request
	 * addition to it via setpgid() from bash when a login session
	 * is terminated (the "login cycler"); ESRCH is returned in that
	 * case.  It is safe to hold the lock here because of the
	 * refcount held on the pgrp.
	 */
	if ((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
		newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	if ((newpg->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD) {
		panic("pgrp_replace: pgrp is dead adding process");
	}

	newpg->pg_membercnt++;
	LIST_INSERT_HEAD(&newpg->pg_members, p, p_pglist);

	if (((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (newpg->pg_membercnt != 0)) {
		newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	p->p_listflag &= ~P_LIST_PGRPTRANS;
	if ((p->p_listflag & P_LIST_PGRPTRWAIT) == P_LIST_PGRPTRWAIT) {
		p->p_listflag &= ~P_LIST_PGRPTRWAIT;
		wakeup(&p->p_pgrpid);
	}
}
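/*
 * Editor's note: the P_LIST_PGRPTRANS / P_LIST_PGRPTRWAIT handshake used
 * above, distilled.  Readers that race with a pgrp switch park on
 * &p->p_pgrpid; the writer wakes them once the transition is complete.
 * Both sides run under the proc list lock, which msleep() drops while
 * sleeping and reacquires before returning.
 */
#if 0 /* illustrative only */
	/* reader side (see proc_pgrp() and proc_session() below) */
	while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
		p->p_listflag |= P_LIST_PGRPTRWAIT;
		(void)msleep(&p->p_pgrpid, &proc_list_mlock, 0, "proc_pgrp", 0);
	}

	/* writer side (pgrp_replace() above), once the switch is done */
	p->p_listflag &= ~P_LIST_PGRPTRANS;
	if ((p->p_listflag & P_LIST_PGRPTRWAIT) == P_LIST_PGRPTRWAIT) {
		p->p_listflag &= ~P_LIST_PGRPTRWAIT;
		wakeup(&p->p_pgrpid);
	}
#endif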
void
pgrp_lock(struct pgrp * pgrp)
{
	lck_mtx_lock(&pgrp->pg_mlock);
}

void
pgrp_unlock(struct pgrp * pgrp)
{
	lck_mtx_unlock(&pgrp->pg_mlock);
}
void
session_lock(struct session * sess)
{
	lck_mtx_lock(&sess->s_mlock);
}

void
session_unlock(struct session * sess)
{
	lck_mtx_unlock(&sess->s_mlock);
}
struct pgrp *
proc_pgrp(proc_t p)
{
	struct pgrp * pgrp;

	if (p == PROC_NULL) {
		return PGRP_NULL;
	}

	while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
		p->p_listflag |= P_LIST_PGRPTRWAIT;
		(void)msleep(&p->p_pgrpid, &proc_list_mlock, 0, "proc_pgrp", 0);
	}

	pgrp = p->p_pgrp;

	assert(pgrp != NULL);

	if (pgrp != PGRP_NULL) {
		pgrp->pg_refcount++;
		if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) != 0) {
			panic("proc_pgrp: ref being provided for dead pgrp");
		}
	}

	return pgrp;
}
struct pgrp *
tty_pgrp(struct tty * tp)
{
	struct pgrp * pg = PGRP_NULL;

	pg = tp->t_pgrp;

	if (pg != PGRP_NULL) {
		if ((pg->pg_listflags & PGRP_FLAG_DEAD) != 0) {
			panic("tty_pgrp: ref being provided for dead pgrp");
		}
		pg->pg_refcount++;
	}

	return pg;
}
struct session *
proc_session(proc_t p)
{
	struct session * sess = SESSION_NULL;

	if (p == PROC_NULL) {
		return SESSION_NULL;
	}

	/* wait during transitions */
	while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
		p->p_listflag |= P_LIST_PGRPTRWAIT;
		(void)msleep(&p->p_pgrpid, &proc_list_mlock, 0, "proc_pgrp", 0);
	}

	if ((p->p_pgrp != PGRP_NULL) && ((sess = p->p_pgrp->pg_session) != SESSION_NULL)) {
		if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
			panic("proc_session: returning session ref on terminating session");
		}
		sess->s_count++;
	}

	return sess;
}
void
session_rele(struct session *sess)
{
	if (--sess->s_count == 0) {
		if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
			panic("session_rele: terminating already terminated session");
		}
		sess->s_listflags |= S_LIST_TERM;
		LIST_REMOVE(sess, s_hash);
		sess->s_listflags |= S_LIST_DEAD;
		if (sess->s_count != 0) {
			panic("session_rele: freeing session in use");
		}
		lck_mtx_destroy(&sess->s_mlock, &proc_mlock_grp);
		zfree(session_zone, sess);
	}
}
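/*
 * Editor's note: a minimal sketch of the pairing expected by the two
 * routines above.  proc_session() takes a reference (s_count++) that the
 * caller must drop with session_rele(); "example_proc_sid" is a
 * hypothetical caller, not part of this file.
 */
#if 0 /* illustrative only */
static pid_t
example_proc_sid(proc_t p)
{
	struct session *sess = proc_session(p);
	pid_t sid = -1;

	if (sess != SESSION_NULL) {
		sid = sess->s_sid;
		session_rele(sess);     /* drops the ref; frees on last rele */
	}
	return sid;
}
#endif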
int
proc_transstart(proc_t p, int locked, int non_blocking)
{
	if (locked == 0) {
		proc_lock(p);
	}
	while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
		if (((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT) || non_blocking) {
			if (locked == 0) {
				proc_unlock(p);
			}
			return EDEADLK;
		}
		p->p_lflag |= P_LTRANSWAIT;
		msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
	}
	p->p_lflag |= P_LINTRANSIT;
	p->p_transholder = current_thread();
	if (locked == 0) {
		proc_unlock(p);
	}
	return 0;
}
void
proc_transcommit(proc_t p, int locked)
{
	if (locked == 0) {
		proc_lock(p);
	}

	assert((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT);
	assert(p->p_transholder == current_thread());
	p->p_lflag |= P_LTRANSCOMMIT;

	if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
		p->p_lflag &= ~P_LTRANSWAIT;
		wakeup(&p->p_lflag);
	}
	if (locked == 0) {
		proc_unlock(p);
	}
}
void
proc_transend(proc_t p, int locked)
{
	if (locked == 0) {
		proc_lock(p);
	}

	p->p_lflag &= ~(P_LINTRANSIT | P_LTRANSCOMMIT);
	p->p_transholder = NULL;

	if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
		p->p_lflag &= ~P_LTRANSWAIT;
		wakeup(&p->p_lflag);
	}
	if (locked == 0) {
		proc_unlock(p);
	}
}
int
proc_transwait(proc_t p, int locked)
{
	if (locked == 0) {
		proc_lock(p);
	}
	while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
		if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT && current_proc() == p) {
			if (locked == 0) {
				proc_unlock(p);
			}
			return EDEADLK;
		}
		p->p_lflag |= P_LTRANSWAIT;
		msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
	}
	if (locked == 0) {
		proc_unlock(p);
	}
	return 0;
}
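/*
 * Editor's note: typical use of the transaction primitives above,
 * sketched.  proc_transstart() marks the proc in transit (or fails with
 * EDEADLK when it is already committing or non_blocking is set), and
 * proc_transend() clears the state and wakes waiters.
 * "example_mutate_proc" is hypothetical.
 */
#if 0 /* illustrative only */
static int
example_mutate_proc(proc_t p)
{
	int error = proc_transstart(p, 0 /* not locked */, 0 /* blocking */);

	if (error != 0) {
		return error;           /* EDEADLK: refused */
	}
	/* ... modify the proc while other transactions are held off ... */
	proc_transend(p, 0 /* not locked */);
	return 0;
}
#endif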
void
proc_klist_lock(void)
{
	lck_mtx_lock(&proc_klist_mlock);
}

void
proc_klist_unlock(void)
{
	lck_mtx_unlock(&proc_klist_mlock);
}
void
proc_knote(struct proc * p, long hint)
{
	proc_klist_lock();
	KNOTE(&p->p_klist, hint);
	proc_klist_unlock();
}
void
proc_knote_drain(struct proc *p)
{
	struct knote *kn = NULL;

	/*
	 * Clear the proc's klist to avoid references after the proc is reaped.
	 */
	proc_klist_lock();
	while ((kn = SLIST_FIRST(&p->p_klist))) {
		kn->kn_proc = PROC_NULL;
		KNOTE_DETACH(&p->p_klist, kn);
	}
	proc_klist_unlock();
}
void
proc_setregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag |= P_LREGISTER;
	proc_unlock(p);
}

void
proc_resetregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag &= ~P_LREGISTER;
	proc_unlock(p);
}
pid_t
proc_pgrpid(proc_t p)
{
	return p->p_pgrpid;
}

pid_t
proc_sessionid(proc_t p)
{
	return p->p_sessionid;
}

pid_t
proc_selfpgrpid(void)
{
	return current_proc()->p_pgrpid;
}
/* return control and action states */
int
proc_getpcontrol(int pid, int * pcontrolp)
{
	proc_t p;

	p = proc_find(pid);
	if (p == PROC_NULL) {
		return ESRCH;
	}
	if (pcontrolp != NULL) {
		*pcontrolp = p->p_pcaction;
	}

	proc_rele(p);
	return 0;
}
int
proc_dopcontrol(proc_t p)
{
	int pcontrol;
	os_reason_t kill_reason;

	proc_lock(p);

	pcontrol = PROC_CONTROL_STATE(p);

	if (PROC_ACTION_STATE(p) == 0) {
		switch (pcontrol) {
		case P_PCTHROTTLE:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: throttling pid %d (%s)\n", p->p_pid, p->p_comm);
			break;

		case P_PCSUSP:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: suspending pid %d (%s)\n", p->p_pid, p->p_comm);
			task_suspend(p->task);
			break;

		case P_PCKILL:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
			kill_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_LOWSWAP);
			psignal_with_reason(p, SIGKILL, kill_reason);
			break;

		default:
			proc_unlock(p);
			break;
		}
	} else {
		proc_unlock(p);
	}

	return PROC_RETURNED;
}
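/*
 * Editor's note: sketch of reading the state that proc_dopcontrol() acts
 * on.  proc_getpcontrol() above returns the raw p_pcaction word, which
 * encodes both the requested control (throttle/suspend/kill) and whether
 * the action has already been taken.  "example_dump_pcontrol" is
 * hypothetical.
 */
#if 0 /* illustrative only */
static void
example_dump_pcontrol(int pid)
{
	int pcaction = 0;

	if (proc_getpcontrol(pid, &pcaction) == 0) {
		printf("pid %d: p_pcaction = 0x%x\n", pid, pcaction);
	}
}
#endif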
/*
 * Resume a throttled or suspended process.  This is an internal interface
 * that is only used by the user-level code that presents the GUI when we
 * run out of swap space, and hence is restricted to processes with
 * superuser privileges.
 */
int
proc_resetpcontrol(int pid)
{
	proc_t p;
	int pcontrol;
	int error;
	proc_t self = current_proc();

	/* allow if the process has been validated as a resource-control owner, or if the caller has superuser credentials */
	if (((self->p_lflag & P_LVMRSRCOWNER) == 0) && (error = suser(kauth_cred_get(), 0))) {
		return error;
	}

	p = proc_find(pid);
	if (p == PROC_NULL) {
		return ESRCH;
	}

	proc_lock(p);

	pcontrol = PROC_CONTROL_STATE(p);

	if (PROC_ACTION_STATE(p) != 0) {
		switch (pcontrol) {
		case P_PCTHROTTLE:
			PROC_RESETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: unthrottling pid %d (%s)\n", p->p_pid, p->p_comm);
			break;

		case P_PCSUSP:
			PROC_RESETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: resuming pid %d (%s)\n", p->p_pid, p->p_comm);
			task_resume(p->task);
			break;

		case P_PCKILL:
			/* a kill cannot be undone */
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: attempt to unkill pid %d (%s) ignored\n", p->p_pid, p->p_comm);
			break;

		default:
			proc_unlock(p);
			break;
		}
	} else {
		proc_unlock(p);
	}

	proc_rele(p);
	return 0;
}
struct no_paging_space {
	uint64_t        pcs_max_size;
	uint64_t        pcs_uniqueid;
	int             pcs_pid;
	int             pcs_proc_count;
	uint64_t        pcs_total_size;

	uint64_t        npcs_max_size;
	uint64_t        npcs_uniqueid;
	int             npcs_pid;
	int             npcs_proc_count;
	uint64_t        npcs_total_size;

	int             apcs_proc_count;
	uint64_t        apcs_total_size;
};
static int
proc_pcontrol_filter(proc_t p, void *arg)
{
	struct no_paging_space *nps;
	uint64_t        compressed;

	nps = (struct no_paging_space *)arg;

	compressed = get_task_compressed(p->task);

	if (PROC_CONTROL_STATE(p)) {
		if (PROC_ACTION_STATE(p) == 0) {
			if (compressed > nps->pcs_max_size) {
				nps->pcs_pid = p->p_pid;
				nps->pcs_uniqueid = p->p_uniqueid;
				nps->pcs_max_size = compressed;
			}
			nps->pcs_total_size += compressed;
			nps->pcs_proc_count++;
		} else {
			nps->apcs_total_size += compressed;
			nps->apcs_proc_count++;
		}
	} else {
		if (compressed > nps->npcs_max_size) {
			nps->npcs_pid = p->p_pid;
			nps->npcs_uniqueid = p->p_uniqueid;
			nps->npcs_max_size = compressed;
		}
		nps->npcs_total_size += compressed;
		nps->npcs_proc_count++;
	}
	return 0;
}
static int
proc_pcontrol_null(__unused proc_t p, __unused void *arg)
{
	return PROC_RETURNED;
}
/*
 * Deal with the low-on-compressor-pool-space condition... this function
 * gets called when we are approaching the limits of the compressor pool or
 * we are unable to create a new swap file.
 * Since this eventually creates a memory deadlock situation, we need to take
 * action to free up memory resources (both compressed and uncompressed) in
 * order to prevent the system from hanging completely.
 * There are 2 categories of processes to deal with: those that have an action
 * associated with them by the task itself and those that do not.  Actionable
 * tasks can have one of three categories specified: ones that can be killed
 * immediately, ones that should be suspended, and ones that should be
 * throttled.  Processes that do not have an action associated with them are
 * normally ignored unless they are utilizing such a large percentage of the
 * compressor pool (currently 50%) that only by killing them can we hope to
 * put the system back into a usable state.
 */
#define NO_PAGING_SPACE_DEBUG   0

extern uint64_t vm_compressor_pages_compressed(void);

struct timeval  last_no_space_action = {.tv_sec = 0, .tv_usec = 0};

#define MB_SIZE (1024 * 1024ULL)
boolean_t       memorystatus_kill_on_VM_compressor_space_shortage(boolean_t);

extern int32_t  max_kill_priority;
extern int      memorystatus_get_proccnt_upto_priority(int32_t max_bucket_index);
int
no_paging_space_action()
{
	proc_t          p;
	struct no_paging_space nps;
	struct timeval  now;
	os_reason_t     kill_reason;

	/*
	 * Throttle how often we come through here.  Once every 5 seconds should be plenty.
	 */
	microtime(&now);

	if (now.tv_sec <= last_no_space_action.tv_sec + 5) {
		return 0;
	}

	/*
	 * Examine all processes and find the biggest (biggest is based on the number of pages this
	 * task has in the compressor pool) that has been marked to have some action
	 * taken when swap space runs out... we also find the biggest that hasn't been marked for
	 * action.
	 *
	 * If the biggest non-actionable task is over the "dangerously big" threshold (currently 50% of
	 * the total number of pages held by the compressor), we go ahead and kill it since no other task
	 * can have any real effect on the situation.  Otherwise, we go after the actionable process.
	 */
	bzero(&nps, sizeof(nps));

	proc_iterate(PROC_ALLPROCLIST, proc_pcontrol_null, (void *)NULL, proc_pcontrol_filter, (void *)&nps);

#if NO_PAGING_SPACE_DEBUG
	printf("low swap: npcs_proc_count = %d, npcs_total_size = %qd, npcs_max_size = %qd\n",
	    nps.npcs_proc_count, nps.npcs_total_size, nps.npcs_max_size);
	printf("low swap: pcs_proc_count = %d, pcs_total_size = %qd, pcs_max_size = %qd\n",
	    nps.pcs_proc_count, nps.pcs_total_size, nps.pcs_max_size);
	printf("low swap: apcs_proc_count = %d, apcs_total_size = %qd\n",
	    nps.apcs_proc_count, nps.apcs_total_size);
#endif
	if (nps.npcs_max_size > (vm_compressor_pages_compressed() * 50) / 100) {
		/*
		 * for now we'll knock out any task that has more than 50% of the pages
		 * held by the compressor
		 */
		if ((p = proc_find(nps.npcs_pid)) != PROC_NULL) {
			if (nps.npcs_uniqueid == p->p_uniqueid) {
				/*
				 * verify this is still the same process
				 * in case the proc exited and the pid got reused while
				 * we were finishing the proc_iterate and getting to this point
				 */
				last_no_space_action = now;

				printf("low swap: killing largest compressed process with pid %d (%s) and size %llu MB\n", p->p_pid, p->p_comm, (nps.npcs_max_size / MB_SIZE));
				kill_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_LOWSWAP);
				psignal_with_reason(p, SIGKILL, kill_reason);

				proc_rele(p);

				return 0;
			}

			proc_rele(p);
		}
	}

	/*
	 * We have some processes within our jetsam bands of consideration and hence can be killed.
	 * So we will invoke the memorystatus thread to go ahead and kill something.
	 */
	if (memorystatus_get_proccnt_upto_priority(max_kill_priority) > 0) {
		last_no_space_action = now;
		memorystatus_kill_on_VM_compressor_space_shortage(TRUE /* async */);
		return 1;
	}

	/*
	 * No eligible processes to kill.  So let's suspend/kill the largest
	 * process depending on its policy control specifications.
	 */
	if (nps.pcs_max_size > 0) {
		if ((p = proc_find(nps.pcs_pid)) != PROC_NULL) {
			if (nps.pcs_uniqueid == p->p_uniqueid) {
				/*
				 * verify this is still the same process
				 * in case the proc exited and the pid got reused while
				 * we were finishing the proc_iterate and getting to this point
				 */
				last_no_space_action = now;

				proc_dopcontrol(p);

				proc_rele(p);

				return 1;
			}

			proc_rele(p);
		}
	}
	last_no_space_action = now;

	printf("low swap: unable to find any eligible processes to take action on\n");

	return 0;
}
int
proc_trace_log(__unused proc_t p, struct proc_trace_log_args *uap, __unused int *retval)
{
	int ret = 0;
	proc_t target_proc = PROC_NULL;
	pid_t target_pid = uap->pid;
	uint64_t target_uniqueid = uap->uniqueid;
	task_t target_task = NULL;

	if (priv_check_cred(kauth_cred_get(), PRIV_PROC_TRACE_INSPECT, 0)) {
		ret = EPERM;
		goto out;
	}
	target_proc = proc_find(target_pid);
	if (target_proc != PROC_NULL) {
		if (target_uniqueid != proc_uniqueid(target_proc)) {
			ret = ENOENT;
			goto out;
		}

		target_task = proc_task(target_proc);
		if (task_send_trace_memory(target_task, target_pid, target_uniqueid)) {
			ret = EINVAL;
			goto out;
		}
	} else {
		ret = ENOENT;
	}

out:
	if (target_proc != PROC_NULL) {
		proc_rele(target_proc);
	}
	return ret;
}
#if VM_SCAN_FOR_SHADOW_CHAIN
extern int vm_map_shadow_max(vm_map_t map);
int proc_shadow_max(void);
int
proc_shadow_max(void)
{
	int             retval, max;
	proc_t          p;
	task_t          task;
	vm_map_t        map;

	max = 0;
	for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
		if (p->p_stat == SIDL) {
			continue;
		}
		task = p->task;
		if (task == NULL) {
			continue;
		}
		map = get_task_map(task);
		if (map == NULL) {
			continue;
		}
		retval = vm_map_shadow_max(map);
		if (max < retval) {
			max = retval;
		}
	}
	return max;
}
#endif /* VM_SCAN_FOR_SHADOW_CHAIN */
void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid);

void
proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid)
{
	if (target_proc != NULL) {
		target_proc->p_responsible_pid = responsible_pid;
	}
	return;
}
int
proc_chrooted(proc_t p)
{
	int retval = 0;

	if (p) {
		retval = (p->p_fd->fd_rdir != NULL) ? 1 : 0;
	}

	return retval;
}
boolean_t
proc_send_synchronous_EXC_RESOURCE(proc_t p)
{
	if (p == PROC_NULL) {
		return FALSE;
	}

	/* Send sync EXC_RESOURCE if the process is traced */
	if (ISSET(p->p_lflag, P_LTRACED)) {
		return TRUE;
	}
	return FALSE;
}
size_t
proc_get_syscall_filter_mask_size(int which)
{
	switch (which) {
	case SYSCALL_MASK_UNIX:
		return nsysent;
	case SYSCALL_MASK_MACH:
		return mach_trap_count;
	case SYSCALL_MASK_KOBJ:
		return mach_kobj_count;
	default:
		return 0;
	}
}
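/*
 * Editor's note: sketch of how the sizing routine above pairs with
 * proc_set_syscall_filter_mask() below.  The mask is one byte per table
 * entry, and the install is rejected unless the length matches exactly.
 * "example_install_unix_mask" is hypothetical.
 */
#if 0 /* illustrative only */
static int
example_install_unix_mask(proc_t p, unsigned char *mask, size_t masklen)
{
	if (masklen != proc_get_syscall_filter_mask_size(SYSCALL_MASK_UNIX)) {
		return EINVAL;  /* must cover every sysent entry */
	}
	return proc_set_syscall_filter_mask(p, SYSCALL_MASK_UNIX, mask, masklen);
}
#endif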
int
proc_set_syscall_filter_mask(proc_t p, int which, unsigned char *maskptr, size_t masklen)
{
#if DEVELOPMENT || DEBUG
	if (syscallfilter_disable) {
		printf("proc_set_syscall_filter_mask: attempt to set policy for pid %d, but disabled by boot-arg\n", proc_pid(p));
		return 0;
	}
#endif // DEVELOPMENT || DEBUG

	switch (which) {
	case SYSCALL_MASK_UNIX:
		if (maskptr != NULL && masklen != nsysent) {
			return EINVAL;
		}
		p->syscall_filter_mask = maskptr;
		break;
	case SYSCALL_MASK_MACH:
		if (maskptr != NULL && masklen != (size_t)mach_trap_count) {
			return EINVAL;
		}
		mac_task_set_mach_filter_mask(p->task, maskptr);
		break;
	case SYSCALL_MASK_KOBJ:
		if (maskptr != NULL && masklen != (size_t)mach_kobj_count) {
			return EINVAL;
		}
		mac_task_set_kobj_filter_mask(p->task, maskptr);
		break;
	default:
		return EINVAL;
	}

	return 0;
}
int
proc_set_syscall_filter_callbacks(syscall_filter_cbs_t cbs)
{
	if (cbs->version != SYSCALL_FILTER_CALLBACK_VERSION) {
		return EINVAL;
	}

	/* XXX register unix filter callback instead of using MACF hook. */

	if (cbs->mach_filter_cbfunc || cbs->kobj_filter_cbfunc) {
		if (mac_task_register_filter_callbacks(cbs->mach_filter_cbfunc,
		    cbs->kobj_filter_cbfunc) != 0) {
			return EPERM;
		}
	}

	return 0;
}
int
proc_set_syscall_filter_index(int which, int num, int index)
{
	switch (which) {
	case SYSCALL_MASK_KOBJ:
		if (ipc_kobject_set_kobjidx(num, index) != 0) {
			return ENOENT;
		}
		break;
	default:
		return EINVAL;
	}

	return 0;
}
#endif /* CONFIG_MACF */
int
proc_set_filter_message_flag(proc_t p, boolean_t flag)
{
	if (p == PROC_NULL) {
		return EINVAL;
	}

	task_set_filter_msg_flag(proc_task(p), flag);

	return 0;
}
int
proc_get_filter_message_flag(proc_t p, boolean_t *flag)
{
	if (p == PROC_NULL || flag == NULL) {
		return EINVAL;
	}

	*flag = task_get_filter_msg_flag(proc_task(p));

	return 0;
}
bool
proc_is_traced(proc_t p)
{
	bool ret = FALSE;

	assert(p != PROC_NULL);

	proc_lock(p);
	if (p->p_lflag & P_LTRACED) {
		ret = TRUE;
	}
	proc_unlock(p);

	return ret;
}
#ifdef CONFIG_32BIT_TELEMETRY
void
proc_log_32bit_telemetry(proc_t p)
{
	/* Gather info */
	char signature_buf[MAX_32BIT_EXEC_SIG_SIZE] = { 0 };
	char * signature_cur_end = &signature_buf[0];
	char * signature_buf_end = &signature_buf[MAX_32BIT_EXEC_SIG_SIZE - 1];
	int bytes_printed = 0;

	const char * teamid = NULL;
	const char * identity = NULL;
	struct cs_blob * csblob = NULL;

	/*
	 * Get proc name and parent proc name; if the parent execs, we'll get a
	 * garbled name.
	 */
	bytes_printed = scnprintf(signature_cur_end,
	    signature_buf_end - signature_cur_end,
	    "%s,%s,", p->p_name,
	    (p->p_pptr ? p->p_pptr->p_name : ""));

	if (bytes_printed > 0) {
		signature_cur_end += bytes_printed;
	}

	/* Get developer info. */
	vnode_t v = proc_getexecutablevnode(p);

	if (v) {
		csblob = csvnode_get_blob(v, 0);

		if (csblob) {
			teamid = csblob_get_teamid(csblob);
			identity = csblob_get_identity(csblob);
		}
	}

	if (teamid == NULL) {
		teamid = "";
	}

	if (identity == NULL) {
		identity = "";
	}

	bytes_printed = scnprintf(signature_cur_end,
	    signature_buf_end - signature_cur_end,
	    "%s,%s", teamid, identity);

	if (bytes_printed > 0) {
		signature_cur_end += bytes_printed;
	}

	if (v) {
		vnode_put(v);
	}

	/*
	 * Trace it.
	 *
	 * We may want to rate limit here, although the SUMMARIZE key should
	 * help us aggregate events in userspace.
	 */
	kern_asl_msg(LOG_DEBUG, "messagetracer", 3,
	    /* 0 */ "com.apple.message.domain", "com.apple.kernel.32bit_exec",
	    /* 1 */ "com.apple.message.signature", signature_buf,
	    /* 2 */ "com.apple.message.summarize", "YES",
	    NULL);
}
#endif /* CONFIG_32BIT_TELEMETRY */
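/*
 * Editor's note: the bounded-append pattern used by
 * proc_log_32bit_telemetry() above, in isolation.  scnprintf() returns
 * the number of bytes actually written (the NUL excluded), so the cursor
 * can be advanced without ever running past the reserved terminator.
 * "example_append_fields" is hypothetical.
 */
#if 0 /* illustrative only */
static void
example_append_fields(char *buf, size_t buflen)
{
	char *cur = buf;
	char *end = buf + buflen - 1;   /* reserve space for the NUL */
	int n;

	n = scnprintf(cur, end - cur, "%s,", "first");
	if (n > 0) {
		cur += n;
	}
	n = scnprintf(cur, end - cur, "%s", "second");
	if (n > 0) {
		cur += n;
	}
}
#endif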