/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_proc.c	8.4 (Berkeley) 1/4/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/* HISTORY
 *  04-Aug-97  Umesh Vaishampayan (umeshv@apple.com)
 *	Added current_proc_EXTERNAL() function for the use of kernel
 *	drivers.
 *
 *  05-Jun-95 Mac Gillon (mgillon) at NeXT
 *	New version based on 3.3NS and 4.4
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc_internal.h>
#include <sys/file_internal.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/kauth.h>
#include <sys/codesign.h>
#include <sys/kernel_types.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/coalition.h>
#include <sys/coalition.h>
#include <kern/assert.h>
#include <vm/vm_protos.h>
#include <vm/vm_map.h>          /* vm_map_switch_protect() */
#include <vm/vm_pageout.h>
#include <mach/task.h>
#include <mach/message.h>
#include <sys/priv.h>
#include <sys/proc_info.h>
#include <sys/bsdtask_info.h>
#include <sys/persona.h>
#include <sys/sysent.h>
#include <sys/reason.h>

#ifdef CONFIG_32BIT_TELEMETRY
#include <sys/kasl.h>
#endif /* CONFIG_32BIT_TELEMETRY */

#if CONFIG_MEMORYSTATUS
#include <sys/kern_memorystatus.h>
#endif

#include <security/mac_framework.h>
#include <libkern/crypto/sha1.h>

#ifdef CONFIG_32BIT_TELEMETRY
#define MAX_32BIT_EXEC_SIG_SIZE 160
#endif /* CONFIG_32BIT_TELEMETRY */
/*
 * Structure associated with user caching.
 */
struct uidinfo {
	LIST_ENTRY(uidinfo) ui_hash;
	uid_t   ui_uid;
	long    ui_proccnt;
};
#define UIHASH(uid)     (&uihashtbl[(uid) & uihash])
LIST_HEAD(uihashhead, uidinfo) * uihashtbl;
u_long uihash;          /* size of hash table - 1 */
/*
 * Other process lists
 */
struct pidhashhead *pidhashtbl;
u_long pidhash;
struct pgrphashhead *pgrphashtbl;
u_long pgrphash;
struct sesshashhead *sesshashtbl;
u_long sesshash;

struct proclist allproc;
struct proclist zombproc;
extern struct tty cons;

#if DEVELOPMENT || DEBUG
int syscallfilter_disable = 0;
#endif // DEVELOPMENT || DEBUG
#define __PROC_INTERNAL_DEBUG 1

/* Name to give to core files */
#if defined(XNU_TARGET_OS_BRIDGE)
__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/private/var/internal/%N.core"};
#elif CONFIG_EMBEDDED
__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/private/var/cores/%N.core"};
#else
__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/cores/core.%P"};
#endif
#include <kern/backtrace.h>

typedef uint64_t unaligned_u64 __attribute__((aligned(1)));
static void orphanpg(struct pgrp *pg);
void proc_name_kdp(task_t t, char *buf, int size);
boolean_t proc_binary_uuid_kdp(task_t task, uuid_t uuid);
int proc_threadname_kdp(void *uth, char *buf, size_t size);
void proc_starttime_kdp(void *p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unaligned_u64 *abstime);
char *proc_name_address(void *p);

static void pgrp_add(struct pgrp *pgrp, proc_t parent, proc_t child);
static void pgrp_remove(proc_t p);
static void pgrp_replace(proc_t p, struct pgrp *pgrp);
static void pgdelete_dropref(struct pgrp *pgrp);
extern void pg_rele_dropref(struct pgrp *pgrp);
static int csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaddittoken);
static boolean_t proc_parent_is_currentproc(proc_t p);

struct fixjob_iterargs {
	struct pgrp *pg;
	struct session *mysession;
	int entering;
};

int fixjob_callback(proc_t, void *);
uint64_t
get_current_unique_pid(void)
{
	proc_t p = current_proc();

	if (p) {
		return p->p_uniqueid;
	} else {
		return 0;
	}
}
/*
 * Initialize global process hashing structures.
 */
void
procinit(void)
{
	LIST_INIT(&allproc);
	LIST_INIT(&zombproc);
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
	sesshashtbl = hashinit(maxproc / 4, M_PROC, &sesshash);
	uihashtbl = hashinit(maxproc / 16, M_PROC, &uihash);
#if CONFIG_PERSONAS
	personas_bootstrap();
#endif
}
/*
 * Change the count associated with number of processes
 * a given user is using. This routine protects the uihash
 * with the list lock.
 */
int
chgproccnt(uid_t uid, int diff)
{
	struct uidinfo *uip;
	struct uidinfo *newuip = NULL;
	struct uihashhead *uipp;
	int retval;

again:
	proc_list_lock();
	uipp = UIHASH(uid);
	for (uip = uipp->lh_first; uip != 0; uip = uip->ui_hash.le_next) {
		if (uip->ui_uid == uid) {
			break;
		}
	}
	if (uip) {
		uip->ui_proccnt += diff;
		if (uip->ui_proccnt > 0) {
			retval = uip->ui_proccnt;
			proc_list_unlock();
			goto out;
		}
		if (uip->ui_proccnt < 0) {
			panic("chgproccnt: procs < 0");
		}
		LIST_REMOVE(uip, ui_hash);
		retval = 0;
		proc_list_unlock();
		FREE_ZONE(uip, sizeof(*uip), M_PROC);
		goto out;
	}
	if (diff <= 0) {
		if (diff == 0) {
			retval = 0;
			proc_list_unlock();
			goto out;
		}
		panic("chgproccnt: lost user");
	}
	if (newuip != NULL) {
		uip = newuip;
		newuip = NULL;
		LIST_INSERT_HEAD(uipp, uip, ui_hash);
		uip->ui_uid = uid;
		uip->ui_proccnt = diff;
		retval = diff;
		proc_list_unlock();
		goto out;
	}
	proc_list_unlock();
	MALLOC_ZONE(newuip, struct uidinfo *, sizeof(*uip), M_PROC, M_WAITOK);
	if (newuip == NULL) {
		panic("chgproccnt: M_PROC zone depleted");
	}
	goto again;
out:
	if (newuip != NULL) {
		FREE_ZONE(newuip, sizeof(*uip), M_PROC);
	}
	return retval;
}
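/*
 * Illustrative sketch (not part of the original file): chgproccnt() is the
 * fork-time accounting primitive. A caller enforcing a per-user process
 * limit would typically do something like the following; `maxprocperuid`
 * is assumed here for illustration.
 *
 *	if (chgproccnt(uid, 1) > maxprocperuid) {
 *		(void)chgproccnt(uid, -1);	// back out: over the limit
 *		return EAGAIN;
 *	}
 */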
/*
 * Is p an inferior of the current process?
 */
int
inferior(proc_t p)
{
	int retval = 0;

	proc_list_lock();
	for (; p != current_proc(); p = p->p_pptr) {
		if (p->p_pid == 0) {
			goto out;
		}
	}
	retval = 1;
out:
	proc_list_unlock();
	return retval;
}
/*
 * Is p an inferior of t?
 */
int
isinferior(proc_t p, proc_t t)
{
	int retval = 0;
	int nchecked = 0;
	proc_t start = p;

	/* if p == t they are not inferior */
	if (p == t) {
		return 0;
	}

	proc_list_lock();
	for (; p != t; p = p->p_pptr) {
		nchecked++;

		/* Detect here if we're in a cycle */
		if ((p->p_pid == 0) || (p->p_pptr == start) || (nchecked >= nprocs)) {
			goto out;
		}
	}
	retval = 1;
out:
	proc_list_unlock();
	return retval;
}
int
proc_isinferior(int pid1, int pid2)
{
	proc_t p = PROC_NULL;
	proc_t t = PROC_NULL;
	int retval = 0;

	if (((p = proc_find(pid1)) != (proc_t)0) && ((t = proc_find(pid2)) != (proc_t)0)) {
		retval = isinferior(p, t);
	}

	if (p != PROC_NULL) {
		proc_rele(p);
	}
	if (t != PROC_NULL) {
		proc_rele(t);
	}

	return retval;
}
proc_t
proc_find(int pid)
{
	return proc_findinternal(pid, 0);
}

proc_t
proc_findinternal(int pid, int locked)
{
	proc_t p = PROC_NULL;

	if (locked == 0) {
		proc_list_lock();
	}

	p = pfind_locked(pid);
	if ((p == PROC_NULL) || (p != proc_ref_locked(p))) {
		p = PROC_NULL;
	}

	if (locked == 0) {
		proc_list_unlock();
	}

	return p;
}
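/*
 * Illustrative sketch (not in the original file): every successful
 * proc_find() returns a referenced proc that the caller must balance with
 * proc_rele() once it is done, e.g.:
 *
 *	proc_t p = proc_find(pid);
 *	if (p != PROC_NULL) {
 *		// ... inspect or signal p while the ref pins it ...
 *		proc_rele(p);
 *	}
 */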
proc_t
proc_findthread(thread_t thread)
{
	proc_t p = PROC_NULL;
	struct uthread *uth;

	proc_list_lock();
	uth = get_bsdthread_info(thread);
	if (uth && (uth->uu_flag & UT_VFORK)) {
		p = uth->uu_proc;
	} else {
		p = (proc_t)(get_bsdthreadtask_info(thread));
	}
	p = proc_ref_locked(p);
	proc_list_unlock();
	return p;
}
void
uthread_reset_proc_refcount(void *uthread)
{
	uthread_t uth;

	uth = (uthread_t)uthread;
	uth->uu_proc_refcount = 0;

	if (proc_ref_tracking_disabled) {
		return;
	}

	uth->uu_pindex = 0;
}

int
uthread_get_proc_refcount(void *uthread)
{
	uthread_t uth;

	if (proc_ref_tracking_disabled) {
		return 0;
	}

	uth = (uthread_t)uthread;

	return uth->uu_proc_refcount;
}

static void
record_procref(proc_t p __unused, int count)
{
	uthread_t uth;

	uth = current_uthread();
	uth->uu_proc_refcount += count;

	if (proc_ref_tracking_disabled) {
		return;
	}

	if (uth->uu_pindex < NUM_PROC_REFS_TO_TRACK) {
		backtrace((uintptr_t *)&uth->uu_proc_pcs[uth->uu_pindex],
		    PROC_REF_STACK_DEPTH, NULL);

		uth->uu_proc_ps[uth->uu_pindex] = p;
		uth->uu_pindex++;
	}
}
static boolean_t
uthread_needs_to_wait_in_proc_refwait(void)
{
	uthread_t uth = current_uthread();

	/*
	 * Allow threads holding no proc refs to wait in proc_refwait;
	 * allowing threads holding proc refs to wait in proc_refwait
	 * causes deadlocks and makes proc_find non-reentrant.
	 */
	if (uth->uu_proc_refcount == 0) {
		return TRUE;
	}

	return FALSE;
}
proc_t
proc_self(void)
{
	struct proc *p;

	p = current_proc();

	proc_list_lock();
	if (p != proc_ref_locked(p)) {
		p = PROC_NULL;
	}
	proc_list_unlock();
	return p;
}
proc_t
proc_ref_locked(proc_t p)
{
	proc_t p1 = p;
	int pid = proc_pid(p);

retry:
	/*
	 * if process still in creation or proc got recycled
	 * during msleep then return failure.
	 */
	if ((p == PROC_NULL) || (p1 != p) || ((p->p_listflag & P_LIST_INCREATE) != 0)) {
		return PROC_NULL;
	}

	/*
	 * Do not return process marked for termination
	 * or proc_refdrain called without ref wait.
	 * Wait for proc_refdrain_with_refwait to complete if
	 * process in refdrain and refwait flag is set, unless
	 * the current thread is holding to a proc_ref
	 * for any proc.
	 */
	if ((p->p_stat != SZOMB) &&
	    ((p->p_listflag & P_LIST_EXITED) == 0) &&
	    ((p->p_listflag & P_LIST_DEAD) == 0) &&
	    (((p->p_listflag & (P_LIST_DRAIN | P_LIST_DRAINWAIT)) == 0) ||
	    ((p->p_listflag & P_LIST_REFWAIT) != 0))) {
		if ((p->p_listflag & P_LIST_REFWAIT) != 0 && uthread_needs_to_wait_in_proc_refwait()) {
			msleep(&p->p_listflag, proc_list_mlock, 0, "proc_refwait", 0);
			/*
			 * the proc might have been recycled since we dropped
			 * the proc list lock, get the proc again.
			 */
			p = pfind_locked(pid);
			goto retry;
		}
		p->p_refcount++;
		record_procref(p, 1);
	} else {
		p1 = PROC_NULL;
	}

	return p1;
}
void
proc_rele_locked(proc_t p)
{
	if (p->p_refcount > 0) {
		p->p_refcount--;
		record_procref(p, -1);
		if ((p->p_refcount == 0) && ((p->p_listflag & P_LIST_DRAINWAIT) == P_LIST_DRAINWAIT)) {
			p->p_listflag &= ~P_LIST_DRAINWAIT;
			wakeup(&p->p_refcount);
		}
	} else {
		panic("proc_rele_locked -ve ref\n");
	}
}
proc_t
proc_find_zombref(int pid)
{
	proc_t p;

	proc_list_lock();

again:
	p = pfind_locked(pid);

	/* should we bail? */
	if ((p == PROC_NULL)                                    /* not found */
	    || ((p->p_listflag & P_LIST_INCREATE) != 0)         /* not created yet */
	    || ((p->p_listflag & P_LIST_EXITED) == 0)) {        /* not started exit */
		proc_list_unlock();
		return PROC_NULL;
	}

	/* If someone else is controlling the (unreaped) zombie - wait */
	if ((p->p_listflag & P_LIST_WAITING) != 0) {
		(void)msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
		goto again;
	}
	p->p_listflag |= P_LIST_WAITING;

	proc_list_unlock();

	return p;
}
void
proc_drop_zombref(proc_t p)
{
	proc_list_lock();
	if ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
		p->p_listflag &= ~P_LIST_WAITING;
		wakeup(&p->p_stat);
	}
	proc_list_unlock();
}
void
proc_refdrain(proc_t p)
{
	proc_refdrain_with_refwait(p, FALSE);
}
proc_t
proc_refdrain_with_refwait(proc_t p, boolean_t get_ref_and_allow_wait)
{
	boolean_t initexec = FALSE;

	proc_list_lock();

	p->p_listflag |= P_LIST_DRAIN;
	if (get_ref_and_allow_wait) {
		/*
		 * All the calls to proc_ref_locked will wait
		 * for the flag to get cleared before returning a ref,
		 * unless the current thread is holding to a proc ref
		 * for any proc.
		 */
		p->p_listflag |= P_LIST_REFWAIT;
		if (p == initproc) {
			initexec = TRUE;
		}
	}

	/* Do not wait in ref drain for launchd exec */
	while (p->p_refcount && !initexec) {
		p->p_listflag |= P_LIST_DRAINWAIT;
		msleep(&p->p_refcount, proc_list_mlock, 0, "proc_refdrain", 0);
	}

	p->p_listflag &= ~P_LIST_DRAIN;
	if (!get_ref_and_allow_wait) {
		p->p_listflag |= P_LIST_DEAD;
	} else {
		/* Return a ref to the caller */
		p->p_refcount++;
		record_procref(p, 1);
	}

	proc_list_unlock();

	if (get_ref_and_allow_wait) {
		return p;
	}
	return NULL;
}
void
proc_refwake(proc_t p)
{
	proc_list_lock();
	p->p_listflag &= ~P_LIST_REFWAIT;
	wakeup(&p->p_listflag);
	proc_list_unlock();
}
proc_t
proc_parentholdref(proc_t p)
{
	proc_t parent = PROC_NULL;
	proc_t pp;
	int loopcnt = 0;

	proc_list_lock();
loop:
	pp = p->p_pptr;
	if ((pp == PROC_NULL) || (pp->p_stat == SZOMB) || ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED))) {
		parent = PROC_NULL;
		goto out;
	}

	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == P_LIST_CHILDDRSTART) {
		pp->p_listflag |= P_LIST_CHILDDRWAIT;
		msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
		loopcnt++;
		if (loopcnt == 5) {
			parent = PROC_NULL;
			goto out;
		}
		goto loop;
	}

	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == 0) {
		pp->p_parentref++;
		parent = pp;
		goto out;
	}

out:
	proc_list_unlock();
	return parent;
}
int
proc_parentdropref(proc_t p, int listlocked)
{
	if (listlocked == 0) {
		proc_list_lock();
	}

	if (p->p_parentref > 0) {
		p->p_parentref--;
		if ((p->p_parentref == 0) && ((p->p_listflag & P_LIST_PARENTREFWAIT) == P_LIST_PARENTREFWAIT)) {
			p->p_listflag &= ~P_LIST_PARENTREFWAIT;
			wakeup(&p->p_parentref);
		}
	} else {
		panic("proc_parentdropref -ve ref\n");
	}
	if (listlocked == 0) {
		proc_list_unlock();
	}

	return 0;
}
void
proc_childdrainstart(proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_CHILDDRSTART) == P_LIST_CHILDDRSTART) {
		panic("proc_childdrainstart: childdrain already started\n");
	}
#endif
	p->p_listflag |= P_LIST_CHILDDRSTART;
	/* wait for all that hold parentrefs to drop */
	while (p->p_parentref > 0) {
		p->p_listflag |= P_LIST_PARENTREFWAIT;
		msleep(&p->p_parentref, proc_list_mlock, 0, "proc_childdrainstart", 0);
	}
}
void
proc_childdrainend(proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if (p->p_childrencnt > 0) {
		panic("exiting: children still hanging around\n");
	}
#endif
	p->p_listflag |= P_LIST_CHILDDRAINED;
	if ((p->p_listflag & (P_LIST_CHILDLKWAIT | P_LIST_CHILDDRWAIT)) != 0) {
		p->p_listflag &= ~(P_LIST_CHILDLKWAIT | P_LIST_CHILDDRWAIT);
		wakeup(&p->p_childrencnt);
	}
}
void
proc_checkdeadrefs(__unused proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INHASH) != 0) {
		panic("proc being freed and still in hash %p: %u\n", p, p->p_listflag);
	}
	if (p->p_childrencnt != 0) {
		panic("proc being freed and pending children cnt %p:%d\n", p, p->p_childrencnt);
	}
	if (p->p_refcount != 0) {
		panic("proc being freed and pending refcount %p:%d\n", p, p->p_refcount);
	}
	if (p->p_parentref != 0) {
		panic("proc being freed and pending parentrefs %p:%d\n", p, p->p_parentref);
	}
#endif
}
int
proc_original_ppid(proc_t p)
{
	if (p != NULL) {
		return p->p_original_ppid;
	}
	return -1;
}

int
proc_selfpid(void)
{
	return current_proc()->p_pid;
}

int
proc_selfppid(void)
{
	return current_proc()->p_ppid;
}

uint64_t
proc_selfcsflags(void)
{
	return current_proc()->p_csflags;
}

uint32_t
proc_platform(proc_t p)
{
	if (p != NULL) {
		return p->p_platform;
	}
	return 0;
}
#if CONFIG_DTRACE
static proc_t
dtrace_current_proc_vforking(void)
{
	thread_t th = current_thread();
	struct uthread *ut = get_bsdthread_info(th);

	if (ut &&
	    ((ut->uu_flag & (UT_VFORK | UT_VFORKING)) == (UT_VFORK | UT_VFORKING))) {
		/*
		 * Handle the narrow window where we're in the vfork syscall,
		 * but we're not quite ready to claim (in particular, to DTrace)
		 * that we're running as the child.
		 */
		return get_bsdtask_info(get_threadtask(th));
	}
	return current_proc();
}

pid_t
dtrace_proc_selfpid(void)
{
	return dtrace_current_proc_vforking()->p_pid;
}

pid_t
dtrace_proc_selfppid(void)
{
	return dtrace_current_proc_vforking()->p_ppid;
}

uid_t
dtrace_proc_selfruid(void)
{
	return dtrace_current_proc_vforking()->p_ruid;
}
#endif /* CONFIG_DTRACE */
proc_t
proc_parent(proc_t p)
{
	proc_t parent;
	proc_t pp;

	proc_list_lock();
loop:
	pp = p->p_pptr;
	parent = proc_ref_locked(pp);
	if ((parent == PROC_NULL) && (pp != PROC_NULL) && (pp->p_stat != SZOMB) && ((pp->p_listflag & P_LIST_EXITED) != 0) && ((pp->p_listflag & P_LIST_CHILDDRAINED) == 0)) {
		pp->p_listflag |= P_LIST_CHILDLKWAIT;
		msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
		goto loop;
	}
	proc_list_unlock();
	return parent;
}
static boolean_t
proc_parent_is_currentproc(proc_t p)
{
	boolean_t ret = FALSE;

	proc_list_lock();
	if (p->p_pptr == current_proc()) {
		ret = TRUE;
	}
	proc_list_unlock();
	return ret;
}
void
proc_name(int pid, char *buf, int size)
{
	proc_t p;

	if ((p = proc_find(pid)) != PROC_NULL) {
		strlcpy(buf, &p->p_comm[0], size);
		proc_rele(p);
	}
}
void
proc_name_kdp(task_t t, char *buf, int size)
{
	proc_t p = get_bsdtask_info(t);
	if (p == PROC_NULL) {
		return;
	}

	if ((size_t)size > sizeof(p->p_comm)) {
		strlcpy(buf, &p->p_name[0], MIN((int)sizeof(p->p_name), size));
	} else {
		strlcpy(buf, &p->p_comm[0], MIN((int)sizeof(p->p_comm), size));
	}
}
boolean_t
proc_binary_uuid_kdp(task_t task, uuid_t uuid)
{
	proc_t p = get_bsdtask_info(task);
	if (p == PROC_NULL) {
		return FALSE;
	}

	proc_getexecutableuuid(p, uuid, sizeof(uuid_t));

	return TRUE;
}
int
proc_threadname_kdp(void *uth, char *buf, size_t size)
{
	if (size < MAXTHREADNAMESIZE) {
		/*
		 * This is really just a protective measure for the future in
		 * case the thread name size in stackshot gets out of sync with
		 * the BSD max thread name size. Note that bsd_getthreadname
		 * doesn't take input buffer size into account.
		 */
		return -1;
	}

	if (uth != NULL) {
		bsd_getthreadname(uth, buf);
	}
	return 0;
}
/*
 * Note that this function is generally going to be called from stackshot,
 * and the arguments will be coming from a struct which is declared packed;
 * thus the input arguments will in general be unaligned. We have to handle
 * that here.
 */
void
proc_starttime_kdp(void *p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unaligned_u64 *abstime)
{
	proc_t pp = (proc_t)p;
	if (pp != PROC_NULL) {
		if (tv_sec != NULL) {
			*tv_sec = pp->p_start.tv_sec;
		}
		if (tv_usec != NULL) {
			*tv_usec = pp->p_start.tv_usec;
		}
		if (abstime != NULL) {
			if (pp->p_stats != NULL) {
				*abstime = pp->p_stats->ps_start;
			} else {
				*abstime = 0;
			}
		}
	}
}
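/*
 * Illustrative sketch (not in the original file): the unaligned_u64 typedef
 * exists because stackshot hands in pointers into packed structures, where
 * a uint64_t member may sit on any byte boundary. Writing through a plain
 * uint64_t pointer would be undefined on alignment-strict CPUs; the
 * aligned(1) typedef tells the compiler to emit safe accesses. The struct
 * below is hypothetical, purely for illustration:
 *
 *	struct sample_record {
 *		uint8_t  tag;
 *		uint64_t when;
 *	} __attribute__((packed));
 *
 *	struct sample_record s;
 *	proc_starttime_kdp(p, NULL, NULL, (unaligned_u64 *)&s.when);
 */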
char *
proc_name_address(void *p)
{
	return &((proc_t)p)->p_comm[0];
}

char *
proc_best_name(proc_t p)
{
	if (p->p_name[0] != 0) {
		return &p->p_name[0];
	}
	return &p->p_comm[0];
}
void
proc_selfname(char *buf, int size)
{
	proc_t p;

	if ((p = current_proc()) != (proc_t)0) {
		strlcpy(buf, &p->p_comm[0], size);
	}
}
void
proc_signal(int pid, int signum)
{
	proc_t p;

	if ((p = proc_find(pid)) != PROC_NULL) {
		psignal(p, signum);
		proc_rele(p);
	}
}

int
proc_issignal(int pid, sigset_t mask)
{
	proc_t p;
	int error = 0;

	if ((p = proc_find(pid)) != PROC_NULL) {
		error = proc_pendingsignals(p, mask);
		proc_rele(p);
	}

	return error;
}
int
proc_noremotehang(proc_t p)
{
	int retval = 0;

	if (p) {
		retval = p->p_flag & P_NOREMOTEHANG;
	}
	return retval ? 1 : 0;
}

int
proc_exiting(proc_t p)
{
	int retval = 0;

	if (p) {
		retval = p->p_lflag & P_LEXIT;
	}
	return retval ? 1 : 0;
}

int
proc_in_teardown(proc_t p)
{
	int retval = 0;

	if (p) {
		retval = p->p_lflag & P_LPEXIT;
	}
	return retval ? 1 : 0;
}

int
proc_forcequota(proc_t p)
{
	int retval = 0;

	if (p) {
		retval = p->p_flag & P_FORCEQUOTA;
	}
	return retval ? 1 : 0;
}
int
proc_suser(proc_t p)
{
	kauth_cred_t my_cred;
	int error;

	my_cred = kauth_cred_proc_ref(p);
	error = suser(my_cred, &p->p_acflag);
	kauth_cred_unref(&my_cred);
	return error;
}
task_t
proc_task(proc_t proc)
{
	return (task_t)proc->task;
}

/*
 * Obtain the first thread in a process
 *
 * XXX This is a bad thing to do; it exists predominantly to support the
 * XXX use of proc_t's in places that should really be using
 * XXX thread_t's instead.  This maintains historical behaviour, but really
 * XXX needs an audit of the context (proxy vs. not) to clean up.
 */
thread_t
proc_thread(proc_t proc)
{
	uthread_t uth = TAILQ_FIRST(&proc->p_uthlist);

	if (uth != NULL) {
		return uth->uu_context.vc_thread;
	}

	return NULL;
}
kauth_cred_t
proc_ucred(proc_t p)
{
	return p->p_ucred;
}

struct uthread *
current_uthread(void)
{
	thread_t th = current_thread();

	return (struct uthread *)get_bsdthread_info(th);
}
int
proc_is64bit(proc_t p)
{
	return IS_64BIT_PROCESS(p);
}

int
proc_is64bit_data(proc_t p)
{
	return (int)task_get_64bit_data(p->task);
}

int
proc_pidversion(proc_t p)
{
	return p->p_idversion;
}

uint32_t
proc_persona_id(proc_t p)
{
	return (uint32_t)persona_id_from_proc(p);
}

uint32_t
proc_getuid(proc_t p)
{
	return p->p_uid;
}

uint32_t
proc_getgid(proc_t p)
{
	return p->p_gid;
}

uint64_t
proc_uniqueid(proc_t p)
{
	return p->p_uniqueid;
}

uint64_t
proc_puniqueid(proc_t p)
{
	return p->p_puniqueid;
}
void
proc_coalitionids(__unused proc_t p, __unused uint64_t ids[COALITION_NUM_TYPES])
{
#if CONFIG_COALITIONS
	task_coalition_ids(p->task, ids);
#else
	memset(ids, 0, sizeof(uint64_t[COALITION_NUM_TYPES]));
#endif
}
uint64_t
proc_was_throttled(proc_t p)
{
	return p->was_throttled;
}

uint64_t
proc_did_throttle(proc_t p)
{
	return p->did_throttle;
}

int
proc_getcdhash(proc_t p, unsigned char *cdhash)
{
	return vn_getcdhash(p->p_textvp, p->p_textoff, cdhash);
}

int
proc_exitstatus(proc_t p)
{
	return p->p_xstat & 0xffff;
}

void
proc_getexecutableuuid(proc_t p, unsigned char *uuidbuf, unsigned long size)
{
	if (size >= sizeof(p->p_uuid)) {
		memcpy(uuidbuf, p->p_uuid, sizeof(p->p_uuid));
	}
}
/* Return vnode for executable with an iocount. Must be released with vnode_put(). */
vnode_t
proc_getexecutablevnode(proc_t p)
{
	vnode_t tvp = p->p_textvp;

	if (tvp != NULLVP) {
		if (vnode_getwithref(tvp) == 0) {
			return tvp;
		}
	}

	return NULLVP;
}
int
proc_selfexecutableargs(uint8_t *buf, size_t *buflen)
{
	proc_t p = current_proc();

	// buflen must always be provided
	if (buflen == NULL) {
		return EINVAL;
	}

	// If a buf is provided, there must be at least enough room to fit argc
	if (buf && *buflen < sizeof(p->p_argc)) {
		return EINVAL;
	}

	if (!p->user_stack) {
		return EINVAL;
	}

	if (buf == NULL) {
		*buflen = p->p_argslen + sizeof(p->p_argc);
		return 0;
	}

	// Copy in argc to the first 4 bytes
	memcpy(buf, &p->p_argc, sizeof(p->p_argc));

	if (*buflen > sizeof(p->p_argc) && p->p_argslen > 0) {
		// See memory layout comment in kern_exec.c:exec_copyout_strings()
		// We want to copy starting from `p_argslen` bytes away from top of stack
		return copyin(p->user_stack - p->p_argslen,
		           buf + sizeof(p->p_argc),
		           MIN(p->p_argslen, *buflen - sizeof(p->p_argc)));
	} else {
		return 0;
	}
}
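/*
 * Illustrative sketch (not in the original file) of the buffer this routine
 * fills, assuming the exec_copyout_strings() layout referenced above:
 *
 *	offset 0:      int32 argc (from p->p_argc)
 *	offset 4...:   the exec-time argv/env string area, copied from
 *	               user_stack - p_argslen up toward the stack top
 *
 * A caller probing for the required size first passes buf == NULL, then
 * calls again with a buffer of the returned *buflen.
 */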
off_t
proc_getexecutableoffset(proc_t p)
{
	return p->p_textoff;
}

void
bsd_set_dependency_capable(task_t task)
{
	proc_t p = get_bsdtask_info(task);

	if (p) {
		OSBitOrAtomic(P_DEPENDENCY_CAPABLE, &p->p_flag);
	}
}
int
IS_64BIT_PROCESS(proc_t p)
{
	if (p && (p->p_flag & P_LP64)) {
		return 1;
	} else {
		return 0;
	}
}
/*
 * Locate a process by number
 */
proc_t
pfind_locked(pid_t pid)
{
	proc_t p;
#if DEBUG
	proc_t q;
#endif

	if (!pid) {
		return kernproc;
	}

	for (p = PIDHASH(pid)->lh_first; p != 0; p = p->p_hash.le_next) {
		if (p->p_pid == pid) {
#if DEBUG
			for (q = p->p_hash.le_next; q != 0; q = q->p_hash.le_next) {
				if ((p != q) && (q->p_pid == pid)) {
					panic("two procs with same pid %p:%p:%d:%d\n", p, q, p->p_pid, q->p_pid);
				}
			}
#endif
			return p;
		}
	}
	return NULL;
}
/*
 * Locate a zombie by PID
 */
__private_extern__ proc_t
pzfind(pid_t pid)
{
	proc_t p;

	proc_list_lock();

	for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (p->p_pid == pid) {
			break;
		}
	}

	proc_list_unlock();

	return p;
}
/*
 * Locate a process group by number
 */
struct pgrp *
pgfind(pid_t pgid)
{
	struct pgrp *pgrp;

	proc_list_lock();
	pgrp = pgfind_internal(pgid);
	if ((pgrp == NULL) || ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) != 0)) {
		pgrp = PGRP_NULL;
	} else {
		pgrp->pg_refcount++;
	}
	proc_list_unlock();
	return pgrp;
}
struct pgrp *
pgfind_internal(pid_t pgid)
{
	struct pgrp *pgrp;

	for (pgrp = PGRPHASH(pgid)->lh_first; pgrp != 0; pgrp = pgrp->pg_hash.le_next) {
		if (pgrp->pg_id == pgid) {
			return pgrp;
		}
	}
	return NULL;
}
void
pg_rele(struct pgrp *pgrp)
{
	if (pgrp == PGRP_NULL) {
		return;
	}
	pg_rele_dropref(pgrp);
}

void
pg_rele_dropref(struct pgrp *pgrp)
{
	proc_list_lock();
	if ((pgrp->pg_refcount == 1) && ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) == PGRP_FLAG_TERMINATE)) {
		proc_list_unlock();
		pgdelete_dropref(pgrp);
		return;
	}

	pgrp->pg_refcount--;
	proc_list_unlock();
}
struct session *
session_find_internal(pid_t sessid)
{
	struct session *sess;

	for (sess = SESSHASH(sessid)->lh_first; sess != 0; sess = sess->s_hash.le_next) {
		if (sess->s_sid == sessid) {
			return sess;
		}
	}
	return NULL;
}
/*
 * Make a new process ready to become a useful member of society by making it
 * visible in all the right places and initialize its own lists to empty.
 *
 * Parameters:	parent			The parent of the process to insert
 *		child			The child process to insert
 *
 * Notes:	Insert a child process into the parent's process group, assign
 *		the child the parent process pointer and PPID of the parent,
 *		place it on the parent's p_children list as a sibling,
 *		initialize its own child list, place it in the allproc list,
 *		insert it in the proper hash bucket, and initialize its
 *		event list.
 */
void
pinsertchild(proc_t parent, proc_t child)
{
	struct pgrp *pg;

	LIST_INIT(&child->p_children);
	TAILQ_INIT(&child->p_evlist);
	child->p_pptr = parent;
	child->p_ppid = parent->p_pid;
	child->p_original_ppid = parent->p_pid;
	child->p_puniqueid = parent->p_uniqueid;
	child->p_xhighbits = 0;

	pg = proc_pgrp(parent);
	pgrp_add(pg, parent, child);
	pg_rele(pg);

	proc_list_lock();

#if CONFIG_MEMORYSTATUS
	memorystatus_add(child, TRUE);
#endif

	parent->p_childrencnt++;
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);

	LIST_INSERT_HEAD(&allproc, child, p_list);
	/* mark the completion of proc creation */
	child->p_listflag &= ~P_LIST_INCREATE;

	proc_list_unlock();
}
/*
 * Move p to a new or existing process group (and session)
 *
 * Returns:	0			Success
 *		ESRCH			No such process
 */
int
enterpgrp(proc_t p, pid_t pgid, int mksess)
{
	struct pgrp *pgrp;
	struct pgrp *mypgrp;
	struct session *procsp;

	pgrp = pgfind(pgid);
	mypgrp = proc_pgrp(p);
	procsp = proc_session(p);

	if (pgrp != NULL && mksess) {   /* firewalls */
		panic("enterpgrp: setsid into non-empty pgrp");
	}
	if (SESS_LEADER(p, procsp)) {
		panic("enterpgrp: session leader attempted setpgrp");
	}

	if (pgrp == PGRP_NULL) {
		pid_t savepid = p->p_pid;
		proc_t np = PROC_NULL;
		/*
		 * new process group
		 */
		if (p->p_pid != pgid) {
			panic("enterpgrp: new pgrp and pid != pgid");
		}
		MALLOC_ZONE(pgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP,
		    M_WAITOK);
		if (pgrp == NULL) {
			panic("enterpgrp: M_PGRP zone depleted");
		}
		if ((np = proc_find(savepid)) == NULL || np != p) {
			if (np != PROC_NULL) {
				proc_rele(np);
			}
			if (mypgrp != PGRP_NULL) {
				pg_rele(mypgrp);
			}
			if (procsp != SESSION_NULL) {
				session_rele(procsp);
			}
			FREE_ZONE(pgrp, sizeof(struct pgrp), M_PGRP);
			return ESRCH;
		}
		proc_rele(np);
		if (mksess) {
			struct session *sess;

			/*
			 * new session
			 */
			MALLOC_ZONE(sess, struct session *,
			    sizeof(struct session), M_SESSION, M_WAITOK);
			if (sess == NULL) {
				panic("enterpgrp: M_SESSION zone depleted");
			}
			sess->s_leader = p;
			sess->s_sid = p->p_pid;
			sess->s_count = 1;
			sess->s_ttyvp = NULL;
			sess->s_ttyp = TTY_NULL;
			sess->s_flags = 0;
			sess->s_listflags = 0;
			sess->s_ttypgrpid = NO_PID;

			lck_mtx_init(&sess->s_mlock, proc_mlock_grp, proc_lck_attr);

			bcopy(procsp->s_login, sess->s_login,
			    sizeof(sess->s_login));
			OSBitAndAtomic(~((uint32_t)P_CONTROLT), &p->p_flag);
			proc_list_lock();
			LIST_INSERT_HEAD(SESSHASH(sess->s_sid), sess, s_hash);
			proc_list_unlock();
			pgrp->pg_session = sess;
			if (p != current_proc()) {
				panic("enterpgrp: mksession and p != curproc");
			}
		} else {
			proc_list_lock();
			pgrp->pg_session = procsp;

			if ((pgrp->pg_session->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
				panic("enterpgrp: providing ref to terminating session ");
			}
			pgrp->pg_session->s_count++;
			proc_list_unlock();
		}
		pgrp->pg_id = pgid;

		lck_mtx_init(&pgrp->pg_mlock, proc_mlock_grp, proc_lck_attr);

		LIST_INIT(&pgrp->pg_members);
		pgrp->pg_membercnt = 0;
		pgrp->pg_jobc = 0;
		proc_list_lock();
		pgrp->pg_refcount = 1;
		pgrp->pg_listflags = 0;
		LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
		proc_list_unlock();
	} else if (pgrp == mypgrp) {
		pg_rele(pgrp);
		if (mypgrp != NULL) {
			pg_rele(mypgrp);
		}
		if (procsp != SESSION_NULL) {
			session_rele(procsp);
		}
		return 0;
	}

	if (procsp != SESSION_NULL) {
		session_rele(procsp);
	}
	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	fixjobc(p, mypgrp, 0);

	if (mypgrp != PGRP_NULL) {
		pg_rele(mypgrp);
	}
	pgrp_replace(p, pgrp);
	pg_rele(pgrp);

	return 0;
}
/*
 * Remove process from process group
 */
int
leavepgrp(proc_t p)
{
	pgrp_remove(p);
	return 0;
}

/*
 * Delete a process group
 */
static void
pgdelete_dropref(struct pgrp *pgrp)
{
	struct tty *ttyp;
	int emptypgrp = 1;
	struct session *sessp;

	pgrp_lock(pgrp);
	if (pgrp->pg_membercnt != 0) {
		emptypgrp = 0;
	}
	pgrp_unlock(pgrp);

	proc_list_lock();
	pgrp->pg_refcount--;
	if ((emptypgrp == 0) || (pgrp->pg_membercnt != 0)) {
		proc_list_unlock();
		return;
	}

	pgrp->pg_listflags |= PGRP_FLAG_TERMINATE;

	if (pgrp->pg_refcount > 0) {
		proc_list_unlock();
		return;
	}

	pgrp->pg_listflags |= PGRP_FLAG_DEAD;
	LIST_REMOVE(pgrp, pg_hash);

	proc_list_unlock();

	ttyp = SESSION_TP(pgrp->pg_session);
	if (ttyp != TTY_NULL) {
		if (ttyp->t_pgrp == pgrp) {
			tty_lock(ttyp);
			/* Re-check after acquiring the lock */
			if (ttyp->t_pgrp == pgrp) {
				ttyp->t_pgrp = NULL;
				pgrp->pg_session->s_ttypgrpid = NO_PID;
			}
			tty_unlock(ttyp);
		}
	}

	proc_list_lock();

	sessp = pgrp->pg_session;
	if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
		panic("pg_deleteref: manipulating refs of already terminating session");
	}
	if (--sessp->s_count == 0) {
		if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
			panic("pg_deleteref: terminating already terminated session");
		}
		sessp->s_listflags |= S_LIST_TERM;
		ttyp = SESSION_TP(sessp);
		LIST_REMOVE(sessp, s_hash);
		proc_list_unlock();
		if (ttyp != TTY_NULL) {
			tty_lock(ttyp);
			if (ttyp->t_session == sessp) {
				ttyp->t_session = NULL;
			}
			tty_unlock(ttyp);
		}
		proc_list_lock();
		sessp->s_listflags |= S_LIST_DEAD;
		if (sessp->s_count != 0) {
			panic("pg_deleteref: freeing session in use");
		}
		proc_list_unlock();
		lck_mtx_destroy(&sessp->s_mlock, proc_mlock_grp);

		FREE_ZONE(sessp, sizeof(struct session), M_SESSION);
	} else {
		proc_list_unlock();
	}
	lck_mtx_destroy(&pgrp->pg_mlock, proc_mlock_grp);
	FREE_ZONE(pgrp, sizeof(*pgrp), M_PGRP);
}
/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session). If that count reaches zero, the
 * process group becomes orphaned. Check both the specified process'
 * process group and that of its children.
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 */
int
fixjob_callback(proc_t p, void *arg)
{
	struct fixjob_iterargs *fp;
	struct pgrp *pg, *hispg;
	struct session *mysession, *hissess;
	int entering;

	fp = (struct fixjob_iterargs *)arg;
	pg = fp->pg;
	mysession = fp->mysession;
	entering = fp->entering;

	hispg = proc_pgrp(p);
	hissess = proc_session(p);

	if ((hispg != pg) &&
	    (hissess == mysession)) {
		pgrp_lock(hispg);
		if (entering) {
			hispg->pg_jobc++;
			pgrp_unlock(hispg);
		} else if (--hispg->pg_jobc == 0) {
			pgrp_unlock(hispg);
			orphanpg(hispg);
		} else {
			pgrp_unlock(hispg);
		}
	}
	if (hissess != SESSION_NULL) {
		session_rele(hissess);
	}
	if (hispg != PGRP_NULL) {
		pg_rele(hispg);
	}

	return PROC_RETURNED;
}
void
fixjobc(proc_t p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp = PGRP_NULL;
	struct session *hissess = SESSION_NULL;
	struct session *mysession = pgrp->pg_session;
	proc_t parent;
	struct fixjob_iterargs fjarg;
	boolean_t proc_parent_self;

	/*
	 * Check if p's parent is current proc, if yes then no need to take
	 * a ref; calling proc_parent with current proc as parent may
	 * deadlock if current proc is exiting.
	 */
	proc_parent_self = proc_parent_is_currentproc(p);
	if (proc_parent_self) {
		parent = current_proc();
	} else {
		parent = proc_parent(p);
	}

	if (parent != PROC_NULL) {
		hispgrp = proc_pgrp(parent);
		hissess = proc_session(parent);
		if (!proc_parent_self) {
			proc_rele(parent);
		}
	}

	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	if ((hispgrp != pgrp) &&
	    (hissess == mysession)) {
		pgrp_lock(pgrp);
		if (entering) {
			pgrp->pg_jobc++;
			pgrp_unlock(pgrp);
		} else if (--pgrp->pg_jobc == 0) {
			pgrp_unlock(pgrp);
			orphanpg(pgrp);
		} else {
			pgrp_unlock(pgrp);
		}
	}

	if (hissess != SESSION_NULL) {
		session_rele(hissess);
	}
	if (hispgrp != PGRP_NULL) {
		pg_rele(hispgrp);
	}

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	fjarg.pg = pgrp;
	fjarg.mysession = mysession;
	fjarg.entering = entering;
	proc_childrenwalk(p, fixjob_callback, &fjarg);
}
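/*
 * Illustrative example (not in the original file): in a session with shell S
 * in pgrp A and a pipeline child C in pgrp B, C's parent (S) sits in a
 * different pgrp of the same session, so C "qualifies" B and B's pg_jobc is 1.
 * If S exits or C is reparented, fixjobc(..., 0) drops pg_jobc to 0 and
 * orphanpg(B) runs, delivering SIGHUP/SIGCONT to any stopped members.
 */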
/*
 * The pidlist_* routines support the functions in this file that
 * walk lists of processes applying filters and callouts to the
 * elements of the list.
 *
 * A prior implementation used a single linear array, which can be
 * tricky to allocate on large systems. This implementation creates
 * an SLIST of modestly sized arrays of PIDS_PER_ENTRY elements.
 *
 * The array should be sized large enough to keep the overhead of
 * walking the list low, but small enough that blocking allocations of
 * pidlist_entry_t structures always succeed.
 */

#define PIDS_PER_ENTRY 1021

typedef struct pidlist_entry {
	SLIST_ENTRY(pidlist_entry) pe_link;
	u_int pe_nused;
	pid_t pe_pid[PIDS_PER_ENTRY];
} pidlist_entry_t;

typedef struct {
	SLIST_HEAD(, pidlist_entry) pl_head;
	struct pidlist_entry *pl_active;
	u_int pl_nalloc;
} pidlist_t;

static __inline__ pidlist_t *
pidlist_init(pidlist_t *pl)
{
	SLIST_INIT(&pl->pl_head);
	pl->pl_active = NULL;
	pl->pl_nalloc = 0;
	return pl;
}

static u_int
pidlist_alloc(pidlist_t *pl, u_int needed)
{
	while (pl->pl_nalloc < needed) {
		pidlist_entry_t *pe = kalloc(sizeof(*pe));
		if (NULL == pe) {
			panic("no space for pidlist entry");
		}
		pe->pe_nused = 0;
		SLIST_INSERT_HEAD(&pl->pl_head, pe, pe_link);
		pl->pl_nalloc += (sizeof(pe->pe_pid) / sizeof(pe->pe_pid[0]));
	}
	return pl->pl_nalloc;
}

static void
pidlist_free(pidlist_t *pl)
{
	pidlist_entry_t *pe;
	while (NULL != (pe = SLIST_FIRST(&pl->pl_head))) {
		SLIST_FIRST(&pl->pl_head) = SLIST_NEXT(pe, pe_link);
		kfree(pe, sizeof(*pe));
	}
	pl->pl_nalloc = 0;
}

static __inline__ void
pidlist_set_active(pidlist_t *pl)
{
	pl->pl_active = SLIST_FIRST(&pl->pl_head);
	assert(pl->pl_active);
}

static void
pidlist_add_pid(pidlist_t *pl, pid_t pid)
{
	pidlist_entry_t *pe = pl->pl_active;
	if (pe->pe_nused >= sizeof(pe->pe_pid) / sizeof(pe->pe_pid[0])) {
		if (NULL == (pe = SLIST_NEXT(pe, pe_link))) {
			panic("pidlist allocation exhausted");
		}
		pl->pl_active = pe;
	}
	pe->pe_pid[pe->pe_nused++] = pid;
}

static __inline__ u_int
pidlist_nalloc(const pidlist_t *pl)
{
	return pl->pl_nalloc;
}
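/*
 * Illustrative sketch (not in the original file) of the pattern the
 * iterators below follow: size the list outside the lock, snapshot pids
 * under the lock, then call out with no locks held.
 *
 *	pidlist_t pid_list, *pl = pidlist_init(&pid_list);
 *	proc_list_lock();
 *	u_int needed = <count candidates under the lock>;
 *	while (pidlist_nalloc(pl) < needed) {
 *		proc_list_unlock();          // allocate (may block) unlocked
 *		pidlist_alloc(pl, needed);
 *		proc_list_lock();            // re-check; count may have grown
 *	}
 *	pidlist_set_active(pl);
 *	<pidlist_add_pid() each candidate>;
 *	proc_list_unlock();
 *	<iterate entries, proc_find() each pid, apply callout>;
 *	pidlist_free(pl);
 */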
/*
 * A process group has become orphaned; if there are any stopped processes in
 * the group, hang up all processes in that group.
 */
static void
orphanpg(struct pgrp *pgrp)
{
	pidlist_t pid_list, *pl = pidlist_init(&pid_list);
	u_int pid_count_available = 0;
	proc_t p;

	/* allocate outside of the pgrp_lock */
	for (;;) {
		pgrp_lock(pgrp);

		boolean_t should_iterate = FALSE;
		pid_count_available = 0;

		PGMEMBERS_FOREACH(pgrp, p) {
			pid_count_available++;
			if (p->p_stat == SSTOP) {
				should_iterate = TRUE;
			}
		}
		if (pid_count_available == 0 || !should_iterate) {
			pgrp_unlock(pgrp);
			goto out; /* no orphaned processes OR nothing stopped */
		}
		if (pidlist_nalloc(pl) >= pid_count_available) {
			break;
		}
		pgrp_unlock(pgrp);

		pidlist_alloc(pl, pid_count_available);
	}
	pidlist_set_active(pl);

	u_int pid_count = 0;
	PGMEMBERS_FOREACH(pgrp, p) {
		pidlist_add_pid(pl, proc_pid(p));
		if (++pid_count >= pid_count_available) {
			break;
		}
	}
	pgrp_unlock(pgrp);

	const pidlist_entry_t *pe;
	SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
		for (u_int i = 0; i < pe->pe_nused; i++) {
			const pid_t pid = pe->pe_pid[i];
			if (0 == pid) {
				continue; /* skip kernproc */
			}
			p = proc_find(pid);
			if (!p) {
				continue;
			}
			proc_transwait(p, 0);
			pt_setrunnable(p);
			psignal(p, SIGHUP);
			psignal(p, SIGCONT);
			proc_rele(p);
		}
	}
out:
	pidlist_free(pl);
}
int
proc_is_classic(proc_t p __unused)
{
	return 0;
}

/* XXX Why does this function exist?  Need to kill it off... */
proc_t
current_proc_EXTERNAL(void)
{
	return current_proc();
}

int
proc_is_forcing_hfs_case_sensitivity(proc_t p)
{
	return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY) ? 1 : 0;
}
#if CONFIG_COREDUMP
/*
 * proc_core_name(name, uid, pid)
 * Expand the name described in corefilename, using name, uid, and pid.
 * corefilename is a printf-like string, with three format specifiers:
 *	%N	name of process ("name")
 *	%P	process id (pid)
 *	%U	user id (uid)
 * For example, "%N.core" is the default; they can be disabled completely
 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
 * This is controlled by the sysctl variable kern.corefile (see above).
 */
__private_extern__ int
proc_core_name(const char *name, uid_t uid, pid_t pid, char *cf_name,
    size_t cf_name_len)
{
	const char *format, *appendstr;
	char id_buf[11];                /* Buffer for pid/uid -- max 4B */
	size_t i, l, n;

	if (cf_name == NULL) {
		goto toolong;
	}

	format = corefilename;
	for (i = 0, n = 0; n < cf_name_len && format[i]; i++) {
		switch (format[i]) {
		case '%':       /* Format character */
			i++;
			switch (format[i]) {
			case '%':
				appendstr = "%";
				break;
			case 'N':       /* process name */
				appendstr = name;
				break;
			case 'P':       /* process id */
				snprintf(id_buf, sizeof(id_buf), "%u", pid);
				appendstr = id_buf;
				break;
			case 'U':       /* user id */
				snprintf(id_buf, sizeof(id_buf), "%u", uid);
				appendstr = id_buf;
				break;
			case '\0':      /* format string ended in % symbol */
				goto endofstring;
			default:
				appendstr = "";
				log(LOG_ERR,
				    "Unknown format character %c in `%s'\n",
				    format[i], format);
			}
			l = strlen(appendstr);
			if ((n + l) >= cf_name_len) {
				goto toolong;
			}
			bcopy(appendstr, cf_name + n, l);
			n += l;
			break;
		default:
			cf_name[n++] = format[i];
		}
	}
	if (format[i] != '\0') {
		goto toolong;
	}
	return 0;
toolong:
	log(LOG_ERR, "pid %ld (%s), uid (%u): corename is too long\n",
	    (long)pid, name, (uint32_t)uid);
	return 1;
endofstring:
	log(LOG_ERR, "pid %ld (%s), uid (%u): unexpected end of string after %% token\n",
	    (long)pid, name, (uint32_t)uid);
	return 1;
}
#endif /* CONFIG_COREDUMP */
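/*
 * Illustrative example (not in the original file): with
 * kern.corefile = "/cores/%U/%N-%P.core", a crash of pid 512 of "Finder"
 * running as uid 501 would expand, via proc_core_name(), to:
 *
 *	/cores/501/Finder-512.core
 */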
/* Code Signing related routines */

int
csops(__unused proc_t p, struct csops_args *uap, __unused int32_t *retval)
{
	return csops_internal(uap->pid, uap->ops, uap->useraddr,
	           uap->usersize, USER_ADDR_NULL);
}

int
csops_audittoken(__unused proc_t p, struct csops_audittoken_args *uap, __unused int32_t *retval)
{
	if (uap->uaudittoken == USER_ADDR_NULL) {
		return EINVAL;
	}
	return csops_internal(uap->pid, uap->ops, uap->useraddr,
	           uap->usersize, uap->uaudittoken);
}
static int
csops_copy_token(void *start, size_t length, user_size_t usize, user_addr_t uaddr)
{
	char fakeheader[8] = { 0 };
	int error;

	if (usize < sizeof(fakeheader)) {
		return ERANGE;
	}

	/* if no blob, fill in zero header */
	if (NULL == start) {
		start = fakeheader;
		length = sizeof(fakeheader);
	} else if (usize < length) {
		/* ... if input too short, copy out length of entitlement */
		uint32_t length32 = htonl((uint32_t)length);
		memcpy(&fakeheader[4], &length32, sizeof(length32));

		error = copyout(fakeheader, uaddr, sizeof(fakeheader));
		if (error == 0) {
			return ERANGE; /* input buffer too short, ERANGE signals that */
		}
		return error;
	}
	return copyout(start, uaddr, length);
}
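/*
 * Illustrative note (not in the original file): the 8-byte fakeheader mimics
 * the start of a code-signing blob -- a 4-byte magic followed by a 4-byte
 * big-endian total length. When the caller's buffer is too small, the bytes
 * copied out are:
 *
 *	bytes 0-3: 0x00000000            (zero-filled, no magic)
 *	bytes 4-7: htonl(blob length)    (so userland can size a retry)
 *
 * A caller can thus probe with an 8-byte buffer, read the length,
 * reallocate, and call again.
 */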
static int
csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaudittoken)
{
	size_t usize = (size_t)CAST_DOWN(size_t, usersize);
	proc_t pt;
	int forself;
	int error;
	vnode_t tvp;
	off_t toff;
	unsigned char cdhash[SHA1_RESULTLEN];
	audit_token_t token;
	unsigned int upid = 0, uidversion = 0;

	forself = error = 0;

	if (pid == 0) {
		pid = proc_selfpid();
	}
	if (pid == proc_selfpid()) {
		forself = 1;
	}

	switch (ops) {
	case CS_OPS_STATUS:
	case CS_OPS_CDHASH:
	case CS_OPS_PIDOFFSET:
	case CS_OPS_ENTITLEMENTS_BLOB:
	case CS_OPS_IDENTITY:
	case CS_OPS_BLOB:
	case CS_OPS_TEAMID:
		break;          /* not restricted to root */
	default:
		if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE) {
			return EPERM;
		}
		break;
	}

	pt = proc_find(pid);
	if (pt == PROC_NULL) {
		return ESRCH;
	}

	upid = pt->p_pid;
	uidversion = pt->p_idversion;
	if (uaudittoken != USER_ADDR_NULL) {
		error = copyin(uaudittoken, &token, sizeof(audit_token_t));
		if (error != 0) {
			goto out;
		}
		/* verify the audit token pid/idversion matches with proc */
		if ((token.val[5] != upid) || (token.val[7] != uidversion)) {
			error = ESRCH;
			goto out;
		}
	}

	switch (ops) {
	case CS_OPS_MARKINVALID:
	case CS_OPS_MARKHARD:
	case CS_OPS_MARKKILL:
	case CS_OPS_MARKRESTRICT:
	case CS_OPS_SET_STATUS:
	case CS_OPS_CLEARINSTALLER:
	case CS_OPS_CLEARPLATFORM:
		if ((error = mac_proc_check_set_cs_info(current_proc(), pt, ops))) {
			goto out;
		}
		break;
	default:
		if ((error = mac_proc_check_get_cs_info(current_proc(), pt, ops))) {
			goto out;
		}
	}

	switch (ops) {
	case CS_OPS_STATUS: {
		uint32_t retflags;

		proc_lock(pt);
		retflags = pt->p_csflags;
		if (cs_process_enforcement(pt)) {
			retflags |= CS_ENFORCEMENT;
		}
		if (csproc_get_platform_binary(pt)) {
			retflags |= CS_PLATFORM_BINARY;
		}
		if (csproc_get_platform_path(pt)) {
			retflags |= CS_PLATFORM_PATH;
		}
		/* Don't return CS_REQUIRE_LV if we turned it on with CS_FORCED_LV, but still report CS_FORCED_LV */
		if ((pt->p_csflags & CS_FORCED_LV) == CS_FORCED_LV) {
			retflags &= (~CS_REQUIRE_LV);
		}
		proc_unlock(pt);

		if (uaddr != USER_ADDR_NULL) {
			error = copyout(&retflags, uaddr, sizeof(uint32_t));
		}
		break;
	}
	case CS_OPS_MARKINVALID:
		proc_lock(pt);
		if ((pt->p_csflags & CS_VALID) == CS_VALID) {           /* is currently valid */
			pt->p_csflags &= ~CS_VALID;                     /* set invalid */
			if ((pt->p_csflags & CS_KILL) == CS_KILL) {
				pt->p_csflags |= CS_KILLED;
				proc_unlock(pt);
				if (cs_debug) {
					printf("CODE SIGNING: marked invalid by pid %d: "
					    "p=%d[%s] honoring CS_KILL, final status 0x%x\n",
					    proc_selfpid(), pt->p_pid, pt->p_comm, pt->p_csflags);
				}
				psignal(pt, SIGKILL);
			} else {
				proc_unlock(pt);
			}
		} else {
			proc_unlock(pt);
		}
		break;

	case CS_OPS_MARKHARD:
		proc_lock(pt);
		pt->p_csflags |= CS_HARD;
		if ((pt->p_csflags & CS_VALID) == 0) {
			/* @@@ allow? reject? kill? @@@ */
			proc_unlock(pt);
			error = EINVAL;
			goto out;
		} else {
			proc_unlock(pt);
		}
		break;

	case CS_OPS_MARKKILL:
		proc_lock(pt);
		pt->p_csflags |= CS_KILL;
		if ((pt->p_csflags & CS_VALID) == 0) {
			proc_unlock(pt);
			psignal(pt, SIGKILL);
		} else {
			proc_unlock(pt);
		}
		break;

	case CS_OPS_PIDOFFSET:
		toff = pt->p_textoff;
		proc_rele(pt);
		error = copyout(&toff, uaddr, sizeof(toff));
		return error;

	case CS_OPS_CDHASH:

		/* pt already holds a reference on its p_textvp */
		tvp = pt->p_textvp;
		toff = pt->p_textoff;

		if (tvp == NULLVP || usize != SHA1_RESULTLEN) {
			proc_rele(pt);
			return EINVAL;
		}

		error = vn_getcdhash(tvp, toff, cdhash);
		proc_rele(pt);

		if (error == 0) {
			error = copyout(cdhash, uaddr, sizeof(cdhash));
		}

		return error;

	case CS_OPS_ENTITLEMENTS_BLOB: {
		void *start;
		size_t length;

		proc_lock(pt);

		if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
			proc_unlock(pt);
			error = EINVAL;
			break;
		}

		error = cs_entitlements_blob_get(pt, &start, &length);
		proc_unlock(pt);
		if (error) {
			break;
		}

		error = csops_copy_token(start, length, usize, uaddr);
		break;
	}
	case CS_OPS_MARKRESTRICT:
		proc_lock(pt);
		pt->p_csflags |= CS_RESTRICT;
		proc_unlock(pt);
		break;

	case CS_OPS_SET_STATUS: {
		uint32_t flags;

		if (usize < sizeof(flags)) {
			error = ERANGE;
			break;
		}

		error = copyin(uaddr, &flags, sizeof(flags));
		if (error) {
			break;
		}

		/* only allow setting a subset of all code sign flags */
		flags &=
		    CS_HARD | CS_EXEC_SET_HARD |
		    CS_KILL | CS_EXEC_SET_KILL |
		    CS_RESTRICT |
		    CS_REQUIRE_LV |
		    CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT;

		proc_lock(pt);
		if (pt->p_csflags & CS_VALID) {
			pt->p_csflags |= flags;
		} else {
			error = EINVAL;
		}
		proc_unlock(pt);

		break;
	}
	case CS_OPS_BLOB: {
		void *start;
		size_t length;

		proc_lock(pt);
		if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
			proc_unlock(pt);
			error = EINVAL;
			break;
		}
		proc_unlock(pt);

		error = cs_blob_get(pt, &start, &length);
		if (error) {
			break;
		}

		error = csops_copy_token(start, length, usize, uaddr);
		break;
	}
	case CS_OPS_IDENTITY:
	case CS_OPS_TEAMID: {
		const char *identity;
		uint8_t fakeheader[8];
		uint32_t idlen;
		size_t length;

		/*
		 * Make identity have a blob header to make it
		 * easier on userland to guess the identity
		 * length.
		 */
		if (usize < sizeof(fakeheader)) {
			error = ERANGE;
			break;
		}
		memset(fakeheader, 0, sizeof(fakeheader));

		proc_lock(pt);
		if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
			proc_unlock(pt);
			error = EINVAL;
			break;
		}

		identity = ops == CS_OPS_TEAMID ? csproc_get_teamid(pt) : cs_identity_get(pt);
		proc_unlock(pt);
		if (identity == NULL) {
			error = ENOENT;
			break;
		}

		length = strlen(identity) + 1;         /* include NUL */
		idlen = htonl(length + sizeof(fakeheader));
		memcpy(&fakeheader[4], &idlen, sizeof(idlen));

		error = copyout(fakeheader, uaddr, sizeof(fakeheader));
		if (error) {
			break;
		}

		if (usize < sizeof(fakeheader) + length) {
			error = ERANGE;
		} else if (usize > sizeof(fakeheader)) {
			error = copyout(identity, uaddr + sizeof(fakeheader), length);
		}

		break;
	}
	case CS_OPS_CLEARINSTALLER:
		proc_lock(pt);
		pt->p_csflags &= ~(CS_INSTALLER | CS_DATAVAULT_CONTROLLER | CS_EXEC_INHERIT_SIP);
		proc_unlock(pt);
		break;

	case CS_OPS_CLEARPLATFORM:
#if DEVELOPMENT || DEBUG
		if (cs_process_global_enforcement()) {
			error = ENOTSUP;
			break;
		}

#if CONFIG_CSR
		if (csr_check(CSR_ALLOW_APPLE_INTERNAL) != 0) {
			error = ENOTSUP;
			break;
		}
#endif

		proc_lock(pt);
		pt->p_csflags &= ~(CS_PLATFORM_BINARY | CS_PLATFORM_PATH);
		csproc_clear_platform_binary(pt);
		proc_unlock(pt);
		break;
#else
		error = ENOTSUP;
		break;
#endif /* !DEVELOPMENT || DEBUG */

	default:
		error = EINVAL;
		break;
	}
out:
	proc_rele(pt);
	return error;
}
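/*
 * Illustrative userland sketch (not in the original file): the csops()
 * syscall wrapper declared in Apple's private <sys/codesign.h> lets a
 * process query its own signing state, e.g.:
 *
 *	uint32_t flags = 0;
 *	if (csops(getpid(), CS_OPS_STATUS, &flags, sizeof(flags)) == 0 &&
 *	    (flags & CS_VALID)) {
 *		// signature still considered valid by the kernel
 *	}
 */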
void
proc_iterate(
	unsigned int flags,
	proc_iterate_fn_t callout,
	void *arg,
	proc_iterate_fn_t filterfn,
	void *filterarg)
{
	pidlist_t pid_list, *pl = pidlist_init(&pid_list);
	u_int pid_count_available = 0;

	assert(callout != NULL);

	/* allocate outside of the proc_list_lock */
	for (;;) {
		proc_list_lock();
		pid_count_available = nprocs + 1; /* kernel_task not counted in nprocs */
		assert(pid_count_available > 0);
		if (pidlist_nalloc(pl) > pid_count_available) {
			break;
		}
		proc_list_unlock();

		pidlist_alloc(pl, pid_count_available);
	}
	pidlist_set_active(pl);

	/* filter pids into the pid_list */

	u_int pid_count = 0;
	if (flags & PROC_ALLPROCLIST) {
		proc_t p;
		ALLPROC_FOREACH(p) {
			/* ignore processes that are being forked */
			if (p->p_stat == SIDL) {
				continue;
			}
			if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
				continue;
			}
			pidlist_add_pid(pl, proc_pid(p));
			if (++pid_count >= pid_count_available) {
				break;
			}
		}
	}

	if ((pid_count < pid_count_available) &&
	    (flags & PROC_ZOMBPROCLIST)) {
		proc_t p;
		ZOMBPROC_FOREACH(p) {
			if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
				continue;
			}
			pidlist_add_pid(pl, proc_pid(p));
			if (++pid_count >= pid_count_available) {
				break;
			}
		}
	}

	proc_list_unlock();

	/* call callout on processes in the pid_list */

	const pidlist_entry_t *pe;
	SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
		for (u_int i = 0; i < pe->pe_nused; i++) {
			const pid_t pid = pe->pe_pid[i];
			proc_t p = proc_find(pid);
			if (p) {
				if ((flags & PROC_NOWAITTRANS) == 0) {
					proc_transwait(p, 0);
				}
				const int callout_ret = callout(p, arg);

				switch (callout_ret) {
				case PROC_RETURNED_DONE:
					proc_rele(p);
				/* FALLTHROUGH */
				case PROC_CLAIMED_DONE:
					goto out;

				case PROC_RETURNED:
					proc_rele(p);
				/* FALLTHROUGH */
				case PROC_CLAIMED:
					break;
				default:
					panic("%s: callout =%d for pid %d",
					    __func__, callout_ret, pid);
					break;
				}
			} else if (flags & PROC_ZOMBPROCLIST) {
				p = proc_find_zombref(pid);
				if (!p) {
					continue;
				}
				const int callout_ret = callout(p, arg);

				switch (callout_ret) {
				case PROC_RETURNED_DONE:
					proc_drop_zombref(p);
				/* FALLTHROUGH */
				case PROC_CLAIMED_DONE:
					goto out;

				case PROC_RETURNED:
					proc_drop_zombref(p);
				/* FALLTHROUGH */
				case PROC_CLAIMED:
					break;
				default:
					panic("%s: callout =%d for zombie %d",
					    __func__, callout_ret, pid);
					break;
				}
			}
		}
	}
out:
	pidlist_free(pl);
}
void
proc_rebootscan(
	proc_iterate_fn_t callout,
	void *arg,
	proc_iterate_fn_t filterfn,
	void *filterarg)
{
	proc_t p;

	assert(callout != NULL);

	proc_shutdown_exitcount = 0;

restart_foreach:

	proc_list_lock();

	ALLPROC_FOREACH(p) {
		if ((filterfn != NULL) && filterfn(p, filterarg) == 0) {
			continue;
		}
		p = proc_ref_locked(p);
		if (!p) {
			continue;
		}

		proc_list_unlock();

		proc_transwait(p, 0);
		(void)callout(p, arg);
		proc_rele(p);

		goto restart_foreach;
	}

	proc_list_unlock();
}
void
proc_childrenwalk(
	proc_t parent,
	proc_iterate_fn_t callout,
	void *arg)
{
	pidlist_t pid_list, *pl = pidlist_init(&pid_list);
	u_int pid_count_available = 0;

	assert(parent != NULL);
	assert(callout != NULL);

	for (;;) {
		proc_list_lock();
		pid_count_available = parent->p_childrencnt;
		if (pid_count_available == 0) {
			proc_list_unlock();
			goto out;
		}
		if (pidlist_nalloc(pl) > pid_count_available) {
			break;
		}
		proc_list_unlock();

		pidlist_alloc(pl, pid_count_available);
	}
	pidlist_set_active(pl);

	u_int pid_count = 0;
	proc_t p;
	PCHILDREN_FOREACH(parent, p) {
		if (p->p_stat == SIDL) {
			continue;
		}
		pidlist_add_pid(pl, proc_pid(p));
		if (++pid_count >= pid_count_available) {
			break;
		}
	}

	proc_list_unlock();

	const pidlist_entry_t *pe;
	SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
		for (u_int i = 0; i < pe->pe_nused; i++) {
			const pid_t pid = pe->pe_pid[i];
			p = proc_find(pid);
			if (!p) {
				continue;
			}
			const int callout_ret = callout(p, arg);

			switch (callout_ret) {
			case PROC_RETURNED_DONE:
				proc_rele(p);
			/* FALLTHROUGH */
			case PROC_CLAIMED_DONE:
				goto out;

			case PROC_RETURNED:
				proc_rele(p);
			/* FALLTHROUGH */
			case PROC_CLAIMED:
				break;
			default:
				panic("%s: callout =%d for pid %d",
				    __func__, callout_ret, pid);
				break;
			}
		}
	}
out:
	pidlist_free(pl);
}
void
pgrp_iterate(
	struct pgrp *pgrp,
	unsigned int flags,
	proc_iterate_fn_t callout,
	void *arg,
	proc_iterate_fn_t filterfn,
	void *filterarg)
{
	pidlist_t pid_list, *pl = pidlist_init(&pid_list);
	u_int pid_count_available = 0;

	assert(pgrp != NULL);
	assert(callout != NULL);

	for (;;) {
		pgrp_lock(pgrp);
		pid_count_available = pgrp->pg_membercnt;
		if (pid_count_available == 0) {
			pgrp_unlock(pgrp);
			if (flags & PGRP_DROPREF) {
				pg_rele(pgrp);
			}
			goto out;
		}
		if (pidlist_nalloc(pl) > pid_count_available) {
			break;
		}
		pgrp_unlock(pgrp);

		pidlist_alloc(pl, pid_count_available);
	}
	pidlist_set_active(pl);

	const pid_t pgid = pgrp->pg_id;
	u_int pid_count = 0;
	proc_t p;
	PGMEMBERS_FOREACH(pgrp, p) {
		if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
			continue;
		}
		pidlist_add_pid(pl, proc_pid(p));
		if (++pid_count >= pid_count_available) {
			break;
		}
	}

	pgrp_unlock(pgrp);

	if (flags & PGRP_DROPREF) {
		pg_rele(pgrp);
	}

	const pidlist_entry_t *pe;
	SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
		for (u_int i = 0; i < pe->pe_nused; i++) {
			const pid_t pid = pe->pe_pid[i];
			if (0 == pid) {
				continue; /* skip kernproc */
			}
			p = proc_find(pid);
			if (!p) {
				continue;
			}
			if (p->p_pgrpid != pgid) {
				proc_rele(p);
				continue;
			}
			const int callout_ret = callout(p, arg);

			switch (callout_ret) {
			case PROC_RETURNED:
				proc_rele(p);
			/* FALLTHROUGH */
			case PROC_CLAIMED:
				break;
			case PROC_RETURNED_DONE:
				proc_rele(p);
			/* FALLTHROUGH */
			case PROC_CLAIMED_DONE:
				goto out;

			default:
				panic("%s: callout =%d for pid %d",
				    __func__, callout_ret, pid);
			}
		}
	}

out:
	pidlist_free(pl);
}
static void
pgrp_add(struct pgrp *pgrp, struct proc *parent, struct proc *child)
{
	proc_list_lock();
	child->p_pgrp = pgrp;
	child->p_pgrpid = pgrp->pg_id;
	child->p_listflag |= P_LIST_INPGRP;
	/*
	 * When pgrp is being freed, a process can still request addition
	 * using setpgid from bash when login is terminated (login cycler);
	 * return ESRCH. Safe to hold lock due to refcount on pgrp.
	 */
	if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
		pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	if ((pgrp->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD) {
		panic("pgrp_add : pgrp is dead adding process");
	}
	proc_list_unlock();

	pgrp_lock(pgrp);
	pgrp->pg_membercnt++;
	if (parent != PROC_NULL) {
		LIST_INSERT_AFTER(parent, child, p_pglist);
	} else {
		LIST_INSERT_HEAD(&pgrp->pg_members, child, p_pglist);
	}
	pgrp_unlock(pgrp);

	proc_list_lock();
	if (((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (pgrp->pg_membercnt != 0)) {
		pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}
	proc_list_unlock();
}
static void
pgrp_remove(struct proc *p)
{
	struct pgrp *pg;

	pg = proc_pgrp(p);

	proc_list_lock();
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INPGRP) == 0) {
		panic("removing from pglist but no named ref\n");
	}
#endif
	p->p_pgrpid = PGRPID_DEAD;
	p->p_listflag &= ~P_LIST_INPGRP;
	p->p_pgrp = NULL;

	proc_list_unlock();

	if (pg == PGRP_NULL) {
		panic("pgrp_remove: pg is NULL");
	}
	pgrp_lock(pg);
	pg->pg_membercnt--;

	if (pg->pg_membercnt < 0) {
		panic("pgprp: -ve membercnt pgprp:%p p:%p\n", pg, p);
	}

	LIST_REMOVE(p, p_pglist);
	if (pg->pg_members.lh_first == 0) {
		pgrp_unlock(pg);
		pgdelete_dropref(pg);
	} else {
		pgrp_unlock(pg);
		pg_rele(pg);
	}
}
2917 pgrp_replace(struct proc
* p
, struct pgrp
* newpg
)
2919 struct pgrp
* oldpg
;
2925 while ((p
->p_listflag
& P_LIST_PGRPTRANS
) == P_LIST_PGRPTRANS
) {
2926 p
->p_listflag
|= P_LIST_PGRPTRWAIT
;
2927 (void)msleep(&p
->p_pgrpid
, proc_list_mlock
, 0, "proc_pgrp", 0);
2930 p
->p_listflag
|= P_LIST_PGRPTRANS
;
2933 if (oldpg
== PGRP_NULL
) {
2934 panic("pgrp_replace: oldpg NULL");
2936 oldpg
->pg_refcount
++;
2937 #if __PROC_INTERNAL_DEBUG
2938 if ((p
->p_listflag
& P_LIST_INPGRP
) == 0) {
2939 panic("removing from pglist but no named ref\n");
2942 p
->p_pgrpid
= PGRPID_DEAD
;
2943 p
->p_listflag
&= ~P_LIST_INPGRP
;
2949 oldpg
->pg_membercnt
--;
2950 if (oldpg
->pg_membercnt
< 0) {
2951 panic("pgprp: -ve membercnt pgprp:%p p:%p\n", oldpg
, p
);
2953 LIST_REMOVE(p
, p_pglist
);
2954 if (oldpg
->pg_members
.lh_first
== 0) {
2956 pgdelete_dropref(oldpg
);
2964 p
->p_pgrpid
= newpg
->pg_id
;
2965 p
->p_listflag
|= P_LIST_INPGRP
;
2967 * When pgrp is being freed , a process can still
2968 * request addition using setpgid from bash when
2969 * login is terminated (login cycler) return ESRCH
2970 * Safe to hold lock due to refcount on pgrp
2972 if ((newpg
->pg_listflags
& (PGRP_FLAG_TERMINATE
| PGRP_FLAG_DEAD
)) == PGRP_FLAG_TERMINATE
) {
2973 newpg
->pg_listflags
&= ~PGRP_FLAG_TERMINATE
;
2976 if ((newpg
->pg_listflags
& PGRP_FLAG_DEAD
) == PGRP_FLAG_DEAD
) {
2977 panic("pgrp_add : pgrp is dead adding process");
2982 newpg
->pg_membercnt
++;
2983 LIST_INSERT_HEAD(&newpg
->pg_members
, p
, p_pglist
);
2987 if (((newpg
->pg_listflags
& (PGRP_FLAG_TERMINATE
| PGRP_FLAG_DEAD
)) == PGRP_FLAG_TERMINATE
) && (newpg
->pg_membercnt
!= 0)) {
2988 newpg
->pg_listflags
&= ~PGRP_FLAG_TERMINATE
;
2991 p
->p_listflag
&= ~P_LIST_PGRPTRANS
;
2992 if ((p
->p_listflag
& P_LIST_PGRPTRWAIT
) == P_LIST_PGRPTRWAIT
) {
2993 p
->p_listflag
&= ~P_LIST_PGRPTRWAIT
;
2994 wakeup(&p
->p_pgrpid
);
void
pgrp_lock(struct pgrp *pgrp)
{
	lck_mtx_lock(&pgrp->pg_mlock);
}

void
pgrp_unlock(struct pgrp *pgrp)
{
	lck_mtx_unlock(&pgrp->pg_mlock);
}

void
session_lock(struct session *sess)
{
	lck_mtx_lock(&sess->s_mlock);
}

void
session_unlock(struct session *sess)
{
	lck_mtx_unlock(&sess->s_mlock);
}
struct pgrp *
proc_pgrp(proc_t p)
{
	struct pgrp *pgrp;

	if (p == PROC_NULL) {
		return PGRP_NULL;
	}
	proc_list_lock();

	while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
		p->p_listflag |= P_LIST_PGRPTRWAIT;
		(void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
	}

	pgrp = p->p_pgrp;

	assert(pgrp != NULL);

	if (pgrp != PGRP_NULL) {
		pgrp->pg_refcount++;
		if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) != 0) {
			panic("proc_pgrp: ref being provided for dead pgrp");
		}
	}

	proc_list_unlock();

	return pgrp;
}

struct pgrp *
tty_pgrp(struct tty *tp)
{
	struct pgrp *pg = PGRP_NULL;

	proc_list_lock();
	pg = tp->t_pgrp;

	if (pg != PGRP_NULL) {
		if ((pg->pg_listflags & PGRP_FLAG_DEAD) != 0) {
			panic("tty_pgrp: ref being provided for dead pgrp");
		}
		pg->pg_refcount++;
	}
	proc_list_unlock();

	return pg;
}
struct session *
proc_session(proc_t p)
{
	struct session *sess = SESSION_NULL;

	if (p == PROC_NULL) {
		return SESSION_NULL;
	}

	proc_list_lock();

	/* wait during transitions */
	while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
		p->p_listflag |= P_LIST_PGRPTRWAIT;
		(void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
	}

	if ((p->p_pgrp != PGRP_NULL) && ((sess = p->p_pgrp->pg_session) != SESSION_NULL)) {
		if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
			panic("proc_session: returning session ref on terminating session");
		}
		sess->s_count++;
	}
	proc_list_unlock();
	return sess;
}
void
session_rele(struct session *sess)
{
	proc_list_lock();
	if (--sess->s_count == 0) {
		if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
			panic("session_rele: terminating already terminated session");
		}
		sess->s_listflags |= S_LIST_TERM;
		LIST_REMOVE(sess, s_hash);
		sess->s_listflags |= S_LIST_DEAD;
		if (sess->s_count != 0) {
			panic("session_rele: freeing session in use");
		}
		proc_list_unlock();
		lck_mtx_destroy(&sess->s_mlock, proc_mlock_grp);
		FREE_ZONE(sess, sizeof(struct session), M_SESSION);
	} else {
		proc_list_unlock();
	}
}
int
proc_transstart(proc_t p, int locked, int non_blocking)
{
	if (locked == 0) {
		proc_lock(p);
	}
	while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
		if (((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT) || non_blocking) {
			if (locked == 0) {
				proc_unlock(p);
			}
			return EDEADLK;
		}
		p->p_lflag |= P_LTRANSWAIT;
		msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
	}
	p->p_lflag |= P_LINTRANSIT;
	p->p_transholder = current_thread();
	if (locked == 0) {
		proc_unlock(p);
	}
	return 0;
}
void
proc_transcommit(proc_t p, int locked)
{
	if (locked == 0) {
		proc_lock(p);
	}

	assert((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT);
	assert(p->p_transholder == current_thread());
	p->p_lflag |= P_LTRANSCOMMIT;

	if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
		p->p_lflag &= ~P_LTRANSWAIT;
		wakeup(&p->p_lflag);
	}
	if (locked == 0) {
		proc_unlock(p);
	}
}
void
proc_transend(proc_t p, int locked)
{
	if (locked == 0) {
		proc_lock(p);
	}

	p->p_lflag &= ~(P_LINTRANSIT | P_LTRANSCOMMIT);
	p->p_transholder = NULL;

	if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
		p->p_lflag &= ~P_LTRANSWAIT;
		wakeup(&p->p_lflag);
	}
	if (locked == 0) {
		proc_unlock(p);
	}
}
int
proc_transwait(proc_t p, int locked)
{
	if (locked == 0) {
		proc_lock(p);
	}
	while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
		if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT && current_proc() == p) {
			if (locked == 0) {
				proc_unlock(p);
			}
			return EDEADLK;
		}
		p->p_lflag |= P_LTRANSWAIT;
		msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
	}
	if (locked == 0) {
		proc_unlock(p);
	}
	return 0;
}
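/*
 * Illustrative sketch (not in the original file) of the transition protocol
 * the four routines above implement: exactly one thread "owns" a proc
 * during exec/exit-style state changes, and everyone else either waits or
 * detects deadlock:
 *
 *	if (proc_transstart(p, 0, 0) == 0) {	// become the holder
 *		// ... mutate process state; proc_transcommit(p, 0) once
 *		// the change is irreversible ...
 *		proc_transend(p, 0);		// release and wake waiters
 *	}
 *	// observers call proc_transwait(p, 0) to sleep out a transition
 */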
void
proc_klist_lock(void)
{
	lck_mtx_lock(proc_klist_mlock);
}

void
proc_klist_unlock(void)
{
	lck_mtx_unlock(proc_klist_mlock);
}

void
proc_knote(struct proc *p, long hint)
{
	proc_klist_lock();
	KNOTE(&p->p_klist, hint);
	proc_klist_unlock();
}
void
proc_knote_drain(struct proc *p)
{
	struct knote *kn = NULL;

	/*
	 * Clear the proc's klist to avoid references after the proc is reaped.
	 */
	proc_klist_lock();
	while ((kn = SLIST_FIRST(&p->p_klist))) {
		kn->kn_proc = PROC_NULL;
		KNOTE_DETACH(&p->p_klist, kn);
	}
	proc_klist_unlock();
}
void
proc_setregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag |= P_LREGISTER;
	proc_unlock(p);
}

void
proc_resetregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag &= ~P_LREGISTER;
	proc_unlock(p);
}
pid_t
proc_pgrpid(proc_t p)
{
	return p->p_pgrpid;
}

pid_t
proc_sessionid(proc_t p)
{
	pid_t sid = -1;
	struct session *sessp = proc_session(p);

	if (sessp != SESSION_NULL) {
		sid = sessp->s_sid;
		session_rele(sessp);
	}

	return sid;
}

pid_t
proc_selfpgrpid(void)
{
	return current_proc()->p_pgrpid;
}
/* return control and action states */
int
proc_getpcontrol(int pid, int *pcontrolp)
{
	proc_t p;

	p = proc_find(pid);
	if (p == PROC_NULL) {
		return ESRCH;
	}
	if (pcontrolp != NULL) {
		*pcontrolp = p->p_pcaction;
	}

	proc_rele(p);
	return 0;
}
int
proc_dopcontrol(proc_t p)
{
	int pcontrol;
	os_reason_t kill_reason;

	proc_lock(p);

	pcontrol = PROC_CONTROL_STATE(p);

	if (PROC_ACTION_STATE(p) == 0) {
		switch (pcontrol) {
		case P_PCTHROTTLE:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: throttling pid %d (%s)\n", p->p_pid, p->p_comm);
			break;

		case P_PCSUSP:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: suspending pid %d (%s)\n", p->p_pid, p->p_comm);
			task_suspend(p->task);
			break;

		case P_PCKILL:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
			kill_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_LOWSWAP);
			psignal_with_reason(p, SIGKILL, kill_reason);
			break;

		default:
			proc_unlock(p);
			break;
		}
	} else {
		proc_unlock(p);
	}

	return PROC_RETURNED;
}
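/*
 * Note: the P_PCTHROTTLE / P_PCSUSP / P_PCKILL control states consulted above
 * are per-process policy bits carried in p_pcaction (see proc_getpcontrol()).
 * PROC_ACTION_STATE gates the switch so each action is applied at most once;
 * proc_resetpcontrol() below undoes it when swap pressure clears.
 */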
/*
 * Resume a throttled or suspended process.  This is an internal interface that's only
 * used by the user level code that presents the GUI when we run out of swap space and
 * hence is restricted to processes with superuser privileges.
 */
int
proc_resetpcontrol(int pid)
{
	proc_t p;
	int pcontrol;
	int error;
	proc_t self = current_proc();

	/* if the process has been validated to handle resource control or root is valid one */
	if (((self->p_lflag & P_LVMRSRCOWNER) == 0) && (error = suser(kauth_cred_get(), 0))) {
		return error;
	}

	p = proc_find(pid);
	if (p == PROC_NULL) {
		return ESRCH;
	}

	proc_lock(p);

	pcontrol = PROC_CONTROL_STATE(p);

	if (PROC_ACTION_STATE(p) != 0) {
		switch (pcontrol) {
		case P_PCTHROTTLE:
			PROC_RESETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: unthrottling pid %d (%s)\n", p->p_pid, p->p_comm);
			break;

		case P_PCSUSP:
			PROC_RESETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: resuming pid %d (%s)\n", p->p_pid, p->p_comm);
			task_resume(p->task);
			break;

		case P_PCKILL:
			/* a killed process cannot be resumed; leave the action state set */
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: attempt to unkill pid %d (%s) ignored\n", p->p_pid, p->p_comm);
			break;

		default:
			proc_unlock(p);
			break;
		}
	} else {
		proc_unlock(p);
	}

	proc_rele(p);
	return 0;
}
struct no_paging_space {
	uint64_t        pcs_max_size;
	uint64_t        pcs_uniqueid;
	int             pcs_pid;
	int             pcs_proc_count;
	uint64_t        pcs_total_size;

	uint64_t        npcs_max_size;
	uint64_t        npcs_uniqueid;
	int             npcs_pid;
	int             npcs_proc_count;
	uint64_t        npcs_total_size;

	int             apcs_proc_count;
	uint64_t        apcs_total_size;
};
static int
proc_pcontrol_filter(proc_t p, void *arg)
{
	struct no_paging_space *nps;
	uint64_t        compressed;

	nps = (struct no_paging_space *)arg;

	compressed = get_task_compressed(p->task);

	if (PROC_CONTROL_STATE(p)) {
		if (PROC_ACTION_STATE(p) == 0) {
			if (compressed > nps->pcs_max_size) {
				nps->pcs_pid = p->p_pid;
				nps->pcs_uniqueid = p->p_uniqueid;
				nps->pcs_max_size = compressed;
			}
			nps->pcs_total_size += compressed;
			nps->pcs_proc_count++;
		} else {
			nps->apcs_total_size += compressed;
			nps->apcs_proc_count++;
		}
	} else {
		if (compressed > nps->npcs_max_size) {
			nps->npcs_pid = p->p_pid;
			nps->npcs_uniqueid = p->p_uniqueid;
			nps->npcs_max_size = compressed;
		}
		nps->npcs_total_size += compressed;
		nps->npcs_proc_count++;
	}
	return 0;
}
static int
proc_pcontrol_null(__unused proc_t p, __unused void *arg)
{
	return PROC_RETURNED;
}
/*
 * Deal with the low on compressor pool space condition... this function
 * gets called when we are approaching the limits of the compressor pool or
 * we are unable to create a new swap file.
 * Since this eventually creates a memory deadlock situation, we need to take action to free up
 * memory resources (both compressed and uncompressed) in order to prevent the system from hanging completely.
 * There are two categories of processes to deal with: those that have an action
 * associated with them by the task itself and those that do not.  Actionable
 * tasks can have one of three categories specified: ones that
 * can be killed immediately, ones that should be suspended, and ones that should
 * be throttled.  Processes that do not have an action associated with them are normally
 * ignored unless they are utilizing such a large percentage of the compressor pool (currently 50%)
 * that only by killing them can we hope to put the system back into a usable state.
 */
#define NO_PAGING_SPACE_DEBUG   0

extern uint64_t vm_compressor_pages_compressed(void);

struct timeval  last_no_space_action = {.tv_sec = 0, .tv_usec = 0};

#if DEVELOPMENT || DEBUG
extern boolean_t kill_on_no_paging_space;
#endif /* DEVELOPMENT || DEBUG */

#define MB_SIZE (1024 * 1024ULL)
boolean_t       memorystatus_kill_on_VM_compressor_space_shortage(boolean_t);

extern int32_t  max_kill_priority;
extern int      memorystatus_get_proccnt_upto_priority(int32_t max_bucket_index);
int
no_paging_space_action()
{
	proc_t          p;
	struct no_paging_space nps;
	struct timeval  now;
	os_reason_t kill_reason;

	/*
	 * Throttle how often we come through here.  Once every 5 seconds should be plenty.
	 */
	microtime(&now);

	if (now.tv_sec <= last_no_space_action.tv_sec + 5) {
		return 0;
	}

	/*
	 * Examine all processes and find the biggest (biggest is based on the number of pages this
	 * task has in the compressor pool) that has been marked to have some action
	 * taken when swap space runs out... we also find the biggest that hasn't been marked for
	 * action.
	 *
	 * If the biggest non-actionable task is over the "dangerously big" threshold (currently 50% of
	 * the total number of pages held by the compressor), we go ahead and kill it since no other task
	 * can have any real effect on the situation.  Otherwise, we go after the actionable process.
	 */
	bzero(&nps, sizeof(nps));

	proc_iterate(PROC_ALLPROCLIST, proc_pcontrol_null, (void *)NULL, proc_pcontrol_filter, (void *)&nps);

#if NO_PAGING_SPACE_DEBUG
	printf("low swap: npcs_proc_count = %d, npcs_total_size = %qd, npcs_max_size = %qd\n",
	    nps.npcs_proc_count, nps.npcs_total_size, nps.npcs_max_size);
	printf("low swap: pcs_proc_count = %d, pcs_total_size = %qd, pcs_max_size = %qd\n",
	    nps.pcs_proc_count, nps.pcs_total_size, nps.pcs_max_size);
	printf("low swap: apcs_proc_count = %d, apcs_total_size = %qd\n",
	    nps.apcs_proc_count, nps.apcs_total_size);
#endif
	if (nps.npcs_max_size > (vm_compressor_pages_compressed() * 50) / 100) {
		/*
		 * for now we'll knock out any task that has more than 50% of the pages
		 * held by the compressor
		 */
		if ((p = proc_find(nps.npcs_pid)) != PROC_NULL) {
			if (nps.npcs_uniqueid == p->p_uniqueid) {
				/*
				 * verify this is still the same process
				 * in case the proc exited and the pid got reused while
				 * we were finishing the proc_iterate and getting to this point
				 */
				last_no_space_action = now;

				printf("low swap: killing largest compressed process with pid %d (%s) and size %llu MB\n", p->p_pid, p->p_comm, (nps.npcs_max_size / MB_SIZE));
				kill_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_LOWSWAP);
				psignal_with_reason(p, SIGKILL, kill_reason);

				proc_rele(p);

				return 0;
			}

			proc_rele(p);
		}
	}

	/*
	 * We have some processes within our jetsam bands of consideration and hence can be killed.
	 * So we will invoke the memorystatus thread to go ahead and kill something.
	 */
	if (memorystatus_get_proccnt_upto_priority(max_kill_priority) > 0) {
		last_no_space_action = now;
		memorystatus_kill_on_VM_compressor_space_shortage(TRUE /* async */);
		return 1;
	}

	/*
	 * No eligible processes to kill. So let's suspend/kill the largest
	 * process depending on its policy control specifications.
	 */
	if (nps.pcs_max_size > 0) {
		if ((p = proc_find(nps.pcs_pid)) != PROC_NULL) {
			if (nps.pcs_uniqueid == p->p_uniqueid) {
				/*
				 * verify this is still the same process
				 * in case the proc exited and the pid got reused while
				 * we were finishing the proc_iterate and getting to this point
				 */
				last_no_space_action = now;

				proc_dopcontrol(p);

				proc_rele(p);

				return 1;
			}

			proc_rele(p);
		}
	}
	last_no_space_action = now;

	printf("low swap: unable to find any eligible processes to take action on\n");

	return 0;
}
int
proc_trace_log(__unused proc_t p, struct proc_trace_log_args *uap, __unused int *retval)
{
	int ret = 0;
	proc_t target_proc = PROC_NULL;
	pid_t target_pid = uap->pid;
	uint64_t target_uniqueid = uap->uniqueid;
	task_t target_task = NULL;

	if (priv_check_cred(kauth_cred_get(), PRIV_PROC_TRACE_INSPECT, 0)) {
		ret = EPERM;
		goto out;
	}
	target_proc = proc_find(target_pid);
	if (target_proc != PROC_NULL) {
		if (target_uniqueid != proc_uniqueid(target_proc)) {
			ret = ENOENT;
			goto out;
		}

		target_task = proc_task(target_proc);
		if (task_send_trace_memory(target_task, target_pid, target_uniqueid)) {
			ret = EINVAL;
			goto out;
		}
	} else {
		ret = ENOENT;
	}

out:
	if (target_proc != PROC_NULL) {
		proc_rele(target_proc);
	}
	return ret;
}
#if VM_SCAN_FOR_SHADOW_CHAIN
extern int vm_map_shadow_max(vm_map_t map);
int proc_shadow_max(void);
int
proc_shadow_max(void)
{
	int             retval, max;
	proc_t          p;
	task_t          task;
	vm_map_t        map;

	max = 0;
	proc_list_lock();
	for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
		if (p->p_stat == SIDL) {
			continue;
		}
		task = p->task;
		if (task == NULL) {
			continue;
		}
		map = get_task_map(task);
		if (map == NULL) {
			continue;
		}
		retval = vm_map_shadow_max(map);
		if (retval > max) {
			max = retval;
		}
	}
	proc_list_unlock();
	return max;
}
#endif /* VM_SCAN_FOR_SHADOW_CHAIN */
void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid);
void
proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid)
{
	if (target_proc != NULL) {
		target_proc->p_responsible_pid = responsible_pid;
	}
}
int
proc_chrooted(proc_t p)
{
	int retval = 0;

	if (p) {
		proc_fdlock(p);
		retval = (p->p_fd->fd_rdir != NULL) ? 1 : 0;
		proc_fdunlock(p);
	}

	return retval;
}
boolean_t
proc_send_synchronous_EXC_RESOURCE(proc_t p)
{
	if (p == PROC_NULL) {
		return FALSE;
	}

	/* Send sync EXC_RESOURCE if the process is traced */
	if (ISSET(p->p_lflag, P_LTRACED)) {
		return TRUE;
	}
	return FALSE;
}
size_t
proc_get_syscall_filter_mask_size(int which)
{
	if (which == SYSCALL_MASK_UNIX) {
		return nsysent;
	}

	return 0;
}
int
proc_set_syscall_filter_mask(proc_t p, int which, unsigned char *maskptr, size_t masklen)
{
#if DEVELOPMENT || DEBUG
	if (syscallfilter_disable) {
		printf("proc_set_syscall_filter_mask: attempt to set policy for pid %d, but disabled by boot-arg\n", proc_pid(p));
		return KERN_SUCCESS;
	}
#endif // DEVELOPMENT || DEBUG

	if (which != SYSCALL_MASK_UNIX ||
	    (maskptr != NULL && masklen != nsysent)) {
		return EINVAL;
	}

	p->syscall_filter_mask = maskptr;

	return KERN_SUCCESS;
}
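/*
 * Usage sketch (illustrative; the per-byte semantics are assumed here —
 * consult the syscall dispatch path for the authoritative meaning): the mask
 * carries one byte per sysent entry, and masklen must equal nsysent.
 *
 *	unsigned char *mask = ...;	// allocation of nsysent bytes elided
 *	memset(mask, 1, nsysent);
 *	mask[SYS_ptrace] = 0;		// filter one syscall (assumed: 0 = filtered)
 *	proc_set_syscall_filter_mask(p, SYSCALL_MASK_UNIX, mask, nsysent);
 */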
#ifdef CONFIG_32BIT_TELEMETRY
void
proc_log_32bit_telemetry(proc_t p)
{
	/* Gather info */
	char signature_buf[MAX_32BIT_EXEC_SIG_SIZE] = { 0 };
	char * signature_cur_end = &signature_buf[0];
	char * signature_buf_end = &signature_buf[MAX_32BIT_EXEC_SIG_SIZE - 1];
	int bytes_printed = 0;

	const char * teamid = NULL;
	const char * identity = NULL;
	struct cs_blob * csblob = NULL;

	proc_list_lock();

	/*
	 * Get proc name and parent proc name; if the parent execs, we'll get a
	 * garbled name.
	 */
	bytes_printed = snprintf(signature_cur_end,
	    signature_buf_end - signature_cur_end,
	    "%s,%s,", p->p_name,
	    (p->p_pptr ? p->p_pptr->p_name : ""));

	if (bytes_printed > 0) {
		signature_cur_end += bytes_printed;
	}

	proc_list_unlock();

	/* Get developer info. */
	vnode_t v = proc_getexecutablevnode(p);

	if (v) {
		csblob = csvnode_get_blob(v, 0);

		if (csblob) {
			teamid = csblob_get_teamid(csblob);
			identity = csblob_get_identity(csblob);
		}
	}

	if (teamid == NULL) {
		teamid = "";
	}

	if (identity == NULL) {
		identity = "";
	}

	bytes_printed = snprintf(signature_cur_end,
	    signature_buf_end - signature_cur_end,
	    "%s,%s", teamid, identity);

	if (bytes_printed > 0) {
		signature_cur_end += bytes_printed;
	}

	if (v) {
		vnode_put(v);
	}

	/*
	 * We may want to rate limit here, although the SUMMARIZE key should
	 * help us aggregate events in userspace.
	 */

	/* Emit log */
	kern_asl_msg(LOG_DEBUG, "messagetracer", 3,
	    /* 0 */ "com.apple.message.domain", "com.apple.kernel.32bit_exec",
	    /* 1 */ "com.apple.message.signature", signature_buf,
	    /* 2 */ "com.apple.message.summarize", "YES",
	    NULL);
}
#endif /* CONFIG_32BIT_TELEMETRY */