/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_proc.c	8.4 (Berkeley) 1/4/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/* HISTORY
 *  04-Aug-97  Umesh Vaishampayan (umeshv@apple.com)
 *	Added current_proc_EXTERNAL() function for the use of kernel
 *	drivers.
 *
 *  05-Jun-95  Mac Gillon (mgillon) at NeXT
 *	New version based on 3.3NS and 4.4
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc_internal.h>
#include <sys/file_internal.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/kauth.h>
#include <sys/codesign.h>
#include <sys/kernel_types.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/coalition.h>
#include <sys/coalition.h>
#include <kern/assert.h>
#include <vm/vm_protos.h>
#include <vm/vm_map.h>		/* vm_map_switch_protect() */
#include <vm/vm_pageout.h>
#include <mach/task.h>
#include <mach/message.h>
#include <sys/priv.h>
#include <sys/proc_info.h>
#include <sys/bsdtask_info.h>
#include <sys/persona.h>

#ifdef CONFIG_32BIT_TELEMETRY
#include <sys/kasl.h>
#endif /* CONFIG_32BIT_TELEMETRY */

#if CONFIG_MEMORYSTATUS
#include <sys/kern_memorystatus.h>
#endif

#include <security/mac_framework.h>

#include <libkern/crypto/sha1.h>

#ifdef CONFIG_32BIT_TELEMETRY
#define MAX_32BIT_EXEC_SIG_SIZE 160
#endif /* CONFIG_32BIT_TELEMETRY */
/*
 * Structure associated with user caching.  (The ui_uid and ui_proccnt
 * members are restored here because chgproccnt() below relies on them.)
 */
struct uidinfo {
	LIST_ENTRY(uidinfo) ui_hash;
	uid_t	ui_uid;
	long	ui_proccnt;
};
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
u_long	uihash;		/* size of hash table - 1 */
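#if 0	/* Illustrative sketch only -- not part of the original file. */
/*
 * How the uid hash is meant to be walked: UIHASH() masks the uid with
 * uihash (the table size minus one) to pick a bucket, and that bucket's
 * chain is searched for an exact uid match.  The helper name
 * uifind_example is hypothetical.
 */
static struct uidinfo *
uifind_example(uid_t uid)
{
	struct uidinfo *uip;

	for (uip = UIHASH(uid)->lh_first; uip != NULL; uip = uip->ui_hash.le_next) {
		if (uip->ui_uid == uid) {
			return uip;
		}
	}
	return NULL;
}
#endif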
/*
 * Other process lists
 */
struct pidhashhead *pidhashtbl;
u_long	pidhash;
struct pgrphashhead *pgrphashtbl;
u_long	pgrphash;
struct sesshashhead *sesshashtbl;
u_long	sesshash;

struct proclist allproc;
struct proclist zombproc;
extern struct tty cons;

#define __PROC_INTERNAL_DEBUG 1
/* Name to give to core files */
#if defined(XNU_TARGET_OS_BRIDGE)
__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/private/var/internal/%N.core"};
#elif CONFIG_EMBEDDED
__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/private/var/cores/%N.core"};
#else
__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/cores/core.%P"};
#endif
#include <kern/backtrace.h>

typedef uint64_t unaligned_u64 __attribute__((aligned(1)));

static void orphanpg(struct pgrp *pg);
void	proc_name_kdp(task_t t, char *buf, int size);
int	proc_threadname_kdp(void *uth, char *buf, size_t size);
void	proc_starttime_kdp(void *p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unaligned_u64 *abstime);
char	*proc_name_address(void *p);

static void pgrp_add(struct pgrp *pgrp, proc_t parent, proc_t child);
static void pgrp_remove(proc_t p);
static void pgrp_replace(proc_t p, struct pgrp *pgrp);
static void pgdelete_dropref(struct pgrp *pgrp);
extern void pg_rele_dropref(struct pgrp *pgrp);
static int csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaddittoken);
static boolean_t proc_parent_is_currentproc(proc_t p);

struct fixjob_iterargs {
	struct pgrp *pg;
	struct session *mysession;
	int entering;
};

int fixjob_callback(proc_t, void *);

uint64_t
get_current_unique_pid(void)
{
	proc_t	p = current_proc();

	return p->p_uniqueid;
}
/*
 * Initialize global process hashing structures.
 */
void
procinit(void)
{
	LIST_INIT(&allproc);
	LIST_INIT(&zombproc);
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
	sesshashtbl = hashinit(maxproc / 4, M_PROC, &sesshash);
	uihashtbl = hashinit(maxproc / 16, M_PROC, &uihash);

	personas_bootstrap();
}
/*
 * Change the count associated with the number of processes
 * a given user is using.  This routine protects the uihash
 * with the list lock.
 */
int
chgproccnt(uid_t uid, int diff)
{
	struct uidinfo *uip;
	struct uidinfo *newuip = NULL;
	struct uihashhead *uipp;
	int retval;

again:
	proc_list_lock();
	uipp = UIHASH(uid);
	for (uip = uipp->lh_first; uip != 0; uip = uip->ui_hash.le_next)
		if (uip->ui_uid == uid)
			break;
	if (uip) {
		uip->ui_proccnt += diff;
		if (uip->ui_proccnt > 0) {
			retval = uip->ui_proccnt;
			proc_list_unlock();
			goto out;
		}
		if (uip->ui_proccnt < 0)
			panic("chgproccnt: procs < 0");
		LIST_REMOVE(uip, ui_hash);
		retval = 0;
		proc_list_unlock();
		FREE_ZONE(uip, sizeof(*uip), M_PROC);
		goto out;
	}
	if (diff <= 0) {
		if (diff == 0) {
			retval = 0;
			proc_list_unlock();
			goto out;
		}
		panic("chgproccnt: lost user");
	}
	if (newuip != NULL) {
		uip = newuip;
		newuip = NULL;
		LIST_INSERT_HEAD(uipp, uip, ui_hash);
		uip->ui_uid = uid;
		uip->ui_proccnt = diff;
		retval = diff;
		proc_list_unlock();
		goto out;
	}
	proc_list_unlock();
	MALLOC_ZONE(newuip, struct uidinfo *, sizeof(*uip), M_PROC, M_WAITOK);
	if (newuip == NULL)
		panic("chgproccnt: M_PROC zone depleted");
	goto again;
out:
	if (newuip != NULL)
		FREE_ZONE(newuip, sizeof(*uip), M_PROC);
	return retval;
}
/*
 * Is p an inferior of the current process?
 */
int
inferior(proc_t p)
{
	int retval = 0;

	proc_list_lock();
	for (; p != current_proc(); p = p->p_pptr)
		if (p->p_pid == 0)
			goto out;
	retval = 1;
out:
	proc_list_unlock();
	return retval;
}

/*
 * Is p an inferior of t?
 */
int
isinferior(proc_t p, proc_t t)
{
	int retval = 0;
	int nchecked = 0;
	proc_t start = p;

	/* if p == t they are not inferior */
	if (p == t)
		return 0;

	proc_list_lock();
	for (; p != t; p = p->p_pptr) {
		nchecked++;

		/* Detect here if we're in a cycle */
		if ((p->p_pid == 0) || (p->p_pptr == start) || (nchecked >= nprocs))
			goto out;
	}
	retval = 1;
out:
	proc_list_unlock();
	return retval;
}
int
proc_isinferior(int pid1, int pid2)
{
	proc_t p = PROC_NULL;
	proc_t t = PROC_NULL;
	int retval = 0;

	if (((p = proc_find(pid1)) != (proc_t)0) && ((t = proc_find(pid2)) != (proc_t)0))
		retval = isinferior(p, t);

	if (t != PROC_NULL)
		proc_rele(t);
	if (p != PROC_NULL)
		proc_rele(p);

	return retval;
}

proc_t
proc_find(int pid)
{
	return proc_findinternal(pid, 0);
}
proc_t
proc_findinternal(int pid, int locked)
{
	proc_t p = PROC_NULL;

	if (locked == 0) {
		proc_list_lock();
	}

	p = pfind_locked(pid);
	if ((p == PROC_NULL) || (p != proc_ref_locked(p)))
		p = PROC_NULL;

	if (locked == 0) {
		proc_list_unlock();
	}

	return p;
}
proc_t
proc_findthread(thread_t thread)
{
	proc_t p = PROC_NULL;
	struct uthread *uth;

	proc_list_lock();
	uth = get_bsdthread_info(thread);
	if (uth && (uth->uu_flag & UT_VFORK))
		p = uth->uu_proc;
	else
		p = (proc_t)(get_bsdthreadtask_info(thread));
	p = proc_ref_locked(p);
	proc_list_unlock();
	return p;
}
void
uthread_reset_proc_refcount(void *uthread) {
	uthread_t uth;

	uth = (uthread_t) uthread;
	uth->uu_proc_refcount = 0;

#if PROC_REF_DEBUG
	if (proc_ref_tracking_disabled) {
		return;
	}

	uth->uu_pindex = 0;
#endif
}

#if PROC_REF_DEBUG
int
uthread_get_proc_refcount(void *uthread) {
	uthread_t uth;

	if (proc_ref_tracking_disabled) {
		return 0;
	}

	uth = (uthread_t) uthread;

	return uth->uu_proc_refcount;
}
#endif
static void
record_procref(proc_t p __unused, int count) {
	uthread_t uth;

	uth = current_uthread();
	uth->uu_proc_refcount += count;

#if PROC_REF_DEBUG
	if (proc_ref_tracking_disabled) {
		return;
	}

	if (uth->uu_pindex < NUM_PROC_REFS_TO_TRACK) {
		backtrace((uintptr_t *) &uth->uu_proc_pcs[uth->uu_pindex], PROC_REF_STACK_DEPTH);

		uth->uu_proc_ps[uth->uu_pindex] = p;
		uth->uu_pindex++;
	}
#endif
}
boolean_t
uthread_needs_to_wait_in_proc_refwait(void) {
	uthread_t uth = current_uthread();

	/*
	 * Allow threads holding no proc refs to wait in proc_refwait;
	 * allowing threads holding proc refs to wait there would cause
	 * deadlocks and make proc_find non-reentrant.
	 */
	if (uth->uu_proc_refcount == 0)
		return TRUE;

	return FALSE;
}

proc_t
proc_self(void)
{
	struct proc *p;

	p = current_proc();

	proc_list_lock();
	if (p != proc_ref_locked(p))
		p = PROC_NULL;
	proc_list_unlock();
	return p;
}
proc_t
proc_ref_locked(proc_t p)
{
	proc_t p1 = p;
	int pid = proc_pid(p);

retry:
	/*
	 * if process still in creation or proc got recycled
	 * during msleep then return failure.
	 */
	if ((p == PROC_NULL) || (p1 != p) || ((p->p_listflag & P_LIST_INCREATE) != 0))
		return (PROC_NULL);

	/*
	 * Do not return a process marked for termination,
	 * or one on which proc_refdrain was called without ref wait.
	 * Wait for proc_refdrain_with_refwait to complete if the
	 * process is in refdrain and the refwait flag is set, unless
	 * the current thread is already holding a proc ref.
	 */
	if ((p->p_stat != SZOMB) &&
	    ((p->p_listflag & P_LIST_EXITED) == 0) &&
	    ((p->p_listflag & P_LIST_DEAD) == 0) &&
	    (((p->p_listflag & (P_LIST_DRAIN | P_LIST_DRAINWAIT)) == 0) ||
	     ((p->p_listflag & P_LIST_REFWAIT) != 0))) {
		if ((p->p_listflag & P_LIST_REFWAIT) != 0 && uthread_needs_to_wait_in_proc_refwait()) {
			msleep(&p->p_listflag, proc_list_mlock, 0, "proc_refwait", 0);
			/*
			 * the proc might have been recycled since we dropped
			 * the proc list lock, get the proc again.
			 */
			p = pfind_locked(pid);
			goto retry;
		}
		p->p_refcount++;
		record_procref(p, 1);
	} else
		p1 = PROC_NULL;

	return (p1);
}
void
proc_rele_locked(proc_t p)
{
	if (p->p_refcount > 0) {
		p->p_refcount--;
		record_procref(p, -1);
		if ((p->p_refcount == 0) && ((p->p_listflag & P_LIST_DRAINWAIT) == P_LIST_DRAINWAIT)) {
			p->p_listflag &= ~P_LIST_DRAINWAIT;
			wakeup(&p->p_refcount);
		}
	} else
		panic("proc_rele_locked -ve ref\n");
}
proc_t
proc_find_zombref(int pid)
{
	proc_t p;

	proc_list_lock();

again:
	p = pfind_locked(pid);

	/* should we bail? */
	if ((p == PROC_NULL)					/* not found */
	    || ((p->p_listflag & P_LIST_INCREATE) != 0)		/* not created yet */
	    || ((p->p_listflag & P_LIST_EXITED) == 0)) {	/* not started exit */
		proc_list_unlock();
		return (PROC_NULL);
	}

	/* If someone else is controlling the (unreaped) zombie - wait */
	if ((p->p_listflag & P_LIST_WAITING) != 0) {
		(void)msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
		goto again;
	}
	p->p_listflag |= P_LIST_WAITING;

	proc_list_unlock();

	return p;
}
void
proc_drop_zombref(proc_t p)
{
	proc_list_lock();
	if ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
		p->p_listflag &= ~P_LIST_WAITING;
		wakeup(&p->p_stat);
	}
	proc_list_unlock();
}
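#if 0	/* Illustrative sketch only -- not part of the original file. */
/*
 * The zombie-ref protocol used by wait4()-style reapers: the
 * P_LIST_WAITING bit taken by proc_find_zombref() gives the caller
 * exclusive control of the unreaped zombie until proc_drop_zombref().
 * The helper name is hypothetical.
 */
static void
zombie_claim_example(pid_t pid)
{
	proc_t zomb;

	if ((zomb = proc_find_zombref(pid)) != PROC_NULL) {
		/* ... inspect or reap the zombie exclusively ... */
		proc_drop_zombref(zomb);	/* clears P_LIST_WAITING, wakes waiters */
	}
}
#endif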
void
proc_refdrain(proc_t p)
{
	proc_refdrain_with_refwait(p, FALSE);
}

proc_t
proc_refdrain_with_refwait(proc_t p, boolean_t get_ref_and_allow_wait)
{
	boolean_t initexec = FALSE;

	proc_list_lock();

	p->p_listflag |= P_LIST_DRAIN;
	if (get_ref_and_allow_wait) {
		/*
		 * All the calls to proc_ref_locked will wait
		 * for the flag to get cleared before returning a ref,
		 * unless the current thread is already holding a proc ref.
		 */
		p->p_listflag |= P_LIST_REFWAIT;
		if (p == initproc) {
			initexec = TRUE;
		}
	}

	/* Do not wait in ref drain for launchd exec */
	while (p->p_refcount && !initexec) {
		p->p_listflag |= P_LIST_DRAINWAIT;
		msleep(&p->p_refcount, proc_list_mlock, 0, "proc_refdrain", 0);
	}

	p->p_listflag &= ~P_LIST_DRAIN;
	if (!get_ref_and_allow_wait) {
		p->p_listflag |= P_LIST_DEAD;
	} else {
		/* Return a ref to the caller */
		p->p_refcount++;
		record_procref(p, 1);
	}

	proc_list_unlock();

	if (get_ref_and_allow_wait) {
		return p;
	}
	return NULL;
}

void
proc_refwake(proc_t p)
{
	proc_list_lock();
	p->p_listflag &= ~P_LIST_REFWAIT;
	wakeup(&p->p_listflag);
	proc_list_unlock();
}
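#if 0	/* Illustrative sketch only -- not part of the original file. */
/*
 * Drain/wake pairing used during exec-style teardown:
 * proc_refdrain_with_refwait() blocks new refs (P_LIST_REFWAIT) and
 * waits out existing ones; proc_refwake() must follow once the critical
 * section is over.  The helper name is hypothetical.
 */
static void
exec_drain_example(proc_t p)
{
	proc_t drained = proc_refdrain_with_refwait(p, TRUE);

	/* ... exclusive section; proc_ref_locked() callers wait here ... */

	proc_refwake(p);
	if (drained != PROC_NULL)
		proc_rele(drained);	/* drop the ref returned by the drain */
}
#endif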
int
proc_parentholdref(proc_t p)
{
	proc_t parent = PROC_NULL;
	proc_t pp;
	int loopcnt = 0;

	proc_list_lock();
loop:
	pp = p->p_pptr;
	if ((pp == PROC_NULL) || (pp->p_stat == SZOMB) ||
	    ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) ==
	     (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED))) {
		parent = PROC_NULL;
		goto out;
	}

	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == P_LIST_CHILDDRSTART) {
		pp->p_listflag |= P_LIST_CHILDDRWAIT;
		msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
		loopcnt++;
		if (loopcnt == 5) {
			parent = PROC_NULL;
			goto out;
		}
		goto loop;
	}

	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == 0) {
		pp->p_parentref++;
		parent = pp;
	}

out:
	proc_list_unlock();
	return (parent != PROC_NULL);
}
void
proc_parentdropref(proc_t p, int listlocked)
{
	if (listlocked == 0)
		proc_list_lock();

	if (p->p_parentref > 0) {
		p->p_parentref--;
		if ((p->p_parentref == 0) && ((p->p_listflag & P_LIST_PARENTREFWAIT) == P_LIST_PARENTREFWAIT)) {
			p->p_listflag &= ~P_LIST_PARENTREFWAIT;
			wakeup(&p->p_parentref);
		}
	} else
		panic("proc_parentdropref -ve ref\n");

	if (listlocked == 0)
		proc_list_unlock();
}
void
proc_childdrainstart(proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_CHILDDRSTART) == P_LIST_CHILDDRSTART)
		panic("proc_childdrainstart: childdrain already started\n");
#endif
	p->p_listflag |= P_LIST_CHILDDRSTART;
	/* wait for all that hold parentrefs to drop */
	while (p->p_parentref > 0) {
		p->p_listflag |= P_LIST_PARENTREFWAIT;
		msleep(&p->p_parentref, proc_list_mlock, 0, "proc_childdrainstart", 0);
	}
}
void
proc_childdrainend(proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if (p->p_childrencnt > 0)
		panic("exiting: children still hanging around\n");
#endif
	p->p_listflag |= P_LIST_CHILDDRAINED;
	if ((p->p_listflag & (P_LIST_CHILDLKWAIT | P_LIST_CHILDDRWAIT)) != 0) {
		p->p_listflag &= ~(P_LIST_CHILDLKWAIT | P_LIST_CHILDDRWAIT);
		wakeup(&p->p_childrencnt);
	}
}
void
proc_checkdeadrefs(__unused proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INHASH) != 0)
		panic("proc being freed and still in hash %p: %u\n", p, p->p_listflag);
	if (p->p_childrencnt != 0)
		panic("proc being freed and pending children cnt %p:%d\n", p, p->p_childrencnt);
	if (p->p_refcount != 0)
		panic("proc being freed and pending refcount %p:%d\n", p, p->p_refcount);
	if (p->p_parentref != 0)
		panic("proc being freed and pending parentrefs %p:%d\n", p, p->p_parentref);
#endif
}
int
proc_selfpid(void)
{
	return current_proc()->p_pid;
}

int
proc_selfppid(void)
{
	return current_proc()->p_ppid;
}

uint64_t
proc_selfcsflags(void)
{
	return current_proc()->p_csflags;
}
#if CONFIG_DTRACE
static proc_t
dtrace_current_proc_vforking(void)
{
	thread_t th = current_thread();
	struct uthread *ut = get_bsdthread_info(th);

	if (ut &&
	    ((ut->uu_flag & (UT_VFORK | UT_VFORKING)) == (UT_VFORK | UT_VFORKING))) {
		/*
		 * Handle the narrow window where we're in the vfork syscall,
		 * but we're not quite ready to claim (in particular, to DTrace)
		 * that we're running as the child.
		 */
		return (get_bsdtask_info(get_threadtask(th)));
	}
	return (current_proc());
}

int
dtrace_proc_selfpid(void)
{
	return (dtrace_current_proc_vforking()->p_pid);
}

int
dtrace_proc_selfppid(void)
{
	return (dtrace_current_proc_vforking()->p_ppid);
}

uid_t
dtrace_proc_selfruid(void)
{
	return (dtrace_current_proc_vforking()->p_ruid);
}
#endif /* CONFIG_DTRACE */
proc_t
proc_parent(proc_t p)
{
	proc_t parent;
	proc_t pp;

	proc_list_lock();
loop:
	pp = p->p_pptr;
	parent = proc_ref_locked(pp);
	if ((parent == PROC_NULL) && (pp != PROC_NULL) && (pp->p_stat != SZOMB) &&
	    ((pp->p_listflag & P_LIST_EXITED) != 0) &&
	    ((pp->p_listflag & P_LIST_CHILDDRAINED) == 0)) {
		pp->p_listflag |= P_LIST_CHILDLKWAIT;
		msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
		goto loop;
	}
	proc_list_unlock();
	return parent;
}
static boolean_t
proc_parent_is_currentproc(proc_t p)
{
	boolean_t ret = FALSE;

	proc_list_lock();
	if (p->p_pptr == current_proc())
		ret = TRUE;
	proc_list_unlock();
	return ret;
}
void
proc_name(int pid, char *buf, int size)
{
	proc_t p;

	if ((p = proc_find(pid)) != PROC_NULL) {
		strlcpy(buf, &p->p_comm[0], size);
		proc_rele(p);
	}
}
void
proc_name_kdp(task_t t, char *buf, int size)
{
	proc_t p = get_bsdtask_info(t);
	if (p == PROC_NULL)
		return;

	if ((size_t)size > sizeof(p->p_comm))
		strlcpy(buf, &p->p_name[0], MIN((int)sizeof(p->p_name), size));
	else
		strlcpy(buf, &p->p_comm[0], MIN((int)sizeof(p->p_comm), size));
}
int
proc_threadname_kdp(void *uth, char *buf, size_t size)
{
	if (size < MAXTHREADNAMESIZE) {
		/* this is really just a protective measure for the future in
		 * case the thread name size in stackshot gets out of sync with
		 * the BSD max thread name size.  Note that bsd_getthreadname
		 * doesn't take input buffer size into account. */
		return -1;
	}

	if (uth != NULL) {
		bsd_getthreadname(uth, buf);
	}
	return 0;
}
/* note that this function is generally going to be called from stackshot,
 * and the arguments will be coming from a struct which is declared packed;
 * thus the input arguments will in general be unaligned.  We have to handle
 * that here. */
void
proc_starttime_kdp(void *p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unaligned_u64 *abstime)
{
	proc_t pp = (proc_t)p;
	if (pp != PROC_NULL) {
		if (tv_sec != NULL)
			*tv_sec = pp->p_start.tv_sec;
		if (tv_usec != NULL)
			*tv_usec = pp->p_start.tv_usec;
		if (abstime != NULL) {
			if (pp->p_stats != NULL)
				*abstime = pp->p_stats->ps_start;
			else
				*abstime = 0;
		}
	}
}
char *
proc_name_address(void *p)
{
	return &((proc_t)p)->p_comm[0];
}

char *
proc_best_name(proc_t p)
{
	if (p->p_name[0] != 0)
		return &p->p_name[0];
	return &p->p_comm[0];
}
void
proc_selfname(char *buf, int size)
{
	proc_t p;

	if ((p = current_proc()) != (proc_t)0) {
		strlcpy(buf, &p->p_comm[0], size);
	}
}
void
proc_signal(int pid, int signum)
{
	proc_t p;

	if ((p = proc_find(pid)) != PROC_NULL) {
		psignal(p, signum);
		proc_rele(p);
	}
}

int
proc_issignal(int pid, sigset_t mask)
{
	proc_t p;
	int error = 0;

	if ((p = proc_find(pid)) != PROC_NULL) {
		error = proc_pendingsignals(p, mask);
		proc_rele(p);
	}
	return error;
}
int
proc_noremotehang(proc_t p)
{
	int retval = 0;

	if (p)
		retval = p->p_flag & P_NOREMOTEHANG;
	return (retval ? 1 : 0);
}

int
proc_exiting(proc_t p)
{
	int retval = 0;

	if (p)
		retval = p->p_lflag & P_LEXIT;
	return (retval ? 1 : 0);
}

int
proc_in_teardown(proc_t p)
{
	int retval = 0;

	if (p)
		retval = p->p_lflag & P_LPEXIT;
	return (retval ? 1 : 0);
}

int
proc_forcequota(proc_t p)
{
	int retval = 0;

	if (p)
		retval = p->p_flag & P_FORCEQUOTA;
	return (retval ? 1 : 0);
}
int
proc_suser(proc_t p)
{
	kauth_cred_t my_cred;
	int error;

	my_cred = kauth_cred_proc_ref(p);
	error = suser(my_cred, &p->p_acflag);
	kauth_cred_unref(&my_cred);
	return error;
}
task_t
proc_task(proc_t proc)
{
	return (task_t)proc->task;
}

/*
 * Obtain the first thread in a process
 *
 * XXX This is a bad thing to do; it exists predominantly to support the
 * XXX use of proc_t's in places that should really be using
 * XXX thread_t's instead.  This maintains historical behaviour, but really
 * XXX needs an audit of the context (proxy vs. not) to clean up.
 */
thread_t
proc_thread(proc_t proc)
{
	uthread_t uth = TAILQ_FIRST(&proc->p_uthlist);

	if (uth != NULL)
		return uth->uu_context.vc_thread;

	return NULL;
}
kauth_cred_t
proc_ucred(proc_t p)
{
	return p->p_ucred;
}

struct uthread *
current_uthread(void)
{
	thread_t th = current_thread();

	return (struct uthread *)get_bsdthread_info(th);
}
int
proc_is64bit(proc_t p)
{
	return IS_64BIT_PROCESS(p);
}

int
proc_is64bit_data(proc_t p)
{
	assert(p->task);
	return (int)task_get_64bit_data(p->task);
}

uint32_t
proc_pidversion(proc_t p)
{
	return p->p_idversion;
}

uint32_t
proc_persona_id(proc_t p)
{
	return (uint32_t)persona_id_from_proc(p);
}
uint32_t
proc_getuid(proc_t p)
{
	return p->p_uid;
}

uint32_t
proc_getgid(proc_t p)
{
	return p->p_gid;
}

uint64_t
proc_uniqueid(proc_t p)
{
	return p->p_uniqueid;
}

uint64_t
proc_puniqueid(proc_t p)
{
	return p->p_puniqueid;
}
void
proc_coalitionids(__unused proc_t p, __unused uint64_t ids[COALITION_NUM_TYPES])
{
#if CONFIG_COALITIONS
	task_coalition_ids(p->task, ids);
#else
	memset(ids, 0, sizeof(uint64_t [COALITION_NUM_TYPES]));
#endif
	return;
}
uint64_t
proc_was_throttled(proc_t p)
{
	return p->was_throttled;
}

uint64_t
proc_did_throttle(proc_t p)
{
	return p->did_throttle;
}
int
proc_getcdhash(proc_t p, unsigned char *cdhash)
{
	return vn_getcdhash(p->p_textvp, p->p_textoff, cdhash);
}
void
proc_getexecutableuuid(proc_t p, unsigned char *uuidbuf, unsigned long size)
{
	if (size >= sizeof(p->p_uuid)) {
		memcpy(uuidbuf, p->p_uuid, sizeof(p->p_uuid));
	}
}
/* Return vnode for executable with an iocount.  Must be released with vnode_put(). */
vnode_t
proc_getexecutablevnode(proc_t p)
{
	vnode_t tvp = p->p_textvp;

	if (tvp != NULLVP) {
		if (vnode_getwithref(tvp) == 0) {
			return tvp;
		}
	}

	return NULLVP;
}
void
bsd_set_dependency_capable(task_t task)
{
	proc_t p = get_bsdtask_info(task);

	if (p) {
		OSBitOrAtomic(P_DEPENDENCY_CAPABLE, &p->p_flag);
	}
}
int
IS_64BIT_PROCESS(proc_t p)
{
	if (p && (p->p_flag & P_LP64))
		return 1;
	else
		return 0;
}
/*
 * Locate a process by number
 */
proc_t
pfind_locked(pid_t pid)
{
	proc_t p;
#if DEBUG
	proc_t q;
#endif

	if (!pid)
		return kernproc;

	for (p = PIDHASH(pid)->lh_first; p != 0; p = p->p_hash.le_next) {
		if (p->p_pid == pid) {
#if DEBUG
			for (q = p->p_hash.le_next; q != 0; q = q->p_hash.le_next) {
				if ((p != q) && (q->p_pid == pid))
					panic("two procs with same pid %p:%p:%d:%d\n", p, q, p->p_pid, q->p_pid);
			}
#endif
			return p;
		}
	}
	return NULL;
}
/*
 * Locate a zombie by PID
 */
__private_extern__ proc_t
pzfind(pid_t pid)
{
	proc_t p;

	proc_list_lock();

	for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next)
		if (p->p_pid == pid)
			break;

	proc_list_unlock();

	return p;
}
/*
 * Locate a process group by number
 */
struct pgrp *
pgfind(pid_t pgid)
{
	struct pgrp *pgrp;

	proc_list_lock();
	pgrp = pgfind_internal(pgid);
	if ((pgrp == NULL) || ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) != 0))
		pgrp = PGRP_NULL;
	else
		pgrp->pg_refcount++;
	proc_list_unlock();
	return pgrp;
}
struct pgrp *
pgfind_internal(pid_t pgid)
{
	struct pgrp *pgrp;

	for (pgrp = PGRPHASH(pgid)->lh_first; pgrp != 0; pgrp = pgrp->pg_hash.le_next)
		if (pgrp->pg_id == pgid)
			return pgrp;
	return NULL;
}
void
pg_rele(struct pgrp *pgrp)
{
	if (pgrp == PGRP_NULL)
		return;
	pg_rele_dropref(pgrp);
}

void
pg_rele_dropref(struct pgrp *pgrp)
{
	proc_list_lock();
	if ((pgrp->pg_refcount == 1) && ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) == PGRP_FLAG_TERMINATE)) {
		proc_list_unlock();
		pgdelete_dropref(pgrp);
		return;
	}

	pgrp->pg_refcount--;
	proc_list_unlock();
}
struct session *
session_find_internal(pid_t sessid)
{
	struct session *sess;

	for (sess = SESSHASH(sessid)->lh_first; sess != 0; sess = sess->s_hash.le_next)
		if (sess->s_sid == sessid)
			return sess;
	return NULL;
}
/*
 * Make a new process ready to become a useful member of society by making it
 * visible in all the right places and initializing its own lists to empty.
 *
 * Parameters:	parent			The parent of the process to insert
 *		child			The child process to insert
 *
 * Returns:	(void)
 *
 * Notes:	Insert a child process into the parent's process group, assign
 *		the child the parent process pointer and PPID of the parent,
 *		place it on the parent's p_children list as a sibling,
 *		initialize its own child list, place it in the allproc list,
 *		insert it in the proper hash bucket, and initialize its
 *		event list.
 */
void
pinsertchild(proc_t parent, proc_t child)
{
	struct pgrp *pg;

	LIST_INIT(&child->p_children);
	TAILQ_INIT(&child->p_evlist);
	child->p_pptr = parent;
	child->p_ppid = parent->p_pid;
	child->p_puniqueid = parent->p_uniqueid;
	child->p_xhighbits = 0;

	pg = proc_pgrp(parent);
	pgrp_add(pg, parent, child);
	pg_rele(pg);

	proc_list_lock();

#if CONFIG_MEMORYSTATUS
	memorystatus_add(child, TRUE);
#endif

	parent->p_childrencnt++;
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);

	LIST_INSERT_HEAD(&allproc, child, p_list);
	/* mark the completion of proc creation */
	child->p_listflag &= ~P_LIST_INCREATE;

	proc_list_unlock();
}
/*
 * Move p to a new or existing process group (and session)
 *
 * Returns:	0			Success
 *		ESRCH			No such process
 */
int
enterpgrp(proc_t p, pid_t pgid, int mksess)
{
	struct pgrp *pgrp;
	struct pgrp *mypgrp;
	struct session *procsp;

	pgrp = pgfind(pgid);
	mypgrp = proc_pgrp(p);
	procsp = proc_session(p);

#if DIAGNOSTIC
	if (pgrp != NULL && mksess)	/* firewalls */
		panic("enterpgrp: setsid into non-empty pgrp");
	if (SESS_LEADER(p, procsp))
		panic("enterpgrp: session leader attempted setpgrp");
#endif
	if (pgrp == PGRP_NULL) {
		pid_t savepid = p->p_pid;
		proc_t np = PROC_NULL;
		/*
		 * new process group
		 */
#if DIAGNOSTIC
		if (p->p_pid != pgid)
			panic("enterpgrp: new pgrp and pid != pgid");
#endif
		MALLOC_ZONE(pgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP,
		    M_WAITOK);
		if (pgrp == NULL)
			panic("enterpgrp: M_PGRP zone depleted");
		if ((np = proc_find(savepid)) == NULL || np != p) {
			if (np != PROC_NULL)
				proc_rele(np);
			if (mypgrp != PGRP_NULL)
				pg_rele(mypgrp);
			if (procsp != SESSION_NULL)
				session_rele(procsp);
			FREE_ZONE(pgrp, sizeof(struct pgrp), M_PGRP);
			return ESRCH;
		}
		proc_rele(np);
		if (mksess) {
			struct session *sess;

			/*
			 * new session
			 */
			MALLOC_ZONE(sess, struct session *,
			    sizeof(struct session), M_SESSION, M_WAITOK);
			if (sess == NULL)
				panic("enterpgrp: M_SESSION zone depleted");
			sess->s_leader = p;
			sess->s_sid = p->p_pid;
			sess->s_count = 1;
			sess->s_ttyvp = NULL;
			sess->s_ttyp = TTY_NULL;
			sess->s_flags = 0;
			sess->s_listflags = 0;
			sess->s_ttypgrpid = NO_PID;
#if CONFIG_FINE_LOCK_GROUPS
			lck_mtx_init(&sess->s_mlock, proc_mlock_grp, proc_lck_attr);
#else
			lck_mtx_init(&sess->s_mlock, proc_lck_grp, proc_lck_attr);
#endif
			bcopy(procsp->s_login, sess->s_login,
			    sizeof(sess->s_login));
			OSBitAndAtomic(~((uint32_t)P_CONTROLT), &p->p_flag);
			proc_list_lock();
			LIST_INSERT_HEAD(SESSHASH(sess->s_sid), sess, s_hash);
			proc_list_unlock();
			pgrp->pg_session = sess;
#if DIAGNOSTIC
			if (p != current_proc())
				panic("enterpgrp: mksession and p != curproc");
#endif
		} else {
			proc_list_lock();
			pgrp->pg_session = procsp;

			if ((pgrp->pg_session->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
				panic("enterpgrp: providing ref to terminating session");
			pgrp->pg_session->s_count++;
			proc_list_unlock();
		}
		pgrp->pg_id = pgid;
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_init(&pgrp->pg_mlock, proc_mlock_grp, proc_lck_attr);
#else
		lck_mtx_init(&pgrp->pg_mlock, proc_lck_grp, proc_lck_attr);
#endif
		LIST_INIT(&pgrp->pg_members);
		pgrp->pg_membercnt = 0;
		pgrp->pg_jobc = 0;
		proc_list_lock();
		pgrp->pg_refcount = 1;
		pgrp->pg_listflags = 0;
		LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
		proc_list_unlock();
	} else if (pgrp == mypgrp) {
		pg_rele(pgrp);
		if (mypgrp != NULL)
			pg_rele(mypgrp);
		if (procsp != SESSION_NULL)
			session_rele(procsp);
		return 0;
	}

	if (procsp != SESSION_NULL)
		session_rele(procsp);
	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	fixjobc(p, mypgrp, 0);

	if (mypgrp != PGRP_NULL)
		pg_rele(mypgrp);
	pgrp_replace(p, pgrp);
	pg_rele(pgrp);

	return 0;
}
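#if 0	/* Illustrative sketch only -- not part of the original file. */
/*
 * How the setsid()/setpgid() syscalls map onto enterpgrp(): setsid()
 * moves the caller into a brand-new group and session keyed by its own
 * pid, while setpgid() joins (or creates) a group within the current
 * session.  The helper name is hypothetical.
 */
static int
setsid_core_example(proc_t p)
{
	/* new pgrp *and* new session; pgid must equal p->p_pid */
	return enterpgrp(p, p->p_pid, 1 /* mksess */);
}
#endif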
/*
 * remove process from process group
 */
int
leavepgrp(proc_t p)
{
	pgrp_remove(p);
	return 0;
}

/*
 * delete a process group
 */
static void
pgdelete_dropref(struct pgrp *pgrp)
{
	struct tty *ttyp;
	int emptypgrp = 1;
	struct session *sessp;

	pgrp_lock(pgrp);
	if (pgrp->pg_membercnt != 0) {
		emptypgrp = 0;
	}
	pgrp_unlock(pgrp);

	proc_list_lock();
	pgrp->pg_refcount--;
	if ((emptypgrp == 0) || (pgrp->pg_membercnt != 0)) {
		proc_list_unlock();
		return;
	}

	pgrp->pg_listflags |= PGRP_FLAG_TERMINATE;

	if (pgrp->pg_refcount > 0) {
		proc_list_unlock();
		return;
	}

	pgrp->pg_listflags |= PGRP_FLAG_DEAD;
	LIST_REMOVE(pgrp, pg_hash);

	proc_list_unlock();

	ttyp = SESSION_TP(pgrp->pg_session);
	if (ttyp != TTY_NULL) {
		if (ttyp->t_pgrp == pgrp) {
			tty_lock(ttyp);
			/* Re-check after acquiring the lock */
			if (ttyp->t_pgrp == pgrp) {
				ttyp->t_pgrp = NULL;
				pgrp->pg_session->s_ttypgrpid = NO_PID;
			}
			tty_unlock(ttyp);
		}
	}

	proc_list_lock();

	sessp = pgrp->pg_session;
	if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
		panic("pg_deleteref: manipulating refs of already terminating session");
	if (--sessp->s_count == 0) {
		if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
			panic("pg_deleteref: terminating already terminated session");
		sessp->s_listflags |= S_LIST_TERM;
		ttyp = SESSION_TP(sessp);
		LIST_REMOVE(sessp, s_hash);
		proc_list_unlock();
		if (ttyp != TTY_NULL) {
			tty_lock(ttyp);
			if (ttyp->t_session == sessp)
				ttyp->t_session = NULL;
			tty_unlock(ttyp);
		}
		proc_list_lock();
		sessp->s_listflags |= S_LIST_DEAD;
		if (sessp->s_count != 0)
			panic("pg_deleteref: freeing session in use");
		proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_destroy(&sessp->s_mlock, proc_mlock_grp);
#else
		lck_mtx_destroy(&sessp->s_mlock, proc_lck_grp);
#endif
		FREE_ZONE(sessp, sizeof(struct session), M_SESSION);
	} else
		proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
	lck_mtx_destroy(&pgrp->pg_mlock, proc_mlock_grp);
#else
	lck_mtx_destroy(&pgrp->pg_mlock, proc_lck_grp);
#endif
	FREE_ZONE(pgrp, sizeof(*pgrp), M_PGRP);
}
/*
 * Adjust pgrp jobc counters when the specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 */
int
fixjob_callback(proc_t p, void *arg)
{
	struct fixjob_iterargs *fp;
	struct pgrp *pg, *hispg;
	struct session *mysession, *hissess;
	int entering;

	fp = (struct fixjob_iterargs *)arg;
	pg = fp->pg;
	mysession = fp->mysession;
	entering = fp->entering;

	hispg = proc_pgrp(p);
	hissess = proc_session(p);

	if ((hispg != pg) &&
	    (hissess == mysession)) {
		pgrp_lock(hispg);
		if (entering) {
			hispg->pg_jobc++;
			pgrp_unlock(hispg);
		} else if (--hispg->pg_jobc == 0) {
			pgrp_unlock(hispg);
			orphanpg(hispg);
		} else
			pgrp_unlock(hispg);
	}
	if (hissess != SESSION_NULL)
		session_rele(hissess);
	if (hispg != PGRP_NULL)
		pg_rele(hispg);

	return (PROC_RETURNED);
}
void
fixjobc(proc_t p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp = PGRP_NULL;
	struct session *hissess = SESSION_NULL;
	struct session *mysession = pgrp->pg_session;
	proc_t parent;
	struct fixjob_iterargs fjarg;
	boolean_t proc_parent_self;

	/*
	 * Check if p's parent is the current proc; if so there is no need to
	 * take a ref, and calling proc_parent with the current proc as parent
	 * could deadlock if the current proc is exiting.
	 */
	proc_parent_self = proc_parent_is_currentproc(p);
	if (proc_parent_self)
		parent = current_proc();
	else
		parent = proc_parent(p);

	if (parent != PROC_NULL) {
		hispgrp = proc_pgrp(parent);
		hissess = proc_session(parent);
		if (!proc_parent_self)
			proc_rele(parent);
	}

	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	if ((hispgrp != pgrp) &&
	    (hissess == mysession)) {
		pgrp_lock(pgrp);
		if (entering) {
			pgrp->pg_jobc++;
			pgrp_unlock(pgrp);
		} else if (--pgrp->pg_jobc == 0) {
			pgrp_unlock(pgrp);
			orphanpg(pgrp);
		} else
			pgrp_unlock(pgrp);
	}

	if (hissess != SESSION_NULL)
		session_rele(hissess);
	if (hispgrp != PGRP_NULL)
		pg_rele(hispgrp);

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	fjarg.pg = pgrp;
	fjarg.mysession = mysession;
	fjarg.entering = entering;
	proc_childrenwalk(p, fixjob_callback, &fjarg);
}
/*
 * A process group has become orphaned; if there are any stopped processes
 * in the group, hang up all processes in that group.
 */
static void
orphanpg(struct pgrp *pgrp)
{
	pid_t *pid_list;
	proc_t p;
	vm_size_t pid_list_size = 0;
	vm_size_t pid_list_size_needed = 0;
	int pid_count = 0;
	int pid_count_available = 0;

	assert(pgrp != NULL);

	/* allocate outside of the pgrp_lock */
	for (;;) {
		pgrp_lock(pgrp);

		boolean_t should_iterate = FALSE;
		pid_count_available = 0;

		PGMEMBERS_FOREACH(pgrp, p) {
			pid_count_available++;

			if (p->p_stat == SSTOP) {
				should_iterate = TRUE;
			}
		}

		if (pid_count_available == 0 || !should_iterate) {
			pgrp_unlock(pgrp);
			return;
		}

		pid_list_size_needed = pid_count_available * sizeof(pid_t);
		if (pid_list_size >= pid_list_size_needed) {
			break;
		}
		pgrp_unlock(pgrp);

		if (pid_list_size != 0) {
			kfree(pid_list, pid_list_size);
		}
		pid_list = kalloc(pid_list_size_needed);
		if (!pid_list) {
			return;
		}
		pid_list_size = pid_list_size_needed;
	}

	/* no orphaned processes */
	if (pid_list_size == 0) {
		pgrp_unlock(pgrp);
		return;
	}

	PGMEMBERS_FOREACH(pgrp, p) {
		pid_list[pid_count++] = proc_pid(p);
		if (pid_count >= pid_count_available) {
			break;
		}
	}
	pgrp_unlock(pgrp);

	if (pid_count == 0) {
		goto out;
	}

	for (int i = 0; i < pid_count; i++) {
		/* do not handle kernproc */
		if (pid_list[i] == 0) {
			continue;
		}
		p = proc_find(pid_list[i]);
		if (!p) {
			continue;
		}

		proc_transwait(p, 0);
		psignal(p, SIGHUP);
		psignal(p, SIGCONT);
		proc_rele(p);
	}

out:
	kfree(pid_list, pid_list_size);
	return;
}
int
proc_is_classic(proc_t p __unused)
{
	return 0;
}

/* XXX Why does this function exist?  Need to kill it off... */
proc_t
current_proc_EXTERNAL(void)
{
	return (current_proc());
}

int
proc_is_forcing_hfs_case_sensitivity(proc_t p)
{
	return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY) ? 1 : 0;
}
#if CONFIG_COREDUMP
/*
 * proc_core_name(name, uid, pid)
 * Expand the name described in corefilename, using name, uid, and pid.
 * corefilename is a printf-like string, with three format specifiers:
 *	%N	name of process ("name")
 *	%P	process id (pid)
 *	%U	user id (uid)
 * For example, "%N.core" is the default; core files can be disabled
 * completely by using "/dev/null", or all core files can be stored in
 * "/cores/%U/%N-%P".
 * This is controlled by the sysctl variable kern.corefile (see above).
 */
__private_extern__ int
proc_core_name(const char *name, uid_t uid, pid_t pid, char *cf_name,
    size_t cf_name_len)
{
	const char *format, *appendstr;
	char id_buf[11];		/* Buffer for pid/uid -- max 4B */
	size_t i, l, n;

	if (cf_name == NULL)
		goto toolong;

	format = corefilename;
	for (i = 0, n = 0; n < cf_name_len && format[i]; i++) {
		switch (format[i]) {
		case '%':	/* Format character */
			i++;
			switch (format[i]) {
			case '%':
				appendstr = "%";
				break;
			case 'N':	/* process name */
				appendstr = name;
				break;
			case 'P':	/* process id */
				snprintf(id_buf, sizeof(id_buf), "%u", pid);
				appendstr = id_buf;
				break;
			case 'U':	/* user id */
				snprintf(id_buf, sizeof(id_buf), "%u", uid);
				appendstr = id_buf;
				break;
			case '\0':	/* format string ended in % symbol */
				goto endofstring;
			default:
				appendstr = "";
				log(LOG_ERR,
				    "Unknown format character %c in `%s'\n",
				    format[i], format);
			}
			l = strlen(appendstr);
			if ((n + l) >= cf_name_len)
				goto toolong;
			bcopy(appendstr, cf_name + n, l);
			n += l;
			break;
		default:
			cf_name[n++] = format[i];
		}
	}
	if (format[i] != '\0')
		goto toolong;
	return 0;
toolong:
	log(LOG_ERR, "pid %ld (%s), uid (%u): corename is too long\n",
	    (long)pid, name, (uint32_t)uid);
	return 1;
endofstring:
	log(LOG_ERR, "pid %ld (%s), uid (%u): unexpected end of string after %% token\n",
	    (long)pid, name, (uint32_t)uid);
	return 1;
}
#endif /* CONFIG_COREDUMP */
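#if 0	/* Illustrative sketch only -- not part of the original file. */
/*
 * Expansion example for the templates documented above: with the default
 * "/cores/core.%P", a process named "launchd" with uid 0 and pid 1 yields
 * "/cores/core.1"; a template of "/cores/%U/%N-%P" would yield
 * "/cores/0/launchd-1".  The helper name is hypothetical.
 */
static void
proc_core_name_example(void)
{
	char cf_name[MAXPATHLEN];

	if (proc_core_name("launchd", 0, 1, cf_name, sizeof(cf_name)) == 0) {
		printf("core file would be written to %s\n", cf_name);
	}
}
#endif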
/* Code Signing related routines */

int
csops(__unused proc_t p, struct csops_args *uap, __unused int32_t *retval)
{
	return csops_internal(uap->pid, uap->ops, uap->useraddr,
	    uap->usersize, USER_ADDR_NULL);
}

int
csops_audittoken(__unused proc_t p, struct csops_audittoken_args *uap, __unused int32_t *retval)
{
	if (uap->uaudittoken == USER_ADDR_NULL)
		return EINVAL;
	return csops_internal(uap->pid, uap->ops, uap->useraddr,
	    uap->usersize, uap->uaudittoken);
}
static int
csops_copy_token(void *start, size_t length, user_size_t usize, user_addr_t uaddr)
{
	char fakeheader[8] = { 0 };
	int error;

	if (usize < sizeof(fakeheader))
		return ERANGE;

	/* if no blob, fill in zero header */
	if (NULL == start) {
		start = fakeheader;
		length = sizeof(fakeheader);
	} else if (usize < length) {
		/* ... if input too short, copy out length of entitlement */
		uint32_t length32 = htonl((uint32_t)length);
		memcpy(&fakeheader[4], &length32, sizeof(length32));

		error = copyout(fakeheader, uaddr, sizeof(fakeheader));
		if (error == 0)
			return ERANGE;	/* input buffer too short, ERANGE signals that */
		return error;
	}
	return copyout(start, uaddr, length);
}
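#if 0	/* Illustrative userspace sketch only -- not part of the original file. */
#include <sys/codesign.h>	/* csops(2) wrapper, CS_OPS_* constants */
#include <arpa/inet.h>		/* ntohl() */
#include <stdlib.h>

/*
 * The fake-header protocol above supports a two-phase userspace query:
 * a deliberately short first call fails with ERANGE but copies out an
 * 8-byte header whose second word is the real blob length, so a second
 * call with a right-sized buffer succeeds.  The helper name is
 * hypothetical, and the csops() userspace prototype is assumed from
 * <sys/codesign.h>.
 */
static void *
copy_entitlements_blob(pid_t pid, size_t *lenp)
{
	uint32_t header[2] = { 0, 0 };
	void *blob;

	if (csops(pid, CS_OPS_ENTITLEMENTS_BLOB, header, sizeof(header)) == 0)
		return NULL;	/* no blob: a zero header was copied out */
	*lenp = ntohl(header[1]);	/* advertised total length */
	if ((blob = malloc(*lenp)) == NULL)
		return NULL;
	if (csops(pid, CS_OPS_ENTITLEMENTS_BLOB, blob, *lenp) != 0) {
		free(blob);
		return NULL;
	}
	return blob;
}
#endif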
static int
csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaudittoken)
{
	size_t usize = (size_t)CAST_DOWN(size_t, usersize);
	proc_t pt;
	int forself;
	int error;
	vnode_t tvp;
	off_t toff;
	unsigned char cdhash[SHA1_RESULTLEN];
	audit_token_t token;
	unsigned int upid = 0, uidversion = 0;

	forself = error = 0;

	if (pid == 0)
		pid = proc_selfpid();
	if (pid == proc_selfpid())
		forself = 1;

	switch (ops) {
		case CS_OPS_STATUS:
		case CS_OPS_CDHASH:
		case CS_OPS_PIDOFFSET:
		case CS_OPS_ENTITLEMENTS_BLOB:
		case CS_OPS_IDENTITY:
		case CS_OPS_BLOB:
			break;	/* not restricted to root */
		default:
			if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE)
				return EPERM;
			break;
	}

	pt = proc_find(pid);
	if (pt == PROC_NULL)
		return ESRCH;

	upid = pt->p_pid;
	uidversion = pt->p_idversion;
	if (uaudittoken != USER_ADDR_NULL) {
		error = copyin(uaudittoken, &token, sizeof(audit_token_t));
		if (error != 0)
			goto out;
		/* verify the audit token pid/idversion matches with proc */
		if ((token.val[5] != upid) || (token.val[7] != uidversion)) {
			error = ESRCH;
			goto out;
		}
	}

#if CONFIG_MACF
	switch (ops) {
		case CS_OPS_MARKINVALID:
		case CS_OPS_MARKHARD:
		case CS_OPS_MARKKILL:
		case CS_OPS_MARKRESTRICT:
		case CS_OPS_SET_STATUS:
		case CS_OPS_CLEARINSTALLER:
		case CS_OPS_CLEARPLATFORM:
			if ((error = mac_proc_check_set_cs_info(current_proc(), pt, ops)))
				goto out;
			break;
		default:
			if ((error = mac_proc_check_get_cs_info(current_proc(), pt, ops)))
				goto out;
	}
#endif

	switch (ops) {

		case CS_OPS_STATUS: {
			uint32_t retflags;

			proc_lock(pt);
			retflags = pt->p_csflags;
			if (cs_process_enforcement(pt))
				retflags |= CS_ENFORCEMENT;
			if (csproc_get_platform_binary(pt))
				retflags |= CS_PLATFORM_BINARY;
			if (csproc_get_platform_path(pt))
				retflags |= CS_PLATFORM_PATH;
			/* Don't return CS_REQUIRE_LV if we turned it on with CS_FORCED_LV, but still report CS_FORCED_LV */
			if ((pt->p_csflags & CS_FORCED_LV) == CS_FORCED_LV) {
				retflags &= (~CS_REQUIRE_LV);
			}
			proc_unlock(pt);

			if (uaddr != USER_ADDR_NULL)
				error = copyout(&retflags, uaddr, sizeof(uint32_t));
			break;
		}
		case CS_OPS_MARKINVALID:
			proc_lock(pt);
			if ((pt->p_csflags & CS_VALID) == CS_VALID) {	/* is currently valid */
				pt->p_csflags &= ~CS_VALID;	/* set invalid */
				if ((pt->p_csflags & CS_KILL) == CS_KILL) {
					pt->p_csflags |= CS_KILLED;
					proc_unlock(pt);
					if (cs_debug) {
						printf("CODE SIGNING: marked invalid by pid %d: "
						    "p=%d[%s] honoring CS_KILL, final status 0x%x\n",
						    proc_selfpid(), pt->p_pid, pt->p_comm, pt->p_csflags);
					}
					psignal(pt, SIGKILL);
				} else
					proc_unlock(pt);
			} else
				proc_unlock(pt);

			break;

		case CS_OPS_MARKHARD:
			proc_lock(pt);
			pt->p_csflags |= CS_HARD;
			if ((pt->p_csflags & CS_VALID) == 0) {
				/* @@@ allow? reject? kill? @@@ */
				proc_unlock(pt);
				error = EINVAL;
				goto out;
			} else
				proc_unlock(pt);
			break;

		case CS_OPS_MARKKILL:
			proc_lock(pt);
			pt->p_csflags |= CS_KILL;
			if ((pt->p_csflags & CS_VALID) == 0) {
				proc_unlock(pt);
				psignal(pt, SIGKILL);
			} else
				proc_unlock(pt);
			break;

		case CS_OPS_PIDOFFSET:
			toff = pt->p_textoff;
			proc_rele(pt);
			error = copyout(&toff, uaddr, sizeof(toff));
			return error;

		case CS_OPS_CDHASH:

			/* pt already holds a reference on its p_textvp */
			tvp = pt->p_textvp;
			toff = pt->p_textoff;

			if (tvp == NULLVP || usize != SHA1_RESULTLEN) {
				proc_rele(pt);
				return EINVAL;
			}

			error = vn_getcdhash(tvp, toff, cdhash);
			proc_rele(pt);

			if (error == 0) {
				error = copyout(cdhash, uaddr, sizeof(cdhash));
			}

			return error;

		case CS_OPS_ENTITLEMENTS_BLOB: {
			void *start;
			size_t length;

			proc_lock(pt);

			if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
				proc_unlock(pt);
				error = EINVAL;
				break;
			}

			error = cs_entitlements_blob_get(pt, &start, &length);
			proc_unlock(pt);
			if (error)
				break;

			error = csops_copy_token(start, length, usize, uaddr);
			break;
		}
		case CS_OPS_MARKRESTRICT:
			proc_lock(pt);
			pt->p_csflags |= CS_RESTRICT;
			proc_unlock(pt);
			break;

		case CS_OPS_SET_STATUS: {
			uint32_t flags;

			if (usize < sizeof(flags)) {
				error = ERANGE;
				break;
			}

			error = copyin(uaddr, &flags, sizeof(flags));
			if (error)
				break;

			/* only allow setting a subset of all code sign flags */
			flags &=
			    CS_HARD | CS_EXEC_SET_HARD |
			    CS_KILL | CS_EXEC_SET_KILL |
			    CS_RESTRICT |
			    CS_REQUIRE_LV |
			    CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT;

			proc_lock(pt);
			if (pt->p_csflags & CS_VALID)
				pt->p_csflags |= flags;
			else
				error = EINVAL;
			proc_unlock(pt);

			break;
		}
		case CS_OPS_BLOB: {
			void *start;
			size_t length;

			proc_lock(pt);
			if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
				proc_unlock(pt);
				error = EINVAL;
				break;
			}

			error = cs_blob_get(pt, &start, &length);
			proc_unlock(pt);
			if (error)
				break;

			error = csops_copy_token(start, length, usize, uaddr);
			break;
		}
		case CS_OPS_IDENTITY:
		case CS_OPS_TEAMID: {
			const char *identity;
			uint8_t fakeheader[8];
			uint32_t idlen;
			size_t length;

			/*
			 * Make identity have a blob header to make it
			 * easier on userland to guess the identity
			 * length.
			 */
			if (usize < sizeof(fakeheader)) {
				error = ERANGE;
				break;
			}
			memset(fakeheader, 0, sizeof(fakeheader));

			proc_lock(pt);
			if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
				proc_unlock(pt);
				error = EINVAL;
				break;
			}

			identity = ops == CS_OPS_TEAMID ? csproc_get_teamid(pt) : cs_identity_get(pt);
			proc_unlock(pt);
			if (identity == NULL) {
				error = ENOENT;
				break;
			}

			length = strlen(identity) + 1;	/* include NUL */
			idlen = htonl(length + sizeof(fakeheader));
			memcpy(&fakeheader[4], &idlen, sizeof(idlen));

			error = copyout(fakeheader, uaddr, sizeof(fakeheader));
			if (error)
				break;

			if (usize < sizeof(fakeheader) + length)
				error = ERANGE;
			else if (usize > sizeof(fakeheader))
				error = copyout(identity, uaddr + sizeof(fakeheader), length);

			break;
		}
		case CS_OPS_CLEARINSTALLER:
			proc_lock(pt);
			pt->p_csflags &= ~(CS_INSTALLER | CS_DATAVAULT_CONTROLLER | CS_EXEC_INHERIT_SIP);
			proc_unlock(pt);
			break;

		case CS_OPS_CLEARPLATFORM:
#if DEVELOPMENT || DEBUG
			if (cs_process_global_enforcement()) {
				error = ENOTSUP;
				break;
			}

#if CONFIG_CSR
			if (csr_check(CSR_ALLOW_APPLE_INTERNAL) != 0) {
				error = ENOTSUP;
				break;
			}
#endif

			proc_lock(pt);
			pt->p_csflags &= ~(CS_PLATFORM_BINARY | CS_PLATFORM_PATH);
			csproc_clear_platform_binary(pt);
			proc_unlock(pt);
			break;
#else
			error = ENOTSUP;
			break;
#endif /* !DEVELOPMENT || DEBUG */

		default:
			error = EINVAL;
			break;
	}
out:
	proc_rele(pt);
	return error;
}
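#if 0	/* Illustrative userspace sketch only -- not part of the original file. */
#include <sys/codesign.h>
#include <stdio.h>

/*
 * Querying the code-signing status handled by CS_OPS_STATUS above: the
 * kernel copies out a uint32_t of CS_* flags for the target pid.  The
 * helper name is hypothetical.
 */
static int
print_cs_status(pid_t pid)
{
	uint32_t flags = 0;

	if (csops(pid, CS_OPS_STATUS, &flags, sizeof(flags)) != 0)
		return -1;
	printf("pid %d csflags: 0x%x%s\n", pid, flags,
	    (flags & CS_VALID) ? " (valid)" : "");
	return 0;
}
#endif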
void
proc_iterate(
	unsigned int flags,
	proc_iterate_fn_t callout,
	void *arg,
	proc_iterate_fn_t filterfn,
	void *filterarg)
{
	pid_t *pid_list = NULL;
	vm_size_t pid_list_size = 0;
	vm_size_t pid_list_size_needed = 0;
	int pid_count = 0;
	int pid_count_available = 0;

	assert(callout != NULL);

	/* allocate outside of the proc_list_lock */
	for (;;) {
		proc_list_lock();

		pid_count_available = nprocs + 1 /* kernel_task not counted in nprocs */;
		assert(pid_count_available > 0);

		pid_list_size_needed = pid_count_available * sizeof(pid_t);
		if (pid_list_size >= pid_list_size_needed) {
			break;
		}
		proc_list_unlock();

		if (pid_list_size != 0) {
			kfree(pid_list, pid_list_size);
		}
		pid_list = kalloc(pid_list_size_needed);
		if (!pid_list) {
			return;
		}
		pid_list_size = pid_list_size_needed;
	}
	assert(pid_list != NULL);

	/* filter pids into pid_list */

	if (flags & PROC_ALLPROCLIST) {
		proc_t p;
		ALLPROC_FOREACH(p) {
			/* ignore processes that are being forked */
			if (p->p_stat == SIDL) {
				continue;
			}
			if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
				continue;
			}
			pid_list[pid_count++] = proc_pid(p);
			if (pid_count >= pid_count_available) {
				break;
			}
		}
	}

	if ((pid_count < pid_count_available) &&
	    (flags & PROC_ZOMBPROCLIST)) {
		proc_t p;
		ZOMBPROC_FOREACH(p) {
			if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
				continue;
			}
			pid_list[pid_count++] = proc_pid(p);
			if (pid_count >= pid_count_available) {
				break;
			}
		}
	}

	proc_list_unlock();

	/* call callout on processes in the pid_list */

	for (int i = 0; i < pid_count; i++) {
		proc_t p = proc_find(pid_list[i]);
		if (p) {
			if ((flags & PROC_NOWAITTRANS) == 0) {
				proc_transwait(p, 0);
			}
			int callout_ret = callout(p, arg);

			switch (callout_ret) {
			case PROC_RETURNED_DONE:
				proc_rele(p);
				/* FALLTHROUGH */
			case PROC_CLAIMED_DONE:
				goto out;

			case PROC_RETURNED:
				proc_rele(p);
				/* FALLTHROUGH */
			case PROC_CLAIMED:
				break;

			default:
				panic("proc_iterate: callout returned %d for pid %d",
				    callout_ret, pid_list[i]);
				break;
			}
		} else if (flags & PROC_ZOMBPROCLIST) {
			p = proc_find_zombref(pid_list[i]);
			if (!p) {
				continue;
			}
			int callout_ret = callout(p, arg);

			switch (callout_ret) {
			case PROC_RETURNED_DONE:
				proc_drop_zombref(p);
				/* FALLTHROUGH */
			case PROC_CLAIMED_DONE:
				goto out;

			case PROC_RETURNED:
				proc_drop_zombref(p);
				/* FALLTHROUGH */
			case PROC_CLAIMED:
				break;

			default:
				panic("proc_iterate: callout returned %d for zombie pid %d",
				    callout_ret, pid_list[i]);
				break;
			}
		}
	}

out:
	kfree(pid_list, pid_list_size);
	return;
}
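#if 0	/* Illustrative sketch only -- not part of the original file. */
/*
 * Minimal proc_iterate() client: count live processes.  The callout runs
 * with a proc_find() reference already held; returning PROC_RETURNED
 * tells proc_iterate() to drop that ref and continue.  The helper names
 * are hypothetical.
 */
static int
count_callout(proc_t p __unused, void *arg)
{
	(*(int *)arg)++;
	return PROC_RETURNED;
}

static int
count_processes(void)
{
	int count = 0;

	proc_iterate(PROC_ALLPROCLIST, count_callout, &count, NULL, NULL);
	return count;
}
#endif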
void
proc_rebootscan(
	proc_iterate_fn_t callout,
	void *arg,
	proc_iterate_fn_t filterfn,
	void *filterarg)
{
	proc_t p;

	assert(callout != NULL);

	proc_shutdown_exitcount = 0;

restart_foreach:

	proc_list_lock();

	ALLPROC_FOREACH(p) {
		if ((filterfn != NULL) && filterfn(p, filterarg) == 0) {
			continue;
		}
		p = proc_ref_locked(p);
		if (!p) {
			continue;
		}

		proc_list_unlock();

		proc_transwait(p, 0);
		(void)callout(p, arg);
		proc_rele(p);

		goto restart_foreach;
	}

	proc_list_unlock();
}
void
proc_childrenwalk(
	proc_t parent,
	proc_iterate_fn_t callout,
	void *arg)
{
	pid_t *pid_list;
	proc_t p;
	vm_size_t pid_list_size = 0;
	vm_size_t pid_list_size_needed = 0;
	int pid_count = 0;
	int pid_count_available = 0;

	assert(parent != NULL);
	assert(callout != NULL);

	for (;;) {
		proc_list_lock();

		pid_count_available = parent->p_childrencnt;
		if (pid_count_available == 0) {
			proc_list_unlock();
			return;
		}

		pid_list_size_needed = pid_count_available * sizeof(pid_t);
		if (pid_list_size >= pid_list_size_needed) {
			break;
		}
		proc_list_unlock();

		if (pid_list_size != 0) {
			kfree(pid_list, pid_list_size);
		}
		pid_list = kalloc(pid_list_size_needed);
		if (!pid_list) {
			return;
		}
		pid_list_size = pid_list_size_needed;
	}

	PCHILDREN_FOREACH(parent, p) {
		if (p->p_stat == SIDL) {
			continue;
		}
		pid_list[pid_count++] = proc_pid(p);
		if (pid_count >= pid_count_available) {
			break;
		}
	}

	proc_list_unlock();

	for (int i = 0; i < pid_count; i++) {
		p = proc_find(pid_list[i]);
		if (!p) {
			continue;
		}

		int callout_ret = callout(p, arg);

		switch (callout_ret) {
		case PROC_RETURNED_DONE:
			proc_rele(p);
			/* FALLTHROUGH */
		case PROC_CLAIMED_DONE:
			goto out;

		case PROC_RETURNED:
			proc_rele(p);
			/* FALLTHROUGH */
		case PROC_CLAIMED:
			break;
		default:
			panic("proc_childrenwalk: callout returned %d for pid %d",
			    callout_ret, pid_list[i]);
			break;
		}
	}

out:
	kfree(pid_list, pid_list_size);
	return;
}
void
pgrp_iterate(
	struct pgrp *pgrp,
	unsigned int flags,
	proc_iterate_fn_t callout,
	void *arg,
	proc_iterate_fn_t filterfn,
	void *filterarg)
{
	pid_t *pid_list;
	proc_t p;
	vm_size_t pid_list_size = 0;
	vm_size_t pid_list_size_needed = 0;
	int pid_count = 0;
	int pid_count_available = 0;
	pid_t pgid;

	assert(pgrp != NULL);
	assert(callout != NULL);

	for (;;) {
		pgrp_lock(pgrp);

		pid_count_available = pgrp->pg_membercnt;
		if (pid_count_available == 0) {
			pgrp_unlock(pgrp);
			return;
		}

		pid_list_size_needed = pid_count_available * sizeof(pid_t);
		if (pid_list_size >= pid_list_size_needed) {
			break;
		}
		pgrp_unlock(pgrp);

		if (pid_list_size != 0) {
			kfree(pid_list, pid_list_size);
		}
		pid_list = kalloc(pid_list_size_needed);
		if (!pid_list) {
			return;
		}
		pid_list_size = pid_list_size_needed;
	}

	pgid = pgrp->pg_id;

	PGMEMBERS_FOREACH(pgrp, p) {
		if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
			continue;
		}
		pid_list[pid_count++] = proc_pid(p);
		if (pid_count >= pid_count_available) {
			break;
		}
	}

	pgrp_unlock(pgrp);

	if (flags & PGRP_DROPREF) {
		pg_rele(pgrp);
	}

	for (int i = 0; i < pid_count; i++) {
		/* do not handle kernproc */
		if (pid_list[i] == 0) {
			continue;
		}
		p = proc_find(pid_list[i]);
		if (!p) {
			continue;
		}
		if (p->p_pgrpid != pgid) {
			proc_rele(p);
			continue;
		}

		int callout_ret = callout(p, arg);

		switch (callout_ret) {
		case PROC_RETURNED:
			proc_rele(p);
			/* FALLTHROUGH */
		case PROC_CLAIMED:
			break;

		case PROC_RETURNED_DONE:
			proc_rele(p);
			/* FALLTHROUGH */
		case PROC_CLAIMED_DONE:
			goto out;

		default:
			panic("pgrp_iterate: callout returned %d for pid %d",
			    callout_ret, pid_list[i]);
		}
	}

out:
	kfree(pid_list, pid_list_size);
}
static void
pgrp_add(struct pgrp *pgrp, struct proc *parent, struct proc *child)
{
	proc_list_lock();
	child->p_pgrp = pgrp;
	child->p_pgrpid = pgrp->pg_id;
	child->p_listflag |= P_LIST_INPGRP;
	/*
	 * While a pgrp is being freed, a process can still request addition
	 * via setpgid (e.g. from bash when a login is terminated -- the
	 * "login cycler"); in that case clear TERMINATE rather than return
	 * ESRCH.  It is safe to hold the lock here due to the refcount on
	 * the pgrp.
	 */
	if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
		pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	if ((pgrp->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
		panic("pgrp_add: pgrp is dead adding process");
	proc_list_unlock();

	pgrp_lock(pgrp);
	pgrp->pg_membercnt++;
	if (parent != PROC_NULL) {
		LIST_INSERT_AFTER(parent, child, p_pglist);
	} else {
		LIST_INSERT_HEAD(&pgrp->pg_members, child, p_pglist);
	}
	pgrp_unlock(pgrp);

	proc_list_lock();
	if (((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (pgrp->pg_membercnt != 0)) {
		pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}
	proc_list_unlock();
}
static void
pgrp_remove(struct proc *p)
{
	struct pgrp *pg;

	pg = proc_pgrp(p);

	proc_list_lock();
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INPGRP) == 0)
		panic("removing from pglist but no named ref\n");
#endif
	p->p_pgrpid = PGRPID_DEAD;
	p->p_listflag &= ~P_LIST_INPGRP;
	p->p_pgrp = NULL;
	proc_list_unlock();

	if (pg == PGRP_NULL)
		panic("pgrp_remove: pg is NULL");
	pgrp_lock(pg);
	pg->pg_membercnt--;

	if (pg->pg_membercnt < 0)
		panic("pgrp: -ve membercnt pgrp:%p p:%p\n", pg, p);

	LIST_REMOVE(p, p_pglist);
	if (pg->pg_members.lh_first == 0) {
		pgrp_unlock(pg);
		pgdelete_dropref(pg);
	} else {
		pgrp_unlock(pg);
		pg_rele(pg);
	}
}
/* cannot use proc_pgrp as it may be stalled */
static void
pgrp_replace(struct proc *p, struct pgrp *newpg)
{
	struct pgrp *oldpg;

	proc_list_lock();

	while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
		p->p_listflag |= P_LIST_PGRPTRWAIT;
		(void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
	}

	p->p_listflag |= P_LIST_PGRPTRANS;

	oldpg = p->p_pgrp;
	if (oldpg == PGRP_NULL)
		panic("pgrp_replace: oldpg NULL");
	oldpg->pg_refcount++;
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INPGRP) == 0)
		panic("removing from pglist but no named ref\n");
#endif
	p->p_pgrpid = PGRPID_DEAD;
	p->p_listflag &= ~P_LIST_INPGRP;
	p->p_pgrp = NULL;

	proc_list_unlock();

	pgrp_lock(oldpg);
	oldpg->pg_membercnt--;
	if (oldpg->pg_membercnt < 0)
		panic("pgrp: -ve membercnt pgrp:%p p:%p\n", oldpg, p);
	LIST_REMOVE(p, p_pglist);
	if (oldpg->pg_members.lh_first == 0) {
		pgrp_unlock(oldpg);
		pgdelete_dropref(oldpg);
	} else {
		pgrp_unlock(oldpg);
		pg_rele(oldpg);
	}

	proc_list_lock();
	p->p_pgrp = newpg;
	p->p_pgrpid = newpg->pg_id;
	p->p_listflag |= P_LIST_INPGRP;
	/*
	 * While a pgrp is being freed, a process can still request addition
	 * via setpgid (e.g. from bash when a login is terminated -- the
	 * "login cycler"); in that case clear TERMINATE rather than return
	 * ESRCH.  It is safe to hold the lock here due to the refcount on
	 * the pgrp.
	 */
	if ((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
		newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	if ((newpg->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
		panic("pgrp_replace: pgrp is dead adding process");
	proc_list_unlock();

	pgrp_lock(newpg);
	newpg->pg_membercnt++;
	LIST_INSERT_HEAD(&newpg->pg_members, p, p_pglist);
	pgrp_unlock(newpg);

	proc_list_lock();
	if (((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (newpg->pg_membercnt != 0)) {
		newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	p->p_listflag &= ~P_LIST_PGRPTRANS;
	if ((p->p_listflag & P_LIST_PGRPTRWAIT) == P_LIST_PGRPTRWAIT) {
		p->p_listflag &= ~P_LIST_PGRPTRWAIT;
		wakeup(&p->p_pgrpid);
	}
	proc_list_unlock();
}
void
pgrp_lock(struct pgrp *pgrp)
{
	lck_mtx_lock(&pgrp->pg_mlock);
}

void
pgrp_unlock(struct pgrp *pgrp)
{
	lck_mtx_unlock(&pgrp->pg_mlock);
}

void
session_lock(struct session *sess)
{
	lck_mtx_lock(&sess->s_mlock);
}

void
session_unlock(struct session *sess)
{
	lck_mtx_unlock(&sess->s_mlock);
}
struct pgrp *
proc_pgrp(proc_t p)
{
	struct pgrp *pgrp;

	if (p == PROC_NULL)
		return PGRP_NULL;
	proc_list_lock();

	while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
		p->p_listflag |= P_LIST_PGRPTRWAIT;
		(void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
	}

	pgrp = p->p_pgrp;

	assert(pgrp != NULL);

	if (pgrp != PGRP_NULL) {
		pgrp->pg_refcount++;
		if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) != 0)
			panic("proc_pgrp: ref being provided for dead pgrp");
	}

	proc_list_unlock();

	return pgrp;
}
struct pgrp *
tty_pgrp(struct tty *tp)
{
	struct pgrp *pg = PGRP_NULL;

	proc_list_lock();
	pg = tp->t_pgrp;

	if (pg != PGRP_NULL) {
		if ((pg->pg_listflags & PGRP_FLAG_DEAD) != 0)
			panic("tty_pgrp: ref being provided for dead pgrp");
		pg->pg_refcount++;
	}
	proc_list_unlock();

	return pg;
}
struct session *
proc_session(proc_t p)
{
	struct session *sess = SESSION_NULL;

	if (p == PROC_NULL)
		return (SESSION_NULL);

	proc_list_lock();

	/* wait during transitions */
	while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
		p->p_listflag |= P_LIST_PGRPTRWAIT;
		(void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
	}

	if ((p->p_pgrp != PGRP_NULL) && ((sess = p->p_pgrp->pg_session) != SESSION_NULL)) {
		if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
			panic("proc_session: returning session ref on terminating session");
		sess->s_count++;
	}
	proc_list_unlock();
	return sess;
}
void
session_rele(struct session *sess)
{
	proc_list_lock();
	if (--sess->s_count == 0) {
		if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
			panic("session_rele: terminating already terminated session");
		sess->s_listflags |= S_LIST_TERM;
		LIST_REMOVE(sess, s_hash);
		sess->s_listflags |= S_LIST_DEAD;
		if (sess->s_count != 0)
			panic("session_rele: freeing session in use");
		proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_destroy(&sess->s_mlock, proc_mlock_grp);
#else
		lck_mtx_destroy(&sess->s_mlock, proc_lck_grp);
#endif
		FREE_ZONE(sess, sizeof(struct session), M_SESSION);
	} else
		proc_list_unlock();
}
int
proc_transstart(proc_t p, int locked, int non_blocking)
{
	if (locked == 0)
		proc_lock(p);
	while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
		if (((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT) || non_blocking) {
			if (locked == 0)
				proc_unlock(p);
			return EDEADLK;
		}
		p->p_lflag |= P_LTRANSWAIT;
		msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
	}
	p->p_lflag |= P_LINTRANSIT;
	p->p_transholder = current_thread();
	if (locked == 0)
		proc_unlock(p);
	return 0;
}

void
proc_transcommit(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);

	assert((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT);
	assert(p->p_transholder == current_thread());
	p->p_lflag |= P_LTRANSCOMMIT;

	if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
		p->p_lflag &= ~P_LTRANSWAIT;
		wakeup(&p->p_lflag);
	}
	if (locked == 0)
		proc_unlock(p);
}

void
proc_transend(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);

	p->p_lflag &= ~(P_LINTRANSIT | P_LTRANSCOMMIT);
	p->p_transholder = NULL;

	if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
		p->p_lflag &= ~P_LTRANSWAIT;
		wakeup(&p->p_lflag);
	}
	if (locked == 0)
		proc_unlock(p);
}

int
proc_transwait(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);
	while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
		if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT && current_proc() == p) {
			if (locked == 0)
				proc_unlock(p);
			return EDEADLK;
		}
		p->p_lflag |= P_LTRANSWAIT;
		msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
	}
	if (locked == 0)
		proc_unlock(p);
	return 0;
}
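#if 0	/* Illustrative sketch only -- not part of the original file. */
/*
 * The proc transition primitives form a start/commit/end bracket around
 * operations (such as exec) that must appear atomic to observers, while
 * proc_transwait() blocks until any in-flight transition completes.  The
 * helper name is hypothetical.
 */
static int
proc_transaction_example(proc_t p)
{
	if (proc_transstart(p, 0, 0) != 0)
		return EDEADLK;		/* someone is already committing */

	/* ... mutate the proc while others block in proc_transwait() ... */

	proc_transcommit(p, 0);		/* point of no return */
	/* ... finish the mutation ... */
	proc_transend(p, 0);		/* clear the flags, wake all waiters */
	return 0;
}
#endif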
void
proc_klist_lock(void)
{
	lck_mtx_lock(proc_klist_mlock);
}

void
proc_klist_unlock(void)
{
	lck_mtx_unlock(proc_klist_mlock);
}

void
proc_knote(struct proc *p, long hint)
{
	proc_klist_lock();
	KNOTE(&p->p_klist, hint);
	proc_klist_unlock();
}

void
proc_knote_drain(struct proc *p)
{
	struct knote *kn = NULL;

	/*
	 * Clear the proc's klist to avoid references after the proc is reaped.
	 */
	proc_klist_lock();
	while ((kn = SLIST_FIRST(&p->p_klist))) {
		kn->kn_ptr.p_proc = PROC_NULL;
		KNOTE_DETACH(&p->p_klist, kn);
	}
	proc_klist_unlock();
}
void
proc_setregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag |= P_LREGISTER;
	proc_unlock(p);
}

void
proc_resetregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag &= ~P_LREGISTER;
	proc_unlock(p);
}

pid_t
proc_pgrpid(proc_t p)
{
	return p->p_pgrpid;
}

pid_t
proc_selfpgrpid(void)
{
	return current_proc()->p_pgrpid;
}
/* return control and action states */
int
proc_getpcontrol(int pid, int *pcontrolp)
{
	proc_t p;

	p = proc_find(pid);
	if (p == PROC_NULL)
		return ESRCH;
	if (pcontrolp != NULL)
		*pcontrolp = p->p_pcaction;

	proc_rele(p);
	return 0;
}
)
3061 pcontrol
= PROC_CONTROL_STATE(p
);
3063 if (PROC_ACTION_STATE(p
) == 0) {
3066 PROC_SETACTION_STATE(p
);
3068 printf("low swap: throttling pid %d (%s)\n", p
->p_pid
, p
->p_comm
);
3072 PROC_SETACTION_STATE(p
);
3074 printf("low swap: suspending pid %d (%s)\n", p
->p_pid
, p
->p_comm
);
3075 task_suspend(p
->task
);
3079 PROC_SETACTION_STATE(p
);
3081 printf("low swap: killing pid %d (%s)\n", p
->p_pid
, p
->p_comm
);
3082 psignal(p
, SIGKILL
);
3092 return(PROC_RETURNED
);
/*
 * Resume a throttled or suspended process.  This is an internal interface
 * that is only used by the user-level code that presents the GUI when we
 * run out of swap space, and hence is restricted to processes with
 * superuser privileges.
 */
int
proc_resetpcontrol(int pid)
{
	proc_t p;
	int pcontrol;
	int error;
	proc_t self = current_proc();

	/* if the process has been validated to handle resource control or root is valid one */
	if (((self->p_lflag & P_LVMRSRCOWNER) == 0) && (error = suser(kauth_cred_get(), 0)))
		return error;

	p = proc_find(pid);
	if (p == PROC_NULL)
		return ESRCH;

	proc_lock(p);

	pcontrol = PROC_CONTROL_STATE(p);

	if (PROC_ACTION_STATE(p) != 0) {
		switch (pcontrol) {
		case P_PCTHROTTLE:
			PROC_RESETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: unthrottling pid %d (%s)\n", p->p_pid, p->p_comm);
			break;

		case P_PCSUSP:
			PROC_RESETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: resuming pid %d (%s)\n", p->p_pid, p->p_comm);
			task_resume(p->task);
			break;

		case P_PCKILL:
			/* Huh? */
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: attempt to unkill pid %d (%s) ignored\n", p->p_pid, p->p_comm);
			break;

		default:
			proc_unlock(p);
		}
	} else
		proc_unlock(p);

	proc_rele(p);
	return 0;
}
struct no_paging_space {
	uint64_t	pcs_max_size;
	uint64_t	pcs_uniqueid;
	int		pcs_pid;
	int		pcs_proc_count;
	uint64_t	pcs_total_size;

	uint64_t	npcs_max_size;
	uint64_t	npcs_uniqueid;
	int		npcs_pid;
	int		npcs_proc_count;
	uint64_t	npcs_total_size;

	int		apcs_proc_count;
	uint64_t	apcs_total_size;
};
static int
proc_pcontrol_filter(proc_t p, void *arg)
{
	struct no_paging_space *nps;
	uint64_t compressed;

	nps = (struct no_paging_space *)arg;

	compressed = get_task_compressed(p->task);

	if (PROC_CONTROL_STATE(p)) {
		if (PROC_ACTION_STATE(p) == 0) {
			if (compressed > nps->pcs_max_size) {
				nps->pcs_pid = p->p_pid;
				nps->pcs_uniqueid = p->p_uniqueid;
				nps->pcs_max_size = compressed;
			}
			nps->pcs_total_size += compressed;
			nps->pcs_proc_count++;
		} else {
			nps->apcs_total_size += compressed;
			nps->apcs_proc_count++;
		}
	} else {
		if (compressed > nps->npcs_max_size) {
			nps->npcs_pid = p->p_pid;
			nps->npcs_uniqueid = p->p_uniqueid;
			nps->npcs_max_size = compressed;
		}
		nps->npcs_total_size += compressed;
		nps->npcs_proc_count++;
	}
	return 0;
}

static int
proc_pcontrol_null(__unused proc_t p, __unused void *arg)
{
	return (PROC_RETURNED);
}
/*
 * Deal with the low-on-compressor-pool-space condition... this function
 * gets called when we are approaching the limits of the compressor pool or
 * we are unable to create a new swap file.
 * Since this eventually creates a memory deadlock situation, we need to
 * take action to free up memory resources (both compressed and
 * uncompressed) in order to prevent the system from hanging completely.
 * There are two categories of processes to deal with: those that have an
 * action associated with them by the task itself and those that do not.
 * Actionable tasks can have one of three categories specified: ones that
 * can be killed immediately, ones that should be suspended, and ones that
 * should be throttled.  Processes that do not have an action associated
 * with them are normally ignored unless they are utilizing such a large
 * percentage of the compressor pool (currently 50%) that only by killing
 * them can we hope to put the system back into a usable state.
 */

#define	NO_PAGING_SPACE_DEBUG	0

extern uint64_t	vm_compressor_pages_compressed(void);

struct timeval	last_no_space_action = {0, 0};

#if DEVELOPMENT || DEBUG
extern boolean_t kill_on_no_paging_space;
#endif /* DEVELOPMENT || DEBUG */

#define	MB_SIZE	(1024 * 1024ULL)
boolean_t	memorystatus_kill_on_VM_compressor_space_shortage(boolean_t);

extern int32_t	max_kill_priority;
extern int	memorystatus_get_proccnt_upto_priority(int32_t max_bucket_index);
int
no_paging_space_action(void)
{
	proc_t p;
	struct no_paging_space nps;
	struct timeval now;

	/*
	 * Throttle how often we come through here.  Once every 5 seconds should be plenty.
	 */
	microtime(&now);

	if (now.tv_sec <= last_no_space_action.tv_sec + 5)
		return 0;

	/*
	 * Examine all processes and find the biggest (biggest is based on the number of pages this
	 * task has in the compressor pool) that has been marked to have some action
	 * taken when swap space runs out... we also find the biggest that hasn't been marked for
	 * action.
	 *
	 * If the biggest non-actionable task is over the "dangerously big" threshold (currently 50% of
	 * the total number of pages held by the compressor), we go ahead and kill it since no other task
	 * can have any real effect on the situation.  Otherwise, we go after the actionable process.
	 */
	bzero(&nps, sizeof(nps));

	proc_iterate(PROC_ALLPROCLIST, proc_pcontrol_null, (void *)NULL, proc_pcontrol_filter, (void *)&nps);

#if NO_PAGING_SPACE_DEBUG
	printf("low swap: npcs_proc_count = %d, npcs_total_size = %qd, npcs_max_size = %qd\n",
	    nps.npcs_proc_count, nps.npcs_total_size, nps.npcs_max_size);
	printf("low swap: pcs_proc_count = %d, pcs_total_size = %qd, pcs_max_size = %qd\n",
	    nps.pcs_proc_count, nps.pcs_total_size, nps.pcs_max_size);
	printf("low swap: apcs_proc_count = %d, apcs_total_size = %qd\n",
	    nps.apcs_proc_count, nps.apcs_total_size);
#endif
	if (nps.npcs_max_size > (vm_compressor_pages_compressed() * 50) / 100) {
		/*
		 * for now we'll knock out any task that has more than 50% of the pages
		 * held by the compressor
		 */
		if ((p = proc_find(nps.npcs_pid)) != PROC_NULL) {

			if (nps.npcs_uniqueid == p->p_uniqueid) {
				/*
				 * verify this is still the same process
				 * in case the proc exited and the pid got reused while
				 * we were finishing the proc_iterate and getting to this point
				 */
				last_no_space_action = now;

				printf("low swap: killing largest compressed process with pid %d (%s) and size %llu MB\n",
				    p->p_pid, p->p_comm, (nps.npcs_max_size / MB_SIZE));
				psignal(p, SIGKILL);

				proc_rele(p);

				return 0;
			}

			proc_rele(p);
		}
	}

	/*
	 * We have some processes within our jetsam bands of consideration and hence can be killed.
	 * So we will invoke the memorystatus thread to go ahead and kill something.
	 */
	if (memorystatus_get_proccnt_upto_priority(max_kill_priority) > 0) {

		last_no_space_action = now;
		memorystatus_kill_on_VM_compressor_space_shortage(TRUE /* async */);
		return 1;
	}

	/*
	 * No eligible processes to kill.  So let's suspend/kill the largest
	 * process depending on its policy control specifications.
	 */

	if (nps.pcs_max_size > 0) {
		if ((p = proc_find(nps.pcs_pid)) != PROC_NULL) {

			if (nps.pcs_uniqueid == p->p_uniqueid) {
				/*
				 * verify this is still the same process
				 * in case the proc exited and the pid got reused while
				 * we were finishing the proc_iterate and getting to this point
				 */
				last_no_space_action = now;

				proc_dopcontrol(p);

				proc_rele(p);

				return 1;
			}

			proc_rele(p);
		}
	}
	last_no_space_action = now;

	printf("low swap: unable to find any eligible processes to take action on\n");

	return 0;
}
int
proc_trace_log(__unused proc_t p, struct proc_trace_log_args *uap, __unused int *retval)
{
	int ret = 0;
	proc_t target_proc = PROC_NULL;
	pid_t target_pid = uap->pid;
	uint64_t target_uniqueid = uap->uniqueid;
	task_t target_task = NULL;

	if (priv_check_cred(kauth_cred_get(), PRIV_PROC_TRACE_INSPECT, 0)) {
		ret = EPERM;
		goto out;
	}
	target_proc = proc_find(target_pid);
	if (target_proc != PROC_NULL) {
		if (target_uniqueid != proc_uniqueid(target_proc)) {
			ret = ENOENT;
			goto out;
		}

		target_task = proc_task(target_proc);
		if (task_send_trace_memory(target_task, target_pid, target_uniqueid)) {
			ret = EINVAL;
			goto out;
		}
	} else
		ret = ENOENT;

out:
	if (target_proc != PROC_NULL)
		proc_rele(target_proc);
	return ret;
}
#if VM_SCAN_FOR_SHADOW_CHAIN
extern int vm_map_shadow_max(vm_map_t map);
int proc_shadow_max(void);
int proc_shadow_max(void)
{
	int		retval, max;
	proc_t		p;
	task_t		task;
	vm_map_t	map;

	max = 0;
	proc_list_lock();
	for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
		if (p->p_stat == SIDL)
			continue;
		task = p->task;
		if (task == NULL) {
			continue;
		}
		map = get_task_map(task);
		if (map == NULL) {
			continue;
		}
		retval = vm_map_shadow_max(map);
		if (retval > max) {
			max = retval;
		}
	}
	proc_list_unlock();
	return max;
}
#endif /* VM_SCAN_FOR_SHADOW_CHAIN */
void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid);
void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid)
{
	if (target_proc != NULL) {
		target_proc->p_responsible_pid = responsible_pid;
	}
	return;
}
int
proc_chrooted(proc_t p)
{
	int retval = 0;

	if (p) {
		proc_fdlock(p);
		retval = (p->p_fd->fd_rdir != NULL) ? 1 : 0;
		proc_fdunlock(p);
	}

	return retval;
}
boolean_t
proc_send_synchronous_EXC_RESOURCE(proc_t p)
{
	if (p == PROC_NULL)
		return FALSE;

	/* Send sync EXC_RESOURCE if the process is traced */
	if (ISSET(p->p_lflag, P_LTRACED)) {
		return TRUE;
	}
	return FALSE;
}
#ifdef CONFIG_32BIT_TELEMETRY
void
proc_log_32bit_telemetry(proc_t p)
{
	/* Gather info */
	char signature_buf[MAX_32BIT_EXEC_SIG_SIZE] = { 0 };
	char *signature_cur_end = &signature_buf[0];
	char *signature_buf_end = &signature_buf[MAX_32BIT_EXEC_SIG_SIZE - 1];
	int bytes_printed = 0;

	const char *teamid = NULL;
	const char *identity = NULL;
	struct cs_blob *csblob = NULL;

	proc_list_lock();

	/*
	 * Get proc name and parent proc name; if the parent execs, we'll get a
	 * garbled name.
	 */
	bytes_printed = snprintf(signature_cur_end,
	    signature_buf_end - signature_cur_end,
	    "%s,%s,", p->p_name,
	    (p->p_pptr ? p->p_pptr->p_name : ""));

	if (bytes_printed > 0) {
		signature_cur_end += bytes_printed;
	}

	proc_list_unlock();

	/* Get developer info. */
	vnode_t v = proc_getexecutablevnode(p);

	if (v) {
		csblob = csvnode_get_blob(v, 0);

		if (csblob) {
			teamid = csblob_get_teamid(csblob);
			identity = csblob_get_identity(csblob);
		}
	}

	if (teamid == NULL) {
		teamid = "";
	}

	if (identity == NULL) {
		identity = "";
	}

	bytes_printed = snprintf(signature_cur_end,
	    signature_buf_end - signature_cur_end,
	    "%s,%s", teamid, identity);

	if (bytes_printed > 0) {
		signature_cur_end += bytes_printed;
	}

	if (v) {
		vnode_put(v);
	}

	/*
	 * We may want to rate limit here, although the SUMMARIZE key should
	 * help us aggregate events in userspace.
	 */

	/* Emit log */
	kern_asl_msg(LOG_DEBUG, "messagetracer", 3,
	    /* 0 */ "com.apple.message.domain", "com.apple.kernel.32bit_exec",
	    /* 1 */ "com.apple.message.signature", signature_buf,
	    /* 2 */ "com.apple.message.summarize", "YES",
	    NULL);
}
#endif /* CONFIG_32BIT_TELEMETRY */