2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
61 * @(#)kern_proc.c 8.4 (Berkeley) 1/4/94
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
70 * 04-Aug-97 Umesh Vaishampayan (umeshv@apple.com)
71 * Added current_proc_EXTERNAL() function for the use of kernel
74 * 05-Jun-95 Mac Gillon (mgillon) at NeXT
75 * New version based on 3.3NS and 4.4
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/kernel.h>
82 #include <sys/proc_internal.h>
85 #include <sys/file_internal.h>
87 #include <sys/malloc.h>
90 #include <sys/ioctl.h>
92 #include <sys/signalvar.h>
93 #include <sys/syslog.h>
94 #include <sys/sysctl.h>
95 #include <sys/sysproto.h>
96 #include <sys/kauth.h>
97 #include <sys/codesign.h>
98 #include <sys/kernel_types.h>
100 #include <kern/kalloc.h>
101 #include <kern/task.h>
102 #include <kern/assert.h>
103 #include <vm/vm_protos.h>
104 #include <vm/vm_map.h> /* vm_map_switch_protect() */
105 #include <mach/task.h>
106 #include <mach/message.h>
109 #include <security/mac_framework.h>
112 #include <libkern/crypto/sha1.h>
115 * Structure associated with user caching.
118 LIST_ENTRY(uidinfo
) ui_hash
;
122 #define UIHASH(uid) (&uihashtbl[(uid) & uihash])
123 LIST_HEAD(uihashhead
, uidinfo
) *uihashtbl
;
124 u_long uihash
; /* size of hash table - 1 */
127 * Other process lists
129 struct pidhashhead
*pidhashtbl
;
131 struct pgrphashhead
*pgrphashtbl
;
133 struct sesshashhead
*sesshashtbl
;
136 struct proclist allproc
;
137 struct proclist zombproc
;
138 extern struct tty cons
;
144 static pid_t lastlcid
= 1;
145 static int alllctx_cnt
;
147 #define LCID_MAX 8192 /* Does this really need to be large? */
148 static int maxlcid
= LCID_MAX
;
150 LIST_HEAD(lctxlist
, lctx
);
151 static struct lctxlist alllctx
;
153 lck_mtx_t alllctx_lock
;
154 lck_grp_t
* lctx_lck_grp
;
155 lck_grp_attr_t
* lctx_lck_grp_attr
;
156 lck_attr_t
* lctx_lck_attr
;
158 static void lctxinit(void);
161 int cs_debug
; /* declared further down in this file */
164 #define __PROC_INTERNAL_DEBUG 1
166 /* Name to give to core files */
167 __private_extern__
char corefilename
[MAXPATHLEN
+1] = {"/cores/core.%P"};
169 static void orphanpg(struct pgrp
*pg
);
170 void proc_name_kdp(task_t t
, char * buf
, int size
);
171 char *proc_name_address(void *p
);
173 static void pgrp_add(struct pgrp
* pgrp
, proc_t parent
, proc_t child
);
174 static void pgrp_remove(proc_t p
);
175 static void pgrp_replace(proc_t p
, struct pgrp
*pgrp
);
176 static void pgdelete_dropref(struct pgrp
*pgrp
);
177 extern void pg_rele_dropref(struct pgrp
* pgrp
);
178 static int csops_internal(pid_t pid
, int ops
, user_addr_t uaddr
, user_size_t usersize
, user_addr_t uaddittoken
);
180 struct fixjob_iterargs
{
182 struct session
* mysession
;
186 int fixjob_callback(proc_t
, void *);
189 * Initialize global process hashing structures.
195 LIST_INIT(&zombproc
);
196 pidhashtbl
= hashinit(maxproc
/ 4, M_PROC
, &pidhash
);
197 pgrphashtbl
= hashinit(maxproc
/ 4, M_PROC
, &pgrphash
);
198 sesshashtbl
= hashinit(maxproc
/ 4, M_PROC
, &sesshash
);
199 uihashtbl
= hashinit(maxproc
/ 16, M_PROC
, &uihash
);
206 * Change the count associated with number of processes
207 * a given user is using. This routine protects the uihash
211 chgproccnt(uid_t uid
, int diff
)
214 struct uidinfo
*newuip
= NULL
;
215 struct uihashhead
*uipp
;
221 for (uip
= uipp
->lh_first
; uip
!= 0; uip
= uip
->ui_hash
.le_next
)
222 if (uip
->ui_uid
== uid
)
225 uip
->ui_proccnt
+= diff
;
226 if (uip
->ui_proccnt
> 0) {
227 retval
= uip
->ui_proccnt
;
231 if (uip
->ui_proccnt
< 0)
232 panic("chgproccnt: procs < 0");
233 LIST_REMOVE(uip
, ui_hash
);
236 FREE_ZONE(uip
, sizeof(*uip
), M_PROC
);
245 panic("chgproccnt: lost user");
247 if (newuip
!= NULL
) {
250 LIST_INSERT_HEAD(uipp
, uip
, ui_hash
);
252 uip
->ui_proccnt
= diff
;
258 MALLOC_ZONE(newuip
, struct uidinfo
*, sizeof(*uip
), M_PROC
, M_WAITOK
);
260 panic("chgproccnt: M_PROC zone depleted");
264 FREE_ZONE(newuip
, sizeof(*uip
), M_PROC
);
269 * Is p an inferior of the current process?
277 for (; p
!= current_proc(); p
= p
->p_pptr
)
287 * Is p an inferior of t ?
290 isinferior(proc_t p
, proc_t t
)
296 /* if p==t they are not inferior */
301 for (; p
!= t
; p
= p
->p_pptr
) {
304 /* Detect here if we're in a cycle */
305 if ((p
->p_pid
== 0) || (p
->p_pptr
== start
) || (nchecked
>= nprocs
))
315 proc_isinferior(int pid1
, int pid2
)
317 proc_t p
= PROC_NULL
;
318 proc_t t
= PROC_NULL
;
321 if (((p
= proc_find(pid1
)) != (proc_t
)0 ) && ((t
= proc_find(pid2
)) != (proc_t
)0))
322 retval
= isinferior(p
, t
);
335 return(proc_findinternal(pid
, 0));
339 proc_findinternal(int pid
, int locked
)
341 proc_t p
= PROC_NULL
;
347 p
= pfind_locked(pid
);
348 if ((p
== PROC_NULL
) || (p
!= proc_ref_locked(p
)))
359 proc_findthread(thread_t thread
)
361 proc_t p
= PROC_NULL
;
365 uth
= get_bsdthread_info(thread
);
366 if (uth
&& (uth
->uu_flag
& UT_VFORK
))
369 p
= (proc_t
)(get_bsdthreadtask_info(thread
));
370 p
= proc_ref_locked(p
);
393 if (p
!= proc_ref_locked(p
))
401 proc_ref_locked(proc_t p
)
405 /* if process still in creation return failure */
406 if ((p
== PROC_NULL
) || ((p
->p_listflag
& P_LIST_INCREATE
) != 0))
408 /* do not return process marked for termination */
409 if ((p
->p_stat
!= SZOMB
) && ((p
->p_listflag
& P_LIST_EXITED
) == 0) && ((p
->p_listflag
& (P_LIST_DRAINWAIT
| P_LIST_DRAIN
| P_LIST_DEAD
)) == 0))
418 proc_rele_locked(proc_t p
)
421 if (p
->p_refcount
> 0) {
423 if ((p
->p_refcount
== 0) && ((p
->p_listflag
& P_LIST_DRAINWAIT
) == P_LIST_DRAINWAIT
)) {
424 p
->p_listflag
&= ~P_LIST_DRAINWAIT
;
425 wakeup(&p
->p_refcount
);
428 panic("proc_rele_locked -ve ref\n");
433 proc_find_zombref(int pid
)
435 proc_t p1
= PROC_NULL
;
436 proc_t p
= PROC_NULL
;
440 p
= pfind_locked(pid
);
442 /* if process still in creation return NULL */
443 if ((p
== PROC_NULL
) || ((p
->p_listflag
& P_LIST_INCREATE
) != 0)) {
448 /* if process has not started exit or is being reaped, return NULL */
449 if (((p
->p_listflag
& P_LIST_EXITED
) != 0) && ((p
->p_listflag
& P_LIST_WAITING
) == 0)) {
450 p
->p_listflag
|= P_LIST_WAITING
;
461 proc_drop_zombref(proc_t p
)
464 if ((p
->p_listflag
& P_LIST_WAITING
) == P_LIST_WAITING
) {
465 p
->p_listflag
&= ~P_LIST_WAITING
;
473 proc_refdrain(proc_t p
)
478 p
->p_listflag
|= P_LIST_DRAIN
;
479 while (p
->p_refcount
) {
480 p
->p_listflag
|= P_LIST_DRAINWAIT
;
481 msleep(&p
->p_refcount
, proc_list_mlock
, 0, "proc_refdrain", 0) ;
483 p
->p_listflag
&= ~P_LIST_DRAIN
;
484 p
->p_listflag
|= P_LIST_DEAD
;
492 proc_parentholdref(proc_t p
)
494 proc_t parent
= PROC_NULL
;
502 if ((pp
== PROC_NULL
) || (pp
->p_stat
== SZOMB
) || ((pp
->p_listflag
& (P_LIST_CHILDDRSTART
| P_LIST_CHILDDRAINED
)) == (P_LIST_CHILDDRSTART
| P_LIST_CHILDDRAINED
))) {
507 if ((pp
->p_listflag
& (P_LIST_CHILDDRSTART
| P_LIST_CHILDDRAINED
)) == P_LIST_CHILDDRSTART
) {
508 pp
->p_listflag
|= P_LIST_CHILDDRWAIT
;
509 msleep(&pp
->p_childrencnt
, proc_list_mlock
, 0, "proc_parent", 0);
518 if ((pp
->p_listflag
& (P_LIST_CHILDDRSTART
| P_LIST_CHILDDRAINED
)) == 0) {
529 proc_parentdropref(proc_t p
, int listlocked
)
534 if (p
->p_parentref
> 0) {
536 if ((p
->p_parentref
== 0) && ((p
->p_listflag
& P_LIST_PARENTREFWAIT
) == P_LIST_PARENTREFWAIT
)) {
537 p
->p_listflag
&= ~P_LIST_PARENTREFWAIT
;
538 wakeup(&p
->p_parentref
);
541 panic("proc_parentdropref -ve ref\n");
549 proc_childdrainstart(proc_t p
)
551 #if __PROC_INTERNAL_DEBUG
552 if ((p
->p_listflag
& P_LIST_CHILDDRSTART
) == P_LIST_CHILDDRSTART
)
553 panic("proc_childdrainstart: childdrain already started\n");
555 p
->p_listflag
|= P_LIST_CHILDDRSTART
;
556 /* wait for all that hold parentrefs to drop */
557 while (p
->p_parentref
> 0) {
558 p
->p_listflag
|= P_LIST_PARENTREFWAIT
;
559 msleep(&p
->p_parentref
, proc_list_mlock
, 0, "proc_childdrainstart", 0) ;
565 proc_childdrainend(proc_t p
)
567 #if __PROC_INTERNAL_DEBUG
568 if (p
->p_childrencnt
> 0)
569 panic("exiting: children stil hanging around\n");
571 p
->p_listflag
|= P_LIST_CHILDDRAINED
;
572 if ((p
->p_listflag
& (P_LIST_CHILDLKWAIT
|P_LIST_CHILDDRWAIT
)) != 0) {
573 p
->p_listflag
&= ~(P_LIST_CHILDLKWAIT
|P_LIST_CHILDDRWAIT
);
574 wakeup(&p
->p_childrencnt
);
579 proc_checkdeadrefs(__unused proc_t p
)
581 #if __PROC_INTERNAL_DEBUG
582 if ((p
->p_listflag
& P_LIST_INHASH
) != 0)
583 panic("proc being freed and still in hash %p: %u\n", p
, p
->p_listflag
);
584 if (p
->p_childrencnt
!= 0)
585 panic("proc being freed and pending children cnt %p:%d\n", p
, p
->p_childrencnt
);
586 if (p
->p_refcount
!= 0)
587 panic("proc being freed and pending refcount %p:%d\n", p
, p
->p_refcount
);
588 if (p
->p_parentref
!= 0)
589 panic("proc being freed and pending parentrefs %p:%d\n", p
, p
->p_parentref
);
608 proc_t p
= current_proc();
615 proc_t p
= current_proc();
620 proc_parent(proc_t p
)
628 parent
= proc_ref_locked(pp
);
629 if ((parent
== PROC_NULL
) && (pp
!= PROC_NULL
) && (pp
->p_stat
!= SZOMB
) && ((pp
->p_listflag
& P_LIST_EXITED
) != 0) && ((pp
->p_listflag
& P_LIST_CHILDDRAINED
)== 0)){
630 pp
->p_listflag
|= P_LIST_CHILDLKWAIT
;
631 msleep(&pp
->p_childrencnt
, proc_list_mlock
, 0, "proc_parent", 0);
640 proc_name(int pid
, char * buf
, int size
)
644 if ((p
= proc_find(pid
)) != PROC_NULL
) {
645 strlcpy(buf
, &p
->p_comm
[0], size
);
651 proc_name_kdp(task_t t
, char * buf
, int size
)
653 proc_t p
= get_bsdtask_info(t
);
656 strlcpy(buf
, &p
->p_comm
[0], size
);
660 proc_name_address(void *p
)
662 return &((proc_t
)p
)->p_comm
[0];
666 proc_selfname(char * buf
, int size
)
670 if ((p
= current_proc())!= (proc_t
)0) {
671 strlcpy(buf
, &p
->p_comm
[0], size
);
676 proc_signal(int pid
, int signum
)
680 if ((p
= proc_find(pid
)) != PROC_NULL
) {
687 proc_issignal(int pid
, sigset_t mask
)
692 if ((p
= proc_find(pid
)) != PROC_NULL
) {
693 error
= proc_pendingsignals(p
, mask
);
701 proc_noremotehang(proc_t p
)
706 retval
= p
->p_flag
& P_NOREMOTEHANG
;
707 return(retval
? 1: 0);
712 proc_exiting(proc_t p
)
717 retval
= p
->p_lflag
& P_LEXIT
;
718 return(retval
? 1: 0);
722 proc_forcequota(proc_t p
)
727 retval
= p
->p_flag
& P_FORCEQUOTA
;
728 return(retval
? 1: 0);
738 retval
= p
->p_flag
& P_TBE
;
739 return(retval
? 1: 0);
746 kauth_cred_t my_cred
;
749 my_cred
= kauth_cred_proc_ref(p
);
750 error
= suser(my_cred
, &p
->p_acflag
);
751 kauth_cred_unref(&my_cred
);
756 proc_task(proc_t proc
)
758 return (task_t
)proc
->task
;
762 * Obtain the first thread in a process
764 * XXX This is a bad thing to do; it exists predominantly to support the
765 * XXX use of proc_t's in places that should really be using
766 * XXX thread_t's instead. This maintains historical behaviour, but really
767 * XXX needs an audit of the context (proxy vs. not) to clean up.
770 proc_thread(proc_t proc
)
772 uthread_t uth
= TAILQ_FIRST(&proc
->p_uthlist
);
775 return(uth
->uu_context
.vc_thread
);
789 thread_t th
= current_thread();
791 return((struct uthread
*)get_bsdthread_info(th
));
796 proc_is64bit(proc_t p
)
798 return(IS_64BIT_PROCESS(p
));
802 proc_pidversion(proc_t p
)
804 return(p
->p_idversion
);
808 proc_uniqueid(proc_t p
)
810 return(p
->p_uniqueid
);
814 proc_selfuniqueid(void)
816 proc_t p
= current_proc();
817 return(p
->p_uniqueid
);
821 proc_getcdhash(proc_t p
, unsigned char *cdhash
)
823 return vn_getcdhash(p
->p_textvp
, p
->p_textoff
, cdhash
);
827 proc_getexecutableuuid(proc_t p
, unsigned char *uuidbuf
, unsigned long size
)
829 if (size
>= sizeof(p
->p_uuid
)) {
830 memcpy(uuidbuf
, p
->p_uuid
, sizeof(p
->p_uuid
));
836 bsd_set_dependency_capable(task_t task
)
838 proc_t p
= get_bsdtask_info(task
);
841 OSBitOrAtomic(P_DEPENDENCY_CAPABLE
, &p
->p_flag
);
847 IS_64BIT_PROCESS(proc_t p
)
849 if (p
&& (p
->p_flag
& P_LP64
))
856 * Locate a process by number
859 pfind_locked(pid_t pid
)
869 for (p
= PIDHASH(pid
)->lh_first
; p
!= 0; p
= p
->p_hash
.le_next
) {
870 if (p
->p_pid
== pid
) {
872 for (q
= p
->p_hash
.le_next
; q
!= 0; q
= q
->p_hash
.le_next
) {
873 if ((p
!=q
) && (q
->p_pid
== pid
))
874 panic("two procs with same pid %p:%p:%d:%d\n", p
, q
, p
->p_pid
, q
->p_pid
);
884 * Locate a zombie by PID
886 __private_extern__ proc_t
894 for (p
= zombproc
.lh_first
; p
!= 0; p
= p
->p_list
.le_next
)
904 * Locate a process group by number
913 pgrp
= pgfind_internal(pgid
);
914 if ((pgrp
== NULL
) || ((pgrp
->pg_listflags
& PGRP_FLAG_TERMINATE
) != 0))
925 pgfind_internal(pid_t pgid
)
929 for (pgrp
= PGRPHASH(pgid
)->lh_first
; pgrp
!= 0; pgrp
= pgrp
->pg_hash
.le_next
)
930 if (pgrp
->pg_id
== pgid
)
936 pg_rele(struct pgrp
* pgrp
)
938 if(pgrp
== PGRP_NULL
)
940 pg_rele_dropref(pgrp
);
944 pg_rele_dropref(struct pgrp
* pgrp
)
947 if ((pgrp
->pg_refcount
== 1) && ((pgrp
->pg_listflags
& PGRP_FLAG_TERMINATE
) == PGRP_FLAG_TERMINATE
)) {
949 pgdelete_dropref(pgrp
);
958 session_find_internal(pid_t sessid
)
960 struct session
*sess
;
962 for (sess
= SESSHASH(sessid
)->lh_first
; sess
!= 0; sess
= sess
->s_hash
.le_next
)
963 if (sess
->s_sid
== sessid
)
970 * Make a new process ready to become a useful member of society by making it
971 * visible in all the right places and initialize its own lists to empty.
973 * Parameters: parent The parent of the process to insert
974 * child The child process to insert
978 * Notes: Insert a child process into the parents process group, assign
979 * the child the parent process pointer and PPID of the parent,
980 * place it on the parents p_children list as a sibling,
981 * initialize its own child list, place it in the allproc list,
982 * insert it in the proper hash bucket, and initialize its
986 pinsertchild(proc_t parent
, proc_t child
)
990 LIST_INIT(&child
->p_children
);
991 TAILQ_INIT(&child
->p_evlist
);
992 child
->p_pptr
= parent
;
993 child
->p_ppid
= parent
->p_pid
;
995 pg
= proc_pgrp(parent
);
996 pgrp_add(pg
, parent
, child
);
1000 parent
->p_childrencnt
++;
1001 LIST_INSERT_HEAD(&parent
->p_children
, child
, p_sibling
);
1003 LIST_INSERT_HEAD(&allproc
, child
, p_list
);
1004 /* mark the completion of proc creation */
1005 child
->p_listflag
&= ~P_LIST_INCREATE
;
1012 * Move p to a new or existing process group (and session)
1014 * Returns: 0 Success
1015 * ESRCH No such process
1018 enterpgrp(proc_t p
, pid_t pgid
, int mksess
)
1021 struct pgrp
*mypgrp
;
1022 struct session
* procsp
;
1024 pgrp
= pgfind(pgid
);
1025 mypgrp
= proc_pgrp(p
);
1026 procsp
= proc_session(p
);
1029 if (pgrp
!= NULL
&& mksess
) /* firewalls */
1030 panic("enterpgrp: setsid into non-empty pgrp");
1031 if (SESS_LEADER(p
, procsp
))
1032 panic("enterpgrp: session leader attempted setpgrp");
1034 if (pgrp
== PGRP_NULL
) {
1035 pid_t savepid
= p
->p_pid
;
1036 proc_t np
= PROC_NULL
;
1041 if (p
->p_pid
!= pgid
)
1042 panic("enterpgrp: new pgrp and pid != pgid");
1044 MALLOC_ZONE(pgrp
, struct pgrp
*, sizeof(struct pgrp
), M_PGRP
,
1047 panic("enterpgrp: M_PGRP zone depleted");
1048 if ((np
= proc_find(savepid
)) == NULL
|| np
!= p
) {
1049 if (np
!= PROC_NULL
)
1051 if (mypgrp
!= PGRP_NULL
)
1053 if (procsp
!= SESSION_NULL
)
1054 session_rele(procsp
);
1055 FREE_ZONE(pgrp
, sizeof(struct pgrp
), M_PGRP
);
1060 struct session
*sess
;
1065 MALLOC_ZONE(sess
, struct session
*,
1066 sizeof(struct session
), M_SESSION
, M_WAITOK
);
1068 panic("enterpgrp: M_SESSION zone depleted");
1070 sess
->s_sid
= p
->p_pid
;
1072 sess
->s_ttyvp
= NULL
;
1073 sess
->s_ttyp
= TTY_NULL
;
1075 sess
->s_listflags
= 0;
1076 sess
->s_ttypgrpid
= NO_PID
;
1077 #if CONFIG_FINE_LOCK_GROUPS
1078 lck_mtx_init(&sess
->s_mlock
, proc_mlock_grp
, proc_lck_attr
);
1080 lck_mtx_init(&sess
->s_mlock
, proc_lck_grp
, proc_lck_attr
);
1082 bcopy(procsp
->s_login
, sess
->s_login
,
1083 sizeof(sess
->s_login
));
1084 OSBitAndAtomic(~((uint32_t)P_CONTROLT
), &p
->p_flag
);
1086 LIST_INSERT_HEAD(SESSHASH(sess
->s_sid
), sess
, s_hash
);
1088 pgrp
->pg_session
= sess
;
1090 if (p
!= current_proc())
1091 panic("enterpgrp: mksession and p != curproc");
1095 pgrp
->pg_session
= procsp
;
1097 if ((pgrp
->pg_session
->s_listflags
& (S_LIST_TERM
| S_LIST_DEAD
)) != 0)
1098 panic("enterpgrp: providing ref to terminating session ");
1099 pgrp
->pg_session
->s_count
++;
1103 #if CONFIG_FINE_LOCK_GROUPS
1104 lck_mtx_init(&pgrp
->pg_mlock
, proc_mlock_grp
, proc_lck_attr
);
1106 lck_mtx_init(&pgrp
->pg_mlock
, proc_lck_grp
, proc_lck_attr
);
1108 LIST_INIT(&pgrp
->pg_members
);
1109 pgrp
->pg_membercnt
= 0;
1112 pgrp
->pg_refcount
= 1;
1113 pgrp
->pg_listflags
= 0;
1114 LIST_INSERT_HEAD(PGRPHASH(pgid
), pgrp
, pg_hash
);
1116 } else if (pgrp
== mypgrp
) {
1120 if (procsp
!= SESSION_NULL
)
1121 session_rele(procsp
);
1125 if (procsp
!= SESSION_NULL
)
1126 session_rele(procsp
);
1128 * Adjust eligibility of affected pgrps to participate in job control.
1129 * Increment eligibility counts before decrementing, otherwise we
1130 * could reach 0 spuriously during the first call.
1132 fixjobc(p
, pgrp
, 1);
1133 fixjobc(p
, mypgrp
, 0);
1135 if(mypgrp
!= PGRP_NULL
)
1137 pgrp_replace(p
, pgrp
);
1144 * remove process from process group
1155 * delete a process group
1158 pgdelete_dropref(struct pgrp
*pgrp
)
1162 struct session
*sessp
;
1166 if (pgrp
->pg_membercnt
!= 0) {
1172 pgrp
->pg_refcount
--;
1173 if ((emptypgrp
== 0) || (pgrp
->pg_membercnt
!= 0)) {
1178 pgrp
->pg_listflags
|= PGRP_FLAG_TERMINATE
;
1180 if (pgrp
->pg_refcount
> 0) {
1185 pgrp
->pg_listflags
|= PGRP_FLAG_DEAD
;
1186 LIST_REMOVE(pgrp
, pg_hash
);
1190 ttyp
= SESSION_TP(pgrp
->pg_session
);
1191 if (ttyp
!= TTY_NULL
) {
1192 if (ttyp
->t_pgrp
== pgrp
) {
1194 /* Re-check after acquiring the lock */
1195 if (ttyp
->t_pgrp
== pgrp
) {
1196 ttyp
->t_pgrp
= NULL
;
1197 pgrp
->pg_session
->s_ttypgrpid
= NO_PID
;
1205 sessp
= pgrp
->pg_session
;
1206 if ((sessp
->s_listflags
& (S_LIST_TERM
| S_LIST_DEAD
)) != 0)
1207 panic("pg_deleteref: manipulating refs of already terminating session");
1208 if (--sessp
->s_count
== 0) {
1209 if ((sessp
->s_listflags
& (S_LIST_TERM
| S_LIST_DEAD
)) != 0)
1210 panic("pg_deleteref: terminating already terminated session");
1211 sessp
->s_listflags
|= S_LIST_TERM
;
1212 ttyp
= SESSION_TP(sessp
);
1213 LIST_REMOVE(sessp
, s_hash
);
1215 if (ttyp
!= TTY_NULL
) {
1217 if (ttyp
->t_session
== sessp
)
1218 ttyp
->t_session
= NULL
;
1222 sessp
->s_listflags
|= S_LIST_DEAD
;
1223 if (sessp
->s_count
!= 0)
1224 panic("pg_deleteref: freeing session in use");
1226 #if CONFIG_FINE_LOCK_GROUPS
1227 lck_mtx_destroy(&sessp
->s_mlock
, proc_mlock_grp
);
1229 lck_mtx_destroy(&sessp
->s_mlock
, proc_lck_grp
);
1231 FREE_ZONE(sessp
, sizeof(struct session
), M_SESSION
);
1234 #if CONFIG_FINE_LOCK_GROUPS
1235 lck_mtx_destroy(&pgrp
->pg_mlock
, proc_mlock_grp
);
1237 lck_mtx_destroy(&pgrp
->pg_mlock
, proc_lck_grp
);
1239 FREE_ZONE(pgrp
, sizeof(*pgrp
), M_PGRP
);
1244 * Adjust pgrp jobc counters when specified process changes process group.
1245 * We count the number of processes in each process group that "qualify"
1246 * the group for terminal job control (those with a parent in a different
1247 * process group of the same session). If that count reaches zero, the
1248 * process group becomes orphaned. Check both the specified process'
1249 * process group and that of its children.
1250 * entering == 0 => p is leaving specified group.
1251 * entering == 1 => p is entering specified group.
1254 fixjob_callback(proc_t p
, void * arg
)
1256 struct fixjob_iterargs
*fp
;
1257 struct pgrp
* pg
, *hispg
;
1258 struct session
* mysession
, *hissess
;
1261 fp
= (struct fixjob_iterargs
*)arg
;
1263 mysession
= fp
->mysession
;
1264 entering
= fp
->entering
;
1266 hispg
= proc_pgrp(p
);
1267 hissess
= proc_session(p
);
1269 if ((hispg
!= pg
) &&
1270 (hissess
== mysession
)) {
1275 } else if (--hispg
->pg_jobc
== 0) {
1281 if (hissess
!= SESSION_NULL
)
1282 session_rele(hissess
);
1283 if (hispg
!= PGRP_NULL
)
1286 return(PROC_RETURNED
);
1290 fixjobc(proc_t p
, struct pgrp
*pgrp
, int entering
)
1292 struct pgrp
*hispgrp
= PGRP_NULL
;
1293 struct session
*hissess
= SESSION_NULL
;
1294 struct session
*mysession
= pgrp
->pg_session
;
1296 struct fixjob_iterargs fjarg
;
1298 parent
= proc_parent(p
);
1299 if (parent
!= PROC_NULL
) {
1300 hispgrp
= proc_pgrp(parent
);
1301 hissess
= proc_session(parent
);
1307 * Check p's parent to see whether p qualifies its own process
1308 * group; if so, adjust count for p's process group.
1310 if ((hispgrp
!= pgrp
) &&
1311 (hissess
== mysession
)) {
1316 }else if (--pgrp
->pg_jobc
== 0) {
1323 if (hissess
!= SESSION_NULL
)
1324 session_rele(hissess
);
1325 if (hispgrp
!= PGRP_NULL
)
1329 * Check this process' children to see whether they qualify
1330 * their process groups; if so, adjust counts for children's
1334 fjarg
.mysession
= mysession
;
1335 fjarg
.entering
= entering
;
1336 proc_childrenwalk(p
, fixjob_callback
, &fjarg
);
1340 * A process group has become orphaned;
1341 * if there are any stopped processes in the group,
1342 * hang-up all process in that group.
1345 orphanpg(struct pgrp
* pgrp
)
1349 int count
, pidcount
, i
, alloc_count
;
1351 if (pgrp
== PGRP_NULL
)
1355 for (p
= pgrp
->pg_members
.lh_first
; p
!= 0; p
= p
->p_pglist
.le_next
) {
1356 if (p
->p_stat
== SSTOP
) {
1357 for (p
= pgrp
->pg_members
.lh_first
; p
!= 0;
1358 p
= p
->p_pglist
.le_next
)
1360 break; /* ??? stops after finding one.. */
1366 if (count
> hard_maxproc
)
1367 count
= hard_maxproc
;
1368 alloc_count
= count
* sizeof(pid_t
);
1369 pid_list
= (pid_t
*)kalloc(alloc_count
);
1370 bzero(pid_list
, alloc_count
);
1374 for (p
= pgrp
->pg_members
.lh_first
; p
!= 0;
1375 p
= p
->p_pglist
.le_next
) {
1376 if (p
->p_stat
== SSTOP
) {
1377 for (p
= pgrp
->pg_members
.lh_first
; p
!= 0;
1378 p
= p
->p_pglist
.le_next
) {
1379 pid_list
[pidcount
] = p
->p_pid
;
1381 if (pidcount
>= count
)
1384 break; /* ??? stops after finding one.. */
1393 for (i
= 0; i
< pidcount
; i
++) {
1394 /* No handling for proc0 */
1395 if (pid_list
[i
] == 0)
1397 p
= proc_find(pid_list
[i
]);
1399 proc_transwait(p
, 0);
1402 psignal(p
, SIGCONT
);
1407 kfree(pid_list
, alloc_count
);
1413 /* XXX should be __private_extern__ */
1415 proc_is_classic(proc_t p
)
1417 return (p
->p_flag
& P_TRANSLATED
) ? 1 : 0;
1420 /* XXX Why does this function exist? Need to kill it off... */
1422 current_proc_EXTERNAL(void)
1424 return (current_proc());
1428 * proc_core_name(name, uid, pid)
1429 * Expand the name described in corefilename, using name, uid, and pid.
1430 * corefilename is a printf-like string, with three format specifiers:
1431 * %N name of process ("name")
1432 * %P process id (pid)
1434 * For example, "%N.core" is the default; they can be disabled completely
1435 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
1436 * This is controlled by the sysctl variable kern.corefile (see above).
1438 __private_extern__
int
1439 proc_core_name(const char *name
, uid_t uid
, pid_t pid
, char *cf_name
,
1442 const char *format
, *appendstr
;
1443 char id_buf
[11]; /* Buffer for pid/uid -- max 4B */
1446 if (cf_name
== NULL
)
1449 format
= corefilename
;
1450 for (i
= 0, n
= 0; n
< cf_name_len
&& format
[i
]; i
++) {
1451 switch (format
[i
]) {
1452 case '%': /* Format character */
1454 switch (format
[i
]) {
1458 case 'N': /* process name */
1461 case 'P': /* process id */
1462 snprintf(id_buf
, sizeof(id_buf
), "%u", pid
);
1465 case 'U': /* user id */
1466 snprintf(id_buf
, sizeof(id_buf
), "%u", uid
);
1472 "Unknown format character %c in `%s'\n",
1475 l
= strlen(appendstr
);
1476 if ((n
+ l
) >= cf_name_len
)
1478 bcopy(appendstr
, cf_name
+ n
, l
);
1482 cf_name
[n
++] = format
[i
];
1485 if (format
[i
] != '\0')
1489 log(LOG_ERR
, "pid %ld (%s), uid (%u): corename is too long\n",
1490 (long)pid
, name
, (uint32_t)uid
);
1499 LIST_INIT(&alllctx
);
1502 /* allocate lctx lock group attribute and group */
1503 lctx_lck_grp_attr
= lck_grp_attr_alloc_init();
1504 lck_grp_attr_setstat(lctx_lck_grp_attr
);
1506 lctx_lck_grp
= lck_grp_alloc_init("lctx", lctx_lck_grp_attr
);
1507 /* Allocate lctx lock attribute */
1508 lctx_lck_attr
= lck_attr_alloc_init();
1510 lck_mtx_init(&alllctx_lock
, lctx_lck_grp
, lctx_lck_attr
);
1514 * Locate login context by number.
1522 LIST_FOREACH(l
, &alllctx
, lc_list
) {
1523 if (l
->lc_id
== lcid
) {
1535 if (lastlcid > maxlcid) \
1545 /* Not very efficient but this isn't a common operation. */
1546 while ((l
= lcfind(lastlcid
)) != NULL
) {
1553 MALLOC(l
, struct lctx
*, sizeof(struct lctx
), M_LCTX
, M_WAITOK
|M_ZERO
);
1555 LIST_INIT(&l
->lc_members
);
1556 lck_mtx_init(&l
->lc_mtx
, lctx_lck_grp
, lctx_lck_attr
);
1558 l
->lc_label
= mac_lctx_label_alloc();
1561 LIST_INSERT_HEAD(&alllctx
, l
, lc_list
);
1569 * Call with proc protected (either by being invisible
1570 * or by having the all-login-context lock held) and
1573 * Will unlock lctx on return.
1576 enterlctx (proc_t p
, struct lctx
*l
, __unused
int create
)
1582 LIST_INSERT_HEAD(&l
->lc_members
, p
, p_lclist
);
1587 mac_lctx_notify_create(p
, l
);
1589 mac_lctx_notify_join(p
, l
);
1597 * Remove process from login context (if any). Called with p protected by
1601 leavelctx (proc_t p
)
1605 if (p
->p_lctx
== NULL
) {
1609 LCTX_LOCK(p
->p_lctx
);
1612 LIST_REMOVE(p
, p_lclist
);
1615 mac_lctx_notify_leave(p
, l
);
1617 if (LIST_EMPTY(&l
->lc_members
)) {
1618 LIST_REMOVE(l
, lc_list
);
1621 lck_mtx_destroy(&l
->lc_mtx
, lctx_lck_grp
);
1623 mac_lctx_label_free(l
->lc_label
);
1634 sysctl_kern_lctx SYSCTL_HANDLER_ARGS
1636 int *name
= (int*) arg1
;
1637 u_int namelen
= arg2
;
1638 struct kinfo_lctx kil
;
1644 switch (oidp
->oid_number
) {
1647 /* Request for size. */
1649 error
= SYSCTL_OUT(req
, 0,
1650 sizeof(struct kinfo_lctx
) * (alllctx_cnt
+ 1));
1655 case KERN_LCTX_LCID
:
1657 if (req
->oldlen
< sizeof(struct kinfo_lctx
))
1662 /* No login context */
1663 l
= lcfind((pid_t
)name
[0]);
1669 return (SYSCTL_OUT(req
, (caddr_t
)&kil
, sizeof(kil
)));
1675 /* Provided buffer is too small. */
1676 if (req
->oldlen
< (sizeof(struct kinfo_lctx
) * alllctx_cnt
)) {
1681 LIST_FOREACH(l
, &alllctx
, lc_list
) {
1686 error
= SYSCTL_OUT(req
, (caddr_t
)&kil
, sizeof(kil
));
1696 SYSCTL_NODE(_kern
, KERN_LCTX
, lctx
, CTLFLAG_RW
|CTLFLAG_LOCKED
, 0, "Login Context");
1698 SYSCTL_PROC(_kern_lctx
, KERN_LCTX_ALL
, all
, CTLFLAG_RD
|CTLTYPE_STRUCT
| CTLFLAG_LOCKED
,
1699 0, 0, sysctl_kern_lctx
, "S,lctx",
1700 "Return entire login context table");
1701 SYSCTL_NODE(_kern_lctx
, KERN_LCTX_LCID
, lcid
, CTLFLAG_RD
| CTLFLAG_LOCKED
,
1702 sysctl_kern_lctx
, "Login Context Table");
1703 SYSCTL_INT(_kern_lctx
, OID_AUTO
, last
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &lastlcid
, 0, "");
1704 SYSCTL_INT(_kern_lctx
, OID_AUTO
, count
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &alllctx_cnt
, 0, "");
1705 SYSCTL_INT(_kern_lctx
, OID_AUTO
, max
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &maxlcid
, 0, "");
1709 /* Code Signing related routines */
1712 csops(__unused proc_t p
, struct csops_args
*uap
, __unused
int32_t *retval
)
1714 return(csops_internal(uap
->pid
, uap
->ops
, uap
->useraddr
,
1715 uap
->usersize
, USER_ADDR_NULL
));
1719 csops_audittoken(__unused proc_t p
, struct csops_audittoken_args
*uap
, __unused
int32_t *retval
)
1721 if (uap
->uaudittoken
== USER_ADDR_NULL
)
1724 case CS_OPS_PIDPATH
:
1725 case CS_OPS_ENTITLEMENTS_BLOB
:
1731 return(csops_internal(uap
->pid
, uap
->ops
, uap
->useraddr
,
1732 uap
->usersize
, uap
->uaudittoken
));
1736 csops_internal(pid_t pid
, int ops
, user_addr_t uaddr
, user_size_t usersize
, user_addr_t uaudittoken
)
1738 size_t usize
= (size_t)CAST_DOWN(size_t, usersize
);
1746 unsigned char cdhash
[SHA1_RESULTLEN
];
1747 audit_token_t token
;
1748 unsigned int upid
=0, uidversion
= 0;
1750 forself
= error
= 0;
1753 pid
= proc_selfpid();
1754 if (pid
== proc_selfpid())
1758 /* Pre flight checks for CS_OPS_PIDPATH */
1759 if (ops
== CS_OPS_PIDPATH
) {
1760 /* usize is unsigned.. */
1761 if (usize
> 4 * PATH_MAX
)
1763 if (kauth_cred_issuser(kauth_cred_get()) != TRUE
)
1769 case CS_OPS_PIDOFFSET
:
1770 case CS_OPS_ENTITLEMENTS_BLOB
:
1771 break; /* unrestricted */
1773 if (forself
== 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE
)
1779 pt
= proc_find(pid
);
1780 if (pt
== PROC_NULL
)
1784 uidversion
= pt
->p_idversion
;
1785 if (uaudittoken
!= USER_ADDR_NULL
) {
1787 error
= copyin(uaudittoken
, &token
, sizeof(audit_token_t
));
1790 /* verify the audit token pid/idversion matches with proc */
1791 if ((token
.val
[5] != upid
) || (token
.val
[7] != uidversion
)) {
1800 retflags
= pt
->p_csflags
;
1801 if (uaddr
!= USER_ADDR_NULL
)
1802 error
= copyout(&retflags
, uaddr
, sizeof(uint32_t));
1805 case CS_OPS_MARKINVALID
:
1807 if ((pt
->p_csflags
& CS_VALID
) == CS_VALID
) { /* is currently valid */
1808 pt
->p_csflags
&= ~CS_VALID
; /* set invalid */
1809 if ((pt
->p_csflags
& CS_KILL
) == CS_KILL
) {
1810 pt
->p_csflags
|= CS_KILLED
;
1813 printf("CODE SIGNING: marked invalid by pid %d: "
1814 "p=%d[%s] honoring CS_KILL, final status 0x%x\n",
1815 proc_selfpid(), pt
->p_pid
, pt
->p_comm
, pt
->p_csflags
);
1817 psignal(pt
, SIGKILL
);
1825 case CS_OPS_MARKHARD
:
1827 pt
->p_csflags
|= CS_HARD
;
1828 if ((pt
->p_csflags
& CS_VALID
) == 0) {
1829 /* @@@ allow? reject? kill? @@@ */
1837 case CS_OPS_MARKKILL
:
1839 pt
->p_csflags
|= CS_KILL
;
1840 if ((pt
->p_csflags
& CS_VALID
) == 0) {
1842 psignal(pt
, SIGKILL
);
1847 case CS_OPS_PIDPATH
:
1849 vid
= vnode_vid(tvp
);
1851 if (tvp
== NULLVP
) {
1856 buf
= (char *)kalloc(usize
);
1863 error
= vnode_getwithvid(tvp
, vid
);
1867 error
= vn_getpath(tvp
, buf
, &len
);
1870 error
= copyout(buf
, uaddr
, usize
);
1879 case CS_OPS_PIDOFFSET
:
1880 toff
= pt
->p_textoff
;
1882 error
= copyout(&toff
, uaddr
, sizeof(toff
));
1887 /* pt already holds a reference on its p_textvp */
1889 toff
= pt
->p_textoff
;
1891 if (tvp
== NULLVP
|| usize
!= SHA1_RESULTLEN
) {
1896 error
= vn_getcdhash(tvp
, toff
, cdhash
);
1900 error
= copyout(cdhash
, uaddr
, sizeof (cdhash
));
1905 case CS_OPS_ENTITLEMENTS_BLOB
: {
1906 char fakeheader
[8] = { 0 };
1910 if ((pt
->p_csflags
& CS_VALID
) == 0) {
1914 if (usize
< sizeof(fakeheader
)) {
1918 if (0 != (error
= cs_entitlements_blob_get(pt
,
1921 /* if no entitlement, fill in zero header */
1922 if (NULL
== start
) {
1924 length
= sizeof(fakeheader
);
1925 } else if (usize
< length
) {
1926 /* ... if input too short, copy out length of entitlement */
1927 uint32_t length32
= htonl((uint32_t)length
);
1928 memcpy(&fakeheader
[4], &length32
, sizeof(length32
));
1930 error
= copyout(fakeheader
, uaddr
, sizeof(fakeheader
));
1932 error
= ERANGE
; /* input buffer to short, ERANGE signals that */
1935 error
= copyout(start
, uaddr
, length
);
1939 case CS_OPS_MARKRESTRICT
:
1941 pt
->p_csflags
|= CS_RESTRICT
;
1955 proc_iterate(flags
, callout
, arg
, filterfn
, filterarg
)
1957 int (*callout
)(proc_t
, void *);
1959 int (*filterfn
)(proc_t
, void *);
1964 int count
, pidcount
, alloc_count
, i
, retval
;
1967 if (count
> hard_maxproc
)
1968 count
= hard_maxproc
;
1969 alloc_count
= count
* sizeof(pid_t
);
1970 pid_list
= (pid_t
*)kalloc(alloc_count
);
1971 bzero(pid_list
, alloc_count
);
1978 if (flags
& PROC_ALLPROCLIST
) {
1979 for (p
= allproc
.lh_first
; (p
!= 0); p
= p
->p_list
.le_next
) {
1980 if (p
->p_stat
== SIDL
)
1982 if ( (filterfn
== 0 ) || (filterfn(p
, filterarg
) != 0)) {
1983 pid_list
[pidcount
] = p
->p_pid
;
1985 if (pidcount
>= count
)
1990 if ((pidcount
< count
) && (flags
& PROC_ZOMBPROCLIST
)) {
1991 for (p
= zombproc
.lh_first
; p
!= 0; p
= p
->p_list
.le_next
) {
1992 if ( (filterfn
== 0 ) || (filterfn(p
, filterarg
) != 0)) {
1993 pid_list
[pidcount
] = p
->p_pid
;
1995 if (pidcount
>= count
)
2005 for (i
= 0; i
< pidcount
; i
++) {
2006 p
= proc_find(pid_list
[i
]);
2008 if ((flags
& PROC_NOWAITTRANS
) == 0)
2009 proc_transwait(p
, 0);
2010 retval
= callout(p
, arg
);
2014 case PROC_RETURNED_DONE
:
2016 if (retval
== PROC_RETURNED_DONE
) {
2021 case PROC_CLAIMED_DONE
:
2027 } else if (flags
& PROC_ZOMBPROCLIST
) {
2028 p
= proc_find_zombref(pid_list
[i
]);
2029 if (p
!= PROC_NULL
) {
2030 retval
= callout(p
, arg
);
2034 case PROC_RETURNED_DONE
:
2035 proc_drop_zombref(p
);
2036 if (retval
== PROC_RETURNED_DONE
) {
2041 case PROC_CLAIMED_DONE
:
2052 kfree(pid_list
, alloc_count
);
2059 /* This is for iteration in case of trivial non blocking callouts */
2061 proc_scanall(flags
, callout
, arg
)
2063 int (*callout
)(proc_t
, void *);
2073 if (flags
& PROC_ALLPROCLIST
) {
2074 for (p
= allproc
.lh_first
; (p
!= 0); p
= p
->p_list
.le_next
) {
2075 retval
= callout(p
, arg
);
2076 if (retval
== PROC_RETURNED_DONE
)
2080 if (flags
& PROC_ZOMBPROCLIST
) {
2081 for (p
= zombproc
.lh_first
; p
!= 0; p
= p
->p_list
.le_next
) {
2082 retval
= callout(p
, arg
);
2083 if (retval
== PROC_RETURNED_DONE
)
2097 proc_rebootscan(callout
, arg
, filterfn
, filterarg
)
2098 int (*callout
)(proc_t
, void *);
2100 int (*filterfn
)(proc_t
, void *);
2104 int lockheld
= 0, retval
;
2106 proc_shutdown_exitcount
= 0;
2114 for (p
= allproc
.lh_first
; (p
!= 0); p
= p
->p_list
.le_next
) {
2115 if ( (filterfn
== 0 ) || (filterfn(p
, filterarg
) != 0)) {
2116 p
= proc_ref_locked(p
);
2122 proc_transwait(p
, 0);
2123 retval
= callout(p
, arg
);
2127 case PROC_RETURNED_DONE
:
2128 case PROC_CLAIMED_DONE
:
2132 goto ps_allprocscan
;
2134 } /* allproc walk thru */
2136 if (lockheld
== 1) {
2148 proc_childrenwalk(parent
, callout
, arg
)
2149 struct proc
* parent
;
2150 int (*callout
)(proc_t
, void *);
2153 register struct proc
*p
;
2155 int count
, pidcount
, alloc_count
, i
, retval
;
2158 if (count
> hard_maxproc
)
2159 count
= hard_maxproc
;
2160 alloc_count
= count
* sizeof(pid_t
);
2161 pid_list
= (pid_t
*)kalloc(alloc_count
);
2162 bzero(pid_list
, alloc_count
);
2169 for (p
= parent
->p_children
.lh_first
; (p
!= 0); p
= p
->p_sibling
.le_next
) {
2170 if (p
->p_stat
== SIDL
)
2172 pid_list
[pidcount
] = p
->p_pid
;
2174 if (pidcount
>= count
)
2180 for (i
= 0; i
< pidcount
; i
++) {
2181 p
= proc_find(pid_list
[i
]);
2183 proc_transwait(p
, 0);
2184 retval
= callout(p
, arg
);
2188 case PROC_RETURNED_DONE
:
2190 if (retval
== PROC_RETURNED_DONE
) {
2195 case PROC_CLAIMED_DONE
:
2205 kfree(pid_list
, alloc_count
);
2212 /* PGRP_BLOCKITERATE is not implemented yet */
2214 pgrp_iterate(pgrp
, flags
, callout
, arg
, filterfn
, filterarg
)
2217 int (*callout
)(proc_t
, void *);
2219 int (*filterfn
)(proc_t
, void *);
2224 int count
, pidcount
, i
, alloc_count
;
2227 int dropref
= flags
& PGRP_DROPREF
;
2229 int serialize
= flags
& PGRP_BLOCKITERATE
;
2236 count
= pgrp
->pg_membercnt
+ 10;
2237 if (count
> hard_maxproc
)
2238 count
= hard_maxproc
;
2239 alloc_count
= count
* sizeof(pid_t
);
2240 pid_list
= (pid_t
*)kalloc(alloc_count
);
2241 bzero(pid_list
, alloc_count
);
2244 if (serialize
!= 0) {
2245 while ((pgrp
->pg_listflags
& PGRP_FLAG_ITERABEGIN
) == PGRP_FLAG_ITERABEGIN
) {
2246 pgrp
->pg_listflags
|= PGRP_FLAG_ITERWAIT
;
2247 msleep(&pgrp
->pg_listflags
, &pgrp
->pg_mlock
, 0, "pgrp_iterate", 0);
2249 pgrp
->pg_listflags
|= PGRP_FLAG_ITERABEGIN
;
2255 for (p
= pgrp
->pg_members
.lh_first
; p
!= 0;
2256 p
= p
->p_pglist
.le_next
) {
2257 if ( (filterfn
== 0 ) || (filterfn(p
, filterarg
) != 0)) {
2258 pid_list
[pidcount
] = p
->p_pid
;
2260 if (pidcount
>= count
)
2267 if ((serialize
== 0) && (dropref
!= 0))
2271 for (i
= 0; i
< pidcount
; i
++) {
2272 /* No handling or proc0 */
2273 if (pid_list
[i
] == 0)
2275 p
= proc_find(pid_list
[i
]);
2277 if (p
->p_pgrpid
!= pgid
) {
2281 proc_transwait(p
, 0);
2282 retval
= callout(p
, arg
);
2286 case PROC_RETURNED_DONE
:
2288 if (retval
== PROC_RETURNED_DONE
) {
2293 case PROC_CLAIMED_DONE
:
2302 if (serialize
!= 0) {
2304 pgrp
->pg_listflags
&= ~PGRP_FLAG_ITERABEGIN
;
2305 if ((pgrp
->pg_listflags
& PGRP_FLAG_ITERWAIT
) == PGRP_FLAG_ITERWAIT
) {
2306 pgrp
->pg_listflags
&= ~PGRP_FLAG_ITERWAIT
;
2307 wakeup(&pgrp
->pg_listflags
);
2313 kfree(pid_list
, alloc_count
);
2318 pgrp_add(struct pgrp
* pgrp
, struct proc
* parent
, struct proc
* child
)
2321 child
->p_pgrp
= pgrp
;
2322 child
->p_pgrpid
= pgrp
->pg_id
;
2323 child
->p_listflag
|= P_LIST_INPGRP
;
2325 * When pgrp is being freed , a process can still
2326 * request addition using setpgid from bash when
2327 * login is terminated (login cycler) return ESRCH
2328 * Safe to hold lock due to refcount on pgrp
2330 if ((pgrp
->pg_listflags
& (PGRP_FLAG_TERMINATE
| PGRP_FLAG_DEAD
)) == PGRP_FLAG_TERMINATE
) {
2331 pgrp
->pg_listflags
&= ~PGRP_FLAG_TERMINATE
;
2334 if ((pgrp
->pg_listflags
& PGRP_FLAG_DEAD
) == PGRP_FLAG_DEAD
)
2335 panic("pgrp_add : pgrp is dead adding process");
2339 pgrp
->pg_membercnt
++;
2340 if ( parent
!= PROC_NULL
) {
2341 LIST_INSERT_AFTER(parent
, child
, p_pglist
);
2343 LIST_INSERT_HEAD(&pgrp
->pg_members
, child
, p_pglist
);
2348 if (((pgrp
->pg_listflags
& (PGRP_FLAG_TERMINATE
| PGRP_FLAG_DEAD
)) == PGRP_FLAG_TERMINATE
) && (pgrp
->pg_membercnt
!= 0)) {
2349 pgrp
->pg_listflags
&= ~PGRP_FLAG_TERMINATE
;
2355 pgrp_remove(struct proc
* p
)
2362 #if __PROC_INTERNAL_DEBUG
2363 if ((p
->p_listflag
& P_LIST_INPGRP
) == 0)
2364 panic("removing from pglist but no named ref\n");
2366 p
->p_pgrpid
= PGRPID_DEAD
;
2367 p
->p_listflag
&= ~P_LIST_INPGRP
;
2371 if (pg
== PGRP_NULL
)
2372 panic("pgrp_remove: pg is NULL");
2376 if (pg
->pg_membercnt
< 0)
2377 panic("pgprp: -ve membercnt pgprp:%p p:%p\n",pg
, p
);
2379 LIST_REMOVE(p
, p_pglist
);
2380 if (pg
->pg_members
.lh_first
== 0) {
2382 pgdelete_dropref(pg
);
2390 /* cannot use proc_pgrp as it maybe stalled */
2392 pgrp_replace(struct proc
* p
, struct pgrp
* newpg
)
2394 struct pgrp
* oldpg
;
2400 while ((p
->p_listflag
& P_LIST_PGRPTRANS
) == P_LIST_PGRPTRANS
) {
2401 p
->p_listflag
|= P_LIST_PGRPTRWAIT
;
2402 (void)msleep(&p
->p_pgrpid
, proc_list_mlock
, 0, "proc_pgrp", 0);
2405 p
->p_listflag
|= P_LIST_PGRPTRANS
;
2408 if (oldpg
== PGRP_NULL
)
2409 panic("pgrp_replace: oldpg NULL");
2410 oldpg
->pg_refcount
++;
2411 #if __PROC_INTERNAL_DEBUG
2412 if ((p
->p_listflag
& P_LIST_INPGRP
) == 0)
2413 panic("removing from pglist but no named ref\n");
2415 p
->p_pgrpid
= PGRPID_DEAD
;
2416 p
->p_listflag
&= ~P_LIST_INPGRP
;
2422 oldpg
->pg_membercnt
--;
2423 if (oldpg
->pg_membercnt
< 0)
2424 panic("pgprp: -ve membercnt pgprp:%p p:%p\n",oldpg
, p
);
2425 LIST_REMOVE(p
, p_pglist
);
2426 if (oldpg
->pg_members
.lh_first
== 0) {
2428 pgdelete_dropref(oldpg
);
2436 p
->p_pgrpid
= newpg
->pg_id
;
2437 p
->p_listflag
|= P_LIST_INPGRP
;
2439 * When pgrp is being freed , a process can still
2440 * request addition using setpgid from bash when
2441 * login is terminated (login cycler) return ESRCH
2442 * Safe to hold lock due to refcount on pgrp
2444 if ((newpg
->pg_listflags
& (PGRP_FLAG_TERMINATE
| PGRP_FLAG_DEAD
)) == PGRP_FLAG_TERMINATE
) {
2445 newpg
->pg_listflags
&= ~PGRP_FLAG_TERMINATE
;
2448 if ((newpg
->pg_listflags
& PGRP_FLAG_DEAD
) == PGRP_FLAG_DEAD
)
2449 panic("pgrp_add : pgrp is dead adding process");
2453 newpg
->pg_membercnt
++;
2454 LIST_INSERT_HEAD(&newpg
->pg_members
, p
, p_pglist
);
2458 if (((newpg
->pg_listflags
& (PGRP_FLAG_TERMINATE
| PGRP_FLAG_DEAD
)) == PGRP_FLAG_TERMINATE
) && (newpg
->pg_membercnt
!= 0)) {
2459 newpg
->pg_listflags
&= ~PGRP_FLAG_TERMINATE
;
2462 p
->p_listflag
&= ~P_LIST_PGRPTRANS
;
2463 if ((p
->p_listflag
& P_LIST_PGRPTRWAIT
) == P_LIST_PGRPTRWAIT
) {
2464 p
->p_listflag
&= ~P_LIST_PGRPTRWAIT
;
2465 wakeup(&p
->p_pgrpid
);
2472 pgrp_lock(struct pgrp
* pgrp
)
2474 lck_mtx_lock(&pgrp
->pg_mlock
);
2478 pgrp_unlock(struct pgrp
* pgrp
)
2480 lck_mtx_unlock(&pgrp
->pg_mlock
);
2484 session_lock(struct session
* sess
)
2486 lck_mtx_lock(&sess
->s_mlock
);
2491 session_unlock(struct session
* sess
)
2493 lck_mtx_unlock(&sess
->s_mlock
);
2505 while ((p
->p_listflag
& P_LIST_PGRPTRANS
) == P_LIST_PGRPTRANS
) {
2506 p
->p_listflag
|= P_LIST_PGRPTRWAIT
;
2507 (void)msleep(&p
->p_pgrpid
, proc_list_mlock
, 0, "proc_pgrp", 0);
2512 assert(pgrp
!= NULL
);
2514 if (pgrp
!= PGRP_NULL
) {
2515 pgrp
->pg_refcount
++;
2516 if ((pgrp
->pg_listflags
& (PGRP_FLAG_TERMINATE
| PGRP_FLAG_DEAD
)) != 0)
2517 panic("proc_pgrp: ref being povided for dead pgrp");
2526 tty_pgrp(struct tty
* tp
)
2528 struct pgrp
* pg
= PGRP_NULL
;
2533 if (pg
!= PGRP_NULL
) {
2534 if ((pg
->pg_listflags
& PGRP_FLAG_DEAD
) != 0)
2535 panic("tty_pgrp: ref being povided for dead pgrp");
2544 proc_session(proc_t p
)
2546 struct session
* sess
= SESSION_NULL
;
2549 return(SESSION_NULL
);
2553 /* wait during transitions */
2554 while ((p
->p_listflag
& P_LIST_PGRPTRANS
) == P_LIST_PGRPTRANS
) {
2555 p
->p_listflag
|= P_LIST_PGRPTRWAIT
;
2556 (void)msleep(&p
->p_pgrpid
, proc_list_mlock
, 0, "proc_pgrp", 0);
2559 if ((p
->p_pgrp
!= PGRP_NULL
) && ((sess
= p
->p_pgrp
->pg_session
) != SESSION_NULL
)) {
2560 if ((sess
->s_listflags
& (S_LIST_TERM
| S_LIST_DEAD
)) != 0)
2561 panic("proc_session:returning sesssion ref on terminating session");
2569 session_rele(struct session
*sess
)
2572 if (--sess
->s_count
== 0) {
2573 if ((sess
->s_listflags
& (S_LIST_TERM
| S_LIST_DEAD
)) != 0)
2574 panic("session_rele: terminating already terminated session");
2575 sess
->s_listflags
|= S_LIST_TERM
;
2576 LIST_REMOVE(sess
, s_hash
);
2577 sess
->s_listflags
|= S_LIST_DEAD
;
2578 if (sess
->s_count
!= 0)
2579 panic("session_rele: freeing session in use");
2581 #if CONFIG_FINE_LOCK_GROUPS
2582 lck_mtx_destroy(&sess
->s_mlock
, proc_mlock_grp
);
2584 lck_mtx_destroy(&sess
->s_mlock
, proc_lck_grp
);
2586 FREE_ZONE(sess
, sizeof(struct session
), M_SESSION
);
2592 proc_transstart(proc_t p
, int locked
)
2596 while ((p
->p_lflag
& P_LINTRANSIT
) == P_LINTRANSIT
) {
2597 if ((p
->p_lflag
& P_LTRANSCOMMIT
) == P_LTRANSCOMMIT
) {
2602 p
->p_lflag
|= P_LTRANSWAIT
;
2603 msleep(&p
->p_lflag
, &p
->p_mlock
, 0, "proc_signstart", NULL
);
2605 p
->p_lflag
|= P_LINTRANSIT
;
2606 p
->p_transholder
= current_thread();
2613 proc_transcommit(proc_t p
, int locked
)
2618 assert ((p
->p_lflag
& P_LINTRANSIT
) == P_LINTRANSIT
);
2619 assert (p
->p_transholder
== current_thread());
2620 p
->p_lflag
|= P_LTRANSCOMMIT
;
2622 if ((p
->p_lflag
& P_LTRANSWAIT
) == P_LTRANSWAIT
) {
2623 p
->p_lflag
&= ~P_LTRANSWAIT
;
2624 wakeup(&p
->p_lflag
);
2631 proc_transend(proc_t p
, int locked
)
2636 p
->p_lflag
&= ~( P_LINTRANSIT
| P_LTRANSCOMMIT
);
2637 p
->p_transholder
= NULL
;
2639 if ((p
->p_lflag
& P_LTRANSWAIT
) == P_LTRANSWAIT
) {
2640 p
->p_lflag
&= ~P_LTRANSWAIT
;
2641 wakeup(&p
->p_lflag
);
2648 proc_transwait(proc_t p
, int locked
)
2652 while ((p
->p_lflag
& P_LINTRANSIT
) == P_LINTRANSIT
) {
2653 if ((p
->p_lflag
& P_LTRANSCOMMIT
) == P_LTRANSCOMMIT
&& current_proc() == p
) {
2658 p
->p_lflag
|= P_LTRANSWAIT
;
2659 msleep(&p
->p_lflag
, &p
->p_mlock
, 0, "proc_signstart", NULL
);
2667 proc_klist_lock(void)
2669 lck_mtx_lock(proc_klist_mlock
);
2673 proc_klist_unlock(void)
2675 lck_mtx_unlock(proc_klist_mlock
);
2679 proc_knote(struct proc
* p
, long hint
)
2682 KNOTE(&p
->p_klist
, hint
);
2683 proc_klist_unlock();
2687 proc_knote_drain(struct proc
*p
)
2689 struct knote
*kn
= NULL
;
2692 * Clear the proc's klist to avoid references after the proc is reaped.
2695 while ((kn
= SLIST_FIRST(&p
->p_klist
))) {
2696 kn
->kn_ptr
.p_proc
= PROC_NULL
;
2697 KNOTE_DETACH(&p
->p_klist
, kn
);
2699 proc_klist_unlock();
2702 unsigned long cs_procs_killed
= 0;
2703 unsigned long cs_procs_invalidated
= 0;
2704 int cs_force_kill
= 0;
2705 int cs_force_hard
= 0;
2707 SYSCTL_INT(_vm
, OID_AUTO
, cs_force_kill
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &cs_force_kill
, 0, "");
2708 SYSCTL_INT(_vm
, OID_AUTO
, cs_force_hard
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &cs_force_hard
, 0, "");
2709 SYSCTL_INT(_vm
, OID_AUTO
, cs_debug
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &cs_debug
, 0, "");
2712 cs_allow_invalid(struct proc
*p
)
2715 lck_mtx_assert(&p
->p_mlock
, LCK_MTX_ASSERT_NOTOWNED
);
2717 #if CONFIG_MACF && CONFIG_ENFORCE_SIGNED_CODE
2718 /* There needs to be a MAC policy to implement this hook, or else the
2719 * kill bits will be cleared here every time. If we have
2720 * CONFIG_ENFORCE_SIGNED_CODE, we can assume there is a policy
2721 * implementing the hook.
2723 if( 0 != mac_proc_check_run_cs_invalid(p
)) {
2724 if(cs_debug
) printf("CODE SIGNING: cs_allow_invalid() "
2725 "not allowed: pid %d\n",
2729 if(cs_debug
) printf("CODE SIGNING: cs_allow_invalid() "
2730 "allowed: pid %d\n",
2733 p
->p_csflags
&= ~(CS_KILL
| CS_HARD
| CS_VALID
);
2735 vm_map_switch_protect(get_task_map(p
->task
), FALSE
);
2737 return (p
->p_csflags
& (CS_KILL
| CS_HARD
)) == 0;
2750 * XXX revisit locking when proc is no longer protected
2751 * by the kernel funnel...
2754 /* XXX for testing */
2757 p
->p_csflags
|= CS_KILL
;
2759 p
->p_csflags
|= CS_HARD
;
2761 /* CS_KILL triggers us to send a kill signal. Nothing else. */
2762 if (p
->p_csflags
& CS_KILL
) {
2763 p
->p_csflags
|= CS_KILLED
;
2766 printf("CODE SIGNING: cs_invalid_page(0x%llx): "
2767 "p=%d[%s] honoring CS_KILL, final status 0x%x\n",
2768 vaddr
, p
->p_pid
, p
->p_comm
, p
->p_csflags
);
2771 psignal(p
, SIGKILL
);
2775 /* CS_HARD means fail the mapping operation so the process stays valid. */
2776 if (p
->p_csflags
& CS_HARD
) {
2779 printf("CODE SIGNING: cs_invalid_page(0x%llx): "
2780 "p=%d[%s] honoring CS_HARD\n",
2781 vaddr
, p
->p_pid
, p
->p_comm
);
2785 if (p
->p_csflags
& CS_VALID
) {
2786 p
->p_csflags
&= ~CS_VALID
;
2789 cs_procs_invalidated
++;
2790 printf("CODE SIGNING: cs_invalid_page(0x%llx): "
2791 "p=%d[%s] clearing CS_VALID\n",
2792 vaddr
, p
->p_pid
, p
->p_comm
);
2804 proc_setregister(proc_t p
)
2807 p
->p_lflag
|= P_LREGISTER
;
2812 proc_resetregister(proc_t p
)
2815 p
->p_lflag
&= ~P_LREGISTER
;
2820 proc_pgrpid(proc_t p
)
2828 return current_proc()->p_pgrpid
;
2832 /* return control and action states */
2834 proc_getpcontrol(int pid
, int * pcontrolp
)
2841 if (pcontrolp
!= NULL
)
2842 *pcontrolp
= p
->p_pcaction
;
2849 proc_dopcontrol(proc_t p
, void *num_found
)
2855 pcontrol
= PROC_CONTROL_STATE(p
);
2857 if (PROC_ACTION_STATE(p
) ==0) {
2860 PROC_SETACTION_STATE(p
);
2862 printf("low swap: throttling pid %d (%s)\n", p
->p_pid
, p
->p_comm
);
2863 (*(int *)num_found
)++;
2867 PROC_SETACTION_STATE(p
);
2869 printf("low swap: suspending pid %d (%s)\n", p
->p_pid
, p
->p_comm
);
2870 task_suspend(p
->task
);
2871 (*(int *)num_found
)++;
2875 PROC_SETACTION_STATE(p
);
2877 printf("low swap: killing pid %d (%s)\n", p
->p_pid
, p
->p_comm
);
2878 psignal(p
, SIGKILL
);
2879 (*(int *)num_found
)++;
2889 return(PROC_RETURNED
);
2894 * Resume a throttled or suspended process. This is an internal interface that's only
2895 * used by the user level code that presents the GUI when we run out of swap space and
2896 * hence is restricted to processes with superuser privileges.
2900 proc_resetpcontrol(int pid
)
2905 proc_t self
= current_proc();
2907 /* if the process has been validated to handle resource control or root is valid one */
2908 if (((self
->p_lflag
& P_LVMRSRCOWNER
) == 0) && (error
= suser(kauth_cred_get(), 0)))
2917 pcontrol
= PROC_CONTROL_STATE(p
);
2919 if(PROC_ACTION_STATE(p
) !=0) {
2922 PROC_RESETACTION_STATE(p
);
2924 printf("low swap: unthrottling pid %d (%s)\n", p
->p_pid
, p
->p_comm
);
2928 PROC_RESETACTION_STATE(p
);
2930 printf("low swap: resuming pid %d (%s)\n", p
->p_pid
, p
->p_comm
);
2931 task_resume(p
->task
);
2936 PROC_SETACTION_STATE(p
);
2938 printf("low swap: attempt to unkill pid %d (%s) ignored\n", p
->p_pid
, p
->p_comm
);
2954 * Return true if the specified process has an action state specified for it and it isn't
2955 * already in an action state and it's using more physical memory than the specified threshold.
2956 * Note: the memory_threshold argument is specified in bytes and is of type uint64_t.
2960 proc_pcontrol_filter(proc_t p
, void *memory_thresholdp
)
2963 return PROC_CONTROL_STATE(p
) && /* if there's an action state specified... */
2964 (PROC_ACTION_STATE(p
) == 0) && /* and we're not in the action state yet... */
2965 (get_task_resident_size(p
->task
) > *((uint64_t *)memory_thresholdp
)); /* and this proc is over the mem threshold, */
2966 /* then return true to take action on this proc */
2972 * Deal with the out of swap space condition. This routine gets called when
2973 * we want to swap something out but there's no more space left. Since this
2974 * creates a memory deadlock situtation, we need to take action to free up
2975 * some memory resources in order to prevent the system from hanging completely.
2976 * The action we take is based on what the system processes running at user level
2977 * have specified. Processes are marked in one of four categories: ones that
2978 * can be killed immediately, ones that should be suspended, ones that should
2979 * be throttled, and all the rest which are basically none of the above. Which
2980 * processes are marked as being in which category is a user level policy decision;
2981 * we just take action based on those decisions here.
2984 #define STARTING_PERCENTAGE 50 /* memory threshold expressed as a percentage */
2985 /* of physical memory */
2987 struct timeval last_no_space_action
= {0, 0};
2990 no_paging_space_action(void)
2993 uint64_t memory_threshold
;
2998 * Throttle how often we come through here. Once every 20 seconds should be plenty.
3003 if (now
.tv_sec
<= last_no_space_action
.tv_sec
+ 20)
3006 last_no_space_action
= now
;
3009 * Examine all processes and find those that have been marked to have some action
3010 * taken when swap space runs out. Of those processes, select one or more and
3011 * apply the specified action to them. The idea is to only take action against
3012 * a few processes rather than hitting too many at once. If the low swap condition
3013 * persists, this routine will get called again and we'll take action against more
3016 * Of the processes that have been marked, we choose which ones to take action
3017 * against according to how much physical memory they're presently using. We
3018 * start with the STARTING_THRESHOLD and any processes using more physical memory
3019 * than the percentage threshold will have action taken against it. If there
3020 * are no processes over the threshold, then the threshold is cut in half and we
3021 * look again for processes using more than this threshold. We continue in
3022 * this fashion until we find at least one process to take action against. This
3023 * iterative approach is less than ideally efficient, however we only get here
3024 * when the system is almost in a memory deadlock and is pretty much just
3025 * thrashing if it's doing anything at all. Therefore, the cpu overhead of
3026 * potentially multiple passes here probably isn't revelant.
3029 memory_threshold
= (sane_size
* STARTING_PERCENTAGE
) / 100; /* resident threshold in bytes */
3031 for (num_found
= 0; num_found
== 0; memory_threshold
= memory_threshold
/ 2) {
3032 proc_iterate(PROC_ALLPROCLIST
, proc_dopcontrol
, (void *)&num_found
, proc_pcontrol_filter
, (void *)&memory_threshold
);
3035 * If we just looked with memory_threshold == 0, then there's no need to iterate any further since
3036 * we won't find any eligible processes at this point.
3039 if (memory_threshold
== 0) {
3040 if (num_found
== 0) /* log that we couldn't do anything in this case */
3041 printf("low swap: unable to find any eligible processes to take action on\n");