2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
61 * @(#)kern_proc.c 8.4 (Berkeley) 1/4/94
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
70 * 04-Aug-97 Umesh Vaishampayan (umeshv@apple.com)
71 * Added current_proc_EXTERNAL() function for the use of kernel
74 * 05-Jun-95 Mac Gillon (mgillon) at NeXT
75 * New version based on 3.3NS and 4.4
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/kernel.h>
82 #include <sys/proc_internal.h>
85 #include <sys/file_internal.h>
87 #include <sys/malloc.h>
90 #include <sys/ioctl.h>
92 #include <sys/signalvar.h>
93 #include <sys/syslog.h>
94 #include <sys/sysctl.h>
95 #include <sys/sysproto.h>
96 #include <sys/kauth.h>
97 #include <sys/codesign.h>
98 #include <sys/kernel_types.h>
100 #include <kern/kalloc.h>
101 #include <kern/task.h>
102 #include <kern/coalition.h>
103 #include <sys/coalition.h>
104 #include <kern/assert.h>
105 #include <vm/vm_protos.h>
106 #include <vm/vm_map.h> /* vm_map_switch_protect() */
107 #include <vm/vm_pageout.h>
108 #include <mach/task.h>
109 #include <mach/message.h>
110 #include <sys/priv.h>
111 #include <sys/proc_info.h>
112 #include <sys/bsdtask_info.h>
114 #if CONFIG_MEMORYSTATUS
115 #include <sys/kern_memorystatus.h>
119 #include <security/mac_framework.h>
122 #include <libkern/crypto/sha1.h>
125 * Structure associated with user caching.
128 LIST_ENTRY(uidinfo
) ui_hash
;
132 #define UIHASH(uid) (&uihashtbl[(uid) & uihash])
133 LIST_HEAD(uihashhead
, uidinfo
) *uihashtbl
;
134 u_long uihash
; /* size of hash table - 1 */
137 * Other process lists
139 struct pidhashhead
*pidhashtbl
;
141 struct pgrphashhead
*pgrphashtbl
;
143 struct sesshashhead
*sesshashtbl
;
146 struct proclist allproc
;
147 struct proclist zombproc
;
148 extern struct tty cons
;
154 static pid_t lastlcid
= 1;
155 static int alllctx_cnt
;
157 #define LCID_MAX 8192 /* Does this really need to be large? */
158 static int maxlcid
= LCID_MAX
;
160 LIST_HEAD(lctxlist
, lctx
);
161 static struct lctxlist alllctx
;
163 lck_mtx_t alllctx_lock
;
164 lck_grp_t
* lctx_lck_grp
;
165 lck_grp_attr_t
* lctx_lck_grp_attr
;
166 lck_attr_t
* lctx_lck_attr
;
168 static void lctxinit(void);
174 #define __PROC_INTERNAL_DEBUG 1
176 /* Name to give to core files */
177 __XNU_PRIVATE_EXTERN
char corefilename
[MAXPATHLEN
+1] = {"/cores/core.%P"};
179 static void orphanpg(struct pgrp
*pg
);
180 void proc_name_kdp(task_t t
, char * buf
, int size
);
181 int proc_threadname_kdp(void *uth
, char *buf
, size_t size
);
182 void proc_starttime_kdp(void *p
, uint64_t *tv_sec
, uint64_t *tv_usec
);
183 char *proc_name_address(void *p
);
185 static void pgrp_add(struct pgrp
* pgrp
, proc_t parent
, proc_t child
);
186 static void pgrp_remove(proc_t p
);
187 static void pgrp_replace(proc_t p
, struct pgrp
*pgrp
);
188 static void pgdelete_dropref(struct pgrp
*pgrp
);
189 extern void pg_rele_dropref(struct pgrp
* pgrp
);
190 static int csops_internal(pid_t pid
, int ops
, user_addr_t uaddr
, user_size_t usersize
, user_addr_t uaddittoken
);
191 static boolean_t
proc_parent_is_currentproc(proc_t p
);
193 struct fixjob_iterargs
{
195 struct session
* mysession
;
199 int fixjob_callback(proc_t
, void *);
202 * Initialize global process hashing structures.
208 LIST_INIT(&zombproc
);
209 pidhashtbl
= hashinit(maxproc
/ 4, M_PROC
, &pidhash
);
210 pgrphashtbl
= hashinit(maxproc
/ 4, M_PROC
, &pgrphash
);
211 sesshashtbl
= hashinit(maxproc
/ 4, M_PROC
, &sesshash
);
212 uihashtbl
= hashinit(maxproc
/ 16, M_PROC
, &uihash
);
219 * Change the count associated with number of processes
220 * a given user is using. This routine protects the uihash
224 chgproccnt(uid_t uid
, int diff
)
227 struct uidinfo
*newuip
= NULL
;
228 struct uihashhead
*uipp
;
234 for (uip
= uipp
->lh_first
; uip
!= 0; uip
= uip
->ui_hash
.le_next
)
235 if (uip
->ui_uid
== uid
)
238 uip
->ui_proccnt
+= diff
;
239 if (uip
->ui_proccnt
> 0) {
240 retval
= uip
->ui_proccnt
;
244 if (uip
->ui_proccnt
< 0)
245 panic("chgproccnt: procs < 0");
246 LIST_REMOVE(uip
, ui_hash
);
249 FREE_ZONE(uip
, sizeof(*uip
), M_PROC
);
258 panic("chgproccnt: lost user");
260 if (newuip
!= NULL
) {
263 LIST_INSERT_HEAD(uipp
, uip
, ui_hash
);
265 uip
->ui_proccnt
= diff
;
271 MALLOC_ZONE(newuip
, struct uidinfo
*, sizeof(*uip
), M_PROC
, M_WAITOK
);
273 panic("chgproccnt: M_PROC zone depleted");
277 FREE_ZONE(newuip
, sizeof(*uip
), M_PROC
);
282 * Is p an inferior of the current process?
290 for (; p
!= current_proc(); p
= p
->p_pptr
)
300 * Is p an inferior of t ?
303 isinferior(proc_t p
, proc_t t
)
309 /* if p==t they are not inferior */
314 for (; p
!= t
; p
= p
->p_pptr
) {
317 /* Detect here if we're in a cycle */
318 if ((p
->p_pid
== 0) || (p
->p_pptr
== start
) || (nchecked
>= nprocs
))
328 proc_isinferior(int pid1
, int pid2
)
330 proc_t p
= PROC_NULL
;
331 proc_t t
= PROC_NULL
;
334 if (((p
= proc_find(pid1
)) != (proc_t
)0 ) && ((t
= proc_find(pid2
)) != (proc_t
)0))
335 retval
= isinferior(p
, t
);
348 return(proc_findinternal(pid
, 0));
352 proc_findinternal(int pid
, int locked
)
354 proc_t p
= PROC_NULL
;
360 p
= pfind_locked(pid
);
361 if ((p
== PROC_NULL
) || (p
!= proc_ref_locked(p
)))
372 proc_findthread(thread_t thread
)
374 proc_t p
= PROC_NULL
;
378 uth
= get_bsdthread_info(thread
);
379 if (uth
&& (uth
->uu_flag
& UT_VFORK
))
382 p
= (proc_t
)(get_bsdthreadtask_info(thread
));
383 p
= proc_ref_locked(p
);
406 if (p
!= proc_ref_locked(p
))
414 proc_ref_locked(proc_t p
)
418 /* if process still in creation return failure */
419 if ((p
== PROC_NULL
) || ((p
->p_listflag
& P_LIST_INCREATE
) != 0))
421 /* do not return process marked for termination */
422 if ((p
->p_stat
!= SZOMB
) && ((p
->p_listflag
& P_LIST_EXITED
) == 0) && ((p
->p_listflag
& (P_LIST_DRAINWAIT
| P_LIST_DRAIN
| P_LIST_DEAD
)) == 0))
431 proc_rele_locked(proc_t p
)
434 if (p
->p_refcount
> 0) {
436 if ((p
->p_refcount
== 0) && ((p
->p_listflag
& P_LIST_DRAINWAIT
) == P_LIST_DRAINWAIT
)) {
437 p
->p_listflag
&= ~P_LIST_DRAINWAIT
;
438 wakeup(&p
->p_refcount
);
441 panic("proc_rele_locked -ve ref\n");
446 proc_find_zombref(int pid
)
453 p
= pfind_locked(pid
);
455 /* should we bail? */
456 if ((p
== PROC_NULL
) /* not found */
457 || ((p
->p_listflag
& P_LIST_INCREATE
) != 0) /* not created yet */
458 || ((p
->p_listflag
& P_LIST_EXITED
) == 0)) { /* not started exit */
464 /* If someone else is controlling the (unreaped) zombie - wait */
465 if ((p
->p_listflag
& P_LIST_WAITING
) != 0) {
466 (void)msleep(&p
->p_stat
, proc_list_mlock
, PWAIT
, "waitcoll", 0);
469 p
->p_listflag
|= P_LIST_WAITING
;
477 proc_drop_zombref(proc_t p
)
480 if ((p
->p_listflag
& P_LIST_WAITING
) == P_LIST_WAITING
) {
481 p
->p_listflag
&= ~P_LIST_WAITING
;
489 proc_refdrain(proc_t p
)
494 p
->p_listflag
|= P_LIST_DRAIN
;
495 while (p
->p_refcount
) {
496 p
->p_listflag
|= P_LIST_DRAINWAIT
;
497 msleep(&p
->p_refcount
, proc_list_mlock
, 0, "proc_refdrain", 0) ;
499 p
->p_listflag
&= ~P_LIST_DRAIN
;
500 p
->p_listflag
|= P_LIST_DEAD
;
508 proc_parentholdref(proc_t p
)
510 proc_t parent
= PROC_NULL
;
518 if ((pp
== PROC_NULL
) || (pp
->p_stat
== SZOMB
) || ((pp
->p_listflag
& (P_LIST_CHILDDRSTART
| P_LIST_CHILDDRAINED
)) == (P_LIST_CHILDDRSTART
| P_LIST_CHILDDRAINED
))) {
523 if ((pp
->p_listflag
& (P_LIST_CHILDDRSTART
| P_LIST_CHILDDRAINED
)) == P_LIST_CHILDDRSTART
) {
524 pp
->p_listflag
|= P_LIST_CHILDDRWAIT
;
525 msleep(&pp
->p_childrencnt
, proc_list_mlock
, 0, "proc_parent", 0);
534 if ((pp
->p_listflag
& (P_LIST_CHILDDRSTART
| P_LIST_CHILDDRAINED
)) == 0) {
545 proc_parentdropref(proc_t p
, int listlocked
)
550 if (p
->p_parentref
> 0) {
552 if ((p
->p_parentref
== 0) && ((p
->p_listflag
& P_LIST_PARENTREFWAIT
) == P_LIST_PARENTREFWAIT
)) {
553 p
->p_listflag
&= ~P_LIST_PARENTREFWAIT
;
554 wakeup(&p
->p_parentref
);
557 panic("proc_parentdropref -ve ref\n");
565 proc_childdrainstart(proc_t p
)
567 #if __PROC_INTERNAL_DEBUG
568 if ((p
->p_listflag
& P_LIST_CHILDDRSTART
) == P_LIST_CHILDDRSTART
)
569 panic("proc_childdrainstart: childdrain already started\n");
571 p
->p_listflag
|= P_LIST_CHILDDRSTART
;
572 /* wait for all that hold parentrefs to drop */
573 while (p
->p_parentref
> 0) {
574 p
->p_listflag
|= P_LIST_PARENTREFWAIT
;
575 msleep(&p
->p_parentref
, proc_list_mlock
, 0, "proc_childdrainstart", 0) ;
581 proc_childdrainend(proc_t p
)
583 #if __PROC_INTERNAL_DEBUG
584 if (p
->p_childrencnt
> 0)
585 panic("exiting: children stil hanging around\n");
587 p
->p_listflag
|= P_LIST_CHILDDRAINED
;
588 if ((p
->p_listflag
& (P_LIST_CHILDLKWAIT
|P_LIST_CHILDDRWAIT
)) != 0) {
589 p
->p_listflag
&= ~(P_LIST_CHILDLKWAIT
|P_LIST_CHILDDRWAIT
);
590 wakeup(&p
->p_childrencnt
);
595 proc_checkdeadrefs(__unused proc_t p
)
597 #if __PROC_INTERNAL_DEBUG
598 if ((p
->p_listflag
& P_LIST_INHASH
) != 0)
599 panic("proc being freed and still in hash %p: %u\n", p
, p
->p_listflag
);
600 if (p
->p_childrencnt
!= 0)
601 panic("proc being freed and pending children cnt %p:%d\n", p
, p
->p_childrencnt
);
602 if (p
->p_refcount
!= 0)
603 panic("proc being freed and pending refcount %p:%d\n", p
, p
->p_refcount
);
604 if (p
->p_parentref
!= 0)
605 panic("proc being freed and pending parentrefs %p:%d\n", p
, p
->p_parentref
);
624 return (current_proc()->p_pid
);
630 return (current_proc()->p_ppid
);
635 dtrace_current_proc_vforking(void)
637 thread_t th
= current_thread();
638 struct uthread
*ut
= get_bsdthread_info(th
);
641 ((ut
->uu_flag
& (UT_VFORK
|UT_VFORKING
)) == (UT_VFORK
|UT_VFORKING
))) {
643 * Handle the narrow window where we're in the vfork syscall,
644 * but we're not quite ready to claim (in particular, to DTrace)
645 * that we're running as the child.
647 return (get_bsdtask_info(get_threadtask(th
)));
649 return (current_proc());
653 dtrace_proc_selfpid(void)
655 return (dtrace_current_proc_vforking()->p_pid
);
659 dtrace_proc_selfppid(void)
661 return (dtrace_current_proc_vforking()->p_ppid
);
665 dtrace_proc_selfruid(void)
667 return (dtrace_current_proc_vforking()->p_ruid
);
669 #endif /* CONFIG_DTRACE */
672 proc_parent(proc_t p
)
680 parent
= proc_ref_locked(pp
);
681 if ((parent
== PROC_NULL
) && (pp
!= PROC_NULL
) && (pp
->p_stat
!= SZOMB
) && ((pp
->p_listflag
& P_LIST_EXITED
) != 0) && ((pp
->p_listflag
& P_LIST_CHILDDRAINED
)== 0)){
682 pp
->p_listflag
|= P_LIST_CHILDLKWAIT
;
683 msleep(&pp
->p_childrencnt
, proc_list_mlock
, 0, "proc_parent", 0);
691 proc_parent_is_currentproc(proc_t p
)
693 boolean_t ret
= FALSE
;
696 if (p
->p_pptr
== current_proc())
704 proc_name(int pid
, char * buf
, int size
)
708 if ((p
= proc_find(pid
)) != PROC_NULL
) {
709 strlcpy(buf
, &p
->p_comm
[0], size
);
715 proc_name_kdp(task_t t
, char * buf
, int size
)
717 proc_t p
= get_bsdtask_info(t
);
720 strlcpy(buf
, &p
->p_comm
[0], size
);
725 proc_threadname_kdp(void *uth
, char *buf
, size_t size
)
727 if (size
< MAXTHREADNAMESIZE
) {
728 /* this is really just a protective measure for the future in
729 * case the thread name size in stackshot gets out of sync with
730 * the BSD max thread name size. Note that bsd_getthreadname
731 * doesn't take input buffer size into account. */
736 bsd_getthreadname(uth
, buf
);
741 /* note that this function is generally going to be called from stackshot,
742 * and the arguments will be coming from a struct which is declared packed
743 * thus the input arguments will in general be unaligned. We have to handle
746 proc_starttime_kdp(void *p
, uint64_t *tv_sec
, uint64_t *tv_usec
)
748 proc_t pp
= (proc_t
)p
;
751 } __attribute__((packed
));
753 if (pp
!= PROC_NULL
) {
755 ((struct uint64p
*)tv_sec
)->val
= pp
->p_start
.tv_sec
;
757 ((struct uint64p
*)tv_usec
)->val
= pp
->p_start
.tv_usec
;
762 proc_name_address(void *p
)
764 return &((proc_t
)p
)->p_comm
[0];
768 proc_selfname(char * buf
, int size
)
772 if ((p
= current_proc())!= (proc_t
)0) {
773 strlcpy(buf
, &p
->p_comm
[0], size
);
778 proc_signal(int pid
, int signum
)
782 if ((p
= proc_find(pid
)) != PROC_NULL
) {
789 proc_issignal(int pid
, sigset_t mask
)
794 if ((p
= proc_find(pid
)) != PROC_NULL
) {
795 error
= proc_pendingsignals(p
, mask
);
803 proc_noremotehang(proc_t p
)
808 retval
= p
->p_flag
& P_NOREMOTEHANG
;
809 return(retval
? 1: 0);
814 proc_exiting(proc_t p
)
819 retval
= p
->p_lflag
& P_LEXIT
;
820 return(retval
? 1: 0);
824 proc_forcequota(proc_t p
)
829 retval
= p
->p_flag
& P_FORCEQUOTA
;
830 return(retval
? 1: 0);
837 kauth_cred_t my_cred
;
840 my_cred
= kauth_cred_proc_ref(p
);
841 error
= suser(my_cred
, &p
->p_acflag
);
842 kauth_cred_unref(&my_cred
);
847 proc_task(proc_t proc
)
849 return (task_t
)proc
->task
;
853 * Obtain the first thread in a process
855 * XXX This is a bad thing to do; it exists predominantly to support the
856 * XXX use of proc_t's in places that should really be using
857 * XXX thread_t's instead. This maintains historical behaviour, but really
858 * XXX needs an audit of the context (proxy vs. not) to clean up.
861 proc_thread(proc_t proc
)
863 uthread_t uth
= TAILQ_FIRST(&proc
->p_uthlist
);
866 return(uth
->uu_context
.vc_thread
);
880 thread_t th
= current_thread();
882 return((struct uthread
*)get_bsdthread_info(th
));
887 proc_is64bit(proc_t p
)
889 return(IS_64BIT_PROCESS(p
));
893 proc_pidversion(proc_t p
)
895 return(p
->p_idversion
);
899 proc_uniqueid(proc_t p
)
901 return(p
->p_uniqueid
);
905 proc_puniqueid(proc_t p
)
907 return(p
->p_puniqueid
);
911 proc_coalitionid(__unused proc_t p
)
913 #if CONFIG_COALITIONS
914 return(task_coalition_id(p
->task
));
921 proc_was_throttled(proc_t p
)
923 return (p
->was_throttled
);
927 proc_did_throttle(proc_t p
)
929 return (p
->did_throttle
);
933 proc_getcdhash(proc_t p
, unsigned char *cdhash
)
935 return vn_getcdhash(p
->p_textvp
, p
->p_textoff
, cdhash
);
939 proc_getexecutableuuid(proc_t p
, unsigned char *uuidbuf
, unsigned long size
)
941 if (size
>= sizeof(p
->p_uuid
)) {
942 memcpy(uuidbuf
, p
->p_uuid
, sizeof(p
->p_uuid
));
946 /* Return vnode for executable with an iocount. Must be released with vnode_put() */
948 proc_getexecutablevnode(proc_t p
)
950 vnode_t tvp
= p
->p_textvp
;
952 if ( tvp
!= NULLVP
) {
953 if (vnode_getwithref(tvp
) == 0) {
963 bsd_set_dependency_capable(task_t task
)
965 proc_t p
= get_bsdtask_info(task
);
968 OSBitOrAtomic(P_DEPENDENCY_CAPABLE
, &p
->p_flag
);
974 IS_64BIT_PROCESS(proc_t p
)
976 if (p
&& (p
->p_flag
& P_LP64
))
983 * Locate a process by number
986 pfind_locked(pid_t pid
)
996 for (p
= PIDHASH(pid
)->lh_first
; p
!= 0; p
= p
->p_hash
.le_next
) {
997 if (p
->p_pid
== pid
) {
999 for (q
= p
->p_hash
.le_next
; q
!= 0; q
= q
->p_hash
.le_next
) {
1000 if ((p
!=q
) && (q
->p_pid
== pid
))
1001 panic("two procs with same pid %p:%p:%d:%d\n", p
, q
, p
->p_pid
, q
->p_pid
);
1011 * Locate a zombie by PID
1013 __private_extern__ proc_t
1021 for (p
= zombproc
.lh_first
; p
!= 0; p
= p
->p_list
.le_next
)
1022 if (p
->p_pid
== pid
)
1031 * Locate a process group by number
1040 pgrp
= pgfind_internal(pgid
);
1041 if ((pgrp
== NULL
) || ((pgrp
->pg_listflags
& PGRP_FLAG_TERMINATE
) != 0))
1044 pgrp
->pg_refcount
++;
1052 pgfind_internal(pid_t pgid
)
1056 for (pgrp
= PGRPHASH(pgid
)->lh_first
; pgrp
!= 0; pgrp
= pgrp
->pg_hash
.le_next
)
1057 if (pgrp
->pg_id
== pgid
)
1063 pg_rele(struct pgrp
* pgrp
)
1065 if(pgrp
== PGRP_NULL
)
1067 pg_rele_dropref(pgrp
);
1071 pg_rele_dropref(struct pgrp
* pgrp
)
1074 if ((pgrp
->pg_refcount
== 1) && ((pgrp
->pg_listflags
& PGRP_FLAG_TERMINATE
) == PGRP_FLAG_TERMINATE
)) {
1076 pgdelete_dropref(pgrp
);
1080 pgrp
->pg_refcount
--;
1085 session_find_internal(pid_t sessid
)
1087 struct session
*sess
;
1089 for (sess
= SESSHASH(sessid
)->lh_first
; sess
!= 0; sess
= sess
->s_hash
.le_next
)
1090 if (sess
->s_sid
== sessid
)
1097 * Make a new process ready to become a useful member of society by making it
1098 * visible in all the right places and initialize its own lists to empty.
1100 * Parameters: parent The parent of the process to insert
1101 * child The child process to insert
1105 * Notes: Insert a child process into the parents process group, assign
1106 * the child the parent process pointer and PPID of the parent,
1107 * place it on the parents p_children list as a sibling,
1108 * initialize its own child list, place it in the allproc list,
1109 * insert it in the proper hash bucket, and initialize its
1113 pinsertchild(proc_t parent
, proc_t child
)
1117 LIST_INIT(&child
->p_children
);
1118 TAILQ_INIT(&child
->p_evlist
);
1119 child
->p_pptr
= parent
;
1120 child
->p_ppid
= parent
->p_pid
;
1121 child
->p_puniqueid
= parent
->p_uniqueid
;
1123 pg
= proc_pgrp(parent
);
1124 pgrp_add(pg
, parent
, child
);
1129 #if CONFIG_MEMORYSTATUS
1130 memorystatus_add(child
, TRUE
);
1133 parent
->p_childrencnt
++;
1134 LIST_INSERT_HEAD(&parent
->p_children
, child
, p_sibling
);
1136 LIST_INSERT_HEAD(&allproc
, child
, p_list
);
1137 /* mark the completion of proc creation */
1138 child
->p_listflag
&= ~P_LIST_INCREATE
;
1144 * Move p to a new or existing process group (and session)
1146 * Returns: 0 Success
1147 * ESRCH No such process
1150 enterpgrp(proc_t p
, pid_t pgid
, int mksess
)
1153 struct pgrp
*mypgrp
;
1154 struct session
* procsp
;
1156 pgrp
= pgfind(pgid
);
1157 mypgrp
= proc_pgrp(p
);
1158 procsp
= proc_session(p
);
1161 if (pgrp
!= NULL
&& mksess
) /* firewalls */
1162 panic("enterpgrp: setsid into non-empty pgrp");
1163 if (SESS_LEADER(p
, procsp
))
1164 panic("enterpgrp: session leader attempted setpgrp");
1166 if (pgrp
== PGRP_NULL
) {
1167 pid_t savepid
= p
->p_pid
;
1168 proc_t np
= PROC_NULL
;
1173 if (p
->p_pid
!= pgid
)
1174 panic("enterpgrp: new pgrp and pid != pgid");
1176 MALLOC_ZONE(pgrp
, struct pgrp
*, sizeof(struct pgrp
), M_PGRP
,
1179 panic("enterpgrp: M_PGRP zone depleted");
1180 if ((np
= proc_find(savepid
)) == NULL
|| np
!= p
) {
1181 if (np
!= PROC_NULL
)
1183 if (mypgrp
!= PGRP_NULL
)
1185 if (procsp
!= SESSION_NULL
)
1186 session_rele(procsp
);
1187 FREE_ZONE(pgrp
, sizeof(struct pgrp
), M_PGRP
);
1192 struct session
*sess
;
1197 MALLOC_ZONE(sess
, struct session
*,
1198 sizeof(struct session
), M_SESSION
, M_WAITOK
);
1200 panic("enterpgrp: M_SESSION zone depleted");
1202 sess
->s_sid
= p
->p_pid
;
1204 sess
->s_ttyvp
= NULL
;
1205 sess
->s_ttyp
= TTY_NULL
;
1207 sess
->s_listflags
= 0;
1208 sess
->s_ttypgrpid
= NO_PID
;
1209 #if CONFIG_FINE_LOCK_GROUPS
1210 lck_mtx_init(&sess
->s_mlock
, proc_mlock_grp
, proc_lck_attr
);
1212 lck_mtx_init(&sess
->s_mlock
, proc_lck_grp
, proc_lck_attr
);
1214 bcopy(procsp
->s_login
, sess
->s_login
,
1215 sizeof(sess
->s_login
));
1216 OSBitAndAtomic(~((uint32_t)P_CONTROLT
), &p
->p_flag
);
1218 LIST_INSERT_HEAD(SESSHASH(sess
->s_sid
), sess
, s_hash
);
1220 pgrp
->pg_session
= sess
;
1222 if (p
!= current_proc())
1223 panic("enterpgrp: mksession and p != curproc");
1227 pgrp
->pg_session
= procsp
;
1229 if ((pgrp
->pg_session
->s_listflags
& (S_LIST_TERM
| S_LIST_DEAD
)) != 0)
1230 panic("enterpgrp: providing ref to terminating session ");
1231 pgrp
->pg_session
->s_count
++;
1235 #if CONFIG_FINE_LOCK_GROUPS
1236 lck_mtx_init(&pgrp
->pg_mlock
, proc_mlock_grp
, proc_lck_attr
);
1238 lck_mtx_init(&pgrp
->pg_mlock
, proc_lck_grp
, proc_lck_attr
);
1240 LIST_INIT(&pgrp
->pg_members
);
1241 pgrp
->pg_membercnt
= 0;
1244 pgrp
->pg_refcount
= 1;
1245 pgrp
->pg_listflags
= 0;
1246 LIST_INSERT_HEAD(PGRPHASH(pgid
), pgrp
, pg_hash
);
1248 } else if (pgrp
== mypgrp
) {
1252 if (procsp
!= SESSION_NULL
)
1253 session_rele(procsp
);
1257 if (procsp
!= SESSION_NULL
)
1258 session_rele(procsp
);
1260 * Adjust eligibility of affected pgrps to participate in job control.
1261 * Increment eligibility counts before decrementing, otherwise we
1262 * could reach 0 spuriously during the first call.
1264 fixjobc(p
, pgrp
, 1);
1265 fixjobc(p
, mypgrp
, 0);
1267 if(mypgrp
!= PGRP_NULL
)
1269 pgrp_replace(p
, pgrp
);
1276 * remove process from process group
1287 * delete a process group
1290 pgdelete_dropref(struct pgrp
*pgrp
)
1294 struct session
*sessp
;
1298 if (pgrp
->pg_membercnt
!= 0) {
1304 pgrp
->pg_refcount
--;
1305 if ((emptypgrp
== 0) || (pgrp
->pg_membercnt
!= 0)) {
1310 pgrp
->pg_listflags
|= PGRP_FLAG_TERMINATE
;
1312 if (pgrp
->pg_refcount
> 0) {
1317 pgrp
->pg_listflags
|= PGRP_FLAG_DEAD
;
1318 LIST_REMOVE(pgrp
, pg_hash
);
1322 ttyp
= SESSION_TP(pgrp
->pg_session
);
1323 if (ttyp
!= TTY_NULL
) {
1324 if (ttyp
->t_pgrp
== pgrp
) {
1326 /* Re-check after acquiring the lock */
1327 if (ttyp
->t_pgrp
== pgrp
) {
1328 ttyp
->t_pgrp
= NULL
;
1329 pgrp
->pg_session
->s_ttypgrpid
= NO_PID
;
1337 sessp
= pgrp
->pg_session
;
1338 if ((sessp
->s_listflags
& (S_LIST_TERM
| S_LIST_DEAD
)) != 0)
1339 panic("pg_deleteref: manipulating refs of already terminating session");
1340 if (--sessp
->s_count
== 0) {
1341 if ((sessp
->s_listflags
& (S_LIST_TERM
| S_LIST_DEAD
)) != 0)
1342 panic("pg_deleteref: terminating already terminated session");
1343 sessp
->s_listflags
|= S_LIST_TERM
;
1344 ttyp
= SESSION_TP(sessp
);
1345 LIST_REMOVE(sessp
, s_hash
);
1347 if (ttyp
!= TTY_NULL
) {
1349 if (ttyp
->t_session
== sessp
)
1350 ttyp
->t_session
= NULL
;
1354 sessp
->s_listflags
|= S_LIST_DEAD
;
1355 if (sessp
->s_count
!= 0)
1356 panic("pg_deleteref: freeing session in use");
1358 #if CONFIG_FINE_LOCK_GROUPS
1359 lck_mtx_destroy(&sessp
->s_mlock
, proc_mlock_grp
);
1361 lck_mtx_destroy(&sessp
->s_mlock
, proc_lck_grp
);
1363 FREE_ZONE(sessp
, sizeof(struct session
), M_SESSION
);
1366 #if CONFIG_FINE_LOCK_GROUPS
1367 lck_mtx_destroy(&pgrp
->pg_mlock
, proc_mlock_grp
);
1369 lck_mtx_destroy(&pgrp
->pg_mlock
, proc_lck_grp
);
1371 FREE_ZONE(pgrp
, sizeof(*pgrp
), M_PGRP
);
1376 * Adjust pgrp jobc counters when specified process changes process group.
1377 * We count the number of processes in each process group that "qualify"
1378 * the group for terminal job control (those with a parent in a different
1379 * process group of the same session). If that count reaches zero, the
1380 * process group becomes orphaned. Check both the specified process'
1381 * process group and that of its children.
1382 * entering == 0 => p is leaving specified group.
1383 * entering == 1 => p is entering specified group.
1386 fixjob_callback(proc_t p
, void * arg
)
1388 struct fixjob_iterargs
*fp
;
1389 struct pgrp
* pg
, *hispg
;
1390 struct session
* mysession
, *hissess
;
1393 fp
= (struct fixjob_iterargs
*)arg
;
1395 mysession
= fp
->mysession
;
1396 entering
= fp
->entering
;
1398 hispg
= proc_pgrp(p
);
1399 hissess
= proc_session(p
);
1401 if ((hispg
!= pg
) &&
1402 (hissess
== mysession
)) {
1407 } else if (--hispg
->pg_jobc
== 0) {
1413 if (hissess
!= SESSION_NULL
)
1414 session_rele(hissess
);
1415 if (hispg
!= PGRP_NULL
)
1418 return(PROC_RETURNED
);
1422 fixjobc(proc_t p
, struct pgrp
*pgrp
, int entering
)
1424 struct pgrp
*hispgrp
= PGRP_NULL
;
1425 struct session
*hissess
= SESSION_NULL
;
1426 struct session
*mysession
= pgrp
->pg_session
;
1428 struct fixjob_iterargs fjarg
;
1429 boolean_t proc_parent_self
;
1432 * Check if p's parent is current proc, if yes then no need to take
1433 * a ref; calling proc_parent with current proc as parent may
1434 * deadlock if current proc is exiting.
1436 proc_parent_self
= proc_parent_is_currentproc(p
);
1437 if (proc_parent_self
)
1438 parent
= current_proc();
1440 parent
= proc_parent(p
);
1442 if (parent
!= PROC_NULL
) {
1443 hispgrp
= proc_pgrp(parent
);
1444 hissess
= proc_session(parent
);
1445 if (!proc_parent_self
)
1451 * Check p's parent to see whether p qualifies its own process
1452 * group; if so, adjust count for p's process group.
1454 if ((hispgrp
!= pgrp
) &&
1455 (hissess
== mysession
)) {
1460 }else if (--pgrp
->pg_jobc
== 0) {
1467 if (hissess
!= SESSION_NULL
)
1468 session_rele(hissess
);
1469 if (hispgrp
!= PGRP_NULL
)
1473 * Check this process' children to see whether they qualify
1474 * their process groups; if so, adjust counts for children's
1478 fjarg
.mysession
= mysession
;
1479 fjarg
.entering
= entering
;
1480 proc_childrenwalk(p
, fixjob_callback
, &fjarg
);
1484 * A process group has become orphaned;
1485 * if there are any stopped processes in the group,
1486 * hang-up all process in that group.
1489 orphanpg(struct pgrp
* pgrp
)
1493 int count
, pidcount
, i
, alloc_count
;
1495 if (pgrp
== PGRP_NULL
)
1499 for (p
= pgrp
->pg_members
.lh_first
; p
!= 0; p
= p
->p_pglist
.le_next
) {
1500 if (p
->p_stat
== SSTOP
) {
1501 for (p
= pgrp
->pg_members
.lh_first
; p
!= 0;
1502 p
= p
->p_pglist
.le_next
)
1504 break; /* ??? stops after finding one.. */
1510 if (count
> hard_maxproc
)
1511 count
= hard_maxproc
;
1512 alloc_count
= count
* sizeof(pid_t
);
1513 pid_list
= (pid_t
*)kalloc(alloc_count
);
1514 bzero(pid_list
, alloc_count
);
1518 for (p
= pgrp
->pg_members
.lh_first
; p
!= 0;
1519 p
= p
->p_pglist
.le_next
) {
1520 if (p
->p_stat
== SSTOP
) {
1521 for (p
= pgrp
->pg_members
.lh_first
; p
!= 0;
1522 p
= p
->p_pglist
.le_next
) {
1523 pid_list
[pidcount
] = p
->p_pid
;
1525 if (pidcount
>= count
)
1528 break; /* ??? stops after finding one.. */
1537 for (i
= 0; i
< pidcount
; i
++) {
1538 /* No handling for proc0 */
1539 if (pid_list
[i
] == 0)
1541 p
= proc_find(pid_list
[i
]);
1543 proc_transwait(p
, 0);
1546 psignal(p
, SIGCONT
);
1551 kfree(pid_list
, alloc_count
);
1557 /* XXX should be __private_extern__ */
1559 proc_is_classic(proc_t p
)
1561 return (p
->p_flag
& P_TRANSLATED
) ? 1 : 0;
1564 /* XXX Why does this function exist? Need to kill it off... */
1566 current_proc_EXTERNAL(void)
1568 return (current_proc());
1572 proc_is_forcing_hfs_case_sensitivity(proc_t p
)
1574 return (p
->p_vfs_iopolicy
& P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY
) ? 1 : 0;
1578 * proc_core_name(name, uid, pid)
1579 * Expand the name described in corefilename, using name, uid, and pid.
1580 * corefilename is a printf-like string, with three format specifiers:
1581 * %N name of process ("name")
1582 * %P process id (pid)
1584 * For example, "%N.core" is the default; they can be disabled completely
1585 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
1586 * This is controlled by the sysctl variable kern.corefile (see above).
1588 __private_extern__
int
1589 proc_core_name(const char *name
, uid_t uid
, pid_t pid
, char *cf_name
,
1592 const char *format
, *appendstr
;
1593 char id_buf
[11]; /* Buffer for pid/uid -- max 4B */
1596 if (cf_name
== NULL
)
1599 format
= corefilename
;
1600 for (i
= 0, n
= 0; n
< cf_name_len
&& format
[i
]; i
++) {
1601 switch (format
[i
]) {
1602 case '%': /* Format character */
1604 switch (format
[i
]) {
1608 case 'N': /* process name */
1611 case 'P': /* process id */
1612 snprintf(id_buf
, sizeof(id_buf
), "%u", pid
);
1615 case 'U': /* user id */
1616 snprintf(id_buf
, sizeof(id_buf
), "%u", uid
);
1622 "Unknown format character %c in `%s'\n",
1625 l
= strlen(appendstr
);
1626 if ((n
+ l
) >= cf_name_len
)
1628 bcopy(appendstr
, cf_name
+ n
, l
);
1632 cf_name
[n
++] = format
[i
];
1635 if (format
[i
] != '\0')
1639 log(LOG_ERR
, "pid %ld (%s), uid (%u): corename is too long\n",
1640 (long)pid
, name
, (uint32_t)uid
);
1649 LIST_INIT(&alllctx
);
1652 /* allocate lctx lock group attribute and group */
1653 lctx_lck_grp_attr
= lck_grp_attr_alloc_init();
1654 lck_grp_attr_setstat(lctx_lck_grp_attr
);
1656 lctx_lck_grp
= lck_grp_alloc_init("lctx", lctx_lck_grp_attr
);
1657 /* Allocate lctx lock attribute */
1658 lctx_lck_attr
= lck_attr_alloc_init();
1660 lck_mtx_init(&alllctx_lock
, lctx_lck_grp
, lctx_lck_attr
);
1664 * Locate login context by number.
1672 LIST_FOREACH(l
, &alllctx
, lc_list
) {
1673 if (l
->lc_id
== lcid
) {
1685 if (lastlcid > maxlcid) \
1695 /* Not very efficient but this isn't a common operation. */
1696 while ((l
= lcfind(lastlcid
)) != NULL
) {
1703 MALLOC(l
, struct lctx
*, sizeof(struct lctx
), M_LCTX
, M_WAITOK
|M_ZERO
);
1705 LIST_INIT(&l
->lc_members
);
1706 lck_mtx_init(&l
->lc_mtx
, lctx_lck_grp
, lctx_lck_attr
);
1708 l
->lc_label
= mac_lctx_label_alloc();
1711 LIST_INSERT_HEAD(&alllctx
, l
, lc_list
);
1719 * Call with proc protected (either by being invisible
1720 * or by having the all-login-context lock held) and
1723 * Will unlock lctx on return.
1726 enterlctx (proc_t p
, struct lctx
*l
, __unused
int create
)
1732 LIST_INSERT_HEAD(&l
->lc_members
, p
, p_lclist
);
1737 mac_lctx_notify_create(p
, l
);
1739 mac_lctx_notify_join(p
, l
);
1747 * Remove process from login context (if any). Called with p protected by
1751 leavelctx (proc_t p
)
1755 if (p
->p_lctx
== NULL
) {
1759 LCTX_LOCK(p
->p_lctx
);
1762 LIST_REMOVE(p
, p_lclist
);
1765 mac_lctx_notify_leave(p
, l
);
1767 if (LIST_EMPTY(&l
->lc_members
)) {
1768 LIST_REMOVE(l
, lc_list
);
1771 lck_mtx_destroy(&l
->lc_mtx
, lctx_lck_grp
);
1773 mac_lctx_label_free(l
->lc_label
);
1784 sysctl_kern_lctx SYSCTL_HANDLER_ARGS
1786 int *name
= (int*) arg1
;
1787 u_int namelen
= arg2
;
1788 struct kinfo_lctx kil
;
1794 switch (oidp
->oid_number
) {
1797 /* Request for size. */
1799 error
= SYSCTL_OUT(req
, 0,
1800 sizeof(struct kinfo_lctx
) * (alllctx_cnt
+ 1));
1805 case KERN_LCTX_LCID
:
1807 if (req
->oldlen
< sizeof(struct kinfo_lctx
))
1812 /* No login context */
1813 l
= lcfind((pid_t
)name
[0]);
1819 return (SYSCTL_OUT(req
, (caddr_t
)&kil
, sizeof(kil
)));
1825 /* Provided buffer is too small. */
1826 if (req
->oldlen
< (sizeof(struct kinfo_lctx
) * alllctx_cnt
)) {
1831 LIST_FOREACH(l
, &alllctx
, lc_list
) {
1836 error
= SYSCTL_OUT(req
, (caddr_t
)&kil
, sizeof(kil
));
1846 SYSCTL_NODE(_kern
, KERN_LCTX
, lctx
, CTLFLAG_RW
|CTLFLAG_LOCKED
, 0, "Login Context");
1848 SYSCTL_PROC(_kern_lctx
, KERN_LCTX_ALL
, all
, CTLFLAG_RD
|CTLTYPE_STRUCT
| CTLFLAG_LOCKED
,
1849 0, 0, sysctl_kern_lctx
, "S,lctx",
1850 "Return entire login context table");
1851 SYSCTL_NODE(_kern_lctx
, KERN_LCTX_LCID
, lcid
, CTLFLAG_RD
| CTLFLAG_LOCKED
,
1852 sysctl_kern_lctx
, "Login Context Table");
1853 SYSCTL_INT(_kern_lctx
, OID_AUTO
, last
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &lastlcid
, 0, "");
1854 SYSCTL_INT(_kern_lctx
, OID_AUTO
, count
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &alllctx_cnt
, 0, "");
1855 SYSCTL_INT(_kern_lctx
, OID_AUTO
, max
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &maxlcid
, 0, "");
1859 /* Code Signing related routines */
1862 csops(__unused proc_t p
, struct csops_args
*uap
, __unused
int32_t *retval
)
1864 return(csops_internal(uap
->pid
, uap
->ops
, uap
->useraddr
,
1865 uap
->usersize
, USER_ADDR_NULL
));
1869 csops_audittoken(__unused proc_t p
, struct csops_audittoken_args
*uap
, __unused
int32_t *retval
)
1871 if (uap
->uaudittoken
== USER_ADDR_NULL
)
1873 return(csops_internal(uap
->pid
, uap
->ops
, uap
->useraddr
,
1874 uap
->usersize
, uap
->uaudittoken
));
1878 csops_copy_token(void *start
, size_t length
, user_size_t usize
, user_addr_t uaddr
)
1880 char fakeheader
[8] = { 0 };
1883 if (usize
< sizeof(fakeheader
))
1886 /* if no blob, fill in zero header */
1887 if (NULL
== start
) {
1889 length
= sizeof(fakeheader
);
1890 } else if (usize
< length
) {
1891 /* ... if input too short, copy out length of entitlement */
1892 uint32_t length32
= htonl((uint32_t)length
);
1893 memcpy(&fakeheader
[4], &length32
, sizeof(length32
));
1895 error
= copyout(fakeheader
, uaddr
, sizeof(fakeheader
));
1897 return ERANGE
; /* input buffer to short, ERANGE signals that */
1900 return copyout(start
, uaddr
, length
);
1904 csops_internal(pid_t pid
, int ops
, user_addr_t uaddr
, user_size_t usersize
, user_addr_t uaudittoken
)
1906 size_t usize
= (size_t)CAST_DOWN(size_t, usersize
);
1912 unsigned char cdhash
[SHA1_RESULTLEN
];
1913 audit_token_t token
;
1914 unsigned int upid
=0, uidversion
= 0;
1916 forself
= error
= 0;
1919 pid
= proc_selfpid();
1920 if (pid
== proc_selfpid())
1927 case CS_OPS_PIDOFFSET
:
1928 case CS_OPS_ENTITLEMENTS_BLOB
:
1929 case CS_OPS_IDENTITY
:
1931 break; /* unrestricted */
1933 if (forself
== 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE
)
1938 pt
= proc_find(pid
);
1939 if (pt
== PROC_NULL
)
1943 uidversion
= pt
->p_idversion
;
1944 if (uaudittoken
!= USER_ADDR_NULL
) {
1946 error
= copyin(uaudittoken
, &token
, sizeof(audit_token_t
));
1949 /* verify the audit token pid/idversion matches with proc */
1950 if ((token
.val
[5] != upid
) || (token
.val
[7] != uidversion
)) {
1958 case CS_OPS_STATUS
: {
1962 retflags
= pt
->p_csflags
;
1963 if (cs_enforcement(pt
))
1964 retflags
|= CS_ENFORCEMENT
;
1967 if (uaddr
!= USER_ADDR_NULL
)
1968 error
= copyout(&retflags
, uaddr
, sizeof(uint32_t));
1971 case CS_OPS_MARKINVALID
:
1973 if ((pt
->p_csflags
& CS_VALID
) == CS_VALID
) { /* is currently valid */
1974 pt
->p_csflags
&= ~CS_VALID
; /* set invalid */
1975 if ((pt
->p_csflags
& CS_KILL
) == CS_KILL
) {
1976 pt
->p_csflags
|= CS_KILLED
;
1979 printf("CODE SIGNING: marked invalid by pid %d: "
1980 "p=%d[%s] honoring CS_KILL, final status 0x%x\n",
1981 proc_selfpid(), pt
->p_pid
, pt
->p_comm
, pt
->p_csflags
);
1983 psignal(pt
, SIGKILL
);
1991 case CS_OPS_MARKHARD
:
1993 pt
->p_csflags
|= CS_HARD
;
1994 if ((pt
->p_csflags
& CS_VALID
) == 0) {
1995 /* @@@ allow? reject? kill? @@@ */
2003 case CS_OPS_MARKKILL
:
2005 pt
->p_csflags
|= CS_KILL
;
2006 if ((pt
->p_csflags
& CS_VALID
) == 0) {
2008 psignal(pt
, SIGKILL
);
2013 case CS_OPS_PIDOFFSET
:
2014 toff
= pt
->p_textoff
;
2016 error
= copyout(&toff
, uaddr
, sizeof(toff
));
2021 /* pt already holds a reference on its p_textvp */
2023 toff
= pt
->p_textoff
;
2025 if (tvp
== NULLVP
|| usize
!= SHA1_RESULTLEN
) {
2030 error
= vn_getcdhash(tvp
, toff
, cdhash
);
2034 error
= copyout(cdhash
, uaddr
, sizeof (cdhash
));
2039 case CS_OPS_ENTITLEMENTS_BLOB
: {
2045 if ((pt
->p_csflags
& CS_VALID
) == 0) {
2051 error
= cs_entitlements_blob_get(pt
, &start
, &length
);
2056 error
= csops_copy_token(start
, length
, usize
, uaddr
);
2059 case CS_OPS_MARKRESTRICT
:
2061 pt
->p_csflags
|= CS_RESTRICT
;
2065 case CS_OPS_SET_STATUS
: {
2068 if (usize
< sizeof(flags
)) {
2073 error
= copyin(uaddr
, &flags
, sizeof(flags
));
2077 /* only allow setting a subset of all code sign flags */
2079 CS_HARD
| CS_EXEC_SET_HARD
|
2080 CS_KILL
| CS_EXEC_SET_KILL
|
2083 CS_ENFORCEMENT
| CS_EXEC_SET_ENFORCEMENT
|
2084 CS_ENTITLEMENTS_VALIDATED
;
2087 if (pt
->p_csflags
& CS_VALID
)
2088 pt
->p_csflags
|= flags
;
2100 if ((pt
->p_csflags
& CS_VALID
) == 0) {
2106 error
= cs_blob_get(pt
, &start
, &length
);
2111 error
= csops_copy_token(start
, length
, usize
, uaddr
);
2114 case CS_OPS_IDENTITY
: {
2115 const char *identity
;
2116 uint8_t fakeheader
[8];
2121 * Make identity have a blob header to make it
2122 * easier on userland to guess the identity
2125 if (usize
< sizeof(fakeheader
)) {
2129 memset(fakeheader
, 0, sizeof(fakeheader
));
2132 if ((pt
->p_csflags
& CS_VALID
) == 0) {
2138 identity
= cs_identity_get(pt
);
2140 if (identity
== NULL
) {
2145 length
= strlen(identity
) + 1; /* include NUL */
2146 idlen
= htonl(length
+ sizeof(fakeheader
));
2147 memcpy(&fakeheader
[4], &idlen
, sizeof(idlen
));
2149 error
= copyout(fakeheader
, uaddr
, sizeof(fakeheader
));
2153 if (usize
< sizeof(fakeheader
) + length
)
2155 else if (usize
> sizeof(fakeheader
))
2156 error
= copyout(identity
, uaddr
+ sizeof(fakeheader
), length
);
2161 case CS_OPS_SIGPUP_INSTALL
:
2162 error
= sigpup_install(uaddr
);
2165 case CS_OPS_SIGPUP_DROP
:
2166 error
= sigpup_drop();
2179 proc_iterate(flags
, callout
, arg
, filterfn
, filterarg
)
2181 int (*callout
)(proc_t
, void *);
2183 int (*filterfn
)(proc_t
, void *);
2188 int count
, pidcount
, alloc_count
, i
, retval
;
2191 if (count
> hard_maxproc
)
2192 count
= hard_maxproc
;
2193 alloc_count
= count
* sizeof(pid_t
);
2194 pid_list
= (pid_t
*)kalloc(alloc_count
);
2195 bzero(pid_list
, alloc_count
);
2202 if (flags
& PROC_ALLPROCLIST
) {
2203 for (p
= allproc
.lh_first
; (p
!= 0); p
= p
->p_list
.le_next
) {
2204 if (p
->p_stat
== SIDL
)
2206 if ( (filterfn
== 0 ) || (filterfn(p
, filterarg
) != 0)) {
2207 pid_list
[pidcount
] = p
->p_pid
;
2209 if (pidcount
>= count
)
2214 if ((pidcount
< count
) && (flags
& PROC_ZOMBPROCLIST
)) {
2215 for (p
= zombproc
.lh_first
; p
!= 0; p
= p
->p_list
.le_next
) {
2216 if ( (filterfn
== 0 ) || (filterfn(p
, filterarg
) != 0)) {
2217 pid_list
[pidcount
] = p
->p_pid
;
2219 if (pidcount
>= count
)
2229 for (i
= 0; i
< pidcount
; i
++) {
2230 p
= proc_find(pid_list
[i
]);
2232 if ((flags
& PROC_NOWAITTRANS
) == 0)
2233 proc_transwait(p
, 0);
2234 retval
= callout(p
, arg
);
2240 case PROC_RETURNED_DONE
:
2243 case PROC_CLAIMED_DONE
:
2249 } else if (flags
& PROC_ZOMBPROCLIST
) {
2250 p
= proc_find_zombref(pid_list
[i
]);
2251 if (p
!= PROC_NULL
) {
2252 retval
= callout(p
, arg
);
2256 proc_drop_zombref(p
);
2258 case PROC_RETURNED_DONE
:
2259 proc_drop_zombref(p
);
2261 case PROC_CLAIMED_DONE
:
2272 kfree(pid_list
, alloc_count
);
2279 /* This is for iteration in case of trivial non blocking callouts */
2281 proc_scanall(flags
, callout
, arg
)
2283 int (*callout
)(proc_t
, void *);
2293 if (flags
& PROC_ALLPROCLIST
) {
2294 for (p
= allproc
.lh_first
; (p
!= 0); p
= p
->p_list
.le_next
) {
2295 retval
= callout(p
, arg
);
2296 if (retval
== PROC_RETURNED_DONE
)
2300 if (flags
& PROC_ZOMBPROCLIST
) {
2301 for (p
= zombproc
.lh_first
; p
!= 0; p
= p
->p_list
.le_next
) {
2302 retval
= callout(p
, arg
);
2303 if (retval
== PROC_RETURNED_DONE
)
2317 proc_rebootscan(callout
, arg
, filterfn
, filterarg
)
2318 int (*callout
)(proc_t
, void *);
2320 int (*filterfn
)(proc_t
, void *);
2324 int lockheld
= 0, retval
;
2326 proc_shutdown_exitcount
= 0;
2334 for (p
= allproc
.lh_first
; (p
!= 0); p
= p
->p_list
.le_next
) {
2335 if ( (filterfn
== 0 ) || (filterfn(p
, filterarg
) != 0)) {
2336 p
= proc_ref_locked(p
);
2342 proc_transwait(p
, 0);
2343 retval
= callout(p
, arg
);
2347 case PROC_RETURNED_DONE
:
2348 case PROC_CLAIMED_DONE
:
2352 goto ps_allprocscan
;
2354 } /* allproc walk thru */
2356 if (lockheld
== 1) {
2368 proc_childrenwalk(parent
, callout
, arg
)
2369 struct proc
* parent
;
2370 int (*callout
)(proc_t
, void *);
2373 register struct proc
*p
;
2375 int count
, pidcount
, alloc_count
, i
, retval
;
2378 if (count
> hard_maxproc
)
2379 count
= hard_maxproc
;
2380 alloc_count
= count
* sizeof(pid_t
);
2381 pid_list
= (pid_t
*)kalloc(alloc_count
);
2382 bzero(pid_list
, alloc_count
);
2389 for (p
= parent
->p_children
.lh_first
; (p
!= 0); p
= p
->p_sibling
.le_next
) {
2390 if (p
->p_stat
== SIDL
)
2392 pid_list
[pidcount
] = p
->p_pid
;
2394 if (pidcount
>= count
)
2400 for (i
= 0; i
< pidcount
; i
++) {
2401 p
= proc_find(pid_list
[i
]);
2403 proc_transwait(p
, 0);
2404 retval
= callout(p
, arg
);
2408 case PROC_RETURNED_DONE
:
2410 if (retval
== PROC_RETURNED_DONE
) {
2415 case PROC_CLAIMED_DONE
:
2425 kfree(pid_list
, alloc_count
);
2432 /* PGRP_BLOCKITERATE is not implemented yet */
2434 pgrp_iterate(pgrp
, flags
, callout
, arg
, filterfn
, filterarg
)
2437 int (*callout
)(proc_t
, void *);
2439 int (*filterfn
)(proc_t
, void *);
2444 int count
, pidcount
, i
, alloc_count
;
2447 int dropref
= flags
& PGRP_DROPREF
;
2449 int serialize
= flags
& PGRP_BLOCKITERATE
;
2456 count
= pgrp
->pg_membercnt
+ 10;
2457 if (count
> hard_maxproc
)
2458 count
= hard_maxproc
;
2459 alloc_count
= count
* sizeof(pid_t
);
2460 pid_list
= (pid_t
*)kalloc(alloc_count
);
2461 bzero(pid_list
, alloc_count
);
2464 if (serialize
!= 0) {
2465 while ((pgrp
->pg_listflags
& PGRP_FLAG_ITERABEGIN
) == PGRP_FLAG_ITERABEGIN
) {
2466 pgrp
->pg_listflags
|= PGRP_FLAG_ITERWAIT
;
2467 msleep(&pgrp
->pg_listflags
, &pgrp
->pg_mlock
, 0, "pgrp_iterate", 0);
2469 pgrp
->pg_listflags
|= PGRP_FLAG_ITERABEGIN
;
2475 for (p
= pgrp
->pg_members
.lh_first
; p
!= 0;
2476 p
= p
->p_pglist
.le_next
) {
2477 if ( (filterfn
== 0 ) || (filterfn(p
, filterarg
) != 0)) {
2478 pid_list
[pidcount
] = p
->p_pid
;
2480 if (pidcount
>= count
)
2487 if ((serialize
== 0) && (dropref
!= 0))
2491 for (i
= 0; i
< pidcount
; i
++) {
2492 /* No handling or proc0 */
2493 if (pid_list
[i
] == 0)
2495 p
= proc_find(pid_list
[i
]);
2497 if (p
->p_pgrpid
!= pgid
) {
2501 proc_transwait(p
, 0);
2502 retval
= callout(p
, arg
);
2506 case PROC_RETURNED_DONE
:
2508 if (retval
== PROC_RETURNED_DONE
) {
2513 case PROC_CLAIMED_DONE
:
2522 if (serialize
!= 0) {
2524 pgrp
->pg_listflags
&= ~PGRP_FLAG_ITERABEGIN
;
2525 if ((pgrp
->pg_listflags
& PGRP_FLAG_ITERWAIT
) == PGRP_FLAG_ITERWAIT
) {
2526 pgrp
->pg_listflags
&= ~PGRP_FLAG_ITERWAIT
;
2527 wakeup(&pgrp
->pg_listflags
);
2533 kfree(pid_list
, alloc_count
);
2538 pgrp_add(struct pgrp
* pgrp
, struct proc
* parent
, struct proc
* child
)
2541 child
->p_pgrp
= pgrp
;
2542 child
->p_pgrpid
= pgrp
->pg_id
;
2543 child
->p_listflag
|= P_LIST_INPGRP
;
2545 * When pgrp is being freed , a process can still
2546 * request addition using setpgid from bash when
2547 * login is terminated (login cycler) return ESRCH
2548 * Safe to hold lock due to refcount on pgrp
2550 if ((pgrp
->pg_listflags
& (PGRP_FLAG_TERMINATE
| PGRP_FLAG_DEAD
)) == PGRP_FLAG_TERMINATE
) {
2551 pgrp
->pg_listflags
&= ~PGRP_FLAG_TERMINATE
;
2554 if ((pgrp
->pg_listflags
& PGRP_FLAG_DEAD
) == PGRP_FLAG_DEAD
)
2555 panic("pgrp_add : pgrp is dead adding process");
2559 pgrp
->pg_membercnt
++;
2560 if ( parent
!= PROC_NULL
) {
2561 LIST_INSERT_AFTER(parent
, child
, p_pglist
);
2563 LIST_INSERT_HEAD(&pgrp
->pg_members
, child
, p_pglist
);
2568 if (((pgrp
->pg_listflags
& (PGRP_FLAG_TERMINATE
| PGRP_FLAG_DEAD
)) == PGRP_FLAG_TERMINATE
) && (pgrp
->pg_membercnt
!= 0)) {
2569 pgrp
->pg_listflags
&= ~PGRP_FLAG_TERMINATE
;
2575 pgrp_remove(struct proc
* p
)
2582 #if __PROC_INTERNAL_DEBUG
2583 if ((p
->p_listflag
& P_LIST_INPGRP
) == 0)
2584 panic("removing from pglist but no named ref\n");
2586 p
->p_pgrpid
= PGRPID_DEAD
;
2587 p
->p_listflag
&= ~P_LIST_INPGRP
;
2591 if (pg
== PGRP_NULL
)
2592 panic("pgrp_remove: pg is NULL");
2596 if (pg
->pg_membercnt
< 0)
2597 panic("pgprp: -ve membercnt pgprp:%p p:%p\n",pg
, p
);
2599 LIST_REMOVE(p
, p_pglist
);
2600 if (pg
->pg_members
.lh_first
== 0) {
2602 pgdelete_dropref(pg
);
2610 /* cannot use proc_pgrp as it maybe stalled */
2612 pgrp_replace(struct proc
* p
, struct pgrp
* newpg
)
2614 struct pgrp
* oldpg
;
2620 while ((p
->p_listflag
& P_LIST_PGRPTRANS
) == P_LIST_PGRPTRANS
) {
2621 p
->p_listflag
|= P_LIST_PGRPTRWAIT
;
2622 (void)msleep(&p
->p_pgrpid
, proc_list_mlock
, 0, "proc_pgrp", 0);
2625 p
->p_listflag
|= P_LIST_PGRPTRANS
;
2628 if (oldpg
== PGRP_NULL
)
2629 panic("pgrp_replace: oldpg NULL");
2630 oldpg
->pg_refcount
++;
2631 #if __PROC_INTERNAL_DEBUG
2632 if ((p
->p_listflag
& P_LIST_INPGRP
) == 0)
2633 panic("removing from pglist but no named ref\n");
2635 p
->p_pgrpid
= PGRPID_DEAD
;
2636 p
->p_listflag
&= ~P_LIST_INPGRP
;
2642 oldpg
->pg_membercnt
--;
2643 if (oldpg
->pg_membercnt
< 0)
2644 panic("pgprp: -ve membercnt pgprp:%p p:%p\n",oldpg
, p
);
2645 LIST_REMOVE(p
, p_pglist
);
2646 if (oldpg
->pg_members
.lh_first
== 0) {
2648 pgdelete_dropref(oldpg
);
2656 p
->p_pgrpid
= newpg
->pg_id
;
2657 p
->p_listflag
|= P_LIST_INPGRP
;
2659 * When pgrp is being freed , a process can still
2660 * request addition using setpgid from bash when
2661 * login is terminated (login cycler) return ESRCH
2662 * Safe to hold lock due to refcount on pgrp
2664 if ((newpg
->pg_listflags
& (PGRP_FLAG_TERMINATE
| PGRP_FLAG_DEAD
)) == PGRP_FLAG_TERMINATE
) {
2665 newpg
->pg_listflags
&= ~PGRP_FLAG_TERMINATE
;
2668 if ((newpg
->pg_listflags
& PGRP_FLAG_DEAD
) == PGRP_FLAG_DEAD
)
2669 panic("pgrp_add : pgrp is dead adding process");
2673 newpg
->pg_membercnt
++;
2674 LIST_INSERT_HEAD(&newpg
->pg_members
, p
, p_pglist
);
2678 if (((newpg
->pg_listflags
& (PGRP_FLAG_TERMINATE
| PGRP_FLAG_DEAD
)) == PGRP_FLAG_TERMINATE
) && (newpg
->pg_membercnt
!= 0)) {
2679 newpg
->pg_listflags
&= ~PGRP_FLAG_TERMINATE
;
2682 p
->p_listflag
&= ~P_LIST_PGRPTRANS
;
2683 if ((p
->p_listflag
& P_LIST_PGRPTRWAIT
) == P_LIST_PGRPTRWAIT
) {
2684 p
->p_listflag
&= ~P_LIST_PGRPTRWAIT
;
2685 wakeup(&p
->p_pgrpid
);
2692 pgrp_lock(struct pgrp
* pgrp
)
2694 lck_mtx_lock(&pgrp
->pg_mlock
);
2698 pgrp_unlock(struct pgrp
* pgrp
)
2700 lck_mtx_unlock(&pgrp
->pg_mlock
);
2704 session_lock(struct session
* sess
)
2706 lck_mtx_lock(&sess
->s_mlock
);
2711 session_unlock(struct session
* sess
)
2713 lck_mtx_unlock(&sess
->s_mlock
);
2725 while ((p
->p_listflag
& P_LIST_PGRPTRANS
) == P_LIST_PGRPTRANS
) {
2726 p
->p_listflag
|= P_LIST_PGRPTRWAIT
;
2727 (void)msleep(&p
->p_pgrpid
, proc_list_mlock
, 0, "proc_pgrp", 0);
2732 assert(pgrp
!= NULL
);
2734 if (pgrp
!= PGRP_NULL
) {
2735 pgrp
->pg_refcount
++;
2736 if ((pgrp
->pg_listflags
& (PGRP_FLAG_TERMINATE
| PGRP_FLAG_DEAD
)) != 0)
2737 panic("proc_pgrp: ref being povided for dead pgrp");
2746 tty_pgrp(struct tty
* tp
)
2748 struct pgrp
* pg
= PGRP_NULL
;
2753 if (pg
!= PGRP_NULL
) {
2754 if ((pg
->pg_listflags
& PGRP_FLAG_DEAD
) != 0)
2755 panic("tty_pgrp: ref being povided for dead pgrp");
2764 proc_session(proc_t p
)
2766 struct session
* sess
= SESSION_NULL
;
2769 return(SESSION_NULL
);
2773 /* wait during transitions */
2774 while ((p
->p_listflag
& P_LIST_PGRPTRANS
) == P_LIST_PGRPTRANS
) {
2775 p
->p_listflag
|= P_LIST_PGRPTRWAIT
;
2776 (void)msleep(&p
->p_pgrpid
, proc_list_mlock
, 0, "proc_pgrp", 0);
2779 if ((p
->p_pgrp
!= PGRP_NULL
) && ((sess
= p
->p_pgrp
->pg_session
) != SESSION_NULL
)) {
2780 if ((sess
->s_listflags
& (S_LIST_TERM
| S_LIST_DEAD
)) != 0)
2781 panic("proc_session:returning sesssion ref on terminating session");
2789 session_rele(struct session
*sess
)
2792 if (--sess
->s_count
== 0) {
2793 if ((sess
->s_listflags
& (S_LIST_TERM
| S_LIST_DEAD
)) != 0)
2794 panic("session_rele: terminating already terminated session");
2795 sess
->s_listflags
|= S_LIST_TERM
;
2796 LIST_REMOVE(sess
, s_hash
);
2797 sess
->s_listflags
|= S_LIST_DEAD
;
2798 if (sess
->s_count
!= 0)
2799 panic("session_rele: freeing session in use");
2801 #if CONFIG_FINE_LOCK_GROUPS
2802 lck_mtx_destroy(&sess
->s_mlock
, proc_mlock_grp
);
2804 lck_mtx_destroy(&sess
->s_mlock
, proc_lck_grp
);
2806 FREE_ZONE(sess
, sizeof(struct session
), M_SESSION
);
2812 proc_transstart(proc_t p
, int locked
, int non_blocking
)
2816 while ((p
->p_lflag
& P_LINTRANSIT
) == P_LINTRANSIT
) {
2817 if (((p
->p_lflag
& P_LTRANSCOMMIT
) == P_LTRANSCOMMIT
) || non_blocking
) {
2822 p
->p_lflag
|= P_LTRANSWAIT
;
2823 msleep(&p
->p_lflag
, &p
->p_mlock
, 0, "proc_signstart", NULL
);
2825 p
->p_lflag
|= P_LINTRANSIT
;
2826 p
->p_transholder
= current_thread();
2833 proc_transcommit(proc_t p
, int locked
)
2838 assert ((p
->p_lflag
& P_LINTRANSIT
) == P_LINTRANSIT
);
2839 assert (p
->p_transholder
== current_thread());
2840 p
->p_lflag
|= P_LTRANSCOMMIT
;
2842 if ((p
->p_lflag
& P_LTRANSWAIT
) == P_LTRANSWAIT
) {
2843 p
->p_lflag
&= ~P_LTRANSWAIT
;
2844 wakeup(&p
->p_lflag
);
2851 proc_transend(proc_t p
, int locked
)
2856 p
->p_lflag
&= ~( P_LINTRANSIT
| P_LTRANSCOMMIT
);
2857 p
->p_transholder
= NULL
;
2859 if ((p
->p_lflag
& P_LTRANSWAIT
) == P_LTRANSWAIT
) {
2860 p
->p_lflag
&= ~P_LTRANSWAIT
;
2861 wakeup(&p
->p_lflag
);
2868 proc_transwait(proc_t p
, int locked
)
2872 while ((p
->p_lflag
& P_LINTRANSIT
) == P_LINTRANSIT
) {
2873 if ((p
->p_lflag
& P_LTRANSCOMMIT
) == P_LTRANSCOMMIT
&& current_proc() == p
) {
2878 p
->p_lflag
|= P_LTRANSWAIT
;
2879 msleep(&p
->p_lflag
, &p
->p_mlock
, 0, "proc_signstart", NULL
);
2887 proc_klist_lock(void)
2889 lck_mtx_lock(proc_klist_mlock
);
2893 proc_klist_unlock(void)
2895 lck_mtx_unlock(proc_klist_mlock
);
2899 proc_knote(struct proc
* p
, long hint
)
2902 KNOTE(&p
->p_klist
, hint
);
2903 proc_klist_unlock();
2907 proc_knote_drain(struct proc
*p
)
2909 struct knote
*kn
= NULL
;
2912 * Clear the proc's klist to avoid references after the proc is reaped.
2915 while ((kn
= SLIST_FIRST(&p
->p_klist
))) {
2916 kn
->kn_ptr
.p_proc
= PROC_NULL
;
2917 KNOTE_DETACH(&p
->p_klist
, kn
);
2919 proc_klist_unlock();
2923 proc_setregister(proc_t p
)
2926 p
->p_lflag
|= P_LREGISTER
;
2931 proc_resetregister(proc_t p
)
2934 p
->p_lflag
&= ~P_LREGISTER
;
2939 proc_pgrpid(proc_t p
)
2947 return current_proc()->p_pgrpid
;
2951 /* return control and action states */
2953 proc_getpcontrol(int pid
, int * pcontrolp
)
2960 if (pcontrolp
!= NULL
)
2961 *pcontrolp
= p
->p_pcaction
;
2968 proc_dopcontrol(proc_t p
)
2974 pcontrol
= PROC_CONTROL_STATE(p
);
2976 if (PROC_ACTION_STATE(p
) == 0) {
2979 PROC_SETACTION_STATE(p
);
2981 printf("low swap: throttling pid %d (%s)\n", p
->p_pid
, p
->p_comm
);
2985 PROC_SETACTION_STATE(p
);
2987 printf("low swap: suspending pid %d (%s)\n", p
->p_pid
, p
->p_comm
);
2988 task_suspend(p
->task
);
2992 PROC_SETACTION_STATE(p
);
2994 printf("low swap: killing pid %d (%s)\n", p
->p_pid
, p
->p_comm
);
2995 psignal(p
, SIGKILL
);
3005 return(PROC_RETURNED
);
3010 * Resume a throttled or suspended process. This is an internal interface that's only
3011 * used by the user level code that presents the GUI when we run out of swap space and
3012 * hence is restricted to processes with superuser privileges.
3016 proc_resetpcontrol(int pid
)
3021 proc_t self
= current_proc();
3023 /* if the process has been validated to handle resource control or root is valid one */
3024 if (((self
->p_lflag
& P_LVMRSRCOWNER
) == 0) && (error
= suser(kauth_cred_get(), 0)))
3033 pcontrol
= PROC_CONTROL_STATE(p
);
3035 if(PROC_ACTION_STATE(p
) !=0) {
3038 PROC_RESETACTION_STATE(p
);
3040 printf("low swap: unthrottling pid %d (%s)\n", p
->p_pid
, p
->p_comm
);
3044 PROC_RESETACTION_STATE(p
);
3046 printf("low swap: resuming pid %d (%s)\n", p
->p_pid
, p
->p_comm
);
3047 task_resume(p
->task
);
3052 PROC_SETACTION_STATE(p
);
3054 printf("low swap: attempt to unkill pid %d (%s) ignored\n", p
->p_pid
, p
->p_comm
);
3070 struct no_paging_space
3072 uint64_t pcs_max_size
;
3073 uint64_t pcs_uniqueid
;
3076 uint64_t pcs_total_size
;
3078 uint64_t npcs_max_size
;
3079 uint64_t npcs_uniqueid
;
3081 int npcs_proc_count
;
3082 uint64_t npcs_total_size
;
3084 int apcs_proc_count
;
3085 uint64_t apcs_total_size
;
3090 proc_pcontrol_filter(proc_t p
, void *arg
)
3092 struct no_paging_space
*nps
;
3093 uint64_t compressed
;
3095 nps
= (struct no_paging_space
*)arg
;
3097 compressed
= get_task_compressed(p
->task
);
3099 if (PROC_CONTROL_STATE(p
)) {
3100 if (PROC_ACTION_STATE(p
) == 0) {
3101 if (compressed
> nps
->pcs_max_size
) {
3102 nps
->pcs_pid
= p
->p_pid
;
3103 nps
->pcs_uniqueid
= p
->p_uniqueid
;
3104 nps
->pcs_max_size
= compressed
;
3106 nps
->pcs_total_size
+= compressed
;
3107 nps
->pcs_proc_count
++;
3109 nps
->apcs_total_size
+= compressed
;
3110 nps
->apcs_proc_count
++;
3113 if (compressed
> nps
->npcs_max_size
) {
3114 nps
->npcs_pid
= p
->p_pid
;
3115 nps
->npcs_uniqueid
= p
->p_uniqueid
;
3116 nps
->npcs_max_size
= compressed
;
3118 nps
->npcs_total_size
+= compressed
;
3119 nps
->npcs_proc_count
++;
3127 proc_pcontrol_null(__unused proc_t p
, __unused
void *arg
)
3129 return(PROC_RETURNED
);
3134 * Deal with the low on compressor pool space condition... this function
3135 * gets called when we are approaching the limits of the compressor pool or
3136 * we are unable to create a new swap file.
3137 * Since this eventually creates a memory deadlock situtation, we need to take action to free up
3138 * memory resources (both compressed and uncompressed) in order to prevent the system from hanging completely.
3139 * There are 2 categories of processes to deal with. Those that have an action
3140 * associated with them by the task itself and those that do not. Actionable
3141 * tasks can have one of three categories specified: ones that
3142 * can be killed immediately, ones that should be suspended, and ones that should
3143 * be throttled. Processes that do not have an action associated with them are normally
3144 * ignored unless they are utilizing such a large percentage of the compressor pool (currently 50%)
3145 * that only by killing them can we hope to put the system back into a usable state.
3148 #define NO_PAGING_SPACE_DEBUG 0
3150 extern uint64_t vm_compressor_pages_compressed(void);
3152 struct timeval last_no_space_action
= {0, 0};
3155 no_paging_space_action()
3158 struct no_paging_space nps
;
3162 * Throttle how often we come through here. Once every 5 seconds should be plenty.
3166 if (now
.tv_sec
<= last_no_space_action
.tv_sec
+ 5)
3170 * Examine all processes and find the biggest (biggest is based on the number of pages this
3171 * task has in the compressor pool) that has been marked to have some action
3172 * taken when swap space runs out... we also find the biggest that hasn't been marked for
3175 * If the biggest non-actionable task is over the "dangerously big" threashold (currently 50% of
3176 * the total number of pages held by the compressor, we go ahead and kill it since no other task
3177 * can have any real effect on the situation. Otherwise, we go after the actionable process.
3179 bzero(&nps
, sizeof(nps
));
3181 proc_iterate(PROC_ALLPROCLIST
, proc_pcontrol_null
, (void *)NULL
, proc_pcontrol_filter
, (void *)&nps
);
3183 #if NO_PAGING_SPACE_DEBUG
3184 printf("low swap: npcs_proc_count = %d, npcs_total_size = %qd, npcs_max_size = %qd\n",
3185 nps
.npcs_proc_count
, nps
.npcs_total_size
, nps
.npcs_max_size
);
3186 printf("low swap: pcs_proc_count = %d, pcs_total_size = %qd, pcs_max_size = %qd\n",
3187 nps
.pcs_proc_count
, nps
.pcs_total_size
, nps
.pcs_max_size
);
3188 printf("low swap: apcs_proc_count = %d, apcs_total_size = %qd\n",
3189 nps
.apcs_proc_count
, nps
.apcs_total_size
);
3191 if (nps
.npcs_max_size
> (vm_compressor_pages_compressed() * 50) / 100) {
3193 * for now we'll knock out any task that has more then 50% of the pages
3194 * held by the compressor
3196 if ((p
= proc_find(nps
.npcs_pid
)) != PROC_NULL
) {
3198 if (nps
.npcs_uniqueid
== p
->p_uniqueid
) {
3200 * verify this is still the same process
3201 * in case the proc exited and the pid got reused while
3202 * we were finishing the proc_iterate and getting to this point
3204 last_no_space_action
= now
;
3206 printf("low swap: killing pid %d (%s)\n", p
->p_pid
, p
->p_comm
);
3207 psignal(p
, SIGKILL
);
3218 if (nps
.pcs_max_size
> 0) {
3219 if ((p
= proc_find(nps
.pcs_pid
)) != PROC_NULL
) {
3221 if (nps
.pcs_uniqueid
== p
->p_uniqueid
) {
3223 * verify this is still the same process
3224 * in case the proc exited and the pid got reused while
3225 * we were finishing the proc_iterate and getting to this point
3227 last_no_space_action
= now
;
3239 last_no_space_action
= now
;
3241 printf("low swap: unable to find any eligible processes to take action on\n");
3247 proc_trace_log(__unused proc_t p
, struct proc_trace_log_args
*uap
, __unused
int *retval
)
3250 proc_t target_proc
= PROC_NULL
;
3251 pid_t target_pid
= uap
->pid
;
3252 uint64_t target_uniqueid
= uap
->uniqueid
;
3253 task_t target_task
= NULL
;
3255 if (priv_check_cred(kauth_cred_get(), PRIV_PROC_TRACE_INSPECT
, 0)) {
3259 target_proc
= proc_find(target_pid
);
3260 if (target_proc
!= PROC_NULL
) {
3261 if (target_uniqueid
!= proc_uniqueid(target_proc
)) {
3266 target_task
= proc_task(target_proc
);
3267 if (task_send_trace_memory(target_task
, target_pid
, target_uniqueid
)) {
3275 if (target_proc
!= PROC_NULL
)
3276 proc_rele(target_proc
);
3280 #if VM_SCAN_FOR_SHADOW_CHAIN
3281 extern int vm_map_shadow_max(vm_map_t map
);
3282 int proc_shadow_max(void);
3283 int proc_shadow_max(void)
3292 for (p
= allproc
.lh_first
; (p
!= 0); p
= p
->p_list
.le_next
) {
3293 if (p
->p_stat
== SIDL
)
3299 map
= get_task_map(task
);
3303 retval
= vm_map_shadow_max(map
);
3311 #endif /* VM_SCAN_FOR_SHADOW_CHAIN */