/* bsd/kern/kern_proc.c — Apple xnu-2422.110.17 (mirrored via git.saurik.com) */
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)kern_proc.c 8.4 (Berkeley) 1/4/94
62 */
63 /*
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
67 * Version 2.0.
68 */
69 /* HISTORY
70 * 04-Aug-97 Umesh Vaishampayan (umeshv@apple.com)
71 * Added current_proc_EXTERNAL() function for the use of kernel
72 * loadable modules.
73 *
74 * 05-Jun-95 Mac Gillon (mgillon) at NeXT
75 * New version based on 3.3NS and 4.4
76 */
77
78
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/kernel.h>
82 #include <sys/proc_internal.h>
83 #include <sys/acct.h>
84 #include <sys/wait.h>
85 #include <sys/file_internal.h>
86 #include <sys/uio.h>
87 #include <sys/malloc.h>
88 #include <sys/lock.h>
89 #include <sys/mbuf.h>
90 #include <sys/ioctl.h>
91 #include <sys/tty.h>
92 #include <sys/signalvar.h>
93 #include <sys/syslog.h>
94 #include <sys/sysctl.h>
95 #include <sys/sysproto.h>
96 #include <sys/kauth.h>
97 #include <sys/codesign.h>
98 #include <sys/kernel_types.h>
99 #include <sys/ubc.h>
100 #include <kern/kalloc.h>
101 #include <kern/task.h>
102 #include <kern/assert.h>
103 #include <vm/vm_protos.h>
104 #include <vm/vm_map.h> /* vm_map_switch_protect() */
105 #include <mach/task.h>
106 #include <mach/message.h>
107
108 #if CONFIG_MEMORYSTATUS
109 #include <sys/kern_memorystatus.h>
110 #endif
111
112 #if CONFIG_MACF
113 #include <security/mac_framework.h>
114 #endif
115
116 #include <libkern/crypto/sha1.h>
117
118 /*
119 * Structure associated with user cacheing.
120 */
121 struct uidinfo {
122 LIST_ENTRY(uidinfo) ui_hash;
123 uid_t ui_uid;
124 long ui_proccnt;
125 };
126 #define UIHASH(uid) (&uihashtbl[(uid) & uihash])
127 LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
128 u_long uihash; /* size of hash table - 1 */
129
130 /*
131 * Other process lists
132 */
133 struct pidhashhead *pidhashtbl;
134 u_long pidhash;
135 struct pgrphashhead *pgrphashtbl;
136 u_long pgrphash;
137 struct sesshashhead *sesshashtbl;
138 u_long sesshash;
139
140 struct proclist allproc;
141 struct proclist zombproc;
142 extern struct tty cons;
143
144 #if CONFIG_LCTX
145 /*
146 * Login Context
147 */
148 static pid_t lastlcid = 1;
149 static int alllctx_cnt;
150
151 #define LCID_MAX 8192 /* Does this really need to be large? */
152 static int maxlcid = LCID_MAX;
153
154 LIST_HEAD(lctxlist, lctx);
155 static struct lctxlist alllctx;
156
157 lck_mtx_t alllctx_lock;
158 lck_grp_t * lctx_lck_grp;
159 lck_grp_attr_t * lctx_lck_grp_attr;
160 lck_attr_t * lctx_lck_attr;
161
162 static void lctxinit(void);
163 #endif
164
165 extern int cs_debug;
166
167 #if DEBUG
168 #define __PROC_INTERNAL_DEBUG 1
169 #endif
170 /* Name to give to core files */
171 __XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN+1] = {"/cores/core.%P"};
172
173 static void orphanpg(struct pgrp *pg);
174 void proc_name_kdp(task_t t, char * buf, int size);
175 char *proc_name_address(void *p);
176
177 static void pgrp_add(struct pgrp * pgrp, proc_t parent, proc_t child);
178 static void pgrp_remove(proc_t p);
179 static void pgrp_replace(proc_t p, struct pgrp *pgrp);
180 static void pgdelete_dropref(struct pgrp *pgrp);
181 extern void pg_rele_dropref(struct pgrp * pgrp);
182 static int csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaddittoken);
183 static boolean_t proc_parent_is_currentproc(proc_t p);
184
185 struct fixjob_iterargs {
186 struct pgrp * pg;
187 struct session * mysession;
188 int entering;
189 };
190
191 int fixjob_callback(proc_t, void *);
192
193 /*
194 * Initialize global process hashing structures.
195 */
196 void
197 procinit(void)
198 {
199 LIST_INIT(&allproc);
200 LIST_INIT(&zombproc);
201 pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
202 pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
203 sesshashtbl = hashinit(maxproc / 4, M_PROC, &sesshash);
204 uihashtbl = hashinit(maxproc / 16, M_PROC, &uihash);
205 #if CONFIG_LCTX
206 lctxinit();
207 #endif
208 }
209
210 /*
211 * Change the count associated with number of processes
212 * a given user is using. This routine protects the uihash
213 * with the list lock
214 */
215 int
216 chgproccnt(uid_t uid, int diff)
217 {
218 struct uidinfo *uip;
219 struct uidinfo *newuip = NULL;
220 struct uihashhead *uipp;
221 int retval;
222
223 again:
224 proc_list_lock();
225 uipp = UIHASH(uid);
226 for (uip = uipp->lh_first; uip != 0; uip = uip->ui_hash.le_next)
227 if (uip->ui_uid == uid)
228 break;
229 if (uip) {
230 uip->ui_proccnt += diff;
231 if (uip->ui_proccnt > 0) {
232 retval = uip->ui_proccnt;
233 proc_list_unlock();
234 goto out;
235 }
236 if (uip->ui_proccnt < 0)
237 panic("chgproccnt: procs < 0");
238 LIST_REMOVE(uip, ui_hash);
239 retval = 0;
240 proc_list_unlock();
241 FREE_ZONE(uip, sizeof(*uip), M_PROC);
242 goto out;
243 }
244 if (diff <= 0) {
245 if (diff == 0) {
246 retval = 0;
247 proc_list_unlock();
248 goto out;
249 }
250 panic("chgproccnt: lost user");
251 }
252 if (newuip != NULL) {
253 uip = newuip;
254 newuip = NULL;
255 LIST_INSERT_HEAD(uipp, uip, ui_hash);
256 uip->ui_uid = uid;
257 uip->ui_proccnt = diff;
258 retval = diff;
259 proc_list_unlock();
260 goto out;
261 }
262 proc_list_unlock();
263 MALLOC_ZONE(newuip, struct uidinfo *, sizeof(*uip), M_PROC, M_WAITOK);
264 if (newuip == NULL)
265 panic("chgproccnt: M_PROC zone depleted");
266 goto again;
267 out:
268 if (newuip != NULL)
269 FREE_ZONE(newuip, sizeof(*uip), M_PROC);
270 return(retval);
271 }
272
273 /*
274 * Is p an inferior of the current process?
275 */
276 int
277 inferior(proc_t p)
278 {
279 int retval = 0;
280
281 proc_list_lock();
282 for (; p != current_proc(); p = p->p_pptr)
283 if (p->p_pid == 0)
284 goto out;
285 retval = 1;
286 out:
287 proc_list_unlock();
288 return(retval);
289 }
290
291 /*
292 * Is p an inferior of t ?
293 */
294 int
295 isinferior(proc_t p, proc_t t)
296 {
297 int retval = 0;
298 int nchecked = 0;
299 proc_t start = p;
300
301 /* if p==t they are not inferior */
302 if (p == t)
303 return(0);
304
305 proc_list_lock();
306 for (; p != t; p = p->p_pptr) {
307 nchecked++;
308
309 /* Detect here if we're in a cycle */
310 if ((p->p_pid == 0) || (p->p_pptr == start) || (nchecked >= nprocs))
311 goto out;
312 }
313 retval = 1;
314 out:
315 proc_list_unlock();
316 return(retval);
317 }
318
319 int
320 proc_isinferior(int pid1, int pid2)
321 {
322 proc_t p = PROC_NULL;
323 proc_t t = PROC_NULL;
324 int retval = 0;
325
326 if (((p = proc_find(pid1)) != (proc_t)0 ) && ((t = proc_find(pid2)) != (proc_t)0))
327 retval = isinferior(p, t);
328
329 if (p != PROC_NULL)
330 proc_rele(p);
331 if (t != PROC_NULL)
332 proc_rele(t);
333
334 return(retval);
335 }
336
337 proc_t
338 proc_find(int pid)
339 {
340 return(proc_findinternal(pid, 0));
341 }
342
343 proc_t
344 proc_findinternal(int pid, int locked)
345 {
346 proc_t p = PROC_NULL;
347
348 if (locked == 0) {
349 proc_list_lock();
350 }
351
352 p = pfind_locked(pid);
353 if ((p == PROC_NULL) || (p != proc_ref_locked(p)))
354 p = PROC_NULL;
355
356 if (locked == 0) {
357 proc_list_unlock();
358 }
359
360 return(p);
361 }
362
363 proc_t
364 proc_findthread(thread_t thread)
365 {
366 proc_t p = PROC_NULL;
367 struct uthread *uth;
368
369 proc_list_lock();
370 uth = get_bsdthread_info(thread);
371 if (uth && (uth->uu_flag & UT_VFORK))
372 p = uth->uu_proc;
373 else
374 p = (proc_t)(get_bsdthreadtask_info(thread));
375 p = proc_ref_locked(p);
376 proc_list_unlock();
377 return(p);
378 }
379
380 int
381 proc_rele(proc_t p)
382 {
383 proc_list_lock();
384 proc_rele_locked(p);
385 proc_list_unlock();
386
387 return(0);
388 }
389
390 proc_t
391 proc_self(void)
392 {
393 struct proc * p;
394
395 p = current_proc();
396
397 proc_list_lock();
398 if (p != proc_ref_locked(p))
399 p = PROC_NULL;
400 proc_list_unlock();
401 return(p);
402 }
403
404
405 proc_t
406 proc_ref_locked(proc_t p)
407 {
408 proc_t p1 = p;
409
410 /* if process still in creation return failure */
411 if ((p == PROC_NULL) || ((p->p_listflag & P_LIST_INCREATE) != 0))
412 return (PROC_NULL);
413 /* do not return process marked for termination */
414 if ((p->p_stat != SZOMB) && ((p->p_listflag & P_LIST_EXITED) == 0) && ((p->p_listflag & (P_LIST_DRAINWAIT | P_LIST_DRAIN | P_LIST_DEAD)) == 0))
415 p->p_refcount++;
416 else
417 p1 = PROC_NULL;
418
419 return(p1);
420 }
421
422 void
423 proc_rele_locked(proc_t p)
424 {
425
426 if (p->p_refcount > 0) {
427 p->p_refcount--;
428 if ((p->p_refcount == 0) && ((p->p_listflag & P_LIST_DRAINWAIT) == P_LIST_DRAINWAIT)) {
429 p->p_listflag &= ~P_LIST_DRAINWAIT;
430 wakeup(&p->p_refcount);
431 }
432 } else
433 panic("proc_rele_locked -ve ref\n");
434
435 }
436
437 proc_t
438 proc_find_zombref(int pid)
439 {
440 proc_t p;
441
442 proc_list_lock();
443
444 again:
445 p = pfind_locked(pid);
446
447 /* should we bail? */
448 if ((p == PROC_NULL) /* not found */
449 || ((p->p_listflag & P_LIST_INCREATE) != 0) /* not created yet */
450 || ((p->p_listflag & P_LIST_EXITED) == 0)) { /* not started exit */
451
452 proc_list_unlock();
453 return (PROC_NULL);
454 }
455
456 /* If someone else is controlling the (unreaped) zombie - wait */
457 if ((p->p_listflag & P_LIST_WAITING) != 0) {
458 (void)msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
459 goto again;
460 }
461 p->p_listflag |= P_LIST_WAITING;
462
463 proc_list_unlock();
464
465 return(p);
466 }
467
468 void
469 proc_drop_zombref(proc_t p)
470 {
471 proc_list_lock();
472 if ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
473 p->p_listflag &= ~P_LIST_WAITING;
474 wakeup(&p->p_stat);
475 }
476 proc_list_unlock();
477 }
478
479
480 void
481 proc_refdrain(proc_t p)
482 {
483
484 proc_list_lock();
485
486 p->p_listflag |= P_LIST_DRAIN;
487 while (p->p_refcount) {
488 p->p_listflag |= P_LIST_DRAINWAIT;
489 msleep(&p->p_refcount, proc_list_mlock, 0, "proc_refdrain", 0) ;
490 }
491 p->p_listflag &= ~P_LIST_DRAIN;
492 p->p_listflag |= P_LIST_DEAD;
493
494 proc_list_unlock();
495
496
497 }
498
499 proc_t
500 proc_parentholdref(proc_t p)
501 {
502 proc_t parent = PROC_NULL;
503 proc_t pp;
504 int loopcnt = 0;
505
506
507 proc_list_lock();
508 loop:
509 pp = p->p_pptr;
510 if ((pp == PROC_NULL) || (pp->p_stat == SZOMB) || ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED))) {
511 parent = PROC_NULL;
512 goto out;
513 }
514
515 if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == P_LIST_CHILDDRSTART) {
516 pp->p_listflag |= P_LIST_CHILDDRWAIT;
517 msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
518 loopcnt++;
519 if (loopcnt == 5) {
520 parent = PROC_NULL;
521 goto out;
522 }
523 goto loop;
524 }
525
526 if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == 0) {
527 pp->p_parentref++;
528 parent = pp;
529 goto out;
530 }
531
532 out:
533 proc_list_unlock();
534 return(parent);
535 }
536 int
537 proc_parentdropref(proc_t p, int listlocked)
538 {
539 if (listlocked == 0)
540 proc_list_lock();
541
542 if (p->p_parentref > 0) {
543 p->p_parentref--;
544 if ((p->p_parentref == 0) && ((p->p_listflag & P_LIST_PARENTREFWAIT) == P_LIST_PARENTREFWAIT)) {
545 p->p_listflag &= ~P_LIST_PARENTREFWAIT;
546 wakeup(&p->p_parentref);
547 }
548 } else
549 panic("proc_parentdropref -ve ref\n");
550 if (listlocked == 0)
551 proc_list_unlock();
552
553 return(0);
554 }
555
556 void
557 proc_childdrainstart(proc_t p)
558 {
559 #if __PROC_INTERNAL_DEBUG
560 if ((p->p_listflag & P_LIST_CHILDDRSTART) == P_LIST_CHILDDRSTART)
561 panic("proc_childdrainstart: childdrain already started\n");
562 #endif
563 p->p_listflag |= P_LIST_CHILDDRSTART;
564 /* wait for all that hold parentrefs to drop */
565 while (p->p_parentref > 0) {
566 p->p_listflag |= P_LIST_PARENTREFWAIT;
567 msleep(&p->p_parentref, proc_list_mlock, 0, "proc_childdrainstart", 0) ;
568 }
569 }
570
571
572 void
573 proc_childdrainend(proc_t p)
574 {
575 #if __PROC_INTERNAL_DEBUG
576 if (p->p_childrencnt > 0)
577 panic("exiting: children stil hanging around\n");
578 #endif
579 p->p_listflag |= P_LIST_CHILDDRAINED;
580 if ((p->p_listflag & (P_LIST_CHILDLKWAIT |P_LIST_CHILDDRWAIT)) != 0) {
581 p->p_listflag &= ~(P_LIST_CHILDLKWAIT |P_LIST_CHILDDRWAIT);
582 wakeup(&p->p_childrencnt);
583 }
584 }
585
586 void
587 proc_checkdeadrefs(__unused proc_t p)
588 {
589 #if __PROC_INTERNAL_DEBUG
590 if ((p->p_listflag & P_LIST_INHASH) != 0)
591 panic("proc being freed and still in hash %p: %u\n", p, p->p_listflag);
592 if (p->p_childrencnt != 0)
593 panic("proc being freed and pending children cnt %p:%d\n", p, p->p_childrencnt);
594 if (p->p_refcount != 0)
595 panic("proc being freed and pending refcount %p:%d\n", p, p->p_refcount);
596 if (p->p_parentref != 0)
597 panic("proc being freed and pending parentrefs %p:%d\n", p, p->p_parentref);
598 #endif
599 }
600
601 int
602 proc_pid(proc_t p)
603 {
604 return (p->p_pid);
605 }
606
607 int
608 proc_ppid(proc_t p)
609 {
610 return (p->p_ppid);
611 }
612
613 int
614 proc_selfpid(void)
615 {
616 return (current_proc()->p_pid);
617 }
618
619 int
620 proc_selfppid(void)
621 {
622 return (current_proc()->p_ppid);
623 }
624
625 #if CONFIG_DTRACE
626 static proc_t
627 dtrace_current_proc_vforking(void)
628 {
629 thread_t th = current_thread();
630 struct uthread *ut = get_bsdthread_info(th);
631
632 if (ut &&
633 ((ut->uu_flag & (UT_VFORK|UT_VFORKING)) == (UT_VFORK|UT_VFORKING))) {
634 /*
635 * Handle the narrow window where we're in the vfork syscall,
636 * but we're not quite ready to claim (in particular, to DTrace)
637 * that we're running as the child.
638 */
639 return (get_bsdtask_info(get_threadtask(th)));
640 }
641 return (current_proc());
642 }
643
644 int
645 dtrace_proc_selfpid(void)
646 {
647 return (dtrace_current_proc_vforking()->p_pid);
648 }
649
650 int
651 dtrace_proc_selfppid(void)
652 {
653 return (dtrace_current_proc_vforking()->p_ppid);
654 }
655
656 uid_t
657 dtrace_proc_selfruid(void)
658 {
659 return (dtrace_current_proc_vforking()->p_ruid);
660 }
661 #endif /* CONFIG_DTRACE */
662
663 proc_t
664 proc_parent(proc_t p)
665 {
666 proc_t parent;
667 proc_t pp;
668
669 proc_list_lock();
670 loop:
671 pp = p->p_pptr;
672 parent = proc_ref_locked(pp);
673 if ((parent == PROC_NULL) && (pp != PROC_NULL) && (pp->p_stat != SZOMB) && ((pp->p_listflag & P_LIST_EXITED) != 0) && ((pp->p_listflag & P_LIST_CHILDDRAINED)== 0)){
674 pp->p_listflag |= P_LIST_CHILDLKWAIT;
675 msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
676 goto loop;
677 }
678 proc_list_unlock();
679 return(parent);
680 }
681
682 static boolean_t
683 proc_parent_is_currentproc(proc_t p)
684 {
685 boolean_t ret = FALSE;
686
687 proc_list_lock();
688 if (p->p_pptr == current_proc())
689 ret = TRUE;
690
691 proc_list_unlock();
692 return ret;
693 }
694
695 void
696 proc_name(int pid, char * buf, int size)
697 {
698 proc_t p;
699
700 if ((p = proc_find(pid)) != PROC_NULL) {
701 strlcpy(buf, &p->p_comm[0], size);
702 proc_rele(p);
703 }
704 }
705
706 void
707 proc_name_kdp(task_t t, char * buf, int size)
708 {
709 proc_t p = get_bsdtask_info(t);
710
711 if (p != PROC_NULL)
712 strlcpy(buf, &p->p_comm[0], size);
713 }
714
715 char *
716 proc_name_address(void *p)
717 {
718 return &((proc_t)p)->p_comm[0];
719 }
720
721 void
722 proc_selfname(char * buf, int size)
723 {
724 proc_t p;
725
726 if ((p = current_proc())!= (proc_t)0) {
727 strlcpy(buf, &p->p_comm[0], size);
728 }
729 }
730
731 void
732 proc_signal(int pid, int signum)
733 {
734 proc_t p;
735
736 if ((p = proc_find(pid)) != PROC_NULL) {
737 psignal(p, signum);
738 proc_rele(p);
739 }
740 }
741
742 int
743 proc_issignal(int pid, sigset_t mask)
744 {
745 proc_t p;
746 int error=0;
747
748 if ((p = proc_find(pid)) != PROC_NULL) {
749 error = proc_pendingsignals(p, mask);
750 proc_rele(p);
751 }
752
753 return(error);
754 }
755
756 int
757 proc_noremotehang(proc_t p)
758 {
759 int retval = 0;
760
761 if (p)
762 retval = p->p_flag & P_NOREMOTEHANG;
763 return(retval? 1: 0);
764
765 }
766
767 int
768 proc_exiting(proc_t p)
769 {
770 int retval = 0;
771
772 if (p)
773 retval = p->p_lflag & P_LEXIT;
774 return(retval? 1: 0);
775 }
776
777 int
778 proc_forcequota(proc_t p)
779 {
780 int retval = 0;
781
782 if (p)
783 retval = p->p_flag & P_FORCEQUOTA;
784 return(retval? 1: 0);
785
786 }
787
788 int
789 proc_tbe(proc_t p)
790 {
791 int retval = 0;
792
793 if (p)
794 retval = p->p_flag & P_TBE;
795 return(retval? 1: 0);
796
797 }
798
799 int
800 proc_suser(proc_t p)
801 {
802 kauth_cred_t my_cred;
803 int error;
804
805 my_cred = kauth_cred_proc_ref(p);
806 error = suser(my_cred, &p->p_acflag);
807 kauth_cred_unref(&my_cred);
808 return(error);
809 }
810
811 task_t
812 proc_task(proc_t proc)
813 {
814 return (task_t)proc->task;
815 }
816
817 /*
818 * Obtain the first thread in a process
819 *
820 * XXX This is a bad thing to do; it exists predominantly to support the
821 * XXX use of proc_t's in places that should really be using
822 * XXX thread_t's instead. This maintains historical behaviour, but really
823 * XXX needs an audit of the context (proxy vs. not) to clean up.
824 */
825 thread_t
826 proc_thread(proc_t proc)
827 {
828 uthread_t uth = TAILQ_FIRST(&proc->p_uthlist);
829
830 if (uth != NULL)
831 return(uth->uu_context.vc_thread);
832
833 return(NULL);
834 }
835
836 kauth_cred_t
837 proc_ucred(proc_t p)
838 {
839 return(p->p_ucred);
840 }
841
842 struct uthread *
843 current_uthread()
844 {
845 thread_t th = current_thread();
846
847 return((struct uthread *)get_bsdthread_info(th));
848 }
849
850
851 int
852 proc_is64bit(proc_t p)
853 {
854 return(IS_64BIT_PROCESS(p));
855 }
856
857 int
858 proc_pidversion(proc_t p)
859 {
860 return(p->p_idversion);
861 }
862
863 uint64_t
864 proc_uniqueid(proc_t p)
865 {
866 return(p->p_uniqueid);
867 }
868
869 uint64_t
870 proc_puniqueid(proc_t p)
871 {
872 return(p->p_puniqueid);
873 }
874
875 uint64_t
876 proc_was_throttled(proc_t p)
877 {
878 return (p->was_throttled);
879 }
880
881 uint64_t
882 proc_did_throttle(proc_t p)
883 {
884 return (p->did_throttle);
885 }
886
887 int
888 proc_getcdhash(proc_t p, unsigned char *cdhash)
889 {
890 return vn_getcdhash(p->p_textvp, p->p_textoff, cdhash);
891 }
892
893 void
894 proc_getexecutableuuid(proc_t p, unsigned char *uuidbuf, unsigned long size)
895 {
896 if (size >= sizeof(p->p_uuid)) {
897 memcpy(uuidbuf, p->p_uuid, sizeof(p->p_uuid));
898 }
899 }
900
901
902 void
903 bsd_set_dependency_capable(task_t task)
904 {
905 proc_t p = get_bsdtask_info(task);
906
907 if (p) {
908 OSBitOrAtomic(P_DEPENDENCY_CAPABLE, &p->p_flag);
909 }
910 }
911
912
913 int
914 IS_64BIT_PROCESS(proc_t p)
915 {
916 if (p && (p->p_flag & P_LP64))
917 return(1);
918 else
919 return(0);
920 }
921
922 /*
923 * Locate a process by number
924 */
925 proc_t
926 pfind_locked(pid_t pid)
927 {
928 proc_t p;
929 #if DEBUG
930 proc_t q;
931 #endif
932
933 if (!pid)
934 return (kernproc);
935
936 for (p = PIDHASH(pid)->lh_first; p != 0; p = p->p_hash.le_next) {
937 if (p->p_pid == pid) {
938 #if DEBUG
939 for (q = p->p_hash.le_next; q != 0; q = q->p_hash.le_next) {
940 if ((p !=q) && (q->p_pid == pid))
941 panic("two procs with same pid %p:%p:%d:%d\n", p, q, p->p_pid, q->p_pid);
942 }
943 #endif
944 return (p);
945 }
946 }
947 return (NULL);
948 }
949
950 /*
951 * Locate a zombie by PID
952 */
953 __private_extern__ proc_t
954 pzfind(pid_t pid)
955 {
956 proc_t p;
957
958
959 proc_list_lock();
960
961 for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next)
962 if (p->p_pid == pid)
963 break;
964
965 proc_list_unlock();
966
967 return (p);
968 }
969
970 /*
971 * Locate a process group by number
972 */
973
974 struct pgrp *
975 pgfind(pid_t pgid)
976 {
977 struct pgrp * pgrp;
978
979 proc_list_lock();
980 pgrp = pgfind_internal(pgid);
981 if ((pgrp == NULL) || ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) != 0))
982 pgrp = PGRP_NULL;
983 else
984 pgrp->pg_refcount++;
985 proc_list_unlock();
986 return(pgrp);
987 }
988
989
990
991 struct pgrp *
992 pgfind_internal(pid_t pgid)
993 {
994 struct pgrp *pgrp;
995
996 for (pgrp = PGRPHASH(pgid)->lh_first; pgrp != 0; pgrp = pgrp->pg_hash.le_next)
997 if (pgrp->pg_id == pgid)
998 return (pgrp);
999 return (NULL);
1000 }
1001
1002 void
1003 pg_rele(struct pgrp * pgrp)
1004 {
1005 if(pgrp == PGRP_NULL)
1006 return;
1007 pg_rele_dropref(pgrp);
1008 }
1009
1010 void
1011 pg_rele_dropref(struct pgrp * pgrp)
1012 {
1013 proc_list_lock();
1014 if ((pgrp->pg_refcount == 1) && ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) == PGRP_FLAG_TERMINATE)) {
1015 proc_list_unlock();
1016 pgdelete_dropref(pgrp);
1017 return;
1018 }
1019
1020 pgrp->pg_refcount--;
1021 proc_list_unlock();
1022 }
1023
1024 struct session *
1025 session_find_internal(pid_t sessid)
1026 {
1027 struct session *sess;
1028
1029 for (sess = SESSHASH(sessid)->lh_first; sess != 0; sess = sess->s_hash.le_next)
1030 if (sess->s_sid == sessid)
1031 return (sess);
1032 return (NULL);
1033 }
1034
1035
1036 /*
1037 * Make a new process ready to become a useful member of society by making it
1038 * visible in all the right places and initialize its own lists to empty.
1039 *
1040 * Parameters: parent The parent of the process to insert
1041 * child The child process to insert
1042 *
1043 * Returns: (void)
1044 *
1045 * Notes: Insert a child process into the parents process group, assign
1046 * the child the parent process pointer and PPID of the parent,
1047 * place it on the parents p_children list as a sibling,
1048 * initialize its own child list, place it in the allproc list,
1049 * insert it in the proper hash bucket, and initialize its
1050 * event list.
1051 */
1052 void
1053 pinsertchild(proc_t parent, proc_t child)
1054 {
1055 struct pgrp * pg;
1056
1057 LIST_INIT(&child->p_children);
1058 TAILQ_INIT(&child->p_evlist);
1059 child->p_pptr = parent;
1060 child->p_ppid = parent->p_pid;
1061 child->p_puniqueid = parent->p_uniqueid;
1062
1063 pg = proc_pgrp(parent);
1064 pgrp_add(pg, parent, child);
1065 pg_rele(pg);
1066
1067 proc_list_lock();
1068
1069 #if CONFIG_MEMORYSTATUS
1070 memorystatus_add(child, TRUE);
1071 #endif
1072
1073 parent->p_childrencnt++;
1074 LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
1075
1076 LIST_INSERT_HEAD(&allproc, child, p_list);
1077 /* mark the completion of proc creation */
1078 child->p_listflag &= ~P_LIST_INCREATE;
1079
1080 proc_list_unlock();
1081 }
1082
1083 /*
1084 * Move p to a new or existing process group (and session)
1085 *
1086 * Returns: 0 Success
1087 * ESRCH No such process
1088 */
1089 int
1090 enterpgrp(proc_t p, pid_t pgid, int mksess)
1091 {
1092 struct pgrp *pgrp;
1093 struct pgrp *mypgrp;
1094 struct session * procsp;
1095
1096 pgrp = pgfind(pgid);
1097 mypgrp = proc_pgrp(p);
1098 procsp = proc_session(p);
1099
1100 #if DIAGNOSTIC
1101 if (pgrp != NULL && mksess) /* firewalls */
1102 panic("enterpgrp: setsid into non-empty pgrp");
1103 if (SESS_LEADER(p, procsp))
1104 panic("enterpgrp: session leader attempted setpgrp");
1105 #endif
1106 if (pgrp == PGRP_NULL) {
1107 pid_t savepid = p->p_pid;
1108 proc_t np = PROC_NULL;
1109 /*
1110 * new process group
1111 */
1112 #if DIAGNOSTIC
1113 if (p->p_pid != pgid)
1114 panic("enterpgrp: new pgrp and pid != pgid");
1115 #endif
1116 MALLOC_ZONE(pgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP,
1117 M_WAITOK);
1118 if (pgrp == NULL)
1119 panic("enterpgrp: M_PGRP zone depleted");
1120 if ((np = proc_find(savepid)) == NULL || np != p) {
1121 if (np != PROC_NULL)
1122 proc_rele(np);
1123 if (mypgrp != PGRP_NULL)
1124 pg_rele(mypgrp);
1125 if (procsp != SESSION_NULL)
1126 session_rele(procsp);
1127 FREE_ZONE(pgrp, sizeof(struct pgrp), M_PGRP);
1128 return (ESRCH);
1129 }
1130 proc_rele(np);
1131 if (mksess) {
1132 struct session *sess;
1133
1134 /*
1135 * new session
1136 */
1137 MALLOC_ZONE(sess, struct session *,
1138 sizeof(struct session), M_SESSION, M_WAITOK);
1139 if (sess == NULL)
1140 panic("enterpgrp: M_SESSION zone depleted");
1141 sess->s_leader = p;
1142 sess->s_sid = p->p_pid;
1143 sess->s_count = 1;
1144 sess->s_ttyvp = NULL;
1145 sess->s_ttyp = TTY_NULL;
1146 sess->s_flags = 0;
1147 sess->s_listflags = 0;
1148 sess->s_ttypgrpid = NO_PID;
1149 #if CONFIG_FINE_LOCK_GROUPS
1150 lck_mtx_init(&sess->s_mlock, proc_mlock_grp, proc_lck_attr);
1151 #else
1152 lck_mtx_init(&sess->s_mlock, proc_lck_grp, proc_lck_attr);
1153 #endif
1154 bcopy(procsp->s_login, sess->s_login,
1155 sizeof(sess->s_login));
1156 OSBitAndAtomic(~((uint32_t)P_CONTROLT), &p->p_flag);
1157 proc_list_lock();
1158 LIST_INSERT_HEAD(SESSHASH(sess->s_sid), sess, s_hash);
1159 proc_list_unlock();
1160 pgrp->pg_session = sess;
1161 #if DIAGNOSTIC
1162 if (p != current_proc())
1163 panic("enterpgrp: mksession and p != curproc");
1164 #endif
1165 } else {
1166 proc_list_lock();
1167 pgrp->pg_session = procsp;
1168
1169 if ((pgrp->pg_session->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
1170 panic("enterpgrp: providing ref to terminating session ");
1171 pgrp->pg_session->s_count++;
1172 proc_list_unlock();
1173 }
1174 pgrp->pg_id = pgid;
1175 #if CONFIG_FINE_LOCK_GROUPS
1176 lck_mtx_init(&pgrp->pg_mlock, proc_mlock_grp, proc_lck_attr);
1177 #else
1178 lck_mtx_init(&pgrp->pg_mlock, proc_lck_grp, proc_lck_attr);
1179 #endif
1180 LIST_INIT(&pgrp->pg_members);
1181 pgrp->pg_membercnt = 0;
1182 pgrp->pg_jobc = 0;
1183 proc_list_lock();
1184 pgrp->pg_refcount = 1;
1185 pgrp->pg_listflags = 0;
1186 LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
1187 proc_list_unlock();
1188 } else if (pgrp == mypgrp) {
1189 pg_rele(pgrp);
1190 if (mypgrp != NULL)
1191 pg_rele(mypgrp);
1192 if (procsp != SESSION_NULL)
1193 session_rele(procsp);
1194 return (0);
1195 }
1196
1197 if (procsp != SESSION_NULL)
1198 session_rele(procsp);
1199 /*
1200 * Adjust eligibility of affected pgrps to participate in job control.
1201 * Increment eligibility counts before decrementing, otherwise we
1202 * could reach 0 spuriously during the first call.
1203 */
1204 fixjobc(p, pgrp, 1);
1205 fixjobc(p, mypgrp, 0);
1206
1207 if(mypgrp != PGRP_NULL)
1208 pg_rele(mypgrp);
1209 pgrp_replace(p, pgrp);
1210 pg_rele(pgrp);
1211
1212 return(0);
1213 }
1214
1215 /*
1216 * remove process from process group
1217 */
1218 int
1219 leavepgrp(proc_t p)
1220 {
1221
1222 pgrp_remove(p);
1223 return (0);
1224 }
1225
1226 /*
1227 * delete a process group
1228 */
1229 static void
1230 pgdelete_dropref(struct pgrp *pgrp)
1231 {
1232 struct tty *ttyp;
1233 int emptypgrp = 1;
1234 struct session *sessp;
1235
1236
1237 pgrp_lock(pgrp);
1238 if (pgrp->pg_membercnt != 0) {
1239 emptypgrp = 0;
1240 }
1241 pgrp_unlock(pgrp);
1242
1243 proc_list_lock();
1244 pgrp->pg_refcount--;
1245 if ((emptypgrp == 0) || (pgrp->pg_membercnt != 0)) {
1246 proc_list_unlock();
1247 return;
1248 }
1249
1250 pgrp->pg_listflags |= PGRP_FLAG_TERMINATE;
1251
1252 if (pgrp->pg_refcount > 0) {
1253 proc_list_unlock();
1254 return;
1255 }
1256
1257 pgrp->pg_listflags |= PGRP_FLAG_DEAD;
1258 LIST_REMOVE(pgrp, pg_hash);
1259
1260 proc_list_unlock();
1261
1262 ttyp = SESSION_TP(pgrp->pg_session);
1263 if (ttyp != TTY_NULL) {
1264 if (ttyp->t_pgrp == pgrp) {
1265 tty_lock(ttyp);
1266 /* Re-check after acquiring the lock */
1267 if (ttyp->t_pgrp == pgrp) {
1268 ttyp->t_pgrp = NULL;
1269 pgrp->pg_session->s_ttypgrpid = NO_PID;
1270 }
1271 tty_unlock(ttyp);
1272 }
1273 }
1274
1275 proc_list_lock();
1276
1277 sessp = pgrp->pg_session;
1278 if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
1279 panic("pg_deleteref: manipulating refs of already terminating session");
1280 if (--sessp->s_count == 0) {
1281 if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
1282 panic("pg_deleteref: terminating already terminated session");
1283 sessp->s_listflags |= S_LIST_TERM;
1284 ttyp = SESSION_TP(sessp);
1285 LIST_REMOVE(sessp, s_hash);
1286 proc_list_unlock();
1287 if (ttyp != TTY_NULL) {
1288 tty_lock(ttyp);
1289 if (ttyp->t_session == sessp)
1290 ttyp->t_session = NULL;
1291 tty_unlock(ttyp);
1292 }
1293 proc_list_lock();
1294 sessp->s_listflags |= S_LIST_DEAD;
1295 if (sessp->s_count != 0)
1296 panic("pg_deleteref: freeing session in use");
1297 proc_list_unlock();
1298 #if CONFIG_FINE_LOCK_GROUPS
1299 lck_mtx_destroy(&sessp->s_mlock, proc_mlock_grp);
1300 #else
1301 lck_mtx_destroy(&sessp->s_mlock, proc_lck_grp);
1302 #endif
1303 FREE_ZONE(sessp, sizeof(struct session), M_SESSION);
1304 } else
1305 proc_list_unlock();
1306 #if CONFIG_FINE_LOCK_GROUPS
1307 lck_mtx_destroy(&pgrp->pg_mlock, proc_mlock_grp);
1308 #else
1309 lck_mtx_destroy(&pgrp->pg_mlock, proc_lck_grp);
1310 #endif
1311 FREE_ZONE(pgrp, sizeof(*pgrp), M_PGRP);
1312 }
1313
1314
/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 *
 * fixjob_callback is the proc_childrenwalk() callback used by fixjobc():
 * it adjusts pg_jobc of each child's process group.  Always returns
 * PROC_RETURNED so the walk continues over all children.
 */
int
fixjob_callback(proc_t p, void * arg)
{
	struct fixjob_iterargs *fp;
	struct pgrp * pg, *hispg;
	struct session * mysession, *hissess;
	int entering;

	/* Unpack the iteration arguments prepared by fixjobc(). */
	fp = (struct fixjob_iterargs *)arg;
	pg = fp->pg;
	mysession = fp->mysession;
	entering = fp->entering;

	/* Both calls take references that must be dropped before returning. */
	hispg = proc_pgrp(p);
	hissess = proc_session(p);

	/*
	 * The child qualifies its own group for job control iff it is in
	 * a different process group but the same session as the moving
	 * process (see block comment above).
	 */
	if ((hispg != pg) &&
	    (hissess == mysession)) {
		pgrp_lock(hispg);
		if (entering) {
			hispg->pg_jobc++;
			pgrp_unlock(hispg);
		} else if (--hispg->pg_jobc == 0) {
			/* Last qualifying member left: group is now orphaned. */
			pgrp_unlock(hispg);
			orphanpg(hispg);
		} else
			pgrp_unlock(hispg);
	}

	/* Drop the references taken above. */
	if (hissess != SESSION_NULL)
		session_rele(hissess);
	if (hispg != PGRP_NULL)
		pg_rele(hispg);

	return(PROC_RETURNED);
}
1360
/*
 * Recompute job-control qualification counts when process p enters
 * (entering == 1) or leaves (entering == 0) process group pgrp.
 * Adjusts pgrp's own pg_jobc based on p's parent, then walks p's
 * children via fixjob_callback() to adjust their groups' counts.
 */
void
fixjobc(proc_t p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp = PGRP_NULL;
	struct session *hissess = SESSION_NULL;
	struct session *mysession = pgrp->pg_session;
	proc_t parent;
	struct fixjob_iterargs fjarg;
	boolean_t proc_parent_self;

	/*
	 * Check if p's parent is current proc, if yes then no need to take
	 * a ref; calling proc_parent with current proc as parent may
	 * deadlock if current proc is exiting.
	 */
	proc_parent_self = proc_parent_is_currentproc(p);
	if (proc_parent_self)
		parent = current_proc();
	else
		parent = proc_parent(p);

	if (parent != PROC_NULL) {
		hispgrp = proc_pgrp(parent);
		hissess = proc_session(parent);
		/* Only drop the parent ref if we took one above. */
		if (!proc_parent_self)
			proc_rele(parent);
	}


	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	if ((hispgrp != pgrp) &&
	    (hissess == mysession)) {
		pgrp_lock(pgrp);
		if (entering) {
			pgrp->pg_jobc++;
			pgrp_unlock(pgrp);
		}else if (--pgrp->pg_jobc == 0) {
			/* Count hit zero: the group just became orphaned. */
			pgrp_unlock(pgrp);
			orphanpg(pgrp);
		} else
			pgrp_unlock(pgrp);
	}

	/* Release the references taken via proc_pgrp()/proc_session(). */
	if (hissess != SESSION_NULL)
		session_rele(hissess);
	if (hispgrp != PGRP_NULL)
		pg_rele(hispgrp);

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	fjarg.pg = pgrp;
	fjarg.mysession = mysession;
	fjarg.entering = entering;
	proc_childrenwalk(p, fixjob_callback, &fjarg);
}
1422
1423 /*
1424 * A process group has become orphaned;
1425 * if there are any stopped processes in the group,
1426 * hang-up all process in that group.
1427 */
1428 static void
1429 orphanpg(struct pgrp * pgrp)
1430 {
1431 proc_t p;
1432 pid_t * pid_list;
1433 int count, pidcount, i, alloc_count;
1434
1435 if (pgrp == PGRP_NULL)
1436 return;
1437 count = 0;
1438 pgrp_lock(pgrp);
1439 for (p = pgrp->pg_members.lh_first; p != 0; p = p->p_pglist.le_next) {
1440 if (p->p_stat == SSTOP) {
1441 for (p = pgrp->pg_members.lh_first; p != 0;
1442 p = p->p_pglist.le_next)
1443 count++;
1444 break; /* ??? stops after finding one.. */
1445 }
1446 }
1447 pgrp_unlock(pgrp);
1448
1449 count += 20;
1450 if (count > hard_maxproc)
1451 count = hard_maxproc;
1452 alloc_count = count * sizeof(pid_t);
1453 pid_list = (pid_t *)kalloc(alloc_count);
1454 bzero(pid_list, alloc_count);
1455
1456 pidcount = 0;
1457 pgrp_lock(pgrp);
1458 for (p = pgrp->pg_members.lh_first; p != 0;
1459 p = p->p_pglist.le_next) {
1460 if (p->p_stat == SSTOP) {
1461 for (p = pgrp->pg_members.lh_first; p != 0;
1462 p = p->p_pglist.le_next) {
1463 pid_list[pidcount] = p->p_pid;
1464 pidcount++;
1465 if (pidcount >= count)
1466 break;
1467 }
1468 break; /* ??? stops after finding one.. */
1469 }
1470 }
1471 pgrp_unlock(pgrp);
1472
1473 if (pidcount == 0)
1474 goto out;
1475
1476
1477 for (i = 0; i< pidcount; i++) {
1478 /* No handling or proc0 */
1479 if (pid_list[i] == 0)
1480 continue;
1481 p = proc_find(pid_list[i]);
1482 if (p) {
1483 proc_transwait(p, 0);
1484 pt_setrunnable(p);
1485 psignal(p, SIGHUP);
1486 psignal(p, SIGCONT);
1487 proc_rele(p);
1488 }
1489 }
1490 out:
1491 kfree(pid_list, alloc_count);
1492 return;
1493 }
1494
1495
1496
1497 /* XXX should be __private_extern__ */
1498 int
1499 proc_is_classic(proc_t p)
1500 {
1501 return (p->p_flag & P_TRANSLATED) ? 1 : 0;
1502 }
1503
1504 /* XXX Why does this function exist? Need to kill it off... */
1505 proc_t
1506 current_proc_EXTERNAL(void)
1507 {
1508 return (current_proc());
1509 }
1510
1511 int
1512 proc_is_forcing_hfs_case_sensitivity(proc_t p)
1513 {
1514 return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY) ? 1 : 0;
1515 }
1516
1517 /*
1518 * proc_core_name(name, uid, pid)
1519 * Expand the name described in corefilename, using name, uid, and pid.
1520 * corefilename is a printf-like string, with three format specifiers:
1521 * %N name of process ("name")
1522 * %P process id (pid)
1523 * %U user id (uid)
1524 * For example, "%N.core" is the default; they can be disabled completely
1525 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
1526 * This is controlled by the sysctl variable kern.corefile (see above).
1527 */
1528 __private_extern__ int
1529 proc_core_name(const char *name, uid_t uid, pid_t pid, char *cf_name,
1530 size_t cf_name_len)
1531 {
1532 const char *format, *appendstr;
1533 char id_buf[11]; /* Buffer for pid/uid -- max 4B */
1534 size_t i, l, n;
1535
1536 if (cf_name == NULL)
1537 goto toolong;
1538
1539 format = corefilename;
1540 for (i = 0, n = 0; n < cf_name_len && format[i]; i++) {
1541 switch (format[i]) {
1542 case '%': /* Format character */
1543 i++;
1544 switch (format[i]) {
1545 case '%':
1546 appendstr = "%";
1547 break;
1548 case 'N': /* process name */
1549 appendstr = name;
1550 break;
1551 case 'P': /* process id */
1552 snprintf(id_buf, sizeof(id_buf), "%u", pid);
1553 appendstr = id_buf;
1554 break;
1555 case 'U': /* user id */
1556 snprintf(id_buf, sizeof(id_buf), "%u", uid);
1557 appendstr = id_buf;
1558 break;
1559 default:
1560 appendstr = "";
1561 log(LOG_ERR,
1562 "Unknown format character %c in `%s'\n",
1563 format[i], format);
1564 }
1565 l = strlen(appendstr);
1566 if ((n + l) >= cf_name_len)
1567 goto toolong;
1568 bcopy(appendstr, cf_name + n, l);
1569 n += l;
1570 break;
1571 default:
1572 cf_name[n++] = format[i];
1573 }
1574 }
1575 if (format[i] != '\0')
1576 goto toolong;
1577 return (0);
1578 toolong:
1579 log(LOG_ERR, "pid %ld (%s), uid (%u): corename is too long\n",
1580 (long)pid, name, (uint32_t)uid);
1581 return (1);
1582 }
1583
1584 #if CONFIG_LCTX
1585
/*
 * One-time initialization of the login-context subsystem: the global
 * context list/count, the lctx lock group and attributes, and the
 * all-contexts list lock.
 */
static void
lctxinit(void)
{
	LIST_INIT(&alllctx);
	alllctx_cnt = 0;

	/* allocate lctx lock group attribute and group */
	lctx_lck_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_attr_setstat(lctx_lck_grp_attr);

	lctx_lck_grp = lck_grp_alloc_init("lctx", lctx_lck_grp_attr);
	/* Allocate lctx lock attribute */
	lctx_lck_attr = lck_attr_alloc_init();

	lck_mtx_init(&alllctx_lock, lctx_lck_grp, lctx_lck_attr);
}
1602
1603 /*
1604 * Locate login context by number.
1605 */
1606 struct lctx *
1607 lcfind(pid_t lcid)
1608 {
1609 struct lctx *l;
1610
1611 ALLLCTX_LOCK;
1612 LIST_FOREACH(l, &alllctx, lc_list) {
1613 if (l->lc_id == lcid) {
1614 LCTX_LOCK(l);
1615 break;
1616 }
1617 }
1618 ALLLCTX_UNLOCK;
1619 return (l);
1620 }
1621
/*
 * Advance lastlcid to the next candidate login-context id, wrapping
 * back to 1 once it exceeds maxlcid.  (The stray line-continuation
 * backslash after "while (0)" has been removed: it silently glued the
 * following line into the macro.)
 */
#define LCID_INC				\
	do {					\
		lastlcid++;			\
		if (lastlcid > maxlcid)		\
			lastlcid = 1;		\
	} while (0)
/*
 * Create a new login context with a unique id and insert it on the
 * global list.  The new context is returned unlocked.
 *
 * NOTE(review): the id search (lcfind loop) and the later list
 * insertion are separate critical sections; assumes callers serialize
 * creation sufficiently that the chosen id stays unique -- confirm.
 */
struct lctx *
lccreate(void)
{
	struct lctx *l;
	pid_t newlcid;

	/* Not very efficient but this isn't a common operation. */
	while ((l = lcfind(lastlcid)) != NULL) {
		LCTX_UNLOCK(l);
		LCID_INC;
	}
	newlcid = lastlcid;
	LCID_INC;

	/* M_ZERO: the context starts out fully zeroed. */
	MALLOC(l, struct lctx *, sizeof(struct lctx), M_LCTX, M_WAITOK|M_ZERO);
	l->lc_id = newlcid;
	LIST_INIT(&l->lc_members);
	lck_mtx_init(&l->lc_mtx, lctx_lck_grp, lctx_lck_attr);
#if CONFIG_MACF
	l->lc_label = mac_lctx_label_alloc();
#endif
	/* Publish the context on the global list. */
	ALLLCTX_LOCK;
	LIST_INSERT_HEAD(&alllctx, l, lc_list);
	alllctx_cnt++;
	ALLLCTX_UNLOCK;

	return (l);
}
1657
/*
 * Add p to login context l.
 *
 * Call with proc protected (either by being invisible
 * or by having the all-login-context lock held) and
 * the lctx locked.
 *
 * Will unlock lctx on return.
 */
void
enterlctx (proc_t p, struct lctx *l, __unused int create)
{
	if (l == NULL)
		return;

	/* Link p into the context's member list and bump the member count. */
	p->p_lctx = l;
	LIST_INSERT_HEAD(&l->lc_members, p, p_lclist);
	l->lc_mc++;

#if CONFIG_MACF
	/* Tell MAC policies whether this is a fresh context or a join. */
	if (create)
		mac_lctx_notify_create(p, l);
	else
		mac_lctx_notify_join(p, l);
#endif
	LCTX_UNLOCK(l);

	return;
}
1685
/*
 * Remove process from login context (if any). Called with p protected by
 * the alllctx lock.  When the last member leaves, the context is
 * unlinked from the global list and freed.
 */
void
leavelctx (proc_t p)
{
	struct lctx *l;

	if (p->p_lctx == NULL) {
		return;
	}

	LCTX_LOCK(p->p_lctx);
	l = p->p_lctx;
	p->p_lctx = NULL;
	LIST_REMOVE(p, p_lclist);
	l->lc_mc--;		/* one fewer member */
#if CONFIG_MACF
	mac_lctx_notify_leave(p, l);
#endif
	if (LIST_EMPTY(&l->lc_members)) {
		/* Last member left: tear the context down and free it. */
		LIST_REMOVE(l, lc_list);
		alllctx_cnt--;
		LCTX_UNLOCK(l);
		lck_mtx_destroy(&l->lc_mtx, lctx_lck_grp);
#if CONFIG_MACF
		mac_lctx_label_free(l->lc_label);
		l->lc_label = NULL;
#endif
		FREE(l, M_LCTX);
	} else {
		LCTX_UNLOCK(l);
	}
	return;
}
1722
/*
 * sysctl handler for kern.lctx.*: KERN_LCTX_ALL copies out the whole
 * login-context table; KERN_LCTX_LCID returns the entry for a single
 * context id.
 */
static int
sysctl_kern_lctx SYSCTL_HANDLER_ARGS
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct kinfo_lctx kil;
	struct lctx *l;
	int error;

	error = 0;

	switch (oidp->oid_number) {
	case KERN_LCTX_ALL:
		/* Takes ALLLCTX; released at `out' below. */
		ALLLCTX_LOCK;
		/* Request for size. */
		if (!req->oldptr) {
			/* Report the size with one entry of slack. */
			error = SYSCTL_OUT(req, 0,
				sizeof(struct kinfo_lctx) * (alllctx_cnt + 1));
			goto out;
		}
		break;

	case KERN_LCTX_LCID:
		/* No space */
		if (req->oldlen < sizeof(struct kinfo_lctx))
			return (ENOMEM);
		/* No argument */
		if (namelen != 1)
			return (EINVAL);
		/* No login context */
		l = lcfind((pid_t)name[0]);	/* returned locked */
		if (l == NULL)
			return (ENOENT);
		kil.id = l->lc_id;
		kil.mc = l->lc_mc;
		LCTX_UNLOCK(l);
		return (SYSCTL_OUT(req, (caddr_t)&kil, sizeof(kil)));

	default:
		return (EINVAL);
	}

	/* KERN_LCTX_ALL continues here with ALLLCTX held. */

	/* Provided buffer is too small. */
	if (req->oldlen < (sizeof(struct kinfo_lctx) * alllctx_cnt)) {
		error = ENOMEM;
		goto out;
	}

	LIST_FOREACH(l, &alllctx, lc_list) {
		LCTX_LOCK(l);
		kil.id = l->lc_id;
		kil.mc = l->lc_mc;
		LCTX_UNLOCK(l);
		error = SYSCTL_OUT(req, (caddr_t)&kil, sizeof(kil));
		if (error)
			break;
	}
out:
	ALLLCTX_UNLOCK;

	return (error);
}
1785
/* sysctl nodes: kern.lctx.{all, lcid, last, count, max} */
SYSCTL_NODE(_kern, KERN_LCTX, lctx, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Login Context");

SYSCTL_PROC(_kern_lctx, KERN_LCTX_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT | CTLFLAG_LOCKED,
	    0, 0, sysctl_kern_lctx, "S,lctx",
	    "Return entire login context table");
SYSCTL_NODE(_kern_lctx, KERN_LCTX_LCID, lcid, CTLFLAG_RD | CTLFLAG_LOCKED,
	    sysctl_kern_lctx, "Login Context Table");
SYSCTL_INT(_kern_lctx, OID_AUTO, last, CTLFLAG_RD | CTLFLAG_LOCKED, &lastlcid, 0, "");
SYSCTL_INT(_kern_lctx, OID_AUTO, count, CTLFLAG_RD | CTLFLAG_LOCKED, &alllctx_cnt, 0, "");
SYSCTL_INT(_kern_lctx, OID_AUTO, max, CTLFLAG_RW | CTLFLAG_LOCKED, &maxlcid, 0, "");
1796
1797 #endif /* LCTX */
1798
1799 /* Code Signing related routines */
1800
1801 int
1802 csops(__unused proc_t p, struct csops_args *uap, __unused int32_t *retval)
1803 {
1804 return(csops_internal(uap->pid, uap->ops, uap->useraddr,
1805 uap->usersize, USER_ADDR_NULL));
1806 }
1807
1808 int
1809 csops_audittoken(__unused proc_t p, struct csops_audittoken_args *uap, __unused int32_t *retval)
1810 {
1811 if (uap->uaudittoken == USER_ADDR_NULL)
1812 return(EINVAL);
1813 return(csops_internal(uap->pid, uap->ops, uap->useraddr,
1814 uap->usersize, uap->uaudittoken));
1815 }
1816
1817 static int
1818 csops_copy_token(void *start, size_t length, user_size_t usize, user_addr_t uaddr)
1819 {
1820 char fakeheader[8] = { 0 };
1821 int error;
1822
1823 if (usize < sizeof(fakeheader))
1824 return ERANGE;
1825
1826 /* if no blob, fill in zero header */
1827 if (NULL == start) {
1828 start = fakeheader;
1829 length = sizeof(fakeheader);
1830 } else if (usize < length) {
1831 /* ... if input too short, copy out length of entitlement */
1832 uint32_t length32 = htonl((uint32_t)length);
1833 memcpy(&fakeheader[4], &length32, sizeof(length32));
1834
1835 error = copyout(fakeheader, uaddr, sizeof(fakeheader));
1836 if (error == 0)
1837 return ERANGE; /* input buffer to short, ERANGE signals that */
1838 return error;
1839 }
1840 return copyout(start, uaddr, length);
1841 }
1842
/*
 * Guts of csops(2)/csops_audittoken(2): perform code-signing operation
 * `ops' on the process identified by `pid' (0 means the caller),
 * exchanging data with user space via uaddr/usersize.  When a non-null
 * uaudittoken is supplied, its pid/idversion must match the target.
 * Returns 0 or an errno.
 */
static int
csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaudittoken)
{
	size_t usize = (size_t)CAST_DOWN(size_t, usersize);
	proc_t pt;
	int forself;
	int error;
	vnode_t tvp;
	off_t toff;
	unsigned char cdhash[SHA1_RESULTLEN];
	audit_token_t token;
	unsigned int upid=0, uidversion = 0;

	forself = error = 0;

	/* pid 0 means "the calling process". */
	if (pid == 0)
		pid = proc_selfpid();
	if (pid == proc_selfpid())
		forself = 1;


	/*
	 * Only the read-only query operations below are allowed on other
	 * processes without superuser credentials.
	 */
	switch (ops) {
		case CS_OPS_STATUS:
		case CS_OPS_CDHASH:
		case CS_OPS_PIDOFFSET:
		case CS_OPS_ENTITLEMENTS_BLOB:
		case CS_OPS_BLOB:
			break;	/* unrestricted */
		default:
			if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE)
				return(EPERM);
			break;
	}

	/* Take a reference on the target; dropped at `out' or on early return. */
	pt = proc_find(pid);
	if (pt == PROC_NULL)
		return(ESRCH);

	upid = pt->p_pid;
	uidversion = pt->p_idversion;
	if (uaudittoken != USER_ADDR_NULL) {

		error = copyin(uaudittoken, &token, sizeof(audit_token_t));
		if (error != 0)
			goto out;
		/* verify the audit token pid/idversion matches with proc */
		if ((token.val[5] != upid) || (token.val[7] != uidversion)) {
			error = ESRCH;
			goto out;
		}
	}

	switch (ops) {

		case CS_OPS_STATUS: {
			/* Copy out the target's code-signing flags. */
			uint32_t retflags;

			proc_lock(pt);
			retflags = pt->p_csflags;
			if (cs_enforcement(pt))
				retflags |= CS_ENFORCEMENT;
			proc_unlock(pt);

			if (uaddr != USER_ADDR_NULL)
				error = copyout(&retflags, uaddr, sizeof(uint32_t));
			break;
		}
		case CS_OPS_MARKINVALID:
			/* Clear CS_VALID; if CS_KILL is set, deliver SIGKILL. */
			proc_lock(pt);
			if ((pt->p_csflags & CS_VALID) == CS_VALID) {	/* is currently valid */
				pt->p_csflags &= ~CS_VALID;	/* set invalid */
				if ((pt->p_csflags & CS_KILL) == CS_KILL) {
					pt->p_csflags |= CS_KILLED;
					proc_unlock(pt);
					if (cs_debug) {
						printf("CODE SIGNING: marked invalid by pid %d: "
						       "p=%d[%s] honoring CS_KILL, final status 0x%x\n",
						       proc_selfpid(), pt->p_pid, pt->p_comm, pt->p_csflags);
					}
					psignal(pt, SIGKILL);
				} else
					proc_unlock(pt);
			} else
				proc_unlock(pt);

			break;

		case CS_OPS_MARKHARD:
			/* Set CS_HARD; fails if the signature is already invalid. */
			proc_lock(pt);
			pt->p_csflags |= CS_HARD;
			if ((pt->p_csflags & CS_VALID) == 0) {
				/* @@@ allow? reject? kill? @@@ */
				proc_unlock(pt);
				error = EINVAL;
				goto out;
			} else
				proc_unlock(pt);
			break;

		case CS_OPS_MARKKILL:
			/* Set CS_KILL; kill immediately if already invalid. */
			proc_lock(pt);
			pt->p_csflags |= CS_KILL;
			if ((pt->p_csflags & CS_VALID) == 0) {
				proc_unlock(pt);
				psignal(pt, SIGKILL);
			} else
				proc_unlock(pt);
			break;

		case CS_OPS_PIDOFFSET:
			/* Copy out the text offset; note the early proc_rele/return. */
			toff = pt->p_textoff;
			proc_rele(pt);
			error = copyout(&toff, uaddr, sizeof(toff));
			return(error);

		case CS_OPS_CDHASH:

			/* pt already holds a reference on its p_textvp */
			tvp = pt->p_textvp;
			toff = pt->p_textoff;

			if (tvp == NULLVP || usize != SHA1_RESULTLEN) {
				proc_rele(pt);
				return EINVAL;
			}

			error = vn_getcdhash(tvp, toff, cdhash);
			proc_rele(pt);

			if (error == 0) {
				error = copyout(cdhash, uaddr, sizeof (cdhash));
			}

			return error;

		case CS_OPS_ENTITLEMENTS_BLOB: {
			/* Copy out the entitlements blob (see csops_copy_token). */
			void *start;
			size_t length;

			proc_lock(pt);

			if ((pt->p_csflags & CS_VALID) == 0) {
				proc_unlock(pt);
				error = EINVAL;
				break;
			}

			error = cs_entitlements_blob_get(pt, &start, &length);
			proc_unlock(pt);
			if (error)
				break;

			error = csops_copy_token(start, length, usize, uaddr);
			break;
		}
		case CS_OPS_MARKRESTRICT:
			proc_lock(pt);
			pt->p_csflags |= CS_RESTRICT;
			proc_unlock(pt);
			break;

		case CS_OPS_SET_STATUS: {
			/* Set user-settable code-signing flags from user space. */
			uint32_t flags;

			if (usize < sizeof(flags)) {
				error = ERANGE;
				break;
			}

			error = copyin(uaddr, &flags, sizeof(flags));
			if (error)
				break;

			/* only allow setting a subset of all code sign flags */
			flags &=
			    CS_HARD | CS_EXEC_SET_HARD |
			    CS_KILL | CS_EXEC_SET_KILL |
			    CS_RESTRICT |
			    CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT;

			proc_lock(pt);
			if (pt->p_csflags & CS_VALID)
				pt->p_csflags |= flags;
			else
				error = EINVAL;
			proc_unlock(pt);

			break;
		}
		case CS_OPS_BLOB: {
			/* Copy out the whole code-signing blob. */
			void *start;
			size_t length;

			proc_lock(pt);
			if ((pt->p_csflags & CS_VALID) == 0) {
				proc_unlock(pt);
				error = EINVAL;
				break;
			}

			error = cs_blob_get(pt, &start, &length);
			proc_unlock(pt);
			if (error)
				break;

			error = csops_copy_token(start, length, usize, uaddr);
			break;
		}
		case CS_OPS_IDENTITY: {
			const char *identity;
			uint8_t fakeheader[8];
			uint32_t idlen;
			size_t length;

			/*
			 * Make identity have a blob header to make it
			 * easier on userland to guess the identity
			 * length.
			 */
			if (usize < sizeof(fakeheader)) {
				error = ERANGE;
				break;
			}
			memset(fakeheader, 0, sizeof(fakeheader));

			proc_lock(pt);
			if ((pt->p_csflags & CS_VALID) == 0) {
				proc_unlock(pt);
				error = EINVAL;
				break;
			}

			identity = cs_identity_get(pt);
			proc_unlock(pt);
			if (identity == NULL) {
				error = ENOENT;
				break;
			}

			length = strlen(identity) + 1; /* include NUL */
			idlen = htonl(length + sizeof(fakeheader));
			memcpy(&fakeheader[4], &idlen, sizeof(idlen));

			error = copyout(fakeheader, uaddr, sizeof(fakeheader));
			if (error)
				break;

			/* Copy the identity string after the header if it fits. */
			if (usize < sizeof(fakeheader) + length)
				error = ERANGE;
			else if (usize > sizeof(fakeheader))
				error = copyout(identity, uaddr + sizeof(fakeheader), length);

			break;
		}

		case CS_OPS_SIGPUP_INSTALL:
			error = sigpup_install(uaddr);
			break;

		case CS_OPS_SIGPUP_DROP:
			error = sigpup_drop();
			break;

		default:
			error = EINVAL;
			break;
	}
out:
	proc_rele(pt);
	return(error);
}
2114
2115 int
2116 proc_iterate(flags, callout, arg, filterfn, filterarg)
2117 int flags;
2118 int (*callout)(proc_t, void *);
2119 void * arg;
2120 int (*filterfn)(proc_t, void *);
2121 void * filterarg;
2122 {
2123 proc_t p;
2124 pid_t * pid_list;
2125 int count, pidcount, alloc_count, i, retval;
2126
2127 count = nprocs+ 10;
2128 if (count > hard_maxproc)
2129 count = hard_maxproc;
2130 alloc_count = count * sizeof(pid_t);
2131 pid_list = (pid_t *)kalloc(alloc_count);
2132 bzero(pid_list, alloc_count);
2133
2134
2135 proc_list_lock();
2136
2137
2138 pidcount = 0;
2139 if (flags & PROC_ALLPROCLIST) {
2140 for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
2141 if (p->p_stat == SIDL)
2142 continue;
2143 if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
2144 pid_list[pidcount] = p->p_pid;
2145 pidcount++;
2146 if (pidcount >= count)
2147 break;
2148 }
2149 }
2150 }
2151 if ((pidcount < count ) && (flags & PROC_ZOMBPROCLIST)) {
2152 for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) {
2153 if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
2154 pid_list[pidcount] = p->p_pid;
2155 pidcount++;
2156 if (pidcount >= count)
2157 break;
2158 }
2159 }
2160 }
2161
2162
2163 proc_list_unlock();
2164
2165
2166 for (i = 0; i< pidcount; i++) {
2167 p = proc_find(pid_list[i]);
2168 if (p) {
2169 if ((flags & PROC_NOWAITTRANS) == 0)
2170 proc_transwait(p, 0);
2171 retval = callout(p, arg);
2172
2173 switch (retval) {
2174 case PROC_RETURNED:
2175 case PROC_RETURNED_DONE:
2176 proc_rele(p);
2177 if (retval == PROC_RETURNED_DONE) {
2178 goto out;
2179 }
2180 break;
2181
2182 case PROC_CLAIMED_DONE:
2183 goto out;
2184 case PROC_CLAIMED:
2185 default:
2186 break;
2187 }
2188 } else if (flags & PROC_ZOMBPROCLIST) {
2189 p = proc_find_zombref(pid_list[i]);
2190 if (p != PROC_NULL) {
2191 retval = callout(p, arg);
2192
2193 switch (retval) {
2194 case PROC_RETURNED:
2195 case PROC_RETURNED_DONE:
2196 proc_drop_zombref(p);
2197 if (retval == PROC_RETURNED_DONE) {
2198 goto out;
2199 }
2200 break;
2201
2202 case PROC_CLAIMED_DONE:
2203 goto out;
2204 case PROC_CLAIMED:
2205 default:
2206 break;
2207 }
2208 }
2209 }
2210 }
2211
2212 out:
2213 kfree(pid_list, alloc_count);
2214 return(0);
2215
2216 }
2217
2218
#if 0
/* This is for iteration in case of trivial non blocking callouts */
/*
 * NOTE: compiled out (#if 0).  Walks allproc/zombproc while holding the
 * list lock and invokes `callout' directly on each process, so it is
 * only usable with callouts that never block.
 */
int
proc_scanall(flags, callout, arg)
	int flags;
	int (*callout)(proc_t, void *);
	void * arg;
{
	proc_t p;
	int retval;


	proc_list_lock();


	if (flags & PROC_ALLPROCLIST) {
		for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
			retval = callout(p, arg);
			if (retval == PROC_RETURNED_DONE)
				goto out;
		}
	}
	if (flags & PROC_ZOMBPROCLIST) {
		for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) {
			retval = callout(p, arg);
			if (retval == PROC_RETURNED_DONE)
				goto out;
		}
	}
out:

	proc_list_unlock();

	return(0);
}
#endif
2255
2256
/*
 * Shutdown-time scan of allproc: invoke `callout' on each process that
 * passes `filterfn'.  Because the list lock must be dropped around the
 * callout, the walk restarts from the head of allproc after every
 * callout (goto ps_allprocscan); the filter is expected to stop
 * matching processes already handled.  Stops early when the callout
 * returns PROC_RETURNED_DONE or PROC_CLAIMED_DONE.  Always returns 0.
 */
int
proc_rebootscan(callout, arg, filterfn, filterarg)
	int (*callout)(proc_t, void *);
	void * arg;
	int (*filterfn)(proc_t, void *);
	void * filterarg;
{
	proc_t p;
	int lockheld = 0, retval;

	proc_shutdown_exitcount = 0;

ps_allprocscan:

	proc_list_lock();

	lockheld = 1;

	for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
		if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
			/* Take a ref while the lock is still held. */
			p = proc_ref_locked(p);

			proc_list_unlock();
			lockheld = 0;

			if (p) {
				proc_transwait(p, 0);
				retval = callout(p, arg);
				proc_rele(p);

				switch (retval) {
					case PROC_RETURNED_DONE:
					case PROC_CLAIMED_DONE:
						goto out;
				}
			}
			/* List may have changed while unlocked: restart the scan. */
			goto ps_allprocscan;
		} /* filter pass */
	} /* allproc walk thru */

	if (lockheld == 1) {
		proc_list_unlock();
		lockheld = 0;
	}

out:
	return(0);

}
2306
2307
2308 int
2309 proc_childrenwalk(parent, callout, arg)
2310 struct proc * parent;
2311 int (*callout)(proc_t, void *);
2312 void * arg;
2313 {
2314 register struct proc *p;
2315 pid_t * pid_list;
2316 int count, pidcount, alloc_count, i, retval;
2317
2318 count = nprocs+ 10;
2319 if (count > hard_maxproc)
2320 count = hard_maxproc;
2321 alloc_count = count * sizeof(pid_t);
2322 pid_list = (pid_t *)kalloc(alloc_count);
2323 bzero(pid_list, alloc_count);
2324
2325
2326 proc_list_lock();
2327
2328
2329 pidcount = 0;
2330 for (p = parent->p_children.lh_first; (p != 0); p = p->p_sibling.le_next) {
2331 if (p->p_stat == SIDL)
2332 continue;
2333 pid_list[pidcount] = p->p_pid;
2334 pidcount++;
2335 if (pidcount >= count)
2336 break;
2337 }
2338 proc_list_unlock();
2339
2340
2341 for (i = 0; i< pidcount; i++) {
2342 p = proc_find(pid_list[i]);
2343 if (p) {
2344 proc_transwait(p, 0);
2345 retval = callout(p, arg);
2346
2347 switch (retval) {
2348 case PROC_RETURNED:
2349 case PROC_RETURNED_DONE:
2350 proc_rele(p);
2351 if (retval == PROC_RETURNED_DONE) {
2352 goto out;
2353 }
2354 break;
2355
2356 case PROC_CLAIMED_DONE:
2357 goto out;
2358 case PROC_CLAIMED:
2359 default:
2360 break;
2361 }
2362 }
2363 }
2364
2365 out:
2366 kfree(pid_list, alloc_count);
2367 return(0);
2368
2369 }
2370
/*
 * Iterate over the members of process group `pgrp', invoking `callout'
 * on each member that passes `filterfn'.  Member pids are snapshotted
 * under the pgrp lock; each process is then looked up, re-validated as
 * still belonging to this group, and called outside the lock.
 * PGRP_DROPREF in `flags' drops the caller's reference on pgrp.
 */
/* PGRP_BLOCKITERATE is not implemented yet */
int
pgrp_iterate(pgrp, flags, callout, arg, filterfn, filterarg)
	struct pgrp *pgrp;
	int flags;
	int (*callout)(proc_t, void *);
	void * arg;
	int (*filterfn)(proc_t, void *);
	void * filterarg;
{
	proc_t p;
	pid_t * pid_list;
	int count, pidcount, i, alloc_count;
	int retval;
	pid_t pgid;
	int dropref = flags & PGRP_DROPREF;
#if 0
	int serialize = flags & PGRP_BLOCKITERATE;
#else
	int serialize = 0;	/* serialized iteration is compiled out */
#endif

	if (pgrp == 0)
		return(0);
	/* Oversize the pid snapshot slightly; cap at the process limit. */
	count = pgrp->pg_membercnt + 10;
	if (count > hard_maxproc)
		count = hard_maxproc;
	alloc_count = count * sizeof(pid_t);
	pid_list = (pid_t *)kalloc(alloc_count);
	bzero(pid_list, alloc_count);

	pgrp_lock(pgrp);
	if (serialize != 0) {
		/* Wait for any iteration in progress, then claim the group. */
		while ((pgrp->pg_listflags & PGRP_FLAG_ITERABEGIN) == PGRP_FLAG_ITERABEGIN) {
			pgrp->pg_listflags |= PGRP_FLAG_ITERWAIT;
			msleep(&pgrp->pg_listflags, &pgrp->pg_mlock, 0, "pgrp_iterate", 0);
		}
		pgrp->pg_listflags |= PGRP_FLAG_ITERABEGIN;
	}

	pgid = pgrp->pg_id;

	pidcount = 0;
	for (p = pgrp->pg_members.lh_first; p != 0;
	    p = p->p_pglist.le_next) {
		if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
			pid_list[pidcount] = p->p_pid;
			pidcount++;
			if (pidcount >= count)
				break;
		}
	}


	pgrp_unlock(pgrp);
	if ((serialize == 0) && (dropref != 0))
		pg_rele(pgrp);


	for (i = 0; i < pidcount; i++) {
		/* No handling or proc0 */
		if (pid_list[i] == 0)
			continue;
		p = proc_find(pid_list[i]);
		if (p) {
			/* Skip processes that changed group since the snapshot. */
			if (p->p_pgrpid != pgid) {
				proc_rele(p);
				continue;
			}
			proc_transwait(p, 0);
			retval = callout(p, arg);

			switch (retval) {
			case PROC_RETURNED:
			case PROC_RETURNED_DONE:
				proc_rele(p);
				if (retval == PROC_RETURNED_DONE) {
					goto out;
				}
				break;

			case PROC_CLAIMED_DONE:
				goto out;
			case PROC_CLAIMED:
			default:
				break;
			}
		}
	}
out:
	if (serialize != 0) {
		/* Release the iteration claim and wake any waiters. */
		pgrp_lock(pgrp);
		pgrp->pg_listflags &= ~PGRP_FLAG_ITERABEGIN;
		if ((pgrp->pg_listflags & PGRP_FLAG_ITERWAIT) == PGRP_FLAG_ITERWAIT) {
			pgrp->pg_listflags &= ~PGRP_FLAG_ITERWAIT;
			wakeup(&pgrp->pg_listflags);
		}
		pgrp_unlock(pgrp);
		if (dropref != 0)
			pg_rele(pgrp);
	}
	kfree(pid_list, alloc_count);
	return(0);
}
2477
/*
 * Add `child' to process group `pgrp', linking it after `parent' in
 * the member list when a parent is given.  Caller holds a reference
 * on pgrp.
 */
static void
pgrp_add(struct pgrp * pgrp, struct proc * parent, struct proc * child)
{
	proc_list_lock();
	child->p_pgrp = pgrp;
	child->p_pgrpid = pgrp->pg_id;
	child->p_listflag |= P_LIST_INPGRP;
	/*
	 * When pgrp is being freed , a process can still
	 * request addition using setpgid from bash when
	 * login is terminated (login cycler) return ESRCH
	 * Safe to hold lock due to refcount on pgrp
	 */
	if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
		pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	if ((pgrp->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
		panic("pgrp_add : pgrp is dead adding process");
	proc_list_unlock();

	/* Link the child into the member list under the pgrp lock. */
	pgrp_lock(pgrp);
	pgrp->pg_membercnt++;
	if ( parent != PROC_NULL) {
		LIST_INSERT_AFTER(parent, child, p_pglist);
	}else {
		LIST_INSERT_HEAD(&pgrp->pg_members, child, p_pglist);
	}
	pgrp_unlock(pgrp);

	proc_list_lock();
	/* Re-check: membership may have raced with group termination. */
	if (((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (pgrp->pg_membercnt != 0)) {
		pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}
	proc_list_unlock();
}
2514
/*
 * Remove `p' from its process group.  When the last member leaves,
 * the group itself is deleted (pgdelete_dropref consumes our
 * reference); otherwise the reference is simply dropped.
 */
static void
pgrp_remove(struct proc * p)
{
	struct pgrp * pg;

	pg = proc_pgrp(p);		/* takes a reference on the group */

	proc_list_lock();
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INPGRP) == 0)
		panic("removing from pglist but no named ref\n");
#endif
	p->p_pgrpid = PGRPID_DEAD;
	p->p_listflag &= ~P_LIST_INPGRP;
	p->p_pgrp = NULL;
	proc_list_unlock();

	if (pg == PGRP_NULL)
		panic("pgrp_remove: pg is NULL");
	pgrp_lock(pg);
	pg->pg_membercnt--;

	if (pg->pg_membercnt < 0)
		panic("pgprp: -ve membercnt pgprp:%p p:%p\n",pg, p);

	LIST_REMOVE(p, p_pglist);
	if (pg->pg_members.lh_first == 0) {
		/* Last member left: delete the group, consuming our ref. */
		pgrp_unlock(pg);
		pgdelete_dropref(pg);
	} else {
		pgrp_unlock(pg);
		pg_rele(pg);
	}
}
2549
2550
/*
 * Atomically (with respect to proc_pgrp() lookups) move `p' from its
 * current process group to `newpg'.  The P_LIST_PGRPTRANS flag marks
 * the transition; proc_pgrp() callers block on it until the move is
 * complete.
 */
/* cannot use proc_pgrp as it may be stalled */
static void
pgrp_replace(struct proc * p, struct pgrp * newpg)
{
	struct pgrp * oldpg;



	proc_list_lock();

	/* Wait out any transition already in progress for this process. */
	while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
		p->p_listflag |= P_LIST_PGRPTRWAIT;
		(void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
	}

	/* Mark the transition so concurrent proc_pgrp() callers block. */
	p->p_listflag |= P_LIST_PGRPTRANS;

	oldpg = p->p_pgrp;
	if (oldpg == PGRP_NULL)
		panic("pgrp_replace: oldpg NULL");
	oldpg->pg_refcount++;	/* keep the old group alive while we unlink */
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INPGRP) == 0)
		panic("removing from pglist but no named ref\n");
#endif
	p->p_pgrpid = PGRPID_DEAD;
	p->p_listflag &= ~P_LIST_INPGRP;
	p->p_pgrp = NULL;

	proc_list_unlock();

	/* Unlink from the old group; delete it if p was the last member. */
	pgrp_lock(oldpg);
	oldpg->pg_membercnt--;
	if (oldpg->pg_membercnt < 0)
		panic("pgprp: -ve membercnt pgprp:%p p:%p\n",oldpg, p);
	LIST_REMOVE(p, p_pglist);
	if (oldpg->pg_members.lh_first == 0) {
		pgrp_unlock(oldpg);
		pgdelete_dropref(oldpg);
	} else {
		pgrp_unlock(oldpg);
		pg_rele(oldpg);
	}

	proc_list_lock();
	p->p_pgrp = newpg;
	p->p_pgrpid = newpg->pg_id;
	p->p_listflag |= P_LIST_INPGRP;
	/*
	 * When pgrp is being freed , a process can still
	 * request addition using setpgid from bash when
	 * login is terminated (login cycler) return ESRCH
	 * Safe to hold lock due to refcount on pgrp
	 */
	if ((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
		newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	if ((newpg->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
		panic("pgrp_add : pgrp is dead adding process");
	proc_list_unlock();

	/* Link into the new group's member list. */
	pgrp_lock(newpg);
	newpg->pg_membercnt++;
	LIST_INSERT_HEAD(&newpg->pg_members, p, p_pglist);
	pgrp_unlock(newpg);

	proc_list_lock();
	/* Re-check: membership may have raced with group termination. */
	if (((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (newpg->pg_membercnt != 0)) {
		newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	/* Transition done: clear the flag and wake any blocked lookups. */
	p->p_listflag &= ~P_LIST_PGRPTRANS;
	if ((p->p_listflag & P_LIST_PGRPTRWAIT) == P_LIST_PGRPTRWAIT) {
		p->p_listflag &= ~P_LIST_PGRPTRWAIT;
		wakeup(&p->p_pgrpid);

	}
	proc_list_unlock();
}
2631
/* Acquire the per-pgrp mutex guarding the group's member list/count. */
void
pgrp_lock(struct pgrp * pgrp)
{
	lck_mtx_lock(&pgrp->pg_mlock);
}
2637
/* Release the per-pgrp mutex taken by pgrp_lock(). */
void
pgrp_unlock(struct pgrp * pgrp)
{
	lck_mtx_unlock(&pgrp->pg_mlock);
}
2643
/* Acquire the per-session mutex. */
void
session_lock(struct session * sess)
{
	lck_mtx_lock(&sess->s_mlock);
}
2649
2650
/* Release the per-session mutex taken by session_lock(). */
void
session_unlock(struct session * sess)
{
	lck_mtx_unlock(&sess->s_mlock);
}
2656
2657 struct pgrp *
2658 proc_pgrp(proc_t p)
2659 {
2660 struct pgrp * pgrp;
2661
2662 if (p == PROC_NULL)
2663 return(PGRP_NULL);
2664 proc_list_lock();
2665
2666 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2667 p->p_listflag |= P_LIST_PGRPTRWAIT;
2668 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2669 }
2670
2671 pgrp = p->p_pgrp;
2672
2673 assert(pgrp != NULL);
2674
2675 if (pgrp != PGRP_NULL) {
2676 pgrp->pg_refcount++;
2677 if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) != 0)
2678 panic("proc_pgrp: ref being povided for dead pgrp");
2679 }
2680
2681 proc_list_unlock();
2682
2683 return(pgrp);
2684 }
2685
2686 struct pgrp *
2687 tty_pgrp(struct tty * tp)
2688 {
2689 struct pgrp * pg = PGRP_NULL;
2690
2691 proc_list_lock();
2692 pg = tp->t_pgrp;
2693
2694 if (pg != PGRP_NULL) {
2695 if ((pg->pg_listflags & PGRP_FLAG_DEAD) != 0)
2696 panic("tty_pgrp: ref being povided for dead pgrp");
2697 pg->pg_refcount++;
2698 }
2699 proc_list_unlock();
2700
2701 return(pg);
2702 }
2703
2704 struct session *
2705 proc_session(proc_t p)
2706 {
2707 struct session * sess = SESSION_NULL;
2708
2709 if (p == PROC_NULL)
2710 return(SESSION_NULL);
2711
2712 proc_list_lock();
2713
2714 /* wait during transitions */
2715 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2716 p->p_listflag |= P_LIST_PGRPTRWAIT;
2717 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2718 }
2719
2720 if ((p->p_pgrp != PGRP_NULL) && ((sess = p->p_pgrp->pg_session) != SESSION_NULL)) {
2721 if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
2722 panic("proc_session:returning sesssion ref on terminating session");
2723 sess->s_count++;
2724 }
2725 proc_list_unlock();
2726 return(sess);
2727 }
2728
/*
 * session_rele
 * Drop one reference on sess (s_count).  When the last reference goes,
 * mark the session terminated, unhash it so lookups can no longer find
 * it, destroy its lock and free the structure.
 */
void
session_rele(struct session *sess)
{
	proc_list_lock();
	if (--sess->s_count == 0) {
		if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
			panic("session_rele: terminating already terminated session");
		sess->s_listflags |= S_LIST_TERM;
		/* unhash first so no new lookups can reach the dying session */
		LIST_REMOVE(sess, s_hash);
		sess->s_listflags |= S_LIST_DEAD;
		/* count must still be zero under the list lock before freeing */
		if (sess->s_count != 0)
			panic("session_rele: freeing session in use");
		proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_destroy(&sess->s_mlock, proc_mlock_grp);
#else
		lck_mtx_destroy(&sess->s_mlock, proc_lck_grp);
#endif
		FREE_ZONE(sess, sizeof(struct session), M_SESSION);
	} else
		proc_list_unlock();
}
2751
/*
 * proc_transstart
 * Begin a "transition" on p (P_LINTRANSIT), recording the current thread
 * as holder.  If another thread already holds the transition, sleep until
 * it ends -- unless that holder has already committed (P_LTRANSCOMMIT),
 * in which case return EDEADLK rather than risk deadlocking.
 * 'locked' non-zero means the caller already holds the proc lock.
 * Returns 0 on success.
 */
int
proc_transstart(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);
	while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
		if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT) {
			if (locked == 0)
				proc_unlock(p);
			return EDEADLK;
		}
		p->p_lflag |= P_LTRANSWAIT;
		msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
	}
	p->p_lflag |= P_LINTRANSIT;
	p->p_transholder = current_thread();
	if (locked == 0)
		proc_unlock(p);
	return 0;
}
2772
/*
 * proc_transcommit
 * Mark the current transition on p as committed (P_LTRANSCOMMIT) and wake
 * any threads sleeping in proc_transstart/proc_transwait.  Must be called
 * by the thread that started the transition (asserted below).
 * 'locked' non-zero means the caller already holds the proc lock.
 */
void
proc_transcommit(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);

	assert ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT);
	assert (p->p_transholder == current_thread());
	p->p_lflag |= P_LTRANSCOMMIT;

	if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
		p->p_lflag &= ~P_LTRANSWAIT;
		wakeup(&p->p_lflag);
	}
	if (locked == 0)
		proc_unlock(p);
}
2790
/*
 * proc_transend
 * End the transition on p: clear both the in-transit and committed flags,
 * drop the holder, and wake any waiters.
 * 'locked' non-zero means the caller already holds the proc lock.
 */
void
proc_transend(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);

	p->p_lflag &= ~( P_LINTRANSIT | P_LTRANSCOMMIT);
	p->p_transholder = NULL;

	if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
		p->p_lflag &= ~P_LTRANSWAIT;
		wakeup(&p->p_lflag);
	}
	if (locked == 0)
		proc_unlock(p);
}
2807
/*
 * proc_transwait
 * Sleep until any transition on p completes, without starting one.
 * Returns EDEADLK if the transition was already committed and the caller
 * is p itself (waiting on one's own committed transition cannot finish).
 * 'locked' non-zero means the caller already holds the proc lock.
 * Returns 0 once no transition is in flight.
 */
int
proc_transwait(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);
	while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
		if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT && current_proc() == p) {
			if (locked == 0)
				proc_unlock(p);
			return EDEADLK;
		}
		p->p_lflag |= P_LTRANSWAIT;
		msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
	}
	if (locked == 0)
		proc_unlock(p);
	return 0;
}
2826
/* Acquire the global mutex protecting per-proc knote lists. */
void
proc_klist_lock(void)
{
	lck_mtx_lock(proc_klist_mlock);
}
2832
/* Release the global knote-list mutex taken by proc_klist_lock(). */
void
proc_klist_unlock(void)
{
	lck_mtx_unlock(proc_klist_mlock);
}
2838
/* Deliver 'hint' to every knote attached to p, under the klist lock. */
void
proc_knote(struct proc * p, long hint)
{
	proc_klist_lock();
	KNOTE(&p->p_klist, hint);
	proc_klist_unlock();
}
2846
2847 void
2848 proc_knote_drain(struct proc *p)
2849 {
2850 struct knote *kn = NULL;
2851
2852 /*
2853 * Clear the proc's klist to avoid references after the proc is reaped.
2854 */
2855 proc_klist_lock();
2856 while ((kn = SLIST_FIRST(&p->p_klist))) {
2857 kn->kn_ptr.p_proc = PROC_NULL;
2858 KNOTE_DETACH(&p->p_klist, kn);
2859 }
2860 proc_klist_unlock();
2861 }
2862
/* Set the P_LREGISTER flag on p under the proc lock. */
void
proc_setregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag |= P_LREGISTER;
	proc_unlock(p);
}
2870
/* Clear the P_LREGISTER flag on p under the proc lock. */
void
proc_resetregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag &= ~P_LREGISTER;
	proc_unlock(p);
}
2878
/* Return p's cached process group id (no locking; may race a transition). */
pid_t
proc_pgrpid(proc_t p)
{
	return p->p_pgrpid;
}
2884
2885 pid_t
2886 proc_selfpgrpid()
2887 {
2888 return current_proc()->p_pgrpid;
2889 }
2890
2891
2892 /* return control and action states */
2893 int
2894 proc_getpcontrol(int pid, int * pcontrolp)
2895 {
2896 proc_t p;
2897
2898 p = proc_find(pid);
2899 if (p == PROC_NULL)
2900 return(ESRCH);
2901 if (pcontrolp != NULL)
2902 *pcontrolp = p->p_pcaction;
2903
2904 proc_rele(p);
2905 return(0);
2906 }
2907
/*
 * proc_dopcontrol
 * Apply p's configured low-swap action (throttle, suspend, or kill) if no
 * action has been taken on it yet.  'num_found' points to an int counter,
 * incremented for each process actually acted on (driven from
 * no_paging_space_action() via proc_iterate()).
 * Always returns PROC_RETURNED so iteration continues.
 */
int
proc_dopcontrol(proc_t p, void *num_found)
{
	int pcontrol;

	proc_lock(p);

	pcontrol = PROC_CONTROL_STATE(p);

	if (PROC_ACTION_STATE(p) ==0) {
		switch(pcontrol) {
		case P_PCTHROTTLE:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: throttling pid %d (%s)\n", p->p_pid, p->p_comm);
			(*(int *)num_found)++;
			break;

		case P_PCSUSP:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: suspending pid %d (%s)\n", p->p_pid, p->p_comm);
			task_suspend(p->task);
			(*(int *)num_found)++;
			break;

		case P_PCKILL:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
			psignal(p, SIGKILL);
			(*(int *)num_found)++;
			break;

		default:
			/* no action configured for this proc */
			proc_unlock(p);
		}

	} else
		proc_unlock(p);

	return(PROC_RETURNED);
}
2951
2952
2953 /*
2954 * Resume a throttled or suspended process. This is an internal interface that's only
2955 * used by the user level code that presents the GUI when we run out of swap space and
2956 * hence is restricted to processes with superuser privileges.
2957 */
2958
/*
 * proc_resetpcontrol
 * Undo a previously applied low-swap action on 'pid': unthrottle a
 * throttled process or resume a suspended one.  Caller must be the
 * validated resource-control owner (P_LVMRSRCOWNER) or superuser.
 * A killed process (P_PCKILL) cannot be revived; the request is logged
 * and ignored.  Returns ESRCH if pid not found, a suser() error on
 * privilege failure, else 0.
 */
int
proc_resetpcontrol(int pid)
{
	proc_t p;
	int pcontrol;
	int error;
	proc_t self = current_proc();

	/* if the process has been validated to handle resource control or root is valid one */
	if (((self->p_lflag & P_LVMRSRCOWNER) == 0) && (error = suser(kauth_cred_get(), 0)))
		return error;

	p = proc_find(pid);
	if (p == PROC_NULL)
		return(ESRCH);

	proc_lock(p);

	pcontrol = PROC_CONTROL_STATE(p);

	if(PROC_ACTION_STATE(p) !=0) {
		switch(pcontrol) {
		case P_PCTHROTTLE:
			PROC_RESETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: unthrottling pid %d (%s)\n", p->p_pid, p->p_comm);
			break;

		case P_PCSUSP:
			PROC_RESETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: resuming pid %d (%s)\n", p->p_pid, p->p_comm);
			task_resume(p->task);
			break;

		case P_PCKILL:
			/* Huh? */
			/*
			 * NOTE(review): this sets (not resets) the action state for an
			 * already-killed proc -- presumably intentional so the kill
			 * marker sticks; confirm before changing.
			 */
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: attempt to unkill pid %d (%s) ignored\n", p->p_pid, p->p_comm);
			break;

		default:
			proc_unlock(p);
		}

	} else
		proc_unlock(p);

	proc_rele(p);
	return(0);
}
3011
3012
3013 /*
3014 * Return true if the specified process has an action state specified for it and it isn't
3015 * already in an action state and it's using more physical memory than the specified threshold.
3016 * Note: the memory_threshold argument is specified in bytes and is of type uint64_t.
3017 */
3018
3019 static int
3020 proc_pcontrol_filter(proc_t p, void *memory_thresholdp)
3021 {
3022
3023 return PROC_CONTROL_STATE(p) && /* if there's an action state specified... */
3024 (PROC_ACTION_STATE(p) == 0) && /* and we're not in the action state yet... */
3025 (get_task_resident_size(p->task) > *((uint64_t *)memory_thresholdp)); /* and this proc is over the mem threshold, */
3026 /* then return true to take action on this proc */
3027 }
3028
3029
3030
3031 /*
3032 * Deal with the out of swap space condition. This routine gets called when
3033 * we want to swap something out but there's no more space left. Since this
 * creates a memory deadlock situation, we need to take action to free up
3035 * some memory resources in order to prevent the system from hanging completely.
3036 * The action we take is based on what the system processes running at user level
3037 * have specified. Processes are marked in one of four categories: ones that
3038 * can be killed immediately, ones that should be suspended, ones that should
3039 * be throttled, and all the rest which are basically none of the above. Which
3040 * processes are marked as being in which category is a user level policy decision;
3041 * we just take action based on those decisions here.
3042 */
3043
#define STARTING_PERCENTAGE	50	/* memory threshold expressed as a percentage */
					/* of physical memory */

/* last time no_paging_space_action() ran; used to throttle re-entry */
struct timeval	last_no_space_action = {0, 0};
3048
void
no_paging_space_action(void)
{

	uint64_t	memory_threshold;
	int		num_found;
	struct timeval	now;

	/*
	 * Throttle how often we come through here.  Once every 20 seconds should be plenty.
	 */

	microtime(&now);

	if (now.tv_sec <= last_no_space_action.tv_sec + 20)
		return;

	last_no_space_action = now;

	/*
	 * Examine all processes and find those that have been marked to have some action
	 * taken when swap space runs out.  Of those processes, select one or more and
	 * apply the specified action to them.  The idea is to only take action against
	 * a few processes rather than hitting too many at once.  If the low swap condition
	 * persists, this routine will get called again and we'll take action against more
	 * processes.
	 *
	 * Of the processes that have been marked, we choose which ones to take action
	 * against according to how much physical memory they're presently using.  We
	 * start with the STARTING_PERCENTAGE threshold and any processes using more physical
	 * memory than the percentage threshold will have action taken against them.  If there
	 * are no processes over the threshold, then the threshold is cut in half and we
	 * look again for processes using more than this threshold.  We continue in
	 * this fashion until we find at least one process to take action against.  This
	 * iterative approach is less than ideally efficient, however we only get here
	 * when the system is almost in a memory deadlock and is pretty much just
	 * thrashing if it's doing anything at all.  Therefore, the cpu overhead of
	 * potentially multiple passes here probably isn't relevant.
	 */

	memory_threshold = (sane_size * STARTING_PERCENTAGE) / 100;	/* resident threshold in bytes */

	for (num_found = 0; num_found == 0; memory_threshold = memory_threshold / 2) {
		/* act on every marked proc over the threshold; bump num_found per action */
		proc_iterate(PROC_ALLPROCLIST, proc_dopcontrol, (void *)&num_found, proc_pcontrol_filter, (void *)&memory_threshold);

		/*
		 * If we just looked with memory_threshold == 0, then there's no need to iterate any further since
		 * we won't find any eligible processes at this point.
		 */

		if (memory_threshold == 0) {
			if (num_found == 0)	/* log that we couldn't do anything in this case */
				printf("low swap: unable to find any eligible processes to take action on\n");

			break;
		}
	}
}