]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kern_proc.c
9213b82f3ff312fc7f294d0ef7fbc35656ef5ae8
[apple/xnu.git] / bsd / kern / kern_proc.c
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)kern_proc.c 8.4 (Berkeley) 1/4/94
62 */
63 /*
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
67 * Version 2.0.
68 */
69 /* HISTORY
70 * 04-Aug-97 Umesh Vaishampayan (umeshv@apple.com)
71 * Added current_proc_EXTERNAL() function for the use of kernel
 *	loadable modules.
73 *
74 * 05-Jun-95 Mac Gillon (mgillon) at NeXT
75 * New version based on 3.3NS and 4.4
76 */
77
78
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/kernel.h>
82 #include <sys/proc_internal.h>
83 #include <sys/acct.h>
84 #include <sys/wait.h>
85 #include <sys/file_internal.h>
86 #include <sys/uio.h>
87 #include <sys/malloc.h>
88 #include <sys/lock.h>
89 #include <sys/mbuf.h>
90 #include <sys/ioctl.h>
91 #include <sys/tty.h>
92 #include <sys/signalvar.h>
93 #include <sys/syslog.h>
94 #include <sys/sysctl.h>
95 #include <sys/sysproto.h>
96 #include <sys/kauth.h>
97 #include <sys/codesign.h>
98 #include <sys/kernel_types.h>
99 #include <sys/ubc.h>
100 #include <kern/kalloc.h>
101 #include <kern/task.h>
102 #include <kern/coalition.h>
103 #include <sys/coalition.h>
104 #include <kern/assert.h>
105 #include <vm/vm_protos.h>
106 #include <vm/vm_map.h> /* vm_map_switch_protect() */
107 #include <vm/vm_pageout.h>
108 #include <mach/task.h>
109 #include <mach/message.h>
110 #include <sys/priv.h>
111 #include <sys/proc_info.h>
112 #include <sys/bsdtask_info.h>
113
114 #if CONFIG_MEMORYSTATUS
115 #include <sys/kern_memorystatus.h>
116 #endif
117
118 #if CONFIG_MACF
119 #include <security/mac_framework.h>
120 #endif
121
122 #include <libkern/crypto/sha1.h>
123
/*
 * Structure associated with user caching.  One uidinfo exists per uid
 * that currently owns at least one process; chgproccnt() maintains the
 * per-uid process count through these entries.
 */
struct uidinfo {
	LIST_ENTRY(uidinfo) ui_hash;	/* chain within a uihashtbl bucket */
	uid_t	ui_uid;			/* user id this entry counts for */
	long	ui_proccnt;		/* number of processes owned by ui_uid */
};
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
u_long uihash;		/* size of hash table - 1 */

/*
 * Other process lists: pid, process-group, and session hash tables,
 * all allocated by procinit() and protected by the proc list lock.
 */
struct pidhashhead *pidhashtbl;
u_long pidhash;
struct pgrphashhead *pgrphashtbl;
u_long pgrphash;
struct sesshashhead *sesshashtbl;
u_long sesshash;

struct proclist allproc;	/* list of all live processes */
struct proclist zombproc;	/* list of zombie (unreaped) processes */
extern struct tty cons;

extern int cs_debug;

#if DEBUG
#define __PROC_INTERNAL_DEBUG 1
#endif
/* Name to give to core files */
__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN+1] = {"/cores/core.%P"};

#if PROC_REF_DEBUG
/* backtrace capture used by record_procref() for ref-leak debugging */
extern uint32_t fastbacktrace(uintptr_t* bt, uint32_t max_frames) __attribute__((noinline));
#endif

/* forward declarations for file-local helpers */
static void orphanpg(struct pgrp *pg);
void 	proc_name_kdp(task_t t, char * buf, int size);
int	proc_threadname_kdp(void *uth, char *buf, size_t size);
void	proc_starttime_kdp(void *p, uint64_t *tv_sec, uint64_t *tv_usec);
char	*proc_name_address(void *p);

static void  pgrp_add(struct pgrp * pgrp, proc_t parent, proc_t child);
static void pgrp_remove(proc_t p);
static void pgrp_replace(proc_t p, struct pgrp *pgrp);
static void pgdelete_dropref(struct pgrp *pgrp);
extern void pg_rele_dropref(struct pgrp * pgrp);
static int csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaddittoken);
static boolean_t proc_parent_is_currentproc(proc_t p);

/* argument bundle for the fixjobc() process iterator callback */
struct fixjob_iterargs {
	struct pgrp * pg;
	struct session * mysession;
	int entering;
};

int fixjob_callback(proc_t, void *);
183
/*
 * Initialize global process hashing structures.
 * Called once at boot, before any process lookups can occur.
 */
void
procinit(void)
{
	LIST_INIT(&allproc);
	LIST_INIT(&zombproc);
	/* table sizes are scaled from the maximum process count */
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
	sesshashtbl = hashinit(maxproc / 4, M_PROC, &sesshash);
	uihashtbl = hashinit(maxproc / 16, M_PROC, &uihash);
}
197
/*
 * Change the count associated with number of processes
 * a given user is using. This routine protects the uihash
 * with the list lock.  Returns the resulting count for 'uid'.
 *
 * Allocation cannot happen while holding the list lock, so when a new
 * uidinfo entry is needed the lock is dropped, the entry is allocated,
 * and the lookup is retried from 'again'.
 */
int
chgproccnt(uid_t uid, int diff)
{
	struct uidinfo *uip;
	struct uidinfo *newuip = NULL;	/* entry pre-allocated on a prior pass */
	struct uihashhead *uipp;
	int retval;

again:
	proc_list_lock();
	uipp = UIHASH(uid);
	/* search this uid's hash chain for an existing entry */
	for (uip = uipp->lh_first; uip != 0; uip = uip->ui_hash.le_next)
		if (uip->ui_uid == uid)
			break;
	if (uip) {
		uip->ui_proccnt += diff;
		if (uip->ui_proccnt > 0) {
			retval = uip->ui_proccnt;
			proc_list_unlock();
			goto out;
		}
		if (uip->ui_proccnt < 0)
			panic("chgproccnt: procs < 0");
		/* count reached zero: retire the entry */
		LIST_REMOVE(uip, ui_hash);
		retval = 0;
		proc_list_unlock();
		FREE_ZONE(uip, sizeof(*uip), M_PROC);
		goto out;
	}
	/* no entry exists: only a positive diff may create one */
	if (diff <= 0) {
		if (diff == 0) {
			retval = 0;
			proc_list_unlock();
			goto out;
		}
		panic("chgproccnt: lost user");
	}
	if (newuip != NULL) {
		/* insert the entry allocated while the lock was dropped */
		uip = newuip;
		newuip = NULL;
		LIST_INSERT_HEAD(uipp, uip, ui_hash);
		uip->ui_uid = uid;
		uip->ui_proccnt = diff;
		retval = diff;
		proc_list_unlock();
		goto out;
	}
	/* allocate outside the lock, then retry the lookup from scratch */
	proc_list_unlock();
	MALLOC_ZONE(newuip, struct uidinfo *, sizeof(*uip), M_PROC, M_WAITOK);
	if (newuip == NULL)
		panic("chgproccnt: M_PROC zone depleted");
	goto again;
out:
	/* another thread may have raced in an entry; discard the spare */
	if (newuip != NULL)
		FREE_ZONE(newuip, sizeof(*uip), M_PROC);
	return(retval);
}
260
/*
 * Is p an inferior (descendant) of the current process?
 * Walks p's parent chain under the proc list lock; stopping at pid 0
 * (the chain root) means p is not a descendant.
 */
int
inferior(proc_t p)
{
	int retval = 0;

	proc_list_lock();
	for (; p != current_proc(); p = p->p_pptr)
		if (p->p_pid == 0)
			goto out;
	retval = 1;
out:
	proc_list_unlock();
	return(retval);
}
278
/*
 * Is p an inferior (descendant) of t?
 * Returns 0 when p == t (a process is not its own inferior).  Walks the
 * parent chain under the proc list lock, bailing out on pid 0, on a
 * cycle back to the start, or after inspecting more entries than there
 * are processes.
 */
int
isinferior(proc_t p, proc_t t)
{
	int retval = 0;
	int nchecked = 0;
	proc_t start = p;

	/* if p==t they are not inferior */
	if (p == t)
		return(0);

	proc_list_lock();
	for (; p != t; p = p->p_pptr) {
		nchecked++;

		/* Detect here if we're in a cycle */
		if ((p->p_pid == 0) || (p->p_pptr == start) || (nchecked >= nprocs))
			goto out;
	}
	retval = 1;
out:
	proc_list_unlock();
	return(retval);
}
306
307 int
308 proc_isinferior(int pid1, int pid2)
309 {
310 proc_t p = PROC_NULL;
311 proc_t t = PROC_NULL;
312 int retval = 0;
313
314 if (((p = proc_find(pid1)) != (proc_t)0 ) && ((t = proc_find(pid2)) != (proc_t)0))
315 retval = isinferior(p, t);
316
317 if (p != PROC_NULL)
318 proc_rele(p);
319 if (t != PROC_NULL)
320 proc_rele(t);
321
322 return(retval);
323 }
324
/*
 * proc_find: look up a live process by pid and take a reference on it.
 * Caller must release the reference with proc_rele().
 */
proc_t
proc_find(int pid)
{
	return(proc_findinternal(pid, 0));
}

/*
 * proc_findinternal: pid lookup plus proc_ref_locked().
 * 'locked' non-zero means the caller already holds the proc list lock.
 * Returns PROC_NULL if the pid is not found or the process cannot be
 * referenced (still in creation, exiting, or being drained).
 */
proc_t
proc_findinternal(int pid, int locked)
{
	proc_t p = PROC_NULL;

	if (locked == 0) {
		proc_list_lock();
	}

	p = pfind_locked(pid);
	if ((p == PROC_NULL) || (p != proc_ref_locked(p)))
		p = PROC_NULL;

	if (locked == 0) {
		proc_list_unlock();
	}

	return(p);
}
350
/*
 * proc_findthread: return a referenced proc for the given thread.
 * A vfork child thread (UT_VFORK set) reports the proc stashed in
 * uu_proc rather than the owning task's bsd info.  Returns PROC_NULL
 * when the proc cannot be referenced.
 */
proc_t
proc_findthread(thread_t thread)
{
	proc_t p = PROC_NULL;
	struct uthread *uth;

	proc_list_lock();
	uth = get_bsdthread_info(thread);
	if (uth && (uth->uu_flag & UT_VFORK))
		p = uth->uu_proc;
	else
		p = (proc_t)(get_bsdthreadtask_info(thread));
	p = proc_ref_locked(p);
	proc_list_unlock();
	return(p);
}
367
#if PROC_REF_DEBUG
/*
 * Per-uthread proc-reference accounting, used to hunt leaked proc refs.
 * Each uthread tracks how many proc refs it holds and, for the first
 * NUM_PROC_REFS_TO_TRACK acquisitions, the proc and a backtrace.
 */

/* Reset the calling-context counters on a recycled uthread. */
void
uthread_reset_proc_refcount(void *uthread) {
	uthread_t uth;

	if (proc_ref_tracking_disabled) {
		return;
	}

	uth = (uthread_t) uthread;

	uth->uu_proc_refcount = 0;
	uth->uu_pindex = 0;
}

/* Return the number of proc refs currently held by this uthread. */
int
uthread_get_proc_refcount(void *uthread) {
	uthread_t uth;

	if (proc_ref_tracking_disabled) {
		return 0;
	}

	uth = (uthread_t) uthread;

	return uth->uu_proc_refcount;
}

/*
 * Record a ref count delta (+1 on take, -1 on release) against the
 * current uthread; captures a backtrace on acquisition while slots last.
 */
static void
record_procref(proc_t p, int count) {
	uthread_t uth;

	if (proc_ref_tracking_disabled) {
		return;
	}

	uth = current_uthread();
	uth->uu_proc_refcount += count;

	if (count == 1) {
		if (uth->uu_pindex < NUM_PROC_REFS_TO_TRACK) {
			fastbacktrace((uintptr_t *) &uth->uu_proc_pcs[uth->uu_pindex], PROC_REF_STACK_DEPTH);

			uth->uu_proc_ps[uth->uu_pindex] = p;
			uth->uu_pindex++;
		}
	}
}
#endif
417
/*
 * proc_rele: drop a reference taken by proc_find()/proc_self().
 * Always returns 0.
 */
int
proc_rele(proc_t p)
{
	proc_list_lock();
	proc_rele_locked(p);
	proc_list_unlock();

	return(0);
}

/*
 * proc_self: return the current process with a reference taken, or
 * PROC_NULL if it can no longer be referenced (e.g. mid-exit).
 */
proc_t
proc_self(void)
{
	struct proc * p;

	p = current_proc();

	proc_list_lock();
	if (p != proc_ref_locked(p))
		p = PROC_NULL;
	proc_list_unlock();
	return(p);
}
441
442
/*
 * proc_ref_locked: take a reference on p.  Caller holds the proc list
 * lock.  Returns p on success, PROC_NULL if the process is still being
 * created, is a zombie, has started exit, or is being drained.
 */
proc_t
proc_ref_locked(proc_t p)
{
	proc_t p1 = p;

	/* if process still in creation return failure */
	if ((p == PROC_NULL) || ((p->p_listflag & P_LIST_INCREATE) != 0))
		return (PROC_NULL);
	/* do not return process marked for termination */
	if ((p->p_stat != SZOMB) && ((p->p_listflag & P_LIST_EXITED) == 0) && ((p->p_listflag & (P_LIST_DRAINWAIT | P_LIST_DRAIN | P_LIST_DEAD)) == 0)) {
		p->p_refcount++;
#if PROC_REF_DEBUG
		record_procref(p, 1);
#endif
	}
	else
		p1 = PROC_NULL;

	return(p1);
}
463
/*
 * proc_rele_locked: drop one reference on p.  Caller holds the proc
 * list lock.  Wakes a drainer blocked in proc_refdrain() when the last
 * reference goes away; panics on refcount underflow.
 */
void
proc_rele_locked(proc_t p)
{

	if (p->p_refcount > 0) {
		p->p_refcount--;
#if PROC_REF_DEBUG
		record_procref(p, -1);
#endif
		/* last ref gone and a drainer is waiting: wake it */
		if ((p->p_refcount == 0) && ((p->p_listflag & P_LIST_DRAINWAIT) == P_LIST_DRAINWAIT)) {
			p->p_listflag &= ~P_LIST_DRAINWAIT;
			wakeup(&p->p_refcount);
		}
	} else
		panic("proc_rele_locked -ve ref\n");

}
481
/*
 * proc_find_zombref: find a process that has already started exit and
 * claim exclusive control of it by setting P_LIST_WAITING.  Sleeps if
 * another thread currently holds that claim.  Returns PROC_NULL if the
 * pid is absent, still in creation, or has not begun exiting.  Release
 * with proc_drop_zombref().
 */
proc_t
proc_find_zombref(int pid)
{
	proc_t p;

	proc_list_lock();

again:
	p = pfind_locked(pid);

	/* should we bail? */
	if ((p == PROC_NULL)					/* not found */
	    || ((p->p_listflag & P_LIST_INCREATE) != 0)		/* not created yet */
	    || ((p->p_listflag & P_LIST_EXITED) == 0)) {	/* not started exit */

		proc_list_unlock();
		return (PROC_NULL);
	}

	/* If someone else is controlling the (unreaped) zombie - wait */
	if ((p->p_listflag & P_LIST_WAITING) != 0) {
		(void)msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
		goto again;
	}
	p->p_listflag |= P_LIST_WAITING;

	proc_list_unlock();

	return(p);
}

/*
 * proc_drop_zombref: release the P_LIST_WAITING claim taken by
 * proc_find_zombref() and wake any other thread queued for it.
 */
void
proc_drop_zombref(proc_t p)
{
	proc_list_lock();
	if ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
		p->p_listflag &= ~P_LIST_WAITING;
		wakeup(&p->p_stat);
	}
	proc_list_unlock();
}
523
524
/*
 * proc_refdrain: block until all outstanding references on p are
 * dropped, then mark the proc P_LIST_DEAD so no new references can be
 * taken.  Used during process teardown.
 */
void
proc_refdrain(proc_t p)
{

	proc_list_lock();

	p->p_listflag |= P_LIST_DRAIN;
	while (p->p_refcount) {
		/* ask proc_rele_locked() to wake us when the count hits 0 */
		p->p_listflag |= P_LIST_DRAINWAIT;
		msleep(&p->p_refcount, proc_list_mlock, 0, "proc_refdrain", 0) ;
	}
	p->p_listflag &= ~P_LIST_DRAIN;
	p->p_listflag |= P_LIST_DEAD;

	proc_list_unlock();


}
543
/*
 * proc_parentholdref: take a parent reference (p_parentref) on p's
 * parent.  If the parent is mid child-drain, waits (up to 5 rounds)
 * for the drain to settle before deciding.  Returns the referenced
 * parent, or PROC_NULL if the parent is gone, a zombie, or has already
 * drained its children.  Release with proc_parentdropref().
 */
proc_t
proc_parentholdref(proc_t p)
{
	proc_t parent = PROC_NULL;
	proc_t pp;
	int loopcnt = 0;


	proc_list_lock();
loop:
	pp = p->p_pptr;
	if ((pp == PROC_NULL) || (pp->p_stat == SZOMB) || ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED))) {
		parent = PROC_NULL;
		goto out;
	}

	/* drain started but not finished: wait for it, bounded retries */
	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == P_LIST_CHILDDRSTART) {
		pp->p_listflag |= P_LIST_CHILDDRWAIT;
		msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
		loopcnt++;
		if (loopcnt == 5) {
			parent = PROC_NULL;
			goto out;
		}
		goto loop;
	}

	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == 0) {
		pp->p_parentref++;
		parent = pp;
		goto out;
	}

out:
	proc_list_unlock();
	return(parent);
}
/*
 * proc_parentdropref: drop a parent reference taken by
 * proc_parentholdref().  'listlocked' non-zero means the caller already
 * holds the proc list lock.  Wakes proc_childdrainstart() when the last
 * parent ref goes away; panics on underflow.  Always returns 0.
 */
int
proc_parentdropref(proc_t p, int listlocked)
{
	if (listlocked == 0)
		proc_list_lock();

	if (p->p_parentref > 0) {
		p->p_parentref--;
		if ((p->p_parentref == 0) && ((p->p_listflag & P_LIST_PARENTREFWAIT) == P_LIST_PARENTREFWAIT)) {
			p->p_listflag &= ~P_LIST_PARENTREFWAIT;
			wakeup(&p->p_parentref);
		}
	} else
		panic("proc_parentdropref -ve ref\n");
	if (listlocked == 0)
		proc_list_unlock();

	return(0);
}
600
/*
 * proc_childdrainstart: begin draining p's children during exit.
 * Marks P_LIST_CHILDDRSTART and blocks until every outstanding
 * parent reference (proc_parentholdref) has been dropped.
 */
void
proc_childdrainstart(proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_CHILDDRSTART) == P_LIST_CHILDDRSTART)
		panic("proc_childdrainstart: childdrain already started\n");
#endif
	p->p_listflag |= P_LIST_CHILDDRSTART;
	/* wait for all that hold parentrefs to drop */
	while (p->p_parentref > 0) {
		p->p_listflag |= P_LIST_PARENTREFWAIT;
		msleep(&p->p_parentref, proc_list_mlock, 0, "proc_childdrainstart", 0) ;
	}
}


/*
 * proc_childdrainend: mark the child drain complete and wake any
 * threads waiting on it (proc_parentholdref / proc_parent).
 */
void
proc_childdrainend(proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if (p->p_childrencnt > 0)
		panic("exiting: children stil hanging around\n");
#endif
	p->p_listflag |= P_LIST_CHILDDRAINED;
	if ((p->p_listflag & (P_LIST_CHILDLKWAIT |P_LIST_CHILDDRWAIT)) != 0) {
		p->p_listflag &= ~(P_LIST_CHILDLKWAIT |P_LIST_CHILDDRWAIT);
		wakeup(&p->p_childrencnt);
	}
}
630
/*
 * proc_checkdeadrefs: debug-only sanity check run when a proc is about
 * to be freed; panics if any list membership or reference counts are
 * still outstanding.  No-op in non-debug builds.
 */
void
proc_checkdeadrefs(__unused proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INHASH) != 0)
		panic("proc being freed and still in hash %p: %u\n", p, p->p_listflag);
	if (p->p_childrencnt != 0)
		panic("proc being freed and pending children cnt %p:%d\n", p, p->p_childrencnt);
	if (p->p_refcount != 0)
		panic("proc being freed and pending refcount %p:%d\n", p, p->p_refcount);
	if (p->p_parentref != 0)
		panic("proc being freed and pending parentrefs %p:%d\n", p, p->p_parentref);
#endif
}
645
646 int
647 proc_pid(proc_t p)
648 {
649 if (p != NULL)
650 return (p->p_pid);
651 return -1;
652 }
653
654 int
655 proc_ppid(proc_t p)
656 {
657 if (p != NULL)
658 return (p->p_ppid);
659 return -1;
660 }
661
662 int
663 proc_selfpid(void)
664 {
665 return (current_proc()->p_pid);
666 }
667
668 int
669 proc_selfppid(void)
670 {
671 return (current_proc()->p_ppid);
672 }
673
#if CONFIG_DTRACE
/*
 * Return the proc DTrace should attribute the current thread to.
 * During the narrow vfork window (UT_VFORK|UT_VFORKING both set) the
 * thread still belongs to the parent task, so report the task's proc
 * instead of current_proc().
 */
static proc_t
dtrace_current_proc_vforking(void)
{
	thread_t th = current_thread();
	struct uthread *ut = get_bsdthread_info(th);

	if (ut &&
	    ((ut->uu_flag & (UT_VFORK|UT_VFORKING)) == (UT_VFORK|UT_VFORKING))) {
		/*
		 * Handle the narrow window where we're in the vfork syscall,
		 * but we're not quite ready to claim (in particular, to DTrace)
		 * that we're running as the child.
		 */
		return (get_bsdtask_info(get_threadtask(th)));
	}
	return (current_proc());
}

/* pid of the DTrace-visible current process (vfork-aware). */
int
dtrace_proc_selfpid(void)
{
	return (dtrace_current_proc_vforking()->p_pid);
}

/* parent pid of the DTrace-visible current process (vfork-aware). */
int
dtrace_proc_selfppid(void)
{
	return (dtrace_current_proc_vforking()->p_ppid);
}

/* real uid of the DTrace-visible current process (vfork-aware). */
uid_t
dtrace_proc_selfruid(void)
{
	return (dtrace_current_proc_vforking()->p_ruid);
}
#endif /* CONFIG_DTRACE */
711
/*
 * proc_parent: return p's parent with a reference taken, or PROC_NULL.
 * If the parent has exited but not yet drained its children, wait on
 * its children count and retry, so a live parent pointer is returned
 * once reparenting settles.  Release with proc_rele().
 */
proc_t
proc_parent(proc_t p)
{
	proc_t parent;
	proc_t pp;

	proc_list_lock();
loop:
	pp = p->p_pptr;
	parent =  proc_ref_locked(pp);
	/* ref failed because parent is exiting but children not drained: wait */
	if ((parent == PROC_NULL) && (pp != PROC_NULL) && (pp->p_stat != SZOMB) && ((pp->p_listflag & P_LIST_EXITED) != 0) && ((pp->p_listflag & P_LIST_CHILDDRAINED)== 0)){
		pp->p_listflag |= P_LIST_CHILDLKWAIT;
		msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
		goto loop;
	}
	proc_list_unlock();
	return(parent);
}
730
731 static boolean_t
732 proc_parent_is_currentproc(proc_t p)
733 {
734 boolean_t ret = FALSE;
735
736 proc_list_lock();
737 if (p->p_pptr == current_proc())
738 ret = TRUE;
739
740 proc_list_unlock();
741 return ret;
742 }
743
/*
 * proc_name: copy the short command name (p_comm) of the process with
 * the given pid into 'buf' (at most 'size' bytes, NUL-terminated).
 * Leaves 'buf' untouched if the pid does not exist.
 */
void
proc_name(int pid, char * buf, int size)
{
	proc_t p;

	if ((p = proc_find(pid)) != PROC_NULL) {
		strlcpy(buf, &p->p_comm[0], size);
		proc_rele(p);
	}
}

/*
 * proc_name_kdp: debugger/stackshot-context name copy for a task.
 * Prefers the long name (p_name) when the destination is big enough to
 * hold more than p_comm; takes no locks and no proc reference, which is
 * only safe in the kdp/stackshot context it is built for.
 */
void
proc_name_kdp(task_t t, char * buf, int size)
{
	proc_t p = get_bsdtask_info(t);
	if (p == PROC_NULL)
		return;

	if ((size_t)size > sizeof(p->p_comm))
		strlcpy(buf, &p->p_name[0], MIN((int)sizeof(p->p_name), size));
	else
		strlcpy(buf, &p->p_comm[0], MIN((int)sizeof(p->p_comm), size));
}
767
768
769 int
770 proc_threadname_kdp(void *uth, char *buf, size_t size)
771 {
772 if (size < MAXTHREADNAMESIZE) {
773 /* this is really just a protective measure for the future in
774 * case the thread name size in stackshot gets out of sync with
775 * the BSD max thread name size. Note that bsd_getthreadname
776 * doesn't take input buffer size into account. */
777 return -1;
778 }
779
780 if (uth != NULL) {
781 bsd_getthreadname(uth, buf);
782 }
783 return 0;
784 }
785
/* note that this function is generally going to be called from stackshot,
 * and the arguments will be coming from a struct which is declared packed
 * thus the input arguments will in general be unaligned. We have to handle
 * that here. */
void
proc_starttime_kdp(void *p, uint64_t *tv_sec, uint64_t *tv_usec)
{
	proc_t pp = (proc_t)p;
	/* packed single-field wrapper: lets the compiler emit unaligned-safe
	 * stores through the possibly-unaligned output pointers */
	struct uint64p {
		uint64_t val;
	} __attribute__((packed));

	if (pp != PROC_NULL) {
		if (tv_sec != NULL)
			((struct uint64p *)tv_sec)->val = pp->p_start.tv_sec;
		if (tv_usec != NULL)
			((struct uint64p *)tv_usec)->val = pp->p_start.tv_usec;
	}
}
805
806 char *
807 proc_name_address(void *p)
808 {
809 return &((proc_t)p)->p_comm[0];
810 }
811
812 void
813 proc_selfname(char * buf, int size)
814 {
815 proc_t p;
816
817 if ((p = current_proc())!= (proc_t)0) {
818 strlcpy(buf, &p->p_comm[0], size);
819 }
820 }
821
822 void
823 proc_signal(int pid, int signum)
824 {
825 proc_t p;
826
827 if ((p = proc_find(pid)) != PROC_NULL) {
828 psignal(p, signum);
829 proc_rele(p);
830 }
831 }
832
833 int
834 proc_issignal(int pid, sigset_t mask)
835 {
836 proc_t p;
837 int error=0;
838
839 if ((p = proc_find(pid)) != PROC_NULL) {
840 error = proc_pendingsignals(p, mask);
841 proc_rele(p);
842 }
843
844 return(error);
845 }
846
847 int
848 proc_noremotehang(proc_t p)
849 {
850 int retval = 0;
851
852 if (p)
853 retval = p->p_flag & P_NOREMOTEHANG;
854 return(retval? 1: 0);
855
856 }
857
858 int
859 proc_exiting(proc_t p)
860 {
861 int retval = 0;
862
863 if (p)
864 retval = p->p_lflag & P_LEXIT;
865 return(retval? 1: 0);
866 }
867
868 int
869 proc_forcequota(proc_t p)
870 {
871 int retval = 0;
872
873 if (p)
874 retval = p->p_flag & P_FORCEQUOTA;
875 return(retval? 1: 0);
876
877 }
878
879 int
880 proc_suser(proc_t p)
881 {
882 kauth_cred_t my_cred;
883 int error;
884
885 my_cred = kauth_cred_proc_ref(p);
886 error = suser(my_cred, &p->p_acflag);
887 kauth_cred_unref(&my_cred);
888 return(error);
889 }
890
891 task_t
892 proc_task(proc_t proc)
893 {
894 return (task_t)proc->task;
895 }
896
/*
 * Obtain the first thread in a process
 *
 * Returns the vc_thread of the first uthread on p_uthlist, or NULL if
 * the process has no threads.
 *
 * XXX This is a bad thing to do; it exists predominantly to support the
 * XXX use of proc_t's in places that should really be using
 * XXX thread_t's instead. This maintains historical behaviour, but really
 * XXX needs an audit of the context (proxy vs. not) to clean up.
 */
thread_t
proc_thread(proc_t proc)
{
	uthread_t uth = TAILQ_FIRST(&proc->p_uthlist);

	if (uth != NULL)
		return(uth->uu_context.vc_thread);

	return(NULL);
}
915
916 kauth_cred_t
917 proc_ucred(proc_t p)
918 {
919 return(p->p_ucred);
920 }
921
922 struct uthread *
923 current_uthread()
924 {
925 thread_t th = current_thread();
926
927 return((struct uthread *)get_bsdthread_info(th));
928 }
929
930
931 int
932 proc_is64bit(proc_t p)
933 {
934 return(IS_64BIT_PROCESS(p));
935 }
936
937 int
938 proc_pidversion(proc_t p)
939 {
940 return(p->p_idversion);
941 }
942
943 uint64_t
944 proc_uniqueid(proc_t p)
945 {
946 return(p->p_uniqueid);
947 }
948
949 uint64_t
950 proc_puniqueid(proc_t p)
951 {
952 return(p->p_puniqueid);
953 }
954
/*
 * proc_coalitionids: fill 'ids' with the coalition ids of p's task,
 * or all zeroes when the kernel is built without coalition support.
 */
void
proc_coalitionids(__unused proc_t p, __unused uint64_t ids[COALITION_NUM_TYPES])
{
#if CONFIG_COALITIONS
	task_coalition_ids(p->task, ids);
#else
	memset(ids, 0, sizeof(uint64_t [COALITION_NUM_TYPES]));
#endif
	return;
}

/* Accumulated was-throttled statistic for p. */
uint64_t
proc_was_throttled(proc_t p)
{
	return (p->was_throttled);
}

/* Accumulated did-throttle statistic for p. */
uint64_t
proc_did_throttle(proc_t p)
{
	return (p->did_throttle);
}

/*
 * proc_getcdhash: fetch the code-directory hash of p's executable via
 * its text vnode/offset; returns the vn_getcdhash() error code.
 */
int
proc_getcdhash(proc_t p, unsigned char *cdhash)
{
	return vn_getcdhash(p->p_textvp, p->p_textoff, cdhash);
}
983
/*
 * proc_getexecutableuuid: copy the UUID of p's main executable into
 * 'uuidbuf'.  Does nothing if the destination is smaller than p_uuid.
 */
void
proc_getexecutableuuid(proc_t p, unsigned char *uuidbuf, unsigned long size)
{
	if (size >= sizeof(p->p_uuid)) {
		memcpy(uuidbuf, p->p_uuid, sizeof(p->p_uuid));
	}
}

/* Return vnode for executable with an iocount. Must be released with vnode_put() */
vnode_t
proc_getexecutablevnode(proc_t p)
{
	vnode_t tvp  = p->p_textvp;

	if ( tvp != NULLVP) {
		if (vnode_getwithref(tvp) == 0) {
			return tvp;
		}
	}

	return NULLVP;
}
1006
1007
1008 void
1009 bsd_set_dependency_capable(task_t task)
1010 {
1011 proc_t p = get_bsdtask_info(task);
1012
1013 if (p) {
1014 OSBitOrAtomic(P_DEPENDENCY_CAPABLE, &p->p_flag);
1015 }
1016 }
1017
1018
1019 int
1020 IS_64BIT_PROCESS(proc_t p)
1021 {
1022 if (p && (p->p_flag & P_LP64))
1023 return(1);
1024 else
1025 return(0);
1026 }
1027
/*
 * Locate a process by number.  Caller holds the proc list lock.
 * pid 0 maps to the kernel proc; no reference is taken.  DEBUG builds
 * additionally verify the pid hash holds no duplicate pids.
 */
proc_t
pfind_locked(pid_t pid)
{
	proc_t p;
#if DEBUG
	proc_t q;
#endif

	if (!pid)
		return (kernproc);

	for (p = PIDHASH(pid)->lh_first; p != 0; p = p->p_hash.le_next) {
		if (p->p_pid == pid) {
#if DEBUG
			for (q = p->p_hash.le_next; q != 0; q = q->p_hash.le_next) {
				if ((p !=q) && (q->p_pid == pid))
					panic("two procs with same pid %p:%p:%d:%d\n", p, q, p->p_pid, q->p_pid);
			}
#endif
			return (p);
		}
	}
	return (NULL);
}
1055
/*
 * Locate a zombie by PID.  Scans the zombie list under the proc list
 * lock; no reference is taken on the returned proc.
 */
__private_extern__ proc_t
pzfind(pid_t pid)
{
	proc_t p;


	proc_list_lock();

	for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next)
		if (p->p_pid == pid)
			break;

	proc_list_unlock();

	return (p);
}
1075
/*
 * Locate a process group by number and take a reference on it.
 * Returns PGRP_NULL if absent or already marked for termination.
 * Release with pg_rele().
 */

struct pgrp *
pgfind(pid_t pgid)
{
	struct pgrp * pgrp;

	proc_list_lock();
	pgrp = pgfind_internal(pgid);
	if ((pgrp == NULL) || ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) != 0))
		pgrp = PGRP_NULL;
	else
		pgrp->pg_refcount++;
	proc_list_unlock();
	return(pgrp);
}
1094
1095
1096
1097 struct pgrp *
1098 pgfind_internal(pid_t pgid)
1099 {
1100 struct pgrp *pgrp;
1101
1102 for (pgrp = PGRPHASH(pgid)->lh_first; pgrp != 0; pgrp = pgrp->pg_hash.le_next)
1103 if (pgrp->pg_id == pgid)
1104 return (pgrp);
1105 return (NULL);
1106 }
1107
1108 void
1109 pg_rele(struct pgrp * pgrp)
1110 {
1111 if(pgrp == PGRP_NULL)
1112 return;
1113 pg_rele_dropref(pgrp);
1114 }
1115
/*
 * pg_rele_dropref: drop one reference on pgrp.  When this is the last
 * reference and the group is marked for termination, hand off to
 * pgdelete_dropref() (which consumes the reference) to tear it down.
 */
void
pg_rele_dropref(struct pgrp * pgrp)
{
	proc_list_lock();
	if ((pgrp->pg_refcount == 1) && ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) == PGRP_FLAG_TERMINATE)) {
		proc_list_unlock();
		pgdelete_dropref(pgrp);
		return;
	}

	pgrp->pg_refcount--;
	proc_list_unlock();
}
1129
1130 struct session *
1131 session_find_internal(pid_t sessid)
1132 {
1133 struct session *sess;
1134
1135 for (sess = SESSHASH(sessid)->lh_first; sess != 0; sess = sess->s_hash.le_next)
1136 if (sess->s_sid == sessid)
1137 return (sess);
1138 return (NULL);
1139 }
1140
1141
/*
 * Make a new process ready to become a useful member of society by making it
 * visible in all the right places and initialize its own lists to empty.
 *
 * Parameters:	parent			The parent of the process to insert
 *		child			The child process to insert
 *
 * Returns:	(void)
 *
 * Notes:	Insert a child process into the parents process group, assign
 *		the child the parent process pointer and PPID of the parent,
 *		place it on the parents p_children list as a sibling,
 *		initialize its own child list, place it in the allproc list,
 *		insert it in the proper hash bucket, and initialize its
 *		event list.
 */
void
pinsertchild(proc_t parent, proc_t child)
{
	struct pgrp * pg;

	LIST_INIT(&child->p_children);
	TAILQ_INIT(&child->p_evlist);
	child->p_pptr = parent;
	child->p_ppid = parent->p_pid;
	child->p_puniqueid = parent->p_uniqueid;

	/* join the parent's process group (takes/drops its own pgrp ref) */
	pg = proc_pgrp(parent);
	pgrp_add(pg, parent, child);
	pg_rele(pg);

	proc_list_lock();

#if CONFIG_MEMORYSTATUS
	memorystatus_add(child, TRUE);
#endif

	parent->p_childrencnt++;
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);

	LIST_INSERT_HEAD(&allproc, child, p_list);
	/* mark the completion of proc creation */
	child->p_listflag &= ~P_LIST_INCREATE;

	proc_list_unlock();
}
1188
/*
 * Move p to a new or existing process group (and session)
 *
 * Returns:	0			Success
 *		ESRCH			No such process
 *
 * If no group with id 'pgid' exists, a new pgrp (and, with 'mksess', a
 * new session led by p) is created.  References taken by the initial
 * pgfind/proc_pgrp/proc_session lookups are dropped on every path.
 */
int
enterpgrp(proc_t p, pid_t pgid, int mksess)
{
	struct pgrp *pgrp;
	struct pgrp *mypgrp;
	struct session * procsp;

	pgrp = pgfind(pgid);
	mypgrp = proc_pgrp(p);
	procsp = proc_session(p);

#if DIAGNOSTIC
	if (pgrp != NULL && mksess)	/* firewalls */
		panic("enterpgrp: setsid into non-empty pgrp");
	if (SESS_LEADER(p, procsp))
		panic("enterpgrp: session leader attempted setpgrp");
#endif
	if (pgrp == PGRP_NULL) {
		pid_t savepid = p->p_pid;
		proc_t np = PROC_NULL;
		/*
		 * new process group
		 */
#if DIAGNOSTIC
		if (p->p_pid != pgid)
			panic("enterpgrp: new pgrp and pid != pgid");
#endif
		MALLOC_ZONE(pgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP,
		    M_WAITOK);
		if (pgrp == NULL)
			panic("enterpgrp: M_PGRP zone depleted");
		/* re-validate that p is still alive and is really 'savepid' */
		if ((np = proc_find(savepid)) == NULL || np != p) {
			if (np != PROC_NULL)
				proc_rele(np);
			if (mypgrp != PGRP_NULL)
				pg_rele(mypgrp);
			if (procsp != SESSION_NULL)
				session_rele(procsp);
			FREE_ZONE(pgrp, sizeof(struct pgrp), M_PGRP);
			return (ESRCH);
		}
		proc_rele(np);
		if (mksess) {
			struct session *sess;

			/*
			 * new session
			 */
			MALLOC_ZONE(sess, struct session *,
			    sizeof(struct session), M_SESSION, M_WAITOK);
			if (sess == NULL)
				panic("enterpgrp: M_SESSION zone depleted");
			sess->s_leader = p;
			sess->s_sid = p->p_pid;
			sess->s_count = 1;
			sess->s_ttyvp = NULL;
			sess->s_ttyp = TTY_NULL;
			sess->s_flags = 0;
			sess->s_listflags = 0;
			sess->s_ttypgrpid = NO_PID;
#if CONFIG_FINE_LOCK_GROUPS
			lck_mtx_init(&sess->s_mlock, proc_mlock_grp, proc_lck_attr);
#else
			lck_mtx_init(&sess->s_mlock, proc_lck_grp, proc_lck_attr);
#endif
			bcopy(procsp->s_login, sess->s_login,
			    sizeof(sess->s_login));
			/* a new session leader loses its controlling terminal */
			OSBitAndAtomic(~((uint32_t)P_CONTROLT), &p->p_flag);
			proc_list_lock();
			LIST_INSERT_HEAD(SESSHASH(sess->s_sid), sess, s_hash);
			proc_list_unlock();
			pgrp->pg_session = sess;
#if DIAGNOSTIC
			if (p != current_proc())
				panic("enterpgrp: mksession and p != curproc");
#endif
		} else {
			/* new pgrp joins p's existing session */
			proc_list_lock();
			pgrp->pg_session = procsp;

			if ((pgrp->pg_session->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
				panic("enterpgrp: providing ref to terminating session ");
			pgrp->pg_session->s_count++;
			proc_list_unlock();
		}
		pgrp->pg_id = pgid;
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_init(&pgrp->pg_mlock, proc_mlock_grp, proc_lck_attr);
#else
		lck_mtx_init(&pgrp->pg_mlock, proc_lck_grp, proc_lck_attr);
#endif
		LIST_INIT(&pgrp->pg_members);
		pgrp->pg_membercnt = 0;
		pgrp->pg_jobc = 0;
		proc_list_lock();
		pgrp->pg_refcount = 1;
		pgrp->pg_listflags = 0;
		LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
		proc_list_unlock();
	} else if (pgrp == mypgrp) {
		/* already a member: drop both lookup refs and succeed */
		pg_rele(pgrp);
		if (mypgrp != NULL)
			pg_rele(mypgrp);
		if (procsp != SESSION_NULL)
			session_rele(procsp);
		return (0);
	}

	if (procsp != SESSION_NULL)
		session_rele(procsp);
	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	fixjobc(p, mypgrp, 0);

	if(mypgrp != PGRP_NULL)
		pg_rele(mypgrp);
	pgrp_replace(p, pgrp);
	pg_rele(pgrp);

	return(0);
}
1320
1321 /*
1322 * remove process from process group
1323 */
1324 int
1325 leavepgrp(proc_t p)
1326 {
1327
1328 pgrp_remove(p);
1329 return (0);
1330 }
1331
1332 /*
1333 * delete a process group
1334 */
/*
 * Drop the reference held on a process group and, once the group is
 * both empty and unreferenced, unhash and free it.  Also drops the
 * group's reference on its session, freeing the session when that was
 * the last one.
 */
static void
pgdelete_dropref(struct pgrp *pgrp)
{
	struct tty *ttyp;
	int emptypgrp = 1;	/* nonzero when the group had no members */
	struct session *sessp;


	pgrp_lock(pgrp);
	if (pgrp->pg_membercnt != 0) {
		emptypgrp = 0;
	}
	pgrp_unlock(pgrp);

	proc_list_lock();
	pgrp->pg_refcount--;
	/*
	 * Bail unless the group is empty; membercnt is re-checked under
	 * the list lock to close the window between the two lock holds.
	 */
	if ((emptypgrp == 0) || (pgrp->pg_membercnt != 0)) {
		proc_list_unlock();
		return;
	}

	pgrp->pg_listflags |= PGRP_FLAG_TERMINATE;

	/* Another ref holder exists; they will complete the teardown. */
	if (pgrp->pg_refcount > 0) {
		proc_list_unlock();
		return;
	}

	pgrp->pg_listflags |= PGRP_FLAG_DEAD;
	LIST_REMOVE(pgrp, pg_hash);

	proc_list_unlock();

	/*
	 * If this group is the tty's foreground group, detach it.  The
	 * first (unlocked) check is an optimization; it is re-done under
	 * the tty lock before anything is modified.
	 */
	ttyp = SESSION_TP(pgrp->pg_session);
	if (ttyp != TTY_NULL) {
		if (ttyp->t_pgrp == pgrp) {
			tty_lock(ttyp);
			/* Re-check after acquiring the lock */
			if (ttyp->t_pgrp == pgrp) {
				ttyp->t_pgrp = NULL;
				pgrp->pg_session->s_ttypgrpid = NO_PID;
			}
			tty_unlock(ttyp);
		}
	}

	proc_list_lock();

	sessp = pgrp->pg_session;
	if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
		panic("pg_deleteref: manipulating refs of already terminating session");
	/* Drop the group's session reference; last release frees it. */
	if (--sessp->s_count == 0) {
		if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
			panic("pg_deleteref: terminating already terminated session");
		sessp->s_listflags |= S_LIST_TERM;
		ttyp = SESSION_TP(sessp);
		LIST_REMOVE(sessp, s_hash);
		proc_list_unlock();
		/* Clear the tty's back-pointer to the dying session. */
		if (ttyp != TTY_NULL) {
			tty_lock(ttyp);
			if (ttyp->t_session == sessp)
				ttyp->t_session = NULL;
			tty_unlock(ttyp);
		}
		proc_list_lock();
		sessp->s_listflags |= S_LIST_DEAD;
		if (sessp->s_count != 0)
			panic("pg_deleteref: freeing session in use");
		proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_destroy(&sessp->s_mlock, proc_mlock_grp);
#else
		lck_mtx_destroy(&sessp->s_mlock, proc_lck_grp);
#endif
		FREE_ZONE(sessp, sizeof(struct session), M_SESSION);
	} else
		proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
	lck_mtx_destroy(&pgrp->pg_mlock, proc_mlock_grp);
#else
	lck_mtx_destroy(&pgrp->pg_mlock, proc_lck_grp);
#endif
	FREE_ZONE(pgrp, sizeof(*pgrp), M_PGRP);
}
1419
1420
1421 /*
1422 * Adjust pgrp jobc counters when specified process changes process group.
1423 * We count the number of processes in each process group that "qualify"
1424 * the group for terminal job control (those with a parent in a different
1425 * process group of the same session). If that count reaches zero, the
1426 * process group becomes orphaned. Check both the specified process'
1427 * process group and that of its children.
1428 * entering == 0 => p is leaving specified group.
1429 * entering == 1 => p is entering specified group.
1430 */
/*
 * proc_childrenwalk() callback used by fixjobc(): `p` is one child of
 * the process changing groups.  If the child sits in a different
 * process group but the same session as the group being adjusted, the
 * child's own group gains or loses one job-control qualifier.
 */
int
fixjob_callback(proc_t p, void * arg)
{
	struct fixjob_iterargs *fp;
	struct pgrp * pg, *hispg;
	struct session * mysession, *hissess;
	int entering;

	fp = (struct fixjob_iterargs *)arg;
	pg = fp->pg;
	mysession = fp->mysession;
	entering = fp->entering;

	/* Both lookups return references that must be dropped below. */
	hispg = proc_pgrp(p);
	hissess = proc_session(p);

	if ((hispg != pg) &&
	    (hissess == mysession)) {
		pgrp_lock(hispg);
		if (entering) {
			hispg->pg_jobc++;
			pgrp_unlock(hispg);
		} else if (--hispg->pg_jobc == 0) {
			/* Last qualifier gone: group is now orphaned. */
			pgrp_unlock(hispg);
			orphanpg(hispg);
		} else
			pgrp_unlock(hispg);
	}
	if (hissess != SESSION_NULL)
		session_rele(hissess);
	if (hispg != PGRP_NULL)
		pg_rele(hispg);

	return(PROC_RETURNED);
}
1466
/*
 * Adjust job-control qualifier counts for process `p` moving into
 * (entering == 1) or out of (entering == 0) group `pgrp`.  See the
 * block comment above for the counting rules.
 */
void
fixjobc(proc_t p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp = PGRP_NULL;
	struct session *hissess = SESSION_NULL;
	struct session *mysession = pgrp->pg_session;
	proc_t parent;
	struct fixjob_iterargs fjarg;
	boolean_t proc_parent_self;

	/*
	 * Check if p's parent is current proc, if yes then no need to take
	 * a ref; calling proc_parent with current proc as parent may
	 * deadlock if current proc is exiting.
	 */
	proc_parent_self = proc_parent_is_currentproc(p);
	if (proc_parent_self)
		parent = current_proc();
	else
		parent = proc_parent(p);

	if (parent != PROC_NULL) {
		/* References dropped after the qualifier check below. */
		hispgrp = proc_pgrp(parent);
		hissess = proc_session(parent);
		if (!proc_parent_self)
			proc_rele(parent);
	}


	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	if ((hispgrp != pgrp) &&
	    (hissess == mysession)) {
		pgrp_lock(pgrp);
		if (entering) {
			pgrp->pg_jobc++;
			pgrp_unlock(pgrp);
		} else if (--pgrp->pg_jobc == 0) {
			/* Last qualifier gone: group is now orphaned. */
			pgrp_unlock(pgrp);
			orphanpg(pgrp);
		} else
			pgrp_unlock(pgrp);
	}

	if (hissess != SESSION_NULL)
		session_rele(hissess);
	if (hispgrp != PGRP_NULL)
		pg_rele(hispgrp);

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	fjarg.pg = pgrp;
	fjarg.mysession = mysession;
	fjarg.entering = entering;
	proc_childrenwalk(p, fixjob_callback, &fjarg);
}
1528
1529 /*
1530 * A process group has become orphaned;
1531 * if there are any stopped processes in the group,
1532 * hang-up all process in that group.
1533 */
1534 static void
1535 orphanpg(struct pgrp * pgrp)
1536 {
1537 proc_t p;
1538 pid_t * pid_list;
1539 int count, pidcount, i, alloc_count;
1540
1541 if (pgrp == PGRP_NULL)
1542 return;
1543 count = 0;
1544 pgrp_lock(pgrp);
1545 for (p = pgrp->pg_members.lh_first; p != 0; p = p->p_pglist.le_next) {
1546 if (p->p_stat == SSTOP) {
1547 for (p = pgrp->pg_members.lh_first; p != 0;
1548 p = p->p_pglist.le_next)
1549 count++;
1550 break; /* ??? stops after finding one.. */
1551 }
1552 }
1553 pgrp_unlock(pgrp);
1554
1555 count += 20;
1556 if (count > hard_maxproc)
1557 count = hard_maxproc;
1558 alloc_count = count * sizeof(pid_t);
1559 pid_list = (pid_t *)kalloc(alloc_count);
1560 bzero(pid_list, alloc_count);
1561
1562 pidcount = 0;
1563 pgrp_lock(pgrp);
1564 for (p = pgrp->pg_members.lh_first; p != 0;
1565 p = p->p_pglist.le_next) {
1566 if (p->p_stat == SSTOP) {
1567 for (p = pgrp->pg_members.lh_first; p != 0;
1568 p = p->p_pglist.le_next) {
1569 pid_list[pidcount] = p->p_pid;
1570 pidcount++;
1571 if (pidcount >= count)
1572 break;
1573 }
1574 break; /* ??? stops after finding one.. */
1575 }
1576 }
1577 pgrp_unlock(pgrp);
1578
1579 if (pidcount == 0)
1580 goto out;
1581
1582
1583 for (i = 0; i< pidcount; i++) {
1584 /* No handling or proc0 */
1585 if (pid_list[i] == 0)
1586 continue;
1587 p = proc_find(pid_list[i]);
1588 if (p) {
1589 proc_transwait(p, 0);
1590 pt_setrunnable(p);
1591 psignal(p, SIGHUP);
1592 psignal(p, SIGCONT);
1593 proc_rele(p);
1594 }
1595 }
1596 out:
1597 kfree(pid_list, alloc_count);
1598 return;
1599 }
1600
1601 int
1602 proc_is_classic(proc_t p __unused)
1603 {
1604 return (0);
1605 }
1606
1607 /* XXX Why does this function exist? Need to kill it off... */
1608 proc_t
1609 current_proc_EXTERNAL(void)
1610 {
1611 return (current_proc());
1612 }
1613
1614 int
1615 proc_is_forcing_hfs_case_sensitivity(proc_t p)
1616 {
1617 return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY) ? 1 : 0;
1618 }
1619
1620 /*
1621 * proc_core_name(name, uid, pid)
1622 * Expand the name described in corefilename, using name, uid, and pid.
1623 * corefilename is a printf-like string, with three format specifiers:
1624 * %N name of process ("name")
1625 * %P process id (pid)
1626 * %U user id (uid)
1627 * For example, "%N.core" is the default; they can be disabled completely
1628 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
1629 * This is controlled by the sysctl variable kern.corefile (see above).
1630 */
1631 __private_extern__ int
1632 proc_core_name(const char *name, uid_t uid, pid_t pid, char *cf_name,
1633 size_t cf_name_len)
1634 {
1635 const char *format, *appendstr;
1636 char id_buf[11]; /* Buffer for pid/uid -- max 4B */
1637 size_t i, l, n;
1638
1639 if (cf_name == NULL)
1640 goto toolong;
1641
1642 format = corefilename;
1643 for (i = 0, n = 0; n < cf_name_len && format[i]; i++) {
1644 switch (format[i]) {
1645 case '%': /* Format character */
1646 i++;
1647 switch (format[i]) {
1648 case '%':
1649 appendstr = "%";
1650 break;
1651 case 'N': /* process name */
1652 appendstr = name;
1653 break;
1654 case 'P': /* process id */
1655 snprintf(id_buf, sizeof(id_buf), "%u", pid);
1656 appendstr = id_buf;
1657 break;
1658 case 'U': /* user id */
1659 snprintf(id_buf, sizeof(id_buf), "%u", uid);
1660 appendstr = id_buf;
1661 break;
1662 default:
1663 appendstr = "";
1664 log(LOG_ERR,
1665 "Unknown format character %c in `%s'\n",
1666 format[i], format);
1667 }
1668 l = strlen(appendstr);
1669 if ((n + l) >= cf_name_len)
1670 goto toolong;
1671 bcopy(appendstr, cf_name + n, l);
1672 n += l;
1673 break;
1674 default:
1675 cf_name[n++] = format[i];
1676 }
1677 }
1678 if (format[i] != '\0')
1679 goto toolong;
1680 return (0);
1681 toolong:
1682 log(LOG_ERR, "pid %ld (%s), uid (%u): corename is too long\n",
1683 (long)pid, name, (uint32_t)uid);
1684 return (1);
1685 }
1686
1687 /* Code Signing related routines */
1688
1689 int
1690 csops(__unused proc_t p, struct csops_args *uap, __unused int32_t *retval)
1691 {
1692 return(csops_internal(uap->pid, uap->ops, uap->useraddr,
1693 uap->usersize, USER_ADDR_NULL));
1694 }
1695
1696 int
1697 csops_audittoken(__unused proc_t p, struct csops_audittoken_args *uap, __unused int32_t *retval)
1698 {
1699 if (uap->uaudittoken == USER_ADDR_NULL)
1700 return(EINVAL);
1701 return(csops_internal(uap->pid, uap->ops, uap->useraddr,
1702 uap->usersize, uap->uaudittoken));
1703 }
1704
1705 static int
1706 csops_copy_token(void *start, size_t length, user_size_t usize, user_addr_t uaddr)
1707 {
1708 char fakeheader[8] = { 0 };
1709 int error;
1710
1711 if (usize < sizeof(fakeheader))
1712 return ERANGE;
1713
1714 /* if no blob, fill in zero header */
1715 if (NULL == start) {
1716 start = fakeheader;
1717 length = sizeof(fakeheader);
1718 } else if (usize < length) {
1719 /* ... if input too short, copy out length of entitlement */
1720 uint32_t length32 = htonl((uint32_t)length);
1721 memcpy(&fakeheader[4], &length32, sizeof(length32));
1722
1723 error = copyout(fakeheader, uaddr, sizeof(fakeheader));
1724 if (error == 0)
1725 return ERANGE; /* input buffer to short, ERANGE signals that */
1726 return error;
1727 }
1728 return copyout(start, uaddr, length);
1729 }
1730
/*
 * Guts of csops(2)/csops_audittoken(2): perform code-signing operation
 * `ops` on the process identified by `pid` (0 means the caller).
 * `uaddr`/`usersize` describe the user buffer for the op's payload.
 * When `uaudittoken` is non-NULL it must hold an audit token whose
 * pid/idversion match the target process.
 */
static int
csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaudittoken)
{
	size_t usize = (size_t)CAST_DOWN(size_t, usersize);
	proc_t pt;
	int forself;	/* 1 when the target is the calling process */
	int error;
	vnode_t tvp;
	off_t toff;
	unsigned char cdhash[SHA1_RESULTLEN];
	audit_token_t token;
	unsigned int upid=0, uidversion = 0;

	forself = error = 0;

	if (pid == 0)
		pid = proc_selfpid();
	if (pid == proc_selfpid())
		forself = 1;


	/*
	 * Read-only query ops are unrestricted; every other op requires
	 * that the caller either targets itself or is superuser.
	 */
	switch (ops) {
		case CS_OPS_STATUS:
		case CS_OPS_CDHASH:
		case CS_OPS_PIDOFFSET:
		case CS_OPS_ENTITLEMENTS_BLOB:
		case CS_OPS_IDENTITY:
		case CS_OPS_BLOB:
			break;	/* unrestricted */
		default:
			if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE)
				return(EPERM);
			break;
	}

	/* Take a ref on the target; dropped at `out` or on early return. */
	pt = proc_find(pid);
	if (pt == PROC_NULL)
		return(ESRCH);

	upid = pt->p_pid;
	uidversion = pt->p_idversion;
	if (uaudittoken != USER_ADDR_NULL) {

		error = copyin(uaudittoken, &token, sizeof(audit_token_t));
		if (error != 0)
			goto out;
		/* verify the audit token pid/idversion matches with proc */
		if ((token.val[5] != upid) || (token.val[7] != uidversion)) {
			error = ESRCH;
			goto out;
		}
	}

	switch (ops) {

		case CS_OPS_STATUS: {
			/* Report the process's code-signing flags. */
			uint32_t retflags;

			proc_lock(pt);
			retflags = pt->p_csflags;
			if (cs_enforcement(pt))
				retflags |= CS_ENFORCEMENT;
			if (csproc_get_platform_binary(pt))
				retflags |= CS_PLATFORM_BINARY;
			proc_unlock(pt);

			if (uaddr != USER_ADDR_NULL)
				error = copyout(&retflags, uaddr, sizeof(uint32_t));
			break;
		}
		case CS_OPS_MARKINVALID:
			/* Invalidate the signature; kill if CS_KILL is set. */
			proc_lock(pt);
			if ((pt->p_csflags & CS_VALID) == CS_VALID) {	/* is currently valid */
				pt->p_csflags &= ~CS_VALID;	/* set invalid */
				if ((pt->p_csflags & CS_KILL) == CS_KILL) {
					pt->p_csflags |= CS_KILLED;
					proc_unlock(pt);
					if (cs_debug) {
						/* NOTE(review): p_csflags is read here
						 * without the proc lock held. */
						printf("CODE SIGNING: marked invalid by pid %d: "
						    "p=%d[%s] honoring CS_KILL, final status 0x%x\n",
						    proc_selfpid(), pt->p_pid, pt->p_comm, pt->p_csflags);
					}
					psignal(pt, SIGKILL);
				} else
					proc_unlock(pt);
			} else
				proc_unlock(pt);

			break;

		case CS_OPS_MARKHARD:
			/* Set CS_HARD; fails with EINVAL if already invalid. */
			proc_lock(pt);
			pt->p_csflags |= CS_HARD;
			if ((pt->p_csflags & CS_VALID) == 0) {
				/* @@@ allow? reject? kill? @@@ */
				proc_unlock(pt);
				error = EINVAL;
				goto out;
			} else
				proc_unlock(pt);
			break;

		case CS_OPS_MARKKILL:
			/* Set CS_KILL; kill immediately if already invalid. */
			proc_lock(pt);
			pt->p_csflags |= CS_KILL;
			if ((pt->p_csflags & CS_VALID) == 0) {
				proc_unlock(pt);
				psignal(pt, SIGKILL);
			} else
				proc_unlock(pt);
			break;

		case CS_OPS_PIDOFFSET:
			/* Copy out the text offset; note the early return
			 * drops the proc ref itself (does not go through out:). */
			toff = pt->p_textoff;
			proc_rele(pt);
			error = copyout(&toff, uaddr, sizeof(toff));
			return(error);

		case CS_OPS_CDHASH:

			/* pt already holds a reference on its p_textvp */
			tvp = pt->p_textvp;
			toff = pt->p_textoff;

			if (tvp == NULLVP || usize != SHA1_RESULTLEN) {
				proc_rele(pt);
				return EINVAL;
			}

			error = vn_getcdhash(tvp, toff, cdhash);
			proc_rele(pt);

			if (error == 0) {
				error = copyout(cdhash, uaddr, sizeof (cdhash));
			}

			return error;

		case CS_OPS_ENTITLEMENTS_BLOB: {
			/* Copy out the entitlements blob (valid signature only). */
			void *start;
			size_t length;

			proc_lock(pt);

			if ((pt->p_csflags & CS_VALID) == 0) {
				proc_unlock(pt);
				error = EINVAL;
				break;
			}

			error = cs_entitlements_blob_get(pt, &start, &length);
			proc_unlock(pt);
			if (error)
				break;

			error = csops_copy_token(start, length, usize, uaddr);
			break;
		}
		case CS_OPS_MARKRESTRICT:
			proc_lock(pt);
			pt->p_csflags |= CS_RESTRICT;
			proc_unlock(pt);
			break;

		case CS_OPS_SET_STATUS: {
			/* OR user-supplied flags into p_csflags (subset only). */
			uint32_t flags;

			if (usize < sizeof(flags)) {
				error = ERANGE;
				break;
			}

			error = copyin(uaddr, &flags, sizeof(flags));
			if (error)
				break;

			/* only allow setting a subset of all code sign flags */
			flags &=
			    CS_HARD | CS_EXEC_SET_HARD |
			    CS_KILL | CS_EXEC_SET_KILL |
			    CS_RESTRICT |
			    CS_REQUIRE_LV |
			    CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT |
			    CS_ENTITLEMENTS_VALIDATED;

			proc_lock(pt);
			if (pt->p_csflags & CS_VALID)
				pt->p_csflags |= flags;
			else
				error = EINVAL;
			proc_unlock(pt);

			break;
		}
		case CS_OPS_BLOB: {
			/* Copy out the whole code-signing blob. */
			void *start;
			size_t length;

			proc_lock(pt);
			if ((pt->p_csflags & CS_VALID) == 0) {
				proc_unlock(pt);
				error = EINVAL;
				break;
			}

			error = cs_blob_get(pt, &start, &length);
			proc_unlock(pt);
			if (error)
				break;

			error = csops_copy_token(start, length, usize, uaddr);
			break;
		}
		case CS_OPS_IDENTITY: {
			const char *identity;
			uint8_t fakeheader[8];
			uint32_t idlen;
			size_t length;

			/*
			 * Make identity have a blob header to make it
			 * easier on userland to guess the identity
			 * length.
			 */
			if (usize < sizeof(fakeheader)) {
				error = ERANGE;
				break;
			}
			memset(fakeheader, 0, sizeof(fakeheader));

			proc_lock(pt);
			if ((pt->p_csflags & CS_VALID) == 0) {
				proc_unlock(pt);
				error = EINVAL;
				break;
			}

			identity = cs_identity_get(pt);
			proc_unlock(pt);
			if (identity == NULL) {
				error = ENOENT;
				break;
			}

			length = strlen(identity) + 1;	/* include NUL */
			idlen = htonl(length + sizeof(fakeheader));
			memcpy(&fakeheader[4], &idlen, sizeof(idlen));

			error = copyout(fakeheader, uaddr, sizeof(fakeheader));
			if (error)
				break;

			/* Copy the identity string after the header if it fits. */
			if (usize < sizeof(fakeheader) + length)
				error = ERANGE;
			else if (usize > sizeof(fakeheader))
				error = copyout(identity, uaddr + sizeof(fakeheader), length);

			break;
		}

		default:
			error = EINVAL;
			break;
	}
out:
	proc_rele(pt);
	return(error);
}
1999
2000 int
2001 proc_iterate(flags, callout, arg, filterfn, filterarg)
2002 int flags;
2003 int (*callout)(proc_t, void *);
2004 void * arg;
2005 int (*filterfn)(proc_t, void *);
2006 void * filterarg;
2007 {
2008 proc_t p;
2009 pid_t * pid_list;
2010 int count, pidcount, alloc_count, i, retval;
2011
2012 count = nprocs+ 10;
2013 if (count > hard_maxproc)
2014 count = hard_maxproc;
2015 alloc_count = count * sizeof(pid_t);
2016 pid_list = (pid_t *)kalloc(alloc_count);
2017 bzero(pid_list, alloc_count);
2018
2019
2020 proc_list_lock();
2021
2022
2023 pidcount = 0;
2024 if (flags & PROC_ALLPROCLIST) {
2025 for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
2026 if (p->p_stat == SIDL)
2027 continue;
2028 if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
2029 pid_list[pidcount] = p->p_pid;
2030 pidcount++;
2031 if (pidcount >= count)
2032 break;
2033 }
2034 }
2035 }
2036 if ((pidcount < count ) && (flags & PROC_ZOMBPROCLIST)) {
2037 for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) {
2038 if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
2039 pid_list[pidcount] = p->p_pid;
2040 pidcount++;
2041 if (pidcount >= count)
2042 break;
2043 }
2044 }
2045 }
2046
2047
2048 proc_list_unlock();
2049
2050
2051 for (i = 0; i< pidcount; i++) {
2052 p = proc_find(pid_list[i]);
2053 if (p) {
2054 if ((flags & PROC_NOWAITTRANS) == 0)
2055 proc_transwait(p, 0);
2056 retval = callout(p, arg);
2057
2058 switch (retval) {
2059 case PROC_RETURNED:
2060 proc_rele(p);
2061 break;
2062 case PROC_RETURNED_DONE:
2063 proc_rele(p);
2064 goto out;
2065 case PROC_CLAIMED_DONE:
2066 goto out;
2067 case PROC_CLAIMED:
2068 default:
2069 break;
2070 }
2071 } else if (flags & PROC_ZOMBPROCLIST) {
2072 p = proc_find_zombref(pid_list[i]);
2073 if (p != PROC_NULL) {
2074 retval = callout(p, arg);
2075
2076 switch (retval) {
2077 case PROC_RETURNED:
2078 proc_drop_zombref(p);
2079 break;
2080 case PROC_RETURNED_DONE:
2081 proc_drop_zombref(p);
2082 goto out;
2083 case PROC_CLAIMED_DONE:
2084 goto out;
2085 case PROC_CLAIMED:
2086 default:
2087 break;
2088 }
2089 }
2090 }
2091 }
2092
2093 out:
2094 kfree(pid_list, alloc_count);
2095 return(0);
2096
2097 }
2098
2099
#if 0
/* This is for iteration in case of trivial non blocking callouts */
/*
 * NOTE: compiled out.  Walks allproc/zombproc while holding the proc
 * list lock for the whole scan, so callouts must never block.
 */
int
proc_scanall(flags, callout, arg)
	int flags;
	int (*callout)(proc_t, void *);
	void * arg;
{
	proc_t p;
	int retval;


	proc_list_lock();


	if (flags & PROC_ALLPROCLIST) {
		for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
			retval = callout(p, arg);
			if (retval == PROC_RETURNED_DONE)
				goto out;
		}
	}
	if (flags & PROC_ZOMBPROCLIST) {
		for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) {
			retval = callout(p, arg);
			if (retval == PROC_RETURNED_DONE)
				goto out;
		}
	}
out:

	proc_list_unlock();

	return(0);
}
#endif
2136
2137
/*
 * Shutdown-time walk of allproc: run `callout` on each process passing
 * `filterfn`, restarting the scan from the head after every match
 * because the list can change while the list lock is dropped.
 */
int
proc_rebootscan(callout, arg, filterfn, filterarg)
	int (*callout)(proc_t, void *);
	void * arg;
	int (*filterfn)(proc_t, void *);
	void * filterarg;
{
	proc_t p;
	int lockheld = 0, retval;

	proc_shutdown_exitcount = 0;

ps_allprocscan:

	proc_list_lock();

	lockheld = 1;

	for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
		if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
			p = proc_ref_locked(p);

			/* Drop the list lock before the possibly-blocking callout. */
			proc_list_unlock();
			lockheld = 0;

			if (p) {
				proc_transwait(p, 0);
				retval = callout(p, arg);
				proc_rele(p);

				switch (retval) {
					case PROC_RETURNED_DONE:
					case PROC_CLAIMED_DONE:
						goto out;
				}
			}
			/* The list may have changed while unlocked; restart. */
			goto ps_allprocscan;
		} /* filter pass */
	} /* allproc walk thru */

	if (lockheld == 1) {
		proc_list_unlock();
		lockheld = 0;
	}

out:
	return(0);

}
2187
2188
2189 int
2190 proc_childrenwalk(parent, callout, arg)
2191 struct proc * parent;
2192 int (*callout)(proc_t, void *);
2193 void * arg;
2194 {
2195 register struct proc *p;
2196 pid_t * pid_list;
2197 int count, pidcount, alloc_count, i, retval;
2198
2199 count = nprocs+ 10;
2200 if (count > hard_maxproc)
2201 count = hard_maxproc;
2202 alloc_count = count * sizeof(pid_t);
2203 pid_list = (pid_t *)kalloc(alloc_count);
2204 bzero(pid_list, alloc_count);
2205
2206
2207 proc_list_lock();
2208
2209
2210 pidcount = 0;
2211 for (p = parent->p_children.lh_first; (p != 0); p = p->p_sibling.le_next) {
2212 if (p->p_stat == SIDL)
2213 continue;
2214 pid_list[pidcount] = p->p_pid;
2215 pidcount++;
2216 if (pidcount >= count)
2217 break;
2218 }
2219 proc_list_unlock();
2220
2221
2222 for (i = 0; i< pidcount; i++) {
2223 p = proc_find(pid_list[i]);
2224 if (p) {
2225 proc_transwait(p, 0);
2226 retval = callout(p, arg);
2227
2228 switch (retval) {
2229 case PROC_RETURNED:
2230 case PROC_RETURNED_DONE:
2231 proc_rele(p);
2232 if (retval == PROC_RETURNED_DONE) {
2233 goto out;
2234 }
2235 break;
2236
2237 case PROC_CLAIMED_DONE:
2238 goto out;
2239 case PROC_CLAIMED:
2240 default:
2241 break;
2242 }
2243 }
2244 }
2245
2246 out:
2247 kfree(pid_list, alloc_count);
2248 return(0);
2249
2250 }
2251
2252 /*
2253 */
2254 /* PGRP_BLOCKITERATE is not implemented yet */
2255 int
2256 pgrp_iterate(pgrp, flags, callout, arg, filterfn, filterarg)
2257 struct pgrp *pgrp;
2258 int flags;
2259 int (*callout)(proc_t, void *);
2260 void * arg;
2261 int (*filterfn)(proc_t, void *);
2262 void * filterarg;
2263 {
2264 proc_t p;
2265 pid_t * pid_list;
2266 int count, pidcount, i, alloc_count;
2267 int retval;
2268 pid_t pgid;
2269 int dropref = flags & PGRP_DROPREF;
2270 #if 0
2271 int serialize = flags & PGRP_BLOCKITERATE;
2272 #else
2273 int serialize = 0;
2274 #endif
2275
2276 if (pgrp == 0)
2277 return(0);
2278 count = pgrp->pg_membercnt + 10;
2279 if (count > hard_maxproc)
2280 count = hard_maxproc;
2281 alloc_count = count * sizeof(pid_t);
2282 pid_list = (pid_t *)kalloc(alloc_count);
2283 bzero(pid_list, alloc_count);
2284
2285 pgrp_lock(pgrp);
2286 if (serialize != 0) {
2287 while ((pgrp->pg_listflags & PGRP_FLAG_ITERABEGIN) == PGRP_FLAG_ITERABEGIN) {
2288 pgrp->pg_listflags |= PGRP_FLAG_ITERWAIT;
2289 msleep(&pgrp->pg_listflags, &pgrp->pg_mlock, 0, "pgrp_iterate", 0);
2290 }
2291 pgrp->pg_listflags |= PGRP_FLAG_ITERABEGIN;
2292 }
2293
2294 pgid = pgrp->pg_id;
2295
2296 pidcount = 0;
2297 for (p = pgrp->pg_members.lh_first; p != 0;
2298 p = p->p_pglist.le_next) {
2299 if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
2300 pid_list[pidcount] = p->p_pid;
2301 pidcount++;
2302 if (pidcount >= count)
2303 break;
2304 }
2305 }
2306
2307
2308 pgrp_unlock(pgrp);
2309 if ((serialize == 0) && (dropref != 0))
2310 pg_rele(pgrp);
2311
2312
2313 for (i = 0; i< pidcount; i++) {
2314 /* No handling or proc0 */
2315 if (pid_list[i] == 0)
2316 continue;
2317 p = proc_find(pid_list[i]);
2318 if (p) {
2319 if (p->p_pgrpid != pgid) {
2320 proc_rele(p);
2321 continue;
2322 }
2323 proc_transwait(p, 0);
2324 retval = callout(p, arg);
2325
2326 switch (retval) {
2327 case PROC_RETURNED:
2328 case PROC_RETURNED_DONE:
2329 proc_rele(p);
2330 if (retval == PROC_RETURNED_DONE) {
2331 goto out;
2332 }
2333 break;
2334
2335 case PROC_CLAIMED_DONE:
2336 goto out;
2337 case PROC_CLAIMED:
2338 default:
2339 break;
2340 }
2341 }
2342 }
2343 out:
2344 if (serialize != 0) {
2345 pgrp_lock(pgrp);
2346 pgrp->pg_listflags &= ~PGRP_FLAG_ITERABEGIN;
2347 if ((pgrp->pg_listflags & PGRP_FLAG_ITERWAIT) == PGRP_FLAG_ITERWAIT) {
2348 pgrp->pg_listflags &= ~PGRP_FLAG_ITERWAIT;
2349 wakeup(&pgrp->pg_listflags);
2350 }
2351 pgrp_unlock(pgrp);
2352 if (dropref != 0)
2353 pg_rele(pgrp);
2354 }
2355 kfree(pid_list, alloc_count);
2356 return(0);
2357 }
2358
/*
 * Add `child` to process group `pgrp`, linking it after `parent` in the
 * member list when a parent is given.  The caller's reference on pgrp
 * makes it safe to resurrect a group flagged TERMINATE (but never one
 * already DEAD).
 */
static void
pgrp_add(struct pgrp * pgrp, struct proc * parent, struct proc * child)
{
	proc_list_lock();
	child->p_pgrp = pgrp;
	child->p_pgrpid = pgrp->pg_id;
	child->p_listflag |= P_LIST_INPGRP;
	/*
	 * When pgrp is being freed , a process can still
	 * request addition using setpgid from bash when
	 * login is terminated (login cycler) return ESRCH
	 * Safe to hold lock due to refcount on pgrp
	 */
	if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
		pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	if ((pgrp->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
		panic("pgrp_add : pgrp is dead adding process");
	proc_list_unlock();

	pgrp_lock(pgrp);
	pgrp->pg_membercnt++;
	if ( parent != PROC_NULL) {
		LIST_INSERT_AFTER(parent, child, p_pglist);
	}else {
		LIST_INSERT_HEAD(&pgrp->pg_members, child, p_pglist);
	}
	pgrp_unlock(pgrp);

	/* Re-clear TERMINATE in case it was re-set while pgrp was unlocked. */
	proc_list_lock();
	if (((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (pgrp->pg_membercnt != 0)) {
		pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}
	proc_list_unlock();
}
2395
/*
 * Remove `p` from its process group.  Consumes the group reference that
 * proc_pgrp() returns: the group is torn down (pgdelete_dropref) when p
 * was the last member, otherwise the ref is simply dropped.
 */
static void
pgrp_remove(struct proc * p)
{
	struct pgrp * pg;

	/* Take a ref; consumed below via pgdelete_dropref()/pg_rele(). */
	pg = proc_pgrp(p);

	proc_list_lock();
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INPGRP) == 0)
		panic("removing from pglist but no named ref\n");
#endif
	p->p_pgrpid = PGRPID_DEAD;
	p->p_listflag &= ~P_LIST_INPGRP;
	p->p_pgrp = NULL;
	proc_list_unlock();

	if (pg == PGRP_NULL)
		panic("pgrp_remove: pg is NULL");
	pgrp_lock(pg);
	pg->pg_membercnt--;

	if (pg->pg_membercnt < 0)
		panic("pgprp: -ve membercnt pgprp:%p p:%p\n",pg, p);

	LIST_REMOVE(p, p_pglist);
	if (pg->pg_members.lh_first == 0) {
		/* Last member: tear the (now empty) group down. */
		pgrp_unlock(pg);
		pgdelete_dropref(pg);
	} else {
		pgrp_unlock(pg);
		pg_rele(pg);
	}
}
2430
2431
2432 /* cannot use proc_pgrp as it maybe stalled */
/*
 * Atomically (with respect to proc_pgrp()/proc_session() lookups) move
 * `p` from its current process group into `newpg`.  The P_LIST_PGRPTRANS
 * flag makes concurrent lookups sleep until the move completes.
 */
static void
pgrp_replace(struct proc * p, struct pgrp * newpg)
{
	struct pgrp * oldpg;



	proc_list_lock();

	/* Wait out any move already in progress for this process. */
	while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
		p->p_listflag |= P_LIST_PGRPTRWAIT;
		(void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
	}

	p->p_listflag |= P_LIST_PGRPTRANS;

	oldpg = p->p_pgrp;
	if (oldpg == PGRP_NULL)
		panic("pgrp_replace: oldpg NULL");
	/* Take a ref on the old group; consumed below. */
	oldpg->pg_refcount++;
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INPGRP) == 0)
		panic("removing from pglist but no named ref\n");
#endif
	p->p_pgrpid = PGRPID_DEAD;
	p->p_listflag &= ~P_LIST_INPGRP;
	p->p_pgrp = NULL;

	proc_list_unlock();

	pgrp_lock(oldpg);
	oldpg->pg_membercnt--;
	if (oldpg->pg_membercnt < 0)
		panic("pgprp: -ve membercnt pgprp:%p p:%p\n",oldpg, p);
	LIST_REMOVE(p, p_pglist);
	if (oldpg->pg_members.lh_first == 0) {
		/* p was the last member: tear the old group down. */
		pgrp_unlock(oldpg);
		pgdelete_dropref(oldpg);
	} else {
		pgrp_unlock(oldpg);
		pg_rele(oldpg);
	}

	proc_list_lock();
	p->p_pgrp = newpg;
	p->p_pgrpid = newpg->pg_id;
	p->p_listflag |= P_LIST_INPGRP;
	/*
	 * When pgrp is being freed , a process can still
	 * request addition using setpgid from bash when
	 * login is terminated (login cycler) return ESRCH
	 * Safe to hold lock due to refcount on pgrp
	 */
	if ((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
		newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	if ((newpg->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
		panic("pgrp_add : pgrp is dead adding process");
	proc_list_unlock();

	pgrp_lock(newpg);
	newpg->pg_membercnt++;
	LIST_INSERT_HEAD(&newpg->pg_members, p, p_pglist);
	pgrp_unlock(newpg);

	proc_list_lock();
	/* Re-clear TERMINATE in case it was re-set while newpg was unlocked. */
	if (((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (newpg->pg_membercnt != 0)) {
		newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	/* Move done: clear the transition flag and wake any waiters. */
	p->p_listflag &= ~P_LIST_PGRPTRANS;
	if ((p->p_listflag & P_LIST_PGRPTRWAIT) == P_LIST_PGRPTRWAIT) {
		p->p_listflag &= ~P_LIST_PGRPTRWAIT;
		wakeup(&p->p_pgrpid);

	}
	proc_list_unlock();
}
2512
2513 void
2514 pgrp_lock(struct pgrp * pgrp)
2515 {
2516 lck_mtx_lock(&pgrp->pg_mlock);
2517 }
2518
2519 void
2520 pgrp_unlock(struct pgrp * pgrp)
2521 {
2522 lck_mtx_unlock(&pgrp->pg_mlock);
2523 }
2524
2525 void
2526 session_lock(struct session * sess)
2527 {
2528 lck_mtx_lock(&sess->s_mlock);
2529 }
2530
2531
2532 void
2533 session_unlock(struct session * sess)
2534 {
2535 lck_mtx_unlock(&sess->s_mlock);
2536 }
2537
2538 struct pgrp *
2539 proc_pgrp(proc_t p)
2540 {
2541 struct pgrp * pgrp;
2542
2543 if (p == PROC_NULL)
2544 return(PGRP_NULL);
2545 proc_list_lock();
2546
2547 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2548 p->p_listflag |= P_LIST_PGRPTRWAIT;
2549 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2550 }
2551
2552 pgrp = p->p_pgrp;
2553
2554 assert(pgrp != NULL);
2555
2556 if (pgrp != PGRP_NULL) {
2557 pgrp->pg_refcount++;
2558 if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) != 0)
2559 panic("proc_pgrp: ref being povided for dead pgrp");
2560 }
2561
2562 proc_list_unlock();
2563
2564 return(pgrp);
2565 }
2566
2567 struct pgrp *
2568 tty_pgrp(struct tty * tp)
2569 {
2570 struct pgrp * pg = PGRP_NULL;
2571
2572 proc_list_lock();
2573 pg = tp->t_pgrp;
2574
2575 if (pg != PGRP_NULL) {
2576 if ((pg->pg_listflags & PGRP_FLAG_DEAD) != 0)
2577 panic("tty_pgrp: ref being povided for dead pgrp");
2578 pg->pg_refcount++;
2579 }
2580 proc_list_unlock();
2581
2582 return(pg);
2583 }
2584
2585 struct session *
2586 proc_session(proc_t p)
2587 {
2588 struct session * sess = SESSION_NULL;
2589
2590 if (p == PROC_NULL)
2591 return(SESSION_NULL);
2592
2593 proc_list_lock();
2594
2595 /* wait during transitions */
2596 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2597 p->p_listflag |= P_LIST_PGRPTRWAIT;
2598 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2599 }
2600
2601 if ((p->p_pgrp != PGRP_NULL) && ((sess = p->p_pgrp->pg_session) != SESSION_NULL)) {
2602 if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
2603 panic("proc_session:returning sesssion ref on terminating session");
2604 sess->s_count++;
2605 }
2606 proc_list_unlock();
2607 return(sess);
2608 }
2609
/*
 * Drop a reference on a session (see proc_session()).  When the last
 * reference goes away the session is unhashed, its mutex destroyed,
 * and its memory freed back to the zone.
 */
void
session_rele(struct session *sess)
{
	proc_list_lock();
	if (--sess->s_count == 0) {
		if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
			panic("session_rele: terminating already terminated session");
		sess->s_listflags |= S_LIST_TERM;
		LIST_REMOVE(sess, s_hash);
		sess->s_listflags |= S_LIST_DEAD;
		/* sanity: no one may have gained a ref while we were tearing down */
		if (sess->s_count != 0)
			panic("session_rele: freeing session in use");
		/* drop the list lock before destroying the mutex / freeing */
		proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_destroy(&sess->s_mlock, proc_mlock_grp);
#else
		lck_mtx_destroy(&sess->s_mlock, proc_lck_grp);
#endif
		FREE_ZONE(sess, sizeof(struct session), M_SESSION);
	} else
		proc_list_unlock();
}
2632
2633 int
2634 proc_transstart(proc_t p, int locked, int non_blocking)
2635 {
2636 if (locked == 0)
2637 proc_lock(p);
2638 while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
2639 if (((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT) || non_blocking) {
2640 if (locked == 0)
2641 proc_unlock(p);
2642 return EDEADLK;
2643 }
2644 p->p_lflag |= P_LTRANSWAIT;
2645 msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
2646 }
2647 p->p_lflag |= P_LINTRANSIT;
2648 p->p_transholder = current_thread();
2649 if (locked == 0)
2650 proc_unlock(p);
2651 return 0;
2652 }
2653
/*
 * Commit an in-progress transition on p (sets P_LTRANSCOMMIT) and
 * wake any waiters.  Caller must be the transition holder that
 * started it with proc_transstart().
 *
 * locked: non-zero if the caller already holds the proc lock.
 */
void
proc_transcommit(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);

	assert ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT);
	assert (p->p_transholder == current_thread());
	p->p_lflag |= P_LTRANSCOMMIT;

	/* wake anyone sleeping in proc_transstart()/proc_transwait() */
	if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
		p->p_lflag &= ~P_LTRANSWAIT;
		wakeup(&p->p_lflag);
	}
	if (locked == 0)
		proc_unlock(p);
}
2671
/*
 * End a transition on p: clear P_LINTRANSIT/P_LTRANSCOMMIT, release
 * the transition holder, and wake any waiters.
 *
 * locked: non-zero if the caller already holds the proc lock.
 */
void
proc_transend(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);

	p->p_lflag &= ~( P_LINTRANSIT | P_LTRANSCOMMIT);
	p->p_transholder = NULL;

	/* wake anyone sleeping in proc_transstart()/proc_transwait() */
	if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
		p->p_lflag &= ~P_LTRANSWAIT;
		wakeup(&p->p_lflag);
	}
	if (locked == 0)
		proc_unlock(p);
}
2688
2689 int
2690 proc_transwait(proc_t p, int locked)
2691 {
2692 if (locked == 0)
2693 proc_lock(p);
2694 while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
2695 if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT && current_proc() == p) {
2696 if (locked == 0)
2697 proc_unlock(p);
2698 return EDEADLK;
2699 }
2700 p->p_lflag |= P_LTRANSWAIT;
2701 msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
2702 }
2703 if (locked == 0)
2704 proc_unlock(p);
2705 return 0;
2706 }
2707
/*
 * Acquire the global lock guarding per-proc knote lists (p_klist).
 */
void
proc_klist_lock(void)
{
	lck_mtx_lock(proc_klist_mlock);
}
2713
/*
 * Release the global lock guarding per-proc knote lists (p_klist).
 */
void
proc_klist_unlock(void)
{
	lck_mtx_unlock(proc_klist_mlock);
}
2719
/*
 * Post an event (hint) to every knote attached to p's klist,
 * under the global proc klist lock.
 */
void
proc_knote(struct proc * p, long hint)
{
	proc_klist_lock();
	KNOTE(&p->p_klist, hint);
	proc_klist_unlock();
}
2727
/*
 * Detach every knote from p's klist and clear each knote's back
 * pointer to the proc, so no knote references p after it is reaped.
 */
void
proc_knote_drain(struct proc *p)
{
	struct knote *kn = NULL;

	/*
	 * Clear the proc's klist to avoid references after the proc is reaped.
	 */
	proc_klist_lock();
	while ((kn = SLIST_FIRST(&p->p_klist))) {
		/* break the knote -> proc back pointer before detaching */
		kn->kn_ptr.p_proc = PROC_NULL;
		KNOTE_DETACH(&p->p_klist, kn);
	}
	proc_klist_unlock();
}
2743
/*
 * Set the P_LREGISTER flag on p, under the proc lock.
 */
void
proc_setregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag |= P_LREGISTER;
	proc_unlock(p);
}
2751
/*
 * Clear the P_LREGISTER flag on p, under the proc lock.
 */
void
proc_resetregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag &= ~P_LREGISTER;
	proc_unlock(p);
}
2759
/*
 * Return the cached process group id of p (no locking; p must be valid).
 */
pid_t
proc_pgrpid(proc_t p)
{
	return p->p_pgrpid;
}
2765
2766 pid_t
2767 proc_selfpgrpid()
2768 {
2769 return current_proc()->p_pgrpid;
2770 }
2771
2772
2773 /* return control and action states */
2774 int
2775 proc_getpcontrol(int pid, int * pcontrolp)
2776 {
2777 proc_t p;
2778
2779 p = proc_find(pid);
2780 if (p == PROC_NULL)
2781 return(ESRCH);
2782 if (pcontrolp != NULL)
2783 *pcontrolp = p->p_pcaction;
2784
2785 proc_rele(p);
2786 return(0);
2787 }
2788
/*
 * Apply the low-swap action configured for p (p_pcaction), if no
 * action has been taken yet: throttle, suspend the task, or kill.
 * Marks the action as taken before dropping the proc lock so the
 * action fires at most once.  Always returns PROC_RETURNED.
 */
int
proc_dopcontrol(proc_t p)
{
	int pcontrol;

	proc_lock(p);

	pcontrol = PROC_CONTROL_STATE(p);

	/* only act if no action has been taken yet */
	if (PROC_ACTION_STATE(p) == 0) {
		switch(pcontrol) {

		case P_PCTHROTTLE:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: throttling pid %d (%s)\n", p->p_pid, p->p_comm);
			break;

		case P_PCSUSP:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: suspending pid %d (%s)\n", p->p_pid, p->p_comm);
			task_suspend(p->task);
			break;

		case P_PCKILL:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
			psignal(p, SIGKILL);
			break;

		default:
			proc_unlock(p);
		}

	} else
		proc_unlock(p);

	return(PROC_RETURNED);
}
2829
2830
/*
 * Resume a throttled or suspended process.  This is an internal interface that's only
 * used by the user level code that presents the GUI when we run out of swap space and
 * hence is restricted to processes with superuser privileges.
 *
 * Returns ESRCH if pid does not exist, EPERM-class error from suser()
 * if the caller is neither the VM resource owner nor root, 0 otherwise.
 */

int
proc_resetpcontrol(int pid)
{
	proc_t p;
	int pcontrol;
	int error;
	proc_t self = current_proc();

	/* if the process has been validated to handle resource control or root is valid one */
	if (((self->p_lflag & P_LVMRSRCOWNER) == 0) && (error = suser(kauth_cred_get(), 0)))
		return error;

	p = proc_find(pid);
	if (p == PROC_NULL)
		return(ESRCH);
	
	proc_lock(p);

	pcontrol = PROC_CONTROL_STATE(p);

	/* only undo an action that has actually been taken */
	if(PROC_ACTION_STATE(p) !=0) {
		switch(pcontrol) {

		case P_PCTHROTTLE:
			PROC_RESETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: unthrottling pid %d (%s)\n", p->p_pid, p->p_comm);
			break;

		case P_PCSUSP:
			PROC_RESETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: resuming pid %d (%s)\n", p->p_pid, p->p_comm);
			task_resume(p->task);
			break;

		case P_PCKILL:
			/* Huh? */
			/*
			 * NOTE(review): SETACTION here (not RESET) looks like it may be
			 * intentional — a killed process can't be "unkilled" — but the
			 * asymmetry with the other cases is worth confirming.
			 */
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: attempt to unkill pid %d (%s) ignored\n", p->p_pid, p->p_comm);
			break;

		default:
			proc_unlock(p);
		}

	} else 
		proc_unlock(p);

	proc_rele(p);
	return(0);
}
2889
2890
2891
/*
 * Accumulator filled in by proc_pcontrol_filter() while scanning all
 * processes during a low-swap event.  Three buckets:
 *   pcs_*  - processes with a control state set but no action taken yet
 *   npcs_* - processes with no control state (non-actionable)
 *   apcs_* - processes whose action has already been taken
 * The *_max_size/_pid/_uniqueid fields track the largest compressor
 * consumer in each bucket.
 */
struct no_paging_space
{
	uint64_t	pcs_max_size;
	uint64_t	pcs_uniqueid;
	int		pcs_pid;
	int		pcs_proc_count;
	uint64_t	pcs_total_size;

	uint64_t	npcs_max_size;
	uint64_t	npcs_uniqueid;
	int		npcs_pid;
	int		npcs_proc_count;
	uint64_t	npcs_total_size;

	int		apcs_proc_count;
	uint64_t	apcs_total_size;
};
2909
2910
2911 static int
2912 proc_pcontrol_filter(proc_t p, void *arg)
2913 {
2914 struct no_paging_space *nps;
2915 uint64_t compressed;
2916
2917 nps = (struct no_paging_space *)arg;
2918
2919 compressed = get_task_compressed(p->task);
2920
2921 if (PROC_CONTROL_STATE(p)) {
2922 if (PROC_ACTION_STATE(p) == 0) {
2923 if (compressed > nps->pcs_max_size) {
2924 nps->pcs_pid = p->p_pid;
2925 nps->pcs_uniqueid = p->p_uniqueid;
2926 nps->pcs_max_size = compressed;
2927 }
2928 nps->pcs_total_size += compressed;
2929 nps->pcs_proc_count++;
2930 } else {
2931 nps->apcs_total_size += compressed;
2932 nps->apcs_proc_count++;
2933 }
2934 } else {
2935 if (compressed > nps->npcs_max_size) {
2936 nps->npcs_pid = p->p_pid;
2937 nps->npcs_uniqueid = p->p_uniqueid;
2938 nps->npcs_max_size = compressed;
2939 }
2940 nps->npcs_total_size += compressed;
2941 nps->npcs_proc_count++;
2942
2943 }
2944 return (0);
2945 }
2946
2947
/*
 * No-op proc_iterate() callout (the filter does all the work);
 * returns PROC_RETURNED so iteration continues normally.
 */
static int
proc_pcontrol_null(__unused proc_t p, __unused void *arg)
{
	return(PROC_RETURNED);
}
2953
2954
2955 /*
2956 * Deal with the low on compressor pool space condition... this function
2957 * gets called when we are approaching the limits of the compressor pool or
2958 * we are unable to create a new swap file.
 * Since this eventually creates a memory deadlock situation, we need to take action to free up
2960 * memory resources (both compressed and uncompressed) in order to prevent the system from hanging completely.
2961 * There are 2 categories of processes to deal with. Those that have an action
2962 * associated with them by the task itself and those that do not. Actionable
2963 * tasks can have one of three categories specified: ones that
2964 * can be killed immediately, ones that should be suspended, and ones that should
2965 * be throttled. Processes that do not have an action associated with them are normally
2966 * ignored unless they are utilizing such a large percentage of the compressor pool (currently 50%)
2967 * that only by killing them can we hope to put the system back into a usable state.
2968 */
2969
2970 #define NO_PAGING_SPACE_DEBUG 0
2971
2972 extern uint64_t vm_compressor_pages_compressed(void);
2973
2974 struct timeval last_no_space_action = {0, 0};
2975
2976 int
2977 no_paging_space_action()
2978 {
2979 proc_t p;
2980 struct no_paging_space nps;
2981 struct timeval now;
2982
2983 /*
2984 * Throttle how often we come through here. Once every 5 seconds should be plenty.
2985 */
2986 microtime(&now);
2987
2988 if (now.tv_sec <= last_no_space_action.tv_sec + 5)
2989 return (0);
2990
2991 /*
2992 * Examine all processes and find the biggest (biggest is based on the number of pages this
2993 * task has in the compressor pool) that has been marked to have some action
2994 * taken when swap space runs out... we also find the biggest that hasn't been marked for
2995 * action.
2996 *
2997 * If the biggest non-actionable task is over the "dangerously big" threashold (currently 50% of
2998 * the total number of pages held by the compressor, we go ahead and kill it since no other task
2999 * can have any real effect on the situation. Otherwise, we go after the actionable process.
3000 */
3001 bzero(&nps, sizeof(nps));
3002
3003 proc_iterate(PROC_ALLPROCLIST, proc_pcontrol_null, (void *)NULL, proc_pcontrol_filter, (void *)&nps);
3004
3005 #if NO_PAGING_SPACE_DEBUG
3006 printf("low swap: npcs_proc_count = %d, npcs_total_size = %qd, npcs_max_size = %qd\n",
3007 nps.npcs_proc_count, nps.npcs_total_size, nps.npcs_max_size);
3008 printf("low swap: pcs_proc_count = %d, pcs_total_size = %qd, pcs_max_size = %qd\n",
3009 nps.pcs_proc_count, nps.pcs_total_size, nps.pcs_max_size);
3010 printf("low swap: apcs_proc_count = %d, apcs_total_size = %qd\n",
3011 nps.apcs_proc_count, nps.apcs_total_size);
3012 #endif
3013 if (nps.npcs_max_size > (vm_compressor_pages_compressed() * 50) / 100) {
3014 /*
3015 * for now we'll knock out any task that has more then 50% of the pages
3016 * held by the compressor
3017 */
3018 if ((p = proc_find(nps.npcs_pid)) != PROC_NULL) {
3019
3020 if (nps.npcs_uniqueid == p->p_uniqueid) {
3021 /*
3022 * verify this is still the same process
3023 * in case the proc exited and the pid got reused while
3024 * we were finishing the proc_iterate and getting to this point
3025 */
3026 last_no_space_action = now;
3027
3028 printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
3029 psignal(p, SIGKILL);
3030
3031 proc_rele(p);
3032
3033 return (0);
3034 }
3035
3036 proc_rele(p);
3037 }
3038 }
3039
3040 if (nps.pcs_max_size > 0) {
3041 if ((p = proc_find(nps.pcs_pid)) != PROC_NULL) {
3042
3043 if (nps.pcs_uniqueid == p->p_uniqueid) {
3044 /*
3045 * verify this is still the same process
3046 * in case the proc exited and the pid got reused while
3047 * we were finishing the proc_iterate and getting to this point
3048 */
3049 last_no_space_action = now;
3050
3051 proc_dopcontrol(p);
3052
3053 proc_rele(p);
3054
3055 return (1);
3056 }
3057
3058 proc_rele(p);
3059 }
3060 }
3061 last_no_space_action = now;
3062
3063 printf("low swap: unable to find any eligible processes to take action on\n");
3064
3065 return (0);
3066 }
3067
/*
 * proc_trace_log syscall: ask the target task (identified by pid and
 * uniqueid, to guard against pid reuse) to send its trace memory.
 * Requires PRIV_PROC_TRACE_INSPECT.
 *
 * Returns EPERM if the privilege check fails, ENOENT if the pid does
 * not exist or the uniqueid no longer matches, EINVAL if the trace
 * memory send fails, 0 on success.
 */
int 
proc_trace_log(__unused proc_t p, struct proc_trace_log_args *uap, __unused int *retval)
{
	int ret = 0;
	proc_t target_proc = PROC_NULL;
	pid_t target_pid = uap->pid;
	uint64_t target_uniqueid = uap->uniqueid;
	task_t target_task = NULL;

	if (priv_check_cred(kauth_cred_get(), PRIV_PROC_TRACE_INSPECT, 0)) {
		ret = EPERM;
		goto out;
	}
	target_proc = proc_find(target_pid);
	if (target_proc != PROC_NULL) {
		/* reject if the pid was reused since the caller looked it up */
		if (target_uniqueid != proc_uniqueid(target_proc)) {
			ret = ENOENT;
			goto out;
		}

		target_task = proc_task(target_proc);
		if (task_send_trace_memory(target_task, target_pid, target_uniqueid)) {
			ret = EINVAL;
			goto out;
		}
	} else
		ret = ENOENT;

out:
	if (target_proc != PROC_NULL)
		proc_rele(target_proc);
	return (ret);
}
3101
3102 #if VM_SCAN_FOR_SHADOW_CHAIN
3103 extern int vm_map_shadow_max(vm_map_t map);
3104 int proc_shadow_max(void);
3105 int proc_shadow_max(void)
3106 {
3107 int retval, max;
3108 proc_t p;
3109 task_t task;
3110 vm_map_t map;
3111
3112 max = 0;
3113 proc_list_lock();
3114 for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
3115 if (p->p_stat == SIDL)
3116 continue;
3117 task = p->task;
3118 if (task == NULL) {
3119 continue;
3120 }
3121 map = get_task_map(task);
3122 if (map == NULL) {
3123 continue;
3124 }
3125 retval = vm_map_shadow_max(map);
3126 if (retval > max) {
3127 max = retval;
3128 }
3129 }
3130 proc_list_unlock();
3131 return max;
3132 }
3133 #endif /* VM_SCAN_FOR_SHADOW_CHAIN */
3134
3135 void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid);
3136 void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid)
3137 {
3138 if (target_proc != NULL) {
3139 target_proc->p_responsible_pid = responsible_pid;
3140 }
3141 return;
3142 }
3143
3144 int
3145 proc_chrooted(proc_t p)
3146 {
3147 int retval = 0;
3148
3149 if (p) {
3150 proc_fdlock(p);
3151 retval = (p->p_fd->fd_rdir != NULL) ? 1 : 0;
3152 proc_fdunlock(p);
3153 }
3154
3155 return retval;
3156 }