/* apple/xnu — bsd/kern/kern_proc.c (commit 0d719cb4d84fe5366d0e59393f67ff5887e3755a) */
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)kern_proc.c 8.4 (Berkeley) 1/4/94
62 */
63 /*
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
67 * Version 2.0.
68 */
69 /* HISTORY
70 * 04-Aug-97 Umesh Vaishampayan (umeshv@apple.com)
71 * Added current_proc_EXTERNAL() function for the use of kernel
72 * lodable modules.
73 *
74 * 05-Jun-95 Mac Gillon (mgillon) at NeXT
75 * New version based on 3.3NS and 4.4
76 */
77
78
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/kernel.h>
82 #include <sys/proc_internal.h>
83 #include <sys/acct.h>
84 #include <sys/wait.h>
85 #include <sys/file_internal.h>
86 #include <sys/uio.h>
87 #include <sys/malloc.h>
88 #include <sys/lock.h>
89 #include <sys/mbuf.h>
90 #include <sys/ioctl.h>
91 #include <sys/tty.h>
92 #include <sys/signalvar.h>
93 #include <sys/syslog.h>
94 #include <sys/sysctl.h>
95 #include <sys/sysproto.h>
96 #include <sys/kauth.h>
97 #include <sys/codesign.h>
98 #include <sys/kernel_types.h>
99 #include <sys/ubc.h>
100 #include <kern/kalloc.h>
101 #include <kern/task.h>
102 #include <kern/coalition.h>
103 #include <sys/coalition.h>
104 #include <kern/assert.h>
105 #include <vm/vm_protos.h>
106 #include <vm/vm_map.h> /* vm_map_switch_protect() */
107 #include <vm/vm_pageout.h>
108 #include <mach/task.h>
109 #include <mach/message.h>
110 #include <sys/priv.h>
111 #include <sys/proc_info.h>
112 #include <sys/bsdtask_info.h>
113 #include <sys/persona.h>
114
115 #if CONFIG_MEMORYSTATUS
116 #include <sys/kern_memorystatus.h>
117 #endif
118
119 #if CONFIG_MACF
120 #include <security/mac_framework.h>
121 #endif
122
123 #include <libkern/crypto/sha1.h>
124
/*
 * Structure associated with user cacheing.
 */
struct uidinfo {
	LIST_ENTRY(uidinfo) ui_hash;	/* chain in uid hash bucket */
	uid_t	ui_uid;			/* uid this entry counts for */
	long	ui_proccnt;		/* number of procs owned by this uid */
};
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
u_long uihash;		/* size of hash table - 1 */

/*
 * Other process lists
 */
struct pidhashhead *pidhashtbl;		/* pid -> proc hash */
u_long pidhash;
struct pgrphashhead *pgrphashtbl;	/* pgid -> pgrp hash */
u_long pgrphash;
struct sesshashhead *sesshashtbl;	/* sid -> session hash */
u_long sesshash;

struct proclist allproc;		/* list of all live processes */
struct proclist zombproc;		/* list of unreaped zombies */
extern struct tty cons;

extern int cs_debug;

#if DEBUG
#define __PROC_INTERNAL_DEBUG 1
#endif
#if CONFIG_COREDUMP
/* Name to give to core files */
__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN+1] = {"/cores/core.%P"};
#endif

#if PROC_REF_DEBUG
#include <kern/backtrace.h>
#endif

/* Forward declarations for local and kdp/stackshot-exported helpers. */
static void orphanpg(struct pgrp * pg);
void proc_name_kdp(task_t t, char * buf, int size);
void * proc_get_uthread_uu_threadlist(void * uthread_v);
int proc_threadname_kdp(void * uth, char * buf, size_t size);
void proc_starttime_kdp(void * p, uint64_t * tv_sec, uint64_t * tv_usec, uint64_t * abstime);
char * proc_name_address(void * p);

/* TODO: make a header that's exported and usable in osfmk */
char* proc_best_name(proc_t p);

static void pgrp_add(struct pgrp * pgrp, proc_t parent, proc_t child);
static void pgrp_remove(proc_t p);
static void pgrp_replace(proc_t p, struct pgrp *pgrp);
static void pgdelete_dropref(struct pgrp *pgrp);
extern void pg_rele_dropref(struct pgrp * pgrp);
static int csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaddittoken);
static boolean_t proc_parent_is_currentproc(proc_t p);

/* Arguments carried through the fixjob_callback() proc iterator. */
struct fixjob_iterargs {
	struct pgrp * pg;
	struct session * mysession;
	int entering;
};

int fixjob_callback(proc_t, void *);

uint64_t get_current_unique_pid(void);
192
193
194 uint64_t
195 get_current_unique_pid(void)
196 {
197 proc_t p = current_proc();
198
199 if (p)
200 return p->p_uniqueid;
201 else
202 return 0;
203 }
204
205 /*
206 * Initialize global process hashing structures.
207 */
void
procinit(void)
{
	LIST_INIT(&allproc);
	LIST_INIT(&zombproc);
	/* Hash tables are sized relative to the maximum process count. */
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
	sesshashtbl = hashinit(maxproc / 4, M_PROC, &sesshash);
	/* uid table is smaller: many procs typically share a uid */
	uihashtbl = hashinit(maxproc / 16, M_PROC, &uihash);
#if CONFIG_PERSONAS
	personas_bootstrap();
#endif
}
221
222 /*
223 * Change the count associated with number of processes
224 * a given user is using. This routine protects the uihash
225 * with the list lock
226 */
/*
 * Adjust the per-uid process count by 'diff' and return the new count.
 * The uidinfo entry is created on first use and freed when its count
 * reaches zero.  Allocation happens with the list lock dropped, after
 * which the lookup is retried ("again") since the table may have
 * changed in the meantime.
 */
int
chgproccnt(uid_t uid, int diff)
{
	struct uidinfo *uip;
	struct uidinfo *newuip = NULL;	/* pre-allocated entry, if needed */
	struct uihashhead *uipp;
	int retval;

again:
	proc_list_lock();
	uipp = UIHASH(uid);
	/* Look for an existing entry for this uid. */
	for (uip = uipp->lh_first; uip != 0; uip = uip->ui_hash.le_next)
		if (uip->ui_uid == uid)
			break;
	if (uip) {
		uip->ui_proccnt += diff;
		if (uip->ui_proccnt > 0) {
			retval = uip->ui_proccnt;
			proc_list_unlock();
			goto out;
		}
		if (uip->ui_proccnt < 0)
			panic("chgproccnt: procs < 0");
		/* Count reached zero: unhash and free the entry. */
		LIST_REMOVE(uip, ui_hash);
		retval = 0;
		proc_list_unlock();
		FREE_ZONE(uip, sizeof(*uip), M_PROC);
		goto out;
	}
	if (diff <= 0) {
		if (diff == 0) {
			retval = 0;
			proc_list_unlock();
			goto out;
		}
		/* Decrement for a uid with no entry: accounting is broken. */
		panic("chgproccnt: lost user");
	}
	if (newuip != NULL) {
		/* Insert the entry allocated on a previous pass. */
		uip = newuip;
		newuip = NULL;
		LIST_INSERT_HEAD(uipp, uip, ui_hash);
		uip->ui_uid = uid;
		uip->ui_proccnt = diff;
		retval = diff;
		proc_list_unlock();
		goto out;
	}
	/* No entry, nothing pre-allocated: allocate unlocked and retry. */
	proc_list_unlock();
	MALLOC_ZONE(newuip, struct uidinfo *, sizeof(*uip), M_PROC, M_WAITOK);
	if (newuip == NULL)
		panic("chgproccnt: M_PROC zone depleted");
	goto again;
out:
	/* Another thread may have raced in the entry; drop the spare. */
	if (newuip != NULL)
		FREE_ZONE(newuip, sizeof(*uip), M_PROC);
	return(retval);
}
284
285 /*
286 * Is p an inferior of the current process?
287 */
288 int
289 inferior(proc_t p)
290 {
291 int retval = 0;
292
293 proc_list_lock();
294 for (; p != current_proc(); p = p->p_pptr)
295 if (p->p_pid == 0)
296 goto out;
297 retval = 1;
298 out:
299 proc_list_unlock();
300 return(retval);
301 }
302
303 /*
304 * Is p an inferior of t ?
305 */
int
isinferior(proc_t p, proc_t t)
{
	int retval = 0;
	int nchecked = 0;	/* bounds the walk at nprocs steps */
	proc_t start = p;

	/* if p==t they are not inferior */
	if (p == t)
		return(0);

	proc_list_lock();
	for (; p != t; p = p->p_pptr) {
		nchecked++;

		/* Detect here if we're in a cycle */
		if ((p->p_pid == 0) || (p->p_pptr == start) || (nchecked >= nprocs))
			goto out;
	}
	retval = 1;
out:
	proc_list_unlock();
	return(retval);
}
330
331 int
332 proc_isinferior(int pid1, int pid2)
333 {
334 proc_t p = PROC_NULL;
335 proc_t t = PROC_NULL;
336 int retval = 0;
337
338 if (((p = proc_find(pid1)) != (proc_t)0 ) && ((t = proc_find(pid2)) != (proc_t)0))
339 retval = isinferior(p, t);
340
341 if (p != PROC_NULL)
342 proc_rele(p);
343 if (t != PROC_NULL)
344 proc_rele(t);
345
346 return(retval);
347 }
348
349 proc_t
350 proc_find(int pid)
351 {
352 return(proc_findinternal(pid, 0));
353 }
354
355 proc_t
356 proc_findinternal(int pid, int locked)
357 {
358 proc_t p = PROC_NULL;
359
360 if (locked == 0) {
361 proc_list_lock();
362 }
363
364 p = pfind_locked(pid);
365 if ((p == PROC_NULL) || (p != proc_ref_locked(p)))
366 p = PROC_NULL;
367
368 if (locked == 0) {
369 proc_list_unlock();
370 }
371
372 return(p);
373 }
374
/*
 * Return a referenced proc for the given thread, or PROC_NULL if a
 * reference cannot be taken.
 */
proc_t
proc_findthread(thread_t thread)
{
	proc_t p = PROC_NULL;
	struct uthread *uth;

	proc_list_lock();
	uth = get_bsdthread_info(thread);
	/* During vfork the child proc hangs off the uthread, not the task. */
	if (uth && (uth->uu_flag & UT_VFORK))
		p = uth->uu_proc;
	else
		p = (proc_t)(get_bsdthreadtask_info(thread));
	p = proc_ref_locked(p);
	proc_list_unlock();
	return(p);
}
391
392 #if PROC_REF_DEBUG
393 void
394 uthread_reset_proc_refcount(void *uthread) {
395 uthread_t uth;
396
397 if (proc_ref_tracking_disabled) {
398 return;
399 }
400
401 uth = (uthread_t) uthread;
402
403 uth->uu_proc_refcount = 0;
404 uth->uu_pindex = 0;
405 }
406
407 int
408 uthread_get_proc_refcount(void *uthread) {
409 uthread_t uth;
410
411 if (proc_ref_tracking_disabled) {
412 return 0;
413 }
414
415 uth = (uthread_t) uthread;
416
417 return uth->uu_proc_refcount;
418 }
419
420 static void
421 record_procref(proc_t p, int count) {
422 uthread_t uth;
423
424 if (proc_ref_tracking_disabled) {
425 return;
426 }
427
428 uth = current_uthread();
429 uth->uu_proc_refcount += count;
430
431 if (count == 1) {
432 if (uth->uu_pindex < NUM_PROC_REFS_TO_TRACK) {
433 backtrace((uintptr_t *) &uth->uu_proc_pcs[uth->uu_pindex], PROC_REF_STACK_DEPTH);
434
435 uth->uu_proc_ps[uth->uu_pindex] = p;
436 uth->uu_pindex++;
437 }
438 }
439 }
440 #endif
441
442 int
443 proc_rele(proc_t p)
444 {
445 proc_list_lock();
446 proc_rele_locked(p);
447 proc_list_unlock();
448
449 return(0);
450 }
451
452 proc_t
453 proc_self(void)
454 {
455 struct proc * p;
456
457 p = current_proc();
458
459 proc_list_lock();
460 if (p != proc_ref_locked(p))
461 p = PROC_NULL;
462 proc_list_unlock();
463 return(p);
464 }
465
466
/*
 * Take a reference on p with the proc list lock held.  Returns p on
 * success, or PROC_NULL when the proc is still being created, is a
 * zombie, has exited, or is being drained / torn down.
 */
proc_t
proc_ref_locked(proc_t p)
{
	proc_t p1 = p;

	/* if process still in creation return failure */
	if ((p == PROC_NULL) || ((p->p_listflag & P_LIST_INCREATE) != 0))
		return (PROC_NULL);
	/* do not return process marked for termination */
	if ((p->p_stat != SZOMB) && ((p->p_listflag & P_LIST_EXITED) == 0) && ((p->p_listflag & (P_LIST_DRAINWAIT | P_LIST_DRAIN | P_LIST_DEAD)) == 0)) {
		p->p_refcount++;
#if PROC_REF_DEBUG
		record_procref(p, 1);
#endif
	}
	else
		p1 = PROC_NULL;

	return(p1);
}
487
/*
 * Drop a reference taken via proc_ref_locked(); the caller holds the
 * proc list lock.  Wakes a thread blocked in proc_refdrain() when the
 * last reference goes away.
 */
void
proc_rele_locked(proc_t p)
{

	if (p->p_refcount > 0) {
		p->p_refcount--;
#if PROC_REF_DEBUG
		record_procref(p, -1);
#endif
		if ((p->p_refcount == 0) && ((p->p_listflag & P_LIST_DRAINWAIT) == P_LIST_DRAINWAIT)) {
			p->p_listflag &= ~P_LIST_DRAINWAIT;
			wakeup(&p->p_refcount);
		}
	} else
		panic("proc_rele_locked -ve ref\n");

}
505
/*
 * Find an unreaped zombie by pid and claim exclusive control of it by
 * setting P_LIST_WAITING.  Returns PROC_NULL when the pid is absent,
 * still in creation, or has not yet started exiting.  Release with
 * proc_drop_zombref().
 */
proc_t
proc_find_zombref(int pid)
{
	proc_t p;

	proc_list_lock();

again:
	p = pfind_locked(pid);

	/* should we bail? */
	if ((p == PROC_NULL)					/* not found */
	    || ((p->p_listflag & P_LIST_INCREATE) != 0)		/* not created yet */
	    || ((p->p_listflag & P_LIST_EXITED) == 0)) {	/* not started exit */

		proc_list_unlock();
		return (PROC_NULL);
	}

	/* If someone else is controlling the (unreaped) zombie - wait */
	if ((p->p_listflag & P_LIST_WAITING) != 0) {
		(void)msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
		goto again;
	}
	p->p_listflag |= P_LIST_WAITING;

	proc_list_unlock();

	return(p);
}
536
537 void
538 proc_drop_zombref(proc_t p)
539 {
540 proc_list_lock();
541 if ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
542 p->p_listflag &= ~P_LIST_WAITING;
543 wakeup(&p->p_stat);
544 }
545 proc_list_unlock();
546 }
547
548
/*
 * Block until all outstanding references on p are dropped, then mark
 * the proc P_LIST_DEAD so no further references can be taken.
 */
void
proc_refdrain(proc_t p)
{

	proc_list_lock();

	p->p_listflag |= P_LIST_DRAIN;
	while (p->p_refcount) {
		/* ask proc_rele_locked() to wake us on the last drop */
		p->p_listflag |= P_LIST_DRAINWAIT;
		msleep(&p->p_refcount, proc_list_mlock, 0, "proc_refdrain", 0) ;
	}
	p->p_listflag &= ~P_LIST_DRAIN;
	p->p_listflag |= P_LIST_DEAD;

	proc_list_unlock();


}
567
/*
 * Take a "parent ref" on p's parent, which keeps the parent from
 * completing a child drain while the caller uses it.  Returns the
 * parent, or PROC_NULL when the parent is gone, a zombie, already
 * drained, or the wait loop gives up (after 5 sleeps).  Pair with
 * proc_parentdropref().
 */
proc_t
proc_parentholdref(proc_t p)
{
	proc_t parent = PROC_NULL;
	proc_t pp;
	int loopcnt = 0;


	proc_list_lock();
loop:
	pp = p->p_pptr;
	if ((pp == PROC_NULL) || (pp->p_stat == SZOMB) || ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED))) {
		parent = PROC_NULL;
		goto out;
	}

	/* Drain started but not finished: wait for it to settle, retry. */
	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == P_LIST_CHILDDRSTART) {
		pp->p_listflag |= P_LIST_CHILDDRWAIT;
		msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
		loopcnt++;
		if (loopcnt == 5) {
			parent = PROC_NULL;
			goto out;
		}
		goto loop;
	}

	/* No drain in progress: safe to take the parent ref. */
	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == 0) {
		pp->p_parentref++;
		parent = pp;
		goto out;
	}

out:
	proc_list_unlock();
	return(parent);
}
/*
 * Drop a ref taken with proc_parentholdref().  When 'listlocked' is
 * non-zero the caller already holds the proc list lock.
 */
int
proc_parentdropref(proc_t p, int listlocked)
{
	if (listlocked == 0)
		proc_list_lock();

	if (p->p_parentref > 0) {
		p->p_parentref--;
		/* Last parent ref: wake a proc_childdrainstart() waiter. */
		if ((p->p_parentref == 0) && ((p->p_listflag & P_LIST_PARENTREFWAIT) == P_LIST_PARENTREFWAIT)) {
			p->p_listflag &= ~P_LIST_PARENTREFWAIT;
			wakeup(&p->p_parentref);
		}
	} else
		panic("proc_parentdropref -ve ref\n");
	if (listlocked == 0)
		proc_list_unlock();

	return(0);
}
624
/*
 * Begin draining children of an exiting proc: set CHILDDRSTART and
 * block until every outstanding parent ref is dropped.  Caller holds
 * the proc list lock (msleep sleeps on proc_list_mlock).
 */
void
proc_childdrainstart(proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_CHILDDRSTART) == P_LIST_CHILDDRSTART)
		panic("proc_childdrainstart: childdrain already started\n");
#endif
	p->p_listflag |= P_LIST_CHILDDRSTART;
	/* wait for all that hold parentrefs to drop */
	while (p->p_parentref > 0) {
		p->p_listflag |= P_LIST_PARENTREFWAIT;
		msleep(&p->p_parentref, proc_list_mlock, 0, "proc_childdrainstart", 0) ;
	}
}
639
640
641 void
642 proc_childdrainend(proc_t p)
643 {
644 #if __PROC_INTERNAL_DEBUG
645 if (p->p_childrencnt > 0)
646 panic("exiting: children stil hanging around\n");
647 #endif
648 p->p_listflag |= P_LIST_CHILDDRAINED;
649 if ((p->p_listflag & (P_LIST_CHILDLKWAIT |P_LIST_CHILDDRWAIT)) != 0) {
650 p->p_listflag &= ~(P_LIST_CHILDLKWAIT |P_LIST_CHILDDRWAIT);
651 wakeup(&p->p_childrencnt);
652 }
653 }
654
/*
 * Debug-only sanity checks run when a proc is freed: it must be out of
 * the pid hash and must hold no children, refs, or parent refs.
 */
void
proc_checkdeadrefs(__unused proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INHASH) != 0)
		panic("proc being freed and still in hash %p: %u\n", p, p->p_listflag);
	if (p->p_childrencnt != 0)
		panic("proc being freed and pending children cnt %p:%d\n", p, p->p_childrencnt);
	if (p->p_refcount != 0)
		panic("proc being freed and pending refcount %p:%d\n", p, p->p_refcount);
	if (p->p_parentref != 0)
		panic("proc being freed and pending parentrefs %p:%d\n", p, p->p_parentref);
#endif
}
669
670 int
671 proc_pid(proc_t p)
672 {
673 if (p != NULL)
674 return (p->p_pid);
675 return -1;
676 }
677
678 int
679 proc_ppid(proc_t p)
680 {
681 if (p != NULL)
682 return (p->p_ppid);
683 return -1;
684 }
685
686 int
687 proc_selfpid(void)
688 {
689 return (current_proc()->p_pid);
690 }
691
692 int
693 proc_selfppid(void)
694 {
695 return (current_proc()->p_ppid);
696 }
697
698 int
699 proc_selfcsflags(void)
700 {
701 return (current_proc()->p_csflags);
702 }
703
704 #if CONFIG_DTRACE
/*
 * Return the proc DTrace should attribute the current thread to,
 * accounting for the vfork window where uthread state points at the
 * child before the switch is complete.
 */
static proc_t
dtrace_current_proc_vforking(void)
{
	thread_t th = current_thread();
	struct uthread *ut = get_bsdthread_info(th);

	if (ut &&
	    ((ut->uu_flag & (UT_VFORK|UT_VFORKING)) == (UT_VFORK|UT_VFORKING))) {
		/*
		 * Handle the narrow window where we're in the vfork syscall,
		 * but we're not quite ready to claim (in particular, to DTrace)
		 * that we're running as the child.
		 */
		return (get_bsdtask_info(get_threadtask(th)));
	}
	return (current_proc());
}
722
723 int
724 dtrace_proc_selfpid(void)
725 {
726 return (dtrace_current_proc_vforking()->p_pid);
727 }
728
729 int
730 dtrace_proc_selfppid(void)
731 {
732 return (dtrace_current_proc_vforking()->p_ppid);
733 }
734
735 uid_t
736 dtrace_proc_selfruid(void)
737 {
738 return (dtrace_current_proc_vforking()->p_ruid);
739 }
740 #endif /* CONFIG_DTRACE */
741
/*
 * Return a referenced pointer to p's parent, or PROC_NULL.  If the
 * parent has exited but not yet drained its children, wait for the
 * drain to finish and retry so that the (possibly re-assigned) parent
 * is picked up.
 */
proc_t
proc_parent(proc_t p)
{
	proc_t parent;
	proc_t pp;

	proc_list_lock();
loop:
	pp = p->p_pptr;
	parent =  proc_ref_locked(pp);
	if ((parent == PROC_NULL) && (pp != PROC_NULL) && (pp->p_stat != SZOMB) && ((pp->p_listflag & P_LIST_EXITED) != 0) && ((pp->p_listflag & P_LIST_CHILDDRAINED)== 0)){
		/* parent is exiting: wait for proc_childdrainend() wakeup */
		pp->p_listflag |= P_LIST_CHILDLKWAIT;
		msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
		goto loop;
	}
	proc_list_unlock();
	return(parent);
}
760
761 static boolean_t
762 proc_parent_is_currentproc(proc_t p)
763 {
764 boolean_t ret = FALSE;
765
766 proc_list_lock();
767 if (p->p_pptr == current_proc())
768 ret = TRUE;
769
770 proc_list_unlock();
771 return ret;
772 }
773
774 void
775 proc_name(int pid, char * buf, int size)
776 {
777 proc_t p;
778
779 if ((p = proc_find(pid)) != PROC_NULL) {
780 strlcpy(buf, &p->p_comm[0], size);
781 proc_rele(p);
782 }
783 }
784
/*
 * Copy a process name for kdp/stackshot use.  Prefers the long p_name
 * when the destination can hold more than p_comm.  No locks are taken
 * (presumably because this runs from debugger context — confirm with
 * callers).
 */
void
proc_name_kdp(task_t t, char * buf, int size)
{
	proc_t p = get_bsdtask_info(t);
	if (p == PROC_NULL)
		return;

	if ((size_t)size > sizeof(p->p_comm))
		strlcpy(buf, &p->p_name[0], MIN((int)sizeof(p->p_name), size));
	else
		strlcpy(buf, &p->p_comm[0], MIN((int)sizeof(p->p_comm), size));
}
797
798 int
799 proc_threadname_kdp(void * uth, char * buf, size_t size)
800 {
801 if (size < MAXTHREADNAMESIZE) {
802 /* this is really just a protective measure for the future in
803 * case the thread name size in stackshot gets out of sync with
804 * the BSD max thread name size. Note that bsd_getthreadname
805 * doesn't take input buffer size into account. */
806 return -1;
807 }
808
809 if (uth != NULL) {
810 bsd_getthreadname(uth, buf);
811 }
812 return 0;
813 }
814
815 /* note that this function is generally going to be called from stackshot,
816 * and the arguments will be coming from a struct which is declared packed
817 * thus the input arguments will in general be unaligned. We have to handle
818 * that here. */
void
proc_starttime_kdp(void *p, uint64_t *tv_sec, uint64_t *tv_usec, uint64_t *abstime)
{
	proc_t pp = (proc_t)p;
	/* Packed single-field struct: lets us store 64-bit values through
	 * potentially unaligned pointers (see comment above). */
	struct uint64p {
		uint64_t val;
	} __attribute__((packed));

	if (pp != PROC_NULL) {
		if (tv_sec != NULL)
			((struct uint64p *)tv_sec)->val = pp->p_start.tv_sec;
		if (tv_usec != NULL)
			((struct uint64p *)tv_usec)->val = pp->p_start.tv_usec;
		if (abstime != NULL) {
			if (pp->p_stats != NULL)
				*abstime = pp->p_stats->ps_start;
			else
				*abstime = 0;
		}
	}
}
840
841 char *
842 proc_name_address(void *p)
843 {
844 return &((proc_t)p)->p_comm[0];
845 }
846
847 char *
848 proc_best_name(proc_t p)
849 {
850 if (p->p_name[0] != 0)
851 return (&p->p_name[0]);
852 return (&p->p_comm[0]);
853 }
854
855 void
856 proc_selfname(char * buf, int size)
857 {
858 proc_t p;
859
860 if ((p = current_proc())!= (proc_t)0) {
861 strlcpy(buf, &p->p_comm[0], size);
862 }
863 }
864
865 void
866 proc_signal(int pid, int signum)
867 {
868 proc_t p;
869
870 if ((p = proc_find(pid)) != PROC_NULL) {
871 psignal(p, signum);
872 proc_rele(p);
873 }
874 }
875
876 int
877 proc_issignal(int pid, sigset_t mask)
878 {
879 proc_t p;
880 int error=0;
881
882 if ((p = proc_find(pid)) != PROC_NULL) {
883 error = proc_pendingsignals(p, mask);
884 proc_rele(p);
885 }
886
887 return(error);
888 }
889
890 int
891 proc_noremotehang(proc_t p)
892 {
893 int retval = 0;
894
895 if (p)
896 retval = p->p_flag & P_NOREMOTEHANG;
897 return(retval? 1: 0);
898
899 }
900
901 int
902 proc_exiting(proc_t p)
903 {
904 int retval = 0;
905
906 if (p)
907 retval = p->p_lflag & P_LEXIT;
908 return(retval? 1: 0);
909 }
910
911 int
912 proc_forcequota(proc_t p)
913 {
914 int retval = 0;
915
916 if (p)
917 retval = p->p_flag & P_FORCEQUOTA;
918 return(retval? 1: 0);
919
920 }
921
922 int
923 proc_suser(proc_t p)
924 {
925 kauth_cred_t my_cred;
926 int error;
927
928 my_cred = kauth_cred_proc_ref(p);
929 error = suser(my_cred, &p->p_acflag);
930 kauth_cred_unref(&my_cred);
931 return(error);
932 }
933
934 task_t
935 proc_task(proc_t proc)
936 {
937 return (task_t)proc->task;
938 }
939
940 /*
941 * Obtain the first thread in a process
942 *
943 * XXX This is a bad thing to do; it exists predominantly to support the
944 * XXX use of proc_t's in places that should really be using
945 * XXX thread_t's instead. This maintains historical behaviour, but really
946 * XXX needs an audit of the context (proxy vs. not) to clean up.
947 */
948 thread_t
949 proc_thread(proc_t proc)
950 {
951 uthread_t uth = TAILQ_FIRST(&proc->p_uthlist);
952
953 if (uth != NULL)
954 return(uth->uu_context.vc_thread);
955
956 return(NULL);
957 }
958
959 kauth_cred_t
960 proc_ucred(proc_t p)
961 {
962 return(p->p_ucred);
963 }
964
965 struct uthread *
966 current_uthread()
967 {
968 thread_t th = current_thread();
969
970 return((struct uthread *)get_bsdthread_info(th));
971 }
972
973
974 int
975 proc_is64bit(proc_t p)
976 {
977 return(IS_64BIT_PROCESS(p));
978 }
979
980 int
981 proc_pidversion(proc_t p)
982 {
983 return(p->p_idversion);
984 }
985
986 uint32_t
987 proc_persona_id(proc_t p)
988 {
989 return (uint32_t)persona_id_from_proc(p);
990 }
991
992 uint32_t
993 proc_getuid(proc_t p)
994 {
995 return(p->p_uid);
996 }
997
998 uint32_t
999 proc_getgid(proc_t p)
1000 {
1001 return(p->p_gid);
1002 }
1003
1004 uint64_t
1005 proc_uniqueid(proc_t p)
1006 {
1007 return(p->p_uniqueid);
1008 }
1009
1010 uint64_t
1011 proc_puniqueid(proc_t p)
1012 {
1013 return(p->p_puniqueid);
1014 }
1015
1016 void
1017 proc_coalitionids(__unused proc_t p, __unused uint64_t ids[COALITION_NUM_TYPES])
1018 {
1019 #if CONFIG_COALITIONS
1020 task_coalition_ids(p->task, ids);
1021 #else
1022 memset(ids, 0, sizeof(uint64_t [COALITION_NUM_TYPES]));
1023 #endif
1024 return;
1025 }
1026
1027 uint64_t
1028 proc_was_throttled(proc_t p)
1029 {
1030 return (p->was_throttled);
1031 }
1032
1033 uint64_t
1034 proc_did_throttle(proc_t p)
1035 {
1036 return (p->did_throttle);
1037 }
1038
1039 int
1040 proc_getcdhash(proc_t p, unsigned char *cdhash)
1041 {
1042 return vn_getcdhash(p->p_textvp, p->p_textoff, cdhash);
1043 }
1044
1045 void
1046 proc_getexecutableuuid(proc_t p, unsigned char *uuidbuf, unsigned long size)
1047 {
1048 if (size >= sizeof(p->p_uuid)) {
1049 memcpy(uuidbuf, p->p_uuid, sizeof(p->p_uuid));
1050 }
1051 }
1052
1053 /* Return vnode for executable with an iocount. Must be released with vnode_put() */
1054 vnode_t
1055 proc_getexecutablevnode(proc_t p)
1056 {
1057 vnode_t tvp = p->p_textvp;
1058
1059 if ( tvp != NULLVP) {
1060 if (vnode_getwithref(tvp) == 0) {
1061 return tvp;
1062 }
1063 }
1064
1065 return NULLVP;
1066 }
1067
1068
1069 void
1070 bsd_set_dependency_capable(task_t task)
1071 {
1072 proc_t p = get_bsdtask_info(task);
1073
1074 if (p) {
1075 OSBitOrAtomic(P_DEPENDENCY_CAPABLE, &p->p_flag);
1076 }
1077 }
1078
1079
1080 int
1081 IS_64BIT_PROCESS(proc_t p)
1082 {
1083 if (p && (p->p_flag & P_LP64))
1084 return(1);
1085 else
1086 return(0);
1087 }
1088
1089 /*
1090 * Locate a process by number
1091 */
proc_t
pfind_locked(pid_t pid)
{
	proc_t p;
#if DEBUG
	proc_t q;
#endif

	/* pid 0 is the kernel proc; callers hold the proc list lock. */
	if (!pid)
		return (kernproc);

	for (p = PIDHASH(pid)->lh_first; p != 0; p = p->p_hash.le_next) {
		if (p->p_pid == pid) {
#if DEBUG
			/* Sanity: a pid must appear at most once in the hash. */
			for (q = p->p_hash.le_next; q != 0; q = q->p_hash.le_next) {
				if ((p !=q) && (q->p_pid == pid))
					panic("two procs with same pid %p:%p:%d:%d\n", p, q, p->p_pid, q->p_pid);
			}
#endif
			/* returned pointer is unreferenced */
			return (p);
		}
	}
	return (NULL);
}
1116
1117 /*
1118 * Locate a zombie by PID
1119 */
__private_extern__ proc_t
pzfind(pid_t pid)
{
	proc_t p;


	proc_list_lock();

	for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next)
		if (p->p_pid == pid)
			break;

	proc_list_unlock();

	/* NOTE(review): returned proc is unreferenced and the list lock has
	 * been dropped; callers presumably rely on zombies being stable. */
	return (p);
}
1136
1137 /*
1138 * Locate a process group by number
1139 */
1140
1141 struct pgrp *
1142 pgfind(pid_t pgid)
1143 {
1144 struct pgrp * pgrp;
1145
1146 proc_list_lock();
1147 pgrp = pgfind_internal(pgid);
1148 if ((pgrp == NULL) || ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) != 0))
1149 pgrp = PGRP_NULL;
1150 else
1151 pgrp->pg_refcount++;
1152 proc_list_unlock();
1153 return(pgrp);
1154 }
1155
1156
1157
1158 struct pgrp *
1159 pgfind_internal(pid_t pgid)
1160 {
1161 struct pgrp *pgrp;
1162
1163 for (pgrp = PGRPHASH(pgid)->lh_first; pgrp != 0; pgrp = pgrp->pg_hash.le_next)
1164 if (pgrp->pg_id == pgid)
1165 return (pgrp);
1166 return (NULL);
1167 }
1168
1169 void
1170 pg_rele(struct pgrp * pgrp)
1171 {
1172 if(pgrp == PGRP_NULL)
1173 return;
1174 pg_rele_dropref(pgrp);
1175 }
1176
/*
 * Drop a process group reference.  When the last ref is dropped on a
 * pgrp already marked PGRP_FLAG_TERMINATE, tear the group down via
 * pgdelete_dropref() (which consumes the final ref).
 */
void
pg_rele_dropref(struct pgrp * pgrp)
{
	proc_list_lock();
	if ((pgrp->pg_refcount == 1) && ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) == PGRP_FLAG_TERMINATE)) {
		proc_list_unlock();
		pgdelete_dropref(pgrp);
		return;
	}

	pgrp->pg_refcount--;
	proc_list_unlock();
}
1190
1191 struct session *
1192 session_find_internal(pid_t sessid)
1193 {
1194 struct session *sess;
1195
1196 for (sess = SESSHASH(sessid)->lh_first; sess != 0; sess = sess->s_hash.le_next)
1197 if (sess->s_sid == sessid)
1198 return (sess);
1199 return (NULL);
1200 }
1201
1202
1203 /*
1204 * Make a new process ready to become a useful member of society by making it
1205 * visible in all the right places and initialize its own lists to empty.
1206 *
1207 * Parameters: parent The parent of the process to insert
1208 * child The child process to insert
1209 *
1210 * Returns: (void)
1211 *
1212 * Notes: Insert a child process into the parents process group, assign
1213 * the child the parent process pointer and PPID of the parent,
1214 * place it on the parents p_children list as a sibling,
1215 * initialize its own child list, place it in the allproc list,
1216 * insert it in the proper hash bucket, and initialize its
1217 * event list.
1218 */
void
pinsertchild(proc_t parent, proc_t child)
{
	struct pgrp * pg;

	LIST_INIT(&child->p_children);
	TAILQ_INIT(&child->p_evlist);
	child->p_pptr = parent;
	child->p_ppid = parent->p_pid;
	/* remember the parent's unique id at fork time */
	child->p_puniqueid = parent->p_uniqueid;

	/* Join the parent's process group (takes its own locks). */
	pg = proc_pgrp(parent);
	pgrp_add(pg, parent, child);
	pg_rele(pg);

	proc_list_lock();
	
#if CONFIG_MEMORYSTATUS
	memorystatus_add(child, TRUE);
#endif
	
	parent->p_childrencnt++;
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);

	LIST_INSERT_HEAD(&allproc, child, p_list);
	/* mark the completion of proc creation */
	child->p_listflag &= ~P_LIST_INCREATE;

	proc_list_unlock();
}
1249
1250 /*
1251 * Move p to a new or existing process group (and session)
1252 *
1253 * Returns: 0 Success
1254 * ESRCH No such process
1255 */
int
enterpgrp(proc_t p, pid_t pgid, int mksess)
{
	struct pgrp *pgrp;
	struct pgrp *mypgrp;
	struct session * procsp;

	pgrp = pgfind(pgid);		/* target group (ref'd), if it exists */
	mypgrp = proc_pgrp(p);		/* current group (ref'd) */
	procsp = proc_session(p);	/* current session (ref'd) */

#if DIAGNOSTIC
	if (pgrp != NULL && mksess)	/* firewalls */
		panic("enterpgrp: setsid into non-empty pgrp");
	if (SESS_LEADER(p, procsp))
		panic("enterpgrp: session leader attempted setpgrp");
#endif
	if (pgrp == PGRP_NULL) {
		pid_t savepid = p->p_pid;
		proc_t np = PROC_NULL;
		/*
		 * new process group
		 */
#if DIAGNOSTIC
		if (p->p_pid != pgid)
			panic("enterpgrp: new pgrp and pid != pgid");
#endif
		MALLOC_ZONE(pgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP,
		    M_WAITOK);
		if (pgrp == NULL)
			panic("enterpgrp: M_PGRP zone depleted");
		/* Re-verify p is still alive after the blocking allocation. */
		if ((np = proc_find(savepid)) == NULL || np != p) {
			if (np != PROC_NULL)
				proc_rele(np);
			if (mypgrp != PGRP_NULL)
				pg_rele(mypgrp);
			if (procsp != SESSION_NULL)
				session_rele(procsp);
			FREE_ZONE(pgrp, sizeof(struct pgrp), M_PGRP);
			return (ESRCH);
		}
		proc_rele(np);
		if (mksess) {
			struct session *sess;

			/*
			 * new session
			 */
			MALLOC_ZONE(sess, struct session *,
			    sizeof(struct session), M_SESSION, M_WAITOK);
			if (sess == NULL)
				panic("enterpgrp: M_SESSION zone depleted");
			sess->s_leader = p;
			sess->s_sid = p->p_pid;
			sess->s_count = 1;
			sess->s_ttyvp = NULL;
			sess->s_ttyp = TTY_NULL;
			sess->s_flags = 0;
			sess->s_listflags = 0;
			sess->s_ttypgrpid = NO_PID;
#if CONFIG_FINE_LOCK_GROUPS
			lck_mtx_init(&sess->s_mlock, proc_mlock_grp, proc_lck_attr);
#else
			lck_mtx_init(&sess->s_mlock, proc_lck_grp, proc_lck_attr);
#endif
			/* inherit the login name from the old session */
			bcopy(procsp->s_login, sess->s_login,
			    sizeof(sess->s_login));
			/* a new session leader has no controlling terminal */
			OSBitAndAtomic(~((uint32_t)P_CONTROLT), &p->p_flag);
			proc_list_lock();
			LIST_INSERT_HEAD(SESSHASH(sess->s_sid), sess, s_hash);
			proc_list_unlock();
			pgrp->pg_session = sess;
#if DIAGNOSTIC
			if (p != current_proc())
				panic("enterpgrp: mksession and p != curproc");
#endif
		} else {
			/* new pgrp joins p's existing session */
			proc_list_lock();
			pgrp->pg_session = procsp;

			if ((pgrp->pg_session->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
				panic("enterpgrp: providing ref to terminating session ");
			pgrp->pg_session->s_count++;
			proc_list_unlock();
		}
		pgrp->pg_id = pgid;
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_init(&pgrp->pg_mlock, proc_mlock_grp, proc_lck_attr);
#else
		lck_mtx_init(&pgrp->pg_mlock, proc_lck_grp, proc_lck_attr);
#endif
		LIST_INIT(&pgrp->pg_members);
		pgrp->pg_membercnt = 0;
		pgrp->pg_jobc = 0;
		proc_list_lock();
		pgrp->pg_refcount = 1;
		pgrp->pg_listflags = 0;
		LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
		proc_list_unlock();
	} else if (pgrp == mypgrp) {
		/* Already a member of the target pgrp: just drop our refs. */
		pg_rele(pgrp);
		if (mypgrp != NULL)
			pg_rele(mypgrp);
		if (procsp != SESSION_NULL)
			session_rele(procsp);
		return (0);
	}

	if (procsp != SESSION_NULL)
		session_rele(procsp);
	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	fixjobc(p, mypgrp, 0);

	if(mypgrp != PGRP_NULL)
		pg_rele(mypgrp);
	/* switch p's membership from its old group to pgrp */
	pgrp_replace(p, pgrp);
	pg_rele(pgrp);

	return(0);
}
1381
1382 /*
1383 * remove process from process group
1384 */
1385 int
1386 leavepgrp(proc_t p)
1387 {
1388
1389 pgrp_remove(p);
1390 return (0);
1391 }
1392
1393 /*
1394 * delete a process group
1395 */
/*
 * Drop one list reference on pgrp and, if the group is now both empty
 * and unreferenced, tear it down: unhash it, detach it from its tty,
 * and release its session reference (destroying the session too when
 * that was the last one).
 */
static void
pgdelete_dropref(struct pgrp *pgrp)
{
	struct tty *ttyp;
	int emptypgrp = 1;
	struct session *sessp;


	pgrp_lock(pgrp);
	if (pgrp->pg_membercnt != 0) {
		emptypgrp = 0;
	}
	pgrp_unlock(pgrp);

	proc_list_lock();
	pgrp->pg_refcount--;
	/* bail out while members remain; last member's removal finishes the job */
	if ((emptypgrp == 0) || (pgrp->pg_membercnt != 0)) {
		proc_list_unlock();
		return;
	}

	pgrp->pg_listflags |= PGRP_FLAG_TERMINATE;

	/* other ref holders still outstanding; the last one will reap it */
	if (pgrp->pg_refcount > 0) {
		proc_list_unlock();
		return;
	}

	pgrp->pg_listflags |= PGRP_FLAG_DEAD;
	LIST_REMOVE(pgrp, pg_hash);

	proc_list_unlock();

	/* if this group is the tty's foreground group, clear that linkage */
	ttyp = SESSION_TP(pgrp->pg_session);
	if (ttyp != TTY_NULL) {
		if (ttyp->t_pgrp == pgrp) {
			tty_lock(ttyp);
			/* Re-check after acquiring the lock */
			if (ttyp->t_pgrp == pgrp) {
				ttyp->t_pgrp = NULL;
				pgrp->pg_session->s_ttypgrpid = NO_PID;
			}
			tty_unlock(ttyp);
		}
	}

	proc_list_lock();

	sessp = pgrp->pg_session;
	if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
		panic("pg_deleteref: manipulating refs of already terminating session");
	if (--sessp->s_count == 0) {
		/* last session reference: unhash, detach from tty, free */
		if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
			panic("pg_deleteref: terminating already terminated session");
		sessp->s_listflags |= S_LIST_TERM;
		ttyp = SESSION_TP(sessp);
		LIST_REMOVE(sessp, s_hash);
		proc_list_unlock();
		if (ttyp != TTY_NULL) {
			tty_lock(ttyp);
			if (ttyp->t_session == sessp)
				ttyp->t_session = NULL;
			tty_unlock(ttyp);
		}
		proc_list_lock();
		sessp->s_listflags |= S_LIST_DEAD;
		if (sessp->s_count != 0)
			panic("pg_deleteref: freeing session in use");
		proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_destroy(&sessp->s_mlock, proc_mlock_grp);
#else
		lck_mtx_destroy(&sessp->s_mlock, proc_lck_grp);
#endif
		FREE_ZONE(sessp, sizeof(struct session), M_SESSION);
	} else
		proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
	lck_mtx_destroy(&pgrp->pg_mlock, proc_mlock_grp);
#else
	lck_mtx_destroy(&pgrp->pg_mlock, proc_lck_grp);
#endif
	FREE_ZONE(pgrp, sizeof(*pgrp), M_PGRP);
}
1480
1481
1482 /*
1483 * Adjust pgrp jobc counters when specified process changes process group.
1484 * We count the number of processes in each process group that "qualify"
1485 * the group for terminal job control (those with a parent in a different
1486 * process group of the same session). If that count reaches zero, the
1487 * process group becomes orphaned. Check both the specified process'
1488 * process group and that of its children.
1489 * entering == 0 => p is leaving specified group.
1490 * entering == 1 => p is entering specified group.
1491 */
/*
 * Per-child callback used by fixjobc() via proc_childrenwalk().  If the
 * child is in a different process group than the one being entered/left
 * but in the same session, adjust that group's job-control eligibility
 * count; a group whose count drops to zero has become orphaned and is
 * handed to orphanpg().
 */
int
fixjob_callback(proc_t p, void * arg)
{
	struct fixjob_iterargs *fp;
	struct pgrp * pg, *hispg;
	struct session * mysession, *hissess;
	int entering;

	fp = (struct fixjob_iterargs *)arg;
	pg = fp->pg;
	mysession = fp->mysession;
	entering = fp->entering;

	/* the child's own group and session (both referenced) */
	hispg = proc_pgrp(p);
	hissess = proc_session(p);

	if ((hispg != pg) &&
	    (hissess == mysession)) {
		pgrp_lock(hispg);
		if (entering) {
			hispg->pg_jobc++;
			pgrp_unlock(hispg);
		} else if (--hispg->pg_jobc == 0) {
			/* group lost its last qualifying member */
			pgrp_unlock(hispg);
			orphanpg(hispg);
		} else
			pgrp_unlock(hispg);
	}
	if (hissess != SESSION_NULL)
		session_rele(hissess);
	if (hispg != PGRP_NULL)
		pg_rele(hispg);

	return(PROC_RETURNED);
}
1527
/*
 * Recompute job-control eligibility (pg_jobc) for pgrp and for the
 * process groups of p's children when p enters (entering == 1) or
 * leaves (entering == 0) pgrp.  See the block comment above for the
 * qualification rule.
 */
void
fixjobc(proc_t p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp = PGRP_NULL;
	struct session *hissess = SESSION_NULL;
	struct session *mysession = pgrp->pg_session;
	proc_t parent;
	struct fixjob_iterargs fjarg;
	boolean_t proc_parent_self;

	/*
	 * Check if p's parent is current proc, if yes then no need to take
	 * a ref; calling proc_parent with current proc as parent may
	 * deadlock if current proc is exiting.
	 */
	proc_parent_self = proc_parent_is_currentproc(p);
	if (proc_parent_self)
		parent = current_proc();
	else
		parent = proc_parent(p);

	if (parent != PROC_NULL) {
		hispgrp = proc_pgrp(parent);
		hissess = proc_session(parent);
		if (!proc_parent_self)
			proc_rele(parent);
	}


	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	if ((hispgrp != pgrp) &&
	    (hissess == mysession)) {
		pgrp_lock(pgrp);
		if (entering) {
			pgrp->pg_jobc++;
			pgrp_unlock(pgrp);
		}else if (--pgrp->pg_jobc == 0) {
			/* pgrp just became orphaned */
			pgrp_unlock(pgrp);
			orphanpg(pgrp);
		} else
			pgrp_unlock(pgrp);
	}

	if (hissess != SESSION_NULL)
		session_rele(hissess);
	if (hispgrp != PGRP_NULL)
		pg_rele(hispgrp);

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	fjarg.pg = pgrp;
	fjarg.mysession = mysession;
	fjarg.entering = entering;
	proc_childrenwalk(p, fixjob_callback, &fjarg);
}
1589
1590 /*
1591 * A process group has become orphaned; if there are any stopped processes in
1592 * the group, hang-up all process in that group.
1593 */
1594 static void
1595 orphanpg(struct pgrp *pgrp)
1596 {
1597 pid_t *pid_list;
1598 proc_t p;
1599 vm_size_t pid_list_size = 0;
1600 vm_size_t pid_list_size_needed = 0;
1601 int pid_count = 0;
1602 int pid_count_available = 0;
1603
1604 assert(pgrp != NULL);
1605
1606 /* allocate outside of the pgrp_lock */
1607 for (;;) {
1608 pgrp_lock(pgrp);
1609
1610 boolean_t should_iterate = FALSE;
1611 pid_count_available = 0;
1612
1613 PGMEMBERS_FOREACH(pgrp, p) {
1614 pid_count_available++;
1615
1616 if (p->p_stat == SSTOP) {
1617 should_iterate = TRUE;
1618 }
1619 }
1620
1621 if (pid_count_available == 0 || !should_iterate) {
1622 pgrp_unlock(pgrp);
1623 return;
1624 }
1625
1626 pid_list_size_needed = pid_count_available * sizeof(pid_t);
1627 if (pid_list_size >= pid_list_size_needed) {
1628 break;
1629 }
1630 pgrp_unlock(pgrp);
1631
1632 if (pid_list_size != 0) {
1633 kfree(pid_list, pid_list_size);
1634 }
1635 pid_list = kalloc(pid_list_size_needed);
1636 if (!pid_list) {
1637 return;
1638 }
1639 pid_list_size = pid_list_size_needed;
1640 }
1641
1642 /* no orphaned processes */
1643 if (pid_list_size == 0) {
1644 pgrp_unlock(pgrp);
1645 return;
1646 }
1647
1648 PGMEMBERS_FOREACH(pgrp, p) {
1649 pid_list[pid_count++] = proc_pid(p);
1650 if (pid_count >= pid_count_available) {
1651 break;
1652 }
1653 }
1654 pgrp_unlock(pgrp);
1655
1656 if (pid_count == 0) {
1657 goto out;
1658 }
1659
1660 for (int i = 0; i < pid_count; i++) {
1661 /* do not handle kernproc */
1662 if (pid_list[i] == 0) {
1663 continue;
1664 }
1665 p = proc_find(pid_list[i]);
1666 if (!p) {
1667 continue;
1668 }
1669
1670 proc_transwait(p, 0);
1671 pt_setrunnable(p);
1672 psignal(p, SIGHUP);
1673 psignal(p, SIGCONT);
1674 proc_rele(p);
1675 }
1676
1677 out:
1678 kfree(pid_list, pid_list_size);
1679 return;
1680 }
1681
1682 int
1683 proc_is_classic(proc_t p __unused)
1684 {
1685 return (0);
1686 }
1687
1688 /* XXX Why does this function exist? Need to kill it off... */
1689 proc_t
1690 current_proc_EXTERNAL(void)
1691 {
1692 return (current_proc());
1693 }
1694
1695 int
1696 proc_is_forcing_hfs_case_sensitivity(proc_t p)
1697 {
1698 return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY) ? 1 : 0;
1699 }
1700
1701 #if CONFIG_COREDUMP
1702 /*
1703 * proc_core_name(name, uid, pid)
1704 * Expand the name described in corefilename, using name, uid, and pid.
1705 * corefilename is a printf-like string, with three format specifiers:
1706 * %N name of process ("name")
1707 * %P process id (pid)
1708 * %U user id (uid)
1709 * For example, "%N.core" is the default; they can be disabled completely
1710 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
1711 * This is controlled by the sysctl variable kern.corefile (see above).
1712 */
1713 __private_extern__ int
1714 proc_core_name(const char *name, uid_t uid, pid_t pid, char *cf_name,
1715 size_t cf_name_len)
1716 {
1717 const char *format, *appendstr;
1718 char id_buf[11]; /* Buffer for pid/uid -- max 4B */
1719 size_t i, l, n;
1720
1721 if (cf_name == NULL)
1722 goto toolong;
1723
1724 format = corefilename;
1725 for (i = 0, n = 0; n < cf_name_len && format[i]; i++) {
1726 switch (format[i]) {
1727 case '%': /* Format character */
1728 i++;
1729 switch (format[i]) {
1730 case '%':
1731 appendstr = "%";
1732 break;
1733 case 'N': /* process name */
1734 appendstr = name;
1735 break;
1736 case 'P': /* process id */
1737 snprintf(id_buf, sizeof(id_buf), "%u", pid);
1738 appendstr = id_buf;
1739 break;
1740 case 'U': /* user id */
1741 snprintf(id_buf, sizeof(id_buf), "%u", uid);
1742 appendstr = id_buf;
1743 break;
1744 case '\0': /* format string ended in % symbol */
1745 goto endofstring;
1746 default:
1747 appendstr = "";
1748 log(LOG_ERR,
1749 "Unknown format character %c in `%s'\n",
1750 format[i], format);
1751 }
1752 l = strlen(appendstr);
1753 if ((n + l) >= cf_name_len)
1754 goto toolong;
1755 bcopy(appendstr, cf_name + n, l);
1756 n += l;
1757 break;
1758 default:
1759 cf_name[n++] = format[i];
1760 }
1761 }
1762 if (format[i] != '\0')
1763 goto toolong;
1764 return (0);
1765 toolong:
1766 log(LOG_ERR, "pid %ld (%s), uid (%u): corename is too long\n",
1767 (long)pid, name, (uint32_t)uid);
1768 return (1);
1769 endofstring:
1770 log(LOG_ERR, "pid %ld (%s), uid (%u): unexpected end of string after %% token\n",
1771 (long)pid, name, (uint32_t)uid);
1772 return (1);
1773 }
1774 #endif /* CONFIG_COREDUMP */
1775
1776 /* Code Signing related routines */
1777
1778 int
1779 csops(__unused proc_t p, struct csops_args *uap, __unused int32_t *retval)
1780 {
1781 return(csops_internal(uap->pid, uap->ops, uap->useraddr,
1782 uap->usersize, USER_ADDR_NULL));
1783 }
1784
1785 int
1786 csops_audittoken(__unused proc_t p, struct csops_audittoken_args *uap, __unused int32_t *retval)
1787 {
1788 if (uap->uaudittoken == USER_ADDR_NULL)
1789 return(EINVAL);
1790 return(csops_internal(uap->pid, uap->ops, uap->useraddr,
1791 uap->usersize, uap->uaudittoken));
1792 }
1793
1794 static int
1795 csops_copy_token(void *start, size_t length, user_size_t usize, user_addr_t uaddr)
1796 {
1797 char fakeheader[8] = { 0 };
1798 int error;
1799
1800 if (usize < sizeof(fakeheader))
1801 return ERANGE;
1802
1803 /* if no blob, fill in zero header */
1804 if (NULL == start) {
1805 start = fakeheader;
1806 length = sizeof(fakeheader);
1807 } else if (usize < length) {
1808 /* ... if input too short, copy out length of entitlement */
1809 uint32_t length32 = htonl((uint32_t)length);
1810 memcpy(&fakeheader[4], &length32, sizeof(length32));
1811
1812 error = copyout(fakeheader, uaddr, sizeof(fakeheader));
1813 if (error == 0)
1814 return ERANGE; /* input buffer to short, ERANGE signals that */
1815 return error;
1816 }
1817 return copyout(start, uaddr, length);
1818 }
1819
/*
 * Back end for csops()/csops_audittoken(): perform code-signing
 * operation `ops' on the process identified by `pid' (0 == self).
 * Query operations are open to all callers; everything else requires
 * root or a self-target.  When an audit token is supplied it must match
 * the target's pid and idversion, defending against pid reuse between
 * the caller's lookup and ours.
 */
static int
csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaudittoken)
{
	size_t usize = (size_t)CAST_DOWN(size_t, usersize);
	proc_t pt;
	int forself;
	int error;
	vnode_t tvp;
	off_t toff;
	unsigned char cdhash[SHA1_RESULTLEN];
	audit_token_t token;
	unsigned int upid=0, uidversion = 0;

	forself = error = 0;

	/* pid 0 means "the calling process" */
	if (pid == 0)
		pid = proc_selfpid();
	if (pid == proc_selfpid())
		forself = 1;


	switch (ops) {
	case CS_OPS_STATUS:
	case CS_OPS_CDHASH:
	case CS_OPS_PIDOFFSET:
	case CS_OPS_ENTITLEMENTS_BLOB:
	case CS_OPS_IDENTITY:
	case CS_OPS_BLOB:
		break;		/* not restricted to root */
	default:
		/* state-changing operations require root unless self-targeted */
		if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE)
			return(EPERM);
		break;
	}

	pt = proc_find(pid);
	if (pt == PROC_NULL)
		return(ESRCH);

	upid = pt->p_pid;
	uidversion = pt->p_idversion;
	if (uaudittoken != USER_ADDR_NULL) {

		error = copyin(uaudittoken, &token, sizeof(audit_token_t));
		if (error != 0)
			goto out;
		/* verify the audit token pid/idversion matches with proc */
		if ((token.val[5] != upid) || (token.val[7] != uidversion)) {
			error = ESRCH;
			goto out;
		}
	}

#if CONFIG_MACF
	/* MAC policy check: setters and getters are vetted separately */
	switch (ops) {
	case CS_OPS_MARKINVALID:
	case CS_OPS_MARKHARD:
	case CS_OPS_MARKKILL:
	case CS_OPS_MARKRESTRICT:
	case CS_OPS_SET_STATUS:
	case CS_OPS_CLEARINSTALLER:
		if ((error = mac_proc_check_set_cs_info(current_proc(), pt, ops)))
			goto out;
		break;
	default:
		if ((error = mac_proc_check_get_cs_info(current_proc(), pt, ops)))
			goto out;
	}
#endif

	switch (ops) {

	case CS_OPS_STATUS: {
		/* report p_csflags plus derived (not stored) status bits */
		uint32_t retflags;

		proc_lock(pt);
		retflags = pt->p_csflags;
		if (cs_enforcement(pt))
			retflags |= CS_ENFORCEMENT;
		if (csproc_get_platform_binary(pt))
			retflags |= CS_PLATFORM_BINARY;
		if (csproc_get_platform_path(pt))
			retflags |= CS_PLATFORM_PATH;
		proc_unlock(pt);

		if (uaddr != USER_ADDR_NULL)
			error = copyout(&retflags, uaddr, sizeof(uint32_t));
		break;
	}
	case CS_OPS_MARKINVALID:
		proc_lock(pt);
		if ((pt->p_csflags & CS_VALID) == CS_VALID) {	/* is currently valid */
			pt->p_csflags &= ~CS_VALID;	/* set invalid */
			/* CS_KILL processes die when their signature goes invalid */
			if ((pt->p_csflags & CS_KILL) == CS_KILL) {
				pt->p_csflags |= CS_KILLED;
				proc_unlock(pt);
				if (cs_debug) {
					printf("CODE SIGNING: marked invalid by pid %d: "
					       "p=%d[%s] honoring CS_KILL, final status 0x%x\n",
					       proc_selfpid(), pt->p_pid, pt->p_comm, pt->p_csflags);
				}
				psignal(pt, SIGKILL);
			} else
				proc_unlock(pt);
		} else
			proc_unlock(pt);

		break;

	case CS_OPS_MARKHARD:
		proc_lock(pt);
		pt->p_csflags |= CS_HARD;
		if ((pt->p_csflags & CS_VALID) == 0) {
			/* @@@ allow? reject? kill? @@@ */
			proc_unlock(pt);
			error = EINVAL;
			goto out;
		} else
			proc_unlock(pt);
		break;

	case CS_OPS_MARKKILL:
		proc_lock(pt);
		pt->p_csflags |= CS_KILL;
		/* already invalid: apply the kill policy immediately */
		if ((pt->p_csflags & CS_VALID) == 0) {
			proc_unlock(pt);
			psignal(pt, SIGKILL);
		} else
			proc_unlock(pt);
		break;

	case CS_OPS_PIDOFFSET:
		toff = pt->p_textoff;
		proc_rele(pt);
		error = copyout(&toff, uaddr, sizeof(toff));
		return(error);

	case CS_OPS_CDHASH:

		/* pt already holds a reference on its p_textvp */
		tvp = pt->p_textvp;
		toff = pt->p_textoff;

		if (tvp == NULLVP || usize != SHA1_RESULTLEN) {
			proc_rele(pt);
			return EINVAL;
		}

		error = vn_getcdhash(tvp, toff, cdhash);
		proc_rele(pt);

		if (error == 0) {
			error = copyout(cdhash, uaddr, sizeof (cdhash));
		}

		return error;

	case CS_OPS_ENTITLEMENTS_BLOB: {
		void *start;
		size_t length;

		proc_lock(pt);

		/* only valid (or debugged) signatures expose their blobs */
		if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
			proc_unlock(pt);
			error = EINVAL;
			break;
		}

		error = cs_entitlements_blob_get(pt, &start, &length);
		proc_unlock(pt);
		if (error)
			break;

		error = csops_copy_token(start, length, usize, uaddr);
		break;
	}
	case CS_OPS_MARKRESTRICT:
		proc_lock(pt);
		pt->p_csflags |= CS_RESTRICT;
		proc_unlock(pt);
		break;

	case CS_OPS_SET_STATUS: {
		uint32_t flags;

		if (usize < sizeof(flags)) {
			error = ERANGE;
			break;
		}

		error = copyin(uaddr, &flags, sizeof(flags));
		if (error)
			break;

		/* only allow setting a subset of all code sign flags */
		flags &=
		    CS_HARD | CS_EXEC_SET_HARD |
		    CS_KILL | CS_EXEC_SET_KILL |
		    CS_RESTRICT |
		    CS_REQUIRE_LV |
		    CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT;

		proc_lock(pt);
		if (pt->p_csflags & CS_VALID)
			pt->p_csflags |= flags;
		else
			error = EINVAL;
		proc_unlock(pt);

		break;
	}
	case CS_OPS_BLOB: {
		void *start;
		size_t length;

		proc_lock(pt);
		if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
			proc_unlock(pt);
			error = EINVAL;
			break;
		}

		error = cs_blob_get(pt, &start, &length);
		proc_unlock(pt);
		if (error)
			break;

		error = csops_copy_token(start, length, usize, uaddr);
		break;
	}
	case CS_OPS_IDENTITY: {
		const char *identity;
		uint8_t fakeheader[8];
		uint32_t idlen;
		size_t length;

		/*
		 * Make identity have a blob header to make it
		 * easier on userland to guess the identity
		 * length.
		 */
		if (usize < sizeof(fakeheader)) {
			error = ERANGE;
			break;
		}
		memset(fakeheader, 0, sizeof(fakeheader));

		proc_lock(pt);
		if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
			proc_unlock(pt);
			error = EINVAL;
			break;
		}

		identity = cs_identity_get(pt);
		proc_unlock(pt);
		if (identity == NULL) {
			error = ENOENT;
			break;
		}

		length = strlen(identity) + 1;	/* include NUL */
		idlen = htonl(length + sizeof(fakeheader));
		memcpy(&fakeheader[4], &idlen, sizeof(idlen));

		error = copyout(fakeheader, uaddr, sizeof(fakeheader));
		if (error)
			break;

		/* copy the identity string after the header if it fits */
		if (usize < sizeof(fakeheader) + length)
			error = ERANGE;
		else if (usize > sizeof(fakeheader))
			error = copyout(identity, uaddr + sizeof(fakeheader), length);

		break;
	}

	case CS_OPS_CLEARINSTALLER:
		proc_lock(pt);
		pt->p_csflags &= ~(CS_INSTALLER | CS_EXEC_SET_INSTALLER);
		proc_unlock(pt);
		break;

	default:
		error = EINVAL;
		break;
	}
out:
	proc_rele(pt);
	return(error);
}
2112
/*
 * Invoke callout on processes selected by flags (all procs and/or
 * zombies) and filterfn.  Pids are snapshotted into a kalloc'ed list
 * under proc_list_lock, then each pid is revisited unlocked with a
 * fresh proc (or zombie) reference, so the callout may block.
 *
 * Callout return values: PROC_RETURNED[_DONE] -> we drop the ref here,
 * PROC_CLAIMED[_DONE] -> the callout consumed it; *_DONE stops the walk.
 *
 * Returns: 0 on success, 1 if the pid list could not be allocated.
 */
int
proc_iterate(
	unsigned int flags,
	proc_iterate_fn_t callout,
	void *arg,
	proc_iterate_fn_t filterfn,
	void *filterarg)
{
	pid_t *pid_list;
	vm_size_t pid_list_size = 0;
	vm_size_t pid_list_size_needed = 0;
	int pid_count = 0;
	int pid_count_available = 0;

	assert(callout != NULL);

	/* allocate outside of the proc_list_lock */
	for (;;) {
		proc_list_lock();

		pid_count_available = nprocs;
		assert(pid_count_available > 0);

		pid_list_size_needed = pid_count_available * sizeof(pid_t);
		if (pid_list_size >= pid_list_size_needed) {
			break;
		}
		proc_list_unlock();

		if (pid_list_size != 0) {
			kfree(pid_list, pid_list_size);
		}
		pid_list = kalloc(pid_list_size_needed);
		if (!pid_list) {
			return 1;
		}
		pid_list_size = pid_list_size_needed;
	}

	/* filter pids into pid_list */

	if (flags & PROC_ALLPROCLIST) {
		proc_t p;
		ALLPROC_FOREACH(p) {
			/* ignore processes that are being forked */
			if (p->p_stat == SIDL) {
				continue;
			}
			if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
				continue;
			}

			pid_list[pid_count++] = proc_pid(p);
			if (pid_count >= pid_count_available) {
				break;
			}
		}
	}

	if ((pid_count < pid_count_available) &&
	    (flags & PROC_ZOMBPROCLIST))
	{
		proc_t p;
		ZOMBPROC_FOREACH(p) {
			if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
				continue;
			}

			pid_list[pid_count++] = proc_pid(p);
			if (pid_count >= pid_count_available) {
				break;
			}
		}
	}

	proc_list_unlock();

	/* call callout on processes in the pid_list */

	for (int i = 0; i < pid_count; i++) {
		proc_t p = proc_find(pid_list[i]);
		if (p) {
			if ((flags & PROC_NOWAITTRANS) == 0) {
				/* wait out exec/exit transitions first */
				proc_transwait(p, 0);
			}
			int callout_ret = callout(p, arg);

			switch (callout_ret) {
			case PROC_RETURNED_DONE:
				proc_rele(p);
				/* FALLTHROUGH */
			case PROC_CLAIMED_DONE:
				goto out;

			case PROC_RETURNED:
				proc_rele(p);
				/* FALLTHROUGH */
			case PROC_CLAIMED:
				break;

			default:
				panic("proc_iterate: callout returned %d for pid %d",
				    callout_ret, pid_list[i]);
				break;
			}
		} else if (flags & PROC_ZOMBPROCLIST) {
			/* pid no longer live: try it as a zombie */
			p = proc_find_zombref(pid_list[i]);
			if (!p) {
				continue;
			}
			int callout_ret = callout(p, arg);

			switch (callout_ret) {
			case PROC_RETURNED_DONE:
				proc_drop_zombref(p);
				/* FALLTHROUGH */
			case PROC_CLAIMED_DONE:
				goto out;

			case PROC_RETURNED:
				proc_drop_zombref(p);
				/* FALLTHROUGH */
			case PROC_CLAIMED:
				break;

			default:
				panic("proc_iterate: callout returned %d for zombie pid %d",
				    callout_ret, pid_list[i]);
				break;
			}
		}
	}

out:
	kfree(pid_list, pid_list_size);
	return 0;

}
2251
/*
 * Shutdown-time iterator: repeatedly pick the first process matching
 * filterfn, drop the list lock, wait for any transition to settle, run
 * callout on it, then rescan from the top (the callout is expected to
 * remove the process from the list).  Returns when nothing matches.
 */
void
proc_rebootscan(
	proc_iterate_fn_t callout,
	void *arg,
	proc_iterate_fn_t filterfn,
	void *filterarg)
{
	proc_t p;

	assert(callout != NULL);

	proc_shutdown_exitcount = 0;

restart_foreach:

	proc_list_lock();

	ALLPROC_FOREACH(p) {
		if ((filterfn != NULL) && filterfn(p, filterarg) == 0) {
			continue;
		}
		/* may fail if p is already exiting; just move on */
		p = proc_ref_locked(p);
		if (!p) {
			continue;
		}

		proc_list_unlock();

		proc_transwait(p, 0);
		(void)callout(p, arg);
		proc_rele(p);

		goto restart_foreach;
	}

	proc_list_unlock();
}
2289
2290 int
2291 proc_childrenwalk(
2292 proc_t parent,
2293 proc_iterate_fn_t callout,
2294 void *arg)
2295 {
2296 pid_t *pid_list;
2297 vm_size_t pid_list_size = 0;
2298 vm_size_t pid_list_size_needed = 0;
2299 int pid_count = 0;
2300 int pid_count_available = 0;
2301
2302 assert(parent != NULL);
2303 assert(callout != NULL);
2304
2305 for (;;) {
2306 proc_list_lock();
2307
2308 pid_count_available = parent->p_childrencnt;
2309 if (pid_count_available == 0) {
2310 proc_list_unlock();
2311 return 0;
2312 }
2313
2314 pid_list_size_needed = pid_count_available * sizeof(pid_t);
2315 if (pid_list_size >= pid_list_size_needed) {
2316 break;
2317 }
2318 proc_list_unlock();
2319
2320 if (pid_list_size != 0) {
2321 kfree(pid_list, pid_list_size);
2322 }
2323 pid_list = kalloc(pid_list_size_needed);
2324 if (!pid_list) {
2325 return 1;
2326 }
2327 pid_list_size = pid_list_size_needed;
2328 }
2329
2330 proc_t p;
2331 PCHILDREN_FOREACH(parent, p) {
2332 if (p->p_stat == SIDL) {
2333 continue;
2334 }
2335
2336 pid_list[pid_count++] = proc_pid(p);
2337 if (pid_count >= pid_count_available) {
2338 break;
2339 }
2340 }
2341
2342 proc_list_unlock();
2343
2344 for (int i = 0; i < pid_count; i++) {
2345 p = proc_find(pid_list[i]);
2346 if (!p) {
2347 continue;
2348 }
2349
2350 int callout_ret = callout(p, arg);
2351
2352 switch (callout_ret) {
2353 case PROC_RETURNED_DONE:
2354 proc_rele(p);
2355 /* FALLTHROUGH */
2356 case PROC_CLAIMED_DONE:
2357 goto out;
2358
2359 case PROC_RETURNED:
2360 proc_rele(p);
2361 /* FALLTHROUGH */
2362 case PROC_CLAIMED:
2363 break;
2364 default:
2365 panic("proc_childrenwalk: callout returned %d for pid %d",
2366 callout_ret, pid_list[i]);
2367 break;
2368 }
2369 }
2370
2371 out:
2372 kfree(pid_list, pid_list_size);
2373 return 0;
2374 }
2375
2376 int
2377 pgrp_iterate(
2378 struct pgrp *pgrp,
2379 unsigned int flags,
2380 proc_iterate_fn_t callout,
2381 void * arg,
2382 proc_iterate_fn_t filterfn,
2383 void * filterarg)
2384 {
2385 pid_t *pid_list;
2386 proc_t p;
2387 vm_size_t pid_list_size = 0;
2388 vm_size_t pid_list_size_needed = 0;
2389 int pid_count = 0;
2390 int pid_count_available = 0;
2391
2392 pid_t pgid;
2393
2394 assert(pgrp != NULL);
2395 assert(callout != NULL);
2396
2397 for (;;) {
2398 pgrp_lock(pgrp);
2399
2400 pid_count_available = pgrp->pg_membercnt;
2401 if (pid_count_available == 0) {
2402 pgrp_unlock(pgrp);
2403 return 0;
2404 }
2405
2406 pid_list_size_needed = pid_count_available * sizeof(pid_t);
2407 if (pid_list_size >= pid_list_size_needed) {
2408 break;
2409 }
2410 pgrp_unlock(pgrp);
2411
2412 if (pid_list_size != 0) {
2413 kfree(pid_list, pid_list_size);
2414 }
2415 pid_list = kalloc(pid_list_size_needed);
2416 if (!pid_list) {
2417 return 1;
2418 }
2419 pid_list_size = pid_list_size_needed;
2420 }
2421
2422 pgid = pgrp->pg_id;
2423
2424 PGMEMBERS_FOREACH(pgrp, p) {
2425 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
2426 continue;;
2427 }
2428 pid_list[pid_count++] = proc_pid(p);
2429 if (pid_count >= pid_count_available) {
2430 break;
2431 }
2432 }
2433
2434 pgrp_unlock(pgrp);
2435
2436 if (flags & PGRP_DROPREF) {
2437 pg_rele(pgrp);
2438 }
2439
2440 for (int i = 0; i< pid_count; i++) {
2441 /* do not handle kernproc */
2442 if (pid_list[i] == 0) {
2443 continue;
2444 }
2445 p = proc_find(pid_list[i]);
2446 if (!p) {
2447 continue;
2448 }
2449 if (p->p_pgrpid != pgid) {
2450 proc_rele(p);
2451 continue;
2452 }
2453
2454 int callout_ret = callout(p, arg);
2455
2456 switch (callout_ret) {
2457 case PROC_RETURNED:
2458 proc_rele(p);
2459 /* FALLTHROUGH */
2460 case PROC_CLAIMED:
2461 break;
2462
2463 case PROC_RETURNED_DONE:
2464 proc_rele(p);
2465 /* FALLTHROUGH */
2466 case PROC_CLAIMED_DONE:
2467 goto out;
2468
2469 default:
2470 panic("pgrp_iterate: callout returned %d for pid %d",
2471 callout_ret, pid_list[i]);
2472 }
2473 }
2474
2475 out:
2476 kfree(pid_list, pid_list_size);
2477 return 0;
2478 }
2479
/*
 * Link child into pgrp: after the parent's list entry when the parent
 * is a member, else at the list head.  The caller supplies a referenced
 * pgrp; a group found in TERMINATE (but not DEAD) state is resurrected
 * since it is gaining a member again.
 */
static void
pgrp_add(struct pgrp * pgrp, struct proc * parent, struct proc * child)
{
	proc_list_lock();
	child->p_pgrp = pgrp;
	child->p_pgrpid = pgrp->pg_id;
	child->p_listflag |= P_LIST_INPGRP;
	/*
	 * When pgrp is being freed , a process can still
	 * request addition using setpgid from bash when
	 * login is terminated (login cycler) return ESRCH
	 * Safe to hold lock due to refcount on pgrp
	 */
	if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
		pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	if ((pgrp->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
		panic("pgrp_add : pgrp is dead adding process");
	proc_list_unlock();

	pgrp_lock(pgrp);
	pgrp->pg_membercnt++;
	if ( parent != PROC_NULL) {
		LIST_INSERT_AFTER(parent, child, p_pglist);
	}else {
		LIST_INSERT_HEAD(&pgrp->pg_members, child, p_pglist);
	}
	pgrp_unlock(pgrp);

	proc_list_lock();
	/* a terminate raced in while we held only the pgrp lock: undo it */
	if (((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (pgrp->pg_membercnt != 0)) {
		pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}
	proc_list_unlock();
}
2516
/*
 * Unlink p from its process group.  If p was the last member, the group
 * reference taken by proc_pgrp() is handed to pgdelete_dropref() to tear
 * the group down; otherwise it is simply released.
 */
static void
pgrp_remove(struct proc * p)
{
	struct pgrp * pg;

	pg = proc_pgrp(p);

	proc_list_lock();
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INPGRP) == 0)
		panic("removing from pglist but no named ref\n");
#endif
	p->p_pgrpid = PGRPID_DEAD;
	p->p_listflag &= ~P_LIST_INPGRP;
	p->p_pgrp = NULL;
	proc_list_unlock();

	if (pg == PGRP_NULL)
		panic("pgrp_remove: pg is NULL");
	pgrp_lock(pg);
	pg->pg_membercnt--;

	if (pg->pg_membercnt < 0)
		panic("pgprp: -ve membercnt pgprp:%p p:%p\n",pg, p);

	LIST_REMOVE(p, p_pglist);
	if (pg->pg_members.lh_first == 0) {
		/* last member: drop our ref via the group destructor */
		pgrp_unlock(pg);
		pgdelete_dropref(pg);
	} else {
		pgrp_unlock(pg);
		pg_rele(pg);
	}
}
2551
2552
2553 /* cannot use proc_pgrp as it maybe stalled */
/*
 * Atomically (with respect to proc_pgrp() lookups) move p from its
 * current process group into newpg.  The P_LIST_PGRPTRANS flag parks
 * concurrent lookups in msleep until the switch completes; anyone
 * waiting is woken at the end.
 */
static void
pgrp_replace(struct proc * p, struct pgrp * newpg)
{
	struct pgrp * oldpg;



	proc_list_lock();

	/* wait for any in-flight transition on p to finish */
	while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
		p->p_listflag |= P_LIST_PGRPTRWAIT;
		(void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
	}

	p->p_listflag |= P_LIST_PGRPTRANS;

	oldpg = p->p_pgrp;
	if (oldpg == PGRP_NULL)
		panic("pgrp_replace: oldpg NULL");
	/* extra ref keeps oldpg alive while we detach p from it */
	oldpg->pg_refcount++;
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INPGRP) == 0)
		panic("removing from pglist but no named ref\n");
#endif
	p->p_pgrpid = PGRPID_DEAD;
	p->p_listflag &= ~P_LIST_INPGRP;
	p->p_pgrp = NULL;

	proc_list_unlock();

	pgrp_lock(oldpg);
	oldpg->pg_membercnt--;
	if (oldpg->pg_membercnt < 0)
		panic("pgprp: -ve membercnt pgprp:%p p:%p\n",oldpg, p);
	LIST_REMOVE(p, p_pglist);
	if (oldpg->pg_members.lh_first == 0) {
		/* p was the last member: destroy the old group */
		pgrp_unlock(oldpg);
		pgdelete_dropref(oldpg);
	} else {
		pgrp_unlock(oldpg);
		pg_rele(oldpg);
	}

	proc_list_lock();
	p->p_pgrp = newpg;
	p->p_pgrpid = newpg->pg_id;
	p->p_listflag |= P_LIST_INPGRP;
	/*
	 * When pgrp is being freed , a process can still
	 * request addition using setpgid from bash when
	 * login is terminated (login cycler) return ESRCH
	 * Safe to hold lock due to refcount on pgrp
	 */
	if ((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
		newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	if ((newpg->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
		panic("pgrp_add : pgrp is dead adding process");
	proc_list_unlock();

	pgrp_lock(newpg);
	newpg->pg_membercnt++;
	LIST_INSERT_HEAD(&newpg->pg_members, p, p_pglist);
	pgrp_unlock(newpg);

	proc_list_lock();
	/* undo a terminate that raced in while we held only the pgrp lock */
	if (((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (newpg->pg_membercnt != 0)) {
		newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	/* transition done: release waiters parked in proc_pgrp()/proc_session() */
	p->p_listflag &= ~P_LIST_PGRPTRANS;
	if ((p->p_listflag & P_LIST_PGRPTRWAIT) == P_LIST_PGRPTRWAIT) {
		p->p_listflag &= ~P_LIST_PGRPTRWAIT;
		wakeup(&p->p_pgrpid);

	}
	proc_list_unlock();
}
2633
/*
 * Acquire the process group's mutex (pg_mlock), serializing access to
 * its member list and counters against other pgrp operations.
 */
void
pgrp_lock(struct pgrp * pgrp)
{
	lck_mtx_lock(&pgrp->pg_mlock);
}
2639
/*
 * Release the process group's mutex taken by pgrp_lock().
 */
void
pgrp_unlock(struct pgrp * pgrp)
{
	lck_mtx_unlock(&pgrp->pg_mlock);
}
2645
/*
 * Acquire the session's mutex (s_mlock).
 */
void
session_lock(struct session * sess)
{
	lck_mtx_lock(&sess->s_mlock);
}
2651
2652
/*
 * Release the session's mutex taken by session_lock().
 */
void
session_unlock(struct session * sess)
{
	lck_mtx_unlock(&sess->s_mlock);
}
2658
2659 struct pgrp *
2660 proc_pgrp(proc_t p)
2661 {
2662 struct pgrp * pgrp;
2663
2664 if (p == PROC_NULL)
2665 return(PGRP_NULL);
2666 proc_list_lock();
2667
2668 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2669 p->p_listflag |= P_LIST_PGRPTRWAIT;
2670 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2671 }
2672
2673 pgrp = p->p_pgrp;
2674
2675 assert(pgrp != NULL);
2676
2677 if (pgrp != PGRP_NULL) {
2678 pgrp->pg_refcount++;
2679 if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) != 0)
2680 panic("proc_pgrp: ref being povided for dead pgrp");
2681 }
2682
2683 proc_list_unlock();
2684
2685 return(pgrp);
2686 }
2687
2688 struct pgrp *
2689 tty_pgrp(struct tty * tp)
2690 {
2691 struct pgrp * pg = PGRP_NULL;
2692
2693 proc_list_lock();
2694 pg = tp->t_pgrp;
2695
2696 if (pg != PGRP_NULL) {
2697 if ((pg->pg_listflags & PGRP_FLAG_DEAD) != 0)
2698 panic("tty_pgrp: ref being povided for dead pgrp");
2699 pg->pg_refcount++;
2700 }
2701 proc_list_unlock();
2702
2703 return(pg);
2704 }
2705
2706 struct session *
2707 proc_session(proc_t p)
2708 {
2709 struct session * sess = SESSION_NULL;
2710
2711 if (p == PROC_NULL)
2712 return(SESSION_NULL);
2713
2714 proc_list_lock();
2715
2716 /* wait during transitions */
2717 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2718 p->p_listflag |= P_LIST_PGRPTRWAIT;
2719 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2720 }
2721
2722 if ((p->p_pgrp != PGRP_NULL) && ((sess = p->p_pgrp->pg_session) != SESSION_NULL)) {
2723 if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
2724 panic("proc_session:returning sesssion ref on terminating session");
2725 sess->s_count++;
2726 }
2727 proc_list_unlock();
2728 return(sess);
2729 }
2730
/*
 * Drop one reference on a session.  When the last reference goes
 * away, mark it terminating, unhash it, mark it dead, destroy its
 * mutex, and return it to the zone.  The S_LIST_TERM/S_LIST_DEAD
 * listflags are set while still holding the proc list lock so that
 * concurrent lookups can detect the teardown.
 */
void
session_rele(struct session *sess)
{
	proc_list_lock();
	if (--sess->s_count == 0) {
		if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
			panic("session_rele: terminating already terminated session");
		sess->s_listflags |= S_LIST_TERM;
		LIST_REMOVE(sess, s_hash);
		sess->s_listflags |= S_LIST_DEAD;
		/* re-check: nobody may have gained a ref while we tore down */
		if (sess->s_count != 0)
			panic("session_rele: freeing session in use");
		proc_list_unlock();
		/* mutex must be destroyed before the zone element is freed */
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_destroy(&sess->s_mlock, proc_mlock_grp);
#else
		lck_mtx_destroy(&sess->s_mlock, proc_lck_grp);
#endif
		FREE_ZONE(sess, sizeof(struct session), M_SESSION);
	} else
		proc_list_unlock();
}
2753
/*
 * Begin a "transition" on proc p.  Only one transition may be in
 * flight per proc (P_LINTRANSIT); other starters block here until it
 * ends.  Returns EDEADLK instead of sleeping when the current
 * transition has already been committed (P_LTRANSCOMMIT) or when the
 * caller passed non_blocking.  On success marks the proc in-transit
 * and records the current thread as holder.  'locked' nonzero means
 * the caller already holds the proc lock.
 */
int
proc_transstart(proc_t p, int locked, int non_blocking)
{
	if (locked == 0)
		proc_lock(p);
	while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
		if (((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT) || non_blocking) {
			if (locked == 0)
				proc_unlock(p);
			return EDEADLK;
		}
		p->p_lflag |= P_LTRANSWAIT;
		msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
	}
	p->p_lflag |= P_LINTRANSIT;
	p->p_transholder = current_thread();
	if (locked == 0)
		proc_unlock(p);
	return 0;
}
2774
/*
 * Mark the in-flight transition on p as committed (P_LTRANSCOMMIT)
 * and wake any waiters sleeping on p_lflag.  Must be called by the
 * thread that started the transition (asserted via p_transholder).
 * 'locked' nonzero means the caller already holds the proc lock.
 */
void
proc_transcommit(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);

	assert ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT);
	assert (p->p_transholder == current_thread());
	p->p_lflag |= P_LTRANSCOMMIT;

	if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
		p->p_lflag &= ~P_LTRANSWAIT;
		wakeup(&p->p_lflag);
	}
	if (locked == 0)
		proc_unlock(p);
}
2792
/*
 * End the transition on p: clear the in-transit/commit flags, drop
 * the holder, and wake any threads sleeping in proc_transstart() or
 * proc_transwait().  'locked' nonzero means the caller already holds
 * the proc lock.
 */
void
proc_transend(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);

	p->p_lflag &= ~( P_LINTRANSIT | P_LTRANSCOMMIT);
	p->p_transholder = NULL;

	if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
		p->p_lflag &= ~P_LTRANSWAIT;
		wakeup(&p->p_lflag);
	}
	if (locked == 0)
		proc_unlock(p);
}
2809
/*
 * Block until any in-flight transition on p completes.  Returns
 * EDEADLK without sleeping when the calling proc is p itself and its
 * transition has been committed (waiting on ourselves would
 * deadlock); returns 0 otherwise.  'locked' nonzero means the caller
 * already holds the proc lock.
 */
int
proc_transwait(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);
	while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
		if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT && current_proc() == p) {
			if (locked == 0)
				proc_unlock(p);
			return EDEADLK;
		}
		p->p_lflag |= P_LTRANSWAIT;
		msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
	}
	if (locked == 0)
		proc_unlock(p);
	return 0;
}
2828
/*
 * Acquire the global lock protecting per-proc knote lists.
 */
void
proc_klist_lock(void)
{
	lck_mtx_lock(proc_klist_mlock);
}
2834
/*
 * Release the global lock taken by proc_klist_lock().
 */
void
proc_klist_unlock(void)
{
	lck_mtx_unlock(proc_klist_mlock);
}
2840
/*
 * Post 'hint' to every knote attached to p, under the klist lock.
 */
void
proc_knote(struct proc * p, long hint)
{
	proc_klist_lock();
	KNOTE(&p->p_klist, hint);
	proc_klist_unlock();
}
2848
/*
 * Detach every knote attached to p and null out each knote's back
 * pointer, so nothing references the proc after it is reaped.
 */
void
proc_knote_drain(struct proc *p)
{
	struct knote *kn = NULL;

	/*
	 * Clear the proc's klist to avoid references after the proc is reaped.
	 */
	proc_klist_lock();
	while ((kn = SLIST_FIRST(&p->p_klist))) {
		kn->kn_ptr.p_proc = PROC_NULL;
		KNOTE_DETACH(&p->p_klist, kn);
	}
	proc_klist_unlock();
}
2864
/*
 * Set the P_LREGISTER flag on p under the proc lock.
 */
void
proc_setregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag |= P_LREGISTER;
	proc_unlock(p);
}
2872
/*
 * Clear the P_LREGISTER flag on p under the proc lock.
 */
void
proc_resetregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag &= ~P_LREGISTER;
	proc_unlock(p);
}
2880
/*
 * Return p's cached process group id.  Reads p->p_pgrpid without
 * taking any lock; callers get a point-in-time snapshot.
 */
pid_t
proc_pgrpid(proc_t p)
{
	return p->p_pgrpid;
}
2886
2887 pid_t
2888 proc_selfpgrpid()
2889 {
2890 return current_proc()->p_pgrpid;
2891 }
2892
2893
2894 /* return control and action states */
2895 int
2896 proc_getpcontrol(int pid, int * pcontrolp)
2897 {
2898 proc_t p;
2899
2900 p = proc_find(pid);
2901 if (p == PROC_NULL)
2902 return(ESRCH);
2903 if (pcontrolp != NULL)
2904 *pcontrolp = p->p_pcaction;
2905
2906 proc_rele(p);
2907 return(0);
2908 }
2909
/*
 * Apply the proc's configured low-swap control action (throttle,
 * suspend, or kill) if no action has been taken on it yet
 * (PROC_ACTION_STATE == 0).  Each case marks the action as taken
 * while still holding the proc lock, then drops the lock before the
 * printf/task_suspend/psignal call.  Always returns PROC_RETURNED.
 */
int
proc_dopcontrol(proc_t p)
{
	int pcontrol;

	proc_lock(p);

	pcontrol = PROC_CONTROL_STATE(p);

	if (PROC_ACTION_STATE(p) == 0) {
		switch(pcontrol) {
		case P_PCTHROTTLE:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: throttling pid %d (%s)\n", p->p_pid, p->p_comm);
			break;

		case P_PCSUSP:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: suspending pid %d (%s)\n", p->p_pid, p->p_comm);
			task_suspend(p->task);
			break;

		case P_PCKILL:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
			psignal(p, SIGKILL);
			break;

		default:
			proc_unlock(p);
		}

	} else
		proc_unlock(p);

	return(PROC_RETURNED);
}
2950
2951
/*
 * Resume a throttled or suspended process. This is an internal interface that's only
 * used by the user level code that presents the GUI when we run out of swap space and
 * hence is restricted to processes with superuser privileges.
 *
 * Returns EPERM (via suser) if the caller is neither the designated
 * resource owner (P_LVMRSRCOWNER) nor superuser, ESRCH if the pid
 * does not exist, 0 otherwise.
 */

int
proc_resetpcontrol(int pid)
{
	proc_t p;
	int pcontrol;
	int error;
	proc_t self = current_proc();

	/* if the process has been validated to handle resource control or root is valid one */
	if (((self->p_lflag & P_LVMRSRCOWNER) == 0) && (error = suser(kauth_cred_get(), 0)))
		return error;

	p = proc_find(pid);
	if (p == PROC_NULL)
		return(ESRCH);

	proc_lock(p);

	pcontrol = PROC_CONTROL_STATE(p);

	/* only undo an action that was actually taken */
	if(PROC_ACTION_STATE(p) !=0) {
		switch(pcontrol) {
		case P_PCTHROTTLE:
			PROC_RESETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: unthrottling pid %d (%s)\n", p->p_pid, p->p_comm);
			break;

		case P_PCSUSP:
			PROC_RESETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: resuming pid %d (%s)\n", p->p_pid, p->p_comm);
			task_resume(p->task);
			break;

		case P_PCKILL:
			/* Huh? */
			/* NOTE(review): SETs (not resets) the action state for an
			 * ignored un-kill request — looks intentional but odd; confirm. */
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: attempt to unkill pid %d (%s) ignored\n", p->p_pid, p->p_comm);
			break;

		default:
			proc_unlock(p);
		}

	} else
		proc_unlock(p);

	proc_rele(p);
	return(0);
}
3010
3011
3012
/*
 * Accumulator filled in by proc_pcontrol_filter() during an all-proc
 * scan when the compressor pool is low on space.
 *   pcs_*  - procs with a control action configured but not yet taken
 *   npcs_* - procs with no control action configured
 *   apcs_* - procs whose control action has already been taken
 * The *_max_size / *_pid / *_uniqueid triples record the largest
 * consumer (by compressed page count) in each class.
 */
struct no_paging_space
{
	uint64_t pcs_max_size;
	uint64_t pcs_uniqueid;
	int pcs_pid;
	int pcs_proc_count;
	uint64_t pcs_total_size;

	uint64_t npcs_max_size;
	uint64_t npcs_uniqueid;
	int npcs_pid;
	int npcs_proc_count;
	uint64_t npcs_total_size;

	int apcs_proc_count;
	uint64_t apcs_total_size;
};
3030
3031
/*
 * proc_iterate() filter that tallies each proc's compressed-pool
 * footprint into the no_paging_space accumulator passed via 'arg',
 * tracking the biggest consumer in each class (actionable,
 * action-already-taken, non-actionable).  Always returns 0, so no
 * proc is selected for the callout — the iteration is used purely
 * for this accounting side effect.
 */
static int
proc_pcontrol_filter(proc_t p, void *arg)
{
	struct no_paging_space *nps;
	uint64_t compressed;

	nps = (struct no_paging_space *)arg;

	compressed = get_task_compressed(p->task);

	if (PROC_CONTROL_STATE(p)) {
		if (PROC_ACTION_STATE(p) == 0) {
			/* actionable, action not yet taken */
			if (compressed > nps->pcs_max_size) {
				nps->pcs_pid = p->p_pid;
				nps->pcs_uniqueid = p->p_uniqueid;
				nps->pcs_max_size = compressed;
			}
			nps->pcs_total_size += compressed;
			nps->pcs_proc_count++;
		} else {
			/* action already taken */
			nps->apcs_total_size += compressed;
			nps->apcs_proc_count++;
		}
	} else {
		/* no control action configured */
		if (compressed > nps->npcs_max_size) {
			nps->npcs_pid = p->p_pid;
			nps->npcs_uniqueid = p->p_uniqueid;
			nps->npcs_max_size = compressed;
		}
		nps->npcs_total_size += compressed;
		nps->npcs_proc_count++;

	}
	return (0);
}
3067
3068
/*
 * No-op callout used with proc_iterate() when only the filter's side
 * effects are wanted; simply keeps the iteration going.
 */
static int
proc_pcontrol_null(__unused proc_t p, __unused void *arg)
{
	return(PROC_RETURNED);
}
3074
3075
3076 /*
3077 * Deal with the low on compressor pool space condition... this function
3078 * gets called when we are approaching the limits of the compressor pool or
3079 * we are unable to create a new swap file.
 * Since this eventually creates a memory deadlock situation, we need to take action to free up
3081 * memory resources (both compressed and uncompressed) in order to prevent the system from hanging completely.
3082 * There are 2 categories of processes to deal with. Those that have an action
3083 * associated with them by the task itself and those that do not. Actionable
3084 * tasks can have one of three categories specified: ones that
3085 * can be killed immediately, ones that should be suspended, and ones that should
3086 * be throttled. Processes that do not have an action associated with them are normally
3087 * ignored unless they are utilizing such a large percentage of the compressor pool (currently 50%)
3088 * that only by killing them can we hope to put the system back into a usable state.
3089 */
3090
3091 #define NO_PAGING_SPACE_DEBUG 0
3092
3093 extern uint64_t vm_compressor_pages_compressed(void);
3094
3095 struct timeval last_no_space_action = {0, 0};
3096
3097 int
3098 no_paging_space_action()
3099 {
3100 proc_t p;
3101 struct no_paging_space nps;
3102 struct timeval now;
3103
3104 /*
3105 * Throttle how often we come through here. Once every 5 seconds should be plenty.
3106 */
3107 microtime(&now);
3108
3109 if (now.tv_sec <= last_no_space_action.tv_sec + 5)
3110 return (0);
3111
3112 /*
3113 * Examine all processes and find the biggest (biggest is based on the number of pages this
3114 * task has in the compressor pool) that has been marked to have some action
3115 * taken when swap space runs out... we also find the biggest that hasn't been marked for
3116 * action.
3117 *
3118 * If the biggest non-actionable task is over the "dangerously big" threashold (currently 50% of
3119 * the total number of pages held by the compressor, we go ahead and kill it since no other task
3120 * can have any real effect on the situation. Otherwise, we go after the actionable process.
3121 */
3122 bzero(&nps, sizeof(nps));
3123
3124 proc_iterate(PROC_ALLPROCLIST, proc_pcontrol_null, (void *)NULL, proc_pcontrol_filter, (void *)&nps);
3125
3126 #if NO_PAGING_SPACE_DEBUG
3127 printf("low swap: npcs_proc_count = %d, npcs_total_size = %qd, npcs_max_size = %qd\n",
3128 nps.npcs_proc_count, nps.npcs_total_size, nps.npcs_max_size);
3129 printf("low swap: pcs_proc_count = %d, pcs_total_size = %qd, pcs_max_size = %qd\n",
3130 nps.pcs_proc_count, nps.pcs_total_size, nps.pcs_max_size);
3131 printf("low swap: apcs_proc_count = %d, apcs_total_size = %qd\n",
3132 nps.apcs_proc_count, nps.apcs_total_size);
3133 #endif
3134 if (nps.npcs_max_size > (vm_compressor_pages_compressed() * 50) / 100) {
3135 /*
3136 * for now we'll knock out any task that has more then 50% of the pages
3137 * held by the compressor
3138 */
3139 if ((p = proc_find(nps.npcs_pid)) != PROC_NULL) {
3140
3141 if (nps.npcs_uniqueid == p->p_uniqueid) {
3142 /*
3143 * verify this is still the same process
3144 * in case the proc exited and the pid got reused while
3145 * we were finishing the proc_iterate and getting to this point
3146 */
3147 last_no_space_action = now;
3148
3149 printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
3150 psignal(p, SIGKILL);
3151
3152 proc_rele(p);
3153
3154 return (0);
3155 }
3156
3157 proc_rele(p);
3158 }
3159 }
3160
3161 if (nps.pcs_max_size > 0) {
3162 if ((p = proc_find(nps.pcs_pid)) != PROC_NULL) {
3163
3164 if (nps.pcs_uniqueid == p->p_uniqueid) {
3165 /*
3166 * verify this is still the same process
3167 * in case the proc exited and the pid got reused while
3168 * we were finishing the proc_iterate and getting to this point
3169 */
3170 last_no_space_action = now;
3171
3172 proc_dopcontrol(p);
3173
3174 proc_rele(p);
3175
3176 return (1);
3177 }
3178
3179 proc_rele(p);
3180 }
3181 }
3182 last_no_space_action = now;
3183
3184 printf("low swap: unable to find any eligible processes to take action on\n");
3185
3186 return (0);
3187 }
3188
/*
 * proc_trace_log system call: ask the task identified by
 * (uap->pid, uap->uniqueid) to send its trace memory.  Requires the
 * PRIV_PROC_TRACE_INSPECT privilege.  Returns EPERM without that
 * privilege, ENOENT if the pid does not exist or the uniqueid no
 * longer matches (pid reuse), EINVAL if task_send_trace_memory()
 * fails, 0 on success.
 */
int
proc_trace_log(__unused proc_t p, struct proc_trace_log_args *uap, __unused int *retval)
{
	int ret = 0;
	proc_t target_proc = PROC_NULL;
	pid_t target_pid = uap->pid;
	uint64_t target_uniqueid = uap->uniqueid;
	task_t target_task = NULL;

	if (priv_check_cred(kauth_cred_get(), PRIV_PROC_TRACE_INSPECT, 0)) {
		ret = EPERM;
		goto out;
	}
	target_proc = proc_find(target_pid);
	if (target_proc != PROC_NULL) {
		/* guard against pid reuse between lookup and use */
		if (target_uniqueid != proc_uniqueid(target_proc)) {
			ret = ENOENT;
			goto out;
		}

		target_task = proc_task(target_proc);
		if (task_send_trace_memory(target_task, target_pid, target_uniqueid)) {
			ret = EINVAL;
			goto out;
		}
	} else
		ret = ENOENT;

out:
	if (target_proc != PROC_NULL)
		proc_rele(target_proc);
	return (ret);
}
3222
#if VM_SCAN_FOR_SHADOW_CHAIN
extern int vm_map_shadow_max(vm_map_t map);
int proc_shadow_max(void);
/*
 * Debug helper: walk every proc on the allproc list and return the
 * deepest VM object shadow chain found in any task's map.  Procs
 * still being created (SIDL) or lacking a task/map are skipped.
 */
int proc_shadow_max(void)
{
	int retval, max;
	proc_t p;
	task_t task;
	vm_map_t map;

	max = 0;
	proc_list_lock();
	for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
		if (p->p_stat == SIDL)
			continue;
		task = p->task;
		if (task == NULL) {
			continue;
		}
		map = get_task_map(task);
		if (map == NULL) {
			continue;
		}
		retval = vm_map_shadow_max(map);
		if (retval > max) {
			max = retval;
		}
	}
	proc_list_unlock();
	return max;
}
#endif /* VM_SCAN_FOR_SHADOW_CHAIN */
3255
3256 void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid);
3257 void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid)
3258 {
3259 if (target_proc != NULL) {
3260 target_proc->p_responsible_pid = responsible_pid;
3261 }
3262 return;
3263 }
3264
3265 int
3266 proc_chrooted(proc_t p)
3267 {
3268 int retval = 0;
3269
3270 if (p) {
3271 proc_fdlock(p);
3272 retval = (p->p_fd->fd_rdir != NULL) ? 1 : 0;
3273 proc_fdunlock(p);
3274 }
3275
3276 return retval;
3277 }
3278
3279 void *
3280 proc_get_uthread_uu_threadlist(void * uthread_v)
3281 {
3282 uthread_t uth = (uthread_t)uthread_v;
3283 return (uth != NULL) ? uth->uu_threadlist : NULL;
3284 }