1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)kern_proc.c 8.4 (Berkeley) 1/4/94
62 */
63 /*
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
67 * Version 2.0.
68 */
69 /* HISTORY
70 * 04-Aug-97 Umesh Vaishampayan (umeshv@apple.com)
71 * Added current_proc_EXTERNAL() function for the use of kernel
72 * loadable modules.
73 *
74 * 05-Jun-95 Mac Gillon (mgillon) at NeXT
75 * New version based on 3.3NS and 4.4
76 */
77
78
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/kernel.h>
82 #include <sys/proc_internal.h>
83 #include <sys/acct.h>
84 #include <sys/wait.h>
85 #include <sys/file_internal.h>
86 #include <sys/uio.h>
87 #include <sys/malloc.h>
88 #include <sys/lock.h>
89 #include <sys/mbuf.h>
90 #include <sys/ioctl.h>
91 #include <sys/tty.h>
92 #include <sys/signalvar.h>
93 #include <sys/syslog.h>
94 #include <sys/sysctl.h>
95 #include <sys/sysproto.h>
96 #include <sys/kauth.h>
97 #include <sys/codesign.h>
98 #include <sys/kernel_types.h>
99 #include <sys/ubc.h>
100 #include <kern/kalloc.h>
101 #include <kern/task.h>
102 #include <kern/coalition.h>
103 #include <sys/coalition.h>
104 #include <kern/assert.h>
105 #include <vm/vm_protos.h>
106 #include <vm/vm_map.h> /* vm_map_switch_protect() */
107 #include <vm/vm_pageout.h>
108 #include <mach/task.h>
109 #include <mach/message.h>
110 #include <sys/priv.h>
111 #include <sys/proc_info.h>
112 #include <sys/bsdtask_info.h>
113 #include <sys/persona.h>
114
115 #if CONFIG_CSR
116 #include <sys/csr.h>
117 #endif
118
119 #if CONFIG_MEMORYSTATUS
120 #include <sys/kern_memorystatus.h>
121 #endif
122
123 #if CONFIG_MACF
124 #include <security/mac_framework.h>
125 #endif
126
127 #include <libkern/crypto/sha1.h>
128
129 /*
130 * Structure associated with user caching.
131 */
132 struct uidinfo {
133 LIST_ENTRY(uidinfo) ui_hash;
134 uid_t ui_uid;
135 long ui_proccnt;
136 };
137 #define UIHASH(uid) (&uihashtbl[(uid) & uihash])
138 LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
139 u_long uihash; /* size of hash table - 1 */
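/*
 * Editorial note: a quick worked example of UIHASH(). hashinit() sizes
 * the table to a power of two and stores (size - 1) in uihash, so the
 * bucket index is just the low bits of the uid. Assuming a 16-bucket
 * table (uihash == 15):
 *
 *	UIHASH(501)  -> &uihashtbl[501 & 15]  -> bucket 5
 *	UIHASH(1001) -> &uihashtbl[1001 & 15] -> bucket 9
 */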
140
141 /*
142 * Other process lists
143 */
144 struct pidhashhead *pidhashtbl;
145 u_long pidhash;
146 struct pgrphashhead *pgrphashtbl;
147 u_long pgrphash;
148 struct sesshashhead *sesshashtbl;
149 u_long sesshash;
150
151 struct proclist allproc;
152 struct proclist zombproc;
153 extern struct tty cons;
154
155 extern int cs_debug;
156
157 #if DEVELOPMENT || DEBUG
158 extern int cs_enforcement_enable;
159 #endif
160
161 #if DEBUG
162 #define __PROC_INTERNAL_DEBUG 1
163 #endif
164 #if CONFIG_COREDUMP
165 /* Name to give to core files */
166 #if defined(XNU_TARGET_OS_BRIDGE)
167 __XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN+1] = {"/private/var/internal/%N.core"};
168 #elif CONFIG_EMBEDDED
169 __XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN+1] = {"/private/var/cores/%N.core"};
170 #else
171 __XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN+1] = {"/cores/core.%P"};
172 #endif
173 #endif
174
175 #if PROC_REF_DEBUG
176 #include <kern/backtrace.h>
177 #endif
178
179 static void orphanpg(struct pgrp * pg);
180 void proc_name_kdp(task_t t, char * buf, int size);
181 void * proc_get_uthread_uu_threadlist(void * uthread_v);
182 int proc_threadname_kdp(void * uth, char * buf, size_t size);
183 void proc_starttime_kdp(void * p, uint64_t * tv_sec, uint64_t * tv_usec, uint64_t * abstime);
184 char * proc_name_address(void * p);
185
186 /* TODO: make a header that's exported and usable in osfmk */
187 char *proc_best_name(proc_t p);
188
189 static void pgrp_add(struct pgrp * pgrp, proc_t parent, proc_t child);
190 static void pgrp_remove(proc_t p);
191 static void pgrp_replace(proc_t p, struct pgrp *pgrp);
192 static void pgdelete_dropref(struct pgrp *pgrp);
193 extern void pg_rele_dropref(struct pgrp * pgrp);
194 static int csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaudittoken);
195 static boolean_t proc_parent_is_currentproc(proc_t p);
196
197 struct fixjob_iterargs {
198 struct pgrp * pg;
199 struct session * mysession;
200 int entering;
201 };
202
203 int fixjob_callback(proc_t, void *);
204
205 uint64_t get_current_unique_pid(void);
206
207
208 uint64_t
209 get_current_unique_pid(void)
210 {
211 proc_t p = current_proc();
212
213 if (p)
214 return p->p_uniqueid;
215 else
216 return 0;
217 }
218
219 /*
220 * Initialize global process hashing structures.
221 */
222 void
223 procinit(void)
224 {
225 LIST_INIT(&allproc);
226 LIST_INIT(&zombproc);
227 pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
228 pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
229 sesshashtbl = hashinit(maxproc / 4, M_PROC, &sesshash);
230 uihashtbl = hashinit(maxproc / 16, M_PROC, &uihash);
231 #if CONFIG_PERSONAS
232 personas_bootstrap();
233 #endif
234 }
235
236 /*
237 * Change the count associated with the number of processes
238 * a given user is using. This routine protects the uihash
239 * with the proc list lock.
240 */
241 int
242 chgproccnt(uid_t uid, int diff)
243 {
244 struct uidinfo *uip;
245 struct uidinfo *newuip = NULL;
246 struct uihashhead *uipp;
247 int retval;
248
249 again:
250 proc_list_lock();
251 uipp = UIHASH(uid);
252 for (uip = uipp->lh_first; uip != 0; uip = uip->ui_hash.le_next)
253 if (uip->ui_uid == uid)
254 break;
255 if (uip) {
256 uip->ui_proccnt += diff;
257 if (uip->ui_proccnt > 0) {
258 retval = uip->ui_proccnt;
259 proc_list_unlock();
260 goto out;
261 }
262 if (uip->ui_proccnt < 0)
263 panic("chgproccnt: procs < 0");
264 LIST_REMOVE(uip, ui_hash);
265 retval = 0;
266 proc_list_unlock();
267 FREE_ZONE(uip, sizeof(*uip), M_PROC);
268 goto out;
269 }
270 if (diff <= 0) {
271 if (diff == 0) {
272 retval = 0;
273 proc_list_unlock();
274 goto out;
275 }
276 panic("chgproccnt: lost user");
277 }
278 if (newuip != NULL) {
279 uip = newuip;
280 newuip = NULL;
281 LIST_INSERT_HEAD(uipp, uip, ui_hash);
282 uip->ui_uid = uid;
283 uip->ui_proccnt = diff;
284 retval = diff;
285 proc_list_unlock();
286 goto out;
287 }
288 proc_list_unlock();
289 MALLOC_ZONE(newuip, struct uidinfo *, sizeof(*uip), M_PROC, M_WAITOK);
290 if (newuip == NULL)
291 panic("chgproccnt: M_PROC zone depleted");
292 goto again;
293 out:
294 if (newuip != NULL)
295 FREE_ZONE(newuip, sizeof(*uip), M_PROC);
296 return(retval);
297 }
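/*
 * Editorial sketch: how a fork-path caller might use chgproccnt() to
 * enforce a per-user process limit. proc_limitcheck() is a hypothetical
 * name invented for illustration; the +1/-1 pairing mirrors what the
 * real fork and exit paths do, and maxprocperuid is the global limit.
 */
static int
proc_limitcheck(uid_t uid)
{
	/* Tentatively charge the new process to this uid. */
	if (chgproccnt(uid, 1) > maxprocperuid) {
		/* Over the limit: undo the charge and fail the fork. */
		(void)chgproccnt(uid, -1);
		return (EAGAIN);
	}
	return (0);
}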
298
299 /*
300 * Is p an inferior of the current process?
301 */
302 int
303 inferior(proc_t p)
304 {
305 int retval = 0;
306
307 proc_list_lock();
308 for (; p != current_proc(); p = p->p_pptr)
309 if (p->p_pid == 0)
310 goto out;
311 retval = 1;
312 out:
313 proc_list_unlock();
314 return(retval);
315 }
316
317 /*
318 * Is p an inferior of t ?
319 */
320 int
321 isinferior(proc_t p, proc_t t)
322 {
323 int retval = 0;
324 int nchecked = 0;
325 proc_t start = p;
326
327 /* if p==t they are not inferior */
328 if (p == t)
329 return(0);
330
331 proc_list_lock();
332 for (; p != t; p = p->p_pptr) {
333 nchecked++;
334
335 /* Detect here if we're in a cycle */
336 if ((p->p_pid == 0) || (p->p_pptr == start) || (nchecked >= nprocs))
337 goto out;
338 }
339 retval = 1;
340 out:
341 proc_list_unlock();
342 return(retval);
343 }
344
345 int
346 proc_isinferior(int pid1, int pid2)
347 {
348 proc_t p = PROC_NULL;
349 proc_t t = PROC_NULL;
350 int retval = 0;
351
352 if (((p = proc_find(pid1)) != (proc_t)0 ) && ((t = proc_find(pid2)) != (proc_t)0))
353 retval = isinferior(p, t);
354
355 if (p != PROC_NULL)
356 proc_rele(p);
357 if (t != PROC_NULL)
358 proc_rele(t);
359
360 return(retval);
361 }
362
363 proc_t
364 proc_find(int pid)
365 {
366 return(proc_findinternal(pid, 0));
367 }
368
369 proc_t
370 proc_findinternal(int pid, int locked)
371 {
372 proc_t p = PROC_NULL;
373
374 if (locked == 0) {
375 proc_list_lock();
376 }
377
378 p = pfind_locked(pid);
379 if ((p == PROC_NULL) || (p != proc_ref_locked(p)))
380 p = PROC_NULL;
381
382 if (locked == 0) {
383 proc_list_unlock();
384 }
385
386 return(p);
387 }
388
389 proc_t
390 proc_findthread(thread_t thread)
391 {
392 proc_t p = PROC_NULL;
393 struct uthread *uth;
394
395 proc_list_lock();
396 uth = get_bsdthread_info(thread);
397 if (uth && (uth->uu_flag & UT_VFORK))
398 p = uth->uu_proc;
399 else
400 p = (proc_t)(get_bsdthreadtask_info(thread));
401 p = proc_ref_locked(p);
402 proc_list_unlock();
403 return(p);
404 }
405
406 void
407 uthread_reset_proc_refcount(void *uthread) {
408 uthread_t uth;
409
410 uth = (uthread_t) uthread;
411 uth->uu_proc_refcount = 0;
412
413 #if PROC_REF_DEBUG
414 if (proc_ref_tracking_disabled) {
415 return;
416 }
417
418 uth->uu_pindex = 0;
419 #endif
420 }
421
422 #if PROC_REF_DEBUG
423 int
424 uthread_get_proc_refcount(void *uthread) {
425 uthread_t uth;
426
427 if (proc_ref_tracking_disabled) {
428 return 0;
429 }
430
431 uth = (uthread_t) uthread;
432
433 return uth->uu_proc_refcount;
434 }
435 #endif
436
437 static void
438 record_procref(proc_t p __unused, int count) {
439 uthread_t uth;
440
441 uth = current_uthread();
442 uth->uu_proc_refcount += count;
443
444 #if PROC_REF_DEBUG
445 if (proc_ref_tracking_disabled) {
446 return;
447 }
448
449 if (count == 1) {
450 if (uth->uu_pindex < NUM_PROC_REFS_TO_TRACK) {
451 backtrace((uintptr_t *) &uth->uu_proc_pcs[uth->uu_pindex], PROC_REF_STACK_DEPTH);
452
453 uth->uu_proc_ps[uth->uu_pindex] = p;
454 uth->uu_pindex++;
455 }
456 }
457 #endif
458 }
459
460 static boolean_t
461 uthread_needs_to_wait_in_proc_refwait(void) {
462 uthread_t uth = current_uthread();
463
464 /*
465 * Allow threads holding no proc refs to wait
466 * in proc_refwait; allowing threads holding
467 * proc refs to wait in proc_refwait causes
468 * deadlocks and makes proc_find non-reentrant.
469 */
470 if (uth->uu_proc_refcount == 0)
471 return TRUE;
472
473 return FALSE;
474 }
475
476 int
477 proc_rele(proc_t p)
478 {
479 proc_list_lock();
480 proc_rele_locked(p);
481 proc_list_unlock();
482
483 return(0);
484 }
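/*
 * Editorial sketch: the canonical lookup pattern built on proc_find()
 * and proc_rele(). Every successful proc_find() must be balanced by a
 * proc_rele(), or proc_refdrain() will block forever when the target
 * exits. example_get_ppid() is an invented name for illustration.
 */
static int
example_get_ppid(pid_t pid, pid_t *out)
{
	proc_t p = proc_find(pid);

	if (p == PROC_NULL)
		return (ESRCH);
	*out = proc_ppid(p);	/* safe: we hold a ref on p */
	proc_rele(p);
	return (0);
}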
485
486 proc_t
487 proc_self(void)
488 {
489 struct proc * p;
490
491 p = current_proc();
492
493 proc_list_lock();
494 if (p != proc_ref_locked(p))
495 p = PROC_NULL;
496 proc_list_unlock();
497 return(p);
498 }
499
500
501 proc_t
502 proc_ref_locked(proc_t p)
503 {
504 proc_t p1 = p;
505 int pid = proc_pid(p);
506
507 retry:
508 /*
509 * If the process is still in creation, or the proc got recycled
510 * during the msleep, then return failure.
511 */
512 if ((p == PROC_NULL) || (p1 != p) || ((p->p_listflag & P_LIST_INCREATE) != 0))
513 return (PROC_NULL);
514
515 /*
516 * Do not return a process marked for termination, or one on
517 * which proc_refdrain was called without ref wait.
518 * Wait for proc_refdrain_with_refwait to complete if the
519 * process is in refdrain and the refwait flag is set, unless
520 * the current thread is already holding a proc ref
521 * for any proc.
522 */
523 if ((p->p_stat != SZOMB) &&
524 ((p->p_listflag & P_LIST_EXITED) == 0) &&
525 ((p->p_listflag & P_LIST_DEAD) == 0) &&
526 (((p->p_listflag & (P_LIST_DRAIN | P_LIST_DRAINWAIT)) == 0) ||
527 ((p->p_listflag & P_LIST_REFWAIT) != 0))) {
528 if ((p->p_listflag & P_LIST_REFWAIT) != 0 && uthread_needs_to_wait_in_proc_refwait()) {
529 msleep(&p->p_listflag, proc_list_mlock, 0, "proc_refwait", 0);
530 /*
531 * the proc might have been recycled since we dropped
532 * the proc list lock, get the proc again.
533 */
534 p = pfind_locked(pid);
535 goto retry;
536 }
537 p->p_refcount++;
538 record_procref(p, 1);
539 }
540 else
541 p1 = PROC_NULL;
542
543 return(p1);
544 }
545
546 void
547 proc_rele_locked(proc_t p)
548 {
549
550 if (p->p_refcount > 0) {
551 p->p_refcount--;
552 record_procref(p, -1);
553 if ((p->p_refcount == 0) && ((p->p_listflag & P_LIST_DRAINWAIT) == P_LIST_DRAINWAIT)) {
554 p->p_listflag &= ~P_LIST_DRAINWAIT;
555 wakeup(&p->p_refcount);
556 }
557 } else
558 panic("proc_rele_locked -ve ref\n");
559
560 }
561
562 proc_t
563 proc_find_zombref(int pid)
564 {
565 proc_t p;
566
567 proc_list_lock();
568
569 again:
570 p = pfind_locked(pid);
571
572 /* should we bail? */
573 if ((p == PROC_NULL) /* not found */
574 || ((p->p_listflag & P_LIST_INCREATE) != 0) /* not created yet */
575 || ((p->p_listflag & P_LIST_EXITED) == 0)) { /* not started exit */
576
577 proc_list_unlock();
578 return (PROC_NULL);
579 }
580
581 /* If someone else is controlling the (unreaped) zombie - wait */
582 if ((p->p_listflag & P_LIST_WAITING) != 0) {
583 (void)msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
584 goto again;
585 }
586 p->p_listflag |= P_LIST_WAITING;
587
588 proc_list_unlock();
589
590 return(p);
591 }
592
593 void
594 proc_drop_zombref(proc_t p)
595 {
596 proc_list_lock();
597 if ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
598 p->p_listflag &= ~P_LIST_WAITING;
599 wakeup(&p->p_stat);
600 }
601 proc_list_unlock();
602 }
603
604
605 void
606 proc_refdrain(proc_t p)
607 {
608 proc_refdrain_with_refwait(p, FALSE);
609 }
610
611 proc_t
612 proc_refdrain_with_refwait(proc_t p, boolean_t get_ref_and_allow_wait)
613 {
614 boolean_t initexec = FALSE;
615 proc_list_lock();
616
617 p->p_listflag |= P_LIST_DRAIN;
618 if (get_ref_and_allow_wait) {
619 /*
620 * All the calls to proc_ref_locked will wait
621 * for the flag to get cleared before returning a ref,
622 * unless the current thread is already holding a proc ref
623 * for any proc.
624 */
625 p->p_listflag |= P_LIST_REFWAIT;
626 if (p == initproc) {
627 initexec = TRUE;
628 }
629 }
630
631 /* Do not wait in ref drain for launchd exec */
632 while (p->p_refcount && !initexec) {
633 p->p_listflag |= P_LIST_DRAINWAIT;
634 msleep(&p->p_refcount, proc_list_mlock, 0, "proc_refdrain", 0);
635 }
636
637 p->p_listflag &= ~P_LIST_DRAIN;
638 if (!get_ref_and_allow_wait) {
639 p->p_listflag |= P_LIST_DEAD;
640 } else {
641 /* Return a ref to the caller */
642 p->p_refcount++;
643 record_procref(p, 1);
644 }
645
646 proc_list_unlock();
647
648 if (get_ref_and_allow_wait) {
649 return (p);
650 }
651 return NULL;
652 }
653
654 void
655 proc_refwake(proc_t p)
656 {
657 proc_list_lock();
658 p->p_listflag &= ~P_LIST_REFWAIT;
659 wakeup(&p->p_listflag);
660 proc_list_unlock();
661 }
662
663 proc_t
664 proc_parentholdref(proc_t p)
665 {
666 proc_t parent = PROC_NULL;
667 proc_t pp;
668 int loopcnt = 0;
669
670
671 proc_list_lock();
672 loop:
673 pp = p->p_pptr;
674 if ((pp == PROC_NULL) || (pp->p_stat == SZOMB) || ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED))) {
675 parent = PROC_NULL;
676 goto out;
677 }
678
679 if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == P_LIST_CHILDDRSTART) {
680 pp->p_listflag |= P_LIST_CHILDDRWAIT;
681 msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
682 loopcnt++;
683 if (loopcnt == 5) {
684 parent = PROC_NULL;
685 goto out;
686 }
687 goto loop;
688 }
689
690 if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == 0) {
691 pp->p_parentref++;
692 parent = pp;
693 goto out;
694 }
695
696 out:
697 proc_list_unlock();
698 return(parent);
699 }
700 int
701 proc_parentdropref(proc_t p, int listlocked)
702 {
703 if (listlocked == 0)
704 proc_list_lock();
705
706 if (p->p_parentref > 0) {
707 p->p_parentref--;
708 if ((p->p_parentref == 0) && ((p->p_listflag & P_LIST_PARENTREFWAIT) == P_LIST_PARENTREFWAIT)) {
709 p->p_listflag &= ~P_LIST_PARENTREFWAIT;
710 wakeup(&p->p_parentref);
711 }
712 } else
713 panic("proc_parentdropref -ve ref\n");
714 if (listlocked == 0)
715 proc_list_unlock();
716
717 return(0);
718 }
719
720 void
721 proc_childdrainstart(proc_t p)
722 {
723 #if __PROC_INTERNAL_DEBUG
724 if ((p->p_listflag & P_LIST_CHILDDRSTART) == P_LIST_CHILDDRSTART)
725 panic("proc_childdrainstart: childdrain already started\n");
726 #endif
727 p->p_listflag |= P_LIST_CHILDDRSTART;
728 /* wait for all that hold parentrefs to drop */
729 while (p->p_parentref > 0) {
730 p->p_listflag |= P_LIST_PARENTREFWAIT;
731 msleep(&p->p_parentref, proc_list_mlock, 0, "proc_childdrainstart", 0);
732 }
733 }
734
735
736 void
737 proc_childdrainend(proc_t p)
738 {
739 #if __PROC_INTERNAL_DEBUG
740 if (p->p_childrencnt > 0)
741 panic("exiting: children still hanging around\n");
742 #endif
743 p->p_listflag |= P_LIST_CHILDDRAINED;
744 if ((p->p_listflag & (P_LIST_CHILDLKWAIT | P_LIST_CHILDDRWAIT)) != 0) {
745 p->p_listflag &= ~(P_LIST_CHILDLKWAIT | P_LIST_CHILDDRWAIT);
746 wakeup(&p->p_childrencnt);
747 }
748 }
749
750 void
751 proc_checkdeadrefs(__unused proc_t p)
752 {
753 #if __PROC_INTERNAL_DEBUG
754 if ((p->p_listflag & P_LIST_INHASH) != 0)
755 panic("proc being freed and still in hash %p: %u\n", p, p->p_listflag);
756 if (p->p_childrencnt != 0)
757 panic("proc being freed and pending children cnt %p:%d\n", p, p->p_childrencnt);
758 if (p->p_refcount != 0)
759 panic("proc being freed and pending refcount %p:%d\n", p, p->p_refcount);
760 if (p->p_parentref != 0)
761 panic("proc being freed and pending parentrefs %p:%d\n", p, p->p_parentref);
762 #endif
763 }
764
765 int
766 proc_pid(proc_t p)
767 {
768 if (p != NULL)
769 return (p->p_pid);
770 return -1;
771 }
772
773 int
774 proc_ppid(proc_t p)
775 {
776 if (p != NULL)
777 return (p->p_ppid);
778 return -1;
779 }
780
781 int
782 proc_selfpid(void)
783 {
784 return (current_proc()->p_pid);
785 }
786
787 int
788 proc_selfppid(void)
789 {
790 return (current_proc()->p_ppid);
791 }
792
793 int
794 proc_selfcsflags(void)
795 {
796 return (current_proc()->p_csflags);
797 }
798
799 #if CONFIG_DTRACE
800 static proc_t
801 dtrace_current_proc_vforking(void)
802 {
803 thread_t th = current_thread();
804 struct uthread *ut = get_bsdthread_info(th);
805
806 if (ut &&
807 ((ut->uu_flag & (UT_VFORK|UT_VFORKING)) == (UT_VFORK|UT_VFORKING))) {
808 /*
809 * Handle the narrow window where we're in the vfork syscall,
810 * but we're not quite ready to claim (in particular, to DTrace)
811 * that we're running as the child.
812 */
813 return (get_bsdtask_info(get_threadtask(th)));
814 }
815 return (current_proc());
816 }
817
818 int
819 dtrace_proc_selfpid(void)
820 {
821 return (dtrace_current_proc_vforking()->p_pid);
822 }
823
824 int
825 dtrace_proc_selfppid(void)
826 {
827 return (dtrace_current_proc_vforking()->p_ppid);
828 }
829
830 uid_t
831 dtrace_proc_selfruid(void)
832 {
833 return (dtrace_current_proc_vforking()->p_ruid);
834 }
835 #endif /* CONFIG_DTRACE */
836
837 proc_t
838 proc_parent(proc_t p)
839 {
840 proc_t parent;
841 proc_t pp;
842
843 proc_list_lock();
844 loop:
845 pp = p->p_pptr;
846 parent = proc_ref_locked(pp);
847 if ((parent == PROC_NULL) && (pp != PROC_NULL) && (pp->p_stat != SZOMB) && ((pp->p_listflag & P_LIST_EXITED) != 0) && ((pp->p_listflag & P_LIST_CHILDDRAINED) == 0)) {
848 pp->p_listflag |= P_LIST_CHILDLKWAIT;
849 msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
850 goto loop;
851 }
852 proc_list_unlock();
853 return(parent);
854 }
855
856 static boolean_t
857 proc_parent_is_currentproc(proc_t p)
858 {
859 boolean_t ret = FALSE;
860
861 proc_list_lock();
862 if (p->p_pptr == current_proc())
863 ret = TRUE;
864
865 proc_list_unlock();
866 return ret;
867 }
868
869 void
870 proc_name(int pid, char * buf, int size)
871 {
872 proc_t p;
873
874 if ((p = proc_find(pid)) != PROC_NULL) {
875 strlcpy(buf, &p->p_comm[0], size);
876 proc_rele(p);
877 }
878 }
879
880 void
881 proc_name_kdp(task_t t, char * buf, int size)
882 {
883 proc_t p = get_bsdtask_info(t);
884 if (p == PROC_NULL)
885 return;
886
887 if ((size_t)size > sizeof(p->p_comm))
888 strlcpy(buf, &p->p_name[0], MIN((int)sizeof(p->p_name), size));
889 else
890 strlcpy(buf, &p->p_comm[0], MIN((int)sizeof(p->p_comm), size));
891 }
892
893 int
894 proc_threadname_kdp(void * uth, char * buf, size_t size)
895 {
896 if (size < MAXTHREADNAMESIZE) {
897 /* this is really just a protective measure for the future in
898 * case the thread name size in stackshot gets out of sync with
899 * the BSD max thread name size. Note that bsd_getthreadname
900 * doesn't take input buffer size into account. */
901 return -1;
902 }
903
904 if (uth != NULL) {
905 bsd_getthreadname(uth, buf);
906 }
907 return 0;
908 }
909
910 /* Note that this function is generally going to be called from stackshot,
911 * and the arguments will be coming from a struct which is declared packed,
912 * so the input arguments will in general be unaligned. We have to handle
913 * that here. */
914 void
915 proc_starttime_kdp(void *p, uint64_t *tv_sec, uint64_t *tv_usec, uint64_t *abstime)
916 {
917 proc_t pp = (proc_t)p;
918 struct uint64p {
919 uint64_t val;
920 } __attribute__((packed));
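	/*
	 * Editorial note: routing the stores through this packed one-field
	 * struct forces the compiler to emit alignment-safe accesses; a
	 * direct assignment through tv_sec/tv_usec would assume natural
	 * 8-byte alignment, which stackshot's packed records don't provide.
	 */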
921
922 if (pp != PROC_NULL) {
923 if (tv_sec != NULL)
924 ((struct uint64p *)tv_sec)->val = pp->p_start.tv_sec;
925 if (tv_usec != NULL)
926 ((struct uint64p *)tv_usec)->val = pp->p_start.tv_usec;
927 if (abstime != NULL) {
928 if (pp->p_stats != NULL)
929 *abstime = pp->p_stats->ps_start;
930 else
931 *abstime = 0;
932 }
933 }
934 }
935
936 char *
937 proc_name_address(void *p)
938 {
939 return &((proc_t)p)->p_comm[0];
940 }
941
942 char *
943 proc_best_name(proc_t p)
944 {
945 if (p->p_name[0] != 0)
946 return (&p->p_name[0]);
947 return (&p->p_comm[0]);
948 }
949
950 void
951 proc_selfname(char * buf, int size)
952 {
953 proc_t p;
954
955 if ((p = current_proc()) != (proc_t)0) {
956 strlcpy(buf, &p->p_comm[0], size);
957 }
958 }
959
960 void
961 proc_signal(int pid, int signum)
962 {
963 proc_t p;
964
965 if ((p = proc_find(pid)) != PROC_NULL) {
966 psignal(p, signum);
967 proc_rele(p);
968 }
969 }
970
971 int
972 proc_issignal(int pid, sigset_t mask)
973 {
974 proc_t p;
975 int error = 0;
976
977 if ((p = proc_find(pid)) != PROC_NULL) {
978 error = proc_pendingsignals(p, mask);
979 proc_rele(p);
980 }
981
982 return(error);
983 }
984
985 int
986 proc_noremotehang(proc_t p)
987 {
988 int retval = 0;
989
990 if (p)
991 retval = p->p_flag & P_NOREMOTEHANG;
992 return(retval? 1: 0);
993
994 }
995
996 int
997 proc_exiting(proc_t p)
998 {
999 int retval = 0;
1000
1001 if (p)
1002 retval = p->p_lflag & P_LEXIT;
1003 return(retval? 1: 0);
1004 }
1005
1006 int
1007 proc_forcequota(proc_t p)
1008 {
1009 int retval = 0;
1010
1011 if (p)
1012 retval = p->p_flag & P_FORCEQUOTA;
1013 return(retval? 1: 0);
1014
1015 }
1016
1017 int
1018 proc_suser(proc_t p)
1019 {
1020 kauth_cred_t my_cred;
1021 int error;
1022
1023 my_cred = kauth_cred_proc_ref(p);
1024 error = suser(my_cred, &p->p_acflag);
1025 kauth_cred_unref(&my_cred);
1026 return(error);
1027 }
1028
1029 task_t
1030 proc_task(proc_t proc)
1031 {
1032 return (task_t)proc->task;
1033 }
1034
1035 /*
1036 * Obtain the first thread in a process
1037 *
1038 * XXX This is a bad thing to do; it exists predominantly to support the
1039 * XXX use of proc_t's in places that should really be using
1040 * XXX thread_t's instead. This maintains historical behaviour, but really
1041 * XXX needs an audit of the context (proxy vs. not) to clean up.
1042 */
1043 thread_t
1044 proc_thread(proc_t proc)
1045 {
1046 uthread_t uth = TAILQ_FIRST(&proc->p_uthlist);
1047
1048 if (uth != NULL)
1049 return(uth->uu_context.vc_thread);
1050
1051 return(NULL);
1052 }
1053
1054 kauth_cred_t
1055 proc_ucred(proc_t p)
1056 {
1057 return(p->p_ucred);
1058 }
1059
1060 struct uthread *
1061 current_uthread()
1062 {
1063 thread_t th = current_thread();
1064
1065 return((struct uthread *)get_bsdthread_info(th));
1066 }
1067
1068
1069 int
1070 proc_is64bit(proc_t p)
1071 {
1072 return(IS_64BIT_PROCESS(p));
1073 }
1074
1075 int
1076 proc_pidversion(proc_t p)
1077 {
1078 return(p->p_idversion);
1079 }
1080
1081 uint32_t
1082 proc_persona_id(proc_t p)
1083 {
1084 return (uint32_t)persona_id_from_proc(p);
1085 }
1086
1087 uint32_t
1088 proc_getuid(proc_t p)
1089 {
1090 return(p->p_uid);
1091 }
1092
1093 uint32_t
1094 proc_getgid(proc_t p)
1095 {
1096 return(p->p_gid);
1097 }
1098
1099 uint64_t
1100 proc_uniqueid(proc_t p)
1101 {
1102 return(p->p_uniqueid);
1103 }
1104
1105 uint64_t
1106 proc_puniqueid(proc_t p)
1107 {
1108 return(p->p_puniqueid);
1109 }
1110
1111 void
1112 proc_coalitionids(__unused proc_t p, __unused uint64_t ids[COALITION_NUM_TYPES])
1113 {
1114 #if CONFIG_COALITIONS
1115 task_coalition_ids(p->task, ids);
1116 #else
1117 memset(ids, 0, sizeof(uint64_t [COALITION_NUM_TYPES]));
1118 #endif
1119 return;
1120 }
1121
1122 uint64_t
1123 proc_was_throttled(proc_t p)
1124 {
1125 return (p->was_throttled);
1126 }
1127
1128 uint64_t
1129 proc_did_throttle(proc_t p)
1130 {
1131 return (p->did_throttle);
1132 }
1133
1134 int
1135 proc_getcdhash(proc_t p, unsigned char *cdhash)
1136 {
1137 return vn_getcdhash(p->p_textvp, p->p_textoff, cdhash);
1138 }
1139
1140 void
1141 proc_getexecutableuuid(proc_t p, unsigned char *uuidbuf, unsigned long size)
1142 {
1143 if (size >= sizeof(p->p_uuid)) {
1144 memcpy(uuidbuf, p->p_uuid, sizeof(p->p_uuid));
1145 }
1146 }
1147
1148 /* Return vnode for executable with an iocount. Must be released with vnode_put() */
1149 vnode_t
1150 proc_getexecutablevnode(proc_t p)
1151 {
1152 vnode_t tvp = p->p_textvp;
1153
1154 if (tvp != NULLVP) {
1155 if (vnode_getwithref(tvp) == 0) {
1156 return tvp;
1157 }
1158 }
1159
1160 return NULLVP;
1161 }
1162
1163
1164 void
1165 bsd_set_dependency_capable(task_t task)
1166 {
1167 proc_t p = get_bsdtask_info(task);
1168
1169 if (p) {
1170 OSBitOrAtomic(P_DEPENDENCY_CAPABLE, &p->p_flag);
1171 }
1172 }
1173
1174
1175 #ifndef __arm__
1176 int
1177 IS_64BIT_PROCESS(proc_t p)
1178 {
1179 if (p && (p->p_flag & P_LP64))
1180 return(1);
1181 else
1182 return(0);
1183 }
1184 #endif
1185
1186 /*
1187 * Locate a process by number
1188 */
1189 proc_t
1190 pfind_locked(pid_t pid)
1191 {
1192 proc_t p;
1193 #if DEBUG
1194 proc_t q;
1195 #endif
1196
1197 if (!pid)
1198 return (kernproc);
1199
1200 for (p = PIDHASH(pid)->lh_first; p != 0; p = p->p_hash.le_next) {
1201 if (p->p_pid == pid) {
1202 #if DEBUG
1203 for (q = p->p_hash.le_next; q != 0; q = q->p_hash.le_next) {
1204 if ((p != q) && (q->p_pid == pid))
1205 panic("two procs with same pid %p:%p:%d:%d\n", p, q, p->p_pid, q->p_pid);
1206 }
1207 #endif
1208 return (p);
1209 }
1210 }
1211 return (NULL);
1212 }
1213
1214 /*
1215 * Locate a zombie by PID
1216 */
1217 __private_extern__ proc_t
1218 pzfind(pid_t pid)
1219 {
1220 proc_t p;
1221
1222
1223 proc_list_lock();
1224
1225 for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next)
1226 if (p->p_pid == pid)
1227 break;
1228
1229 proc_list_unlock();
1230
1231 return (p);
1232 }
1233
1234 /*
1235 * Locate a process group by number
1236 */
1237
1238 struct pgrp *
1239 pgfind(pid_t pgid)
1240 {
1241 struct pgrp * pgrp;
1242
1243 proc_list_lock();
1244 pgrp = pgfind_internal(pgid);
1245 if ((pgrp == NULL) || ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) != 0))
1246 pgrp = PGRP_NULL;
1247 else
1248 pgrp->pg_refcount++;
1249 proc_list_unlock();
1250 return(pgrp);
1251 }
1252
1253
1254
1255 struct pgrp *
1256 pgfind_internal(pid_t pgid)
1257 {
1258 struct pgrp *pgrp;
1259
1260 for (pgrp = PGRPHASH(pgid)->lh_first; pgrp != 0; pgrp = pgrp->pg_hash.le_next)
1261 if (pgrp->pg_id == pgid)
1262 return (pgrp);
1263 return (NULL);
1264 }
1265
1266 void
1267 pg_rele(struct pgrp * pgrp)
1268 {
1269 if (pgrp == PGRP_NULL)
1270 return;
1271 pg_rele_dropref(pgrp);
1272 }
1273
1274 void
1275 pg_rele_dropref(struct pgrp * pgrp)
1276 {
1277 proc_list_lock();
1278 if ((pgrp->pg_refcount == 1) && ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) == PGRP_FLAG_TERMINATE)) {
1279 proc_list_unlock();
1280 pgdelete_dropref(pgrp);
1281 return;
1282 }
1283
1284 pgrp->pg_refcount--;
1285 proc_list_unlock();
1286 }
1287
1288 struct session *
1289 session_find_internal(pid_t sessid)
1290 {
1291 struct session *sess;
1292
1293 for (sess = SESSHASH(sessid)->lh_first; sess != 0; sess = sess->s_hash.le_next)
1294 if (sess->s_sid == sessid)
1295 return (sess);
1296 return (NULL);
1297 }
1298
1299
1300 /*
1301 * Make a new process ready to become a useful member of society by making it
1302 * visible in all the right places and initializing its own lists to empty.
1303 *
1304 * Parameters: parent The parent of the process to insert
1305 * child The child process to insert
1306 *
1307 * Returns: (void)
1308 *
1309 * Notes: Insert a child process into the parent's process group, assign
1310 * the child the parent process pointer and PPID of the parent,
1311 * place it on the parent's p_children list as a sibling,
1312 * initialize its own child list, place it in the allproc list,
1313 * insert it in the proper hash bucket, and initialize its
1314 * event list.
1315 */
1316 void
1317 pinsertchild(proc_t parent, proc_t child)
1318 {
1319 struct pgrp * pg;
1320
1321 LIST_INIT(&child->p_children);
1322 TAILQ_INIT(&child->p_evlist);
1323 child->p_pptr = parent;
1324 child->p_ppid = parent->p_pid;
1325 child->p_puniqueid = parent->p_uniqueid;
1326 child->p_xhighbits = 0;
1327
1328 pg = proc_pgrp(parent);
1329 pgrp_add(pg, parent, child);
1330 pg_rele(pg);
1331
1332 proc_list_lock();
1333
1334 #if CONFIG_MEMORYSTATUS
1335 memorystatus_add(child, TRUE);
1336 #endif
1337
1338 parent->p_childrencnt++;
1339 LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
1340
1341 LIST_INSERT_HEAD(&allproc, child, p_list);
1342 /* mark the completion of proc creation */
1343 child->p_listflag &= ~P_LIST_INCREATE;
1344
1345 proc_list_unlock();
1346 }
1347
1348 /*
1349 * Move p to a new or existing process group (and session)
1350 *
1351 * Returns: 0 Success
1352 * ESRCH No such process
1353 */
1354 int
1355 enterpgrp(proc_t p, pid_t pgid, int mksess)
1356 {
1357 struct pgrp *pgrp;
1358 struct pgrp *mypgrp;
1359 struct session * procsp;
1360
1361 pgrp = pgfind(pgid);
1362 mypgrp = proc_pgrp(p);
1363 procsp = proc_session(p);
1364
1365 #if DIAGNOSTIC
1366 if (pgrp != NULL && mksess) /* firewalls */
1367 panic("enterpgrp: setsid into non-empty pgrp");
1368 if (SESS_LEADER(p, procsp))
1369 panic("enterpgrp: session leader attempted setpgrp");
1370 #endif
1371 if (pgrp == PGRP_NULL) {
1372 pid_t savepid = p->p_pid;
1373 proc_t np = PROC_NULL;
1374 /*
1375 * new process group
1376 */
1377 #if DIAGNOSTIC
1378 if (p->p_pid != pgid)
1379 panic("enterpgrp: new pgrp and pid != pgid");
1380 #endif
1381 MALLOC_ZONE(pgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP,
1382 M_WAITOK);
1383 if (pgrp == NULL)
1384 panic("enterpgrp: M_PGRP zone depleted");
1385 if ((np = proc_find(savepid)) == NULL || np != p) {
1386 if (np != PROC_NULL)
1387 proc_rele(np);
1388 if (mypgrp != PGRP_NULL)
1389 pg_rele(mypgrp);
1390 if (procsp != SESSION_NULL)
1391 session_rele(procsp);
1392 FREE_ZONE(pgrp, sizeof(struct pgrp), M_PGRP);
1393 return (ESRCH);
1394 }
1395 proc_rele(np);
1396 if (mksess) {
1397 struct session *sess;
1398
1399 /*
1400 * new session
1401 */
1402 MALLOC_ZONE(sess, struct session *,
1403 sizeof(struct session), M_SESSION, M_WAITOK);
1404 if (sess == NULL)
1405 panic("enterpgrp: M_SESSION zone depleted");
1406 sess->s_leader = p;
1407 sess->s_sid = p->p_pid;
1408 sess->s_count = 1;
1409 sess->s_ttyvp = NULL;
1410 sess->s_ttyp = TTY_NULL;
1411 sess->s_flags = 0;
1412 sess->s_listflags = 0;
1413 sess->s_ttypgrpid = NO_PID;
1414 #if CONFIG_FINE_LOCK_GROUPS
1415 lck_mtx_init(&sess->s_mlock, proc_mlock_grp, proc_lck_attr);
1416 #else
1417 lck_mtx_init(&sess->s_mlock, proc_lck_grp, proc_lck_attr);
1418 #endif
1419 bcopy(procsp->s_login, sess->s_login,
1420 sizeof(sess->s_login));
1421 OSBitAndAtomic(~((uint32_t)P_CONTROLT), &p->p_flag);
1422 proc_list_lock();
1423 LIST_INSERT_HEAD(SESSHASH(sess->s_sid), sess, s_hash);
1424 proc_list_unlock();
1425 pgrp->pg_session = sess;
1426 #if DIAGNOSTIC
1427 if (p != current_proc())
1428 panic("enterpgrp: mksession and p != curproc");
1429 #endif
1430 } else {
1431 proc_list_lock();
1432 pgrp->pg_session = procsp;
1433
1434 if ((pgrp->pg_session->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
1435 panic("enterpgrp: providing ref to terminating session");
1436 pgrp->pg_session->s_count++;
1437 proc_list_unlock();
1438 }
1439 pgrp->pg_id = pgid;
1440 #if CONFIG_FINE_LOCK_GROUPS
1441 lck_mtx_init(&pgrp->pg_mlock, proc_mlock_grp, proc_lck_attr);
1442 #else
1443 lck_mtx_init(&pgrp->pg_mlock, proc_lck_grp, proc_lck_attr);
1444 #endif
1445 LIST_INIT(&pgrp->pg_members);
1446 pgrp->pg_membercnt = 0;
1447 pgrp->pg_jobc = 0;
1448 proc_list_lock();
1449 pgrp->pg_refcount = 1;
1450 pgrp->pg_listflags = 0;
1451 LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
1452 proc_list_unlock();
1453 } else if (pgrp == mypgrp) {
1454 pg_rele(pgrp);
1455 if (mypgrp != NULL)
1456 pg_rele(mypgrp);
1457 if (procsp != SESSION_NULL)
1458 session_rele(procsp);
1459 return (0);
1460 }
1461
1462 if (procsp != SESSION_NULL)
1463 session_rele(procsp);
1464 /*
1465 * Adjust eligibility of affected pgrps to participate in job control.
1466 * Increment eligibility counts before decrementing, otherwise we
1467 * could reach 0 spuriously during the first call.
1468 */
1469 fixjobc(p, pgrp, 1);
1470 fixjobc(p, mypgrp, 0);
1471
1472 if (mypgrp != PGRP_NULL)
1473 pg_rele(mypgrp);
1474 pgrp_replace(p, pgrp);
1475 pg_rele(pgrp);
1476
1477 return(0);
1478 }
1479
1480 /*
1481 * remove process from process group
1482 */
1483 int
1484 leavepgrp(proc_t p)
1485 {
1486
1487 pgrp_remove(p);
1488 return (0);
1489 }
1490
1491 /*
1492 * delete a process group
1493 */
1494 static void
1495 pgdelete_dropref(struct pgrp *pgrp)
1496 {
1497 struct tty *ttyp;
1498 int emptypgrp = 1;
1499 struct session *sessp;
1500
1501
1502 pgrp_lock(pgrp);
1503 if (pgrp->pg_membercnt != 0) {
1504 emptypgrp = 0;
1505 }
1506 pgrp_unlock(pgrp);
1507
1508 proc_list_lock();
1509 pgrp->pg_refcount--;
1510 if ((emptypgrp == 0) || (pgrp->pg_membercnt != 0)) {
1511 proc_list_unlock();
1512 return;
1513 }
1514
1515 pgrp->pg_listflags |= PGRP_FLAG_TERMINATE;
1516
1517 if (pgrp->pg_refcount > 0) {
1518 proc_list_unlock();
1519 return;
1520 }
1521
1522 pgrp->pg_listflags |= PGRP_FLAG_DEAD;
1523 LIST_REMOVE(pgrp, pg_hash);
1524
1525 proc_list_unlock();
1526
1527 ttyp = SESSION_TP(pgrp->pg_session);
1528 if (ttyp != TTY_NULL) {
1529 if (ttyp->t_pgrp == pgrp) {
1530 tty_lock(ttyp);
1531 /* Re-check after acquiring the lock */
1532 if (ttyp->t_pgrp == pgrp) {
1533 ttyp->t_pgrp = NULL;
1534 pgrp->pg_session->s_ttypgrpid = NO_PID;
1535 }
1536 tty_unlock(ttyp);
1537 }
1538 }
1539
1540 proc_list_lock();
1541
1542 sessp = pgrp->pg_session;
1543 if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
1544 panic("pg_deleteref: manipulating refs of already terminating session");
1545 if (--sessp->s_count == 0) {
1546 if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
1547 panic("pg_deleteref: terminating already terminated session");
1548 sessp->s_listflags |= S_LIST_TERM;
1549 ttyp = SESSION_TP(sessp);
1550 LIST_REMOVE(sessp, s_hash);
1551 proc_list_unlock();
1552 if (ttyp != TTY_NULL) {
1553 tty_lock(ttyp);
1554 if (ttyp->t_session == sessp)
1555 ttyp->t_session = NULL;
1556 tty_unlock(ttyp);
1557 }
1558 proc_list_lock();
1559 sessp->s_listflags |= S_LIST_DEAD;
1560 if (sessp->s_count != 0)
1561 panic("pg_deleteref: freeing session in use");
1562 proc_list_unlock();
1563 #if CONFIG_FINE_LOCK_GROUPS
1564 lck_mtx_destroy(&sessp->s_mlock, proc_mlock_grp);
1565 #else
1566 lck_mtx_destroy(&sessp->s_mlock, proc_lck_grp);
1567 #endif
1568 FREE_ZONE(sessp, sizeof(struct session), M_SESSION);
1569 } else
1570 proc_list_unlock();
1571 #if CONFIG_FINE_LOCK_GROUPS
1572 lck_mtx_destroy(&pgrp->pg_mlock, proc_mlock_grp);
1573 #else
1574 lck_mtx_destroy(&pgrp->pg_mlock, proc_lck_grp);
1575 #endif
1576 FREE_ZONE(pgrp, sizeof(*pgrp), M_PGRP);
1577 }
1578
1579
1580 /*
1581 * Adjust pgrp jobc counters when the specified process changes process group.
1582 * We count the number of processes in each process group that "qualify"
1583 * the group for terminal job control (those with a parent in a different
1584 * process group of the same session). If that count reaches zero, the
1585 * process group becomes orphaned. Check both the specified process'
1586 * process group and that of its children.
1587 * entering == 0 => p is leaving specified group.
1588 * entering == 1 => p is entering specified group.
1589 */
1590 int
1591 fixjob_callback(proc_t p, void * arg)
1592 {
1593 struct fixjob_iterargs *fp;
1594 struct pgrp * pg, *hispg;
1595 struct session * mysession, *hissess;
1596 int entering;
1597
1598 fp = (struct fixjob_iterargs *)arg;
1599 pg = fp->pg;
1600 mysession = fp->mysession;
1601 entering = fp->entering;
1602
1603 hispg = proc_pgrp(p);
1604 hissess = proc_session(p);
1605
1606 if ((hispg != pg) &&
1607 (hissess == mysession)) {
1608 pgrp_lock(hispg);
1609 if (entering) {
1610 hispg->pg_jobc++;
1611 pgrp_unlock(hispg);
1612 } else if (--hispg->pg_jobc == 0) {
1613 pgrp_unlock(hispg);
1614 orphanpg(hispg);
1615 } else
1616 pgrp_unlock(hispg);
1617 }
1618 if (hissess != SESSION_NULL)
1619 session_rele(hissess);
1620 if (hispg != PGRP_NULL)
1621 pg_rele(hispg);
1622
1623 return(PROC_RETURNED);
1624 }
1625
1626 void
1627 fixjobc(proc_t p, struct pgrp *pgrp, int entering)
1628 {
1629 struct pgrp *hispgrp = PGRP_NULL;
1630 struct session *hissess = SESSION_NULL;
1631 struct session *mysession = pgrp->pg_session;
1632 proc_t parent;
1633 struct fixjob_iterargs fjarg;
1634 boolean_t proc_parent_self;
1635
1636 /*
1637 * Check if p's parent is the current proc; if so, there is no need to
1638 * take a ref. Calling proc_parent with the current proc as parent may
1639 * deadlock if the current proc is exiting.
1640 */
1641 proc_parent_self = proc_parent_is_currentproc(p);
1642 if (proc_parent_self)
1643 parent = current_proc();
1644 else
1645 parent = proc_parent(p);
1646
1647 if (parent != PROC_NULL) {
1648 hispgrp = proc_pgrp(parent);
1649 hissess = proc_session(parent);
1650 if (!proc_parent_self)
1651 proc_rele(parent);
1652 }
1653
1654
1655 /*
1656 * Check p's parent to see whether p qualifies its own process
1657 * group; if so, adjust count for p's process group.
1658 */
1659 if ((hispgrp != pgrp) &&
1660 (hissess == mysession)) {
1661 pgrp_lock(pgrp);
1662 if (entering) {
1663 pgrp->pg_jobc++;
1664 pgrp_unlock(pgrp);
1665 } else if (--pgrp->pg_jobc == 0) {
1666 pgrp_unlock(pgrp);
1667 orphanpg(pgrp);
1668 } else
1669 pgrp_unlock(pgrp);
1670 }
1671
1672 if (hissess != SESSION_NULL)
1673 session_rele(hissess);
1674 if (hispgrp != PGRP_NULL)
1675 pg_rele(hispgrp);
1676
1677 /*
1678 * Check this process' children to see whether they qualify
1679 * their process groups; if so, adjust counts for children's
1680 * process groups.
1681 */
1682 fjarg.pg = pgrp;
1683 fjarg.mysession = mysession;
1684 fjarg.entering = entering;
1685 proc_childrenwalk(p, fixjob_callback, &fjarg);
1686 }
1687
1688 /*
1689 * A process group has become orphaned; if there are any stopped processes in
1690 * the group, hang up all processes in that group.
1691 */
1692 static void
1693 orphanpg(struct pgrp *pgrp)
1694 {
1695 pid_t *pid_list;
1696 proc_t p;
1697 vm_size_t pid_list_size = 0;
1698 vm_size_t pid_list_size_needed = 0;
1699 int pid_count = 0;
1700 int pid_count_available = 0;
1701
1702 assert(pgrp != NULL);
1703
1704 /* allocate outside of the pgrp_lock */
1705 for (;;) {
1706 pgrp_lock(pgrp);
1707
1708 boolean_t should_iterate = FALSE;
1709 pid_count_available = 0;
1710
1711 PGMEMBERS_FOREACH(pgrp, p) {
1712 pid_count_available++;
1713
1714 if (p->p_stat == SSTOP) {
1715 should_iterate = TRUE;
1716 }
1717 }
1718
1719 if (pid_count_available == 0 || !should_iterate) {
1720 pgrp_unlock(pgrp);
1721 return;
1722 }
1723
1724 pid_list_size_needed = pid_count_available * sizeof(pid_t);
1725 if (pid_list_size >= pid_list_size_needed) {
1726 break;
1727 }
1728 pgrp_unlock(pgrp);
1729
1730 if (pid_list_size != 0) {
1731 kfree(pid_list, pid_list_size);
1732 }
1733 pid_list = kalloc(pid_list_size_needed);
1734 if (!pid_list) {
1735 return;
1736 }
1737 pid_list_size = pid_list_size_needed;
1738 }
1739
1740 /* no orphaned processes */
1741 if (pid_list_size == 0) {
1742 pgrp_unlock(pgrp);
1743 return;
1744 }
1745
1746 PGMEMBERS_FOREACH(pgrp, p) {
1747 pid_list[pid_count++] = proc_pid(p);
1748 if (pid_count >= pid_count_available) {
1749 break;
1750 }
1751 }
1752 pgrp_unlock(pgrp);
1753
1754 if (pid_count == 0) {
1755 goto out;
1756 }
1757
1758 for (int i = 0; i < pid_count; i++) {
1759 /* do not handle kernproc */
1760 if (pid_list[i] == 0) {
1761 continue;
1762 }
1763 p = proc_find(pid_list[i]);
1764 if (!p) {
1765 continue;
1766 }
1767
1768 proc_transwait(p, 0);
1769 pt_setrunnable(p);
1770 psignal(p, SIGHUP);
1771 psignal(p, SIGCONT);
1772 proc_rele(p);
1773 }
1774
1775 out:
1776 kfree(pid_list, pid_list_size);
1777 return;
1778 }
1779
1780 int
1781 proc_is_classic(proc_t p __unused)
1782 {
1783 return (0);
1784 }
1785
1786 /* XXX Why does this function exist? Need to kill it off... */
1787 proc_t
1788 current_proc_EXTERNAL(void)
1789 {
1790 return (current_proc());
1791 }
1792
1793 int
1794 proc_is_forcing_hfs_case_sensitivity(proc_t p)
1795 {
1796 return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY) ? 1 : 0;
1797 }
1798
1799 #if CONFIG_COREDUMP
1800 /*
1801 * proc_core_name(name, uid, pid)
1802 * Expand the name described in corefilename, using name, uid, and pid.
1803 * corefilename is a printf-like string, with three format specifiers:
1804 * %N name of process ("name")
1805 * %P process id (pid)
1806 * %U user id (uid)
1807 * For example, "%N.core" is the default; core dumps can be disabled completely
1808 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
1809 * This is controlled by the sysctl variable kern.corefile (see above).
1810 */
1811 __private_extern__ int
1812 proc_core_name(const char *name, uid_t uid, pid_t pid, char *cf_name,
1813 size_t cf_name_len)
1814 {
1815 const char *format, *appendstr;
1816 char id_buf[11]; /* Buffer for pid/uid -- max value is 4B (10 digits + NUL) */
1817 size_t i, l, n;
1818
1819 if (cf_name == NULL)
1820 goto toolong;
1821
1822 format = corefilename;
1823 for (i = 0, n = 0; n < cf_name_len && format[i]; i++) {
1824 switch (format[i]) {
1825 case '%': /* Format character */
1826 i++;
1827 switch (format[i]) {
1828 case '%':
1829 appendstr = "%";
1830 break;
1831 case 'N': /* process name */
1832 appendstr = name;
1833 break;
1834 case 'P': /* process id */
1835 snprintf(id_buf, sizeof(id_buf), "%u", pid);
1836 appendstr = id_buf;
1837 break;
1838 case 'U': /* user id */
1839 snprintf(id_buf, sizeof(id_buf), "%u", uid);
1840 appendstr = id_buf;
1841 break;
1842 case '\0': /* format string ended in % symbol */
1843 goto endofstring;
1844 default:
1845 appendstr = "";
1846 log(LOG_ERR,
1847 "Unknown format character %c in `%s'\n",
1848 format[i], format);
1849 }
1850 l = strlen(appendstr);
1851 if ((n + l) >= cf_name_len)
1852 goto toolong;
1853 bcopy(appendstr, cf_name + n, l);
1854 n += l;
1855 break;
1856 default:
1857 cf_name[n++] = format[i];
1858 }
1859 }
1860 if (format[i] != '\0')
1861 goto toolong;
1862 return (0);
1863 toolong:
1864 log(LOG_ERR, "pid %ld (%s), uid (%u): corename is too long\n",
1865 (long)pid, name, (uint32_t)uid);
1866 return (1);
1867 endofstring:
1868 log(LOG_ERR, "pid %ld (%s), uid (%u): unexpected end of string after %% token\n",
1869 (long)pid, name, (uint32_t)uid);
1870 return (1);
1871 }
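/*
 * Editorial sketch: a worked expansion of the template grammar above.
 * The caller and values are invented for illustration; the expected
 * result follows directly from the %N/%P/%U rules documented in the
 * block comment before proc_core_name().
 */
static void
example_core_name(void)
{
	char cf_name[MAXPATHLEN];

	/*
	 * With corefilename set to "/cores/%U/%N-%P", a process named
	 * "mediaserverd" with uid 501 and pid 120 expands to
	 * "/cores/501/mediaserverd-120".
	 */
	(void)proc_core_name("mediaserverd", 501, 120, cf_name,
	    sizeof(cf_name));
}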
1872 #endif /* CONFIG_COREDUMP */
1873
1874 /* Code Signing related routines */
1875
1876 int
1877 csops(__unused proc_t p, struct csops_args *uap, __unused int32_t *retval)
1878 {
1879 return(csops_internal(uap->pid, uap->ops, uap->useraddr,
1880 uap->usersize, USER_ADDR_NULL));
1881 }
1882
1883 int
1884 csops_audittoken(__unused proc_t p, struct csops_audittoken_args *uap, __unused int32_t *retval)
1885 {
1886 if (uap->uaudittoken == USER_ADDR_NULL)
1887 return(EINVAL);
1888 return(csops_internal(uap->pid, uap->ops, uap->useraddr,
1889 uap->usersize, uap->uaudittoken));
1890 }
1891
1892 static int
1893 csops_copy_token(void *start, size_t length, user_size_t usize, user_addr_t uaddr)
1894 {
1895 char fakeheader[8] = { 0 };
1896 int error;
1897
1898 if (usize < sizeof(fakeheader))
1899 return ERANGE;
1900
1901 /* if no blob, fill in zero header */
1902 if (NULL == start) {
1903 start = fakeheader;
1904 length = sizeof(fakeheader);
1905 } else if (usize < length) {
1906 /* ... if input too short, copy out length of entitlement */
1907 uint32_t length32 = htonl((uint32_t)length);
1908 memcpy(&fakeheader[4], &length32, sizeof(length32));
1909
1910 error = copyout(fakeheader, uaddr, sizeof(fakeheader));
1911 if (error == 0)
1912 return ERANGE; /* input buffer too short, ERANGE signals that */
1913 return error;
1914 }
1915 return copyout(start, uaddr, length);
1916 }
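/*
 * Editorial sketch of the userland side of the header trick above:
 * probe csops() with an 8-byte buffer, read the big-endian length at
 * offset 4 of the returned fake header, then retry with a buffer of
 * that size. Error handling is elided; this illustrates the ERANGE
 * contract, not production code.
 */
#if 0	/* userland illustration, not kernel code */
	uint8_t header[8];
	uint32_t length;
	void *blob;

	/* First call fails with ERANGE but fills in the length field. */
	csops(pid, CS_OPS_ENTITLEMENTS_BLOB, header, sizeof(header));
	memcpy(&length, header + 4, sizeof(length));
	length = ntohl(length);

	blob = malloc(length);
	csops(pid, CS_OPS_ENTITLEMENTS_BLOB, blob, length);
#endif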
1917
1918 static int
1919 csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaudittoken)
1920 {
1921 size_t usize = (size_t)CAST_DOWN(size_t, usersize);
1922 proc_t pt;
1923 int forself;
1924 int error;
1925 vnode_t tvp;
1926 off_t toff;
1927 unsigned char cdhash[SHA1_RESULTLEN];
1928 audit_token_t token;
1929 unsigned int upid = 0, uidversion = 0;
1930
1931 forself = error = 0;
1932
1933 if (pid == 0)
1934 pid = proc_selfpid();
1935 if (pid == proc_selfpid())
1936 forself = 1;
1937
1938
1939 switch (ops) {
1940 case CS_OPS_STATUS:
1941 case CS_OPS_CDHASH:
1942 case CS_OPS_PIDOFFSET:
1943 case CS_OPS_ENTITLEMENTS_BLOB:
1944 case CS_OPS_IDENTITY:
1945 case CS_OPS_BLOB:
1946 break; /* not restricted to root */
1947 default:
1948 if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE)
1949 return(EPERM);
1950 break;
1951 }
1952
1953 pt = proc_find(pid);
1954 if (pt == PROC_NULL)
1955 return(ESRCH);
1956
1957 upid = pt->p_pid;
1958 uidversion = pt->p_idversion;
1959 if (uaudittoken != USER_ADDR_NULL) {
1960
1961 error = copyin(uaudittoken, &token, sizeof(audit_token_t));
1962 if (error != 0)
1963 goto out;
1964 /* verify the audit token pid/idversion matches with proc */
1965 if ((token.val[5] != upid) || (token.val[7] != uidversion)) {
1966 error = ESRCH;
1967 goto out;
1968 }
1969 }
1970
1971 #if CONFIG_MACF
1972 switch (ops) {
1973 case CS_OPS_MARKINVALID:
1974 case CS_OPS_MARKHARD:
1975 case CS_OPS_MARKKILL:
1976 case CS_OPS_MARKRESTRICT:
1977 case CS_OPS_SET_STATUS:
1978 case CS_OPS_CLEARINSTALLER:
1979 case CS_OPS_CLEARPLATFORM:
1980 if ((error = mac_proc_check_set_cs_info(current_proc(), pt, ops)))
1981 goto out;
1982 break;
1983 default:
1984 if ((error = mac_proc_check_get_cs_info(current_proc(), pt, ops)))
1985 goto out;
1986 }
1987 #endif
1988
1989 switch (ops) {
1990
1991 case CS_OPS_STATUS: {
1992 uint32_t retflags;
1993
1994 proc_lock(pt);
1995 retflags = pt->p_csflags;
1996 if (cs_enforcement(pt))
1997 retflags |= CS_ENFORCEMENT;
1998 if (csproc_get_platform_binary(pt))
1999 retflags |= CS_PLATFORM_BINARY;
2000 if (csproc_get_platform_path(pt))
2001 retflags |= CS_PLATFORM_PATH;
2002 proc_unlock(pt);
2003
2004 if (uaddr != USER_ADDR_NULL)
2005 error = copyout(&retflags, uaddr, sizeof(uint32_t));
2006 break;
2007 }
2008 case CS_OPS_MARKINVALID:
2009 proc_lock(pt);
2010 if ((pt->p_csflags & CS_VALID) == CS_VALID) { /* is currently valid */
2011 pt->p_csflags &= ~CS_VALID; /* set invalid */
2012 if ((pt->p_csflags & CS_KILL) == CS_KILL) {
2013 pt->p_csflags |= CS_KILLED;
2014 proc_unlock(pt);
2015 if (cs_debug) {
2016 printf("CODE SIGNING: marked invalid by pid %d: "
2017 "p=%d[%s] honoring CS_KILL, final status 0x%x\n",
2018 proc_selfpid(), pt->p_pid, pt->p_comm, pt->p_csflags);
2019 }
2020 psignal(pt, SIGKILL);
2021 } else
2022 proc_unlock(pt);
2023 } else
2024 proc_unlock(pt);
2025
2026 break;
2027
2028 case CS_OPS_MARKHARD:
2029 proc_lock(pt);
2030 pt->p_csflags |= CS_HARD;
2031 if ((pt->p_csflags & CS_VALID) == 0) {
2032 /* @@@ allow? reject? kill? @@@ */
2033 proc_unlock(pt);
2034 error = EINVAL;
2035 goto out;
2036 } else
2037 proc_unlock(pt);
2038 break;
2039
2040 case CS_OPS_MARKKILL:
2041 proc_lock(pt);
2042 pt->p_csflags |= CS_KILL;
2043 if ((pt->p_csflags & CS_VALID) == 0) {
2044 proc_unlock(pt);
2045 psignal(pt, SIGKILL);
2046 } else
2047 proc_unlock(pt);
2048 break;
2049
2050 case CS_OPS_PIDOFFSET:
2051 toff = pt->p_textoff;
2052 proc_rele(pt);
2053 error = copyout(&toff, uaddr, sizeof(toff));
2054 return(error);
2055
2056 case CS_OPS_CDHASH:
2057
2058 /* pt already holds a reference on its p_textvp */
2059 tvp = pt->p_textvp;
2060 toff = pt->p_textoff;
2061
2062 if (tvp == NULLVP || usize != SHA1_RESULTLEN) {
2063 proc_rele(pt);
2064 return EINVAL;
2065 }
2066
2067 error = vn_getcdhash(tvp, toff, cdhash);
2068 proc_rele(pt);
2069
2070 if (error == 0) {
2071 error = copyout(cdhash, uaddr, sizeof (cdhash));
2072 }
2073
2074 return error;
2075
2076 case CS_OPS_ENTITLEMENTS_BLOB: {
2077 void *start;
2078 size_t length;
2079
2080 proc_lock(pt);
2081
2082 if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
2083 proc_unlock(pt);
2084 error = EINVAL;
2085 break;
2086 }
2087
2088 error = cs_entitlements_blob_get(pt, &start, &length);
2089 proc_unlock(pt);
2090 if (error)
2091 break;
2092
2093 error = csops_copy_token(start, length, usize, uaddr);
2094 break;
2095 }
2096 case CS_OPS_MARKRESTRICT:
2097 proc_lock(pt);
2098 pt->p_csflags |= CS_RESTRICT;
2099 proc_unlock(pt);
2100 break;
2101
2102 case CS_OPS_SET_STATUS: {
2103 uint32_t flags;
2104
2105 if (usize < sizeof(flags)) {
2106 error = ERANGE;
2107 break;
2108 }
2109
2110 error = copyin(uaddr, &flags, sizeof(flags));
2111 if (error)
2112 break;
2113
2114 /* only allow setting a subset of all code sign flags */
2115 flags &=
2116 CS_HARD | CS_EXEC_SET_HARD |
2117 CS_KILL | CS_EXEC_SET_KILL |
2118 CS_RESTRICT |
2119 CS_REQUIRE_LV |
2120 CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT;
2121
2122 proc_lock(pt);
2123 if (pt->p_csflags & CS_VALID)
2124 pt->p_csflags |= flags;
2125 else
2126 error = EINVAL;
2127 proc_unlock(pt);
2128
2129 break;
2130 }
2131 case CS_OPS_BLOB: {
2132 void *start;
2133 size_t length;
2134
2135 proc_lock(pt);
2136 if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
2137 proc_unlock(pt);
2138 error = EINVAL;
2139 break;
2140 }
2141
2142 error = cs_blob_get(pt, &start, &length);
2143 proc_unlock(pt);
2144 if (error)
2145 break;
2146
2147 error = csops_copy_token(start, length, usize, uaddr);
2148 break;
2149 }
2150 case CS_OPS_IDENTITY: {
2151 const char *identity;
2152 uint8_t fakeheader[8];
2153 uint32_t idlen;
2154 size_t length;
2155
2156 /*
2157 * Make identity have a blob header to make it
2158 * easier on userland to guess the identity
2159 * length.
2160 */
2161 if (usize < sizeof(fakeheader)) {
2162 error = ERANGE;
2163 break;
2164 }
2165 memset(fakeheader, 0, sizeof(fakeheader));
2166
2167 proc_lock(pt);
2168 if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
2169 proc_unlock(pt);
2170 error = EINVAL;
2171 break;
2172 }
2173
2174 identity = cs_identity_get(pt);
2175 proc_unlock(pt);
2176 if (identity == NULL) {
2177 error = ENOENT;
2178 break;
2179 }
2180
2181 length = strlen(identity) + 1; /* include NUL */
2182 idlen = htonl(length + sizeof(fakeheader));
2183 memcpy(&fakeheader[4], &idlen, sizeof(idlen));
2184
2185 error = copyout(fakeheader, uaddr, sizeof(fakeheader));
2186 if (error)
2187 break;
2188
2189 if (usize < sizeof(fakeheader) + length)
2190 error = ERANGE;
2191 else if (usize > sizeof(fakeheader))
2192 error = copyout(identity, uaddr + sizeof(fakeheader), length);
2193
2194 break;
2195 }
2196
2197 case CS_OPS_CLEARINSTALLER:
2198 proc_lock(pt);
2199 pt->p_csflags &= ~(CS_INSTALLER | CS_DATAVAULT_CONTROLLER | CS_EXEC_INHERIT_SIP);
2200 proc_unlock(pt);
2201 break;
2202
2203 case CS_OPS_CLEARPLATFORM:
2204 #if DEVELOPMENT || DEBUG
2205 if (cs_enforcement_enable) {
2206 error = ENOTSUP;
2207 break;
2208 }
2209
2210 #if CONFIG_CSR
2211 if (csr_check(CSR_ALLOW_APPLE_INTERNAL) != 0) {
2212 error = ENOTSUP;
2213 break;
2214 }
2215 #endif
2216
2217 proc_lock(pt);
2218 pt->p_csflags &= ~(CS_PLATFORM_BINARY|CS_PLATFORM_PATH);
2219 csproc_clear_platform_binary(pt);
2220 proc_unlock(pt);
2221 break;
2222 #else
2223 error = ENOTSUP;
2224 break;
2225 #endif /* !DEVELOPMENT || DEBUG */
2226
2227 default:
2228 error = EINVAL;
2229 break;
2230 }
2231 out:
2232 proc_rele(pt);
2233 return(error);
2234 }
2235
2236 int
2237 proc_iterate(
2238 unsigned int flags,
2239 proc_iterate_fn_t callout,
2240 void *arg,
2241 proc_iterate_fn_t filterfn,
2242 void *filterarg)
2243 {
2244 pid_t *pid_list;
2245 vm_size_t pid_list_size = 0;
2246 vm_size_t pid_list_size_needed = 0;
2247 int pid_count = 0;
2248 int pid_count_available = 0;
2249
2250 assert(callout != NULL);
2251
2252 /* allocate outside of the proc_list_lock */
2253 for (;;) {
2254 proc_list_lock();
2255
2256 pid_count_available = nprocs + 1; //kernel_task is not counted in nprocs
2257 assert(pid_count_available > 0);
2258
2259 pid_list_size_needed = pid_count_available * sizeof(pid_t);
2260 if (pid_list_size >= pid_list_size_needed) {
2261 break;
2262 }
2263 proc_list_unlock();
2264
2265 if (pid_list_size != 0) {
2266 kfree(pid_list, pid_list_size);
2267 }
2268 pid_list = kalloc(pid_list_size_needed);
2269 if (!pid_list) {
2270 return 1;
2271 }
2272 pid_list_size = pid_list_size_needed;
2273 }
2274
2275 /* filter pids into pid_list */
2276
2277 if (flags & PROC_ALLPROCLIST) {
2278 proc_t p;
2279 ALLPROC_FOREACH(p) {
2280 /* ignore processes that are being forked */
2281 if (p->p_stat == SIDL) {
2282 continue;
2283 }
2284 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
2285 continue;
2286 }
2287
2288 pid_list[pid_count++] = proc_pid(p);
2289 if (pid_count >= pid_count_available) {
2290 break;
2291 }
2292 }
2293 }
2294
2295 if ((pid_count < pid_count_available) &&
2296 (flags & PROC_ZOMBPROCLIST)) {
2298 proc_t p;
2299 ZOMBPROC_FOREACH(p) {
2300 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
2301 continue;
2302 }
2303
2304 pid_list[pid_count++] = proc_pid(p);
2305 if (pid_count >= pid_count_available) {
2306 break;
2307 }
2308 }
2309 }
2310
2311 proc_list_unlock();
2312
2313 /* call callout on processes in the pid_list */
2314
2315 for (int i = 0; i < pid_count; i++) {
2316 proc_t p = proc_find(pid_list[i]);
2317 if (p) {
2318 if ((flags & PROC_NOWAITTRANS) == 0) {
2319 proc_transwait(p, 0);
2320 }
2321 int callout_ret = callout(p, arg);
2322
2323 switch (callout_ret) {
2324 case PROC_RETURNED_DONE:
2325 proc_rele(p);
2326 /* FALLTHROUGH */
2327 case PROC_CLAIMED_DONE:
2328 goto out;
2329
2330 case PROC_RETURNED:
2331 proc_rele(p);
2332 /* FALLTHROUGH */
2333 case PROC_CLAIMED:
2334 break;
2335
2336 default:
2337 panic("proc_iterate: callout returned %d for pid %d",
2338 callout_ret, pid_list[i]);
2339 break;
2340 }
2341 } else if (flags & PROC_ZOMBPROCLIST) {
2342 p = proc_find_zombref(pid_list[i]);
2343 if (!p) {
2344 continue;
2345 }
2346 int callout_ret = callout(p, arg);
2347
2348 switch (callout_ret) {
2349 case PROC_RETURNED_DONE:
2350 proc_drop_zombref(p);
2351 /* FALLTHROUGH */
2352 case PROC_CLAIMED_DONE:
2353 goto out;
2354
2355 case PROC_RETURNED:
2356 proc_drop_zombref(p);
2357 /* FALLTHROUGH */
2358 case PROC_CLAIMED:
2359 break;
2360
2361 default:
2362 panic("proc_iterate: callout returned %d for zombie pid %d",
2363 callout_ret, pid_list[i]);
2364 break;
2365 }
2366 }
2367 }
2368
2369 out:
2370 kfree(pid_list, pid_list_size);
2371 return 0;
2372
2373 }
2374
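/*
 * proc_rebootscan() does not use the pid-snapshot scheme above: it is
 * used on the shutdown path, takes a ref on one matching process at a
 * time, and restarts the ALLPROC_FOREACH walk from the head after every
 * callout because the proc_list_lock is dropped in between.  The filter
 * and callout must between them guarantee forward progress (for
 * instance by reaping or marking visited processes), or the scan would
 * never terminate.
 */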
2375 void
2376 proc_rebootscan(
2377 proc_iterate_fn_t callout,
2378 void *arg,
2379 proc_iterate_fn_t filterfn,
2380 void *filterarg)
2381 {
2382 proc_t p;
2383
2384 assert(callout != NULL);
2385
2386 proc_shutdown_exitcount = 0;
2387
2388 restart_foreach:
2389
2390 proc_list_lock();
2391
2392 ALLPROC_FOREACH(p) {
2393 if ((filterfn != NULL) && filterfn(p, filterarg) == 0) {
2394 continue;
2395 }
2396 p = proc_ref_locked(p);
2397 if (!p) {
2398 continue;
2399 }
2400
2401 proc_list_unlock();
2402
2403 proc_transwait(p, 0);
2404 (void)callout(p, arg);
2405 proc_rele(p);
2406
2407 goto restart_foreach;
2408 }
2409
2410 proc_list_unlock();
2411 }
2412
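/*
 * proc_childrenwalk() applies the same snapshot-then-visit pattern as
 * proc_iterate(), but only over parent's children; there is no filter
 * function and the callout contract (PROC_RETURNED and friends) is
 * identical.
 */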
2413 int
2414 proc_childrenwalk(
2415 proc_t parent,
2416 proc_iterate_fn_t callout,
2417 void *arg)
2418 {
2419 pid_t *pid_list = NULL;
2420 vm_size_t pid_list_size = 0;
2421 vm_size_t pid_list_size_needed = 0;
2422 int pid_count = 0;
2423 int pid_count_available = 0;
2424
2425 assert(parent != NULL);
2426 assert(callout != NULL);
2427
2428 for (;;) {
2429 proc_list_lock();
2430
2431 pid_count_available = parent->p_childrencnt;
2432 if (pid_count_available == 0) {
2433 proc_list_unlock();
2434 return 0;
2435 }
2436
2437 pid_list_size_needed = pid_count_available * sizeof(pid_t);
2438 if (pid_list_size >= pid_list_size_needed) {
2439 break;
2440 }
2441 proc_list_unlock();
2442
2443 if (pid_list_size != 0) {
2444 kfree(pid_list, pid_list_size);
2445 }
2446 pid_list = kalloc(pid_list_size_needed);
2447 if (!pid_list) {
2448 return 1;
2449 }
2450 pid_list_size = pid_list_size_needed;
2451 }
2452
2453 proc_t p;
2454 PCHILDREN_FOREACH(parent, p) {
2455 if (p->p_stat == SIDL) {
2456 continue;
2457 }
2458
2459 pid_list[pid_count++] = proc_pid(p);
2460 if (pid_count >= pid_count_available) {
2461 break;
2462 }
2463 }
2464
2465 proc_list_unlock();
2466
2467 for (int i = 0; i < pid_count; i++) {
2468 p = proc_find(pid_list[i]);
2469 if (!p) {
2470 continue;
2471 }
2472
2473 int callout_ret = callout(p, arg);
2474
2475 switch (callout_ret) {
2476 case PROC_RETURNED_DONE:
2477 proc_rele(p);
2478 /* FALLTHROUGH */
2479 case PROC_CLAIMED_DONE:
2480 goto out;
2481
2482 case PROC_RETURNED:
2483 proc_rele(p);
2484 /* FALLTHROUGH */
2485 case PROC_CLAIMED:
2486 break;
2487 default:
2488 panic("proc_childrenwalk: callout returned %d for pid %d",
2489 callout_ret, pid_list[i]);
2490 break;
2491 }
2492 }
2493
2494 out:
2495 kfree(pid_list, pid_list_size);
2496 return 0;
2497 }
2498
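/*
 * pgrp_iterate() snapshots the member pids under the pgrp lock and then
 * revisits them via proc_find(); a process that changed group in the
 * window is skipped by re-checking p_pgrpid against the snapshotted
 * pgid.  PGRP_DROPREF additionally releases the caller's reference on
 * pgrp once the pid list has been built.
 */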
2499 int
2500 pgrp_iterate(
2501 struct pgrp *pgrp,
2502 unsigned int flags,
2503 proc_iterate_fn_t callout,
2504 void * arg,
2505 proc_iterate_fn_t filterfn,
2506 void * filterarg)
2507 {
2508 pid_t *pid_list = NULL;
2509 proc_t p;
2510 vm_size_t pid_list_size = 0;
2511 vm_size_t pid_list_size_needed = 0;
2512 int pid_count = 0;
2513 int pid_count_available = 0;
2514
2515 pid_t pgid;
2516
2517 assert(pgrp != NULL);
2518 assert(callout != NULL);
2519
2520 for (;;) {
2521 pgrp_lock(pgrp);
2522
2523 pid_count_available = pgrp->pg_membercnt;
2524 if (pid_count_available == 0) {
2525 pgrp_unlock(pgrp);
2526 return 0;
2527 }
2528
2529 pid_list_size_needed = pid_count_available * sizeof(pid_t);
2530 if (pid_list_size >= pid_list_size_needed) {
2531 break;
2532 }
2533 pgrp_unlock(pgrp);
2534
2535 if (pid_list_size != 0) {
2536 kfree(pid_list, pid_list_size);
2537 }
2538 pid_list = kalloc(pid_list_size_needed);
2539 if (!pid_list) {
2540 return 1;
2541 }
2542 pid_list_size = pid_list_size_needed;
2543 }
2544
2545 pgid = pgrp->pg_id;
2546
2547 PGMEMBERS_FOREACH(pgrp, p) {
2548 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
2549 continue;
2550 }
2551 pid_list[pid_count++] = proc_pid(p);
2552 if (pid_count >= pid_count_available) {
2553 break;
2554 }
2555 }
2556
2557 pgrp_unlock(pgrp);
2558
2559 if (flags & PGRP_DROPREF) {
2560 pg_rele(pgrp);
2561 }
2562
2563 for (int i = 0; i < pid_count; i++) {
2564 /* do not handle kernproc */
2565 if (pid_list[i] == 0) {
2566 continue;
2567 }
2568 p = proc_find(pid_list[i]);
2569 if (!p) {
2570 continue;
2571 }
2572 if (p->p_pgrpid != pgid) {
2573 proc_rele(p);
2574 continue;
2575 }
2576
2577 int callout_ret = callout(p, arg);
2578
2579 switch (callout_ret) {
2580 case PROC_RETURNED:
2581 proc_rele(p);
2582 /* FALLTHROUGH */
2583 case PROC_CLAIMED:
2584 break;
2585
2586 case PROC_RETURNED_DONE:
2587 proc_rele(p);
2588 /* FALLTHROUGH */
2589 case PROC_CLAIMED_DONE:
2590 goto out;
2591
2592 default:
2593 panic("pgrp_iterate: callout returned %d for pid %d",
2594 callout_ret, pid_list[i]);
2595 }
2596 }
2597
2598 out:
2599 kfree(pid_list, pid_list_size);
2600 return 0;
2601 }
2602
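/*
 * Group membership is guarded by two locks: p_pgrp and p_pgrpid change
 * only under the proc_list_lock, while the pg_members list and
 * pg_membercnt are maintained under the pgrp lock.  pgrp_add(),
 * pgrp_remove() and pgrp_replace() below keep the two views in sync.
 */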
2603 static void
2604 pgrp_add(struct pgrp * pgrp, struct proc * parent, struct proc * child)
2605 {
2606 proc_list_lock();
2607 child->p_pgrp = pgrp;
2608 child->p_pgrpid = pgrp->pg_id;
2609 child->p_listflag |= P_LIST_INPGRP;
2610 /*
2611 * A pgrp being torn down (setpgid() from the shell racing with login
2612 * exiting, the "login cycler") can still be asked to take a new member;
2613 * revive it by clearing PGRP_FLAG_TERMINATE rather than failing with
2614 * ESRCH.  Safe to hold the lock: the caller holds a ref on the pgrp.
2615 */
2616 if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
2617 pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
2618 }
2619
2620 if ((pgrp->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
2621 panic("pgrp_add: pgrp is dead adding process");
2622 proc_list_unlock();
2623
2624 pgrp_lock(pgrp);
2625 pgrp->pg_membercnt++;
2626 if (parent != PROC_NULL) {
2627 LIST_INSERT_AFTER(parent, child, p_pglist);
2628 } else {
2629 LIST_INSERT_HEAD(&pgrp->pg_members, child, p_pglist);
2630 }
2631 pgrp_unlock(pgrp);
2632
2633 proc_list_lock();
2634 if (((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (pgrp->pg_membercnt != 0)) {
2635 pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
2636 }
2637 proc_list_unlock();
2638 }
2639
2640 static void
2641 pgrp_remove(struct proc * p)
2642 {
2643 struct pgrp * pg;
2644
2645 pg = proc_pgrp(p);
2646
2647 proc_list_lock();
2648 #if __PROC_INTERNAL_DEBUG
2649 if ((p->p_listflag & P_LIST_INPGRP) == 0)
2650 panic("removing from pglist but no named ref\n");
2651 #endif
2652 p->p_pgrpid = PGRPID_DEAD;
2653 p->p_listflag &= ~P_LIST_INPGRP;
2654 p->p_pgrp = NULL;
2655 proc_list_unlock();
2656
2657 if (pg == PGRP_NULL)
2658 panic("pgrp_remove: pg is NULL");
2659 pgrp_lock(pg);
2660 pg->pg_membercnt--;
2661
2662 if (pg->pg_membercnt < 0)
2663 panic("pgrp_remove: negative membercnt pgrp:%p p:%p\n", pg, p);
2664
2665 LIST_REMOVE(p, p_pglist);
2666 if (pg->pg_members.lh_first == 0) {
2667 pgrp_unlock(pg);
2668 pgdelete_dropref(pg);
2669 } else {
2670 pgrp_unlock(pg);
2671 pg_rele(pg);
2672 }
2673 }
2674
2675
2676 /* cannot use proc_pgrp here as it may stall */
2677 static void
2678 pgrp_replace(struct proc * p, struct pgrp * newpg)
2679 {
2680 struct pgrp * oldpg;
2681
2682
2683
2684 proc_list_lock();
2685
2686 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2687 p->p_listflag |= P_LIST_PGRPTRWAIT;
2688 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2689 }
2690
2691 p->p_listflag |= P_LIST_PGRPTRANS;
2692
2693 oldpg = p->p_pgrp;
2694 if (oldpg == PGRP_NULL)
2695 panic("pgrp_replace: oldpg NULL");
2696 oldpg->pg_refcount++;
2697 #if __PROC_INTERNAL_DEBUG
2698 if ((p->p_listflag & P_LIST_INPGRP) == 0)
2699 panic("removing from pglist but no named ref\n");
2700 #endif
2701 p->p_pgrpid = PGRPID_DEAD;
2702 p->p_listflag &= ~P_LIST_INPGRP;
2703 p->p_pgrp = NULL;
2704
2705 proc_list_unlock();
2706
2707 pgrp_lock(oldpg);
2708 oldpg->pg_membercnt--;
2709 if (oldpg->pg_membercnt < 0)
2710 panic("pgrp_replace: negative membercnt pgrp:%p p:%p\n", oldpg, p);
2711 LIST_REMOVE(p, p_pglist);
2712 if (oldpg->pg_members.lh_first == 0) {
2713 pgrp_unlock(oldpg);
2714 pgdelete_dropref(oldpg);
2715 } else {
2716 pgrp_unlock(oldpg);
2717 pg_rele(oldpg);
2718 }
2719
2720 proc_list_lock();
2721 p->p_pgrp = newpg;
2722 p->p_pgrpid = newpg->pg_id;
2723 p->p_listflag |= P_LIST_INPGRP;
2724 /*
2725 * A pgrp being torn down (setpgid() from the shell racing with login
2726 * exiting, the "login cycler") can still be asked to take a new member;
2727 * revive it by clearing PGRP_FLAG_TERMINATE rather than failing with
2728 * ESRCH.  Safe to hold the lock: the caller holds a ref on the pgrp.
2729 */
2730 if ((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
2731 newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
2732 }
2733
2734 if ((newpg->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
2735 panic("pgrp_replace: pgrp is dead adding process");
2736 proc_list_unlock();
2737
2738 pgrp_lock(newpg);
2739 newpg->pg_membercnt++;
2740 LIST_INSERT_HEAD(&newpg->pg_members, p, p_pglist);
2741 pgrp_unlock(newpg);
2742
2743 proc_list_lock();
2744 if (((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (newpg->pg_membercnt != 0)) {
2745 newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
2746 }
2747
2748 p->p_listflag &= ~P_LIST_PGRPTRANS;
2749 if ((p->p_listflag & P_LIST_PGRPTRWAIT) == P_LIST_PGRPTRWAIT) {
2750 p->p_listflag &= ~P_LIST_PGRPTRWAIT;
2751 wakeup(&p->p_pgrpid);
2752
2753 }
2754 proc_list_unlock();
2755 }
2756
2757 void
2758 pgrp_lock(struct pgrp * pgrp)
2759 {
2760 lck_mtx_lock(&pgrp->pg_mlock);
2761 }
2762
2763 void
2764 pgrp_unlock(struct pgrp * pgrp)
2765 {
2766 lck_mtx_unlock(&pgrp->pg_mlock);
2767 }
2768
2769 void
2770 session_lock(struct session * sess)
2771 {
2772 lck_mtx_lock(&sess->s_mlock);
2773 }
2774
2775
2776 void
2777 session_unlock(struct session * sess)
2778 {
2779 lck_mtx_unlock(&sess->s_mlock);
2780 }
2781
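/*
 * The lookup routines below return a referenced pgrp or session (or the
 * corresponding NULL value); callers drop the reference with pg_rele()
 * or session_rele().  proc_pgrp() and proc_session() first wait out any
 * in-flight pgrp_replace() transition (P_LIST_PGRPTRANS) so they never
 * observe a process between groups.
 */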
2782 struct pgrp *
2783 proc_pgrp(proc_t p)
2784 {
2785 struct pgrp * pgrp;
2786
2787 if (p == PROC_NULL)
2788 return(PGRP_NULL);
2789 proc_list_lock();
2790
2791 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2792 p->p_listflag |= P_LIST_PGRPTRWAIT;
2793 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2794 }
2795
2796 pgrp = p->p_pgrp;
2797
2798 assert(pgrp != NULL);
2799
2800 if (pgrp != PGRP_NULL) {
2801 pgrp->pg_refcount++;
2802 if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) != 0)
2803 panic("proc_pgrp: ref being provided for dead pgrp");
2804 }
2805
2806 proc_list_unlock();
2807
2808 return(pgrp);
2809 }
2810
2811 struct pgrp *
2812 tty_pgrp(struct tty * tp)
2813 {
2814 struct pgrp * pg = PGRP_NULL;
2815
2816 proc_list_lock();
2817 pg = tp->t_pgrp;
2818
2819 if (pg != PGRP_NULL) {
2820 if ((pg->pg_listflags & PGRP_FLAG_DEAD) != 0)
2821 panic("tty_pgrp: ref being provided for dead pgrp");
2822 pg->pg_refcount++;
2823 }
2824 proc_list_unlock();
2825
2826 return(pg);
2827 }
2828
2829 struct session *
2830 proc_session(proc_t p)
2831 {
2832 struct session * sess = SESSION_NULL;
2833
2834 if (p == PROC_NULL)
2835 return(SESSION_NULL);
2836
2837 proc_list_lock();
2838
2839 /* wait during transitions */
2840 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2841 p->p_listflag |= P_LIST_PGRPTRWAIT;
2842 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2843 }
2844
2845 if ((p->p_pgrp != PGRP_NULL) && ((sess = p->p_pgrp->pg_session) != SESSION_NULL)) {
2846 if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
2847 panic("proc_session: returning session ref on terminating session");
2848 sess->s_count++;
2849 }
2850 proc_list_unlock();
2851 return(sess);
2852 }
2853
2854 void
2855 session_rele(struct session *sess)
2856 {
2857 proc_list_lock();
2858 if (--sess->s_count == 0) {
2859 if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
2860 panic("session_rele: terminating already terminated session");
2861 sess->s_listflags |= S_LIST_TERM;
2862 LIST_REMOVE(sess, s_hash);
2863 sess->s_listflags |= S_LIST_DEAD;
2864 if (sess->s_count != 0)
2865 panic("session_rele: freeing session in use");
2866 proc_list_unlock();
2867 #if CONFIG_FINE_LOCK_GROUPS
2868 lck_mtx_destroy(&sess->s_mlock, proc_mlock_grp);
2869 #else
2870 lck_mtx_destroy(&sess->s_mlock, proc_lck_grp);
2871 #endif
2872 FREE_ZONE(sess, sizeof(struct session), M_SESSION);
2873 } else
2874 proc_list_unlock();
2875 }
2876
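/*
 * proc_transstart()/proc_transcommit()/proc_transend() implement a
 * single-owner "in transition" protocol around operations such as exec
 * that must not be observed half-done: transstart marks the proc in
 * transit (failing with EDEADLK rather than blocking on a committed
 * transition, or whenever non_blocking is set), transcommit marks the
 * point of no return, and transend clears the state and wakes waiters;
 * proc_transwait() lets other threads wait for the transition to
 * settle.  A minimal caller sketch (the elided bodies are
 * hypothetical):
 *
 *	if (proc_transstart(p, 0, 0) == 0) {
 *		... prepare the new state ...
 *		proc_transcommit(p, 0);
 *		... finish the switch ...
 *		proc_transend(p, 0);
 *	}
 */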
2877 int
2878 proc_transstart(proc_t p, int locked, int non_blocking)
2879 {
2880 if (locked == 0)
2881 proc_lock(p);
2882 while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
2883 if (((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT) || non_blocking) {
2884 if (locked == 0)
2885 proc_unlock(p);
2886 return EDEADLK;
2887 }
2888 p->p_lflag |= P_LTRANSWAIT;
2889 msleep(&p->p_lflag, &p->p_mlock, 0, "proc_transstart", NULL);
2890 }
2891 p->p_lflag |= P_LINTRANSIT;
2892 p->p_transholder = current_thread();
2893 if (locked == 0)
2894 proc_unlock(p);
2895 return 0;
2896 }
2897
2898 void
2899 proc_transcommit(proc_t p, int locked)
2900 {
2901 if (locked == 0)
2902 proc_lock(p);
2903
2904 assert((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT);
2905 assert(p->p_transholder == current_thread());
2906 p->p_lflag |= P_LTRANSCOMMIT;
2907
2908 if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
2909 p->p_lflag &= ~P_LTRANSWAIT;
2910 wakeup(&p->p_lflag);
2911 }
2912 if (locked == 0)
2913 proc_unlock(p);
2914 }
2915
2916 void
2917 proc_transend(proc_t p, int locked)
2918 {
2919 if (locked == 0)
2920 proc_lock(p);
2921
2922 p->p_lflag &= ~(P_LINTRANSIT | P_LTRANSCOMMIT);
2923 p->p_transholder = NULL;
2924
2925 if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
2926 p->p_lflag &= ~P_LTRANSWAIT;
2927 wakeup(&p->p_lflag);
2928 }
2929 if (locked == 0)
2930 proc_unlock(p);
2931 }
2932
2933 int
2934 proc_transwait(proc_t p, int locked)
2935 {
2936 if (locked == 0)
2937 proc_lock(p);
2938 while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
2939 if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT && current_proc() == p) {
2940 if (locked == 0)
2941 proc_unlock(p);
2942 return EDEADLK;
2943 }
2944 p->p_lflag |= P_LTRANSWAIT;
2945 msleep(&p->p_lflag, &p->p_mlock, 0, "proc_transwait", NULL);
2946 }
2947 if (locked == 0)
2948 proc_unlock(p);
2949 return 0;
2950 }
2951
2952 void
2953 proc_klist_lock(void)
2954 {
2955 lck_mtx_lock(proc_klist_mlock);
2956 }
2957
2958 void
2959 proc_klist_unlock(void)
2960 {
2961 lck_mtx_unlock(proc_klist_mlock);
2962 }
2963
2964 void
2965 proc_knote(struct proc * p, long hint)
2966 {
2967 proc_klist_lock();
2968 KNOTE(&p->p_klist, hint);
2969 proc_klist_unlock();
2970 }
2971
2972 void
2973 proc_knote_drain(struct proc *p)
2974 {
2975 struct knote *kn = NULL;
2976
2977 /*
2978 * Clear the proc's klist to avoid references after the proc is reaped.
2979 */
2980 proc_klist_lock();
2981 while ((kn = SLIST_FIRST(&p->p_klist))) {
2982 kn->kn_ptr.p_proc = PROC_NULL;
2983 KNOTE_DETACH(&p->p_klist, kn);
2984 }
2985 proc_klist_unlock();
2986 }
2987
2988 void
2989 proc_setregister(proc_t p)
2990 {
2991 proc_lock(p);
2992 p->p_lflag |= P_LREGISTER;
2993 proc_unlock(p);
2994 }
2995
2996 void
2997 proc_resetregister(proc_t p)
2998 {
2999 proc_lock(p);
3000 p->p_lflag &= ~P_LREGISTER;
3001 proc_unlock(p);
3002 }
3003
3004 pid_t
3005 proc_pgrpid(proc_t p)
3006 {
3007 return p->p_pgrpid;
3008 }
3009
3010 pid_t
3011 proc_selfpgrpid(void)
3012 {
3013 return current_proc()->p_pgrpid;
3014 }
3015
3016
3017 /* return control and action states */
3018 int
3019 proc_getpcontrol(int pid, int * pcontrolp)
3020 {
3021 proc_t p;
3022
3023 p = proc_find(pid);
3024 if (p == PROC_NULL)
3025 return(ESRCH);
3026 if (pcontrolp != NULL)
3027 *pcontrolp = p->p_pcaction;
3028
3029 proc_rele(p);
3030 return(0);
3031 }
3032
3033 int
3034 proc_dopcontrol(proc_t p)
3035 {
3036 int pcontrol;
3037
3038 proc_lock(p);
3039
3040 pcontrol = PROC_CONTROL_STATE(p);
3041
3042 if (PROC_ACTION_STATE(p) == 0) {
3043 switch (pcontrol) {
3044 case P_PCTHROTTLE:
3045 PROC_SETACTION_STATE(p);
3046 proc_unlock(p);
3047 printf("low swap: throttling pid %d (%s)\n", p->p_pid, p->p_comm);
3048 break;
3049
3050 case P_PCSUSP:
3051 PROC_SETACTION_STATE(p);
3052 proc_unlock(p);
3053 printf("low swap: suspending pid %d (%s)\n", p->p_pid, p->p_comm);
3054 task_suspend(p->task);
3055 break;
3056
3057 case P_PCKILL:
3058 PROC_SETACTION_STATE(p);
3059 proc_unlock(p);
3060 printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
3061 psignal(p, SIGKILL);
3062 break;
3063
3064 default:
3065 proc_unlock(p);
3066 }
3067
3068 } else
3069 proc_unlock(p);
3070
3071 return(PROC_RETURNED);
3072 }
3073
3074
3075 /*
3076 * Resume a throttled or suspended process.  This is an internal interface,
3077 * used only by the user-level code that presents the GUI when we run out
3078 * of swap space, and is therefore restricted to superuser processes.
3079 */
3080
3081 int
3082 proc_resetpcontrol(int pid)
3083 {
3084 proc_t p;
3085 int pcontrol;
3086 int error;
3087 proc_t self = current_proc();
3088
3089 /* allow if the process has been validated to handle resource control, or if the caller is root */
3090 if (((self->p_lflag & P_LVMRSRCOWNER) == 0) && (error = suser(kauth_cred_get(), 0)))
3091 return error;
3092
3093 p = proc_find(pid);
3094 if (p == PROC_NULL)
3095 return(ESRCH);
3096
3097 proc_lock(p);
3098
3099 pcontrol = PROC_CONTROL_STATE(p);
3100
3101 if (PROC_ACTION_STATE(p) != 0) {
3102 switch (pcontrol) {
3103 case P_PCTHROTTLE:
3104 PROC_RESETACTION_STATE(p);
3105 proc_unlock(p);
3106 printf("low swap: unthrottling pid %d (%s)\n", p->p_pid, p->p_comm);
3107 break;
3108
3109 case P_PCSUSP:
3110 PROC_RESETACTION_STATE(p);
3111 proc_unlock(p);
3112 printf("low swap: resuming pid %d (%s)\n", p->p_pid, p->p_comm);
3113 task_resume(p->task);
3114 break;
3115
3116 case P_PCKILL:
3117 /* cannot "unkill" a process; note the attempt and ignore it */
3118 PROC_SETACTION_STATE(p);
3119 proc_unlock(p);
3120 printf("low swap: attempt to unkill pid %d (%s) ignored\n", p->p_pid, p->p_comm);
3121 break;
3122
3123 default:
3124 proc_unlock(p);
3125 }
3126
3127 } else
3128 proc_unlock(p);
3129
3130 proc_rele(p);
3131 return(0);
3132 }
3133
3134
3135
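/*
 * Accumulator filled in by proc_pcontrol_filter() below: pcs_* fields
 * describe processes that carry a pcontrol action and have not yet been
 * actioned, npcs_* those with no action, and apcs_* those already
 * actioned; each *_max_size/*_pid/*_uniqueid triple remembers the
 * largest compressor user seen in its class.
 */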
3136 struct no_paging_space
3137 {
3138 uint64_t pcs_max_size;
3139 uint64_t pcs_uniqueid;
3140 int pcs_pid;
3141 int pcs_proc_count;
3142 uint64_t pcs_total_size;
3143
3144 uint64_t npcs_max_size;
3145 uint64_t npcs_uniqueid;
3146 int npcs_pid;
3147 int npcs_proc_count;
3148 uint64_t npcs_total_size;
3149
3150 int apcs_proc_count;
3151 uint64_t apcs_total_size;
3152 };
3153
3154
3155 static int
3156 proc_pcontrol_filter(proc_t p, void *arg)
3157 {
3158 struct no_paging_space *nps;
3159 uint64_t compressed;
3160
3161 nps = (struct no_paging_space *)arg;
3162
3163 compressed = get_task_compressed(p->task);
3164
3165 if (PROC_CONTROL_STATE(p)) {
3166 if (PROC_ACTION_STATE(p) == 0) {
3167 if (compressed > nps->pcs_max_size) {
3168 nps->pcs_pid = p->p_pid;
3169 nps->pcs_uniqueid = p->p_uniqueid;
3170 nps->pcs_max_size = compressed;
3171 }
3172 nps->pcs_total_size += compressed;
3173 nps->pcs_proc_count++;
3174 } else {
3175 nps->apcs_total_size += compressed;
3176 nps->apcs_proc_count++;
3177 }
3178 } else {
3179 if (compressed > nps->npcs_max_size) {
3180 nps->npcs_pid = p->p_pid;
3181 nps->npcs_uniqueid = p->p_uniqueid;
3182 nps->npcs_max_size = compressed;
3183 }
3184 nps->npcs_total_size += compressed;
3185 nps->npcs_proc_count++;
3186
3187 }
3188 return (0);
3189 }
3190
3191
3192 static int
3193 proc_pcontrol_null(__unused proc_t p, __unused void *arg)
3194 {
3195 return(PROC_RETURNED);
3196 }
3197
3198
3199 /*
3200 * Deal with the low-compressor-pool-space condition... this function
3201 * gets called when we are approaching the limits of the compressor pool or
3202 * we are unable to create a new swap file.
3203 * Since this eventually creates a memory deadlock situation, we need to take action to free up
3204 * memory resources (both compressed and uncompressed) in order to prevent the system from hanging completely.
3205 * There are two categories of processes to deal with: those that have an action
3206 * associated with them by the task itself and those that do not.  Actionable
3207 * tasks can have one of three actions specified: ones that
3208 * can be killed immediately, ones that should be suspended, and ones that should
3209 * be throttled.  Processes that do not have an action associated with them are normally
3210 * ignored unless they are using such a large percentage of the compressor pool (currently 50%)
3211 * that only by killing them can we hope to put the system back into a usable state.
3212 */
3213
3214 #define NO_PAGING_SPACE_DEBUG 0
3215
3216 extern uint64_t vm_compressor_pages_compressed(void);
3217
3218 struct timeval last_no_space_action = {0, 0};
3219
3220 #if DEVELOPMENT || DEBUG
3221 extern boolean_t kill_on_no_paging_space;
3222 #endif /* DEVELOPMENT || DEBUG */
3223
3224 #define MB_SIZE (1024 * 1024ULL)
3225 boolean_t memorystatus_kill_on_VM_thrashing(boolean_t);
3226
3227 extern int32_t max_kill_priority;
3228 extern int memorystatus_get_proccnt_upto_priority(int32_t max_bucket_index);
3229
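/*
 * Escalation order implemented below: (1) if the largest non-actionable
 * process alone holds more than half of the compressed pages, kill it
 * outright; (2) otherwise, if the memorystatus (jetsam) subsystem has
 * eligible candidates, ask it to kill one asynchronously; (3) otherwise
 * apply the registered pcontrol action to the largest actionable
 * process; failing all that, just log that nothing was eligible.
 */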
3230 int
3231 no_paging_space_action(void)
3232 {
3233 proc_t p;
3234 struct no_paging_space nps;
3235 struct timeval now;
3236
3237 /*
3238 * Throttle how often we come through here. Once every 5 seconds should be plenty.
3239 */
3240 microtime(&now);
3241
3242 if (now.tv_sec <= last_no_space_action.tv_sec + 5)
3243 return (0);
3244
3245 /*
3246 * Examine all processes and find the biggest (biggest is based on the number of pages this
3247 * task has in the compressor pool) that has been marked to have some action
3248 * taken when swap space runs out... we also find the biggest that hasn't been marked for
3249 * action.
3250 *
3251 * If the biggest non-actionable task is over the "dangerously big" threshold (currently 50% of
3252 * the total number of pages held by the compressor), we go ahead and kill it since no other task
3253 * can have any real effect on the situation. Otherwise, we go after the actionable process.
3254 */
3255 bzero(&nps, sizeof(nps));
3256
3257 proc_iterate(PROC_ALLPROCLIST, proc_pcontrol_null, (void *)NULL, proc_pcontrol_filter, (void *)&nps);
3258
3259 #if NO_PAGING_SPACE_DEBUG
3260 printf("low swap: npcs_proc_count = %d, npcs_total_size = %qd, npcs_max_size = %qd\n",
3261 nps.npcs_proc_count, nps.npcs_total_size, nps.npcs_max_size);
3262 printf("low swap: pcs_proc_count = %d, pcs_total_size = %qd, pcs_max_size = %qd\n",
3263 nps.pcs_proc_count, nps.pcs_total_size, nps.pcs_max_size);
3264 printf("low swap: apcs_proc_count = %d, apcs_total_size = %qd\n",
3265 nps.apcs_proc_count, nps.apcs_total_size);
3266 #endif
3267 if (nps.npcs_max_size > (vm_compressor_pages_compressed() * 50) / 100) {
3268 /*
3269 * for now we'll knock out any task that has more than 50% of the pages
3270 * held by the compressor
3271 */
3272 if ((p = proc_find(nps.npcs_pid)) != PROC_NULL) {
3273
3274 if (nps.npcs_uniqueid == p->p_uniqueid) {
3275 /*
3276 * verify this is still the same process
3277 * in case the proc exited and the pid got reused while
3278 * we were finishing the proc_iterate and getting to this point
3279 */
3280 last_no_space_action = now;
3281
3282 printf("low swap: killing largest compressed process with pid %d (%s) and size %llu MB\n", p->p_pid, p->p_comm, (nps.npcs_max_size/MB_SIZE));
3283 psignal(p, SIGKILL);
3284
3285 proc_rele(p);
3286
3287 return (0);
3288 }
3289
3290 proc_rele(p);
3291 }
3292 }
3293
3294 /*
3295 * If any processes fall within the jetsam bands we consider, they can be
3296 * killed; invoke the memorystatus thread to go ahead and kill one.
3297 */
3298 if (memorystatus_get_proccnt_upto_priority(max_kill_priority) > 0) {
3299
3300 last_no_space_action = now;
3301 memorystatus_kill_on_VM_thrashing(TRUE /* async */);
3302 return (1);
3303 }
3304
3305 /*
3306 * No eligible processes to kill. So let's suspend/kill the largest
3307 * process depending on its policy control specifications.
3308 */
3309
3310 if (nps.pcs_max_size > 0) {
3311 if ((p = proc_find(nps.pcs_pid)) != PROC_NULL) {
3312
3313 if (nps.pcs_uniqueid == p->p_uniqueid) {
3314 /*
3315 * verify this is still the same process
3316 * in case the proc exited and the pid got reused while
3317 * we were finishing the proc_iterate and getting to this point
3318 */
3319 last_no_space_action = now;
3320
3321 proc_dopcontrol(p);
3322
3323 proc_rele(p);
3324
3325 return (1);
3326 }
3327
3328 proc_rele(p);
3329 }
3330 }
3331 last_no_space_action = now;
3332
3333 printf("low swap: unable to find any eligible processes to take action on\n");
3334
3335 return (0);
3336 }
3337
3338 int
3339 proc_trace_log(__unused proc_t p, struct proc_trace_log_args *uap, __unused int *retval)
3340 {
3341 int ret = 0;
3342 proc_t target_proc = PROC_NULL;
3343 pid_t target_pid = uap->pid;
3344 uint64_t target_uniqueid = uap->uniqueid;
3345 task_t target_task = NULL;
3346
3347 if (priv_check_cred(kauth_cred_get(), PRIV_PROC_TRACE_INSPECT, 0)) {
3348 ret = EPERM;
3349 goto out;
3350 }
3351 target_proc = proc_find(target_pid);
3352 if (target_proc != PROC_NULL) {
3353 if (target_uniqueid != proc_uniqueid(target_proc)) {
3354 ret = ENOENT;
3355 goto out;
3356 }
3357
3358 target_task = proc_task(target_proc);
3359 if (task_send_trace_memory(target_task, target_pid, target_uniqueid)) {
3360 ret = EINVAL;
3361 goto out;
3362 }
3363 } else
3364 ret = ENOENT;
3365
3366 out:
3367 if (target_proc != PROC_NULL)
3368 proc_rele(target_proc);
3369 return (ret);
3370 }
3371
3372 #if VM_SCAN_FOR_SHADOW_CHAIN
3373 extern int vm_map_shadow_max(vm_map_t map);
3374 int proc_shadow_max(void);
3375 int proc_shadow_max(void)
3376 {
3377 int retval, max;
3378 proc_t p;
3379 task_t task;
3380 vm_map_t map;
3381
3382 max = 0;
3383 proc_list_lock();
3384 ALLPROC_FOREACH(p) {
3385 if (p->p_stat == SIDL)
3386 continue;
3387 task = p->task;
3388 if (task == NULL) {
3389 continue;
3390 }
3391 map = get_task_map(task);
3392 if (map == NULL) {
3393 continue;
3394 }
3395 retval = vm_map_shadow_max(map);
3396 if (retval > max) {
3397 max = retval;
3398 }
3399 }
3400 proc_list_unlock();
3401 return max;
3402 }
3403 #endif /* VM_SCAN_FOR_SHADOW_CHAIN */
3404
3405 void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid);
3406 void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid)
3407 {
3408 if (target_proc != NULL) {
3409 target_proc->p_responsible_pid = responsible_pid;
3410 }
3411 return;
3412 }
3413
3414 int
3415 proc_chrooted(proc_t p)
3416 {
3417 int retval = 0;
3418
3419 if (p) {
3420 proc_fdlock(p);
3421 retval = (p->p_fd->fd_rdir != NULL) ? 1 : 0;
3422 proc_fdunlock(p);
3423 }
3424
3425 return retval;
3426 }
3427
3428 void *
3429 proc_get_uthread_uu_threadlist(void * uthread_v)
3430 {
3431 uthread_t uth = (uthread_t)uthread_v;
3432 return (uth != NULL) ? uth->uu_threadlist : NULL;
3433 }