1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)kern_proc.c 8.4 (Berkeley) 1/4/94
62 */
63 /*
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
67 * Version 2.0.
68 */
69 /* HISTORY
70 * 04-Aug-97 Umesh Vaishampayan (umeshv@apple.com)
71 * Added current_proc_EXTERNAL() function for the use of kernel
72 * loadable modules.
73 *
74 * 05-Jun-95 Mac Gillon (mgillon) at NeXT
75 * New version based on 3.3NS and 4.4
76 */
77
78
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/kernel.h>
82 #include <sys/proc_internal.h>
83 #include <sys/acct.h>
84 #include <sys/wait.h>
85 #include <sys/file_internal.h>
86 #include <sys/uio.h>
87 #include <sys/malloc.h>
88 #include <sys/lock.h>
89 #include <sys/mbuf.h>
90 #include <sys/ioctl.h>
91 #include <sys/tty.h>
92 #include <sys/signalvar.h>
93 #include <sys/syslog.h>
94 #include <sys/sysctl.h>
95 #include <sys/sysproto.h>
96 #include <sys/kauth.h>
97 #include <sys/codesign.h>
98 #include <sys/kernel_types.h>
99 #include <sys/ubc.h>
100 #include <kern/kalloc.h>
101 #include <kern/task.h>
102 #include <kern/coalition.h>
103 #include <sys/coalition.h>
104 #include <kern/assert.h>
105 #include <vm/vm_protos.h>
106 #include <vm/vm_map.h> /* vm_map_switch_protect() */
107 #include <vm/vm_pageout.h>
108 #include <mach/task.h>
109 #include <mach/message.h>
110 #include <sys/priv.h>
111 #include <sys/proc_info.h>
112 #include <sys/bsdtask_info.h>
113 #include <sys/persona.h>
114
115 #if CONFIG_CSR
116 #include <sys/csr.h>
117 #endif
118
119 #if CONFIG_MEMORYSTATUS
120 #include <sys/kern_memorystatus.h>
121 #endif
122
123 #if CONFIG_MACF
124 #include <security/mac_framework.h>
125 #endif
126
127 #include <libkern/crypto/sha1.h>
128
129 /*
130 * Structure associated with user caching.
131 */
132 struct uidinfo {
133 LIST_ENTRY(uidinfo) ui_hash;
134 uid_t ui_uid;
135 long ui_proccnt;
136 };
137 #define UIHASH(uid) (&uihashtbl[(uid) & uihash])
138 LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
139 u_long uihash; /* size of hash table - 1 */
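/*
 * Example: hashinit() rounds the bucket count to a power of two and
 * stores count - 1 in uihash, so with 64 buckets uihash == 63 and
 * UIHASH(501) selects bucket (501 & 63) == 53.
 */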
140
141 /*
142 * Other process lists
143 */
144 struct pidhashhead *pidhashtbl;
145 u_long pidhash;
146 struct pgrphashhead *pgrphashtbl;
147 u_long pgrphash;
148 struct sesshashhead *sesshashtbl;
149 u_long sesshash;
150
151 struct proclist allproc;
152 struct proclist zombproc;
153 extern struct tty cons;
154
155 extern int cs_debug;
156
157 #if DEVELOPMENT || DEBUG
158 extern int cs_enforcement_enable;
159 #endif
160
161 #if DEBUG
162 #define __PROC_INTERNAL_DEBUG 1
163 #endif
164 #if CONFIG_COREDUMP
165 /* Name to give to core files */
166 #if CONFIG_EMBEDDED
167 __XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN+1] = {"/private/var/cores/%N.core"};
168 #else
169 __XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN+1] = {"/cores/core.%P"};
170 #endif
171 #endif
172
173 #if PROC_REF_DEBUG
174 #include <kern/backtrace.h>
175 #endif
176
177 static void orphanpg(struct pgrp * pg);
178 void proc_name_kdp(task_t t, char * buf, int size);
179 void * proc_get_uthread_uu_threadlist(void * uthread_v);
180 int proc_threadname_kdp(void * uth, char * buf, size_t size);
181 void proc_starttime_kdp(void * p, uint64_t * tv_sec, uint64_t * tv_usec, uint64_t * abstime);
182 char * proc_name_address(void * p);
183
184 /* TODO: make a header that's exported and usable in osfmk */
185 char* proc_best_name(proc_t p);
186
187 static void pgrp_add(struct pgrp * pgrp, proc_t parent, proc_t child);
188 static void pgrp_remove(proc_t p);
189 static void pgrp_replace(proc_t p, struct pgrp *pgrp);
190 static void pgdelete_dropref(struct pgrp *pgrp);
191 extern void pg_rele_dropref(struct pgrp * pgrp);
192 static int csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaddittoken);
193 static boolean_t proc_parent_is_currentproc(proc_t p);
194
195 struct fixjob_iterargs {
196 struct pgrp * pg;
197 struct session * mysession;
198 int entering;
199 };
200
201 int fixjob_callback(proc_t, void *);
202
203 uint64_t get_current_unique_pid(void);
204
205
206 uint64_t
207 get_current_unique_pid(void)
208 {
209 proc_t p = current_proc();
210
211 if (p)
212 return p->p_uniqueid;
213 else
214 return 0;
215 }
216
217 /*
218 * Initialize global process hashing structures.
219 */
220 void
221 procinit(void)
222 {
223 LIST_INIT(&allproc);
224 LIST_INIT(&zombproc);
225 pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
226 pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
227 sesshashtbl = hashinit(maxproc / 4, M_PROC, &sesshash);
228 uihashtbl = hashinit(maxproc / 16, M_PROC, &uihash);
229 #if CONFIG_PERSONAS
230 personas_bootstrap();
231 #endif
232 }
233
234 /*
235 * Change the count associated with the number of processes
236 * a given user is using. This routine protects the uihash
237 * table with the list lock.
238 */
239 int
240 chgproccnt(uid_t uid, int diff)
241 {
242 struct uidinfo *uip;
243 struct uidinfo *newuip = NULL;
244 struct uihashhead *uipp;
245 int retval;
246
247 again:
248 proc_list_lock();
249 uipp = UIHASH(uid);
250 for (uip = uipp->lh_first; uip != 0; uip = uip->ui_hash.le_next)
251 if (uip->ui_uid == uid)
252 break;
253 if (uip) {
254 uip->ui_proccnt += diff;
255 if (uip->ui_proccnt > 0) {
256 retval = uip->ui_proccnt;
257 proc_list_unlock();
258 goto out;
259 }
260 if (uip->ui_proccnt < 0)
261 panic("chgproccnt: procs < 0");
262 LIST_REMOVE(uip, ui_hash);
263 retval = 0;
264 proc_list_unlock();
265 FREE_ZONE(uip, sizeof(*uip), M_PROC);
266 goto out;
267 }
268 if (diff <= 0) {
269 if (diff == 0) {
270 retval = 0;
271 proc_list_unlock();
272 goto out;
273 }
274 panic("chgproccnt: lost user");
275 }
276 if (newuip != NULL) {
277 uip = newuip;
278 newuip = NULL;
279 LIST_INSERT_HEAD(uipp, uip, ui_hash);
280 uip->ui_uid = uid;
281 uip->ui_proccnt = diff;
282 retval = diff;
283 proc_list_unlock();
284 goto out;
285 }
286 proc_list_unlock();
287 MALLOC_ZONE(newuip, struct uidinfo *, sizeof(*uip), M_PROC, M_WAITOK);
288 if (newuip == NULL)
289 panic("chgproccnt: M_PROC zone depleted");
290 goto again;
291 out:
292 if (newuip != NULL)
293 FREE_ZONE(newuip, sizeof(*uip), M_PROC);
294 return(retval);
295 }
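/*
 * Usage sketch (as on the fork path): charge the target uid and check
 * the returned per-uid count against the resource limit before letting
 * the fork proceed; on failure the charge must be rolled back by hand.
 */
#if 0
	if (chgproccnt(uid, 1) > maxprocperuid) {
		(void)chgproccnt(uid, -1);	/* roll back the charge */
		return (EAGAIN);
	}
#endif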
296
297 /*
298 * Is p an inferior of the current process?
299 */
300 int
301 inferior(proc_t p)
302 {
303 int retval = 0;
304
305 proc_list_lock();
306 for (; p != current_proc(); p = p->p_pptr)
307 if (p->p_pid == 0)
308 goto out;
309 retval = 1;
310 out:
311 proc_list_unlock();
312 return(retval);
313 }
314
315 /*
316 * Is p an inferior of t ?
317 */
318 int
319 isinferior(proc_t p, proc_t t)
320 {
321 int retval = 0;
322 int nchecked = 0;
323 proc_t start = p;
324
325 /* if p==t they are not inferior */
326 if (p == t)
327 return(0);
328
329 proc_list_lock();
330 for (; p != t; p = p->p_pptr) {
331 nchecked++;
332
333 /* Detect here if we're in a cycle */
334 if ((p->p_pid == 0) || (p->p_pptr == start) || (nchecked >= nprocs))
335 goto out;
336 }
337 retval = 1;
338 out:
339 proc_list_unlock();
340 return(retval);
341 }
342
343 int
344 proc_isinferior(int pid1, int pid2)
345 {
346 proc_t p = PROC_NULL;
347 proc_t t = PROC_NULL;
348 int retval = 0;
349
350 if (((p = proc_find(pid1)) != (proc_t)0 ) && ((t = proc_find(pid2)) != (proc_t)0))
351 retval = isinferior(p, t);
352
353 if (p != PROC_NULL)
354 proc_rele(p);
355 if (t != PROC_NULL)
356 proc_rele(t);
357
358 return(retval);
359 }
360
361 proc_t
362 proc_find(int pid)
363 {
364 return(proc_findinternal(pid, 0));
365 }
366
367 proc_t
368 proc_findinternal(int pid, int locked)
369 {
370 proc_t p = PROC_NULL;
371
372 if (locked == 0) {
373 proc_list_lock();
374 }
375
376 p = pfind_locked(pid);
377 if ((p == PROC_NULL) || (p != proc_ref_locked(p)))
378 p = PROC_NULL;
379
380 if (locked == 0) {
381 proc_list_unlock();
382 }
383
384 return(p);
385 }
386
387 proc_t
388 proc_findthread(thread_t thread)
389 {
390 proc_t p = PROC_NULL;
391 struct uthread *uth;
392
393 proc_list_lock();
394 uth = get_bsdthread_info(thread);
395 if (uth && (uth->uu_flag & UT_VFORK))
396 p = uth->uu_proc;
397 else
398 p = (proc_t)(get_bsdthreadtask_info(thread));
399 p = proc_ref_locked(p);
400 proc_list_unlock();
401 return(p);
402 }
403
404 void
405 uthread_reset_proc_refcount(void *uthread) {
406 uthread_t uth;
407
408 uth = (uthread_t) uthread;
409 uth->uu_proc_refcount = 0;
410
411 #if PROC_REF_DEBUG
412 if (proc_ref_tracking_disabled) {
413 return;
414 }
415
416 uth->uu_pindex = 0;
417 #endif
418 }
419
420 #if PROC_REF_DEBUG
421 int
422 uthread_get_proc_refcount(void *uthread) {
423 uthread_t uth;
424
425 if (proc_ref_tracking_disabled) {
426 return 0;
427 }
428
429 uth = (uthread_t) uthread;
430
431 return uth->uu_proc_refcount;
432 }
433 #endif
434
435 static void
436 record_procref(proc_t p __unused, int count) {
437 uthread_t uth;
438
439 uth = current_uthread();
440 uth->uu_proc_refcount += count;
441
442 #if PROC_REF_DEBUG
443 if (proc_ref_tracking_disabled) {
444 return;
445 }
446
447 if (count == 1) {
448 if (uth->uu_pindex < NUM_PROC_REFS_TO_TRACK) {
449 backtrace((uintptr_t *) &uth->uu_proc_pcs[uth->uu_pindex], PROC_REF_STACK_DEPTH);
450
451 uth->uu_proc_ps[uth->uu_pindex] = p;
452 uth->uu_pindex++;
453 }
454 }
455 #endif
456 }
457
458 static boolean_t
459 uthread_needs_to_wait_in_proc_refwait(void) {
460 uthread_t uth = current_uthread();
461
462 /*
463 * Allow threads holding no proc refs to wait
464 * in proc_refwait; allowing threads holding
465 * proc refs to wait in proc_refwait causes
466 * deadlocks and makes proc_find non-reentrant.
467 */
468 if (uth->uu_proc_refcount == 0)
469 return TRUE;
470
471 return FALSE;
472 }
473
474 int
475 proc_rele(proc_t p)
476 {
477 proc_list_lock();
478 proc_rele_locked(p);
479 proc_list_unlock();
480
481 return(0);
482 }
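/*
 * Canonical lookup pattern (sketch): a successful proc_find() returns
 * a referenced proc that cannot be reaped until the matching
 * proc_rele(), so every find must be balanced by exactly one release.
 */
#if 0
	proc_t p = proc_find(pid);
	if (p != PROC_NULL) {
		/* ... inspect p while the ref pins it ... */
		proc_rele(p);
	}
#endif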
483
484 proc_t
485 proc_self(void)
486 {
487 struct proc * p;
488
489 p = current_proc();
490
491 proc_list_lock();
492 if (p != proc_ref_locked(p))
493 p = PROC_NULL;
494 proc_list_unlock();
495 return(p);
496 }
497
498
499 proc_t
500 proc_ref_locked(proc_t p)
501 {
502 proc_t p1 = p;
503 int pid = proc_pid(p);
504
505 retry:
506 /*
507 * If the process is still in creation, or the proc got recycled
508 * during the msleep, return failure.
509 */
510 if ((p == PROC_NULL) || (p1 != p) || ((p->p_listflag & P_LIST_INCREATE) != 0))
511 return (PROC_NULL);
512
513 /*
514 * Do not return a process marked for termination,
515 * or one in proc_refdrain called without ref wait.
516 * Wait for proc_refdrain_with_refwait to complete if
517 * the process is in refdrain and the refwait flag is set,
518 * unless the current thread already holds a proc ref
519 * on any proc.
520 */
521 if ((p->p_stat != SZOMB) &&
522 ((p->p_listflag & P_LIST_EXITED) == 0) &&
523 ((p->p_listflag & P_LIST_DEAD) == 0) &&
524 (((p->p_listflag & (P_LIST_DRAIN | P_LIST_DRAINWAIT)) == 0) ||
525 ((p->p_listflag & P_LIST_REFWAIT) != 0))) {
526 if ((p->p_listflag & P_LIST_REFWAIT) != 0 && uthread_needs_to_wait_in_proc_refwait()) {
527 msleep(&p->p_listflag, proc_list_mlock, 0, "proc_refwait", 0);
528 /*
529 * the proc might have been recycled since we dropped
530 * the proc list lock, get the proc again.
531 */
532 p = pfind_locked(pid);
533 goto retry;
534 }
535 p->p_refcount++;
536 record_procref(p, 1);
537 }
538 else
539 p1 = PROC_NULL;
540
541 return(p1);
542 }
543
544 void
545 proc_rele_locked(proc_t p)
546 {
547
548 if (p->p_refcount > 0) {
549 p->p_refcount--;
550 record_procref(p, -1);
551 if ((p->p_refcount == 0) && ((p->p_listflag & P_LIST_DRAINWAIT) == P_LIST_DRAINWAIT)) {
552 p->p_listflag &= ~P_LIST_DRAINWAIT;
553 wakeup(&p->p_refcount);
554 }
555 } else
556 panic("proc_rele_locked -ve ref\n");
557
558 }
559
560 proc_t
561 proc_find_zombref(int pid)
562 {
563 proc_t p;
564
565 proc_list_lock();
566
567 again:
568 p = pfind_locked(pid);
569
570 /* should we bail? */
571 if ((p == PROC_NULL) /* not found */
572 || ((p->p_listflag & P_LIST_INCREATE) != 0) /* not created yet */
573 || ((p->p_listflag & P_LIST_EXITED) == 0)) { /* not started exit */
574
575 proc_list_unlock();
576 return (PROC_NULL);
577 }
578
579 /* If someone else is controlling the (unreaped) zombie - wait */
580 if ((p->p_listflag & P_LIST_WAITING) != 0) {
581 (void)msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
582 goto again;
583 }
584 p->p_listflag |= P_LIST_WAITING;
585
586 proc_list_unlock();
587
588 return(p);
589 }
590
591 void
592 proc_drop_zombref(proc_t p)
593 {
594 proc_list_lock();
595 if ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
596 p->p_listflag &= ~P_LIST_WAITING;
597 wakeup(&p->p_stat);
598 }
599 proc_list_unlock();
600 }
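/*
 * Zombie references are an exclusive claim rather than a counted ref
 * (sketch): P_LIST_WAITING serializes reapers, so other callers of
 * proc_find_zombref() block in the msleep above until the claim drops.
 */
#if 0
	proc_t zp = proc_find_zombref(pid);
	if (zp != PROC_NULL) {
		/* ... harvest exit status from the unreaped zombie ... */
		proc_drop_zombref(zp);
	}
#endif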
601
602
603 void
604 proc_refdrain(proc_t p)
605 {
606 proc_refdrain_with_refwait(p, FALSE);
607 }
608
609 proc_t
610 proc_refdrain_with_refwait(proc_t p, boolean_t get_ref_and_allow_wait)
611 {
612 boolean_t initexec = FALSE;
613 proc_list_lock();
614
615 p->p_listflag |= P_LIST_DRAIN;
616 if (get_ref_and_allow_wait) {
617 /*
618 * All the calls to proc_ref_locked will wait
619 * for the flag to get cleared before returning a ref,
620 * unless the current thread already holds a proc ref
621 * for any proc.
622 */
623 p->p_listflag |= P_LIST_REFWAIT;
624 if (p == initproc) {
625 initexec = TRUE;
626 }
627 }
628
629 /* Do not wait in ref drain for launchd exec */
630 while (p->p_refcount && !initexec) {
631 p->p_listflag |= P_LIST_DRAINWAIT;
632 msleep(&p->p_refcount, proc_list_mlock, 0, "proc_refdrain", 0);
633 }
634
635 p->p_listflag &= ~P_LIST_DRAIN;
636 if (!get_ref_and_allow_wait) {
637 p->p_listflag |= P_LIST_DEAD;
638 } else {
639 /* Return a ref to the caller */
640 p->p_refcount++;
641 record_procref(p, 1);
642 }
643
644 proc_list_unlock();
645
646 if (get_ref_and_allow_wait) {
647 return (p);
648 }
649 return NULL;
650 }
651
652 void
653 proc_refwake(proc_t p)
654 {
655 proc_list_lock();
656 p->p_listflag &= ~P_LIST_REFWAIT;
657 wakeup(&p->p_listflag);
658 proc_list_unlock();
659 }
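/*
 * Pairing sketch for the exec path: drain outstanding refs while
 * keeping one for the caller, do the work that must not race with
 * proc_find(), then wake the threads parked in proc_refwait.
 */
#if 0
	proc_t p_ref = proc_refdrain_with_refwait(p, TRUE);
	/* ... swap the image; new proc_ref_locked() callers wait ... */
	proc_refwake(p);
	proc_rele(p_ref);
#endif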
660
661 proc_t
662 proc_parentholdref(proc_t p)
663 {
664 proc_t parent = PROC_NULL;
665 proc_t pp;
666 int loopcnt = 0;
667
668
669 proc_list_lock();
670 loop:
671 pp = p->p_pptr;
672 if ((pp == PROC_NULL) || (pp->p_stat == SZOMB) || ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED))) {
673 parent = PROC_NULL;
674 goto out;
675 }
676
677 if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == P_LIST_CHILDDRSTART) {
678 pp->p_listflag |= P_LIST_CHILDDRWAIT;
679 msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
680 loopcnt++;
681 if (loopcnt == 5) {
682 parent = PROC_NULL;
683 goto out;
684 }
685 goto loop;
686 }
687
688 if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == 0) {
689 pp->p_parentref++;
690 parent = pp;
691 goto out;
692 }
693
694 out:
695 proc_list_unlock();
696 return(parent);
697 }
698 int
699 proc_parentdropref(proc_t p, int listlocked)
700 {
701 if (listlocked == 0)
702 proc_list_lock();
703
704 if (p->p_parentref > 0) {
705 p->p_parentref--;
706 if ((p->p_parentref == 0) && ((p->p_listflag & P_LIST_PARENTREFWAIT) == P_LIST_PARENTREFWAIT)) {
707 p->p_listflag &= ~P_LIST_PARENTREFWAIT;
708 wakeup(&p->p_parentref);
709 }
710 } else
711 panic("proc_parentdropref -ve ref\n");
712 if (listlocked == 0)
713 proc_list_unlock();
714
715 return(0);
716 }
717
718 void
719 proc_childdrainstart(proc_t p)
720 {
721 #if __PROC_INTERNAL_DEBUG
722 if ((p->p_listflag & P_LIST_CHILDDRSTART) == P_LIST_CHILDDRSTART)
723 panic("proc_childdrainstart: childdrain already started\n");
724 #endif
725 p->p_listflag |= P_LIST_CHILDDRSTART;
726 /* wait for all that hold parentrefs to drop */
727 while (p->p_parentref > 0) {
728 p->p_listflag |= P_LIST_PARENTREFWAIT;
729 msleep(&p->p_parentref, proc_list_mlock, 0, "proc_childdrainstart", 0);
730 }
731 }
732
733
734 void
735 proc_childdrainend(proc_t p)
736 {
737 #if __PROC_INTERNAL_DEBUG
738 if (p->p_childrencnt > 0)
739 panic("exiting: children stil hanging around\n");
740 #endif
741 p->p_listflag |= P_LIST_CHILDDRAINED;
742 if ((p->p_listflag & (P_LIST_CHILDLKWAIT | P_LIST_CHILDDRWAIT)) != 0) {
743 p->p_listflag &= ~(P_LIST_CHILDLKWAIT | P_LIST_CHILDDRWAIT);
744 wakeup(&p->p_childrencnt);
745 }
746 }
747
748 void
749 proc_checkdeadrefs(__unused proc_t p)
750 {
751 #if __PROC_INTERNAL_DEBUG
752 if ((p->p_listflag & P_LIST_INHASH) != 0)
753 panic("proc being freed and still in hash %p: %u\n", p, p->p_listflag);
754 if (p->p_childrencnt != 0)
755 panic("proc being freed and pending children cnt %p:%d\n", p, p->p_childrencnt);
756 if (p->p_refcount != 0)
757 panic("proc being freed and pending refcount %p:%d\n", p, p->p_refcount);
758 if (p->p_parentref != 0)
759 panic("proc being freed and pending parentrefs %p:%d\n", p, p->p_parentref);
760 #endif
761 }
762
763 int
764 proc_pid(proc_t p)
765 {
766 if (p != NULL)
767 return (p->p_pid);
768 return -1;
769 }
770
771 int
772 proc_ppid(proc_t p)
773 {
774 if (p != NULL)
775 return (p->p_ppid);
776 return -1;
777 }
778
779 int
780 proc_selfpid(void)
781 {
782 return (current_proc()->p_pid);
783 }
784
785 int
786 proc_selfppid(void)
787 {
788 return (current_proc()->p_ppid);
789 }
790
791 int
792 proc_selfcsflags(void)
793 {
794 return (current_proc()->p_csflags);
795 }
796
797 #if CONFIG_DTRACE
798 static proc_t
799 dtrace_current_proc_vforking(void)
800 {
801 thread_t th = current_thread();
802 struct uthread *ut = get_bsdthread_info(th);
803
804 if (ut &&
805 ((ut->uu_flag & (UT_VFORK|UT_VFORKING)) == (UT_VFORK|UT_VFORKING))) {
806 /*
807 * Handle the narrow window where we're in the vfork syscall,
808 * but we're not quite ready to claim (in particular, to DTrace)
809 * that we're running as the child.
810 */
811 return (get_bsdtask_info(get_threadtask(th)));
812 }
813 return (current_proc());
814 }
815
816 int
817 dtrace_proc_selfpid(void)
818 {
819 return (dtrace_current_proc_vforking()->p_pid);
820 }
821
822 int
823 dtrace_proc_selfppid(void)
824 {
825 return (dtrace_current_proc_vforking()->p_ppid);
826 }
827
828 uid_t
829 dtrace_proc_selfruid(void)
830 {
831 return (dtrace_current_proc_vforking()->p_ruid);
832 }
833 #endif /* CONFIG_DTRACE */
834
835 proc_t
836 proc_parent(proc_t p)
837 {
838 proc_t parent;
839 proc_t pp;
840
841 proc_list_lock();
842 loop:
843 pp = p->p_pptr;
844 parent = proc_ref_locked(pp);
845 if ((parent == PROC_NULL) && (pp != PROC_NULL) && (pp->p_stat != SZOMB) && ((pp->p_listflag & P_LIST_EXITED) != 0) && ((pp->p_listflag & P_LIST_CHILDDRAINED) == 0)) {
846 pp->p_listflag |= P_LIST_CHILDLKWAIT;
847 msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
848 goto loop;
849 }
850 proc_list_unlock();
851 return(parent);
852 }
853
854 static boolean_t
855 proc_parent_is_currentproc(proc_t p)
856 {
857 boolean_t ret = FALSE;
858
859 proc_list_lock();
860 if (p->p_pptr == current_proc())
861 ret = TRUE;
862
863 proc_list_unlock();
864 return ret;
865 }
866
867 void
868 proc_name(int pid, char * buf, int size)
869 {
870 proc_t p;
871
872 if ((p = proc_find(pid)) != PROC_NULL) {
873 strlcpy(buf, &p->p_comm[0], size);
874 proc_rele(p);
875 }
876 }
877
878 void
879 proc_name_kdp(task_t t, char * buf, int size)
880 {
881 proc_t p = get_bsdtask_info(t);
882 if (p == PROC_NULL)
883 return;
884
885 if ((size_t)size > sizeof(p->p_comm))
886 strlcpy(buf, &p->p_name[0], MIN((int)sizeof(p->p_name), size));
887 else
888 strlcpy(buf, &p->p_comm[0], MIN((int)sizeof(p->p_comm), size));
889 }
890
891 int
892 proc_threadname_kdp(void * uth, char * buf, size_t size)
893 {
894 if (size < MAXTHREADNAMESIZE) {
895 /* this is really just a protective measure for the future in
896 * case the thread name size in stackshot gets out of sync with
897 * the BSD max thread name size. Note that bsd_getthreadname
898 * doesn't take input buffer size into account. */
899 return -1;
900 }
901
902 if (uth != NULL) {
903 bsd_getthreadname(uth, buf);
904 }
905 return 0;
906 }
907
908 /* Note that this function is generally going to be called from stackshot,
909 * and the arguments will be coming from a struct which is declared packed;
910 * thus the input arguments will in general be unaligned. We have to handle
911 * that here. */
912 void
913 proc_starttime_kdp(void *p, uint64_t *tv_sec, uint64_t *tv_usec, uint64_t *abstime)
914 {
915 proc_t pp = (proc_t)p;
916 struct uint64p {
917 uint64_t val;
918 } __attribute__((packed));
919
920 if (pp != PROC_NULL) {
921 if (tv_sec != NULL)
922 ((struct uint64p *)tv_sec)->val = pp->p_start.tv_sec;
923 if (tv_usec != NULL)
924 ((struct uint64p *)tv_usec)->val = pp->p_start.tv_usec;
925 if (abstime != NULL) {
926 if (pp->p_stats != NULL)
927 *abstime = pp->p_stats->ps_start;
928 else
929 *abstime = 0;
930 }
931 }
932 }
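/*
 * Caller-side illustration (hypothetical layout): stackshot hands in
 * pointers into a packed record, so the uint64_t destinations can sit
 * at odd offsets, which is why the stores above go through a packed
 * single-member wrapper instead of a plain *tv_sec assignment.
 */
#if 0
	struct stackshot_times {
		uint8_t		flavor;
		uint64_t	tv_sec;		/* offset 1: unaligned */
		uint64_t	tv_usec;
	} __attribute__((packed)) st;

	proc_starttime_kdp(p, &st.tv_sec, &st.tv_usec, NULL);
#endif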
933
934 char *
935 proc_name_address(void *p)
936 {
937 return &((proc_t)p)->p_comm[0];
938 }
939
940 char *
941 proc_best_name(proc_t p)
942 {
943 if (p->p_name[0] != 0)
944 return (&p->p_name[0]);
945 return (&p->p_comm[0]);
946 }
947
948 void
949 proc_selfname(char * buf, int size)
950 {
951 proc_t p;
952
953 if ((p = current_proc()) != (proc_t)0) {
954 strlcpy(buf, &p->p_comm[0], size);
955 }
956 }
957
958 void
959 proc_signal(int pid, int signum)
960 {
961 proc_t p;
962
963 if ((p = proc_find(pid)) != PROC_NULL) {
964 psignal(p, signum);
965 proc_rele(p);
966 }
967 }
968
969 int
970 proc_issignal(int pid, sigset_t mask)
971 {
972 proc_t p;
973 int error = 0;
974
975 if ((p = proc_find(pid)) != PROC_NULL) {
976 error = proc_pendingsignals(p, mask);
977 proc_rele(p);
978 }
979
980 return(error);
981 }
982
983 int
984 proc_noremotehang(proc_t p)
985 {
986 int retval = 0;
987
988 if (p)
989 retval = p->p_flag & P_NOREMOTEHANG;
990 return(retval? 1: 0);
991
992 }
993
994 int
995 proc_exiting(proc_t p)
996 {
997 int retval = 0;
998
999 if (p)
1000 retval = p->p_lflag & P_LEXIT;
1001 return(retval? 1: 0);
1002 }
1003
1004 int
1005 proc_forcequota(proc_t p)
1006 {
1007 int retval = 0;
1008
1009 if (p)
1010 retval = p->p_flag & P_FORCEQUOTA;
1011 return(retval? 1: 0);
1012
1013 }
1014
1015 int
1016 proc_suser(proc_t p)
1017 {
1018 kauth_cred_t my_cred;
1019 int error;
1020
1021 my_cred = kauth_cred_proc_ref(p);
1022 error = suser(my_cred, &p->p_acflag);
1023 kauth_cred_unref(&my_cred);
1024 return(error);
1025 }
1026
1027 task_t
1028 proc_task(proc_t proc)
1029 {
1030 return (task_t)proc->task;
1031 }
1032
1033 /*
1034 * Obtain the first thread in a process
1035 *
1036 * XXX This is a bad thing to do; it exists predominantly to support the
1037 * XXX use of proc_t's in places that should really be using
1038 * XXX thread_t's instead. This maintains historical behaviour, but really
1039 * XXX needs an audit of the context (proxy vs. not) to clean up.
1040 */
1041 thread_t
1042 proc_thread(proc_t proc)
1043 {
1044 uthread_t uth = TAILQ_FIRST(&proc->p_uthlist);
1045
1046 if (uth != NULL)
1047 return(uth->uu_context.vc_thread);
1048
1049 return(NULL);
1050 }
1051
1052 kauth_cred_t
1053 proc_ucred(proc_t p)
1054 {
1055 return(p->p_ucred);
1056 }
1057
1058 struct uthread *
1059 current_uthread(void)
1060 {
1061 thread_t th = current_thread();
1062
1063 return((struct uthread *)get_bsdthread_info(th));
1064 }
1065
1066
1067 int
1068 proc_is64bit(proc_t p)
1069 {
1070 return(IS_64BIT_PROCESS(p));
1071 }
1072
1073 int
1074 proc_pidversion(proc_t p)
1075 {
1076 return(p->p_idversion);
1077 }
1078
1079 uint32_t
1080 proc_persona_id(proc_t p)
1081 {
1082 return (uint32_t)persona_id_from_proc(p);
1083 }
1084
1085 uint32_t
1086 proc_getuid(proc_t p)
1087 {
1088 return(p->p_uid);
1089 }
1090
1091 uint32_t
1092 proc_getgid(proc_t p)
1093 {
1094 return(p->p_gid);
1095 }
1096
1097 uint64_t
1098 proc_uniqueid(proc_t p)
1099 {
1100 return(p->p_uniqueid);
1101 }
1102
1103 uint64_t
1104 proc_puniqueid(proc_t p)
1105 {
1106 return(p->p_puniqueid);
1107 }
1108
1109 void
1110 proc_coalitionids(__unused proc_t p, __unused uint64_t ids[COALITION_NUM_TYPES])
1111 {
1112 #if CONFIG_COALITIONS
1113 task_coalition_ids(p->task, ids);
1114 #else
1115 memset(ids, 0, sizeof(uint64_t [COALITION_NUM_TYPES]));
1116 #endif
1117 return;
1118 }
1119
1120 uint64_t
1121 proc_was_throttled(proc_t p)
1122 {
1123 return (p->was_throttled);
1124 }
1125
1126 uint64_t
1127 proc_did_throttle(proc_t p)
1128 {
1129 return (p->did_throttle);
1130 }
1131
1132 int
1133 proc_getcdhash(proc_t p, unsigned char *cdhash)
1134 {
1135 return vn_getcdhash(p->p_textvp, p->p_textoff, cdhash);
1136 }
1137
1138 void
1139 proc_getexecutableuuid(proc_t p, unsigned char *uuidbuf, unsigned long size)
1140 {
1141 if (size >= sizeof(p->p_uuid)) {
1142 memcpy(uuidbuf, p->p_uuid, sizeof(p->p_uuid));
1143 }
1144 }
1145
1146 /* Return vnode for executable with an iocount. Must be released with vnode_put() */
1147 vnode_t
1148 proc_getexecutablevnode(proc_t p)
1149 {
1150 vnode_t tvp = p->p_textvp;
1151
1152 if ( tvp != NULLVP) {
1153 if (vnode_getwithref(tvp) == 0) {
1154 return tvp;
1155 }
1156 }
1157
1158 return NULLVP;
1159 }
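/*
 * Usage sketch: the vnode comes back with an iocount held, so the
 * caller owns a vnode_put() once it is done with it.
 */
#if 0
	vnode_t vp = proc_getexecutablevnode(p);
	if (vp != NULLVP) {
		/* ... query the executable's vnode ... */
		vnode_put(vp);
	}
#endif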
1160
1161
1162 void
1163 bsd_set_dependency_capable(task_t task)
1164 {
1165 proc_t p = get_bsdtask_info(task);
1166
1167 if (p) {
1168 OSBitOrAtomic(P_DEPENDENCY_CAPABLE, &p->p_flag);
1169 }
1170 }
1171
1172
1173 #ifndef __arm__
1174 int
1175 IS_64BIT_PROCESS(proc_t p)
1176 {
1177 if (p && (p->p_flag & P_LP64))
1178 return(1);
1179 else
1180 return(0);
1181 }
1182 #endif
1183
1184 /*
1185 * Locate a process by number
1186 */
1187 proc_t
1188 pfind_locked(pid_t pid)
1189 {
1190 proc_t p;
1191 #if DEBUG
1192 proc_t q;
1193 #endif
1194
1195 if (!pid)
1196 return (kernproc);
1197
1198 for (p = PIDHASH(pid)->lh_first; p != 0; p = p->p_hash.le_next) {
1199 if (p->p_pid == pid) {
1200 #if DEBUG
1201 for (q = p->p_hash.le_next; q != 0; q = q->p_hash.le_next) {
1202 if ((p != q) && (q->p_pid == pid))
1203 panic("two procs with same pid %p:%p:%d:%d\n", p, q, p->p_pid, q->p_pid);
1204 }
1205 #endif
1206 return (p);
1207 }
1208 }
1209 return (NULL);
1210 }
1211
1212 /*
1213 * Locate a zombie by PID
1214 */
1215 __private_extern__ proc_t
1216 pzfind(pid_t pid)
1217 {
1218 proc_t p;
1219
1220
1221 proc_list_lock();
1222
1223 for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next)
1224 if (p->p_pid == pid)
1225 break;
1226
1227 proc_list_unlock();
1228
1229 return (p);
1230 }
1231
1232 /*
1233 * Locate a process group by number
1234 */
1235
1236 struct pgrp *
1237 pgfind(pid_t pgid)
1238 {
1239 struct pgrp * pgrp;
1240
1241 proc_list_lock();
1242 pgrp = pgfind_internal(pgid);
1243 if ((pgrp == NULL) || ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) != 0))
1244 pgrp = PGRP_NULL;
1245 else
1246 pgrp->pg_refcount++;
1247 proc_list_unlock();
1248 return(pgrp);
1249 }
1250
1251
1252
1253 struct pgrp *
1254 pgfind_internal(pid_t pgid)
1255 {
1256 struct pgrp *pgrp;
1257
1258 for (pgrp = PGRPHASH(pgid)->lh_first; pgrp != 0; pgrp = pgrp->pg_hash.le_next)
1259 if (pgrp->pg_id == pgid)
1260 return (pgrp);
1261 return (NULL);
1262 }
1263
1264 void
1265 pg_rele(struct pgrp * pgrp)
1266 {
1267 if (pgrp == PGRP_NULL)
1268 return;
1269 pg_rele_dropref(pgrp);
1270 }
1271
1272 void
1273 pg_rele_dropref(struct pgrp * pgrp)
1274 {
1275 proc_list_lock();
1276 if ((pgrp->pg_refcount == 1) && ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) == PGRP_FLAG_TERMINATE)) {
1277 proc_list_unlock();
1278 pgdelete_dropref(pgrp);
1279 return;
1280 }
1281
1282 pgrp->pg_refcount--;
1283 proc_list_unlock();
1284 }
1285
1286 struct session *
1287 session_find_internal(pid_t sessid)
1288 {
1289 struct session *sess;
1290
1291 for (sess = SESSHASH(sessid)->lh_first; sess != 0; sess = sess->s_hash.le_next)
1292 if (sess->s_sid == sessid)
1293 return (sess);
1294 return (NULL);
1295 }
1296
1297
1298 /*
1299 * Make a new process ready to become a useful member of society by making it
1300 * visible in all the right places and initializing its own lists to empty.
1301 *
1302 * Parameters: parent The parent of the process to insert
1303 * child The child process to insert
1304 *
1305 * Returns: (void)
1306 *
1307 * Notes: Insert a child process into the parent's process group, assign
1308 * the child the parent process pointer and PPID of the parent,
1309 * place it on the parent's p_children list as a sibling,
1310 * initialize its own child list, place it in the allproc list,
1311 * insert it in the proper hash bucket, and initialize its
1312 * event list.
1313 */
1314 void
1315 pinsertchild(proc_t parent, proc_t child)
1316 {
1317 struct pgrp * pg;
1318
1319 LIST_INIT(&child->p_children);
1320 TAILQ_INIT(&child->p_evlist);
1321 child->p_pptr = parent;
1322 child->p_ppid = parent->p_pid;
1323 child->p_puniqueid = parent->p_uniqueid;
1324 child->p_xhighbits = 0;
1325
1326 pg = proc_pgrp(parent);
1327 pgrp_add(pg, parent, child);
1328 pg_rele(pg);
1329
1330 proc_list_lock();
1331
1332 #if CONFIG_MEMORYSTATUS
1333 memorystatus_add(child, TRUE);
1334 #endif
1335
1336 parent->p_childrencnt++;
1337 LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
1338
1339 LIST_INSERT_HEAD(&allproc, child, p_list);
1340 /* mark the completion of proc creation */
1341 child->p_listflag &= ~P_LIST_INCREATE;
1342
1343 proc_list_unlock();
1344 }
1345
1346 /*
1347 * Move p to a new or existing process group (and session)
1348 *
1349 * Returns: 0 Success
1350 * ESRCH No such process
1351 */
1352 int
1353 enterpgrp(proc_t p, pid_t pgid, int mksess)
1354 {
1355 struct pgrp *pgrp;
1356 struct pgrp *mypgrp;
1357 struct session * procsp;
1358
1359 pgrp = pgfind(pgid);
1360 mypgrp = proc_pgrp(p);
1361 procsp = proc_session(p);
1362
1363 #if DIAGNOSTIC
1364 if (pgrp != NULL && mksess) /* firewalls */
1365 panic("enterpgrp: setsid into non-empty pgrp");
1366 if (SESS_LEADER(p, procsp))
1367 panic("enterpgrp: session leader attempted setpgrp");
1368 #endif
1369 if (pgrp == PGRP_NULL) {
1370 pid_t savepid = p->p_pid;
1371 proc_t np = PROC_NULL;
1372 /*
1373 * new process group
1374 */
1375 #if DIAGNOSTIC
1376 if (p->p_pid != pgid)
1377 panic("enterpgrp: new pgrp and pid != pgid");
1378 #endif
1379 MALLOC_ZONE(pgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP,
1380 M_WAITOK);
1381 if (pgrp == NULL)
1382 panic("enterpgrp: M_PGRP zone depleted");
1383 if ((np = proc_find(savepid)) == NULL || np != p) {
1384 if (np != PROC_NULL)
1385 proc_rele(np);
1386 if (mypgrp != PGRP_NULL)
1387 pg_rele(mypgrp);
1388 if (procsp != SESSION_NULL)
1389 session_rele(procsp);
1390 FREE_ZONE(pgrp, sizeof(struct pgrp), M_PGRP);
1391 return (ESRCH);
1392 }
1393 proc_rele(np);
1394 if (mksess) {
1395 struct session *sess;
1396
1397 /*
1398 * new session
1399 */
1400 MALLOC_ZONE(sess, struct session *,
1401 sizeof(struct session), M_SESSION, M_WAITOK);
1402 if (sess == NULL)
1403 panic("enterpgrp: M_SESSION zone depleted");
1404 sess->s_leader = p;
1405 sess->s_sid = p->p_pid;
1406 sess->s_count = 1;
1407 sess->s_ttyvp = NULL;
1408 sess->s_ttyp = TTY_NULL;
1409 sess->s_flags = 0;
1410 sess->s_listflags = 0;
1411 sess->s_ttypgrpid = NO_PID;
1412 #if CONFIG_FINE_LOCK_GROUPS
1413 lck_mtx_init(&sess->s_mlock, proc_mlock_grp, proc_lck_attr);
1414 #else
1415 lck_mtx_init(&sess->s_mlock, proc_lck_grp, proc_lck_attr);
1416 #endif
1417 bcopy(procsp->s_login, sess->s_login,
1418 sizeof(sess->s_login));
1419 OSBitAndAtomic(~((uint32_t)P_CONTROLT), &p->p_flag);
1420 proc_list_lock();
1421 LIST_INSERT_HEAD(SESSHASH(sess->s_sid), sess, s_hash);
1422 proc_list_unlock();
1423 pgrp->pg_session = sess;
1424 #if DIAGNOSTIC
1425 if (p != current_proc())
1426 panic("enterpgrp: mksession and p != curproc");
1427 #endif
1428 } else {
1429 proc_list_lock();
1430 pgrp->pg_session = procsp;
1431
1432 if ((pgrp->pg_session->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
1433 panic("enterpgrp: providing ref to terminating session ");
1434 pgrp->pg_session->s_count++;
1435 proc_list_unlock();
1436 }
1437 pgrp->pg_id = pgid;
1438 #if CONFIG_FINE_LOCK_GROUPS
1439 lck_mtx_init(&pgrp->pg_mlock, proc_mlock_grp, proc_lck_attr);
1440 #else
1441 lck_mtx_init(&pgrp->pg_mlock, proc_lck_grp, proc_lck_attr);
1442 #endif
1443 LIST_INIT(&pgrp->pg_members);
1444 pgrp->pg_membercnt = 0;
1445 pgrp->pg_jobc = 0;
1446 proc_list_lock();
1447 pgrp->pg_refcount = 1;
1448 pgrp->pg_listflags = 0;
1449 LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
1450 proc_list_unlock();
1451 } else if (pgrp == mypgrp) {
1452 pg_rele(pgrp);
1453 if (mypgrp != NULL)
1454 pg_rele(mypgrp);
1455 if (procsp != SESSION_NULL)
1456 session_rele(procsp);
1457 return (0);
1458 }
1459
1460 if (procsp != SESSION_NULL)
1461 session_rele(procsp);
1462 /*
1463 * Adjust eligibility of affected pgrps to participate in job control.
1464 * Increment eligibility counts before decrementing, otherwise we
1465 * could reach 0 spuriously during the first call.
1466 */
1467 fixjobc(p, pgrp, 1);
1468 fixjobc(p, mypgrp, 0);
1469
1470 if (mypgrp != PGRP_NULL)
1471 pg_rele(mypgrp);
1472 pgrp_replace(p, pgrp);
1473 pg_rele(pgrp);
1474
1475 return(0);
1476 }
1477
1478 /*
1479 * remove process from process group
1480 */
1481 int
1482 leavepgrp(proc_t p)
1483 {
1484
1485 pgrp_remove(p);
1486 return (0);
1487 }
1488
1489 /*
1490 * delete a process group
1491 */
1492 static void
1493 pgdelete_dropref(struct pgrp *pgrp)
1494 {
1495 struct tty *ttyp;
1496 int emptypgrp = 1;
1497 struct session *sessp;
1498
1499
1500 pgrp_lock(pgrp);
1501 if (pgrp->pg_membercnt != 0) {
1502 emptypgrp = 0;
1503 }
1504 pgrp_unlock(pgrp);
1505
1506 proc_list_lock();
1507 pgrp->pg_refcount--;
1508 if ((emptypgrp == 0) || (pgrp->pg_membercnt != 0)) {
1509 proc_list_unlock();
1510 return;
1511 }
1512
1513 pgrp->pg_listflags |= PGRP_FLAG_TERMINATE;
1514
1515 if (pgrp->pg_refcount > 0) {
1516 proc_list_unlock();
1517 return;
1518 }
1519
1520 pgrp->pg_listflags |= PGRP_FLAG_DEAD;
1521 LIST_REMOVE(pgrp, pg_hash);
1522
1523 proc_list_unlock();
1524
1525 ttyp = SESSION_TP(pgrp->pg_session);
1526 if (ttyp != TTY_NULL) {
1527 if (ttyp->t_pgrp == pgrp) {
1528 tty_lock(ttyp);
1529 /* Re-check after acquiring the lock */
1530 if (ttyp->t_pgrp == pgrp) {
1531 ttyp->t_pgrp = NULL;
1532 pgrp->pg_session->s_ttypgrpid = NO_PID;
1533 }
1534 tty_unlock(ttyp);
1535 }
1536 }
1537
1538 proc_list_lock();
1539
1540 sessp = pgrp->pg_session;
1541 if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
1542 panic("pg_deleteref: manipulating refs of already terminating session");
1543 if (--sessp->s_count == 0) {
1544 if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
1545 panic("pg_deleteref: terminating already terminated session");
1546 sessp->s_listflags |= S_LIST_TERM;
1547 ttyp = SESSION_TP(sessp);
1548 LIST_REMOVE(sessp, s_hash);
1549 proc_list_unlock();
1550 if (ttyp != TTY_NULL) {
1551 tty_lock(ttyp);
1552 if (ttyp->t_session == sessp)
1553 ttyp->t_session = NULL;
1554 tty_unlock(ttyp);
1555 }
1556 proc_list_lock();
1557 sessp->s_listflags |= S_LIST_DEAD;
1558 if (sessp->s_count != 0)
1559 panic("pg_deleteref: freeing session in use");
1560 proc_list_unlock();
1561 #if CONFIG_FINE_LOCK_GROUPS
1562 lck_mtx_destroy(&sessp->s_mlock, proc_mlock_grp);
1563 #else
1564 lck_mtx_destroy(&sessp->s_mlock, proc_lck_grp);
1565 #endif
1566 FREE_ZONE(sessp, sizeof(struct session), M_SESSION);
1567 } else
1568 proc_list_unlock();
1569 #if CONFIG_FINE_LOCK_GROUPS
1570 lck_mtx_destroy(&pgrp->pg_mlock, proc_mlock_grp);
1571 #else
1572 lck_mtx_destroy(&pgrp->pg_mlock, proc_lck_grp);
1573 #endif
1574 FREE_ZONE(pgrp, sizeof(*pgrp), M_PGRP);
1575 }
1576
1577
1578 /*
1579 * Adjust pgrp jobc counters when specified process changes process group.
1580 * We count the number of processes in each process group that "qualify"
1581 * the group for terminal job control (those with a parent in a different
1582 * process group of the same session). If that count reaches zero, the
1583 * process group becomes orphaned. Check both the specified process'
1584 * process group and that of its children.
1585 * entering == 0 => p is leaving specified group.
1586 * entering == 1 => p is entering specified group.
1587 */
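/*
 * Worked example: let p live in pgrp A while its parent lives in pgrp B
 * of the same session. The parent's membership in a different pgrp of
 * the session is what "qualifies" A, so moving p into A increments
 * A->pg_jobc; when p later leaves and the count falls to zero, A is
 * orphaned and orphanpg() below delivers SIGHUP then SIGCONT to its
 * members if any of them are stopped.
 */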
1588 int
1589 fixjob_callback(proc_t p, void * arg)
1590 {
1591 struct fixjob_iterargs *fp;
1592 struct pgrp * pg, *hispg;
1593 struct session * mysession, *hissess;
1594 int entering;
1595
1596 fp = (struct fixjob_iterargs *)arg;
1597 pg = fp->pg;
1598 mysession = fp->mysession;
1599 entering = fp->entering;
1600
1601 hispg = proc_pgrp(p);
1602 hissess = proc_session(p);
1603
1604 if ((hispg != pg) &&
1605 (hissess == mysession)) {
1606 pgrp_lock(hispg);
1607 if (entering) {
1608 hispg->pg_jobc++;
1609 pgrp_unlock(hispg);
1610 } else if (--hispg->pg_jobc == 0) {
1611 pgrp_unlock(hispg);
1612 orphanpg(hispg);
1613 } else
1614 pgrp_unlock(hispg);
1615 }
1616 if (hissess != SESSION_NULL)
1617 session_rele(hissess);
1618 if (hispg != PGRP_NULL)
1619 pg_rele(hispg);
1620
1621 return(PROC_RETURNED);
1622 }
1623
1624 void
1625 fixjobc(proc_t p, struct pgrp *pgrp, int entering)
1626 {
1627 struct pgrp *hispgrp = PGRP_NULL;
1628 struct session *hissess = SESSION_NULL;
1629 struct session *mysession = pgrp->pg_session;
1630 proc_t parent;
1631 struct fixjob_iterargs fjarg;
1632 boolean_t proc_parent_self;
1633
1634 /*
1635 * Check if p's parent is current proc, if yes then no need to take
1636 * a ref; calling proc_parent with current proc as parent may
1637 * deadlock if current proc is exiting.
1638 */
1639 proc_parent_self = proc_parent_is_currentproc(p);
1640 if (proc_parent_self)
1641 parent = current_proc();
1642 else
1643 parent = proc_parent(p);
1644
1645 if (parent != PROC_NULL) {
1646 hispgrp = proc_pgrp(parent);
1647 hissess = proc_session(parent);
1648 if (!proc_parent_self)
1649 proc_rele(parent);
1650 }
1651
1652
1653 /*
1654 * Check p's parent to see whether p qualifies its own process
1655 * group; if so, adjust count for p's process group.
1656 */
1657 if ((hispgrp != pgrp) &&
1658 (hissess == mysession)) {
1659 pgrp_lock(pgrp);
1660 if (entering) {
1661 pgrp->pg_jobc++;
1662 pgrp_unlock(pgrp);
1663 } else if (--pgrp->pg_jobc == 0) {
1664 pgrp_unlock(pgrp);
1665 orphanpg(pgrp);
1666 } else
1667 pgrp_unlock(pgrp);
1668 }
1669
1670 if (hissess != SESSION_NULL)
1671 session_rele(hissess);
1672 if (hispgrp != PGRP_NULL)
1673 pg_rele(hispgrp);
1674
1675 /*
1676 * Check this process' children to see whether they qualify
1677 * their process groups; if so, adjust counts for children's
1678 * process groups.
1679 */
1680 fjarg.pg = pgrp;
1681 fjarg.mysession = mysession;
1682 fjarg.entering = entering;
1683 proc_childrenwalk(p, fixjob_callback, &fjarg);
1684 }
1685
1686 /*
1687 * A process group has become orphaned; if there are any stopped processes in
1688 * the group, hang up all processes in that group.
1689 */
1690 static void
1691 orphanpg(struct pgrp *pgrp)
1692 {
1693 pid_t *pid_list;
1694 proc_t p;
1695 vm_size_t pid_list_size = 0;
1696 vm_size_t pid_list_size_needed = 0;
1697 int pid_count = 0;
1698 int pid_count_available = 0;
1699
1700 assert(pgrp != NULL);
1701
1702 /* allocate outside of the pgrp_lock */
1703 for (;;) {
1704 pgrp_lock(pgrp);
1705
1706 boolean_t should_iterate = FALSE;
1707 pid_count_available = 0;
1708
1709 PGMEMBERS_FOREACH(pgrp, p) {
1710 pid_count_available++;
1711
1712 if (p->p_stat == SSTOP) {
1713 should_iterate = TRUE;
1714 }
1715 }
1716
1717 if (pid_count_available == 0 || !should_iterate) {
1718 pgrp_unlock(pgrp);
1719 return;
1720 }
1721
1722 pid_list_size_needed = pid_count_available * sizeof(pid_t);
1723 if (pid_list_size >= pid_list_size_needed) {
1724 break;
1725 }
1726 pgrp_unlock(pgrp);
1727
1728 if (pid_list_size != 0) {
1729 kfree(pid_list, pid_list_size);
1730 }
1731 pid_list = kalloc(pid_list_size_needed);
1732 if (!pid_list) {
1733 return;
1734 }
1735 pid_list_size = pid_list_size_needed;
1736 }
1737
1738 /* no orphaned processes */
1739 if (pid_list_size == 0) {
1740 pgrp_unlock(pgrp);
1741 return;
1742 }
1743
1744 PGMEMBERS_FOREACH(pgrp, p) {
1745 pid_list[pid_count++] = proc_pid(p);
1746 if (pid_count >= pid_count_available) {
1747 break;
1748 }
1749 }
1750 pgrp_unlock(pgrp);
1751
1752 if (pid_count == 0) {
1753 goto out;
1754 }
1755
1756 for (int i = 0; i < pid_count; i++) {
1757 /* do not handle kernproc */
1758 if (pid_list[i] == 0) {
1759 continue;
1760 }
1761 p = proc_find(pid_list[i]);
1762 if (!p) {
1763 continue;
1764 }
1765
1766 proc_transwait(p, 0);
1767 pt_setrunnable(p);
1768 psignal(p, SIGHUP);
1769 psignal(p, SIGCONT);
1770 proc_rele(p);
1771 }
1772
1773 out:
1774 kfree(pid_list, pid_list_size);
1775 return;
1776 }
1777
1778 int
1779 proc_is_classic(proc_t p __unused)
1780 {
1781 return (0);
1782 }
1783
1784 /* XXX Why does this function exist? Need to kill it off... */
1785 proc_t
1786 current_proc_EXTERNAL(void)
1787 {
1788 return (current_proc());
1789 }
1790
1791 int
1792 proc_is_forcing_hfs_case_sensitivity(proc_t p)
1793 {
1794 return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY) ? 1 : 0;
1795 }
1796
1797 #if CONFIG_COREDUMP
1798 /*
1799 * proc_core_name(name, uid, pid)
1800 * Expand the name described in corefilename, using name, uid, and pid.
1801 * corefilename is a printf-like string, with three format specifiers:
1802 * %N name of process ("name")
1803 * %P process id (pid)
1804 * %U user id (uid)
1805 * For example, "%N.core" is the default; they can be disabled completely
1806 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
1807 * This is controlled by the sysctl variable kern.corefile (see above).
1808 */
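/*
 * Expansion examples (illustrative names): with corefilename
 * "/cores/core.%P", pid 1234 yields "/cores/core.1234"; with
 * "/cores/%U/%N-%P", uid 501 running "Finder" as pid 1234 yields
 * "/cores/501/Finder-1234".
 */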
1809 __private_extern__ int
1810 proc_core_name(const char *name, uid_t uid, pid_t pid, char *cf_name,
1811 size_t cf_name_len)
1812 {
1813 const char *format, *appendstr;
1814 char id_buf[11]; /* Buffer for pid/uid -- max 4B */
1815 size_t i, l, n;
1816
1817 if (cf_name == NULL)
1818 goto toolong;
1819
1820 format = corefilename;
1821 for (i = 0, n = 0; n < cf_name_len && format[i]; i++) {
1822 switch (format[i]) {
1823 case '%': /* Format character */
1824 i++;
1825 switch (format[i]) {
1826 case '%':
1827 appendstr = "%";
1828 break;
1829 case 'N': /* process name */
1830 appendstr = name;
1831 break;
1832 case 'P': /* process id */
1833 snprintf(id_buf, sizeof(id_buf), "%u", pid);
1834 appendstr = id_buf;
1835 break;
1836 case 'U': /* user id */
1837 snprintf(id_buf, sizeof(id_buf), "%u", uid);
1838 appendstr = id_buf;
1839 break;
1840 case '\0': /* format string ended in % symbol */
1841 goto endofstring;
1842 default:
1843 appendstr = "";
1844 log(LOG_ERR,
1845 "Unknown format character %c in `%s'\n",
1846 format[i], format);
1847 }
1848 l = strlen(appendstr);
1849 if ((n + l) >= cf_name_len)
1850 goto toolong;
1851 bcopy(appendstr, cf_name + n, l);
1852 n += l;
1853 break;
1854 default:
1855 cf_name[n++] = format[i];
1856 }
1857 }
1858 if (format[i] != '\0')
1859 goto toolong;
1860 return (0);
1861 toolong:
1862 log(LOG_ERR, "pid %ld (%s), uid (%u): corename is too long\n",
1863 (long)pid, name, (uint32_t)uid);
1864 return (1);
1865 endofstring:
1866 log(LOG_ERR, "pid %ld (%s), uid (%u): unexpected end of string after %% token\n",
1867 (long)pid, name, (uint32_t)uid);
1868 return (1);
1869 }
1870 #endif /* CONFIG_COREDUMP */
1871
1872 /* Code Signing related routines */
1873
1874 int
1875 csops(__unused proc_t p, struct csops_args *uap, __unused int32_t *retval)
1876 {
1877 return(csops_internal(uap->pid, uap->ops, uap->useraddr,
1878 uap->usersize, USER_ADDR_NULL));
1879 }
1880
1881 int
1882 csops_audittoken(__unused proc_t p, struct csops_audittoken_args *uap, __unused int32_t *retval)
1883 {
1884 if (uap->uaudittoken == USER_ADDR_NULL)
1885 return(EINVAL);
1886 return(csops_internal(uap->pid, uap->ops, uap->useraddr,
1887 uap->usersize, uap->uaudittoken));
1888 }
1889
1890 static int
1891 csops_copy_token(void *start, size_t length, user_size_t usize, user_addr_t uaddr)
1892 {
1893 char fakeheader[8] = { 0 };
1894 int error;
1895
1896 if (usize < sizeof(fakeheader))
1897 return ERANGE;
1898
1899 /* if no blob, fill in zero header */
1900 if (NULL == start) {
1901 start = fakeheader;
1902 length = sizeof(fakeheader);
1903 } else if (usize < length) {
1904 /* ... if input too short, copy out length of entitlement */
1905 uint32_t length32 = htonl((uint32_t)length);
1906 memcpy(&fakeheader[4], &length32, sizeof(length32));
1907
1908 error = copyout(fakeheader, uaddr, sizeof(fakeheader));
1909 if (error == 0)
1910 return ERANGE; /* input buffer too short, ERANGE signals that */
1911 return error;
1912 }
1913 return copyout(start, uaddr, length);
1914 }
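/*
 * Userland sketch of the protocol above (see <sys/codesign.h>): probe
 * with an 8-byte buffer; on ERANGE the fake header carries the blob
 * length in network byte order at offset 4, so retry with that size.
 */
#if 0
	uint8_t hdr[8];
	uint32_t len;

	if (csops(pid, CS_OPS_ENTITLEMENTS_BLOB, hdr, sizeof(hdr)) == -1 &&
	    errno == ERANGE) {
		memcpy(&len, hdr + 4, sizeof(len));
		len = ntohl(len);
		/* ... allocate len bytes and call csops() again ... */
	}
#endif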
1915
1916 static int
1917 csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaudittoken)
1918 {
1919 size_t usize = (size_t)CAST_DOWN(size_t, usersize);
1920 proc_t pt;
1921 int forself;
1922 int error;
1923 vnode_t tvp;
1924 off_t toff;
1925 unsigned char cdhash[SHA1_RESULTLEN];
1926 audit_token_t token;
1927 unsigned int upid = 0, uidversion = 0;
1928
1929 forself = error = 0;
1930
1931 if (pid == 0)
1932 pid = proc_selfpid();
1933 if (pid == proc_selfpid())
1934 forself = 1;
1935
1936
1937 switch (ops) {
1938 case CS_OPS_STATUS:
1939 case CS_OPS_CDHASH:
1940 case CS_OPS_PIDOFFSET:
1941 case CS_OPS_ENTITLEMENTS_BLOB:
1942 case CS_OPS_IDENTITY:
1943 case CS_OPS_BLOB:
1944 break; /* not restricted to root */
1945 default:
1946 if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE)
1947 return(EPERM);
1948 break;
1949 }
1950
1951 pt = proc_find(pid);
1952 if (pt == PROC_NULL)
1953 return(ESRCH);
1954
1955 upid = pt->p_pid;
1956 uidversion = pt->p_idversion;
1957 if (uaudittoken != USER_ADDR_NULL) {
1958
1959 error = copyin(uaudittoken, &token, sizeof(audit_token_t));
1960 if (error != 0)
1961 goto out;
1962 /* verify the audit token pid/idversion matches with proc */
1963 if ((token.val[5] != upid) || (token.val[7] != uidversion)) {
1964 error = ESRCH;
1965 goto out;
1966 }
1967 }
1968
1969 #if CONFIG_MACF
1970 switch (ops) {
1971 case CS_OPS_MARKINVALID:
1972 case CS_OPS_MARKHARD:
1973 case CS_OPS_MARKKILL:
1974 case CS_OPS_MARKRESTRICT:
1975 case CS_OPS_SET_STATUS:
1976 case CS_OPS_CLEARINSTALLER:
1977 case CS_OPS_CLEARPLATFORM:
1978 if ((error = mac_proc_check_set_cs_info(current_proc(), pt, ops)))
1979 goto out;
1980 break;
1981 default:
1982 if ((error = mac_proc_check_get_cs_info(current_proc(), pt, ops)))
1983 goto out;
1984 }
1985 #endif
1986
1987 switch (ops) {
1988
1989 case CS_OPS_STATUS: {
1990 uint32_t retflags;
1991
1992 proc_lock(pt);
1993 retflags = pt->p_csflags;
1994 if (cs_enforcement(pt))
1995 retflags |= CS_ENFORCEMENT;
1996 if (csproc_get_platform_binary(pt))
1997 retflags |= CS_PLATFORM_BINARY;
1998 if (csproc_get_platform_path(pt))
1999 retflags |= CS_PLATFORM_PATH;
2000 proc_unlock(pt);
2001
2002 if (uaddr != USER_ADDR_NULL)
2003 error = copyout(&retflags, uaddr, sizeof(uint32_t));
2004 break;
2005 }
2006 case CS_OPS_MARKINVALID:
2007 proc_lock(pt);
2008 if ((pt->p_csflags & CS_VALID) == CS_VALID) { /* is currently valid */
2009 pt->p_csflags &= ~CS_VALID; /* set invalid */
2010 if ((pt->p_csflags & CS_KILL) == CS_KILL) {
2011 pt->p_csflags |= CS_KILLED;
2012 proc_unlock(pt);
2013 if (cs_debug) {
2014 printf("CODE SIGNING: marked invalid by pid %d: "
2015 "p=%d[%s] honoring CS_KILL, final status 0x%x\n",
2016 proc_selfpid(), pt->p_pid, pt->p_comm, pt->p_csflags);
2017 }
2018 psignal(pt, SIGKILL);
2019 } else
2020 proc_unlock(pt);
2021 } else
2022 proc_unlock(pt);
2023
2024 break;
2025
2026 case CS_OPS_MARKHARD:
2027 proc_lock(pt);
2028 pt->p_csflags |= CS_HARD;
2029 if ((pt->p_csflags & CS_VALID) == 0) {
2030 /* @@@ allow? reject? kill? @@@ */
2031 proc_unlock(pt);
2032 error = EINVAL;
2033 goto out;
2034 } else
2035 proc_unlock(pt);
2036 break;
2037
2038 case CS_OPS_MARKKILL:
2039 proc_lock(pt);
2040 pt->p_csflags |= CS_KILL;
2041 if ((pt->p_csflags & CS_VALID) == 0) {
2042 proc_unlock(pt);
2043 psignal(pt, SIGKILL);
2044 } else
2045 proc_unlock(pt);
2046 break;
2047
2048 case CS_OPS_PIDOFFSET:
2049 toff = pt->p_textoff;
2050 proc_rele(pt);
2051 error = copyout(&toff, uaddr, sizeof(toff));
2052 return(error);
2053
2054 case CS_OPS_CDHASH:
2055
2056 /* pt already holds a reference on its p_textvp */
2057 tvp = pt->p_textvp;
2058 toff = pt->p_textoff;
2059
2060 if (tvp == NULLVP || usize != SHA1_RESULTLEN) {
2061 proc_rele(pt);
2062 return EINVAL;
2063 }
2064
2065 error = vn_getcdhash(tvp, toff, cdhash);
2066 proc_rele(pt);
2067
2068 if (error == 0) {
2069 error = copyout(cdhash, uaddr, sizeof (cdhash));
2070 }
2071
2072 return error;
2073
2074 case CS_OPS_ENTITLEMENTS_BLOB: {
2075 void *start;
2076 size_t length;
2077
2078 proc_lock(pt);
2079
2080 if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
2081 proc_unlock(pt);
2082 error = EINVAL;
2083 break;
2084 }
2085
2086 error = cs_entitlements_blob_get(pt, &start, &length);
2087 proc_unlock(pt);
2088 if (error)
2089 break;
2090
2091 error = csops_copy_token(start, length, usize, uaddr);
2092 break;
2093 }
2094 case CS_OPS_MARKRESTRICT:
2095 proc_lock(pt);
2096 pt->p_csflags |= CS_RESTRICT;
2097 proc_unlock(pt);
2098 break;
2099
2100 case CS_OPS_SET_STATUS: {
2101 uint32_t flags;
2102
2103 if (usize < sizeof(flags)) {
2104 error = ERANGE;
2105 break;
2106 }
2107
2108 error = copyin(uaddr, &flags, sizeof(flags));
2109 if (error)
2110 break;
2111
2112 /* only allow setting a subset of all code sign flags */
2113 flags &=
2114 CS_HARD | CS_EXEC_SET_HARD |
2115 CS_KILL | CS_EXEC_SET_KILL |
2116 CS_RESTRICT |
2117 CS_REQUIRE_LV |
2118 CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT;
2119
2120 proc_lock(pt);
2121 if (pt->p_csflags & CS_VALID)
2122 pt->p_csflags |= flags;
2123 else
2124 error = EINVAL;
2125 proc_unlock(pt);
2126
2127 break;
2128 }
2129 case CS_OPS_BLOB: {
2130 void *start;
2131 size_t length;
2132
2133 proc_lock(pt);
2134 if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
2135 proc_unlock(pt);
2136 error = EINVAL;
2137 break;
2138 }
2139
2140 error = cs_blob_get(pt, &start, &length);
2141 proc_unlock(pt);
2142 if (error)
2143 break;
2144
2145 error = csops_copy_token(start, length, usize, uaddr);
2146 break;
2147 }
2148 case CS_OPS_IDENTITY: {
2149 const char *identity;
2150 uint8_t fakeheader[8];
2151 uint32_t idlen;
2152 size_t length;
2153
2154 /*
2155 * Make identity have a blob header to make it
2156 * easier on userland to guess the identity
2157 * length.
2158 */
2159 if (usize < sizeof(fakeheader)) {
2160 error = ERANGE;
2161 break;
2162 }
2163 memset(fakeheader, 0, sizeof(fakeheader));
2164
2165 proc_lock(pt);
2166 if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
2167 proc_unlock(pt);
2168 error = EINVAL;
2169 break;
2170 }
2171
2172 identity = cs_identity_get(pt);
2173 proc_unlock(pt);
2174 if (identity == NULL) {
2175 error = ENOENT;
2176 break;
2177 }
2178
2179 length = strlen(identity) + 1; /* include NUL */
2180 idlen = htonl(length + sizeof(fakeheader));
2181 memcpy(&fakeheader[4], &idlen, sizeof(idlen));
2182
2183 error = copyout(fakeheader, uaddr, sizeof(fakeheader));
2184 if (error)
2185 break;
2186
2187 if (usize < sizeof(fakeheader) + length)
2188 error = ERANGE;
2189 else if (usize > sizeof(fakeheader))
2190 error = copyout(identity, uaddr + sizeof(fakeheader), length);
2191
2192 break;
2193 }
2194
2195 case CS_OPS_CLEARINSTALLER:
2196 proc_lock(pt);
2197 pt->p_csflags &= ~(CS_INSTALLER | CS_DATAVAULT_CONTROLLER | CS_EXEC_INHERIT_SIP);
2198 proc_unlock(pt);
2199 break;
2200
2201 case CS_OPS_CLEARPLATFORM:
2202 #if DEVELOPMENT || DEBUG
2203 if (cs_enforcement_enable) {
2204 error = ENOTSUP;
2205 break;
2206 }
2207
2208 #if CONFIG_CSR
2209 if (csr_check(CSR_ALLOW_APPLE_INTERNAL) != 0) {
2210 error = ENOTSUP;
2211 break;
2212 }
2213 #endif
2214
2215 proc_lock(pt);
2216 pt->p_csflags &= ~(CS_PLATFORM_BINARY|CS_PLATFORM_PATH);
2217 csproc_clear_platform_binary(pt);
2218 proc_unlock(pt);
2219 break;
2220 #else
2221 error = ENOTSUP;
2222 break;
2223 #endif /* !DEVELOPMENT || DEBUG */
2224
2225 default:
2226 error = EINVAL;
2227 break;
2228 }
2229 out:
2230 proc_rele(pt);
2231 return(error);
2232 }
2233
2234 int
2235 proc_iterate(
2236 unsigned int flags,
2237 proc_iterate_fn_t callout,
2238 void *arg,
2239 proc_iterate_fn_t filterfn,
2240 void *filterarg)
2241 {
2242 pid_t *pid_list;
2243 vm_size_t pid_list_size = 0;
2244 vm_size_t pid_list_size_needed = 0;
2245 int pid_count = 0;
2246 int pid_count_available = 0;
2247
2248 assert(callout != NULL);
2249
2250 /* allocate outside of the proc_list_lock */
2251 for (;;) {
2252 proc_list_lock();
2253
2254 pid_count_available = nprocs + 1; /* kernel_task is not counted in nprocs */
2255 assert(pid_count_available > 0);
2256
2257 pid_list_size_needed = pid_count_available * sizeof(pid_t);
2258 if (pid_list_size >= pid_list_size_needed) {
2259 break;
2260 }
2261 proc_list_unlock();
2262
2263 if (pid_list_size != 0) {
2264 kfree(pid_list, pid_list_size);
2265 }
2266 pid_list = kalloc(pid_list_size_needed);
2267 if (!pid_list) {
2268 return 1;
2269 }
2270 pid_list_size = pid_list_size_needed;
2271 }
2272
2273 /* filter pids into pid_list */
2274
2275 if (flags & PROC_ALLPROCLIST) {
2276 proc_t p;
2277 ALLPROC_FOREACH(p) {
2278 /* ignore processes that are being forked */
2279 if (p->p_stat == SIDL) {
2280 continue;
2281 }
2282 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
2283 continue;
2284 }
2285
2286 pid_list[pid_count++] = proc_pid(p);
2287 if (pid_count >= pid_count_available) {
2288 break;
2289 }
2290 }
2291 }
2292
2293 if ((pid_count < pid_count_available) &&
2294 (flags & PROC_ZOMBPROCLIST))
2295 {
2296 proc_t p;
2297 ZOMBPROC_FOREACH(p) {
2298 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
2299 continue;
2300 }
2301
2302 pid_list[pid_count++] = proc_pid(p);
2303 if (pid_count >= pid_count_available) {
2304 break;
2305 }
2306 }
2307 }
2308
2309 proc_list_unlock();
2310
2311 /* call callout on processes in the pid_list */
2312
2313 for (int i = 0; i < pid_count; i++) {
2314 proc_t p = proc_find(pid_list[i]);
2315 if (p) {
2316 if ((flags & PROC_NOWAITTRANS) == 0) {
2317 proc_transwait(p, 0);
2318 }
2319 int callout_ret = callout(p, arg);
2320
2321 switch (callout_ret) {
2322 case PROC_RETURNED_DONE:
2323 proc_rele(p);
2324 /* FALLTHROUGH */
2325 case PROC_CLAIMED_DONE:
2326 goto out;
2327
2328 case PROC_RETURNED:
2329 proc_rele(p);
2330 /* FALLTHROUGH */
2331 case PROC_CLAIMED:
2332 break;
2333
2334 default:
2335 panic("proc_iterate: callout returned %d for pid %d",
2336 callout_ret, pid_list[i]);
2337 break;
2338 }
2339 } else if (flags & PROC_ZOMBPROCLIST) {
2340 p = proc_find_zombref(pid_list[i]);
2341 if (!p) {
2342 continue;
2343 }
2344 int callout_ret = callout(p, arg);
2345
2346 switch (callout_ret) {
2347 case PROC_RETURNED_DONE:
2348 proc_drop_zombref(p);
2349 /* FALLTHROUGH */
2350 case PROC_CLAIMED_DONE:
2351 goto out;
2352
2353 case PROC_RETURNED:
2354 proc_drop_zombref(p);
2355 /* FALLTHROUGH */
2356 case PROC_CLAIMED:
2357 break;
2358
2359 default:
2360 panic("proc_iterate: callout returned %d for zombie pid %d",
2361 callout_ret, pid_list[i]);
2362 break;
2363 }
2364 }
2365 }
2366
2367 out:
2368 kfree(pid_list, pid_list_size);
2369 return 0;
2370
2371 }
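/*
 * Usage sketch (illustrative, not used by the kernel): count the children
 * of a given pid with proc_iterate(). The filter runs under proc_list_lock
 * and must not block; the callout runs unlocked with a ref on the proc and
 * returns PROC_RETURNED so the iterator drops that ref.
 */
#if 0
static int
example_ppid_filter(proc_t p, void *arg)
{
    pid_t ppid = *(pid_t *)arg;

    return (p->p_ppid == ppid);   /* nonzero selects the proc */
}

static int
example_count_callout(__unused proc_t p, void *arg)
{
    (*(int *)arg)++;
    return (PROC_RETURNED);
}

static int
example_count_children(pid_t ppid)
{
    int count = 0;

    proc_iterate(PROC_ALLPROCLIST, example_count_callout, &count,
        example_ppid_filter, &ppid);
    return (count);
}
#endif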
2372
2373 void
2374 proc_rebootscan(
2375 proc_iterate_fn_t callout,
2376 void *arg,
2377 proc_iterate_fn_t filterfn,
2378 void *filterarg)
2379 {
2380 proc_t p;
2381
2382 assert(callout != NULL);
2383
2384 proc_shutdown_exitcount = 0;
2385
2386 restart_foreach:
2387
2388 proc_list_lock();
2389
2390 ALLPROC_FOREACH(p) {
2391 if ((filterfn != NULL) && filterfn(p, filterarg) == 0) {
2392 continue;
2393 }
2394 p = proc_ref_locked(p);
2395 if (!p) {
2396 continue;
2397 }
2398
2399 proc_list_unlock();
2400
2401 proc_transwait(p, 0);
2402 (void)callout(p, arg);
2403 proc_rele(p);
2404
2405 goto restart_foreach;
2406 }
2407
2408 proc_list_unlock();
2409 }
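/*
 * Usage sketch (illustrative): proc_rebootscan() restarts the allproc walk
 * after every callout because the callout runs with the list unlocked, so
 * the list may have changed underneath it. A hypothetical shutdown-style
 * filter (not the real proc_shutdown() filters) might look like:
 */
#if 0
static int
example_reboot_filter(proc_t p, __unused void *arg)
{
    /* skip the kernel (pid 0) and launchd (pid 1) */
    return (p->p_pid > 1);
}
#endif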
2410
2411 int
2412 proc_childrenwalk(
2413 proc_t parent,
2414 proc_iterate_fn_t callout,
2415 void *arg)
2416 {
2417 pid_t *pid_list = NULL;
2418 vm_size_t pid_list_size = 0;
2419 vm_size_t pid_list_size_needed = 0;
2420 int pid_count = 0;
2421 int pid_count_available = 0;
2422
2423 assert(parent != NULL);
2424 assert(callout != NULL);
2425
2426 for (;;) {
2427 proc_list_lock();
2428
2429 pid_count_available = parent->p_childrencnt;
2430 if (pid_count_available == 0) {
2431 proc_list_unlock();
2432 return 0;
2433 }
2434
2435 pid_list_size_needed = pid_count_available * sizeof(pid_t);
2436 if (pid_list_size >= pid_list_size_needed) {
2437 break;
2438 }
2439 proc_list_unlock();
2440
2441 if (pid_list_size != 0) {
2442 kfree(pid_list, pid_list_size);
2443 }
2444 pid_list = kalloc(pid_list_size_needed);
2445 if (!pid_list) {
2446 return 1;
2447 }
2448 pid_list_size = pid_list_size_needed;
2449 }
2450
2451 proc_t p;
2452 PCHILDREN_FOREACH(parent, p) {
2453 if (p->p_stat == SIDL) {
2454 continue;
2455 }
2456
2457 pid_list[pid_count++] = proc_pid(p);
2458 if (pid_count >= pid_count_available) {
2459 break;
2460 }
2461 }
2462
2463 proc_list_unlock();
2464
2465 for (int i = 0; i < pid_count; i++) {
2466 p = proc_find(pid_list[i]);
2467 if (!p) {
2468 continue;
2469 }
2470
2471 int callout_ret = callout(p, arg);
2472
2473 switch (callout_ret) {
2474 case PROC_RETURNED_DONE:
2475 proc_rele(p);
2476 /* FALLTHROUGH */
2477 case PROC_CLAIMED_DONE:
2478 goto out;
2479
2480 case PROC_RETURNED:
2481 proc_rele(p);
2482 /* FALLTHROUGH */
2483 case PROC_CLAIMED:
2484 break;
2485 default:
2486 panic("proc_childrenwalk: callout returned %d for pid %d",
2487 callout_ret, pid_list[i]);
2488 break;
2489 }
2490 }
2491
2492 out:
2493 kfree(pid_list, pid_list_size);
2494 return 0;
2495 }
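/*
 * Usage sketch (illustrative): find the first stopped child of a parent.
 * Returning PROC_RETURNED_DONE exercises the early-exit path above: the
 * iterator drops the ref and abandons the rest of the walk.
 */
#if 0
static int
example_first_stopped(proc_t p, void *arg)
{
    if (p->p_stat == SSTOP) {
        *(pid_t *)arg = proc_pid(p);
        return (PROC_RETURNED_DONE);
    }
    return (PROC_RETURNED);
}

/* pid_t pid = -1; proc_childrenwalk(parent, example_first_stopped, &pid); */
#endif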
2496
2497 int
2498 pgrp_iterate(
2499 struct pgrp *pgrp,
2500 unsigned int flags,
2501 proc_iterate_fn_t callout,
2502 void * arg,
2503 proc_iterate_fn_t filterfn,
2504 void * filterarg)
2505 {
2506 pid_t *pid_list = NULL;
2507 proc_t p;
2508 vm_size_t pid_list_size = 0;
2509 vm_size_t pid_list_size_needed = 0;
2510 int pid_count = 0;
2511 int pid_count_available = 0;
2512
2513 pid_t pgid;
2514
2515 assert(pgrp != NULL);
2516 assert(callout != NULL);
2517
2518 for (;;) {
2519 pgrp_lock(pgrp);
2520
2521 pid_count_available = pgrp->pg_membercnt;
2522 if (pid_count_available == 0) {
2523 pgrp_unlock(pgrp);
2524 return 0;
2525 }
2526
2527 pid_list_size_needed = pid_count_available * sizeof(pid_t);
2528 if (pid_list_size >= pid_list_size_needed) {
2529 break;
2530 }
2531 pgrp_unlock(pgrp);
2532
2533 if (pid_list_size != 0) {
2534 kfree(pid_list, pid_list_size);
2535 }
2536 pid_list = kalloc(pid_list_size_needed);
2537 if (!pid_list) {
2538 return 1;
2539 }
2540 pid_list_size = pid_list_size_needed;
2541 }
2542
2543 pgid = pgrp->pg_id;
2544
2545 PGMEMBERS_FOREACH(pgrp, p) {
2546 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
2547 continue;
2548 }
2549 pid_list[pid_count++] = proc_pid(p);
2550 if (pid_count >= pid_count_available) {
2551 break;
2552 }
2553 }
2554
2555 pgrp_unlock(pgrp);
2556
2557 if (flags & PGRP_DROPREF) {
2558 pg_rele(pgrp);
2559 }
2560
2561 for (int i = 0; i < pid_count; i++) {
2562 /* do not handle kernproc */
2563 if (pid_list[i] == 0) {
2564 continue;
2565 }
2566 p = proc_find(pid_list[i]);
2567 if (!p) {
2568 continue;
2569 }
2570 if (p->p_pgrpid != pgid) {
2571 proc_rele(p);
2572 continue;
2573 }
2574
2575 int callout_ret = callout(p, arg);
2576
2577 switch (callout_ret) {
2578 case PROC_RETURNED:
2579 proc_rele(p);
2580 /* FALLTHROUGH */
2581 case PROC_CLAIMED:
2582 break;
2583
2584 case PROC_RETURNED_DONE:
2585 proc_rele(p);
2586 /* FALLTHROUGH */
2587 case PROC_CLAIMED_DONE:
2588 goto out;
2589
2590 default:
2591 panic("pgrp_iterate: callout returned %d for pid %d",
2592 callout_ret, pid_list[i]);
2593 }
2594 }
2595
2596 out:
2597 kfree(pid_list, pid_list_size);
2598 return 0;
2599 }
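/*
 * Usage sketch (illustrative): fan a signal out to every member of a
 * process group, in the spirit of pgsignal(). This is a simplified,
 * hypothetical version without the controlling-tty filter.
 */
#if 0
static int
example_signal_callout(proc_t p, void *arg)
{
    psignal(p, *(int *)arg);
    return (PROC_RETURNED);
}

static void
example_pgsignal(struct pgrp *pgrp, int sig)
{
    pgrp_iterate(pgrp, 0, example_signal_callout, &sig, NULL, NULL);
}
#endif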
2600
2601 static void
2602 pgrp_add(struct pgrp * pgrp, struct proc * parent, struct proc * child)
2603 {
2604 proc_list_lock();
2605 child->p_pgrp = pgrp;
2606 child->p_pgrpid = pgrp->pg_id;
2607 child->p_listflag |= P_LIST_INPGRP;
2608 /*
2609 * While a pgrp is being freed, a process can still request
2610 * addition to it via setpgid() (e.g. from bash when a login
2611 * is terminated by the login cycler), rather than returning ESRCH.
2612 * It is safe to hold the lock here due to the refcount on the pgrp.
2613 */
2614 if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
2615 pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
2616 }
2617
2618 if ((pgrp->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
2619 panic("pgrp_add: pgrp is dead, adding process");
2620 proc_list_unlock();
2621
2622 pgrp_lock(pgrp);
2623 pgrp->pg_membercnt++;
2624 if (parent != PROC_NULL) {
2625 LIST_INSERT_AFTER(parent, child, p_pglist);
2626 } else {
2627 LIST_INSERT_HEAD(&pgrp->pg_members, child, p_pglist);
2628 }
2629 pgrp_unlock(pgrp);
2630
2631 proc_list_lock();
2632 if (((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (pgrp->pg_membercnt != 0)) {
2633 pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
2634 }
2635 proc_list_unlock();
2636 }
2637
2638 static void
2639 pgrp_remove(struct proc * p)
2640 {
2641 struct pgrp * pg;
2642
2643 pg = proc_pgrp(p);
2644
2645 proc_list_lock();
2646 #if __PROC_INTERNAL_DEBUG
2647 if ((p->p_listflag & P_LIST_INPGRP) == 0)
2648 panic("removing from pglist but no named ref\n");
2649 #endif
2650 p->p_pgrpid = PGRPID_DEAD;
2651 p->p_listflag &= ~P_LIST_INPGRP;
2652 p->p_pgrp = NULL;
2653 proc_list_unlock();
2654
2655 if (pg == PGRP_NULL)
2656 panic("pgrp_remove: pg is NULL");
2657 pgrp_lock(pg);
2658 pg->pg_membercnt--;
2659
2660 if (pg->pg_membercnt < 0)
2661 panic("pgrp_remove: negative membercnt pgrp:%p p:%p\n", pg, p);
2662
2663 LIST_REMOVE(p, p_pglist);
2664 if (pg->pg_members.lh_first == 0) {
2665 pgrp_unlock(pg);
2666 pgdelete_dropref(pg);
2667 } else {
2668 pgrp_unlock(pg);
2669 pg_rele(pg);
2670 }
2671 }
2672
2673
2674 /* cannot use proc_pgrp() here, as the p_pgrp pointer may be stale (mid-transition) */
2675 static void
2676 pgrp_replace(struct proc * p, struct pgrp * newpg)
2677 {
2678 struct pgrp * oldpg;
2679
2680
2681
2682 proc_list_lock();
2683
2684 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2685 p->p_listflag |= P_LIST_PGRPTRWAIT;
2686 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2687 }
2688
2689 p->p_listflag |= P_LIST_PGRPTRANS;
2690
2691 oldpg = p->p_pgrp;
2692 if (oldpg == PGRP_NULL)
2693 panic("pgrp_replace: oldpg NULL");
2694 oldpg->pg_refcount++;
2695 #if __PROC_INTERNAL_DEBUG
2696 if ((p->p_listflag & P_LIST_INPGRP) == 0)
2697 panic("removing from pglist but no named ref\n");
2698 #endif
2699 p->p_pgrpid = PGRPID_DEAD;
2700 p->p_listflag &= ~P_LIST_INPGRP;
2701 p->p_pgrp = NULL;
2702
2703 proc_list_unlock();
2704
2705 pgrp_lock(oldpg);
2706 oldpg->pg_membercnt--;
2707 if (oldpg->pg_membercnt < 0)
2708 panic("pgrp_replace: negative membercnt pgrp:%p p:%p\n", oldpg, p);
2709 LIST_REMOVE(p, p_pglist);
2710 if (oldpg->pg_members.lh_first == 0) {
2711 pgrp_unlock(oldpg);
2712 pgdelete_dropref(oldpg);
2713 } else {
2714 pgrp_unlock(oldpg);
2715 pg_rele(oldpg);
2716 }
2717
2718 proc_list_lock();
2719 p->p_pgrp = newpg;
2720 p->p_pgrpid = newpg->pg_id;
2721 p->p_listflag |= P_LIST_INPGRP;
2722 /*
2723 * While a pgrp is being freed, a process can still request
2724 * addition to it via setpgid() (e.g. from bash when a login
2725 * is terminated by the login cycler), rather than returning ESRCH.
2726 * It is safe to hold the lock here due to the refcount on the pgrp.
2727 */
2728 if ((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
2729 newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
2730 }
2731
2732 if ((newpg->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
2733 panic("pgrp_replace: pgrp is dead, adding process");
2734 proc_list_unlock();
2735
2736 pgrp_lock(newpg);
2737 newpg->pg_membercnt++;
2738 LIST_INSERT_HEAD(&newpg->pg_members, p, p_pglist);
2739 pgrp_unlock(newpg);
2740
2741 proc_list_lock();
2742 if (((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (newpg->pg_membercnt != 0)) {
2743 newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
2744 }
2745
2746 p->p_listflag &= ~P_LIST_PGRPTRANS;
2747 if ((p->p_listflag & P_LIST_PGRPTRWAIT) == P_LIST_PGRPTRWAIT) {
2748 p->p_listflag &= ~P_LIST_PGRPTRWAIT;
2749 wakeup(&p->p_pgrpid);
2750
2751 }
2752 proc_list_unlock();
2753 }
2754
2755 void
2756 pgrp_lock(struct pgrp * pgrp)
2757 {
2758 lck_mtx_lock(&pgrp->pg_mlock);
2759 }
2760
2761 void
2762 pgrp_unlock(struct pgrp * pgrp)
2763 {
2764 lck_mtx_unlock(&pgrp->pg_mlock);
2765 }
2766
2767 void
2768 session_lock(struct session * sess)
2769 {
2770 lck_mtx_lock(&sess->s_mlock);
2771 }
2772
2773
2774 void
2775 session_unlock(struct session * sess)
2776 {
2777 lck_mtx_unlock(&sess->s_mlock);
2778 }
2779
2780 struct pgrp *
2781 proc_pgrp(proc_t p)
2782 {
2783 struct pgrp * pgrp;
2784
2785 if (p == PROC_NULL)
2786 return(PGRP_NULL);
2787 proc_list_lock();
2788
2789 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2790 p->p_listflag |= P_LIST_PGRPTRWAIT;
2791 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2792 }
2793
2794 pgrp = p->p_pgrp;
2795
2796 assert(pgrp != NULL);
2797
2798 if (pgrp != PGRP_NULL) {
2799 pgrp->pg_refcount++;
2800 if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) != 0)
2801 panic("proc_pgrp: ref being provided for dead pgrp");
2802 }
2803
2804 proc_list_unlock();
2805
2806 return(pgrp);
2807 }
2808
2809 struct pgrp *
2810 tty_pgrp(struct tty * tp)
2811 {
2812 struct pgrp * pg = PGRP_NULL;
2813
2814 proc_list_lock();
2815 pg = tp->t_pgrp;
2816
2817 if (pg != PGRP_NULL) {
2818 if ((pg->pg_listflags & PGRP_FLAG_DEAD) != 0)
2819 panic("tty_pgrp: ref being provided for dead pgrp");
2820 pg->pg_refcount++;
2821 }
2822 proc_list_unlock();
2823
2824 return(pg);
2825 }
2826
2827 struct session *
2828 proc_session(proc_t p)
2829 {
2830 struct session * sess = SESSION_NULL;
2831
2832 if (p == PROC_NULL)
2833 return(SESSION_NULL);
2834
2835 proc_list_lock();
2836
2837 /* wait during transitions */
2838 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2839 p->p_listflag |= P_LIST_PGRPTRWAIT;
2840 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2841 }
2842
2843 if ((p->p_pgrp != PGRP_NULL) && ((sess = p->p_pgrp->pg_session) != SESSION_NULL)) {
2844 if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
2845 panic("proc_session: returning session ref on terminating session");
2846 sess->s_count++;
2847 }
2848 proc_list_unlock();
2849 return(sess);
2850 }
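/*
 * Usage sketch (illustrative): proc_session() returns a referenced session
 * (or SESSION_NULL), so every successful lookup must be balanced with
 * session_rele(). Assumes s_sid is the session-id field of struct session.
 */
#if 0
static pid_t
example_session_id(proc_t p)
{
    struct session *sess = proc_session(p);
    pid_t sid = -1;

    if (sess != SESSION_NULL) {
        sid = sess->s_sid;
        session_rele(sess);
    }
    return (sid);
}
#endif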
2851
2852 void
2853 session_rele(struct session *sess)
2854 {
2855 proc_list_lock();
2856 if (--sess->s_count == 0) {
2857 if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
2858 panic("session_rele: terminating already terminated session");
2859 sess->s_listflags |= S_LIST_TERM;
2860 LIST_REMOVE(sess, s_hash);
2861 sess->s_listflags |= S_LIST_DEAD;
2862 if (sess->s_count != 0)
2863 panic("session_rele: freeing session in use");
2864 proc_list_unlock();
2865 #if CONFIG_FINE_LOCK_GROUPS
2866 lck_mtx_destroy(&sess->s_mlock, proc_mlock_grp);
2867 #else
2868 lck_mtx_destroy(&sess->s_mlock, proc_lck_grp);
2869 #endif
2870 FREE_ZONE(sess, sizeof(struct session), M_SESSION);
2871 } else
2872 proc_list_unlock();
2873 }
2874
2875 int
2876 proc_transstart(proc_t p, int locked, int non_blocking)
2877 {
2878 if (locked == 0)
2879 proc_lock(p);
2880 while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
2881 if (((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT) || non_blocking) {
2882 if (locked == 0)
2883 proc_unlock(p);
2884 return EDEADLK;
2885 }
2886 p->p_lflag |= P_LTRANSWAIT;
2887 msleep(&p->p_lflag, &p->p_mlock, 0, "proc_transstart", NULL);
2888 }
2889 p->p_lflag |= P_LINTRANSIT;
2890 p->p_transholder = current_thread();
2891 if (locked == 0)
2892 proc_unlock(p);
2893 return 0;
2894 }
2895
2896 void
2897 proc_transcommit(proc_t p, int locked)
2898 {
2899 if (locked == 0)
2900 proc_lock(p);
2901
2902 assert((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT);
2903 assert(p->p_transholder == current_thread());
2904 p->p_lflag |= P_LTRANSCOMMIT;
2905
2906 if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
2907 p->p_lflag &= ~P_LTRANSWAIT;
2908 wakeup(&p->p_lflag);
2909 }
2910 if (locked == 0)
2911 proc_unlock(p);
2912 }
2913
2914 void
2915 proc_transend(proc_t p, int locked)
2916 {
2917 if (locked == 0)
2918 proc_lock(p);
2919
2920 p->p_lflag &= ~( P_LINTRANSIT | P_LTRANSCOMMIT);
2921 p->p_transholder = NULL;
2922
2923 if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
2924 p->p_lflag &= ~P_LTRANSWAIT;
2925 wakeup(&p->p_lflag);
2926 }
2927 if (locked == 0)
2928 proc_unlock(p);
2929 }
2930
2931 int
2932 proc_transwait(proc_t p, int locked)
2933 {
2934 if (locked == 0)
2935 proc_lock(p);
2936 while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
2937 if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT && current_proc() == p) {
2938 if (locked == 0)
2939 proc_unlock(p);
2940 return EDEADLK;
2941 }
2942 p->p_lflag |= P_LTRANSWAIT;
2943 msleep(&p->p_lflag, &p->p_mlock, 0, "proc_transwait", NULL);
2944 }
2945 if (locked == 0)
2946 proc_unlock(p);
2947 return 0;
2948 }
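/*
 * Usage sketch (illustrative) of the transition protocol implemented above:
 * a mutator brackets its change with proc_transstart() and proc_transend();
 * proc_transcommit() marks the point of no return, after which concurrent
 * proc_transstart() attempts fail fast with EDEADLK instead of blocking.
 * Readers that need a stable proc call proc_transwait().
 */
#if 0
static int
example_transaction(proc_t p)
{
    if (proc_transstart(p, 0 /* not locked */, 0 /* blocking */) != 0)
        return (EDEADLK);       /* already mid-transition */

    /* ... mutate the proc here ... */

    proc_transcommit(p, 0);     /* commit: no turning back */
    /* ... complete the mutation ... */
    proc_transend(p, 0);        /* wake any proc_transwait() sleepers */
    return (0);
}
#endif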
2949
2950 void
2951 proc_klist_lock(void)
2952 {
2953 lck_mtx_lock(proc_klist_mlock);
2954 }
2955
2956 void
2957 proc_klist_unlock(void)
2958 {
2959 lck_mtx_unlock(proc_klist_mlock);
2960 }
2961
2962 void
2963 proc_knote(struct proc * p, long hint)
2964 {
2965 proc_klist_lock();
2966 KNOTE(&p->p_klist, hint);
2967 proc_klist_unlock();
2968 }
2969
2970 void
2971 proc_knote_drain(struct proc *p)
2972 {
2973 struct knote *kn = NULL;
2974
2975 /*
2976 * Clear the proc's klist to avoid references after the proc is reaped.
2977 */
2978 proc_klist_lock();
2979 while ((kn = SLIST_FIRST(&p->p_klist))) {
2980 kn->kn_ptr.p_proc = PROC_NULL;
2981 KNOTE_DETACH(&p->p_klist, kn);
2982 }
2983 proc_klist_unlock();
2984 }
2985
2986 void
2987 proc_setregister(proc_t p)
2988 {
2989 proc_lock(p);
2990 p->p_lflag |= P_LREGISTER;
2991 proc_unlock(p);
2992 }
2993
2994 void
2995 proc_resetregister(proc_t p)
2996 {
2997 proc_lock(p);
2998 p->p_lflag &= ~P_LREGISTER;
2999 proc_unlock(p);
3000 }
3001
3002 pid_t
3003 proc_pgrpid(proc_t p)
3004 {
3005 return p->p_pgrpid;
3006 }
3007
3008 pid_t
3009 proc_selfpgrpid(void)
3010 {
3011 return current_proc()->p_pgrpid;
3012 }
3013
3014
3015 /* return control and action states */
3016 int
3017 proc_getpcontrol(int pid, int * pcontrolp)
3018 {
3019 proc_t p;
3020
3021 p = proc_find(pid);
3022 if (p == PROC_NULL)
3023 return(ESRCH);
3024 if (pcontrolp != NULL)
3025 *pcontrolp = p->p_pcaction;
3026
3027 proc_rele(p);
3028 return(0);
3029 }
3030
3031 int
3032 proc_dopcontrol(proc_t p)
3033 {
3034 int pcontrol;
3035
3036 proc_lock(p);
3037
3038 pcontrol = PROC_CONTROL_STATE(p);
3039
3040 if (PROC_ACTION_STATE(p) == 0) {
3041 switch (pcontrol) {
3042 case P_PCTHROTTLE:
3043 PROC_SETACTION_STATE(p);
3044 proc_unlock(p);
3045 printf("low swap: throttling pid %d (%s)\n", p->p_pid, p->p_comm);
3046 break;
3047
3048 case P_PCSUSP:
3049 PROC_SETACTION_STATE(p);
3050 proc_unlock(p);
3051 printf("low swap: suspending pid %d (%s)\n", p->p_pid, p->p_comm);
3052 task_suspend(p->task);
3053 break;
3054
3055 case P_PCKILL:
3056 PROC_SETACTION_STATE(p);
3057 proc_unlock(p);
3058 printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
3059 psignal(p, SIGKILL);
3060 break;
3061
3062 default:
3063 proc_unlock(p);
3064 }
3065
3066 } else
3067 proc_unlock(p);
3068
3069 return(PROC_RETURNED);
3070 }
3071
3072
3073 /*
3074 * Resume a throttled or suspended process. This is an internal interface that's only
3075 * used by the user level code that presents the GUI when we run out of swap space and
3076 * hence is restricted to processes with superuser privileges.
3077 */
3078
3079 int
3080 proc_resetpcontrol(int pid)
3081 {
3082 proc_t p;
3083 int pcontrol;
3084 int error;
3085 proc_t self = current_proc();
3086
3087 /* allow the caller if it has been validated as the resource-control owner, or if it has superuser credentials */
3088 if (((self->p_lflag & P_LVMRSRCOWNER) == 0) && (error = suser(kauth_cred_get(), 0)))
3089 return error;
3090
3091 p = proc_find(pid);
3092 if (p == PROC_NULL)
3093 return(ESRCH);
3094
3095 proc_lock(p);
3096
3097 pcontrol = PROC_CONTROL_STATE(p);
3098
3099 if (PROC_ACTION_STATE(p) != 0) {
3100 switch (pcontrol) {
3101 case P_PCTHROTTLE:
3102 PROC_RESETACTION_STATE(p);
3103 proc_unlock(p);
3104 printf("low swap: unthrottling pid %d (%s)\n", p->p_pid, p->p_comm);
3105 break;
3106
3107 case P_PCSUSP:
3108 PROC_RESETACTION_STATE(p);
3109 proc_unlock(p);
3110 printf("low swap: resuming pid %d (%s)\n", p->p_pid, p->p_comm);
3111 task_resume(p->task);
3112 break;
3113
3114 case P_PCKILL:
3115 /* a kill cannot be undone; record the attempt and leave the action state set */
3116 PROC_SETACTION_STATE(p);
3117 proc_unlock(p);
3118 printf("low swap: attempt to unkill pid %d (%s) ignored\n", p->p_pid, p->p_comm);
3119 break;
3120
3121 default:
3122 proc_unlock(p);
3123 }
3124
3125 } else
3126 proc_unlock(p);
3127
3128 proc_rele(p);
3129 return(0);
3130 }
3131
3132
3133
3134 struct no_paging_space
3135 {
3136 uint64_t pcs_max_size;
3137 uint64_t pcs_uniqueid;
3138 int pcs_pid;
3139 int pcs_proc_count;
3140 uint64_t pcs_total_size;
3141
3142 uint64_t npcs_max_size;
3143 uint64_t npcs_uniqueid;
3144 int npcs_pid;
3145 int npcs_proc_count;
3146 uint64_t npcs_total_size;
3147
3148 int apcs_proc_count;
3149 uint64_t apcs_total_size;
3150 };
3151
3152
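/*
 * Filter for the proc_iterate() pass in no_paging_space_action() below.
 * It always returns 0, so no process is ever passed through to the callout;
 * it runs purely for its side effects, accumulating per-category compressor
 * usage (and the single largest process in each category) into the caller's
 * struct no_paging_space.
 */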
3153 static int
3154 proc_pcontrol_filter(proc_t p, void *arg)
3155 {
3156 struct no_paging_space *nps;
3157 uint64_t compressed;
3158
3159 nps = (struct no_paging_space *)arg;
3160
3161 compressed = get_task_compressed(p->task);
3162
3163 if (PROC_CONTROL_STATE(p)) {
3164 if (PROC_ACTION_STATE(p) == 0) {
3165 if (compressed > nps->pcs_max_size) {
3166 nps->pcs_pid = p->p_pid;
3167 nps->pcs_uniqueid = p->p_uniqueid;
3168 nps->pcs_max_size = compressed;
3169 }
3170 nps->pcs_total_size += compressed;
3171 nps->pcs_proc_count++;
3172 } else {
3173 nps->apcs_total_size += compressed;
3174 nps->apcs_proc_count++;
3175 }
3176 } else {
3177 if (compressed > nps->npcs_max_size) {
3178 nps->npcs_pid = p->p_pid;
3179 nps->npcs_uniqueid = p->p_uniqueid;
3180 nps->npcs_max_size = compressed;
3181 }
3182 nps->npcs_total_size += compressed;
3183 nps->npcs_proc_count++;
3184
3185 }
3186 return (0);
3187 }
3188
3189
3190 static int
3191 proc_pcontrol_null(__unused proc_t p, __unused void *arg)
3192 {
3193 return(PROC_RETURNED);
3194 }
3195
3196
3197 /*
3198 * Deal with the low on compressor pool space condition... this function
3199 * gets called when we are approaching the limits of the compressor pool or
3200 * we are unable to create a new swap file.
3201 * Since this eventually creates a memory deadlock situation, we need to take action to free up
3202 * memory resources (both compressed and uncompressed) in order to prevent the system from hanging completely.
3203 * There are 2 categories of processes to deal with. Those that have an action
3204 * associated with them by the task itself and those that do not. Actionable
3205 * tasks can have one of three categories specified: ones that
3206 * can be killed immediately, ones that should be suspended, and ones that should
3207 * be throttled. Processes that do not have an action associated with them are normally
3208 * ignored unless they are utilizing such a large percentage of the compressor pool (currently 50%)
3209 * that only by killing them can we hope to put the system back into a usable state.
3210 */
3211
3212 #define NO_PAGING_SPACE_DEBUG 0
3213
3214 extern uint64_t vm_compressor_pages_compressed(void);
3215
3216 struct timeval last_no_space_action = {0, 0};
3217
3218 #if DEVELOPMENT || DEBUG
3219 extern boolean_t kill_on_no_paging_space;
3220 #endif /* DEVELOPMENT || DEBUG */
3221
3222 #define MB_SIZE (1024 * 1024ULL)
3223 boolean_t memorystatus_kill_on_VM_thrashing(boolean_t);
3224
3225 extern int32_t max_kill_priority;
3226 extern int memorystatus_get_proccnt_upto_priority(int32_t max_bucket_index);
3227
3228 int
3229 no_paging_space_action(void)
3230 {
3231 proc_t p;
3232 struct no_paging_space nps;
3233 struct timeval now;
3234
3235 /*
3236 * Throttle how often we come through here. Once every 5 seconds should be plenty.
3237 */
3238 microtime(&now);
3239
3240 if (now.tv_sec <= last_no_space_action.tv_sec + 5)
3241 return (0);
3242
3243 /*
3244 * Examine all processes and find the biggest (biggest is based on the number of pages this
3245 * task has in the compressor pool) that has been marked to have some action
3246 * taken when swap space runs out... we also find the biggest that hasn't been marked for
3247 * action.
3248 *
3249 * If the biggest non-actionable task is over the "dangerously big" threshold (currently 50% of
3250 * the total number of pages held by the compressor), we go ahead and kill it, since no other task
3251 * can have any real effect on the situation. Otherwise, we go after the actionable process.
3252 */
3253 bzero(&nps, sizeof(nps));
3254
3255 proc_iterate(PROC_ALLPROCLIST, proc_pcontrol_null, (void *)NULL, proc_pcontrol_filter, (void *)&nps);
3256
3257 #if NO_PAGING_SPACE_DEBUG
3258 printf("low swap: npcs_proc_count = %d, npcs_total_size = %qd, npcs_max_size = %qd\n",
3259 nps.npcs_proc_count, nps.npcs_total_size, nps.npcs_max_size);
3260 printf("low swap: pcs_proc_count = %d, pcs_total_size = %qd, pcs_max_size = %qd\n",
3261 nps.pcs_proc_count, nps.pcs_total_size, nps.pcs_max_size);
3262 printf("low swap: apcs_proc_count = %d, apcs_total_size = %qd\n",
3263 nps.apcs_proc_count, nps.apcs_total_size);
3264 #endif
3265 if (nps.npcs_max_size > (vm_compressor_pages_compressed() * 50) / 100) {
3266 /*
3267 * for now we'll knock out any task that has more than 50% of the pages
3268 * held by the compressor
3269 */
3270 if ((p = proc_find(nps.npcs_pid)) != PROC_NULL) {
3271
3272 if (nps.npcs_uniqueid == p->p_uniqueid) {
3273 /*
3274 * verify this is still the same process
3275 * in case the proc exited and the pid got reused while
3276 * we were finishing the proc_iterate and getting to this point
3277 */
3278 last_no_space_action = now;
3279
3280 printf("low swap: killing largest compressed process with pid %d (%s) and size %llu MB\n", p->p_pid, p->p_comm, (nps.npcs_max_size / MB_SIZE));
3281 psignal(p, SIGKILL);
3282
3283 proc_rele(p);
3284
3285 return (0);
3286 }
3287
3288 proc_rele(p);
3289 }
3290 }
3291
3292 /*
3293 * If there are processes within the jetsam bands under consideration, they are
3294 * eligible to be killed, so invoke the memorystatus thread to go ahead and kill one.
3295 */
3296 if (memorystatus_get_proccnt_upto_priority(max_kill_priority) > 0) {
3297
3298 last_no_space_action = now;
3299 memorystatus_kill_on_VM_thrashing(TRUE /* async */);
3300 return (1);
3301 }
3302
3303 /*
3304 * No eligible processes to kill. So let's suspend/kill the largest
3305 * process depending on its policy control specifications.
3306 */
3307
3308 if (nps.pcs_max_size > 0) {
3309 if ((p = proc_find(nps.pcs_pid)) != PROC_NULL) {
3310
3311 if (nps.pcs_uniqueid == p->p_uniqueid) {
3312 /*
3313 * verify this is still the same process
3314 * in case the proc exited and the pid got reused while
3315 * we were finishing the proc_iterate and getting to this point
3316 */
3317 last_no_space_action = now;
3318
3319 proc_dopcontrol(p);
3320
3321 proc_rele(p);
3322
3323 return (1);
3324 }
3325
3326 proc_rele(p);
3327 }
3328 }
3329 last_no_space_action = now;
3330
3331 printf("low swap: unable to find any eligible processes to take action on\n");
3332
3333 return (0);
3334 }
3335
3336 int
3337 proc_trace_log(__unused proc_t p, struct proc_trace_log_args *uap, __unused int *retval)
3338 {
3339 int ret = 0;
3340 proc_t target_proc = PROC_NULL;
3341 pid_t target_pid = uap->pid;
3342 uint64_t target_uniqueid = uap->uniqueid;
3343 task_t target_task = NULL;
3344
3345 if (priv_check_cred(kauth_cred_get(), PRIV_PROC_TRACE_INSPECT, 0)) {
3346 ret = EPERM;
3347 goto out;
3348 }
3349 target_proc = proc_find(target_pid);
3350 if (target_proc != PROC_NULL) {
3351 if (target_uniqueid != proc_uniqueid(target_proc)) {
3352 ret = ENOENT;
3353 goto out;
3354 }
3355
3356 target_task = proc_task(target_proc);
3357 if (task_send_trace_memory(target_task, target_pid, target_uniqueid)) {
3358 ret = EINVAL;
3359 goto out;
3360 }
3361 } else
3362 ret = ENOENT;
3363
3364 out:
3365 if (target_proc != PROC_NULL)
3366 proc_rele(target_proc);
3367 return (ret);
3368 }
3369
3370 #if VM_SCAN_FOR_SHADOW_CHAIN
3371 extern int vm_map_shadow_max(vm_map_t map);
3372 int proc_shadow_max(void);
3373 int proc_shadow_max(void)
3374 {
3375 int retval, max;
3376 proc_t p;
3377 task_t task;
3378 vm_map_t map;
3379
3380 max = 0;
3381 proc_list_lock();
3382 for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
3383 if (p->p_stat == SIDL)
3384 continue;
3385 task = p->task;
3386 if (task == NULL) {
3387 continue;
3388 }
3389 map = get_task_map(task);
3390 if (map == NULL) {
3391 continue;
3392 }
3393 retval = vm_map_shadow_max(map);
3394 if (retval > max) {
3395 max = retval;
3396 }
3397 }
3398 proc_list_unlock();
3399 return max;
3400 }
3401 #endif /* VM_SCAN_FOR_SHADOW_CHAIN */
3402
3403 void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid);
3404 void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid)
3405 {
3406 if (target_proc != NULL) {
3407 target_proc->p_responsible_pid = responsible_pid;
3408 }
3409 return;
3410 }
3411
3412 int
3413 proc_chrooted(proc_t p)
3414 {
3415 int retval = 0;
3416
3417 if (p) {
3418 proc_fdlock(p);
3419 retval = (p->p_fd->fd_rdir != NULL) ? 1 : 0;
3420 proc_fdunlock(p);
3421 }
3422
3423 return retval;
3424 }
3425
3426 void *
3427 proc_get_uthread_uu_threadlist(void * uthread_v)
3428 {
3429 uthread_t uth = (uthread_t)uthread_v;
3430 return (uth != NULL) ? uth->uu_threadlist : NULL;
3431 }