1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)kern_proc.c 8.4 (Berkeley) 1/4/94
62 */
63 /*
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
67 * Version 2.0.
68 */
69 /* HISTORY
70 * 04-Aug-97 Umesh Vaishampayan (umeshv@apple.com)
71 * Added current_proc_EXTERNAL() function for the use of kernel
72 * loadable modules.
73 *
74 * 05-Jun-95 Mac Gillon (mgillon) at NeXT
75 * New version based on 3.3NS and 4.4
76 */
77
78
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/kernel.h>
82 #include <sys/proc_internal.h>
83 #include <sys/acct.h>
84 #include <sys/wait.h>
85 #include <sys/file_internal.h>
86 #include <sys/uio.h>
87 #include <sys/malloc.h>
88 #include <sys/lock.h>
89 #include <sys/mbuf.h>
90 #include <sys/ioctl.h>
91 #include <sys/tty.h>
92 #include <sys/signalvar.h>
93 #include <sys/syslog.h>
94 #include <sys/sysctl.h>
95 #include <sys/sysproto.h>
96 #include <sys/kauth.h>
97 #include <sys/codesign.h>
98 #include <sys/kernel_types.h>
99 #include <sys/ubc.h>
100 #include <kern/kalloc.h>
101 #include <kern/task.h>
102 #include <kern/coalition.h>
103 #include <sys/coalition.h>
104 #include <kern/assert.h>
105 #include <vm/vm_protos.h>
106 #include <vm/vm_map.h> /* vm_map_switch_protect() */
107 #include <vm/vm_pageout.h>
108 #include <mach/task.h>
109 #include <mach/message.h>
110 #include <sys/priv.h>
111 #include <sys/proc_info.h>
112 #include <sys/bsdtask_info.h>
113 #include <sys/persona.h>
114
115 #ifdef CONFIG_32BIT_TELEMETRY
116 #include <sys/kasl.h>
117 #endif /* CONFIG_32BIT_TELEMETRY */
118
119 #if CONFIG_CSR
120 #include <sys/csr.h>
121 #endif
122
123 #if CONFIG_MEMORYSTATUS
124 #include <sys/kern_memorystatus.h>
125 #endif
126
127 #if CONFIG_MACF
128 #include <security/mac_framework.h>
129 #endif
130
131 #include <libkern/crypto/sha1.h>
132
133 #ifdef CONFIG_32BIT_TELEMETRY
134 #define MAX_32BIT_EXEC_SIG_SIZE 160
135 #endif /* CONFIG_32BIT_TELEMETRY */
136
137 /*
138 * Structure associated with user caching.
139 */
140 struct uidinfo {
141 LIST_ENTRY(uidinfo) ui_hash;
142 uid_t ui_uid;
143 long ui_proccnt;
144 };
145 #define UIHASH(uid) (&uihashtbl[(uid) & uihash])
146 LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
147 u_long uihash; /* size of hash table - 1 */
148
149 /*
150 * Other process lists
151 */
152 struct pidhashhead *pidhashtbl;
153 u_long pidhash;
154 struct pgrphashhead *pgrphashtbl;
155 u_long pgrphash;
156 struct sesshashhead *sesshashtbl;
157 u_long sesshash;
158
159 struct proclist allproc;
160 struct proclist zombproc;
161 extern struct tty cons;
162
163 extern int cs_debug;
164
165 #if DEVELOPMENT || DEBUG
166 extern int cs_enforcement_enable;
167 #endif
168
169 #if DEBUG
170 #define __PROC_INTERNAL_DEBUG 1
171 #endif
172 #if CONFIG_COREDUMP
173 /* Name to give to core files */
174 #if defined(XNU_TARGET_OS_BRIDGE)
175 __XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN+1] = {"/private/var/internal/%N.core"};
176 #elif CONFIG_EMBEDDED
177 __XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN+1] = {"/private/var/cores/%N.core"};
178 #else
179 __XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN+1] = {"/cores/core.%P"};
180 #endif
181 #endif
182
183 #if PROC_REF_DEBUG
184 #include <kern/backtrace.h>
185 #endif
186
187 typedef uint64_t unaligned_u64 __attribute__((aligned(1)));
188
189 static void orphanpg(struct pgrp * pg);
190 void proc_name_kdp(task_t t, char * buf, int size);
191 void * proc_get_uthread_uu_threadlist(void * uthread_v);
192 int proc_threadname_kdp(void * uth, char * buf, size_t size);
193 void proc_starttime_kdp(void * p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unaligned_u64 *abstime);
194 char * proc_name_address(void * p);
195
196 /* TODO: make a header that's exported and usable in osfmk */
197 char* proc_best_name(proc_t p);
198
199 static void pgrp_add(struct pgrp * pgrp, proc_t parent, proc_t child);
200 static void pgrp_remove(proc_t p);
201 static void pgrp_replace(proc_t p, struct pgrp *pgrp);
202 static void pgdelete_dropref(struct pgrp *pgrp);
203 extern void pg_rele_dropref(struct pgrp * pgrp);
204 static int csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaddittoken);
205 static boolean_t proc_parent_is_currentproc(proc_t p);
206
207 struct fixjob_iterargs {
208 struct pgrp * pg;
209 struct session * mysession;
210 int entering;
211 };
212
213 int fixjob_callback(proc_t, void *);
214
215 uint64_t get_current_unique_pid(void);
216
217
218 uint64_t
219 get_current_unique_pid(void)
220 {
221 proc_t p = current_proc();
222
223 if (p)
224 return p->p_uniqueid;
225 else
226 return 0;
227 }
228
229 /*
230 * Initialize global process hashing structures.
231 */
232 void
233 procinit(void)
234 {
235 LIST_INIT(&allproc);
236 LIST_INIT(&zombproc);
237 pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
238 pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
239 sesshashtbl = hashinit(maxproc / 4, M_PROC, &sesshash);
240 uihashtbl = hashinit(maxproc / 16, M_PROC, &uihash);
241 #if CONFIG_PERSONAS
242 personas_bootstrap();
243 #endif
244 }
245
246 /*
247 * Change the count associated with the number of processes
248 * a given user is using. This routine protects the uihash
249 * table with the list lock.
250 */
251 int
252 chgproccnt(uid_t uid, int diff)
253 {
254 struct uidinfo *uip;
255 struct uidinfo *newuip = NULL;
256 struct uihashhead *uipp;
257 int retval;
258
259 again:
260 proc_list_lock();
261 uipp = UIHASH(uid);
262 for (uip = uipp->lh_first; uip != 0; uip = uip->ui_hash.le_next)
263 if (uip->ui_uid == uid)
264 break;
265 if (uip) {
266 uip->ui_proccnt += diff;
267 if (uip->ui_proccnt > 0) {
268 retval = uip->ui_proccnt;
269 proc_list_unlock();
270 goto out;
271 }
272 if (uip->ui_proccnt < 0)
273 panic("chgproccnt: procs < 0");
274 LIST_REMOVE(uip, ui_hash);
275 retval = 0;
276 proc_list_unlock();
277 FREE_ZONE(uip, sizeof(*uip), M_PROC);
278 goto out;
279 }
280 if (diff <= 0) {
281 if (diff == 0) {
282 retval = 0;
283 proc_list_unlock();
284 goto out;
285 }
286 panic("chgproccnt: lost user");
287 }
288 if (newuip != NULL) {
289 uip = newuip;
290 newuip = NULL;
291 LIST_INSERT_HEAD(uipp, uip, ui_hash);
292 uip->ui_uid = uid;
293 uip->ui_proccnt = diff;
294 retval = diff;
295 proc_list_unlock();
296 goto out;
297 }
298 proc_list_unlock();
299 MALLOC_ZONE(newuip, struct uidinfo *, sizeof(*uip), M_PROC, M_WAITOK);
300 if (newuip == NULL)
301 panic("chgproccnt: M_PROC zone depleted");
302 goto again;
303 out:
304 if (newuip != NULL)
305 FREE_ZONE(newuip, sizeof(*uip), M_PROC);
306 return(retval);
307 }
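
/*
 * Illustrative sketch: a hypothetical caller in the fork path charges
 * the user for one more process and rolls the charge back if a per-uid
 * limit would be exceeded.  The "limit" parameter stands in for a policy
 * value such as maxprocperuid; the helper itself is not an XNU API.
 */
static int
example_charge_new_process(uid_t uid, int limit)
{
	/* chgproccnt() returns the user's new process count */
	if (chgproccnt(uid, 1) > limit) {
		(void)chgproccnt(uid, -1);	/* roll back the charge */
		return (EAGAIN);
	}
	return (0);
}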
308
309 /*
310 * Is p an inferior of the current process?
311 */
312 int
313 inferior(proc_t p)
314 {
315 int retval = 0;
316
317 proc_list_lock();
318 for (; p != current_proc(); p = p->p_pptr)
319 if (p->p_pid == 0)
320 goto out;
321 retval = 1;
322 out:
323 proc_list_unlock();
324 return(retval);
325 }
326
327 /*
328 * Is p an inferior of t ?
329 */
330 int
331 isinferior(proc_t p, proc_t t)
332 {
333 int retval = 0;
334 int nchecked = 0;
335 proc_t start = p;
336
337 /* if p==t they are not inferior */
338 if (p == t)
339 return(0);
340
341 proc_list_lock();
342 for (; p != t; p = p->p_pptr) {
343 nchecked++;
344
345 /* Detect here if we're in a cycle */
346 if ((p->p_pid == 0) || (p->p_pptr == start) || (nchecked >= nprocs))
347 goto out;
348 }
349 retval = 1;
350 out:
351 proc_list_unlock();
352 return(retval);
353 }
354
355 int
356 proc_isinferior(int pid1, int pid2)
357 {
358 proc_t p = PROC_NULL;
359 proc_t t = PROC_NULL;
360 int retval = 0;
361
362 if (((p = proc_find(pid1)) != (proc_t)0 ) && ((t = proc_find(pid2)) != (proc_t)0))
363 retval = isinferior(p, t);
364
365 if (p != PROC_NULL)
366 proc_rele(p);
367 if (t != PROC_NULL)
368 proc_rele(t);
369
370 return(retval);
371 }
372
373 proc_t
374 proc_find(int pid)
375 {
376 return(proc_findinternal(pid, 0));
377 }
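
/*
 * Illustrative sketch: every successful proc_find() must be balanced by
 * a proc_rele(), as the helpers further down in this file (proc_name,
 * proc_signal, ...) demonstrate.  example_proc_find_usage is hypothetical.
 */
static int
example_proc_find_usage(pid_t pid)
{
	proc_t p = proc_find(pid);

	if (p == PROC_NULL)
		return (ESRCH);
	/* the reference keeps p from being reaped or recycled while in use */
	proc_rele(p);
	return (0);
}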
378
379 proc_t
380 proc_findinternal(int pid, int locked)
381 {
382 proc_t p = PROC_NULL;
383
384 if (locked == 0) {
385 proc_list_lock();
386 }
387
388 p = pfind_locked(pid);
389 if ((p == PROC_NULL) || (p != proc_ref_locked(p)))
390 p = PROC_NULL;
391
392 if (locked == 0) {
393 proc_list_unlock();
394 }
395
396 return(p);
397 }
398
399 proc_t
400 proc_findthread(thread_t thread)
401 {
402 proc_t p = PROC_NULL;
403 struct uthread *uth;
404
405 proc_list_lock();
406 uth = get_bsdthread_info(thread);
407 if (uth && (uth->uu_flag & UT_VFORK))
408 p = uth->uu_proc;
409 else
410 p = (proc_t)(get_bsdthreadtask_info(thread));
411 p = proc_ref_locked(p);
412 proc_list_unlock();
413 return(p);
414 }
415
416 void
417 uthread_reset_proc_refcount(void *uthread) {
418 uthread_t uth;
419
420 uth = (uthread_t) uthread;
421 uth->uu_proc_refcount = 0;
422
423 #if PROC_REF_DEBUG
424 if (proc_ref_tracking_disabled) {
425 return;
426 }
427
428 uth->uu_pindex = 0;
429 #endif
430 }
431
432 #if PROC_REF_DEBUG
433 int
434 uthread_get_proc_refcount(void *uthread) {
435 uthread_t uth;
436
437 if (proc_ref_tracking_disabled) {
438 return 0;
439 }
440
441 uth = (uthread_t) uthread;
442
443 return uth->uu_proc_refcount;
444 }
445 #endif
446
447 static void
448 record_procref(proc_t p __unused, int count) {
449 uthread_t uth;
450
451 uth = current_uthread();
452 uth->uu_proc_refcount += count;
453
454 #if PROC_REF_DEBUG
455 if (proc_ref_tracking_disabled) {
456 return;
457 }
458
459 if (count == 1) {
460 if (uth->uu_pindex < NUM_PROC_REFS_TO_TRACK) {
461 backtrace((uintptr_t *) &uth->uu_proc_pcs[uth->uu_pindex], PROC_REF_STACK_DEPTH);
462
463 uth->uu_proc_ps[uth->uu_pindex] = p;
464 uth->uu_pindex++;
465 }
466 }
467 #endif
468 }
469
470 static boolean_t
471 uthread_needs_to_wait_in_proc_refwait(void) {
472 uthread_t uth = current_uthread();
473
474 /*
475 * Allow threads holding no proc refs to wait
476 * in proc_refwait; allowing threads that hold
477 * proc refs to wait there causes deadlocks
478 * and makes proc_find non-reentrant.
479 */
480 if (uth->uu_proc_refcount == 0)
481 return TRUE;
482
483 return FALSE;
484 }
485
486 int
487 proc_rele(proc_t p)
488 {
489 proc_list_lock();
490 proc_rele_locked(p);
491 proc_list_unlock();
492
493 return(0);
494 }
495
496 proc_t
497 proc_self(void)
498 {
499 struct proc * p;
500
501 p = current_proc();
502
503 proc_list_lock();
504 if (p != proc_ref_locked(p))
505 p = PROC_NULL;
506 proc_list_unlock();
507 return(p);
508 }
509
510
511 proc_t
512 proc_ref_locked(proc_t p)
513 {
514 proc_t p1 = p;
515 int pid = proc_pid(p);
516
517 retry:
518 /*
519 * If the process is still being created, or the proc got
520 * recycled during the msleep, return failure.
521 */
522 if ((p == PROC_NULL) || (p1 != p) || ((p->p_listflag & P_LIST_INCREATE) != 0))
523 return (PROC_NULL);
524
525 /*
526 * Do not return a process marked for termination
527 * or one in proc_refdrain without ref wait.
528 * Wait for proc_refdrain_with_refwait to complete if
529 * the process is in refdrain and the refwait flag is set,
530 * unless the current thread is already holding a proc_ref
531 * for some proc.
532 */
533 if ((p->p_stat != SZOMB) &&
534 ((p->p_listflag & P_LIST_EXITED) == 0) &&
535 ((p->p_listflag & P_LIST_DEAD) == 0) &&
536 (((p->p_listflag & (P_LIST_DRAIN | P_LIST_DRAINWAIT)) == 0) ||
537 ((p->p_listflag & P_LIST_REFWAIT) != 0))) {
538 if ((p->p_listflag & P_LIST_REFWAIT) != 0 && uthread_needs_to_wait_in_proc_refwait()) {
539 msleep(&p->p_listflag, proc_list_mlock, 0, "proc_refwait", 0);
540 /*
541 * the proc might have been recycled since we dropped
542 * the proc list lock, get the proc again.
543 */
544 p = pfind_locked(pid);
545 goto retry;
546 }
547 p->p_refcount++;
548 record_procref(p, 1);
549 }
550 else
551 p1 = PROC_NULL;
552
553 return(p1);
554 }
555
556 void
557 proc_rele_locked(proc_t p)
558 {
559
560 if (p->p_refcount > 0) {
561 p->p_refcount--;
562 record_procref(p, -1);
563 if ((p->p_refcount == 0) && ((p->p_listflag & P_LIST_DRAINWAIT) == P_LIST_DRAINWAIT)) {
564 p->p_listflag &= ~P_LIST_DRAINWAIT;
565 wakeup(&p->p_refcount);
566 }
567 } else
568 panic("proc_rele_locked -ve ref\n");
569
570 }
571
572 proc_t
573 proc_find_zombref(int pid)
574 {
575 proc_t p;
576
577 proc_list_lock();
578
579 again:
580 p = pfind_locked(pid);
581
582 /* should we bail? */
583 if ((p == PROC_NULL) /* not found */
584 || ((p->p_listflag & P_LIST_INCREATE) != 0) /* not created yet */
585 || ((p->p_listflag & P_LIST_EXITED) == 0)) { /* not started exit */
586
587 proc_list_unlock();
588 return (PROC_NULL);
589 }
590
591 /* If someone else is controlling the (unreaped) zombie - wait */
592 if ((p->p_listflag & P_LIST_WAITING) != 0) {
593 (void)msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
594 goto again;
595 }
596 p->p_listflag |= P_LIST_WAITING;
597
598 proc_list_unlock();
599
600 return(p);
601 }
602
603 void
604 proc_drop_zombref(proc_t p)
605 {
606 proc_list_lock();
607 if ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
608 p->p_listflag &= ~P_LIST_WAITING;
609 wakeup(&p->p_stat);
610 }
611 proc_list_unlock();
612 }
613
614
615 void
616 proc_refdrain(proc_t p)
617 {
618 proc_refdrain_with_refwait(p, FALSE);
619 }
620
621 proc_t
622 proc_refdrain_with_refwait(proc_t p, boolean_t get_ref_and_allow_wait)
623 {
624 boolean_t initexec = FALSE;
625 proc_list_lock();
626
627 p->p_listflag |= P_LIST_DRAIN;
628 if (get_ref_and_allow_wait) {
629 /*
630 * All the calls to proc_ref_locked will wait
631 * for the flag to get cleared before returning a ref,
632 * unless the current thread is already holding a proc ref
633 * for some proc.
634 */
635 p->p_listflag |= P_LIST_REFWAIT;
636 if (p == initproc) {
637 initexec = TRUE;
638 }
639 }
640
641 /* Do not wait in ref drain for launchd exec */
642 while (p->p_refcount && !initexec) {
643 p->p_listflag |= P_LIST_DRAINWAIT;
644 msleep(&p->p_refcount, proc_list_mlock, 0, "proc_refdrain", 0);
645 }
646
647 p->p_listflag &= ~P_LIST_DRAIN;
648 if (!get_ref_and_allow_wait) {
649 p->p_listflag |= P_LIST_DEAD;
650 } else {
651 /* Return a ref to the caller */
652 p->p_refcount++;
653 record_procref(p, 1);
654 }
655
656 proc_list_unlock();
657
658 if (get_ref_and_allow_wait) {
659 return (p);
660 }
661 return NULL;
662 }
663
664 void
665 proc_refwake(proc_t p)
666 {
667 proc_list_lock();
668 p->p_listflag &= ~P_LIST_REFWAIT;
669 wakeup(&p->p_listflag);
670 proc_list_unlock();
671 }
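
/*
 * Illustrative sketch of the assumed exec-side protocol: drain existing
 * refs while keeping one, hold off new proc_find()s via P_LIST_REFWAIT,
 * do the work, then wake waiters and drop the ref.  The helper is
 * hypothetical; the exact exec sequence lives in kern_exec.c.
 */
static void
example_exec_style_drain(proc_t p)
{
	proc_t refp = proc_refdrain_with_refwait(p, TRUE);

	/* ... swap the image while lookups are held off ... */
	proc_refwake(refp);
	proc_rele(refp);
}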
672
673 proc_t
674 proc_parentholdref(proc_t p)
675 {
676 proc_t parent = PROC_NULL;
677 proc_t pp;
678 int loopcnt = 0;
679
680
681 proc_list_lock();
682 loop:
683 pp = p->p_pptr;
684 if ((pp == PROC_NULL) || (pp->p_stat == SZOMB) || ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED))) {
685 parent = PROC_NULL;
686 goto out;
687 }
688
689 if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == P_LIST_CHILDDRSTART) {
690 pp->p_listflag |= P_LIST_CHILDDRWAIT;
691 msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
692 loopcnt++;
693 if (loopcnt == 5) {
694 parent = PROC_NULL;
695 goto out;
696 }
697 goto loop;
698 }
699
700 if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == 0) {
701 pp->p_parentref++;
702 parent = pp;
703 goto out;
704 }
705
706 out:
707 proc_list_unlock();
708 return(parent);
709 }
710 int
711 proc_parentdropref(proc_t p, int listlocked)
712 {
713 if (listlocked == 0)
714 proc_list_lock();
715
716 if (p->p_parentref > 0) {
717 p->p_parentref--;
718 if ((p->p_parentref == 0) && ((p->p_listflag & P_LIST_PARENTREFWAIT) == P_LIST_PARENTREFWAIT)) {
719 p->p_listflag &= ~P_LIST_PARENTREFWAIT;
720 wakeup(&p->p_parentref);
721 }
722 } else
723 panic("proc_parentdropref -ve ref\n");
724 if (listlocked == 0)
725 proc_list_unlock();
726
727 return(0);
728 }
729
730 void
731 proc_childdrainstart(proc_t p)
732 {
733 #if __PROC_INTERNAL_DEBUG
734 if ((p->p_listflag & P_LIST_CHILDDRSTART) == P_LIST_CHILDDRSTART)
735 panic("proc_childdrainstart: childdrain already started\n");
736 #endif
737 p->p_listflag |= P_LIST_CHILDDRSTART;
738 /* wait for all that hold parentrefs to drop */
739 while (p->p_parentref > 0) {
740 p->p_listflag |= P_LIST_PARENTREFWAIT;
741 msleep(&p->p_parentref, proc_list_mlock, 0, "proc_childdrainstart", 0);
742 }
743 }
744
745
746 void
747 proc_childdrainend(proc_t p)
748 {
749 #if __PROC_INTERNAL_DEBUG
750 if (p->p_childrencnt > 0)
751 panic("exiting: children still hanging around\n");
752 #endif
753 p->p_listflag |= P_LIST_CHILDDRAINED;
754 if ((p->p_listflag & (P_LIST_CHILDLKWAIT | P_LIST_CHILDDRWAIT)) != 0) {
755 p->p_listflag &= ~(P_LIST_CHILDLKWAIT | P_LIST_CHILDDRWAIT);
756 wakeup(&p->p_childrencnt);
757 }
758 }
759
760 void
761 proc_checkdeadrefs(__unused proc_t p)
762 {
763 #if __PROC_INTERNAL_DEBUG
764 if ((p->p_listflag & P_LIST_INHASH) != 0)
765 panic("proc being freed and still in hash %p: %u\n", p, p->p_listflag);
766 if (p->p_childrencnt != 0)
767 panic("proc being freed and pending children cnt %p:%d\n", p, p->p_childrencnt);
768 if (p->p_refcount != 0)
769 panic("proc being freed and pending refcount %p:%d\n", p, p->p_refcount);
770 if (p->p_parentref != 0)
771 panic("proc being freed and pending parentrefs %p:%d\n", p, p->p_parentref);
772 #endif
773 }
774
775 int
776 proc_pid(proc_t p)
777 {
778 if (p != NULL)
779 return (p->p_pid);
780 return -1;
781 }
782
783 int
784 proc_ppid(proc_t p)
785 {
786 if (p != NULL)
787 return (p->p_ppid);
788 return -1;
789 }
790
791 int
792 proc_selfpid(void)
793 {
794 return (current_proc()->p_pid);
795 }
796
797 int
798 proc_selfppid(void)
799 {
800 return (current_proc()->p_ppid);
801 }
802
803 int
804 proc_selfcsflags(void)
805 {
806 return (current_proc()->p_csflags);
807 }
808
809 #if CONFIG_DTRACE
810 static proc_t
811 dtrace_current_proc_vforking(void)
812 {
813 thread_t th = current_thread();
814 struct uthread *ut = get_bsdthread_info(th);
815
816 if (ut &&
817 ((ut->uu_flag & (UT_VFORK|UT_VFORKING)) == (UT_VFORK|UT_VFORKING))) {
818 /*
819 * Handle the narrow window where we're in the vfork syscall,
820 * but we're not quite ready to claim (in particular, to DTrace)
821 * that we're running as the child.
822 */
823 return (get_bsdtask_info(get_threadtask(th)));
824 }
825 return (current_proc());
826 }
827
828 int
829 dtrace_proc_selfpid(void)
830 {
831 return (dtrace_current_proc_vforking()->p_pid);
832 }
833
834 int
835 dtrace_proc_selfppid(void)
836 {
837 return (dtrace_current_proc_vforking()->p_ppid);
838 }
839
840 uid_t
841 dtrace_proc_selfruid(void)
842 {
843 return (dtrace_current_proc_vforking()->p_ruid);
844 }
845 #endif /* CONFIG_DTRACE */
846
847 proc_t
848 proc_parent(proc_t p)
849 {
850 proc_t parent;
851 proc_t pp;
852
853 proc_list_lock();
854 loop:
855 pp = p->p_pptr;
856 parent = proc_ref_locked(pp);
857 if ((parent == PROC_NULL) && (pp != PROC_NULL) && (pp->p_stat != SZOMB) && ((pp->p_listflag & P_LIST_EXITED) != 0) && ((pp->p_listflag & P_LIST_CHILDDRAINED) == 0)) {
858 pp->p_listflag |= P_LIST_CHILDLKWAIT;
859 msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
860 goto loop;
861 }
862 proc_list_unlock();
863 return(parent);
864 }
865
866 static boolean_t
867 proc_parent_is_currentproc(proc_t p)
868 {
869 boolean_t ret = FALSE;
870
871 proc_list_lock();
872 if (p->p_pptr == current_proc())
873 ret = TRUE;
874
875 proc_list_unlock();
876 return ret;
877 }
878
879 void
880 proc_name(int pid, char * buf, int size)
881 {
882 proc_t p;
883
884 if ((p = proc_find(pid)) != PROC_NULL) {
885 strlcpy(buf, &p->p_comm[0], size);
886 proc_rele(p);
887 }
888 }
889
890 void
891 proc_name_kdp(task_t t, char * buf, int size)
892 {
893 proc_t p = get_bsdtask_info(t);
894 if (p == PROC_NULL)
895 return;
896
897 if ((size_t)size > sizeof(p->p_comm))
898 strlcpy(buf, &p->p_name[0], MIN((int)sizeof(p->p_name), size));
899 else
900 strlcpy(buf, &p->p_comm[0], MIN((int)sizeof(p->p_comm), size));
901 }
902
903 int
904 proc_threadname_kdp(void * uth, char * buf, size_t size)
905 {
906 if (size < MAXTHREADNAMESIZE) {
907 /* this is really just a protective measure for the future in
908 * case the thread name size in stackshot gets out of sync with
909 * the BSD max thread name size. Note that bsd_getthreadname
910 * doesn't take input buffer size into account. */
911 return -1;
912 }
913
914 if (uth != NULL) {
915 bsd_getthreadname(uth, buf);
916 }
917 return 0;
918 }
919
920
921 /* Note that this function is generally going to be called from stackshot,
922 * and the arguments will be coming from a struct which is declared packed;
923 * thus the input arguments will in general be unaligned. We have to handle
924 * that here. */
925 void
926 proc_starttime_kdp(void *p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unaligned_u64 *abstime)
927 {
928 proc_t pp = (proc_t)p;
929 if (pp != PROC_NULL) {
930 if (tv_sec != NULL)
931 *tv_sec = pp->p_start.tv_sec;
932 if (tv_usec != NULL)
933 *tv_usec = pp->p_start.tv_usec;
934 if (abstime != NULL) {
935 if (pp->p_stats != NULL)
936 *abstime = pp->p_stats->ps_start;
937 else
938 *abstime = 0;
939 }
940 }
941 }
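
/*
 * Illustrative sketch: a hypothetical packed record of the kind stackshot
 * emits.  Because the struct is packed, its 64-bit fields may be
 * unaligned, which is exactly what the unaligned_u64 parameters above
 * are meant to absorb.
 */
struct example_packed_record {
	uint8_t		epr_tag;
	uint64_t	epr_start_sec;
	uint64_t	epr_start_usec;
	uint64_t	epr_start_abs;
} __attribute__((packed));

static void
example_fill_packed_record(proc_t p, struct example_packed_record *r)
{
	proc_starttime_kdp(p, (unaligned_u64 *)&r->epr_start_sec,
	    (unaligned_u64 *)&r->epr_start_usec,
	    (unaligned_u64 *)&r->epr_start_abs);
}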
942
943 char *
944 proc_name_address(void *p)
945 {
946 return &((proc_t)p)->p_comm[0];
947 }
948
949 char *
950 proc_best_name(proc_t p)
951 {
952 if (p->p_name[0] != 0)
953 return (&p->p_name[0]);
954 return (&p->p_comm[0]);
955 }
956
957 void
958 proc_selfname(char * buf, int size)
959 {
960 proc_t p;
961
962 if ((p = current_proc()) != (proc_t)0) {
963 strlcpy(buf, &p->p_comm[0], size);
964 }
965 }
966
967 void
968 proc_signal(int pid, int signum)
969 {
970 proc_t p;
971
972 if ((p = proc_find(pid)) != PROC_NULL) {
973 psignal(p, signum);
974 proc_rele(p);
975 }
976 }
977
978 int
979 proc_issignal(int pid, sigset_t mask)
980 {
981 proc_t p;
982 int error = 0;
983
984 if ((p = proc_find(pid)) != PROC_NULL) {
985 error = proc_pendingsignals(p, mask);
986 proc_rele(p);
987 }
988
989 return(error);
990 }
991
992 int
993 proc_noremotehang(proc_t p)
994 {
995 int retval = 0;
996
997 if (p)
998 retval = p->p_flag & P_NOREMOTEHANG;
999 return(retval? 1: 0);
1000
1001 }
1002
1003 int
1004 proc_exiting(proc_t p)
1005 {
1006 int retval = 0;
1007
1008 if (p)
1009 retval = p->p_lflag & P_LEXIT;
1010 return(retval? 1: 0);
1011 }
1012
1013 int
1014 proc_forcequota(proc_t p)
1015 {
1016 int retval = 0;
1017
1018 if (p)
1019 retval = p->p_flag & P_FORCEQUOTA;
1020 return(retval? 1: 0);
1021
1022 }
1023
1024 int
1025 proc_suser(proc_t p)
1026 {
1027 kauth_cred_t my_cred;
1028 int error;
1029
1030 my_cred = kauth_cred_proc_ref(p);
1031 error = suser(my_cred, &p->p_acflag);
1032 kauth_cred_unref(&my_cred);
1033 return(error);
1034 }
1035
1036 task_t
1037 proc_task(proc_t proc)
1038 {
1039 return (task_t)proc->task;
1040 }
1041
1042 /*
1043 * Obtain the first thread in a process
1044 *
1045 * XXX This is a bad thing to do; it exists predominantly to support the
1046 * XXX use of proc_t's in places that should really be using
1047 * XXX thread_t's instead. This maintains historical behaviour, but really
1048 * XXX needs an audit of the context (proxy vs. not) to clean up.
1049 */
1050 thread_t
1051 proc_thread(proc_t proc)
1052 {
1053 uthread_t uth = TAILQ_FIRST(&proc->p_uthlist);
1054
1055 if (uth != NULL)
1056 return(uth->uu_context.vc_thread);
1057
1058 return(NULL);
1059 }
1060
1061 kauth_cred_t
1062 proc_ucred(proc_t p)
1063 {
1064 return(p->p_ucred);
1065 }
1066
1067 struct uthread *
1068 current_uthread()
1069 {
1070 thread_t th = current_thread();
1071
1072 return((struct uthread *)get_bsdthread_info(th));
1073 }
1074
1075
1076 int
1077 proc_is64bit(proc_t p)
1078 {
1079 return(IS_64BIT_PROCESS(p));
1080 }
1081
1082 int
1083 proc_pidversion(proc_t p)
1084 {
1085 return(p->p_idversion);
1086 }
1087
1088 uint32_t
1089 proc_persona_id(proc_t p)
1090 {
1091 return (uint32_t)persona_id_from_proc(p);
1092 }
1093
1094 uint32_t
1095 proc_getuid(proc_t p)
1096 {
1097 return(p->p_uid);
1098 }
1099
1100 uint32_t
1101 proc_getgid(proc_t p)
1102 {
1103 return(p->p_gid);
1104 }
1105
1106 uint64_t
1107 proc_uniqueid(proc_t p)
1108 {
1109 return(p->p_uniqueid);
1110 }
1111
1112 uint64_t
1113 proc_puniqueid(proc_t p)
1114 {
1115 return(p->p_puniqueid);
1116 }
1117
1118 void
1119 proc_coalitionids(__unused proc_t p, __unused uint64_t ids[COALITION_NUM_TYPES])
1120 {
1121 #if CONFIG_COALITIONS
1122 task_coalition_ids(p->task, ids);
1123 #else
1124 memset(ids, 0, sizeof(uint64_t [COALITION_NUM_TYPES]));
1125 #endif
1126 return;
1127 }
1128
1129 uint64_t
1130 proc_was_throttled(proc_t p)
1131 {
1132 return (p->was_throttled);
1133 }
1134
1135 uint64_t
1136 proc_did_throttle(proc_t p)
1137 {
1138 return (p->did_throttle);
1139 }
1140
1141 int
1142 proc_getcdhash(proc_t p, unsigned char *cdhash)
1143 {
1144 return vn_getcdhash(p->p_textvp, p->p_textoff, cdhash);
1145 }
1146
1147 void
1148 proc_getexecutableuuid(proc_t p, unsigned char *uuidbuf, unsigned long size)
1149 {
1150 if (size >= sizeof(p->p_uuid)) {
1151 memcpy(uuidbuf, p->p_uuid, sizeof(p->p_uuid));
1152 }
1153 }
1154
1155 /* Return vnode for executable with an iocount. Must be released with vnode_put() */
1156 vnode_t
1157 proc_getexecutablevnode(proc_t p)
1158 {
1159 vnode_t tvp = p->p_textvp;
1160
1161 if ( tvp != NULLVP) {
1162 if (vnode_getwithref(tvp) == 0) {
1163 return tvp;
1164 }
1165 }
1166
1167 return NULLVP;
1168 }
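
/*
 * Illustrative sketch: pairing the iocount returned by
 * proc_getexecutablevnode() with vnode_put().  The helper is hypothetical.
 */
static void
example_with_executable_vnode(proc_t p)
{
	vnode_t vp = proc_getexecutablevnode(p);

	if (vp != NULLVP) {
		/* ... inspect the executable's vnode ... */
		vnode_put(vp);
	}
}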
1169
1170
1171 void
1172 bsd_set_dependency_capable(task_t task)
1173 {
1174 proc_t p = get_bsdtask_info(task);
1175
1176 if (p) {
1177 OSBitOrAtomic(P_DEPENDENCY_CAPABLE, &p->p_flag);
1178 }
1179 }
1180
1181
1182 #ifndef __arm__
1183 int
1184 IS_64BIT_PROCESS(proc_t p)
1185 {
1186 if (p && (p->p_flag & P_LP64))
1187 return(1);
1188 else
1189 return(0);
1190 }
1191 #endif
1192
1193 /*
1194 * Locate a process by number
1195 */
1196 proc_t
1197 pfind_locked(pid_t pid)
1198 {
1199 proc_t p;
1200 #if DEBUG
1201 proc_t q;
1202 #endif
1203
1204 if (!pid)
1205 return (kernproc);
1206
1207 for (p = PIDHASH(pid)->lh_first; p != 0; p = p->p_hash.le_next) {
1208 if (p->p_pid == pid) {
1209 #if DEBUG
1210 for (q = p->p_hash.le_next; q != 0; q = q->p_hash.le_next) {
1211 if ((p != q) && (q->p_pid == pid))
1212 panic("two procs with same pid %p:%p:%d:%d\n", p, q, p->p_pid, q->p_pid);
1213 }
1214 #endif
1215 return (p);
1216 }
1217 }
1218 return (NULL);
1219 }
1220
1221 /*
1222 * Locate a zombie by PID
1223 */
1224 __private_extern__ proc_t
1225 pzfind(pid_t pid)
1226 {
1227 proc_t p;
1228
1229
1230 proc_list_lock();
1231
1232 for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next)
1233 if (p->p_pid == pid)
1234 break;
1235
1236 proc_list_unlock();
1237
1238 return (p);
1239 }
1240
1241 /*
1242 * Locate a process group by number
1243 */
1244
1245 struct pgrp *
1246 pgfind(pid_t pgid)
1247 {
1248 struct pgrp * pgrp;
1249
1250 proc_list_lock();
1251 pgrp = pgfind_internal(pgid);
1252 if ((pgrp == NULL) || ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) != 0))
1253 pgrp = PGRP_NULL;
1254 else
1255 pgrp->pg_refcount++;
1256 proc_list_unlock();
1257 return(pgrp);
1258 }
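
/*
 * Illustrative sketch: like proc_find(), a successful pgfind() hands back
 * a referenced group that must be released with pg_rele().  The helper is
 * hypothetical.
 */
static int
example_pgfind_usage(pid_t pgid)
{
	struct pgrp *pg = pgfind(pgid);

	if (pg == PGRP_NULL)
		return (ESRCH);
	/* ... use pg under its reference ... */
	pg_rele(pg);
	return (0);
}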
1259
1260
1261
1262 struct pgrp *
1263 pgfind_internal(pid_t pgid)
1264 {
1265 struct pgrp *pgrp;
1266
1267 for (pgrp = PGRPHASH(pgid)->lh_first; pgrp != 0; pgrp = pgrp->pg_hash.le_next)
1268 if (pgrp->pg_id == pgid)
1269 return (pgrp);
1270 return (NULL);
1271 }
1272
1273 void
1274 pg_rele(struct pgrp * pgrp)
1275 {
1276 if (pgrp == PGRP_NULL)
1277 return;
1278 pg_rele_dropref(pgrp);
1279 }
1280
1281 void
1282 pg_rele_dropref(struct pgrp * pgrp)
1283 {
1284 proc_list_lock();
1285 if ((pgrp->pg_refcount == 1) && ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) == PGRP_FLAG_TERMINATE)) {
1286 proc_list_unlock();
1287 pgdelete_dropref(pgrp);
1288 return;
1289 }
1290
1291 pgrp->pg_refcount--;
1292 proc_list_unlock();
1293 }
1294
1295 struct session *
1296 session_find_internal(pid_t sessid)
1297 {
1298 struct session *sess;
1299
1300 for (sess = SESSHASH(sessid)->lh_first; sess != 0; sess = sess->s_hash.le_next)
1301 if (sess->s_sid == sessid)
1302 return (sess);
1303 return (NULL);
1304 }
1305
1306
1307 /*
1308 * Make a new process ready to become a useful member of society by making it
1309 * visible in all the right places and initializing its own lists to empty.
1310 *
1311 * Parameters: parent The parent of the process to insert
1312 * child The child process to insert
1313 *
1314 * Returns: (void)
1315 *
1316 * Notes: Insert a child process into the parent's process group, assign
1317 * the child the parent process pointer and PPID of the parent,
1318 * place it on the parent's p_children list as a sibling,
1319 * initialize its own child list, place it in the allproc list,
1320 * insert it in the proper hash bucket, and initialize its
1321 * event list.
1322 */
1323 void
1324 pinsertchild(proc_t parent, proc_t child)
1325 {
1326 struct pgrp * pg;
1327
1328 LIST_INIT(&child->p_children);
1329 TAILQ_INIT(&child->p_evlist);
1330 child->p_pptr = parent;
1331 child->p_ppid = parent->p_pid;
1332 child->p_puniqueid = parent->p_uniqueid;
1333 child->p_xhighbits = 0;
1334
1335 pg = proc_pgrp(parent);
1336 pgrp_add(pg, parent, child);
1337 pg_rele(pg);
1338
1339 proc_list_lock();
1340
1341 #if CONFIG_MEMORYSTATUS
1342 memorystatus_add(child, TRUE);
1343 #endif
1344
1345 parent->p_childrencnt++;
1346 LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
1347
1348 LIST_INSERT_HEAD(&allproc, child, p_list);
1349 /* mark the completion of proc creation */
1350 child->p_listflag &= ~P_LIST_INCREATE;
1351
1352 proc_list_unlock();
1353 }
1354
1355 /*
1356 * Move p to a new or existing process group (and session)
1357 *
1358 * Returns: 0 Success
1359 * ESRCH No such process
1360 */
1361 int
1362 enterpgrp(proc_t p, pid_t pgid, int mksess)
1363 {
1364 struct pgrp *pgrp;
1365 struct pgrp *mypgrp;
1366 struct session * procsp;
1367
1368 pgrp = pgfind(pgid);
1369 mypgrp = proc_pgrp(p);
1370 procsp = proc_session(p);
1371
1372 #if DIAGNOSTIC
1373 if (pgrp != NULL && mksess) /* firewalls */
1374 panic("enterpgrp: setsid into non-empty pgrp");
1375 if (SESS_LEADER(p, procsp))
1376 panic("enterpgrp: session leader attempted setpgrp");
1377 #endif
1378 if (pgrp == PGRP_NULL) {
1379 pid_t savepid = p->p_pid;
1380 proc_t np = PROC_NULL;
1381 /*
1382 * new process group
1383 */
1384 #if DIAGNOSTIC
1385 if (p->p_pid != pgid)
1386 panic("enterpgrp: new pgrp and pid != pgid");
1387 #endif
1388 MALLOC_ZONE(pgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP,
1389 M_WAITOK);
1390 if (pgrp == NULL)
1391 panic("enterpgrp: M_PGRP zone depleted");
1392 if ((np = proc_find(savepid)) == NULL || np != p) {
1393 if (np != PROC_NULL)
1394 proc_rele(np);
1395 if (mypgrp != PGRP_NULL)
1396 pg_rele(mypgrp);
1397 if (procsp != SESSION_NULL)
1398 session_rele(procsp);
1399 FREE_ZONE(pgrp, sizeof(struct pgrp), M_PGRP);
1400 return (ESRCH);
1401 }
1402 proc_rele(np);
1403 if (mksess) {
1404 struct session *sess;
1405
1406 /*
1407 * new session
1408 */
1409 MALLOC_ZONE(sess, struct session *,
1410 sizeof(struct session), M_SESSION, M_WAITOK);
1411 if (sess == NULL)
1412 panic("enterpgrp: M_SESSION zone depleted");
1413 sess->s_leader = p;
1414 sess->s_sid = p->p_pid;
1415 sess->s_count = 1;
1416 sess->s_ttyvp = NULL;
1417 sess->s_ttyp = TTY_NULL;
1418 sess->s_flags = 0;
1419 sess->s_listflags = 0;
1420 sess->s_ttypgrpid = NO_PID;
1421 #if CONFIG_FINE_LOCK_GROUPS
1422 lck_mtx_init(&sess->s_mlock, proc_mlock_grp, proc_lck_attr);
1423 #else
1424 lck_mtx_init(&sess->s_mlock, proc_lck_grp, proc_lck_attr);
1425 #endif
1426 bcopy(procsp->s_login, sess->s_login,
1427 sizeof(sess->s_login));
1428 OSBitAndAtomic(~((uint32_t)P_CONTROLT), &p->p_flag);
1429 proc_list_lock();
1430 LIST_INSERT_HEAD(SESSHASH(sess->s_sid), sess, s_hash);
1431 proc_list_unlock();
1432 pgrp->pg_session = sess;
1433 #if DIAGNOSTIC
1434 if (p != current_proc())
1435 panic("enterpgrp: mksession and p != curproc");
1436 #endif
1437 } else {
1438 proc_list_lock();
1439 pgrp->pg_session = procsp;
1440
1441 if ((pgrp->pg_session->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
1442 panic("enterpgrp: providing ref to terminating session");
1443 pgrp->pg_session->s_count++;
1444 proc_list_unlock();
1445 }
1446 pgrp->pg_id = pgid;
1447 #if CONFIG_FINE_LOCK_GROUPS
1448 lck_mtx_init(&pgrp->pg_mlock, proc_mlock_grp, proc_lck_attr);
1449 #else
1450 lck_mtx_init(&pgrp->pg_mlock, proc_lck_grp, proc_lck_attr);
1451 #endif
1452 LIST_INIT(&pgrp->pg_members);
1453 pgrp->pg_membercnt = 0;
1454 pgrp->pg_jobc = 0;
1455 proc_list_lock();
1456 pgrp->pg_refcount = 1;
1457 pgrp->pg_listflags = 0;
1458 LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
1459 proc_list_unlock();
1460 } else if (pgrp == mypgrp) {
1461 pg_rele(pgrp);
1462 if (mypgrp != NULL)
1463 pg_rele(mypgrp);
1464 if (procsp != SESSION_NULL)
1465 session_rele(procsp);
1466 return (0);
1467 }
1468
1469 if (procsp != SESSION_NULL)
1470 session_rele(procsp);
1471 /*
1472 * Adjust eligibility of affected pgrps to participate in job control.
1473 * Increment eligibility counts before decrementing, otherwise we
1474 * could reach 0 spuriously during the first call.
1475 */
1476 fixjobc(p, pgrp, 1);
1477 fixjobc(p, mypgrp, 0);
1478
1479 if (mypgrp != PGRP_NULL)
1480 pg_rele(mypgrp);
1481 pgrp_replace(p, pgrp);
1482 pg_rele(pgrp);
1483
1484 return(0);
1485 }
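
/*
 * Illustrative sketch: enterpgrp() is the primitive beneath
 * setsid(2)/setpgid(2)-style operations.  A setsid-style call (shown by
 * this hypothetical helper) names the caller's own pid as the new pgid
 * and asks for a fresh session.
 */
static int
example_setsid_style(proc_t p)
{
	return (enterpgrp(p, p->p_pid, 1));	/* new pgrp and new session */
}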
1486
1487 /*
1488 * remove process from process group
1489 */
1490 int
1491 leavepgrp(proc_t p)
1492 {
1493
1494 pgrp_remove(p);
1495 return (0);
1496 }
1497
1498 /*
1499 * delete a process group
1500 */
1501 static void
1502 pgdelete_dropref(struct pgrp *pgrp)
1503 {
1504 struct tty *ttyp;
1505 int emptypgrp = 1;
1506 struct session *sessp;
1507
1508
1509 pgrp_lock(pgrp);
1510 if (pgrp->pg_membercnt != 0) {
1511 emptypgrp = 0;
1512 }
1513 pgrp_unlock(pgrp);
1514
1515 proc_list_lock();
1516 pgrp->pg_refcount--;
1517 if ((emptypgrp == 0) || (pgrp->pg_membercnt != 0)) {
1518 proc_list_unlock();
1519 return;
1520 }
1521
1522 pgrp->pg_listflags |= PGRP_FLAG_TERMINATE;
1523
1524 if (pgrp->pg_refcount > 0) {
1525 proc_list_unlock();
1526 return;
1527 }
1528
1529 pgrp->pg_listflags |= PGRP_FLAG_DEAD;
1530 LIST_REMOVE(pgrp, pg_hash);
1531
1532 proc_list_unlock();
1533
1534 ttyp = SESSION_TP(pgrp->pg_session);
1535 if (ttyp != TTY_NULL) {
1536 if (ttyp->t_pgrp == pgrp) {
1537 tty_lock(ttyp);
1538 /* Re-check after acquiring the lock */
1539 if (ttyp->t_pgrp == pgrp) {
1540 ttyp->t_pgrp = NULL;
1541 pgrp->pg_session->s_ttypgrpid = NO_PID;
1542 }
1543 tty_unlock(ttyp);
1544 }
1545 }
1546
1547 proc_list_lock();
1548
1549 sessp = pgrp->pg_session;
1550 if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
1551 panic("pg_deleteref: manipulating refs of already terminating session");
1552 if (--sessp->s_count == 0) {
1553 if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
1554 panic("pg_deleteref: terminating already terminated session");
1555 sessp->s_listflags |= S_LIST_TERM;
1556 ttyp = SESSION_TP(sessp);
1557 LIST_REMOVE(sessp, s_hash);
1558 proc_list_unlock();
1559 if (ttyp != TTY_NULL) {
1560 tty_lock(ttyp);
1561 if (ttyp->t_session == sessp)
1562 ttyp->t_session = NULL;
1563 tty_unlock(ttyp);
1564 }
1565 proc_list_lock();
1566 sessp->s_listflags |= S_LIST_DEAD;
1567 if (sessp->s_count != 0)
1568 panic("pg_deleteref: freeing session in use");
1569 proc_list_unlock();
1570 #if CONFIG_FINE_LOCK_GROUPS
1571 lck_mtx_destroy(&sessp->s_mlock, proc_mlock_grp);
1572 #else
1573 lck_mtx_destroy(&sessp->s_mlock, proc_lck_grp);
1574 #endif
1575 FREE_ZONE(sessp, sizeof(struct session), M_SESSION);
1576 } else
1577 proc_list_unlock();
1578 #if CONFIG_FINE_LOCK_GROUPS
1579 lck_mtx_destroy(&pgrp->pg_mlock, proc_mlock_grp);
1580 #else
1581 lck_mtx_destroy(&pgrp->pg_mlock, proc_lck_grp);
1582 #endif
1583 FREE_ZONE(pgrp, sizeof(*pgrp), M_PGRP);
1584 }
1585
1586
1587 /*
1588 * Adjust pgrp jobc counters when specified process changes process group.
1589 * We count the number of processes in each process group that "qualify"
1590 * the group for terminal job control (those with a parent in a different
1591 * process group of the same session). If that count reaches zero, the
1592 * process group becomes orphaned. Check both the specified process'
1593 * process group and that of its children.
1594 * entering == 0 => p is leaving specified group.
1595 * entering == 1 => p is entering specified group.
1596 */
1597 int
1598 fixjob_callback(proc_t p, void * arg)
1599 {
1600 struct fixjob_iterargs *fp;
1601 struct pgrp * pg, *hispg;
1602 struct session * mysession, *hissess;
1603 int entering;
1604
1605 fp = (struct fixjob_iterargs *)arg;
1606 pg = fp->pg;
1607 mysession = fp->mysession;
1608 entering = fp->entering;
1609
1610 hispg = proc_pgrp(p);
1611 hissess = proc_session(p);
1612
1613 if ((hispg != pg) &&
1614 (hissess == mysession)) {
1615 pgrp_lock(hispg);
1616 if (entering) {
1617 hispg->pg_jobc++;
1618 pgrp_unlock(hispg);
1619 } else if (--hispg->pg_jobc == 0) {
1620 pgrp_unlock(hispg);
1621 orphanpg(hispg);
1622 } else
1623 pgrp_unlock(hispg);
1624 }
1625 if (hissess != SESSION_NULL)
1626 session_rele(hissess);
1627 if (hispg != PGRP_NULL)
1628 pg_rele(hispg);
1629
1630 return(PROC_RETURNED);
1631 }
1632
1633 void
1634 fixjobc(proc_t p, struct pgrp *pgrp, int entering)
1635 {
1636 struct pgrp *hispgrp = PGRP_NULL;
1637 struct session *hissess = SESSION_NULL;
1638 struct session *mysession = pgrp->pg_session;
1639 proc_t parent;
1640 struct fixjob_iterargs fjarg;
1641 boolean_t proc_parent_self;
1642
1643 /*
1644 * Check if p's parent is the current proc; if so, there is no need
1645 * to take a ref. Calling proc_parent with the current proc as the
1646 * parent may deadlock if the current proc is exiting.
1647 */
1648 proc_parent_self = proc_parent_is_currentproc(p);
1649 if (proc_parent_self)
1650 parent = current_proc();
1651 else
1652 parent = proc_parent(p);
1653
1654 if (parent != PROC_NULL) {
1655 hispgrp = proc_pgrp(parent);
1656 hissess = proc_session(parent);
1657 if (!proc_parent_self)
1658 proc_rele(parent);
1659 }
1660
1661
1662 /*
1663 * Check p's parent to see whether p qualifies its own process
1664 * group; if so, adjust count for p's process group.
1665 */
1666 if ((hispgrp != pgrp) &&
1667 (hissess == mysession)) {
1668 pgrp_lock(pgrp);
1669 if (entering) {
1670 pgrp->pg_jobc++;
1671 pgrp_unlock(pgrp);
1672 } else if (--pgrp->pg_jobc == 0) {
1673 pgrp_unlock(pgrp);
1674 orphanpg(pgrp);
1675 } else
1676 pgrp_unlock(pgrp);
1677 }
1678
1679 if (hissess != SESSION_NULL)
1680 session_rele(hissess);
1681 if (hispgrp != PGRP_NULL)
1682 pg_rele(hispgrp);
1683
1684 /*
1685 * Check this process' children to see whether they qualify
1686 * their process groups; if so, adjust counts for children's
1687 * process groups.
1688 */
1689 fjarg.pg = pgrp;
1690 fjarg.mysession = mysession;
1691 fjarg.entering = entering;
1692 proc_childrenwalk(p, fixjob_callback, &fjarg);
1693 }
1694
1695 /*
1696 * A process group has become orphaned; if there are any stopped processes in
1697 * the group, hang up all processes in that group.
1698 */
1699 static void
1700 orphanpg(struct pgrp *pgrp)
1701 {
1702 pid_t *pid_list;
1703 proc_t p;
1704 vm_size_t pid_list_size = 0;
1705 vm_size_t pid_list_size_needed = 0;
1706 int pid_count = 0;
1707 int pid_count_available = 0;
1708
1709 assert(pgrp != NULL);
1710
1711 /* allocate outside of the pgrp_lock */
1712 for (;;) {
1713 pgrp_lock(pgrp);
1714
1715 boolean_t should_iterate = FALSE;
1716 pid_count_available = 0;
1717
1718 PGMEMBERS_FOREACH(pgrp, p) {
1719 pid_count_available++;
1720
1721 if (p->p_stat == SSTOP) {
1722 should_iterate = TRUE;
1723 }
1724 }
1725
1726 if (pid_count_available == 0 || !should_iterate) {
1727 pgrp_unlock(pgrp);
1728 return;
1729 }
1730
1731 pid_list_size_needed = pid_count_available * sizeof(pid_t);
1732 if (pid_list_size >= pid_list_size_needed) {
1733 break;
1734 }
1735 pgrp_unlock(pgrp);
1736
1737 if (pid_list_size != 0) {
1738 kfree(pid_list, pid_list_size);
1739 }
1740 pid_list = kalloc(pid_list_size_needed);
1741 if (!pid_list) {
1742 return;
1743 }
1744 pid_list_size = pid_list_size_needed;
1745 }
1746
1747 /* no orphaned processes */
1748 if (pid_list_size == 0) {
1749 pgrp_unlock(pgrp);
1750 return;
1751 }
1752
1753 PGMEMBERS_FOREACH(pgrp, p) {
1754 pid_list[pid_count++] = proc_pid(p);
1755 if (pid_count >= pid_count_available) {
1756 break;
1757 }
1758 }
1759 pgrp_unlock(pgrp);
1760
1761 if (pid_count == 0) {
1762 goto out;
1763 }
1764
1765 for (int i = 0; i < pid_count; i++) {
1766 /* do not handle kernproc */
1767 if (pid_list[i] == 0) {
1768 continue;
1769 }
1770 p = proc_find(pid_list[i]);
1771 if (!p) {
1772 continue;
1773 }
1774
1775 proc_transwait(p, 0);
1776 pt_setrunnable(p);
1777 psignal(p, SIGHUP);
1778 psignal(p, SIGCONT);
1779 proc_rele(p);
1780 }
1781
1782 out:
1783 kfree(pid_list, pid_list_size);
1784 return;
1785 }
1786
1787 int
1788 proc_is_classic(proc_t p __unused)
1789 {
1790 return (0);
1791 }
1792
1793 /* XXX Why does this function exist? Need to kill it off... */
1794 proc_t
1795 current_proc_EXTERNAL(void)
1796 {
1797 return (current_proc());
1798 }
1799
1800 int
1801 proc_is_forcing_hfs_case_sensitivity(proc_t p)
1802 {
1803 return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY) ? 1 : 0;
1804 }
1805
1806 #if CONFIG_COREDUMP
1807 /*
1808 * proc_core_name(name, uid, pid)
1809 * Expand the name described in corefilename, using name, uid, and pid.
1810 * corefilename is a printf-like string, with three format specifiers:
1811 * %N name of process ("name")
1812 * %P process id (pid)
1813 * %U user id (uid)
1814 * For example, "%N.core" is the default; they can be disabled completely
1815 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
1816 * This is controlled by the sysctl variable kern.corefile (see above).
1817 */
1818 __private_extern__ int
1819 proc_core_name(const char *name, uid_t uid, pid_t pid, char *cf_name,
1820 size_t cf_name_len)
1821 {
1822 const char *format, *appendstr;
1823 char id_buf[11]; /* Buffer for pid/uid -- max 4B */
1824 size_t i, l, n;
1825
1826 if (cf_name == NULL)
1827 goto toolong;
1828
1829 format = corefilename;
1830 for (i = 0, n = 0; n < cf_name_len && format[i]; i++) {
1831 switch (format[i]) {
1832 case '%': /* Format character */
1833 i++;
1834 switch (format[i]) {
1835 case '%':
1836 appendstr = "%";
1837 break;
1838 case 'N': /* process name */
1839 appendstr = name;
1840 break;
1841 case 'P': /* process id */
1842 snprintf(id_buf, sizeof(id_buf), "%u", pid);
1843 appendstr = id_buf;
1844 break;
1845 case 'U': /* user id */
1846 snprintf(id_buf, sizeof(id_buf), "%u", uid);
1847 appendstr = id_buf;
1848 break;
1849 case '\0': /* format string ended in % symbol */
1850 goto endofstring;
1851 default:
1852 appendstr = "";
1853 log(LOG_ERR,
1854 "Unknown format character %c in `%s'\n",
1855 format[i], format);
1856 }
1857 l = strlen(appendstr);
1858 if ((n + l) >= cf_name_len)
1859 goto toolong;
1860 bcopy(appendstr, cf_name + n, l);
1861 n += l;
1862 break;
1863 default:
1864 cf_name[n++] = format[i];
1865 }
1866 }
1867 if (format[i] != '\0')
1868 goto toolong;
1869 return (0);
1870 toolong:
1871 log(LOG_ERR, "pid %ld (%s), uid (%u): corename is too long\n",
1872 (long)pid, name, (uint32_t)uid);
1873 return (1);
1874 endofstring:
1875 log(LOG_ERR, "pid %ld (%s), uid (%u): unexpected end of string after %% token\n",
1876 (long)pid, name, (uint32_t)uid);
1877 return (1);
1878 }
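
/*
 * Illustrative sketch: expanding the template for a given process.  With
 * the default "/cores/core.%P" and pid 123, cf_name becomes
 * "/cores/core.123".  The helper is hypothetical and uses proc_getuid()
 * for brevity; a real caller may prefer a properly referenced credential.
 */
static int
example_expand_core_name(proc_t p, char *buf, size_t buflen)
{
	return (proc_core_name(p->p_comm, proc_getuid(p), p->p_pid,
	    buf, buflen));
}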
1879 #endif /* CONFIG_COREDUMP */
1880
1881 /* Code Signing related routines */
1882
1883 int
1884 csops(__unused proc_t p, struct csops_args *uap, __unused int32_t *retval)
1885 {
1886 return(csops_internal(uap->pid, uap->ops, uap->useraddr,
1887 uap->usersize, USER_ADDR_NULL));
1888 }
1889
1890 int
1891 csops_audittoken(__unused proc_t p, struct csops_audittoken_args *uap, __unused int32_t *retval)
1892 {
1893 if (uap->uaudittoken == USER_ADDR_NULL)
1894 return(EINVAL);
1895 return(csops_internal(uap->pid, uap->ops, uap->useraddr,
1896 uap->usersize, uap->uaudittoken));
1897 }
1898
1899 static int
1900 csops_copy_token(void *start, size_t length, user_size_t usize, user_addr_t uaddr)
1901 {
1902 char fakeheader[8] = { 0 };
1903 int error;
1904
1905 if (usize < sizeof(fakeheader))
1906 return ERANGE;
1907
1908 /* if no blob, fill in zero header */
1909 if (NULL == start) {
1910 start = fakeheader;
1911 length = sizeof(fakeheader);
1912 } else if (usize < length) {
1913 /* ... if input too short, copy out length of entitlement */
1914 uint32_t length32 = htonl((uint32_t)length);
1915 memcpy(&fakeheader[4], &length32, sizeof(length32));
1916
1917 error = copyout(fakeheader, uaddr, sizeof(fakeheader));
1918 if (error == 0)
1919 return ERANGE; /* input buffer too short; ERANGE signals that */
1920 return error;
1921 }
1922 return copyout(start, uaddr, length);
1923 }
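
/*
 * Illustrative sketch: from userland, the blob operations are naturally
 * driven as a two-call sequence.  A short first call receives the fake
 * header (real blob length in network byte order at offset 4) together
 * with ERANGE; the second call then supplies a buffer of that length.
 * Assumes the csops(2) libsystem wrapper and ordinary userland headers.
 */
#if 0	/* userland-side example, not kernel code */
	uint8_t header[8];
	uint32_t length;

	if (csops(pid, CS_OPS_ENTITLEMENTS_BLOB, header, sizeof(header)) == -1 &&
	    errno == ERANGE) {
		memcpy(&length, header + 4, sizeof(length));
		length = ntohl(length);
		void *blob = malloc(length);
		if (blob != NULL &&
		    csops(pid, CS_OPS_ENTITLEMENTS_BLOB, blob, length) == 0) {
			/* blob now holds the raw entitlements blob */
		}
	}
#endif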
1924
1925 static int
1926 csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaudittoken)
1927 {
1928 size_t usize = (size_t)CAST_DOWN(size_t, usersize);
1929 proc_t pt;
1930 int forself;
1931 int error;
1932 vnode_t tvp;
1933 off_t toff;
1934 unsigned char cdhash[SHA1_RESULTLEN];
1935 audit_token_t token;
1936 unsigned int upid = 0, uidversion = 0;
1937
1938 forself = error = 0;
1939
1940 if (pid == 0)
1941 pid = proc_selfpid();
1942 if (pid == proc_selfpid())
1943 forself = 1;
1944
1945
1946 switch (ops) {
1947 case CS_OPS_STATUS:
1948 case CS_OPS_CDHASH:
1949 case CS_OPS_PIDOFFSET:
1950 case CS_OPS_ENTITLEMENTS_BLOB:
1951 case CS_OPS_IDENTITY:
1952 case CS_OPS_BLOB:
1953 break; /* not restricted to root */
1954 default:
1955 if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE)
1956 return(EPERM);
1957 break;
1958 }
1959
1960 pt = proc_find(pid);
1961 if (pt == PROC_NULL)
1962 return(ESRCH);
1963
1964 upid = pt->p_pid;
1965 uidversion = pt->p_idversion;
1966 if (uaudittoken != USER_ADDR_NULL) {
1967
1968 error = copyin(uaudittoken, &token, sizeof(audit_token_t));
1969 if (error != 0)
1970 goto out;
1971 /* verify the audit token pid/idversion matches with proc */
1972 if ((token.val[5] != upid) || (token.val[7] != uidversion)) {
1973 error = ESRCH;
1974 goto out;
1975 }
1976 }
1977
1978 #if CONFIG_MACF
1979 switch (ops) {
1980 case CS_OPS_MARKINVALID:
1981 case CS_OPS_MARKHARD:
1982 case CS_OPS_MARKKILL:
1983 case CS_OPS_MARKRESTRICT:
1984 case CS_OPS_SET_STATUS:
1985 case CS_OPS_CLEARINSTALLER:
1986 case CS_OPS_CLEARPLATFORM:
1987 if ((error = mac_proc_check_set_cs_info(current_proc(), pt, ops)))
1988 goto out;
1989 break;
1990 default:
1991 if ((error = mac_proc_check_get_cs_info(current_proc(), pt, ops)))
1992 goto out;
1993 }
1994 #endif
1995
1996 switch (ops) {
1997
1998 case CS_OPS_STATUS: {
1999 uint32_t retflags;
2000
2001 proc_lock(pt);
2002 retflags = pt->p_csflags;
2003 if (cs_enforcement(pt))
2004 retflags |= CS_ENFORCEMENT;
2005 if (csproc_get_platform_binary(pt))
2006 retflags |= CS_PLATFORM_BINARY;
2007 if (csproc_get_platform_path(pt))
2008 retflags |= CS_PLATFORM_PATH;
2009 proc_unlock(pt);
2010
2011 if (uaddr != USER_ADDR_NULL)
2012 error = copyout(&retflags, uaddr, sizeof(uint32_t));
2013 break;
2014 }
2015 case CS_OPS_MARKINVALID:
2016 proc_lock(pt);
2017 if ((pt->p_csflags & CS_VALID) == CS_VALID) { /* is currently valid */
2018 pt->p_csflags &= ~CS_VALID; /* set invalid */
2019 if ((pt->p_csflags & CS_KILL) == CS_KILL) {
2020 pt->p_csflags |= CS_KILLED;
2021 proc_unlock(pt);
2022 if (cs_debug) {
2023 printf("CODE SIGNING: marked invalid by pid %d: "
2024 "p=%d[%s] honoring CS_KILL, final status 0x%x\n",
2025 proc_selfpid(), pt->p_pid, pt->p_comm, pt->p_csflags);
2026 }
2027 psignal(pt, SIGKILL);
2028 } else
2029 proc_unlock(pt);
2030 } else
2031 proc_unlock(pt);
2032
2033 break;
2034
2035 case CS_OPS_MARKHARD:
2036 proc_lock(pt);
2037 pt->p_csflags |= CS_HARD;
2038 if ((pt->p_csflags & CS_VALID) == 0) {
2039 /* @@@ allow? reject? kill? @@@ */
2040 proc_unlock(pt);
2041 error = EINVAL;
2042 goto out;
2043 } else
2044 proc_unlock(pt);
2045 break;
2046
2047 case CS_OPS_MARKKILL:
2048 proc_lock(pt);
2049 pt->p_csflags |= CS_KILL;
2050 if ((pt->p_csflags & CS_VALID) == 0) {
2051 proc_unlock(pt);
2052 psignal(pt, SIGKILL);
2053 } else
2054 proc_unlock(pt);
2055 break;
2056
2057 case CS_OPS_PIDOFFSET:
2058 toff = pt->p_textoff;
2059 proc_rele(pt);
2060 error = copyout(&toff, uaddr, sizeof(toff));
2061 return(error);
2062
2063 case CS_OPS_CDHASH:
2064
2065 /* pt already holds a reference on its p_textvp */
2066 tvp = pt->p_textvp;
2067 toff = pt->p_textoff;
2068
2069 if (tvp == NULLVP || usize != SHA1_RESULTLEN) {
2070 proc_rele(pt);
2071 return EINVAL;
2072 }
2073
2074 error = vn_getcdhash(tvp, toff, cdhash);
2075 proc_rele(pt);
2076
2077 if (error == 0) {
2078 error = copyout(cdhash, uaddr, sizeof (cdhash));
2079 }
2080
2081 return error;
2082
2083 case CS_OPS_ENTITLEMENTS_BLOB: {
2084 void *start;
2085 size_t length;
2086
2087 proc_lock(pt);
2088
2089 if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
2090 proc_unlock(pt);
2091 error = EINVAL;
2092 break;
2093 }
2094
2095 error = cs_entitlements_blob_get(pt, &start, &length);
2096 proc_unlock(pt);
2097 if (error)
2098 break;
2099
2100 error = csops_copy_token(start, length, usize, uaddr);
2101 break;
2102 }
2103 case CS_OPS_MARKRESTRICT:
2104 proc_lock(pt);
2105 pt->p_csflags |= CS_RESTRICT;
2106 proc_unlock(pt);
2107 break;
2108
2109 case CS_OPS_SET_STATUS: {
2110 uint32_t flags;
2111
2112 if (usize < sizeof(flags)) {
2113 error = ERANGE;
2114 break;
2115 }
2116
2117 error = copyin(uaddr, &flags, sizeof(flags));
2118 if (error)
2119 break;
2120
2121 /* only allow setting a subset of all code sign flags */
2122 flags &=
2123 CS_HARD | CS_EXEC_SET_HARD |
2124 CS_KILL | CS_EXEC_SET_KILL |
2125 CS_RESTRICT |
2126 CS_REQUIRE_LV |
2127 CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT;
2128
2129 proc_lock(pt);
2130 if (pt->p_csflags & CS_VALID)
2131 pt->p_csflags |= flags;
2132 else
2133 error = EINVAL;
2134 proc_unlock(pt);
2135
2136 break;
2137 }
2138 case CS_OPS_BLOB: {
2139 void *start;
2140 size_t length;
2141
2142 proc_lock(pt);
2143 if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
2144 proc_unlock(pt);
2145 error = EINVAL;
2146 break;
2147 }
2148
2149 error = cs_blob_get(pt, &start, &length);
2150 proc_unlock(pt);
2151 if (error)
2152 break;
2153
2154 error = csops_copy_token(start, length, usize, uaddr);
2155 break;
2156 }
2157 case CS_OPS_IDENTITY: {
2158 const char *identity;
2159 uint8_t fakeheader[8];
2160 uint32_t idlen;
2161 size_t length;
2162
2163 /*
2164 * Make identity have a blob header to make it
2165 * easier on userland to guess the identity
2166 * length.
2167 */
2168 if (usize < sizeof(fakeheader)) {
2169 error = ERANGE;
2170 break;
2171 }
2172 memset(fakeheader, 0, sizeof(fakeheader));
2173
2174 proc_lock(pt);
2175 if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
2176 proc_unlock(pt);
2177 error = EINVAL;
2178 break;
2179 }
2180
2181 identity = cs_identity_get(pt);
2182 proc_unlock(pt);
2183 if (identity == NULL) {
2184 error = ENOENT;
2185 break;
2186 }
2187
2188 length = strlen(identity) + 1; /* include NUL */
2189 idlen = htonl(length + sizeof(fakeheader));
2190 memcpy(&fakeheader[4], &idlen, sizeof(idlen));
2191
2192 error = copyout(fakeheader, uaddr, sizeof(fakeheader));
2193 if (error)
2194 break;
2195
2196 if (usize < sizeof(fakeheader) + length)
2197 error = ERANGE;
2198 else if (usize > sizeof(fakeheader))
2199 error = copyout(identity, uaddr + sizeof(fakeheader), length);
2200
2201 break;
2202 }
2203
2204 case CS_OPS_CLEARINSTALLER:
2205 proc_lock(pt);
2206 pt->p_csflags &= ~(CS_INSTALLER | CS_DATAVAULT_CONTROLLER | CS_EXEC_INHERIT_SIP);
2207 proc_unlock(pt);
2208 break;
2209
2210 case CS_OPS_CLEARPLATFORM:
2211 #if DEVELOPMENT || DEBUG
2212 if (cs_enforcement_enable) {
2213 error = ENOTSUP;
2214 break;
2215 }
2216
2217 #if CONFIG_CSR
2218 if (csr_check(CSR_ALLOW_APPLE_INTERNAL) != 0) {
2219 error = ENOTSUP;
2220 break;
2221 }
2222 #endif
2223
2224 proc_lock(pt);
2225 pt->p_csflags &= ~(CS_PLATFORM_BINARY|CS_PLATFORM_PATH);
2226 csproc_clear_platform_binary(pt);
2227 proc_unlock(pt);
2228 break;
2229 #else
2230 error = ENOTSUP;
2231 break;
2232 #endif /* !DEVELOPMENT || DEBUG */
2233
2234 default:
2235 error = EINVAL;
2236 break;
2237 }
2238 out:
2239 proc_rele(pt);
2240 return(error);
2241 }
2242
2243 int
2244 proc_iterate(
2245 unsigned int flags,
2246 proc_iterate_fn_t callout,
2247 void *arg,
2248 proc_iterate_fn_t filterfn,
2249 void *filterarg)
2250 {
2251 pid_t *pid_list;
2252 vm_size_t pid_list_size = 0;
2253 vm_size_t pid_list_size_needed = 0;
2254 int pid_count = 0;
2255 int pid_count_available = 0;
2256
2257 assert(callout != NULL);
2258
2259 /* allocate outside of the proc_list_lock */
2260 for (;;) {
2261 proc_list_lock();
2262
2263 pid_count_available = nprocs + 1; //kernel_task is not counted in nprocs
2264 assert(pid_count_available > 0);
2265
2266 pid_list_size_needed = pid_count_available * sizeof(pid_t);
2267 if (pid_list_size >= pid_list_size_needed) {
2268 break;
2269 }
2270 proc_list_unlock();
2271
2272 if (pid_list_size != 0) {
2273 kfree(pid_list, pid_list_size);
2274 }
2275 pid_list = kalloc(pid_list_size_needed);
2276 if (!pid_list) {
2277 return 1;
2278 }
2279 pid_list_size = pid_list_size_needed;
2280 }
2281
2282 /* filter pids into pid_list */
2283
2284 if (flags & PROC_ALLPROCLIST) {
2285 proc_t p;
2286 ALLPROC_FOREACH(p) {
2287 /* ignore processes that are being forked */
2288 if (p->p_stat == SIDL) {
2289 continue;
2290 }
2291 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
2292 continue;
2293 }
2294
2295 pid_list[pid_count++] = proc_pid(p);
2296 if (pid_count >= pid_count_available) {
2297 break;
2298 }
2299 }
2300 }
2301
2302 if ((pid_count < pid_count_available) &&
2303 (flags & PROC_ZOMBPROCLIST))
2304 {
2305 proc_t p;
2306 ZOMBPROC_FOREACH(p) {
2307 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
2308 continue;
2309 }
2310
2311 pid_list[pid_count++] = proc_pid(p);
2312 if (pid_count >= pid_count_available) {
2313 break;
2314 }
2315 }
2316 }
2317
2318 proc_list_unlock();
2319
2320 /* call callout on processes in the pid_list */
2321
2322 for (int i = 0; i < pid_count; i++) {
2323 proc_t p = proc_find(pid_list[i]);
2324 if (p) {
2325 if ((flags & PROC_NOWAITTRANS) == 0) {
2326 proc_transwait(p, 0);
2327 }
2328 int callout_ret = callout(p, arg);
2329
2330 switch (callout_ret) {
2331 case PROC_RETURNED_DONE:
2332 proc_rele(p);
2333 /* FALLTHROUGH */
2334 case PROC_CLAIMED_DONE:
2335 goto out;
2336
2337 case PROC_RETURNED:
2338 proc_rele(p);
2339 /* FALLTHROUGH */
2340 case PROC_CLAIMED:
2341 break;
2342
2343 default:
2344 panic("proc_iterate: callout returned %d for pid %d",
2345 callout_ret, pid_list[i]);
2346 break;
2347 }
2348 } else if (flags & PROC_ZOMBPROCLIST) {
2349 p = proc_find_zombref(pid_list[i]);
2350 if (!p) {
2351 continue;
2352 }
2353 int callout_ret = callout(p, arg);
2354
2355 switch (callout_ret) {
2356 case PROC_RETURNED_DONE:
2357 proc_drop_zombref(p);
2358 /* FALLTHROUGH */
2359 case PROC_CLAIMED_DONE:
2360 goto out;
2361
2362 case PROC_RETURNED:
2363 proc_drop_zombref(p);
2364 /* FALLTHROUGH */
2365 case PROC_CLAIMED:
2366 break;
2367
2368 default:
2369 panic("proc_iterate: callout returned %d for zombie pid %d",
2370 callout_ret, pid_list[i]);
2371 break;
2372 }
2373 }
2374 }
2375
2376 out:
2377 kfree(pid_list, pid_list_size);
2378 return 0;
2379
2380 }
2381
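/*
 * proc_rebootscan
 *
 * Shutdown-time walk: the allproc scan is restarted from the top
 * after every callout, since callouts here typically tear processes
 * down and the list can change while the list lock is dropped.
 */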
2382 void
2383 proc_rebootscan(
2384 proc_iterate_fn_t callout,
2385 void *arg,
2386 proc_iterate_fn_t filterfn,
2387 void *filterarg)
2388 {
2389 proc_t p;
2390
2391 assert(callout != NULL);
2392
2393 proc_shutdown_exitcount = 0;
2394
2395 restart_foreach:
2396
2397 proc_list_lock();
2398
2399 ALLPROC_FOREACH(p) {
2400 if ((filterfn != NULL) && filterfn(p, filterarg) == 0) {
2401 continue;
2402 }
2403 p = proc_ref_locked(p);
2404 if (!p) {
2405 continue;
2406 }
2407
2408 proc_list_unlock();
2409
2410 proc_transwait(p, 0);
2411 (void)callout(p, arg);
2412 proc_rele(p);
2413
2414 goto restart_foreach;
2415 }
2416
2417 proc_list_unlock();
2418 }
2419
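/*
 * proc_childrenwalk
 *
 * Like proc_iterate, but restricted to the direct children of
 * 'parent': snapshot their pids under the list lock, then run the
 * callout on each, with the same PROC_RETURNED/PROC_CLAIMED
 * return-code contract.
 */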
2420 int
2421 proc_childrenwalk(
2422 proc_t parent,
2423 proc_iterate_fn_t callout,
2424 void *arg)
2425 {
2426 pid_t *pid_list;
2427 vm_size_t pid_list_size = 0;
2428 vm_size_t pid_list_size_needed = 0;
2429 int pid_count = 0;
2430 int pid_count_available = 0;
2431
2432 assert(parent != NULL);
2433 assert(callout != NULL);
2434
2435 for (;;) {
2436 proc_list_lock();
2437
2438 pid_count_available = parent->p_childrencnt;
2439 if (pid_count_available == 0) {
2440 proc_list_unlock();
2441 return 0;
2442 }
2443
2444 pid_list_size_needed = pid_count_available * sizeof(pid_t);
2445 if (pid_list_size >= pid_list_size_needed) {
2446 break;
2447 }
2448 proc_list_unlock();
2449
2450 if (pid_list_size != 0) {
2451 kfree(pid_list, pid_list_size);
2452 }
2453 pid_list = kalloc(pid_list_size_needed);
2454 if (!pid_list) {
2455 return 1;
2456 }
2457 pid_list_size = pid_list_size_needed;
2458 }
2459
2460 proc_t p;
2461 PCHILDREN_FOREACH(parent, p) {
2462 if (p->p_stat == SIDL) {
2463 continue;
2464 }
2465
2466 pid_list[pid_count++] = proc_pid(p);
2467 if (pid_count >= pid_count_available) {
2468 break;
2469 }
2470 }
2471
2472 proc_list_unlock();
2473
2474 for (int i = 0; i < pid_count; i++) {
2475 p = proc_find(pid_list[i]);
2476 if (!p) {
2477 continue;
2478 }
2479
2480 int callout_ret = callout(p, arg);
2481
2482 switch (callout_ret) {
2483 case PROC_RETURNED_DONE:
2484 proc_rele(p);
2485 /* FALLTHROUGH */
2486 case PROC_CLAIMED_DONE:
2487 goto out;
2488
2489 case PROC_RETURNED:
2490 proc_rele(p);
2491 /* FALLTHROUGH */
2492 case PROC_CLAIMED:
2493 break;
2494 default:
2495 panic("proc_childrenwalk: callout returned %d for pid %d",
2496 callout_ret, pid_list[i]);
2497 break;
2498 }
2499 }
2500
2501 out:
2502 kfree(pid_list, pid_list_size);
2503 return 0;
2504 }
2505
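/*
 * pgrp_iterate
 *
 * Snapshots the members of a process group under pgrp_lock, then runs
 * the callout on each member that is still in the group; membership is
 * re-checked against pg_id after proc_find(), since a process can move
 * to another group between the snapshot and the callout.
 */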
2506 int
2507 pgrp_iterate(
2508 struct pgrp *pgrp,
2509 unsigned int flags,
2510 proc_iterate_fn_t callout,
2511 void * arg,
2512 proc_iterate_fn_t filterfn,
2513 void * filterarg)
2514 {
2515 pid_t *pid_list;
2516 proc_t p;
2517 vm_size_t pid_list_size = 0;
2518 vm_size_t pid_list_size_needed = 0;
2519 int pid_count = 0;
2520 int pid_count_available = 0;
2521
2522 pid_t pgid;
2523
2524 assert(pgrp != NULL);
2525 assert(callout != NULL);
2526
2527 for (;;) {
2528 pgrp_lock(pgrp);
2529
2530 pid_count_available = pgrp->pg_membercnt;
2531 if (pid_count_available == 0) {
2532 pgrp_unlock(pgrp);
2533 return 0;
2534 }
2535
2536 pid_list_size_needed = pid_count_available * sizeof(pid_t);
2537 if (pid_list_size >= pid_list_size_needed) {
2538 break;
2539 }
2540 pgrp_unlock(pgrp);
2541
2542 if (pid_list_size != 0) {
2543 kfree(pid_list, pid_list_size);
2544 }
2545 pid_list = kalloc(pid_list_size_needed);
2546 if (!pid_list) {
2547 return 1;
2548 }
2549 pid_list_size = pid_list_size_needed;
2550 }
2551
2552 pgid = pgrp->pg_id;
2553
2554 PGMEMBERS_FOREACH(pgrp, p) {
2555 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
2556 continue;
2557 }
2558 pid_list[pid_count++] = proc_pid(p);
2559 if (pid_count >= pid_count_available) {
2560 break;
2561 }
2562 }
2563
2564 pgrp_unlock(pgrp);
2565
2566 if (flags & PGRP_DROPREF) {
2567 pg_rele(pgrp);
2568 }
2569
2570 for (int i = 0; i < pid_count; i++) {
2571 /* do not handle kernproc */
2572 if (pid_list[i] == 0) {
2573 continue;
2574 }
2575 p = proc_find(pid_list[i]);
2576 if (!p) {
2577 continue;
2578 }
2579 if (p->p_pgrpid != pgid) {
2580 proc_rele(p);
2581 continue;
2582 }
2583
2584 int callout_ret = callout(p, arg);
2585
2586 switch (callout_ret) {
2587 case PROC_RETURNED:
2588 proc_rele(p);
2589 /* FALLTHROUGH */
2590 case PROC_CLAIMED:
2591 break;
2592
2593 case PROC_RETURNED_DONE:
2594 proc_rele(p);
2595 /* FALLTHROUGH */
2596 case PROC_CLAIMED_DONE:
2597 goto out;
2598
2599 default:
2600 panic("pgrp_iterate: callout returned %d for pid %d",
2601 callout_ret, pid_list[i]);
2602 }
2603 }
2604
2605 out:
2606 kfree(pid_list, pid_list_size);
2607 return 0;
2608 }
2609
2610 static void
2611 pgrp_add(struct pgrp * pgrp, struct proc * parent, struct proc * child)
2612 {
2613 proc_list_lock();
2614 child->p_pgrp = pgrp;
2615 child->p_pgrpid = pgrp->pg_id;
2616 child->p_listflag |= P_LIST_INPGRP;
2617 /*
2618 * While the pgrp is being freed, a process can still request
2619 * addition to it via setpgid(2), e.g. from bash when a login
2620 * session is terminated (the "login cycler"); such requests
2621 * return ESRCH. Safe to hold the lock due to the refcount on pgrp.
2622 */
2623 if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
2624 pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
2625 }
2626
2627 if ((pgrp->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
2628 panic("pgrp_add: pgrp is dead adding process");
2629 proc_list_unlock();
2630
2631 pgrp_lock(pgrp);
2632 pgrp->pg_membercnt++;
2633 if (parent != PROC_NULL) {
2634 LIST_INSERT_AFTER(parent, child, p_pglist);
2635 } else {
2636 LIST_INSERT_HEAD(&pgrp->pg_members, child, p_pglist);
2637 }
2638 pgrp_unlock(pgrp);
2639
2640 proc_list_lock();
2641 if (((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (pgrp->pg_membercnt != 0)) {
2642 pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
2643 }
2644 proc_list_unlock();
2645 }
2646
2647 static void
2648 pgrp_remove(struct proc * p)
2649 {
2650 struct pgrp * pg;
2651
2652 pg = proc_pgrp(p);
2653
2654 proc_list_lock();
2655 #if __PROC_INTERNAL_DEBUG
2656 if ((p->p_listflag & P_LIST_INPGRP) == 0)
2657 panic("removing from pglist but no named ref\n");
2658 #endif
2659 p->p_pgrpid = PGRPID_DEAD;
2660 p->p_listflag &= ~P_LIST_INPGRP;
2661 p->p_pgrp = NULL;
2662 proc_list_unlock();
2663
2664 if (pg == PGRP_NULL)
2665 panic("pgrp_remove: pg is NULL");
2666 pgrp_lock(pg);
2667 pg->pg_membercnt--;
2668
2669 if (pg->pg_membercnt < 0)
2670 panic("pgrp_remove: negative membercnt pgrp:%p p:%p\n", pg, p);
2671
2672 LIST_REMOVE(p, p_pglist);
2673 if (pg->pg_members.lh_first == 0) {
2674 pgrp_unlock(pg);
2675 pgdelete_dropref(pg);
2676 } else {
2677 pgrp_unlock(pg);
2678 pg_rele(pg);
2679 }
2680 }
2681
2682
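/*
 * pgrp_replace
 *
 * Moves p from its current process group to newpg. The move is
 * bracketed by P_LIST_PGRPTRANS so that concurrent proc_pgrp() and
 * proc_session() callers sleep on p_pgrpid until it completes.
 */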
2683 /* cannot use proc_pgrp as it may be stalled */
2684 static void
2685 pgrp_replace(struct proc * p, struct pgrp * newpg)
2686 {
2687 struct pgrp * oldpg;
2688
2689
2690
2691 proc_list_lock();
2692
2693 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2694 p->p_listflag |= P_LIST_PGRPTRWAIT;
2695 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2696 }
2697
2698 p->p_listflag |= P_LIST_PGRPTRANS;
2699
2700 oldpg = p->p_pgrp;
2701 if (oldpg == PGRP_NULL)
2702 panic("pgrp_replace: oldpg NULL");
2703 oldpg->pg_refcount++;
2704 #if __PROC_INTERNAL_DEBUG
2705 if ((p->p_listflag & P_LIST_INPGRP) == 0)
2706 panic("removing from pglist but no named ref\n");
2707 #endif
2708 p->p_pgrpid = PGRPID_DEAD;
2709 p->p_listflag &= ~P_LIST_INPGRP;
2710 p->p_pgrp = NULL;
2711
2712 proc_list_unlock();
2713
2714 pgrp_lock(oldpg);
2715 oldpg->pg_membercnt--;
2716 if (oldpg->pg_membercnt < 0)
2717 panic("pgrp_replace: negative membercnt pgrp:%p p:%p\n", oldpg, p);
2718 LIST_REMOVE(p, p_pglist);
2719 if (oldpg->pg_members.lh_first == 0) {
2720 pgrp_unlock(oldpg);
2721 pgdelete_dropref(oldpg);
2722 } else {
2723 pgrp_unlock(oldpg);
2724 pg_rele(oldpg);
2725 }
2726
2727 proc_list_lock();
2728 p->p_pgrp = newpg;
2729 p->p_pgrpid = newpg->pg_id;
2730 p->p_listflag |= P_LIST_INPGRP;
2731 /*
2732 * While the pgrp is being freed, a process can still request
2733 * addition to it via setpgid(2), e.g. from bash when a login
2734 * session is terminated (the "login cycler"); such requests
2735 * return ESRCH. Safe to hold the lock due to the refcount on pgrp.
2736 */
2737 if ((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
2738 newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
2739 }
2740
2741 if ((newpg->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
2742 panic("pgrp_replace: pgrp is dead adding process");
2743 proc_list_unlock();
2744
2745 pgrp_lock(newpg);
2746 newpg->pg_membercnt++;
2747 LIST_INSERT_HEAD(&newpg->pg_members, p, p_pglist);
2748 pgrp_unlock(newpg);
2749
2750 proc_list_lock();
2751 if (((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (newpg->pg_membercnt != 0)) {
2752 newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
2753 }
2754
2755 p->p_listflag &= ~P_LIST_PGRPTRANS;
2756 if ((p->p_listflag & P_LIST_PGRPTRWAIT) == P_LIST_PGRPTRWAIT) {
2757 p->p_listflag &= ~P_LIST_PGRPTRWAIT;
2758 wakeup(&p->p_pgrpid);
2759
2760 }
2761 proc_list_unlock();
2762 }
2763
2764 void
2765 pgrp_lock(struct pgrp * pgrp)
2766 {
2767 lck_mtx_lock(&pgrp->pg_mlock);
2768 }
2769
2770 void
2771 pgrp_unlock(struct pgrp * pgrp)
2772 {
2773 lck_mtx_unlock(&pgrp->pg_mlock);
2774 }
2775
2776 void
2777 session_lock(struct session * sess)
2778 {
2779 lck_mtx_lock(&sess->s_mlock);
2780 }
2781
2782
2783 void
2784 session_unlock(struct session * sess)
2785 {
2786 lck_mtx_unlock(&sess->s_mlock);
2787 }
2788
2789 struct pgrp *
2790 proc_pgrp(proc_t p)
2791 {
2792 struct pgrp * pgrp;
2793
2794 if (p == PROC_NULL)
2795 return(PGRP_NULL);
2796 proc_list_lock();
2797
2798 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2799 p->p_listflag |= P_LIST_PGRPTRWAIT;
2800 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2801 }
2802
2803 pgrp = p->p_pgrp;
2804
2805 assert(pgrp != NULL);
2806
2807 if (pgrp != PGRP_NULL) {
2808 pgrp->pg_refcount++;
2809 if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) != 0)
2810 panic("proc_pgrp: ref being provided for dead pgrp");
2811 }
2812
2813 proc_list_unlock();
2814
2815 return(pgrp);
2816 }
2817
2818 struct pgrp *
2819 tty_pgrp(struct tty * tp)
2820 {
2821 struct pgrp * pg = PGRP_NULL;
2822
2823 proc_list_lock();
2824 pg = tp->t_pgrp;
2825
2826 if (pg != PGRP_NULL) {
2827 if ((pg->pg_listflags & PGRP_FLAG_DEAD) != 0)
2828 panic("tty_pgrp: ref being provided for dead pgrp");
2829 pg->pg_refcount++;
2830 }
2831 proc_list_unlock();
2832
2833 return(pg);
2834 }
2835
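/*
 * proc_session
 *
 * Returns the session of p with an additional reference (s_count)
 * held; callers drop it with session_rele(). Any in-flight process
 * group transition on p is waited out first.
 */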
2836 struct session *
2837 proc_session(proc_t p)
2838 {
2839 struct session * sess = SESSION_NULL;
2840
2841 if (p == PROC_NULL)
2842 return(SESSION_NULL);
2843
2844 proc_list_lock();
2845
2846 /* wait during transitions */
2847 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2848 p->p_listflag |= P_LIST_PGRPTRWAIT;
2849 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2850 }
2851
2852 if ((p->p_pgrp != PGRP_NULL) && ((sess = p->p_pgrp->pg_session) != SESSION_NULL)) {
2853 if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
2854 panic("proc_session: returning session ref on terminating session");
2855 sess->s_count++;
2856 }
2857 proc_list_unlock();
2858 return(sess);
2859 }
2860
2861 void
2862 session_rele(struct session *sess)
2863 {
2864 proc_list_lock();
2865 if (--sess->s_count == 0) {
2866 if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
2867 panic("session_rele: terminating already terminated session");
2868 sess->s_listflags |= S_LIST_TERM;
2869 LIST_REMOVE(sess, s_hash);
2870 sess->s_listflags |= S_LIST_DEAD;
2871 if (sess->s_count != 0)
2872 panic("session_rele: freeing session in use");
2873 proc_list_unlock();
2874 #if CONFIG_FINE_LOCK_GROUPS
2875 lck_mtx_destroy(&sess->s_mlock, proc_mlock_grp);
2876 #else
2877 lck_mtx_destroy(&sess->s_mlock, proc_lck_grp);
2878 #endif
2879 FREE_ZONE(sess, sizeof(struct session), M_SESSION);
2880 } else
2881 proc_list_unlock();
2882 }
2883
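/*
 * Process transition protocol: proc_transstart() marks p as being in
 * transition (for example, across exec), proc_transcommit() makes the
 * transition irreversible, and proc_transend() clears it. Other
 * threads use proc_transwait() to sleep until the transition drains.
 * proc_transstart() fails with EDEADLK instead of sleeping once the
 * transition has been committed (or when non_blocking is set), and
 * proc_transwait() does the same when the waiter is the transitioning
 * process itself.
 */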
2884 int
2885 proc_transstart(proc_t p, int locked, int non_blocking)
2886 {
2887 if (locked == 0)
2888 proc_lock(p);
2889 while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
2890 if (((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT) || non_blocking) {
2891 if (locked == 0)
2892 proc_unlock(p);
2893 return EDEADLK;
2894 }
2895 p->p_lflag |= P_LTRANSWAIT;
2896 msleep(&p->p_lflag, &p->p_mlock, 0, "proc_transstart", NULL);
2897 }
2898 p->p_lflag |= P_LINTRANSIT;
2899 p->p_transholder = current_thread();
2900 if (locked == 0)
2901 proc_unlock(p);
2902 return 0;
2903 }
2904
2905 void
2906 proc_transcommit(proc_t p, int locked)
2907 {
2908 if (locked == 0)
2909 proc_lock(p);
2910
2911 assert((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT);
2912 assert(p->p_transholder == current_thread());
2913 p->p_lflag |= P_LTRANSCOMMIT;
2914
2915 if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
2916 p->p_lflag &= ~P_LTRANSWAIT;
2917 wakeup(&p->p_lflag);
2918 }
2919 if (locked == 0)
2920 proc_unlock(p);
2921 }
2922
2923 void
2924 proc_transend(proc_t p, int locked)
2925 {
2926 if (locked == 0)
2927 proc_lock(p);
2928
2929 p->p_lflag &= ~( P_LINTRANSIT | P_LTRANSCOMMIT);
2930 p->p_transholder = NULL;
2931
2932 if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
2933 p->p_lflag &= ~P_LTRANSWAIT;
2934 wakeup(&p->p_lflag);
2935 }
2936 if (locked == 0)
2937 proc_unlock(p);
2938 }
2939
2940 int
2941 proc_transwait(proc_t p, int locked)
2942 {
2943 if (locked == 0)
2944 proc_lock(p);
2945 while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
2946 if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT && current_proc() == p) {
2947 if (locked == 0)
2948 proc_unlock(p);
2949 return EDEADLK;
2950 }
2951 p->p_lflag |= P_LTRANSWAIT;
2952 msleep(&p->p_lflag, &p->p_mlock, 0, "proc_transwait", NULL);
2953 }
2954 if (locked == 0)
2955 proc_unlock(p);
2956 return 0;
2957 }
2958
2959 void
2960 proc_klist_lock(void)
2961 {
2962 lck_mtx_lock(proc_klist_mlock);
2963 }
2964
2965 void
2966 proc_klist_unlock(void)
2967 {
2968 lck_mtx_unlock(proc_klist_mlock);
2969 }
2970
2971 void
2972 proc_knote(struct proc * p, long hint)
2973 {
2974 proc_klist_lock();
2975 KNOTE(&p->p_klist, hint);
2976 proc_klist_unlock();
2977 }
2978
2979 void
2980 proc_knote_drain(struct proc *p)
2981 {
2982 struct knote *kn = NULL;
2983
2984 /*
2985 * Clear the proc's klist to avoid references after the proc is reaped.
2986 */
2987 proc_klist_lock();
2988 while ((kn = SLIST_FIRST(&p->p_klist))) {
2989 kn->kn_ptr.p_proc = PROC_NULL;
2990 KNOTE_DETACH(&p->p_klist, kn);
2991 }
2992 proc_klist_unlock();
2993 }
2994
2995 void
2996 proc_setregister(proc_t p)
2997 {
2998 proc_lock(p);
2999 p->p_lflag |= P_LREGISTER;
3000 proc_unlock(p);
3001 }
3002
3003 void
3004 proc_resetregister(proc_t p)
3005 {
3006 proc_lock(p);
3007 p->p_lflag &= ~P_LREGISTER;
3008 proc_unlock(p);
3009 }
3010
3011 pid_t
3012 proc_pgrpid(proc_t p)
3013 {
3014 return p->p_pgrpid;
3015 }
3016
3017 pid_t
3018 proc_selfpgrpid(void)
3019 {
3020 return current_proc()->p_pgrpid;
3021 }
3022
3023
3024 /* return control and action states */
3025 int
3026 proc_getpcontrol(int pid, int * pcontrolp)
3027 {
3028 proc_t p;
3029
3030 p = proc_find(pid);
3031 if (p == PROC_NULL)
3032 return(ESRCH);
3033 if (pcontrolp != NULL)
3034 *pcontrolp = p->p_pcaction;
3035
3036 proc_rele(p);
3037 return(0);
3038 }
3039
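/*
 * proc_dopcontrol
 *
 * Applies the process's declared low-swap policy (throttle, suspend,
 * or kill) at most once: the action state is set before acting, so a
 * later pass over the same process does nothing.
 */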
3040 int
3041 proc_dopcontrol(proc_t p)
3042 {
3043 int pcontrol;
3044
3045 proc_lock(p);
3046
3047 pcontrol = PROC_CONTROL_STATE(p);
3048
3049 if (PROC_ACTION_STATE(p) == 0) {
3050 switch (pcontrol) {
3051 case P_PCTHROTTLE:
3052 PROC_SETACTION_STATE(p);
3053 proc_unlock(p);
3054 printf("low swap: throttling pid %d (%s)\n", p->p_pid, p->p_comm);
3055 break;
3056
3057 case P_PCSUSP:
3058 PROC_SETACTION_STATE(p);
3059 proc_unlock(p);
3060 printf("low swap: suspending pid %d (%s)\n", p->p_pid, p->p_comm);
3061 task_suspend(p->task);
3062 break;
3063
3064 case P_PCKILL:
3065 PROC_SETACTION_STATE(p);
3066 proc_unlock(p);
3067 printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
3068 psignal(p, SIGKILL);
3069 break;
3070
3071 default:
3072 proc_unlock(p);
3073 }
3074
3075 } else
3076 proc_unlock(p);
3077
3078 return(PROC_RETURNED);
3079 }
3080
3081
3082 /*
3083 * Resume a throttled or suspended process. This is an internal interface that's only
3084 * used by the user level code that presents the GUI when we run out of swap space and
3085 * hence is restricted to processes with superuser privileges.
3086 */
3087
3088 int
3089 proc_resetpcontrol(int pid)
3090 {
3091 proc_t p;
3092 int pcontrol;
3093 int error;
3094 proc_t self = current_proc();
3095
3096 /* the process must be validated to handle resource control, or the caller must have superuser privileges */
3097 if (((self->p_lflag & P_LVMRSRCOWNER) == 0) && (error = suser(kauth_cred_get(), 0)))
3098 return error;
3099
3100 p = proc_find(pid);
3101 if (p == PROC_NULL)
3102 return(ESRCH);
3103
3104 proc_lock(p);
3105
3106 pcontrol = PROC_CONTROL_STATE(p);
3107
3108 if (PROC_ACTION_STATE(p) != 0) {
3109 switch (pcontrol) {
3110 case P_PCTHROTTLE:
3111 PROC_RESETACTION_STATE(p);
3112 proc_unlock(p);
3113 printf("low swap: unthrottling pid %d (%s)\n", p->p_pid, p->p_comm);
3114 break;
3115
3116 case P_PCSUSP:
3117 PROC_RESETACTION_STATE(p);
3118 proc_unlock(p);
3119 printf("low swap: resuming pid %d (%s)\n", p->p_pid, p->p_comm);
3120 task_resume(p->task);
3121 break;
3122
3123 case P_PCKILL:
3124 /* Huh? */
3125 PROC_SETACTION_STATE(p);
3126 proc_unlock(p);
3127 printf("low swap: attempt to unkill pid %d (%s) ignored\n", p->p_pid, p->p_comm);
3128 break;
3129
3130 default:
3131 proc_unlock(p);
3132 }
3133
3134 } else
3135 proc_unlock(p);
3136
3137 proc_rele(p);
3138 return(0);
3139 }
3140
3141
3142
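/*
 * Accounting gathered by proc_pcontrol_filter(): the pcs_ fields
 * describe processes that declared a low-swap action but have not yet
 * been actioned, the npcs_ fields describe processes with no declared
 * action, and the apcs_ fields describe processes already actioned.
 * Sizes are compressor-pool footprints from get_task_compressed().
 */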
3143 struct no_paging_space
3144 {
3145 uint64_t pcs_max_size;
3146 uint64_t pcs_uniqueid;
3147 int pcs_pid;
3148 int pcs_proc_count;
3149 uint64_t pcs_total_size;
3150
3151 uint64_t npcs_max_size;
3152 uint64_t npcs_uniqueid;
3153 int npcs_pid;
3154 int npcs_proc_count;
3155 uint64_t npcs_total_size;
3156
3157 int apcs_proc_count;
3158 uint64_t apcs_total_size;
3159 };
3160
3161
3162 static int
3163 proc_pcontrol_filter(proc_t p, void *arg)
3164 {
3165 struct no_paging_space *nps;
3166 uint64_t compressed;
3167
3168 nps = (struct no_paging_space *)arg;
3169
3170 compressed = get_task_compressed(p->task);
3171
3172 if (PROC_CONTROL_STATE(p)) {
3173 if (PROC_ACTION_STATE(p) == 0) {
3174 if (compressed > nps->pcs_max_size) {
3175 nps->pcs_pid = p->p_pid;
3176 nps->pcs_uniqueid = p->p_uniqueid;
3177 nps->pcs_max_size = compressed;
3178 }
3179 nps->pcs_total_size += compressed;
3180 nps->pcs_proc_count++;
3181 } else {
3182 nps->apcs_total_size += compressed;
3183 nps->apcs_proc_count++;
3184 }
3185 } else {
3186 if (compressed > nps->npcs_max_size) {
3187 nps->npcs_pid = p->p_pid;
3188 nps->npcs_uniqueid = p->p_uniqueid;
3189 nps->npcs_max_size = compressed;
3190 }
3191 nps->npcs_total_size += compressed;
3192 nps->npcs_proc_count++;
3193
3194 }
3195 return (0);
3196 }
3197
3198
3199 static int
3200 proc_pcontrol_null(__unused proc_t p, __unused void *arg)
3201 {
3202 return(PROC_RETURNED);
3203 }
3204
3205
3206 /*
3207 * Deal with the low on compressor pool space condition... this function
3208 * gets called when we are approaching the limits of the compressor pool or
3209 * we are unable to create a new swap file.
3210 * Since this eventually creates a memory deadlock situation, we need to take action to free up
3211 * memory resources (both compressed and uncompressed) in order to prevent the system from hanging completely.
3212 * There are 2 categories of processes to deal with. Those that have an action
3213 * associated with them by the task itself and those that do not. Actionable
3214 * tasks can have one of three categories specified: ones that
3215 * can be killed immediately, ones that should be suspended, and ones that should
3216 * be throttled. Processes that do not have an action associated with them are normally
3217 * ignored unless they are utilizing such a large percentage of the compressor pool (currently 50%)
3218 * that only by killing them can we hope to put the system back into a usable state.
3219 */
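/*
 * The escalation order below: (1) if the largest non-actionable
 * process holds more than half of the compressor pool, kill it
 * outright; (2) otherwise, if jetsam has eligible candidates, let the
 * memorystatus thread pick a victim; (3) otherwise fall back to the
 * largest actionable process's own policy via proc_dopcontrol().
 */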
3220
3221 #define NO_PAGING_SPACE_DEBUG 0
3222
3223 extern uint64_t vm_compressor_pages_compressed(void);
3224
3225 struct timeval last_no_space_action = {0, 0};
3226
3227 #if DEVELOPMENT || DEBUG
3228 extern boolean_t kill_on_no_paging_space;
3229 #endif /* DEVELOPMENT || DEBUG */
3230
3231 #define MB_SIZE (1024 * 1024ULL)
3232 boolean_t memorystatus_kill_on_VM_thrashing(boolean_t);
3233
3234 extern int32_t max_kill_priority;
3235 extern int memorystatus_get_proccnt_upto_priority(int32_t max_bucket_index);
3236
3237 int
3238 no_paging_space_action(void)
3239 {
3240 proc_t p;
3241 struct no_paging_space nps;
3242 struct timeval now;
3243
3244 /*
3245 * Throttle how often we come through here. Once every 5 seconds should be plenty.
3246 */
3247 microtime(&now);
3248
3249 if (now.tv_sec <= last_no_space_action.tv_sec + 5)
3250 return (0);
3251
3252 /*
3253 * Examine all processes and find the biggest (biggest is based on the number of pages this
3254 * task has in the compressor pool) that has been marked to have some action
3255 * taken when swap space runs out... we also find the biggest that hasn't been marked for
3256 * action.
3257 *
3258 * If the biggest non-actionable task is over the "dangerously big" threshold (currently 50% of
3259 * the total number of pages held by the compressor), we go ahead and kill it since no other task
3260 * can have any real effect on the situation. Otherwise, we go after the actionable process.
3261 */
3262 bzero(&nps, sizeof(nps));
3263
3264 proc_iterate(PROC_ALLPROCLIST, proc_pcontrol_null, (void *)NULL, proc_pcontrol_filter, (void *)&nps);
3265
3266 #if NO_PAGING_SPACE_DEBUG
3267 printf("low swap: npcs_proc_count = %d, npcs_total_size = %qd, npcs_max_size = %qd\n",
3268 nps.npcs_proc_count, nps.npcs_total_size, nps.npcs_max_size);
3269 printf("low swap: pcs_proc_count = %d, pcs_total_size = %qd, pcs_max_size = %qd\n",
3270 nps.pcs_proc_count, nps.pcs_total_size, nps.pcs_max_size);
3271 printf("low swap: apcs_proc_count = %d, apcs_total_size = %qd\n",
3272 nps.apcs_proc_count, nps.apcs_total_size);
3273 #endif
3274 if (nps.npcs_max_size > (vm_compressor_pages_compressed() * 50) / 100) {
3275 /*
3276 * for now we'll knock out any task that has more than 50% of the pages
3277 * held by the compressor
3278 */
3279 if ((p = proc_find(nps.npcs_pid)) != PROC_NULL) {
3280
3281 if (nps.npcs_uniqueid == p->p_uniqueid) {
3282 /*
3283 * verify this is still the same process
3284 * in case the proc exited and the pid got reused while
3285 * we were finishing the proc_iterate and getting to this point
3286 */
3287 last_no_space_action = now;
3288
3289 printf("low swap: killing largest compressed process with pid %d (%s) and size %llu MB\n", p->p_pid, p->p_comm, (nps.npcs_max_size/MB_SIZE));
3290 psignal(p, SIGKILL);
3291
3292 proc_rele(p);
3293
3294 return (0);
3295 }
3296
3297 proc_rele(p);
3298 }
3299 }
3300
3301 /*
3302 * If there are processes within our jetsam bands of consideration, they can be
3303 * killed, so invoke the memorystatus thread to go ahead and kill something.
3304 */
3305 if (memorystatus_get_proccnt_upto_priority(max_kill_priority) > 0) {
3306
3307 last_no_space_action = now;
3308 memorystatus_kill_on_VM_thrashing(TRUE /* async */);
3309 return (1);
3310 }
3311
3312 /*
3313 * No eligible jetsam processes to kill, so throttle/suspend/kill the largest
3314 * actionable process according to its policy control specification.
3315 */
3316
3317 if (nps.pcs_max_size > 0) {
3318 if ((p = proc_find(nps.pcs_pid)) != PROC_NULL) {
3319
3320 if (nps.pcs_uniqueid == p->p_uniqueid) {
3321 /*
3322 * verify this is still the same process
3323 * in case the proc exited and the pid got reused while
3324 * we were finishing the proc_iterate and getting to this point
3325 */
3326 last_no_space_action = now;
3327
3328 proc_dopcontrol(p);
3329
3330 proc_rele(p);
3331
3332 return (1);
3333 }
3334
3335 proc_rele(p);
3336 }
3337 }
3338 last_no_space_action = now;
3339
3340 printf("low swap: unable to find any eligible processes to take action on\n");
3341
3342 return (0);
3343 }
3344
3345 int
3346 proc_trace_log(__unused proc_t p, struct proc_trace_log_args *uap, __unused int *retval)
3347 {
3348 int ret = 0;
3349 proc_t target_proc = PROC_NULL;
3350 pid_t target_pid = uap->pid;
3351 uint64_t target_uniqueid = uap->uniqueid;
3352 task_t target_task = NULL;
3353
3354 if (priv_check_cred(kauth_cred_get(), PRIV_PROC_TRACE_INSPECT, 0)) {
3355 ret = EPERM;
3356 goto out;
3357 }
3358 target_proc = proc_find(target_pid);
3359 if (target_proc != PROC_NULL) {
3360 if (target_uniqueid != proc_uniqueid(target_proc)) {
3361 ret = ENOENT;
3362 goto out;
3363 }
3364
3365 target_task = proc_task(target_proc);
3366 if (task_send_trace_memory(target_task, target_pid, target_uniqueid)) {
3367 ret = EINVAL;
3368 goto out;
3369 }
3370 } else
3371 ret = ENOENT;
3372
3373 out:
3374 if (target_proc != PROC_NULL)
3375 proc_rele(target_proc);
3376 return (ret);
3377 }
3378
3379 #if VM_SCAN_FOR_SHADOW_CHAIN
3380 extern int vm_map_shadow_max(vm_map_t map);
3381 int proc_shadow_max(void);
3382 int proc_shadow_max(void)
3383 {
3384 int retval, max;
3385 proc_t p;
3386 task_t task;
3387 vm_map_t map;
3388
3389 max = 0;
3390 proc_list_lock();
3391 for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
3392 if (p->p_stat == SIDL)
3393 continue;
3394 task = p->task;
3395 if (task == NULL) {
3396 continue;
3397 }
3398 map = get_task_map(task);
3399 if (map == NULL) {
3400 continue;
3401 }
3402 retval = vm_map_shadow_max(map);
3403 if (retval > max) {
3404 max = retval;
3405 }
3406 }
3407 proc_list_unlock();
3408 return max;
3409 }
3410 #endif /* VM_SCAN_FOR_SHADOW_CHAIN */
3411
3412 void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid);
3413 void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid)
3414 {
3415 if (target_proc != NULL) {
3416 target_proc->p_responsible_pid = responsible_pid;
3417 }
3418 return;
3419 }
3420
3421 int
3422 proc_chrooted(proc_t p)
3423 {
3424 int retval = 0;
3425
3426 if (p) {
3427 proc_fdlock(p);
3428 retval = (p->p_fd->fd_rdir != NULL) ? 1 : 0;
3429 proc_fdunlock(p);
3430 }
3431
3432 return retval;
3433 }
3434
3435 void *
3436 proc_get_uthread_uu_threadlist(void * uthread_v)
3437 {
3438 uthread_t uth = (uthread_t)uthread_v;
3439 return (uth != NULL) ? uth->uu_threadlist : NULL;
3440 }
3441
3442 #ifdef CONFIG_32BIT_TELEMETRY
3443 void
3444 proc_log_32bit_telemetry(proc_t p)
3445 {
3446 /* Gather info */
3447 char signature_buf[MAX_32BIT_EXEC_SIG_SIZE] = { 0 };
3448 char * signature_cur_end = &signature_buf[0];
3449 char * signature_buf_end = &signature_buf[MAX_32BIT_EXEC_SIG_SIZE - 1];
3450 int bytes_printed = 0;
3451
3452 const char * teamid = NULL;
3453 const char * identity = NULL;
3454 struct cs_blob * csblob = NULL;
3455
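/*
 * The emitted signature has the form
 * "procname,parentname,teamid,identity" (fields are left empty when
 * the team ID or signing identity is unavailable), assembled by the
 * two snprintf() calls below.
 */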
3456 proc_list_lock();
3457
3458 /*
3459 * Get proc name and parent proc name; if the parent execs, we'll get a
3460 * garbled name.
3461 */
3462 bytes_printed = snprintf(signature_cur_end,
3463 signature_buf_end - signature_cur_end,
3464 "%s,%s,", p->p_name,
3465 (p->p_pptr ? p->p_pptr->p_name : ""));
3466
3467 if (bytes_printed > 0) {
3468 signature_cur_end += bytes_printed;
3469 }
3470
3471 proc_list_unlock();
3472
3473 /* Get developer info. */
3474 vnode_t v = proc_getexecutablevnode(p);
3475
3476 if (v) {
3477 csblob = csvnode_get_blob(v, 0);
3478
3479 if (csblob) {
3480 teamid = csblob_get_teamid(csblob);
3481 identity = csblob_get_identity(csblob);
3482 }
3483 }
3484
3485 if (teamid == NULL) {
3486 teamid = "";
3487 }
3488
3489 if (identity == NULL) {
3490 identity = "";
3491 }
3492
3493 bytes_printed = snprintf(signature_cur_end,
3494 signature_buf_end - signature_cur_end,
3495 "%s,%s", teamid, identity);
3496
3497 if (bytes_printed > 0) {
3498 signature_cur_end += bytes_printed;
3499 }
3500
3501 if (v) {
3502 vnode_put(v);
3503 }
3504
3505 /*
3506 * We may want to rate limit here, although the SUMMARIZE key should
3507 * help us aggregate events in userspace.
3508 */
3509
3510 /* Emit log */
3511 kern_asl_msg(LOG_DEBUG, "messagetracer", 3,
3512 /* 0 */ "com.apple.message.domain", "com.apple.kernel.32bit_exec",
3513 /* 1 */ "com.apple.message.signature", signature_buf,
3514 /* 2 */ "com.apple.message.summarize", "YES",
3515 NULL);
3516 }
3517 #endif /* CONFIG_32BIT_TELEMETRY */