]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kern_proc.c
249d8c3553db97e05dd327b1e416b8b5d2ade481
[apple/xnu.git] / bsd / kern / kern_proc.c
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)kern_proc.c 8.4 (Berkeley) 1/4/94
62 */
63 /*
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
67 * Version 2.0.
68 */
69 /* HISTORY
70 * 04-Aug-97 Umesh Vaishampayan (umeshv@apple.com)
71 * Added current_proc_EXTERNAL() function for the use of kernel
72 * loadable modules.
73 *
74 * 05-Jun-95 Mac Gillon (mgillon) at NeXT
75 * New version based on 3.3NS and 4.4
76 */
77
78
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/kernel.h>
82 #include <sys/proc_internal.h>
83 #include <sys/acct.h>
84 #include <sys/wait.h>
85 #include <sys/file_internal.h>
86 #include <sys/uio.h>
87 #include <sys/malloc.h>
88 #include <sys/lock.h>
89 #include <sys/mbuf.h>
90 #include <sys/ioctl.h>
91 #include <sys/tty.h>
92 #include <sys/signalvar.h>
93 #include <sys/syslog.h>
94 #include <sys/sysctl.h>
95 #include <sys/sysproto.h>
96 #include <sys/kauth.h>
97 #include <sys/codesign.h>
98 #include <sys/kernel_types.h>
99 #include <sys/ubc.h>
100 #include <kern/kalloc.h>
101 #include <kern/task.h>
102 #include <kern/coalition.h>
103 #include <sys/coalition.h>
104 #include <kern/assert.h>
105 #include <vm/vm_protos.h>
106 #include <vm/vm_map.h> /* vm_map_switch_protect() */
107 #include <vm/vm_pageout.h>
108 #include <mach/task.h>
109 #include <mach/message.h>
110 #include <sys/priv.h>
111 #include <sys/proc_info.h>
112 #include <sys/bsdtask_info.h>
113 #include <sys/persona.h>
114
115 #ifdef CONFIG_32BIT_TELEMETRY
116 #include <sys/kasl.h>
117 #endif /* CONFIG_32BIT_TELEMETRY */
118
119 #if CONFIG_CSR
120 #include <sys/csr.h>
121 #endif
122
123 #if CONFIG_MEMORYSTATUS
124 #include <sys/kern_memorystatus.h>
125 #endif
126
127 #if CONFIG_MACF
128 #include <security/mac_framework.h>
129 #endif
130
131 #include <libkern/crypto/sha1.h>
132
133 #ifdef CONFIG_32BIT_TELEMETRY
134 #define MAX_32BIT_EXEC_SIG_SIZE 160
135 #endif /* CONFIG_32BIT_TELEMETRY */
136
137 /*
138 * Structure associated with user cacheing.
139 */
140 struct uidinfo {
141 LIST_ENTRY(uidinfo) ui_hash;
142 uid_t ui_uid;
143 long ui_proccnt;
144 };
145 #define UIHASH(uid) (&uihashtbl[(uid) & uihash])
146 LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
147 u_long uihash; /* size of hash table - 1 */
148
149 /*
150 * Other process lists
151 */
152 struct pidhashhead *pidhashtbl;
153 u_long pidhash;
154 struct pgrphashhead *pgrphashtbl;
155 u_long pgrphash;
156 struct sesshashhead *sesshashtbl;
157 u_long sesshash;
158
159 struct proclist allproc;
160 struct proclist zombproc;
161 extern struct tty cons;
162
163 extern int cs_debug;
164
165 #if DEVELOPMENT || DEBUG
166 extern int cs_enforcement_enable;
167 #endif
168
169 #if DEBUG
170 #define __PROC_INTERNAL_DEBUG 1
171 #endif
172 #if CONFIG_COREDUMP
173 /* Name to give to core files */
174 #if defined(XNU_TARGET_OS_BRIDGE)
175 __XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN+1] = {"/private/var/internal/%N.core"};
176 #elif CONFIG_EMBEDDED
177 __XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN+1] = {"/private/var/cores/%N.core"};
178 #else
179 __XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN+1] = {"/cores/core.%P"};
180 #endif
181 #endif
182
183 #if PROC_REF_DEBUG
184 #include <kern/backtrace.h>
185 #endif
186
187 static void orphanpg(struct pgrp * pg);
188 void proc_name_kdp(task_t t, char * buf, int size);
189 void * proc_get_uthread_uu_threadlist(void * uthread_v);
190 int proc_threadname_kdp(void * uth, char * buf, size_t size);
191 void proc_starttime_kdp(void * p, uint64_t * tv_sec, uint64_t * tv_usec, uint64_t * abstime);
192 char * proc_name_address(void * p);
193
194 /* TODO: make a header that's exported and usable in osfmk */
195 char* proc_best_name(proc_t p);
196
197 static void pgrp_add(struct pgrp * pgrp, proc_t parent, proc_t child);
198 static void pgrp_remove(proc_t p);
199 static void pgrp_replace(proc_t p, struct pgrp *pgrp);
200 static void pgdelete_dropref(struct pgrp *pgrp);
201 extern void pg_rele_dropref(struct pgrp * pgrp);
202 static int csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaddittoken);
203 static boolean_t proc_parent_is_currentproc(proc_t p);
204
205 struct fixjob_iterargs {
206 struct pgrp * pg;
207 struct session * mysession;
208 int entering;
209 };
210
211 int fixjob_callback(proc_t, void *);
212
213 uint64_t get_current_unique_pid(void);
214
215
216 uint64_t
217 get_current_unique_pid(void)
218 {
219 proc_t p = current_proc();
220
221 if (p)
222 return p->p_uniqueid;
223 else
224 return 0;
225 }
226
227 /*
228 * Initialize global process hashing structures.
229 */
230 void
231 procinit(void)
232 {
233 LIST_INIT(&allproc);
234 LIST_INIT(&zombproc);
235 pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
236 pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
237 sesshashtbl = hashinit(maxproc / 4, M_PROC, &sesshash);
238 uihashtbl = hashinit(maxproc / 16, M_PROC, &uihash);
239 #if CONFIG_PERSONAS
240 personas_bootstrap();
241 #endif
242 }
243
244 /*
245 * Change the count associated with number of processes
246 * a given user is using. This routine protects the uihash
247 * with the list lock
248 */
249 int
250 chgproccnt(uid_t uid, int diff)
251 {
252 struct uidinfo *uip;
253 struct uidinfo *newuip = NULL;
254 struct uihashhead *uipp;
255 int retval;
256
257 again:
258 proc_list_lock();
259 uipp = UIHASH(uid);
260 for (uip = uipp->lh_first; uip != 0; uip = uip->ui_hash.le_next)
261 if (uip->ui_uid == uid)
262 break;
263 if (uip) {
264 uip->ui_proccnt += diff;
265 if (uip->ui_proccnt > 0) {
266 retval = uip->ui_proccnt;
267 proc_list_unlock();
268 goto out;
269 }
270 if (uip->ui_proccnt < 0)
271 panic("chgproccnt: procs < 0");
272 LIST_REMOVE(uip, ui_hash);
273 retval = 0;
274 proc_list_unlock();
275 FREE_ZONE(uip, sizeof(*uip), M_PROC);
276 goto out;
277 }
278 if (diff <= 0) {
279 if (diff == 0) {
280 retval = 0;
281 proc_list_unlock();
282 goto out;
283 }
284 panic("chgproccnt: lost user");
285 }
286 if (newuip != NULL) {
287 uip = newuip;
288 newuip = NULL;
289 LIST_INSERT_HEAD(uipp, uip, ui_hash);
290 uip->ui_uid = uid;
291 uip->ui_proccnt = diff;
292 retval = diff;
293 proc_list_unlock();
294 goto out;
295 }
296 proc_list_unlock();
297 MALLOC_ZONE(newuip, struct uidinfo *, sizeof(*uip), M_PROC, M_WAITOK);
298 if (newuip == NULL)
299 panic("chgproccnt: M_PROC zone depleted");
300 goto again;
301 out:
302 if (newuip != NULL)
303 FREE_ZONE(newuip, sizeof(*uip), M_PROC);
304 return(retval);
305 }
306
307 /*
308 * Is p an inferior of the current process?
309 */
310 int
311 inferior(proc_t p)
312 {
313 int retval = 0;
314
315 proc_list_lock();
316 for (; p != current_proc(); p = p->p_pptr)
317 if (p->p_pid == 0)
318 goto out;
319 retval = 1;
320 out:
321 proc_list_unlock();
322 return(retval);
323 }
324
325 /*
326 * Is p an inferior of t ?
327 */
328 int
329 isinferior(proc_t p, proc_t t)
330 {
331 int retval = 0;
332 int nchecked = 0;
333 proc_t start = p;
334
335 /* if p==t they are not inferior */
336 if (p == t)
337 return(0);
338
339 proc_list_lock();
340 for (; p != t; p = p->p_pptr) {
341 nchecked++;
342
343 /* Detect here if we're in a cycle */
344 if ((p->p_pid == 0) || (p->p_pptr == start) || (nchecked >= nprocs))
345 goto out;
346 }
347 retval = 1;
348 out:
349 proc_list_unlock();
350 return(retval);
351 }
352
353 int
354 proc_isinferior(int pid1, int pid2)
355 {
356 proc_t p = PROC_NULL;
357 proc_t t = PROC_NULL;
358 int retval = 0;
359
360 if (((p = proc_find(pid1)) != (proc_t)0 ) && ((t = proc_find(pid2)) != (proc_t)0))
361 retval = isinferior(p, t);
362
363 if (p != PROC_NULL)
364 proc_rele(p);
365 if (t != PROC_NULL)
366 proc_rele(t);
367
368 return(retval);
369 }
370
371 proc_t
372 proc_find(int pid)
373 {
374 return(proc_findinternal(pid, 0));
375 }
376
377 proc_t
378 proc_findinternal(int pid, int locked)
379 {
380 proc_t p = PROC_NULL;
381
382 if (locked == 0) {
383 proc_list_lock();
384 }
385
386 p = pfind_locked(pid);
387 if ((p == PROC_NULL) || (p != proc_ref_locked(p)))
388 p = PROC_NULL;
389
390 if (locked == 0) {
391 proc_list_unlock();
392 }
393
394 return(p);
395 }
396
397 proc_t
398 proc_findthread(thread_t thread)
399 {
400 proc_t p = PROC_NULL;
401 struct uthread *uth;
402
403 proc_list_lock();
404 uth = get_bsdthread_info(thread);
405 if (uth && (uth->uu_flag & UT_VFORK))
406 p = uth->uu_proc;
407 else
408 p = (proc_t)(get_bsdthreadtask_info(thread));
409 p = proc_ref_locked(p);
410 proc_list_unlock();
411 return(p);
412 }
413
414 void
415 uthread_reset_proc_refcount(void *uthread) {
416 uthread_t uth;
417
418 uth = (uthread_t) uthread;
419 uth->uu_proc_refcount = 0;
420
421 #if PROC_REF_DEBUG
422 if (proc_ref_tracking_disabled) {
423 return;
424 }
425
426 uth->uu_pindex = 0;
427 #endif
428 }
429
430 #if PROC_REF_DEBUG
431 int
432 uthread_get_proc_refcount(void *uthread) {
433 uthread_t uth;
434
435 if (proc_ref_tracking_disabled) {
436 return 0;
437 }
438
439 uth = (uthread_t) uthread;
440
441 return uth->uu_proc_refcount;
442 }
443 #endif
444
445 static void
446 record_procref(proc_t p __unused, int count) {
447 uthread_t uth;
448
449 uth = current_uthread();
450 uth->uu_proc_refcount += count;
451
452 #if PROC_REF_DEBUG
453 if (proc_ref_tracking_disabled) {
454 return;
455 }
456
457 if (count == 1) {
458 if (uth->uu_pindex < NUM_PROC_REFS_TO_TRACK) {
459 backtrace((uintptr_t *) &uth->uu_proc_pcs[uth->uu_pindex], PROC_REF_STACK_DEPTH);
460
461 uth->uu_proc_ps[uth->uu_pindex] = p;
462 uth->uu_pindex++;
463 }
464 }
465 #endif
466 }
467
468 static boolean_t
469 uthread_needs_to_wait_in_proc_refwait(void) {
470 uthread_t uth = current_uthread();
471
472 /*
473 * Allow threads holding no proc refs to wait
474 * in proc_refwait, allowing threads holding
475 * proc refs to wait in proc_refwait causes
476 * deadlocks and makes proc_find non-reentrant.
477 */
478 if (uth->uu_proc_refcount == 0)
479 return TRUE;
480
481 return FALSE;
482 }
483
484 int
485 proc_rele(proc_t p)
486 {
487 proc_list_lock();
488 proc_rele_locked(p);
489 proc_list_unlock();
490
491 return(0);
492 }
493
494 proc_t
495 proc_self(void)
496 {
497 struct proc * p;
498
499 p = current_proc();
500
501 proc_list_lock();
502 if (p != proc_ref_locked(p))
503 p = PROC_NULL;
504 proc_list_unlock();
505 return(p);
506 }
507
508
509 proc_t
510 proc_ref_locked(proc_t p)
511 {
512 proc_t p1 = p;
513 int pid = proc_pid(p);
514
515 retry:
516 /*
517 * if process still in creation or proc got recycled
518 * during msleep then return failure.
519 */
520 if ((p == PROC_NULL) || (p1 != p) || ((p->p_listflag & P_LIST_INCREATE) != 0))
521 return (PROC_NULL);
522
523 /*
524 * Do not return process marked for termination
525 * or proc_refdrain called without ref wait.
526 * Wait for proc_refdrain_with_refwait to complete if
527 * process in refdrain and refwait flag is set, unless
528 * the current thread is holding to a proc_ref
529 * for any proc.
530 */
531 if ((p->p_stat != SZOMB) &&
532 ((p->p_listflag & P_LIST_EXITED) == 0) &&
533 ((p->p_listflag & P_LIST_DEAD) == 0) &&
534 (((p->p_listflag & (P_LIST_DRAIN | P_LIST_DRAINWAIT)) == 0) ||
535 ((p->p_listflag & P_LIST_REFWAIT) != 0))) {
536 if ((p->p_listflag & P_LIST_REFWAIT) != 0 && uthread_needs_to_wait_in_proc_refwait()) {
537 msleep(&p->p_listflag, proc_list_mlock, 0, "proc_refwait", 0) ;
538 /*
539 * the proc might have been recycled since we dropped
540 * the proc list lock, get the proc again.
541 */
542 p = pfind_locked(pid);
543 goto retry;
544 }
545 p->p_refcount++;
546 record_procref(p, 1);
547 }
548 else
549 p1 = PROC_NULL;
550
551 return(p1);
552 }
553
554 void
555 proc_rele_locked(proc_t p)
556 {
557
558 if (p->p_refcount > 0) {
559 p->p_refcount--;
560 record_procref(p, -1);
561 if ((p->p_refcount == 0) && ((p->p_listflag & P_LIST_DRAINWAIT) == P_LIST_DRAINWAIT)) {
562 p->p_listflag &= ~P_LIST_DRAINWAIT;
563 wakeup(&p->p_refcount);
564 }
565 } else
566 panic("proc_rele_locked -ve ref\n");
567
568 }
569
570 proc_t
571 proc_find_zombref(int pid)
572 {
573 proc_t p;
574
575 proc_list_lock();
576
577 again:
578 p = pfind_locked(pid);
579
580 /* should we bail? */
581 if ((p == PROC_NULL) /* not found */
582 || ((p->p_listflag & P_LIST_INCREATE) != 0) /* not created yet */
583 || ((p->p_listflag & P_LIST_EXITED) == 0)) { /* not started exit */
584
585 proc_list_unlock();
586 return (PROC_NULL);
587 }
588
589 /* If someone else is controlling the (unreaped) zombie - wait */
590 if ((p->p_listflag & P_LIST_WAITING) != 0) {
591 (void)msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
592 goto again;
593 }
594 p->p_listflag |= P_LIST_WAITING;
595
596 proc_list_unlock();
597
598 return(p);
599 }
600
601 void
602 proc_drop_zombref(proc_t p)
603 {
604 proc_list_lock();
605 if ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
606 p->p_listflag &= ~P_LIST_WAITING;
607 wakeup(&p->p_stat);
608 }
609 proc_list_unlock();
610 }
611
612
613 void
614 proc_refdrain(proc_t p)
615 {
616 proc_refdrain_with_refwait(p, FALSE);
617 }
618
619 proc_t
620 proc_refdrain_with_refwait(proc_t p, boolean_t get_ref_and_allow_wait)
621 {
622 boolean_t initexec = FALSE;
623 proc_list_lock();
624
625 p->p_listflag |= P_LIST_DRAIN;
626 if (get_ref_and_allow_wait) {
627 /*
628 * All the calls to proc_ref_locked will wait
629 * for the flag to get cleared before returning a ref,
630 * unless the current thread is holding to a proc ref
631 * for any proc.
632 */
633 p->p_listflag |= P_LIST_REFWAIT;
634 if (p == initproc) {
635 initexec = TRUE;
636 }
637 }
638
639 /* Do not wait in ref drain for launchd exec */
640 while (p->p_refcount && !initexec) {
641 p->p_listflag |= P_LIST_DRAINWAIT;
642 msleep(&p->p_refcount, proc_list_mlock, 0, "proc_refdrain", 0) ;
643 }
644
645 p->p_listflag &= ~P_LIST_DRAIN;
646 if (!get_ref_and_allow_wait) {
647 p->p_listflag |= P_LIST_DEAD;
648 } else {
649 /* Return a ref to the caller */
650 p->p_refcount++;
651 record_procref(p, 1);
652 }
653
654 proc_list_unlock();
655
656 if (get_ref_and_allow_wait) {
657 return (p);
658 }
659 return NULL;
660 }
661
662 void
663 proc_refwake(proc_t p)
664 {
665 proc_list_lock();
666 p->p_listflag &= ~P_LIST_REFWAIT;
667 wakeup(&p->p_listflag);
668 proc_list_unlock();
669 }
670
671 proc_t
672 proc_parentholdref(proc_t p)
673 {
674 proc_t parent = PROC_NULL;
675 proc_t pp;
676 int loopcnt = 0;
677
678
679 proc_list_lock();
680 loop:
681 pp = p->p_pptr;
682 if ((pp == PROC_NULL) || (pp->p_stat == SZOMB) || ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED))) {
683 parent = PROC_NULL;
684 goto out;
685 }
686
687 if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == P_LIST_CHILDDRSTART) {
688 pp->p_listflag |= P_LIST_CHILDDRWAIT;
689 msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
690 loopcnt++;
691 if (loopcnt == 5) {
692 parent = PROC_NULL;
693 goto out;
694 }
695 goto loop;
696 }
697
698 if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == 0) {
699 pp->p_parentref++;
700 parent = pp;
701 goto out;
702 }
703
704 out:
705 proc_list_unlock();
706 return(parent);
707 }
708 int
709 proc_parentdropref(proc_t p, int listlocked)
710 {
711 if (listlocked == 0)
712 proc_list_lock();
713
714 if (p->p_parentref > 0) {
715 p->p_parentref--;
716 if ((p->p_parentref == 0) && ((p->p_listflag & P_LIST_PARENTREFWAIT) == P_LIST_PARENTREFWAIT)) {
717 p->p_listflag &= ~P_LIST_PARENTREFWAIT;
718 wakeup(&p->p_parentref);
719 }
720 } else
721 panic("proc_parentdropref -ve ref\n");
722 if (listlocked == 0)
723 proc_list_unlock();
724
725 return(0);
726 }
727
728 void
729 proc_childdrainstart(proc_t p)
730 {
731 #if __PROC_INTERNAL_DEBUG
732 if ((p->p_listflag & P_LIST_CHILDDRSTART) == P_LIST_CHILDDRSTART)
733 panic("proc_childdrainstart: childdrain already started\n");
734 #endif
735 p->p_listflag |= P_LIST_CHILDDRSTART;
736 /* wait for all that hold parentrefs to drop */
737 while (p->p_parentref > 0) {
738 p->p_listflag |= P_LIST_PARENTREFWAIT;
739 msleep(&p->p_parentref, proc_list_mlock, 0, "proc_childdrainstart", 0) ;
740 }
741 }
742
743
744 void
745 proc_childdrainend(proc_t p)
746 {
747 #if __PROC_INTERNAL_DEBUG
748 if (p->p_childrencnt > 0)
749 panic("exiting: children stil hanging around\n");
750 #endif
751 p->p_listflag |= P_LIST_CHILDDRAINED;
752 if ((p->p_listflag & (P_LIST_CHILDLKWAIT |P_LIST_CHILDDRWAIT)) != 0) {
753 p->p_listflag &= ~(P_LIST_CHILDLKWAIT |P_LIST_CHILDDRWAIT);
754 wakeup(&p->p_childrencnt);
755 }
756 }
757
758 void
759 proc_checkdeadrefs(__unused proc_t p)
760 {
761 #if __PROC_INTERNAL_DEBUG
762 if ((p->p_listflag & P_LIST_INHASH) != 0)
763 panic("proc being freed and still in hash %p: %u\n", p, p->p_listflag);
764 if (p->p_childrencnt != 0)
765 panic("proc being freed and pending children cnt %p:%d\n", p, p->p_childrencnt);
766 if (p->p_refcount != 0)
767 panic("proc being freed and pending refcount %p:%d\n", p, p->p_refcount);
768 if (p->p_parentref != 0)
769 panic("proc being freed and pending parentrefs %p:%d\n", p, p->p_parentref);
770 #endif
771 }
772
773 int
774 proc_pid(proc_t p)
775 {
776 if (p != NULL)
777 return (p->p_pid);
778 return -1;
779 }
780
781 int
782 proc_ppid(proc_t p)
783 {
784 if (p != NULL)
785 return (p->p_ppid);
786 return -1;
787 }
788
789 int
790 proc_selfpid(void)
791 {
792 return (current_proc()->p_pid);
793 }
794
795 int
796 proc_selfppid(void)
797 {
798 return (current_proc()->p_ppid);
799 }
800
801 int
802 proc_selfcsflags(void)
803 {
804 return (current_proc()->p_csflags);
805 }
806
#if CONFIG_DTRACE
/*
 * Resolve the proc DTrace should attribute the current thread to,
 * accounting for the window inside the vfork syscall where the
 * thread is not yet presented as the child.
 */
static proc_t
dtrace_current_proc_vforking(void)
{
	thread_t th = current_thread();
	struct uthread *ut = get_bsdthread_info(th);

	if (ut &&
	    ((ut->uu_flag & (UT_VFORK | UT_VFORKING)) == (UT_VFORK | UT_VFORKING))) {
		/*
		 * Handle the narrow window where we're in the vfork syscall,
		 * but we're not quite ready to claim (in particular, to DTrace)
		 * that we're running as the child.
		 */
		return get_bsdtask_info(get_threadtask(th));
	}
	return current_proc();
}

/* pid as seen by DTrace, vfork-aware */
int
dtrace_proc_selfpid(void)
{
	return dtrace_current_proc_vforking()->p_pid;
}

/* parent pid as seen by DTrace, vfork-aware */
int
dtrace_proc_selfppid(void)
{
	return dtrace_current_proc_vforking()->p_ppid;
}

/* real uid as seen by DTrace, vfork-aware */
uid_t
dtrace_proc_selfruid(void)
{
	return dtrace_current_proc_vforking()->p_ruid;
}
#endif /* CONFIG_DTRACE */
844
845 proc_t
846 proc_parent(proc_t p)
847 {
848 proc_t parent;
849 proc_t pp;
850
851 proc_list_lock();
852 loop:
853 pp = p->p_pptr;
854 parent = proc_ref_locked(pp);
855 if ((parent == PROC_NULL) && (pp != PROC_NULL) && (pp->p_stat != SZOMB) && ((pp->p_listflag & P_LIST_EXITED) != 0) && ((pp->p_listflag & P_LIST_CHILDDRAINED)== 0)){
856 pp->p_listflag |= P_LIST_CHILDLKWAIT;
857 msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
858 goto loop;
859 }
860 proc_list_unlock();
861 return(parent);
862 }
863
864 static boolean_t
865 proc_parent_is_currentproc(proc_t p)
866 {
867 boolean_t ret = FALSE;
868
869 proc_list_lock();
870 if (p->p_pptr == current_proc())
871 ret = TRUE;
872
873 proc_list_unlock();
874 return ret;
875 }
876
877 void
878 proc_name(int pid, char * buf, int size)
879 {
880 proc_t p;
881
882 if ((p = proc_find(pid)) != PROC_NULL) {
883 strlcpy(buf, &p->p_comm[0], size);
884 proc_rele(p);
885 }
886 }
887
888 void
889 proc_name_kdp(task_t t, char * buf, int size)
890 {
891 proc_t p = get_bsdtask_info(t);
892 if (p == PROC_NULL)
893 return;
894
895 if ((size_t)size > sizeof(p->p_comm))
896 strlcpy(buf, &p->p_name[0], MIN((int)sizeof(p->p_name), size));
897 else
898 strlcpy(buf, &p->p_comm[0], MIN((int)sizeof(p->p_comm), size));
899 }
900
901 int
902 proc_threadname_kdp(void * uth, char * buf, size_t size)
903 {
904 if (size < MAXTHREADNAMESIZE) {
905 /* this is really just a protective measure for the future in
906 * case the thread name size in stackshot gets out of sync with
907 * the BSD max thread name size. Note that bsd_getthreadname
908 * doesn't take input buffer size into account. */
909 return -1;
910 }
911
912 if (uth != NULL) {
913 bsd_getthreadname(uth, buf);
914 }
915 return 0;
916 }
917
918 /* note that this function is generally going to be called from stackshot,
919 * and the arguments will be coming from a struct which is declared packed
920 * thus the input arguments will in general be unaligned. We have to handle
921 * that here. */
922 void
923 proc_starttime_kdp(void *p, uint64_t *tv_sec, uint64_t *tv_usec, uint64_t *abstime)
924 {
925 proc_t pp = (proc_t)p;
926 struct uint64p {
927 uint64_t val;
928 } __attribute__((packed));
929
930 if (pp != PROC_NULL) {
931 if (tv_sec != NULL)
932 ((struct uint64p *)tv_sec)->val = pp->p_start.tv_sec;
933 if (tv_usec != NULL)
934 ((struct uint64p *)tv_usec)->val = pp->p_start.tv_usec;
935 if (abstime != NULL) {
936 if (pp->p_stats != NULL)
937 *abstime = pp->p_stats->ps_start;
938 else
939 *abstime = 0;
940 }
941 }
942 }
943
944 char *
945 proc_name_address(void *p)
946 {
947 return &((proc_t)p)->p_comm[0];
948 }
949
950 char *
951 proc_best_name(proc_t p)
952 {
953 if (p->p_name[0] != 0)
954 return (&p->p_name[0]);
955 return (&p->p_comm[0]);
956 }
957
958 void
959 proc_selfname(char * buf, int size)
960 {
961 proc_t p;
962
963 if ((p = current_proc())!= (proc_t)0) {
964 strlcpy(buf, &p->p_comm[0], size);
965 }
966 }
967
968 void
969 proc_signal(int pid, int signum)
970 {
971 proc_t p;
972
973 if ((p = proc_find(pid)) != PROC_NULL) {
974 psignal(p, signum);
975 proc_rele(p);
976 }
977 }
978
979 int
980 proc_issignal(int pid, sigset_t mask)
981 {
982 proc_t p;
983 int error=0;
984
985 if ((p = proc_find(pid)) != PROC_NULL) {
986 error = proc_pendingsignals(p, mask);
987 proc_rele(p);
988 }
989
990 return(error);
991 }
992
993 int
994 proc_noremotehang(proc_t p)
995 {
996 int retval = 0;
997
998 if (p)
999 retval = p->p_flag & P_NOREMOTEHANG;
1000 return(retval? 1: 0);
1001
1002 }
1003
1004 int
1005 proc_exiting(proc_t p)
1006 {
1007 int retval = 0;
1008
1009 if (p)
1010 retval = p->p_lflag & P_LEXIT;
1011 return(retval? 1: 0);
1012 }
1013
1014 int
1015 proc_forcequota(proc_t p)
1016 {
1017 int retval = 0;
1018
1019 if (p)
1020 retval = p->p_flag & P_FORCEQUOTA;
1021 return(retval? 1: 0);
1022
1023 }
1024
1025 int
1026 proc_suser(proc_t p)
1027 {
1028 kauth_cred_t my_cred;
1029 int error;
1030
1031 my_cred = kauth_cred_proc_ref(p);
1032 error = suser(my_cred, &p->p_acflag);
1033 kauth_cred_unref(&my_cred);
1034 return(error);
1035 }
1036
1037 task_t
1038 proc_task(proc_t proc)
1039 {
1040 return (task_t)proc->task;
1041 }
1042
1043 /*
1044 * Obtain the first thread in a process
1045 *
1046 * XXX This is a bad thing to do; it exists predominantly to support the
1047 * XXX use of proc_t's in places that should really be using
1048 * XXX thread_t's instead. This maintains historical behaviour, but really
1049 * XXX needs an audit of the context (proxy vs. not) to clean up.
1050 */
1051 thread_t
1052 proc_thread(proc_t proc)
1053 {
1054 uthread_t uth = TAILQ_FIRST(&proc->p_uthlist);
1055
1056 if (uth != NULL)
1057 return(uth->uu_context.vc_thread);
1058
1059 return(NULL);
1060 }
1061
1062 kauth_cred_t
1063 proc_ucred(proc_t p)
1064 {
1065 return(p->p_ucred);
1066 }
1067
1068 struct uthread *
1069 current_uthread()
1070 {
1071 thread_t th = current_thread();
1072
1073 return((struct uthread *)get_bsdthread_info(th));
1074 }
1075
1076
1077 int
1078 proc_is64bit(proc_t p)
1079 {
1080 return(IS_64BIT_PROCESS(p));
1081 }
1082
1083 int
1084 proc_pidversion(proc_t p)
1085 {
1086 return(p->p_idversion);
1087 }
1088
1089 uint32_t
1090 proc_persona_id(proc_t p)
1091 {
1092 return (uint32_t)persona_id_from_proc(p);
1093 }
1094
1095 uint32_t
1096 proc_getuid(proc_t p)
1097 {
1098 return(p->p_uid);
1099 }
1100
1101 uint32_t
1102 proc_getgid(proc_t p)
1103 {
1104 return(p->p_gid);
1105 }
1106
1107 uint64_t
1108 proc_uniqueid(proc_t p)
1109 {
1110 return(p->p_uniqueid);
1111 }
1112
1113 uint64_t
1114 proc_puniqueid(proc_t p)
1115 {
1116 return(p->p_puniqueid);
1117 }
1118
1119 void
1120 proc_coalitionids(__unused proc_t p, __unused uint64_t ids[COALITION_NUM_TYPES])
1121 {
1122 #if CONFIG_COALITIONS
1123 task_coalition_ids(p->task, ids);
1124 #else
1125 memset(ids, 0, sizeof(uint64_t [COALITION_NUM_TYPES]));
1126 #endif
1127 return;
1128 }
1129
1130 uint64_t
1131 proc_was_throttled(proc_t p)
1132 {
1133 return (p->was_throttled);
1134 }
1135
1136 uint64_t
1137 proc_did_throttle(proc_t p)
1138 {
1139 return (p->did_throttle);
1140 }
1141
1142 int
1143 proc_getcdhash(proc_t p, unsigned char *cdhash)
1144 {
1145 return vn_getcdhash(p->p_textvp, p->p_textoff, cdhash);
1146 }
1147
1148 void
1149 proc_getexecutableuuid(proc_t p, unsigned char *uuidbuf, unsigned long size)
1150 {
1151 if (size >= sizeof(p->p_uuid)) {
1152 memcpy(uuidbuf, p->p_uuid, sizeof(p->p_uuid));
1153 }
1154 }
1155
1156 /* Return vnode for executable with an iocount. Must be released with vnode_put() */
1157 vnode_t
1158 proc_getexecutablevnode(proc_t p)
1159 {
1160 vnode_t tvp = p->p_textvp;
1161
1162 if ( tvp != NULLVP) {
1163 if (vnode_getwithref(tvp) == 0) {
1164 return tvp;
1165 }
1166 }
1167
1168 return NULLVP;
1169 }
1170
1171
1172 void
1173 bsd_set_dependency_capable(task_t task)
1174 {
1175 proc_t p = get_bsdtask_info(task);
1176
1177 if (p) {
1178 OSBitOrAtomic(P_DEPENDENCY_CAPABLE, &p->p_flag);
1179 }
1180 }
1181
1182
1183 #ifndef __arm__
1184 int
1185 IS_64BIT_PROCESS(proc_t p)
1186 {
1187 if (p && (p->p_flag & P_LP64))
1188 return(1);
1189 else
1190 return(0);
1191 }
1192 #endif
1193
1194 /*
1195 * Locate a process by number
1196 */
1197 proc_t
1198 pfind_locked(pid_t pid)
1199 {
1200 proc_t p;
1201 #if DEBUG
1202 proc_t q;
1203 #endif
1204
1205 if (!pid)
1206 return (kernproc);
1207
1208 for (p = PIDHASH(pid)->lh_first; p != 0; p = p->p_hash.le_next) {
1209 if (p->p_pid == pid) {
1210 #if DEBUG
1211 for (q = p->p_hash.le_next; q != 0; q = q->p_hash.le_next) {
1212 if ((p !=q) && (q->p_pid == pid))
1213 panic("two procs with same pid %p:%p:%d:%d\n", p, q, p->p_pid, q->p_pid);
1214 }
1215 #endif
1216 return (p);
1217 }
1218 }
1219 return (NULL);
1220 }
1221
1222 /*
1223 * Locate a zombie by PID
1224 */
1225 __private_extern__ proc_t
1226 pzfind(pid_t pid)
1227 {
1228 proc_t p;
1229
1230
1231 proc_list_lock();
1232
1233 for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next)
1234 if (p->p_pid == pid)
1235 break;
1236
1237 proc_list_unlock();
1238
1239 return (p);
1240 }
1241
1242 /*
1243 * Locate a process group by number
1244 */
1245
1246 struct pgrp *
1247 pgfind(pid_t pgid)
1248 {
1249 struct pgrp * pgrp;
1250
1251 proc_list_lock();
1252 pgrp = pgfind_internal(pgid);
1253 if ((pgrp == NULL) || ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) != 0))
1254 pgrp = PGRP_NULL;
1255 else
1256 pgrp->pg_refcount++;
1257 proc_list_unlock();
1258 return(pgrp);
1259 }
1260
1261
1262
1263 struct pgrp *
1264 pgfind_internal(pid_t pgid)
1265 {
1266 struct pgrp *pgrp;
1267
1268 for (pgrp = PGRPHASH(pgid)->lh_first; pgrp != 0; pgrp = pgrp->pg_hash.le_next)
1269 if (pgrp->pg_id == pgid)
1270 return (pgrp);
1271 return (NULL);
1272 }
1273
1274 void
1275 pg_rele(struct pgrp * pgrp)
1276 {
1277 if(pgrp == PGRP_NULL)
1278 return;
1279 pg_rele_dropref(pgrp);
1280 }
1281
1282 void
1283 pg_rele_dropref(struct pgrp * pgrp)
1284 {
1285 proc_list_lock();
1286 if ((pgrp->pg_refcount == 1) && ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) == PGRP_FLAG_TERMINATE)) {
1287 proc_list_unlock();
1288 pgdelete_dropref(pgrp);
1289 return;
1290 }
1291
1292 pgrp->pg_refcount--;
1293 proc_list_unlock();
1294 }
1295
1296 struct session *
1297 session_find_internal(pid_t sessid)
1298 {
1299 struct session *sess;
1300
1301 for (sess = SESSHASH(sessid)->lh_first; sess != 0; sess = sess->s_hash.le_next)
1302 if (sess->s_sid == sessid)
1303 return (sess);
1304 return (NULL);
1305 }
1306
1307
1308 /*
1309 * Make a new process ready to become a useful member of society by making it
1310 * visible in all the right places and initialize its own lists to empty.
1311 *
1312 * Parameters: parent The parent of the process to insert
1313 * child The child process to insert
1314 *
1315 * Returns: (void)
1316 *
1317 * Notes: Insert a child process into the parents process group, assign
1318 * the child the parent process pointer and PPID of the parent,
1319 * place it on the parents p_children list as a sibling,
1320 * initialize its own child list, place it in the allproc list,
1321 * insert it in the proper hash bucket, and initialize its
1322 * event list.
1323 */
void
pinsertchild(proc_t parent, proc_t child)
{
	struct pgrp * pg;

	/* Child starts with empty child and kevent lists of its own */
	LIST_INIT(&child->p_children);
	TAILQ_INIT(&child->p_evlist);
	child->p_pptr = parent;
	child->p_ppid = parent->p_pid;
	child->p_puniqueid = parent->p_uniqueid;
	child->p_xhighbits = 0;

	/* Join the parent's process group; proc_pgrp() took a ref, drop it */
	pg = proc_pgrp(parent);
	pgrp_add(pg, parent, child);
	pg_rele(pg);

	proc_list_lock();

#if CONFIG_MEMORYSTATUS
	memorystatus_add(child, TRUE);
#endif

	/* Link as the parent's newest child, then make globally visible */
	parent->p_childrencnt++;
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);

	LIST_INSERT_HEAD(&allproc, child, p_list);
	/* mark the completion of proc creation */
	child->p_listflag &= ~P_LIST_INCREATE;

	proc_list_unlock();
}
1355
1356 /*
1357 * Move p to a new or existing process group (and session)
1358 *
1359 * Returns: 0 Success
1360 * ESRCH No such process
1361 */
int
enterpgrp(proc_t p, pid_t pgid, int mksess)
{
	struct pgrp *pgrp;
	struct pgrp *mypgrp;
	struct session * procsp;

	/* Take refs: the target group (if it exists), our group, our session */
	pgrp = pgfind(pgid);
	mypgrp = proc_pgrp(p);
	procsp = proc_session(p);

#if DIAGNOSTIC
	if (pgrp != NULL && mksess)	/* firewalls */
		panic("enterpgrp: setsid into non-empty pgrp");
	if (SESS_LEADER(p, procsp))
		panic("enterpgrp: session leader attempted setpgrp");
#endif
	if (pgrp == PGRP_NULL) {
		pid_t savepid = p->p_pid;
		proc_t np = PROC_NULL;
		/*
		 * new process group
		 */
#if DIAGNOSTIC
		if (p->p_pid != pgid)
			panic("enterpgrp: new pgrp and pid != pgid");
#endif
		MALLOC_ZONE(pgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP,
		    M_WAITOK);
		if (pgrp == NULL)
			panic("enterpgrp: M_PGRP zone depleted");
		/*
		 * Revalidate p after the blocking allocation: it must still
		 * be findable and be the same process we started with,
		 * otherwise unwind all refs and fail.
		 */
		if ((np = proc_find(savepid)) == NULL || np != p) {
			if (np != PROC_NULL)
				proc_rele(np);
			if (mypgrp != PGRP_NULL)
				pg_rele(mypgrp);
			if (procsp != SESSION_NULL)
				session_rele(procsp);
			FREE_ZONE(pgrp, sizeof(struct pgrp), M_PGRP);
			return (ESRCH);
		}
		proc_rele(np);
		if (mksess) {
			struct session *sess;

			/*
			 * new session
			 */
			MALLOC_ZONE(sess, struct session *,
			    sizeof(struct session), M_SESSION, M_WAITOK);
			if (sess == NULL)
				panic("enterpgrp: M_SESSION zone depleted");
			/* p becomes session leader; session starts with one ref */
			sess->s_leader = p;
			sess->s_sid = p->p_pid;
			sess->s_count = 1;
			sess->s_ttyvp = NULL;
			sess->s_ttyp = TTY_NULL;
			sess->s_flags = 0;
			sess->s_listflags = 0;
			sess->s_ttypgrpid = NO_PID;
#if CONFIG_FINE_LOCK_GROUPS
			lck_mtx_init(&sess->s_mlock, proc_mlock_grp, proc_lck_attr);
#else
			lck_mtx_init(&sess->s_mlock, proc_lck_grp, proc_lck_attr);
#endif
			/* Inherit the login name from the old session */
			bcopy(procsp->s_login, sess->s_login,
			    sizeof(sess->s_login));
			/* A new session has no controlling terminal */
			OSBitAndAtomic(~((uint32_t)P_CONTROLT), &p->p_flag);
			proc_list_lock();
			LIST_INSERT_HEAD(SESSHASH(sess->s_sid), sess, s_hash);
			proc_list_unlock();
			pgrp->pg_session = sess;
#if DIAGNOSTIC
			if (p != current_proc())
				panic("enterpgrp: mksession and p != curproc");
#endif
		} else {
			/* New group joins p's existing session; take a session ref */
			proc_list_lock();
			pgrp->pg_session = procsp;

			if ((pgrp->pg_session->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
				panic("enterpgrp: providing ref to terminating session ");
			pgrp->pg_session->s_count++;
			proc_list_unlock();
		}
		pgrp->pg_id = pgid;
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_init(&pgrp->pg_mlock, proc_mlock_grp, proc_lck_attr);
#else
		lck_mtx_init(&pgrp->pg_mlock, proc_lck_grp, proc_lck_attr);
#endif
		LIST_INIT(&pgrp->pg_members);
		pgrp->pg_membercnt = 0;
		pgrp->pg_jobc = 0;
		/* Publish the new group in the hash with one ref (ours) */
		proc_list_lock();
		pgrp->pg_refcount = 1;
		pgrp->pg_listflags = 0;
		LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
		proc_list_unlock();
	} else if (pgrp == mypgrp) {
		/*
		 * Already a member of the target group: drop both refs
		 * (pgfind and proc_pgrp each took one) plus the session ref.
		 */
		pg_rele(pgrp);
		if (mypgrp != NULL)
			pg_rele(mypgrp);
		if (procsp != SESSION_NULL)
			session_rele(procsp);
		return (0);
	}

	if (procsp != SESSION_NULL)
		session_rele(procsp);
	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	fixjobc(p, mypgrp, 0);

	if(mypgrp != PGRP_NULL)
		pg_rele(mypgrp);
	/* Move p onto the new group, then drop our lookup ref on it */
	pgrp_replace(p, pgrp);
	pg_rele(pgrp);

	return(0);
}
1487
1488 /*
1489 * remove process from process group
1490 */
1491 int
1492 leavepgrp(proc_t p)
1493 {
1494
1495 pgrp_remove(p);
1496 return (0);
1497 }
1498
1499 /*
1500 * delete a process group
1501 */
/*
 * Tear down a process group once its membership and references allow it,
 * consuming the caller's reference.  May also free the owning session
 * when this group held its last session reference.
 */
static void
pgdelete_dropref(struct pgrp *pgrp)
{
	struct tty *ttyp;
	int emptypgrp = 1;
	struct session *sessp;


	pgrp_lock(pgrp);
	if (pgrp->pg_membercnt != 0) {
		emptypgrp = 0;
	}
	pgrp_unlock(pgrp);

	proc_list_lock();
	pgrp->pg_refcount--;
	/* Still has members (or regained one): just drop the ref and leave */
	if ((emptypgrp == 0) || (pgrp->pg_membercnt != 0)) {
		proc_list_unlock();
		return;
	}

	pgrp->pg_listflags |= PGRP_FLAG_TERMINATE;

	/* Other references remain; the last pg_rele_dropref() finishes this */
	if (pgrp->pg_refcount > 0) {
		proc_list_unlock();
		return;
	}

	/* Last ref and empty: unhash; no new lookups can find us now */
	pgrp->pg_listflags |= PGRP_FLAG_DEAD;
	LIST_REMOVE(pgrp, pg_hash);

	proc_list_unlock();

	/* Detach this group from the controlling terminal, if it was foreground */
	ttyp = SESSION_TP(pgrp->pg_session);
	if (ttyp != TTY_NULL) {
		if (ttyp->t_pgrp == pgrp) {
			tty_lock(ttyp);
			/* Re-check after acquiring the lock */
			if (ttyp->t_pgrp == pgrp) {
				ttyp->t_pgrp = NULL;
				pgrp->pg_session->s_ttypgrpid = NO_PID;
			}
			tty_unlock(ttyp);
		}
	}

	proc_list_lock();

	sessp = pgrp->pg_session;
	if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
		panic("pg_deleteref: manipulating refs of already terminating session");
	if (--sessp->s_count == 0) {
		/* This group held the last session ref: terminate the session too */
		if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
			panic("pg_deleteref: terminating already terminated session");
		sessp->s_listflags |= S_LIST_TERM;
		ttyp = SESSION_TP(sessp);
		LIST_REMOVE(sessp, s_hash);
		proc_list_unlock();
		if (ttyp != TTY_NULL) {
			tty_lock(ttyp);
			if (ttyp->t_session == sessp)
				ttyp->t_session = NULL;
			tty_unlock(ttyp);
		}
		proc_list_lock();
		sessp->s_listflags |= S_LIST_DEAD;
		if (sessp->s_count != 0)
			panic("pg_deleteref: freeing session in use");
		proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_destroy(&sessp->s_mlock, proc_mlock_grp);
#else
		lck_mtx_destroy(&sessp->s_mlock, proc_lck_grp);
#endif
		FREE_ZONE(sessp, sizeof(struct session), M_SESSION);
	} else
		proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
	lck_mtx_destroy(&pgrp->pg_mlock, proc_mlock_grp);
#else
	lck_mtx_destroy(&pgrp->pg_mlock, proc_lck_grp);
#endif
	FREE_ZONE(pgrp, sizeof(*pgrp), M_PGRP);
}
1586
1587
1588 /*
1589 * Adjust pgrp jobc counters when specified process changes process group.
1590 * We count the number of processes in each process group that "qualify"
1591 * the group for terminal job control (those with a parent in a different
1592 * process group of the same session). If that count reaches zero, the
1593 * process group becomes orphaned. Check both the specified process'
1594 * process group and that of its children.
1595 * entering == 0 => p is leaving specified group.
1596 * entering == 1 => p is entering specified group.
1597 */
1598 int
1599 fixjob_callback(proc_t p, void * arg)
1600 {
1601 struct fixjob_iterargs *fp;
1602 struct pgrp * pg, *hispg;
1603 struct session * mysession, *hissess;
1604 int entering;
1605
1606 fp = (struct fixjob_iterargs *)arg;
1607 pg = fp->pg;
1608 mysession = fp->mysession;
1609 entering = fp->entering;
1610
1611 hispg = proc_pgrp(p);
1612 hissess = proc_session(p);
1613
1614 if ((hispg != pg) &&
1615 (hissess == mysession)) {
1616 pgrp_lock(hispg);
1617 if (entering) {
1618 hispg->pg_jobc++;
1619 pgrp_unlock(hispg);
1620 } else if (--hispg->pg_jobc == 0) {
1621 pgrp_unlock(hispg);
1622 orphanpg(hispg);
1623 } else
1624 pgrp_unlock(hispg);
1625 }
1626 if (hissess != SESSION_NULL)
1627 session_rele(hissess);
1628 if (hispg != PGRP_NULL)
1629 pg_rele(hispg);
1630
1631 return(PROC_RETURNED);
1632 }
1633
/*
 * Adjust job-control qualification counts when p enters (entering == 1)
 * or leaves (entering == 0) pgrp.  See block comment above for the
 * counting rules; orphanpg() is invoked when a count reaches zero.
 */
void
fixjobc(proc_t p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp = PGRP_NULL;
	struct session *hissess = SESSION_NULL;
	struct session *mysession = pgrp->pg_session;
	proc_t parent;
	struct fixjob_iterargs fjarg;
	boolean_t proc_parent_self;

	/*
	 * Check if p's parent is current proc, if yes then no need to take
	 * a ref; calling proc_parent with current proc as parent may
	 * deadlock if current proc is exiting.
	 */
	proc_parent_self = proc_parent_is_currentproc(p);
	if (proc_parent_self)
		parent = current_proc();
	else
		parent = proc_parent(p);

	if (parent != PROC_NULL) {
		/* Take refs on the parent's group/session for the comparison below */
		hispgrp = proc_pgrp(parent);
		hissess = proc_session(parent);
		if (!proc_parent_self)
			proc_rele(parent);
	}


	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	if ((hispgrp != pgrp) &&
	    (hissess == mysession)) {
		pgrp_lock(pgrp);
		if (entering) {
			pgrp->pg_jobc++;
			pgrp_unlock(pgrp);
		}else if (--pgrp->pg_jobc == 0) {
			/* count hit zero: pgrp just became orphaned */
			pgrp_unlock(pgrp);
			orphanpg(pgrp);
		} else
			pgrp_unlock(pgrp);
	}

	if (hissess != SESSION_NULL)
		session_rele(hissess);
	if (hispgrp != PGRP_NULL)
		pg_rele(hispgrp);

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	fjarg.pg = pgrp;
	fjarg.mysession = mysession;
	fjarg.entering = entering;
	proc_childrenwalk(p, fixjob_callback, &fjarg);
}
1695
1696 /*
1697 * A process group has become orphaned; if there are any stopped processes in
 * the group, hang up all processes in that group.
1699 */
1700 static void
1701 orphanpg(struct pgrp *pgrp)
1702 {
1703 pid_t *pid_list;
1704 proc_t p;
1705 vm_size_t pid_list_size = 0;
1706 vm_size_t pid_list_size_needed = 0;
1707 int pid_count = 0;
1708 int pid_count_available = 0;
1709
1710 assert(pgrp != NULL);
1711
1712 /* allocate outside of the pgrp_lock */
1713 for (;;) {
1714 pgrp_lock(pgrp);
1715
1716 boolean_t should_iterate = FALSE;
1717 pid_count_available = 0;
1718
1719 PGMEMBERS_FOREACH(pgrp, p) {
1720 pid_count_available++;
1721
1722 if (p->p_stat == SSTOP) {
1723 should_iterate = TRUE;
1724 }
1725 }
1726
1727 if (pid_count_available == 0 || !should_iterate) {
1728 pgrp_unlock(pgrp);
1729 return;
1730 }
1731
1732 pid_list_size_needed = pid_count_available * sizeof(pid_t);
1733 if (pid_list_size >= pid_list_size_needed) {
1734 break;
1735 }
1736 pgrp_unlock(pgrp);
1737
1738 if (pid_list_size != 0) {
1739 kfree(pid_list, pid_list_size);
1740 }
1741 pid_list = kalloc(pid_list_size_needed);
1742 if (!pid_list) {
1743 return;
1744 }
1745 pid_list_size = pid_list_size_needed;
1746 }
1747
1748 /* no orphaned processes */
1749 if (pid_list_size == 0) {
1750 pgrp_unlock(pgrp);
1751 return;
1752 }
1753
1754 PGMEMBERS_FOREACH(pgrp, p) {
1755 pid_list[pid_count++] = proc_pid(p);
1756 if (pid_count >= pid_count_available) {
1757 break;
1758 }
1759 }
1760 pgrp_unlock(pgrp);
1761
1762 if (pid_count == 0) {
1763 goto out;
1764 }
1765
1766 for (int i = 0; i < pid_count; i++) {
1767 /* do not handle kernproc */
1768 if (pid_list[i] == 0) {
1769 continue;
1770 }
1771 p = proc_find(pid_list[i]);
1772 if (!p) {
1773 continue;
1774 }
1775
1776 proc_transwait(p, 0);
1777 pt_setrunnable(p);
1778 psignal(p, SIGHUP);
1779 psignal(p, SIGCONT);
1780 proc_rele(p);
1781 }
1782
1783 out:
1784 kfree(pid_list, pid_list_size);
1785 return;
1786 }
1787
1788 int
1789 proc_is_classic(proc_t p __unused)
1790 {
1791 return (0);
1792 }
1793
1794 /* XXX Why does this function exist? Need to kill it off... */
1795 proc_t
1796 current_proc_EXTERNAL(void)
1797 {
1798 return (current_proc());
1799 }
1800
1801 int
1802 proc_is_forcing_hfs_case_sensitivity(proc_t p)
1803 {
1804 return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY) ? 1 : 0;
1805 }
1806
1807 #if CONFIG_COREDUMP
1808 /*
1809 * proc_core_name(name, uid, pid)
1810 * Expand the name described in corefilename, using name, uid, and pid.
1811 * corefilename is a printf-like string, with three format specifiers:
1812 * %N name of process ("name")
1813 * %P process id (pid)
1814 * %U user id (uid)
1815 * For example, "%N.core" is the default; they can be disabled completely
1816 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
1817 * This is controlled by the sysctl variable kern.corefile (see above).
1818 */
1819 __private_extern__ int
1820 proc_core_name(const char *name, uid_t uid, pid_t pid, char *cf_name,
1821 size_t cf_name_len)
1822 {
1823 const char *format, *appendstr;
1824 char id_buf[11]; /* Buffer for pid/uid -- max 4B */
1825 size_t i, l, n;
1826
1827 if (cf_name == NULL)
1828 goto toolong;
1829
1830 format = corefilename;
1831 for (i = 0, n = 0; n < cf_name_len && format[i]; i++) {
1832 switch (format[i]) {
1833 case '%': /* Format character */
1834 i++;
1835 switch (format[i]) {
1836 case '%':
1837 appendstr = "%";
1838 break;
1839 case 'N': /* process name */
1840 appendstr = name;
1841 break;
1842 case 'P': /* process id */
1843 snprintf(id_buf, sizeof(id_buf), "%u", pid);
1844 appendstr = id_buf;
1845 break;
1846 case 'U': /* user id */
1847 snprintf(id_buf, sizeof(id_buf), "%u", uid);
1848 appendstr = id_buf;
1849 break;
1850 case '\0': /* format string ended in % symbol */
1851 goto endofstring;
1852 default:
1853 appendstr = "";
1854 log(LOG_ERR,
1855 "Unknown format character %c in `%s'\n",
1856 format[i], format);
1857 }
1858 l = strlen(appendstr);
1859 if ((n + l) >= cf_name_len)
1860 goto toolong;
1861 bcopy(appendstr, cf_name + n, l);
1862 n += l;
1863 break;
1864 default:
1865 cf_name[n++] = format[i];
1866 }
1867 }
1868 if (format[i] != '\0')
1869 goto toolong;
1870 return (0);
1871 toolong:
1872 log(LOG_ERR, "pid %ld (%s), uid (%u): corename is too long\n",
1873 (long)pid, name, (uint32_t)uid);
1874 return (1);
1875 endofstring:
1876 log(LOG_ERR, "pid %ld (%s), uid (%u): unexpected end of string after %% token\n",
1877 (long)pid, name, (uint32_t)uid);
1878 return (1);
1879 }
1880 #endif /* CONFIG_COREDUMP */
1881
1882 /* Code Signing related routines */
1883
1884 int
1885 csops(__unused proc_t p, struct csops_args *uap, __unused int32_t *retval)
1886 {
1887 return(csops_internal(uap->pid, uap->ops, uap->useraddr,
1888 uap->usersize, USER_ADDR_NULL));
1889 }
1890
1891 int
1892 csops_audittoken(__unused proc_t p, struct csops_audittoken_args *uap, __unused int32_t *retval)
1893 {
1894 if (uap->uaudittoken == USER_ADDR_NULL)
1895 return(EINVAL);
1896 return(csops_internal(uap->pid, uap->ops, uap->useraddr,
1897 uap->usersize, uap->uaudittoken));
1898 }
1899
1900 static int
1901 csops_copy_token(void *start, size_t length, user_size_t usize, user_addr_t uaddr)
1902 {
1903 char fakeheader[8] = { 0 };
1904 int error;
1905
1906 if (usize < sizeof(fakeheader))
1907 return ERANGE;
1908
1909 /* if no blob, fill in zero header */
1910 if (NULL == start) {
1911 start = fakeheader;
1912 length = sizeof(fakeheader);
1913 } else if (usize < length) {
1914 /* ... if input too short, copy out length of entitlement */
1915 uint32_t length32 = htonl((uint32_t)length);
1916 memcpy(&fakeheader[4], &length32, sizeof(length32));
1917
1918 error = copyout(fakeheader, uaddr, sizeof(fakeheader));
1919 if (error == 0)
1920 return ERANGE; /* input buffer to short, ERANGE signals that */
1921 return error;
1922 }
1923 return copyout(start, uaddr, length);
1924 }
1925
/*
 * csops_internal
 *
 * Backend for csops(2)/csops_audittoken(2): performs the code-signing
 * query or state-change `ops` on the process identified by `pid`
 * (0 means the caller).  Non-query ops require the caller to be the
 * target itself or have superuser credentials.  When `uaudittoken` is
 * supplied, its pid/idversion must match the target.  Returns 0 or an
 * errno; always drops the proc_find() reference before returning.
 */
static int
csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaudittoken)
{
	size_t usize = (size_t)CAST_DOWN(size_t, usersize);
	proc_t pt;
	int forself;
	int error;
	vnode_t tvp;
	off_t toff;
	unsigned char cdhash[SHA1_RESULTLEN];
	audit_token_t token;
	unsigned int upid=0, uidversion = 0;

	forself = error = 0;

	if (pid == 0)
		pid = proc_selfpid();
	if (pid == proc_selfpid())
		forself = 1;


	/* Query ops are open to everyone; mutating ops need self or root */
	switch (ops) {
		case CS_OPS_STATUS:
		case CS_OPS_CDHASH:
		case CS_OPS_PIDOFFSET:
		case CS_OPS_ENTITLEMENTS_BLOB:
		case CS_OPS_IDENTITY:
		case CS_OPS_BLOB:
			break;	/* not restricted to root */
		default:
			if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE)
				return(EPERM);
			break;
	}

	pt = proc_find(pid);
	if (pt == PROC_NULL)
		return(ESRCH);

	upid = pt->p_pid;
	uidversion = pt->p_idversion;
	if (uaudittoken != USER_ADDR_NULL) {

		error = copyin(uaudittoken, &token, sizeof(audit_token_t));
		if (error != 0)
			goto out;
		/* verify the audit token pid/idversion matches with proc */
		if ((token.val[5] != upid) || (token.val[7] != uidversion)) {
			error = ESRCH;
			goto out;
		}
	}

#if CONFIG_MACF
	/* MAC policy check: set-type ops vs. get-type ops */
	switch (ops) {
		case CS_OPS_MARKINVALID:
		case CS_OPS_MARKHARD:
		case CS_OPS_MARKKILL:
		case CS_OPS_MARKRESTRICT:
		case CS_OPS_SET_STATUS:
		case CS_OPS_CLEARINSTALLER:
		case CS_OPS_CLEARPLATFORM:
			if ((error = mac_proc_check_set_cs_info(current_proc(), pt, ops)))
				goto out;
			break;
		default:
			if ((error = mac_proc_check_get_cs_info(current_proc(), pt, ops)))
				goto out;
	}
#endif

	switch (ops) {

		/* Return the target's effective code-signing flags */
		case CS_OPS_STATUS: {
			uint32_t retflags;

			proc_lock(pt);
			retflags = pt->p_csflags;
			if (cs_enforcement(pt))
				retflags |= CS_ENFORCEMENT;
			if (csproc_get_platform_binary(pt))
				retflags |= CS_PLATFORM_BINARY;
			if (csproc_get_platform_path(pt))
				retflags |= CS_PLATFORM_PATH;
			proc_unlock(pt);

			if (uaddr != USER_ADDR_NULL)
				error = copyout(&retflags, uaddr, sizeof(uint32_t));
			break;
		}
		/* Invalidate the target's signature; kill it if CS_KILL is set */
		case CS_OPS_MARKINVALID:
			proc_lock(pt);
			if ((pt->p_csflags & CS_VALID) == CS_VALID) {	/* is currently valid */
				pt->p_csflags &= ~CS_VALID;	/* set invalid */
				if ((pt->p_csflags & CS_KILL) == CS_KILL) {
					pt->p_csflags |= CS_KILLED;
					proc_unlock(pt);
					if (cs_debug) {
						printf("CODE SIGNING: marked invalid by pid %d: "
						       "p=%d[%s] honoring CS_KILL, final status 0x%x\n",
						       proc_selfpid(), pt->p_pid, pt->p_comm, pt->p_csflags);
					}
					psignal(pt, SIGKILL);
				} else
					proc_unlock(pt);
			} else
				proc_unlock(pt);

			break;

		/* Set CS_HARD; fails if the target is already invalid */
		case CS_OPS_MARKHARD:
			proc_lock(pt);
			pt->p_csflags |= CS_HARD;
			if ((pt->p_csflags & CS_VALID) == 0) {
				/* @@@ allow? reject? kill? @@@ */
				proc_unlock(pt);
				error = EINVAL;
				goto out;
			} else
				proc_unlock(pt);
			break;

		/* Set CS_KILL; an already-invalid target is killed immediately */
		case CS_OPS_MARKKILL:
			proc_lock(pt);
			pt->p_csflags |= CS_KILL;
			if ((pt->p_csflags & CS_VALID) == 0) {
				proc_unlock(pt);
				psignal(pt, SIGKILL);
			} else
				proc_unlock(pt);
			break;

		/* Return the target's text offset; note the early return path */
		case CS_OPS_PIDOFFSET:
			toff = pt->p_textoff;
			proc_rele(pt);
			error = copyout(&toff, uaddr, sizeof(toff));
			return(error);

		/* Return the SHA-1 cdhash of the target's text vnode */
		case CS_OPS_CDHASH:

			/* pt already holds a reference on its p_textvp */
			tvp = pt->p_textvp;
			toff = pt->p_textoff;

			if (tvp == NULLVP || usize != SHA1_RESULTLEN) {
				proc_rele(pt);
				return EINVAL;
			}

			error = vn_getcdhash(tvp, toff, cdhash);
			proc_rele(pt);

			if (error == 0) {
				error = copyout(cdhash, uaddr, sizeof (cdhash));
			}

			return error;

		/* Copy out the target's entitlements blob (valid/debugged only) */
		case CS_OPS_ENTITLEMENTS_BLOB: {
			void *start;
			size_t length;

			proc_lock(pt);

			if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
				proc_unlock(pt);
				error = EINVAL;
				break;
			}

			error = cs_entitlements_blob_get(pt, &start, &length);
			proc_unlock(pt);
			if (error)
				break;

			error = csops_copy_token(start, length, usize, uaddr);
			break;
		}
		case CS_OPS_MARKRESTRICT:
			proc_lock(pt);
			pt->p_csflags |= CS_RESTRICT;
			proc_unlock(pt);
			break;

		/* OR user-supplied flags (whitelisted subset) into p_csflags */
		case CS_OPS_SET_STATUS: {
			uint32_t flags;

			if (usize < sizeof(flags)) {
				error = ERANGE;
				break;
			}

			error = copyin(uaddr, &flags, sizeof(flags));
			if (error)
				break;

			/* only allow setting a subset of all code sign flags */
			flags &=
			    CS_HARD | CS_EXEC_SET_HARD |
			    CS_KILL | CS_EXEC_SET_KILL |
			    CS_RESTRICT |
			    CS_REQUIRE_LV |
			    CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT;

			proc_lock(pt);
			if (pt->p_csflags & CS_VALID)
				pt->p_csflags |= flags;
			else
				error = EINVAL;
			proc_unlock(pt);

			break;
		}
		/* Copy out the target's whole code-signing blob */
		case CS_OPS_BLOB: {
			void *start;
			size_t length;

			proc_lock(pt);
			if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
				proc_unlock(pt);
				error = EINVAL;
				break;
			}

			error = cs_blob_get(pt, &start, &length);
			proc_unlock(pt);
			if (error)
				break;

			error = csops_copy_token(start, length, usize, uaddr);
			break;
		}
		/* Copy out the signing identity, prefixed with a blob header */
		case CS_OPS_IDENTITY: {
			const char *identity;
			uint8_t fakeheader[8];
			uint32_t idlen;
			size_t length;

			/*
			 * Make identity have a blob header to make it
			 * easier on userland to guess the identity
			 * length.
			 */
			if (usize < sizeof(fakeheader)) {
				error = ERANGE;
				break;
			}
			memset(fakeheader, 0, sizeof(fakeheader));

			proc_lock(pt);
			if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
				proc_unlock(pt);
				error = EINVAL;
				break;
			}

			identity = cs_identity_get(pt);
			proc_unlock(pt);
			if (identity == NULL) {
				error = ENOENT;
				break;
			}

			length = strlen(identity) + 1; /* include NUL */
			idlen = htonl(length + sizeof(fakeheader));
			memcpy(&fakeheader[4], &idlen, sizeof(idlen));

			error = copyout(fakeheader, uaddr, sizeof(fakeheader));
			if (error)
				break;

			if (usize < sizeof(fakeheader) + length)
				error = ERANGE;
			else if (usize > sizeof(fakeheader))
				error = copyout(identity, uaddr + sizeof(fakeheader), length);

			break;
		}

		case CS_OPS_CLEARINSTALLER:
			proc_lock(pt);
			pt->p_csflags &= ~(CS_INSTALLER | CS_DATAVAULT_CONTROLLER | CS_EXEC_INHERIT_SIP);
			proc_unlock(pt);
			break;

		/* Dev/debug only: strip the platform-binary markings */
		case CS_OPS_CLEARPLATFORM:
#if DEVELOPMENT || DEBUG
			if (cs_enforcement_enable) {
				error = ENOTSUP;
				break;
			}

#if CONFIG_CSR
			if (csr_check(CSR_ALLOW_APPLE_INTERNAL) != 0) {
				error = ENOTSUP;
				break;
			}
#endif

			proc_lock(pt);
			pt->p_csflags &= ~(CS_PLATFORM_BINARY|CS_PLATFORM_PATH);
			csproc_clear_platform_binary(pt);
			proc_unlock(pt);
			break;
#else
			error = ENOTSUP;
			break;
#endif /* !DEVELOPMENT || DEBUG */

		default:
			error = EINVAL;
			break;
	}
out:
	proc_rele(pt);
	return(error);
}
2243
2244 int
2245 proc_iterate(
2246 unsigned int flags,
2247 proc_iterate_fn_t callout,
2248 void *arg,
2249 proc_iterate_fn_t filterfn,
2250 void *filterarg)
2251 {
2252 pid_t *pid_list;
2253 vm_size_t pid_list_size = 0;
2254 vm_size_t pid_list_size_needed = 0;
2255 int pid_count = 0;
2256 int pid_count_available = 0;
2257
2258 assert(callout != NULL);
2259
2260 /* allocate outside of the proc_list_lock */
2261 for (;;) {
2262 proc_list_lock();
2263
2264 pid_count_available = nprocs + 1; //kernel_task is not counted in nprocs
2265 assert(pid_count_available > 0);
2266
2267 pid_list_size_needed = pid_count_available * sizeof(pid_t);
2268 if (pid_list_size >= pid_list_size_needed) {
2269 break;
2270 }
2271 proc_list_unlock();
2272
2273 if (pid_list_size != 0) {
2274 kfree(pid_list, pid_list_size);
2275 }
2276 pid_list = kalloc(pid_list_size_needed);
2277 if (!pid_list) {
2278 return 1;
2279 }
2280 pid_list_size = pid_list_size_needed;
2281 }
2282
2283 /* filter pids into pid_list */
2284
2285 if (flags & PROC_ALLPROCLIST) {
2286 proc_t p;
2287 ALLPROC_FOREACH(p) {
2288 /* ignore processes that are being forked */
2289 if (p->p_stat == SIDL) {
2290 continue;
2291 }
2292 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
2293 continue;
2294 }
2295
2296 pid_list[pid_count++] = proc_pid(p);
2297 if (pid_count >= pid_count_available) {
2298 break;
2299 }
2300 }
2301 }
2302
2303 if ((pid_count < pid_count_available) &&
2304 (flags & PROC_ZOMBPROCLIST))
2305 {
2306 proc_t p;
2307 ZOMBPROC_FOREACH(p) {
2308 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
2309 continue;
2310 }
2311
2312 pid_list[pid_count++] = proc_pid(p);
2313 if (pid_count >= pid_count_available) {
2314 break;
2315 }
2316 }
2317 }
2318
2319 proc_list_unlock();
2320
2321 /* call callout on processes in the pid_list */
2322
2323 for (int i = 0; i < pid_count; i++) {
2324 proc_t p = proc_find(pid_list[i]);
2325 if (p) {
2326 if ((flags & PROC_NOWAITTRANS) == 0) {
2327 proc_transwait(p, 0);
2328 }
2329 int callout_ret = callout(p, arg);
2330
2331 switch (callout_ret) {
2332 case PROC_RETURNED_DONE:
2333 proc_rele(p);
2334 /* FALLTHROUGH */
2335 case PROC_CLAIMED_DONE:
2336 goto out;
2337
2338 case PROC_RETURNED:
2339 proc_rele(p);
2340 /* FALLTHROUGH */
2341 case PROC_CLAIMED:
2342 break;
2343
2344 default:
2345 panic("proc_iterate: callout returned %d for pid %d",
2346 callout_ret, pid_list[i]);
2347 break;
2348 }
2349 } else if (flags & PROC_ZOMBPROCLIST) {
2350 p = proc_find_zombref(pid_list[i]);
2351 if (!p) {
2352 continue;
2353 }
2354 int callout_ret = callout(p, arg);
2355
2356 switch (callout_ret) {
2357 case PROC_RETURNED_DONE:
2358 proc_drop_zombref(p);
2359 /* FALLTHROUGH */
2360 case PROC_CLAIMED_DONE:
2361 goto out;
2362
2363 case PROC_RETURNED:
2364 proc_drop_zombref(p);
2365 /* FALLTHROUGH */
2366 case PROC_CLAIMED:
2367 break;
2368
2369 default:
2370 panic("proc_iterate: callout returned %d for zombie pid %d",
2371 callout_ret, pid_list[i]);
2372 break;
2373 }
2374 }
2375 }
2376
2377 out:
2378 kfree(pid_list, pid_list_size);
2379 return 0;
2380
2381 }
2382
/*
 * proc_rebootscan
 *
 * Shutdown-time walk of allproc: run `callout` on every process that
 * passes `filterfn`.  The scan restarts from the head of allproc after
 * every callout, because the list can change while the proc list lock
 * is dropped around the (possibly blocking) callout.  Terminates when a
 * full pass finds no matching, referenceable process.
 */
void
proc_rebootscan(
	proc_iterate_fn_t callout,
	void *arg,
	proc_iterate_fn_t filterfn,
	void *filterarg)
{
	proc_t p;

	assert(callout != NULL);

	proc_shutdown_exitcount = 0;

restart_foreach:

	proc_list_lock();

	ALLPROC_FOREACH(p) {
		if ((filterfn != NULL) && filterfn(p, filterarg) == 0) {
			continue;
		}
		/* take a ref under the list lock; skip if the proc is going away */
		p = proc_ref_locked(p);
		if (!p) {
			continue;
		}

		proc_list_unlock();

		proc_transwait(p, 0);
		(void)callout(p, arg);
		proc_rele(p);

		/* list may have changed while unlocked: restart from the top */
		goto restart_foreach;
	}

	proc_list_unlock();
}
2420
2421 int
2422 proc_childrenwalk(
2423 proc_t parent,
2424 proc_iterate_fn_t callout,
2425 void *arg)
2426 {
2427 pid_t *pid_list;
2428 vm_size_t pid_list_size = 0;
2429 vm_size_t pid_list_size_needed = 0;
2430 int pid_count = 0;
2431 int pid_count_available = 0;
2432
2433 assert(parent != NULL);
2434 assert(callout != NULL);
2435
2436 for (;;) {
2437 proc_list_lock();
2438
2439 pid_count_available = parent->p_childrencnt;
2440 if (pid_count_available == 0) {
2441 proc_list_unlock();
2442 return 0;
2443 }
2444
2445 pid_list_size_needed = pid_count_available * sizeof(pid_t);
2446 if (pid_list_size >= pid_list_size_needed) {
2447 break;
2448 }
2449 proc_list_unlock();
2450
2451 if (pid_list_size != 0) {
2452 kfree(pid_list, pid_list_size);
2453 }
2454 pid_list = kalloc(pid_list_size_needed);
2455 if (!pid_list) {
2456 return 1;
2457 }
2458 pid_list_size = pid_list_size_needed;
2459 }
2460
2461 proc_t p;
2462 PCHILDREN_FOREACH(parent, p) {
2463 if (p->p_stat == SIDL) {
2464 continue;
2465 }
2466
2467 pid_list[pid_count++] = proc_pid(p);
2468 if (pid_count >= pid_count_available) {
2469 break;
2470 }
2471 }
2472
2473 proc_list_unlock();
2474
2475 for (int i = 0; i < pid_count; i++) {
2476 p = proc_find(pid_list[i]);
2477 if (!p) {
2478 continue;
2479 }
2480
2481 int callout_ret = callout(p, arg);
2482
2483 switch (callout_ret) {
2484 case PROC_RETURNED_DONE:
2485 proc_rele(p);
2486 /* FALLTHROUGH */
2487 case PROC_CLAIMED_DONE:
2488 goto out;
2489
2490 case PROC_RETURNED:
2491 proc_rele(p);
2492 /* FALLTHROUGH */
2493 case PROC_CLAIMED:
2494 break;
2495 default:
2496 panic("proc_childrenwalk: callout returned %d for pid %d",
2497 callout_ret, pid_list[i]);
2498 break;
2499 }
2500 }
2501
2502 out:
2503 kfree(pid_list, pid_list_size);
2504 return 0;
2505 }
2506
2507 int
2508 pgrp_iterate(
2509 struct pgrp *pgrp,
2510 unsigned int flags,
2511 proc_iterate_fn_t callout,
2512 void * arg,
2513 proc_iterate_fn_t filterfn,
2514 void * filterarg)
2515 {
2516 pid_t *pid_list;
2517 proc_t p;
2518 vm_size_t pid_list_size = 0;
2519 vm_size_t pid_list_size_needed = 0;
2520 int pid_count = 0;
2521 int pid_count_available = 0;
2522
2523 pid_t pgid;
2524
2525 assert(pgrp != NULL);
2526 assert(callout != NULL);
2527
2528 for (;;) {
2529 pgrp_lock(pgrp);
2530
2531 pid_count_available = pgrp->pg_membercnt;
2532 if (pid_count_available == 0) {
2533 pgrp_unlock(pgrp);
2534 return 0;
2535 }
2536
2537 pid_list_size_needed = pid_count_available * sizeof(pid_t);
2538 if (pid_list_size >= pid_list_size_needed) {
2539 break;
2540 }
2541 pgrp_unlock(pgrp);
2542
2543 if (pid_list_size != 0) {
2544 kfree(pid_list, pid_list_size);
2545 }
2546 pid_list = kalloc(pid_list_size_needed);
2547 if (!pid_list) {
2548 return 1;
2549 }
2550 pid_list_size = pid_list_size_needed;
2551 }
2552
2553 pgid = pgrp->pg_id;
2554
2555 PGMEMBERS_FOREACH(pgrp, p) {
2556 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
2557 continue;;
2558 }
2559 pid_list[pid_count++] = proc_pid(p);
2560 if (pid_count >= pid_count_available) {
2561 break;
2562 }
2563 }
2564
2565 pgrp_unlock(pgrp);
2566
2567 if (flags & PGRP_DROPREF) {
2568 pg_rele(pgrp);
2569 }
2570
2571 for (int i = 0; i< pid_count; i++) {
2572 /* do not handle kernproc */
2573 if (pid_list[i] == 0) {
2574 continue;
2575 }
2576 p = proc_find(pid_list[i]);
2577 if (!p) {
2578 continue;
2579 }
2580 if (p->p_pgrpid != pgid) {
2581 proc_rele(p);
2582 continue;
2583 }
2584
2585 int callout_ret = callout(p, arg);
2586
2587 switch (callout_ret) {
2588 case PROC_RETURNED:
2589 proc_rele(p);
2590 /* FALLTHROUGH */
2591 case PROC_CLAIMED:
2592 break;
2593
2594 case PROC_RETURNED_DONE:
2595 proc_rele(p);
2596 /* FALLTHROUGH */
2597 case PROC_CLAIMED_DONE:
2598 goto out;
2599
2600 default:
2601 panic("pgrp_iterate: callout returned %d for pid %d",
2602 callout_ret, pid_list[i]);
2603 }
2604 }
2605
2606 out:
2607 kfree(pid_list, pid_list_size);
2608 return 0;
2609 }
2610
/*
 * Add `child` to `pgrp`, placing it after `parent` on the member list
 * when a parent is given.  Caller must hold a reference on pgrp.
 */
static void
pgrp_add(struct pgrp * pgrp, struct proc * parent, struct proc * child)
{
	proc_list_lock();
	child->p_pgrp = pgrp;
	child->p_pgrpid = pgrp->pg_id;
	child->p_listflag |= P_LIST_INPGRP;
	/*
	 * When pgrp is being freed, a process can still
	 * request addition using setpgid from bash when
	 * login is terminated (login cycler) return ESRCH
	 * Safe to hold lock due to refcount on pgrp
	 */
	if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
		pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	if ((pgrp->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
		panic("pgrp_add : pgrp is dead adding process");
	proc_list_unlock();

	pgrp_lock(pgrp);
	pgrp->pg_membercnt++;
	if ( parent != PROC_NULL) {
		LIST_INSERT_AFTER(parent, child, p_pglist);
	}else {
		LIST_INSERT_HEAD(&pgrp->pg_members, child, p_pglist);
	}
	pgrp_unlock(pgrp);

	proc_list_lock();
	/* Re-clear TERMINATE if it was re-set while we held only the pgrp lock */
	if (((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (pgrp->pg_membercnt != 0)) {
		pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}
	proc_list_unlock();
}
2647
/*
 * Unlink process 'p' from its process group.
 *
 * proc_pgrp() returns the group with an extra reference held; that
 * reference is consumed either by pgdelete_dropref() (last member --
 * group is torn down) or by pg_rele() (members remain).
 */
static void
pgrp_remove(struct proc * p)
{
	struct pgrp * pg;

	pg = proc_pgrp(p);

	proc_list_lock();
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INPGRP) == 0)
		panic("removing from pglist but no named ref\n");
#endif
	/* Clear the proc's view of its group before touching the group itself. */
	p->p_pgrpid = PGRPID_DEAD;
	p->p_listflag &= ~P_LIST_INPGRP;
	p->p_pgrp = NULL;
	proc_list_unlock();

	if (pg == PGRP_NULL)
		panic("pgrp_remove: pg is NULL");
	pgrp_lock(pg);
	pg->pg_membercnt--;

	if (pg->pg_membercnt < 0)
		panic("pgprp: -ve membercnt pgprp:%p p:%p\n",pg, p);

	LIST_REMOVE(p, p_pglist);
	if (pg->pg_members.lh_first == 0) {
		/* Last member out: tear the group down, consuming our ref. */
		pgrp_unlock(pg);
		pgdelete_dropref(pg);
	} else {
		pgrp_unlock(pg);
		pg_rele(pg);
	}
}
2682
2683
/* cannot use proc_pgrp as it maybe stalled */
/*
 * Atomically (with respect to readers such as proc_pgrp()) move process
 * 'p' from its current process group to 'newpg'.
 *
 * The P_LIST_PGRPTRANS flag marks the transition window; readers that
 * find it set sleep on &p->p_pgrpid until we clear the flag and wake
 * them at the end.  The removal half mirrors pgrp_remove() and the
 * insertion half mirrors pgrp_add().
 */
static void
pgrp_replace(struct proc * p, struct pgrp * newpg)
{
	struct pgrp * oldpg;



	proc_list_lock();

	/* Wait out any transition already in flight for this proc. */
	while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
		p->p_listflag |= P_LIST_PGRPTRWAIT;
		(void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
	}

	p->p_listflag |= P_LIST_PGRPTRANS;

	oldpg = p->p_pgrp;
	if (oldpg == PGRP_NULL)
		panic("pgrp_replace: oldpg NULL");
	/* Take a ref on the old group so it survives until we drop it below. */
	oldpg->pg_refcount++;
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INPGRP) == 0)
		panic("removing from pglist but no named ref\n");
#endif
	p->p_pgrpid = PGRPID_DEAD;
	p->p_listflag &= ~P_LIST_INPGRP;
	p->p_pgrp = NULL;

	proc_list_unlock();

	pgrp_lock(oldpg);
	oldpg->pg_membercnt--;
	if (oldpg->pg_membercnt < 0)
		panic("pgprp: -ve membercnt pgprp:%p p:%p\n",oldpg, p);
	LIST_REMOVE(p, p_pglist);
	if (oldpg->pg_members.lh_first == 0) {
		/* Last member out of the old group: tear it down. */
		pgrp_unlock(oldpg);
		pgdelete_dropref(oldpg);
	} else {
		pgrp_unlock(oldpg);
		pg_rele(oldpg);
	}

	proc_list_lock();
	p->p_pgrp = newpg;
	p->p_pgrpid = newpg->pg_id;
	p->p_listflag |= P_LIST_INPGRP;
	/*
	 * While a pgrp is being freed, a process can still request
	 * addition via setpgid() (e.g. bash when login is terminated --
	 * the "login cycler").  It is safe to hold the list lock here
	 * because of the refcount on the pgrp.
	 */
	if ((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
		newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	if ((newpg->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
		panic("pgrp_add : pgrp is dead adding process");
	proc_list_unlock();

	pgrp_lock(newpg);
	newpg->pg_membercnt++;
	LIST_INSERT_HEAD(&newpg->pg_members, p, p_pglist);
	pgrp_unlock(newpg);

	proc_list_lock();
	if (((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (newpg->pg_membercnt != 0)) {
		newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	/* End of transition: wake anyone who blocked on PGRPTRANS above. */
	p->p_listflag &= ~P_LIST_PGRPTRANS;
	if ((p->p_listflag & P_LIST_PGRPTRWAIT) == P_LIST_PGRPTRWAIT) {
		p->p_listflag &= ~P_LIST_PGRPTRWAIT;
		wakeup(&p->p_pgrpid);

	}
	proc_list_unlock();
}
2764
/* Acquire the per-process-group mutex. */
void
pgrp_lock(struct pgrp * pgrp)
{
	lck_mtx_lock(&pgrp->pg_mlock);
}
2770
/* Release the per-process-group mutex. */
void
pgrp_unlock(struct pgrp * pgrp)
{
	lck_mtx_unlock(&pgrp->pg_mlock);
}
2776
/* Acquire the per-session mutex. */
void
session_lock(struct session * sess)
{
	lck_mtx_lock(&sess->s_mlock);
}
2782
2783
/* Release the per-session mutex. */
void
session_unlock(struct session * sess)
{
	lck_mtx_unlock(&sess->s_mlock);
}
2789
/*
 * Return the process group of 'p' with an extra reference held, or
 * PGRP_NULL if p is PROC_NULL.  Waits for any in-flight pgrp_replace()
 * transition (P_LIST_PGRPTRANS) so the pointer read is stable.
 * The caller is responsible for dropping the reference (see pg_rele()
 * usage in pgrp_remove()).
 */
struct pgrp *
proc_pgrp(proc_t p)
{
	struct pgrp * pgrp;

	if (p == PROC_NULL)
		return(PGRP_NULL);
	proc_list_lock();

	/* Sleep until any group-replacement transition completes. */
	while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
		p->p_listflag |= P_LIST_PGRPTRWAIT;
		(void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
	}

	pgrp = p->p_pgrp;

	assert(pgrp != NULL);

	if (pgrp != PGRP_NULL) {
		pgrp->pg_refcount++;
		if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) != 0)
			panic("proc_pgrp: ref being povided for dead pgrp");
	}

	proc_list_unlock();

	return(pgrp);
}
2818
/*
 * Return the foreground process group of tty 'tp' with an extra
 * reference held, or PGRP_NULL if the tty has none.
 */
struct pgrp *
tty_pgrp(struct tty * tp)
{
	struct pgrp * pg = PGRP_NULL;

	proc_list_lock();
	pg = tp->t_pgrp;

	if (pg != PGRP_NULL) {
		if ((pg->pg_listflags & PGRP_FLAG_DEAD) != 0)
			panic("tty_pgrp: ref being povided for dead pgrp");
		pg->pg_refcount++;
	}
	proc_list_unlock();

	return(pg);
}
2836
/*
 * Return the session of 'p' with its s_count bumped, or SESSION_NULL.
 * Waits out any pgrp_replace() transition first so p->p_pgrp is stable.
 * The caller drops the reference with session_rele().
 */
struct session *
proc_session(proc_t p)
{
	struct session * sess = SESSION_NULL;

	if (p == PROC_NULL)
		return(SESSION_NULL);

	proc_list_lock();

	/* wait during transitions */
	while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
		p->p_listflag |= P_LIST_PGRPTRWAIT;
		(void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
	}

	if ((p->p_pgrp != PGRP_NULL) && ((sess = p->p_pgrp->pg_session) != SESSION_NULL)) {
		if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
			panic("proc_session:returning sesssion ref on terminating session");
		sess->s_count++;
	}
	proc_list_unlock();
	return(sess);
}
2861
/*
 * Drop a reference on 'sess' (taken via proc_session() or the like).
 * The final release unhashes the session, destroys its mutex and frees
 * the zone memory; the TERM/DEAD listflags guard against a double
 * teardown and against a ref being re-taken mid-free.
 */
void
session_rele(struct session *sess)
{
	proc_list_lock();
	if (--sess->s_count == 0) {
		if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
			panic("session_rele: terminating already terminated session");
		sess->s_listflags |= S_LIST_TERM;
		LIST_REMOVE(sess, s_hash);
		sess->s_listflags |= S_LIST_DEAD;
		/* s_count was rechecked after unhashing; a non-zero count here
		 * means someone grabbed a ref while we were tearing down. */
		if (sess->s_count != 0)
			panic("session_rele: freeing session in use");
		proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_destroy(&sess->s_mlock, proc_mlock_grp);
#else
		lck_mtx_destroy(&sess->s_mlock, proc_lck_grp);
#endif
		FREE_ZONE(sess, sizeof(struct session), M_SESSION);
	} else
		proc_list_unlock();
}
2884
/*
 * Begin a proc "transition" (P_LINTRANSIT), recording the current
 * thread as the holder.  If a transition is already in progress, wait
 * for it -- unless it has been committed (P_LTRANSCOMMIT) or the caller
 * asked for non-blocking behavior, in which case return EDEADLK.
 * 'locked' != 0 means the caller already holds the proc lock.
 * Returns 0 on success.
 */
int
proc_transstart(proc_t p, int locked, int non_blocking)
{
	if (locked == 0)
		proc_lock(p);
	while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
		if (((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT) || non_blocking) {
			if (locked == 0)
				proc_unlock(p);
			return EDEADLK;
		}
		p->p_lflag |= P_LTRANSWAIT;
		msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
	}
	p->p_lflag |= P_LINTRANSIT;
	p->p_transholder = current_thread();
	if (locked == 0)
		proc_unlock(p);
	return 0;
}
2905
/*
 * Commit the transition started by proc_transstart() on this thread
 * (sets P_LTRANSCOMMIT) and wake any waiters.  Must be called by the
 * thread that owns the transition.  'locked' != 0 means the caller
 * already holds the proc lock.
 */
void
proc_transcommit(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);

	assert ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT);
	assert (p->p_transholder == current_thread());
	p->p_lflag |= P_LTRANSCOMMIT;

	if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
		p->p_lflag &= ~P_LTRANSWAIT;
		wakeup(&p->p_lflag);
	}
	if (locked == 0)
		proc_unlock(p);
}
2923
/*
 * End a transition: clear P_LINTRANSIT/P_LTRANSCOMMIT, release the
 * holder and wake any threads blocked in proc_transstart()/
 * proc_transwait().  'locked' != 0 means the caller already holds the
 * proc lock.
 */
void
proc_transend(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);

	p->p_lflag &= ~( P_LINTRANSIT | P_LTRANSCOMMIT);
	p->p_transholder = NULL;

	if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
		p->p_lflag &= ~P_LTRANSWAIT;
		wakeup(&p->p_lflag);
	}
	if (locked == 0)
		proc_unlock(p);
}
2940
/*
 * Wait until any in-progress transition on 'p' completes.  Returns
 * EDEADLK if the transition is committed and the caller *is* p (waiting
 * would deadlock on itself); 0 otherwise.  'locked' != 0 means the
 * caller already holds the proc lock.
 */
int
proc_transwait(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);
	while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
		if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT && current_proc() == p) {
			if (locked == 0)
				proc_unlock(p);
			return EDEADLK;
		}
		p->p_lflag |= P_LTRANSWAIT;
		msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
	}
	if (locked == 0)
		proc_unlock(p);
	return 0;
}
2959
/* Acquire the global proc-knote list mutex. */
void
proc_klist_lock(void)
{
	lck_mtx_lock(proc_klist_mlock);
}
2965
/* Release the global proc-knote list mutex. */
void
proc_klist_unlock(void)
{
	lck_mtx_unlock(proc_klist_mlock);
}
2971
/* Post event 'hint' to every knote attached to process 'p'. */
void
proc_knote(struct proc * p, long hint)
{
	proc_klist_lock();
	KNOTE(&p->p_klist, hint);
	proc_klist_unlock();
}
2979
/*
 * Detach every knote from 'p', clearing each knote's back-pointer to
 * the proc first so no knote references the proc after it is reaped.
 */
void
proc_knote_drain(struct proc *p)
{
	struct knote *kn = NULL;

	/*
	 * Clear the proc's klist to avoid references after the proc is reaped.
	 */
	proc_klist_lock();
	while ((kn = SLIST_FIRST(&p->p_klist))) {
		kn->kn_ptr.p_proc = PROC_NULL;
		KNOTE_DETACH(&p->p_klist, kn);
	}
	proc_klist_unlock();
}
2995
/* Set the P_LREGISTER flag on 'p' under the proc lock. */
void
proc_setregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag |= P_LREGISTER;
	proc_unlock(p);
}
3003
/* Clear the P_LREGISTER flag on 'p' under the proc lock. */
void
proc_resetregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag &= ~P_LREGISTER;
	proc_unlock(p);
}
3011
/* Return the cached process group id of 'p' (read without locking). */
pid_t
proc_pgrpid(proc_t p)
{
	return p->p_pgrpid;
}
3017
3018 pid_t
3019 proc_selfpgrpid()
3020 {
3021 return current_proc()->p_pgrpid;
3022 }
3023
3024
/* return control and action states */
/*
 * Look up process 'pid' and copy its proc-control action word
 * (p_pcaction) into *pcontrolp.  Returns ESRCH if the pid does not
 * exist, 0 otherwise.  p_pcaction is read without the proc lock;
 * callers get a snapshot only.
 */
int
proc_getpcontrol(int pid, int * pcontrolp)
{
	proc_t p;

	p = proc_find(pid);
	if (p == PROC_NULL)
		return(ESRCH);
	if (pcontrolp != NULL)
		*pcontrolp = p->p_pcaction;

	proc_rele(p);
	return(0);
}
3040
/*
 * Apply the low-swap policy action configured for 'p' (throttle,
 * suspend or kill), unless an action was already applied
 * (PROC_ACTION_STATE non-zero).  The action-state bit is set while the
 * proc lock is still held; the side effect (task_suspend/psignal) runs
 * after the unlock.  Always returns PROC_RETURNED for use as a
 * proc_iterate() callout.
 */
int
proc_dopcontrol(proc_t p)
{
	int pcontrol;

	proc_lock(p);

	pcontrol = PROC_CONTROL_STATE(p);

	if (PROC_ACTION_STATE(p) == 0) {
		switch(pcontrol) {
		case P_PCTHROTTLE:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: throttling pid %d (%s)\n", p->p_pid, p->p_comm);
			break;

		case P_PCSUSP:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: suspending pid %d (%s)\n", p->p_pid, p->p_comm);
			task_suspend(p->task);
			break;

		case P_PCKILL:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
			psignal(p, SIGKILL);
			break;

		default:
			proc_unlock(p);
		}

	} else
		proc_unlock(p);

	return(PROC_RETURNED);
}
3081
3082
3083 /*
3084 * Resume a throttled or suspended process. This is an internal interface that's only
3085 * used by the user level code that presents the GUI when we run out of swap space and
3086 * hence is restricted to processes with superuser privileges.
3087 */
3088
/*
 * Undo a previously-applied proc-control action on process 'pid'
 * (unthrottle or resume).  Permitted only for the validated resource
 * owner (P_LVMRSRCOWNER) or the superuser.  Returns ESRCH if the pid
 * does not exist, a suser() error on permission failure, 0 otherwise.
 */
int
proc_resetpcontrol(int pid)
{
	proc_t p;
	int pcontrol;
	int error;
	proc_t self = current_proc();

	/* if the process has been validated to handle resource control or root is valid one */
	if (((self->p_lflag & P_LVMRSRCOWNER) == 0) && (error = suser(kauth_cred_get(), 0)))
		return error;

	p = proc_find(pid);
	if (p == PROC_NULL)
		return(ESRCH);

	proc_lock(p);

	pcontrol = PROC_CONTROL_STATE(p);

	if(PROC_ACTION_STATE(p) !=0) {
		switch(pcontrol) {
		case P_PCTHROTTLE:
			PROC_RESETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: unthrottling pid %d (%s)\n", p->p_pid, p->p_comm);
			break;

		case P_PCSUSP:
			PROC_RESETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: resuming pid %d (%s)\n", p->p_pid, p->p_comm);
			task_resume(p->task);
			break;

		case P_PCKILL:
			/* Huh? */
			/* NOTE(review): a killed proc cannot be "unkilled"; this arm
			 * deliberately ignores the request, though it (re)SETs rather
			 * than RESETs the action state -- looks intentional-but-odd,
			 * confirm before changing. */
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: attempt to unkill pid %d (%s) ignored\n", p->p_pid, p->p_comm);
			break;

		default:
			proc_unlock(p);
		}

	} else
		proc_unlock(p);

	proc_rele(p);
	return(0);
}
3141
3142
3143
/*
 * Accumulator filled by proc_pcontrol_filter() during the all-proc scan
 * in no_paging_space_action().  Sizes are compressed-pool bytes from
 * get_task_compressed().
 *
 * pcs_*  -- biggest proc with a proc-control action configured but not
 *           yet applied, plus totals over that set.
 * npcs_* -- biggest proc with no proc-control action configured, plus
 *           totals over that set.
 * apcs_* -- totals over procs whose action has already been applied.
 */
struct no_paging_space
{
	uint64_t	pcs_max_size;
	uint64_t	pcs_uniqueid;
	int		pcs_pid;
	int		pcs_proc_count;
	uint64_t	pcs_total_size;

	uint64_t	npcs_max_size;
	uint64_t	npcs_uniqueid;
	int		npcs_pid;
	int		npcs_proc_count;
	uint64_t	npcs_total_size;

	int		apcs_proc_count;
	uint64_t	apcs_total_size;
};
3161
3162
/*
 * proc_iterate() filter used by no_paging_space_action(): classifies
 * each proc into the pcs/apcs/npcs buckets of the no_paging_space
 * accumulator passed in 'arg', tracking the largest proc (by compressed
 * footprint) in the pcs and npcs categories.  Always returns 0 so the
 * iterator's callout never fires -- all the work happens here.
 */
static int
proc_pcontrol_filter(proc_t p, void *arg)
{
	struct no_paging_space *nps;
	uint64_t	compressed;

	nps = (struct no_paging_space *)arg;

	compressed = get_task_compressed(p->task);

	if (PROC_CONTROL_STATE(p)) {
		if (PROC_ACTION_STATE(p) == 0) {
			/* Actionable, action not yet taken. */
			if (compressed > nps->pcs_max_size) {
				nps->pcs_pid = p->p_pid;
				nps->pcs_uniqueid = p->p_uniqueid;
				nps->pcs_max_size = compressed;
			}
			nps->pcs_total_size += compressed;
			nps->pcs_proc_count++;
		} else {
			/* Action already applied. */
			nps->apcs_total_size += compressed;
			nps->apcs_proc_count++;
		}
	} else {
		/* No proc-control action configured. */
		if (compressed > nps->npcs_max_size) {
			nps->npcs_pid = p->p_pid;
			nps->npcs_uniqueid = p->p_uniqueid;
			nps->npcs_max_size = compressed;
		}
		nps->npcs_total_size += compressed;
		nps->npcs_proc_count++;

	}
	return (0);
}
3198
3199
/*
 * No-op proc_iterate() callout paired with proc_pcontrol_filter():
 * since the filter always returns 0, this never runs; it exists only
 * to satisfy the iterator's interface.
 */
static int
proc_pcontrol_null(__unused proc_t p, __unused void *arg)
{
	return(PROC_RETURNED);
}
3205
3206
3207 /*
3208 * Deal with the low on compressor pool space condition... this function
3209 * gets called when we are approaching the limits of the compressor pool or
3210 * we are unable to create a new swap file.
 * Since this eventually creates a memory deadlock situation, we need to take action to free up
3212 * memory resources (both compressed and uncompressed) in order to prevent the system from hanging completely.
3213 * There are 2 categories of processes to deal with. Those that have an action
3214 * associated with them by the task itself and those that do not. Actionable
3215 * tasks can have one of three categories specified: ones that
3216 * can be killed immediately, ones that should be suspended, and ones that should
3217 * be throttled. Processes that do not have an action associated with them are normally
3218 * ignored unless they are utilizing such a large percentage of the compressor pool (currently 50%)
3219 * that only by killing them can we hope to put the system back into a usable state.
3220 */
3221
3222 #define NO_PAGING_SPACE_DEBUG 0
3223
3224 extern uint64_t vm_compressor_pages_compressed(void);
3225
3226 struct timeval last_no_space_action = {0, 0};
3227
3228 #if DEVELOPMENT || DEBUG
3229 extern boolean_t kill_on_no_paging_space;
3230 #endif /* DEVELOPMENT || DEBUG */
3231
3232 #define MB_SIZE (1024 * 1024ULL)
3233 boolean_t memorystatus_kill_on_VM_thrashing(boolean_t);
3234
3235 extern int32_t max_kill_priority;
3236 extern int memorystatus_get_proccnt_upto_priority(int32_t max_bucket_index);
3237
3238 int
3239 no_paging_space_action()
3240 {
3241 proc_t p;
3242 struct no_paging_space nps;
3243 struct timeval now;
3244
3245 /*
3246 * Throttle how often we come through here. Once every 5 seconds should be plenty.
3247 */
3248 microtime(&now);
3249
3250 if (now.tv_sec <= last_no_space_action.tv_sec + 5)
3251 return (0);
3252
3253 /*
3254 * Examine all processes and find the biggest (biggest is based on the number of pages this
3255 * task has in the compressor pool) that has been marked to have some action
3256 * taken when swap space runs out... we also find the biggest that hasn't been marked for
3257 * action.
3258 *
3259 * If the biggest non-actionable task is over the "dangerously big" threashold (currently 50% of
3260 * the total number of pages held by the compressor, we go ahead and kill it since no other task
3261 * can have any real effect on the situation. Otherwise, we go after the actionable process.
3262 */
3263 bzero(&nps, sizeof(nps));
3264
3265 proc_iterate(PROC_ALLPROCLIST, proc_pcontrol_null, (void *)NULL, proc_pcontrol_filter, (void *)&nps);
3266
3267 #if NO_PAGING_SPACE_DEBUG
3268 printf("low swap: npcs_proc_count = %d, npcs_total_size = %qd, npcs_max_size = %qd\n",
3269 nps.npcs_proc_count, nps.npcs_total_size, nps.npcs_max_size);
3270 printf("low swap: pcs_proc_count = %d, pcs_total_size = %qd, pcs_max_size = %qd\n",
3271 nps.pcs_proc_count, nps.pcs_total_size, nps.pcs_max_size);
3272 printf("low swap: apcs_proc_count = %d, apcs_total_size = %qd\n",
3273 nps.apcs_proc_count, nps.apcs_total_size);
3274 #endif
3275 if (nps.npcs_max_size > (vm_compressor_pages_compressed() * 50) / 100) {
3276 /*
3277 * for now we'll knock out any task that has more then 50% of the pages
3278 * held by the compressor
3279 */
3280 if ((p = proc_find(nps.npcs_pid)) != PROC_NULL) {
3281
3282 if (nps.npcs_uniqueid == p->p_uniqueid) {
3283 /*
3284 * verify this is still the same process
3285 * in case the proc exited and the pid got reused while
3286 * we were finishing the proc_iterate and getting to this point
3287 */
3288 last_no_space_action = now;
3289
3290 printf("low swap: killing largest compressed process with pid %d (%s) and size %llu MB\n", p->p_pid, p->p_comm, (nps.pcs_max_size/MB_SIZE));
3291 psignal(p, SIGKILL);
3292
3293 proc_rele(p);
3294
3295 return (0);
3296 }
3297
3298 proc_rele(p);
3299 }
3300 }
3301
3302 /*
3303 * We have some processes within our jetsam bands of consideration and hence can be killed.
3304 * So we will invoke the memorystatus thread to go ahead and kill something.
3305 */
3306 if (memorystatus_get_proccnt_upto_priority(max_kill_priority) > 0) {
3307
3308 last_no_space_action = now;
3309 memorystatus_kill_on_VM_thrashing(TRUE /* async */);
3310 return (1);
3311 }
3312
3313 /*
3314 * No eligible processes to kill. So let's suspend/kill the largest
3315 * process depending on its policy control specifications.
3316 */
3317
3318 if (nps.pcs_max_size > 0) {
3319 if ((p = proc_find(nps.pcs_pid)) != PROC_NULL) {
3320
3321 if (nps.pcs_uniqueid == p->p_uniqueid) {
3322 /*
3323 * verify this is still the same process
3324 * in case the proc exited and the pid got reused while
3325 * we were finishing the proc_iterate and getting to this point
3326 */
3327 last_no_space_action = now;
3328
3329 proc_dopcontrol(p);
3330
3331 proc_rele(p);
3332
3333 return (1);
3334 }
3335
3336 proc_rele(p);
3337 }
3338 }
3339 last_no_space_action = now;
3340
3341 printf("low swap: unable to find any eligible processes to take action on\n");
3342
3343 return (0);
3344 }
3345
/*
 * proc_trace_log system call: ask the target task to send its trace
 * memory, after verifying the caller's PRIV_PROC_TRACE_INSPECT
 * privilege and that (pid, uniqueid) still names the same process.
 * Returns EPERM on privilege failure, ENOENT if the pid is gone or the
 * uniqueid no longer matches, EINVAL if the task refuses, 0 on success.
 */
int
proc_trace_log(__unused proc_t p, struct proc_trace_log_args *uap, __unused int *retval)
{
	int ret = 0;
	proc_t target_proc = PROC_NULL;
	pid_t target_pid = uap->pid;
	uint64_t target_uniqueid = uap->uniqueid;
	task_t target_task = NULL;

	if (priv_check_cred(kauth_cred_get(), PRIV_PROC_TRACE_INSPECT, 0)) {
		ret = EPERM;
		goto out;
	}
	target_proc = proc_find(target_pid);
	if (target_proc != PROC_NULL) {
		/* Guard against pid reuse between caller's lookup and ours. */
		if (target_uniqueid != proc_uniqueid(target_proc)) {
			ret = ENOENT;
			goto out;
		}

		target_task = proc_task(target_proc);
		if (task_send_trace_memory(target_task, target_pid, target_uniqueid)) {
			ret = EINVAL;
			goto out;
		}
	} else
		ret = ENOENT;

out:
	if (target_proc != PROC_NULL)
		proc_rele(target_proc);
	return (ret);
}
3379
3380 #if VM_SCAN_FOR_SHADOW_CHAIN
3381 extern int vm_map_shadow_max(vm_map_t map);
3382 int proc_shadow_max(void);
/*
 * Walk every live process (skipping those still in creation, SIDL) and
 * return the maximum VM shadow-chain depth found across their maps, as
 * reported by vm_map_shadow_max().  Debug/diagnostic helper.
 */
int proc_shadow_max(void)
{
	int retval, max;
	proc_t p;
	task_t task;
	vm_map_t map;

	max = 0;
	proc_list_lock();
	for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
		if (p->p_stat == SIDL)
			continue;
		task = p->task;
		if (task == NULL) {
			continue;
		}
		map = get_task_map(task);
		if (map == NULL) {
			continue;
		}
		retval = vm_map_shadow_max(map);
		if (retval > max) {
			max = retval;
		}
	}
	proc_list_unlock();
	return max;
}
3411 #endif /* VM_SCAN_FOR_SHADOW_CHAIN */
3412
3413 void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid);
3414 void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid)
3415 {
3416 if (target_proc != NULL) {
3417 target_proc->p_responsible_pid = responsible_pid;
3418 }
3419 return;
3420 }
3421
3422 int
3423 proc_chrooted(proc_t p)
3424 {
3425 int retval = 0;
3426
3427 if (p) {
3428 proc_fdlock(p);
3429 retval = (p->p_fd->fd_rdir != NULL) ? 1 : 0;
3430 proc_fdunlock(p);
3431 }
3432
3433 return retval;
3434 }
3435
3436 void *
3437 proc_get_uthread_uu_threadlist(void * uthread_v)
3438 {
3439 uthread_t uth = (uthread_t)uthread_v;
3440 return (uth != NULL) ? uth->uu_threadlist : NULL;
3441 }
3442
3443 #ifdef CONFIG_32BIT_TELEMETRY
/*
 * Emit a messagetracer telemetry record for a 32-bit executable:
 * builds a "procname,parentname,teamid,identity" signature (names read
 * under the proc_list_lock, code-signing info from the executable's
 * vnode blob) and logs it via kern_asl_msg().
 */
void
proc_log_32bit_telemetry(proc_t p)
{
	/* Gather info */
	char signature_buf[MAX_32BIT_EXEC_SIG_SIZE] = { 0 };
	char * signature_cur_end = &signature_buf[0];
	char * signature_buf_end = &signature_buf[MAX_32BIT_EXEC_SIG_SIZE - 1];
	int bytes_printed = 0;

	const char * teamid = NULL;
	const char * identity = NULL;
	struct cs_blob * csblob = NULL;

	proc_list_lock();

	/*
	 * Get proc name and parent proc name; if the parent execs, we'll get a
	 * garbled name.
	 */
	bytes_printed = snprintf(signature_cur_end,
	    signature_buf_end - signature_cur_end,
	    "%s,%s,", p->p_name,
	    (p->p_pptr ? p->p_pptr->p_name : ""));

	if (bytes_printed > 0) {
		signature_cur_end += bytes_printed;
	}

	proc_list_unlock();

	/* Get developer info. */
	vnode_t v = proc_getexecutablevnode(p);

	if (v) {
		csblob = csvnode_get_blob(v, 0);

		if (csblob) {
			teamid = csblob_get_teamid(csblob);
			identity = csblob_get_identity(csblob);
		}
	}

	/* Substitute empty strings so the signature keeps its field count. */
	if (teamid == NULL) {
		teamid = "";
	}

	if (identity == NULL) {
		identity = "";
	}

	bytes_printed = snprintf(signature_cur_end,
	    signature_buf_end - signature_cur_end,
	    "%s,%s", teamid, identity);

	if (bytes_printed > 0) {
		signature_cur_end += bytes_printed;
	}

	/* proc_getexecutablevnode() returned the vnode with an iocount. */
	if (v) {
		vnode_put(v);
	}

	/*
	 * We may want to rate limit here, although the SUMMARIZE key should
	 * help us aggregate events in userspace.
	 */

	/* Emit log */
	kern_asl_msg(LOG_DEBUG, "messagetracer", 3,
	    /* 0 */ "com.apple.message.domain", "com.apple.kernel.32bit_exec",
	    /* 1 */ "com.apple.message.signature", signature_buf,
	    /* 2 */ "com.apple.message.summarize", "YES",
	    NULL);
}
3518 #endif /* CONFIG_32BIT_TELEMETRY */