]> git.saurik.com Git - apple/xnu.git/blame_incremental - bsd/kern/kern_proc.c
xnu-3789.41.3.tar.gz
[apple/xnu.git] / bsd / kern / kern_proc.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29/*
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)kern_proc.c 8.4 (Berkeley) 1/4/94
62 */
63/*
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
67 * Version 2.0.
68 */
69/* HISTORY
70 * 04-Aug-97 Umesh Vaishampayan (umeshv@apple.com)
71 * Added current_proc_EXTERNAL() function for the use of kernel
72 * lodable modules.
73 *
74 * 05-Jun-95 Mac Gillon (mgillon) at NeXT
75 * New version based on 3.3NS and 4.4
76 */
77
78
79#include <sys/param.h>
80#include <sys/systm.h>
81#include <sys/kernel.h>
82#include <sys/proc_internal.h>
83#include <sys/acct.h>
84#include <sys/wait.h>
85#include <sys/file_internal.h>
86#include <sys/uio.h>
87#include <sys/malloc.h>
88#include <sys/lock.h>
89#include <sys/mbuf.h>
90#include <sys/ioctl.h>
91#include <sys/tty.h>
92#include <sys/signalvar.h>
93#include <sys/syslog.h>
94#include <sys/sysctl.h>
95#include <sys/sysproto.h>
96#include <sys/kauth.h>
97#include <sys/codesign.h>
98#include <sys/kernel_types.h>
99#include <sys/ubc.h>
100#include <kern/kalloc.h>
101#include <kern/task.h>
102#include <kern/coalition.h>
103#include <sys/coalition.h>
104#include <kern/assert.h>
105#include <vm/vm_protos.h>
106#include <vm/vm_map.h> /* vm_map_switch_protect() */
107#include <vm/vm_pageout.h>
108#include <mach/task.h>
109#include <mach/message.h>
110#include <sys/priv.h>
111#include <sys/proc_info.h>
112#include <sys/bsdtask_info.h>
113#include <sys/persona.h>
114
115#if CONFIG_MEMORYSTATUS
116#include <sys/kern_memorystatus.h>
117#endif
118
119#if CONFIG_MACF
120#include <security/mac_framework.h>
121#endif
122
123#include <libkern/crypto/sha1.h>
124
/*
 * Structure associated with user caching: one entry per uid that
 * currently owns at least one process, tracking the process count
 * so setuid/fork can enforce per-user process limits.
 */
struct uidinfo {
	LIST_ENTRY(uidinfo) ui_hash;	/* hash-chain linkage */
	uid_t	ui_uid;			/* uid this entry accounts for */
	long	ui_proccnt;		/* number of processes owned by ui_uid */
};
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
LIST_HEAD(uihashhead, uidinfo) *uihashtbl;	/* uid hash table (see procinit) */
u_long uihash;		/* size of hash table - 1 */
137/*
138 * Other process lists
139 */
140struct pidhashhead *pidhashtbl;
141u_long pidhash;
142struct pgrphashhead *pgrphashtbl;
143u_long pgrphash;
144struct sesshashhead *sesshashtbl;
145u_long sesshash;
146
147struct proclist allproc;
148struct proclist zombproc;
149extern struct tty cons;
150
151extern int cs_debug;
152
153#if DEBUG
154#define __PROC_INTERNAL_DEBUG 1
155#endif
156#if CONFIG_COREDUMP
157/* Name to give to core files */
158__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN+1] = {"/cores/core.%P"};
159#endif
160
161#if PROC_REF_DEBUG
162#include <kern/backtrace.h>
163#endif
164
165static void orphanpg(struct pgrp * pg);
166void proc_name_kdp(task_t t, char * buf, int size);
167void * proc_get_uthread_uu_threadlist(void * uthread_v);
168int proc_threadname_kdp(void * uth, char * buf, size_t size);
169void proc_starttime_kdp(void * p, uint64_t * tv_sec, uint64_t * tv_usec, uint64_t * abstime);
170char * proc_name_address(void * p);
171
172/* TODO: make a header that's exported and usable in osfmk */
173char* proc_best_name(proc_t p);
174
175static void pgrp_add(struct pgrp * pgrp, proc_t parent, proc_t child);
176static void pgrp_remove(proc_t p);
177static void pgrp_replace(proc_t p, struct pgrp *pgrp);
178static void pgdelete_dropref(struct pgrp *pgrp);
179extern void pg_rele_dropref(struct pgrp * pgrp);
180static int csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaddittoken);
181static boolean_t proc_parent_is_currentproc(proc_t p);
182
183struct fixjob_iterargs {
184 struct pgrp * pg;
185 struct session * mysession;
186 int entering;
187};
188
189int fixjob_callback(proc_t, void *);
190
191uint64_t get_current_unique_pid(void);
192
193
194uint64_t
195get_current_unique_pid(void)
196{
197 proc_t p = current_proc();
198
199 if (p)
200 return p->p_uniqueid;
201 else
202 return 0;
203}
204
/*
 * Initialize global process hashing structures.
 *
 * Called once at boot: sets up the live (allproc) and zombie
 * (zombproc) lists and the pid/pgrp/session/uid hash tables,
 * all sized from the maxproc tunable.
 */
void
procinit(void)
{
	LIST_INIT(&allproc);
	LIST_INIT(&zombproc);
	/* hashinit() returns the bucket array and stores (size - 1) in the mask */
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
	sesshashtbl = hashinit(maxproc / 4, M_PROC, &sesshash);
	uihashtbl = hashinit(maxproc / 16, M_PROC, &uihash);
#if CONFIG_PERSONAS
	personas_bootstrap();
#endif
}
221
/*
 * Change the count associated with number of processes
 * a given user is using. This routine protects the uihash
 * with the list lock.
 *
 * Returns the new process count for `uid'.  An entry is created on
 * first use and freed when the count drops to zero.  Because the
 * allocation must happen without the list lock held, the function
 * drops the lock, allocates, and retries from `again'; a spare
 * allocation that loses the race is freed at `out'.
 */
int
chgproccnt(uid_t uid, int diff)
{
	struct uidinfo *uip;
	struct uidinfo *newuip = NULL;
	struct uihashhead *uipp;
	int retval;

again:
	proc_list_lock();
	uipp = UIHASH(uid);
	/* look up an existing entry for this uid */
	for (uip = uipp->lh_first; uip != 0; uip = uip->ui_hash.le_next)
		if (uip->ui_uid == uid)
			break;
	if (uip) {
		uip->ui_proccnt += diff;
		if (uip->ui_proccnt > 0) {
			retval = uip->ui_proccnt;
			proc_list_unlock();
			goto out;
		}
		if (uip->ui_proccnt < 0)
			panic("chgproccnt: procs < 0");
		/* count reached zero: retire the entry */
		LIST_REMOVE(uip, ui_hash);
		retval = 0;
		proc_list_unlock();
		FREE_ZONE(uip, sizeof(*uip), M_PROC);
		goto out;
	}
	/* no entry: a non-positive diff should never get here with diff < 0 */
	if (diff <= 0) {
		if (diff == 0) {
			retval = 0;
			proc_list_unlock();
			goto out;
		}
		panic("chgproccnt: lost user");
	}
	if (newuip != NULL) {
		/* install the entry pre-allocated on a previous pass */
		uip = newuip;
		newuip = NULL;
		LIST_INSERT_HEAD(uipp, uip, ui_hash);
		uip->ui_uid = uid;
		uip->ui_proccnt = diff;
		retval = diff;
		proc_list_unlock();
		goto out;
	}
	/* allocate outside the lock, then retry the lookup from scratch */
	proc_list_unlock();
	MALLOC_ZONE(newuip, struct uidinfo *, sizeof(*uip), M_PROC, M_WAITOK);
	if (newuip == NULL)
		panic("chgproccnt: M_PROC zone depleted");
	goto again;
out:
	/* free a spare allocation that was not needed after all */
	if (newuip != NULL)
		FREE_ZONE(newuip, sizeof(*uip), M_PROC);
	return(retval);
}
284
285/*
286 * Is p an inferior of the current process?
287 */
288int
289inferior(proc_t p)
290{
291 int retval = 0;
292
293 proc_list_lock();
294 for (; p != current_proc(); p = p->p_pptr)
295 if (p->p_pid == 0)
296 goto out;
297 retval = 1;
298out:
299 proc_list_unlock();
300 return(retval);
301}
302
303/*
304 * Is p an inferior of t ?
305 */
306int
307isinferior(proc_t p, proc_t t)
308{
309 int retval = 0;
310 int nchecked = 0;
311 proc_t start = p;
312
313 /* if p==t they are not inferior */
314 if (p == t)
315 return(0);
316
317 proc_list_lock();
318 for (; p != t; p = p->p_pptr) {
319 nchecked++;
320
321 /* Detect here if we're in a cycle */
322 if ((p->p_pid == 0) || (p->p_pptr == start) || (nchecked >= nprocs))
323 goto out;
324 }
325 retval = 1;
326out:
327 proc_list_unlock();
328 return(retval);
329}
330
331int
332proc_isinferior(int pid1, int pid2)
333{
334 proc_t p = PROC_NULL;
335 proc_t t = PROC_NULL;
336 int retval = 0;
337
338 if (((p = proc_find(pid1)) != (proc_t)0 ) && ((t = proc_find(pid2)) != (proc_t)0))
339 retval = isinferior(p, t);
340
341 if (p != PROC_NULL)
342 proc_rele(p);
343 if (t != PROC_NULL)
344 proc_rele(t);
345
346 return(retval);
347}
348
/*
 * Look up a process by pid and return it with a reference held
 * (taken via proc_ref_locked in proc_findinternal), or PROC_NULL.
 * The caller must drop the reference with proc_rele().
 */
proc_t
proc_find(int pid)
{
	return(proc_findinternal(pid, 0));
}
354
355proc_t
356proc_findinternal(int pid, int locked)
357{
358 proc_t p = PROC_NULL;
359
360 if (locked == 0) {
361 proc_list_lock();
362 }
363
364 p = pfind_locked(pid);
365 if ((p == PROC_NULL) || (p != proc_ref_locked(p)))
366 p = PROC_NULL;
367
368 if (locked == 0) {
369 proc_list_unlock();
370 }
371
372 return(p);
373}
374
/*
 * Return the proc associated with `thread', with a reference held,
 * or PROC_NULL.  For a thread in the middle of vfork (UT_VFORK set)
 * this is the vfork child (uu_proc), not the task's own proc.
 */
proc_t
proc_findthread(thread_t thread)
{
	proc_t p = PROC_NULL;
	struct uthread *uth;

	proc_list_lock();
	uth = get_bsdthread_info(thread);
	if (uth && (uth->uu_flag & UT_VFORK))
		p = uth->uu_proc;	/* vfork child borrowed by this thread */
	else
		p = (proc_t)(get_bsdthreadtask_info(thread));
	p = proc_ref_locked(p);
	proc_list_unlock();
	return(p);
}
391
#if PROC_REF_DEBUG
/*
 * Reset the per-uthread proc-ref tracking state (outstanding ref
 * count and saved-backtrace index).  No-op when tracking has been
 * disabled via proc_ref_tracking_disabled.
 */
void
uthread_reset_proc_refcount(void *uthread) {
	uthread_t uth;

	if (proc_ref_tracking_disabled) {
		return;
	}

	uth = (uthread_t) uthread;

	uth->uu_proc_refcount = 0;
	uth->uu_pindex = 0;
}

/*
 * Return the number of proc references currently charged to this
 * uthread, or 0 when tracking is disabled.
 */
int
uthread_get_proc_refcount(void *uthread) {
	uthread_t uth;

	if (proc_ref_tracking_disabled) {
		return 0;
	}

	uth = (uthread_t) uthread;

	return uth->uu_proc_refcount;
}

/*
 * Charge a proc reference take (count == 1) or drop (count == -1)
 * to the current uthread.  On a take, also record a backtrace and
 * the proc pointer for leak debugging, while slots remain.
 */
static void
record_procref(proc_t p, int count) {
	uthread_t uth;

	if (proc_ref_tracking_disabled) {
		return;
	}

	uth = current_uthread();
	uth->uu_proc_refcount += count;

	if (count == 1) {
		/* remember where this reference was taken */
		if (uth->uu_pindex < NUM_PROC_REFS_TO_TRACK) {
			backtrace((uintptr_t *) &uth->uu_proc_pcs[uth->uu_pindex], PROC_REF_STACK_DEPTH);

			uth->uu_proc_ps[uth->uu_pindex] = p;
			uth->uu_pindex++;
		}
	}
}
#endif
441
/*
 * Drop a process reference taken by proc_find()/proc_self() etc.
 * Takes the proc list lock around proc_rele_locked().  Always
 * returns 0.
 */
int
proc_rele(proc_t p)
{
	proc_list_lock();
	proc_rele_locked(p);
	proc_list_unlock();

	return(0);
}
451
/*
 * Return the current process with a reference held, or PROC_NULL
 * if a reference cannot be taken (e.g. the process is exiting).
 * The caller must drop the reference with proc_rele().
 */
proc_t
proc_self(void)
{
	struct proc * p;

	p = current_proc();

	proc_list_lock();
	if (p != proc_ref_locked(p))
		p = PROC_NULL;
	proc_list_unlock();
	return(p);
}
465
466
/*
 * Take a reference on `p' with the proc list lock held.  Returns
 * `p' on success, PROC_NULL when the process is in creation,
 * zombied, exited, dead, or draining without refwait.  May sleep
 * (dropping/retaking the list lock in msleep) while an exec-driven
 * refdrain with P_LIST_REFWAIT is in progress.
 */
proc_t
proc_ref_locked(proc_t p)
{
	proc_t p1 = p;

	/* if process still in creation return failure */
	if ((p == PROC_NULL) || ((p->p_listflag & P_LIST_INCREATE) != 0))
		return (PROC_NULL);
retry:
	/*
	 * Do not return process marked for termination
	 * or proc_refdrain called without ref wait.
	 * Wait for proc_refdrain_with_refwait to complete if
	 * process in refdrain and refwait flag is set.
	 */
	if ((p->p_stat != SZOMB) &&
	    ((p->p_listflag & P_LIST_EXITED) == 0) &&
	    ((p->p_listflag & P_LIST_DEAD) == 0) &&
	    (((p->p_listflag & (P_LIST_DRAIN | P_LIST_DRAINWAIT)) == 0) ||
	     ((p->p_listflag & P_LIST_REFWAIT) != 0))) {
		if ((p->p_listflag & P_LIST_REFWAIT) != 0) {
			/* drain in progress with refwait: block until woken by proc_refwake */
			msleep(&p->p_listflag, proc_list_mlock, 0, "proc_refwait", 0) ;
			goto retry;
		}
		p->p_refcount++;
#if PROC_REF_DEBUG
		record_procref(p, 1);
#endif
	}
	else
		p1 = PROC_NULL;

	return(p1);
}
501
/*
 * Drop a reference on `p' with the proc list lock held.  When the
 * count reaches zero and a drainer is blocked (P_LIST_DRAINWAIT),
 * wake it.  Panics on a reference underflow.
 */
void
proc_rele_locked(proc_t p)
{

	if (p->p_refcount > 0) {
		p->p_refcount--;
#if PROC_REF_DEBUG
		record_procref(p, -1);
#endif
		if ((p->p_refcount == 0) && ((p->p_listflag & P_LIST_DRAINWAIT) == P_LIST_DRAINWAIT)) {
			/* last ref gone: let proc_refdrain proceed */
			p->p_listflag &= ~P_LIST_DRAINWAIT;
			wakeup(&p->p_refcount);
		}
	} else
		panic("proc_rele_locked -ve ref\n");

}
519
/*
 * Find a zombie (exited but unreaped) process by pid and claim it
 * by setting P_LIST_WAITING.  Returns PROC_NULL if the pid is
 * absent, still in creation, or has not begun exit.  If another
 * thread already holds the zombie, sleeps until it is released and
 * retries.  Release with proc_drop_zombref().
 */
proc_t
proc_find_zombref(int pid)
{
	proc_t p;

	proc_list_lock();

 again:
	p = pfind_locked(pid);

	/* should we bail? */
	if ((p == PROC_NULL)					/* not found */
	    || ((p->p_listflag & P_LIST_INCREATE) != 0)		/* not created yet */
	    || ((p->p_listflag & P_LIST_EXITED) == 0)) {	/* not started exit */

		proc_list_unlock();
		return (PROC_NULL);
	}

	/* If someone else is controlling the (unreaped) zombie - wait */
	if ((p->p_listflag & P_LIST_WAITING) != 0) {
		(void)msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
		goto again;
	}
	p->p_listflag |= P_LIST_WAITING;

	proc_list_unlock();

	return(p);
}
550
/*
 * Release a zombie claimed via proc_find_zombref(): clear
 * P_LIST_WAITING and wake any thread sleeping for the zombie.
 */
void
proc_drop_zombref(proc_t p)
{
	proc_list_lock();
	if ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
		p->p_listflag &= ~P_LIST_WAITING;
		wakeup(&p->p_stat);
	}
	proc_list_unlock();
}
561
562
/*
 * Wait for all outstanding references on `p' to drain, then mark
 * the process P_LIST_DEAD.  Used on the exit path; thin wrapper
 * over proc_refdrain_with_refwait() without the refwait behavior.
 */
void
proc_refdrain(proc_t p)
{
	proc_refdrain_with_refwait(p, FALSE);
}
568
/*
 * Drain outstanding references on `p'.
 *
 * With get_ref_and_allow_wait == FALSE (exit path): wait for the
 * refcount to hit zero, mark the proc P_LIST_DEAD, return NULL.
 *
 * With get_ref_and_allow_wait == TRUE (exec path): additionally set
 * P_LIST_REFWAIT so new proc_ref_locked() callers block until
 * proc_refwake(); do not wait at all when `p' is launchd (initproc),
 * and return `p' with a fresh reference for the caller.
 */
proc_t
proc_refdrain_with_refwait(proc_t p, boolean_t get_ref_and_allow_wait)
{
	boolean_t initexec = FALSE;
	proc_list_lock();

	p->p_listflag |= P_LIST_DRAIN;
	if (get_ref_and_allow_wait) {
		/*
		 * All the calls to proc_ref_locked will wait
		 * for the flag to get cleared before returning a ref.
		 */
		p->p_listflag |= P_LIST_REFWAIT;
		if (p == initproc) {
			initexec = TRUE;
		}
	}

	/* Do not wait in ref drain for launchd exec */
	while (p->p_refcount && !initexec) {
		p->p_listflag |= P_LIST_DRAINWAIT;
		msleep(&p->p_refcount, proc_list_mlock, 0, "proc_refdrain", 0) ;
	}

	p->p_listflag &= ~P_LIST_DRAIN;
	if (!get_ref_and_allow_wait) {
		p->p_listflag |= P_LIST_DEAD;
	} else {
		/* Return a ref to the caller */
		p->p_refcount++;
#if PROC_REF_DEBUG
		record_procref(p, 1);
#endif
	}

	proc_list_unlock();

	if (get_ref_and_allow_wait) {
		return (p);
	}
	return NULL;
}
611
/*
 * End the refwait period started by proc_refdrain_with_refwait():
 * clear P_LIST_REFWAIT and wake all proc_ref_locked() sleepers.
 */
void
proc_refwake(proc_t p)
{
	proc_list_lock();
	p->p_listflag &= ~P_LIST_REFWAIT;
	wakeup(&p->p_listflag);
	proc_list_unlock();
}
620
/*
 * Take a parent reference (p_parentref) on p's parent and return
 * it, or PROC_NULL when the parent is gone, a zombie, or has fully
 * drained its children.  If a child drain is in progress, sleeps
 * and retries up to 5 times before giving up.
 */
proc_t
proc_parentholdref(proc_t p)
{
	proc_t parent = PROC_NULL;
	proc_t pp;
	int loopcnt = 0;


	proc_list_lock();
loop:
	pp = p->p_pptr;
	if ((pp == PROC_NULL) || (pp->p_stat == SZOMB) || ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED))) {
		parent = PROC_NULL;
		goto out;
	}

	/* drain started but not finished: wait for it, bounded by 5 tries */
	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == P_LIST_CHILDDRSTART) {
		pp->p_listflag |= P_LIST_CHILDDRWAIT;
		msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
		loopcnt++;
		if (loopcnt == 5) {
			parent = PROC_NULL;
			goto out;
		}
		goto loop;
	}

	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == 0) {
		pp->p_parentref++;
		parent = pp;
		goto out;
	}

out:
	proc_list_unlock();
	return(parent);
}
/*
 * Drop a parent reference taken by proc_parentholdref().  When the
 * count reaches zero and a waiter is blocked in
 * proc_childdrainstart(), wake it.  `listlocked' != 0 means the
 * caller already holds the proc list lock.  Always returns 0;
 * panics on underflow.
 */
int
proc_parentdropref(proc_t p, int listlocked)
{
	if (listlocked == 0)
		proc_list_lock();

	if (p->p_parentref > 0) {
		p->p_parentref--;
		if ((p->p_parentref == 0) && ((p->p_listflag & P_LIST_PARENTREFWAIT) == P_LIST_PARENTREFWAIT)) {
			p->p_listflag &= ~P_LIST_PARENTREFWAIT;
			wakeup(&p->p_parentref);
		}
	} else
		panic("proc_parentdropref -ve ref\n");
	if (listlocked == 0)
		proc_list_unlock();

	return(0);
}
677
/*
 * Begin draining children of an exiting process: set
 * P_LIST_CHILDDRSTART and sleep until every p_parentref taken via
 * proc_parentholdref() has been dropped.  Caller is expected to
 * hold the proc list lock (msleep uses proc_list_mlock).
 */
void
proc_childdrainstart(proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_CHILDDRSTART) == P_LIST_CHILDDRSTART)
		panic("proc_childdrainstart: childdrain already started\n");
#endif
	p->p_listflag |= P_LIST_CHILDDRSTART;
	/* wait for all that hold parentrefs to drop */
	while (p->p_parentref > 0) {
		p->p_listflag |= P_LIST_PARENTREFWAIT;
		msleep(&p->p_parentref, proc_list_mlock, 0, "proc_childdrainstart", 0) ;
	}
}
692
693
694void
695proc_childdrainend(proc_t p)
696{
697#if __PROC_INTERNAL_DEBUG
698 if (p->p_childrencnt > 0)
699 panic("exiting: children stil hanging around\n");
700#endif
701 p->p_listflag |= P_LIST_CHILDDRAINED;
702 if ((p->p_listflag & (P_LIST_CHILDLKWAIT |P_LIST_CHILDDRWAIT)) != 0) {
703 p->p_listflag &= ~(P_LIST_CHILDLKWAIT |P_LIST_CHILDDRWAIT);
704 wakeup(&p->p_childrencnt);
705 }
706}
707
/*
 * Debug-only sanity check run as a proc structure is freed: panic
 * if it is still hashed or has outstanding children, references,
 * or parent references.
 */
void
proc_checkdeadrefs(__unused proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INHASH) != 0)
		panic("proc being freed and still in hash %p: %u\n", p, p->p_listflag);
	if (p->p_childrencnt != 0)
		panic("proc being freed and pending children cnt %p:%d\n", p, p->p_childrencnt);
	if (p->p_refcount != 0)
		panic("proc being freed and pending refcount %p:%d\n", p, p->p_refcount);
	if (p->p_parentref != 0)
		panic("proc being freed and pending parentrefs %p:%d\n", p, p->p_parentref);
#endif
}
722
723int
724proc_pid(proc_t p)
725{
726 if (p != NULL)
727 return (p->p_pid);
728 return -1;
729}
730
731int
732proc_ppid(proc_t p)
733{
734 if (p != NULL)
735 return (p->p_ppid);
736 return -1;
737}
738
/*
 * Return the pid of the current process.
 */
int
proc_selfpid(void)
{
	return (current_proc()->p_pid);
}
744
/*
 * Return the parent pid of the current process.
 */
int
proc_selfppid(void)
{
	return (current_proc()->p_ppid);
}
750
/*
 * Return the code-signing flags of the current process.
 */
int
proc_selfcsflags(void)
{
	return (current_proc()->p_csflags);
}
756
#if CONFIG_DTRACE
/*
 * Return the proc that DTrace should attribute the current thread
 * to.  During the vfork window (UT_VFORK|UT_VFORKING both set) this
 * is the task's own proc, not the not-yet-claimed child.
 */
static proc_t
dtrace_current_proc_vforking(void)
{
	thread_t th = current_thread();
	struct uthread *ut = get_bsdthread_info(th);

	if (ut &&
	    ((ut->uu_flag & (UT_VFORK|UT_VFORKING)) == (UT_VFORK|UT_VFORKING))) {
		/*
		 * Handle the narrow window where we're in the vfork syscall,
		 * but we're not quite ready to claim (in particular, to DTrace)
		 * that we're running as the child.
		 */
		return (get_bsdtask_info(get_threadtask(th)));
	}
	return (current_proc());
}

/*
 * DTrace-safe variant of proc_selfpid() (vfork-aware).
 */
int
dtrace_proc_selfpid(void)
{
	return (dtrace_current_proc_vforking()->p_pid);
}

/*
 * DTrace-safe variant of proc_selfppid() (vfork-aware).
 */
int
dtrace_proc_selfppid(void)
{
	return (dtrace_current_proc_vforking()->p_ppid);
}

/*
 * DTrace-safe accessor for the current process's real uid.
 */
uid_t
dtrace_proc_selfruid(void)
{
	return (dtrace_current_proc_vforking()->p_ruid);
}
#endif /* CONFIG_DTRACE */
794
/*
 * Return p's parent with a reference held, or PROC_NULL.  If the
 * parent is exiting but has not yet drained its children, sleep on
 * its children count and retry so a transiently-unreferencable
 * parent is not misreported as gone.
 */
proc_t
proc_parent(proc_t p)
{
	proc_t parent;
	proc_t pp;

	proc_list_lock();
loop:
	pp = p->p_pptr;
	parent =  proc_ref_locked(pp);
	if ((parent == PROC_NULL) && (pp != PROC_NULL) && (pp->p_stat != SZOMB) && ((pp->p_listflag & P_LIST_EXITED) != 0) && ((pp->p_listflag & P_LIST_CHILDDRAINED)== 0)){
		/* parent exiting but children not drained: wait and retry */
		pp->p_listflag |= P_LIST_CHILDLKWAIT;
		msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
		goto loop;
	}
	proc_list_unlock();
	return(parent);
}
813
814static boolean_t
815proc_parent_is_currentproc(proc_t p)
816{
817 boolean_t ret = FALSE;
818
819 proc_list_lock();
820 if (p->p_pptr == current_proc())
821 ret = TRUE;
822
823 proc_list_unlock();
824 return ret;
825}
826
827void
828proc_name(int pid, char * buf, int size)
829{
830 proc_t p;
831
832 if ((p = proc_find(pid)) != PROC_NULL) {
833 strlcpy(buf, &p->p_comm[0], size);
834 proc_rele(p);
835 }
836}
837
/*
 * Debugger/kdp-safe name copy for the proc backing task `t'.  Uses
 * the long name (p_name) when the destination can hold more than
 * p_comm, otherwise the short p_comm.  No locks are taken.
 */
void
proc_name_kdp(task_t t, char * buf, int size)
{
	proc_t p = get_bsdtask_info(t);
	if (p == PROC_NULL)
		return;

	if ((size_t)size > sizeof(p->p_comm))
		strlcpy(buf, &p->p_name[0], MIN((int)sizeof(p->p_name), size));
	else
		strlcpy(buf, &p->p_comm[0], MIN((int)sizeof(p->p_comm), size));
}
850
/*
 * Debugger/kdp-safe copy of a thread's name into `buf'.  Returns
 * -1 when the buffer is smaller than MAXTHREADNAMESIZE (since
 * bsd_getthreadname does no bounds checking), 0 otherwise.
 */
int
proc_threadname_kdp(void * uth, char * buf, size_t size)
{
	if (size < MAXTHREADNAMESIZE) {
		/* this is really just a protective measure for the future in
		 * case the thread name size in stackshot gets out of sync with
		 * the BSD max thread name size. Note that bsd_getthreadname
		 * doesn't take input buffer size into account. */
		return -1;
	}

	if (uth != NULL) {
		bsd_getthreadname(uth, buf);
	}
	return 0;
}
867
/* note that this function is generally going to be called from stackshot,
 * and the arguments will be coming from a struct which is declared packed
 * thus the input arguments will in general be unaligned. We have to handle
 * that here. */
void
proc_starttime_kdp(void *p, uint64_t *tv_sec, uint64_t *tv_usec, uint64_t *abstime)
{
	proc_t pp = (proc_t)p;
	/* packed wrapper so writes through possibly-unaligned pointers are safe */
	struct uint64p {
		uint64_t val;
	} __attribute__((packed));

	if (pp != PROC_NULL) {
		if (tv_sec != NULL)
			((struct uint64p *)tv_sec)->val = pp->p_start.tv_sec;
		if (tv_usec != NULL)
			((struct uint64p *)tv_usec)->val = pp->p_start.tv_usec;
		if (abstime != NULL) {
			if (pp->p_stats != NULL)
				*abstime = pp->p_stats->ps_start;
			else
				*abstime = 0;
		}
	}
}
893
/*
 * Return the address of p's p_comm buffer (no copy, no reference).
 */
char *
proc_name_address(void *p)
{
	return &((proc_t)p)->p_comm[0];
}
899
900char *
901proc_best_name(proc_t p)
902{
903 if (p->p_name[0] != 0)
904 return (&p->p_name[0]);
905 return (&p->p_comm[0]);
906}
907
908void
909proc_selfname(char * buf, int size)
910{
911 proc_t p;
912
913 if ((p = current_proc())!= (proc_t)0) {
914 strlcpy(buf, &p->p_comm[0], size);
915 }
916}
917
918void
919proc_signal(int pid, int signum)
920{
921 proc_t p;
922
923 if ((p = proc_find(pid)) != PROC_NULL) {
924 psignal(p, signum);
925 proc_rele(p);
926 }
927}
928
929int
930proc_issignal(int pid, sigset_t mask)
931{
932 proc_t p;
933 int error=0;
934
935 if ((p = proc_find(pid)) != PROC_NULL) {
936 error = proc_pendingsignals(p, mask);
937 proc_rele(p);
938 }
939
940 return(error);
941}
942
943int
944proc_noremotehang(proc_t p)
945{
946 int retval = 0;
947
948 if (p)
949 retval = p->p_flag & P_NOREMOTEHANG;
950 return(retval? 1: 0);
951
952}
953
954int
955proc_exiting(proc_t p)
956{
957 int retval = 0;
958
959 if (p)
960 retval = p->p_lflag & P_LEXIT;
961 return(retval? 1: 0);
962}
963
964int
965proc_forcequota(proc_t p)
966{
967 int retval = 0;
968
969 if (p)
970 retval = p->p_flag & P_FORCEQUOTA;
971 return(retval? 1: 0);
972
973}
974
/*
 * Superuser check against p's credential (charging accounting to
 * p_acflag).  Returns 0 when p is privileged, else an error from
 * suser().  Takes and drops its own credential reference.
 */
int
proc_suser(proc_t p)
{
	kauth_cred_t my_cred;
	int error;

	my_cred = kauth_cred_proc_ref(p);
	error = suser(my_cred, &p->p_acflag);
	kauth_cred_unref(&my_cred);
	return(error);
}
986
/*
 * Return the Mach task backing `proc' (no reference taken).
 */
task_t
proc_task(proc_t proc)
{
	return (task_t)proc->task;
}
992
/*
 * Obtain the first thread in a process
 *
 * XXX This is a bad thing to do; it exists predominantly to support the
 * XXX use of proc_t's in places that should really be using
 * XXX thread_t's instead.  This maintains historical behaviour, but really
 * XXX needs an audit of the context (proxy vs. not) to clean up.
 */
thread_t
proc_thread(proc_t proc)
{
	/* first uthread on the proc's list; no lock is taken here */
	uthread_t uth = TAILQ_FIRST(&proc->p_uthlist);

	if (uth != NULL)
		return(uth->uu_context.vc_thread);

	return(NULL);
}
1011
/*
 * Return p's credential pointer (no reference taken; callers that
 * need a stable credential should use kauth_cred_proc_ref()).
 */
kauth_cred_t
proc_ucred(proc_t p)
{
	return(p->p_ucred);
}
1017
1018struct uthread *
1019current_uthread()
1020{
1021 thread_t th = current_thread();
1022
1023 return((struct uthread *)get_bsdthread_info(th));
1024}
1025
1026
/*
 * Return 1 when p is a 64-bit process (see IS_64BIT_PROCESS), else 0.
 */
int
proc_is64bit(proc_t p)
{
	return(IS_64BIT_PROCESS(p));
}
1032
/*
 * Return p's pid-reuse generation counter (p_idversion).
 */
int
proc_pidversion(proc_t p)
{
	return(p->p_idversion);
}
1038
/*
 * Return the persona id associated with p (via persona_id_from_proc).
 */
uint32_t
proc_persona_id(proc_t p)
{
	return (uint32_t)persona_id_from_proc(p);
}
1044
/*
 * Return p's cached uid (p_uid).
 */
uint32_t
proc_getuid(proc_t p)
{
	return(p->p_uid);
}
1050
/*
 * Return p's cached gid (p_gid).
 */
uint32_t
proc_getgid(proc_t p)
{
	return(p->p_gid);
}
1056
/*
 * Return p's 64-bit unique id (never reused, unlike pids).
 */
uint64_t
proc_uniqueid(proc_t p)
{
	return(p->p_uniqueid);
}
1062
/*
 * Return the unique id of p's parent at the time p was created.
 */
uint64_t
proc_puniqueid(proc_t p)
{
	return(p->p_puniqueid);
}
1068
/*
 * Fill `ids' with the coalition ids of p's task, or zeros when
 * coalitions are not configured.
 */
void
proc_coalitionids(__unused proc_t p, __unused uint64_t ids[COALITION_NUM_TYPES])
{
#if CONFIG_COALITIONS
	task_coalition_ids(p->task, ids);
#else
	memset(ids, 0, sizeof(uint64_t [COALITION_NUM_TYPES]));
#endif
	return;
}
1079
/*
 * Return p's was_throttled counter.
 */
uint64_t
proc_was_throttled(proc_t p)
{
	return (p->was_throttled);
}
1085
/*
 * Return p's did_throttle counter.
 */
uint64_t
proc_did_throttle(proc_t p)
{
	return (p->did_throttle);
}
1091
/*
 * Copy the code-directory hash of p's executable into `cdhash'.
 * Returns the vn_getcdhash() result (0 on success).
 */
int
proc_getcdhash(proc_t p, unsigned char *cdhash)
{
	return vn_getcdhash(p->p_textvp, p->p_textoff, cdhash);
}
1097
/*
 * Copy p's executable UUID into `uuidbuf'.  Does nothing when the
 * destination is smaller than the UUID.
 */
void
proc_getexecutableuuid(proc_t p, unsigned char *uuidbuf, unsigned long size)
{
	if (size >= sizeof(p->p_uuid)) {
		memcpy(uuidbuf, p->p_uuid, sizeof(p->p_uuid));
	}
}
1105
1106/* Return vnode for executable with an iocount. Must be released with vnode_put() */
1107vnode_t
1108proc_getexecutablevnode(proc_t p)
1109{
1110 vnode_t tvp = p->p_textvp;
1111
1112 if ( tvp != NULLVP) {
1113 if (vnode_getwithref(tvp) == 0) {
1114 return tvp;
1115 }
1116 }
1117
1118 return NULLVP;
1119}
1120
1121
1122void
1123bsd_set_dependency_capable(task_t task)
1124{
1125 proc_t p = get_bsdtask_info(task);
1126
1127 if (p) {
1128 OSBitOrAtomic(P_DEPENDENCY_CAPABLE, &p->p_flag);
1129 }
1130}
1131
1132
1133int
1134IS_64BIT_PROCESS(proc_t p)
1135{
1136 if (p && (p->p_flag & P_LP64))
1137 return(1);
1138 else
1139 return(0);
1140}
1141
/*
 * Locate a process by number.
 *
 * Caller must hold the proc list lock.  pid 0 maps to kernproc.
 * Returns the proc without taking a reference, or NULL.  DEBUG
 * builds also verify the pid hash has no duplicate entries.
 */
proc_t
pfind_locked(pid_t pid)
{
	proc_t p;
#if DEBUG
	proc_t q;
#endif

	if (!pid)
		return (kernproc);

	for (p = PIDHASH(pid)->lh_first; p != 0; p = p->p_hash.le_next) {
		if (p->p_pid == pid) {
#if DEBUG
			for (q = p->p_hash.le_next; q != 0; q = q->p_hash.le_next) {
				if ((p !=q) && (q->p_pid == pid))
					panic("two procs with same pid %p:%p:%d:%d\n", p, q, p->p_pid, q->p_pid);
			}
#endif
			return (p);
		}
	}
	return (NULL);
}
1169
/*
 * Locate a zombie by PID
 *
 * Scans the zombproc list under the proc list lock; returns the
 * proc (without a reference) or NULL.
 */
__private_extern__ proc_t
pzfind(pid_t pid)
{
	proc_t p;


	proc_list_lock();

	for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next)
		if (p->p_pid == pid)
			break;

	proc_list_unlock();

	return (p);
}
1189
/*
 * Locate a process group by number
 *
 * Returns the pgrp with its refcount bumped, or PGRP_NULL when the
 * group is absent or being terminated.  Release with pg_rele().
 */
struct pgrp *
pgfind(pid_t pgid)
{
	struct pgrp * pgrp;

	proc_list_lock();
	pgrp = pgfind_internal(pgid);
	if ((pgrp == NULL) || ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) != 0))
		pgrp = PGRP_NULL;
	else
		pgrp->pg_refcount++;
	proc_list_unlock();
	return(pgrp);
}
1208
1209
1210
1211struct pgrp *
1212pgfind_internal(pid_t pgid)
1213{
1214 struct pgrp *pgrp;
1215
1216 for (pgrp = PGRPHASH(pgid)->lh_first; pgrp != 0; pgrp = pgrp->pg_hash.le_next)
1217 if (pgrp->pg_id == pgid)
1218 return (pgrp);
1219 return (NULL);
1220}
1221
1222void
1223pg_rele(struct pgrp * pgrp)
1224{
1225 if(pgrp == PGRP_NULL)
1226 return;
1227 pg_rele_dropref(pgrp);
1228}
1229
/*
 * Drop a process-group reference.  If this is the last reference
 * and the group is marked for termination, tear it down via
 * pgdelete_dropref() (which consumes the reference).
 */
void
pg_rele_dropref(struct pgrp * pgrp)
{
	proc_list_lock();
	if ((pgrp->pg_refcount == 1) && ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) == PGRP_FLAG_TERMINATE)) {
		proc_list_unlock();
		pgdelete_dropref(pgrp);
		return;
	}

	pgrp->pg_refcount--;
	proc_list_unlock();
}
1243
1244struct session *
1245session_find_internal(pid_t sessid)
1246{
1247 struct session *sess;
1248
1249 for (sess = SESSHASH(sessid)->lh_first; sess != 0; sess = sess->s_hash.le_next)
1250 if (sess->s_sid == sessid)
1251 return (sess);
1252 return (NULL);
1253}
1254
1255
/*
 * Make a new process ready to become a useful member of society by making it
 * visible in all the right places and initialize its own lists to empty.
 *
 * Parameters:	parent			The parent of the process to insert
 *		child			The child process to insert
 *
 * Returns:	(void)
 *
 * Notes:	Insert a child process into the parents process group, assign
 *		the child the parent process pointer and PPID of the parent,
 *		place it on the parents p_children list as a sibling,
 *		initialize its own child list, place it in the allproc list,
 *		insert it in the proper hash bucket, and initialize its
 *		event list.
 */
void
pinsertchild(proc_t parent, proc_t child)
{
	struct pgrp * pg;

	LIST_INIT(&child->p_children);
	TAILQ_INIT(&child->p_evlist);
	child->p_pptr = parent;
	child->p_ppid = parent->p_pid;
	child->p_puniqueid = parent->p_uniqueid;

	/* join the parent's process group (takes its own pgrp ref) */
	pg = proc_pgrp(parent);
	pgrp_add(pg, parent, child);
	pg_rele(pg);

	proc_list_lock();

#if CONFIG_MEMORYSTATUS
	memorystatus_add(child, TRUE);
#endif

	parent->p_childrencnt++;
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);

	LIST_INSERT_HEAD(&allproc, child, p_list);
	/* mark the completion of proc creation */
	child->p_listflag &= ~P_LIST_INCREATE;

	proc_list_unlock();
}
1302
1303/*
1304 * Move p to a new or existing process group (and session)
1305 *
1306 * Returns: 0 Success
1307 * ESRCH No such process
1308 */
1309int
1310enterpgrp(proc_t p, pid_t pgid, int mksess)
1311{
1312 struct pgrp *pgrp;
1313 struct pgrp *mypgrp;
1314 struct session * procsp;
1315
1316 pgrp = pgfind(pgid);
1317 mypgrp = proc_pgrp(p);
1318 procsp = proc_session(p);
1319
1320#if DIAGNOSTIC
1321 if (pgrp != NULL && mksess) /* firewalls */
1322 panic("enterpgrp: setsid into non-empty pgrp");
1323 if (SESS_LEADER(p, procsp))
1324 panic("enterpgrp: session leader attempted setpgrp");
1325#endif
1326 if (pgrp == PGRP_NULL) {
1327 pid_t savepid = p->p_pid;
1328 proc_t np = PROC_NULL;
1329 /*
1330 * new process group
1331 */
1332#if DIAGNOSTIC
1333 if (p->p_pid != pgid)
1334 panic("enterpgrp: new pgrp and pid != pgid");
1335#endif
1336 MALLOC_ZONE(pgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP,
1337 M_WAITOK);
1338 if (pgrp == NULL)
1339 panic("enterpgrp: M_PGRP zone depleted");
1340 if ((np = proc_find(savepid)) == NULL || np != p) {
1341 if (np != PROC_NULL)
1342 proc_rele(np);
1343 if (mypgrp != PGRP_NULL)
1344 pg_rele(mypgrp);
1345 if (procsp != SESSION_NULL)
1346 session_rele(procsp);
1347 FREE_ZONE(pgrp, sizeof(struct pgrp), M_PGRP);
1348 return (ESRCH);
1349 }
1350 proc_rele(np);
1351 if (mksess) {
1352 struct session *sess;
1353
1354 /*
1355 * new session
1356 */
1357 MALLOC_ZONE(sess, struct session *,
1358 sizeof(struct session), M_SESSION, M_WAITOK);
1359 if (sess == NULL)
1360 panic("enterpgrp: M_SESSION zone depleted");
1361 sess->s_leader = p;
1362 sess->s_sid = p->p_pid;
1363 sess->s_count = 1;
1364 sess->s_ttyvp = NULL;
1365 sess->s_ttyp = TTY_NULL;
1366 sess->s_flags = 0;
1367 sess->s_listflags = 0;
1368 sess->s_ttypgrpid = NO_PID;
1369#if CONFIG_FINE_LOCK_GROUPS
1370 lck_mtx_init(&sess->s_mlock, proc_mlock_grp, proc_lck_attr);
1371#else
1372 lck_mtx_init(&sess->s_mlock, proc_lck_grp, proc_lck_attr);
1373#endif
1374 bcopy(procsp->s_login, sess->s_login,
1375 sizeof(sess->s_login));
1376 OSBitAndAtomic(~((uint32_t)P_CONTROLT), &p->p_flag);
1377 proc_list_lock();
1378 LIST_INSERT_HEAD(SESSHASH(sess->s_sid), sess, s_hash);
1379 proc_list_unlock();
1380 pgrp->pg_session = sess;
1381#if DIAGNOSTIC
1382 if (p != current_proc())
1383 panic("enterpgrp: mksession and p != curproc");
1384#endif
1385 } else {
1386 proc_list_lock();
1387 pgrp->pg_session = procsp;
1388
1389 if ((pgrp->pg_session->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
1390 panic("enterpgrp: providing ref to terminating session ");
1391 pgrp->pg_session->s_count++;
1392 proc_list_unlock();
1393 }
1394 pgrp->pg_id = pgid;
1395#if CONFIG_FINE_LOCK_GROUPS
1396 lck_mtx_init(&pgrp->pg_mlock, proc_mlock_grp, proc_lck_attr);
1397#else
1398 lck_mtx_init(&pgrp->pg_mlock, proc_lck_grp, proc_lck_attr);
1399#endif
1400 LIST_INIT(&pgrp->pg_members);
1401 pgrp->pg_membercnt = 0;
1402 pgrp->pg_jobc = 0;
1403 proc_list_lock();
1404 pgrp->pg_refcount = 1;
1405 pgrp->pg_listflags = 0;
1406 LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
1407 proc_list_unlock();
1408 } else if (pgrp == mypgrp) {
1409 pg_rele(pgrp);
1410 if (mypgrp != NULL)
1411 pg_rele(mypgrp);
1412 if (procsp != SESSION_NULL)
1413 session_rele(procsp);
1414 return (0);
1415 }
1416
1417 if (procsp != SESSION_NULL)
1418 session_rele(procsp);
1419 /*
1420 * Adjust eligibility of affected pgrps to participate in job control.
1421 * Increment eligibility counts before decrementing, otherwise we
1422 * could reach 0 spuriously during the first call.
1423 */
1424 fixjobc(p, pgrp, 1);
1425 fixjobc(p, mypgrp, 0);
1426
1427 if(mypgrp != PGRP_NULL)
1428 pg_rele(mypgrp);
1429 pgrp_replace(p, pgrp);
1430 pg_rele(pgrp);
1431
1432 return(0);
1433}
1434
1435/*
1436 * remove process from process group
1437 */
1438int
1439leavepgrp(proc_t p)
1440{
1441
1442 pgrp_remove(p);
1443 return (0);
1444}
1445
1446/*
1447 * delete a process group
1448 */
1449static void
1450pgdelete_dropref(struct pgrp *pgrp)
1451{
1452 struct tty *ttyp;
1453 int emptypgrp = 1;
1454 struct session *sessp;
1455
1456
1457 pgrp_lock(pgrp);
1458 if (pgrp->pg_membercnt != 0) {
1459 emptypgrp = 0;
1460 }
1461 pgrp_unlock(pgrp);
1462
1463 proc_list_lock();
1464 pgrp->pg_refcount--;
1465 if ((emptypgrp == 0) || (pgrp->pg_membercnt != 0)) {
1466 proc_list_unlock();
1467 return;
1468 }
1469
1470 pgrp->pg_listflags |= PGRP_FLAG_TERMINATE;
1471
1472 if (pgrp->pg_refcount > 0) {
1473 proc_list_unlock();
1474 return;
1475 }
1476
1477 pgrp->pg_listflags |= PGRP_FLAG_DEAD;
1478 LIST_REMOVE(pgrp, pg_hash);
1479
1480 proc_list_unlock();
1481
1482 ttyp = SESSION_TP(pgrp->pg_session);
1483 if (ttyp != TTY_NULL) {
1484 if (ttyp->t_pgrp == pgrp) {
1485 tty_lock(ttyp);
1486 /* Re-check after acquiring the lock */
1487 if (ttyp->t_pgrp == pgrp) {
1488 ttyp->t_pgrp = NULL;
1489 pgrp->pg_session->s_ttypgrpid = NO_PID;
1490 }
1491 tty_unlock(ttyp);
1492 }
1493 }
1494
1495 proc_list_lock();
1496
1497 sessp = pgrp->pg_session;
1498 if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
1499 panic("pg_deleteref: manipulating refs of already terminating session");
1500 if (--sessp->s_count == 0) {
1501 if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
1502 panic("pg_deleteref: terminating already terminated session");
1503 sessp->s_listflags |= S_LIST_TERM;
1504 ttyp = SESSION_TP(sessp);
1505 LIST_REMOVE(sessp, s_hash);
1506 proc_list_unlock();
1507 if (ttyp != TTY_NULL) {
1508 tty_lock(ttyp);
1509 if (ttyp->t_session == sessp)
1510 ttyp->t_session = NULL;
1511 tty_unlock(ttyp);
1512 }
1513 proc_list_lock();
1514 sessp->s_listflags |= S_LIST_DEAD;
1515 if (sessp->s_count != 0)
1516 panic("pg_deleteref: freeing session in use");
1517 proc_list_unlock();
1518#if CONFIG_FINE_LOCK_GROUPS
1519 lck_mtx_destroy(&sessp->s_mlock, proc_mlock_grp);
1520#else
1521 lck_mtx_destroy(&sessp->s_mlock, proc_lck_grp);
1522#endif
1523 FREE_ZONE(sessp, sizeof(struct session), M_SESSION);
1524 } else
1525 proc_list_unlock();
1526#if CONFIG_FINE_LOCK_GROUPS
1527 lck_mtx_destroy(&pgrp->pg_mlock, proc_mlock_grp);
1528#else
1529 lck_mtx_destroy(&pgrp->pg_mlock, proc_lck_grp);
1530#endif
1531 FREE_ZONE(pgrp, sizeof(*pgrp), M_PGRP);
1532}
1533
1534
1535/*
1536 * Adjust pgrp jobc counters when specified process changes process group.
1537 * We count the number of processes in each process group that "qualify"
1538 * the group for terminal job control (those with a parent in a different
1539 * process group of the same session). If that count reaches zero, the
1540 * process group becomes orphaned. Check both the specified process'
1541 * process group and that of its children.
1542 * entering == 0 => p is leaving specified group.
1543 * entering == 1 => p is entering specified group.
1544 */
1545int
1546fixjob_callback(proc_t p, void * arg)
1547{
1548 struct fixjob_iterargs *fp;
1549 struct pgrp * pg, *hispg;
1550 struct session * mysession, *hissess;
1551 int entering;
1552
1553 fp = (struct fixjob_iterargs *)arg;
1554 pg = fp->pg;
1555 mysession = fp->mysession;
1556 entering = fp->entering;
1557
1558 hispg = proc_pgrp(p);
1559 hissess = proc_session(p);
1560
1561 if ((hispg != pg) &&
1562 (hissess == mysession)) {
1563 pgrp_lock(hispg);
1564 if (entering) {
1565 hispg->pg_jobc++;
1566 pgrp_unlock(hispg);
1567 } else if (--hispg->pg_jobc == 0) {
1568 pgrp_unlock(hispg);
1569 orphanpg(hispg);
1570 } else
1571 pgrp_unlock(hispg);
1572 }
1573 if (hissess != SESSION_NULL)
1574 session_rele(hissess);
1575 if (hispg != PGRP_NULL)
1576 pg_rele(hispg);
1577
1578 return(PROC_RETURNED);
1579}
1580
/*
 * fixjobc
 *
 * Adjust pg_jobc for p's own (target) group based on p's parent, then for
 * the groups of all of p's children; see the block comment above
 * fixjob_callback() for the qualifying rules.
 */
void
fixjobc(proc_t p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp = PGRP_NULL;
	struct session *hissess = SESSION_NULL;
	struct session *mysession = pgrp->pg_session;
	proc_t parent;
	struct fixjob_iterargs fjarg;
	boolean_t proc_parent_self;

	/*
	 * Check if p's parent is current proc, if yes then no need to take
	 * a ref; calling proc_parent with current proc as parent may
	 * deadlock if current proc is exiting.
	 */
	proc_parent_self = proc_parent_is_currentproc(p);
	if (proc_parent_self)
		parent = current_proc();
	else
		parent = proc_parent(p);

	if (parent != PROC_NULL) {
		hispgrp = proc_pgrp(parent);
		hissess = proc_session(parent);
		if (!proc_parent_self)
			proc_rele(parent);
	}


	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	if ((hispgrp != pgrp) &&
	    (hissess == mysession)) {
		pgrp_lock(pgrp);
		if (entering) {
			pgrp->pg_jobc++;
			pgrp_unlock(pgrp);
		}else if (--pgrp->pg_jobc == 0) {
			/* last qualifying member left: orphan the group */
			pgrp_unlock(pgrp);
			orphanpg(pgrp);
		} else
			pgrp_unlock(pgrp);
	}

	if (hissess != SESSION_NULL)
		session_rele(hissess);
	if (hispgrp != PGRP_NULL)
		pg_rele(hispgrp);

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	fjarg.pg = pgrp;
	fjarg.mysession = mysession;
	fjarg.entering = entering;
	proc_childrenwalk(p, fixjob_callback, &fjarg);
}
1642
1643/*
1644 * A process group has become orphaned; if there are any stopped processes in
1645 * the group, hang-up all process in that group.
1646 */
1647static void
1648orphanpg(struct pgrp *pgrp)
1649{
1650 pid_t *pid_list;
1651 proc_t p;
1652 vm_size_t pid_list_size = 0;
1653 vm_size_t pid_list_size_needed = 0;
1654 int pid_count = 0;
1655 int pid_count_available = 0;
1656
1657 assert(pgrp != NULL);
1658
1659 /* allocate outside of the pgrp_lock */
1660 for (;;) {
1661 pgrp_lock(pgrp);
1662
1663 boolean_t should_iterate = FALSE;
1664 pid_count_available = 0;
1665
1666 PGMEMBERS_FOREACH(pgrp, p) {
1667 pid_count_available++;
1668
1669 if (p->p_stat == SSTOP) {
1670 should_iterate = TRUE;
1671 }
1672 }
1673
1674 if (pid_count_available == 0 || !should_iterate) {
1675 pgrp_unlock(pgrp);
1676 return;
1677 }
1678
1679 pid_list_size_needed = pid_count_available * sizeof(pid_t);
1680 if (pid_list_size >= pid_list_size_needed) {
1681 break;
1682 }
1683 pgrp_unlock(pgrp);
1684
1685 if (pid_list_size != 0) {
1686 kfree(pid_list, pid_list_size);
1687 }
1688 pid_list = kalloc(pid_list_size_needed);
1689 if (!pid_list) {
1690 return;
1691 }
1692 pid_list_size = pid_list_size_needed;
1693 }
1694
1695 /* no orphaned processes */
1696 if (pid_list_size == 0) {
1697 pgrp_unlock(pgrp);
1698 return;
1699 }
1700
1701 PGMEMBERS_FOREACH(pgrp, p) {
1702 pid_list[pid_count++] = proc_pid(p);
1703 if (pid_count >= pid_count_available) {
1704 break;
1705 }
1706 }
1707 pgrp_unlock(pgrp);
1708
1709 if (pid_count == 0) {
1710 goto out;
1711 }
1712
1713 for (int i = 0; i < pid_count; i++) {
1714 /* do not handle kernproc */
1715 if (pid_list[i] == 0) {
1716 continue;
1717 }
1718 p = proc_find(pid_list[i]);
1719 if (!p) {
1720 continue;
1721 }
1722
1723 proc_transwait(p, 0);
1724 pt_setrunnable(p);
1725 psignal(p, SIGHUP);
1726 psignal(p, SIGCONT);
1727 proc_rele(p);
1728 }
1729
1730out:
1731 kfree(pid_list, pid_list_size);
1732 return;
1733}
1734
1735int
1736proc_is_classic(proc_t p __unused)
1737{
1738 return (0);
1739}
1740
1741/* XXX Why does this function exist? Need to kill it off... */
1742proc_t
1743current_proc_EXTERNAL(void)
1744{
1745 return (current_proc());
1746}
1747
1748int
1749proc_is_forcing_hfs_case_sensitivity(proc_t p)
1750{
1751 return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY) ? 1 : 0;
1752}
1753
1754#if CONFIG_COREDUMP
1755/*
1756 * proc_core_name(name, uid, pid)
1757 * Expand the name described in corefilename, using name, uid, and pid.
1758 * corefilename is a printf-like string, with three format specifiers:
1759 * %N name of process ("name")
1760 * %P process id (pid)
1761 * %U user id (uid)
1762 * For example, "%N.core" is the default; they can be disabled completely
1763 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
1764 * This is controlled by the sysctl variable kern.corefile (see above).
1765 */
1766__private_extern__ int
1767proc_core_name(const char *name, uid_t uid, pid_t pid, char *cf_name,
1768 size_t cf_name_len)
1769{
1770 const char *format, *appendstr;
1771 char id_buf[11]; /* Buffer for pid/uid -- max 4B */
1772 size_t i, l, n;
1773
1774 if (cf_name == NULL)
1775 goto toolong;
1776
1777 format = corefilename;
1778 for (i = 0, n = 0; n < cf_name_len && format[i]; i++) {
1779 switch (format[i]) {
1780 case '%': /* Format character */
1781 i++;
1782 switch (format[i]) {
1783 case '%':
1784 appendstr = "%";
1785 break;
1786 case 'N': /* process name */
1787 appendstr = name;
1788 break;
1789 case 'P': /* process id */
1790 snprintf(id_buf, sizeof(id_buf), "%u", pid);
1791 appendstr = id_buf;
1792 break;
1793 case 'U': /* user id */
1794 snprintf(id_buf, sizeof(id_buf), "%u", uid);
1795 appendstr = id_buf;
1796 break;
1797 case '\0': /* format string ended in % symbol */
1798 goto endofstring;
1799 default:
1800 appendstr = "";
1801 log(LOG_ERR,
1802 "Unknown format character %c in `%s'\n",
1803 format[i], format);
1804 }
1805 l = strlen(appendstr);
1806 if ((n + l) >= cf_name_len)
1807 goto toolong;
1808 bcopy(appendstr, cf_name + n, l);
1809 n += l;
1810 break;
1811 default:
1812 cf_name[n++] = format[i];
1813 }
1814 }
1815 if (format[i] != '\0')
1816 goto toolong;
1817 return (0);
1818toolong:
1819 log(LOG_ERR, "pid %ld (%s), uid (%u): corename is too long\n",
1820 (long)pid, name, (uint32_t)uid);
1821 return (1);
1822endofstring:
1823 log(LOG_ERR, "pid %ld (%s), uid (%u): unexpected end of string after %% token\n",
1824 (long)pid, name, (uint32_t)uid);
1825 return (1);
1826}
1827#endif /* CONFIG_COREDUMP */
1828
1829/* Code Signing related routines */
1830
/*
 * csops
 *
 * Code-signing operations syscall: thin shim over csops_internal() with
 * no user audit token supplied.
 */
int
csops(__unused proc_t p, struct csops_args *uap, __unused int32_t *retval)
{
	return(csops_internal(uap->pid, uap->ops, uap->useraddr,
		uap->usersize, USER_ADDR_NULL));
}
1837
/*
 * csops_audittoken
 *
 * Variant of csops() that requires an audit token, which csops_internal()
 * validates against the target process' pid and id-version.
 */
int
csops_audittoken(__unused proc_t p, struct csops_audittoken_args *uap, __unused int32_t *retval)
{
	if (uap->uaudittoken == USER_ADDR_NULL)
		return(EINVAL);
	return(csops_internal(uap->pid, uap->ops, uap->useraddr,
		uap->usersize, uap->uaudittoken));
}
1846
/*
 * csops_copy_token
 *
 * Copy a code-signing blob of `length` bytes at `start` out to the user
 * buffer at uaddr (usize bytes).  If the buffer is too small for the blob,
 * only an 8-byte fake header whose second word encodes the needed length
 * (network byte order) is copied out and ERANGE is returned so userland
 * can retry with a larger buffer.  A NULL blob copies out a zero header.
 */
static int
csops_copy_token(void *start, size_t length, user_size_t usize, user_addr_t uaddr)
{
	char fakeheader[8] = { 0 };
	int error;

	if (usize < sizeof(fakeheader))
		return ERANGE;

	/* if no blob, fill in zero header */
	if (NULL == start) {
		start = fakeheader;
		length = sizeof(fakeheader);
	} else if (usize < length) {
		/* ... if input too short, copy out length of entitlement */
		uint32_t length32 = htonl((uint32_t)length);
		memcpy(&fakeheader[4], &length32, sizeof(length32));

		error = copyout(fakeheader, uaddr, sizeof(fakeheader));
		if (error == 0)
			return ERANGE; /* input buffer too short, ERANGE signals that */
		return error;
	}
	return copyout(start, uaddr, length);
}
1872
/*
 * csops_internal
 *
 * Worker for csops()/csops_audittoken().  Resolves the target process
 * (pid 0 means "self"), enforces the root requirement for privileged ops,
 * optionally validates the caller-supplied audit token against the target,
 * runs MAC policy checks, then dispatches on `ops`.
 *
 * Note: most cases fall out through `out:` which drops the proc ref; the
 * CS_OPS_PIDOFFSET and CS_OPS_CDHASH cases drop it and return directly.
 */
static int
csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaudittoken)
{
	size_t usize = (size_t)CAST_DOWN(size_t, usersize);
	proc_t pt;
	int forself;
	int error;
	vnode_t tvp;
	off_t toff;
	unsigned char cdhash[SHA1_RESULTLEN];
	audit_token_t token;
	unsigned int upid=0, uidversion = 0;

	forself = error = 0;

	if (pid == 0)
		pid = proc_selfpid();
	if (pid == proc_selfpid())
		forself = 1;


	/* read-only queries are unprivileged; everything else needs root */
	switch (ops) {
		case CS_OPS_STATUS:
		case CS_OPS_CDHASH:
		case CS_OPS_PIDOFFSET:
		case CS_OPS_ENTITLEMENTS_BLOB:
		case CS_OPS_IDENTITY:
		case CS_OPS_BLOB:
			break;	/* not restricted to root */
		default:
			if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE)
				return(EPERM);
			break;
	}

	pt = proc_find(pid);
	if (pt == PROC_NULL)
		return(ESRCH);

	upid = pt->p_pid;
	uidversion = pt->p_idversion;
	if (uaudittoken != USER_ADDR_NULL) {

		error = copyin(uaudittoken, &token, sizeof(audit_token_t));
		if (error != 0)
			goto out;
		/* verify the audit token pid/idversion matches with proc */
		if ((token.val[5] != upid) || (token.val[7] != uidversion)) {
			error = ESRCH;
			goto out;
		}
	}

#if CONFIG_MACF
	/* mutating ops take the "set" MAC check, the rest take "get" */
	switch (ops) {
		case CS_OPS_MARKINVALID:
		case CS_OPS_MARKHARD:
		case CS_OPS_MARKKILL:
		case CS_OPS_MARKRESTRICT:
		case CS_OPS_SET_STATUS:
		case CS_OPS_CLEARINSTALLER:
			if ((error = mac_proc_check_set_cs_info(current_proc(), pt, ops)))
				goto out;
			break;
		default:
			if ((error = mac_proc_check_get_cs_info(current_proc(), pt, ops)))
				goto out;
	}
#endif

	switch (ops) {

	case CS_OPS_STATUS: {
		/* report the target's code-signing flags (plus derived bits) */
		uint32_t retflags;

		proc_lock(pt);
		retflags = pt->p_csflags;
		if (cs_enforcement(pt))
			retflags |= CS_ENFORCEMENT;
		if (csproc_get_platform_binary(pt))
			retflags |= CS_PLATFORM_BINARY;
		if (csproc_get_platform_path(pt))
			retflags |= CS_PLATFORM_PATH;
		proc_unlock(pt);

		if (uaddr != USER_ADDR_NULL)
			error = copyout(&retflags, uaddr, sizeof(uint32_t));
		break;
	}
	case CS_OPS_MARKINVALID:
		/* invalidate the signature; kill the process if CS_KILL is set */
		proc_lock(pt);
		if ((pt->p_csflags & CS_VALID) == CS_VALID) {	/* is currently valid */
			pt->p_csflags &= ~CS_VALID;	/* set invalid */
			if ((pt->p_csflags & CS_KILL) == CS_KILL) {
				pt->p_csflags |= CS_KILLED;
				proc_unlock(pt);
				if (cs_debug) {
					printf("CODE SIGNING: marked invalid by pid %d: "
					       "p=%d[%s] honoring CS_KILL, final status 0x%x\n",
					       proc_selfpid(), pt->p_pid, pt->p_comm, pt->p_csflags);
				}
				psignal(pt, SIGKILL);
			} else
				proc_unlock(pt);
		} else
			proc_unlock(pt);

		break;

	case CS_OPS_MARKHARD:
		proc_lock(pt);
		pt->p_csflags |= CS_HARD;
		if ((pt->p_csflags & CS_VALID) == 0) {
			/* @@@ allow? reject? kill? @@@ */
			proc_unlock(pt);
			error = EINVAL;
			goto out;
		} else
			proc_unlock(pt);
		break;

	case CS_OPS_MARKKILL:
		proc_lock(pt);
		pt->p_csflags |= CS_KILL;
		if ((pt->p_csflags & CS_VALID) == 0) {
			proc_unlock(pt);
			psignal(pt, SIGKILL);
		} else
			proc_unlock(pt);
		break;

	case CS_OPS_PIDOFFSET:
		/* note: drops the proc ref and returns without passing `out:` */
		toff = pt->p_textoff;
		proc_rele(pt);
		error = copyout(&toff, uaddr, sizeof(toff));
		return(error);

	case CS_OPS_CDHASH:

		/* pt already holds a reference on its p_textvp */
		tvp = pt->p_textvp;
		toff = pt->p_textoff;

		if (tvp == NULLVP || usize != SHA1_RESULTLEN) {
			proc_rele(pt);
			return EINVAL;
		}

		error = vn_getcdhash(tvp, toff, cdhash);
		proc_rele(pt);

		if (error == 0) {
			error = copyout(cdhash, uaddr, sizeof (cdhash));
		}

		return error;

	case CS_OPS_ENTITLEMENTS_BLOB: {
		void *start;
		size_t length;

		proc_lock(pt);

		if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
			proc_unlock(pt);
			error = EINVAL;
			break;
		}

		error = cs_entitlements_blob_get(pt, &start, &length);
		proc_unlock(pt);
		if (error)
			break;

		error = csops_copy_token(start, length, usize, uaddr);
		break;
	}
	case CS_OPS_MARKRESTRICT:
		proc_lock(pt);
		pt->p_csflags |= CS_RESTRICT;
		proc_unlock(pt);
		break;

	case CS_OPS_SET_STATUS: {
		uint32_t flags;

		if (usize < sizeof(flags)) {
			error = ERANGE;
			break;
		}

		error = copyin(uaddr, &flags, sizeof(flags));
		if (error)
			break;

		/* only allow setting a subset of all code sign flags */
		flags &=
		    CS_HARD | CS_EXEC_SET_HARD |
		    CS_KILL | CS_EXEC_SET_KILL |
		    CS_RESTRICT |
		    CS_REQUIRE_LV |
		    CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT;

		proc_lock(pt);
		if (pt->p_csflags & CS_VALID)
			pt->p_csflags |= flags;
		else
			error = EINVAL;
		proc_unlock(pt);

		break;
	}
	case CS_OPS_BLOB: {
		void *start;
		size_t length;

		proc_lock(pt);
		if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
			proc_unlock(pt);
			error = EINVAL;
			break;
		}

		error = cs_blob_get(pt, &start, &length);
		proc_unlock(pt);
		if (error)
			break;

		error = csops_copy_token(start, length, usize, uaddr);
		break;
	}
	case CS_OPS_IDENTITY: {
		const char *identity;
		uint8_t fakeheader[8];
		uint32_t idlen;
		size_t length;

		/*
		 * Make identity have a blob header to make it
		 * easier on userland to guess the identity
		 * length.
		 */
		if (usize < sizeof(fakeheader)) {
			error = ERANGE;
			break;
		}
		memset(fakeheader, 0, sizeof(fakeheader));

		proc_lock(pt);
		if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
			proc_unlock(pt);
			error = EINVAL;
			break;
		}

		identity = cs_identity_get(pt);
		proc_unlock(pt);
		if (identity == NULL) {
			error = ENOENT;
			break;
		}

		length = strlen(identity) + 1; /* include NUL */
		idlen = htonl(length + sizeof(fakeheader));
		memcpy(&fakeheader[4], &idlen, sizeof(idlen));

		error = copyout(fakeheader, uaddr, sizeof(fakeheader));
		if (error)
			break;

		/* copy the identity string after the header, if it fits */
		if (usize < sizeof(fakeheader) + length)
			error = ERANGE;
		else if (usize > sizeof(fakeheader))
			error = copyout(identity, uaddr + sizeof(fakeheader), length);

		break;
	}

	case CS_OPS_CLEARINSTALLER:
		proc_lock(pt);
		pt->p_csflags &= ~(CS_INSTALLER | CS_EXEC_SET_INSTALLER);
		proc_unlock(pt);
		break;

	default:
		error = EINVAL;
		break;
	}
out:
	proc_rele(pt);
	return(error);
}
2165
/*
 * proc_iterate
 *
 * Invoke callout on processes from allproc (PROC_ALLPROCLIST) and/or the
 * zombie list (PROC_ZOMBPROCLIST).  Pids are snapshotted under the proc
 * list lock (retrying the allocation until it is large enough), then each
 * process is re-looked-up without the lock held so the callout may block.
 * filterfn, if non-NULL, is applied under the list lock.  The callout's
 * return value controls ref handling and early termination (see the
 * PROC_RETURNED*/PROC_CLAIMED* cases below).
 *
 * Returns: 0 on success, 1 if the pid list could not be allocated.
 */
int
proc_iterate(
	unsigned int flags,
	proc_iterate_fn_t callout,
	void *arg,
	proc_iterate_fn_t filterfn,
	void *filterarg)
{
	pid_t *pid_list;
	vm_size_t pid_list_size = 0;
	vm_size_t pid_list_size_needed = 0;
	int pid_count = 0;
	int pid_count_available = 0;

	assert(callout != NULL);

	/* allocate outside of the proc_list_lock */
	for (;;) {
		proc_list_lock();

		pid_count_available = nprocs;
		assert(pid_count_available > 0);

		pid_list_size_needed = pid_count_available * sizeof(pid_t);
		if (pid_list_size >= pid_list_size_needed) {
			break;
		}
		proc_list_unlock();

		if (pid_list_size != 0) {
			kfree(pid_list, pid_list_size);
		}
		pid_list = kalloc(pid_list_size_needed);
		if (!pid_list) {
			return 1;
		}
		pid_list_size = pid_list_size_needed;
	}

	/* filter pids into pid_list */

	if (flags & PROC_ALLPROCLIST) {
		proc_t p;
		ALLPROC_FOREACH(p) {
			/* ignore processes that are being forked */
			if (p->p_stat == SIDL) {
				continue;
			}
			if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
				continue;
			}

			pid_list[pid_count++] = proc_pid(p);
			if (pid_count >= pid_count_available) {
				break;
			}
		}
	}

	if ((pid_count < pid_count_available) &&
		 (flags & PROC_ZOMBPROCLIST))
	{
		proc_t p;
		ZOMBPROC_FOREACH(p) {
			if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
				continue;
			}

			pid_list[pid_count++] = proc_pid(p);
			if (pid_count >= pid_count_available) {
				break;
			}
		}
	}

	proc_list_unlock();

	/* call callout on processes in the pid_list */

	for (int i = 0; i < pid_count; i++) {
		proc_t p = proc_find(pid_list[i]);
		if (p) {
			if ((flags & PROC_NOWAITTRANS) == 0) {
				proc_transwait(p, 0);
			}
			int callout_ret = callout(p, arg);

			switch (callout_ret) {
			case PROC_RETURNED_DONE:
				proc_rele(p);
				/* FALLTHROUGH */
			case PROC_CLAIMED_DONE:
				goto out;

			case PROC_RETURNED:
				proc_rele(p);
				/* FALLTHROUGH */
			case PROC_CLAIMED:
				break;

			default:
				panic("proc_iterate: callout returned %d for pid %d",
						callout_ret, pid_list[i]);
				break;
			}
		} else if (flags & PROC_ZOMBPROCLIST) {
			/* pid may have fully exited; try it as a zombie */
			p = proc_find_zombref(pid_list[i]);
			if (!p) {
				continue;
			}
			int callout_ret = callout(p, arg);

			switch (callout_ret) {
			case PROC_RETURNED_DONE:
				proc_drop_zombref(p);
				/* FALLTHROUGH */
			case PROC_CLAIMED_DONE:
				goto out;

			case PROC_RETURNED:
				proc_drop_zombref(p);
				/* FALLTHROUGH */
			case PROC_CLAIMED:
				break;

			default:
				panic("proc_iterate: callout returned %d for zombie pid %d",
						callout_ret, pid_list[i]);
				break;
			}
		}
	}

out:
	kfree(pid_list, pid_list_size);
	return 0;

}
2304
/*
 * proc_rebootscan
 *
 * Shutdown-time iterator: repeatedly walks allproc from the head, calling
 * callout on the first process accepted by filterfn, and restarts the walk
 * after every callout since the list can change while unlocked.
 */
void
proc_rebootscan(
	proc_iterate_fn_t callout,
	void *arg,
	proc_iterate_fn_t filterfn,
	void *filterarg)
{
	proc_t p;

	assert(callout != NULL);

	proc_shutdown_exitcount = 0;

restart_foreach:

	proc_list_lock();

	ALLPROC_FOREACH(p) {
		if ((filterfn != NULL) && filterfn(p, filterarg) == 0) {
			continue;
		}
		/* take a ref while still under the list lock */
		p = proc_ref_locked(p);
		if (!p) {
			continue;
		}

		proc_list_unlock();

		proc_transwait(p, 0);
		(void)callout(p, arg);
		proc_rele(p);

		/* list may have changed while unlocked: start over */
		goto restart_foreach;
	}

	proc_list_unlock();
}
2342
/*
 * proc_childrenwalk
 *
 * Invoke callout on each of parent's children (excluding ones still being
 * forked, SIDL).  Child pids are snapshotted under the proc list lock,
 * then each child is looked up unlocked so the callout may block.
 *
 * Returns: 0 on success, 1 if the pid list could not be allocated.
 */
int
proc_childrenwalk(
	proc_t parent,
	proc_iterate_fn_t callout,
	void *arg)
{
	pid_t *pid_list;
	vm_size_t pid_list_size = 0;
	vm_size_t pid_list_size_needed = 0;
	int pid_count = 0;
	int pid_count_available = 0;

	assert(parent != NULL);
	assert(callout != NULL);

	/* size and allocate the pid list outside of the list lock */
	for (;;) {
		proc_list_lock();

		pid_count_available = parent->p_childrencnt;
		if (pid_count_available == 0) {
			proc_list_unlock();
			return 0;
		}

		pid_list_size_needed = pid_count_available * sizeof(pid_t);
		if (pid_list_size >= pid_list_size_needed) {
			break;
		}
		proc_list_unlock();

		if (pid_list_size != 0) {
			kfree(pid_list, pid_list_size);
		}
		pid_list = kalloc(pid_list_size_needed);
		if (!pid_list) {
			return 1;
		}
		pid_list_size = pid_list_size_needed;
	}

	proc_t p;
	PCHILDREN_FOREACH(parent, p) {
		if (p->p_stat == SIDL) {
			continue;
		}

		pid_list[pid_count++] = proc_pid(p);
		if (pid_count >= pid_count_available) {
			break;
		}
	}

	proc_list_unlock();

	for (int i = 0; i < pid_count; i++) {
		p = proc_find(pid_list[i]);
		if (!p) {
			continue;
		}

		int callout_ret = callout(p, arg);

		switch (callout_ret) {
		case PROC_RETURNED_DONE:
			proc_rele(p);
			/* FALLTHROUGH */
		case PROC_CLAIMED_DONE:
			goto out;

		case PROC_RETURNED:
			proc_rele(p);
			/* FALLTHROUGH */
		case PROC_CLAIMED:
			break;
		default:
			panic("proc_childrenwalk: callout returned %d for pid %d",
					callout_ret, pid_list[i]);
			break;
		}
	}

out:
	kfree(pid_list, pid_list_size);
	return 0;
}
2428
2429int
2430pgrp_iterate(
2431 struct pgrp *pgrp,
2432 unsigned int flags,
2433 proc_iterate_fn_t callout,
2434 void * arg,
2435 proc_iterate_fn_t filterfn,
2436 void * filterarg)
2437{
2438 pid_t *pid_list;
2439 proc_t p;
2440 vm_size_t pid_list_size = 0;
2441 vm_size_t pid_list_size_needed = 0;
2442 int pid_count = 0;
2443 int pid_count_available = 0;
2444
2445 pid_t pgid;
2446
2447 assert(pgrp != NULL);
2448 assert(callout != NULL);
2449
2450 for (;;) {
2451 pgrp_lock(pgrp);
2452
2453 pid_count_available = pgrp->pg_membercnt;
2454 if (pid_count_available == 0) {
2455 pgrp_unlock(pgrp);
2456 return 0;
2457 }
2458
2459 pid_list_size_needed = pid_count_available * sizeof(pid_t);
2460 if (pid_list_size >= pid_list_size_needed) {
2461 break;
2462 }
2463 pgrp_unlock(pgrp);
2464
2465 if (pid_list_size != 0) {
2466 kfree(pid_list, pid_list_size);
2467 }
2468 pid_list = kalloc(pid_list_size_needed);
2469 if (!pid_list) {
2470 return 1;
2471 }
2472 pid_list_size = pid_list_size_needed;
2473 }
2474
2475 pgid = pgrp->pg_id;
2476
2477 PGMEMBERS_FOREACH(pgrp, p) {
2478 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
2479 continue;;
2480 }
2481 pid_list[pid_count++] = proc_pid(p);
2482 if (pid_count >= pid_count_available) {
2483 break;
2484 }
2485 }
2486
2487 pgrp_unlock(pgrp);
2488
2489 if (flags & PGRP_DROPREF) {
2490 pg_rele(pgrp);
2491 }
2492
2493 for (int i = 0; i< pid_count; i++) {
2494 /* do not handle kernproc */
2495 if (pid_list[i] == 0) {
2496 continue;
2497 }
2498 p = proc_find(pid_list[i]);
2499 if (!p) {
2500 continue;
2501 }
2502 if (p->p_pgrpid != pgid) {
2503 proc_rele(p);
2504 continue;
2505 }
2506
2507 int callout_ret = callout(p, arg);
2508
2509 switch (callout_ret) {
2510 case PROC_RETURNED:
2511 proc_rele(p);
2512 /* FALLTHROUGH */
2513 case PROC_CLAIMED:
2514 break;
2515
2516 case PROC_RETURNED_DONE:
2517 proc_rele(p);
2518 /* FALLTHROUGH */
2519 case PROC_CLAIMED_DONE:
2520 goto out;
2521
2522 default:
2523 panic("pgrp_iterate: callout returned %d for pid %d",
2524 callout_ret, pid_list[i]);
2525 }
2526 }
2527
2528out:
2529 kfree(pid_list, pid_list_size);
2530 return 0;
2531}
2532
/*
 * pgrp_add
 *
 * Add child to pgrp, inserting it after parent on the member list when a
 * parent is given.  Clears a pending PGRP_FLAG_TERMINATE so a group being
 * torn down can be revived by a late join (login cycler case below).
 */
static void
pgrp_add(struct pgrp * pgrp, struct proc * parent, struct proc * child)
{
	proc_list_lock();
	child->p_pgrp = pgrp;
	child->p_pgrpid = pgrp->pg_id;
	child->p_listflag |= P_LIST_INPGRP;
	/*
	 * When pgrp is being freed , a process can still
	 * request addition using setpgid from bash when
	 * login is terminated (login cycler) return ESRCH
	 * Safe to hold lock due to refcount on pgrp
	 */
	if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
		pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	if ((pgrp->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
		panic("pgrp_add : pgrp is dead adding process");
	proc_list_unlock();

	pgrp_lock(pgrp);
	pgrp->pg_membercnt++;
	if ( parent != PROC_NULL) {
		LIST_INSERT_AFTER(parent, child, p_pglist);
	}else {
		LIST_INSERT_HEAD(&pgrp->pg_members, child, p_pglist);
	}
	pgrp_unlock(pgrp);

	proc_list_lock();
	/* re-clear TERMINATE in case it was set between the two lock windows */
	if (((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (pgrp->pg_membercnt != 0)) {
		pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}
	proc_list_unlock();
}
2569
/*
 * pgrp_remove
 *
 * Detach p from its process group, consuming the pgrp reference taken via
 * proc_pgrp().  When p was the last member, the group itself is torn down
 * through pgdelete_dropref().
 */
static void
pgrp_remove(struct proc * p)
{
	struct pgrp * pg;

	pg = proc_pgrp(p);

	proc_list_lock();
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INPGRP) == 0)
		panic("removing from pglist but no named ref\n");
#endif
	p->p_pgrpid = PGRPID_DEAD;
	p->p_listflag &= ~P_LIST_INPGRP;
	p->p_pgrp = NULL;
	proc_list_unlock();

	if (pg == PGRP_NULL)
		panic("pgrp_remove: pg is NULL");
	pgrp_lock(pg);
	pg->pg_membercnt--;

	if (pg->pg_membercnt < 0)
		panic("pgprp: -ve membercnt pgprp:%p p:%p\n",pg, p);

	LIST_REMOVE(p, p_pglist);
	if (pg->pg_members.lh_first == 0) {
		/* last member: pgdelete_dropref consumes our reference */
		pgrp_unlock(pg);
		pgdelete_dropref(pg);
	} else {
		pgrp_unlock(pg);
		pg_rele(pg);
	}
}
2604
2605
/* cannot use proc_pgrp as it maybe stalled */
/*
 * pgrp_replace
 *
 * Atomically (with respect to proc_pgrp() readers) move p from its current
 * process group into newpg.  The P_LIST_PGRPTRANS flag marks the process
 * as "in transition" so concurrent proc_pgrp() callers block until the
 * switch completes.  The old group is torn down if p was its last member.
 */
static void
pgrp_replace(struct proc * p, struct pgrp * newpg)
{
	struct pgrp * oldpg;



	proc_list_lock();

	/* wait out any transition already in progress for p */
	while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
		p->p_listflag |= P_LIST_PGRPTRWAIT;
		(void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
	}

	p->p_listflag |= P_LIST_PGRPTRANS;

	oldpg = p->p_pgrp;
	if (oldpg == PGRP_NULL)
		panic("pgrp_replace: oldpg NULL");
	/* hold oldpg across the unlocked window below */
	oldpg->pg_refcount++;
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INPGRP) == 0)
		panic("removing from pglist but no named ref\n");
#endif
	p->p_pgrpid = PGRPID_DEAD;
	p->p_listflag &= ~P_LIST_INPGRP;
	p->p_pgrp = NULL;

	proc_list_unlock();

	pgrp_lock(oldpg);
	oldpg->pg_membercnt--;
	if (oldpg->pg_membercnt < 0)
		panic("pgprp: -ve membercnt pgprp:%p p:%p\n",oldpg, p);
	LIST_REMOVE(p, p_pglist);
	if (oldpg->pg_members.lh_first == 0) {
		/* last member: pgdelete_dropref consumes our extra reference */
		pgrp_unlock(oldpg);
		pgdelete_dropref(oldpg);
	} else {
		pgrp_unlock(oldpg);
		pg_rele(oldpg);
	}

	proc_list_lock();
	p->p_pgrp = newpg;
	p->p_pgrpid = newpg->pg_id;
	p->p_listflag |= P_LIST_INPGRP;
	/*
	 * When pgrp is being freed , a process can still
	 * request addition using setpgid from bash when
	 * login is terminated (login cycler) return ESRCH
	 * Safe to hold lock due to refcount on pgrp
	 */
	if ((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
		newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	if ((newpg->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
		panic("pgrp_add : pgrp is dead adding process");
	proc_list_unlock();

	pgrp_lock(newpg);
	newpg->pg_membercnt++;
	LIST_INSERT_HEAD(&newpg->pg_members, p, p_pglist);
	pgrp_unlock(newpg);

	proc_list_lock();
	if (((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (newpg->pg_membercnt != 0)) {
		newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	/* transition done: wake anyone blocked in proc_pgrp()/pgrp_replace() */
	p->p_listflag &= ~P_LIST_PGRPTRANS;
	if ((p->p_listflag & P_LIST_PGRPTRWAIT) == P_LIST_PGRPTRWAIT) {
		p->p_listflag &= ~P_LIST_PGRPTRWAIT;
		wakeup(&p->p_pgrpid);

	}
	proc_list_unlock();
}
2686
/* Acquire the process group's member-list mutex. */
void
pgrp_lock(struct pgrp * pgrp)
{
	lck_mtx_lock(&pgrp->pg_mlock);
}
2692
/* Release the process group's member-list mutex. */
void
pgrp_unlock(struct pgrp * pgrp)
{
	lck_mtx_unlock(&pgrp->pg_mlock);
}
2698
/* Acquire the session's mutex. */
void
session_lock(struct session * sess)
{
	lck_mtx_lock(&sess->s_mlock);
}
2704
2705
/* Release the session's mutex. */
void
session_unlock(struct session * sess)
{
	lck_mtx_unlock(&sess->s_mlock);
}
2711
2712struct pgrp *
2713proc_pgrp(proc_t p)
2714{
2715 struct pgrp * pgrp;
2716
2717 if (p == PROC_NULL)
2718 return(PGRP_NULL);
2719 proc_list_lock();
2720
2721 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2722 p->p_listflag |= P_LIST_PGRPTRWAIT;
2723 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2724 }
2725
2726 pgrp = p->p_pgrp;
2727
2728 assert(pgrp != NULL);
2729
2730 if (pgrp != PGRP_NULL) {
2731 pgrp->pg_refcount++;
2732 if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) != 0)
2733 panic("proc_pgrp: ref being povided for dead pgrp");
2734 }
2735
2736 proc_list_unlock();
2737
2738 return(pgrp);
2739}
2740
2741struct pgrp *
2742tty_pgrp(struct tty * tp)
2743{
2744 struct pgrp * pg = PGRP_NULL;
2745
2746 proc_list_lock();
2747 pg = tp->t_pgrp;
2748
2749 if (pg != PGRP_NULL) {
2750 if ((pg->pg_listflags & PGRP_FLAG_DEAD) != 0)
2751 panic("tty_pgrp: ref being povided for dead pgrp");
2752 pg->pg_refcount++;
2753 }
2754 proc_list_unlock();
2755
2756 return(pg);
2757}
2758
2759struct session *
2760proc_session(proc_t p)
2761{
2762 struct session * sess = SESSION_NULL;
2763
2764 if (p == PROC_NULL)
2765 return(SESSION_NULL);
2766
2767 proc_list_lock();
2768
2769 /* wait during transitions */
2770 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2771 p->p_listflag |= P_LIST_PGRPTRWAIT;
2772 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2773 }
2774
2775 if ((p->p_pgrp != PGRP_NULL) && ((sess = p->p_pgrp->pg_session) != SESSION_NULL)) {
2776 if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
2777 panic("proc_session:returning sesssion ref on terminating session");
2778 sess->s_count++;
2779 }
2780 proc_list_unlock();
2781 return(sess);
2782}
2783
/*
 * Drop one reference on a session.  On the last reference the session
 * is marked terminating/dead, unhashed, its mutex destroyed, and its
 * storage freed.
 */
void
session_rele(struct session *sess)
{
	proc_list_lock();
	if (--sess->s_count == 0) {
		if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
			panic("session_rele: terminating already terminated session");
		sess->s_listflags |= S_LIST_TERM;
		LIST_REMOVE(sess, s_hash);
		sess->s_listflags |= S_LIST_DEAD;
		/* nobody may have gained a reference while we marked it dead */
		if (sess->s_count != 0)
			panic("session_rele: freeing session in use");
		proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_destroy(&sess->s_mlock, proc_mlock_grp);
#else
		lck_mtx_destroy(&sess->s_mlock, proc_lck_grp);
#endif
		FREE_ZONE(sess, sizeof(struct session), M_SESSION);
	} else
		proc_list_unlock();
}
2806
/*
 * Begin a proc "transition" on 'p'.
 *
 * locked       - non-zero if the caller already holds the proc lock
 * non_blocking - non-zero to fail instead of sleeping when another
 *                transition is in progress
 *
 * Returns 0 with P_LINTRANSIT set and the current thread recorded as
 * transition holder, or EDEADLK if the active transition is already
 * committed or non_blocking was requested.
 */
int
proc_transstart(proc_t p, int locked, int non_blocking)
{
	if (locked == 0)
		proc_lock(p);
	while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
		if (((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT) || non_blocking) {
			if (locked == 0)
				proc_unlock(p);
			return EDEADLK;
		}
		p->p_lflag |= P_LTRANSWAIT;
		/* NOTE(review): wait-channel string "proc_signstart" looks like a leftover name */
		msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
	}
	p->p_lflag |= P_LINTRANSIT;
	p->p_transholder = current_thread();
	if (locked == 0)
		proc_unlock(p);
	return 0;
}
2827
/*
 * Mark the in-progress transition on 'p' as committed.  Must be called
 * by the thread that started the transition (asserted).  Any threads
 * sleeping on the transition are woken so they can observe the commit.
 */
void
proc_transcommit(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);

	assert ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT);
	assert (p->p_transholder == current_thread());
	p->p_lflag |= P_LTRANSCOMMIT;

	if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
		p->p_lflag &= ~P_LTRANSWAIT;
		wakeup(&p->p_lflag);
	}
	if (locked == 0)
		proc_unlock(p);
}
2845
/*
 * End the transition on 'p': clear the transit/commit flags, release
 * the holder, and wake any waiters.
 */
void
proc_transend(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);

	p->p_lflag &= ~( P_LINTRANSIT | P_LTRANSCOMMIT);
	p->p_transholder = NULL;

	if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
		p->p_lflag &= ~P_LTRANSWAIT;
		wakeup(&p->p_lflag);
	}
	if (locked == 0)
		proc_unlock(p);
}
2862
/*
 * Wait for any in-progress transition on 'p' to finish.  Returns
 * EDEADLK when the transition has been committed and the caller is the
 * proc itself (waiting would deadlock); 0 otherwise.
 */
int
proc_transwait(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);
	while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
		if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT && current_proc() == p) {
			if (locked == 0)
				proc_unlock(p);
			return EDEADLK;
		}
		p->p_lflag |= P_LTRANSWAIT;
		msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
	}
	if (locked == 0)
		proc_unlock(p);
	return 0;
}
2881
/* Acquire the global proc-knote list mutex. */
void
proc_klist_lock(void)
{
	lck_mtx_lock(proc_klist_mlock);
}
2887
/* Release the global proc-knote list mutex. */
void
proc_klist_unlock(void)
{
	lck_mtx_unlock(proc_klist_mlock);
}
2893
/* Post 'hint' to every knote attached to the proc, under the klist lock. */
void
proc_knote(struct proc * p, long hint)
{
	proc_klist_lock();
	KNOTE(&p->p_klist, hint);
	proc_klist_unlock();
}
2901
/*
 * Detach every knote from the proc's klist and null out each knote's
 * proc back-pointer.
 */
void
proc_knote_drain(struct proc *p)
{
	struct knote *kn = NULL;

	/*
	 * Clear the proc's klist to avoid references after the proc is reaped.
	 */
	proc_klist_lock();
	while ((kn = SLIST_FIRST(&p->p_klist))) {
		kn->kn_ptr.p_proc = PROC_NULL;
		KNOTE_DETACH(&p->p_klist, kn);
	}
	proc_klist_unlock();
}
2917
/* Set the P_LREGISTER flag on the proc, under the proc lock. */
void
proc_setregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag |= P_LREGISTER;
	proc_unlock(p);
}
2925
/* Clear the P_LREGISTER flag on the proc, under the proc lock. */
void
proc_resetregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag &= ~P_LREGISTER;
	proc_unlock(p);
}
2933
/* Return p's cached process group id (read without taking any lock). */
pid_t
proc_pgrpid(proc_t p)
{
	return p->p_pgrpid;
}
2939
2940pid_t
2941proc_selfpgrpid()
2942{
2943 return current_proc()->p_pgrpid;
2944}
2945
2946
2947/* return control and action states */
2948int
2949proc_getpcontrol(int pid, int * pcontrolp)
2950{
2951 proc_t p;
2952
2953 p = proc_find(pid);
2954 if (p == PROC_NULL)
2955 return(ESRCH);
2956 if (pcontrolp != NULL)
2957 *pcontrolp = p->p_pcaction;
2958
2959 proc_rele(p);
2960 return(0);
2961}
2962
/*
 * Apply the proc's configured low-swap control action -- throttle,
 * suspend, or kill -- if no action has been taken on it yet.  Takes
 * and drops the proc lock; always returns PROC_RETURNED.
 */
int
proc_dopcontrol(proc_t p)
{
	int pcontrol;

	proc_lock(p);

	pcontrol = PROC_CONTROL_STATE(p);

	/* act at most once per proc: skip if an action was already taken */
	if (PROC_ACTION_STATE(p) == 0) {
		switch(pcontrol) {
		case P_PCTHROTTLE:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: throttling pid %d (%s)\n", p->p_pid, p->p_comm);
			break;

		case P_PCSUSP:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: suspending pid %d (%s)\n", p->p_pid, p->p_comm);
			task_suspend(p->task);
			break;

		case P_PCKILL:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
			psignal(p, SIGKILL);
			break;

		default:
			proc_unlock(p);
		}

	} else
		proc_unlock(p);

	return(PROC_RETURNED);
}
3003
3004
3005/*
3006 * Resume a throttled or suspended process. This is an internal interface that's only
3007 * used by the user level code that presents the GUI when we run out of swap space and
3008 * hence is restricted to processes with superuser privileges.
3009 */
3010
/*
 * Undo a previously-applied low-swap control action on the process
 * identified by pid (unthrottle or resume).  P_PCKILL cannot be
 * undone and is ignored.  Caller must be the validated resource
 * owner or superuser.
 */
int
proc_resetpcontrol(int pid)
{
	proc_t p;
	int pcontrol;
	int error;
	proc_t self = current_proc();

	/* if the process has been validated to handle resource control or root is valid one */
	if (((self->p_lflag & P_LVMRSRCOWNER) == 0) && (error = suser(kauth_cred_get(), 0)))
		return error;

	p = proc_find(pid);
	if (p == PROC_NULL)
		return(ESRCH);

	proc_lock(p);

	pcontrol = PROC_CONTROL_STATE(p);

	/* only procs on which an action was actually taken can be reset */
	if(PROC_ACTION_STATE(p) !=0) {
		switch(pcontrol) {
		case P_PCTHROTTLE:
			PROC_RESETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: unthrottling pid %d (%s)\n", p->p_pid, p->p_comm);
			break;

		case P_PCSUSP:
			PROC_RESETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: resuming pid %d (%s)\n", p->p_pid, p->p_comm);
			task_resume(p->task);
			break;

		case P_PCKILL:
			/* Huh? */
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: attempt to unkill pid %d (%s) ignored\n", p->p_pid, p->p_comm);
			break;

		default:
			proc_unlock(p);
		}

	} else
		proc_unlock(p);

	proc_rele(p);
	return(0);
}
3063
3064
3065
/*
 * Accumulator filled by proc_pcontrol_filter() and consumed by
 * no_paging_space_action() to pick a victim when the compressor pool
 * (swap) is exhausted.
 *
 * pcs_*  - procs with a control state set and no action taken yet
 * npcs_* - procs with no control state
 * apcs_* - procs whose control action has already been taken
 *
 * *_max_size/_pid/_uniqueid track the single largest compressor user
 * in each bucket (uniqueid guards against pid reuse).
 */
struct no_paging_space
{
	uint64_t pcs_max_size;
	uint64_t pcs_uniqueid;
	int pcs_pid;
	int pcs_proc_count;
	uint64_t pcs_total_size;

	uint64_t npcs_max_size;
	uint64_t npcs_uniqueid;
	int npcs_pid;
	int npcs_proc_count;
	uint64_t npcs_total_size;

	int apcs_proc_count;
	uint64_t apcs_total_size;
};
3083
3084
3085static int
3086proc_pcontrol_filter(proc_t p, void *arg)
3087{
3088 struct no_paging_space *nps;
3089 uint64_t compressed;
3090
3091 nps = (struct no_paging_space *)arg;
3092
3093 compressed = get_task_compressed(p->task);
3094
3095 if (PROC_CONTROL_STATE(p)) {
3096 if (PROC_ACTION_STATE(p) == 0) {
3097 if (compressed > nps->pcs_max_size) {
3098 nps->pcs_pid = p->p_pid;
3099 nps->pcs_uniqueid = p->p_uniqueid;
3100 nps->pcs_max_size = compressed;
3101 }
3102 nps->pcs_total_size += compressed;
3103 nps->pcs_proc_count++;
3104 } else {
3105 nps->apcs_total_size += compressed;
3106 nps->apcs_proc_count++;
3107 }
3108 } else {
3109 if (compressed > nps->npcs_max_size) {
3110 nps->npcs_pid = p->p_pid;
3111 nps->npcs_uniqueid = p->p_uniqueid;
3112 nps->npcs_max_size = compressed;
3113 }
3114 nps->npcs_total_size += compressed;
3115 nps->npcs_proc_count++;
3116
3117 }
3118 return (0);
3119}
3120
3121
/* No-op callout used with proc_iterate when only the filter matters. */
static int
proc_pcontrol_null(__unused proc_t p, __unused void *arg)
{
	return(PROC_RETURNED);
}
3127
3128
3129/*
3130 * Deal with the low on compressor pool space condition... this function
3131 * gets called when we are approaching the limits of the compressor pool or
3132 * we are unable to create a new swap file.
3133 * Since this eventually creates a memory deadlock situtation, we need to take action to free up
3134 * memory resources (both compressed and uncompressed) in order to prevent the system from hanging completely.
3135 * There are 2 categories of processes to deal with. Those that have an action
3136 * associated with them by the task itself and those that do not. Actionable
3137 * tasks can have one of three categories specified: ones that
3138 * can be killed immediately, ones that should be suspended, and ones that should
3139 * be throttled. Processes that do not have an action associated with them are normally
3140 * ignored unless they are utilizing such a large percentage of the compressor pool (currently 50%)
3141 * that only by killing them can we hope to put the system back into a usable state.
3142 */
3143
#define NO_PAGING_SPACE_DEBUG 0

extern uint64_t vm_compressor_pages_compressed(void);

/* time of the last low-swap action; throttles no_paging_space_action() to one pass per 5s */
struct timeval last_no_space_action = {0, 0};
3149
3150int
3151no_paging_space_action()
3152{
3153 proc_t p;
3154 struct no_paging_space nps;
3155 struct timeval now;
3156
3157 /*
3158 * Throttle how often we come through here. Once every 5 seconds should be plenty.
3159 */
3160 microtime(&now);
3161
3162 if (now.tv_sec <= last_no_space_action.tv_sec + 5)
3163 return (0);
3164
3165 /*
3166 * Examine all processes and find the biggest (biggest is based on the number of pages this
3167 * task has in the compressor pool) that has been marked to have some action
3168 * taken when swap space runs out... we also find the biggest that hasn't been marked for
3169 * action.
3170 *
3171 * If the biggest non-actionable task is over the "dangerously big" threashold (currently 50% of
3172 * the total number of pages held by the compressor, we go ahead and kill it since no other task
3173 * can have any real effect on the situation. Otherwise, we go after the actionable process.
3174 */
3175 bzero(&nps, sizeof(nps));
3176
3177 proc_iterate(PROC_ALLPROCLIST, proc_pcontrol_null, (void *)NULL, proc_pcontrol_filter, (void *)&nps);
3178
3179#if NO_PAGING_SPACE_DEBUG
3180 printf("low swap: npcs_proc_count = %d, npcs_total_size = %qd, npcs_max_size = %qd\n",
3181 nps.npcs_proc_count, nps.npcs_total_size, nps.npcs_max_size);
3182 printf("low swap: pcs_proc_count = %d, pcs_total_size = %qd, pcs_max_size = %qd\n",
3183 nps.pcs_proc_count, nps.pcs_total_size, nps.pcs_max_size);
3184 printf("low swap: apcs_proc_count = %d, apcs_total_size = %qd\n",
3185 nps.apcs_proc_count, nps.apcs_total_size);
3186#endif
3187 if (nps.npcs_max_size > (vm_compressor_pages_compressed() * 50) / 100) {
3188 /*
3189 * for now we'll knock out any task that has more then 50% of the pages
3190 * held by the compressor
3191 */
3192 if ((p = proc_find(nps.npcs_pid)) != PROC_NULL) {
3193
3194 if (nps.npcs_uniqueid == p->p_uniqueid) {
3195 /*
3196 * verify this is still the same process
3197 * in case the proc exited and the pid got reused while
3198 * we were finishing the proc_iterate and getting to this point
3199 */
3200 last_no_space_action = now;
3201
3202 printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
3203 psignal(p, SIGKILL);
3204
3205 proc_rele(p);
3206
3207 return (0);
3208 }
3209
3210 proc_rele(p);
3211 }
3212 }
3213
3214 if (nps.pcs_max_size > 0) {
3215 if ((p = proc_find(nps.pcs_pid)) != PROC_NULL) {
3216
3217 if (nps.pcs_uniqueid == p->p_uniqueid) {
3218 /*
3219 * verify this is still the same process
3220 * in case the proc exited and the pid got reused while
3221 * we were finishing the proc_iterate and getting to this point
3222 */
3223 last_no_space_action = now;
3224
3225 proc_dopcontrol(p);
3226
3227 proc_rele(p);
3228
3229 return (1);
3230 }
3231
3232 proc_rele(p);
3233 }
3234 }
3235 last_no_space_action = now;
3236
3237 printf("low swap: unable to find any eligible processes to take action on\n");
3238
3239 return (0);
3240}
3241
/*
 * proc_trace_log system call: ask the target task -- identified by pid
 * plus uniqueid to guard against pid reuse -- to send its trace memory.
 *
 * Returns EPERM without PRIV_PROC_TRACE_INSPECT, ENOENT if the pid is
 * gone or the uniqueid does not match, EINVAL if the task refuses.
 */
int
proc_trace_log(__unused proc_t p, struct proc_trace_log_args *uap, __unused int *retval)
{
	int ret = 0;
	proc_t target_proc = PROC_NULL;
	pid_t target_pid = uap->pid;
	uint64_t target_uniqueid = uap->uniqueid;
	task_t target_task = NULL;

	if (priv_check_cred(kauth_cred_get(), PRIV_PROC_TRACE_INSPECT, 0)) {
		ret = EPERM;
		goto out;
	}
	target_proc = proc_find(target_pid);
	if (target_proc != PROC_NULL) {
		/* reject stale pids that were recycled since the caller looked */
		if (target_uniqueid != proc_uniqueid(target_proc)) {
			ret = ENOENT;
			goto out;
		}

		target_task = proc_task(target_proc);
		if (task_send_trace_memory(target_task, target_pid, target_uniqueid)) {
			ret = EINVAL;
			goto out;
		}
	} else
		ret = ENOENT;

out:
	if (target_proc != PROC_NULL)
		proc_rele(target_proc);
	return (ret);
}
3275
#if VM_SCAN_FOR_SHADOW_CHAIN
extern int vm_map_shadow_max(vm_map_t map);
int proc_shadow_max(void);
/*
 * Diagnostic: scan every live process' VM map and return the deepest
 * shadow-chain depth found (0 if none).  Holds proc_list_lock for the
 * whole scan.
 */
int proc_shadow_max(void)
{
	int retval, max;
	proc_t p;
	task_t task;
	vm_map_t map;

	max = 0;
	proc_list_lock();
	for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
		/* skip procs still in creation */
		if (p->p_stat == SIDL)
			continue;
		task = p->task;
		if (task == NULL) {
			continue;
		}
		map = get_task_map(task);
		if (map == NULL) {
			continue;
		}
		retval = vm_map_shadow_max(map);
		if (retval > max) {
			max = retval;
		}
	}
	proc_list_unlock();
	return max;
}
#endif /* VM_SCAN_FOR_SHADOW_CHAIN */
3308
3309void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid);
3310void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid)
3311{
3312 if (target_proc != NULL) {
3313 target_proc->p_responsible_pid = responsible_pid;
3314 }
3315 return;
3316}
3317
3318int
3319proc_chrooted(proc_t p)
3320{
3321 int retval = 0;
3322
3323 if (p) {
3324 proc_fdlock(p);
3325 retval = (p->p_fd->fd_rdir != NULL) ? 1 : 0;
3326 proc_fdunlock(p);
3327 }
3328
3329 return retval;
3330}
3331
3332void *
3333proc_get_uthread_uu_threadlist(void * uthread_v)
3334{
3335 uthread_t uth = (uthread_t)uthread_v;
3336 return (uth != NULL) ? uth->uu_threadlist : NULL;
3337}