]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kern_proc.c
549024e9d0b0f4dcf9f5c6437a354d6fa6aa4283
[apple/xnu.git] / bsd / kern / kern_proc.c
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)kern_proc.c 8.4 (Berkeley) 1/4/94
62 */
63 /*
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
67 * Version 2.0.
68 */
69 /* HISTORY
70 * 04-Aug-97 Umesh Vaishampayan (umeshv@apple.com)
71 * Added current_proc_EXTERNAL() function for the use of kernel
72 * lodable modules.
73 *
74 * 05-Jun-95 Mac Gillon (mgillon) at NeXT
75 * New version based on 3.3NS and 4.4
76 */
77
78
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/kernel.h>
82 #include <sys/proc_internal.h>
83 #include <sys/acct.h>
84 #include <sys/wait.h>
85 #include <sys/file_internal.h>
86 #include <sys/uio.h>
87 #include <sys/malloc.h>
88 #include <sys/lock.h>
89 #include <sys/mbuf.h>
90 #include <sys/ioctl.h>
91 #include <sys/tty.h>
92 #include <sys/signalvar.h>
93 #include <sys/syslog.h>
94 #include <sys/sysctl.h>
95 #include <sys/sysproto.h>
96 #include <sys/kauth.h>
97 #include <sys/codesign.h>
98 #include <sys/kernel_types.h>
99 #include <sys/ubc.h>
100 #include <kern/kalloc.h>
101 #include <kern/task.h>
102 #include <kern/coalition.h>
103 #include <sys/coalition.h>
104 #include <kern/assert.h>
105 #include <vm/vm_protos.h>
106 #include <vm/vm_map.h> /* vm_map_switch_protect() */
107 #include <vm/vm_pageout.h>
108 #include <mach/task.h>
109 #include <mach/message.h>
110 #include <sys/priv.h>
111 #include <sys/proc_info.h>
112 #include <sys/bsdtask_info.h>
113 #include <sys/persona.h>
114
115 #if CONFIG_MEMORYSTATUS
116 #include <sys/kern_memorystatus.h>
117 #endif
118
119 #if CONFIG_MACF
120 #include <security/mac_framework.h>
121 #endif
122
123 #include <libkern/crypto/sha1.h>
124
/*
 * Structure associated with user cacheing.
 * One entry per uid currently running processes; ui_proccnt is the
 * per-uid process count enforced against RLIMIT_NPROC by chgproccnt().
 */
struct uidinfo {
	LIST_ENTRY(uidinfo) ui_hash;	/* chain in uihashtbl bucket */
	uid_t	ui_uid;			/* uid this entry counts for */
	long	ui_proccnt;		/* number of processes owned by ui_uid */
};
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
u_long uihash;		/* size of hash table - 1 */

/*
 * Other process lists
 * All hash tables below are allocated in procinit() and protected by
 * the global proc list lock.
 */
struct pidhashhead *pidhashtbl;
u_long pidhash;
struct pgrphashhead *pgrphashtbl;
u_long pgrphash;
struct sesshashhead *sesshashtbl;
u_long sesshash;

struct proclist allproc;	/* list of all live processes */
struct proclist zombproc;	/* list of zombie (exited, unreaped) processes */
extern struct tty cons;

extern int cs_debug;

#if DEBUG
#define __PROC_INTERNAL_DEBUG 1
#endif
/* Name to give to core files */
__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN+1] = {"/cores/core.%P"};

#if PROC_REF_DEBUG
extern uint32_t fastbacktrace(uintptr_t* bt, uint32_t max_frames) __attribute__((noinline));
#endif

/* forward declarations for file-local helpers defined below */
static void orphanpg(struct pgrp *pg);
void	proc_name_kdp(task_t t, char * buf, int size);
int	proc_threadname_kdp(void *uth, char *buf, size_t size);
void	proc_starttime_kdp(void *p, uint64_t *tv_sec, uint64_t *tv_usec);
char	*proc_name_address(void *p);

static void pgrp_add(struct pgrp * pgrp, proc_t parent, proc_t child);
static void pgrp_remove(proc_t p);
static void pgrp_replace(proc_t p, struct pgrp *pgrp);
static void pgdelete_dropref(struct pgrp *pgrp);
extern void pg_rele_dropref(struct pgrp * pgrp);
static int csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaddittoken);
static boolean_t proc_parent_is_currentproc(proc_t p);

/* argument bundle for the fixjobc() proc-list iteration callback */
struct fixjob_iterargs {
	struct pgrp * pg;
	struct session * mysession;
	int entering;
};

int fixjob_callback(proc_t, void *);
184
/*
 * Initialize global process hashing structures.
 * Called once at boot: sets up the live/zombie proc lists and the
 * pid, pgrp, session and uid hash tables (sizes derived from maxproc).
 */
void
procinit(void)
{
	LIST_INIT(&allproc);
	LIST_INIT(&zombproc);
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
	sesshashtbl = hashinit(maxproc / 4, M_PROC, &sesshash);
	/* uid table is smaller: far fewer distinct uids than processes */
	uihashtbl = hashinit(maxproc / 16, M_PROC, &uihash);
#if CONFIG_PERSONAS
	personas_bootstrap();
#endif
}
201
/*
 * Change the count associated with number of processes
 * a given user is using. This routine protects the uihash
 * with the list lock
 *
 * Returns the new process count for 'uid'.  A new uidinfo entry is
 * allocated with the lock dropped and the lookup retried ("again:"),
 * since MALLOC_ZONE may block; a racing insert is handled by freeing
 * the unused allocation at "out:".
 */
int
chgproccnt(uid_t uid, int diff)
{
	struct uidinfo *uip;
	struct uidinfo *newuip = NULL;	/* pre-allocated entry, if needed */
	struct uihashhead *uipp;
	int retval;

again:
	proc_list_lock();
	uipp = UIHASH(uid);
	for (uip = uipp->lh_first; uip != 0; uip = uip->ui_hash.le_next)
		if (uip->ui_uid == uid)
			break;
	if (uip) {
		uip->ui_proccnt += diff;
		if (uip->ui_proccnt > 0) {
			retval = uip->ui_proccnt;
			proc_list_unlock();
			goto out;
		}
		/* count dropped to <= 0: entry is no longer needed */
		if (uip->ui_proccnt < 0)
			panic("chgproccnt: procs < 0");
		LIST_REMOVE(uip, ui_hash);
		retval = 0;
		proc_list_unlock();
		FREE_ZONE(uip, sizeof(*uip), M_PROC);
		goto out;
	}
	if (diff <= 0) {
		if (diff == 0) {
			retval = 0;
			proc_list_unlock();
			goto out;
		}
		/* decrementing a uid with no entry: accounting is corrupt */
		panic("chgproccnt: lost user");
	}
	if (newuip != NULL) {
		/* use the entry allocated on a previous pass */
		uip = newuip;
		newuip = NULL;
		LIST_INSERT_HEAD(uipp, uip, ui_hash);
		uip->ui_uid = uid;
		uip->ui_proccnt = diff;
		retval = diff;
		proc_list_unlock();
		goto out;
	}
	/* allocate with the list lock dropped, then re-check the hash */
	proc_list_unlock();
	MALLOC_ZONE(newuip, struct uidinfo *, sizeof(*uip), M_PROC, M_WAITOK);
	if (newuip == NULL)
		panic("chgproccnt: M_PROC zone depleted");
	goto again;
out:
	/* free the pre-allocation if it turned out to be unnecessary */
	if (newuip != NULL)
		FREE_ZONE(newuip, sizeof(*uip), M_PROC);
	return(retval);
}
264
/*
 * Is p an inferior of the current process?
 * Walks the parent chain from p under the proc list lock; reaching
 * pid 0 (launchd/kernproc ancestry root) means "no".
 */
int
inferior(proc_t p)
{
	int retval = 0;

	proc_list_lock();
	for (; p != current_proc(); p = p->p_pptr)
		if (p->p_pid == 0)
			goto out;
	retval = 1;
out:
	proc_list_unlock();
	return(retval);
}
282
/*
 * Is p an inferior of t ?
 * Walks p's parent chain under the proc list lock looking for t.
 * Bails out on pid 0, on a parent pointer cycling back to the start,
 * or after more hops than there are processes (defensive cycle guard).
 */
int
isinferior(proc_t p, proc_t t)
{
	int retval = 0;
	int nchecked = 0;
	proc_t start = p;

	/* if p==t they are not inferior */
	if (p == t)
		return(0);

	proc_list_lock();
	for (; p != t; p = p->p_pptr) {
		nchecked++;

		/* Detect here if we're in a cycle */
		if ((p->p_pid == 0) || (p->p_pptr == start) || (nchecked >= nprocs))
			goto out;
	}
	retval = 1;
out:
	proc_list_unlock();
	return(retval);
}
310
311 int
312 proc_isinferior(int pid1, int pid2)
313 {
314 proc_t p = PROC_NULL;
315 proc_t t = PROC_NULL;
316 int retval = 0;
317
318 if (((p = proc_find(pid1)) != (proc_t)0 ) && ((t = proc_find(pid2)) != (proc_t)0))
319 retval = isinferior(p, t);
320
321 if (p != PROC_NULL)
322 proc_rele(p);
323 if (t != PROC_NULL)
324 proc_rele(t);
325
326 return(retval);
327 }
328
329 proc_t
330 proc_find(int pid)
331 {
332 return(proc_findinternal(pid, 0));
333 }
334
335 proc_t
336 proc_findinternal(int pid, int locked)
337 {
338 proc_t p = PROC_NULL;
339
340 if (locked == 0) {
341 proc_list_lock();
342 }
343
344 p = pfind_locked(pid);
345 if ((p == PROC_NULL) || (p != proc_ref_locked(p)))
346 p = PROC_NULL;
347
348 if (locked == 0) {
349 proc_list_unlock();
350 }
351
352 return(p);
353 }
354
/*
 * Return (with a reference) the proc associated with a thread, or
 * PROC_NULL.  During vfork the uthread temporarily runs on behalf of
 * the child (uu_proc); otherwise the proc comes from the thread's task.
 */
proc_t
proc_findthread(thread_t thread)
{
	proc_t p = PROC_NULL;
	struct uthread *uth;

	proc_list_lock();
	uth = get_bsdthread_info(thread);
	if (uth && (uth->uu_flag & UT_VFORK))
		p = uth->uu_proc;	/* vfork child borrowed this thread */
	else
		p = (proc_t)(get_bsdthreadtask_info(thread));
	p = proc_ref_locked(p);
	proc_list_unlock();
	return(p);
}
371
#if PROC_REF_DEBUG
/*
 * Debug-only per-uthread proc reference tracking: each uthread keeps a
 * running count of proc refs it holds plus backtraces of the first few
 * acquisitions, so leaked references can be attributed to a call site.
 */

/* Reset the tracking state at thread (re)start. */
void
uthread_reset_proc_refcount(void *uthread) {
	uthread_t uth;

	if (proc_ref_tracking_disabled) {
		return;
	}

	uth = (uthread_t) uthread;

	uth->uu_proc_refcount = 0;
	uth->uu_pindex = 0;
}

/* Return how many proc refs this uthread currently holds (0 if tracking off). */
int
uthread_get_proc_refcount(void *uthread) {
	uthread_t uth;

	if (proc_ref_tracking_disabled) {
		return 0;
	}

	uth = (uthread_t) uthread;

	return uth->uu_proc_refcount;
}

/*
 * Record a ref count delta (+1 on take, -1 on release) against the
 * current uthread; on take, also capture a backtrace while slots last.
 */
static void
record_procref(proc_t p, int count) {
	uthread_t uth;

	if (proc_ref_tracking_disabled) {
		return;
	}

	uth = current_uthread();
	uth->uu_proc_refcount += count;

	if (count == 1) {
		if (uth->uu_pindex < NUM_PROC_REFS_TO_TRACK) {
			fastbacktrace((uintptr_t *) &uth->uu_proc_pcs[uth->uu_pindex], PROC_REF_STACK_DEPTH);

			uth->uu_proc_ps[uth->uu_pindex] = p;
			uth->uu_pindex++;
		}
	}
}
#endif
421
422 int
423 proc_rele(proc_t p)
424 {
425 proc_list_lock();
426 proc_rele_locked(p);
427 proc_list_unlock();
428
429 return(0);
430 }
431
/*
 * Return the current process with a reference taken, or PROC_NULL when
 * the current process no longer accepts references (e.g. it is exiting).
 */
proc_t
proc_self(void)
{
	struct proc * p;

	p = current_proc();

	proc_list_lock();
	/* proc_ref_locked returns its argument only on success */
	if (p != proc_ref_locked(p))
		p = PROC_NULL;
	proc_list_unlock();
	return(p);
}
445
446
/*
 * Take a reference on p.  Caller must hold the proc list lock.
 * Returns p on success, PROC_NULL when the process is still being
 * created, is a zombie, has started exit, or is being drained/dead.
 */
proc_t
proc_ref_locked(proc_t p)
{
	proc_t p1 = p;

	/* if process still in creation return failure */
	if ((p == PROC_NULL) || ((p->p_listflag & P_LIST_INCREATE) != 0))
		return (PROC_NULL);
	/* do not return process marked for termination */
	if ((p->p_stat != SZOMB) && ((p->p_listflag & P_LIST_EXITED) == 0) && ((p->p_listflag & (P_LIST_DRAINWAIT | P_LIST_DRAIN | P_LIST_DEAD)) == 0)) {
		p->p_refcount++;
#if PROC_REF_DEBUG
		record_procref(p, 1);
#endif
	}
	else
		p1 = PROC_NULL;

	return(p1);
}
467
/*
 * Drop a reference on p.  Caller must hold the proc list lock.
 * When the count reaches zero and proc_refdrain() is sleeping for it
 * (P_LIST_DRAINWAIT), wake the drainer.  Panics on ref underflow.
 */
void
proc_rele_locked(proc_t p)
{

	if (p->p_refcount > 0) {
		p->p_refcount--;
#if PROC_REF_DEBUG
		record_procref(p, -1);
#endif
		if ((p->p_refcount == 0) && ((p->p_listflag & P_LIST_DRAINWAIT) == P_LIST_DRAINWAIT)) {
			p->p_listflag &= ~P_LIST_DRAINWAIT;
			wakeup(&p->p_refcount);
		}
	} else
		panic("proc_rele_locked -ve ref\n");

}
485
/*
 * Find an exited-but-unreaped (zombie) process by pid and claim it by
 * setting P_LIST_WAITING.  If another thread already claimed it, sleep
 * and retry.  Returns PROC_NULL when the pid is absent, still being
 * created, or has not yet begun exit.  Release with proc_drop_zombref().
 */
proc_t
proc_find_zombref(int pid)
{
	proc_t p;

	proc_list_lock();

again:
	p = pfind_locked(pid);

	/* should we bail? */
	if ((p == PROC_NULL)					/* not found */
	    || ((p->p_listflag & P_LIST_INCREATE) != 0)		/* not created yet */
	    || ((p->p_listflag & P_LIST_EXITED) == 0)) {	/* not started exit */

		proc_list_unlock();
		return (PROC_NULL);
	}

	/* If someone else is controlling the (unreaped) zombie - wait */
	if ((p->p_listflag & P_LIST_WAITING) != 0) {
		(void)msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
		goto again;
	}
	p->p_listflag |= P_LIST_WAITING;

	proc_list_unlock();

	return(p);
}
516
/*
 * Release a zombie claim taken by proc_find_zombref(): clear
 * P_LIST_WAITING and wake any thread sleeping for this zombie.
 */
void
proc_drop_zombref(proc_t p)
{
	proc_list_lock();
	if ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
		p->p_listflag &= ~P_LIST_WAITING;
		wakeup(&p->p_stat);
	}
	proc_list_unlock();
}
527
528
/*
 * Block until every outstanding reference on p has been dropped, then
 * mark the proc P_LIST_DEAD so no new references can be taken.  Used
 * during process teardown.  Sleeps on p_refcount; proc_rele_locked()
 * issues the matching wakeup.
 */
void
proc_refdrain(proc_t p)
{

	proc_list_lock();

	p->p_listflag |= P_LIST_DRAIN;
	while (p->p_refcount) {
		p->p_listflag |= P_LIST_DRAINWAIT;
		msleep(&p->p_refcount, proc_list_mlock, 0, "proc_refdrain", 0) ;
	}
	p->p_listflag &= ~P_LIST_DRAIN;
	p->p_listflag |= P_LIST_DEAD;

	proc_list_unlock();


}
547
/*
 * Take a "parent ref" on p's parent (p_parentref), which prevents the
 * parent from completing its child-drain while the caller uses it.
 * Returns the parent or PROC_NULL when the parent is gone, a zombie,
 * or has finished draining children.  If the parent is mid-drain we
 * sleep and retry, giving up after 5 attempts to avoid livelock.
 * Release with proc_parentdropref().
 */
proc_t
proc_parentholdref(proc_t p)
{
	proc_t parent = PROC_NULL;
	proc_t pp;
	int loopcnt = 0;


	proc_list_lock();
loop:
	pp = p->p_pptr;
	if ((pp == PROC_NULL) || (pp->p_stat == SZOMB) || ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED))) {
		parent = PROC_NULL;
		goto out;
	}

	/* drain started but not finished: wait for it to settle */
	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == P_LIST_CHILDDRSTART) {
		pp->p_listflag |= P_LIST_CHILDDRWAIT;
		msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
		loopcnt++;
		if (loopcnt == 5) {
			parent = PROC_NULL;
			goto out;
		}
		goto loop;
	}

	/* no drain in progress: safe to pin the parent */
	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == 0) {
		pp->p_parentref++;
		parent = pp;
		goto out;
	}

out:
	proc_list_unlock();
	return(parent);
}
/*
 * Drop a parent ref taken by proc_parentholdref().  When the count hits
 * zero, wake any proc_childdrainstart() waiter.  'listlocked' non-zero
 * means the caller already holds the proc list lock.  Always returns 0;
 * panics on underflow.
 */
int
proc_parentdropref(proc_t p, int listlocked)
{
	if (listlocked == 0)
		proc_list_lock();

	if (p->p_parentref > 0) {
		p->p_parentref--;
		if ((p->p_parentref == 0) && ((p->p_listflag & P_LIST_PARENTREFWAIT) == P_LIST_PARENTREFWAIT)) {
			p->p_listflag &= ~P_LIST_PARENTREFWAIT;
			wakeup(&p->p_parentref);
		}
	} else
		panic("proc_parentdropref -ve ref\n");
	if (listlocked == 0)
		proc_list_unlock();

	return(0);
}
604
/*
 * Begin draining p's children during exit: mark P_LIST_CHILDDRSTART and
 * sleep until all parent refs (proc_parentholdref) have been dropped.
 * Caller holds the proc list lock (msleep uses proc_list_mlock).
 */
void
proc_childdrainstart(proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_CHILDDRSTART) == P_LIST_CHILDDRSTART)
		panic("proc_childdrainstart: childdrain already started\n");
#endif
	p->p_listflag |= P_LIST_CHILDDRSTART;
	/* wait for all that hold parentrefs to drop */
	while (p->p_parentref > 0) {
		p->p_listflag |= P_LIST_PARENTREFWAIT;
		msleep(&p->p_parentref, proc_list_mlock, 0, "proc_childdrainstart", 0) ;
	}
}
619
620
621 void
622 proc_childdrainend(proc_t p)
623 {
624 #if __PROC_INTERNAL_DEBUG
625 if (p->p_childrencnt > 0)
626 panic("exiting: children stil hanging around\n");
627 #endif
628 p->p_listflag |= P_LIST_CHILDDRAINED;
629 if ((p->p_listflag & (P_LIST_CHILDLKWAIT |P_LIST_CHILDDRWAIT)) != 0) {
630 p->p_listflag &= ~(P_LIST_CHILDLKWAIT |P_LIST_CHILDDRWAIT);
631 wakeup(&p->p_childrencnt);
632 }
633 }
634
/*
 * Debug-build sanity check run just before a proc structure is freed:
 * panic if it is still hashed or has outstanding children, refs, or
 * parent refs.  Compiles to nothing in release builds.
 */
void
proc_checkdeadrefs(__unused proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INHASH) != 0)
		panic("proc being freed and still in hash %p: %u\n", p, p->p_listflag);
	if (p->p_childrencnt != 0)
		panic("proc being freed and pending children cnt %p:%d\n", p, p->p_childrencnt);
	if (p->p_refcount != 0)
		panic("proc being freed and pending refcount %p:%d\n", p, p->p_refcount);
	if (p->p_parentref != 0)
		panic("proc being freed and pending parentrefs %p:%d\n", p, p->p_parentref);
#endif
}
649
650 int
651 proc_pid(proc_t p)
652 {
653 if (p != NULL)
654 return (p->p_pid);
655 return -1;
656 }
657
658 int
659 proc_ppid(proc_t p)
660 {
661 if (p != NULL)
662 return (p->p_ppid);
663 return -1;
664 }
665
666 int
667 proc_selfpid(void)
668 {
669 return (current_proc()->p_pid);
670 }
671
672 int
673 proc_selfppid(void)
674 {
675 return (current_proc()->p_ppid);
676 }
677
678 int
679 proc_selfcsflags(void)
680 {
681 return (current_proc()->p_csflags);
682 }
683
#if CONFIG_DTRACE
/*
 * DTrace-visible identity helpers.  During the vfork syscall the
 * current uthread briefly runs as the child while DTrace should still
 * see the parent; dtrace_current_proc_vforking() resolves that window.
 */
static proc_t
dtrace_current_proc_vforking(void)
{
	thread_t th = current_thread();
	struct uthread *ut = get_bsdthread_info(th);

	if (ut &&
	    ((ut->uu_flag & (UT_VFORK|UT_VFORKING)) == (UT_VFORK|UT_VFORKING))) {
		/*
		 * Handle the narrow window where we're in the vfork syscall,
		 * but we're not quite ready to claim (in particular, to DTrace)
		 * that we're running as the child.
		 */
		return (get_bsdtask_info(get_threadtask(th)));
	}
	return (current_proc());
}

/* pid as DTrace should observe it (vfork-aware). */
int
dtrace_proc_selfpid(void)
{
	return (dtrace_current_proc_vforking()->p_pid);
}

/* parent pid as DTrace should observe it (vfork-aware). */
int
dtrace_proc_selfppid(void)
{
	return (dtrace_current_proc_vforking()->p_ppid);
}

/* real uid as DTrace should observe it (vfork-aware). */
uid_t
dtrace_proc_selfruid(void)
{
	return (dtrace_current_proc_vforking()->p_ruid);
}
#endif /* CONFIG_DTRACE */
721
/*
 * Return p's parent with a reference taken, or PROC_NULL.  If the
 * parent refused the ref because it is exiting but has not yet drained
 * its children, sleep until the drain settles and retry, since the
 * reparenting done during exit may give p a new parent.
 */
proc_t
proc_parent(proc_t p)
{
	proc_t parent;
	proc_t pp;

	proc_list_lock();
loop:
	pp = p->p_pptr;
	parent =  proc_ref_locked(pp);
	if ((parent == PROC_NULL) && (pp != PROC_NULL) && (pp->p_stat != SZOMB) && ((pp->p_listflag & P_LIST_EXITED) != 0) && ((pp->p_listflag & P_LIST_CHILDDRAINED)== 0)){
		pp->p_listflag |= P_LIST_CHILDLKWAIT;
		msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
		goto loop;
	}
	proc_list_unlock();
	return(parent);
}
740
741 static boolean_t
742 proc_parent_is_currentproc(proc_t p)
743 {
744 boolean_t ret = FALSE;
745
746 proc_list_lock();
747 if (p->p_pptr == current_proc())
748 ret = TRUE;
749
750 proc_list_unlock();
751 return ret;
752 }
753
754 void
755 proc_name(int pid, char * buf, int size)
756 {
757 proc_t p;
758
759 if ((p = proc_find(pid)) != PROC_NULL) {
760 strlcpy(buf, &p->p_comm[0], size);
761 proc_rele(p);
762 }
763 }
764
/*
 * Copy a process name for the kernel debugger/stackshot (no locks, no
 * refs — safe in debugger context).  When the destination is large
 * enough, prefer the long name p_name over the short p_comm.
 */
void
proc_name_kdp(task_t t, char * buf, int size)
{
	proc_t p = get_bsdtask_info(t);
	if (p == PROC_NULL)
		return;

	if ((size_t)size > sizeof(p->p_comm))
		strlcpy(buf, &p->p_name[0], MIN((int)sizeof(p->p_name), size));
	else
		strlcpy(buf, &p->p_comm[0], MIN((int)sizeof(p->p_comm), size));
}
777
778
779 int
780 proc_threadname_kdp(void *uth, char *buf, size_t size)
781 {
782 if (size < MAXTHREADNAMESIZE) {
783 /* this is really just a protective measure for the future in
784 * case the thread name size in stackshot gets out of sync with
785 * the BSD max thread name size. Note that bsd_getthreadname
786 * doesn't take input buffer size into account. */
787 return -1;
788 }
789
790 if (uth != NULL) {
791 bsd_getthreadname(uth, buf);
792 }
793 return 0;
794 }
795
/* note that this function is generally going to be called from stackshot,
 * and the arguments will be coming from a struct which is declared packed
 * thus the input arguments will in general be unaligned. We have to handle
 * that here. */
void
proc_starttime_kdp(void *p, uint64_t *tv_sec, uint64_t *tv_usec)
{
	proc_t pp = (proc_t)p;
	/* packed wrapper forces byte-granular stores so unaligned
	 * destinations are written safely on strict-alignment CPUs */
	struct uint64p {
		uint64_t val;
	} __attribute__((packed));

	if (pp != PROC_NULL) {
		if (tv_sec != NULL)
			((struct uint64p *)tv_sec)->val = pp->p_start.tv_sec;
		if (tv_usec != NULL)
			((struct uint64p *)tv_usec)->val = pp->p_start.tv_usec;
	}
}
815
816 char *
817 proc_name_address(void *p)
818 {
819 return &((proc_t)p)->p_comm[0];
820 }
821
822 void
823 proc_selfname(char * buf, int size)
824 {
825 proc_t p;
826
827 if ((p = current_proc())!= (proc_t)0) {
828 strlcpy(buf, &p->p_comm[0], size);
829 }
830 }
831
832 void
833 proc_signal(int pid, int signum)
834 {
835 proc_t p;
836
837 if ((p = proc_find(pid)) != PROC_NULL) {
838 psignal(p, signum);
839 proc_rele(p);
840 }
841 }
842
843 int
844 proc_issignal(int pid, sigset_t mask)
845 {
846 proc_t p;
847 int error=0;
848
849 if ((p = proc_find(pid)) != PROC_NULL) {
850 error = proc_pendingsignals(p, mask);
851 proc_rele(p);
852 }
853
854 return(error);
855 }
856
857 int
858 proc_noremotehang(proc_t p)
859 {
860 int retval = 0;
861
862 if (p)
863 retval = p->p_flag & P_NOREMOTEHANG;
864 return(retval? 1: 0);
865
866 }
867
868 int
869 proc_exiting(proc_t p)
870 {
871 int retval = 0;
872
873 if (p)
874 retval = p->p_lflag & P_LEXIT;
875 return(retval? 1: 0);
876 }
877
878 int
879 proc_forcequota(proc_t p)
880 {
881 int retval = 0;
882
883 if (p)
884 retval = p->p_flag & P_FORCEQUOTA;
885 return(retval? 1: 0);
886
887 }
888
/*
 * Superuser check for p: take a reference on p's credential, run the
 * suser() check (which may set ASU in p_acflag), and drop the cred ref.
 * Returns 0 when privileged, EPERM otherwise.
 */
int
proc_suser(proc_t p)
{
	kauth_cred_t my_cred;
	int error;

	my_cred = kauth_cred_proc_ref(p);
	error = suser(my_cred, &p->p_acflag);
	kauth_cred_unref(&my_cred);
	return(error);
}
900
901 task_t
902 proc_task(proc_t proc)
903 {
904 return (task_t)proc->task;
905 }
906
/*
 * Obtain the first thread in a process
 *
 * XXX This is a bad thing to do; it exists predominantly to support the
 * XXX use of proc_t's in places that should really be using
 * XXX thread_t's instead.  This maintains historical behaviour, but really
 * XXX needs an audit of the context (proxy vs. not) to clean up.
 *
 * Returns the first uthread's Mach thread, or NULL when the process has
 * no threads.  No reference is taken — NOTE(review): caller must ensure
 * the proc cannot exit underneath it.
 */
thread_t
proc_thread(proc_t proc)
{
	uthread_t uth = TAILQ_FIRST(&proc->p_uthlist);

	if (uth != NULL)
		return(uth->uu_context.vc_thread);

	return(NULL);
}
925
926 kauth_cred_t
927 proc_ucred(proc_t p)
928 {
929 return(p->p_ucred);
930 }
931
932 struct uthread *
933 current_uthread()
934 {
935 thread_t th = current_thread();
936
937 return((struct uthread *)get_bsdthread_info(th));
938 }
939
940
941 int
942 proc_is64bit(proc_t p)
943 {
944 return(IS_64BIT_PROCESS(p));
945 }
946
947 int
948 proc_pidversion(proc_t p)
949 {
950 return(p->p_idversion);
951 }
952
953 uint32_t
954 proc_persona_id(proc_t p)
955 {
956 return (uint32_t)persona_id_from_proc(p);
957 }
958
959 uint32_t
960 proc_getuid(proc_t p)
961 {
962 return(p->p_uid);
963 }
964
965 uint32_t
966 proc_getgid(proc_t p)
967 {
968 return(p->p_gid);
969 }
970
971 uint64_t
972 proc_uniqueid(proc_t p)
973 {
974 return(p->p_uniqueid);
975 }
976
977 uint64_t
978 proc_puniqueid(proc_t p)
979 {
980 return(p->p_puniqueid);
981 }
982
983 void
984 proc_coalitionids(__unused proc_t p, __unused uint64_t ids[COALITION_NUM_TYPES])
985 {
986 #if CONFIG_COALITIONS
987 task_coalition_ids(p->task, ids);
988 #else
989 memset(ids, 0, sizeof(uint64_t [COALITION_NUM_TYPES]));
990 #endif
991 return;
992 }
993
994 uint64_t
995 proc_was_throttled(proc_t p)
996 {
997 return (p->was_throttled);
998 }
999
1000 uint64_t
1001 proc_did_throttle(proc_t p)
1002 {
1003 return (p->did_throttle);
1004 }
1005
1006 int
1007 proc_getcdhash(proc_t p, unsigned char *cdhash)
1008 {
1009 return vn_getcdhash(p->p_textvp, p->p_textoff, cdhash);
1010 }
1011
1012 void
1013 proc_getexecutableuuid(proc_t p, unsigned char *uuidbuf, unsigned long size)
1014 {
1015 if (size >= sizeof(p->p_uuid)) {
1016 memcpy(uuidbuf, p->p_uuid, sizeof(p->p_uuid));
1017 }
1018 }
1019
1020 /* Return vnode for executable with an iocount. Must be released with vnode_put() */
1021 vnode_t
1022 proc_getexecutablevnode(proc_t p)
1023 {
1024 vnode_t tvp = p->p_textvp;
1025
1026 if ( tvp != NULLVP) {
1027 if (vnode_getwithref(tvp) == 0) {
1028 return tvp;
1029 }
1030 }
1031
1032 return NULLVP;
1033 }
1034
1035
1036 void
1037 bsd_set_dependency_capable(task_t task)
1038 {
1039 proc_t p = get_bsdtask_info(task);
1040
1041 if (p) {
1042 OSBitOrAtomic(P_DEPENDENCY_CAPABLE, &p->p_flag);
1043 }
1044 }
1045
1046
1047 int
1048 IS_64BIT_PROCESS(proc_t p)
1049 {
1050 if (p && (p->p_flag & P_LP64))
1051 return(1);
1052 else
1053 return(0);
1054 }
1055
/*
 * Locate a process by number
 * Caller must hold the proc list lock.  pid 0 maps to kernproc.
 * Returns the proc without taking a reference, or NULL.  DEBUG builds
 * also verify the pid hash contains no duplicate pids.
 */
proc_t
pfind_locked(pid_t pid)
{
	proc_t p;
#if DEBUG
	proc_t q;
#endif

	if (!pid)
		return (kernproc);

	for (p = PIDHASH(pid)->lh_first; p != 0; p = p->p_hash.le_next) {
		if (p->p_pid == pid) {
#if DEBUG
			for (q = p->p_hash.le_next; q != 0; q = q->p_hash.le_next) {
				if ((p !=q) && (q->p_pid == pid))
					panic("two procs with same pid %p:%p:%d:%d\n", p, q, p->p_pid, q->p_pid);
			}
#endif
			return (p);
		}
	}
	return (NULL);
}
1083
/*
 * Locate a zombie by PID
 * Scans the zombie list under the proc list lock.  Returns the proc
 * without a reference (zombies cannot be referenced), or NULL.
 */
__private_extern__ proc_t
pzfind(pid_t pid)
{
	proc_t p;


	proc_list_lock();

	for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next)
		if (p->p_pid == pid)
			break;

	proc_list_unlock();

	return (p);
}
1103
/*
 * Locate a process group by number
 * Returns the pgrp with a reference taken (release with pg_rele()),
 * or PGRP_NULL when absent or already marked for termination.
 */

struct pgrp *
pgfind(pid_t pgid)
{
	struct pgrp * pgrp;

	proc_list_lock();
	pgrp = pgfind_internal(pgid);
	if ((pgrp == NULL) || ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) != 0))
		pgrp = PGRP_NULL;
	else
		pgrp->pg_refcount++;
	proc_list_unlock();
	return(pgrp);
}
1122
1123
1124
/*
 * Hash lookup of a process group by id.  Caller must hold the proc
 * list lock; no reference is taken on the result.
 */
struct pgrp *
pgfind_internal(pid_t pgid)
{
	struct pgrp *pgrp;

	for (pgrp = PGRPHASH(pgid)->lh_first; pgrp != 0; pgrp = pgrp->pg_hash.le_next)
		if (pgrp->pg_id == pgid)
			return (pgrp);
	return (NULL);
}
1135
1136 void
1137 pg_rele(struct pgrp * pgrp)
1138 {
1139 if(pgrp == PGRP_NULL)
1140 return;
1141 pg_rele_dropref(pgrp);
1142 }
1143
/*
 * Drop a process-group reference.  If this is the last reference and
 * the group is marked PGRP_FLAG_TERMINATE, hand off to
 * pgdelete_dropref() (with the lock dropped) to tear the group down.
 */
void
pg_rele_dropref(struct pgrp * pgrp)
{
	proc_list_lock();
	if ((pgrp->pg_refcount == 1) && ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) == PGRP_FLAG_TERMINATE)) {
		proc_list_unlock();
		pgdelete_dropref(pgrp);
		return;
	}

	pgrp->pg_refcount--;
	proc_list_unlock();
}
1157
/*
 * Hash lookup of a session by session id.  Caller must hold the proc
 * list lock; no reference is taken on the result.
 */
struct session *
session_find_internal(pid_t sessid)
{
	struct session *sess;

	for (sess = SESSHASH(sessid)->lh_first; sess != 0; sess = sess->s_hash.le_next)
		if (sess->s_sid == sessid)
			return (sess);
	return (NULL);
}
1168
1169
/*
 * Make a new process ready to become a useful member of society by making it
 * visible in all the right places and initialize its own lists to empty.
 *
 * Parameters:	parent			The parent of the process to insert
 *		child			The child process to insert
 *
 * Returns:	(void)
 *
 * Notes:	Insert a child process into the parents process group, assign
 *		the child the parent process pointer and PPID of the parent,
 *		place it on the parents p_children list as a sibling,
 *		initialize its own child list, place it in the allproc list,
 *		insert it in the proper hash bucket, and initialize its
 *		event list.
 */
void
pinsertchild(proc_t parent, proc_t child)
{
	struct pgrp * pg;

	LIST_INIT(&child->p_children);
	TAILQ_INIT(&child->p_evlist);
	child->p_pptr = parent;
	child->p_ppid = parent->p_pid;
	child->p_puniqueid = parent->p_uniqueid;

	/* join the parent's process group (pgrp_add takes its own locks) */
	pg = proc_pgrp(parent);
	pgrp_add(pg, parent, child);
	pg_rele(pg);

	proc_list_lock();

#if CONFIG_MEMORYSTATUS
	memorystatus_add(child, TRUE);
#endif

	parent->p_childrencnt++;
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);

	LIST_INSERT_HEAD(&allproc, child, p_list);
	/* mark the completion of proc creation */
	child->p_listflag &= ~P_LIST_INCREATE;

	proc_list_unlock();
}
1216
/*
 * Move p to a new or existing process group (and session)
 *
 * Returns:	0			Success
 *		ESRCH			No such process
 *
 * If no pgrp with id 'pgid' exists, a new one is created (and, when
 * mksess is set, a new session with p as leader).  Allocations are
 * performed unlocked; the pid is re-validated afterwards to catch p
 * disappearing in the meantime.
 */
int
enterpgrp(proc_t p, pid_t pgid, int mksess)
{
	struct pgrp *pgrp;
	struct pgrp *mypgrp;
	struct session * procsp;

	pgrp = pgfind(pgid);
	mypgrp = proc_pgrp(p);
	procsp = proc_session(p);

#if DIAGNOSTIC
	if (pgrp != NULL && mksess)	/* firewalls */
		panic("enterpgrp: setsid into non-empty pgrp");
	if (SESS_LEADER(p, procsp))
		panic("enterpgrp: session leader attempted setpgrp");
#endif
	if (pgrp == PGRP_NULL) {
		pid_t savepid = p->p_pid;
		proc_t np = PROC_NULL;
		/*
		 * new process group
		 */
#if DIAGNOSTIC
		if (p->p_pid != pgid)
			panic("enterpgrp: new pgrp and pid != pgid");
#endif
		MALLOC_ZONE(pgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP,
		    M_WAITOK);
		if (pgrp == NULL)
			panic("enterpgrp: M_PGRP zone depleted");
		/* re-validate p after the blocking allocation */
		if ((np = proc_find(savepid)) == NULL || np != p) {
			if (np != PROC_NULL)
				proc_rele(np);
			if (mypgrp != PGRP_NULL)
				pg_rele(mypgrp);
			if (procsp != SESSION_NULL)
				session_rele(procsp);
			FREE_ZONE(pgrp, sizeof(struct pgrp), M_PGRP);
			return (ESRCH);
		}
		proc_rele(np);
		if (mksess) {
			struct session *sess;

			/*
			 * new session
			 */
			MALLOC_ZONE(sess, struct session *,
			    sizeof(struct session), M_SESSION, M_WAITOK);
			if (sess == NULL)
				panic("enterpgrp: M_SESSION zone depleted");
			sess->s_leader = p;
			sess->s_sid = p->p_pid;
			sess->s_count = 1;
			sess->s_ttyvp = NULL;
			sess->s_ttyp = TTY_NULL;
			sess->s_flags = 0;
			sess->s_listflags = 0;
			sess->s_ttypgrpid = NO_PID;
#if CONFIG_FINE_LOCK_GROUPS
			lck_mtx_init(&sess->s_mlock, proc_mlock_grp, proc_lck_attr);
#else
			lck_mtx_init(&sess->s_mlock, proc_lck_grp, proc_lck_attr);
#endif
			/* carry the login name over from the old session */
			bcopy(procsp->s_login, sess->s_login,
			    sizeof(sess->s_login));
			/* a new session leader loses its controlling terminal */
			OSBitAndAtomic(~((uint32_t)P_CONTROLT), &p->p_flag);
			proc_list_lock();
			LIST_INSERT_HEAD(SESSHASH(sess->s_sid), sess, s_hash);
			proc_list_unlock();
			pgrp->pg_session = sess;
#if DIAGNOSTIC
			if (p != current_proc())
				panic("enterpgrp: mksession and p != curproc");
#endif
		} else {
			/* new pgrp joins p's existing session */
			proc_list_lock();
			pgrp->pg_session = procsp;

			if ((pgrp->pg_session->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
				panic("enterpgrp: providing ref to terminating session ");
			pgrp->pg_session->s_count++;
			proc_list_unlock();
		}
		pgrp->pg_id = pgid;
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_init(&pgrp->pg_mlock, proc_mlock_grp, proc_lck_attr);
#else
		lck_mtx_init(&pgrp->pg_mlock, proc_lck_grp, proc_lck_attr);
#endif
		LIST_INIT(&pgrp->pg_members);
		pgrp->pg_membercnt = 0;
		pgrp->pg_jobc = 0;
		proc_list_lock();
		pgrp->pg_refcount = 1;
		pgrp->pg_listflags = 0;
		LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
		proc_list_unlock();
	} else if (pgrp == mypgrp) {
		/* already a member of the requested group: nothing to do */
		pg_rele(pgrp);
		if (mypgrp != NULL)
			pg_rele(mypgrp);
		if (procsp != SESSION_NULL)
			session_rele(procsp);
		return (0);
	}

	if (procsp != SESSION_NULL)
		session_rele(procsp);
	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	fixjobc(p, mypgrp, 0);

	if(mypgrp != PGRP_NULL)
		pg_rele(mypgrp);
	pgrp_replace(p, pgrp);
	pg_rele(pgrp);

	return(0);
}
1348
1349 /*
1350 * remove process from process group
1351 */
1352 int
1353 leavepgrp(proc_t p)
1354 {
1355
1356 pgrp_remove(p);
1357 return (0);
1358 }
1359
/*
 * Drop the caller's reference on a process group and, if the group is
 * now both empty and unreferenced, tear it down: detach it from any
 * controlling tty, release its session reference (freeing the session
 * on last release), and free the group itself.
 */
static void
pgdelete_dropref(struct pgrp *pgrp)
{
	struct tty *ttyp;
	int emptypgrp = 1;
	struct session *sessp;


	pgrp_lock(pgrp);
	if (pgrp->pg_membercnt != 0) {
		emptypgrp = 0;
	}
	pgrp_unlock(pgrp);

	proc_list_lock();
	pgrp->pg_refcount--;
	/* bail if members remain (re-checked under proc_list_lock) */
	if ((emptypgrp == 0) || (pgrp->pg_membercnt != 0)) {
		proc_list_unlock();
		return;
	}

	/* mark terminating so new lookups are refused */
	pgrp->pg_listflags |= PGRP_FLAG_TERMINATE;

	/* other holders remain; the last pg_rele() will finish teardown */
	if (pgrp->pg_refcount > 0) {
		proc_list_unlock();
		return;
	}

	pgrp->pg_listflags |= PGRP_FLAG_DEAD;
	LIST_REMOVE(pgrp, pg_hash);

	proc_list_unlock();

	/* disassociate the group from the session's controlling tty */
	ttyp = SESSION_TP(pgrp->pg_session);
	if (ttyp != TTY_NULL) {
		if (ttyp->t_pgrp == pgrp) {
			tty_lock(ttyp);
			/* Re-check after acquiring the lock */
			if (ttyp->t_pgrp == pgrp) {
				ttyp->t_pgrp = NULL;
				pgrp->pg_session->s_ttypgrpid = NO_PID;
			}
			tty_unlock(ttyp);
		}
	}

	proc_list_lock();

	sessp = pgrp->pg_session;
	if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
		panic("pg_deleteref: manipulating refs of already terminating session");
	/* drop the group's session reference; destroy session on last one */
	if (--sessp->s_count == 0) {
		if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
			panic("pg_deleteref: terminating already terminated session");
		sessp->s_listflags |= S_LIST_TERM;
		ttyp = SESSION_TP(sessp);
		LIST_REMOVE(sessp, s_hash);
		proc_list_unlock();
		/* drop the list lock before taking the tty lock */
		if (ttyp != TTY_NULL) {
			tty_lock(ttyp);
			if (ttyp->t_session == sessp)
				ttyp->t_session = NULL;
			tty_unlock(ttyp);
		}
		proc_list_lock();
		sessp->s_listflags |= S_LIST_DEAD;
		if (sessp->s_count != 0)
			panic("pg_deleteref: freeing session in use");
		proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_destroy(&sessp->s_mlock, proc_mlock_grp);
#else
		lck_mtx_destroy(&sessp->s_mlock, proc_lck_grp);
#endif
		FREE_ZONE(sessp, sizeof(struct session), M_SESSION);
	} else
		proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
	lck_mtx_destroy(&pgrp->pg_mlock, proc_mlock_grp);
#else
	lck_mtx_destroy(&pgrp->pg_mlock, proc_lck_grp);
#endif
	FREE_ZONE(pgrp, sizeof(*pgrp), M_PGRP);
}
1447
1448
1449 /*
1450 * Adjust pgrp jobc counters when specified process changes process group.
1451 * We count the number of processes in each process group that "qualify"
1452 * the group for terminal job control (those with a parent in a different
1453 * process group of the same session). If that count reaches zero, the
1454 * process group becomes orphaned. Check both the specified process'
1455 * process group and that of its children.
1456 * entering == 0 => p is leaving specified group.
1457 * entering == 1 => p is entering specified group.
1458 */
/*
 * Per-child callback for fixjobc(): adjust the job-control count
 * (pg_jobc) of the child process's own group when its parent changes
 * process groups.  'arg' carries the parent's pgrp, session, and the
 * entering flag (see fixjobc() for semantics).
 */
int
fixjob_callback(proc_t p, void * arg)
{
	struct fixjob_iterargs *fp;
	struct pgrp * pg, *hispg;
	struct session * mysession, *hissess;
	int entering;

	fp = (struct fixjob_iterargs *)arg;
	pg = fp->pg;
	mysession = fp->mysession;
	entering = fp->entering;

	/* the child's own pgrp and session, each returned with a reference */
	hispg = proc_pgrp(p);
	hissess = proc_session(p);

	/* child qualifies its group: different pgrp, same session as parent */
	if ((hispg != pg) &&
	    (hissess == mysession)) {
		pgrp_lock(hispg);
		if (entering) {
			hispg->pg_jobc++;
			pgrp_unlock(hispg);
		} else if (--hispg->pg_jobc == 0) {
			/* count hit zero: the child's group just became orphaned */
			pgrp_unlock(hispg);
			orphanpg(hispg);
		} else
			pgrp_unlock(hispg);
	}
	if (hissess != SESSION_NULL)
		session_rele(hissess);
	if (hispg != PGRP_NULL)
		pg_rele(hispg);

	return(PROC_RETURNED);
}
1494
/*
 * Adjust pg_jobc for 'pgrp' (and, via fixjob_callback, for the groups
 * of p's children) as p enters (entering==1) or leaves (entering==0)
 * the group.  A group whose pg_jobc reaches zero becomes orphaned and
 * is handed to orphanpg().  See the block comment above fixjob_callback.
 */
void
fixjobc(proc_t p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp = PGRP_NULL;
	struct session *hissess = SESSION_NULL;
	struct session *mysession = pgrp->pg_session;
	proc_t parent;
	struct fixjob_iterargs fjarg;
	boolean_t proc_parent_self;

	/*
	 * Check if p's parent is current proc, if yes then no need to take
	 * a ref; calling proc_parent with current proc as parent may
	 * deadlock if current proc is exiting.
	 */
	proc_parent_self = proc_parent_is_currentproc(p);
	if (proc_parent_self)
		parent = current_proc();
	else
		parent = proc_parent(p);

	if (parent != PROC_NULL) {
		/* parent's pgrp/session, each with a reference we drop below */
		hispgrp = proc_pgrp(parent);
		hissess = proc_session(parent);
		if (!proc_parent_self)
			proc_rele(parent);
	}


	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	if ((hispgrp != pgrp) &&
	    (hissess == mysession)) {
		pgrp_lock(pgrp);
		if (entering) {
			pgrp->pg_jobc++;
			pgrp_unlock(pgrp);
		}else if (--pgrp->pg_jobc == 0) {
			/* p's group just lost its last qualifying member */
			pgrp_unlock(pgrp);
			orphanpg(pgrp);
		} else
			pgrp_unlock(pgrp);
	}

	if (hissess != SESSION_NULL)
		session_rele(hissess);
	if (hispgrp != PGRP_NULL)
		pg_rele(hispgrp);

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	fjarg.pg = pgrp;
	fjarg.mysession = mysession;
	fjarg.entering = entering;
	proc_childrenwalk(p, fixjob_callback, &fjarg);
}
1556
1557 /*
1558 * A process group has become orphaned;
1559 * if there are any stopped processes in the group,
1560 * hang-up all process in that group.
1561 */
1562 static void
1563 orphanpg(struct pgrp * pgrp)
1564 {
1565 proc_t p;
1566 pid_t * pid_list;
1567 int count, pidcount, i, alloc_count;
1568
1569 if (pgrp == PGRP_NULL)
1570 return;
1571 count = 0;
1572 pgrp_lock(pgrp);
1573 for (p = pgrp->pg_members.lh_first; p != 0; p = p->p_pglist.le_next) {
1574 if (p->p_stat == SSTOP) {
1575 for (p = pgrp->pg_members.lh_first; p != 0;
1576 p = p->p_pglist.le_next)
1577 count++;
1578 break; /* ??? stops after finding one.. */
1579 }
1580 }
1581 pgrp_unlock(pgrp);
1582
1583 count += 20;
1584 if (count > hard_maxproc)
1585 count = hard_maxproc;
1586 alloc_count = count * sizeof(pid_t);
1587 pid_list = (pid_t *)kalloc(alloc_count);
1588 bzero(pid_list, alloc_count);
1589
1590 pidcount = 0;
1591 pgrp_lock(pgrp);
1592 for (p = pgrp->pg_members.lh_first; p != 0;
1593 p = p->p_pglist.le_next) {
1594 if (p->p_stat == SSTOP) {
1595 for (p = pgrp->pg_members.lh_first; p != 0;
1596 p = p->p_pglist.le_next) {
1597 pid_list[pidcount] = p->p_pid;
1598 pidcount++;
1599 if (pidcount >= count)
1600 break;
1601 }
1602 break; /* ??? stops after finding one.. */
1603 }
1604 }
1605 pgrp_unlock(pgrp);
1606
1607 if (pidcount == 0)
1608 goto out;
1609
1610
1611 for (i = 0; i< pidcount; i++) {
1612 /* No handling or proc0 */
1613 if (pid_list[i] == 0)
1614 continue;
1615 p = proc_find(pid_list[i]);
1616 if (p) {
1617 proc_transwait(p, 0);
1618 pt_setrunnable(p);
1619 psignal(p, SIGHUP);
1620 psignal(p, SIGCONT);
1621 proc_rele(p);
1622 }
1623 }
1624 out:
1625 kfree(pid_list, alloc_count);
1626 return;
1627 }
1628
1629 int
1630 proc_is_classic(proc_t p __unused)
1631 {
1632 return (0);
1633 }
1634
1635 /* XXX Why does this function exist? Need to kill it off... */
1636 proc_t
1637 current_proc_EXTERNAL(void)
1638 {
1639 return (current_proc());
1640 }
1641
1642 int
1643 proc_is_forcing_hfs_case_sensitivity(proc_t p)
1644 {
1645 return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY) ? 1 : 0;
1646 }
1647
1648 /*
1649 * proc_core_name(name, uid, pid)
1650 * Expand the name described in corefilename, using name, uid, and pid.
1651 * corefilename is a printf-like string, with three format specifiers:
1652 * %N name of process ("name")
1653 * %P process id (pid)
1654 * %U user id (uid)
1655 * For example, "%N.core" is the default; they can be disabled completely
1656 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
1657 * This is controlled by the sysctl variable kern.corefile (see above).
1658 */
1659 __private_extern__ int
1660 proc_core_name(const char *name, uid_t uid, pid_t pid, char *cf_name,
1661 size_t cf_name_len)
1662 {
1663 const char *format, *appendstr;
1664 char id_buf[11]; /* Buffer for pid/uid -- max 4B */
1665 size_t i, l, n;
1666
1667 if (cf_name == NULL)
1668 goto toolong;
1669
1670 format = corefilename;
1671 for (i = 0, n = 0; n < cf_name_len && format[i]; i++) {
1672 switch (format[i]) {
1673 case '%': /* Format character */
1674 i++;
1675 switch (format[i]) {
1676 case '%':
1677 appendstr = "%";
1678 break;
1679 case 'N': /* process name */
1680 appendstr = name;
1681 break;
1682 case 'P': /* process id */
1683 snprintf(id_buf, sizeof(id_buf), "%u", pid);
1684 appendstr = id_buf;
1685 break;
1686 case 'U': /* user id */
1687 snprintf(id_buf, sizeof(id_buf), "%u", uid);
1688 appendstr = id_buf;
1689 break;
1690 default:
1691 appendstr = "";
1692 log(LOG_ERR,
1693 "Unknown format character %c in `%s'\n",
1694 format[i], format);
1695 }
1696 l = strlen(appendstr);
1697 if ((n + l) >= cf_name_len)
1698 goto toolong;
1699 bcopy(appendstr, cf_name + n, l);
1700 n += l;
1701 break;
1702 default:
1703 cf_name[n++] = format[i];
1704 }
1705 }
1706 if (format[i] != '\0')
1707 goto toolong;
1708 return (0);
1709 toolong:
1710 log(LOG_ERR, "pid %ld (%s), uid (%u): corename is too long\n",
1711 (long)pid, name, (uint32_t)uid);
1712 return (1);
1713 }
1714
1715 /* Code Signing related routines */
1716
1717 int
1718 csops(__unused proc_t p, struct csops_args *uap, __unused int32_t *retval)
1719 {
1720 return(csops_internal(uap->pid, uap->ops, uap->useraddr,
1721 uap->usersize, USER_ADDR_NULL));
1722 }
1723
1724 int
1725 csops_audittoken(__unused proc_t p, struct csops_audittoken_args *uap, __unused int32_t *retval)
1726 {
1727 if (uap->uaudittoken == USER_ADDR_NULL)
1728 return(EINVAL);
1729 return(csops_internal(uap->pid, uap->ops, uap->useraddr,
1730 uap->usersize, uap->uaudittoken));
1731 }
1732
/*
 * Copy a code-signing blob out to user space.
 *
 * start/length: kernel blob; start==NULL means copy out a zeroed header.
 * usize/uaddr:  user buffer size and address.
 *
 * Returns 0 on success, ERANGE when the user buffer is too small
 * (after copying out a header carrying the blob's true length so the
 * caller can retry), or an error from copyout().
 */
static int
csops_copy_token(void *start, size_t length, user_size_t usize, user_addr_t uaddr)
{
	char fakeheader[8] = { 0 };
	int error;

	if (usize < sizeof(fakeheader))
		return ERANGE;

	/* if no blob, fill in zero header */
	if (NULL == start) {
		start = fakeheader;
		length = sizeof(fakeheader);
	} else if (usize < length) {
		/* ... if input too short, copy out length of entitlement */
		uint32_t length32 = htonl((uint32_t)length);
		memcpy(&fakeheader[4], &length32, sizeof(length32));

		error = copyout(fakeheader, uaddr, sizeof(fakeheader));
		if (error == 0)
			return ERANGE; /* input buffer too short; ERANGE signals that */
		return error;
	}
	return copyout(start, uaddr, length);
}
1758
/*
 * Guts of csops()/csops_audittoken(): perform code-signing operation
 * 'ops' on process 'pid', exchanging data with the user buffer
 * uaddr/usersize.  If uaudittoken is supplied, its pid and idversion
 * must match the target process.  Read-only ops are unrestricted;
 * mutating ops require root unless self-directed.
 */
static int
csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaudittoken)
{
	size_t usize = (size_t)CAST_DOWN(size_t, usersize);
	proc_t pt;
	int forself;
	int error;
	vnode_t tvp;
	off_t toff;
	unsigned char cdhash[SHA1_RESULTLEN];
	audit_token_t token;
	unsigned int upid=0, uidversion = 0;

	forself = error = 0;

	if (pid == 0)
		pid = proc_selfpid();
	if (pid == proc_selfpid())
		forself = 1;


	switch (ops) {
		case CS_OPS_STATUS:
		case CS_OPS_CDHASH:
		case CS_OPS_PIDOFFSET:
		case CS_OPS_ENTITLEMENTS_BLOB:
		case CS_OPS_IDENTITY:
		case CS_OPS_BLOB:
			break;	/* not restricted to root */
		default:
			/* mutating ops: root only, unless targeting self */
			if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE)
				return(EPERM);
			break;
	}

	pt = proc_find(pid);
	if (pt == PROC_NULL)
		return(ESRCH);

	upid = pt->p_pid;
	uidversion = pt->p_idversion;
	if (uaudittoken != USER_ADDR_NULL) {

		error = copyin(uaudittoken, &token, sizeof(audit_token_t));
		if (error != 0)
			goto out;
		/* verify the audit token pid/idversion matches with proc */
		if ((token.val[5] != upid) || (token.val[7] != uidversion)) {
			error = ESRCH;
			goto out;
		}
	}

#if CONFIG_MACF
	/* MAC policy check: set ops vs. get ops are vetted separately */
	switch (ops) {
		case CS_OPS_MARKINVALID:
		case CS_OPS_MARKHARD:
		case CS_OPS_MARKKILL:
		case CS_OPS_MARKRESTRICT:
		case CS_OPS_SET_STATUS:
			if ((error = mac_proc_check_set_cs_info(current_proc(), pt, ops)))
				goto out;
			break;
		default:
			if ((error = mac_proc_check_get_cs_info(current_proc(), pt, ops)))
				goto out;
	}
#endif

	switch (ops) {

		case CS_OPS_STATUS: {
			/* report p_csflags plus synthesized enforcement/platform bits */
			uint32_t retflags;

			proc_lock(pt);
			retflags = pt->p_csflags;
			if (cs_enforcement(pt))
				retflags |= CS_ENFORCEMENT;
			if (csproc_get_platform_binary(pt))
				retflags |= CS_PLATFORM_BINARY;
			proc_unlock(pt);

			if (uaddr != USER_ADDR_NULL)
				error = copyout(&retflags, uaddr, sizeof(uint32_t));
			break;
		}
		case CS_OPS_MARKINVALID:
			proc_lock(pt);
			if ((pt->p_csflags & CS_VALID) == CS_VALID) {	/* is currently valid */
				pt->p_csflags &= ~CS_VALID;	/* set invalid */
				if ((pt->p_csflags & CS_KILL) == CS_KILL) {
					/* process asked to be killed on invalidation */
					pt->p_csflags |= CS_KILLED;
					proc_unlock(pt);
					if (cs_debug) {
						printf("CODE SIGNING: marked invalid by pid %d: "
						    "p=%d[%s] honoring CS_KILL, final status 0x%x\n",
						    proc_selfpid(), pt->p_pid, pt->p_comm, pt->p_csflags);
					}
					psignal(pt, SIGKILL);
				} else
					proc_unlock(pt);
			} else
				proc_unlock(pt);

			break;

		case CS_OPS_MARKHARD:
			proc_lock(pt);
			pt->p_csflags |= CS_HARD;
			if ((pt->p_csflags & CS_VALID) == 0) {
				/* @@@ allow? reject? kill? @@@ */
				proc_unlock(pt);
				error = EINVAL;
				goto out;
			} else
				proc_unlock(pt);
			break;

		case CS_OPS_MARKKILL:
			proc_lock(pt);
			pt->p_csflags |= CS_KILL;
			if ((pt->p_csflags & CS_VALID) == 0) {
				/* already invalid: honor the new kill bit immediately */
				proc_unlock(pt);
				psignal(pt, SIGKILL);
			} else
				proc_unlock(pt);
			break;

		case CS_OPS_PIDOFFSET:
			toff = pt->p_textoff;
			proc_rele(pt);
			error = copyout(&toff, uaddr, sizeof(toff));
			return(error);

		case CS_OPS_CDHASH:

			/* pt already holds a reference on its p_textvp */
			tvp = pt->p_textvp;
			toff = pt->p_textoff;

			if (tvp == NULLVP || usize != SHA1_RESULTLEN) {
				proc_rele(pt);
				return EINVAL;
			}

			error = vn_getcdhash(tvp, toff, cdhash);
			proc_rele(pt);

			if (error == 0) {
				error = copyout(cdhash, uaddr, sizeof (cdhash));
			}

			return error;

		case CS_OPS_ENTITLEMENTS_BLOB: {
			void *start;
			size_t length;

			proc_lock(pt);

			/* only valid or debugged processes expose their blob */
			if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
				proc_unlock(pt);
				error = EINVAL;
				break;
			}

			error = cs_entitlements_blob_get(pt, &start, &length);
			proc_unlock(pt);
			if (error)
				break;

			error = csops_copy_token(start, length, usize, uaddr);
			break;
		}
		case CS_OPS_MARKRESTRICT:
			proc_lock(pt);
			pt->p_csflags |= CS_RESTRICT;
			proc_unlock(pt);
			break;

		case CS_OPS_SET_STATUS: {
			uint32_t flags;

			if (usize < sizeof(flags)) {
				error = ERANGE;
				break;
			}

			error = copyin(uaddr, &flags, sizeof(flags));
			if (error)
				break;

			/* only allow setting a subset of all code sign flags */
			flags &=
			    CS_HARD | CS_EXEC_SET_HARD |
			    CS_KILL | CS_EXEC_SET_KILL |
			    CS_RESTRICT |
			    CS_REQUIRE_LV |
			    CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT |
			    CS_ENTITLEMENTS_VALIDATED;

			proc_lock(pt);
			if (pt->p_csflags & CS_VALID)
				pt->p_csflags |= flags;
			else
				error = EINVAL;
			proc_unlock(pt);

			break;
		}
		case CS_OPS_BLOB: {
			void *start;
			size_t length;

			proc_lock(pt);
			if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
				proc_unlock(pt);
				error = EINVAL;
				break;
			}

			error = cs_blob_get(pt, &start, &length);
			proc_unlock(pt);
			if (error)
				break;

			error = csops_copy_token(start, length, usize, uaddr);
			break;
		}
		case CS_OPS_IDENTITY: {
			const char *identity;
			uint8_t fakeheader[8];
			uint32_t idlen;
			size_t length;

			/*
			 * Make identity have a blob header to make it
			 * easier on userland to guess the identity
			 * length.
			 */
			if (usize < sizeof(fakeheader)) {
				error = ERANGE;
				break;
			}
			memset(fakeheader, 0, sizeof(fakeheader));

			proc_lock(pt);
			if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
				proc_unlock(pt);
				error = EINVAL;
				break;
			}

			identity = cs_identity_get(pt);
			proc_unlock(pt);
			if (identity == NULL) {
				error = ENOENT;
				break;
			}

			length = strlen(identity) + 1; /* include NUL */
			idlen = htonl(length + sizeof(fakeheader));
			memcpy(&fakeheader[4], &idlen, sizeof(idlen));

			error = copyout(fakeheader, uaddr, sizeof(fakeheader));
			if (error)
				break;

			/* then the identity string itself, if room remains */
			if (usize < sizeof(fakeheader) + length)
				error = ERANGE;
			else if (usize > sizeof(fakeheader))
				error = copyout(identity, uaddr + sizeof(fakeheader), length);

			break;
		}

		default:
			error = EINVAL;
			break;
	}
out:
	proc_rele(pt);
	return(error);
}
2043
2044 int
2045 proc_iterate(flags, callout, arg, filterfn, filterarg)
2046 int flags;
2047 int (*callout)(proc_t, void *);
2048 void * arg;
2049 int (*filterfn)(proc_t, void *);
2050 void * filterarg;
2051 {
2052 proc_t p;
2053 pid_t * pid_list;
2054 int count, pidcount, alloc_count, i, retval;
2055
2056 count = nprocs+ 10;
2057 if (count > hard_maxproc)
2058 count = hard_maxproc;
2059 alloc_count = count * sizeof(pid_t);
2060 pid_list = (pid_t *)kalloc(alloc_count);
2061 bzero(pid_list, alloc_count);
2062
2063
2064 proc_list_lock();
2065
2066
2067 pidcount = 0;
2068 if (flags & PROC_ALLPROCLIST) {
2069 for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
2070 if (p->p_stat == SIDL)
2071 continue;
2072 if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
2073 pid_list[pidcount] = p->p_pid;
2074 pidcount++;
2075 if (pidcount >= count)
2076 break;
2077 }
2078 }
2079 }
2080 if ((pidcount < count ) && (flags & PROC_ZOMBPROCLIST)) {
2081 for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) {
2082 if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
2083 pid_list[pidcount] = p->p_pid;
2084 pidcount++;
2085 if (pidcount >= count)
2086 break;
2087 }
2088 }
2089 }
2090
2091
2092 proc_list_unlock();
2093
2094
2095 for (i = 0; i< pidcount; i++) {
2096 p = proc_find(pid_list[i]);
2097 if (p) {
2098 if ((flags & PROC_NOWAITTRANS) == 0)
2099 proc_transwait(p, 0);
2100 retval = callout(p, arg);
2101
2102 switch (retval) {
2103 case PROC_RETURNED:
2104 proc_rele(p);
2105 break;
2106 case PROC_RETURNED_DONE:
2107 proc_rele(p);
2108 goto out;
2109 case PROC_CLAIMED_DONE:
2110 goto out;
2111 case PROC_CLAIMED:
2112 default:
2113 break;
2114 }
2115 } else if (flags & PROC_ZOMBPROCLIST) {
2116 p = proc_find_zombref(pid_list[i]);
2117 if (p != PROC_NULL) {
2118 retval = callout(p, arg);
2119
2120 switch (retval) {
2121 case PROC_RETURNED:
2122 proc_drop_zombref(p);
2123 break;
2124 case PROC_RETURNED_DONE:
2125 proc_drop_zombref(p);
2126 goto out;
2127 case PROC_CLAIMED_DONE:
2128 goto out;
2129 case PROC_CLAIMED:
2130 default:
2131 break;
2132 }
2133 }
2134 }
2135 }
2136
2137 out:
2138 kfree(pid_list, alloc_count);
2139 return(0);
2140
2141 }
2142
2143
#if 0
/* This is for iteration in case of trivial non blocking callouts */
/*
 * proc_scanall: disabled (never compiled) variant of proc_iterate that
 * walks the lists while holding proc_list_lock and invokes 'callout'
 * directly on each entry; only safe for callouts that cannot block.
 */
int
proc_scanall(flags, callout, arg)
	int flags;
	int (*callout)(proc_t, void *);
	void * arg;
{
	proc_t p;
	int retval;


	proc_list_lock();


	if (flags & PROC_ALLPROCLIST) {
		for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
			retval = callout(p, arg);
			if (retval == PROC_RETURNED_DONE)
				goto out;
		}
	}
	if (flags & PROC_ZOMBPROCLIST) {
		for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) {
			retval = callout(p, arg);
			if (retval == PROC_RETURNED_DONE)
				goto out;
		}
	}
out:

	proc_list_unlock();

	return(0);
}
#endif
2180
2181
2182 int
2183 proc_rebootscan(callout, arg, filterfn, filterarg)
2184 int (*callout)(proc_t, void *);
2185 void * arg;
2186 int (*filterfn)(proc_t, void *);
2187 void * filterarg;
2188 {
2189 proc_t p;
2190 int lockheld = 0, retval;
2191
2192 proc_shutdown_exitcount = 0;
2193
2194 ps_allprocscan:
2195
2196 proc_list_lock();
2197
2198 lockheld = 1;
2199
2200 for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
2201 if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
2202 p = proc_ref_locked(p);
2203
2204 proc_list_unlock();
2205 lockheld = 0;
2206
2207 if (p) {
2208 proc_transwait(p, 0);
2209 retval = callout(p, arg);
2210 proc_rele(p);
2211
2212 switch (retval) {
2213 case PROC_RETURNED_DONE:
2214 case PROC_CLAIMED_DONE:
2215 goto out;
2216 }
2217 }
2218 goto ps_allprocscan;
2219 } /* filter pass */
2220 } /* allproc walk thru */
2221
2222 if (lockheld == 1) {
2223 proc_list_unlock();
2224 lockheld = 0;
2225 }
2226
2227 out:
2228 return(0);
2229
2230 }
2231
2232
2233 int
2234 proc_childrenwalk(parent, callout, arg)
2235 struct proc * parent;
2236 int (*callout)(proc_t, void *);
2237 void * arg;
2238 {
2239 register struct proc *p;
2240 pid_t * pid_list;
2241 int count, pidcount, alloc_count, i, retval;
2242
2243 count = nprocs+ 10;
2244 if (count > hard_maxproc)
2245 count = hard_maxproc;
2246 alloc_count = count * sizeof(pid_t);
2247 pid_list = (pid_t *)kalloc(alloc_count);
2248 bzero(pid_list, alloc_count);
2249
2250
2251 proc_list_lock();
2252
2253
2254 pidcount = 0;
2255 for (p = parent->p_children.lh_first; (p != 0); p = p->p_sibling.le_next) {
2256 if (p->p_stat == SIDL)
2257 continue;
2258 pid_list[pidcount] = p->p_pid;
2259 pidcount++;
2260 if (pidcount >= count)
2261 break;
2262 }
2263 proc_list_unlock();
2264
2265
2266 for (i = 0; i< pidcount; i++) {
2267 p = proc_find(pid_list[i]);
2268 if (p) {
2269 proc_transwait(p, 0);
2270 retval = callout(p, arg);
2271
2272 switch (retval) {
2273 case PROC_RETURNED:
2274 case PROC_RETURNED_DONE:
2275 proc_rele(p);
2276 if (retval == PROC_RETURNED_DONE) {
2277 goto out;
2278 }
2279 break;
2280
2281 case PROC_CLAIMED_DONE:
2282 goto out;
2283 case PROC_CLAIMED:
2284 default:
2285 break;
2286 }
2287 }
2288 }
2289
2290 out:
2291 kfree(pid_list, alloc_count);
2292 return(0);
2293
2294 }
2295
2296 /*
2297 */
2298 /* PGRP_BLOCKITERATE is not implemented yet */
2299 int
2300 pgrp_iterate(pgrp, flags, callout, arg, filterfn, filterarg)
2301 struct pgrp *pgrp;
2302 int flags;
2303 int (*callout)(proc_t, void *);
2304 void * arg;
2305 int (*filterfn)(proc_t, void *);
2306 void * filterarg;
2307 {
2308 proc_t p;
2309 pid_t * pid_list;
2310 int count, pidcount, i, alloc_count;
2311 int retval;
2312 pid_t pgid;
2313 int dropref = flags & PGRP_DROPREF;
2314 #if 0
2315 int serialize = flags & PGRP_BLOCKITERATE;
2316 #else
2317 int serialize = 0;
2318 #endif
2319
2320 if (pgrp == 0)
2321 return(0);
2322 count = pgrp->pg_membercnt + 10;
2323 if (count > hard_maxproc)
2324 count = hard_maxproc;
2325 alloc_count = count * sizeof(pid_t);
2326 pid_list = (pid_t *)kalloc(alloc_count);
2327 bzero(pid_list, alloc_count);
2328
2329 pgrp_lock(pgrp);
2330 if (serialize != 0) {
2331 while ((pgrp->pg_listflags & PGRP_FLAG_ITERABEGIN) == PGRP_FLAG_ITERABEGIN) {
2332 pgrp->pg_listflags |= PGRP_FLAG_ITERWAIT;
2333 msleep(&pgrp->pg_listflags, &pgrp->pg_mlock, 0, "pgrp_iterate", 0);
2334 }
2335 pgrp->pg_listflags |= PGRP_FLAG_ITERABEGIN;
2336 }
2337
2338 pgid = pgrp->pg_id;
2339
2340 pidcount = 0;
2341 for (p = pgrp->pg_members.lh_first; p != 0;
2342 p = p->p_pglist.le_next) {
2343 if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
2344 pid_list[pidcount] = p->p_pid;
2345 pidcount++;
2346 if (pidcount >= count)
2347 break;
2348 }
2349 }
2350
2351
2352 pgrp_unlock(pgrp);
2353 if ((serialize == 0) && (dropref != 0))
2354 pg_rele(pgrp);
2355
2356
2357 for (i = 0; i< pidcount; i++) {
2358 /* No handling or proc0 */
2359 if (pid_list[i] == 0)
2360 continue;
2361 p = proc_find(pid_list[i]);
2362 if (p) {
2363 if (p->p_pgrpid != pgid) {
2364 proc_rele(p);
2365 continue;
2366 }
2367 proc_transwait(p, 0);
2368 retval = callout(p, arg);
2369
2370 switch (retval) {
2371 case PROC_RETURNED:
2372 case PROC_RETURNED_DONE:
2373 proc_rele(p);
2374 if (retval == PROC_RETURNED_DONE) {
2375 goto out;
2376 }
2377 break;
2378
2379 case PROC_CLAIMED_DONE:
2380 goto out;
2381 case PROC_CLAIMED:
2382 default:
2383 break;
2384 }
2385 }
2386 }
2387 out:
2388 if (serialize != 0) {
2389 pgrp_lock(pgrp);
2390 pgrp->pg_listflags &= ~PGRP_FLAG_ITERABEGIN;
2391 if ((pgrp->pg_listflags & PGRP_FLAG_ITERWAIT) == PGRP_FLAG_ITERWAIT) {
2392 pgrp->pg_listflags &= ~PGRP_FLAG_ITERWAIT;
2393 wakeup(&pgrp->pg_listflags);
2394 }
2395 pgrp_unlock(pgrp);
2396 if (dropref != 0)
2397 pg_rele(pgrp);
2398 }
2399 kfree(pid_list, alloc_count);
2400 return(0);
2401 }
2402
/*
 * Add 'child' to 'pgrp', linking it after 'parent' in the member list
 * when a parent is supplied.  The caller holds a reference on pgrp,
 * which makes it safe to clear a pending TERMINATE flag here.
 */
static void
pgrp_add(struct pgrp * pgrp, struct proc * parent, struct proc * child)
{
	proc_list_lock();
	child->p_pgrp = pgrp;
	child->p_pgrpid = pgrp->pg_id;
	child->p_listflag |= P_LIST_INPGRP;
	/*
	 * When pgrp is being freed , a process can still
	 * request addition using setpgid from bash when
	 * login is terminated (login cycler) return ESRCH
	 * Safe to hold lock due to refcount on pgrp
	 */
	if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
		pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	if ((pgrp->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
		panic("pgrp_add : pgrp is dead adding process");
	proc_list_unlock();

	pgrp_lock(pgrp);
	pgrp->pg_membercnt++;
	if ( parent != PROC_NULL) {
		LIST_INSERT_AFTER(parent, child, p_pglist);
	}else {
		LIST_INSERT_HEAD(&pgrp->pg_members, child, p_pglist);
	}
	pgrp_unlock(pgrp);

	proc_list_lock();
	/* re-clear TERMINATE in case it was set while we held only pg_mlock */
	if (((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (pgrp->pg_membercnt != 0)) {
		pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}
	proc_list_unlock();
}
2439
/*
 * Remove 'p' from its process group.  If p was the last member, the
 * group is torn down (pgdelete_dropref consumes the reference taken
 * by proc_pgrp); otherwise the reference is simply dropped.
 */
static void
pgrp_remove(struct proc * p)
{
	struct pgrp * pg;

	pg = proc_pgrp(p);

	proc_list_lock();
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INPGRP) == 0)
		panic("removing from pglist but no named ref\n");
#endif
	p->p_pgrpid = PGRPID_DEAD;
	p->p_listflag &= ~P_LIST_INPGRP;
	p->p_pgrp = NULL;
	proc_list_unlock();

	if (pg == PGRP_NULL)
		panic("pgrp_remove: pg is NULL");
	pgrp_lock(pg);
	pg->pg_membercnt--;

	if (pg->pg_membercnt < 0)
		panic("pgprp: -ve membercnt pgprp:%p p:%p\n",pg, p);

	LIST_REMOVE(p, p_pglist);
	if (pg->pg_members.lh_first == 0) {
		pgrp_unlock(pg);
		/* last member: teardown consumes our reference */
		pgdelete_dropref(pg);
	} else {
		pgrp_unlock(pg);
		pg_rele(pg);
	}
}
2474
2475
/* cannot use proc_pgrp as it maybe stalled */
/*
 * Atomically (with respect to proc_pgrp/proc_session lookups) move 'p'
 * from its current process group into 'newpg'.  The P_LIST_PGRPTRANS
 * flag marks the transition so concurrent lookups block until it
 * completes.  The caller holds a reference on newpg.
 */
static void
pgrp_replace(struct proc * p, struct pgrp * newpg)
{
	struct pgrp * oldpg;



	proc_list_lock();

	/* wait out any other in-flight transition on this proc */
	while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
		p->p_listflag |= P_LIST_PGRPTRWAIT;
		(void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
	}

	p->p_listflag |= P_LIST_PGRPTRANS;

	oldpg = p->p_pgrp;
	if (oldpg == PGRP_NULL)
		panic("pgrp_replace: oldpg NULL");
	/* hold the old group across the detach below */
	oldpg->pg_refcount++;
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INPGRP) == 0)
		panic("removing from pglist but no named ref\n");
#endif
	p->p_pgrpid = PGRPID_DEAD;
	p->p_listflag &= ~P_LIST_INPGRP;
	p->p_pgrp = NULL;

	proc_list_unlock();

	pgrp_lock(oldpg);
	oldpg->pg_membercnt--;
	if (oldpg->pg_membercnt < 0)
		panic("pgprp: -ve membercnt pgprp:%p p:%p\n",oldpg, p);
	LIST_REMOVE(p, p_pglist);
	if (oldpg->pg_members.lh_first == 0) {
		pgrp_unlock(oldpg);
		/* last member: teardown consumes our reference */
		pgdelete_dropref(oldpg);
	} else {
		pgrp_unlock(oldpg);
		pg_rele(oldpg);
	}

	proc_list_lock();
	p->p_pgrp = newpg;
	p->p_pgrpid = newpg->pg_id;
	p->p_listflag |= P_LIST_INPGRP;
	/*
	 * When pgrp is being freed , a process can still
	 * request addition using setpgid from bash when
	 * login is terminated (login cycler) return ESRCH
	 * Safe to hold lock due to refcount on pgrp
	 */
	if ((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
		newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	if ((newpg->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
		panic("pgrp_add : pgrp is dead adding process");
	proc_list_unlock();

	pgrp_lock(newpg);
	newpg->pg_membercnt++;
	LIST_INSERT_HEAD(&newpg->pg_members, p, p_pglist);
	pgrp_unlock(newpg);

	proc_list_lock();
	if (((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (newpg->pg_membercnt != 0)) {
		newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	/* transition complete: wake anyone waiting in proc_pgrp/proc_session */
	p->p_listflag &= ~P_LIST_PGRPTRANS;
	if ((p->p_listflag & P_LIST_PGRPTRWAIT) == P_LIST_PGRPTRWAIT) {
		p->p_listflag &= ~P_LIST_PGRPTRWAIT;
		wakeup(&p->p_pgrpid);

	}
	proc_list_unlock();
}
2556
/* Acquire the process group's member-list mutex. */
void
pgrp_lock(struct pgrp * pgrp)
{
	lck_mtx_lock(&pgrp->pg_mlock);
}
2562
/* Release the process group's member-list mutex. */
void
pgrp_unlock(struct pgrp * pgrp)
{
	lck_mtx_unlock(&pgrp->pg_mlock);
}
2568
/* Acquire the session's mutex. */
void
session_lock(struct session * sess)
{
	lck_mtx_lock(&sess->s_mlock);
}
2574
2575
/* Release the session's mutex. */
void
session_unlock(struct session * sess)
{
	lck_mtx_unlock(&sess->s_mlock);
}
2581
2582 struct pgrp *
2583 proc_pgrp(proc_t p)
2584 {
2585 struct pgrp * pgrp;
2586
2587 if (p == PROC_NULL)
2588 return(PGRP_NULL);
2589 proc_list_lock();
2590
2591 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2592 p->p_listflag |= P_LIST_PGRPTRWAIT;
2593 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2594 }
2595
2596 pgrp = p->p_pgrp;
2597
2598 assert(pgrp != NULL);
2599
2600 if (pgrp != PGRP_NULL) {
2601 pgrp->pg_refcount++;
2602 if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) != 0)
2603 panic("proc_pgrp: ref being povided for dead pgrp");
2604 }
2605
2606 proc_list_unlock();
2607
2608 return(pgrp);
2609 }
2610
2611 struct pgrp *
2612 tty_pgrp(struct tty * tp)
2613 {
2614 struct pgrp * pg = PGRP_NULL;
2615
2616 proc_list_lock();
2617 pg = tp->t_pgrp;
2618
2619 if (pg != PGRP_NULL) {
2620 if ((pg->pg_listflags & PGRP_FLAG_DEAD) != 0)
2621 panic("tty_pgrp: ref being povided for dead pgrp");
2622 pg->pg_refcount++;
2623 }
2624 proc_list_unlock();
2625
2626 return(pg);
2627 }
2628
2629 struct session *
2630 proc_session(proc_t p)
2631 {
2632 struct session * sess = SESSION_NULL;
2633
2634 if (p == PROC_NULL)
2635 return(SESSION_NULL);
2636
2637 proc_list_lock();
2638
2639 /* wait during transitions */
2640 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2641 p->p_listflag |= P_LIST_PGRPTRWAIT;
2642 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2643 }
2644
2645 if ((p->p_pgrp != PGRP_NULL) && ((sess = p->p_pgrp->pg_session) != SESSION_NULL)) {
2646 if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
2647 panic("proc_session:returning sesssion ref on terminating session");
2648 sess->s_count++;
2649 }
2650 proc_list_unlock();
2651 return(sess);
2652 }
2653
/*
 * Drop one reference on a session.  When the last reference is
 * released the session is unhashed, its mutex destroyed, and its
 * storage returned to the session zone.  Callers must not hold the
 * proc list lock.
 */
void
session_rele(struct session *sess)
{
	proc_list_lock();
	if (--sess->s_count == 0) {
		/* last ref: teardown must not already be in progress */
		if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
			panic("session_rele: terminating already terminated session");
		sess->s_listflags |= S_LIST_TERM;
		LIST_REMOVE(sess, s_hash);	/* unhash so no new lookups find it */
		sess->s_listflags |= S_LIST_DEAD;
		/* defensive: nothing may have re-referenced the dying session */
		if (sess->s_count != 0)
			panic("session_rele: freeing session in use");
		proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_destroy(&sess->s_mlock, proc_mlock_grp);
#else
		lck_mtx_destroy(&sess->s_mlock, proc_lck_grp);
#endif
		FREE_ZONE(sess, sizeof(struct session), M_SESSION);
	} else
		proc_list_unlock();
}
2676
2677 int
2678 proc_transstart(proc_t p, int locked, int non_blocking)
2679 {
2680 if (locked == 0)
2681 proc_lock(p);
2682 while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
2683 if (((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT) || non_blocking) {
2684 if (locked == 0)
2685 proc_unlock(p);
2686 return EDEADLK;
2687 }
2688 p->p_lflag |= P_LTRANSWAIT;
2689 msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
2690 }
2691 p->p_lflag |= P_LINTRANSIT;
2692 p->p_transholder = current_thread();
2693 if (locked == 0)
2694 proc_unlock(p);
2695 return 0;
2696 }
2697
2698 void
2699 proc_transcommit(proc_t p, int locked)
2700 {
2701 if (locked == 0)
2702 proc_lock(p);
2703
2704 assert ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT);
2705 assert (p->p_transholder == current_thread());
2706 p->p_lflag |= P_LTRANSCOMMIT;
2707
2708 if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
2709 p->p_lflag &= ~P_LTRANSWAIT;
2710 wakeup(&p->p_lflag);
2711 }
2712 if (locked == 0)
2713 proc_unlock(p);
2714 }
2715
2716 void
2717 proc_transend(proc_t p, int locked)
2718 {
2719 if (locked == 0)
2720 proc_lock(p);
2721
2722 p->p_lflag &= ~( P_LINTRANSIT | P_LTRANSCOMMIT);
2723 p->p_transholder = NULL;
2724
2725 if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
2726 p->p_lflag &= ~P_LTRANSWAIT;
2727 wakeup(&p->p_lflag);
2728 }
2729 if (locked == 0)
2730 proc_unlock(p);
2731 }
2732
2733 int
2734 proc_transwait(proc_t p, int locked)
2735 {
2736 if (locked == 0)
2737 proc_lock(p);
2738 while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
2739 if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT && current_proc() == p) {
2740 if (locked == 0)
2741 proc_unlock(p);
2742 return EDEADLK;
2743 }
2744 p->p_lflag |= P_LTRANSWAIT;
2745 msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
2746 }
2747 if (locked == 0)
2748 proc_unlock(p);
2749 return 0;
2750 }
2751
/*
 * Acquire the global proc klist mutex.
 */
void
proc_klist_lock(void)
{
	lck_mtx_lock(proc_klist_mlock);
}
2757
/*
 * Release the global proc klist mutex.
 */
void
proc_klist_unlock(void)
{
	lck_mtx_unlock(proc_klist_mlock);
}
2763
/*
 * Post `hint` to all knotes attached to p, under the global
 * proc klist lock.
 */
void
proc_knote(struct proc * p, long hint)
{
	proc_klist_lock();
	KNOTE(&p->p_klist, hint);
	proc_klist_unlock();
}
2771
2772 void
2773 proc_knote_drain(struct proc *p)
2774 {
2775 struct knote *kn = NULL;
2776
2777 /*
2778 * Clear the proc's klist to avoid references after the proc is reaped.
2779 */
2780 proc_klist_lock();
2781 while ((kn = SLIST_FIRST(&p->p_klist))) {
2782 kn->kn_ptr.p_proc = PROC_NULL;
2783 KNOTE_DETACH(&p->p_klist, kn);
2784 }
2785 proc_klist_unlock();
2786 }
2787
/*
 * Set the P_LREGISTER flag on p under the proc lock.
 */
void
proc_setregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag |= P_LREGISTER;
	proc_unlock(p);
}
2795
/*
 * Clear the P_LREGISTER flag on p under the proc lock.
 */
void
proc_resetregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag &= ~P_LREGISTER;
	proc_unlock(p);
}
2803
/*
 * Return the process group id cached on the proc.
 * No locking: reads the p_pgrpid field directly.
 */
pid_t
proc_pgrpid(proc_t p)
{
	return p->p_pgrpid;
}
2809
2810 pid_t
2811 proc_selfpgrpid()
2812 {
2813 return current_proc()->p_pgrpid;
2814 }
2815
2816
2817 /* return control and action states */
2818 int
2819 proc_getpcontrol(int pid, int * pcontrolp)
2820 {
2821 proc_t p;
2822
2823 p = proc_find(pid);
2824 if (p == PROC_NULL)
2825 return(ESRCH);
2826 if (pcontrolp != NULL)
2827 *pcontrolp = p->p_pcaction;
2828
2829 proc_rele(p);
2830 return(0);
2831 }
2832
/*
 * Apply the low-swap control action configured on p (throttle,
 * suspend, or kill) if no action has been taken yet
 * (PROC_ACTION_STATE(p) == 0).  Marks the action as taken before
 * dropping the proc lock; the action itself (task_suspend/psignal)
 * and the log message run unlocked.  Always returns PROC_RETURNED.
 */
int
proc_dopcontrol(proc_t p)
{
	int pcontrol;

	proc_lock(p);

	pcontrol = PROC_CONTROL_STATE(p);

	if (PROC_ACTION_STATE(p) == 0) {
		switch(pcontrol) {
		case P_PCTHROTTLE:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);	/* drop lock before printf */
			printf("low swap: throttling pid %d (%s)\n", p->p_pid, p->p_comm);
			break;

		case P_PCSUSP:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);	/* task_suspend must not run under the proc lock */
			printf("low swap: suspending pid %d (%s)\n", p->p_pid, p->p_comm);
			task_suspend(p->task);
			break;

		case P_PCKILL:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
			psignal(p, SIGKILL);
			break;

		default:
			/* no control action configured */
			proc_unlock(p);
		}

	} else
		proc_unlock(p);

	return(PROC_RETURNED);
}
2873
2874
2875 /*
2876 * Resume a throttled or suspended process. This is an internal interface that's only
2877 * used by the user level code that presents the GUI when we run out of swap space and
2878 * hence is restricted to processes with superuser privileges.
2879 */
2880
/*
 * Undo a previously applied low-swap action on process `pid`
 * (unthrottle or resume).  Restricted to processes validated as
 * resource-control owners (P_LVMRSRCOWNER) or to superuser.
 * Returns an error from suser(), ESRCH if the pid does not exist,
 * or 0 otherwise.
 */
int
proc_resetpcontrol(int pid)
{
	proc_t p;
	int pcontrol;
	int error;
	proc_t self = current_proc();

	/* if the process has been validated to handle resource control or root is valid one */
	if (((self->p_lflag & P_LVMRSRCOWNER) == 0) && (error = suser(kauth_cred_get(), 0)))
		return error;

	p = proc_find(pid);
	if (p == PROC_NULL)
		return(ESRCH);

	proc_lock(p);

	pcontrol = PROC_CONTROL_STATE(p);

	if(PROC_ACTION_STATE(p) !=0) {
		switch(pcontrol) {
		case P_PCTHROTTLE:
			PROC_RESETACTION_STATE(p);
			proc_unlock(p);	/* drop lock before printf */
			printf("low swap: unthrottling pid %d (%s)\n", p->p_pid, p->p_comm);
			break;

		case P_PCSUSP:
			PROC_RESETACTION_STATE(p);
			proc_unlock(p);	/* task_resume must not run under the proc lock */
			printf("low swap: resuming pid %d (%s)\n", p->p_pid, p->p_comm);
			task_resume(p->task);
			break;

		case P_PCKILL:
			/*
			 * NOTE(review): a queued kill is deliberately not undone (see
			 * the "ignored" message below), but this SETs rather than
			 * RESETs the action state — confirm PROC_SETACTION_STATE
			 * here is intentional.
			 */
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: attempt to unkill pid %d (%s) ignored\n", p->p_pid, p->p_comm);
			break;

		default:
			/* no control action configured */
			proc_unlock(p);
		}

	} else
		proc_unlock(p);

	proc_rele(p);
	return(0);
}
2933
2934
2935
/*
 * Accumulator filled in by proc_pcontrol_filter() during the
 * proc_iterate() in no_paging_space_action().  Processes fall into
 * three categories based on PROC_CONTROL_STATE/PROC_ACTION_STATE:
 *   pcs_*  — have a control action configured, not yet acted on
 *   npcs_* — have no control action configured
 *   apcs_* — have a control action configured and already acted on
 * For pcs_/npcs_, the pid/uniqueid/max_size triplet tracks the
 * process with the largest compressed footprint in that category.
 */
struct no_paging_space
{
	uint64_t	pcs_max_size;	/* largest compressed size among actionable procs */
	uint64_t	pcs_uniqueid;	/* uniqueid of that proc (guards pid reuse) */
	int		pcs_pid;	/* pid of that proc */
	int		pcs_proc_count;	/* number of actionable procs seen */
	uint64_t	pcs_total_size;	/* total compressed size of actionable procs */

	uint64_t	npcs_max_size;	/* largest compressed size among non-actionable procs */
	uint64_t	npcs_uniqueid;	/* uniqueid of that proc (guards pid reuse) */
	int		npcs_pid;	/* pid of that proc */
	int		npcs_proc_count;	/* number of non-actionable procs seen */
	uint64_t	npcs_total_size;	/* total compressed size of non-actionable procs */

	int		apcs_proc_count;	/* number of already-acted-on procs seen */
	uint64_t	apcs_total_size;	/* total compressed size of already-acted-on procs */
};
2953
2954
2955 static int
2956 proc_pcontrol_filter(proc_t p, void *arg)
2957 {
2958 struct no_paging_space *nps;
2959 uint64_t compressed;
2960
2961 nps = (struct no_paging_space *)arg;
2962
2963 compressed = get_task_compressed(p->task);
2964
2965 if (PROC_CONTROL_STATE(p)) {
2966 if (PROC_ACTION_STATE(p) == 0) {
2967 if (compressed > nps->pcs_max_size) {
2968 nps->pcs_pid = p->p_pid;
2969 nps->pcs_uniqueid = p->p_uniqueid;
2970 nps->pcs_max_size = compressed;
2971 }
2972 nps->pcs_total_size += compressed;
2973 nps->pcs_proc_count++;
2974 } else {
2975 nps->apcs_total_size += compressed;
2976 nps->apcs_proc_count++;
2977 }
2978 } else {
2979 if (compressed > nps->npcs_max_size) {
2980 nps->npcs_pid = p->p_pid;
2981 nps->npcs_uniqueid = p->p_uniqueid;
2982 nps->npcs_max_size = compressed;
2983 }
2984 nps->npcs_total_size += compressed;
2985 nps->npcs_proc_count++;
2986
2987 }
2988 return (0);
2989 }
2990
2991
/*
 * No-op proc_iterate() callout, used when only the filter's side
 * effects (statistics gathering) are wanted.
 */
static int
proc_pcontrol_null(__unused proc_t p, __unused void *arg)
{
	return(PROC_RETURNED);
}
2997
2998
2999 /*
3000 * Deal with the low on compressor pool space condition... this function
3001 * gets called when we are approaching the limits of the compressor pool or
3002 * we are unable to create a new swap file.
 * Since this eventually creates a memory deadlock situation, we need to take action to free up
3004 * memory resources (both compressed and uncompressed) in order to prevent the system from hanging completely.
3005 * There are 2 categories of processes to deal with. Those that have an action
3006 * associated with them by the task itself and those that do not. Actionable
3007 * tasks can have one of three categories specified: ones that
3008 * can be killed immediately, ones that should be suspended, and ones that should
3009 * be throttled. Processes that do not have an action associated with them are normally
3010 * ignored unless they are utilizing such a large percentage of the compressor pool (currently 50%)
3011 * that only by killing them can we hope to put the system back into a usable state.
3012 */
3013
3014 #define NO_PAGING_SPACE_DEBUG 0
3015
3016 extern uint64_t vm_compressor_pages_compressed(void);
3017
3018 struct timeval last_no_space_action = {0, 0};
3019
3020 int
3021 no_paging_space_action()
3022 {
3023 proc_t p;
3024 struct no_paging_space nps;
3025 struct timeval now;
3026
3027 /*
3028 * Throttle how often we come through here. Once every 5 seconds should be plenty.
3029 */
3030 microtime(&now);
3031
3032 if (now.tv_sec <= last_no_space_action.tv_sec + 5)
3033 return (0);
3034
3035 /*
3036 * Examine all processes and find the biggest (biggest is based on the number of pages this
3037 * task has in the compressor pool) that has been marked to have some action
3038 * taken when swap space runs out... we also find the biggest that hasn't been marked for
3039 * action.
3040 *
3041 * If the biggest non-actionable task is over the "dangerously big" threashold (currently 50% of
3042 * the total number of pages held by the compressor, we go ahead and kill it since no other task
3043 * can have any real effect on the situation. Otherwise, we go after the actionable process.
3044 */
3045 bzero(&nps, sizeof(nps));
3046
3047 proc_iterate(PROC_ALLPROCLIST, proc_pcontrol_null, (void *)NULL, proc_pcontrol_filter, (void *)&nps);
3048
3049 #if NO_PAGING_SPACE_DEBUG
3050 printf("low swap: npcs_proc_count = %d, npcs_total_size = %qd, npcs_max_size = %qd\n",
3051 nps.npcs_proc_count, nps.npcs_total_size, nps.npcs_max_size);
3052 printf("low swap: pcs_proc_count = %d, pcs_total_size = %qd, pcs_max_size = %qd\n",
3053 nps.pcs_proc_count, nps.pcs_total_size, nps.pcs_max_size);
3054 printf("low swap: apcs_proc_count = %d, apcs_total_size = %qd\n",
3055 nps.apcs_proc_count, nps.apcs_total_size);
3056 #endif
3057 if (nps.npcs_max_size > (vm_compressor_pages_compressed() * 50) / 100) {
3058 /*
3059 * for now we'll knock out any task that has more then 50% of the pages
3060 * held by the compressor
3061 */
3062 if ((p = proc_find(nps.npcs_pid)) != PROC_NULL) {
3063
3064 if (nps.npcs_uniqueid == p->p_uniqueid) {
3065 /*
3066 * verify this is still the same process
3067 * in case the proc exited and the pid got reused while
3068 * we were finishing the proc_iterate and getting to this point
3069 */
3070 last_no_space_action = now;
3071
3072 printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
3073 psignal(p, SIGKILL);
3074
3075 proc_rele(p);
3076
3077 return (0);
3078 }
3079
3080 proc_rele(p);
3081 }
3082 }
3083
3084 if (nps.pcs_max_size > 0) {
3085 if ((p = proc_find(nps.pcs_pid)) != PROC_NULL) {
3086
3087 if (nps.pcs_uniqueid == p->p_uniqueid) {
3088 /*
3089 * verify this is still the same process
3090 * in case the proc exited and the pid got reused while
3091 * we were finishing the proc_iterate and getting to this point
3092 */
3093 last_no_space_action = now;
3094
3095 proc_dopcontrol(p);
3096
3097 proc_rele(p);
3098
3099 return (1);
3100 }
3101
3102 proc_rele(p);
3103 }
3104 }
3105 last_no_space_action = now;
3106
3107 printf("low swap: unable to find any eligible processes to take action on\n");
3108
3109 return (0);
3110 }
3111
/*
 * proc_trace_log system call: ask the target task (identified by
 * both pid and uniqueid, to guard against pid reuse) to send its
 * trace memory.  Requires the PRIV_PROC_TRACE_INSPECT privilege.
 * Returns EPERM on privilege failure, ENOENT if the pid does not
 * exist or the uniqueid does not match, EINVAL if the task send
 * fails, 0 on success.
 */
int
proc_trace_log(__unused proc_t p, struct proc_trace_log_args *uap, __unused int *retval)
{
	int ret = 0;
	proc_t target_proc = PROC_NULL;
	pid_t target_pid = uap->pid;
	uint64_t target_uniqueid = uap->uniqueid;
	task_t target_task = NULL;

	if (priv_check_cred(kauth_cred_get(), PRIV_PROC_TRACE_INSPECT, 0)) {
		ret = EPERM;
		goto out;
	}
	target_proc = proc_find(target_pid);
	if (target_proc != PROC_NULL) {
		/* uniqueid must match in case the pid was recycled */
		if (target_uniqueid != proc_uniqueid(target_proc)) {
			ret = ENOENT;
			goto out;
		}

		target_task = proc_task(target_proc);
		if (task_send_trace_memory(target_task, target_pid, target_uniqueid)) {
			ret = EINVAL;
			goto out;
		}
	} else
		ret = ENOENT;

out:
	/* single exit: drop the proc_find() reference on all paths */
	if (target_proc != PROC_NULL)
		proc_rele(target_proc);
	return (ret);
}
3145
3146 #if VM_SCAN_FOR_SHADOW_CHAIN
3147 extern int vm_map_shadow_max(vm_map_t map);
3148 int proc_shadow_max(void);
3149 int proc_shadow_max(void)
3150 {
3151 int retval, max;
3152 proc_t p;
3153 task_t task;
3154 vm_map_t map;
3155
3156 max = 0;
3157 proc_list_lock();
3158 for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
3159 if (p->p_stat == SIDL)
3160 continue;
3161 task = p->task;
3162 if (task == NULL) {
3163 continue;
3164 }
3165 map = get_task_map(task);
3166 if (map == NULL) {
3167 continue;
3168 }
3169 retval = vm_map_shadow_max(map);
3170 if (retval > max) {
3171 max = retval;
3172 }
3173 }
3174 proc_list_unlock();
3175 return max;
3176 }
3177 #endif /* VM_SCAN_FOR_SHADOW_CHAIN */
3178
3179 void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid);
3180 void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid)
3181 {
3182 if (target_proc != NULL) {
3183 target_proc->p_responsible_pid = responsible_pid;
3184 }
3185 return;
3186 }
3187
3188 int
3189 proc_chrooted(proc_t p)
3190 {
3191 int retval = 0;
3192
3193 if (p) {
3194 proc_fdlock(p);
3195 retval = (p->p_fd->fd_rdir != NULL) ? 1 : 0;
3196 proc_fdunlock(p);
3197 }
3198
3199 return retval;
3200 }