]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kern_proc.c
bd82153262eff9d82c855078cf0ab0b25edde96b
[apple/xnu.git] / bsd / kern / kern_proc.c
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)kern_proc.c 8.4 (Berkeley) 1/4/94
62 */
63 /*
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
67 * Version 2.0.
68 */
69 /* HISTORY
70 * 04-Aug-97 Umesh Vaishampayan (umeshv@apple.com)
71 * Added current_proc_EXTERNAL() function for the use of kernel
 *	Added current_proc_EXTERNAL() function for the use of kernel
 *	loadable modules.
73 *
74 * 05-Jun-95 Mac Gillon (mgillon) at NeXT
75 * New version based on 3.3NS and 4.4
76 */
77
78
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/kernel.h>
82 #include <sys/proc_internal.h>
83 #include <sys/acct.h>
84 #include <sys/wait.h>
85 #include <sys/file_internal.h>
86 #include <sys/uio.h>
87 #include <sys/malloc.h>
88 #include <sys/lock.h>
89 #include <sys/mbuf.h>
90 #include <sys/ioctl.h>
91 #include <sys/tty.h>
92 #include <sys/signalvar.h>
93 #include <sys/syslog.h>
94 #include <sys/sysctl.h>
95 #include <sys/sysproto.h>
96 #include <sys/kauth.h>
97 #include <sys/codesign.h>
98 #include <sys/kernel_types.h>
99 #include <sys/ubc.h>
100 #include <kern/kalloc.h>
101 #include <kern/task.h>
102 #include <kern/coalition.h>
103 #include <sys/coalition.h>
104 #include <kern/assert.h>
105 #include <vm/vm_protos.h>
106 #include <vm/vm_map.h> /* vm_map_switch_protect() */
107 #include <vm/vm_pageout.h>
108 #include <mach/task.h>
109 #include <mach/message.h>
110 #include <sys/priv.h>
111 #include <sys/proc_info.h>
112 #include <sys/bsdtask_info.h>
113 #include <sys/persona.h>
114
115 #if CONFIG_MEMORYSTATUS
116 #include <sys/kern_memorystatus.h>
117 #endif
118
119 #if CONFIG_MACF
120 #include <security/mac_framework.h>
121 #endif
122
123 #include <libkern/crypto/sha1.h>
124
125 /*
126 * Structure associated with user cacheing.
127 */
128 struct uidinfo {
129 LIST_ENTRY(uidinfo) ui_hash;
130 uid_t ui_uid;
131 long ui_proccnt;
132 };
133 #define UIHASH(uid) (&uihashtbl[(uid) & uihash])
134 LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
135 u_long uihash; /* size of hash table - 1 */
136
137 /*
138 * Other process lists
139 */
140 struct pidhashhead *pidhashtbl;
141 u_long pidhash;
142 struct pgrphashhead *pgrphashtbl;
143 u_long pgrphash;
144 struct sesshashhead *sesshashtbl;
145 u_long sesshash;
146
147 struct proclist allproc;
148 struct proclist zombproc;
149 extern struct tty cons;
150
151 extern int cs_debug;
152
153 #if DEBUG
154 #define __PROC_INTERNAL_DEBUG 1
155 #endif
156 /* Name to give to core files */
157 __XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN+1] = {"/cores/core.%P"};
158
159 #if PROC_REF_DEBUG
160 extern uint32_t fastbacktrace(uintptr_t* bt, uint32_t max_frames) __attribute__((noinline));
161 #endif
162
163 static void orphanpg(struct pgrp *pg);
164 void proc_name_kdp(task_t t, char * buf, int size);
165 int proc_threadname_kdp(void *uth, char *buf, size_t size);
166 void proc_starttime_kdp(void *p, uint64_t *tv_sec, uint64_t *tv_usec);
167 char *proc_name_address(void *p);
168
169 static void pgrp_add(struct pgrp * pgrp, proc_t parent, proc_t child);
170 static void pgrp_remove(proc_t p);
171 static void pgrp_replace(proc_t p, struct pgrp *pgrp);
172 static void pgdelete_dropref(struct pgrp *pgrp);
173 extern void pg_rele_dropref(struct pgrp * pgrp);
174 static int csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaddittoken);
175 static boolean_t proc_parent_is_currentproc(proc_t p);
176
177 struct fixjob_iterargs {
178 struct pgrp * pg;
179 struct session * mysession;
180 int entering;
181 };
182
183 int fixjob_callback(proc_t, void *);
184
/*
 * Initialize global process hashing structures.
 */
void
procinit(void)
{
	LIST_INIT(&allproc);
	LIST_INIT(&zombproc);
	/*
	 * Hash tables are sized relative to maxproc; hashinit() returns the
	 * table and stores the mask (size - 1) through its third argument.
	 */
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
	sesshashtbl = hashinit(maxproc / 4, M_PROC, &sesshash);
	uihashtbl = hashinit(maxproc / 16, M_PROC, &uihash);
#if CONFIG_PERSONAS
	personas_bootstrap();
#endif
}
201
/*
 * Change the count associated with number of processes
 * a given user is using. This routine protects the uihash
 * with the list lock
 */
int
chgproccnt(uid_t uid, int diff)
{
	struct uidinfo *uip;
	struct uidinfo *newuip = NULL;
	struct uihashhead *uipp;
	int retval;

again:
	proc_list_lock();
	uipp = UIHASH(uid);
	/* look for an existing per-uid entry */
	for (uip = uipp->lh_first; uip != 0; uip = uip->ui_hash.le_next)
		if (uip->ui_uid == uid)
			break;
	if (uip) {
		uip->ui_proccnt += diff;
		if (uip->ui_proccnt > 0) {
			retval = uip->ui_proccnt;
			proc_list_unlock();
			goto out;
		}
		if (uip->ui_proccnt < 0)
			panic("chgproccnt: procs < 0");
		/* count reached zero: unhash and free the entry */
		LIST_REMOVE(uip, ui_hash);
		retval = 0;
		proc_list_unlock();
		FREE_ZONE(uip, sizeof(*uip), M_PROC);
		goto out;
	}
	if (diff <= 0) {
		if (diff == 0) {
			retval = 0;
			proc_list_unlock();
			goto out;
		}
		panic("chgproccnt: lost user");
	}
	if (newuip != NULL) {
		/* insert the entry preallocated on a previous pass */
		uip = newuip;
		newuip = NULL;
		LIST_INSERT_HEAD(uipp, uip, ui_hash);
		uip->ui_uid = uid;
		uip->ui_proccnt = diff;
		retval = diff;
		proc_list_unlock();
		goto out;
	}
	/*
	 * No entry yet: the zone allocation may block, so drop the list
	 * lock first and retry the lookup from the top afterwards (the
	 * table may have changed while unlocked).
	 */
	proc_list_unlock();
	MALLOC_ZONE(newuip, struct uidinfo *, sizeof(*uip), M_PROC, M_WAITOK);
	if (newuip == NULL)
		panic("chgproccnt: M_PROC zone depleted");
	goto again;
out:
	/* another thread raced the entry in; discard our preallocation */
	if (newuip != NULL)
		FREE_ZONE(newuip, sizeof(*uip), M_PROC);
	return(retval);
}
264
265 /*
266 * Is p an inferior of the current process?
267 */
268 int
269 inferior(proc_t p)
270 {
271 int retval = 0;
272
273 proc_list_lock();
274 for (; p != current_proc(); p = p->p_pptr)
275 if (p->p_pid == 0)
276 goto out;
277 retval = 1;
278 out:
279 proc_list_unlock();
280 return(retval);
281 }
282
/*
 * Is p an inferior of t ?
 * Returns 1 if p descends from t, 0 otherwise (including p == t).
 */
int
isinferior(proc_t p, proc_t t)
{
	int retval = 0;
	int nchecked = 0;
	proc_t start = p;

	/* if p==t they are not inferior */
	if (p == t)
		return(0);

	proc_list_lock();
	for (; p != t; p = p->p_pptr) {
		nchecked++;

		/* Detect here if we're in a cycle */
		/* pid 0 ends the chain; revisiting 'start' or walking more
		 * than nprocs links means a corrupted/cyclic parent chain */
		if ((p->p_pid == 0) || (p->p_pptr == start) || (nchecked >= nprocs))
			goto out;
	}
	retval = 1;
out:
	proc_list_unlock();
	return(retval);
}
310
311 int
312 proc_isinferior(int pid1, int pid2)
313 {
314 proc_t p = PROC_NULL;
315 proc_t t = PROC_NULL;
316 int retval = 0;
317
318 if (((p = proc_find(pid1)) != (proc_t)0 ) && ((t = proc_find(pid2)) != (proc_t)0))
319 retval = isinferior(p, t);
320
321 if (p != PROC_NULL)
322 proc_rele(p);
323 if (t != PROC_NULL)
324 proc_rele(t);
325
326 return(retval);
327 }
328
/*
 * Find a process by pid and take a reference on it; the caller must
 * release it with proc_rele(). Returns PROC_NULL if not found or the
 * process cannot be referenced.
 */
proc_t
proc_find(int pid)
{
	return(proc_findinternal(pid, 0));
}

/*
 * As proc_find(), but 'locked' != 0 indicates the caller already holds
 * the proc list lock.
 */
proc_t
proc_findinternal(int pid, int locked)
{
	proc_t p = PROC_NULL;

	if (locked == 0) {
		proc_list_lock();
	}

	p = pfind_locked(pid);
	/* proc_ref_locked() refuses exiting/zombie procs; treat as not found */
	if ((p == PROC_NULL) || (p != proc_ref_locked(p)))
		p = PROC_NULL;

	if (locked == 0) {
		proc_list_unlock();
	}

	return(p);
}
354
/*
 * Return, with a reference held, the process owning the given thread,
 * or PROC_NULL if it cannot be referenced. During vfork the uthread
 * carries the (borrowed) child proc in uu_proc instead of the task.
 */
proc_t
proc_findthread(thread_t thread)
{
	proc_t p = PROC_NULL;
	struct uthread *uth;

	proc_list_lock();
	uth = get_bsdthread_info(thread);
	if (uth && (uth->uu_flag & UT_VFORK))
		p = uth->uu_proc;
	else
		p = (proc_t)(get_bsdthreadtask_info(thread));
	p = proc_ref_locked(p);
	proc_list_unlock();
	return(p);
}
371
#if PROC_REF_DEBUG
/* Reset the per-uthread proc-reference accounting state. */
void
uthread_reset_proc_refcount(void *uthread) {
	uthread_t uth;

	if (proc_ref_tracking_disabled) {
		return;
	}

	uth = (uthread_t) uthread;

	uth->uu_proc_refcount = 0;
	uth->uu_pindex = 0;
}

/* Number of proc references currently charged to the given uthread. */
int
uthread_get_proc_refcount(void *uthread) {
	uthread_t uth;

	if (proc_ref_tracking_disabled) {
		return 0;
	}

	uth = (uthread_t) uthread;

	return uth->uu_proc_refcount;
}

/*
 * Account a proc reference grant (count == 1) or release (count == -1)
 * against the current uthread; on a grant, also capture a backtrace so
 * leaked references can be attributed.
 */
static void
record_procref(proc_t p, int count) {
	uthread_t uth;

	if (proc_ref_tracking_disabled) {
		return;
	}

	uth = current_uthread();
	uth->uu_proc_refcount += count;

	if (count == 1) {
		/* only the first NUM_PROC_REFS_TO_TRACK grants are recorded */
		if (uth->uu_pindex < NUM_PROC_REFS_TO_TRACK) {
			fastbacktrace((uintptr_t *) &uth->uu_proc_pcs[uth->uu_pindex], PROC_REF_STACK_DEPTH);

			uth->uu_proc_ps[uth->uu_pindex] = p;
			uth->uu_pindex++;
		}
	}
}
#endif
421
/*
 * Drop a reference taken by proc_find()/proc_self(). Always returns 0.
 */
int
proc_rele(proc_t p)
{
	proc_list_lock();
	proc_rele_locked(p);
	proc_list_unlock();

	return(0);
}
431
/*
 * Return the current process with a reference held, or PROC_NULL if it
 * can no longer be referenced (e.g. it has started exiting).
 */
proc_t
proc_self(void)
{
	struct proc * p;

	p = current_proc();

	proc_list_lock();
	if (p != proc_ref_locked(p))
		p = PROC_NULL;
	proc_list_unlock();
	return(p);
}
445
446
/*
 * Take a reference on p; caller holds the proc list lock.
 * Returns p on success, PROC_NULL if the process is mid-creation,
 * a zombie, exited, or being drained.
 */
proc_t
proc_ref_locked(proc_t p)
{
	proc_t p1 = p;

	/* if process still in creation return failure */
	if ((p == PROC_NULL) || ((p->p_listflag & P_LIST_INCREATE) != 0))
		return (PROC_NULL);
	/* do not return process marked for termination */
	if ((p->p_stat != SZOMB) && ((p->p_listflag & P_LIST_EXITED) == 0) && ((p->p_listflag & (P_LIST_DRAINWAIT | P_LIST_DRAIN | P_LIST_DEAD)) == 0)) {
		p->p_refcount++;
#if PROC_REF_DEBUG
		record_procref(p, 1);
#endif
	}
	else
		p1 = PROC_NULL;

	return(p1);
}
467
/*
 * Drop a reference on p; caller holds the proc list lock.
 * Wakes a drainer blocked in proc_refdrain() when the last
 * reference goes away.
 */
void
proc_rele_locked(proc_t p)
{

	if (p->p_refcount > 0) {
		p->p_refcount--;
#if PROC_REF_DEBUG
		record_procref(p, -1);
#endif
		if ((p->p_refcount == 0) && ((p->p_listflag & P_LIST_DRAINWAIT) == P_LIST_DRAINWAIT)) {
			p->p_listflag &= ~P_LIST_DRAINWAIT;
			wakeup(&p->p_refcount);
		}
	} else
		panic("proc_rele_locked -ve ref\n");

}
485
/*
 * Find a zombie (exited but unreaped) process by pid and claim it by
 * setting P_LIST_WAITING. Returns PROC_NULL if the pid is absent,
 * still being created, or has not started exiting. Release the claim
 * with proc_drop_zombref().
 */
proc_t
proc_find_zombref(int pid)
{
	proc_t p;

	proc_list_lock();

again:
	p = pfind_locked(pid);

	/* should we bail? */
	if ((p == PROC_NULL)					/* not found */
	    || ((p->p_listflag & P_LIST_INCREATE) != 0)		/* not created yet */
	    || ((p->p_listflag & P_LIST_EXITED) == 0)) {	/* not started exit */

		proc_list_unlock();
		return (PROC_NULL);
	}

	/* If someone else is controlling the (unreaped) zombie - wait */
	if ((p->p_listflag & P_LIST_WAITING) != 0) {
		/* msleep drops and re-takes the list lock, so re-lookup */
		(void)msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
		goto again;
	}
	p->p_listflag |= P_LIST_WAITING;

	proc_list_unlock();

	return(p);
}
516
/*
 * Release a zombie claimed via proc_find_zombref(), waking any other
 * waiters blocked on the P_LIST_WAITING flag.
 */
void
proc_drop_zombref(proc_t p)
{
	proc_list_lock();
	if ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
		p->p_listflag &= ~P_LIST_WAITING;
		wakeup(&p->p_stat);
	}
	proc_list_unlock();
}
527
528
/*
 * Block until all outstanding references on p are dropped, then mark
 * it P_LIST_DEAD so no new references can be taken (used on exit).
 */
void
proc_refdrain(proc_t p)
{

	proc_list_lock();

	p->p_listflag |= P_LIST_DRAIN;
	while (p->p_refcount) {
		/* proc_rele_locked() wakes us when the count hits zero */
		p->p_listflag |= P_LIST_DRAINWAIT;
		msleep(&p->p_refcount, proc_list_mlock, 0, "proc_refdrain", 0) ;
	}
	p->p_listflag &= ~P_LIST_DRAIN;
	p->p_listflag |= P_LIST_DEAD;

	proc_list_unlock();


}
547
/*
 * Take a parent reference (p_parentref) on p's parent, returning it,
 * or PROC_NULL if the parent is gone, a zombie, or draining children.
 * If a child drain is in progress but not finished, wait for it to
 * complete (bounded to 5 sleeps) before deciding.
 * Release with proc_parentdropref().
 */
proc_t
proc_parentholdref(proc_t p)
{
	proc_t parent = PROC_NULL;
	proc_t pp;
	int loopcnt = 0;


	proc_list_lock();
loop:
	pp = p->p_pptr;
	if ((pp == PROC_NULL) || (pp->p_stat == SZOMB) || ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED))) {
		parent = PROC_NULL;
		goto out;
	}

	/* drain started but not yet drained: wait and retry */
	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == P_LIST_CHILDDRSTART) {
		pp->p_listflag |= P_LIST_CHILDDRWAIT;
		msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
		loopcnt++;
		if (loopcnt == 5) {
			/* give up after bounded retries */
			parent = PROC_NULL;
			goto out;
		}
		goto loop;
	}

	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == 0) {
		pp->p_parentref++;
		parent = pp;
		goto out;
	}

out:
	proc_list_unlock();
	return(parent);
}
/*
 * Drop a parent reference taken with proc_parentholdref(); wakes a
 * child-drainer blocked in proc_childdrainstart() when the count hits
 * zero. 'listlocked' != 0 means the caller already holds the list lock.
 * Always returns 0.
 */
int
proc_parentdropref(proc_t p, int listlocked)
{
	if (listlocked == 0)
		proc_list_lock();

	if (p->p_parentref > 0) {
		p->p_parentref--;
		if ((p->p_parentref == 0) && ((p->p_listflag & P_LIST_PARENTREFWAIT) == P_LIST_PARENTREFWAIT)) {
			p->p_listflag &= ~P_LIST_PARENTREFWAIT;
			wakeup(&p->p_parentref);
		}
	} else
		panic("proc_parentdropref -ve ref\n");
	if (listlocked == 0)
		proc_list_unlock();

	return(0);
}
604
/*
 * Begin draining p's children on exit: mark the drain started and wait
 * for all outstanding parent references on p to be dropped.
 * Caller holds the proc list lock.
 */
void
proc_childdrainstart(proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_CHILDDRSTART) == P_LIST_CHILDDRSTART)
		panic("proc_childdrainstart: childdrain already started\n");
#endif
	p->p_listflag |= P_LIST_CHILDDRSTART;
	/* wait for all that hold parentrefs to drop */
	while (p->p_parentref > 0) {
		p->p_listflag |= P_LIST_PARENTREFWAIT;
		msleep(&p->p_parentref, proc_list_mlock, 0, "proc_childdrainstart", 0) ;
	}
}
619
620
/*
 * Complete the child drain started by proc_childdrainstart(): mark p
 * drained and wake anyone waiting on the child count.
 * Caller holds the proc list lock.
 */
void
proc_childdrainend(proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if (p->p_childrencnt > 0)
		panic("exiting: children stil hanging around\n");
#endif
	p->p_listflag |= P_LIST_CHILDDRAINED;
	if ((p->p_listflag & (P_LIST_CHILDLKWAIT |P_LIST_CHILDDRWAIT)) != 0) {
		p->p_listflag &= ~(P_LIST_CHILDLKWAIT |P_LIST_CHILDDRWAIT);
		wakeup(&p->p_childrencnt);
	}
}
634
/*
 * Debug-only sanity check that a proc being freed holds no residual
 * state: not hashed, no children, no refs, no parent refs.
 * No-op unless __PROC_INTERNAL_DEBUG is set.
 */
void
proc_checkdeadrefs(__unused proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INHASH) != 0)
		panic("proc being freed and still in hash %p: %u\n", p, p->p_listflag);
	if (p->p_childrencnt != 0)
		panic("proc being freed and pending children cnt %p:%d\n", p, p->p_childrencnt);
	if (p->p_refcount != 0)
		panic("proc being freed and pending refcount %p:%d\n", p, p->p_refcount);
	if (p->p_parentref != 0)
		panic("proc being freed and pending parentrefs %p:%d\n", p, p->p_parentref);
#endif
}
649
650 int
651 proc_pid(proc_t p)
652 {
653 if (p != NULL)
654 return (p->p_pid);
655 return -1;
656 }
657
658 int
659 proc_ppid(proc_t p)
660 {
661 if (p != NULL)
662 return (p->p_ppid);
663 return -1;
664 }
665
666 int
667 proc_selfpid(void)
668 {
669 return (current_proc()->p_pid);
670 }
671
672 int
673 proc_selfppid(void)
674 {
675 return (current_proc()->p_ppid);
676 }
677
678 int
679 proc_selfcsflags(void)
680 {
681 return (current_proc()->p_csflags);
682 }
683
#if CONFIG_DTRACE
/*
 * Resolve the proc DTrace should attribute the current thread to,
 * accounting for the vfork transition window.
 */
static proc_t
dtrace_current_proc_vforking(void)
{
	thread_t th = current_thread();
	struct uthread *ut = get_bsdthread_info(th);

	if (ut &&
	    ((ut->uu_flag & (UT_VFORK|UT_VFORKING)) == (UT_VFORK|UT_VFORKING))) {
		/*
		 * Handle the narrow window where we're in the vfork syscall,
		 * but we're not quite ready to claim (in particular, to DTrace)
		 * that we're running as the child.
		 */
		return (get_bsdtask_info(get_threadtask(th)));
	}
	return (current_proc());
}

/* pid of the DTrace-visible current process (vfork-aware) */
int
dtrace_proc_selfpid(void)
{
	return (dtrace_current_proc_vforking()->p_pid);
}

/* parent pid of the DTrace-visible current process (vfork-aware) */
int
dtrace_proc_selfppid(void)
{
	return (dtrace_current_proc_vforking()->p_ppid);
}

/* real uid of the DTrace-visible current process (vfork-aware) */
uid_t
dtrace_proc_selfruid(void)
{
	return (dtrace_current_proc_vforking()->p_ruid);
}
#endif /* CONFIG_DTRACE */
721
/*
 * Return p's parent with a reference held, or PROC_NULL. If the parent
 * has exited but its children have not yet been re-parented (drained),
 * wait for that to finish and retry.
 */
proc_t
proc_parent(proc_t p)
{
	proc_t parent;
	proc_t pp;

	proc_list_lock();
loop:
	pp = p->p_pptr;
	parent =  proc_ref_locked(pp);
	if ((parent == PROC_NULL) && (pp != PROC_NULL) && (pp->p_stat != SZOMB) && ((pp->p_listflag & P_LIST_EXITED) != 0) && ((pp->p_listflag & P_LIST_CHILDDRAINED)== 0)){
		/* parent is exiting; wait for child drain, then re-read p_pptr */
		pp->p_listflag |= P_LIST_CHILDLKWAIT;
		msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
		goto loop;
	}
	proc_list_unlock();
	return(parent);
}
740
741 static boolean_t
742 proc_parent_is_currentproc(proc_t p)
743 {
744 boolean_t ret = FALSE;
745
746 proc_list_lock();
747 if (p->p_pptr == current_proc())
748 ret = TRUE;
749
750 proc_list_unlock();
751 return ret;
752 }
753
754 void
755 proc_name(int pid, char * buf, int size)
756 {
757 proc_t p;
758
759 if ((p = proc_find(pid)) != PROC_NULL) {
760 strlcpy(buf, &p->p_comm[0], size);
761 proc_rele(p);
762 }
763 }
764
/*
 * Copy the process name for the given task into buf; safe for the
 * kernel debugger context. Uses the long name (p_name) when the buffer
 * can hold more than p_comm, otherwise the short name.
 */
void
proc_name_kdp(task_t t, char * buf, int size)
{
	proc_t p = get_bsdtask_info(t);
	if (p == PROC_NULL)
		return;

	if ((size_t)size > sizeof(p->p_comm))
		strlcpy(buf, &p->p_name[0], MIN((int)sizeof(p->p_name), size));
	else
		strlcpy(buf, &p->p_comm[0], MIN((int)sizeof(p->p_comm), size));
}
777
778
779 int
780 proc_threadname_kdp(void *uth, char *buf, size_t size)
781 {
782 if (size < MAXTHREADNAMESIZE) {
783 /* this is really just a protective measure for the future in
784 * case the thread name size in stackshot gets out of sync with
785 * the BSD max thread name size. Note that bsd_getthreadname
786 * doesn't take input buffer size into account. */
787 return -1;
788 }
789
790 if (uth != NULL) {
791 bsd_getthreadname(uth, buf);
792 }
793 return 0;
794 }
795
/* note that this function is generally going to be called from stackshot,
 * and the arguments will be coming from a struct which is declared packed
 * thus the input arguments will in general be unaligned. We have to handle
 * that here. */
void
proc_starttime_kdp(void *p, uint64_t *tv_sec, uint64_t *tv_usec)
{
	proc_t pp = (proc_t)p;
	/* packed wrapper lets us store through a possibly-unaligned pointer */
	struct uint64p {
		uint64_t val;
	} __attribute__((packed));

	if (pp != PROC_NULL) {
		if (tv_sec != NULL)
			((struct uint64p *)tv_sec)->val = pp->p_start.tv_sec;
		if (tv_usec != NULL)
			((struct uint64p *)tv_usec)->val = pp->p_start.tv_usec;
	}
}
815
816 char *
817 proc_name_address(void *p)
818 {
819 return &((proc_t)p)->p_comm[0];
820 }
821
822 void
823 proc_selfname(char * buf, int size)
824 {
825 proc_t p;
826
827 if ((p = current_proc())!= (proc_t)0) {
828 strlcpy(buf, &p->p_comm[0], size);
829 }
830 }
831
832 void
833 proc_signal(int pid, int signum)
834 {
835 proc_t p;
836
837 if ((p = proc_find(pid)) != PROC_NULL) {
838 psignal(p, signum);
839 proc_rele(p);
840 }
841 }
842
843 int
844 proc_issignal(int pid, sigset_t mask)
845 {
846 proc_t p;
847 int error=0;
848
849 if ((p = proc_find(pid)) != PROC_NULL) {
850 error = proc_pendingsignals(p, mask);
851 proc_rele(p);
852 }
853
854 return(error);
855 }
856
857 int
858 proc_noremotehang(proc_t p)
859 {
860 int retval = 0;
861
862 if (p)
863 retval = p->p_flag & P_NOREMOTEHANG;
864 return(retval? 1: 0);
865
866 }
867
868 int
869 proc_exiting(proc_t p)
870 {
871 int retval = 0;
872
873 if (p)
874 retval = p->p_lflag & P_LEXIT;
875 return(retval? 1: 0);
876 }
877
878 int
879 proc_forcequota(proc_t p)
880 {
881 int retval = 0;
882
883 if (p)
884 retval = p->p_flag & P_FORCEQUOTA;
885 return(retval? 1: 0);
886
887 }
888
/*
 * Check whether p has superuser privilege; returns 0 if so, otherwise
 * an errno from suser(). Takes and drops its own credential reference.
 */
int
proc_suser(proc_t p)
{
	kauth_cred_t my_cred;
	int error;

	my_cred = kauth_cred_proc_ref(p);
	error = suser(my_cred, &p->p_acflag);
	kauth_cred_unref(&my_cred);
	return(error);
}
900
/* Return the Mach task backing this process. */
task_t
proc_task(proc_t proc)
{
	return (task_t)proc->task;
}
906
/*
 * Obtain the first thread in a process
 *
 * XXX This is a bad thing to do; it exists predominantly to support the
 * XXX use of proc_t's in places that should really be using
 * XXX thread_t's instead.  This maintains historical behaviour, but really
 * XXX needs an audit of the context (proxy vs. not) to clean up.
 */
thread_t
proc_thread(proc_t proc)
{
	/* first uthread on the proc's list, or NULL if there are none */
	uthread_t uth = TAILQ_FIRST(&proc->p_uthlist);

	if (uth != NULL)
		return(uth->uu_context.vc_thread);

	return(NULL);
}
925
/* Return the process credential pointer (no reference is taken). */
kauth_cred_t
proc_ucred(proc_t p)
{
	return(p->p_ucred);
}
931
932 struct uthread *
933 current_uthread()
934 {
935 thread_t th = current_thread();
936
937 return((struct uthread *)get_bsdthread_info(th));
938 }
939
940
941 int
942 proc_is64bit(proc_t p)
943 {
944 return(IS_64BIT_PROCESS(p));
945 }
946
947 int
948 proc_pidversion(proc_t p)
949 {
950 return(p->p_idversion);
951 }
952
953 uint32_t
954 proc_persona_id(proc_t p)
955 {
956 return (uint32_t)persona_id_from_proc(p);
957 }
958
959 uint32_t
960 proc_getuid(proc_t p)
961 {
962 return(p->p_uid);
963 }
964
965 uint32_t
966 proc_getgid(proc_t p)
967 {
968 return(p->p_gid);
969 }
970
971 uint64_t
972 proc_uniqueid(proc_t p)
973 {
974 return(p->p_uniqueid);
975 }
976
977 uint64_t
978 proc_puniqueid(proc_t p)
979 {
980 return(p->p_puniqueid);
981 }
982
/*
 * Fill ids[] with the coalition ids of p's task; zeroed when the
 * kernel is built without coalition support.
 */
void
proc_coalitionids(__unused proc_t p, __unused uint64_t ids[COALITION_NUM_TYPES])
{
#if CONFIG_COALITIONS
	task_coalition_ids(p->task, ids);
#else
	memset(ids, 0, sizeof(uint64_t [COALITION_NUM_TYPES]));
#endif
	return;
}
993
994 uint64_t
995 proc_was_throttled(proc_t p)
996 {
997 return (p->was_throttled);
998 }
999
1000 uint64_t
1001 proc_did_throttle(proc_t p)
1002 {
1003 return (p->did_throttle);
1004 }
1005
/*
 * Retrieve the code-directory hash of p's executable via its text
 * vnode; returns the vn_getcdhash() result.
 */
int
proc_getcdhash(proc_t p, unsigned char *cdhash)
{
	return vn_getcdhash(p->p_textvp, p->p_textoff, cdhash);
}
1011
/*
 * Copy the executable's UUID into uuidbuf; no-op when the supplied
 * buffer is smaller than the stored UUID.
 */
void
proc_getexecutableuuid(proc_t p, unsigned char *uuidbuf, unsigned long size)
{
	if (size >= sizeof(p->p_uuid)) {
		memcpy(uuidbuf, p->p_uuid, sizeof(p->p_uuid));
	}
}
1019
1020 /* Return vnode for executable with an iocount. Must be released with vnode_put() */
1021 vnode_t
1022 proc_getexecutablevnode(proc_t p)
1023 {
1024 vnode_t tvp = p->p_textvp;
1025
1026 if ( tvp != NULLVP) {
1027 if (vnode_getwithref(tvp) == 0) {
1028 return tvp;
1029 }
1030 }
1031
1032 return NULLVP;
1033 }
1034
1035
/*
 * Atomically set P_DEPENDENCY_CAPABLE on the BSD proc backing the
 * given task, if one exists.
 */
void
bsd_set_dependency_capable(task_t task)
{
	proc_t p = get_bsdtask_info(task);

	if (p) {
		OSBitOrAtomic(P_DEPENDENCY_CAPABLE, &p->p_flag);
	}
}
1045
1046
1047 int
1048 IS_64BIT_PROCESS(proc_t p)
1049 {
1050 if (p && (p->p_flag & P_LP64))
1051 return(1);
1052 else
1053 return(0);
1054 }
1055
/*
 * Locate a process by number.
 * Caller holds the proc list lock; pid 0 maps to kernproc.
 * Returns NULL if not found; no reference is taken.
 */
proc_t
pfind_locked(pid_t pid)
{
	proc_t p;
#if DEBUG
	proc_t q;
#endif

	if (!pid)
		return (kernproc);

	for (p = PIDHASH(pid)->lh_first; p != 0; p = p->p_hash.le_next) {
		if (p->p_pid == pid) {
#if DEBUG
			/* sanity: the same pid must not be hashed twice */
			for (q = p->p_hash.le_next; q != 0; q = q->p_hash.le_next) {
				if ((p !=q) && (q->p_pid == pid))
					panic("two procs with same pid %p:%p:%d:%d\n", p, q, p->p_pid, q->p_pid);
			}
#endif
			return (p);
		}
	}
	return (NULL);
}
1083
/*
 * Locate a zombie by PID.
 * Scans the zombie list under the proc list lock; no reference taken.
 */
__private_extern__ proc_t
pzfind(pid_t pid)
{
	proc_t p;


	proc_list_lock();

	for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next)
		if (p->p_pid == pid)
			break;

	proc_list_unlock();

	return (p);
}
1103
/*
 * Locate a process group by number, taking a reference on it.
 * Returns PGRP_NULL if absent or terminating; release with pg_rele().
 */

struct pgrp *
pgfind(pid_t pgid)
{
	struct pgrp * pgrp;

	proc_list_lock();
	pgrp = pgfind_internal(pgid);
	if ((pgrp == NULL) || ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) != 0))
		pgrp = PGRP_NULL;
	else
		pgrp->pg_refcount++;
	proc_list_unlock();
	return(pgrp);
}
1122
1123
1124
1125 struct pgrp *
1126 pgfind_internal(pid_t pgid)
1127 {
1128 struct pgrp *pgrp;
1129
1130 for (pgrp = PGRPHASH(pgid)->lh_first; pgrp != 0; pgrp = pgrp->pg_hash.le_next)
1131 if (pgrp->pg_id == pgid)
1132 return (pgrp);
1133 return (NULL);
1134 }
1135
1136 void
1137 pg_rele(struct pgrp * pgrp)
1138 {
1139 if(pgrp == PGRP_NULL)
1140 return;
1141 pg_rele_dropref(pgrp);
1142 }
1143
/*
 * Drop a process-group reference; when this is the last reference and
 * the group is marked for termination, tear it down via
 * pgdelete_dropref() (which consumes the reference itself).
 */
void
pg_rele_dropref(struct pgrp * pgrp)
{
	proc_list_lock();
	if ((pgrp->pg_refcount == 1) && ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) == PGRP_FLAG_TERMINATE)) {
		proc_list_unlock();
		pgdelete_dropref(pgrp);
		return;
	}

	pgrp->pg_refcount--;
	proc_list_unlock();
}
1157
1158 struct session *
1159 session_find_internal(pid_t sessid)
1160 {
1161 struct session *sess;
1162
1163 for (sess = SESSHASH(sessid)->lh_first; sess != 0; sess = sess->s_hash.le_next)
1164 if (sess->s_sid == sessid)
1165 return (sess);
1166 return (NULL);
1167 }
1168
1169
/*
 * Make a new process ready to become a useful member of society by making it
 * visible in all the right places and initialize its own lists to empty.
 *
 * Parameters:	parent			The parent of the process to insert
 *		child			The child process to insert
 *
 * Returns:	(void)
 *
 * Notes:	Insert a child process into the parents process group, assign
 *		the child the parent process pointer and PPID of the parent,
 *		place it on the parents p_children list as a sibling,
 *		initialize its own child list, place it in the allproc list,
 *		insert it in the proper hash bucket, and initialize its
 *		event list.
 */
void
pinsertchild(proc_t parent, proc_t child)
{
	struct pgrp * pg;

	LIST_INIT(&child->p_children);
	TAILQ_INIT(&child->p_evlist);
	child->p_pptr = parent;
	child->p_ppid = parent->p_pid;
	child->p_puniqueid = parent->p_uniqueid;

	/* join the parent's process group (takes/drops a pgrp ref) */
	pg = proc_pgrp(parent);
	pgrp_add(pg, parent, child);
	pg_rele(pg);

	proc_list_lock();

#if CONFIG_MEMORYSTATUS
	memorystatus_add(child, TRUE);
#endif

	parent->p_childrencnt++;
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);

	LIST_INSERT_HEAD(&allproc, child, p_list);
	/* mark the completion of proc creation */
	child->p_listflag &= ~P_LIST_INCREATE;

	proc_list_unlock();
}
1216
/*
 * Move p to a new or existing process group (and session)
 *
 * Returns:	0			Success
 *		ESRCH			No such process
 */
int
enterpgrp(proc_t p, pid_t pgid, int mksess)
{
	struct pgrp *pgrp;
	struct pgrp *mypgrp;
	struct session * procsp;

	pgrp = pgfind(pgid);
	mypgrp = proc_pgrp(p);
	procsp = proc_session(p);

#if DIAGNOSTIC
	if (pgrp != NULL && mksess)	/* firewalls */
		panic("enterpgrp: setsid into non-empty pgrp");
	if (SESS_LEADER(p, procsp))
		panic("enterpgrp: session leader attempted setpgrp");
#endif
	if (pgrp == PGRP_NULL) {
		pid_t savepid = p->p_pid;
		proc_t np = PROC_NULL;
		/*
		 * new process group
		 */
#if DIAGNOSTIC
		if (p->p_pid != pgid)
			panic("enterpgrp: new pgrp and pid != pgid");
#endif
		MALLOC_ZONE(pgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP,
		    M_WAITOK);
		if (pgrp == NULL)
			panic("enterpgrp: M_PGRP zone depleted");
		/* re-validate that p is still alive under its saved pid
		 * (the zone allocation above may have blocked) */
		if ((np = proc_find(savepid)) == NULL || np != p) {
			if (np != PROC_NULL)
				proc_rele(np);
			if (mypgrp != PGRP_NULL)
				pg_rele(mypgrp);
			if (procsp != SESSION_NULL)
				session_rele(procsp);
			FREE_ZONE(pgrp, sizeof(struct pgrp), M_PGRP);
			return (ESRCH);
		}
		proc_rele(np);
		if (mksess) {
			struct session *sess;

			/*
			 * new session
			 */
			MALLOC_ZONE(sess, struct session *,
				sizeof(struct session), M_SESSION, M_WAITOK);
			if (sess == NULL)
				panic("enterpgrp: M_SESSION zone depleted");
			sess->s_leader = p;
			sess->s_sid = p->p_pid;
			sess->s_count = 1;
			sess->s_ttyvp = NULL;
			sess->s_ttyp = TTY_NULL;
			sess->s_flags = 0;
			sess->s_listflags = 0;
			sess->s_ttypgrpid = NO_PID;
#if CONFIG_FINE_LOCK_GROUPS
			lck_mtx_init(&sess->s_mlock, proc_mlock_grp, proc_lck_attr);
#else
			lck_mtx_init(&sess->s_mlock, proc_lck_grp, proc_lck_attr);
#endif
			bcopy(procsp->s_login, sess->s_login,
			    sizeof(sess->s_login));
			/* a session leader has no controlling terminal yet */
			OSBitAndAtomic(~((uint32_t)P_CONTROLT), &p->p_flag);
			proc_list_lock();
			LIST_INSERT_HEAD(SESSHASH(sess->s_sid), sess, s_hash);
			proc_list_unlock();
			pgrp->pg_session = sess;
#if DIAGNOSTIC
			if (p != current_proc())
				panic("enterpgrp: mksession and p != curproc");
#endif
		} else {
			/* new pgrp shares p's existing session */
			proc_list_lock();
			pgrp->pg_session = procsp;

			if ((pgrp->pg_session->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
				panic("enterpgrp: providing ref to terminating session ");
			pgrp->pg_session->s_count++;
			proc_list_unlock();
		}
		pgrp->pg_id = pgid;
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_init(&pgrp->pg_mlock, proc_mlock_grp, proc_lck_attr);
#else
		lck_mtx_init(&pgrp->pg_mlock, proc_lck_grp, proc_lck_attr);
#endif
		LIST_INIT(&pgrp->pg_members);
		pgrp->pg_membercnt = 0;
		pgrp->pg_jobc = 0;
		proc_list_lock();
		pgrp->pg_refcount = 1;
		pgrp->pg_listflags = 0;
		LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
		proc_list_unlock();
	} else if (pgrp == mypgrp) {
		/* already a member of the target group: drop refs and return */
		pg_rele(pgrp);
		if (mypgrp != NULL)
			pg_rele(mypgrp);
		if (procsp != SESSION_NULL)
			session_rele(procsp);
		return (0);
	}

	if (procsp != SESSION_NULL)
		session_rele(procsp);
	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	fixjobc(p, mypgrp, 0);

	if(mypgrp != PGRP_NULL)
		pg_rele(mypgrp);
	pgrp_replace(p, pgrp);
	pg_rele(pgrp);

	return(0);
}
1348
1349 /*
1350 * remove process from process group
1351 */
1352 int
1353 leavepgrp(proc_t p)
1354 {
1355
1356 pgrp_remove(p);
1357 return (0);
1358 }
1359
1360 /*
1361 * delete a process group
1362 */
/*
 * Drop the list reference on pgrp and, if it is now both empty and
 * unreferenced, tear it down: unhash it, detach it from its controlling
 * tty, release its session reference (destroying the session if that was
 * the last one), and free it.
 */
static void
pgdelete_dropref(struct pgrp *pgrp)
{
	struct tty *ttyp;
	int emptypgrp = 1;
	struct session *sessp;


	/* Sample membership under the pgrp lock. */
	pgrp_lock(pgrp);
	if (pgrp->pg_membercnt != 0) {
		emptypgrp = 0;
	}
	pgrp_unlock(pgrp);

	proc_list_lock();
	pgrp->pg_refcount--;
	/* Re-check membercnt: a process may have joined since the sample. */
	if ((emptypgrp == 0) || (pgrp->pg_membercnt != 0)) {
		proc_list_unlock();
		return;
	}

	pgrp->pg_listflags |= PGRP_FLAG_TERMINATE;

	/* Other holders remain; the last pg_rele() will finish teardown. */
	if (pgrp->pg_refcount > 0) {
		proc_list_unlock();
		return;
	}

	pgrp->pg_listflags |= PGRP_FLAG_DEAD;
	LIST_REMOVE(pgrp, pg_hash);

	proc_list_unlock();

	/* Detach from the controlling terminal, if we are its fg pgrp. */
	ttyp = SESSION_TP(pgrp->pg_session);
	if (ttyp != TTY_NULL) {
		if (ttyp->t_pgrp == pgrp) {
			tty_lock(ttyp);
			/* Re-check after acquiring the lock */
			if (ttyp->t_pgrp == pgrp) {
				ttyp->t_pgrp = NULL;
				pgrp->pg_session->s_ttypgrpid = NO_PID;
			}
			tty_unlock(ttyp);
		}
	}

	proc_list_lock();

	sessp = pgrp->pg_session;
	if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
		panic("pg_deleteref: manipulating refs of already terminating session");
	if (--sessp->s_count == 0) {
		/* Last session reference: destroy the session too. */
		if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
			panic("pg_deleteref: terminating already terminated session");
		sessp->s_listflags |= S_LIST_TERM;
		ttyp = SESSION_TP(sessp);
		LIST_REMOVE(sessp, s_hash);
		proc_list_unlock();
		/* Clear the tty's back-pointer outside the proc list lock. */
		if (ttyp != TTY_NULL) {
			tty_lock(ttyp);
			if (ttyp->t_session == sessp)
				ttyp->t_session = NULL;
			tty_unlock(ttyp);
		}
		proc_list_lock();
		sessp->s_listflags |= S_LIST_DEAD;
		if (sessp->s_count != 0)
			panic("pg_deleteref: freeing session in use");
		proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_destroy(&sessp->s_mlock, proc_mlock_grp);
#else
		lck_mtx_destroy(&sessp->s_mlock, proc_lck_grp);
#endif
		FREE_ZONE(sessp, sizeof(struct session), M_SESSION);
	} else
		proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
	lck_mtx_destroy(&pgrp->pg_mlock, proc_mlock_grp);
#else
	lck_mtx_destroy(&pgrp->pg_mlock, proc_lck_grp);
#endif
	FREE_ZONE(pgrp, sizeof(*pgrp), M_PGRP);
}
1447
1448
1449 /*
1450 * Adjust pgrp jobc counters when specified process changes process group.
1451 * We count the number of processes in each process group that "qualify"
1452 * the group for terminal job control (those with a parent in a different
1453 * process group of the same session). If that count reaches zero, the
1454 * process group becomes orphaned. Check both the specified process'
1455 * process group and that of its children.
1456 * entering == 0 => p is leaving specified group.
1457 * entering == 1 => p is entering specified group.
1458 */
1459 int
1460 fixjob_callback(proc_t p, void * arg)
1461 {
1462 struct fixjob_iterargs *fp;
1463 struct pgrp * pg, *hispg;
1464 struct session * mysession, *hissess;
1465 int entering;
1466
1467 fp = (struct fixjob_iterargs *)arg;
1468 pg = fp->pg;
1469 mysession = fp->mysession;
1470 entering = fp->entering;
1471
1472 hispg = proc_pgrp(p);
1473 hissess = proc_session(p);
1474
1475 if ((hispg != pg) &&
1476 (hissess == mysession)) {
1477 pgrp_lock(hispg);
1478 if (entering) {
1479 hispg->pg_jobc++;
1480 pgrp_unlock(hispg);
1481 } else if (--hispg->pg_jobc == 0) {
1482 pgrp_unlock(hispg);
1483 orphanpg(hispg);
1484 } else
1485 pgrp_unlock(hispg);
1486 }
1487 if (hissess != SESSION_NULL)
1488 session_rele(hissess);
1489 if (hispg != PGRP_NULL)
1490 pg_rele(hispg);
1491
1492 return(PROC_RETURNED);
1493 }
1494
/*
 * Re-evaluate job-control eligibility for p's own group (and, via
 * fixjob_callback, for its children's groups) as p enters or leaves pgrp.
 */
void
fixjobc(proc_t p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp = PGRP_NULL;
	struct session *hissess = SESSION_NULL;
	struct session *mysession = pgrp->pg_session;
	proc_t parent;
	struct fixjob_iterargs fjarg;
	boolean_t proc_parent_self;

	/*
	 * Check if p's parent is current proc, if yes then no need to take
	 * a ref; calling proc_parent with current proc as parent may
	 * deadlock if current proc is exiting.
	 */
	proc_parent_self = proc_parent_is_currentproc(p);
	if (proc_parent_self)
		parent = current_proc();
	else
		parent = proc_parent(p);

	if (parent != PROC_NULL) {
		hispgrp = proc_pgrp(parent);
		hissess = proc_session(parent);
		/* only release the ref we took via proc_parent() above */
		if (!proc_parent_self)
			proc_rele(parent);
	}


	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	if ((hispgrp != pgrp) &&
	    (hissess == mysession)) {
		pgrp_lock(pgrp);
		if (entering) {
			pgrp->pg_jobc++;
			pgrp_unlock(pgrp);
		}else if (--pgrp->pg_jobc == 0) {
			/* count hit zero: pgrp is now orphaned */
			pgrp_unlock(pgrp);
			orphanpg(pgrp);
		} else
			pgrp_unlock(pgrp);
	}

	if (hissess != SESSION_NULL)
		session_rele(hissess);
	if (hispgrp != PGRP_NULL)
		pg_rele(hispgrp);

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	fjarg.pg = pgrp;
	fjarg.mysession = mysession;
	fjarg.entering = entering;
	proc_childrenwalk(p, fixjob_callback, &fjarg);
}
1556
/*
 * A process group has become orphaned;
 * if there are any stopped processes in the group,
 * hang up all processes in that group.
 */
1562 static void
1563 orphanpg(struct pgrp * pgrp)
1564 {
1565 proc_t p;
1566 pid_t * pid_list;
1567 int count, pidcount, i, alloc_count;
1568
1569 if (pgrp == PGRP_NULL)
1570 return;
1571 count = 0;
1572 pgrp_lock(pgrp);
1573 for (p = pgrp->pg_members.lh_first; p != 0; p = p->p_pglist.le_next) {
1574 if (p->p_stat == SSTOP) {
1575 for (p = pgrp->pg_members.lh_first; p != 0;
1576 p = p->p_pglist.le_next)
1577 count++;
1578 break; /* ??? stops after finding one.. */
1579 }
1580 }
1581 pgrp_unlock(pgrp);
1582
1583 count += 20;
1584 if (count > hard_maxproc)
1585 count = hard_maxproc;
1586 alloc_count = count * sizeof(pid_t);
1587 pid_list = (pid_t *)kalloc(alloc_count);
1588 bzero(pid_list, alloc_count);
1589
1590 pidcount = 0;
1591 pgrp_lock(pgrp);
1592 for (p = pgrp->pg_members.lh_first; p != 0;
1593 p = p->p_pglist.le_next) {
1594 if (p->p_stat == SSTOP) {
1595 for (p = pgrp->pg_members.lh_first; p != 0;
1596 p = p->p_pglist.le_next) {
1597 pid_list[pidcount] = p->p_pid;
1598 pidcount++;
1599 if (pidcount >= count)
1600 break;
1601 }
1602 break; /* ??? stops after finding one.. */
1603 }
1604 }
1605 pgrp_unlock(pgrp);
1606
1607 if (pidcount == 0)
1608 goto out;
1609
1610
1611 for (i = 0; i< pidcount; i++) {
1612 /* No handling or proc0 */
1613 if (pid_list[i] == 0)
1614 continue;
1615 p = proc_find(pid_list[i]);
1616 if (p) {
1617 proc_transwait(p, 0);
1618 pt_setrunnable(p);
1619 psignal(p, SIGHUP);
1620 psignal(p, SIGCONT);
1621 proc_rele(p);
1622 }
1623 }
1624 out:
1625 kfree(pid_list, alloc_count);
1626 return;
1627 }
1628
1629 int
1630 proc_is_classic(proc_t p __unused)
1631 {
1632 return (0);
1633 }
1634
1635 /* XXX Why does this function exist? Need to kill it off... */
1636 proc_t
1637 current_proc_EXTERNAL(void)
1638 {
1639 return (current_proc());
1640 }
1641
1642 int
1643 proc_is_forcing_hfs_case_sensitivity(proc_t p)
1644 {
1645 return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY) ? 1 : 0;
1646 }
1647
1648 /*
1649 * proc_core_name(name, uid, pid)
1650 * Expand the name described in corefilename, using name, uid, and pid.
1651 * corefilename is a printf-like string, with three format specifiers:
1652 * %N name of process ("name")
1653 * %P process id (pid)
1654 * %U user id (uid)
1655 * For example, "%N.core" is the default; they can be disabled completely
1656 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
1657 * This is controlled by the sysctl variable kern.corefile (see above).
1658 */
1659 __private_extern__ int
1660 proc_core_name(const char *name, uid_t uid, pid_t pid, char *cf_name,
1661 size_t cf_name_len)
1662 {
1663 const char *format, *appendstr;
1664 char id_buf[11]; /* Buffer for pid/uid -- max 4B */
1665 size_t i, l, n;
1666
1667 if (cf_name == NULL)
1668 goto toolong;
1669
1670 format = corefilename;
1671 for (i = 0, n = 0; n < cf_name_len && format[i]; i++) {
1672 switch (format[i]) {
1673 case '%': /* Format character */
1674 i++;
1675 switch (format[i]) {
1676 case '%':
1677 appendstr = "%";
1678 break;
1679 case 'N': /* process name */
1680 appendstr = name;
1681 break;
1682 case 'P': /* process id */
1683 snprintf(id_buf, sizeof(id_buf), "%u", pid);
1684 appendstr = id_buf;
1685 break;
1686 case 'U': /* user id */
1687 snprintf(id_buf, sizeof(id_buf), "%u", uid);
1688 appendstr = id_buf;
1689 break;
1690 default:
1691 appendstr = "";
1692 log(LOG_ERR,
1693 "Unknown format character %c in `%s'\n",
1694 format[i], format);
1695 }
1696 l = strlen(appendstr);
1697 if ((n + l) >= cf_name_len)
1698 goto toolong;
1699 bcopy(appendstr, cf_name + n, l);
1700 n += l;
1701 break;
1702 default:
1703 cf_name[n++] = format[i];
1704 }
1705 }
1706 if (format[i] != '\0')
1707 goto toolong;
1708 return (0);
1709 toolong:
1710 log(LOG_ERR, "pid %ld (%s), uid (%u): corename is too long\n",
1711 (long)pid, name, (uint32_t)uid);
1712 return (1);
1713 }
1714
1715 /* Code Signing related routines */
1716
1717 int
1718 csops(__unused proc_t p, struct csops_args *uap, __unused int32_t *retval)
1719 {
1720 return(csops_internal(uap->pid, uap->ops, uap->useraddr,
1721 uap->usersize, USER_ADDR_NULL));
1722 }
1723
1724 int
1725 csops_audittoken(__unused proc_t p, struct csops_audittoken_args *uap, __unused int32_t *retval)
1726 {
1727 if (uap->uaudittoken == USER_ADDR_NULL)
1728 return(EINVAL);
1729 return(csops_internal(uap->pid, uap->ops, uap->useraddr,
1730 uap->usersize, uap->uaudittoken));
1731 }
1732
1733 static int
1734 csops_copy_token(void *start, size_t length, user_size_t usize, user_addr_t uaddr)
1735 {
1736 char fakeheader[8] = { 0 };
1737 int error;
1738
1739 if (usize < sizeof(fakeheader))
1740 return ERANGE;
1741
1742 /* if no blob, fill in zero header */
1743 if (NULL == start) {
1744 start = fakeheader;
1745 length = sizeof(fakeheader);
1746 } else if (usize < length) {
1747 /* ... if input too short, copy out length of entitlement */
1748 uint32_t length32 = htonl((uint32_t)length);
1749 memcpy(&fakeheader[4], &length32, sizeof(length32));
1750
1751 error = copyout(fakeheader, uaddr, sizeof(fakeheader));
1752 if (error == 0)
1753 return ERANGE; /* input buffer to short, ERANGE signals that */
1754 return error;
1755 }
1756 return copyout(start, uaddr, length);
1757 }
1758
/*
 * Shared implementation of csops(2) / csops_audittoken(2).
 *
 * Resolves the target pid (0 means self), enforces that restricted ops on
 * other processes require superuser credentials, optionally validates the
 * caller-supplied audit token against the target's pid/idversion, then
 * dispatches on ops.  Returns 0 or an errno.  A proc_find() reference on
 * the target is held for the duration and released at "out" — except in
 * the CS_OPS_PIDOFFSET and CS_OPS_CDHASH cases, which release it and
 * return directly.
 */
static int
csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaudittoken)
{
	size_t usize = (size_t)CAST_DOWN(size_t, usersize);
	proc_t pt;
	int forself;
	int error;
	vnode_t tvp;
	off_t toff;
	unsigned char cdhash[SHA1_RESULTLEN];
	audit_token_t token;
	unsigned int upid=0, uidversion = 0;

	forself = error = 0;

	if (pid == 0)
		pid = proc_selfpid();
	if (pid == proc_selfpid())
		forself = 1;


	switch (ops) {
		case CS_OPS_STATUS:
		case CS_OPS_CDHASH:
		case CS_OPS_PIDOFFSET:
		case CS_OPS_ENTITLEMENTS_BLOB:
		case CS_OPS_IDENTITY:
		case CS_OPS_BLOB:
			break;	/* unrestricted */
		default:
			/* mutating ops on another process require root */
			if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE)
				return(EPERM);
			break;
	}

	pt = proc_find(pid);
	if (pt == PROC_NULL)
		return(ESRCH);

	upid = pt->p_pid;
	uidversion = pt->p_idversion;
	if (uaudittoken != USER_ADDR_NULL) {

		error = copyin(uaudittoken, &token, sizeof(audit_token_t));
		if (error != 0)
			goto out;
		/* verify the audit token pid/idversion matches with proc */
		if ((token.val[5] != upid) || (token.val[7] != uidversion)) {
			error = ESRCH;
			goto out;
		}
	}

	switch (ops) {

		case CS_OPS_STATUS: {
			uint32_t retflags;

			proc_lock(pt);
			retflags = pt->p_csflags;
			/* augment with dynamically-derived status bits */
			if (cs_enforcement(pt))
				retflags |= CS_ENFORCEMENT;
			if (csproc_get_platform_binary(pt))
				retflags |= CS_PLATFORM_BINARY;
			proc_unlock(pt);

			if (uaddr != USER_ADDR_NULL)
				error = copyout(&retflags, uaddr, sizeof(uint32_t));
			break;
		}
		case CS_OPS_MARKINVALID:
			proc_lock(pt);
			if ((pt->p_csflags & CS_VALID) == CS_VALID) {	/* is currently valid */
				pt->p_csflags &= ~CS_VALID;	/* set invalid */
				/* CS_KILL means invalidation is fatal to the target */
				if ((pt->p_csflags & CS_KILL) == CS_KILL) {
					pt->p_csflags |= CS_KILLED;
					proc_unlock(pt);
					if (cs_debug) {
						printf("CODE SIGNING: marked invalid by pid %d: "
						    "p=%d[%s] honoring CS_KILL, final status 0x%x\n",
						    proc_selfpid(), pt->p_pid, pt->p_comm, pt->p_csflags);
					}
					psignal(pt, SIGKILL);
				} else
					proc_unlock(pt);
			} else
				proc_unlock(pt);

			break;

		case CS_OPS_MARKHARD:
			proc_lock(pt);
			pt->p_csflags |= CS_HARD;
			if ((pt->p_csflags & CS_VALID) == 0) {
				/* @@@ allow? reject? kill? @@@ */
				proc_unlock(pt);
				error = EINVAL;
				goto out;
			} else
				proc_unlock(pt);
			break;

		case CS_OPS_MARKKILL:
			proc_lock(pt);
			pt->p_csflags |= CS_KILL;
			/* already invalid: enforce the new policy immediately */
			if ((pt->p_csflags & CS_VALID) == 0) {
				proc_unlock(pt);
				psignal(pt, SIGKILL);
			} else
				proc_unlock(pt);
			break;

		case CS_OPS_PIDOFFSET:
			/* NOTE: releases pt and returns without reaching "out" */
			toff = pt->p_textoff;
			proc_rele(pt);
			error = copyout(&toff, uaddr, sizeof(toff));
			return(error);

		case CS_OPS_CDHASH:

			/* pt already holds a reference on its p_textvp */
			tvp = pt->p_textvp;
			toff = pt->p_textoff;

			if (tvp == NULLVP || usize != SHA1_RESULTLEN) {
				proc_rele(pt);
				return EINVAL;
			}

			error = vn_getcdhash(tvp, toff, cdhash);
			proc_rele(pt);

			if (error == 0) {
				error = copyout(cdhash, uaddr, sizeof (cdhash));
			}

			/* NOTE: pt already released; returns without reaching "out" */
			return error;

		case CS_OPS_ENTITLEMENTS_BLOB: {
			void *start;
			size_t length;

			proc_lock(pt);

			/* only valid (or debugged) signatures expose blobs */
			if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
				proc_unlock(pt);
				error = EINVAL;
				break;
			}

			error = cs_entitlements_blob_get(pt, &start, &length);
			proc_unlock(pt);
			if (error)
				break;

			error = csops_copy_token(start, length, usize, uaddr);
			break;
		}
		case CS_OPS_MARKRESTRICT:
			proc_lock(pt);
			pt->p_csflags |= CS_RESTRICT;
			proc_unlock(pt);
			break;

		case CS_OPS_SET_STATUS: {
			uint32_t flags;

			if (usize < sizeof(flags)) {
				error = ERANGE;
				break;
			}

			error = copyin(uaddr, &flags, sizeof(flags));
			if (error)
				break;

			/* only allow setting a subset of all code sign flags */
			flags &=
			    CS_HARD | CS_EXEC_SET_HARD |
			    CS_KILL | CS_EXEC_SET_KILL |
			    CS_RESTRICT |
			    CS_REQUIRE_LV |
			    CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT |
			    CS_ENTITLEMENTS_VALIDATED;

			proc_lock(pt);
			if (pt->p_csflags & CS_VALID)
				pt->p_csflags |= flags;
			else
				error = EINVAL;
			proc_unlock(pt);

			break;
		}
		case CS_OPS_BLOB: {
			void *start;
			size_t length;

			proc_lock(pt);
			if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
				proc_unlock(pt);
				error = EINVAL;
				break;
			}

			error = cs_blob_get(pt, &start, &length);
			proc_unlock(pt);
			if (error)
				break;

			error = csops_copy_token(start, length, usize, uaddr);
			break;
		}
		case CS_OPS_IDENTITY: {
			const char *identity;
			uint8_t fakeheader[8];
			uint32_t idlen;
			size_t length;

			/*
			 * Make identity have a blob header to make it
			 * easier on userland to guess the identity
			 * length.
			 */
			if (usize < sizeof(fakeheader)) {
				error = ERANGE;
				break;
			}
			memset(fakeheader, 0, sizeof(fakeheader));

			proc_lock(pt);
			if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
				proc_unlock(pt);
				error = EINVAL;
				break;
			}

			identity = cs_identity_get(pt);
			proc_unlock(pt);
			if (identity == NULL) {
				error = ENOENT;
				break;
			}

			length = strlen(identity) + 1; /* include NUL */
			idlen = htonl(length + sizeof(fakeheader));
			memcpy(&fakeheader[4], &idlen, sizeof(idlen));

			/* header first, then the identity string after it */
			error = copyout(fakeheader, uaddr, sizeof(fakeheader));
			if (error)
				break;

			if (usize < sizeof(fakeheader) + length)
				error = ERANGE;
			else if (usize > sizeof(fakeheader))
				error = copyout(identity, uaddr + sizeof(fakeheader), length);

			break;
		}

		default:
			error = EINVAL;
			break;
	}
out:
	proc_rele(pt);
	return(error);
}
2027
2028 int
2029 proc_iterate(flags, callout, arg, filterfn, filterarg)
2030 int flags;
2031 int (*callout)(proc_t, void *);
2032 void * arg;
2033 int (*filterfn)(proc_t, void *);
2034 void * filterarg;
2035 {
2036 proc_t p;
2037 pid_t * pid_list;
2038 int count, pidcount, alloc_count, i, retval;
2039
2040 count = nprocs+ 10;
2041 if (count > hard_maxproc)
2042 count = hard_maxproc;
2043 alloc_count = count * sizeof(pid_t);
2044 pid_list = (pid_t *)kalloc(alloc_count);
2045 bzero(pid_list, alloc_count);
2046
2047
2048 proc_list_lock();
2049
2050
2051 pidcount = 0;
2052 if (flags & PROC_ALLPROCLIST) {
2053 for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
2054 if (p->p_stat == SIDL)
2055 continue;
2056 if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
2057 pid_list[pidcount] = p->p_pid;
2058 pidcount++;
2059 if (pidcount >= count)
2060 break;
2061 }
2062 }
2063 }
2064 if ((pidcount < count ) && (flags & PROC_ZOMBPROCLIST)) {
2065 for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) {
2066 if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
2067 pid_list[pidcount] = p->p_pid;
2068 pidcount++;
2069 if (pidcount >= count)
2070 break;
2071 }
2072 }
2073 }
2074
2075
2076 proc_list_unlock();
2077
2078
2079 for (i = 0; i< pidcount; i++) {
2080 p = proc_find(pid_list[i]);
2081 if (p) {
2082 if ((flags & PROC_NOWAITTRANS) == 0)
2083 proc_transwait(p, 0);
2084 retval = callout(p, arg);
2085
2086 switch (retval) {
2087 case PROC_RETURNED:
2088 proc_rele(p);
2089 break;
2090 case PROC_RETURNED_DONE:
2091 proc_rele(p);
2092 goto out;
2093 case PROC_CLAIMED_DONE:
2094 goto out;
2095 case PROC_CLAIMED:
2096 default:
2097 break;
2098 }
2099 } else if (flags & PROC_ZOMBPROCLIST) {
2100 p = proc_find_zombref(pid_list[i]);
2101 if (p != PROC_NULL) {
2102 retval = callout(p, arg);
2103
2104 switch (retval) {
2105 case PROC_RETURNED:
2106 proc_drop_zombref(p);
2107 break;
2108 case PROC_RETURNED_DONE:
2109 proc_drop_zombref(p);
2110 goto out;
2111 case PROC_CLAIMED_DONE:
2112 goto out;
2113 case PROC_CLAIMED:
2114 default:
2115 break;
2116 }
2117 }
2118 }
2119 }
2120
2121 out:
2122 kfree(pid_list, alloc_count);
2123 return(0);
2124
2125 }
2126
2127
#if 0
/* This is for iteration in case of trivial non blocking callouts */
/*
 * NOTE: compiled out.  Unlike proc_iterate(), this would run callouts
 * with the proc list lock held, so callouts must never block.
 */
int
proc_scanall(flags, callout, arg)
	int flags;
	int (*callout)(proc_t, void *);
	void * arg;
{
	proc_t p;
	int retval;


	proc_list_lock();


	if (flags & PROC_ALLPROCLIST) {
		for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
			retval = callout(p, arg);
			if (retval == PROC_RETURNED_DONE)
				goto out;
		}
	}
	if (flags & PROC_ZOMBPROCLIST) {
		for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) {
			retval = callout(p, arg);
			if (retval == PROC_RETURNED_DONE)
				goto out;
		}
	}
out:

	proc_list_unlock();

	return(0);
}
#endif
2164
2165
/*
 * Shutdown-time scan of allproc.  For each process accepted by filterfn,
 * take a reference, drop the list lock, run the callout, then restart the
 * whole scan from the top (the list may have changed while unlocked).
 * Terminates when a full pass finds no filter match or the callout returns
 * a *_DONE value.  Always returns 0.
 */
int
proc_rebootscan(callout, arg, filterfn, filterarg)
	int (*callout)(proc_t, void *);
	void * arg;
	int (*filterfn)(proc_t, void *);
	void * filterarg;
{
	proc_t p;
	int lockheld = 0, retval;

	proc_shutdown_exitcount = 0;

ps_allprocscan:

	proc_list_lock();

	lockheld = 1;

	for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
		if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
			/* may return NULL if p is exiting */
			p = proc_ref_locked(p);

			proc_list_unlock();
			lockheld = 0;

			if (p) {
				proc_transwait(p, 0);
				retval = callout(p, arg);
				proc_rele(p);

				switch (retval) {
				case PROC_RETURNED_DONE:
				case PROC_CLAIMED_DONE:
					goto out;
				}
			}
			/* list may have changed while unlocked: restart scan */
			goto ps_allprocscan;
		} /* filter pass */
	} /* allproc walk thru */

	if (lockheld == 1) {
		proc_list_unlock();
		lockheld = 0;
	}

out:
	return(0);

}
2215
2216
2217 int
2218 proc_childrenwalk(parent, callout, arg)
2219 struct proc * parent;
2220 int (*callout)(proc_t, void *);
2221 void * arg;
2222 {
2223 register struct proc *p;
2224 pid_t * pid_list;
2225 int count, pidcount, alloc_count, i, retval;
2226
2227 count = nprocs+ 10;
2228 if (count > hard_maxproc)
2229 count = hard_maxproc;
2230 alloc_count = count * sizeof(pid_t);
2231 pid_list = (pid_t *)kalloc(alloc_count);
2232 bzero(pid_list, alloc_count);
2233
2234
2235 proc_list_lock();
2236
2237
2238 pidcount = 0;
2239 for (p = parent->p_children.lh_first; (p != 0); p = p->p_sibling.le_next) {
2240 if (p->p_stat == SIDL)
2241 continue;
2242 pid_list[pidcount] = p->p_pid;
2243 pidcount++;
2244 if (pidcount >= count)
2245 break;
2246 }
2247 proc_list_unlock();
2248
2249
2250 for (i = 0; i< pidcount; i++) {
2251 p = proc_find(pid_list[i]);
2252 if (p) {
2253 proc_transwait(p, 0);
2254 retval = callout(p, arg);
2255
2256 switch (retval) {
2257 case PROC_RETURNED:
2258 case PROC_RETURNED_DONE:
2259 proc_rele(p);
2260 if (retval == PROC_RETURNED_DONE) {
2261 goto out;
2262 }
2263 break;
2264
2265 case PROC_CLAIMED_DONE:
2266 goto out;
2267 case PROC_CLAIMED:
2268 default:
2269 break;
2270 }
2271 }
2272 }
2273
2274 out:
2275 kfree(pid_list, alloc_count);
2276 return(0);
2277
2278 }
2279
/*
 * Iterate over the members of a process group, invoking a callout on each.
 * PGRP_BLOCKITERATE is not implemented yet.
 */
/*
 * Snapshot the member pids of pgrp under the pgrp lock, then invoke the
 * callout on each member (skipping processes that have since changed
 * group) without the lock held.  PGRP_DROPREF makes this function consume
 * the caller's reference on pgrp.  Always returns 0.
 */
int
pgrp_iterate(pgrp, flags, callout, arg, filterfn, filterarg)
	struct pgrp *pgrp;
	int flags;
	int (*callout)(proc_t, void *);
	void * arg;
	int (*filterfn)(proc_t, void *);
	void * filterarg;
{
	proc_t p;
	pid_t * pid_list;
	int count, pidcount, i, alloc_count;
	int retval;
	pid_t pgid;
	int dropref = flags & PGRP_DROPREF;
#if 0
	int serialize = flags & PGRP_BLOCKITERATE;
#else
	int serialize = 0;	/* serialized iteration not implemented */
#endif

	if (pgrp == 0)
		return(0);
	count = pgrp->pg_membercnt + 10;
	if (count > hard_maxproc)
		count = hard_maxproc;
	alloc_count = count * sizeof(pid_t);
	pid_list = (pid_t *)kalloc(alloc_count);
	bzero(pid_list, alloc_count);

	pgrp_lock(pgrp);
	if (serialize != 0) {
		/* wait for any in-progress iteration, then claim the group */
		while ((pgrp->pg_listflags & PGRP_FLAG_ITERABEGIN) == PGRP_FLAG_ITERABEGIN) {
			pgrp->pg_listflags |= PGRP_FLAG_ITERWAIT;
			msleep(&pgrp->pg_listflags, &pgrp->pg_mlock, 0, "pgrp_iterate", 0);
		}
		pgrp->pg_listflags |= PGRP_FLAG_ITERABEGIN;
	}

	pgid = pgrp->pg_id;

	/* snapshot member pids under the pgrp lock */
	pidcount = 0;
	for (p = pgrp->pg_members.lh_first; p != 0;
	     p = p->p_pglist.le_next) {
		if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
			pid_list[pidcount] = p->p_pid;
			pidcount++;
			if (pidcount >= count)
				break;
		}
	}


	pgrp_unlock(pgrp);
	/* non-serialized: drop the caller's ref before the callouts run */
	if ((serialize == 0) && (dropref != 0))
		pg_rele(pgrp);


	for (i = 0; i< pidcount; i++) {
		/* No handling or proc0 */
		if (pid_list[i] == 0)
			continue;
		p = proc_find(pid_list[i]);
		if (p) {
			/* skip processes that left the group since the snapshot */
			if (p->p_pgrpid != pgid) {
				proc_rele(p);
				continue;
			}
			proc_transwait(p, 0);
			retval = callout(p, arg);

			switch (retval) {
			case PROC_RETURNED:
			case PROC_RETURNED_DONE:
				proc_rele(p);
				if (retval == PROC_RETURNED_DONE) {
					goto out;
				}
				break;

			case PROC_CLAIMED_DONE:
				goto out;
			case PROC_CLAIMED:
			default:
				break;
			}
		}
	}
out:
	if (serialize != 0) {
		/* release the iteration claim and wake any waiters */
		pgrp_lock(pgrp);
		pgrp->pg_listflags &= ~PGRP_FLAG_ITERABEGIN;
		if ((pgrp->pg_listflags & PGRP_FLAG_ITERWAIT) == PGRP_FLAG_ITERWAIT) {
			pgrp->pg_listflags &= ~PGRP_FLAG_ITERWAIT;
			wakeup(&pgrp->pg_listflags);
		}
		pgrp_unlock(pgrp);
		if (dropref != 0)
			pg_rele(pgrp);
	}
	kfree(pid_list, alloc_count);
	return(0);
}
2386
/*
 * Link child into pgrp, after parent's list entry when parent is given.
 * Caller supplies a referenced pgrp; member count and list flags are
 * updated under the appropriate locks.
 */
static void
pgrp_add(struct pgrp * pgrp, struct proc * parent, struct proc * child)
{
	proc_list_lock();
	child->p_pgrp = pgrp;
	child->p_pgrpid = pgrp->pg_id;
	child->p_listflag |= P_LIST_INPGRP;
	/*
	 * When pgrp is being freed , a process can still
	 * request addition using setpgid from bash when
	 * login is terminated (login cycler) return ESRCH
	 * Safe to hold lock due to refcount on pgrp
	 */
	if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
		pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	if ((pgrp->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
		panic("pgrp_add : pgrp is dead adding process");
	proc_list_unlock();

	pgrp_lock(pgrp);
	pgrp->pg_membercnt++;
	if ( parent != PROC_NULL) {
		LIST_INSERT_AFTER(parent, child, p_pglist);
	}else {
		LIST_INSERT_HEAD(&pgrp->pg_members, child, p_pglist);
	}
	pgrp_unlock(pgrp);

	proc_list_lock();
	/* re-clear TERMINATE: it may have been re-set while unlocked above */
	if (((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (pgrp->pg_membercnt != 0)) {
		pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}
	proc_list_unlock();
}
2423
/*
 * Unlink p from its process group.  If p was the last member, the group
 * is torn down via pgdelete_dropref() (which consumes the reference taken
 * here); otherwise the reference is simply dropped.
 */
static void
pgrp_remove(struct proc * p)
{
	struct pgrp * pg;

	/* takes a reference on the group; consumed below */
	pg = proc_pgrp(p);

	proc_list_lock();
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INPGRP) == 0)
		panic("removing from pglist but no named ref\n");
#endif
	p->p_pgrpid = PGRPID_DEAD;
	p->p_listflag &= ~P_LIST_INPGRP;
	p->p_pgrp = NULL;
	proc_list_unlock();

	if (pg == PGRP_NULL)
		panic("pgrp_remove: pg is NULL");
	pgrp_lock(pg);
	pg->pg_membercnt--;

	if (pg->pg_membercnt < 0)
		panic("pgprp: -ve membercnt pgprp:%p p:%p\n",pg, p);

	LIST_REMOVE(p, p_pglist);
	if (pg->pg_members.lh_first == 0) {
		pgrp_unlock(pg);
		/* last member: consumes our reference and frees the group */
		pgdelete_dropref(pg);
	} else {
		pgrp_unlock(pg);
		pg_rele(pg);
	}
}
2458
2459
/* cannot use proc_pgrp as it may be stalled */
/*
 * Atomically (from other lookups' perspective) move p from its current
 * process group to newpg.  P_LIST_PGRPTRANS marks the transition so
 * concurrent proc_pgrp()/proc_session() callers block until it completes;
 * waiters are woken at the end.
 */
static void
pgrp_replace(struct proc * p, struct pgrp * newpg)
{
	struct pgrp * oldpg;



	proc_list_lock();

	/* wait out any transition already in progress for p */
	while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
		p->p_listflag |= P_LIST_PGRPTRWAIT;
		(void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
	}

	p->p_listflag |= P_LIST_PGRPTRANS;

	oldpg = p->p_pgrp;
	if (oldpg == PGRP_NULL)
		panic("pgrp_replace: oldpg NULL");
	/* hold the old group alive while we detach from it */
	oldpg->pg_refcount++;
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INPGRP) == 0)
		panic("removing from pglist but no named ref\n");
#endif
	p->p_pgrpid = PGRPID_DEAD;
	p->p_listflag &= ~P_LIST_INPGRP;
	p->p_pgrp = NULL;

	proc_list_unlock();

	pgrp_lock(oldpg);
	oldpg->pg_membercnt--;
	if (oldpg->pg_membercnt < 0)
		panic("pgprp: -ve membercnt pgprp:%p p:%p\n",oldpg, p);
	LIST_REMOVE(p, p_pglist);
	if (oldpg->pg_members.lh_first == 0) {
		pgrp_unlock(oldpg);
		/* last member: consumes our reference and frees the group */
		pgdelete_dropref(oldpg);
	} else {
		pgrp_unlock(oldpg);
		pg_rele(oldpg);
	}

	proc_list_lock();
	p->p_pgrp = newpg;
	p->p_pgrpid = newpg->pg_id;
	p->p_listflag |= P_LIST_INPGRP;
	/*
	 * When pgrp is being freed , a process can still
	 * request addition using setpgid from bash when
	 * login is terminated (login cycler) return ESRCH
	 * Safe to hold lock due to refcount on pgrp
	 */
	if ((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
		newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	if ((newpg->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
		panic("pgrp_add : pgrp is dead adding process");
	proc_list_unlock();

	pgrp_lock(newpg);
	newpg->pg_membercnt++;
	LIST_INSERT_HEAD(&newpg->pg_members, p, p_pglist);
	pgrp_unlock(newpg);

	proc_list_lock();
	/* re-clear TERMINATE: it may have been re-set while unlocked above */
	if (((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (newpg->pg_membercnt != 0)) {
		newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	/* transition complete: wake anyone blocked in proc_pgrp()/proc_session() */
	p->p_listflag &= ~P_LIST_PGRPTRANS;
	if ((p->p_listflag & P_LIST_PGRPTRWAIT) == P_LIST_PGRPTRWAIT) {
		p->p_listflag &= ~P_LIST_PGRPTRWAIT;
		wakeup(&p->p_pgrpid);

	}
	proc_list_unlock();
}
2540
/* Acquire the per-process-group mutex. */
void
pgrp_lock(struct pgrp * pgrp)
{
	lck_mtx_lock(&pgrp->pg_mlock);
}
2546
/* Release the per-process-group mutex. */
void
pgrp_unlock(struct pgrp * pgrp)
{
	lck_mtx_unlock(&pgrp->pg_mlock);
}
2552
/* Acquire the per-session mutex. */
void
session_lock(struct session * sess)
{
	lck_mtx_lock(&sess->s_mlock);
}
2558
2559
/* Release the per-session mutex. */
void
session_unlock(struct session * sess)
{
	lck_mtx_unlock(&sess->s_mlock);
}
2565
2566 struct pgrp *
2567 proc_pgrp(proc_t p)
2568 {
2569 struct pgrp * pgrp;
2570
2571 if (p == PROC_NULL)
2572 return(PGRP_NULL);
2573 proc_list_lock();
2574
2575 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2576 p->p_listflag |= P_LIST_PGRPTRWAIT;
2577 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2578 }
2579
2580 pgrp = p->p_pgrp;
2581
2582 assert(pgrp != NULL);
2583
2584 if (pgrp != PGRP_NULL) {
2585 pgrp->pg_refcount++;
2586 if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) != 0)
2587 panic("proc_pgrp: ref being povided for dead pgrp");
2588 }
2589
2590 proc_list_unlock();
2591
2592 return(pgrp);
2593 }
2594
2595 struct pgrp *
2596 tty_pgrp(struct tty * tp)
2597 {
2598 struct pgrp * pg = PGRP_NULL;
2599
2600 proc_list_lock();
2601 pg = tp->t_pgrp;
2602
2603 if (pg != PGRP_NULL) {
2604 if ((pg->pg_listflags & PGRP_FLAG_DEAD) != 0)
2605 panic("tty_pgrp: ref being povided for dead pgrp");
2606 pg->pg_refcount++;
2607 }
2608 proc_list_unlock();
2609
2610 return(pg);
2611 }
2612
2613 struct session *
2614 proc_session(proc_t p)
2615 {
2616 struct session * sess = SESSION_NULL;
2617
2618 if (p == PROC_NULL)
2619 return(SESSION_NULL);
2620
2621 proc_list_lock();
2622
2623 /* wait during transitions */
2624 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2625 p->p_listflag |= P_LIST_PGRPTRWAIT;
2626 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2627 }
2628
2629 if ((p->p_pgrp != PGRP_NULL) && ((sess = p->p_pgrp->pg_session) != SESSION_NULL)) {
2630 if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
2631 panic("proc_session:returning sesssion ref on terminating session");
2632 sess->s_count++;
2633 }
2634 proc_list_unlock();
2635 return(sess);
2636 }
2637
/*
 * Drop a reference on a session (s_count).  On the final release the
 * session is marked terminating, removed from the session hash,
 * marked dead, its mutex destroyed, and its memory freed; otherwise
 * only the count is decremented.  Takes the proc list lock
 * internally; the mutex destroy and free happen after it is dropped.
 */
void
session_rele(struct session *sess)
{
	proc_list_lock();
	if (--sess->s_count == 0) {
		if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
			panic("session_rele: terminating already terminated session");
		sess->s_listflags |= S_LIST_TERM;
		LIST_REMOVE(sess, s_hash);
		sess->s_listflags |= S_LIST_DEAD;
		/* sanity check: nobody may have re-referenced a dying session */
		if (sess->s_count != 0)
			panic("session_rele: freeing session in use");
		proc_list_unlock();
		/* destroy the embedded mutex before freeing the zone memory */
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_destroy(&sess->s_mlock, proc_mlock_grp);
#else
		lck_mtx_destroy(&sess->s_mlock, proc_lck_grp);
#endif
		FREE_ZONE(sess, sizeof(struct session), M_SESSION);
	} else
		proc_list_unlock();
}
2660
2661 int
2662 proc_transstart(proc_t p, int locked, int non_blocking)
2663 {
2664 if (locked == 0)
2665 proc_lock(p);
2666 while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
2667 if (((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT) || non_blocking) {
2668 if (locked == 0)
2669 proc_unlock(p);
2670 return EDEADLK;
2671 }
2672 p->p_lflag |= P_LTRANSWAIT;
2673 msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
2674 }
2675 p->p_lflag |= P_LINTRANSIT;
2676 p->p_transholder = current_thread();
2677 if (locked == 0)
2678 proc_unlock(p);
2679 return 0;
2680 }
2681
/*
 * Mark the in-flight transition on p as committed (P_LTRANSCOMMIT).
 * Must be called by the thread that started the transition
 * (asserted via p_transholder).  Any threads sleeping on the
 * transition (P_LTRANSWAIT) are woken so they can observe the
 * commit.  'locked' non-zero means the caller already holds the
 * proc lock.
 */
void
proc_transcommit(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);

	assert ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT);
	assert (p->p_transholder == current_thread());
	p->p_lflag |= P_LTRANSCOMMIT;

	if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
		p->p_lflag &= ~P_LTRANSWAIT;
		wakeup(&p->p_lflag);
	}
	if (locked == 0)
		proc_unlock(p);
}
2699
/*
 * End a transition on p: clear P_LINTRANSIT and P_LTRANSCOMMIT,
 * release the holder, and wake any threads sleeping on the
 * transition (P_LTRANSWAIT).  'locked' non-zero means the caller
 * already holds the proc lock.
 */
void
proc_transend(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);

	p->p_lflag &= ~( P_LINTRANSIT | P_LTRANSCOMMIT);
	p->p_transholder = NULL;

	if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
		p->p_lflag &= ~P_LTRANSWAIT;
		wakeup(&p->p_lflag);
	}
	if (locked == 0)
		proc_unlock(p);
}
2716
2717 int
2718 proc_transwait(proc_t p, int locked)
2719 {
2720 if (locked == 0)
2721 proc_lock(p);
2722 while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
2723 if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT && current_proc() == p) {
2724 if (locked == 0)
2725 proc_unlock(p);
2726 return EDEADLK;
2727 }
2728 p->p_lflag |= P_LTRANSWAIT;
2729 msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
2730 }
2731 if (locked == 0)
2732 proc_unlock(p);
2733 return 0;
2734 }
2735
/*
 * Acquire the mutex protecting per-proc knote lists (p_klist).
 */
void
proc_klist_lock(void)
{
	lck_mtx_lock(proc_klist_mlock);
}
2741
/*
 * Release the mutex taken with proc_klist_lock().
 */
void
proc_klist_unlock(void)
{
	lck_mtx_unlock(proc_klist_mlock);
}
2747
/*
 * Post 'hint' to every knote attached to p's klist, under the
 * proc klist lock.
 */
void
proc_knote(struct proc * p, long hint)
{
	proc_klist_lock();
	KNOTE(&p->p_klist, hint);
	proc_klist_unlock();
}
2755
/*
 * Detach every knote from p's klist so no knote can reference the
 * proc after it is reaped.  Each knote's proc back-pointer is
 * cleared before the knote is detached from the list.
 */
void
proc_knote_drain(struct proc *p)
{
	struct knote *kn = NULL;

	/*
	 * Clear the proc's klist to avoid references after the proc is reaped.
	 */
	proc_klist_lock();
	while ((kn = SLIST_FIRST(&p->p_klist))) {
		kn->kn_ptr.p_proc = PROC_NULL;
		KNOTE_DETACH(&p->p_klist, kn);
	}
	proc_klist_unlock();
}
2771
/*
 * Set the P_LREGISTER flag on p under the proc lock.
 */
void
proc_setregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag |= P_LREGISTER;
	proc_unlock(p);
}
2779
/*
 * Clear the P_LREGISTER flag on p under the proc lock.
 */
void
proc_resetregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag &= ~P_LREGISTER;
	proc_unlock(p);
}
2787
/*
 * Return the cached process group id of p.
 * NOTE(review): unlike proc_pgrp(), there is no PROC_NULL check and
 * no locking here — callers must pass a valid proc and tolerate a
 * value that may be mid-transition.
 */
pid_t
proc_pgrpid(proc_t p)
{
	return p->p_pgrpid;
}
2793
2794 pid_t
2795 proc_selfpgrpid()
2796 {
2797 return current_proc()->p_pgrpid;
2798 }
2799
2800
2801 /* return control and action states */
2802 int
2803 proc_getpcontrol(int pid, int * pcontrolp)
2804 {
2805 proc_t p;
2806
2807 p = proc_find(pid);
2808 if (p == PROC_NULL)
2809 return(ESRCH);
2810 if (pcontrolp != NULL)
2811 *pcontrolp = p->p_pcaction;
2812
2813 proc_rele(p);
2814 return(0);
2815 }
2816
/*
 * Apply p's configured low-swap pcontrol action (throttle, suspend,
 * or kill) if no action has been taken yet (PROC_ACTION_STATE == 0).
 * The action-taken state is set under the proc lock, which is then
 * dropped in every switch arm before the (possibly blocking)
 * task_suspend()/psignal() call.  Always returns PROC_RETURNED.
 */
int
proc_dopcontrol(proc_t p)
{
	int pcontrol;

	proc_lock(p);

	pcontrol = PROC_CONTROL_STATE(p);

	if (PROC_ACTION_STATE(p) == 0) {
		switch(pcontrol) {
		case P_PCTHROTTLE:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: throttling pid %d (%s)\n", p->p_pid, p->p_comm);
			break;

		case P_PCSUSP:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: suspending pid %d (%s)\n", p->p_pid, p->p_comm);
			task_suspend(p->task);
			break;

		case P_PCKILL:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
			psignal(p, SIGKILL);
			break;

		default:
			/* no (recognized) control state: nothing to do */
			proc_unlock(p);
		}

	} else
		proc_unlock(p);

	return(PROC_RETURNED);
}
2857
2858
2859 /*
2860 * Resume a throttled or suspended process. This is an internal interface that's only
2861 * used by the user level code that presents the GUI when we run out of swap space and
2862 * hence is restricted to processes with superuser privileges.
2863 */
2864
/*
 * Undo a previously applied low-swap pcontrol action on 'pid':
 * unthrottle or task_resume() as appropriate.  Permitted only for
 * processes validated as resource-control owners (P_LVMRSRCOWNER)
 * or for superuser.  Returns the suser() error on permission
 * failure, ESRCH for an unknown pid, 0 otherwise.
 */
int
proc_resetpcontrol(int pid)
{
	proc_t p;
	int pcontrol;
	int error;
	proc_t self = current_proc();

	/* if the process has been validated to handle resource control or root is valid one */
	if (((self->p_lflag & P_LVMRSRCOWNER) == 0) && (error = suser(kauth_cred_get(), 0)))
		return error;

	p = proc_find(pid);
	if (p == PROC_NULL)
		return(ESRCH);

	proc_lock(p);

	pcontrol = PROC_CONTROL_STATE(p);

	/* only processes that already had an action taken can be reset */
	if(PROC_ACTION_STATE(p) !=0) {
		switch(pcontrol) {
		case P_PCTHROTTLE:
			PROC_RESETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: unthrottling pid %d (%s)\n", p->p_pid, p->p_comm);
			break;

		case P_PCSUSP:
			PROC_RESETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: resuming pid %d (%s)\n", p->p_pid, p->p_comm);
			task_resume(p->task);
			break;

		case P_PCKILL:
			/* Huh? */
			/* NOTE(review): a killed process cannot be "unkilled"; this
			 * deliberately (re)sets — not resets — the action state and
			 * only logs the attempt. */
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: attempt to unkill pid %d (%s) ignored\n", p->p_pid, p->p_comm);
			break;

		default:
			proc_unlock(p);
		}

	} else
		proc_unlock(p);

	proc_rele(p);
	return(0);
}
2917
2918
2919
/*
 * Accumulator filled in by proc_pcontrol_filter() when scanning all
 * processes on a low-compressor-space event.  Three categories:
 *   pcs_*  - processes with a pcontrol action set but not yet taken
 *   npcs_* - processes with no pcontrol action associated
 *   apcs_* - processes whose pcontrol action was already taken
 * For pcs_/npcs_ the single biggest compressor consumer is tracked
 * by pid/uniqueid/size, alongside category totals and counts.
 */
struct no_paging_space
{
	uint64_t	pcs_max_size;	/* biggest compressed footprint in category */
	uint64_t	pcs_uniqueid;	/* uniqueid of that biggest process */
	int		pcs_pid;	/* pid of that biggest process */
	int		pcs_proc_count;
	uint64_t	pcs_total_size;

	uint64_t	npcs_max_size;
	uint64_t	npcs_uniqueid;
	int		npcs_pid;
	int		npcs_proc_count;
	uint64_t	npcs_total_size;

	int		apcs_proc_count;
	uint64_t	apcs_total_size;
};
2937
2938
2939 static int
2940 proc_pcontrol_filter(proc_t p, void *arg)
2941 {
2942 struct no_paging_space *nps;
2943 uint64_t compressed;
2944
2945 nps = (struct no_paging_space *)arg;
2946
2947 compressed = get_task_compressed(p->task);
2948
2949 if (PROC_CONTROL_STATE(p)) {
2950 if (PROC_ACTION_STATE(p) == 0) {
2951 if (compressed > nps->pcs_max_size) {
2952 nps->pcs_pid = p->p_pid;
2953 nps->pcs_uniqueid = p->p_uniqueid;
2954 nps->pcs_max_size = compressed;
2955 }
2956 nps->pcs_total_size += compressed;
2957 nps->pcs_proc_count++;
2958 } else {
2959 nps->apcs_total_size += compressed;
2960 nps->apcs_proc_count++;
2961 }
2962 } else {
2963 if (compressed > nps->npcs_max_size) {
2964 nps->npcs_pid = p->p_pid;
2965 nps->npcs_uniqueid = p->p_uniqueid;
2966 nps->npcs_max_size = compressed;
2967 }
2968 nps->npcs_total_size += compressed;
2969 nps->npcs_proc_count++;
2970
2971 }
2972 return (0);
2973 }
2974
2975
/*
 * No-op proc_iterate() callout paired with proc_pcontrol_filter();
 * all the work happens in the filter, so this just returns
 * PROC_RETURNED for every process.
 */
static int
proc_pcontrol_null(__unused proc_t p, __unused void *arg)
{
	return(PROC_RETURNED);
}
2981
2982
2983 /*
2984 * Deal with the low on compressor pool space condition... this function
2985 * gets called when we are approaching the limits of the compressor pool or
2986 * we are unable to create a new swap file.
2987 * Since this eventually creates a memory deadlock situtation, we need to take action to free up
2988 * memory resources (both compressed and uncompressed) in order to prevent the system from hanging completely.
2989 * There are 2 categories of processes to deal with. Those that have an action
2990 * associated with them by the task itself and those that do not. Actionable
2991 * tasks can have one of three categories specified: ones that
2992 * can be killed immediately, ones that should be suspended, and ones that should
2993 * be throttled. Processes that do not have an action associated with them are normally
2994 * ignored unless they are utilizing such a large percentage of the compressor pool (currently 50%)
2995 * that only by killing them can we hope to put the system back into a usable state.
2996 */
2997
2998 #define NO_PAGING_SPACE_DEBUG 0
2999
3000 extern uint64_t vm_compressor_pages_compressed(void);
3001
3002 struct timeval last_no_space_action = {0, 0};
3003
/*
 * React to a low-compressor-space / no-swap condition (see the
 * block comment above).  Rate-limited to once per 5 seconds.
 * Returns 1 when a pcontrol action was applied to the biggest
 * actionable process, 0 otherwise (including when the dominant
 * non-actionable process was killed outright, or nothing eligible
 * was found).
 */
int
no_paging_space_action()
{
	proc_t p;
	struct no_paging_space nps;
	struct timeval now;

	/*
	 * Throttle how often we come through here. Once every 5 seconds should be plenty.
	 */
	microtime(&now);

	if (now.tv_sec <= last_no_space_action.tv_sec + 5)
		return (0);

	/*
	 * Examine all processes and find the biggest (biggest is based on the number of pages this
	 * task has in the compressor pool) that has been marked to have some action
	 * taken when swap space runs out... we also find the biggest that hasn't been marked for
	 * action.
	 *
	 * If the biggest non-actionable task is over the "dangerously big" threashold (currently 50% of
	 * the total number of pages held by the compressor, we go ahead and kill it since no other task
	 * can have any real effect on the situation. Otherwise, we go after the actionable process.
	 */
	bzero(&nps, sizeof(nps));

	proc_iterate(PROC_ALLPROCLIST, proc_pcontrol_null, (void *)NULL, proc_pcontrol_filter, (void *)&nps);

#if NO_PAGING_SPACE_DEBUG
	printf("low swap: npcs_proc_count = %d, npcs_total_size = %qd, npcs_max_size = %qd\n",
	       nps.npcs_proc_count, nps.npcs_total_size, nps.npcs_max_size);
	printf("low swap: pcs_proc_count = %d, pcs_total_size = %qd, pcs_max_size = %qd\n",
	       nps.pcs_proc_count, nps.pcs_total_size, nps.pcs_max_size);
	printf("low swap: apcs_proc_count = %d, apcs_total_size = %qd\n",
	       nps.apcs_proc_count, nps.apcs_total_size);
#endif
	if (nps.npcs_max_size > (vm_compressor_pages_compressed() * 50) / 100) {
		/*
		 * for now we'll knock out any task that has more then 50% of the pages
		 * held by the compressor
		 */
		if ((p = proc_find(nps.npcs_pid)) != PROC_NULL) {

			if (nps.npcs_uniqueid == p->p_uniqueid) {
				/*
				 * verify this is still the same process
				 * in case the proc exited and the pid got reused while
				 * we were finishing the proc_iterate and getting to this point
				 */
				last_no_space_action = now;

				printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
				psignal(p, SIGKILL);

				proc_rele(p);

				return (0);
			}

			proc_rele(p);
		}
	}

	if (nps.pcs_max_size > 0) {
		if ((p = proc_find(nps.pcs_pid)) != PROC_NULL) {

			if (nps.pcs_uniqueid == p->p_uniqueid) {
				/*
				 * verify this is still the same process
				 * in case the proc exited and the pid got reused while
				 * we were finishing the proc_iterate and getting to this point
				 */
				last_no_space_action = now;

				proc_dopcontrol(p);

				proc_rele(p);

				return (1);
			}

			proc_rele(p);
		}
	}
	/* nothing eligible found; still arm the rate limiter */
	last_no_space_action = now;

	printf("low swap: unable to find any eligible processes to take action on\n");

	return (0);
}
3095
/*
 * proc_trace_log system call: ask the task identified by
 * (uap->pid, uap->uniqueid) to send its trace memory.  Requires the
 * PRIV_PROC_TRACE_INSPECT privilege (EPERM otherwise).  Returns
 * ENOENT when the pid cannot be found or the uniqueid no longer
 * matches (pid reuse), EINVAL when task_send_trace_memory() fails,
 * 0 on success.  The proc_find() reference is dropped on all paths.
 */
int
proc_trace_log(__unused proc_t p, struct proc_trace_log_args *uap, __unused int *retval)
{
	int ret = 0;
	proc_t target_proc = PROC_NULL;
	pid_t target_pid = uap->pid;
	uint64_t target_uniqueid = uap->uniqueid;
	task_t target_task = NULL;

	if (priv_check_cred(kauth_cred_get(), PRIV_PROC_TRACE_INSPECT, 0)) {
		ret = EPERM;
		goto out;
	}
	target_proc = proc_find(target_pid);
	if (target_proc != PROC_NULL) {
		/* guard against pid reuse between lookup and use */
		if (target_uniqueid != proc_uniqueid(target_proc)) {
			ret = ENOENT;
			goto out;
		}

		target_task = proc_task(target_proc);
		if (task_send_trace_memory(target_task, target_pid, target_uniqueid)) {
			ret = EINVAL;
			goto out;
		}
	} else
		ret = ENOENT;

out:
	if (target_proc != PROC_NULL)
		proc_rele(target_proc);
	return (ret);
}
3129
3130 #if VM_SCAN_FOR_SHADOW_CHAIN
3131 extern int vm_map_shadow_max(vm_map_t map);
3132 int proc_shadow_max(void);
/*
 * Debug helper: walk every process (skipping those still in
 * creation, SIDL) and return the deepest VM object shadow chain
 * found in any task's map, per vm_map_shadow_max().
 * NOTE(review): iterates allproc under proc_list_lock without
 * taking per-proc references, so vm_map_shadow_max() runs while
 * the list lock is held.
 */
int proc_shadow_max(void)
{
	int retval, max;
	proc_t p;
	task_t task;
	vm_map_t map;

	max = 0;
	proc_list_lock();
	for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
		if (p->p_stat == SIDL)
			continue;
		task = p->task;
		if (task == NULL) {
			continue;
		}
		map = get_task_map(task);
		if (map == NULL) {
			continue;
		}
		retval = vm_map_shadow_max(map);
		if (retval > max) {
			max = retval;
		}
	}
	proc_list_unlock();
	return max;
}
3161 #endif /* VM_SCAN_FOR_SHADOW_CHAIN */
3162
3163 void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid);
3164 void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid)
3165 {
3166 if (target_proc != NULL) {
3167 target_proc->p_responsible_pid = responsible_pid;
3168 }
3169 return;
3170 }
3171
3172 int
3173 proc_chrooted(proc_t p)
3174 {
3175 int retval = 0;
3176
3177 if (p) {
3178 proc_fdlock(p);
3179 retval = (p->p_fd->fd_rdir != NULL) ? 1 : 0;
3180 proc_fdunlock(p);
3181 }
3182
3183 return retval;
3184 }