]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kern_proc.c
xnu-2782.10.72.tar.gz
[apple/xnu.git] / bsd / kern / kern_proc.c
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)kern_proc.c 8.4 (Berkeley) 1/4/94
62 */
63 /*
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
67 * Version 2.0.
68 */
69 /* HISTORY
70 * 04-Aug-97 Umesh Vaishampayan (umeshv@apple.com)
71 * Added current_proc_EXTERNAL() function for the use of kernel
72 *	loadable modules.
73 *
74 * 05-Jun-95 Mac Gillon (mgillon) at NeXT
75 * New version based on 3.3NS and 4.4
76 */
77
78
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/kernel.h>
82 #include <sys/proc_internal.h>
83 #include <sys/acct.h>
84 #include <sys/wait.h>
85 #include <sys/file_internal.h>
86 #include <sys/uio.h>
87 #include <sys/malloc.h>
88 #include <sys/lock.h>
89 #include <sys/mbuf.h>
90 #include <sys/ioctl.h>
91 #include <sys/tty.h>
92 #include <sys/signalvar.h>
93 #include <sys/syslog.h>
94 #include <sys/sysctl.h>
95 #include <sys/sysproto.h>
96 #include <sys/kauth.h>
97 #include <sys/codesign.h>
98 #include <sys/kernel_types.h>
99 #include <sys/ubc.h>
100 #include <kern/kalloc.h>
101 #include <kern/task.h>
102 #include <kern/coalition.h>
103 #include <sys/coalition.h>
104 #include <kern/assert.h>
105 #include <vm/vm_protos.h>
106 #include <vm/vm_map.h> /* vm_map_switch_protect() */
107 #include <vm/vm_pageout.h>
108 #include <mach/task.h>
109 #include <mach/message.h>
110 #include <sys/priv.h>
111 #include <sys/proc_info.h>
112 #include <sys/bsdtask_info.h>
113
114 #if CONFIG_MEMORYSTATUS
115 #include <sys/kern_memorystatus.h>
116 #endif
117
118 #if CONFIG_MACF
119 #include <security/mac_framework.h>
120 #endif
121
122 #include <libkern/crypto/sha1.h>
123
/*
 * Structure associated with user cacheing.
 */
struct uidinfo {
	LIST_ENTRY(uidinfo) ui_hash;	/* chain in the uid hash bucket */
	uid_t	ui_uid;			/* uid this entry tracks */
	long	ui_proccnt;		/* processes currently owned by ui_uid */
};
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
u_long uihash;		/* size of hash table - 1 */

/*
 * Other process lists
 */
struct pidhashhead *pidhashtbl;
u_long pidhash;
struct pgrphashhead *pgrphashtbl;
u_long pgrphash;
struct sesshashhead *sesshashtbl;
u_long sesshash;

struct proclist allproc;	/* all live processes */
struct proclist zombproc;	/* zombies awaiting reap */
extern struct tty cons;

#if CONFIG_LCTX
/*
 * Login Context
 */
static pid_t	lastlcid = 1;
static int	alllctx_cnt;

#define	LCID_MAX	8192	/* Does this really need to be large? */
static int	maxlcid = LCID_MAX;

LIST_HEAD(lctxlist, lctx);
static struct lctxlist alllctx;

lck_mtx_t alllctx_lock;
lck_grp_t * lctx_lck_grp;
lck_grp_attr_t * lctx_lck_grp_attr;
lck_attr_t * lctx_lck_attr;

static void	lctxinit(void);
#endif

extern int cs_debug;

#if DEBUG
#define __PROC_INTERNAL_DEBUG 1
#endif
/* Name to give to core files */
__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN+1] = {"/cores/core.%P"};

static void orphanpg(struct pgrp *pg);
void	proc_name_kdp(task_t t, char * buf, int size);
int	proc_threadname_kdp(void *uth, char *buf, size_t size);
void	proc_starttime_kdp(void *p, uint64_t *tv_sec, uint64_t *tv_usec);
char	*proc_name_address(void *p);

static void pgrp_add(struct pgrp * pgrp, proc_t parent, proc_t child);
static void pgrp_remove(proc_t p);
static void pgrp_replace(proc_t p, struct pgrp *pgrp);
static void pgdelete_dropref(struct pgrp *pgrp);
extern void pg_rele_dropref(struct pgrp * pgrp);
static int csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaddittoken);
static boolean_t proc_parent_is_currentproc(proc_t p);

/* Context handed to fixjob_callback() during proc iteration */
struct fixjob_iterargs {
	struct pgrp * pg;
	struct session * mysession;
	int entering;
};

int fixjob_callback(proc_t, void *);
200
/*
 * Initialize global process hashing structures.
 */
void
procinit(void)
{
	LIST_INIT(&allproc);
	LIST_INIT(&zombproc);
	/* hash table sizes scale with the configured process limit */
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
	sesshashtbl = hashinit(maxproc / 4, M_PROC, &sesshash);
	uihashtbl = hashinit(maxproc / 16, M_PROC, &uihash);
#if CONFIG_LCTX
	lctxinit();
#endif
}
217
/*
 * Change the count associated with number of processes
 * a given user is using. This routine protects the uihash
 * with the list lock
 */
int
chgproccnt(uid_t uid, int diff)
{
	struct uidinfo *uip;
	struct uidinfo *newuip = NULL;	/* entry preallocated on a retry pass */
	struct uihashhead *uipp;
	int retval;

again:
	proc_list_lock();
	uipp = UIHASH(uid);
	/* look for an existing entry for this uid */
	for (uip = uipp->lh_first; uip != 0; uip = uip->ui_hash.le_next)
		if (uip->ui_uid == uid)
			break;
	if (uip) {
		uip->ui_proccnt += diff;
		if (uip->ui_proccnt > 0) {
			retval = uip->ui_proccnt;
			proc_list_unlock();
			goto out;
		}
		if (uip->ui_proccnt < 0)
			panic("chgproccnt: procs < 0");
		/* count reached zero: retire the entry */
		LIST_REMOVE(uip, ui_hash);
		retval = 0;
		proc_list_unlock();
		FREE_ZONE(uip, sizeof(*uip), M_PROC);
		goto out;
	}
	/* no entry exists; a negative diff with no entry is a bug */
	if (diff <= 0) {
		if (diff == 0) {
			retval = 0;
			proc_list_unlock();
			goto out;
		}
		panic("chgproccnt: lost user");
	}
	/* insert the entry preallocated by a previous pass, if any */
	if (newuip != NULL) {
		uip = newuip;
		newuip = NULL;
		LIST_INSERT_HEAD(uipp, uip, ui_hash);
		uip->ui_uid = uid;
		uip->ui_proccnt = diff;
		retval = diff;
		proc_list_unlock();
		goto out;
	}
	/*
	 * Allocate without holding the list lock (M_WAITOK may block),
	 * then retry the lookup from the top.
	 */
	proc_list_unlock();
	MALLOC_ZONE(newuip, struct uidinfo *, sizeof(*uip), M_PROC, M_WAITOK);
	if (newuip == NULL)
		panic("chgproccnt: M_PROC zone depleted");
	goto again;
out:
	/* an entry appeared while we were allocating: free the spare */
	if (newuip != NULL)
		FREE_ZONE(newuip, sizeof(*uip), M_PROC);
	return(retval);
}
280
281 /*
282 * Is p an inferior of the current process?
283 */
284 int
285 inferior(proc_t p)
286 {
287 int retval = 0;
288
289 proc_list_lock();
290 for (; p != current_proc(); p = p->p_pptr)
291 if (p->p_pid == 0)
292 goto out;
293 retval = 1;
294 out:
295 proc_list_unlock();
296 return(retval);
297 }
298
299 /*
300 * Is p an inferior of t ?
301 */
302 int
303 isinferior(proc_t p, proc_t t)
304 {
305 int retval = 0;
306 int nchecked = 0;
307 proc_t start = p;
308
309 /* if p==t they are not inferior */
310 if (p == t)
311 return(0);
312
313 proc_list_lock();
314 for (; p != t; p = p->p_pptr) {
315 nchecked++;
316
317 /* Detect here if we're in a cycle */
318 if ((p->p_pid == 0) || (p->p_pptr == start) || (nchecked >= nprocs))
319 goto out;
320 }
321 retval = 1;
322 out:
323 proc_list_unlock();
324 return(retval);
325 }
326
327 int
328 proc_isinferior(int pid1, int pid2)
329 {
330 proc_t p = PROC_NULL;
331 proc_t t = PROC_NULL;
332 int retval = 0;
333
334 if (((p = proc_find(pid1)) != (proc_t)0 ) && ((t = proc_find(pid2)) != (proc_t)0))
335 retval = isinferior(p, t);
336
337 if (p != PROC_NULL)
338 proc_rele(p);
339 if (t != PROC_NULL)
340 proc_rele(t);
341
342 return(retval);
343 }
344
345 proc_t
346 proc_find(int pid)
347 {
348 return(proc_findinternal(pid, 0));
349 }
350
351 proc_t
352 proc_findinternal(int pid, int locked)
353 {
354 proc_t p = PROC_NULL;
355
356 if (locked == 0) {
357 proc_list_lock();
358 }
359
360 p = pfind_locked(pid);
361 if ((p == PROC_NULL) || (p != proc_ref_locked(p)))
362 p = PROC_NULL;
363
364 if (locked == 0) {
365 proc_list_unlock();
366 }
367
368 return(p);
369 }
370
371 proc_t
372 proc_findthread(thread_t thread)
373 {
374 proc_t p = PROC_NULL;
375 struct uthread *uth;
376
377 proc_list_lock();
378 uth = get_bsdthread_info(thread);
379 if (uth && (uth->uu_flag & UT_VFORK))
380 p = uth->uu_proc;
381 else
382 p = (proc_t)(get_bsdthreadtask_info(thread));
383 p = proc_ref_locked(p);
384 proc_list_unlock();
385 return(p);
386 }
387
388 int
389 proc_rele(proc_t p)
390 {
391 proc_list_lock();
392 proc_rele_locked(p);
393 proc_list_unlock();
394
395 return(0);
396 }
397
398 proc_t
399 proc_self(void)
400 {
401 struct proc * p;
402
403 p = current_proc();
404
405 proc_list_lock();
406 if (p != proc_ref_locked(p))
407 p = PROC_NULL;
408 proc_list_unlock();
409 return(p);
410 }
411
412
/*
 * Take a reference on p; caller must hold the proc list lock.
 * Returns p on success, PROC_NULL when p is still being created,
 * is a zombie, has started exit, or is being drained/dead.
 */
proc_t
proc_ref_locked(proc_t p)
{
	proc_t p1 = p;

	/* if process still in creation return failure */
	if ((p == PROC_NULL) || ((p->p_listflag & P_LIST_INCREATE) != 0))
		return (PROC_NULL);
	/* do not return process marked for termination */
	if ((p->p_stat != SZOMB) && ((p->p_listflag & P_LIST_EXITED) == 0) && ((p->p_listflag & (P_LIST_DRAINWAIT | P_LIST_DRAIN | P_LIST_DEAD)) == 0))
		p->p_refcount++;
	else
		p1 = PROC_NULL;

	return(p1);
}
429
/*
 * Drop a reference on p; caller must hold the proc list lock.
 * Wakes a pending proc_refdrain() when the last reference goes away.
 * Panics on refcount underflow.
 */
void
proc_rele_locked(proc_t p)
{

	if (p->p_refcount > 0) {
		p->p_refcount--;
		/* last ref gone while a drainer is waiting: wake it */
		if ((p->p_refcount == 0) && ((p->p_listflag & P_LIST_DRAINWAIT) == P_LIST_DRAINWAIT)) {
			p->p_listflag &= ~P_LIST_DRAINWAIT;
			wakeup(&p->p_refcount);
		}
	} else
		panic("proc_rele_locked -ve ref\n");

}
444
/*
 * Find an unreaped zombie by pid and take exclusive "waiting" control
 * of it (P_LIST_WAITING). Returns PROC_NULL if the pid is absent,
 * still being created, or has not started exit. Release with
 * proc_drop_zombref().
 */
proc_t
proc_find_zombref(int pid)
{
	proc_t p;

	proc_list_lock();

again:
	p = pfind_locked(pid);

	/* should we bail? */
	if ((p == PROC_NULL)					/* not found */
	    || ((p->p_listflag & P_LIST_INCREATE) != 0)		/* not created yet */
	    || ((p->p_listflag & P_LIST_EXITED) == 0)) {	/* not started exit */

		proc_list_unlock();
		return (PROC_NULL);
	}

	/* If someone else is controlling the (unreaped) zombie - wait */
	if ((p->p_listflag & P_LIST_WAITING) != 0) {
		(void)msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
		goto again;
	}
	p->p_listflag |= P_LIST_WAITING;

	proc_list_unlock();

	return(p);
}
475
476 void
477 proc_drop_zombref(proc_t p)
478 {
479 proc_list_lock();
480 if ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
481 p->p_listflag &= ~P_LIST_WAITING;
482 wakeup(&p->p_stat);
483 }
484 proc_list_unlock();
485 }
486
487
/*
 * Block until all outstanding references on p are dropped, then mark
 * the proc dead so proc_ref_locked() refuses any new references.
 */
void
proc_refdrain(proc_t p)
{

	proc_list_lock();

	p->p_listflag |= P_LIST_DRAIN;
	while (p->p_refcount) {
		/* proc_rele_locked() wakes us when the count reaches zero */
		p->p_listflag |= P_LIST_DRAINWAIT;
		msleep(&p->p_refcount, proc_list_mlock, 0, "proc_refdrain", 0) ;
	}
	p->p_listflag &= ~P_LIST_DRAIN;
	p->p_listflag |= P_LIST_DEAD;

	proc_list_unlock();


}
506
/*
 * Take a "parent ref" on p's parent, pinning it so it cannot complete
 * draining its children while the caller uses it. Returns the parent,
 * or PROC_NULL if the parent is gone, a zombie, has already drained
 * its children, or the retry loop gives up. Drop the ref with
 * proc_parentdropref().
 */
proc_t
proc_parentholdref(proc_t p)
{
	proc_t parent = PROC_NULL;
	proc_t pp;
	int loopcnt = 0;


	proc_list_lock();
loop:
	pp = p->p_pptr;
	/* parent missing, zombie, or finished its child drain: fail */
	if ((pp == PROC_NULL) || (pp->p_stat == SZOMB) || ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED))) {
		parent = PROC_NULL;
		goto out;
	}

	/* drain started but not finished: wait and re-evaluate */
	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == P_LIST_CHILDDRSTART) {
		pp->p_listflag |= P_LIST_CHILDDRWAIT;
		msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
		loopcnt++;
		/* bounded retries so we never wait forever */
		if (loopcnt == 5) {
			parent = PROC_NULL;
			goto out;
		}
		goto loop;
	}

	/* no drain in progress: safe to take the parent ref */
	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == 0) {
		pp->p_parentref++;
		parent = pp;
		goto out;
	}

out:
	proc_list_unlock();
	return(parent);
}
544 int
545 proc_parentdropref(proc_t p, int listlocked)
546 {
547 if (listlocked == 0)
548 proc_list_lock();
549
550 if (p->p_parentref > 0) {
551 p->p_parentref--;
552 if ((p->p_parentref == 0) && ((p->p_listflag & P_LIST_PARENTREFWAIT) == P_LIST_PARENTREFWAIT)) {
553 p->p_listflag &= ~P_LIST_PARENTREFWAIT;
554 wakeup(&p->p_parentref);
555 }
556 } else
557 panic("proc_parentdropref -ve ref\n");
558 if (listlocked == 0)
559 proc_list_unlock();
560
561 return(0);
562 }
563
/*
 * Begin draining child-list activity for exiting proc p: set the
 * start flag and block until all parent refs (proc_parentholdref)
 * have been dropped.
 */
void
proc_childdrainstart(proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_CHILDDRSTART) == P_LIST_CHILDDRSTART)
		panic("proc_childdrainstart: childdrain already started\n");
#endif
	p->p_listflag |= P_LIST_CHILDDRSTART;
	/* wait for all that hold parentrefs to drop */
	while (p->p_parentref > 0) {
		p->p_listflag |= P_LIST_PARENTREFWAIT;
		msleep(&p->p_parentref, proc_list_mlock, 0, "proc_childdrainstart", 0) ;
	}
}
578
579
580 void
581 proc_childdrainend(proc_t p)
582 {
583 #if __PROC_INTERNAL_DEBUG
584 if (p->p_childrencnt > 0)
585 panic("exiting: children stil hanging around\n");
586 #endif
587 p->p_listflag |= P_LIST_CHILDDRAINED;
588 if ((p->p_listflag & (P_LIST_CHILDLKWAIT |P_LIST_CHILDDRWAIT)) != 0) {
589 p->p_listflag &= ~(P_LIST_CHILDLKWAIT |P_LIST_CHILDDRWAIT);
590 wakeup(&p->p_childrencnt);
591 }
592 }
593
/*
 * Debug-only sanity checks run as a proc structure is about to be
 * freed: it must be off the pid hash and hold no residual counts.
 */
void
proc_checkdeadrefs(__unused proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INHASH) != 0)
		panic("proc being freed and still in hash %p: %u\n", p, p->p_listflag);
	if (p->p_childrencnt != 0)
		panic("proc being freed and pending children cnt %p:%d\n", p, p->p_childrencnt);
	if (p->p_refcount != 0)
		panic("proc being freed and pending refcount %p:%d\n", p, p->p_refcount);
	if (p->p_parentref != 0)
		panic("proc being freed and pending parentrefs %p:%d\n", p, p->p_parentref);
#endif
}
608
609 int
610 proc_pid(proc_t p)
611 {
612 return (p->p_pid);
613 }
614
615 int
616 proc_ppid(proc_t p)
617 {
618 return (p->p_ppid);
619 }
620
621 int
622 proc_selfpid(void)
623 {
624 return (current_proc()->p_pid);
625 }
626
627 int
628 proc_selfppid(void)
629 {
630 return (current_proc()->p_ppid);
631 }
632
#if CONFIG_DTRACE
/*
 * Return the proc that DTrace should attribute the current thread to,
 * accounting for the vfork window described below.
 */
static proc_t
dtrace_current_proc_vforking(void)
{
	thread_t th = current_thread();
	struct uthread *ut = get_bsdthread_info(th);

	if (ut &&
	    ((ut->uu_flag & (UT_VFORK|UT_VFORKING)) == (UT_VFORK|UT_VFORKING))) {
		/*
		 * Handle the narrow window where we're in the vfork syscall,
		 * but we're not quite ready to claim (in particular, to DTrace)
		 * that we're running as the child.
		 */
		return (get_bsdtask_info(get_threadtask(th)));
	}
	return (current_proc());
}

/* pid as DTrace should see it (vfork-aware) */
int
dtrace_proc_selfpid(void)
{
	return (dtrace_current_proc_vforking()->p_pid);
}

/* parent pid as DTrace should see it (vfork-aware) */
int
dtrace_proc_selfppid(void)
{
	return (dtrace_current_proc_vforking()->p_ppid);
}

/* real uid as DTrace should see it (vfork-aware) */
uid_t
dtrace_proc_selfruid(void)
{
	return (dtrace_current_proc_vforking()->p_ruid);
}
#endif /* CONFIG_DTRACE */
670
/*
 * Return p's parent with a reference held (drop with proc_rele()).
 * If the parent has exited but not yet drained its children, wait for
 * the drain to settle and retry instead of returning a proc that is
 * mid-teardown.
 */
proc_t
proc_parent(proc_t p)
{
	proc_t parent;
	proc_t pp;

	proc_list_lock();
loop:
	pp = p->p_pptr;
	parent = proc_ref_locked(pp);
	/* ref refused because parent is exiting with children undrained: wait */
	if ((parent == PROC_NULL) && (pp != PROC_NULL) && (pp->p_stat != SZOMB) && ((pp->p_listflag & P_LIST_EXITED) != 0) && ((pp->p_listflag & P_LIST_CHILDDRAINED)== 0)){
		pp->p_listflag |= P_LIST_CHILDLKWAIT;
		msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
		goto loop;
	}
	proc_list_unlock();
	return(parent);
}
689
690 static boolean_t
691 proc_parent_is_currentproc(proc_t p)
692 {
693 boolean_t ret = FALSE;
694
695 proc_list_lock();
696 if (p->p_pptr == current_proc())
697 ret = TRUE;
698
699 proc_list_unlock();
700 return ret;
701 }
702
703 void
704 proc_name(int pid, char * buf, int size)
705 {
706 proc_t p;
707
708 if ((p = proc_find(pid)) != PROC_NULL) {
709 strlcpy(buf, &p->p_comm[0], size);
710 proc_rele(p);
711 }
712 }
713
714 void
715 proc_name_kdp(task_t t, char * buf, int size)
716 {
717 proc_t p = get_bsdtask_info(t);
718
719 if (p != PROC_NULL)
720 strlcpy(buf, &p->p_comm[0], size);
721 }
722
723
724 int
725 proc_threadname_kdp(void *uth, char *buf, size_t size)
726 {
727 if (size < MAXTHREADNAMESIZE) {
728 /* this is really just a protective measure for the future in
729 * case the thread name size in stackshot gets out of sync with
730 * the BSD max thread name size. Note that bsd_getthreadname
731 * doesn't take input buffer size into account. */
732 return -1;
733 }
734
735 if (uth != NULL) {
736 bsd_getthreadname(uth, buf);
737 }
738 return 0;
739 }
740
/* note that this function is generally going to be called from stackshot,
 * and the arguments will be coming from a struct which is declared packed
 * thus the input arguments will in general be unaligned. We have to handle
 * that here. */
void
proc_starttime_kdp(void *p, uint64_t *tv_sec, uint64_t *tv_usec)
{
	proc_t pp = (proc_t)p;
	/* packed wrapper so the stores below tolerate unaligned pointers */
	struct uint64p {
		uint64_t val;
	} __attribute__((packed));

	if (pp != PROC_NULL) {
		if (tv_sec != NULL)
			((struct uint64p *)tv_sec)->val = pp->p_start.tv_sec;
		if (tv_usec != NULL)
			((struct uint64p *)tv_usec)->val = pp->p_start.tv_usec;
	}
}
760
761 char *
762 proc_name_address(void *p)
763 {
764 return &((proc_t)p)->p_comm[0];
765 }
766
767 void
768 proc_selfname(char * buf, int size)
769 {
770 proc_t p;
771
772 if ((p = current_proc())!= (proc_t)0) {
773 strlcpy(buf, &p->p_comm[0], size);
774 }
775 }
776
777 void
778 proc_signal(int pid, int signum)
779 {
780 proc_t p;
781
782 if ((p = proc_find(pid)) != PROC_NULL) {
783 psignal(p, signum);
784 proc_rele(p);
785 }
786 }
787
788 int
789 proc_issignal(int pid, sigset_t mask)
790 {
791 proc_t p;
792 int error=0;
793
794 if ((p = proc_find(pid)) != PROC_NULL) {
795 error = proc_pendingsignals(p, mask);
796 proc_rele(p);
797 }
798
799 return(error);
800 }
801
802 int
803 proc_noremotehang(proc_t p)
804 {
805 int retval = 0;
806
807 if (p)
808 retval = p->p_flag & P_NOREMOTEHANG;
809 return(retval? 1: 0);
810
811 }
812
813 int
814 proc_exiting(proc_t p)
815 {
816 int retval = 0;
817
818 if (p)
819 retval = p->p_lflag & P_LEXIT;
820 return(retval? 1: 0);
821 }
822
823 int
824 proc_forcequota(proc_t p)
825 {
826 int retval = 0;
827
828 if (p)
829 retval = p->p_flag & P_FORCEQUOTA;
830 return(retval? 1: 0);
831
832 }
833
834 int
835 proc_suser(proc_t p)
836 {
837 kauth_cred_t my_cred;
838 int error;
839
840 my_cred = kauth_cred_proc_ref(p);
841 error = suser(my_cred, &p->p_acflag);
842 kauth_cred_unref(&my_cred);
843 return(error);
844 }
845
846 task_t
847 proc_task(proc_t proc)
848 {
849 return (task_t)proc->task;
850 }
851
852 /*
853 * Obtain the first thread in a process
854 *
855 * XXX This is a bad thing to do; it exists predominantly to support the
856 * XXX use of proc_t's in places that should really be using
857 * XXX thread_t's instead. This maintains historical behaviour, but really
858 * XXX needs an audit of the context (proxy vs. not) to clean up.
859 */
860 thread_t
861 proc_thread(proc_t proc)
862 {
863 uthread_t uth = TAILQ_FIRST(&proc->p_uthlist);
864
865 if (uth != NULL)
866 return(uth->uu_context.vc_thread);
867
868 return(NULL);
869 }
870
871 kauth_cred_t
872 proc_ucred(proc_t p)
873 {
874 return(p->p_ucred);
875 }
876
877 struct uthread *
878 current_uthread()
879 {
880 thread_t th = current_thread();
881
882 return((struct uthread *)get_bsdthread_info(th));
883 }
884
885
886 int
887 proc_is64bit(proc_t p)
888 {
889 return(IS_64BIT_PROCESS(p));
890 }
891
892 int
893 proc_pidversion(proc_t p)
894 {
895 return(p->p_idversion);
896 }
897
898 uint64_t
899 proc_uniqueid(proc_t p)
900 {
901 return(p->p_uniqueid);
902 }
903
904 uint64_t
905 proc_puniqueid(proc_t p)
906 {
907 return(p->p_puniqueid);
908 }
909
/*
 * Return the coalition id of the task backing p, or 0 when the kernel
 * is built without coalition support.
 */
uint64_t
proc_coalitionid(__unused proc_t p)
{
#if CONFIG_COALITIONS
	return(task_coalition_id(p->task));
#else
	return 0;
#endif
}
919
920 uint64_t
921 proc_was_throttled(proc_t p)
922 {
923 return (p->was_throttled);
924 }
925
926 uint64_t
927 proc_did_throttle(proc_t p)
928 {
929 return (p->did_throttle);
930 }
931
932 int
933 proc_getcdhash(proc_t p, unsigned char *cdhash)
934 {
935 return vn_getcdhash(p->p_textvp, p->p_textoff, cdhash);
936 }
937
938 void
939 proc_getexecutableuuid(proc_t p, unsigned char *uuidbuf, unsigned long size)
940 {
941 if (size >= sizeof(p->p_uuid)) {
942 memcpy(uuidbuf, p->p_uuid, sizeof(p->p_uuid));
943 }
944 }
945
946 /* Return vnode for executable with an iocount. Must be released with vnode_put() */
947 vnode_t
948 proc_getexecutablevnode(proc_t p)
949 {
950 vnode_t tvp = p->p_textvp;
951
952 if ( tvp != NULLVP) {
953 if (vnode_getwithref(tvp) == 0) {
954 return tvp;
955 }
956 }
957
958 return NULLVP;
959 }
960
961
962 void
963 bsd_set_dependency_capable(task_t task)
964 {
965 proc_t p = get_bsdtask_info(task);
966
967 if (p) {
968 OSBitOrAtomic(P_DEPENDENCY_CAPABLE, &p->p_flag);
969 }
970 }
971
972
973 int
974 IS_64BIT_PROCESS(proc_t p)
975 {
976 if (p && (p->p_flag & P_LP64))
977 return(1);
978 else
979 return(0);
980 }
981
/*
 * Locate a process by number; caller is expected to hold the proc
 * list lock (see proc_findinternal / proc_find_zombref callers).
 * No reference is taken on the returned proc.
 */
proc_t
pfind_locked(pid_t pid)
{
	proc_t p;
#if DEBUG
	proc_t q;
#endif

	/* pid 0 is the kernel process, which is not on the pid hash */
	if (!pid)
		return (kernproc);

	for (p = PIDHASH(pid)->lh_first; p != 0; p = p->p_hash.le_next) {
		if (p->p_pid == pid) {
#if DEBUG
			/* debug builds assert pid uniqueness within the chain */
			for (q = p->p_hash.le_next; q != 0; q = q->p_hash.le_next) {
				if ((p !=q) && (q->p_pid == pid))
					panic("two procs with same pid %p:%p:%d:%d\n", p, q, p->p_pid, q->p_pid);
			}
#endif
			return (p);
		}
	}
	return (NULL);
}
1009
1010 /*
1011 * Locate a zombie by PID
1012 */
1013 __private_extern__ proc_t
1014 pzfind(pid_t pid)
1015 {
1016 proc_t p;
1017
1018
1019 proc_list_lock();
1020
1021 for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next)
1022 if (p->p_pid == pid)
1023 break;
1024
1025 proc_list_unlock();
1026
1027 return (p);
1028 }
1029
1030 /*
1031 * Locate a process group by number
1032 */
1033
1034 struct pgrp *
1035 pgfind(pid_t pgid)
1036 {
1037 struct pgrp * pgrp;
1038
1039 proc_list_lock();
1040 pgrp = pgfind_internal(pgid);
1041 if ((pgrp == NULL) || ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) != 0))
1042 pgrp = PGRP_NULL;
1043 else
1044 pgrp->pg_refcount++;
1045 proc_list_unlock();
1046 return(pgrp);
1047 }
1048
1049
1050
1051 struct pgrp *
1052 pgfind_internal(pid_t pgid)
1053 {
1054 struct pgrp *pgrp;
1055
1056 for (pgrp = PGRPHASH(pgid)->lh_first; pgrp != 0; pgrp = pgrp->pg_hash.le_next)
1057 if (pgrp->pg_id == pgid)
1058 return (pgrp);
1059 return (NULL);
1060 }
1061
1062 void
1063 pg_rele(struct pgrp * pgrp)
1064 {
1065 if(pgrp == PGRP_NULL)
1066 return;
1067 pg_rele_dropref(pgrp);
1068 }
1069
/*
 * Drop a process-group reference; when this is the final reference and
 * the group is marked for termination, tear the group down via
 * pgdelete_dropref() (which consumes the reference).
 */
void
pg_rele_dropref(struct pgrp * pgrp)
{
	proc_list_lock();
	if ((pgrp->pg_refcount == 1) && ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) == PGRP_FLAG_TERMINATE)) {
		proc_list_unlock();
		pgdelete_dropref(pgrp);
		return;
	}

	pgrp->pg_refcount--;
	proc_list_unlock();
}
1083
1084 struct session *
1085 session_find_internal(pid_t sessid)
1086 {
1087 struct session *sess;
1088
1089 for (sess = SESSHASH(sessid)->lh_first; sess != 0; sess = sess->s_hash.le_next)
1090 if (sess->s_sid == sessid)
1091 return (sess);
1092 return (NULL);
1093 }
1094
1095
/*
 * Make a new process ready to become a useful member of society by making it
 * visible in all the right places and initialize its own lists to empty.
 *
 * Parameters:	parent			The parent of the process to insert
 *		child			The child process to insert
 *
 * Returns:	(void)
 *
 * Notes:	Insert a child process into the parents process group, assign
 *		the child the parent process pointer and PPID of the parent,
 *		place it on the parents p_children list as a sibling,
 *		initialize its own child list, place it in the allproc list,
 *		insert it in the proper hash bucket, and initialize its
 *		event list.
 */
void
pinsertchild(proc_t parent, proc_t child)
{
	struct pgrp * pg;

	LIST_INIT(&child->p_children);
	TAILQ_INIT(&child->p_evlist);
	child->p_pptr = parent;
	child->p_ppid = parent->p_pid;
	child->p_puniqueid = parent->p_uniqueid;

	/* the child joins the parent's process group */
	pg = proc_pgrp(parent);
	pgrp_add(pg, parent, child);
	pg_rele(pg);

	proc_list_lock();

#if CONFIG_MEMORYSTATUS
	memorystatus_add(child, TRUE);
#endif

	parent->p_childrencnt++;
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);

	LIST_INSERT_HEAD(&allproc, child, p_list);
	/* mark the completion of proc creation */
	child->p_listflag &= ~P_LIST_INCREATE;

	proc_list_unlock();
}
1142
/*
 * Move p to a new or existing process group (and session)
 *
 * Returns:	0			Success
 *		ESRCH			No such process
 */
int
enterpgrp(proc_t p, pid_t pgid, int mksess)
{
	struct pgrp *pgrp;		/* target group (referenced), if it exists */
	struct pgrp *mypgrp;		/* p's current group (referenced) */
	struct session * procsp;	/* p's current session (referenced) */

	pgrp = pgfind(pgid);
	mypgrp = proc_pgrp(p);
	procsp = proc_session(p);

#if DIAGNOSTIC
	if (pgrp != NULL && mksess)	/* firewalls */
		panic("enterpgrp: setsid into non-empty pgrp");
	if (SESS_LEADER(p, procsp))
		panic("enterpgrp: session leader attempted setpgrp");
#endif
	if (pgrp == PGRP_NULL) {
		pid_t savepid = p->p_pid;
		proc_t np = PROC_NULL;
		/*
		 * new process group
		 */
#if DIAGNOSTIC
		if (p->p_pid != pgid)
			panic("enterpgrp: new pgrp and pid != pgid");
#endif
		MALLOC_ZONE(pgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP,
		    M_WAITOK);
		if (pgrp == NULL)
			panic("enterpgrp: M_PGRP zone depleted");
		/* re-validate p after the potentially blocking allocation */
		if ((np = proc_find(savepid)) == NULL || np != p) {
			if (np != PROC_NULL)
				proc_rele(np);
			if (mypgrp != PGRP_NULL)
				pg_rele(mypgrp);
			if (procsp != SESSION_NULL)
				session_rele(procsp);
			FREE_ZONE(pgrp, sizeof(struct pgrp), M_PGRP);
			return (ESRCH);
		}
		proc_rele(np);
		if (mksess) {
			struct session *sess;

			/*
			 * new session
			 */
			MALLOC_ZONE(sess, struct session *,
			    sizeof(struct session), M_SESSION, M_WAITOK);
			if (sess == NULL)
				panic("enterpgrp: M_SESSION zone depleted");
			sess->s_leader = p;
			sess->s_sid = p->p_pid;
			sess->s_count = 1;
			sess->s_ttyvp = NULL;
			sess->s_ttyp = TTY_NULL;
			sess->s_flags = 0;
			sess->s_listflags = 0;
			sess->s_ttypgrpid = NO_PID;
#if CONFIG_FINE_LOCK_GROUPS
			lck_mtx_init(&sess->s_mlock, proc_mlock_grp, proc_lck_attr);
#else
			lck_mtx_init(&sess->s_mlock, proc_lck_grp, proc_lck_attr);
#endif
			/* the new session inherits the old session's login name */
			bcopy(procsp->s_login, sess->s_login,
			    sizeof(sess->s_login));
			/* a new session leader has no controlling terminal */
			OSBitAndAtomic(~((uint32_t)P_CONTROLT), &p->p_flag);
			proc_list_lock();
			LIST_INSERT_HEAD(SESSHASH(sess->s_sid), sess, s_hash);
			proc_list_unlock();
			pgrp->pg_session = sess;
#if DIAGNOSTIC
			if (p != current_proc())
				panic("enterpgrp: mksession and p != curproc");
#endif
		} else {
			/* new group stays in the existing session */
			proc_list_lock();
			pgrp->pg_session = procsp;

			if ((pgrp->pg_session->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
				panic("enterpgrp: providing ref to terminating session ");
			pgrp->pg_session->s_count++;
			proc_list_unlock();
		}
		pgrp->pg_id = pgid;
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_init(&pgrp->pg_mlock, proc_mlock_grp, proc_lck_attr);
#else
		lck_mtx_init(&pgrp->pg_mlock, proc_lck_grp, proc_lck_attr);
#endif
		LIST_INIT(&pgrp->pg_members);
		pgrp->pg_membercnt = 0;
		pgrp->pg_jobc = 0;
		proc_list_lock();
		pgrp->pg_refcount = 1;
		pgrp->pg_listflags = 0;
		LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
		proc_list_unlock();
	} else if (pgrp == mypgrp) {
		/* already in the target group: drop both refs and succeed */
		pg_rele(pgrp);
		if (mypgrp != NULL)
			pg_rele(mypgrp);
		if (procsp != SESSION_NULL)
			session_rele(procsp);
		return (0);
	}

	if (procsp != SESSION_NULL)
		session_rele(procsp);
	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	fixjobc(p, mypgrp, 0);

	if(mypgrp != PGRP_NULL)
		pg_rele(mypgrp);
	pgrp_replace(p, pgrp);
	pg_rele(pgrp);

	return(0);
}
1274
1275 /*
1276 * remove process from process group
1277 */
1278 int
1279 leavepgrp(proc_t p)
1280 {
1281
1282 pgrp_remove(p);
1283 return (0);
1284 }
1285
/*
 * delete a process group
 *
 * Consumes the caller's reference on pgrp. Tears the group down only
 * once it is both empty and unreferenced; also drops the group's
 * session reference, freeing the session when its count reaches zero.
 */
static void
pgdelete_dropref(struct pgrp *pgrp)
{
	struct tty *ttyp;
	int emptypgrp  = 1;
	struct session *sessp;


	pgrp_lock(pgrp);
	if (pgrp->pg_membercnt != 0) {
		emptypgrp = 0;
	}
	pgrp_unlock(pgrp);

	proc_list_lock();
	pgrp->pg_refcount--;
	if ((emptypgrp == 0) || (pgrp->pg_membercnt != 0)) {
		proc_list_unlock();
		return;
	}

	pgrp->pg_listflags |= PGRP_FLAG_TERMINATE;

	/* other holders remain: the last pg_rele_dropref() re-enters here */
	if (pgrp->pg_refcount > 0) {
		proc_list_unlock();
		return;
	}

	pgrp->pg_listflags |= PGRP_FLAG_DEAD;
	LIST_REMOVE(pgrp, pg_hash);

	proc_list_unlock();

	/* detach the group from its terminal if it is the foreground group */
	ttyp = SESSION_TP(pgrp->pg_session);
	if (ttyp != TTY_NULL) {
		if (ttyp->t_pgrp == pgrp) {
			tty_lock(ttyp);
			/* Re-check after acquiring the lock */
			if (ttyp->t_pgrp == pgrp) {
				ttyp->t_pgrp = NULL;
				pgrp->pg_session->s_ttypgrpid = NO_PID;
			}
			tty_unlock(ttyp);
		}
	}

	proc_list_lock();

	sessp = pgrp->pg_session;
	if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
		panic("pg_deleteref: manipulating refs of already terminating session");
	if (--sessp->s_count == 0) {
		/* final session reference: mark terminating and free it */
		if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
			panic("pg_deleteref: terminating already terminated session");
		sessp->s_listflags |= S_LIST_TERM;
		ttyp = SESSION_TP(sessp);
		LIST_REMOVE(sessp, s_hash);
		proc_list_unlock();
		/* the session's terminal loses its session pointer */
		if (ttyp != TTY_NULL) {
			tty_lock(ttyp);
			if (ttyp->t_session == sessp)
				ttyp->t_session = NULL;
			tty_unlock(ttyp);
		}
		proc_list_lock();
		sessp->s_listflags |= S_LIST_DEAD;
		if (sessp->s_count != 0)
			panic("pg_deleteref: freeing session in use");
		proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_destroy(&sessp->s_mlock, proc_mlock_grp);
#else
		lck_mtx_destroy(&sessp->s_mlock, proc_lck_grp);
#endif
		FREE_ZONE(sessp, sizeof(struct session), M_SESSION);
	} else
		proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
	lck_mtx_destroy(&pgrp->pg_mlock, proc_mlock_grp);
#else
	lck_mtx_destroy(&pgrp->pg_mlock, proc_lck_grp);
#endif
	FREE_ZONE(pgrp, sizeof(*pgrp), M_PGRP);
}
1373
1374
1375 /*
1376 * Adjust pgrp jobc counters when specified process changes process group.
1377 * We count the number of processes in each process group that "qualify"
1378 * the group for terminal job control (those with a parent in a different
1379 * process group of the same session). If that count reaches zero, the
1380 * process group becomes orphaned. Check both the specified process'
1381 * process group and that of its children.
1382 * entering == 0 => p is leaving specified group.
1383 * entering == 1 => p is entering specified group.
1384 */
int
fixjob_callback(proc_t p, void * arg)
{
	struct fixjob_iterargs *fp;
	struct pgrp * pg, *hispg;
	struct session * mysession, *hissess;
	int entering;

	/*
	 * Per-child callback for fixjobc(): when the child sits in a
	 * different process group but the same session as the group
	 * being entered/left, adjust that group's job-control count
	 * (pg_jobc).  A count reaching zero orphans the group.
	 */
	fp = (struct fixjob_iterargs *)arg;
	pg = fp->pg;
	mysession = fp->mysession;
	entering = fp->entering;

	hispg = proc_pgrp(p);
	hissess = proc_session(p);

	if ((hispg != pg) &&
	    (hissess == mysession)) {
		/* NOTE(review): relies on hispg being non-NULL whenever the
		 * child's session matches a live session -- confirm that
		 * proc_pgrp() guarantees this for session members. */
		pgrp_lock(hispg);
		if (entering) {
			hispg->pg_jobc++;
			pgrp_unlock(hispg);
		} else if (--hispg->pg_jobc == 0) {
			/* drop the lock before signalling the orphaned group */
			pgrp_unlock(hispg);
			orphanpg(hispg);
		} else
			pgrp_unlock(hispg);
	}
	if (hissess != SESSION_NULL)
		session_rele(hissess);
	if (hispg != PGRP_NULL)
		pg_rele(hispg);

	return(PROC_RETURNED);
}
1420
void
fixjobc(proc_t p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp = PGRP_NULL;
	struct session *hissess = SESSION_NULL;
	struct session *mysession = pgrp->pg_session;
	proc_t parent;
	struct fixjob_iterargs fjarg;
	boolean_t proc_parent_self;

	/*
	 * Check if p's parent is current proc, if yes then no need to take
	 * a ref; calling proc_parent with current proc as parent may
	 * deadlock if current proc is exiting.
	 */
	proc_parent_self = proc_parent_is_currentproc(p);
	if (proc_parent_self)
		parent = current_proc();
	else
		parent = proc_parent(p);

	if (parent != PROC_NULL) {
		hispgrp = proc_pgrp(parent);
		hissess = proc_session(parent);
		if (!proc_parent_self)
			proc_rele(parent);
	}

	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	if ((hispgrp != pgrp) &&
	    (hissess == mysession)) {
		pgrp_lock(pgrp);
		if (entering) {
			pgrp->pg_jobc++;
			pgrp_unlock(pgrp);
		}else if (--pgrp->pg_jobc == 0) {
			/* no qualifying members left: the group is orphaned */
			pgrp_unlock(pgrp);
			orphanpg(pgrp);
		} else
			pgrp_unlock(pgrp);
	}

	if (hissess != SESSION_NULL)
		session_rele(hissess);
	if (hispgrp != PGRP_NULL)
		pg_rele(hispgrp);

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	fjarg.pg = pgrp;
	fjarg.mysession = mysession;
	fjarg.entering = entering;
	proc_childrenwalk(p, fixjob_callback, &fjarg);
}
1482
/*
 * A process group has become orphaned;
 * if there are any stopped processes in the group,
 * hang up all processes in that group.
 */
1488 static void
1489 orphanpg(struct pgrp * pgrp)
1490 {
1491 proc_t p;
1492 pid_t * pid_list;
1493 int count, pidcount, i, alloc_count;
1494
1495 if (pgrp == PGRP_NULL)
1496 return;
1497 count = 0;
1498 pgrp_lock(pgrp);
1499 for (p = pgrp->pg_members.lh_first; p != 0; p = p->p_pglist.le_next) {
1500 if (p->p_stat == SSTOP) {
1501 for (p = pgrp->pg_members.lh_first; p != 0;
1502 p = p->p_pglist.le_next)
1503 count++;
1504 break; /* ??? stops after finding one.. */
1505 }
1506 }
1507 pgrp_unlock(pgrp);
1508
1509 count += 20;
1510 if (count > hard_maxproc)
1511 count = hard_maxproc;
1512 alloc_count = count * sizeof(pid_t);
1513 pid_list = (pid_t *)kalloc(alloc_count);
1514 bzero(pid_list, alloc_count);
1515
1516 pidcount = 0;
1517 pgrp_lock(pgrp);
1518 for (p = pgrp->pg_members.lh_first; p != 0;
1519 p = p->p_pglist.le_next) {
1520 if (p->p_stat == SSTOP) {
1521 for (p = pgrp->pg_members.lh_first; p != 0;
1522 p = p->p_pglist.le_next) {
1523 pid_list[pidcount] = p->p_pid;
1524 pidcount++;
1525 if (pidcount >= count)
1526 break;
1527 }
1528 break; /* ??? stops after finding one.. */
1529 }
1530 }
1531 pgrp_unlock(pgrp);
1532
1533 if (pidcount == 0)
1534 goto out;
1535
1536
1537 for (i = 0; i< pidcount; i++) {
1538 /* No handling or proc0 */
1539 if (pid_list[i] == 0)
1540 continue;
1541 p = proc_find(pid_list[i]);
1542 if (p) {
1543 proc_transwait(p, 0);
1544 pt_setrunnable(p);
1545 psignal(p, SIGHUP);
1546 psignal(p, SIGCONT);
1547 proc_rele(p);
1548 }
1549 }
1550 out:
1551 kfree(pid_list, alloc_count);
1552 return;
1553 }
1554
1555
1556
1557 /* XXX should be __private_extern__ */
1558 int
1559 proc_is_classic(proc_t p)
1560 {
1561 return (p->p_flag & P_TRANSLATED) ? 1 : 0;
1562 }
1563
/* XXX Why does this function exist? Need to kill it off... */
proc_t
current_proc_EXTERNAL(void)
{
	/* Exported trampoline for the in-kernel current_proc() accessor. */
	return (current_proc());
}
1570
1571 int
1572 proc_is_forcing_hfs_case_sensitivity(proc_t p)
1573 {
1574 return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY) ? 1 : 0;
1575 }
1576
1577 /*
1578 * proc_core_name(name, uid, pid)
1579 * Expand the name described in corefilename, using name, uid, and pid.
1580 * corefilename is a printf-like string, with three format specifiers:
1581 * %N name of process ("name")
1582 * %P process id (pid)
1583 * %U user id (uid)
1584 * For example, "%N.core" is the default; they can be disabled completely
1585 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
1586 * This is controlled by the sysctl variable kern.corefile (see above).
1587 */
1588 __private_extern__ int
1589 proc_core_name(const char *name, uid_t uid, pid_t pid, char *cf_name,
1590 size_t cf_name_len)
1591 {
1592 const char *format, *appendstr;
1593 char id_buf[11]; /* Buffer for pid/uid -- max 4B */
1594 size_t i, l, n;
1595
1596 if (cf_name == NULL)
1597 goto toolong;
1598
1599 format = corefilename;
1600 for (i = 0, n = 0; n < cf_name_len && format[i]; i++) {
1601 switch (format[i]) {
1602 case '%': /* Format character */
1603 i++;
1604 switch (format[i]) {
1605 case '%':
1606 appendstr = "%";
1607 break;
1608 case 'N': /* process name */
1609 appendstr = name;
1610 break;
1611 case 'P': /* process id */
1612 snprintf(id_buf, sizeof(id_buf), "%u", pid);
1613 appendstr = id_buf;
1614 break;
1615 case 'U': /* user id */
1616 snprintf(id_buf, sizeof(id_buf), "%u", uid);
1617 appendstr = id_buf;
1618 break;
1619 default:
1620 appendstr = "";
1621 log(LOG_ERR,
1622 "Unknown format character %c in `%s'\n",
1623 format[i], format);
1624 }
1625 l = strlen(appendstr);
1626 if ((n + l) >= cf_name_len)
1627 goto toolong;
1628 bcopy(appendstr, cf_name + n, l);
1629 n += l;
1630 break;
1631 default:
1632 cf_name[n++] = format[i];
1633 }
1634 }
1635 if (format[i] != '\0')
1636 goto toolong;
1637 return (0);
1638 toolong:
1639 log(LOG_ERR, "pid %ld (%s), uid (%u): corename is too long\n",
1640 (long)pid, name, (uint32_t)uid);
1641 return (1);
1642 }
1643
1644 #if CONFIG_LCTX
1645
static void
lctxinit(void)
{
	/* One-time setup of the global login-context list and its locking. */
	LIST_INIT(&alllctx);
	alllctx_cnt = 0;

	/* allocate lctx lock group attribute and group */
	lctx_lck_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_attr_setstat(lctx_lck_grp_attr);

	lctx_lck_grp = lck_grp_alloc_init("lctx", lctx_lck_grp_attr);
	/* Allocate lctx lock attribute */
	lctx_lck_attr = lck_attr_alloc_init();

	lck_mtx_init(&alllctx_lock, lctx_lck_grp, lctx_lck_attr);
}
1662
1663 /*
1664 * Locate login context by number.
1665 */
1666 struct lctx *
1667 lcfind(pid_t lcid)
1668 {
1669 struct lctx *l;
1670
1671 ALLLCTX_LOCK;
1672 LIST_FOREACH(l, &alllctx, lc_list) {
1673 if (l->lc_id == lcid) {
1674 LCTX_LOCK(l);
1675 break;
1676 }
1677 }
1678 ALLLCTX_UNLOCK;
1679 return (l);
1680 }
1681
/*
 * Advance the login-context id allocator, wrapping back to 1 once
 * lastlcid passes maxlcid (id 0 is never handed out).
 * XXX fix: dropped the stray trailing backslash after "while (0)",
 * which silently continued the macro onto the following blank line.
 */
#define LCID_INC				\
	do {					\
		lastlcid++;			\
		if (lastlcid > maxlcid)		\
			lastlcid = 1;		\
	} while (0)
1688
struct lctx *
lccreate(void)
{
	struct lctx *l;
	pid_t newlcid;

	/* Not very efficient but this isn't a common operation. */
	while ((l = lcfind(lastlcid)) != NULL) {
		/* id already in use; unlock the hit and probe the next id */
		LCTX_UNLOCK(l);
		LCID_INC;
	}
	newlcid = lastlcid;
	LCID_INC;

	/*
	 * NOTE(review): the chosen id is not reserved while no lock is
	 * held between lcfind() and the insertion below -- confirm that
	 * callers serialize lccreate() to avoid duplicate ids.
	 * M_WAITOK|M_ZERO: the allocation blocks rather than failing.
	 */
	MALLOC(l, struct lctx *, sizeof(struct lctx), M_LCTX, M_WAITOK|M_ZERO);
	l->lc_id = newlcid;
	LIST_INIT(&l->lc_members);
	lck_mtx_init(&l->lc_mtx, lctx_lck_grp, lctx_lck_attr);
#if CONFIG_MACF
	l->lc_label = mac_lctx_label_alloc();
#endif
	ALLLCTX_LOCK;
	LIST_INSERT_HEAD(&alllctx, l, lc_list);
	alllctx_cnt++;
	ALLLCTX_UNLOCK;

	return (l);
}
1717
1718 /*
1719 * Call with proc protected (either by being invisible
1720 * or by having the all-login-context lock held) and
1721 * the lctx locked.
1722 *
1723 * Will unlock lctx on return.
1724 */
void
enterlctx (proc_t p, struct lctx *l, __unused int create)
{
	if (l == NULL)
		return;

	/* link p into the context's member list (lctx locked by caller) */
	p->p_lctx = l;
	LIST_INSERT_HEAD(&l->lc_members, p, p_lclist);
	l->lc_mc++;

#if CONFIG_MACF
	/* notify MAC whether this is a brand-new context or a join */
	if (create)
		mac_lctx_notify_create(p, l);
	else
		mac_lctx_notify_join(p, l);
#endif
	LCTX_UNLOCK(l);

	return;
}
1745
1746 /*
1747 * Remove process from login context (if any). Called with p protected by
1748 * the alllctx lock.
1749 */
void
leavelctx (proc_t p)
{
	struct lctx *l;

	if (p->p_lctx == NULL) {
		return;
	}

	LCTX_LOCK(p->p_lctx);
	l = p->p_lctx;
	p->p_lctx = NULL;
	LIST_REMOVE(p, p_lclist);
	l->lc_mc--;
#if CONFIG_MACF
	mac_lctx_notify_leave(p, l);
#endif
	/* last member out destroys the context */
	if (LIST_EMPTY(&l->lc_members)) {
		LIST_REMOVE(l, lc_list);
		alllctx_cnt--;
		LCTX_UNLOCK(l);
		lck_mtx_destroy(&l->lc_mtx, lctx_lck_grp);
#if CONFIG_MACF
		mac_lctx_label_free(l->lc_label);
		l->lc_label = NULL;
#endif
		FREE(l, M_LCTX);
	} else {
		LCTX_UNLOCK(l);
	}
	return;
}
1782
static int
sysctl_kern_lctx SYSCTL_HANDLER_ARGS
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct kinfo_lctx kil;
	struct lctx *l;
	int error;

	error = 0;

	/*
	 * Shared handler for kern.lctx.all (dump whole table) and
	 * kern.lctx.lcid (single lookup); dispatch on the requesting OID.
	 */
	switch (oidp->oid_number) {
	case KERN_LCTX_ALL:
		ALLLCTX_LOCK;
		/* Request for size. */
		if (!req->oldptr) {
			/* report the size with one spare slot of headroom */
			error = SYSCTL_OUT(req, 0,
				sizeof(struct kinfo_lctx) * (alllctx_cnt + 1));
			goto out;
		}
		break;

	case KERN_LCTX_LCID:
		/* No space */
		if (req->oldlen < sizeof(struct kinfo_lctx))
			return (ENOMEM);
		/* No argument */
		if (namelen != 1)
			return (EINVAL);
		/* No login context */
		l = lcfind((pid_t)name[0]);
		if (l == NULL)
			return (ENOENT);
		/* lcfind() returned the context locked */
		kil.id = l->lc_id;
		kil.mc = l->lc_mc;
		LCTX_UNLOCK(l);
		return (SYSCTL_OUT(req, (caddr_t)&kil, sizeof(kil)));

	default:
		return (EINVAL);
	}

	/* Provided buffer is too small. */
	if (req->oldlen < (sizeof(struct kinfo_lctx) * alllctx_cnt)) {
		error = ENOMEM;
		goto out;
	}

	/* copy out one kinfo_lctx record per context (alllctx lock held) */
	LIST_FOREACH(l, &alllctx, lc_list) {
		LCTX_LOCK(l);
		kil.id = l->lc_id;
		kil.mc = l->lc_mc;
		LCTX_UNLOCK(l);
		error = SYSCTL_OUT(req, (caddr_t)&kil, sizeof(kil));
		if (error)
			break;
	}
out:
	ALLLCTX_UNLOCK;

	return (error);
}
1845
/* sysctl tree: kern.lctx.{all,lcid,last,count,max} */
SYSCTL_NODE(_kern, KERN_LCTX, lctx, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Login Context");

/* kern.lctx.all: dump the whole login-context table via sysctl_kern_lctx */
SYSCTL_PROC(_kern_lctx, KERN_LCTX_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT | CTLFLAG_LOCKED,
	    0, 0, sysctl_kern_lctx, "S,lctx",
	    "Return entire login context table");
/* kern.lctx.lcid: look up a single context by id (same handler) */
SYSCTL_NODE(_kern_lctx, KERN_LCTX_LCID, lcid, CTLFLAG_RD | CTLFLAG_LOCKED,
	    sysctl_kern_lctx, "Login Context Table");
/* read-only counters for the id allocator and table population */
SYSCTL_INT(_kern_lctx, OID_AUTO, last, CTLFLAG_RD | CTLFLAG_LOCKED, &lastlcid, 0, "");
SYSCTL_INT(_kern_lctx, OID_AUTO, count, CTLFLAG_RD | CTLFLAG_LOCKED, &alllctx_cnt, 0, "");
SYSCTL_INT(_kern_lctx, OID_AUTO, max, CTLFLAG_RW | CTLFLAG_LOCKED, &maxlcid, 0, "");
1856
1857 #endif /* LCTX */
1858
1859 /* Code Signing related routines */
1860
int
csops(__unused proc_t p, struct csops_args *uap, __unused int32_t *retval)
{
	/* csops(2): code-signing operations, no audit token supplied. */
	return(csops_internal(uap->pid, uap->ops, uap->useraddr,
		uap->usersize, USER_ADDR_NULL));
}
1867
int
csops_audittoken(__unused proc_t p, struct csops_audittoken_args *uap, __unused int32_t *retval)
{
	/*
	 * csops_audittoken(2): like csops(2), but the caller must supply
	 * an audit token, which csops_internal() validates against the
	 * target process's pid/idversion.
	 */
	if (uap->uaudittoken == USER_ADDR_NULL)
		return(EINVAL);
	return(csops_internal(uap->pid, uap->ops, uap->useraddr,
		uap->usersize, uap->uaudittoken));
}
1876
/*
 * Copy a code-signing blob to user space.  A missing blob yields a
 * zeroed 8-byte header.  If the user buffer can hold the header but
 * not the blob, only a header carrying the blob length (network byte
 * order, at offset 4) is copied out and ERANGE is returned.
 */
static int
csops_copy_token(void *start, size_t length, user_size_t usize, user_addr_t uaddr)
{
	char fakeheader[8] = { 0 };
	int error;

	if (usize < sizeof(fakeheader))
		return ERANGE;

	/* if no blob, fill in zero header */
	if (NULL == start) {
		start = fakeheader;
		length = sizeof(fakeheader);
	} else if (usize < length) {
		/* ... if input too short, copy out length of entitlement */
		uint32_t length32 = htonl((uint32_t)length);
		memcpy(&fakeheader[4], &length32, sizeof(length32));

		error = copyout(fakeheader, uaddr, sizeof(fakeheader));
		if (error == 0)
			return ERANGE; /* input buffer too short; ERANGE signals that */
		return error;
	}
	return copyout(start, uaddr, length);
}
1902
/*
 * Backend for csops(2)/csops_audittoken(2).  Resolves `pid' (0 means
 * the caller), restricts mutating operations to self or a superuser
 * credential, optionally validates the caller-supplied audit token
 * against the target's pid/idversion, then dispatches on `ops'.
 * Returns 0 or an errno.
 */
static int
csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaudittoken)
{
	size_t usize = (size_t)CAST_DOWN(size_t, usersize);
	proc_t pt;
	int forself;
	int error;
	vnode_t tvp;
	off_t toff;
	unsigned char cdhash[SHA1_RESULTLEN];
	audit_token_t token;
	unsigned int upid=0, uidversion = 0;

	forself = error = 0;

	if (pid == 0)
		pid = proc_selfpid();
	if (pid == proc_selfpid())
		forself = 1;

	/* read-only query ops are unrestricted; everything else needs
	 * self-target or a superuser credential */
	switch (ops) {
	case CS_OPS_STATUS:
	case CS_OPS_CDHASH:
	case CS_OPS_PIDOFFSET:
	case CS_OPS_ENTITLEMENTS_BLOB:
	case CS_OPS_IDENTITY:
	case CS_OPS_BLOB:
		break;	/* unrestricted */
	default:
		if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE)
			return(EPERM);
		break;
	}

	pt = proc_find(pid);
	if (pt == PROC_NULL)
		return(ESRCH);

	upid = pt->p_pid;
	uidversion = pt->p_idversion;
	if (uaudittoken != USER_ADDR_NULL) {

		error = copyin(uaudittoken, &token, sizeof(audit_token_t));
		if (error != 0)
			goto out;
		/* verify the audit token pid/idversion matches with proc */
		if ((token.val[5] != upid) || (token.val[7] != uidversion)) {
			error = ESRCH;
			goto out;
		}
	}

	switch (ops) {

	case CS_OPS_STATUS: {
		uint32_t retflags;

		/* snapshot p_csflags under the proc lock */
		proc_lock(pt);
		retflags = pt->p_csflags;
		if (cs_enforcement(pt))
			retflags |= CS_ENFORCEMENT;
		proc_unlock(pt);

		if (uaddr != USER_ADDR_NULL)
			error = copyout(&retflags, uaddr, sizeof(uint32_t));
		break;
	}
	case CS_OPS_MARKINVALID:
		proc_lock(pt);
		if ((pt->p_csflags & CS_VALID) == CS_VALID) {	/* is currently valid */
			pt->p_csflags &= ~CS_VALID;	/* set invalid */
			/* CS_KILL processes are killed on invalidation */
			if ((pt->p_csflags & CS_KILL) == CS_KILL) {
				pt->p_csflags |= CS_KILLED;
				proc_unlock(pt);
				if (cs_debug) {
					printf("CODE SIGNING: marked invalid by pid %d: "
					       "p=%d[%s] honoring CS_KILL, final status 0x%x\n",
					       proc_selfpid(), pt->p_pid, pt->p_comm, pt->p_csflags);
				}
				psignal(pt, SIGKILL);
			} else
				proc_unlock(pt);
		} else
			proc_unlock(pt);

		break;

	case CS_OPS_MARKHARD:
		proc_lock(pt);
		pt->p_csflags |= CS_HARD;
		if ((pt->p_csflags & CS_VALID) == 0) {
			/* @@@ allow? reject? kill? @@@ */
			proc_unlock(pt);
			error = EINVAL;
			goto out;
		} else
			proc_unlock(pt);
		break;

	case CS_OPS_MARKKILL:
		proc_lock(pt);
		pt->p_csflags |= CS_KILL;
		/* already invalid: enforce the new KILL policy right away */
		if ((pt->p_csflags & CS_VALID) == 0) {
			proc_unlock(pt);
			psignal(pt, SIGKILL);
		} else
			proc_unlock(pt);
		break;

	case CS_OPS_PIDOFFSET:
		toff = pt->p_textoff;
		proc_rele(pt);
		error = copyout(&toff, uaddr, sizeof(toff));
		return(error);

	case CS_OPS_CDHASH:

		/* pt already holds a reference on its p_textvp */
		tvp = pt->p_textvp;
		toff = pt->p_textoff;

		if (tvp == NULLVP || usize != SHA1_RESULTLEN) {
			proc_rele(pt);
			return EINVAL;
		}

		error = vn_getcdhash(tvp, toff, cdhash);
		proc_rele(pt);

		if (error == 0) {
			error = copyout(cdhash, uaddr, sizeof (cdhash));
		}

		return error;

	case CS_OPS_ENTITLEMENTS_BLOB: {
		void *start;
		size_t length;

		proc_lock(pt);

		/* only valid (unrevoked) processes report entitlements */
		if ((pt->p_csflags & CS_VALID) == 0) {
			proc_unlock(pt);
			error = EINVAL;
			break;
		}

		error = cs_entitlements_blob_get(pt, &start, &length);
		proc_unlock(pt);
		if (error)
			break;

		error = csops_copy_token(start, length, usize, uaddr);
		break;
	}
	case CS_OPS_MARKRESTRICT:
		proc_lock(pt);
		pt->p_csflags |= CS_RESTRICT;
		proc_unlock(pt);
		break;

	case CS_OPS_SET_STATUS: {
		uint32_t flags;

		if (usize < sizeof(flags)) {
			error = ERANGE;
			break;
		}

		error = copyin(uaddr, &flags, sizeof(flags));
		if (error)
			break;

		/* only allow setting a subset of all code sign flags */
		flags &=
		    CS_HARD | CS_EXEC_SET_HARD |
		    CS_KILL | CS_EXEC_SET_KILL |
		    CS_RESTRICT |
		    CS_REQUIRE_LV |
		    CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT |
		    CS_ENTITLEMENTS_VALIDATED;

		proc_lock(pt);
		if (pt->p_csflags & CS_VALID)
			pt->p_csflags |= flags;
		else
			error = EINVAL;
		proc_unlock(pt);

		break;
	}
	case CS_OPS_BLOB: {
		void *start;
		size_t length;

		proc_lock(pt);
		if ((pt->p_csflags & CS_VALID) == 0) {
			proc_unlock(pt);
			error = EINVAL;
			break;
		}

		error = cs_blob_get(pt, &start, &length);
		proc_unlock(pt);
		if (error)
			break;

		error = csops_copy_token(start, length, usize, uaddr);
		break;
	}
	case CS_OPS_IDENTITY: {
		const char *identity;
		uint8_t fakeheader[8];
		uint32_t idlen;
		size_t length;

		/*
		 * Make identity have a blob header to make it
		 * easier on userland to guess the identity
		 * length.
		 */
		if (usize < sizeof(fakeheader)) {
			error = ERANGE;
			break;
		}
		memset(fakeheader, 0, sizeof(fakeheader));

		proc_lock(pt);
		if ((pt->p_csflags & CS_VALID) == 0) {
			proc_unlock(pt);
			error = EINVAL;
			break;
		}

		identity = cs_identity_get(pt);
		proc_unlock(pt);
		if (identity == NULL) {
			error = ENOENT;
			break;
		}

		length = strlen(identity) + 1; /* include NUL */
		idlen = htonl(length + sizeof(fakeheader));
		memcpy(&fakeheader[4], &idlen, sizeof(idlen));

		error = copyout(fakeheader, uaddr, sizeof(fakeheader));
		if (error)
			break;

		/* copy the identity string after the header when it fits */
		if (usize < sizeof(fakeheader) + length)
			error = ERANGE;
		else if (usize > sizeof(fakeheader))
			error = copyout(identity, uaddr + sizeof(fakeheader), length);

		break;
	}

	case CS_OPS_SIGPUP_INSTALL:
		error = sigpup_install(uaddr);
		break;

	case CS_OPS_SIGPUP_DROP:
		error = sigpup_drop();
		break;

	default:
		error = EINVAL;
		break;
	}
out:
	proc_rele(pt);
	return(error);
}
2177
2178 int
2179 proc_iterate(flags, callout, arg, filterfn, filterarg)
2180 int flags;
2181 int (*callout)(proc_t, void *);
2182 void * arg;
2183 int (*filterfn)(proc_t, void *);
2184 void * filterarg;
2185 {
2186 proc_t p;
2187 pid_t * pid_list;
2188 int count, pidcount, alloc_count, i, retval;
2189
2190 count = nprocs+ 10;
2191 if (count > hard_maxproc)
2192 count = hard_maxproc;
2193 alloc_count = count * sizeof(pid_t);
2194 pid_list = (pid_t *)kalloc(alloc_count);
2195 bzero(pid_list, alloc_count);
2196
2197
2198 proc_list_lock();
2199
2200
2201 pidcount = 0;
2202 if (flags & PROC_ALLPROCLIST) {
2203 for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
2204 if (p->p_stat == SIDL)
2205 continue;
2206 if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
2207 pid_list[pidcount] = p->p_pid;
2208 pidcount++;
2209 if (pidcount >= count)
2210 break;
2211 }
2212 }
2213 }
2214 if ((pidcount < count ) && (flags & PROC_ZOMBPROCLIST)) {
2215 for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) {
2216 if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
2217 pid_list[pidcount] = p->p_pid;
2218 pidcount++;
2219 if (pidcount >= count)
2220 break;
2221 }
2222 }
2223 }
2224
2225
2226 proc_list_unlock();
2227
2228
2229 for (i = 0; i< pidcount; i++) {
2230 p = proc_find(pid_list[i]);
2231 if (p) {
2232 if ((flags & PROC_NOWAITTRANS) == 0)
2233 proc_transwait(p, 0);
2234 retval = callout(p, arg);
2235
2236 switch (retval) {
2237 case PROC_RETURNED:
2238 proc_rele(p);
2239 break;
2240 case PROC_RETURNED_DONE:
2241 proc_rele(p);
2242 goto out;
2243 case PROC_CLAIMED_DONE:
2244 goto out;
2245 case PROC_CLAIMED:
2246 default:
2247 break;
2248 }
2249 } else if (flags & PROC_ZOMBPROCLIST) {
2250 p = proc_find_zombref(pid_list[i]);
2251 if (p != PROC_NULL) {
2252 retval = callout(p, arg);
2253
2254 switch (retval) {
2255 case PROC_RETURNED:
2256 proc_drop_zombref(p);
2257 break;
2258 case PROC_RETURNED_DONE:
2259 proc_drop_zombref(p);
2260 goto out;
2261 case PROC_CLAIMED_DONE:
2262 goto out;
2263 case PROC_CLAIMED:
2264 default:
2265 break;
2266 }
2267 }
2268 }
2269 }
2270
2271 out:
2272 kfree(pid_list, alloc_count);
2273 return(0);
2274
2275 }
2276
2277
#if 0
/* This is for iteration in case of trivial non blocking callouts */
int
proc_scanall(flags, callout, arg)
	int flags;
	int (*callout)(proc_t, void *);
	void * arg;
{
	proc_t p;
	int retval;

	/*
	 * NOTE: compiled out (#if 0).  Walks allproc/zombproc entirely
	 * under proc_list_lock, so the callout must never block.
	 */
	proc_list_lock();

	if (flags & PROC_ALLPROCLIST) {
		for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
			retval = callout(p, arg);
			if (retval == PROC_RETURNED_DONE)
				goto out;
		}
	}
	if (flags & PROC_ZOMBPROCLIST) {
		for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) {
			retval = callout(p, arg);
			if (retval == PROC_RETURNED_DONE)
				goto out;
		}
	}
out:

	proc_list_unlock();

	return(0);
}
#endif
2314
2315
int
proc_rebootscan(callout, arg, filterfn, filterarg)
	int (*callout)(proc_t, void *);
	void * arg;
	int (*filterfn)(proc_t, void *);
	void * filterarg;
{
	proc_t p;
	int lockheld = 0, retval;

	/*
	 * Shutdown-time iterator.  Each time a process passes the filter,
	 * the list lock is dropped, the callout runs with a reference held,
	 * and the allproc scan RESTARTS from the top (the list may have
	 * changed while unlocked).  Terminates when a full pass finds no
	 * match or the callout returns a *_DONE code.  Always returns 0.
	 */
	proc_shutdown_exitcount = 0;

ps_allprocscan:

	proc_list_lock();

	lockheld = 1;

	for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
		if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
			p = proc_ref_locked(p);

			proc_list_unlock();
			lockheld = 0;

			if (p) {
				proc_transwait(p, 0);
				retval = callout(p, arg);
				proc_rele(p);

				switch (retval) {
				case PROC_RETURNED_DONE:
				case PROC_CLAIMED_DONE:
					goto out;
				}
			}
			/* the list lock was dropped: rescan from the top */
			goto ps_allprocscan;
		} /* filter pass */
	} /* allproc walk thru */

	if (lockheld == 1) {
		proc_list_unlock();
		lockheld = 0;
	}

out:
	return(0);

}
2365
2366
2367 int
2368 proc_childrenwalk(parent, callout, arg)
2369 struct proc * parent;
2370 int (*callout)(proc_t, void *);
2371 void * arg;
2372 {
2373 register struct proc *p;
2374 pid_t * pid_list;
2375 int count, pidcount, alloc_count, i, retval;
2376
2377 count = nprocs+ 10;
2378 if (count > hard_maxproc)
2379 count = hard_maxproc;
2380 alloc_count = count * sizeof(pid_t);
2381 pid_list = (pid_t *)kalloc(alloc_count);
2382 bzero(pid_list, alloc_count);
2383
2384
2385 proc_list_lock();
2386
2387
2388 pidcount = 0;
2389 for (p = parent->p_children.lh_first; (p != 0); p = p->p_sibling.le_next) {
2390 if (p->p_stat == SIDL)
2391 continue;
2392 pid_list[pidcount] = p->p_pid;
2393 pidcount++;
2394 if (pidcount >= count)
2395 break;
2396 }
2397 proc_list_unlock();
2398
2399
2400 for (i = 0; i< pidcount; i++) {
2401 p = proc_find(pid_list[i]);
2402 if (p) {
2403 proc_transwait(p, 0);
2404 retval = callout(p, arg);
2405
2406 switch (retval) {
2407 case PROC_RETURNED:
2408 case PROC_RETURNED_DONE:
2409 proc_rele(p);
2410 if (retval == PROC_RETURNED_DONE) {
2411 goto out;
2412 }
2413 break;
2414
2415 case PROC_CLAIMED_DONE:
2416 goto out;
2417 case PROC_CLAIMED:
2418 default:
2419 break;
2420 }
2421 }
2422 }
2423
2424 out:
2425 kfree(pid_list, alloc_count);
2426 return(0);
2427
2428 }
2429
2430 /*
2431 */
2432 /* PGRP_BLOCKITERATE is not implemented yet */
2433 int
2434 pgrp_iterate(pgrp, flags, callout, arg, filterfn, filterarg)
2435 struct pgrp *pgrp;
2436 int flags;
2437 int (*callout)(proc_t, void *);
2438 void * arg;
2439 int (*filterfn)(proc_t, void *);
2440 void * filterarg;
2441 {
2442 proc_t p;
2443 pid_t * pid_list;
2444 int count, pidcount, i, alloc_count;
2445 int retval;
2446 pid_t pgid;
2447 int dropref = flags & PGRP_DROPREF;
2448 #if 0
2449 int serialize = flags & PGRP_BLOCKITERATE;
2450 #else
2451 int serialize = 0;
2452 #endif
2453
2454 if (pgrp == 0)
2455 return(0);
2456 count = pgrp->pg_membercnt + 10;
2457 if (count > hard_maxproc)
2458 count = hard_maxproc;
2459 alloc_count = count * sizeof(pid_t);
2460 pid_list = (pid_t *)kalloc(alloc_count);
2461 bzero(pid_list, alloc_count);
2462
2463 pgrp_lock(pgrp);
2464 if (serialize != 0) {
2465 while ((pgrp->pg_listflags & PGRP_FLAG_ITERABEGIN) == PGRP_FLAG_ITERABEGIN) {
2466 pgrp->pg_listflags |= PGRP_FLAG_ITERWAIT;
2467 msleep(&pgrp->pg_listflags, &pgrp->pg_mlock, 0, "pgrp_iterate", 0);
2468 }
2469 pgrp->pg_listflags |= PGRP_FLAG_ITERABEGIN;
2470 }
2471
2472 pgid = pgrp->pg_id;
2473
2474 pidcount = 0;
2475 for (p = pgrp->pg_members.lh_first; p != 0;
2476 p = p->p_pglist.le_next) {
2477 if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
2478 pid_list[pidcount] = p->p_pid;
2479 pidcount++;
2480 if (pidcount >= count)
2481 break;
2482 }
2483 }
2484
2485
2486 pgrp_unlock(pgrp);
2487 if ((serialize == 0) && (dropref != 0))
2488 pg_rele(pgrp);
2489
2490
2491 for (i = 0; i< pidcount; i++) {
2492 /* No handling or proc0 */
2493 if (pid_list[i] == 0)
2494 continue;
2495 p = proc_find(pid_list[i]);
2496 if (p) {
2497 if (p->p_pgrpid != pgid) {
2498 proc_rele(p);
2499 continue;
2500 }
2501 proc_transwait(p, 0);
2502 retval = callout(p, arg);
2503
2504 switch (retval) {
2505 case PROC_RETURNED:
2506 case PROC_RETURNED_DONE:
2507 proc_rele(p);
2508 if (retval == PROC_RETURNED_DONE) {
2509 goto out;
2510 }
2511 break;
2512
2513 case PROC_CLAIMED_DONE:
2514 goto out;
2515 case PROC_CLAIMED:
2516 default:
2517 break;
2518 }
2519 }
2520 }
2521 out:
2522 if (serialize != 0) {
2523 pgrp_lock(pgrp);
2524 pgrp->pg_listflags &= ~PGRP_FLAG_ITERABEGIN;
2525 if ((pgrp->pg_listflags & PGRP_FLAG_ITERWAIT) == PGRP_FLAG_ITERWAIT) {
2526 pgrp->pg_listflags &= ~PGRP_FLAG_ITERWAIT;
2527 wakeup(&pgrp->pg_listflags);
2528 }
2529 pgrp_unlock(pgrp);
2530 if (dropref != 0)
2531 pg_rele(pgrp);
2532 }
2533 kfree(pid_list, alloc_count);
2534 return(0);
2535 }
2536
/*
 * Link `child' into `pgrp' (caller supplies a referenced pgrp).  The
 * child is inserted after `parent' in the member list when a parent is
 * given, otherwise at the head.
 */
static void
pgrp_add(struct pgrp * pgrp, struct proc * parent, struct proc * child)
{
	proc_list_lock();
	child->p_pgrp = pgrp;
	child->p_pgrpid = pgrp->pg_id;
	child->p_listflag |= P_LIST_INPGRP;
	/*
	 * When pgrp is being freed , a process can still
	 * request addition using setpgid from bash when
	 * login is terminated (login cycler) return ESRCH
	 * Safe to hold lock due to refcount on pgrp
	 */
	if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
		pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	if ((pgrp->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
		panic("pgrp_add : pgrp is dead adding process");
	proc_list_unlock();

	pgrp_lock(pgrp);
	pgrp->pg_membercnt++;
	if ( parent != PROC_NULL) {
		LIST_INSERT_AFTER(parent, child, p_pglist);
	}else {
		LIST_INSERT_HEAD(&pgrp->pg_members, child, p_pglist);
	}
	pgrp_unlock(pgrp);

	proc_list_lock();
	/* membership may have raced with termination; clear the flag again */
	if (((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (pgrp->pg_membercnt != 0)) {
		pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}
	proc_list_unlock();
}
2573
/*
 * Unlink p from its process group.  The last member leaving triggers
 * pgdelete_dropref(), which tears the group down.
 */
static void
pgrp_remove(struct proc * p)
{
	struct pgrp * pg;

	pg = proc_pgrp(p);

	proc_list_lock();
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INPGRP) == 0)
		panic("removing from pglist but no named ref\n");
#endif
	p->p_pgrpid = PGRPID_DEAD;
	p->p_listflag &= ~P_LIST_INPGRP;
	p->p_pgrp = NULL;
	proc_list_unlock();

	if (pg == PGRP_NULL)
		panic("pgrp_remove: pg is NULL");
	pgrp_lock(pg);
	pg->pg_membercnt--;

	if (pg->pg_membercnt < 0)
		panic("pgprp: -ve membercnt pgprp:%p p:%p\n",pg, p);

	LIST_REMOVE(p, p_pglist);
	if (pg->pg_members.lh_first == 0) {
		/* last member: pgdelete_dropref() consumes our reference */
		pgrp_unlock(pg);
		pgdelete_dropref(pg);
	} else {
		pgrp_unlock(pg);
		/* drop the reference taken by proc_pgrp() above */
		pg_rele(pg);
	}
}
2608
2609
/* cannot use proc_pgrp as it maybe stalled */
/*
 * Atomically (with respect to pgrp lookups) move process p from its
 * current process group to newpg.
 *
 * The P_LIST_PGRPTRANS flag marks p as "in transition"; proc_pgrp() and
 * proc_session() sleep while it is set, so no one observes the window in
 * which p->p_pgrp is NULL.  The remove half mirrors pgrp_remove(); the
 * add half mirrors pgrp_add().  Statement order is lock-protocol
 * sensitive — do not reorder.
 */
static void
pgrp_replace(struct proc * p, struct pgrp * newpg)
{
	struct pgrp * oldpg;



	proc_list_lock();

	/* wait out any other transition already in flight for this proc */
	while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
		p->p_listflag |= P_LIST_PGRPTRWAIT;
		(void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
	}

	/* block concurrent proc_pgrp()/proc_session() until we finish */
	p->p_listflag |= P_LIST_PGRPTRANS;

	oldpg = p->p_pgrp;
	if (oldpg == PGRP_NULL)
		panic("pgrp_replace: oldpg NULL");
	/* hold the old group alive across the unlock below */
	oldpg->pg_refcount++;
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INPGRP) == 0)
		panic("removing from pglist but no named ref\n");
#endif
	p->p_pgrpid = PGRPID_DEAD;
	p->p_listflag &= ~P_LIST_INPGRP;
	p->p_pgrp = NULL;

	proc_list_unlock();

	pgrp_lock(oldpg);
	oldpg->pg_membercnt--;
	if (oldpg->pg_membercnt < 0)
		panic("pgprp: -ve membercnt pgprp:%p p:%p\n",oldpg, p);
	LIST_REMOVE(p, p_pglist);
	if (oldpg->pg_members.lh_first == 0) {
		/* last member: tear the old group down (consumes our ref) */
		pgrp_unlock(oldpg);
		pgdelete_dropref(oldpg);
	} else {
		pgrp_unlock(oldpg);
		pg_rele(oldpg);
	}

	proc_list_lock();
	p->p_pgrp = newpg;
	p->p_pgrpid = newpg->pg_id;
	p->p_listflag |= P_LIST_INPGRP;
	/*
	 * When pgrp is being freed , a process can still
	 * request addition using setpgid from bash when
	 * login is terminated (login cycler) return ESRCH
	 * Safe to hold lock due to refcount on pgrp
	 */
	if ((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
		newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	if ((newpg->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
		panic("pgrp_add : pgrp is dead adding process");
	proc_list_unlock();

	pgrp_lock(newpg);
	newpg->pg_membercnt++;
	LIST_INSERT_HEAD(&newpg->pg_members, p, p_pglist);
	pgrp_unlock(newpg);

	proc_list_lock();
	/* re-check: membership may have raced with a terminate request */
	if (((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (newpg->pg_membercnt != 0)) {
		newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	/* transition done: clear the flag and wake any sleeping lookups */
	p->p_listflag &= ~P_LIST_PGRPTRANS;
	if ((p->p_listflag & P_LIST_PGRPTRWAIT) == P_LIST_PGRPTRWAIT) {
		p->p_listflag &= ~P_LIST_PGRPTRWAIT;
		wakeup(&p->p_pgrpid);

	}
	proc_list_unlock();
}
2690
/* Acquire the per-process-group mutex. */
void
pgrp_lock(struct pgrp * pgrp)
{
	lck_mtx_lock(&pgrp->pg_mlock);
}
2696
/* Release the per-process-group mutex. */
void
pgrp_unlock(struct pgrp * pgrp)
{
	lck_mtx_unlock(&pgrp->pg_mlock);
}
2702
/* Acquire the per-session mutex. */
void
session_lock(struct session * sess)
{
	lck_mtx_lock(&sess->s_mlock);
}
2708
2709
/* Release the per-session mutex. */
void
session_unlock(struct session * sess)
{
	lck_mtx_unlock(&sess->s_mlock);
}
2715
2716 struct pgrp *
2717 proc_pgrp(proc_t p)
2718 {
2719 struct pgrp * pgrp;
2720
2721 if (p == PROC_NULL)
2722 return(PGRP_NULL);
2723 proc_list_lock();
2724
2725 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2726 p->p_listflag |= P_LIST_PGRPTRWAIT;
2727 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2728 }
2729
2730 pgrp = p->p_pgrp;
2731
2732 assert(pgrp != NULL);
2733
2734 if (pgrp != PGRP_NULL) {
2735 pgrp->pg_refcount++;
2736 if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) != 0)
2737 panic("proc_pgrp: ref being povided for dead pgrp");
2738 }
2739
2740 proc_list_unlock();
2741
2742 return(pgrp);
2743 }
2744
2745 struct pgrp *
2746 tty_pgrp(struct tty * tp)
2747 {
2748 struct pgrp * pg = PGRP_NULL;
2749
2750 proc_list_lock();
2751 pg = tp->t_pgrp;
2752
2753 if (pg != PGRP_NULL) {
2754 if ((pg->pg_listflags & PGRP_FLAG_DEAD) != 0)
2755 panic("tty_pgrp: ref being povided for dead pgrp");
2756 pg->pg_refcount++;
2757 }
2758 proc_list_unlock();
2759
2760 return(pg);
2761 }
2762
2763 struct session *
2764 proc_session(proc_t p)
2765 {
2766 struct session * sess = SESSION_NULL;
2767
2768 if (p == PROC_NULL)
2769 return(SESSION_NULL);
2770
2771 proc_list_lock();
2772
2773 /* wait during transitions */
2774 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2775 p->p_listflag |= P_LIST_PGRPTRWAIT;
2776 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2777 }
2778
2779 if ((p->p_pgrp != PGRP_NULL) && ((sess = p->p_pgrp->pg_session) != SESSION_NULL)) {
2780 if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
2781 panic("proc_session:returning sesssion ref on terminating session");
2782 sess->s_count++;
2783 }
2784 proc_list_unlock();
2785 return(sess);
2786 }
2787
/*
 * Drop a reference on a session; on the last reference, unhash and
 * free it.  The TERM/DEAD list flags are set while still under the
 * proc_list lock so concurrent lookups can detect the teardown.
 */
void
session_rele(struct session *sess)
{
	proc_list_lock();
	if (--sess->s_count == 0) {
		if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
			panic("session_rele: terminating already terminated session");
		sess->s_listflags |= S_LIST_TERM;
		LIST_REMOVE(sess, s_hash);
		sess->s_listflags |= S_LIST_DEAD;
		/* a ref taken between the decrement and here would be fatal */
		if (sess->s_count != 0)
			panic("session_rele: freeing session in use");
		proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_destroy(&sess->s_mlock, proc_mlock_grp);
#else
		lck_mtx_destroy(&sess->s_mlock, proc_lck_grp);
#endif
		FREE_ZONE(sess, sizeof(struct session), M_SESSION);
	} else
		proc_list_unlock();
}
2810
/*
 * Begin a proc "transition" (exec/exit style critical section).
 *
 * locked       - non-zero if the caller already holds the proc lock.
 * non_blocking - if set, fail instead of sleeping when a transition
 *                is already in progress.
 *
 * Returns 0 on success with the transition held (owner recorded in
 * p_transholder), or EDEADLK if the in-progress transition is already
 * committed or the caller asked not to block.
 */
int
proc_transstart(proc_t p, int locked, int non_blocking)
{
	if (locked == 0)
		proc_lock(p);
	while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
		/* a committed transition will never be abandoned: sleeping would deadlock */
		if (((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT) || non_blocking) {
			if (locked == 0)
				proc_unlock(p);
			return EDEADLK;
		}
		p->p_lflag |= P_LTRANSWAIT;
		msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
	}
	p->p_lflag |= P_LINTRANSIT;
	p->p_transholder = current_thread();
	if (locked == 0)
		proc_unlock(p);
	return 0;
}
2831
/*
 * Mark the current transition as committed (point of no return).
 * Must be called by the thread that owns the transition.  Wakes any
 * waiters so they can observe the commit and bail with EDEADLK.
 */
void
proc_transcommit(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);

	assert ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT);
	assert (p->p_transholder == current_thread());
	p->p_lflag |= P_LTRANSCOMMIT;

	if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
		p->p_lflag &= ~P_LTRANSWAIT;
		wakeup(&p->p_lflag);
	}
	if (locked == 0)
		proc_unlock(p);
}
2849
/*
 * End the transition started by proc_transstart: clear the in-transit
 * and commit flags, drop ownership, and wake any sleeping waiters.
 */
void
proc_transend(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);

	p->p_lflag &= ~( P_LINTRANSIT | P_LTRANSCOMMIT);
	p->p_transholder = NULL;

	if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
		p->p_lflag &= ~P_LTRANSWAIT;
		wakeup(&p->p_lflag);
	}
	if (locked == 0)
		proc_unlock(p);
}
2866
/*
 * Wait for any in-progress transition on p to finish.
 *
 * Returns 0 once no transition is pending, or EDEADLK if the calling
 * process's own transition is already committed (waiting on oneself
 * would never wake).
 * NOTE(review): the msleep wait message reuses "proc_signstart" from
 * proc_transstart — looks like a copy-paste; debug-cosmetic only.
 */
int
proc_transwait(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);
	while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
		if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT && current_proc() == p) {
			if (locked == 0)
				proc_unlock(p);
			return EDEADLK;
		}
		p->p_lflag |= P_LTRANSWAIT;
		msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
	}
	if (locked == 0)
		proc_unlock(p);
	return 0;
}
2885
/* Acquire the global proc knote-list mutex. */
void
proc_klist_lock(void)
{
	lck_mtx_lock(proc_klist_mlock);
}
2891
/* Release the global proc knote-list mutex. */
void
proc_klist_unlock(void)
{
	lck_mtx_unlock(proc_klist_mlock);
}
2897
/* Post event `hint` to all knotes attached to process p. */
void
proc_knote(struct proc * p, long hint)
{
	proc_klist_lock();
	KNOTE(&p->p_klist, hint);
	proc_klist_unlock();
}
2905
/*
 * Detach every knote from p's klist and null out each knote's proc
 * back-pointer, so no knote can reference the proc after it is reaped.
 */
void
proc_knote_drain(struct proc *p)
{
	struct knote *kn = NULL;

	/*
	 * Clear the proc's klist to avoid references after the proc is reaped.
	 */
	proc_klist_lock();
	while ((kn = SLIST_FIRST(&p->p_klist))) {
		kn->kn_ptr.p_proc = PROC_NULL;
		KNOTE_DETACH(&p->p_klist, kn);
	}
	proc_klist_unlock();
}
2921
/* Set the P_LREGISTER flag on p under the proc lock. */
void
proc_setregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag |= P_LREGISTER;
	proc_unlock(p);
}
2929
/* Clear the P_LREGISTER flag on p under the proc lock. */
void
proc_resetregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag &= ~P_LREGISTER;
	proc_unlock(p);
}
2937
/* Return the cached process-group id of p (no lock; single field read). */
pid_t
proc_pgrpid(proc_t p)
{
	return p->p_pgrpid;
}
2943
2944 pid_t
2945 proc_selfpgrpid()
2946 {
2947 return current_proc()->p_pgrpid;
2948 }
2949
2950
/* return control and action states */
/*
 * Copy the p_pcaction (low-swap control/action state) of process `pid`
 * into *pcontrolp.  Returns ESRCH if no such process, else 0.
 */
int
proc_getpcontrol(int pid, int * pcontrolp)
{
	proc_t p;

	p = proc_find(pid);
	if (p == PROC_NULL)
		return(ESRCH);
	if (pcontrolp != NULL)
		*pcontrolp = p->p_pcaction;

	proc_rele(p);
	return(0);
}
2966
/*
 * Apply the low-swap action configured for process p: throttle,
 * suspend (task_suspend), or kill (SIGKILL).  No-op if an action has
 * already been taken (PROC_ACTION_STATE set).  The proc lock is
 * deliberately dropped before task_suspend/psignal, which may block.
 * Always returns PROC_RETURNED.
 */
int
proc_dopcontrol(proc_t p)
{
	int pcontrol;

	proc_lock(p);

	pcontrol = PROC_CONTROL_STATE(p);

	if (PROC_ACTION_STATE(p) == 0) {
		switch(pcontrol) {

		case P_PCTHROTTLE:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: throttling pid %d (%s)\n", p->p_pid, p->p_comm);
			break;

		case P_PCSUSP:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: suspending pid %d (%s)\n", p->p_pid, p->p_comm);
			task_suspend(p->task);
			break;

		case P_PCKILL:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
			psignal(p, SIGKILL);
			break;

		default:
			proc_unlock(p);
		}

	} else
		proc_unlock(p);

	return(PROC_RETURNED);
}
3007
3008
3009 /*
3010 * Resume a throttled or suspended process. This is an internal interface that's only
3011 * used by the user level code that presents the GUI when we run out of swap space and
3012 * hence is restricted to processes with superuser privileges.
3013 */
3014
/*
 * Undo the low-swap action previously applied to process `pid`
 * (unthrottle or task_resume).  A kill cannot be undone, so P_PCKILL
 * is logged and ignored.  Caller must either hold P_LVMRSRCOWNER or
 * be superuser.  Returns 0, ESRCH, or the suser() error.
 */
int
proc_resetpcontrol(int pid)
{
	proc_t p;
	int pcontrol;
	int error;
	proc_t self = current_proc();

	/* if the process has been validated to handle resource control or root is valid one */
	if (((self->p_lflag & P_LVMRSRCOWNER) == 0) && (error = suser(kauth_cred_get(), 0)))
		return error;

	p = proc_find(pid);
	if (p == PROC_NULL)
		return(ESRCH);

	proc_lock(p);

	pcontrol = PROC_CONTROL_STATE(p);

	/* only undo if an action was actually taken */
	if(PROC_ACTION_STATE(p) !=0) {
		switch(pcontrol) {

		case P_PCTHROTTLE:
			PROC_RESETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: unthrottling pid %d (%s)\n", p->p_pid, p->p_comm);
			break;

		case P_PCSUSP:
			PROC_RESETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: resuming pid %d (%s)\n", p->p_pid, p->p_comm);
			task_resume(p->task);
			break;

		case P_PCKILL:
			/* Huh? */
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: attempt to unkill pid %d (%s) ignored\n", p->p_pid, p->p_comm);
			break;

		default:
			proc_unlock(p);
		}

	} else
		proc_unlock(p);

	proc_rele(p);
	return(0);
}
3067
3068
3069
/*
 * Accumulator filled by proc_pcontrol_filter() during the all-proc
 * iteration in no_paging_space_action().  "pcs_" fields track
 * processes that have a low-swap control action configured, "npcs_"
 * those that do not, and "apcs_" those whose action was already taken.
 */
struct no_paging_space
{
	uint64_t	pcs_max_size;		/* largest compressed footprint among actionable procs */
	uint64_t	pcs_uniqueid;		/* uniqueid of that proc (guards pid reuse) */
	int		pcs_pid;		/* pid of that proc */
	int		pcs_proc_count;		/* number of actionable procs seen */
	uint64_t	pcs_total_size;		/* total compressed size of actionable procs */

	uint64_t	npcs_max_size;		/* largest footprint among non-actionable procs */
	uint64_t	npcs_uniqueid;		/* uniqueid of that proc */
	int		npcs_pid;		/* pid of that proc */
	int		npcs_proc_count;	/* number of non-actionable procs seen */
	uint64_t	npcs_total_size;	/* total compressed size of non-actionable procs */

	int		apcs_proc_count;	/* procs whose action was already applied */
	uint64_t	apcs_total_size;	/* their total compressed size */
};
3087
3088
/*
 * proc_iterate filter: tally p's compressed-memory footprint into the
 * no_paging_space accumulator passed via arg, tracking the biggest
 * process in each category (actionable, non-actionable, already
 * actioned).  Always returns 0 so no proc is selected for callout.
 */
static int
proc_pcontrol_filter(proc_t p, void *arg)
{
	struct no_paging_space *nps;
	uint64_t	compressed;

	nps = (struct no_paging_space *)arg;

	compressed = get_task_compressed(p->task);

	if (PROC_CONTROL_STATE(p)) {
		if (PROC_ACTION_STATE(p) == 0) {
			/* actionable, not yet acted on */
			if (compressed > nps->pcs_max_size) {
				nps->pcs_pid = p->p_pid;
				nps->pcs_uniqueid = p->p_uniqueid;
				nps->pcs_max_size = compressed;
			}
			nps->pcs_total_size += compressed;
			nps->pcs_proc_count++;
		} else {
			/* action already taken */
			nps->apcs_total_size += compressed;
			nps->apcs_proc_count++;
		}
	} else {
		/* no control action configured for this proc */
		if (compressed > nps->npcs_max_size) {
			nps->npcs_pid = p->p_pid;
			nps->npcs_uniqueid = p->p_uniqueid;
			nps->npcs_max_size = compressed;
		}
		nps->npcs_total_size += compressed;
		nps->npcs_proc_count++;

	}
	return (0);
}
3124
3125
/* proc_iterate callout that does nothing; the filter does all the work. */
static int
proc_pcontrol_null(__unused proc_t p, __unused void *arg)
{
	return(PROC_RETURNED);
}
3131
3132
3133 /*
3134 * Deal with the low on compressor pool space condition... this function
3135 * gets called when we are approaching the limits of the compressor pool or
3136 * we are unable to create a new swap file.
3137 * Since this eventually creates a memory deadlock situtation, we need to take action to free up
3138 * memory resources (both compressed and uncompressed) in order to prevent the system from hanging completely.
3139 * There are 2 categories of processes to deal with. Those that have an action
3140 * associated with them by the task itself and those that do not. Actionable
3141 * tasks can have one of three categories specified: ones that
3142 * can be killed immediately, ones that should be suspended, and ones that should
3143 * be throttled. Processes that do not have an action associated with them are normally
3144 * ignored unless they are utilizing such a large percentage of the compressor pool (currently 50%)
3145 * that only by killing them can we hope to put the system back into a usable state.
3146 */
3147
#define NO_PAGING_SPACE_DEBUG	0	/* set to 1 for per-category accounting printfs */

extern uint64_t	vm_compressor_pages_compressed(void);

/* time of the last low-swap action; used to rate-limit to one action per 5s */
struct timeval	last_no_space_action = {0, 0};
3153
/*
 * React to the compressor pool running out of space.  Rate-limited to
 * once per 5 seconds.  Scans all procs, then either (1) kills the
 * biggest non-actionable proc if it holds >50% of compressed pages, or
 * (2) applies the configured action (proc_dopcontrol) to the biggest
 * actionable proc.  Returns 1 if an actionable proc was handled, 0
 * otherwise.
 */
int
no_paging_space_action()
{
	proc_t		p;
	struct no_paging_space nps;
	struct timeval	now;

	/*
	 * Throttle how often we come through here.  Once every 5 seconds should be plenty.
	 */
	microtime(&now);

	if (now.tv_sec <= last_no_space_action.tv_sec + 5)
		return (0);

	/*
	 * Examine all processes and find the biggest (biggest is based on the number of pages this
	 * task has in the compressor pool) that has been marked to have some action
	 * taken when swap space runs out... we also find the biggest that hasn't been marked for
	 * action.
	 *
	 * If the biggest non-actionable task is over the "dangerously big" threshold (currently 50% of
	 * the total number of pages held by the compressor), we go ahead and kill it since no other task
	 * can have any real effect on the situation.  Otherwise, we go after the actionable process.
	 */
	bzero(&nps, sizeof(nps));

	proc_iterate(PROC_ALLPROCLIST, proc_pcontrol_null, (void *)NULL, proc_pcontrol_filter, (void *)&nps);

#if NO_PAGING_SPACE_DEBUG
	printf("low swap: npcs_proc_count = %d, npcs_total_size = %qd, npcs_max_size = %qd\n",
	       nps.npcs_proc_count, nps.npcs_total_size, nps.npcs_max_size);
	printf("low swap: pcs_proc_count = %d, pcs_total_size = %qd, pcs_max_size = %qd\n",
	       nps.pcs_proc_count, nps.pcs_total_size, nps.pcs_max_size);
	printf("low swap: apcs_proc_count = %d, apcs_total_size = %qd\n",
	       nps.apcs_proc_count, nps.apcs_total_size);
#endif
	if (nps.npcs_max_size > (vm_compressor_pages_compressed() * 50) / 100) {
		/*
		 * for now we'll knock out any task that has more then 50% of the pages
		 * held by the compressor
		 */
		if ((p = proc_find(nps.npcs_pid)) != PROC_NULL) {

			if (nps.npcs_uniqueid == p->p_uniqueid) {
				/*
				 * verify this is still the same process
				 * in case the proc exited and the pid got reused while
				 * we were finishing the proc_iterate and getting to this point
				 */
				last_no_space_action = now;

				printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
				psignal(p, SIGKILL);

				proc_rele(p);

				return (0);
			}

			proc_rele(p);
		}
	}

	if (nps.pcs_max_size > 0) {
		if ((p = proc_find(nps.pcs_pid)) != PROC_NULL) {

			if (nps.pcs_uniqueid == p->p_uniqueid) {
				/*
				 * verify this is still the same process
				 * in case the proc exited and the pid got reused while
				 * we were finishing the proc_iterate and getting to this point
				 */
				last_no_space_action = now;

				proc_dopcontrol(p);

				proc_rele(p);

				return (1);
			}

			proc_rele(p);
		}
	}
	last_no_space_action = now;

	printf("low swap: unable to find any eligible processes to take action on\n");

	return (0);
}
3245
/*
 * proc_trace_log system call: ask the target task to emit its trace
 * memory.  Requires PRIV_PROC_TRACE_INSPECT.  The caller-supplied
 * uniqueid must match the found proc, guarding against pid reuse.
 * Returns 0, EPERM, ENOENT (no proc / uniqueid mismatch), or EINVAL.
 */
int
proc_trace_log(__unused proc_t p, struct proc_trace_log_args *uap, __unused int *retval)
{
	int ret = 0;
	proc_t target_proc = PROC_NULL;
	pid_t target_pid = uap->pid;
	uint64_t target_uniqueid = uap->uniqueid;
	task_t target_task = NULL;

	if (priv_check_cred(kauth_cred_get(), PRIV_PROC_TRACE_INSPECT, 0)) {
		ret = EPERM;
		goto out;
	}
	target_proc = proc_find(target_pid);
	if (target_proc != PROC_NULL) {
		/* pid may have been reused: verify identity via uniqueid */
		if (target_uniqueid != proc_uniqueid(target_proc)) {
			ret = ENOENT;
			goto out;
		}

		target_task = proc_task(target_proc);
		if (task_send_trace_memory(target_task, target_pid, target_uniqueid)) {
			ret = EINVAL;
			goto out;
		}
	} else
		ret = ENOENT;

out:
	if (target_proc != PROC_NULL)
		proc_rele(target_proc);
	return (ret);
}
3279
3280 #if VM_SCAN_FOR_SHADOW_CHAIN
extern int vm_map_shadow_max(vm_map_t map);
int proc_shadow_max(void);
/*
 * Debug helper: walk all live processes and return the deepest VM
 * shadow-object chain found in any task's map.  Skips embryonic
 * (SIDL) procs and those without a task or map.
 */
int proc_shadow_max(void)
{
	int		retval, max;
	proc_t		p;
	task_t		task;
	vm_map_t	map;

	max = 0;
	proc_list_lock();
	for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
		if (p->p_stat == SIDL)
			continue;
		task = p->task;
		if (task == NULL) {
			continue;
		}
		map = get_task_map(task);
		if (map == NULL) {
			continue;
		}
		retval = vm_map_shadow_max(map);
		if (retval > max) {
			max = retval;
		}
	}
	proc_list_unlock();
	return max;
}
3311 #endif /* VM_SCAN_FOR_SHADOW_CHAIN */