]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kern_proc.c
81c86f484cdbc88bab027e662d0051de55173542
[apple/xnu.git] / bsd / kern / kern_proc.c
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)kern_proc.c 8.4 (Berkeley) 1/4/94
62 */
63 /*
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
67 * Version 2.0.
68 */
69 /* HISTORY
70 * 04-Aug-97 Umesh Vaishampayan (umeshv@apple.com)
71 * Added current_proc_EXTERNAL() function for the use of kernel
72 * lodable modules.
73 *
74 * 05-Jun-95 Mac Gillon (mgillon) at NeXT
75 * New version based on 3.3NS and 4.4
76 */
77
78
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/kernel.h>
82 #include <sys/proc_internal.h>
83 #include <sys/acct.h>
84 #include <sys/wait.h>
85 #include <sys/file_internal.h>
86 #include <sys/uio.h>
87 #include <sys/malloc.h>
88 #include <sys/lock.h>
89 #include <sys/mbuf.h>
90 #include <sys/ioctl.h>
91 #include <sys/tty.h>
92 #include <sys/signalvar.h>
93 #include <sys/syslog.h>
94 #include <sys/sysctl.h>
95 #include <sys/sysproto.h>
96 #include <sys/kauth.h>
97 #include <sys/codesign.h>
98 #include <sys/kernel_types.h>
99 #include <kern/kalloc.h>
100 #include <kern/task.h>
101 #include <kern/assert.h>
102 #include <vm/vm_protos.h>
103 #include <vm/vm_map.h> /* vm_map_switch_protect() */
104 #include <mach/task.h>
105
106 #if CONFIG_MACF
107 #include <security/mac_framework.h>
108 #endif
109
110 #include <libkern/crypto/sha1.h>
111
/*
 * Structure associated with user caching: one entry per uid that owns
 * processes, tracking the per-uid process count (see chgproccnt()).
 */
struct uidinfo {
	LIST_ENTRY(uidinfo) ui_hash;	/* hash chain linkage */
	uid_t ui_uid;			/* uid this entry describes */
	long ui_proccnt;		/* number of processes owned by ui_uid */
};
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
u_long uihash;		/* size of hash table - 1 */

/*
 * Other process lists
 */
struct pidhashhead *pidhashtbl;		/* pid -> proc hash chains */
u_long pidhash;				/* size of pid hash table - 1 */
struct pgrphashhead *pgrphashtbl;	/* pgid -> pgrp hash chains */
u_long pgrphash;			/* size of pgrp hash table - 1 */
struct sesshashhead *sesshashtbl;	/* session id -> session hash chains */
u_long sesshash;			/* size of session hash table - 1 */

struct proclist allproc;		/* all live (non-zombie) processes */
struct proclist zombproc;		/* zombie processes awaiting reap */
extern struct tty cons;
137
#if CONFIG_LCTX
/*
 * Login Context
 */
static pid_t	lastlcid = 1;	/* last login-context id handed out (presumably; allocator not in this file) */
static int	alllctx_cnt;	/* count of entries on the alllctx list */

#define	LCID_MAX	8192	/* Does this really need to be large? */
static int	maxlcid = LCID_MAX;	/* upper bound on login-context ids */

LIST_HEAD(lctxlist, lctx);
static struct lctxlist alllctx;	/* list of all login contexts */

lck_mtx_t alllctx_lock;		/* NOTE(review): presumably guards alllctx/alllctx_cnt — confirm at use sites */
lck_grp_t * lctx_lck_grp;
lck_grp_attr_t * lctx_lck_grp_attr;
lck_attr_t * lctx_lck_attr;

static void lctxinit(void);
#endif

#if DEBUG
#define __PROC_INTERNAL_DEBUG 1
#endif
/* Name to give to core files */
__private_extern__ char corefilename[MAXPATHLEN+1] = {"/cores/core.%P"};
164
165 static void orphanpg(struct pgrp *pg);
166 void proc_name_kdp(task_t t, char * buf, int size);
167 char *proc_name_address(void *p);
168
169 static proc_t proc_refinternal_locked(proc_t p);
170 static void pgrp_add(struct pgrp * pgrp, proc_t parent, proc_t child);
171 static void pgrp_remove(proc_t p);
172 static void pgrp_replace(proc_t p, struct pgrp *pgrp);
173 static void pgdelete_dropref(struct pgrp *pgrp);
174 static proc_t proc_find_zombref(int pid);
175 static void proc_drop_zombref(proc_t p);
176 extern void pg_rele_dropref(struct pgrp * pgrp);
177
178 struct fixjob_iterargs {
179 struct pgrp * pg;
180 struct session * mysession;
181 int entering;
182 };
183
184 int fixjob_callback(proc_t, void *);
185
186 /*
187 * Initialize global process hashing structures.
188 */
void
procinit(void)
{
	LIST_INIT(&allproc);
	LIST_INIT(&zombproc);
	/* hash tables are sized as fractions of the configured proc limit */
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
	sesshashtbl = hashinit(maxproc / 4, M_PROC, &sesshash);
	uihashtbl = hashinit(maxproc / 16, M_PROC, &uihash);
#if CONFIG_LCTX
	lctxinit();
#endif
}
202
203 /*
204 * Change the count associated with number of processes
205 * a given user is using. This routine protects the uihash
206 * with the list lock
207 */
int
chgproccnt(uid_t uid, int diff)
{
	struct uidinfo *uip;
	struct uidinfo *newuip = NULL;	/* entry pre-allocated outside the lock */
	struct uihashhead *uipp;
	int retval;

again:
	proc_list_lock();
	uipp = UIHASH(uid);
	/* look for an existing entry for this uid */
	for (uip = uipp->lh_first; uip != 0; uip = uip->ui_hash.le_next)
		if (uip->ui_uid == uid)
			break;
	if (uip) {
		uip->ui_proccnt += diff;
		if (uip->ui_proccnt > 0) {
			retval = uip->ui_proccnt;
			proc_list_unlock();
			goto out;
		}
		if (uip->ui_proccnt < 0)
			panic("chgproccnt: procs < 0");
		/* count reached zero: drop the entry entirely */
		LIST_REMOVE(uip, ui_hash);
		retval = 0;
		proc_list_unlock();
		FREE_ZONE(uip, sizeof(*uip), M_PROC);
		goto out;
	}
	/* no entry: a negative/zero diff for an unknown uid is a bug */
	if (diff <= 0) {
		if (diff == 0) {
			retval = 0;
			proc_list_unlock();
			goto out;
		}
		panic("chgproccnt: lost user");
	}
	/* insert the entry we allocated on a previous pass, if any */
	if (newuip != NULL) {
		uip = newuip;
		newuip = NULL;
		LIST_INSERT_HEAD(uipp, uip, ui_hash);
		uip->ui_uid = uid;
		uip->ui_proccnt = diff;
		retval = diff;
		proc_list_unlock();
		goto out;
	}
	/*
	 * Allocate with the list lock dropped (M_WAITOK may block), then
	 * retry from the top since the table may have changed meanwhile.
	 */
	proc_list_unlock();
	MALLOC_ZONE(newuip, struct uidinfo *, sizeof(*uip), M_PROC, M_WAITOK);
	if (newuip == NULL)
		panic("chgproccnt: M_PROC zone depleted");
	goto again;
out:
	/* free the pre-allocation if the retry found an existing entry */
	if (newuip != NULL)
		FREE_ZONE(newuip, sizeof(*uip), M_PROC);
	return(retval);
}
265
266 /*
267 * Is p an inferior of the current process?
268 */
269 int
270 inferior(proc_t p)
271 {
272 int retval = 0;
273
274 proc_list_lock();
275 for (; p != current_proc(); p = p->p_pptr)
276 if (p->p_pid == 0)
277 goto out;
278 retval = 1;
279 out:
280 proc_list_unlock();
281 return(retval);
282 }
283
284 /*
285 * Is p an inferior of t ?
286 */
287 int
288 isinferior(proc_t p, proc_t t)
289 {
290 int retval = 0;
291 int nchecked = 0;
292 proc_t start = p;
293
294 /* if p==t they are not inferior */
295 if (p == t)
296 return(0);
297
298 proc_list_lock();
299 for (; p != t; p = p->p_pptr) {
300 nchecked++;
301
302 /* Detect here if we're in a cycle */
303 if ((p->p_pid == 0) || (p->p_pptr == start) || (nchecked >= nprocs))
304 goto out;
305 }
306 retval = 1;
307 out:
308 proc_list_unlock();
309 return(retval);
310 }
311
312 int
313 proc_isinferior(int pid1, int pid2)
314 {
315 proc_t p = PROC_NULL;
316 proc_t t = PROC_NULL;
317 int retval = 0;
318
319 if (((p = proc_find(pid1)) != (proc_t)0 ) && ((t = proc_find(pid2)) != (proc_t)0))
320 retval = isinferior(p, t);
321
322 if (p != PROC_NULL)
323 proc_rele(p);
324 if (t != PROC_NULL)
325 proc_rele(t);
326
327 return(retval);
328 }
329
330 proc_t
331 proc_find(int pid)
332 {
333 return(proc_findinternal(pid, 0));
334 }
335
336 proc_t
337 proc_findinternal(int pid, int locked)
338 {
339 proc_t p = PROC_NULL;
340
341 if (locked == 0) {
342 proc_list_lock();
343 }
344
345 p = pfind_locked(pid);
346 if ((p == PROC_NULL) || (p != proc_refinternal_locked(p)))
347 p = PROC_NULL;
348
349 if (locked == 0) {
350 proc_list_unlock();
351 }
352
353 return(p);
354 }
355
356 int
357 proc_rele(proc_t p)
358 {
359 proc_list_lock();
360 proc_rele_locked(p);
361 proc_list_unlock();
362
363 return(0);
364 }
365
366 proc_t
367 proc_self(void)
368 {
369 struct proc * p;
370
371 p = current_proc();
372
373 proc_list_lock();
374 if (p != proc_refinternal_locked(p))
375 p = PROC_NULL;
376 proc_list_unlock();
377 return(p);
378 }
379
380
/*
 * Take a usecount reference on p.  Caller holds the proc list lock.
 * Returns p on success; PROC_NULL if p is NULL, still being created,
 * a zombie, already exited, or being drained/dead.
 */
static proc_t
proc_refinternal_locked(proc_t p)
{
	proc_t p1 = p;

	/* if process still in creation return failure */
	if ((p == PROC_NULL) || ((p->p_listflag & P_LIST_INCREATE) != 0))
		return (PROC_NULL);
	/* do not return process marked for termination */
	if ((p->p_stat != SZOMB) && ((p->p_listflag & P_LIST_EXITED) == 0) && ((p->p_listflag & (P_LIST_DRAINWAIT | P_LIST_DRAIN | P_LIST_DEAD)) == 0))
		p->p_refcount++;
	else
		p1 = PROC_NULL;

	return(p1);
}
397
/*
 * Drop a usecount reference.  Caller holds the proc list lock.
 * Wakes a drainer blocked in proc_refdrain() when the last ref goes.
 */
void
proc_rele_locked(proc_t p)
{

	if (p->p_refcount > 0) {
		p->p_refcount--;
		/* last ref gone while someone is draining: wake them */
		if ((p->p_refcount == 0) && ((p->p_listflag & P_LIST_DRAINWAIT) == P_LIST_DRAINWAIT)) {
			p->p_listflag &= ~P_LIST_DRAINWAIT;
			wakeup(&p->p_refcount);
		}
	} else
		panic("proc_rele_locked -ve ref\n");

}
412
/*
 * Look up pid and, if the process has already exited, claim the
 * exclusive right to reap it by setting P_LIST_WAITING.  Returns the
 * proc on success; PROC_NULL if the pid is unknown, the proc is still
 * in creation or alive, or another reaper already holds the claim.
 * Pair with proc_drop_zombref().
 */
static proc_t
proc_find_zombref(int pid)
{
	proc_t p1 = PROC_NULL;
	proc_t p = PROC_NULL;

	proc_list_lock();

	p = pfind_locked(pid);

	/* if process still in creation return NULL */
	if ((p == PROC_NULL) || ((p->p_listflag & P_LIST_INCREATE) != 0)) {
		proc_list_unlock();
		return (p1);
	}

	/* claim the zombie only if it has exited and no one else is reaping it */
	if (((p->p_listflag & P_LIST_EXITED) != 0) && ((p->p_listflag & P_LIST_WAITING) == 0)) {
		p->p_listflag |= P_LIST_WAITING;
		p1 = p;
	} else
		p1 = PROC_NULL;

	proc_list_unlock();

	return(p1);
}
440
441 static void
442 proc_drop_zombref(proc_t p)
443 {
444 proc_list_lock();
445 if ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
446 p->p_listflag &= ~P_LIST_WAITING;
447 wakeup(&p->p_stat);
448 }
449 proc_list_unlock();
450 }
451
452
/*
 * Block until all outstanding usecount references on p are dropped,
 * then mark the proc dead so no new references can be taken.
 */
void
proc_refdrain(proc_t p)
{

	proc_list_lock();

	p->p_listflag |= P_LIST_DRAIN;
	/* proc_rele_locked() wakes us when the count reaches zero */
	while (p->p_refcount) {
		p->p_listflag |= P_LIST_DRAINWAIT;
		msleep(&p->p_refcount, proc_list_mlock, 0, "proc_refdrain", 0) ;
	}
	p->p_listflag &= ~P_LIST_DRAIN;
	p->p_listflag |= P_LIST_DEAD;

	proc_list_unlock();


}
471
/*
 * Take a parent reference on p's parent, preventing the parent from
 * completing its child drain while the caller uses it.  Returns the
 * parent, or PROC_NULL if the parent is gone, a zombie, has finished
 * draining, or an in-progress drain outlasts 5 waits.  Pair with
 * proc_parentdropref().
 */
proc_t
proc_parentholdref(proc_t p)
{
	proc_t parent = PROC_NULL;
	proc_t pp;
	int loopcnt = 0;


	proc_list_lock();
loop:
	pp = p->p_pptr;
	/* parent missing, reaped, or fully drained: nothing to hold */
	if ((pp == PROC_NULL) || (pp->p_stat == SZOMB) || ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED))) {
		parent = PROC_NULL;
		goto out;
	}

	/* drain started but not finished: wait for it, bounded to 5 tries */
	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == P_LIST_CHILDDRSTART) {
		pp->p_listflag |= P_LIST_CHILDDRWAIT;
		msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
		loopcnt++;
		if (loopcnt == 5) {
			parent = PROC_NULL;
			goto out;
		}
		goto loop;
	}

	/* no drain in progress: safe to take the parent ref */
	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == 0) {
		pp->p_parentref++;
		parent = pp;
		goto out;
	}

out:
	proc_list_unlock();
	return(parent);
}
/*
 * Drop a parent reference taken with proc_parentholdref().  Pass
 * listlocked != 0 when the caller already holds the proc list lock.
 * Wakes a waiter in proc_childdrainstart() on the last ref.
 */
int
proc_parentdropref(proc_t p, int listlocked)
{
	if (listlocked == 0)
		proc_list_lock();

	if (p->p_parentref > 0) {
		p->p_parentref--;
		/* last parent ref gone: wake the exiting proc's drainer */
		if ((p->p_parentref == 0) && ((p->p_listflag & P_LIST_PARENTREFWAIT) == P_LIST_PARENTREFWAIT)) {
			p->p_listflag &= ~P_LIST_PARENTREFWAIT;
			wakeup(&p->p_parentref);
		}
	} else
		panic("proc_parentdropref -ve ref\n");
	if (listlocked == 0)
		proc_list_unlock();

	return(0);
}
528
/*
 * Begin draining p's child references: mark the drain started and
 * sleep until every parent ref (proc_parentholdref()) is dropped.
 * Sleeps on proc_list_mlock, so the caller holds the proc list lock.
 */
void
proc_childdrainstart(proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_CHILDDRSTART) == P_LIST_CHILDDRSTART)
		panic("proc_childdrainstart: childdrain already started\n");
#endif
	p->p_listflag |= P_LIST_CHILDDRSTART;
	/* wait for all that hold parentrefs to drop */
	while (p->p_parentref > 0) {
		p->p_listflag |= P_LIST_PARENTREFWAIT;
		msleep(&p->p_parentref, proc_list_mlock, 0, "proc_childdrainstart", 0) ;
	}
}
543
544
545 void
546 proc_childdrainend(proc_t p)
547 {
548 #if __PROC_INTERNAL_DEBUG
549 if (p->p_childrencnt > 0)
550 panic("exiting: children stil hanging around\n");
551 #endif
552 p->p_listflag |= P_LIST_CHILDDRAINED;
553 if ((p->p_listflag & (P_LIST_CHILDLKWAIT |P_LIST_CHILDDRWAIT)) != 0) {
554 p->p_listflag &= ~(P_LIST_CHILDLKWAIT |P_LIST_CHILDDRWAIT);
555 wakeup(&p->p_childrencnt);
556 }
557 }
558
/*
 * Debug-only sanity checks run while freeing a proc: panic if it is
 * still hashed or has outstanding children, usecount refs, or parent
 * refs.  No-op in non-debug builds.
 */
void
proc_checkdeadrefs(__unused proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INHASH) != 0)
		panic("proc being freed and still in hash %p: %u\n", p, p->p_listflag);
	if (p->p_childrencnt != 0)
		panic("proc being freed and pending children cnt %p:%d\n", p, p->p_childrencnt);
	if (p->p_refcount != 0)
		panic("proc being freed and pending refcount %p:%d\n", p, p->p_refcount);
	if (p->p_parentref != 0)
		panic("proc being freed and pending parentrefs %p:%d\n", p, p->p_parentref);
#endif
}
573
574 int
575 proc_pid(proc_t p)
576 {
577 return(p->p_pid);
578 }
579
580 int
581 proc_ppid(proc_t p)
582 {
583 return(p->p_ppid);
584 }
585
586 int
587 proc_selfpid(void)
588 {
589 proc_t p = current_proc();
590 return(p->p_pid);
591 }
592
593 int
594 proc_selfppid(void)
595 {
596 proc_t p = current_proc();
597 return(p->p_ppid);
598 }
599
/*
 * Return a referenced pointer to p's parent (caller must proc_rele()),
 * or PROC_NULL.  Sleeps through the window in which the parent has
 * exited but not yet finished draining its children.
 */
proc_t
proc_parent(proc_t p)
{
	proc_t parent;
	proc_t pp;

	proc_list_lock();
loop:
	pp = p->p_pptr;
	parent = proc_refinternal_locked(pp);
	/* parent exited but child drain not complete: wait and retry */
	if ((parent == PROC_NULL) && (pp != PROC_NULL) && (pp->p_stat != SZOMB) && ((pp->p_listflag & P_LIST_EXITED) != 0) && ((pp->p_listflag & P_LIST_CHILDDRAINED)== 0)){
		pp->p_listflag |= P_LIST_CHILDLKWAIT;
		msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
		goto loop;
	}
	proc_list_unlock();
	return(parent);
}
618
619
620 void
621 proc_name(int pid, char * buf, int size)
622 {
623 proc_t p;
624
625 if ((p = proc_find(pid)) != PROC_NULL) {
626 strlcpy(buf, &p->p_comm[0], size);
627 proc_rele(p);
628 }
629 }
630
631 void
632 proc_name_kdp(task_t t, char * buf, int size)
633 {
634 proc_t p = get_bsdtask_info(t);
635
636 if (p != PROC_NULL)
637 strlcpy(buf, &p->p_comm[0], size);
638 }
639
640 char *
641 proc_name_address(void *p)
642 {
643 return &((proc_t)p)->p_comm[0];
644 }
645
646 void
647 proc_selfname(char * buf, int size)
648 {
649 proc_t p;
650
651 if ((p = current_proc())!= (proc_t)0) {
652 strlcpy(buf, &p->p_comm[0], size);
653 }
654 }
655
656 void
657 proc_signal(int pid, int signum)
658 {
659 proc_t p;
660
661 if ((p = proc_find(pid)) != PROC_NULL) {
662 psignal(p, signum);
663 proc_rele(p);
664 }
665 }
666
667 int
668 proc_issignal(int pid, sigset_t mask)
669 {
670 proc_t p;
671 int error=0;
672
673 if ((p = proc_find(pid)) != PROC_NULL) {
674 error = proc_pendingsignals(p, mask);
675 proc_rele(p);
676 }
677
678 return(error);
679 }
680
681 int
682 proc_noremotehang(proc_t p)
683 {
684 int retval = 0;
685
686 if (p)
687 retval = p->p_flag & P_NOREMOTEHANG;
688 return(retval? 1: 0);
689
690 }
691
692 int
693 proc_exiting(proc_t p)
694 {
695 int retval = 0;
696
697 if (p)
698 retval = p->p_lflag & P_LEXIT;
699 return(retval? 1: 0);
700 }
701
702 int
703 proc_forcequota(proc_t p)
704 {
705 int retval = 0;
706
707 if (p)
708 retval = p->p_flag & P_FORCEQUOTA;
709 return(retval? 1: 0);
710
711 }
712
713 int
714 proc_tbe(proc_t p)
715 {
716 int retval = 0;
717
718 if (p)
719 retval = p->p_flag & P_TBE;
720 return(retval? 1: 0);
721
722 }
723
724 int
725 proc_suser(proc_t p)
726 {
727 kauth_cred_t my_cred;
728 int error;
729
730 my_cred = kauth_cred_proc_ref(p);
731 error = suser(my_cred, &p->p_acflag);
732 kauth_cred_unref(&my_cred);
733 return(error);
734 }
735
736 /*
737 * Obtain the first thread in a process
738 *
739 * XXX This is a bad thing to do; it exists predominantly to support the
740 * XXX use of proc_t's in places that should really be using
741 * XXX thread_t's instead. This maintains historical behaviour, but really
742 * XXX needs an audit of the context (proxy vs. not) to clean up.
743 */
744 thread_t
745 proc_thread(proc_t proc)
746 {
747 uthread_t uth = TAILQ_FIRST(&proc->p_uthlist);
748
749 if (uth != NULL)
750 return(uth->uu_context.vc_thread);
751
752 return(NULL);
753 }
754
755 kauth_cred_t
756 proc_ucred(proc_t p)
757 {
758 return(p->p_ucred);
759 }
760
761 struct uthread *
762 current_uthread()
763 {
764 thread_t th = current_thread();
765
766 return((struct uthread *)get_bsdthread_info(th));
767 }
768
769
770 int
771 proc_is64bit(proc_t p)
772 {
773 return(IS_64BIT_PROCESS(p));
774 }
775
776 int
777 proc_pidversion(proc_t p)
778 {
779 return(p->p_idversion);
780 }
781
/*
 * Fetch the cdhash for p's executable (text vnode at p_textoff) into
 * cdhash; returns the vn_getcdhash() result.
 */
int
proc_getcdhash(proc_t p, unsigned char *cdhash)
{
	return vn_getcdhash(p->p_textvp, p->p_textoff, cdhash);
}
787
788 void
789 bsd_set_dependency_capable(task_t task)
790 {
791 proc_t p = get_bsdtask_info(task);
792
793 if (p) {
794 OSBitOrAtomic(P_DEPENDENCY_CAPABLE, &p->p_flag);
795 }
796 }
797
798
799 int
800 IS_64BIT_PROCESS(proc_t p)
801 {
802 if (p && (p->p_flag & P_LP64))
803 return(1);
804 else
805 return(0);
806 }
807
808 /*
809 * Locate a process by number
810 */
/*
 * Hash lookup of a proc by pid; pid 0 maps to the kernel proc.
 * Caller holds the proc list lock.  Returns an UNreferenced pointer
 * (or NULL); callers wanting a ref use proc_refinternal_locked().
 */
proc_t
pfind_locked(pid_t pid)
{
	proc_t p;
#if DEBUG
	proc_t q;
#endif

	if (!pid)
		return (kernproc);

	for (p = PIDHASH(pid)->lh_first; p != 0; p = p->p_hash.le_next) {
		if (p->p_pid == pid) {
#if DEBUG
			/* debug builds verify the pid appears only once in the chain */
			for (q = p->p_hash.le_next; q != 0; q = q->p_hash.le_next) {
				if ((p !=q) && (q->p_pid == pid))
					panic("two procs with same pid %p:%p:%d:%d\n", p, q, p->p_pid, q->p_pid);
			}
#endif
			return (p);
		}
	}
	return (NULL);
}
835
836 /*
837 * Locate a zombie by PID
838 */
/*
 * Locate a zombie by PID.
 * NOTE(review): the pointer is returned after the proc list lock is
 * dropped and without any reference taken — callers must tolerate the
 * zombie being reaped concurrently; confirm usage at call sites.
 */
__private_extern__ proc_t
pzfind(pid_t pid)
{
	proc_t p;


	proc_list_lock();

	for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next)
		if (p->p_pid == pid)
			break;

	proc_list_unlock();

	return (p);
}
855
856 /*
857 * Locate a process group by number
858 */
859
/*
 * Look up a process group by id and take a reference on it.  Returns
 * PGRP_NULL if absent or already marked for termination.  The caller
 * owns the reference and must pg_rele() it.
 */
struct pgrp *
pgfind(pid_t pgid)
{
	struct pgrp * pgrp;

	proc_list_lock();
	pgrp = pgfind_internal(pgid);
	if ((pgrp == NULL) || ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) != 0))
		pgrp = PGRP_NULL;
	else
		pgrp->pg_refcount++;
	proc_list_unlock();
	return(pgrp);
}
874
875
876
877 struct pgrp *
878 pgfind_internal(pid_t pgid)
879 {
880 struct pgrp *pgrp;
881
882 for (pgrp = PGRPHASH(pgid)->lh_first; pgrp != 0; pgrp = pgrp->pg_hash.le_next)
883 if (pgrp->pg_id == pgid)
884 return (pgrp);
885 return (NULL);
886 }
887
888 void
889 pg_rele(struct pgrp * pgrp)
890 {
891 if(pgrp == PGRP_NULL)
892 return;
893 pg_rele_dropref(pgrp);
894 }
895
/*
 * Drop a process-group reference.  If this is the last reference and
 * the group is marked for termination, hand the final ref to
 * pgdelete_dropref(), which tears the group down and drops it.
 */
void
pg_rele_dropref(struct pgrp * pgrp)
{
	proc_list_lock();
	if ((pgrp->pg_refcount == 1) && ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) == PGRP_FLAG_TERMINATE)) {
		proc_list_unlock();
		pgdelete_dropref(pgrp);
		return;
	}

	pgrp->pg_refcount--;
	proc_list_unlock();
}
909
910 struct session *
911 session_find_internal(pid_t sessid)
912 {
913 struct session *sess;
914
915 for (sess = SESSHASH(sessid)->lh_first; sess != 0; sess = sess->s_hash.le_next)
916 if (sess->s_sid == sessid)
917 return (sess);
918 return (NULL);
919 }
920
921
922 /*
923 * Make a new process ready to become a useful member of society by making it
924 * visible in all the right places and initialize its own lists to empty.
925 *
926 * Parameters: parent The parent of the process to insert
927 * child The child process to insert
928 *
929 * Returns: (void)
930 *
931 * Notes: Insert a child process into the parents process group, assign
932 * the child the parent process pointer and PPID of the parent,
933 * place it on the parents p_children list as a sibling,
934 * initialize its own child list, place it in the allproc list,
935 * insert it in the proper hash bucket, and initialize its
936 * event list.
937 */
void
pinsertchild(proc_t parent, proc_t child)
{
	struct pgrp * pg;

	/* fresh child: empty child list and event list, linked to parent */
	LIST_INIT(&child->p_children);
	TAILQ_INIT(&child->p_evlist);
	child->p_pptr = parent;
	child->p_ppid = parent->p_pid;

	/* place the child in the parent's process group */
	pg = proc_pgrp(parent);
	pgrp_add(pg, parent, child);
	pg_rele(pg);

	proc_list_lock();
	parent->p_childrencnt++;
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);

	LIST_INSERT_HEAD(&allproc, child, p_list);
	/* mark the completion of proc creation */
	child->p_listflag &= ~P_LIST_INCREATE;

	proc_list_unlock();

}
963
964 /*
965 * Move p to a new or existing process group (and session)
966 *
967 * Returns: 0 Success
968 * ESRCH No such process
969 */
int
enterpgrp(proc_t p, pid_t pgid, int mksess)
{
	struct pgrp *pgrp;
	struct pgrp *mypgrp;
	struct session * procsp;

	/* pgfind/proc_pgrp/proc_session each return a referenced object
	 * (or NULL); all refs are released before returning. */
	pgrp = pgfind(pgid);
	mypgrp = proc_pgrp(p);
	procsp = proc_session(p);

#if DIAGNOSTIC
	if (pgrp != NULL && mksess)	/* firewalls */
		panic("enterpgrp: setsid into non-empty pgrp");
	if (SESS_LEADER(p, procsp))
		panic("enterpgrp: session leader attempted setpgrp");
#endif
	if (pgrp == PGRP_NULL) {
		pid_t savepid = p->p_pid;
		proc_t np = PROC_NULL;
		/*
		 * new process group
		 */
#if DIAGNOSTIC
		if (p->p_pid != pgid)
			panic("enterpgrp: new pgrp and pid != pgid");
#endif
		MALLOC_ZONE(pgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP,
		    M_WAITOK);
		if (pgrp == NULL)
			panic("enterpgrp: M_PGRP zone depleted");
		/* re-verify p is still alive after the blocking allocation */
		if ((np = proc_find(savepid)) == NULL || np != p) {
			if (np != PROC_NULL)
				proc_rele(np);
			if (mypgrp != PGRP_NULL)
				pg_rele(mypgrp);
			if (procsp != SESSION_NULL)
				session_rele(procsp);
			FREE_ZONE(pgrp, sizeof(struct pgrp), M_PGRP);
			return (ESRCH);
		}
		proc_rele(np);
		if (mksess) {
			struct session *sess;

			/*
			 * new session
			 */
			MALLOC_ZONE(sess, struct session *,
			    sizeof(struct session), M_SESSION, M_WAITOK);
			if (sess == NULL)
				panic("enterpgrp: M_SESSION zone depleted");
			sess->s_leader = p;
			sess->s_sid = p->p_pid;
			sess->s_count = 1;
			sess->s_ttyvp = NULL;
			sess->s_ttyp = TTY_NULL;
			sess->s_flags = 0;
			sess->s_listflags = 0;
			sess->s_ttypgrpid = NO_PID;
#ifdef CONFIG_EMBEDDED
			lck_mtx_init(&sess->s_mlock, proc_lck_grp, proc_lck_attr);
#else
			lck_mtx_init(&sess->s_mlock, proc_mlock_grp, proc_lck_attr);
#endif
			/* inherit the login name from the old session */
			bcopy(procsp->s_login, sess->s_login,
			    sizeof(sess->s_login));
			/* a new session has no controlling terminal */
			OSBitAndAtomic(~((uint32_t)P_CONTROLT), &p->p_flag);
			proc_list_lock();
			LIST_INSERT_HEAD(SESSHASH(sess->s_sid), sess, s_hash);
			proc_list_unlock();
			pgrp->pg_session = sess;
#if DIAGNOSTIC
			if (p != current_proc())
				panic("enterpgrp: mksession and p != curproc");
#endif
		} else {
			/* new pgrp joins p's existing session; take a ref */
			proc_list_lock();
			pgrp->pg_session = procsp;

			if ((pgrp->pg_session->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
				panic("enterpgrp: providing ref to terminating session ");
			pgrp->pg_session->s_count++;
			proc_list_unlock();
		}
		pgrp->pg_id = pgid;
#ifdef CONFIG_EMBEDDED
		lck_mtx_init(&pgrp->pg_mlock, proc_lck_grp, proc_lck_attr);
#else
		lck_mtx_init(&pgrp->pg_mlock, proc_mlock_grp, proc_lck_attr);
#endif
		LIST_INIT(&pgrp->pg_members);
		pgrp->pg_membercnt = 0;
		pgrp->pg_jobc = 0;
		proc_list_lock();
		pgrp->pg_refcount = 1;
		pgrp->pg_listflags = 0;
		LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
		proc_list_unlock();
	} else if (pgrp == mypgrp) {
		/* already in the requested group: drop both refs and return */
		pg_rele(pgrp);
		if (mypgrp != NULL)
			pg_rele(mypgrp);
		if (procsp != SESSION_NULL)
			session_rele(procsp);
		return (0);
	}

	if (procsp != SESSION_NULL)
		session_rele(procsp);
	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	fixjobc(p, mypgrp, 0);

	if(mypgrp != PGRP_NULL)
		pg_rele(mypgrp);
	pgrp_replace(p, pgrp);
	pg_rele(pgrp);

	return(0);
}
1095
1096 /*
1097 * remove process from process group
1098 */
1099 int
1100 leavepgrp(proc_t p)
1101 {
1102
1103 pgrp_remove(p);
1104 return (0);
1105 }
1106
1107 /*
1108 * delete a process group
1109 */
/*
 * Consume the final reference on a terminating process group: clear
 * any controlling-tty back pointers, drop the group's session ref
 * (freeing the session if it was the last), and free the group.
 */
static void
pgdelete_dropref(struct pgrp *pgrp)
{
	struct tty *ttyp;
	int emptypgrp = 1;
	struct session *sessp;


	pgrp_lock(pgrp);
	if (pgrp->pg_membercnt != 0) {
		emptypgrp = 0;
	}
	pgrp_unlock(pgrp);

	proc_list_lock();
	pgrp->pg_refcount--;
	/* bail if members (re)appeared: the group is still in use */
	if ((emptypgrp == 0) || (pgrp->pg_membercnt != 0)) {
		proc_list_unlock();
		return;
	}

	pgrp->pg_listflags |= PGRP_FLAG_TERMINATE;

	/* other refs remain; the last pg_rele_dropref() will re-enter here */
	if (pgrp->pg_refcount > 0) {
		proc_list_unlock();
		return;
	}

	pgrp->pg_listflags |= PGRP_FLAG_DEAD;
	LIST_REMOVE(pgrp, pg_hash);

	proc_list_unlock();

	/* detach the group from its terminal, if it was the foreground pgrp */
	ttyp = SESSION_TP(pgrp->pg_session);
	if (ttyp != TTY_NULL) {
		if (ttyp->t_pgrp == pgrp) {
			tty_lock(ttyp);
			/* Re-check after acquiring the lock */
			if (ttyp->t_pgrp == pgrp) {
				ttyp->t_pgrp = NULL;
				pgrp->pg_session->s_ttypgrpid = NO_PID;
			}
			tty_unlock(ttyp);
		}
	}

	proc_list_lock();

	sessp = pgrp->pg_session;
	if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
		panic("pg_deleteref: manipulating refs of already terminating session");
	if (--sessp->s_count == 0) {
		/* last session ref: unhash, detach from tty, and free it */
		if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
			panic("pg_deleteref: terminating already terminated session");
		sessp->s_listflags |= S_LIST_TERM;
		ttyp = SESSION_TP(sessp);
		LIST_REMOVE(sessp, s_hash);
		proc_list_unlock();
		if (ttyp != TTY_NULL) {
			tty_lock(ttyp);
			if (ttyp->t_session == sessp)
				ttyp->t_session = NULL;
			tty_unlock(ttyp);
		}
		proc_list_lock();
		sessp->s_listflags |= S_LIST_DEAD;
		if (sessp->s_count != 0)
			panic("pg_deleteref: freeing session in use");
		proc_list_unlock();
#ifdef CONFIG_EMBEDDED
		lck_mtx_destroy(&sessp->s_mlock, proc_lck_grp);
#else
		lck_mtx_destroy(&sessp->s_mlock, proc_mlock_grp);
#endif
		FREE_ZONE(sessp, sizeof(struct session), M_SESSION);
	} else
		proc_list_unlock();
#ifdef CONFIG_EMBEDDED
	lck_mtx_destroy(&pgrp->pg_mlock, proc_lck_grp);
#else
	lck_mtx_destroy(&pgrp->pg_mlock, proc_mlock_grp);
#endif
	FREE_ZONE(pgrp, sizeof(*pgrp), M_PGRP);
}
1194
1195
1196 /*
1197 * Adjust pgrp jobc counters when specified process changes process group.
1198 * We count the number of processes in each process group that "qualify"
1199 * the group for terminal job control (those with a parent in a different
1200 * process group of the same session). If that count reaches zero, the
1201 * process group becomes orphaned. Check both the specified process'
1202 * process group and that of its children.
1203 * entering == 0 => p is leaving specified group.
1204 * entering == 1 => p is entering specified group.
1205 */
/*
 * Per-child callback for fixjobc()'s children walk: if the child's
 * process group differs from the target group but shares its session,
 * adjust that group's job-control count, orphaning the group when the
 * count drops to zero.
 */
int
fixjob_callback(proc_t p, void * arg)
{
	struct fixjob_iterargs *fp;
	struct pgrp * pg, *hispg;
	struct session * mysession, *hissess;
	int entering;

	fp = (struct fixjob_iterargs *)arg;
	pg = fp->pg;
	mysession = fp->mysession;
	entering = fp->entering;

	hispg = proc_pgrp(p);
	hissess = proc_session(p);

	if ((hispg != pg) &&
	    (hissess == mysession)) {
		pgrp_lock(hispg);
		if (entering) {
			hispg->pg_jobc++;
			pgrp_unlock(hispg);
		} else if (--hispg->pg_jobc == 0) {
			/* group lost its last qualifying member */
			pgrp_unlock(hispg);
			orphanpg(hispg);
		} else
			pgrp_unlock(hispg);
	}
	if (hissess != SESSION_NULL)
		session_rele(hissess);
	if (hispg != PGRP_NULL)
		pg_rele(hispg);

	return(PROC_RETURNED);
}
1241
/*
 * Recompute job-control eligibility counts when p enters (entering=1)
 * or leaves (entering=0) pgrp.  See the block comment above for the
 * counting rules.
 */
void
fixjobc(proc_t p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp = PGRP_NULL;
	struct session *hissess = SESSION_NULL;
	struct session *mysession = pgrp->pg_session;
	proc_t parent;
	struct fixjob_iterargs fjarg;

	parent = proc_parent(p);
	if (parent != PROC_NULL) {
		hispgrp = proc_pgrp(parent);
		hissess = proc_session(parent);
		proc_rele(parent);
	}


	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	if ((hispgrp != pgrp) &&
	    (hissess == mysession)) {
		pgrp_lock(pgrp);
		if (entering) {
			pgrp->pg_jobc++;
			pgrp_unlock(pgrp);
		}else if (--pgrp->pg_jobc == 0) {
			/* count hit zero: the group is now orphaned */
			pgrp_unlock(pgrp);
			orphanpg(pgrp);
		} else
			pgrp_unlock(pgrp);
	}

	if (hissess != SESSION_NULL)
		session_rele(hissess);
	if (hispgrp != PGRP_NULL)
		pg_rele(hispgrp);

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	fjarg.pg = pgrp;
	fjarg.mysession = mysession;
	fjarg.entering = entering;
	proc_childrenwalk(p, fixjob_callback, &fjarg);
}
1291
1292 /*
1293 * A process group has become orphaned;
1294 * if there are any stopped processes in the group,
1295 * hang-up all process in that group.
1296 */
1297 static void
1298 orphanpg(struct pgrp * pgrp)
1299 {
1300 proc_t p;
1301 pid_t * pid_list;
1302 int count, pidcount, i, alloc_count;
1303
1304 if (pgrp == PGRP_NULL)
1305 return;
1306 count = 0;
1307 pgrp_lock(pgrp);
1308 for (p = pgrp->pg_members.lh_first; p != 0; p = p->p_pglist.le_next) {
1309 if (p->p_stat == SSTOP) {
1310 for (p = pgrp->pg_members.lh_first; p != 0;
1311 p = p->p_pglist.le_next)
1312 count++;
1313 break; /* ??? stops after finding one.. */
1314 }
1315 }
1316 pgrp_unlock(pgrp);
1317
1318 count += 20;
1319 if (count > hard_maxproc)
1320 count = hard_maxproc;
1321 alloc_count = count * sizeof(pid_t);
1322 pid_list = (pid_t *)kalloc(alloc_count);
1323 bzero(pid_list, alloc_count);
1324
1325 pidcount = 0;
1326 pgrp_lock(pgrp);
1327 for (p = pgrp->pg_members.lh_first; p != 0;
1328 p = p->p_pglist.le_next) {
1329 if (p->p_stat == SSTOP) {
1330 for (p = pgrp->pg_members.lh_first; p != 0;
1331 p = p->p_pglist.le_next) {
1332 pid_list[pidcount] = p->p_pid;
1333 pidcount++;
1334 if (pidcount >= count)
1335 break;
1336 }
1337 break; /* ??? stops after finding one.. */
1338 }
1339 }
1340 pgrp_unlock(pgrp);
1341
1342 if (pidcount == 0)
1343 goto out;
1344
1345
1346 for (i = 0; i< pidcount; i++) {
1347 /* No handling or proc0 */
1348 if (pid_list[i] == 0)
1349 continue;
1350 p = proc_find(pid_list[i]);
1351 if (p) {
1352 proc_transwait(p, 0);
1353 pt_setrunnable(p);
1354 psignal(p, SIGHUP);
1355 psignal(p, SIGCONT);
1356 proc_rele(p);
1357 }
1358 }
1359 out:
1360 kfree(pid_list, alloc_count);
1361 return;
1362 }
1363
1364
1365
1366 /* XXX should be __private_extern__ */
1367 int
1368 proc_is_classic(proc_t p)
1369 {
1370 return (p->p_flag & P_TRANSLATED) ? 1 : 0;
1371 }
1372
1373 /* XXX Why does this function exist? Need to kill it off... */
1374 proc_t
1375 current_proc_EXTERNAL(void)
1376 {
1377 return (current_proc());
1378 }
1379
1380 /*
1381 * proc_core_name(name, uid, pid)
1382 * Expand the name described in corefilename, using name, uid, and pid.
1383 * corefilename is a printf-like string, with three format specifiers:
1384 * %N name of process ("name")
1385 * %P process id (pid)
1386 * %U user id (uid)
1387 * For example, "%N.core" is the default; they can be disabled completely
1388 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
1389 * This is controlled by the sysctl variable kern.corefile (see above).
1390 */
1391 __private_extern__ int
1392 proc_core_name(const char *name, uid_t uid, pid_t pid, char *cf_name,
1393 size_t cf_name_len)
1394 {
1395 const char *format, *appendstr;
1396 char id_buf[11]; /* Buffer for pid/uid -- max 4B */
1397 size_t i, l, n;
1398
1399 if (cf_name == NULL)
1400 goto toolong;
1401
1402 format = corefilename;
1403 for (i = 0, n = 0; n < cf_name_len && format[i]; i++) {
1404 switch (format[i]) {
1405 case '%': /* Format character */
1406 i++;
1407 switch (format[i]) {
1408 case '%':
1409 appendstr = "%";
1410 break;
1411 case 'N': /* process name */
1412 appendstr = name;
1413 break;
1414 case 'P': /* process id */
1415 snprintf(id_buf, sizeof(id_buf), "%u", pid);
1416 appendstr = id_buf;
1417 break;
1418 case 'U': /* user id */
1419 snprintf(id_buf, sizeof(id_buf), "%u", uid);
1420 appendstr = id_buf;
1421 break;
1422 default:
1423 appendstr = "";
1424 log(LOG_ERR,
1425 "Unknown format character %c in `%s'\n",
1426 format[i], format);
1427 }
1428 l = strlen(appendstr);
1429 if ((n + l) >= cf_name_len)
1430 goto toolong;
1431 bcopy(appendstr, cf_name + n, l);
1432 n += l;
1433 break;
1434 default:
1435 cf_name[n++] = format[i];
1436 }
1437 }
1438 if (format[i] != '\0')
1439 goto toolong;
1440 return (0);
1441 toolong:
1442 log(LOG_ERR, "pid %ld (%s), uid (%u): corename is too long\n",
1443 (long)pid, name, (uint32_t)uid);
1444 return (1);
1445 }
1446
1447 #if CONFIG_LCTX
1448
/*
 * One-time initialization of the login-context subsystem:
 * empty the global lctx list and set up its lock group/attributes.
 */
static void
lctxinit(void)
{
	LIST_INIT(&alllctx);
	alllctx_cnt = 0;

	/* allocate lctx lock group attribute and group */
	lctx_lck_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_attr_setstat(lctx_lck_grp_attr);

	lctx_lck_grp = lck_grp_alloc_init("lctx", lctx_lck_grp_attr);
	/* Allocate lctx lock attribute */
	lctx_lck_attr = lck_attr_alloc_init();

	lck_mtx_init(&alllctx_lock, lctx_lck_grp, lctx_lck_attr);
}
1465
1466 /*
1467 * Locate login context by number.
1468 */
/*
 * Locate login context by number.
 *
 * Returns the matching lctx LOCKED (caller must LCTX_UNLOCK it),
 * or NULL if no context with that id exists.
 */
struct lctx *
lcfind(pid_t lcid)
{
	struct lctx *l;

	ALLLCTX_LOCK;
	LIST_FOREACH(l, &alllctx, lc_list) {
		if (l->lc_id == lcid) {
			/* Lock before dropping the list lock so the entry
			 * cannot be torn down under the caller. */
			LCTX_LOCK(l);
			break;
		}
	}
	ALLLCTX_UNLOCK;
	/* l is NULL here when the list was exhausted without a match. */
	return (l);
}
1484
/*
 * Advance lastlcid to the next candidate login-context id,
 * wrapping back to 1 once maxlcid is exceeded.
 */
#define LCID_INC				\
	do {					\
		lastlcid++;			\
		if (lastlcid > maxlcid)	\
			lastlcid = 1;		\
	} while (0)				\

1491
/*
 * Allocate a new login context with an unused id and insert it on
 * the global list. Returns the new (unlocked) lctx.
 */
struct lctx *
lccreate(void)
{
	struct lctx *l;
	pid_t newlcid;

	/* Not very efficient but this isn't a common operation. */
	while ((l = lcfind(lastlcid)) != NULL) {
		/* lcfind() returns the entry locked; release and try next id. */
		LCTX_UNLOCK(l);
		LCID_INC;
	}
	newlcid = lastlcid;
	LCID_INC;

	/* M_WAITOK: allocation blocks rather than failing. */
	MALLOC(l, struct lctx *, sizeof(struct lctx), M_LCTX, M_WAITOK|M_ZERO);
	l->lc_id = newlcid;
	LIST_INIT(&l->lc_members);
	lck_mtx_init(&l->lc_mtx, lctx_lck_grp, lctx_lck_attr);
#if CONFIG_MACF
	l->lc_label = mac_lctx_label_alloc();
#endif
	ALLLCTX_LOCK;
	LIST_INSERT_HEAD(&alllctx, l, lc_list);
	alllctx_cnt++;
	ALLLCTX_UNLOCK;

	return (l);
}
1520
1521 /*
1522 * Call with proc protected (either by being invisible
1523 * or by having the all-login-context lock held) and
1524 * the lctx locked.
1525 *
1526 * Will unlock lctx on return.
1527 */
/*
 * Add process p to login context l and notify MAC policies.
 *
 * Call with proc protected (either by being invisible
 * or by having the all-login-context lock held) and
 * the lctx locked.
 *
 * Will unlock lctx on return.
 */
void
enterlctx (proc_t p, struct lctx *l, __unused int create)
{
	if (l == NULL)
		return;

	p->p_lctx = l;
	LIST_INSERT_HEAD(&l->lc_members, p, p_lclist);
	l->lc_mc++;	/* member count */

#if CONFIG_MACF
	/* create distinguishes "made this context" from "joined it". */
	if (create)
		mac_lctx_notify_create(p, l);
	else
		mac_lctx_notify_join(p, l);
#endif
	LCTX_UNLOCK(l);

	return;
}
1548
1549 /*
1550 * Remove process from login context (if any). Called with p protected by
1551 * the alllctx lock.
1552 */
/*
 * Remove process from login context (if any). Called with p protected by
 * the alllctx lock.
 *
 * Frees the context when the last member leaves.
 */
void
leavelctx (proc_t p)
{
	struct lctx *l;

	if (p->p_lctx == NULL) {
		return;
	}

	LCTX_LOCK(p->p_lctx);
	l = p->p_lctx;
	p->p_lctx = NULL;
	LIST_REMOVE(p, p_lclist);
	l->lc_mc--;
#if CONFIG_MACF
	mac_lctx_notify_leave(p, l);
#endif
	if (LIST_EMPTY(&l->lc_members)) {
		/* Last member left: unhook from the global list and destroy. */
		LIST_REMOVE(l, lc_list);
		alllctx_cnt--;
		LCTX_UNLOCK(l);
		lck_mtx_destroy(&l->lc_mtx, lctx_lck_grp);
#if CONFIG_MACF
		mac_lctx_label_free(l->lc_label);
		l->lc_label = NULL;
#endif
		FREE(l, M_LCTX);
	} else {
		LCTX_UNLOCK(l);
	}
	return;
}
1585
/*
 * sysctl handler for kern.lctx: either dump the whole login-context
 * table (KERN_LCTX_ALL) or look up one entry by id (KERN_LCTX_LCID).
 */
static int
sysctl_kern_lctx SYSCTL_HANDLER_ARGS
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct kinfo_lctx kil;
	struct lctx *l;
	int error;

	error = 0;

	switch (oidp->oid_number) {
	case KERN_LCTX_ALL:
		ALLLCTX_LOCK;
		/* Request for size. */
		if (!req->oldptr) {
			/* +1 entry of slack for contexts created meanwhile. */
			error = SYSCTL_OUT(req, 0,
				sizeof(struct kinfo_lctx) * (alllctx_cnt + 1));
			goto out;
		}
		break;

	case KERN_LCTX_LCID:
		/* No space */
		if (req->oldlen < sizeof(struct kinfo_lctx))
			return (ENOMEM);
		/* No argument */
		if (namelen != 1)
			return (EINVAL);
		/* No login context */
		l = lcfind((pid_t)name[0]);
		if (l == NULL)
			return (ENOENT);
		/* lcfind() returned the entry locked; copy then unlock. */
		kil.id = l->lc_id;
		kil.mc = l->lc_mc;
		LCTX_UNLOCK(l);
		return (SYSCTL_OUT(req, (caddr_t)&kil, sizeof(kil)));

	default:
		return (EINVAL);
	}

	/* Provided buffer is too small. */
	if (req->oldlen < (sizeof(struct kinfo_lctx) * alllctx_cnt)) {
		error = ENOMEM;
		goto out;
	}

	/* KERN_LCTX_ALL: emit one kinfo_lctx per context, list lock held. */
	LIST_FOREACH(l, &alllctx, lc_list) {
		LCTX_LOCK(l);
		kil.id = l->lc_id;
		kil.mc = l->lc_mc;
		LCTX_UNLOCK(l);
		error = SYSCTL_OUT(req, (caddr_t)&kil, sizeof(kil));
		if (error)
			break;
	}
out:
	ALLLCTX_UNLOCK;

	return (error);
}
1648
SYSCTL_NODE(_kern, KERN_LCTX, lctx, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Login Context");

/* kern.lctx.all: dump the entire login-context table (read-only). */
SYSCTL_PROC(_kern_lctx, KERN_LCTX_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT,
	    0, 0, sysctl_kern_lctx, "S,lctx",
	    "Return entire login context table");
/* kern.lctx.lcid.<id>: look up a single login context by id. */
SYSCTL_NODE(_kern_lctx, KERN_LCTX_LCID, lcid, CTLFLAG_RD,
	    sysctl_kern_lctx, "Login Context Table");
/* Read-only counters and the writable id ceiling. */
SYSCTL_INT(_kern_lctx, OID_AUTO, last, CTLFLAG_RD, &lastlcid, 0, "");
SYSCTL_INT(_kern_lctx, OID_AUTO, count, CTLFLAG_RD, &alllctx_cnt, 0, "");
SYSCTL_INT(_kern_lctx, OID_AUTO, max, CTLFLAG_RW, &maxlcid, 0, "");
1659
1660 #endif /* LCTX */
1661
1662 /* Code Signing related routines */
1663
1664 int
1665 csops(__unused proc_t p, struct csops_args *uap, __unused int32_t *retval)
1666 {
1667 int ops = uap->ops;
1668 pid_t pid = uap->pid;
1669 user_addr_t uaddr = uap->useraddr;
1670 size_t usize = (size_t)CAST_DOWN(size_t, uap->usersize);
1671 proc_t pt;
1672 uint32_t retflags;
1673 int vid, forself;
1674 int error;
1675 vnode_t tvp;
1676 off_t toff;
1677 char * buf;
1678 unsigned char cdhash[SHA1_RESULTLEN];
1679
1680 forself = error = 0;
1681
1682 if (pid == 0)
1683 pid = proc_selfpid();
1684 if (pid == proc_selfpid())
1685 forself = 1;
1686
1687
1688 /* Pre flight checks for CS_OPS_PIDPATH */
1689 if (ops == CS_OPS_PIDPATH) {
1690 /* usize is unsigned.. */
1691 if (usize > 4 * PATH_MAX)
1692 return(EOVERFLOW);
1693 if (kauth_cred_issuser(kauth_cred_get()) != TRUE)
1694 return(EPERM);
1695 } else if ((forself == 0) && ((ops != CS_OPS_STATUS) && (ops != CS_OPS_CDHASH) && (ops != CS_OPS_PIDOFFSET) && (kauth_cred_issuser(kauth_cred_get()) != TRUE))) {
1696 return(EPERM);
1697 }
1698
1699 pt = proc_find(pid);
1700 if (pt == PROC_NULL)
1701 return(ESRCH);
1702
1703
1704
1705 switch (ops) {
1706
1707 case CS_OPS_STATUS:
1708 retflags = pt->p_csflags;
1709 if (uaddr != USER_ADDR_NULL)
1710 error = copyout(&retflags, uaddr, sizeof(uint32_t));
1711 break;
1712
1713 case CS_OPS_MARKINVALID:
1714 proc_lock(pt);
1715 if ((pt->p_csflags & CS_VALID) == CS_VALID) { /* is currently valid */
1716 pt->p_csflags &= ~CS_VALID; /* set invalid */
1717 if ((pt->p_csflags & CS_KILL) == CS_KILL) {
1718 proc_unlock(pt);
1719 psignal(pt, SIGKILL);
1720 } else
1721 proc_unlock(pt);
1722 } else
1723 proc_unlock(pt);
1724
1725 break;
1726
1727 case CS_OPS_MARKHARD:
1728 proc_lock(pt);
1729 pt->p_csflags |= CS_HARD;
1730 if ((pt->p_csflags & CS_VALID) == 0) {
1731 /* @@@ allow? reject? kill? @@@ */
1732 proc_unlock(pt);
1733 error = EINVAL;
1734 goto out;
1735 } else
1736 proc_unlock(pt);
1737 break;
1738
1739 case CS_OPS_MARKKILL:
1740 proc_lock(pt);
1741 pt->p_csflags |= CS_KILL;
1742 if ((pt->p_csflags & CS_VALID) == 0) {
1743 proc_unlock(pt);
1744 psignal(pt, SIGKILL);
1745 } else
1746 proc_unlock(pt);
1747 break;
1748
1749 case CS_OPS_PIDPATH:
1750 tvp = pt->p_textvp;
1751 vid = vnode_vid(tvp);
1752
1753 if (tvp == NULLVP) {
1754 proc_rele(pt);
1755 return(EINVAL);
1756 }
1757
1758 buf = (char *)kalloc(usize);
1759 if (buf == NULL) {
1760 proc_rele(pt);
1761 return(ENOMEM);
1762 }
1763 bzero(buf, usize);
1764
1765 error = vnode_getwithvid(tvp, vid);
1766 if (error == 0) {
1767 int len;
1768 len = usize;
1769 error = vn_getpath(tvp, buf, &len);
1770 vnode_put(tvp);
1771 if (error == 0) {
1772 error = copyout(buf, uaddr, usize);
1773 }
1774 kfree(buf, usize);
1775 }
1776
1777 proc_rele(pt);
1778
1779 return(error);
1780
1781 case CS_OPS_PIDOFFSET:
1782 toff = pt->p_textoff;
1783 proc_rele(pt);
1784 error = copyout(&toff, uaddr, sizeof(toff));
1785 return(error);
1786
1787 case CS_OPS_CDHASH:
1788
1789 /* pt already holds a reference on its p_textvp */
1790 tvp = pt->p_textvp;
1791 toff = pt->p_textoff;
1792
1793 if (tvp == NULLVP || usize != SHA1_RESULTLEN) {
1794 proc_rele(pt);
1795 return EINVAL;
1796 }
1797
1798 error = vn_getcdhash(tvp, toff, cdhash);
1799 proc_rele(pt);
1800
1801 if (error == 0) {
1802 error = copyout(cdhash, uaddr, sizeof (cdhash));
1803 }
1804
1805 return error;
1806
1807 default:
1808 error = EINVAL;
1809 break;
1810 }
1811 out:
1812 proc_rele(pt);
1813 return(error);
1814 }
1815
1816
1817 int
1818 proc_iterate(flags, callout, arg, filterfn, filterarg)
1819 int flags;
1820 int (*callout)(proc_t, void *);
1821 void * arg;
1822 int (*filterfn)(proc_t, void *);
1823 void * filterarg;
1824 {
1825 proc_t p;
1826 pid_t * pid_list;
1827 int count, pidcount, alloc_count, i, retval;
1828
1829 count = nprocs+ 10;
1830 if (count > hard_maxproc)
1831 count = hard_maxproc;
1832 alloc_count = count * sizeof(pid_t);
1833 pid_list = (pid_t *)kalloc(alloc_count);
1834 bzero(pid_list, alloc_count);
1835
1836
1837 proc_list_lock();
1838
1839
1840 pidcount = 0;
1841 if (flags & PROC_ALLPROCLIST) {
1842 for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
1843 if (p->p_stat == SIDL)
1844 continue;
1845 if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
1846 pid_list[pidcount] = p->p_pid;
1847 pidcount++;
1848 if (pidcount >= count)
1849 break;
1850 }
1851 }
1852 }
1853 if ((pidcount < count ) && (flags & PROC_ZOMBPROCLIST)) {
1854 for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) {
1855 if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
1856 pid_list[pidcount] = p->p_pid;
1857 pidcount++;
1858 if (pidcount >= count)
1859 break;
1860 }
1861 }
1862 }
1863
1864
1865 proc_list_unlock();
1866
1867
1868 for (i = 0; i< pidcount; i++) {
1869 p = proc_find(pid_list[i]);
1870 if (p) {
1871 if ((flags & PROC_NOWAITTRANS) == 0)
1872 proc_transwait(p, 0);
1873 retval = callout(p, arg);
1874
1875 switch (retval) {
1876 case PROC_RETURNED:
1877 case PROC_RETURNED_DONE:
1878 proc_rele(p);
1879 if (retval == PROC_RETURNED_DONE) {
1880 goto out;
1881 }
1882 break;
1883
1884 case PROC_CLAIMED_DONE:
1885 goto out;
1886 case PROC_CLAIMED:
1887 default:
1888 break;
1889 }
1890 } else if (flags & PROC_ZOMBPROCLIST) {
1891 p = proc_find_zombref(pid_list[i]);
1892 if (p != PROC_NULL) {
1893 retval = callout(p, arg);
1894
1895 switch (retval) {
1896 case PROC_RETURNED:
1897 case PROC_RETURNED_DONE:
1898 proc_drop_zombref(p);
1899 if (retval == PROC_RETURNED_DONE) {
1900 goto out;
1901 }
1902 break;
1903
1904 case PROC_CLAIMED_DONE:
1905 goto out;
1906 case PROC_CLAIMED:
1907 default:
1908 break;
1909 }
1910 }
1911 }
1912 }
1913
1914 out:
1915 kfree(pid_list, alloc_count);
1916 return(0);
1917
1918 }
1919
1920
1921 #if 0
1922 /* This is for iteration in case of trivial non blocking callouts */
/* This is for iteration in case of trivial non blocking callouts */
/* NOTE: compiled out (#if 0); callout runs WITH proc_list_lock held,
 * so it must never block or take a proc reference. */
int
proc_scanall(flags, callout, arg)
	int flags;
	int (*callout)(proc_t, void *);
	void * arg;
{
	proc_t p;
	int retval;


	proc_list_lock();


	if (flags & PROC_ALLPROCLIST) {
		for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
			retval = callout(p, arg);
			if (retval == PROC_RETURNED_DONE)
				goto out;
		}
	}
	if (flags & PROC_ZOMBPROCLIST) {
		for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) {
			retval = callout(p, arg);
			if (retval == PROC_RETURNED_DONE)
				goto out;
		}
	}
out:

	proc_list_unlock();

	return(0);
}
1956 #endif
1957
1958
/*
 * Shutdown-time iterator: walk allproc and run callout on each process
 * passing filterfn. The list lock is dropped around each callout, and
 * the walk RESTARTS from the head after every callout (goto
 * ps_allprocscan), since the list may have changed while unlocked.
 * Returns 0.
 */
int
proc_rebootscan(callout, arg, filterfn, filterarg)
	int (*callout)(proc_t, void *);
	void * arg;
	int (*filterfn)(proc_t, void *);
	void * filterarg;
{
	proc_t p;
	int lockheld = 0, retval;

	proc_shutdown_exitcount = 0;

ps_allprocscan:

	proc_list_lock();

	lockheld = 1;

	for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
		if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
			/* take a ref while the list lock is still held */
			p = proc_refinternal_locked(p);

			proc_list_unlock();
			lockheld = 0;

			if (p) {
				proc_transwait(p, 0);
				retval = callout(p, arg);
				proc_rele(p);
	
				switch (retval) {
				case PROC_RETURNED_DONE:
				case PROC_CLAIMED_DONE:
					goto out;
				}
			}
			/* list may have mutated while unlocked; restart scan */
			goto ps_allprocscan;	
		} /* filter pass */
	} /* allproc walk thru */

	if (lockheld == 1) {
		proc_list_unlock();
		lockheld = 0;
	}

out: 
	return(0);

}
2008
2009
2010 int
2011 proc_childrenwalk(parent, callout, arg)
2012 struct proc * parent;
2013 int (*callout)(proc_t, void *);
2014 void * arg;
2015 {
2016 register struct proc *p;
2017 pid_t * pid_list;
2018 int count, pidcount, alloc_count, i, retval;
2019
2020 count = nprocs+ 10;
2021 if (count > hard_maxproc)
2022 count = hard_maxproc;
2023 alloc_count = count * sizeof(pid_t);
2024 pid_list = (pid_t *)kalloc(alloc_count);
2025 bzero(pid_list, alloc_count);
2026
2027
2028 proc_list_lock();
2029
2030
2031 pidcount = 0;
2032 for (p = parent->p_children.lh_first; (p != 0); p = p->p_sibling.le_next) {
2033 if (p->p_stat == SIDL)
2034 continue;
2035 pid_list[pidcount] = p->p_pid;
2036 pidcount++;
2037 if (pidcount >= count)
2038 break;
2039 }
2040 proc_list_unlock();
2041
2042
2043 for (i = 0; i< pidcount; i++) {
2044 p = proc_find(pid_list[i]);
2045 if (p) {
2046 proc_transwait(p, 0);
2047 retval = callout(p, arg);
2048
2049 switch (retval) {
2050 case PROC_RETURNED:
2051 case PROC_RETURNED_DONE:
2052 proc_rele(p);
2053 if (retval == PROC_RETURNED_DONE) {
2054 goto out;
2055 }
2056 break;
2057
2058 case PROC_CLAIMED_DONE:
2059 goto out;
2060 case PROC_CLAIMED:
2061 default:
2062 break;
2063 }
2064 }
2065 }
2066
2067 out:
2068 kfree(pid_list, alloc_count);
2069 return(0);
2070
2071 }
2072
2073 /*
2074 */
2075 /* PGRP_BLOCKITERATE is not implemented yet */
/* PGRP_BLOCKITERATE is not implemented yet */
/*
 * Invoke callout(p, arg) on each member of pgrp that passes filterfn.
 * Membership pids are snapshotted under the pgrp lock; each pid is
 * then re-looked-up (and re-checked against pg_id, since the process
 * may have changed groups) before the callout runs unlocked.
 * PGRP_DROPREF releases the caller's pgrp reference before returning.
 * Returns 0.
 */
int
pgrp_iterate(pgrp, flags, callout, arg, filterfn, filterarg)
	struct pgrp *pgrp;
	int flags;
	int (*callout)(proc_t, void *);
	void * arg;
	int (*filterfn)(proc_t, void *);
	void * filterarg;
{
	proc_t p;
	pid_t * pid_list;
	int count, pidcount, i, alloc_count;
	int retval;
	pid_t pgid;
	int dropref = flags & PGRP_DROPREF;
#if 0
	int serialize = flags & PGRP_BLOCKITERATE;
#else
	int serialize = 0;
#endif

	if (pgrp == 0)
		return(0);
	count = pgrp->pg_membercnt + 10;	/* slack for joins before locking */
	if (count > hard_maxproc)
		count = hard_maxproc;
	alloc_count = count * sizeof(pid_t);
	pid_list = (pid_t *)kalloc(alloc_count);
	bzero(pid_list, alloc_count);
	
	pgrp_lock(pgrp);
	if (serialize  != 0) {
		/* dead path while serialize is hard-wired 0 above */
		while ((pgrp->pg_listflags & PGRP_FLAG_ITERABEGIN) == PGRP_FLAG_ITERABEGIN) {
			pgrp->pg_listflags |= PGRP_FLAG_ITERWAIT;
			msleep(&pgrp->pg_listflags, &pgrp->pg_mlock, 0, "pgrp_iterate", 0);
		}
		pgrp->pg_listflags |= PGRP_FLAG_ITERABEGIN;
	}

	pgid = pgrp->pg_id;

	pidcount = 0;
	for (p = pgrp->pg_members.lh_first; p != 0;
	     p = p->p_pglist.le_next) {
		if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
			pid_list[pidcount] = p->p_pid;
			pidcount++;
			if (pidcount >= count)
				break;
		}
	}
		

	pgrp_unlock(pgrp);
	if ((serialize == 0) && (dropref != 0))
		pg_rele(pgrp);


	for (i = 0; i< pidcount; i++) {
		/* No handling or proc0 */
		if (pid_list[i] == 0)
			continue;
		p = proc_find(pid_list[i]);
		if (p) {
			if (p->p_pgrpid != pgid) {
				/* process moved to another group; skip it */
				proc_rele(p);
				continue;
			}
			proc_transwait(p, 0);
			retval = callout(p, arg);

			switch (retval) {
				case PROC_RETURNED:
				case PROC_RETURNED_DONE:
					proc_rele(p);
					if (retval == PROC_RETURNED_DONE) {
						goto out;
					}
					break;

				case PROC_CLAIMED_DONE:
					goto out;
				case PROC_CLAIMED:
				default:
					break;
			}
		}
	}
out:
	if (serialize != 0) {
		pgrp_lock(pgrp);
		pgrp->pg_listflags &= ~PGRP_FLAG_ITERABEGIN;
		if ((pgrp->pg_listflags & PGRP_FLAG_ITERWAIT) == PGRP_FLAG_ITERWAIT) {
			pgrp->pg_listflags &= ~PGRP_FLAG_ITERWAIT;
			wakeup(&pgrp->pg_listflags);
		}
		pgrp_unlock(pgrp);
		if (dropref != 0)
			pg_rele(pgrp);
	}
	kfree(pid_list, alloc_count);
	return(0);
}
2179
/*
 * Add `child` to process group `pgrp`, inserting it after `parent`
 * on the member list when a parent is given. Caller is expected to
 * hold a reference on pgrp.
 */
static void
pgrp_add(struct pgrp * pgrp, struct proc * parent, struct proc * child)
{
	proc_list_lock();
	child->p_pgrp = pgrp;
	child->p_pgrpid = pgrp->pg_id;
	child->p_listflag |= P_LIST_INPGRP;
	/*
	 * When pgrp is being freed , a process can still
	 * request addition using setpgid from bash when 
	 * login is terminated (login cycler) return ESRCH
	 * Safe to hold lock due to refcount on pgrp 
	 */
	if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
		pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	} 

	if ((pgrp->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
		panic("pgrp_add : pgrp is dead adding process");
	proc_list_unlock();

	pgrp_lock(pgrp);
	pgrp->pg_membercnt++;
	if ( parent != PROC_NULL) {
		LIST_INSERT_AFTER(parent, child, p_pglist);
	}else {
		LIST_INSERT_HEAD(&pgrp->pg_members, child, p_pglist);
	}
	pgrp_unlock(pgrp);

	proc_list_lock();
	/* re-check: membership may have emptied between the two lockings */
	if (((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (pgrp->pg_membercnt != 0)) {
		pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}
	proc_list_unlock();
}
2216
/*
 * Detach p from its process group. Drops the group's member count and
 * either releases the proc_pgrp() reference or, when p was the last
 * member, tears the group down via pgdelete_dropref().
 */
static void
pgrp_remove(struct proc * p)
{
	struct pgrp * pg;

	pg = proc_pgrp(p);	/* takes a reference, consumed below */

	proc_list_lock();
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INPGRP) == 0)
		panic("removing from pglist but no named ref\n");
#endif
	p->p_pgrpid = PGRPID_DEAD;
	p->p_listflag &= ~P_LIST_INPGRP;
	p->p_pgrp = NULL;
	proc_list_unlock();

	if (pg == PGRP_NULL)
		panic("pgrp_remove: pg is NULL");
	pgrp_lock(pg);
	pg->pg_membercnt--;
	
	if (pg->pg_membercnt < 0)
		panic("pgprp: -ve membercnt pgprp:%p p:%p\n",pg, p);

	LIST_REMOVE(p, p_pglist);
	if (pg->pg_members.lh_first == 0) {
		/* last member gone: destroy the group (consumes our ref) */
		pgrp_unlock(pg);
		pgdelete_dropref(pg);
	} else {
		pgrp_unlock(pg);
		pg_rele(pg);
	}
}
2251
2252
2253 /* cannot use proc_pgrp as it maybe stalled */
/* cannot use proc_pgrp as it maybe stalled */
/*
 * Atomically (from other lookups' point of view) move p from its
 * current process group to newpg. The P_LIST_PGRPTRANS flag blocks
 * concurrent proc_pgrp()/proc_session() lookups for the duration;
 * waiters are woken at the end.
 */
static void
pgrp_replace(struct proc * p, struct pgrp * newpg)
{
        struct pgrp * oldpg;



       proc_list_lock();

	/* wait out any lookup that already saw us mid-transition */
	while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
		p->p_listflag |= P_LIST_PGRPTRWAIT;
		(void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
	}

	p->p_listflag |= P_LIST_PGRPTRANS;

	oldpg = p->p_pgrp;
	if (oldpg == PGRP_NULL)
		panic("pgrp_replace: oldpg NULL");
	oldpg->pg_refcount++;	/* keep oldpg alive while we unlink below */
#if __PROC_INTERNAL_DEBUG
        if ((p->p_listflag & P_LIST_INPGRP) == 0)
                panic("removing from pglist but no named ref\n");
#endif
        p->p_pgrpid = PGRPID_DEAD;
        p->p_listflag &= ~P_LIST_INPGRP;
        p->p_pgrp = NULL;

        proc_list_unlock();

        pgrp_lock(oldpg);
        oldpg->pg_membercnt--;
        if (oldpg->pg_membercnt < 0)
                panic("pgprp: -ve membercnt pgprp:%p p:%p\n",oldpg, p);
        LIST_REMOVE(p, p_pglist);
        if (oldpg->pg_members.lh_first == 0) {
                pgrp_unlock(oldpg);
                pgdelete_dropref(oldpg);
        } else {
                pgrp_unlock(oldpg);
                pg_rele(oldpg);
        }

        proc_list_lock();
        p->p_pgrp = newpg;
        p->p_pgrpid = newpg->pg_id;
        p->p_listflag |= P_LIST_INPGRP;
        /*
         * When pgrp is being freed , a process can still
         * request addition using setpgid from bash when
         * login is terminated (login cycler) return ESRCH
         * Safe to hold lock due to refcount on pgrp
         */
        if ((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
                newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
        }

        if ((newpg->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
                panic("pgrp_add : pgrp is dead adding process");
        proc_list_unlock();

        pgrp_lock(newpg);
        newpg->pg_membercnt++;
        LIST_INSERT_HEAD(&newpg->pg_members, p, p_pglist);
        pgrp_unlock(newpg);

        proc_list_lock();
        if (((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (newpg->pg_membercnt != 0)) {
                newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
        }

	/* transition complete: clear the flag and wake blocked lookups */
	p->p_listflag &= ~P_LIST_PGRPTRANS;
	if ((p->p_listflag & P_LIST_PGRPTRWAIT) == P_LIST_PGRPTRWAIT) {
		p->p_listflag &= ~P_LIST_PGRPTRWAIT;
		wakeup(&p->p_pgrpid);
		
	}
	proc_list_unlock();
}
2333
/* Acquire the per-process-group mutex. */
void
pgrp_lock(struct pgrp * pgrp)
{
	lck_mtx_lock(&pgrp->pg_mlock);
}
2339
/* Release the per-process-group mutex. */
void
pgrp_unlock(struct pgrp * pgrp)
{
	lck_mtx_unlock(&pgrp->pg_mlock);
}
2345
/* Acquire the per-session mutex. */
void
session_lock(struct session * sess)
{
	lck_mtx_lock(&sess->s_mlock);
}
2351
2352
/* Release the per-session mutex. */
void
session_unlock(struct session * sess)
{
	lck_mtx_unlock(&sess->s_mlock);
}
2358
2359 struct pgrp *
2360 proc_pgrp(proc_t p)
2361 {
2362 struct pgrp * pgrp;
2363
2364 if (p == PROC_NULL)
2365 return(PGRP_NULL);
2366 proc_list_lock();
2367
2368 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2369 p->p_listflag |= P_LIST_PGRPTRWAIT;
2370 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2371 }
2372
2373 pgrp = p->p_pgrp;
2374
2375 assert(pgrp != NULL);
2376
2377 if (pgrp != PGRP_NULL) {
2378 pgrp->pg_refcount++;
2379 if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) != 0)
2380 panic("proc_pgrp: ref being povided for dead pgrp");
2381 }
2382
2383 proc_list_unlock();
2384
2385 return(pgrp);
2386 }
2387
2388 struct pgrp *
2389 tty_pgrp(struct tty * tp)
2390 {
2391 struct pgrp * pg = PGRP_NULL;
2392
2393 proc_list_lock();
2394 pg = tp->t_pgrp;
2395
2396 if (pg != PGRP_NULL) {
2397 if ((pg->pg_listflags & PGRP_FLAG_DEAD) != 0)
2398 panic("tty_pgrp: ref being povided for dead pgrp");
2399 pg->pg_refcount++;
2400 }
2401 proc_list_unlock();
2402
2403 return(pg);
2404 }
2405
2406 struct session *
2407 proc_session(proc_t p)
2408 {
2409 struct session * sess = SESSION_NULL;
2410
2411 if (p == PROC_NULL)
2412 return(SESSION_NULL);
2413
2414 proc_list_lock();
2415
2416 /* wait during transitions */
2417 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2418 p->p_listflag |= P_LIST_PGRPTRWAIT;
2419 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2420 }
2421
2422 if ((p->p_pgrp != PGRP_NULL) && ((sess = p->p_pgrp->pg_session) != SESSION_NULL)) {
2423 if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
2424 panic("proc_session:returning sesssion ref on terminating session");
2425 sess->s_count++;
2426 }
2427 proc_list_unlock();
2428 return(sess);
2429 }
2430
/*
 * Drop a reference on sess; the last release unhashes and frees the
 * session (destroying its mutex first).
 */
void
session_rele(struct session *sess)
{
	proc_list_lock();
	if (--sess->s_count == 0) {
		if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
			panic("session_rele: terminating already terminated session");
		sess->s_listflags |= S_LIST_TERM;
		LIST_REMOVE(sess, s_hash);
		sess->s_listflags |= S_LIST_DEAD;
		/* s_count could only grow via a racing lookup, which the
		 * DEAD/TERM flags above are meant to forbid */
		if (sess->s_count != 0)
			panic("session_rele: freeing session in use");	
		proc_list_unlock();
#ifdef CONFIG_EMBEDDED
		lck_mtx_destroy(&sess->s_mlock, proc_lck_grp);
#else
		lck_mtx_destroy(&sess->s_mlock, proc_mlock_grp);
#endif
		FREE_ZONE(sess, sizeof(struct session), M_SESSION);
	} else
		proc_list_unlock();
}
2453
/*
 * Begin a process transition (e.g. exec): set P_LINTRANSIT and record
 * the owning thread. Blocks while another transition is in flight;
 * returns EDEADLK if that transition has already been committed.
 * `locked` indicates the caller already holds the proc lock.
 */
int
proc_transstart(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);
	while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
		if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT) {
			if (locked == 0)
				proc_unlock(p); 
			return EDEADLK;
		}
		p->p_lflag |= P_LTRANSWAIT;
		msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
	}
	p->p_lflag |= P_LINTRANSIT;
	p->p_transholder = current_thread();
	if (locked == 0)
		proc_unlock(p); 
	return 0;
}
2474
/*
 * Mark the in-flight transition as committed (point of no return) and
 * wake any waiters. Must be called by the thread that started the
 * transition. `locked` indicates the caller holds the proc lock.
 */
void
proc_transcommit(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);

	assert ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT);
	assert (p->p_transholder == current_thread());
	p->p_lflag |= P_LTRANSCOMMIT;

	if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
		p->p_lflag &= ~P_LTRANSWAIT;
		wakeup(&p->p_lflag);
	}
	if (locked == 0)
		proc_unlock(p);
}
2492
/*
 * End the transition started by proc_transstart(): clear the
 * in-transit/commit flags and holder, and wake any waiters.
 * `locked` indicates the caller holds the proc lock.
 */
void
proc_transend(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);

	p->p_lflag &= ~( P_LINTRANSIT | P_LTRANSCOMMIT);
	p->p_transholder = NULL;

	if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
		p->p_lflag &= ~P_LTRANSWAIT;
		wakeup(&p->p_lflag);
	}
	if (locked == 0)
		proc_unlock(p);
}
2509
/*
 * Wait until p has no transition in flight. Returns EDEADLK when the
 * calling process is waiting on its own committed transition (which
 * would never finish). `locked` indicates the caller holds the proc
 * lock; returns 0 otherwise.
 */
int
proc_transwait(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);
	while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
		if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT && current_proc() == p) {
			if (locked == 0)
				proc_unlock(p); 
			return EDEADLK;
		}
		p->p_lflag |= P_LTRANSWAIT;
		msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
	}
	if (locked == 0)
		proc_unlock(p); 
	return 0;
}
2528
/* Acquire the global proc-knote list mutex. */
void
proc_klist_lock(void)
{
	lck_mtx_lock(proc_klist_mlock);
}
2534
/* Release the global proc-knote list mutex. */
void
proc_klist_unlock(void)
{
	lck_mtx_unlock(proc_klist_mlock);
}
2540
/* Post `hint` to all knotes attached to p, under the klist lock. */
void
proc_knote(struct proc * p, long hint)
{
	proc_klist_lock();
	KNOTE(&p->p_klist, hint);
	proc_klist_unlock();
}
2548
/*
 * Detach every knote from p and NULL out each knote's back-pointer.
 */
void
proc_knote_drain(struct proc *p)
{
	struct knote *kn = NULL;

	/*
	 * Clear the proc's klist to avoid references after the proc is reaped.
	 */
	proc_klist_lock();
	while ((kn = SLIST_FIRST(&p->p_klist))) {
		kn->kn_ptr.p_proc = PROC_NULL;	/* knote must not see a stale proc */
		KNOTE_DETACH(&p->p_klist, kn);
	}
	proc_klist_unlock();
}
2564
unsigned long cs_procs_killed = 0;	/* processes killed on CS faults (CS_KILL) */
unsigned long cs_procs_invalidated = 0;	/* processes whose CS_VALID was cleared */
int cs_force_kill = 0;	/* debug: force CS_KILL on every cs fault (see cs_invalid_page) */
int cs_force_hard = 0;	/* debug: force CS_HARD on every cs fault */
int cs_debug = 0;	/* enable verbose code-signing logging */
SYSCTL_INT(_vm, OID_AUTO, cs_force_kill, CTLFLAG_RW, &cs_force_kill, 0, "");
SYSCTL_INT(_vm, OID_AUTO, cs_force_hard, CTLFLAG_RW, &cs_force_hard, 0, "");
SYSCTL_INT(_vm, OID_AUTO, cs_debug, CTLFLAG_RW, &cs_debug, 0, "");
2573
/*
 * Ask MAC policy whether p may continue running with invalid pages;
 * if allowed, clear its kill/hard/valid code-signing bits and drop
 * the VM map's signing protection. Returns nonzero when the process
 * may proceed.
 */
int
cs_allow_invalid(struct proc *p)
{
#if MACH_ASSERT
	lck_mtx_assert(&p->p_mlock, LCK_MTX_ASSERT_NOTOWNED);
#endif
#if CONFIG_MACF && CONFIG_ENFORCE_SIGNED_CODE
	/* There needs to be a MAC policy to implement this hook, or else the
	 * kill bits will be cleared here every time. If we have 
	 * CONFIG_ENFORCE_SIGNED_CODE, we can assume there is a policy
	 * implementing the hook. 
	 */
	if( 0 != mac_proc_check_run_cs_invalid(p)) {
		if(cs_debug) printf("CODE SIGNING: cs_allow_invalid() "
				    "not allowed: pid %d\n", 
				    p->p_pid);
		return 0;
	}
	if(cs_debug) printf("CODE SIGNING: cs_allow_invalid() "
			    "allowed: pid %d\n", 
			    p->p_pid);
	proc_lock(p);
	p->p_csflags &= ~(CS_KILL | CS_HARD | CS_VALID);
	proc_unlock(p);
	vm_map_switch_protect(get_task_map(p->task), FALSE);
#endif
	return (p->p_csflags & (CS_KILL | CS_HARD)) == 0;
}
2602
/*
 * Handle a code-signing page-validation failure at vaddr for the
 * current process. Honors CS_KILL (SIGKILL the process) and CS_HARD
 * (fail the mapping: return 1); otherwise clears CS_VALID and lets
 * the access proceed (return 0).
 */
int
cs_invalid_page(
	addr64_t vaddr)
{
	struct proc	*p;
	int		retval;

	p = current_proc();

	/*
	 * XXX revisit locking when proc is no longer protected
	 * by the kernel funnel...
	 */

	/* XXX for testing */
	proc_lock(p);
	if (cs_force_kill)
		p->p_csflags |= CS_KILL;
	if (cs_force_hard)
		p->p_csflags |= CS_HARD;

	/* CS_KILL triggers us to send a kill signal. Nothing else. */
	if (p->p_csflags & CS_KILL) {
		proc_unlock(p);
		if (cs_debug) {
			printf("CODE SIGNING: cs_invalid_page(0x%llx): "
			       "p=%d[%s] honoring CS_KILL\n",
			       vaddr, p->p_pid, p->p_comm);
		}
		cs_procs_killed++;
		psignal(p, SIGKILL);
		proc_lock(p);
	}
	
	/* CS_HARD means fail the mapping operation so the process stays valid. */
	if (p->p_csflags & CS_HARD) {
		proc_unlock(p);
		if (cs_debug) {
			printf("CODE SIGNING: cs_invalid_page(0x%llx): "
			       "p=%d[%s] honoring CS_HARD\n",
			       vaddr, p->p_pid, p->p_comm);
		}
		retval = 1;
	} else {
		if (p->p_csflags & CS_VALID) {
			p->p_csflags &= ~CS_VALID;
			
			proc_unlock(p);
			cs_procs_invalidated++;
			printf("CODE SIGNING: cs_invalid_page(0x%llx): "
			       "p=%d[%s] clearing CS_VALID\n",
			       vaddr, p->p_pid, p->p_comm);
		} else {
			proc_unlock(p);
		}
		
		retval = 0;
	}

	return retval;
}
2664
/* Mark the proc as "registered" (sets P_LREGISTER under the proc lock). */
void
proc_setregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag |= P_LREGISTER;
	proc_unlock(p);
}
2672
/* Clear the proc's "registered" state (clears P_LREGISTER under the proc lock). */
void
proc_resetregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag &= ~P_LREGISTER;
	proc_unlock(p);
}
2680
/* Return the process-group id cached in the proc structure. */
pid_t
proc_pgrpid(proc_t p)
{
	return p->p_pgrpid;
}
2686
2687 pid_t
2688 proc_selfpgrpid()
2689 {
2690 return current_proc()->p_pgrpid;
2691 }
2692
2693
2694 /* return control and action states */
2695 int
2696 proc_getpcontrol(int pid, int * pcontrolp)
2697 {
2698 proc_t p;
2699
2700 p = proc_find(pid);
2701 if (p == PROC_NULL)
2702 return(ESRCH);
2703 if (pcontrolp != NULL)
2704 *pcontrolp = p->p_pcaction;
2705
2706 proc_rele(p);
2707 return(0);
2708 }
2709
/*
 * Apply the proc's configured low-swap action (throttle, suspend, or
 * kill) if no action has been taken on it yet.  'num_found' points to
 * an int counter incremented for each proc actually acted upon; it is
 * the accumulator threaded through proc_iterate() by
 * no_paging_space_action().  Always returns PROC_RETURNED.
 */
int
proc_dopcontrol(proc_t p, void *num_found)
{
	int pcontrol;

	proc_lock(p);

	pcontrol = PROC_CONTROL_STATE(p);

	if (PROC_ACTION_STATE(p) ==0) {
		switch(pcontrol) {
		case P_PCTHROTTLE:
			/* mark acted-upon before dropping the lock so a
			 * concurrent pass will skip this proc */
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: throttling pid %d (%s)\n", p->p_pid, p->p_comm);
			(*(int *)num_found)++;
			break;

		case P_PCSUSP:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: suspending pid %d (%s)\n", p->p_pid, p->p_comm);
			task_suspend(p->task);
			(*(int *)num_found)++;
			break;

		case P_PCKILL:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
			psignal(p, SIGKILL);
			(*(int *)num_found)++;
			break;

		default:
			/* no action configured: not counted in num_found */
			proc_unlock(p);
		}

	} else
		proc_unlock(p);

	return(PROC_RETURNED);
}
2753
2754
2755 /*
2756 * Resume a throttled or suspended process. This is an internal interface that's only
2757 * used by the user level code that presents the GUI when we run out of swap space and
2758 * hence is restricted to processes with superuser privileges.
2759 */
2760
int
proc_resetpcontrol(int pid)
{
	proc_t p;
	int pcontrol;
	int error;

	/* superuser only: this is a privileged GUI-helper interface */
	if ((error = suser(kauth_cred_get(), 0)))
		return error;
	p = proc_find(pid);
	if (p == PROC_NULL)
		return(ESRCH);
	
	proc_lock(p);

	pcontrol = PROC_CONTROL_STATE(p);

	if(PROC_ACTION_STATE(p) !=0) {
		switch(pcontrol) {
		case P_PCTHROTTLE:
			PROC_RESETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: unthrottling pid %d (%s)\n", p->p_pid, p->p_comm);
			break;

		case P_PCSUSP:
			PROC_RESETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: resuming pid %d (%s)\n", p->p_pid, p->p_comm);
			task_resume(p->task);
			break;

		case P_PCKILL:
			/* Huh? */
			/* NOTE(review): this sets (not resets) the action
			 * state for an already-killed proc — presumably
			 * intentional to keep it marked; confirm upstream. */
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: attempt to unkill pid %d (%s) ignored\n", p->p_pid, p->p_comm);
			break;

		default:
			proc_unlock(p);
		}

	} else
		proc_unlock(p);

	proc_rele(p);
	return(0);
}
2810
2811
2812 /*
2813 * Return true if the specified process has an action state specified for it and it isn't
2814 * already in an action state and it's using more physical memory than the specified threshold.
2815 * Note: the memory_threshold argument is specified in bytes and is of type uint64_t.
2816 */
2817
2818 static int
2819 proc_pcontrol_filter(proc_t p, void *memory_thresholdp)
2820 {
2821
2822 return PROC_CONTROL_STATE(p) && /* if there's an action state specified... */
2823 (PROC_ACTION_STATE(p) == 0) && /* and we're not in the action state yet... */
2824 (get_task_resident_size(p->task) > *((uint64_t *)memory_thresholdp)); /* and this proc is over the mem threshold, */
2825 /* then return true to take action on this proc */
2826 }
2827
2828
2829
2830 /*
2831 * Deal with the out of swap space condition. This routine gets called when
2832 * we want to swap something out but there's no more space left. Since this
 * creates a memory deadlock situation, we need to take action to free up
2834 * some memory resources in order to prevent the system from hanging completely.
2835 * The action we take is based on what the system processes running at user level
2836 * have specified. Processes are marked in one of four categories: ones that
2837 * can be killed immediately, ones that should be suspended, ones that should
2838 * be throttled, and all the rest which are basically none of the above. Which
2839 * processes are marked as being in which category is a user level policy decision;
2840 * we just take action based on those decisions here.
2841 */
2842
#define STARTING_PERCENTAGE	50	/* memory threshold expressed as a percentage */
					/* of physical memory */

struct timeval	last_no_space_action = {0, 0};	/* last time no_paging_space_action() ran (throttle) */
2847
void
no_paging_space_action(void)
{

	uint64_t	memory_threshold;
	int		num_found;
	struct timeval	now;

	/*
	 * Throttle how often we come through here.  Once every 20 seconds should be plenty.
	 */

	microtime(&now);

	if (now.tv_sec <= last_no_space_action.tv_sec + 20)
		return;

	last_no_space_action = now;

	/*
	 * Examine all processes and find those that have been marked to have some action
	 * taken when swap space runs out.  Of those processes, select one or more and 
	 * apply the specified action to them.  The idea is to only take action against
	 * a few processes rather than hitting too many at once.  If the low swap condition
	 * persists, this routine will get called again and we'll take action against more
	 * processes.
	 *
	 * Of the processes that have been marked, we choose which ones to take action 
	 * against according to how much physical memory they're presently using.  We
	 * start with the STARTING_PERCENTAGE and any processes using more physical memory
	 * than the percentage threshold will have action taken against it.  If there
	 * are no processes over the threshold, then the threshold is cut in half and we
	 * look again for processes using more than this threshold.  We continue in 
	 * this fashion until we find at least one process to take action against.  This
	 * iterative approach is less than ideally efficient, however we only get here
	 * when the system is almost in a memory deadlock and is pretty much just
	 * thrashing if it's doing anything at all.  Therefore, the cpu overhead of
	 * potentially multiple passes here probably isn't relevant.
	 */

	memory_threshold = (sane_size * STARTING_PERCENTAGE) / 100;	/* resident threshold in bytes */

	for (num_found = 0; num_found == 0; memory_threshold = memory_threshold / 2) {
		proc_iterate(PROC_ALLPROCLIST, proc_dopcontrol, (void *)&num_found, proc_pcontrol_filter, (void *)&memory_threshold);

		/*
		 * If we just looked with memory_threshold == 0, then there's no need to iterate any further since
		 * we won't find any eligible processes at this point.
		 */

		if (memory_threshold == 0) {
			if (num_found == 0)	/* log that we couldn't do anything in this case */
				printf("low swap: unable to find any eligible processes to take action on\n");

			break;
		}
	}
}