/*
 * apple/xnu — bsd/kern/kern_proc.c
 * (mirror: git.saurik.com, blob 6d696b4244226b782877e0e4312106a724501921)
 */
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)kern_proc.c 8.4 (Berkeley) 1/4/94
62 */
63 /*
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
67 * Version 2.0.
68 */
69 /* HISTORY
70 * 04-Aug-97 Umesh Vaishampayan (umeshv@apple.com)
71 * Added current_proc_EXTERNAL() function for the use of kernel
 *	loadable modules.
73 *
74 * 05-Jun-95 Mac Gillon (mgillon) at NeXT
75 * New version based on 3.3NS and 4.4
76 */
77
78
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/kernel.h>
82 #include <sys/proc_internal.h>
83 #include <sys/acct.h>
84 #include <sys/wait.h>
85 #include <sys/file_internal.h>
86 #include <sys/uio.h>
87 #include <sys/malloc.h>
88 #include <sys/lock.h>
89 #include <sys/mbuf.h>
90 #include <sys/ioctl.h>
91 #include <sys/tty.h>
92 #include <sys/signalvar.h>
93 #include <sys/syslog.h>
94 #include <sys/sysctl.h>
95 #include <sys/sysproto.h>
96 #include <sys/kauth.h>
97 #include <sys/codesign.h>
98 #include <sys/kernel_types.h>
99 #include <sys/ubc.h>
100 #include <kern/kalloc.h>
101 #include <kern/task.h>
102 #include <kern/assert.h>
103 #include <vm/vm_protos.h>
104 #include <vm/vm_map.h> /* vm_map_switch_protect() */
105 #include <mach/task.h>
106 #include <mach/message.h>
107
108 #if CONFIG_MACF
109 #include <security/mac_framework.h>
110 #endif
111
112 #include <libkern/crypto/sha1.h>
113
/*
 * Structure associated with per-user (uid) process-count caching.
 */
struct uidinfo {
	LIST_ENTRY(uidinfo) ui_hash;	/* hash chain linkage */
	uid_t	ui_uid;			/* uid this entry tracks */
	long	ui_proccnt;		/* number of processes owned by ui_uid */
};
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
LIST_HEAD(uihashhead, uidinfo) *uihashtbl;	/* hash table of per-uid entries */
u_long uihash;		/* size of hash table - 1 */
125
/*
 * Other process lists
 */
struct pidhashhead *pidhashtbl;		/* pid -> proc hash table */
u_long pidhash;				/* size of pid hash table - 1 */
struct pgrphashhead *pgrphashtbl;	/* pgid -> pgrp hash table */
u_long pgrphash;			/* size of pgrp hash table - 1 */
struct sesshashhead *sesshashtbl;	/* session id -> session hash table */
u_long sesshash;			/* size of session hash table - 1 */

struct proclist allproc;		/* list of all live processes */
struct proclist zombproc;		/* exited processes awaiting reaping */
extern struct tty cons;			/* console tty, defined elsewhere */
139
#if CONFIG_LCTX
/*
 * Login Context
 */
static pid_t	lastlcid = 1;		/* last login-context id handed out */
static int	alllctx_cnt;		/* number of active login contexts */

#define	LCID_MAX	8192	/* Does this really need to be large? */
static int	maxlcid = LCID_MAX;	/* upper bound on login-context ids */

LIST_HEAD(lctxlist, lctx);
static struct lctxlist alllctx;		/* list of all login contexts */

/* NOTE(review): presumably guards alllctx/alllctx_cnt/lastlcid — confirm */
lck_mtx_t alllctx_lock;
lck_grp_t * lctx_lck_grp;
lck_grp_attr_t * lctx_lck_grp_attr;
lck_attr_t * lctx_lck_attr;

static void lctxinit(void);
#endif
160
int cs_debug;	/* declared further down in this file */

#if DEBUG
#define __PROC_INTERNAL_DEBUG 1
#endif
/* Name to give to core files */
__private_extern__ char corefilename[MAXPATHLEN+1] = {"/cores/core.%P"};

/* Forward declarations for routines defined later in this file. */
static void orphanpg(struct pgrp *pg);
void proc_name_kdp(task_t t, char * buf, int size);
char *proc_name_address(void *p);

static void pgrp_add(struct pgrp * pgrp, proc_t parent, proc_t child);
static void pgrp_remove(proc_t p);
static void pgrp_replace(proc_t p, struct pgrp *pgrp);
static void pgdelete_dropref(struct pgrp *pgrp);
extern void pg_rele_dropref(struct pgrp * pgrp);
static int csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaddittoken);

/* Arguments threaded through the fixjobc() child-iteration callback. */
struct fixjob_iterargs {
	struct pgrp * pg;		/* process group being entered/left */
	struct session * mysession;	/* session of the group being adjusted */
	int entering;			/* 1 = entering pg, 0 = leaving pg */
};

int fixjob_callback(proc_t, void *);
187
188 /*
189 * Initialize global process hashing structures.
190 */
191 void
192 procinit(void)
193 {
194 LIST_INIT(&allproc);
195 LIST_INIT(&zombproc);
196 pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
197 pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
198 sesshashtbl = hashinit(maxproc / 4, M_PROC, &sesshash);
199 uihashtbl = hashinit(maxproc / 16, M_PROC, &uihash);
200 #if CONFIG_LCTX
201 lctxinit();
202 #endif
203 }
204
205 /*
206 * Change the count associated with number of processes
207 * a given user is using. This routine protects the uihash
208 * with the list lock
209 */
int
chgproccnt(uid_t uid, int diff)
{
	struct uidinfo *uip;
	struct uidinfo *newuip = NULL;	/* entry pre-allocated on a retry pass */
	struct uihashhead *uipp;
	int retval;

again:
	proc_list_lock();
	uipp = UIHASH(uid);
	/* Look up the existing per-uid entry, if any. */
	for (uip = uipp->lh_first; uip != 0; uip = uip->ui_hash.le_next)
		if (uip->ui_uid == uid)
			break;
	if (uip) {
		uip->ui_proccnt += diff;
		if (uip->ui_proccnt > 0) {
			retval = uip->ui_proccnt;
			proc_list_unlock();
			goto out;
		}
		if (uip->ui_proccnt < 0)
			panic("chgproccnt: procs < 0");
		/* Count dropped to zero: retire the entry. */
		LIST_REMOVE(uip, ui_hash);
		retval = 0;
		proc_list_unlock();
		FREE_ZONE(uip, sizeof(*uip), M_PROC);
		goto out;
	}
	/* No entry exists: a decrement here means the bookkeeping is broken. */
	if (diff <= 0) {
		if (diff == 0) {
			retval = 0;
			proc_list_unlock();
			goto out;
		}
		panic("chgproccnt: lost user");
	}
	/* Use the entry allocated on a previous pass, if we have one. */
	if (newuip != NULL) {
		uip = newuip;
		newuip = NULL;
		LIST_INSERT_HEAD(uipp, uip, ui_hash);
		uip->ui_uid = uid;
		uip->ui_proccnt = diff;
		retval = diff;
		proc_list_unlock();
		goto out;
	}
	/*
	 * Need a new entry, but cannot block in the allocator while
	 * holding the list lock: drop it, allocate, and redo the
	 * lookup from scratch (the entry may have appeared meanwhile).
	 */
	proc_list_unlock();
	MALLOC_ZONE(newuip, struct uidinfo *, sizeof(*uip), M_PROC, M_WAITOK);
	if (newuip == NULL)
		panic("chgproccnt: M_PROC zone depleted");
	goto again;
out:
	/* Free the pre-allocated entry if another pass made it unnecessary. */
	if (newuip != NULL)
		FREE_ZONE(newuip, sizeof(*uip), M_PROC);
	return(retval);
}
267
268 /*
269 * Is p an inferior of the current process?
270 */
271 int
272 inferior(proc_t p)
273 {
274 int retval = 0;
275
276 proc_list_lock();
277 for (; p != current_proc(); p = p->p_pptr)
278 if (p->p_pid == 0)
279 goto out;
280 retval = 1;
281 out:
282 proc_list_unlock();
283 return(retval);
284 }
285
286 /*
287 * Is p an inferior of t ?
288 */
289 int
290 isinferior(proc_t p, proc_t t)
291 {
292 int retval = 0;
293 int nchecked = 0;
294 proc_t start = p;
295
296 /* if p==t they are not inferior */
297 if (p == t)
298 return(0);
299
300 proc_list_lock();
301 for (; p != t; p = p->p_pptr) {
302 nchecked++;
303
304 /* Detect here if we're in a cycle */
305 if ((p->p_pid == 0) || (p->p_pptr == start) || (nchecked >= nprocs))
306 goto out;
307 }
308 retval = 1;
309 out:
310 proc_list_unlock();
311 return(retval);
312 }
313
314 int
315 proc_isinferior(int pid1, int pid2)
316 {
317 proc_t p = PROC_NULL;
318 proc_t t = PROC_NULL;
319 int retval = 0;
320
321 if (((p = proc_find(pid1)) != (proc_t)0 ) && ((t = proc_find(pid2)) != (proc_t)0))
322 retval = isinferior(p, t);
323
324 if (p != PROC_NULL)
325 proc_rele(p);
326 if (t != PROC_NULL)
327 proc_rele(t);
328
329 return(retval);
330 }
331
/*
 * Locate a process by pid and take a reference on it.
 * Returns PROC_NULL if not found or not referencable; on success the
 * caller must drop the reference with proc_rele().
 */
proc_t
proc_find(int pid)
{
	return(proc_findinternal(pid, 0));
}
337
338 proc_t
339 proc_findinternal(int pid, int locked)
340 {
341 proc_t p = PROC_NULL;
342
343 if (locked == 0) {
344 proc_list_lock();
345 }
346
347 p = pfind_locked(pid);
348 if ((p == PROC_NULL) || (p != proc_ref_locked(p)))
349 p = PROC_NULL;
350
351 if (locked == 0) {
352 proc_list_unlock();
353 }
354
355 return(p);
356 }
357
/*
 * Return the proc a kernel thread belongs to, with a reference taken,
 * or PROC_NULL. Returns the child proc for a thread in vfork().
 */
proc_t
proc_findthread(thread_t thread)
{
	proc_t p = PROC_NULL;
	struct uthread *uth;

	proc_list_lock();
	uth = get_bsdthread_info(thread);
	/* A vfork()ing thread temporarily acts on behalf of the child proc. */
	if (uth && (uth->uu_flag & UT_VFORK))
		p = uth->uu_proc;
	else
		p = (proc_t)(get_bsdthreadtask_info(thread));
	p = proc_ref_locked(p);
	proc_list_unlock();
	return(p);
}
374
375 int
376 proc_rele(proc_t p)
377 {
378 proc_list_lock();
379 proc_rele_locked(p);
380 proc_list_unlock();
381
382 return(0);
383 }
384
385 proc_t
386 proc_self(void)
387 {
388 struct proc * p;
389
390 p = current_proc();
391
392 proc_list_lock();
393 if (p != proc_ref_locked(p))
394 p = PROC_NULL;
395 proc_list_unlock();
396 return(p);
397 }
398
399
/*
 * Take a reference on p. Caller must hold the proc list lock.
 * Returns p on success, PROC_NULL when p is still being created,
 * is a zombie, has exited, or is being drained for teardown.
 */
proc_t
proc_ref_locked(proc_t p)
{
	proc_t p1 = p;

	/* if process still in creation return failure */
	if ((p == PROC_NULL) || ((p->p_listflag & P_LIST_INCREATE) != 0))
		return (PROC_NULL);
	/* do not return process marked for termination */
	if ((p->p_stat != SZOMB) && ((p->p_listflag & P_LIST_EXITED) == 0) && ((p->p_listflag & (P_LIST_DRAINWAIT | P_LIST_DRAIN | P_LIST_DEAD)) == 0))
		p->p_refcount++;
	else
		p1 = PROC_NULL;

	return(p1);
}
416
/*
 * Drop a reference on p. Caller must hold the proc list lock.
 * Wakes a drainer blocked in proc_refdrain() when the last ref drops.
 * Panics on refcount underflow.
 */
void
proc_rele_locked(proc_t p)
{

	if (p->p_refcount > 0) {
		p->p_refcount--;
		if ((p->p_refcount == 0) && ((p->p_listflag & P_LIST_DRAINWAIT) == P_LIST_DRAINWAIT)) {
			p->p_listflag &= ~P_LIST_DRAINWAIT;
			wakeup(&p->p_refcount);
		}
	} else
		panic("proc_rele_locked -ve ref\n");

}
431
/*
 * Locate a process that has begun exit by pid and claim it for reaping
 * by setting P_LIST_WAITING. Returns PROC_NULL if the pid is unknown,
 * the process is still being created, has not begun exit, or is
 * already claimed by another reaper. Release with proc_drop_zombref().
 */
proc_t
proc_find_zombref(int pid)
{
	proc_t p1 = PROC_NULL;
	proc_t p = PROC_NULL;

	proc_list_lock();

	p = pfind_locked(pid);

	/* if process still in creation return NULL */
	if ((p == PROC_NULL) || ((p->p_listflag & P_LIST_INCREATE) != 0)) {
		proc_list_unlock();
		return (p1);
	}

	/* claim it only if it has begun exit and no one else is reaping it */
	if (((p->p_listflag & P_LIST_EXITED) != 0) && ((p->p_listflag & P_LIST_WAITING) == 0)) {
		p->p_listflag |= P_LIST_WAITING;
		p1 = p;
	} else
		p1 = PROC_NULL;

	proc_list_unlock();

	return(p1);
}
459
460 void
461 proc_drop_zombref(proc_t p)
462 {
463 proc_list_lock();
464 if ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
465 p->p_listflag &= ~P_LIST_WAITING;
466 wakeup(&p->p_stat);
467 }
468 proc_list_unlock();
469 }
470
471
/*
 * Block until every outstanding reference on p has been dropped,
 * then mark the proc dead (P_LIST_DEAD) so no new refs are granted.
 * Used during process teardown.
 */
void
proc_refdrain(proc_t p)
{

	proc_list_lock();

	p->p_listflag |= P_LIST_DRAIN;
	while (p->p_refcount) {
		/* ask the last proc_rele_locked() to wake us */
		p->p_listflag |= P_LIST_DRAINWAIT;
		msleep(&p->p_refcount, proc_list_mlock, 0, "proc_refdrain", 0);
	}
	p->p_listflag &= ~P_LIST_DRAIN;
	p->p_listflag |= P_LIST_DEAD;

	proc_list_unlock();


}
490
/*
 * Take a "parent reference" (p_parentref) on p's parent, preventing the
 * parent from completing its child drain while the ref is held.
 * Returns the parent on success or PROC_NULL if the parent is gone,
 * a zombie, already fully drained, or the drain-in-progress wait is
 * retried too many times. Release with proc_parentdropref().
 */
proc_t
proc_parentholdref(proc_t p)
{
	proc_t parent = PROC_NULL;
	proc_t pp;
	int loopcnt = 0;


	proc_list_lock();
loop:
	pp = p->p_pptr;
	/* No parent, zombie parent, or drain already complete: fail. */
	if ((pp == PROC_NULL) || (pp->p_stat == SZOMB) || ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED))) {
		parent = PROC_NULL;
		goto out;
	}

	/* Drain started but not finished: wait and retry, bounded to 5 tries. */
	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == P_LIST_CHILDDRSTART) {
		pp->p_listflag |= P_LIST_CHILDDRWAIT;
		msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
		loopcnt++;
		if (loopcnt == 5) {
			parent = PROC_NULL;
			goto out;
		}
		goto loop;
	}

	/* No drain in progress: safe to take the parent ref. */
	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == 0) {
		pp->p_parentref++;
		parent = pp;
		goto out;
	}

out:
	proc_list_unlock();
	return(parent);
}
/*
 * Drop a parent reference taken by proc_parentholdref(). Wakes a
 * waiter in proc_childdrainstart() when the last ref drops. Pass
 * listlocked != 0 if the caller already holds the proc list lock.
 * Panics on underflow. Always returns 0.
 */
int
proc_parentdropref(proc_t p, int listlocked)
{
	if (listlocked == 0)
		proc_list_lock();

	if (p->p_parentref > 0) {
		p->p_parentref--;
		if ((p->p_parentref == 0) && ((p->p_listflag & P_LIST_PARENTREFWAIT) == P_LIST_PARENTREFWAIT)) {
			p->p_listflag &= ~P_LIST_PARENTREFWAIT;
			wakeup(&p->p_parentref);
		}
	} else
		panic("proc_parentdropref -ve ref\n");
	if (listlocked == 0)
		proc_list_unlock();

	return(0);
}
547
/*
 * Begin draining p's children during exit: mark the drain started and
 * block until all outstanding parent references on p are released.
 * Caller is expected to hold the proc list lock (msleep uses proc_list_mlock).
 */
void
proc_childdrainstart(proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_CHILDDRSTART) == P_LIST_CHILDDRSTART)
		panic("proc_childdrainstart: childdrain already started\n");
#endif
	p->p_listflag |= P_LIST_CHILDDRSTART;
	/* wait for all that hold parentrefs to drop */
	while (p->p_parentref > 0) {
		p->p_listflag |= P_LIST_PARENTREFWAIT;
		msleep(&p->p_parentref, proc_list_mlock, 0, "proc_childdrainstart", 0);
	}
}
562
563
564 void
565 proc_childdrainend(proc_t p)
566 {
567 #if __PROC_INTERNAL_DEBUG
568 if (p->p_childrencnt > 0)
569 panic("exiting: children stil hanging around\n");
570 #endif
571 p->p_listflag |= P_LIST_CHILDDRAINED;
572 if ((p->p_listflag & (P_LIST_CHILDLKWAIT |P_LIST_CHILDDRWAIT)) != 0) {
573 p->p_listflag &= ~(P_LIST_CHILDLKWAIT |P_LIST_CHILDDRWAIT);
574 wakeup(&p->p_childrencnt);
575 }
576 }
577
/*
 * Debug-only sanity check run when a proc is about to be freed:
 * panic if it is still hashed or still holds child/ref/parentref counts.
 * Compiles to an empty function outside __PROC_INTERNAL_DEBUG builds.
 */
void
proc_checkdeadrefs(__unused proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INHASH) != 0)
		panic("proc being freed and still in hash %p: %u\n", p, p->p_listflag);
	if (p->p_childrencnt != 0)
		panic("proc being freed and pending children cnt %p:%d\n", p, p->p_childrencnt);
	if (p->p_refcount != 0)
		panic("proc being freed and pending refcount %p:%d\n", p, p->p_refcount);
	if (p->p_parentref != 0)
		panic("proc being freed and pending parentrefs %p:%d\n", p, p->p_parentref);
#endif
}
592
593 int
594 proc_pid(proc_t p)
595 {
596 return(p->p_pid);
597 }
598
599 int
600 proc_ppid(proc_t p)
601 {
602 return(p->p_ppid);
603 }
604
605 int
606 proc_selfpid(void)
607 {
608 proc_t p = current_proc();
609 return(p->p_pid);
610 }
611
612 int
613 proc_selfppid(void)
614 {
615 proc_t p = current_proc();
616 return(p->p_ppid);
617 }
618
/*
 * Return p's parent with a reference taken, or PROC_NULL.
 * If the parent is mid-exit but not yet drained, sleep until its
 * state settles and retry rather than returning a transient failure.
 */
proc_t
proc_parent(proc_t p)
{
	proc_t parent;
	proc_t pp;

	proc_list_lock();
loop:
	pp = p->p_pptr;
	parent = proc_ref_locked(pp);
	/* Ref failed on an exiting-but-undrained parent: wait and retry. */
	if ((parent == PROC_NULL) && (pp != PROC_NULL) && (pp->p_stat != SZOMB) && ((pp->p_listflag & P_LIST_EXITED) != 0) && ((pp->p_listflag & P_LIST_CHILDDRAINED)== 0)){
		pp->p_listflag |= P_LIST_CHILDLKWAIT;
		msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
		goto loop;
	}
	proc_list_unlock();
	return(parent);
}
637
638
639 void
640 proc_name(int pid, char * buf, int size)
641 {
642 proc_t p;
643
644 if ((p = proc_find(pid)) != PROC_NULL) {
645 strlcpy(buf, &p->p_comm[0], size);
646 proc_rele(p);
647 }
648 }
649
650 void
651 proc_name_kdp(task_t t, char * buf, int size)
652 {
653 proc_t p = get_bsdtask_info(t);
654
655 if (p != PROC_NULL)
656 strlcpy(buf, &p->p_comm[0], size);
657 }
658
/* Return a pointer into p's command-name buffer (no copy, no reference). */
char *
proc_name_address(void *p)
{
	return &((proc_t)p)->p_comm[0];
}

/* Copy the calling process' command name into buf (size bytes, NUL-terminated). */
void
proc_selfname(char * buf, int size)
{
	proc_t p;

	if ((p = current_proc())!= (proc_t)0) {
		strlcpy(buf, &p->p_comm[0], size);
	}
}
674
675 void
676 proc_signal(int pid, int signum)
677 {
678 proc_t p;
679
680 if ((p = proc_find(pid)) != PROC_NULL) {
681 psignal(p, signum);
682 proc_rele(p);
683 }
684 }
685
686 int
687 proc_issignal(int pid, sigset_t mask)
688 {
689 proc_t p;
690 int error=0;
691
692 if ((p = proc_find(pid)) != PROC_NULL) {
693 error = proc_pendingsignals(p, mask);
694 proc_rele(p);
695 }
696
697 return(error);
698 }
699
700 int
701 proc_noremotehang(proc_t p)
702 {
703 int retval = 0;
704
705 if (p)
706 retval = p->p_flag & P_NOREMOTEHANG;
707 return(retval? 1: 0);
708
709 }
710
711 int
712 proc_exiting(proc_t p)
713 {
714 int retval = 0;
715
716 if (p)
717 retval = p->p_lflag & P_LEXIT;
718 return(retval? 1: 0);
719 }
720
721 int
722 proc_forcequota(proc_t p)
723 {
724 int retval = 0;
725
726 if (p)
727 retval = p->p_flag & P_FORCEQUOTA;
728 return(retval? 1: 0);
729
730 }
731
732 int
733 proc_tbe(proc_t p)
734 {
735 int retval = 0;
736
737 if (p)
738 retval = p->p_flag & P_TBE;
739 return(retval? 1: 0);
740
741 }
742
/*
 * Check whether p has superuser privilege, charging accounting via
 * p->p_acflag. Returns 0 when privileged, otherwise an errno (see suser()).
 */
int
proc_suser(proc_t p)
{
	kauth_cred_t my_cred;
	int error;

	/* Take our own cred reference so it cannot change underneath us. */
	my_cred = kauth_cred_proc_ref(p);
	error = suser(my_cred, &p->p_acflag);
	kauth_cred_unref(&my_cred);
	return(error);
}
754
/* Return the Mach task backing this proc (no reference taken). */
task_t
proc_task(proc_t proc)
{
	return (task_t)proc->task;
}

/*
 * Obtain the first thread in a process
 *
 * XXX This is a bad thing to do; it exists predominantly to support the
 * XXX use of proc_t's in places that should really be using
 * XXX thread_t's instead.  This maintains historical behaviour, but really
 * XXX needs an audit of the context (proxy vs. not) to clean up.
 */
thread_t
proc_thread(proc_t proc)
{
	uthread_t uth = TAILQ_FIRST(&proc->p_uthlist);

	if (uth != NULL)
		return(uth->uu_context.vc_thread);

	return(NULL);
}

/* Return p's credential pointer directly (no reference taken). */
kauth_cred_t
proc_ucred(proc_t p)
{
	return(p->p_ucred);
}

/* Return the uthread of the currently running thread. */
struct uthread *
current_uthread()
{
	thread_t th = current_thread();

	return((struct uthread *)get_bsdthread_info(th));
}
793
794
/* Return nonzero if p runs with a 64-bit address space (P_LP64). */
int
proc_is64bit(proc_t p)
{
	return(IS_64BIT_PROCESS(p));
}

/* Return p's id-version counter; presumably disambiguates pid reuse — confirm. */
int
proc_pidversion(proc_t p)
{
	return(p->p_idversion);
}

/* Return the unique identifier assigned to p. */
uint64_t
proc_uniqueid(proc_t p)
{
	return(p->p_uniqueid);
}

/* Return the unique identifier of the calling process. */
uint64_t
proc_selfuniqueid(void)
{
	proc_t p = current_proc();
	return(p->p_uniqueid);
}
819
/* Fetch the code-signing hash (cdhash) of p's executable vnode into cdhash. */
int
proc_getcdhash(proc_t p, unsigned char *cdhash)
{
	return vn_getcdhash(p->p_textvp, p->p_textoff, cdhash);
}

/*
 * Copy p's executable UUID into uuidbuf.
 * NOTE(review): silently does nothing when size < sizeof(p->p_uuid);
 * the caller's buffer is left unmodified in that case.
 */
void
proc_getexecutableuuid(proc_t p, unsigned char *uuidbuf, unsigned long size)
{
	if (size >= sizeof(p->p_uuid)) {
		memcpy(uuidbuf, p->p_uuid, sizeof(p->p_uuid));
	}
}
833
834
835 void
836 bsd_set_dependency_capable(task_t task)
837 {
838 proc_t p = get_bsdtask_info(task);
839
840 if (p) {
841 OSBitOrAtomic(P_DEPENDENCY_CAPABLE, &p->p_flag);
842 }
843 }
844
845
846 int
847 IS_64BIT_PROCESS(proc_t p)
848 {
849 if (p && (p->p_flag & P_LP64))
850 return(1);
851 else
852 return(0);
853 }
854
855 /*
856 * Locate a process by number
857 */
/*
 * Look up a live process by pid in the pid hash. Caller must hold the
 * proc list lock; no reference is taken. pid 0 maps to kernproc.
 * DEBUG builds also panic if two hashed procs share a pid.
 */
proc_t
pfind_locked(pid_t pid)
{
	proc_t p;
#if DEBUG
	proc_t q;
#endif

	if (!pid)
		return (kernproc);

	for (p = PIDHASH(pid)->lh_first; p != 0; p = p->p_hash.le_next) {
		if (p->p_pid == pid) {
#if DEBUG
			/* sanity: the rest of the chain must not repeat this pid */
			for (q = p->p_hash.le_next; q != 0; q = q->p_hash.le_next) {
				if ((p !=q) && (q->p_pid == pid))
					panic("two procs with same pid %p:%p:%d:%d\n", p, q, p->p_pid, q->p_pid);
			}
#endif
			return (p);
		}
	}
	return (NULL);
}
882
883 /*
884 * Locate a zombie by PID
885 */
886 __private_extern__ proc_t
887 pzfind(pid_t pid)
888 {
889 proc_t p;
890
891
892 proc_list_lock();
893
894 for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next)
895 if (p->p_pid == pid)
896 break;
897
898 proc_list_unlock();
899
900 return (p);
901 }
902
903 /*
904 * Locate a process group by number
905 */
906
/*
 * Look up a process group by id, taking a reference on it.
 * Returns PGRP_NULL if not found or already marked for termination;
 * the caller must release the reference with pg_rele().
 */
struct pgrp *
pgfind(pid_t pgid)
{
	struct pgrp * pgrp;

	proc_list_lock();
	pgrp = pgfind_internal(pgid);
	if ((pgrp == NULL) || ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) != 0))
		pgrp = PGRP_NULL;
	else
		pgrp->pg_refcount++;
	proc_list_unlock();
	return(pgrp);
}
921
922
923
924 struct pgrp *
925 pgfind_internal(pid_t pgid)
926 {
927 struct pgrp *pgrp;
928
929 for (pgrp = PGRPHASH(pgid)->lh_first; pgrp != 0; pgrp = pgrp->pg_hash.le_next)
930 if (pgrp->pg_id == pgid)
931 return (pgrp);
932 return (NULL);
933 }
934
935 void
936 pg_rele(struct pgrp * pgrp)
937 {
938 if(pgrp == PGRP_NULL)
939 return;
940 pg_rele_dropref(pgrp);
941 }
942
/*
 * Drop a reference on pgrp. If this is the final reference and the
 * group is flagged PGRP_FLAG_TERMINATE, hand the reference off to
 * pgdelete_dropref() to tear the group down.
 */
void
pg_rele_dropref(struct pgrp * pgrp)
{
	proc_list_lock();
	if ((pgrp->pg_refcount == 1) && ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) == PGRP_FLAG_TERMINATE)) {
		proc_list_unlock();
		/* consumes our reference */
		pgdelete_dropref(pgrp);
		return;
	}

	pgrp->pg_refcount--;
	proc_list_unlock();
}
956
957 struct session *
958 session_find_internal(pid_t sessid)
959 {
960 struct session *sess;
961
962 for (sess = SESSHASH(sessid)->lh_first; sess != 0; sess = sess->s_hash.le_next)
963 if (sess->s_sid == sessid)
964 return (sess);
965 return (NULL);
966 }
967
968
969 /*
970 * Make a new process ready to become a useful member of society by making it
971 * visible in all the right places and initialize its own lists to empty.
972 *
973 * Parameters: parent The parent of the process to insert
974 * child The child process to insert
975 *
976 * Returns: (void)
977 *
978 * Notes: Insert a child process into the parents process group, assign
979 * the child the parent process pointer and PPID of the parent,
980 * place it on the parents p_children list as a sibling,
981 * initialize its own child list, place it in the allproc list,
982 * insert it in the proper hash bucket, and initialize its
983 * event list.
984 */
void
pinsertchild(proc_t parent, proc_t child)
{
	struct pgrp * pg;

	/* Fresh child: empty child list and event list, link to parent. */
	LIST_INIT(&child->p_children);
	TAILQ_INIT(&child->p_evlist);
	child->p_pptr = parent;
	child->p_ppid = parent->p_pid;

	/* Put the child in the parent's process group. */
	pg = proc_pgrp(parent);
	pgrp_add(pg, parent, child);
	pg_rele(pg);

	proc_list_lock();
	parent->p_childrencnt++;
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);

	LIST_INSERT_HEAD(&allproc, child, p_list);
	/* mark the completion of proc creation */
	child->p_listflag &= ~P_LIST_INCREATE;

	proc_list_unlock();

}
1010
1011 /*
1012 * Move p to a new or existing process group (and session)
1013 *
1014 * Returns: 0 Success
1015 * ESRCH No such process
1016 */
int
enterpgrp(proc_t p, pid_t pgid, int mksess)
{
	struct pgrp *pgrp;		/* target group (allocated below if new) */
	struct pgrp *mypgrp;		/* p's current group, referenced */
	struct session * procsp;	/* p's current session, referenced */

	pgrp = pgfind(pgid);
	mypgrp = proc_pgrp(p);
	procsp = proc_session(p);

#if DIAGNOSTIC
	if (pgrp != NULL && mksess)	/* firewalls */
		panic("enterpgrp: setsid into non-empty pgrp");
	if (SESS_LEADER(p, procsp))
		panic("enterpgrp: session leader attempted setpgrp");
#endif
	if (pgrp == PGRP_NULL) {
		pid_t savepid = p->p_pid;
		proc_t np = PROC_NULL;
		/*
		 * new process group
		 */
#if DIAGNOSTIC
		if (p->p_pid != pgid)
			panic("enterpgrp: new pgrp and pid != pgid");
#endif
		MALLOC_ZONE(pgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP,
		    M_WAITOK);
		if (pgrp == NULL)
			panic("enterpgrp: M_PGRP zone depleted");
		/* Revalidate p after the blocking allocation: it must still exist. */
		if ((np = proc_find(savepid)) == NULL || np != p) {
			if (np != PROC_NULL)
				proc_rele(np);
			if (mypgrp != PGRP_NULL)
				pg_rele(mypgrp);
			if (procsp != SESSION_NULL)
				session_rele(procsp);
			FREE_ZONE(pgrp, sizeof(struct pgrp), M_PGRP);
			return (ESRCH);
		}
		proc_rele(np);
		if (mksess) {
			struct session *sess;

			/*
			 * new session
			 */
			MALLOC_ZONE(sess, struct session *,
			    sizeof(struct session), M_SESSION, M_WAITOK);
			if (sess == NULL)
				panic("enterpgrp: M_SESSION zone depleted");
			sess->s_leader = p;
			sess->s_sid = p->p_pid;
			sess->s_count = 1;
			sess->s_ttyvp = NULL;
			sess->s_ttyp = TTY_NULL;
			sess->s_flags = 0;
			sess->s_listflags = 0;
			sess->s_ttypgrpid = NO_PID;
#if CONFIG_FINE_LOCK_GROUPS
			lck_mtx_init(&sess->s_mlock, proc_mlock_grp, proc_lck_attr);
#else
			lck_mtx_init(&sess->s_mlock, proc_lck_grp, proc_lck_attr);
#endif
			/* Carry the login name over from the old session. */
			bcopy(procsp->s_login, sess->s_login,
			    sizeof(sess->s_login));
			/* A new session leader starts without a controlling tty. */
			OSBitAndAtomic(~((uint32_t)P_CONTROLT), &p->p_flag);
			proc_list_lock();
			LIST_INSERT_HEAD(SESSHASH(sess->s_sid), sess, s_hash);
			proc_list_unlock();
			pgrp->pg_session = sess;
#if DIAGNOSTIC
			if (p != current_proc())
				panic("enterpgrp: mksession and p != curproc");
#endif
		} else {
			/* New group joins p's existing session; take a session ref. */
			proc_list_lock();
			pgrp->pg_session = procsp;

			if ((pgrp->pg_session->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
				panic("enterpgrp: providing ref to terminating session ");
			pgrp->pg_session->s_count++;
			proc_list_unlock();
		}
		pgrp->pg_id = pgid;
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_init(&pgrp->pg_mlock, proc_mlock_grp, proc_lck_attr);
#else
		lck_mtx_init(&pgrp->pg_mlock, proc_lck_grp, proc_lck_attr);
#endif
		LIST_INIT(&pgrp->pg_members);
		pgrp->pg_membercnt = 0;
		pgrp->pg_jobc = 0;
		proc_list_lock();
		pgrp->pg_refcount = 1;
		pgrp->pg_listflags = 0;
		LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
		proc_list_unlock();
	} else if (pgrp == mypgrp) {
		/* Already in the requested group: drop both refs and succeed. */
		pg_rele(pgrp);
		if (mypgrp != NULL)
			pg_rele(mypgrp);
		if (procsp != SESSION_NULL)
			session_rele(procsp);
		return (0);
	}

	if (procsp != SESSION_NULL)
		session_rele(procsp);
	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	fixjobc(p, mypgrp, 0);

	if (mypgrp != PGRP_NULL)
		pg_rele(mypgrp);
	pgrp_replace(p, pgrp);
	pg_rele(pgrp);

	return(0);
}
1142
1143 /*
1144 * remove process from process group
1145 */
1146 int
1147 leavepgrp(proc_t p)
1148 {
1149
1150 pgrp_remove(p);
1151 return (0);
1152 }
1153
1154 /*
1155 * delete a process group
1156 */
static void
pgdelete_dropref(struct pgrp *pgrp)
{
	struct tty *ttyp;
	int emptypgrp = 1;
	struct session *sessp;


	pgrp_lock(pgrp);
	if (pgrp->pg_membercnt != 0) {
		emptypgrp = 0;
	}
	pgrp_unlock(pgrp);

	proc_list_lock();
	/* Consume the caller's reference. */
	pgrp->pg_refcount--;
	if ((emptypgrp == 0) || (pgrp->pg_membercnt != 0)) {
		/* Still has members: nothing to tear down yet. */
		proc_list_unlock();
		return;
	}

	pgrp->pg_listflags |= PGRP_FLAG_TERMINATE;

	if (pgrp->pg_refcount > 0) {
		/* Other references remain; last pg_rele_dropref() finishes it. */
		proc_list_unlock();
		return;
	}

	pgrp->pg_listflags |= PGRP_FLAG_DEAD;
	LIST_REMOVE(pgrp, pg_hash);

	proc_list_unlock();

	/* Detach the group from its controlling terminal, if it is the fg group. */
	ttyp = SESSION_TP(pgrp->pg_session);
	if (ttyp != TTY_NULL) {
		if (ttyp->t_pgrp == pgrp) {
			tty_lock(ttyp);
			/* Re-check after acquiring the lock */
			if (ttyp->t_pgrp == pgrp) {
				ttyp->t_pgrp = NULL;
				pgrp->pg_session->s_ttypgrpid = NO_PID;
			}
			tty_unlock(ttyp);
		}
	}

	proc_list_lock();

	/* Drop the group's session reference, tearing the session down too
	 * if this was the last one. */
	sessp = pgrp->pg_session;
	if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
		panic("pg_deleteref: manipulating refs of already terminating session");
	if (--sessp->s_count == 0) {
		if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
			panic("pg_deleteref: terminating already terminated session");
		sessp->s_listflags |= S_LIST_TERM;
		ttyp = SESSION_TP(sessp);
		LIST_REMOVE(sessp, s_hash);
		proc_list_unlock();
		/* Sever the tty's back-pointer to the dying session. */
		if (ttyp != TTY_NULL) {
			tty_lock(ttyp);
			if (ttyp->t_session == sessp)
				ttyp->t_session = NULL;
			tty_unlock(ttyp);
		}
		proc_list_lock();
		sessp->s_listflags |= S_LIST_DEAD;
		if (sessp->s_count != 0)
			panic("pg_deleteref: freeing session in use");
		proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_destroy(&sessp->s_mlock, proc_mlock_grp);
#else
		lck_mtx_destroy(&sessp->s_mlock, proc_lck_grp);
#endif
		FREE_ZONE(sessp, sizeof(struct session), M_SESSION);
	} else
		proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
	lck_mtx_destroy(&pgrp->pg_mlock, proc_mlock_grp);
#else
	lck_mtx_destroy(&pgrp->pg_mlock, proc_lck_grp);
#endif
	FREE_ZONE(pgrp, sizeof(*pgrp), M_PGRP);
}
1241
1242
1243 /*
1244 * Adjust pgrp jobc counters when specified process changes process group.
1245 * We count the number of processes in each process group that "qualify"
1246 * the group for terminal job control (those with a parent in a different
1247 * process group of the same session). If that count reaches zero, the
1248 * process group becomes orphaned. Check both the specified process'
1249 * process group and that of its children.
1250 * entering == 0 => p is leaving specified group.
1251 * entering == 1 => p is entering specified group.
1252 */
/*
 * Per-child callback for fixjobc()'s proc_childrenwalk(): adjust the
 * child's own process group's job-control count when the child is in a
 * different group but the same session as the group being entered/left.
 * Orphans the child's group when its count drops to zero on leave.
 */
int
fixjob_callback(proc_t p, void * arg)
{
	struct fixjob_iterargs *fp;
	struct pgrp * pg, *hispg;
	struct session * mysession, *hissess;
	int entering;

	fp = (struct fixjob_iterargs *)arg;
	pg = fp->pg;
	mysession = fp->mysession;
	entering = fp->entering;

	/* References are taken here and dropped below. */
	hispg = proc_pgrp(p);
	hissess = proc_session(p);

	if ((hispg != pg) &&
	    (hissess == mysession)) {
		pgrp_lock(hispg);
		if (entering) {
			hispg->pg_jobc++;
			pgrp_unlock(hispg);
		} else if (--hispg->pg_jobc == 0) {
			/* Group just lost job-control eligibility. */
			pgrp_unlock(hispg);
			orphanpg(hispg);
		} else
			pgrp_unlock(hispg);
	}
	if (hissess != SESSION_NULL)
		session_rele(hissess);
	if (hispg != PGRP_NULL)
		pg_rele(hispg);

	return(PROC_RETURNED);
}
1288
/*
 * Adjust job-control eligibility counts when p enters (entering=1) or
 * leaves (entering=0) pgrp. See the block comment above fixjob_callback
 * for the counting rules. May call orphanpg() when a count hits zero.
 */
void
fixjobc(proc_t p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp = PGRP_NULL;
	struct session *hissess = SESSION_NULL;
	struct session *mysession = pgrp->pg_session;
	proc_t parent;
	struct fixjob_iterargs fjarg;

	parent = proc_parent(p);
	if (parent != PROC_NULL) {
		hispgrp = proc_pgrp(parent);
		hissess = proc_session(parent);
		proc_rele(parent);
	}


	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	if ((hispgrp != pgrp) &&
	    (hissess == mysession)) {
		pgrp_lock(pgrp);
		if (entering) {
			pgrp->pg_jobc++;
			pgrp_unlock(pgrp);
		} else if (--pgrp->pg_jobc == 0) {
			pgrp_unlock(pgrp);
			orphanpg(pgrp);
		} else
			pgrp_unlock(pgrp);
	}

	if (hissess != SESSION_NULL)
		session_rele(hissess);
	if (hispgrp != PGRP_NULL)
		pg_rele(hispgrp);

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	fjarg.pg = pgrp;
	fjarg.mysession = mysession;
	fjarg.entering = entering;
	proc_childrenwalk(p, fixjob_callback, &fjarg);
}
1338
1339 /*
1340 * A process group has become orphaned;
1341 * if there are any stopped processes in the group,
1342 * hang-up all process in that group.
1343 */
1344 static void
1345 orphanpg(struct pgrp * pgrp)
1346 {
1347 proc_t p;
1348 pid_t * pid_list;
1349 int count, pidcount, i, alloc_count;
1350
1351 if (pgrp == PGRP_NULL)
1352 return;
1353 count = 0;
1354 pgrp_lock(pgrp);
1355 for (p = pgrp->pg_members.lh_first; p != 0; p = p->p_pglist.le_next) {
1356 if (p->p_stat == SSTOP) {
1357 for (p = pgrp->pg_members.lh_first; p != 0;
1358 p = p->p_pglist.le_next)
1359 count++;
1360 break; /* ??? stops after finding one.. */
1361 }
1362 }
1363 pgrp_unlock(pgrp);
1364
1365 count += 20;
1366 if (count > hard_maxproc)
1367 count = hard_maxproc;
1368 alloc_count = count * sizeof(pid_t);
1369 pid_list = (pid_t *)kalloc(alloc_count);
1370 bzero(pid_list, alloc_count);
1371
1372 pidcount = 0;
1373 pgrp_lock(pgrp);
1374 for (p = pgrp->pg_members.lh_first; p != 0;
1375 p = p->p_pglist.le_next) {
1376 if (p->p_stat == SSTOP) {
1377 for (p = pgrp->pg_members.lh_first; p != 0;
1378 p = p->p_pglist.le_next) {
1379 pid_list[pidcount] = p->p_pid;
1380 pidcount++;
1381 if (pidcount >= count)
1382 break;
1383 }
1384 break; /* ??? stops after finding one.. */
1385 }
1386 }
1387 pgrp_unlock(pgrp);
1388
1389 if (pidcount == 0)
1390 goto out;
1391
1392
1393 for (i = 0; i< pidcount; i++) {
1394 /* No handling or proc0 */
1395 if (pid_list[i] == 0)
1396 continue;
1397 p = proc_find(pid_list[i]);
1398 if (p) {
1399 proc_transwait(p, 0);
1400 pt_setrunnable(p);
1401 psignal(p, SIGHUP);
1402 psignal(p, SIGCONT);
1403 proc_rele(p);
1404 }
1405 }
1406 out:
1407 kfree(pid_list, alloc_count);
1408 return;
1409 }
1410
1411
1412
1413 /* XXX should be __private_extern__ */
1414 int
1415 proc_is_classic(proc_t p)
1416 {
1417 return (p->p_flag & P_TRANSLATED) ? 1 : 0;
1418 }
1419
1420 /* XXX Why does this function exist? Need to kill it off... */
1421 proc_t
1422 current_proc_EXTERNAL(void)
1423 {
1424 return (current_proc());
1425 }
1426
1427 /*
1428 * proc_core_name(name, uid, pid)
1429 * Expand the name described in corefilename, using name, uid, and pid.
1430 * corefilename is a printf-like string, with three format specifiers:
1431 * %N name of process ("name")
1432 * %P process id (pid)
1433 * %U user id (uid)
1434 * For example, "%N.core" is the default; they can be disabled completely
1435 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
1436 * This is controlled by the sysctl variable kern.corefile (see above).
1437 */
/*
 * proc_core_name
 *
 * Expand the kern.corefile template ('corefilename') into cf_name,
 * substituting %N (process name), %P (pid), %U (uid) and %% (literal %).
 * Returns 0 on success, 1 if cf_name is NULL or the expansion would not
 * fit in cf_name_len bytes (logged as "corename is too long").
 */
1438 __private_extern__ int
1439 proc_core_name(const char *name, uid_t uid, pid_t pid, char *cf_name,
1440 size_t cf_name_len)
1441 {
1442 const char *format, *appendstr;
1443 char id_buf[11]; /* Buffer for pid/uid -- max 4B */
1444 size_t i, l, n;
1445 
1446 if (cf_name == NULL)
1447 goto toolong;
1448 
1449 format = corefilename;
/* n tracks bytes written to cf_name; i scans the template */
1450 for (i = 0, n = 0; n < cf_name_len && format[i]; i++) {
1451 switch (format[i]) {
1452 case '%': /* Format character */
1453 i++;
1454 switch (format[i]) {
1455 case '%':
1456 appendstr = "%";
1457 break;
1458 case 'N': /* process name */
1459 appendstr = name;
1460 break;
1461 case 'P': /* process id */
/* NOTE(review): pid_t is signed but printed with %u — harmless for
 * kernel-assigned non-negative pids; confirm if that invariant changes */
1462 snprintf(id_buf, sizeof(id_buf), "%u", pid);
1463 appendstr = id_buf;
1464 break;
1465 case 'U': /* user id */
1466 snprintf(id_buf, sizeof(id_buf), "%u", uid);
1467 appendstr = id_buf;
1468 break;
1469 default:
1470 appendstr = "";
1471 log(LOG_ERR,
1472 "Unknown format character %c in `%s'\n",
1473 format[i], format);
1474 }
1475 l = strlen(appendstr);
/* >= keeps one byte free for the NUL terminator */
1476 if ((n + l) >= cf_name_len)
1477 goto toolong;
1478 bcopy(appendstr, cf_name + n, l);
1479 n += l;
1480 break;
1481 default:
1482 cf_name[n++] = format[i];
1483 }
1484 }
/* loop may have ended on the length limit with template left over */
1485 if (format[i] != '\0')
1486 goto toolong;
1487 return (0);
1488 toolong:
1489 log(LOG_ERR, "pid %ld (%s), uid (%u): corename is too long\n",
1490 (long)pid, name, (uint32_t)uid);
1491 return (1);
1492 }
1493
1494 #if CONFIG_LCTX
1495
/*
 * lctxinit
 *
 * One-time initialization of the login-context subsystem: empty the
 * global context list, zero the count, and create the lock group,
 * attributes, and the mutex guarding the list.  Ordering matters: the
 * group and attribute must exist before lck_mtx_init() uses them.
 */
1496 static void
1497 lctxinit(void)
1498 {
1499 LIST_INIT(&alllctx);
1500 alllctx_cnt = 0;
1501 
1502 /* allocate lctx lock group attribute and group */
1503 lctx_lck_grp_attr = lck_grp_attr_alloc_init();
1504 lck_grp_attr_setstat(lctx_lck_grp_attr);
1505 
1506 lctx_lck_grp = lck_grp_alloc_init("lctx", lctx_lck_grp_attr);
1507 /* Allocate lctx lock attribute */
1508 lctx_lck_attr = lck_attr_alloc_init();
1509 
1510 lck_mtx_init(&alllctx_lock, lctx_lck_grp, lctx_lck_attr);
1511 }
1512
1513 /*
1514 * Locate login context by number.
1515 */
1516 struct lctx *
1517 lcfind(pid_t lcid)
1518 {
1519 struct lctx *l;
1520
1521 ALLLCTX_LOCK;
1522 LIST_FOREACH(l, &alllctx, lc_list) {
1523 if (l->lc_id == lcid) {
1524 LCTX_LOCK(l);
1525 break;
1526 }
1527 }
1528 ALLLCTX_UNLOCK;
1529 return (l);
1530 }
1531
/*
 * Advance lastlcid to the next candidate login-context id, wrapping
 * back to 1 once maxlcid is exceeded (id 0 is never handed out).
 */
1532 #define LCID_INC \
1533 do { \
1534 lastlcid++; \
1535 if (lastlcid > maxlcid) \
1536 lastlcid = 1; \
1537 } while (0) \
1538 
/*
 * lccreate
 *
 * Allocate a new login context with the next unused id, initialize its
 * member list, mutex and (if MACF) label, and insert it on the global
 * list.  Returns the new context (unlocked).
 */
1539 struct lctx *
1540 lccreate(void)
1541 {
1542 struct lctx *l;
1543 pid_t newlcid;
1544 
1545 /* Not very efficient but this isn't a common operation. */
1546 while ((l = lcfind(lastlcid)) != NULL) {
/* lcfind() returned the context locked; drop it and try the next id */
1547 LCTX_UNLOCK(l);
1548 LCID_INC;
1549 }
1550 newlcid = lastlcid;
1551 LCID_INC;
1552 
1553 MALLOC(l, struct lctx *, sizeof(struct lctx), M_LCTX, M_WAITOK|M_ZERO);
1554 l->lc_id = newlcid;
1555 LIST_INIT(&l->lc_members);
1556 lck_mtx_init(&l->lc_mtx, lctx_lck_grp, lctx_lck_attr);
1557 #if CONFIG_MACF
1558 l->lc_label = mac_lctx_label_alloc();
1559 #endif
1560 ALLLCTX_LOCK;
1561 LIST_INSERT_HEAD(&alllctx, l, lc_list);
1562 alllctx_cnt++;
1563 ALLLCTX_UNLOCK;
1564 
1565 return (l);
1566 }
1567
1568 /*
1569 * Call with proc protected (either by being invisible
1570 * or by having the all-login-context lock held) and
1571 * the lctx locked.
1572 *
1573 * Will unlock lctx on return.
1574 */
/*
 * enterlctx
 *
 * Add process 'p' to login context 'l' (no-op if l is NULL), bump the
 * member count, and deliver the MACF create/join notification.  Per the
 * contract in the comment above, 'l' arrives locked and is unlocked
 * before returning.
 */
1575 void
1576 enterlctx (proc_t p, struct lctx *l, __unused int create)
1577 {
1578 if (l == NULL)
1579 return;
1580 
1581 p->p_lctx = l;
1582 LIST_INSERT_HEAD(&l->lc_members, p, p_lclist);
1583 l->lc_mc++;
1584 
1585 #if CONFIG_MACF
1586 if (create)
1587 mac_lctx_notify_create(p, l);
1588 else
1589 mac_lctx_notify_join(p, l);
1590 #endif
1591 LCTX_UNLOCK(l);
1592 
1593 return;
1594 }
1595
1596 /*
1597 * Remove process from login context (if any). Called with p protected by
1598 * the alllctx lock.
1599 */
/*
 * leavelctx
 *
 * Remove process 'p' from its login context, if any.  If p was the last
 * member, the context is unlinked from the global list and destroyed
 * (mutex, MACF label, memory).  Caller must protect p via the alllctx
 * lock per the comment above.
 */
1600 void
1601 leavelctx (proc_t p)
1602 {
1603 struct lctx *l;
1604 
1605 if (p->p_lctx == NULL) {
1606 return;
1607 }
1608 
1609 LCTX_LOCK(p->p_lctx);
1610 l = p->p_lctx;
1611 p->p_lctx = NULL;
1612 LIST_REMOVE(p, p_lclist);
1613 l->lc_mc--;
1614 #if CONFIG_MACF
1615 mac_lctx_notify_leave(p, l);
1616 #endif
1617 if (LIST_EMPTY(&l->lc_members)) {
/* last member gone: tear the context down */
1618 LIST_REMOVE(l, lc_list);
1619 alllctx_cnt--;
1620 LCTX_UNLOCK(l);
1621 lck_mtx_destroy(&l->lc_mtx, lctx_lck_grp);
1622 #if CONFIG_MACF
1623 mac_lctx_label_free(l->lc_label);
1624 l->lc_label = NULL;
1625 #endif
1626 FREE(l, M_LCTX);
1627 } else {
1628 LCTX_UNLOCK(l);
1629 }
1630 return;
1631 }
1632
/*
 * sysctl_kern_lctx
 *
 * sysctl handler for kern.lctx.  KERN_LCTX_ALL copies out the whole
 * login-context table (or, for a size probe, reports room for one extra
 * entry); KERN_LCTX_LCID copies out the single entry named by the lcid
 * argument.  Returns 0 or an errno.
 */
1633 static int
1634 sysctl_kern_lctx SYSCTL_HANDLER_ARGS
1635 {
1636 int *name = (int*) arg1;
1637 u_int namelen = arg2;
1638 struct kinfo_lctx kil;
1639 struct lctx *l;
1640 int error;
1641 
1642 error = 0;
1643 
1644 switch (oidp->oid_number) {
1645 case KERN_LCTX_ALL:
1646 ALLLCTX_LOCK;
1647 /* Request for size. */
1648 if (!req->oldptr) {
/* +1 entry of slack in case the table grows before the real call */
1649 error = SYSCTL_OUT(req, 0,
1650 sizeof(struct kinfo_lctx) * (alllctx_cnt + 1));
1651 goto out;
1652 }
1653 break;
1654 
1655 case KERN_LCTX_LCID:
1656 /* No space */
1657 if (req->oldlen < sizeof(struct kinfo_lctx))
1658 return (ENOMEM);
1659 /* No argument */
1660 if (namelen != 1)
1661 return (EINVAL);
1662 /* No login context */
1663 l = lcfind((pid_t)name[0]);
1664 if (l == NULL)
1665 return (ENOENT);
/* lcfind() returned the context locked */
1666 kil.id = l->lc_id;
1667 kil.mc = l->lc_mc;
1668 LCTX_UNLOCK(l);
1669 return (SYSCTL_OUT(req, (caddr_t)&kil, sizeof(kil)));
1670 
1671 default:
1672 return (EINVAL);
1673 }
1674 
/* KERN_LCTX_ALL falls through to here with ALLLCTX_LOCK held */
1675 /* Provided buffer is too small. */
1676 if (req->oldlen < (sizeof(struct kinfo_lctx) * alllctx_cnt)) {
1677 error = ENOMEM;
1678 goto out;
1679 }
1680 
1681 LIST_FOREACH(l, &alllctx, lc_list) {
1682 LCTX_LOCK(l);
1683 kil.id = l->lc_id;
1684 kil.mc = l->lc_mc;
1685 LCTX_UNLOCK(l);
1686 error = SYSCTL_OUT(req, (caddr_t)&kil, sizeof(kil));
1687 if (error)
1688 break;
1689 }
1690 out:
1691 ALLLCTX_UNLOCK;
1692 
1693 return (error);
1694 }
1695
/*
 * sysctl registrations for the kern.lctx subtree: the node itself, the
 * "all" and per-lcid handlers (both served by sysctl_kern_lctx), and
 * read-only/read-write integers for last id, count, and max id.
 */
1696 SYSCTL_NODE(_kern, KERN_LCTX, lctx, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Login Context");
1697 
1698 SYSCTL_PROC(_kern_lctx, KERN_LCTX_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT | CTLFLAG_LOCKED,
1699 0, 0, sysctl_kern_lctx, "S,lctx",
1700 "Return entire login context table");
1701 SYSCTL_NODE(_kern_lctx, KERN_LCTX_LCID, lcid, CTLFLAG_RD | CTLFLAG_LOCKED,
1702 sysctl_kern_lctx, "Login Context Table");
1703 SYSCTL_INT(_kern_lctx, OID_AUTO, last, CTLFLAG_RD | CTLFLAG_LOCKED, &lastlcid, 0, "");
1704 SYSCTL_INT(_kern_lctx, OID_AUTO, count, CTLFLAG_RD | CTLFLAG_LOCKED, &alllctx_cnt, 0, "");
1705 SYSCTL_INT(_kern_lctx, OID_AUTO, max, CTLFLAG_RW | CTLFLAG_LOCKED, &maxlcid, 0, "");
1706
1707 #endif /* LCTX */
1708
1709 /* Code Signing related routines */
1710
1711 int
1712 csops(__unused proc_t p, struct csops_args *uap, __unused int32_t *retval)
1713 {
1714 return(csops_internal(uap->pid, uap->ops, uap->useraddr,
1715 uap->usersize, USER_ADDR_NULL));
1716 }
1717
1718 int
1719 csops_audittoken(__unused proc_t p, struct csops_audittoken_args *uap, __unused int32_t *retval)
1720 {
1721 if (uap->uaudittoken == USER_ADDR_NULL)
1722 return(EINVAL);
1723 switch (uap->ops) {
1724 case CS_OPS_PIDPATH:
1725 case CS_OPS_ENTITLEMENTS_BLOB:
1726 break;
1727 default:
1728 return(EINVAL);
1729 };
1730
1731 return(csops_internal(uap->pid, uap->ops, uap->useraddr,
1732 uap->usersize, uap->uaudittoken));
1733 }
1734
1735 static int
1736 csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaudittoken)
1737 {
1738 size_t usize = (size_t)CAST_DOWN(size_t, usersize);
1739 proc_t pt;
1740 uint32_t retflags;
1741 int vid, forself;
1742 int error;
1743 vnode_t tvp;
1744 off_t toff;
1745 char * buf;
1746 unsigned char cdhash[SHA1_RESULTLEN];
1747 audit_token_t token;
1748 unsigned int upid=0, uidversion = 0;
1749
1750 forself = error = 0;
1751
1752 if (pid == 0)
1753 pid = proc_selfpid();
1754 if (pid == proc_selfpid())
1755 forself = 1;
1756
1757
1758 /* Pre flight checks for CS_OPS_PIDPATH */
1759 if (ops == CS_OPS_PIDPATH) {
1760 /* usize is unsigned.. */
1761 if (usize > 4 * PATH_MAX)
1762 return(EOVERFLOW);
1763 if (kauth_cred_issuser(kauth_cred_get()) != TRUE)
1764 return(EPERM);
1765 } else {
1766 switch (ops) {
1767 case CS_OPS_STATUS:
1768 case CS_OPS_CDHASH:
1769 case CS_OPS_PIDOFFSET:
1770 case CS_OPS_ENTITLEMENTS_BLOB:
1771 break; /* unrestricted */
1772 default:
1773 if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE)
1774 return(EPERM);
1775 break;
1776 }
1777 }
1778
1779 pt = proc_find(pid);
1780 if (pt == PROC_NULL)
1781 return(ESRCH);
1782
1783 upid = pt->p_pid;
1784 uidversion = pt->p_idversion;
1785 if (uaudittoken != USER_ADDR_NULL) {
1786
1787 error = copyin(uaudittoken, &token, sizeof(audit_token_t));
1788 if (error != 0)
1789 goto out;
1790 /* verify the audit token pid/idversion matches with proc */
1791 if ((token.val[5] != upid) || (token.val[7] != uidversion)) {
1792 error = ESRCH;
1793 goto out;
1794 }
1795 }
1796
1797 switch (ops) {
1798
1799 case CS_OPS_STATUS:
1800 retflags = pt->p_csflags;
1801 if (uaddr != USER_ADDR_NULL)
1802 error = copyout(&retflags, uaddr, sizeof(uint32_t));
1803 break;
1804
1805 case CS_OPS_MARKINVALID:
1806 proc_lock(pt);
1807 if ((pt->p_csflags & CS_VALID) == CS_VALID) { /* is currently valid */
1808 pt->p_csflags &= ~CS_VALID; /* set invalid */
1809 if ((pt->p_csflags & CS_KILL) == CS_KILL) {
1810 pt->p_csflags |= CS_KILLED;
1811 proc_unlock(pt);
1812 if (cs_debug) {
1813 printf("CODE SIGNING: marked invalid by pid %d: "
1814 "p=%d[%s] honoring CS_KILL, final status 0x%x\n",
1815 proc_selfpid(), pt->p_pid, pt->p_comm, pt->p_csflags);
1816 }
1817 psignal(pt, SIGKILL);
1818 } else
1819 proc_unlock(pt);
1820 } else
1821 proc_unlock(pt);
1822
1823 break;
1824
1825 case CS_OPS_MARKHARD:
1826 proc_lock(pt);
1827 pt->p_csflags |= CS_HARD;
1828 if ((pt->p_csflags & CS_VALID) == 0) {
1829 /* @@@ allow? reject? kill? @@@ */
1830 proc_unlock(pt);
1831 error = EINVAL;
1832 goto out;
1833 } else
1834 proc_unlock(pt);
1835 break;
1836
1837 case CS_OPS_MARKKILL:
1838 proc_lock(pt);
1839 pt->p_csflags |= CS_KILL;
1840 if ((pt->p_csflags & CS_VALID) == 0) {
1841 proc_unlock(pt);
1842 psignal(pt, SIGKILL);
1843 } else
1844 proc_unlock(pt);
1845 break;
1846
1847 case CS_OPS_PIDPATH:
1848 tvp = pt->p_textvp;
1849 vid = vnode_vid(tvp);
1850
1851 if (tvp == NULLVP) {
1852 proc_rele(pt);
1853 return(EINVAL);
1854 }
1855
1856 buf = (char *)kalloc(usize);
1857 if (buf == NULL) {
1858 proc_rele(pt);
1859 return(ENOMEM);
1860 }
1861 bzero(buf, usize);
1862
1863 error = vnode_getwithvid(tvp, vid);
1864 if (error == 0) {
1865 int len;
1866 len = usize;
1867 error = vn_getpath(tvp, buf, &len);
1868 vnode_put(tvp);
1869 if (error == 0) {
1870 error = copyout(buf, uaddr, usize);
1871 }
1872 kfree(buf, usize);
1873 }
1874
1875 proc_rele(pt);
1876
1877 return(error);
1878
1879 case CS_OPS_PIDOFFSET:
1880 toff = pt->p_textoff;
1881 proc_rele(pt);
1882 error = copyout(&toff, uaddr, sizeof(toff));
1883 return(error);
1884
1885 case CS_OPS_CDHASH:
1886
1887 /* pt already holds a reference on its p_textvp */
1888 tvp = pt->p_textvp;
1889 toff = pt->p_textoff;
1890
1891 if (tvp == NULLVP || usize != SHA1_RESULTLEN) {
1892 proc_rele(pt);
1893 return EINVAL;
1894 }
1895
1896 error = vn_getcdhash(tvp, toff, cdhash);
1897 proc_rele(pt);
1898
1899 if (error == 0) {
1900 error = copyout(cdhash, uaddr, sizeof (cdhash));
1901 }
1902
1903 return error;
1904
1905 case CS_OPS_ENTITLEMENTS_BLOB: {
1906 char fakeheader[8] = { 0 };
1907 void *start;
1908 size_t length;
1909
1910 if ((pt->p_csflags & CS_VALID) == 0) {
1911 error = EINVAL;
1912 break;
1913 }
1914 if (usize < sizeof(fakeheader)) {
1915 error = ERANGE;
1916 break;
1917 }
1918 if (0 != (error = cs_entitlements_blob_get(pt,
1919 &start, &length)))
1920 break;
1921 /* if no entitlement, fill in zero header */
1922 if (NULL == start) {
1923 start = fakeheader;
1924 length = sizeof(fakeheader);
1925 } else if (usize < length) {
1926 /* ... if input too short, copy out length of entitlement */
1927 uint32_t length32 = htonl((uint32_t)length);
1928 memcpy(&fakeheader[4], &length32, sizeof(length32));
1929
1930 error = copyout(fakeheader, uaddr, sizeof(fakeheader));
1931 if (error == 0)
1932 error = ERANGE; /* input buffer to short, ERANGE signals that */
1933 break;
1934 }
1935 error = copyout(start, uaddr, length);
1936 break;
1937 }
1938
1939 case CS_OPS_MARKRESTRICT:
1940 proc_lock(pt);
1941 pt->p_csflags |= CS_RESTRICT;
1942 proc_unlock(pt);
1943 break;
1944
1945 default:
1946 error = EINVAL;
1947 break;
1948 }
1949 out:
1950 proc_rele(pt);
1951 return(error);
1952 }
1953
1954 int
1955 proc_iterate(flags, callout, arg, filterfn, filterarg)
1956 int flags;
1957 int (*callout)(proc_t, void *);
1958 void * arg;
1959 int (*filterfn)(proc_t, void *);
1960 void * filterarg;
1961 {
1962 proc_t p;
1963 pid_t * pid_list;
1964 int count, pidcount, alloc_count, i, retval;
1965
1966 count = nprocs+ 10;
1967 if (count > hard_maxproc)
1968 count = hard_maxproc;
1969 alloc_count = count * sizeof(pid_t);
1970 pid_list = (pid_t *)kalloc(alloc_count);
1971 bzero(pid_list, alloc_count);
1972
1973
1974 proc_list_lock();
1975
1976
1977 pidcount = 0;
1978 if (flags & PROC_ALLPROCLIST) {
1979 for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
1980 if (p->p_stat == SIDL)
1981 continue;
1982 if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
1983 pid_list[pidcount] = p->p_pid;
1984 pidcount++;
1985 if (pidcount >= count)
1986 break;
1987 }
1988 }
1989 }
1990 if ((pidcount < count ) && (flags & PROC_ZOMBPROCLIST)) {
1991 for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) {
1992 if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
1993 pid_list[pidcount] = p->p_pid;
1994 pidcount++;
1995 if (pidcount >= count)
1996 break;
1997 }
1998 }
1999 }
2000
2001
2002 proc_list_unlock();
2003
2004
2005 for (i = 0; i< pidcount; i++) {
2006 p = proc_find(pid_list[i]);
2007 if (p) {
2008 if ((flags & PROC_NOWAITTRANS) == 0)
2009 proc_transwait(p, 0);
2010 retval = callout(p, arg);
2011
2012 switch (retval) {
2013 case PROC_RETURNED:
2014 case PROC_RETURNED_DONE:
2015 proc_rele(p);
2016 if (retval == PROC_RETURNED_DONE) {
2017 goto out;
2018 }
2019 break;
2020
2021 case PROC_CLAIMED_DONE:
2022 goto out;
2023 case PROC_CLAIMED:
2024 default:
2025 break;
2026 }
2027 } else if (flags & PROC_ZOMBPROCLIST) {
2028 p = proc_find_zombref(pid_list[i]);
2029 if (p != PROC_NULL) {
2030 retval = callout(p, arg);
2031
2032 switch (retval) {
2033 case PROC_RETURNED:
2034 case PROC_RETURNED_DONE:
2035 proc_drop_zombref(p);
2036 if (retval == PROC_RETURNED_DONE) {
2037 goto out;
2038 }
2039 break;
2040
2041 case PROC_CLAIMED_DONE:
2042 goto out;
2043 case PROC_CLAIMED:
2044 default:
2045 break;
2046 }
2047 }
2048 }
2049 }
2050
2051 out:
2052 kfree(pid_list, alloc_count);
2053 return(0);
2054
2055 }
2056
2057
/*
 * proc_scanall (compiled out)
 *
 * Lock-held variant of proc_iterate for trivial, non-blocking callouts:
 * walks allproc/zombproc directly under the proc list lock with no pid
 * snapshot and no per-process references.  Callouts must not block.
 */
2058 #if 0
2059 /* This is for iteration in case of trivial non blocking callouts */
2060 int
2061 proc_scanall(flags, callout, arg)
2062 int flags;
2063 int (*callout)(proc_t, void *);
2064 void * arg;
2065 {
2066 proc_t p;
2067 int retval;
2068 
2069 
2070 proc_list_lock();
2071 
2072 
2073 if (flags & PROC_ALLPROCLIST) {
2074 for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
2075 retval = callout(p, arg);
2076 if (retval == PROC_RETURNED_DONE)
2077 goto out;
2078 }
2079 }
2080 if (flags & PROC_ZOMBPROCLIST) {
2081 for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) {
2082 retval = callout(p, arg);
2083 if (retval == PROC_RETURNED_DONE)
2084 goto out;
2085 }
2086 }
2087 out:
2088 
2089 proc_list_unlock();
2090 
2091 return(0);
2092 }
2093 #endif
2094
2095
/*
 * proc_rebootscan
 *
 * Shutdown-time walk of allproc: for each process accepted by
 * 'filterfn' (NULL means all), take a reference, drop the list lock,
 * wait out any in-flight transition, run 'callout', then RESTART the
 * whole scan from the top (the list may have changed while unlocked).
 * Terminates when a full pass finds no matching process, or when the
 * callout returns PROC_RETURNED_DONE/PROC_CLAIMED_DONE.  Always
 * returns 0.
 */
2096 int
2097 proc_rebootscan(callout, arg, filterfn, filterarg)
2098 int (*callout)(proc_t, void *);
2099 void * arg;
2100 int (*filterfn)(proc_t, void *);
2101 void * filterarg;
2102 {
2103 proc_t p;
2104 int lockheld = 0, retval;
2105 
2106 proc_shutdown_exitcount = 0;
2107 
2108 ps_allprocscan:
2109 
2110 proc_list_lock();
2111 
2112 lockheld = 1;
2113 
2114 for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
2115 if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
/* proc_ref_locked may fail (returns NULL); we still restart the scan */
2116 p = proc_ref_locked(p);
2117 
2118 proc_list_unlock();
2119 lockheld = 0;
2120 
2121 if (p) {
2122 proc_transwait(p, 0);
2123 retval = callout(p, arg);
2124 proc_rele(p);
2125 
2126 switch (retval) {
2127 case PROC_RETURNED_DONE:
2128 case PROC_CLAIMED_DONE:
2129 goto out;
2130 }
2131 }
/* list may have changed while unlocked: rescan from the beginning */
2132 goto ps_allprocscan;
2133 } /* filter pass */
2134 } /* allproc walk thru */
2135 
2136 if (lockheld == 1) {
2137 proc_list_unlock();
2138 lockheld = 0;
2139 }
2140 
2141 out:
2142 return(0);
2143 
2144 }
2145
2146
2147 int
2148 proc_childrenwalk(parent, callout, arg)
2149 struct proc * parent;
2150 int (*callout)(proc_t, void *);
2151 void * arg;
2152 {
2153 register struct proc *p;
2154 pid_t * pid_list;
2155 int count, pidcount, alloc_count, i, retval;
2156
2157 count = nprocs+ 10;
2158 if (count > hard_maxproc)
2159 count = hard_maxproc;
2160 alloc_count = count * sizeof(pid_t);
2161 pid_list = (pid_t *)kalloc(alloc_count);
2162 bzero(pid_list, alloc_count);
2163
2164
2165 proc_list_lock();
2166
2167
2168 pidcount = 0;
2169 for (p = parent->p_children.lh_first; (p != 0); p = p->p_sibling.le_next) {
2170 if (p->p_stat == SIDL)
2171 continue;
2172 pid_list[pidcount] = p->p_pid;
2173 pidcount++;
2174 if (pidcount >= count)
2175 break;
2176 }
2177 proc_list_unlock();
2178
2179
2180 for (i = 0; i< pidcount; i++) {
2181 p = proc_find(pid_list[i]);
2182 if (p) {
2183 proc_transwait(p, 0);
2184 retval = callout(p, arg);
2185
2186 switch (retval) {
2187 case PROC_RETURNED:
2188 case PROC_RETURNED_DONE:
2189 proc_rele(p);
2190 if (retval == PROC_RETURNED_DONE) {
2191 goto out;
2192 }
2193 break;
2194
2195 case PROC_CLAIMED_DONE:
2196 goto out;
2197 case PROC_CLAIMED:
2198 default:
2199 break;
2200 }
2201 }
2202 }
2203
2204 out:
2205 kfree(pid_list, alloc_count);
2206 return(0);
2207
2208 }
2209
2210 /*
2211 */
2212 /* PGRP_BLOCKITERATE is not implemented yet */
/*
 * pgrp_iterate
 *
 * Snapshot the pids of the members of 'pgrp' (optionally filtered by
 * 'filterfn'), then invoke 'callout' on each member found by pid with a
 * reference held, skipping processes that have since changed group.
 * PGRP_DROPREF in 'flags' makes this function consume the caller's
 * group reference.  The PGRP_BLOCKITERATE serialization path is
 * compiled out (serialize is forced to 0).  Always returns 0.
 */
2213 int
2214 pgrp_iterate(pgrp, flags, callout, arg, filterfn, filterarg)
2215 struct pgrp *pgrp;
2216 int flags;
2217 int (*callout)(proc_t, void *);
2218 void * arg;
2219 int (*filterfn)(proc_t, void *);
2220 void * filterarg;
2221 {
2222 proc_t p;
2223 pid_t * pid_list;
2224 int count, pidcount, i, alloc_count;
2225 int retval;
2226 pid_t pgid;
2227 int dropref = flags & PGRP_DROPREF;
2228 #if 0
2229 int serialize = flags & PGRP_BLOCKITERATE;
2230 #else
2231 int serialize = 0;
2232 #endif
2233 
2234 if (pgrp == 0)
2235 return(0);
2236 count = pgrp->pg_membercnt + 10;
2237 if (count > hard_maxproc)
2238 count = hard_maxproc;
2239 alloc_count = count * sizeof(pid_t);
/* NOTE(review): kalloc() result is not checked here; an allocation
 * failure would panic in bzero() — same pattern as other iterators */
2240 pid_list = (pid_t *)kalloc(alloc_count);
2241 bzero(pid_list, alloc_count);
2242 
2243 pgrp_lock(pgrp);
2244 if (serialize != 0) {
2245 while ((pgrp->pg_listflags & PGRP_FLAG_ITERABEGIN) == PGRP_FLAG_ITERABEGIN) {
2246 pgrp->pg_listflags |= PGRP_FLAG_ITERWAIT;
2247 msleep(&pgrp->pg_listflags, &pgrp->pg_mlock, 0, "pgrp_iterate", 0);
2248 }
2249 pgrp->pg_listflags |= PGRP_FLAG_ITERABEGIN;
2250 }
2251 
2252 pgid = pgrp->pg_id;
2253 
2254 pidcount = 0;
2255 for (p = pgrp->pg_members.lh_first; p != 0;
2256 p = p->p_pglist.le_next) {
2257 if ( (filterfn == 0 ) || (filterfn(p, filterarg) != 0)) {
2258 pid_list[pidcount] = p->p_pid;
2259 pidcount++;
2260 if (pidcount >= count)
2261 break;
2262 }
2263 }
2264 
2265 
2266 pgrp_unlock(pgrp);
2267 if ((serialize == 0) && (dropref != 0))
2268 pg_rele(pgrp);
2269 
2270 
2271 for (i = 0; i< pidcount; i++) {
2272 /* No handling or proc0 */
2273 if (pid_list[i] == 0)
2274 continue;
2275 p = proc_find(pid_list[i]);
2276 if (p) {
/* skip processes that left the group after the snapshot */
2277 if (p->p_pgrpid != pgid) {
2278 proc_rele(p);
2279 continue;
2280 }
2281 proc_transwait(p, 0);
2282 retval = callout(p, arg);
2283 
2284 switch (retval) {
2285 case PROC_RETURNED:
2286 case PROC_RETURNED_DONE:
2287 proc_rele(p);
2288 if (retval == PROC_RETURNED_DONE) {
2289 goto out;
2290 }
2291 break;
2292 
2293 case PROC_CLAIMED_DONE:
2294 goto out;
2295 case PROC_CLAIMED:
2296 default:
2297 break;
2298 }
2299 }
2300 }
2301 out:
2302 if (serialize != 0) {
2303 pgrp_lock(pgrp);
2304 pgrp->pg_listflags &= ~PGRP_FLAG_ITERABEGIN;
2305 if ((pgrp->pg_listflags & PGRP_FLAG_ITERWAIT) == PGRP_FLAG_ITERWAIT) {
2306 pgrp->pg_listflags &= ~PGRP_FLAG_ITERWAIT;
2307 wakeup(&pgrp->pg_listflags);
2308 }
2309 pgrp_unlock(pgrp);
2310 if (dropref != 0)
2311 pg_rele(pgrp);
2312 }
2313 kfree(pid_list, alloc_count);
2314 return(0);
2315 }
2316
/*
 * pgrp_add
 *
 * Make 'child' a member of 'pgrp', linking it after 'parent' on the
 * member list when a parent is given (head otherwise), and updating the
 * group's member count and the child's group back-pointers.  Clears a
 * pending PGRP_FLAG_TERMINATE since membership makes the group live
 * again; panics if the group is already PGRP_FLAG_DEAD.
 */
2317 static void
2318 pgrp_add(struct pgrp * pgrp, struct proc * parent, struct proc * child)
2319 {
2320 proc_list_lock();
2321 child->p_pgrp = pgrp;
2322 child->p_pgrpid = pgrp->pg_id;
2323 child->p_listflag |= P_LIST_INPGRP;
2324 /*
2325 * When pgrp is being freed , a process can still
2326 * request addition using setpgid from bash when
2327 * login is terminated (login cycler) return ESRCH
2328 * Safe to hold lock due to refcount on pgrp
2329 */
2330 if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
2331 pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
2332 }
2333 
2334 if ((pgrp->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
2335 panic("pgrp_add : pgrp is dead adding process");
2336 proc_list_unlock();
2337 
2338 pgrp_lock(pgrp);
2339 pgrp->pg_membercnt++;
2340 if ( parent != PROC_NULL) {
2341 LIST_INSERT_AFTER(parent, child, p_pglist);
2342 }else {
2343 LIST_INSERT_HEAD(&pgrp->pg_members, child, p_pglist);
2344 }
2345 pgrp_unlock(pgrp);
2346 
/* re-check: terminate may have been re-set while only pg_mlock was held */
2347 proc_list_lock();
2348 if (((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (pgrp->pg_membercnt != 0)) {
2349 pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
2350 }
2351 proc_list_unlock();
2352 }
2353
/*
 * pgrp_remove
 *
 * Detach process 'p' from its process group: clear the back-pointers
 * under the proc list lock, then unlink from the member list.  If p was
 * the last member the group is torn down via pgdelete_dropref();
 * otherwise the reference taken by proc_pgrp() is simply dropped.
 */
2354 static void
2355 pgrp_remove(struct proc * p)
2356 {
2357 struct pgrp * pg;
2358 
/* takes a reference on the group; consumed below */
2359 pg = proc_pgrp(p);
2360 
2361 proc_list_lock();
2362 #if __PROC_INTERNAL_DEBUG
2363 if ((p->p_listflag & P_LIST_INPGRP) == 0)
2364 panic("removing from pglist but no named ref\n");
2365 #endif
2366 p->p_pgrpid = PGRPID_DEAD;
2367 p->p_listflag &= ~P_LIST_INPGRP;
2368 p->p_pgrp = NULL;
2369 proc_list_unlock();
2370 
2371 if (pg == PGRP_NULL)
2372 panic("pgrp_remove: pg is NULL");
2373 pgrp_lock(pg);
2374 pg->pg_membercnt--;
2375 
2376 if (pg->pg_membercnt < 0)
2377 panic("pgprp: -ve membercnt pgprp:%p p:%p\n",pg, p);
2378 
2379 LIST_REMOVE(p, p_pglist);
2380 if (pg->pg_members.lh_first == 0) {
/* last member: destroy the group (consumes our reference) */
2381 pgrp_unlock(pg);
2382 pgdelete_dropref(pg);
2383 } else {
2384 pgrp_unlock(pg);
2385 pg_rele(pg);
2386 }
2387 }
2388
2389
2390 /* cannot use proc_pgrp as it maybe stalled */
/*
 * pgrp_replace
 *
 * Atomically (from other threads' point of view) move process 'p' from
 * its current process group to 'newpg'.  The P_LIST_PGRPTRANS flag
 * marks the transition; proc_pgrp()/proc_session() callers block on it
 * so they never observe p without a group.  The old group is destroyed
 * if p was its last member.  Cannot use proc_pgrp() here, as noted
 * above, since it would stall on our own transition flag.
 */
2391 static void
2392 pgrp_replace(struct proc * p, struct pgrp * newpg)
2393 {
2394 struct pgrp * oldpg;
2395 
2396 
2397 
2398 proc_list_lock();
2399 
/* wait for any concurrent transition on this process to finish */
2400 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2401 p->p_listflag |= P_LIST_PGRPTRWAIT;
2402 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2403 }
2404 
2405 p->p_listflag |= P_LIST_PGRPTRANS;
2406 
2407 oldpg = p->p_pgrp;
2408 if (oldpg == PGRP_NULL)
2409 panic("pgrp_replace: oldpg NULL");
/* hold the old group across the unlink below */
2410 oldpg->pg_refcount++;
2411 #if __PROC_INTERNAL_DEBUG
2412 if ((p->p_listflag & P_LIST_INPGRP) == 0)
2413 panic("removing from pglist but no named ref\n");
2414 #endif
2415 p->p_pgrpid = PGRPID_DEAD;
2416 p->p_listflag &= ~P_LIST_INPGRP;
2417 p->p_pgrp = NULL;
2418 
2419 proc_list_unlock();
2420 
2421 pgrp_lock(oldpg);
2422 oldpg->pg_membercnt--;
2423 if (oldpg->pg_membercnt < 0)
2424 panic("pgprp: -ve membercnt pgprp:%p p:%p\n",oldpg, p);
2425 LIST_REMOVE(p, p_pglist);
2426 if (oldpg->pg_members.lh_first == 0) {
/* last member: destroy the old group (consumes our reference) */
2427 pgrp_unlock(oldpg);
2428 pgdelete_dropref(oldpg);
2429 } else {
2430 pgrp_unlock(oldpg);
2431 pg_rele(oldpg);
2432 }
2433 
2434 proc_list_lock();
2435 p->p_pgrp = newpg;
2436 p->p_pgrpid = newpg->pg_id;
2437 p->p_listflag |= P_LIST_INPGRP;
2438 /*
2439 * When pgrp is being freed , a process can still
2440 * request addition using setpgid from bash when
2441 * login is terminated (login cycler) return ESRCH
2442 * Safe to hold lock due to refcount on pgrp
2443 */
2444 if ((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
2445 newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
2446 }
2447 
2448 if ((newpg->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
2449 panic("pgrp_add : pgrp is dead adding process");
2450 proc_list_unlock();
2451 
2452 pgrp_lock(newpg);
2453 newpg->pg_membercnt++;
2454 LIST_INSERT_HEAD(&newpg->pg_members, p, p_pglist);
2455 pgrp_unlock(newpg);
2456 
/* re-check terminate, then end the transition and wake any waiters */
2457 proc_list_lock();
2458 if (((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (newpg->pg_membercnt != 0)) {
2459 newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
2460 }
2461 
2462 p->p_listflag &= ~P_LIST_PGRPTRANS;
2463 if ((p->p_listflag & P_LIST_PGRPTRWAIT) == P_LIST_PGRPTRWAIT) {
2464 p->p_listflag &= ~P_LIST_PGRPTRWAIT;
2465 wakeup(&p->p_pgrpid);
2466 
2467 }
2468 proc_list_unlock();
2469 }
2470
/* Acquire a process group's member-list mutex. */
2471 void
2472 pgrp_lock(struct pgrp * pgrp)
2473 {
2474 lck_mtx_lock(&pgrp->pg_mlock);
2475 }
2476 
/* Release a process group's member-list mutex. */
2477 void
2478 pgrp_unlock(struct pgrp * pgrp)
2479 {
2480 lck_mtx_unlock(&pgrp->pg_mlock);
2481 }
2482 
/* Acquire a session's mutex. */
2483 void
2484 session_lock(struct session * sess)
2485 {
2486 lck_mtx_lock(&sess->s_mlock);
2487 }
2488 
2489 
/* Release a session's mutex. */
2490 void
2491 session_unlock(struct session * sess)
2492 {
2493 lck_mtx_unlock(&sess->s_mlock);
2494 }
2495
/*
 * proc_pgrp
 *
 * Return the process group of 'p' with a reference held (caller must
 * pg_rele()), or PGRP_NULL if p is PROC_NULL.  Blocks while p is in a
 * pgrp_replace() transition so a half-moved process is never observed.
 */
2496 struct pgrp *
2497 proc_pgrp(proc_t p)
2498 {
2499 struct pgrp * pgrp;
2500 
2501 if (p == PROC_NULL)
2502 return(PGRP_NULL);
2503 proc_list_lock();
2504 
/* wait out any in-flight group transition for this process */
2505 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2506 p->p_listflag |= P_LIST_PGRPTRWAIT;
2507 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2508 }
2509 
2510 pgrp = p->p_pgrp;
2511 
2512 assert(pgrp != NULL);
2513 
2514 if (pgrp != PGRP_NULL) {
2515 pgrp->pg_refcount++;
2516 if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) != 0)
2517 panic("proc_pgrp: ref being povided for dead pgrp");
2518 }
2519 
2520 proc_list_unlock();
2521 
2522 return(pgrp);
2523 }
2524
/*
 * tty_pgrp
 *
 * Return the foreground process group of tty 'tp' with a reference
 * held (caller must pg_rele()), or PGRP_NULL if the tty has none.
 * Panics if the group is already marked dead.
 */
2525 struct pgrp *
2526 tty_pgrp(struct tty * tp)
2527 {
2528 struct pgrp * pg = PGRP_NULL;
2529 
2530 proc_list_lock();
2531 pg = tp->t_pgrp;
2532 
2533 if (pg != PGRP_NULL) {
2534 if ((pg->pg_listflags & PGRP_FLAG_DEAD) != 0)
2535 panic("tty_pgrp: ref being povided for dead pgrp");
2536 pg->pg_refcount++;
2537 }
2538 proc_list_unlock();
2539 
2540 return(pg);
2541 }
2542
/*
 * proc_session
 *
 * Return the session of 'p' with a reference held (caller must
 * session_rele()), or SESSION_NULL if p is PROC_NULL or has no
 * group/session.  Blocks while p is in a pgrp_replace() transition.
 */
2543 struct session *
2544 proc_session(proc_t p)
2545 {
2546 struct session * sess = SESSION_NULL;
2547 
2548 if (p == PROC_NULL)
2549 return(SESSION_NULL);
2550 
2551 proc_list_lock();
2552 
2553 /* wait during transitions */
2554 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2555 p->p_listflag |= P_LIST_PGRPTRWAIT;
2556 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2557 }
2558 
2559 if ((p->p_pgrp != PGRP_NULL) && ((sess = p->p_pgrp->pg_session) != SESSION_NULL)) {
2560 if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
2561 panic("proc_session:returning sesssion ref on terminating session");
2562 sess->s_count++;
2563 }
2564 proc_list_unlock();
2565 return(sess);
2566 }
2567
/*
 * session_rele
 *
 * Drop one reference on 'sess'.  On the last reference the session is
 * unhashed, its mutex destroyed, and its memory returned to the zone;
 * sanity panics guard against double-termination and concurrent users.
 */
2568 void
2569 session_rele(struct session *sess)
2570 {
2571 proc_list_lock();
2572 if (--sess->s_count == 0) {
2573 if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
2574 panic("session_rele: terminating already terminated session");
2575 sess->s_listflags |= S_LIST_TERM;
2576 LIST_REMOVE(sess, s_hash);
2577 sess->s_listflags |= S_LIST_DEAD;
2578 if (sess->s_count != 0)
2579 panic("session_rele: freeing session in use");
2580 proc_list_unlock();
2581 #if CONFIG_FINE_LOCK_GROUPS
2582 lck_mtx_destroy(&sess->s_mlock, proc_mlock_grp);
2583 #else
2584 lck_mtx_destroy(&sess->s_mlock, proc_lck_grp);
2585 #endif
2586 FREE_ZONE(sess, sizeof(struct session), M_SESSION);
2587 } else
2588 proc_list_unlock();
2589 }
2590
/*
 * proc_transstart
 *
 * Begin a process transition (e.g. exec): wait until no other
 * transition is in flight, then set P_LINTRANSIT and record the owning
 * thread.  'locked' != 0 means the caller already holds the proc lock.
 * Returns 0, or EDEADLK if the in-flight transition has already
 * committed and therefore can never be waited out from here.
 */
2591 int
2592 proc_transstart(proc_t p, int locked)
2593 {
2594 if (locked == 0)
2595 proc_lock(p);
2596 while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
2597 if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT) {
2598 if (locked == 0)
2599 proc_unlock(p);
2600 return EDEADLK;
2601 }
2602 p->p_lflag |= P_LTRANSWAIT;
2603 msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
2604 }
2605 p->p_lflag |= P_LINTRANSIT;
2606 p->p_transholder = current_thread();
2607 if (locked == 0)
2608 proc_unlock(p);
2609 return 0;
2610 }
2611
2612 void
2613 proc_transcommit(proc_t p, int locked)
2614 {
2615 if (locked == 0)
2616 proc_lock(p);
2617
2618 assert ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT);
2619 assert (p->p_transholder == current_thread());
2620 p->p_lflag |= P_LTRANSCOMMIT;
2621
2622 if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
2623 p->p_lflag &= ~P_LTRANSWAIT;
2624 wakeup(&p->p_lflag);
2625 }
2626 if (locked == 0)
2627 proc_unlock(p);
2628 }
2629
2630 void
2631 proc_transend(proc_t p, int locked)
2632 {
2633 if (locked == 0)
2634 proc_lock(p);
2635
2636 p->p_lflag &= ~( P_LINTRANSIT | P_LTRANSCOMMIT);
2637 p->p_transholder = NULL;
2638
2639 if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
2640 p->p_lflag &= ~P_LTRANSWAIT;
2641 wakeup(&p->p_lflag);
2642 }
2643 if (locked == 0)
2644 proc_unlock(p);
2645 }
2646
2647 int
2648 proc_transwait(proc_t p, int locked)
2649 {
2650 if (locked == 0)
2651 proc_lock(p);
2652 while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
2653 if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT && current_proc() == p) {
2654 if (locked == 0)
2655 proc_unlock(p);
2656 return EDEADLK;
2657 }
2658 p->p_lflag |= P_LTRANSWAIT;
2659 msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
2660 }
2661 if (locked == 0)
2662 proc_unlock(p);
2663 return 0;
2664 }
2665
/* Acquire the global lock protecting per-proc knote lists (p_klist). */
void
proc_klist_lock(void)
{
	lck_mtx_lock(proc_klist_mlock);
}
2671
/* Release the global proc knote-list lock taken by proc_klist_lock(). */
void
proc_klist_unlock(void)
{
	lck_mtx_unlock(proc_klist_mlock);
}
2677
/* Post event 'hint' to all knotes attached to process p, under the klist lock. */
void
proc_knote(struct proc * p, long hint)
{
	proc_klist_lock();
	KNOTE(&p->p_klist, hint);
	proc_klist_unlock();
}
2685
/*
 * Detach every knote from p's klist, clearing each knote's back-pointer
 * to the proc first so no knote can reference p after it is reaped.
 */
void
proc_knote_drain(struct proc *p)
{
	struct knote *kn = NULL;

	/*
	 * Clear the proc's klist to avoid references after the proc is reaped.
	 */
	proc_klist_lock();
	/* KNOTE_DETACH removes the head, so SLIST_FIRST advances each pass. */
	while ((kn = SLIST_FIRST(&p->p_klist))) {
		kn->kn_ptr.p_proc = PROC_NULL;
		KNOTE_DETACH(&p->p_klist, kn);
	}
	proc_klist_unlock();
}
2701
/* Code-signing enforcement counters and tunables (see cs_invalid_page below). */
unsigned long cs_procs_killed = 0;		/* processes killed for CS_KILL */
unsigned long cs_procs_invalidated = 0;		/* processes that lost CS_VALID */
int cs_force_kill = 0;		/* vm.cs_force_kill: set CS_KILL on every faulting proc */
int cs_force_hard = 0;		/* vm.cs_force_hard: set CS_HARD on every faulting proc */
int cs_debug = 0;		/* vm.cs_debug: verbose code-signing logging */
SYSCTL_INT(_vm, OID_AUTO, cs_force_kill, CTLFLAG_RW | CTLFLAG_LOCKED, &cs_force_kill, 0, "");
SYSCTL_INT(_vm, OID_AUTO, cs_force_hard, CTLFLAG_RW | CTLFLAG_LOCKED, &cs_force_hard, 0, "");
SYSCTL_INT(_vm, OID_AUTO, cs_debug, CTLFLAG_RW | CTLFLAG_LOCKED, &cs_debug, 0, "");
2710
/*
 * Ask whether process p may keep running with an invalid code signature.
 * With CONFIG_MACF && CONFIG_ENFORCE_SIGNED_CODE, the MAC policy hook
 * decides; if allowed, the CS_KILL/CS_HARD/CS_VALID bits are cleared and
 * the task's map protection switching is disabled.
 *
 * Returns nonzero if the process is allowed to continue (CS_KILL and
 * CS_HARD both clear), 0 otherwise.  Must be called without the proc
 * lock held (asserted below).
 */
int
cs_allow_invalid(struct proc *p)
{
#if MACH_ASSERT
	lck_mtx_assert(&p->p_mlock, LCK_MTX_ASSERT_NOTOWNED);
#endif
#if CONFIG_MACF && CONFIG_ENFORCE_SIGNED_CODE
	/* There needs to be a MAC policy to implement this hook, or else the
	 * kill bits will be cleared here every time. If we have
	 * CONFIG_ENFORCE_SIGNED_CODE, we can assume there is a policy
	 * implementing the hook.
	 */
	if( 0 != mac_proc_check_run_cs_invalid(p)) {
		if(cs_debug) printf("CODE SIGNING: cs_allow_invalid() "
				    "not allowed: pid %d\n",
				    p->p_pid);
		return 0;
	}
	if(cs_debug) printf("CODE SIGNING: cs_allow_invalid() "
			    "allowed: pid %d\n",
			    p->p_pid);
	proc_lock(p);
	p->p_csflags &= ~(CS_KILL | CS_HARD | CS_VALID);
	proc_unlock(p);
	vm_map_switch_protect(get_task_map(p->task), FALSE);
#endif
	/* NOTE(review): p_csflags is re-read here without the proc lock —
	 * presumably acceptable as an advisory snapshot; confirm. */
	return (p->p_csflags & (CS_KILL | CS_HARD)) == 0;
}
2739
/*
 * Handle a code-signing page-validation failure at vaddr in the current
 * process.  Depending on the proc's CS flags:
 *   - CS_KILL:  mark CS_KILLED and send SIGKILL (process dies);
 *   - CS_HARD:  return 1 so the caller fails the mapping operation,
 *               keeping the process valid;
 *   - neither:  clear CS_VALID and return 0 (mapping proceeds, process
 *               is now considered invalid).
 * The cs_force_kill/cs_force_hard sysctls can force the first two modes.
 */
int
cs_invalid_page(
	addr64_t vaddr)
{
	struct proc	*p;
	int		retval;

	p = current_proc();

	/*
	 * XXX revisit locking when proc is no longer protected
	 * by the kernel funnel...
	 */

	/* XXX for testing */
	proc_lock(p);
	if (cs_force_kill)
		p->p_csflags |= CS_KILL;
	if (cs_force_hard)
		p->p_csflags |= CS_HARD;

	/* CS_KILL triggers us to send a kill signal. Nothing else. */
	if (p->p_csflags & CS_KILL) {
		p->p_csflags |= CS_KILLED;
		/* Drop the lock around psignal(); re-taken for the CS_HARD check. */
		proc_unlock(p);
		if (cs_debug) {
			printf("CODE SIGNING: cs_invalid_page(0x%llx): "
			       "p=%d[%s] honoring CS_KILL, final status 0x%x\n",
			       vaddr, p->p_pid, p->p_comm, p->p_csflags);
		}
		cs_procs_killed++;
		psignal(p, SIGKILL);
		proc_lock(p);
	}

	/* CS_HARD means fail the mapping operation so the process stays valid. */
	if (p->p_csflags & CS_HARD) {
		proc_unlock(p);
		if (cs_debug) {
			printf("CODE SIGNING: cs_invalid_page(0x%llx): "
			       "p=%d[%s] honoring CS_HARD\n",
			       vaddr, p->p_pid, p->p_comm);
		}
		retval = 1;
	} else {
		if (p->p_csflags & CS_VALID) {
			/* First invalidation: clear CS_VALID and log unconditionally. */
			p->p_csflags &= ~CS_VALID;

			proc_unlock(p);
			cs_procs_invalidated++;
			printf("CODE SIGNING: cs_invalid_page(0x%llx): "
			       "p=%d[%s] clearing CS_VALID\n",
			       vaddr, p->p_pid, p->p_comm);
		} else {
			proc_unlock(p);
		}

		retval = 0;
	}

	return retval;
}
2802
/* Set the P_LREGISTER flag on process p under the proc lock. */
void
proc_setregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag |= P_LREGISTER;
	proc_unlock(p);
}
2810
/* Clear the P_LREGISTER flag on process p under the proc lock. */
void
proc_resetregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag &= ~P_LREGISTER;
	proc_unlock(p);
}
2818
/* Return the process-group id of p (unlocked snapshot of p_pgrpid). */
pid_t
proc_pgrpid(proc_t p)
{
	return p->p_pgrpid;
}
2824
2825 pid_t
2826 proc_selfpgrpid()
2827 {
2828 return current_proc()->p_pgrpid;
2829 }
2830
2831
2832 /* return control and action states */
2833 int
2834 proc_getpcontrol(int pid, int * pcontrolp)
2835 {
2836 proc_t p;
2837
2838 p = proc_find(pid);
2839 if (p == PROC_NULL)
2840 return(ESRCH);
2841 if (pcontrolp != NULL)
2842 *pcontrolp = p->p_pcaction;
2843
2844 proc_rele(p);
2845 return(0);
2846 }
2847
/*
 * Apply the resource-control action marked on process p (throttle,
 * suspend, or kill) if no action has been taken yet.  num_found points
 * to an int counter of processes acted upon, incremented for each
 * action taken (used by no_paging_space_action via proc_iterate).
 * Always returns PROC_RETURNED.
 */
int
proc_dopcontrol(proc_t p, void *num_found)
{
	int pcontrol;

	proc_lock(p);

	pcontrol = PROC_CONTROL_STATE(p);

	/* Skip processes already in an action state. */
	if (PROC_ACTION_STATE(p) ==0) {
		switch(pcontrol) {
		case P_PCTHROTTLE:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: throttling pid %d (%s)\n", p->p_pid, p->p_comm);
			(*(int *)num_found)++;
			break;

		case P_PCSUSP:
			PROC_SETACTION_STATE(p);
			/* Unlock before task_suspend to avoid holding the proc lock across it. */
			proc_unlock(p);
			printf("low swap: suspending pid %d (%s)\n", p->p_pid, p->p_comm);
			task_suspend(p->task);
			(*(int *)num_found)++;
			break;

		case P_PCKILL:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
			psignal(p, SIGKILL);
			(*(int *)num_found)++;
			break;

		default:
			/* No control state specified: nothing to do. */
			proc_unlock(p);
		}

	} else
		proc_unlock(p);

	return(PROC_RETURNED);
}
2891
2892
2893 /*
2894 * Resume a throttled or suspended process. This is an internal interface that's only
2895 * used by the user level code that presents the GUI when we run out of swap space and
2896 * hence is restricted to processes with superuser privileges.
2897 */
2898
/*
 * Undo a low-swap resource-control action on pid: unthrottle or resume
 * it.  P_PCKILL cannot be undone (the process was signalled); that case
 * is logged and ignored.  Caller must either hold P_LVMRSRCOWNER or
 * pass the superuser check.  Returns 0, ESRCH, or the suser() error.
 */
int
proc_resetpcontrol(int pid)
{
	proc_t p;
	int pcontrol;
	int error;
	proc_t self = current_proc();

	/* if the process has been validated to handle resource control or root is valid one */
	/* 'error' is only assigned (and returned) when the flag check fails. */
	if (((self->p_lflag & P_LVMRSRCOWNER) == 0) && (error = suser(kauth_cred_get(), 0)))
		return error;

	p = proc_find(pid);
	if (p == PROC_NULL)
		return(ESRCH);

	proc_lock(p);

	pcontrol = PROC_CONTROL_STATE(p);

	/* Only processes currently in an action state can be reset. */
	if(PROC_ACTION_STATE(p) !=0) {
		switch(pcontrol) {
		case P_PCTHROTTLE:
			PROC_RESETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: unthrottling pid %d (%s)\n", p->p_pid, p->p_comm);
			break;

		case P_PCSUSP:
			PROC_RESETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: resuming pid %d (%s)\n", p->p_pid, p->p_comm);
			task_resume(p->task);
			break;

		case P_PCKILL:
			/* Huh? */
			/* NOTE(review): sets (not resets) the action state — a killed
			 * process can't be revived, so the state is left marked. */
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: attempt to unkill pid %d (%s) ignored\n", p->p_pid, p->p_comm);
			break;

		default:
			proc_unlock(p);
		}

	} else
		proc_unlock(p);

	proc_rele(p);
	return(0);
}
2951
2952
2953 /*
2954 * Return true if the specified process has an action state specified for it and it isn't
2955 * already in an action state and it's using more physical memory than the specified threshold.
2956 * Note: the memory_threshold argument is specified in bytes and is of type uint64_t.
2957 */
2958
2959 static int
2960 proc_pcontrol_filter(proc_t p, void *memory_thresholdp)
2961 {
2962
2963 return PROC_CONTROL_STATE(p) && /* if there's an action state specified... */
2964 (PROC_ACTION_STATE(p) == 0) && /* and we're not in the action state yet... */
2965 (get_task_resident_size(p->task) > *((uint64_t *)memory_thresholdp)); /* and this proc is over the mem threshold, */
2966 /* then return true to take action on this proc */
2967 }
2968
2969
2970
2971 /*
2972 * Deal with the out of swap space condition. This routine gets called when
2973 * we want to swap something out but there's no more space left. Since this
 * creates a memory deadlock situation, we need to take action to free up
2975 * some memory resources in order to prevent the system from hanging completely.
2976 * The action we take is based on what the system processes running at user level
2977 * have specified. Processes are marked in one of four categories: ones that
2978 * can be killed immediately, ones that should be suspended, ones that should
2979 * be throttled, and all the rest which are basically none of the above. Which
2980 * processes are marked as being in which category is a user level policy decision;
2981 * we just take action based on those decisions here.
2982 */
2983
#define STARTING_PERCENTAGE	50	/* memory threshold expressed as a percentage */
					/* of physical memory */

/* Timestamp of the last low-swap action pass; used to rate-limit below. */
struct timeval	last_no_space_action = {0, 0};

/*
 * Called when the system runs out of swap space.  Walks the proc list and
 * applies each marked process's low-swap action (throttle/suspend/kill),
 * starting with the biggest memory users.  Rate-limited to one pass per
 * 20 seconds.
 */
void
no_paging_space_action(void)
{

	uint64_t	memory_threshold;
	int		num_found;
	struct timeval	now;

	/*
	 * Throttle how often we come through here.  Once every 20 seconds should be plenty.
	 */

	microtime(&now);

	if (now.tv_sec <= last_no_space_action.tv_sec + 20)
		return;

	last_no_space_action = now;

	/*
	 * Examine all processes and find those that have been marked to have some action
	 * taken when swap space runs out.  Of those processes, select one or more and
	 * apply the specified action to them.  The idea is to only take action against
	 * a few processes rather than hitting too many at once.  If the low swap condition
	 * persists, this routine will get called again and we'll take action against more
	 * processes.
	 *
	 * Of the processes that have been marked, we choose which ones to take action
	 * against according to how much physical memory they're presently using.  We
	 * start with the STARTING_PERCENTAGE and any processes using more physical memory
	 * than the percentage threshold will have action taken against it.  If there
	 * are no processes over the threshold, then the threshold is cut in half and we
	 * look again for processes using more than this threshold.  We continue in
	 * this fashion until we find at least one process to take action against.  This
	 * iterative approach is less than ideally efficient, however we only get here
	 * when the system is almost in a memory deadlock and is pretty much just
	 * thrashing if it's doing anything at all.  Therefore, the cpu overhead of
	 * potentially multiple passes here probably isn't relevant.
	 */

	memory_threshold = (sane_size * STARTING_PERCENTAGE) / 100;	/* resident threshold in bytes */

	/* Halve the threshold each pass until at least one process is acted on. */
	for (num_found = 0; num_found == 0; memory_threshold = memory_threshold / 2) {
		proc_iterate(PROC_ALLPROCLIST, proc_dopcontrol, (void *)&num_found, proc_pcontrol_filter, (void *)&memory_threshold);

		/*
		 * If we just looked with memory_threshold == 0, then there's no need to iterate any further since
		 * we won't find any eligible processes at this point.
		 */

		if (memory_threshold == 0) {
			if (num_found == 0)	/* log that we couldn't do anything in this case */
				printf("low swap: unable to find any eligible processes to take action on\n");

			break;
		}
	}
}