/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_proc.c	8.4 (Berkeley) 1/4/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/* HISTORY
 *  04-Aug-97  Umesh Vaishampayan (umeshv@apple.com)
 *	Added current_proc_EXTERNAL() function for the use of kernel
 *	loadable modules.
 *
 *  05-Jun-95  Mac Gillon (mgillon) at NeXT
 *	New version based on 3.3NS and 4.4
 */


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc_internal.h>
#include <sys/acct.h>
#include <sys/wait.h>
#include <sys/file_internal.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/kauth.h>
#include <sys/codesign.h>
#include <sys/kernel_types.h>
#include <sys/ubc.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/coalition.h>
#include <sys/coalition.h>
#include <kern/assert.h>
#include <vm/vm_protos.h>
#include <vm/vm_map.h>          /* vm_map_switch_protect() */
#include <vm/vm_pageout.h>
#include <mach/task.h>
#include <mach/message.h>
#include <sys/priv.h>
#include <sys/proc_info.h>
#include <sys/bsdtask_info.h>
#include <sys/persona.h>
#include <sys/sysent.h>
#include <sys/reason.h>
#include <IOKit/IOBSD.h>        /* IOTaskHasEntitlement() */

#ifdef CONFIG_32BIT_TELEMETRY
#include <sys/kasl.h>
#endif /* CONFIG_32BIT_TELEMETRY */

#if CONFIG_CSR
#include <sys/csr.h>
#endif

#if CONFIG_MEMORYSTATUS
#include <sys/kern_memorystatus.h>
#endif

#if CONFIG_MACF
#include <security/mac_framework.h>
#endif

#include <libkern/crypto/sha1.h>

#ifdef CONFIG_32BIT_TELEMETRY
#define MAX_32BIT_EXEC_SIG_SIZE 160
#endif /* CONFIG_32BIT_TELEMETRY */

/*
 * Structure associated with user caching.
 */
struct uidinfo {
    LIST_ENTRY(uidinfo) ui_hash;
    uid_t ui_uid;
    long ui_proccnt;
};
#define UIHASH(uid)     (&uihashtbl[(uid) & uihash])
LIST_HEAD(uihashhead, uidinfo) * uihashtbl;
u_long uihash;          /* size of hash table - 1 */

/*
 * Other process lists
 */
struct pidhashhead *pidhashtbl;
u_long pidhash;
struct pgrphashhead *pgrphashtbl;
u_long pgrphash;
struct sesshashhead *sesshashtbl;
u_long sesshash;

struct proclist allproc;
struct proclist zombproc;
extern struct tty cons;

extern int cs_debug;

#if DEVELOPMENT || DEBUG
int syscallfilter_disable = 0;
#endif // DEVELOPMENT || DEBUG

#if DEBUG
#define __PROC_INTERNAL_DEBUG 1
#endif
#if CONFIG_COREDUMP
/* Name to give to core files */
#if defined(XNU_TARGET_OS_BRIDGE)
__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/private/var/internal/%N.core"};
#elif CONFIG_EMBEDDED
__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/private/var/cores/%N.core"};
#else
__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/cores/core.%P"};
#endif
#endif

#if PROC_REF_DEBUG
#include <kern/backtrace.h>
#endif

typedef uint64_t unaligned_u64 __attribute__((aligned(1)));

static void orphanpg(struct pgrp * pg);
void proc_name_kdp(task_t t, char * buf, int size);
boolean_t proc_binary_uuid_kdp(task_t task, uuid_t uuid);
int proc_threadname_kdp(void * uth, char * buf, size_t size);
void proc_starttime_kdp(void * p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unaligned_u64 *abstime);
char * proc_name_address(void * p);

static void pgrp_add(struct pgrp * pgrp, proc_t parent, proc_t child);
static void pgrp_remove(proc_t p);
static void pgrp_replace(proc_t p, struct pgrp *pgrp);
static void pgdelete_dropref(struct pgrp *pgrp);
extern void pg_rele_dropref(struct pgrp * pgrp);
static int csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaddittoken);
static boolean_t proc_parent_is_currentproc(proc_t p);

struct fixjob_iterargs {
    struct pgrp * pg;
    struct session * mysession;
    int entering;
};

int fixjob_callback(proc_t, void *);

uint64_t
get_current_unique_pid(void)
{
    proc_t p = current_proc();

    if (p) {
        return p->p_uniqueid;
    } else {
        return 0;
    }
}

/*
 * Initialize global process hashing structures.
 */
void
procinit(void)
{
    LIST_INIT(&allproc);
    LIST_INIT(&zombproc);
    pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
    pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
    sesshashtbl = hashinit(maxproc / 4, M_PROC, &sesshash);
    uihashtbl = hashinit(maxproc / 16, M_PROC, &uihash);
#if CONFIG_PERSONAS
    personas_bootstrap();
#endif
}

/*
 * Change the count associated with the number of processes
 * a given user is using. This routine protects the uihash
 * with the list lock.
 */
int
chgproccnt(uid_t uid, int diff)
{
    struct uidinfo *uip;
    struct uidinfo *newuip = NULL;
    struct uihashhead *uipp;
    int retval;

again:
    proc_list_lock();
    uipp = UIHASH(uid);
    for (uip = uipp->lh_first; uip != 0; uip = uip->ui_hash.le_next) {
        if (uip->ui_uid == uid) {
            break;
        }
    }
    if (uip) {
        uip->ui_proccnt += diff;
        if (uip->ui_proccnt > 0) {
            retval = uip->ui_proccnt;
            proc_list_unlock();
            goto out;
        }
        if (uip->ui_proccnt < 0) {
            panic("chgproccnt: procs < 0");
        }
        LIST_REMOVE(uip, ui_hash);
        retval = 0;
        proc_list_unlock();
        FREE_ZONE(uip, sizeof(*uip), M_PROC);
        goto out;
    }
    if (diff <= 0) {
        if (diff == 0) {
            retval = 0;
            proc_list_unlock();
            goto out;
        }
        panic("chgproccnt: lost user");
    }
    if (newuip != NULL) {
        uip = newuip;
        newuip = NULL;
        LIST_INSERT_HEAD(uipp, uip, ui_hash);
        uip->ui_uid = uid;
        uip->ui_proccnt = diff;
        retval = diff;
        proc_list_unlock();
        goto out;
    }
    proc_list_unlock();
    MALLOC_ZONE(newuip, struct uidinfo *, sizeof(*uip), M_PROC, M_WAITOK);
    if (newuip == NULL) {
        panic("chgproccnt: M_PROC zone depleted");
    }
    goto again;
out:
    if (newuip != NULL) {
        FREE_ZONE(newuip, sizeof(*uip), M_PROC);
    }
    return retval;
}
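
/*
 * Illustrative caller sketch (hypothetical, loosely modeled on the fork
 * path): bump the per-uid count, then roll back if a per-user limit
 * such as maxprocperuid is exceeded.
 *
 *	count = chgproccnt(uid, 1);
 *	if (uid != 0 && count > maxprocperuid) {
 *		(void)chgproccnt(uid, -1);	// undo the tentative increment
 *		return EAGAIN;
 *	}
 */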

/*
 * Is p an inferior of the current process?
 */
int
inferior(proc_t p)
{
    int retval = 0;

    proc_list_lock();
    for (; p != current_proc(); p = p->p_pptr) {
        if (p->p_pid == 0) {
            goto out;
        }
    }
    retval = 1;
out:
    proc_list_unlock();
    return retval;
}

/*
 * Is p an inferior of t ?
 */
int
isinferior(proc_t p, proc_t t)
{
    int retval = 0;
    int nchecked = 0;
    proc_t start = p;

    /* if p==t they are not inferior */
    if (p == t) {
        return 0;
    }

    proc_list_lock();
    for (; p != t; p = p->p_pptr) {
        nchecked++;

        /* Detect here if we're in a cycle */
        if ((p->p_pid == 0) || (p->p_pptr == start) || (nchecked >= nprocs)) {
            goto out;
        }
    }
    retval = 1;
out:
    proc_list_unlock();
    return retval;
}

int
proc_isinferior(int pid1, int pid2)
{
    proc_t p = PROC_NULL;
    proc_t t = PROC_NULL;
    int retval = 0;

    if (((p = proc_find(pid1)) != (proc_t)0) && ((t = proc_find(pid2)) != (proc_t)0)) {
        retval = isinferior(p, t);
    }

    if (p != PROC_NULL) {
        proc_rele(p);
    }
    if (t != PROC_NULL) {
        proc_rele(t);
    }

    return retval;
}

proc_t
proc_find(int pid)
{
    return proc_findinternal(pid, 0);
}

proc_t
proc_findinternal(int pid, int locked)
{
    proc_t p = PROC_NULL;

    if (locked == 0) {
        proc_list_lock();
    }

    p = pfind_locked(pid);
    if ((p == PROC_NULL) || (p != proc_ref_locked(p))) {
        p = PROC_NULL;
    }

    if (locked == 0) {
        proc_list_unlock();
    }

    return p;
}

proc_t
proc_findthread(thread_t thread)
{
    proc_t p = PROC_NULL;
    struct uthread *uth;

    proc_list_lock();
    uth = get_bsdthread_info(thread);
    if (uth && (uth->uu_flag & UT_VFORK)) {
        p = uth->uu_proc;
    } else {
        p = (proc_t)(get_bsdthreadtask_info(thread));
    }
    p = proc_ref_locked(p);
    proc_list_unlock();
    return p;
}

void
uthread_reset_proc_refcount(void *uthread)
{
    uthread_t uth;

    uth = (uthread_t) uthread;
    uth->uu_proc_refcount = 0;

#if PROC_REF_DEBUG
    if (proc_ref_tracking_disabled) {
        return;
    }

    uth->uu_pindex = 0;
#endif
}

#if PROC_REF_DEBUG
int
uthread_get_proc_refcount(void *uthread)
{
    uthread_t uth;

    if (proc_ref_tracking_disabled) {
        return 0;
    }

    uth = (uthread_t) uthread;

    return uth->uu_proc_refcount;
}
#endif

static void
record_procref(proc_t p __unused, int count)
{
    uthread_t uth;

    uth = current_uthread();
    uth->uu_proc_refcount += count;

#if PROC_REF_DEBUG
    if (proc_ref_tracking_disabled) {
        return;
    }

    if (uth->uu_pindex < NUM_PROC_REFS_TO_TRACK) {
        backtrace((uintptr_t *) &uth->uu_proc_pcs[uth->uu_pindex],
            PROC_REF_STACK_DEPTH, NULL);

        uth->uu_proc_ps[uth->uu_pindex] = p;
        uth->uu_pindex++;
    }
#endif
}

static boolean_t
uthread_needs_to_wait_in_proc_refwait(void)
{
    uthread_t uth = current_uthread();

    /*
     * Allow threads holding no proc refs to wait in proc_refwait;
     * allowing threads holding proc refs to wait in proc_refwait
     * causes deadlocks and makes proc_find non-reentrant.
     */
    if (uth->uu_proc_refcount == 0) {
        return TRUE;
    }

    return FALSE;
}

int
proc_rele(proc_t p)
{
    proc_list_lock();
    proc_rele_locked(p);
    proc_list_unlock();

    return 0;
}
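
/*
 * Illustrative usage (hypothetical caller): proc_find() returns a
 * referenced proc_t that must be balanced with proc_rele(), as in
 * proc_isinferior() above:
 *
 *	proc_t p = proc_find(pid);
 *	if (p != PROC_NULL) {
 *		// ... inspect p while the ref pins it ...
 *		proc_rele(p);
 *	}
 */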

proc_t
proc_self(void)
{
    struct proc * p;

    p = current_proc();

    proc_list_lock();
    if (p != proc_ref_locked(p)) {
        p = PROC_NULL;
    }
    proc_list_unlock();
    return p;
}


proc_t
proc_ref_locked(proc_t p)
{
    proc_t p1 = p;
    int pid = proc_pid(p);

retry:
    /*
     * if process still in creation or proc got recycled
     * during msleep then return failure.
     */
    if ((p == PROC_NULL) || (p1 != p) || ((p->p_listflag & P_LIST_INCREATE) != 0)) {
        return PROC_NULL;
    }

    /*
     * Do not return a process marked for termination, or one whose
     * refs are being drained without ref wait. Wait for
     * proc_refdrain_with_refwait to complete if the process is in
     * refdrain and the refwait flag is set, unless the current
     * thread is already holding a proc ref for any proc.
     */
    if ((p->p_stat != SZOMB) &&
        ((p->p_listflag & P_LIST_EXITED) == 0) &&
        ((p->p_listflag & P_LIST_DEAD) == 0) &&
        (((p->p_listflag & (P_LIST_DRAIN | P_LIST_DRAINWAIT)) == 0) ||
        ((p->p_listflag & P_LIST_REFWAIT) != 0))) {
        if ((p->p_listflag & P_LIST_REFWAIT) != 0 && uthread_needs_to_wait_in_proc_refwait()) {
            msleep(&p->p_listflag, proc_list_mlock, 0, "proc_refwait", 0);
            /*
             * the proc might have been recycled since we dropped
             * the proc list lock, get the proc again.
             */
            p = pfind_locked(pid);
            goto retry;
        }
        p->p_refcount++;
        record_procref(p, 1);
    } else {
        p1 = PROC_NULL;
    }

    return p1;
}

void
proc_rele_locked(proc_t p)
{
    if (p->p_refcount > 0) {
        p->p_refcount--;
        record_procref(p, -1);
        if ((p->p_refcount == 0) && ((p->p_listflag & P_LIST_DRAINWAIT) == P_LIST_DRAINWAIT)) {
            p->p_listflag &= ~P_LIST_DRAINWAIT;
            wakeup(&p->p_refcount);
        }
    } else {
        panic("proc_rele_locked -ve ref\n");
    }
}

proc_t
proc_find_zombref(int pid)
{
    proc_t p;

    proc_list_lock();

again:
    p = pfind_locked(pid);

    /* should we bail? */
    if ((p == PROC_NULL)                                    /* not found */
        || ((p->p_listflag & P_LIST_INCREATE) != 0)         /* not created yet */
        || ((p->p_listflag & P_LIST_EXITED) == 0)) {        /* not started exit */
        proc_list_unlock();
        return PROC_NULL;
    }

    /* If someone else is controlling the (unreaped) zombie - wait */
    if ((p->p_listflag & P_LIST_WAITING) != 0) {
        (void)msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
        goto again;
    }
    p->p_listflag |= P_LIST_WAITING;

    proc_list_unlock();

    return p;
}

void
proc_drop_zombref(proc_t p)
{
    proc_list_lock();
    if ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
        p->p_listflag &= ~P_LIST_WAITING;
        wakeup(&p->p_stat);
    }
    proc_list_unlock();
}


void
proc_refdrain(proc_t p)
{
    proc_refdrain_with_refwait(p, FALSE);
}

proc_t
proc_refdrain_with_refwait(proc_t p, boolean_t get_ref_and_allow_wait)
{
    boolean_t initexec = FALSE;
    proc_list_lock();

    p->p_listflag |= P_LIST_DRAIN;
    if (get_ref_and_allow_wait) {
        /*
         * All the calls to proc_ref_locked will wait
         * for the flag to get cleared before returning a ref,
         * unless the current thread is already holding a proc
         * ref for any proc.
         */
        p->p_listflag |= P_LIST_REFWAIT;
        if (p == initproc) {
            initexec = TRUE;
        }
    }

    /* Do not wait in ref drain for launchd exec */
    while (p->p_refcount && !initexec) {
        p->p_listflag |= P_LIST_DRAINWAIT;
        msleep(&p->p_refcount, proc_list_mlock, 0, "proc_refdrain", 0);
    }

    p->p_listflag &= ~P_LIST_DRAIN;
    if (!get_ref_and_allow_wait) {
        p->p_listflag |= P_LIST_DEAD;
    } else {
        /* Return a ref to the caller */
        p->p_refcount++;
        record_procref(p, 1);
    }

    proc_list_unlock();

    if (get_ref_and_allow_wait) {
        return p;
    }
    return NULL;
}

void
proc_refwake(proc_t p)
{
    proc_list_lock();
    p->p_listflag &= ~P_LIST_REFWAIT;
    wakeup(&p->p_listflag);
    proc_list_unlock();
}

proc_t
proc_parentholdref(proc_t p)
{
    proc_t parent = PROC_NULL;
    proc_t pp;
    int loopcnt = 0;


    proc_list_lock();
loop:
    pp = p->p_pptr;
    if ((pp == PROC_NULL) || (pp->p_stat == SZOMB) || ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED))) {
        parent = PROC_NULL;
        goto out;
    }

    if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == P_LIST_CHILDDRSTART) {
        pp->p_listflag |= P_LIST_CHILDDRWAIT;
        msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
        loopcnt++;
        if (loopcnt == 5) {
            parent = PROC_NULL;
            goto out;
        }
        goto loop;
    }

    if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == 0) {
        pp->p_parentref++;
        parent = pp;
        goto out;
    }

out:
    proc_list_unlock();
    return parent;
}
int
proc_parentdropref(proc_t p, int listlocked)
{
    if (listlocked == 0) {
        proc_list_lock();
    }

    if (p->p_parentref > 0) {
        p->p_parentref--;
        if ((p->p_parentref == 0) && ((p->p_listflag & P_LIST_PARENTREFWAIT) == P_LIST_PARENTREFWAIT)) {
            p->p_listflag &= ~P_LIST_PARENTREFWAIT;
            wakeup(&p->p_parentref);
        }
    } else {
        panic("proc_parentdropref -ve ref\n");
    }
    if (listlocked == 0) {
        proc_list_unlock();
    }

    return 0;
}

void
proc_childdrainstart(proc_t p)
{
#if __PROC_INTERNAL_DEBUG
    if ((p->p_listflag & P_LIST_CHILDDRSTART) == P_LIST_CHILDDRSTART) {
        panic("proc_childdrainstart: childdrain already started\n");
    }
#endif
    p->p_listflag |= P_LIST_CHILDDRSTART;
    /* wait for all that hold parentrefs to drop */
    while (p->p_parentref > 0) {
        p->p_listflag |= P_LIST_PARENTREFWAIT;
        msleep(&p->p_parentref, proc_list_mlock, 0, "proc_childdrainstart", 0);
    }
}


void
proc_childdrainend(proc_t p)
{
#if __PROC_INTERNAL_DEBUG
    if (p->p_childrencnt > 0) {
771 panic("exiting: children stil hanging around\n");
    }
#endif
    p->p_listflag |= P_LIST_CHILDDRAINED;
    if ((p->p_listflag & (P_LIST_CHILDLKWAIT | P_LIST_CHILDDRWAIT)) != 0) {
        p->p_listflag &= ~(P_LIST_CHILDLKWAIT | P_LIST_CHILDDRWAIT);
        wakeup(&p->p_childrencnt);
    }
}

void
proc_checkdeadrefs(__unused proc_t p)
{
#if __PROC_INTERNAL_DEBUG
    if ((p->p_listflag & P_LIST_INHASH) != 0) {
        panic("proc being freed and still in hash %p: %u\n", p, p->p_listflag);
    }
    if (p->p_childrencnt != 0) {
        panic("proc being freed and pending children cnt %p:%d\n", p, p->p_childrencnt);
    }
    if (p->p_refcount != 0) {
        panic("proc being freed and pending refcount %p:%d\n", p, p->p_refcount);
    }
    if (p->p_parentref != 0) {
        panic("proc being freed and pending parentrefs %p:%d\n", p, p->p_parentref);
    }
#endif
}

int
proc_pid(proc_t p)
{
    if (p != NULL) {
        return p->p_pid;
    }
    return -1;
}

int
proc_ppid(proc_t p)
{
    if (p != NULL) {
        return p->p_ppid;
    }
    return -1;
}

int
proc_original_ppid(proc_t p)
{
    if (p != NULL) {
        return p->p_original_ppid;
    }
    return -1;
}

int
proc_selfpid(void)
{
    return current_proc()->p_pid;
}

int
proc_selfppid(void)
{
    return current_proc()->p_ppid;
}

uint64_t
proc_selfcsflags(void)
{
    return (uint64_t)current_proc()->p_csflags;
}

int
proc_csflags(proc_t p, uint64_t *flags)
{
    if (p && flags) {
        *flags = (uint64_t)p->p_csflags;
        return 0;
    }
    return EINVAL;
}

uint32_t
proc_platform(proc_t p)
{
    if (p != NULL) {
        return p->p_platform;
    }
    return (uint32_t)-1;
}

uint32_t
proc_sdk(proc_t p)
{
    if (p != NULL) {
        return p->p_sdk;
    }
    return (uint32_t)-1;
}

#if CONFIG_DTRACE
static proc_t
dtrace_current_proc_vforking(void)
{
    thread_t th = current_thread();
    struct uthread *ut = get_bsdthread_info(th);

    if (ut &&
        ((ut->uu_flag & (UT_VFORK | UT_VFORKING)) == (UT_VFORK | UT_VFORKING))) {
        /*
         * Handle the narrow window where we're in the vfork syscall,
         * but we're not quite ready to claim (in particular, to DTrace)
         * that we're running as the child.
         */
        return get_bsdtask_info(get_threadtask(th));
    }
    return current_proc();
}

int
dtrace_proc_selfpid(void)
{
    return dtrace_current_proc_vforking()->p_pid;
}

int
dtrace_proc_selfppid(void)
{
    return dtrace_current_proc_vforking()->p_ppid;
}

uid_t
dtrace_proc_selfruid(void)
{
    return dtrace_current_proc_vforking()->p_ruid;
}
#endif /* CONFIG_DTRACE */

proc_t
proc_parent(proc_t p)
{
    proc_t parent;
    proc_t pp;

    proc_list_lock();
loop:
    pp = p->p_pptr;
    parent = proc_ref_locked(pp);
    if ((parent == PROC_NULL) && (pp != PROC_NULL) && (pp->p_stat != SZOMB) && ((pp->p_listflag & P_LIST_EXITED) != 0) && ((pp->p_listflag & P_LIST_CHILDDRAINED) == 0)) {
        pp->p_listflag |= P_LIST_CHILDLKWAIT;
        msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
        goto loop;
    }
    proc_list_unlock();
    return parent;
}

static boolean_t
proc_parent_is_currentproc(proc_t p)
{
    boolean_t ret = FALSE;

    proc_list_lock();
    if (p->p_pptr == current_proc()) {
        ret = TRUE;
    }

    proc_list_unlock();
    return ret;
}

void
proc_name(int pid, char * buf, int size)
{
    proc_t p;

    if (size <= 0) {
        return;
    }

    bzero(buf, size);

    if ((p = proc_find(pid)) != PROC_NULL) {
        strlcpy(buf, &p->p_comm[0], size);
        proc_rele(p);
    }
}

void
proc_name_kdp(task_t t, char * buf, int size)
{
    proc_t p = get_bsdtask_info(t);
    if (p == PROC_NULL) {
        return;
    }

    if ((size_t)size > sizeof(p->p_comm)) {
        strlcpy(buf, &p->p_name[0], MIN((int)sizeof(p->p_name), size));
    } else {
        strlcpy(buf, &p->p_comm[0], MIN((int)sizeof(p->p_comm), size));
    }
}

boolean_t
proc_binary_uuid_kdp(task_t task, uuid_t uuid)
{
    proc_t p = get_bsdtask_info(task);
    if (p == PROC_NULL) {
        return FALSE;
    }

    proc_getexecutableuuid(p, uuid, sizeof(uuid_t));

    return TRUE;
}

int
proc_threadname_kdp(void * uth, char * buf, size_t size)
{
    if (size < MAXTHREADNAMESIZE) {
        /* this is really just a protective measure for the future in
         * case the thread name size in stackshot gets out of sync with
         * the BSD max thread name size. Note that bsd_getthreadname
         * doesn't take input buffer size into account. */
        return -1;
    }

    if (uth != NULL) {
        bsd_getthreadname(uth, buf);
    }
    return 0;
}


/* Note that this function is generally going to be called from stackshot,
 * and the arguments will be coming from a struct which is declared packed;
 * thus the input arguments will in general be unaligned. We have to handle
 * that here. */
void
proc_starttime_kdp(void *p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unaligned_u64 *abstime)
{
    proc_t pp = (proc_t)p;
    if (pp != PROC_NULL) {
        if (tv_sec != NULL) {
            *tv_sec = pp->p_start.tv_sec;
        }
        if (tv_usec != NULL) {
            *tv_usec = pp->p_start.tv_usec;
        }
        if (abstime != NULL) {
            if (pp->p_stats != NULL) {
                *abstime = pp->p_stats->ps_start;
            } else {
                *abstime = 0;
            }
        }
    }
}

char *
proc_name_address(void *p)
{
    return &((proc_t)p)->p_comm[0];
}

char *
proc_best_name(proc_t p)
{
    if (p->p_name[0] != 0) {
        return &p->p_name[0];
    }
    return &p->p_comm[0];
}

void
proc_selfname(char * buf, int size)
{
    proc_t p;

    if ((p = current_proc()) != (proc_t)0) {
        strlcpy(buf, &p->p_comm[0], size);
    }
}

void
proc_signal(int pid, int signum)
{
    proc_t p;

    if ((p = proc_find(pid)) != PROC_NULL) {
        psignal(p, signum);
        proc_rele(p);
    }
}

int
proc_issignal(int pid, sigset_t mask)
{
    proc_t p;
    int error = 0;

    if ((p = proc_find(pid)) != PROC_NULL) {
        error = proc_pendingsignals(p, mask);
        proc_rele(p);
    }

    return error;
}

int
proc_noremotehang(proc_t p)
{
    int retval = 0;

    if (p) {
        retval = p->p_flag & P_NOREMOTEHANG;
    }
    return retval ? 1 : 0;
}

int
proc_exiting(proc_t p)
{
    int retval = 0;

    if (p) {
        retval = p->p_lflag & P_LEXIT;
    }
    return retval ? 1 : 0;
}

int
proc_in_teardown(proc_t p)
{
    int retval = 0;

    if (p) {
        retval = p->p_lflag & P_LPEXIT;
    }
    return retval ? 1 : 0;
}

int
proc_forcequota(proc_t p)
{
    int retval = 0;

    if (p) {
        retval = p->p_flag & P_FORCEQUOTA;
    }
    return retval ? 1 : 0;
}

int
proc_suser(proc_t p)
{
    kauth_cred_t my_cred;
    int error;

    my_cred = kauth_cred_proc_ref(p);
    error = suser(my_cred, &p->p_acflag);
    kauth_cred_unref(&my_cred);
    return error;
}

task_t
proc_task(proc_t proc)
{
    return (task_t)proc->task;
}

/*
 * Obtain the first thread in a process
 *
 * XXX This is a bad thing to do; it exists predominantly to support the
 * XXX use of proc_t's in places that should really be using
 * XXX thread_t's instead. This maintains historical behaviour, but really
 * XXX needs an audit of the context (proxy vs. not) to clean up.
 */
thread_t
proc_thread(proc_t proc)
{
    uthread_t uth = TAILQ_FIRST(&proc->p_uthlist);

    if (uth != NULL) {
        return uth->uu_context.vc_thread;
    }

    return NULL;
}

kauth_cred_t
proc_ucred(proc_t p)
{
    return p->p_ucred;
}

struct uthread *
current_uthread()
{
    thread_t th = current_thread();

    return (struct uthread *)get_bsdthread_info(th);
}


int
proc_is64bit(proc_t p)
{
    return IS_64BIT_PROCESS(p);
}

int
proc_is64bit_data(proc_t p)
{
    assert(p->task);
    return (int)task_get_64bit_data(p->task);
}

int
proc_pidversion(proc_t p)
{
    return p->p_idversion;
}

uint32_t
proc_persona_id(proc_t p)
{
    return (uint32_t)persona_id_from_proc(p);
}

uint32_t
proc_getuid(proc_t p)
{
    return p->p_uid;
}

uint32_t
proc_getgid(proc_t p)
{
    return p->p_gid;
}

uint64_t
proc_uniqueid(proc_t p)
{
    return p->p_uniqueid;
}

uint64_t
proc_puniqueid(proc_t p)
{
    return p->p_puniqueid;
}

void
proc_coalitionids(__unused proc_t p, __unused uint64_t ids[COALITION_NUM_TYPES])
{
#if CONFIG_COALITIONS
    task_coalition_ids(p->task, ids);
#else
    memset(ids, 0, sizeof(uint64_t[COALITION_NUM_TYPES]));
#endif
    return;
}

uint64_t
proc_was_throttled(proc_t p)
{
    return p->was_throttled;
}

uint64_t
proc_did_throttle(proc_t p)
{
    return p->did_throttle;
}

int
proc_getcdhash(proc_t p, unsigned char *cdhash)
{
    return vn_getcdhash(p->p_textvp, p->p_textoff, cdhash);
}

int
proc_exitstatus(proc_t p)
{
    return p->p_xstat & 0xffff;
}

void
proc_getexecutableuuid(proc_t p, unsigned char *uuidbuf, unsigned long size)
{
    if (size >= sizeof(p->p_uuid)) {
        memcpy(uuidbuf, p->p_uuid, sizeof(p->p_uuid));
    }
}

/* Return vnode for executable with an iocount. Must be released with vnode_put() */
vnode_t
proc_getexecutablevnode(proc_t p)
{
    vnode_t tvp = p->p_textvp;

    if (tvp != NULLVP) {
        if (vnode_getwithref(tvp) == 0) {
            return tvp;
        }
    }

    return NULLVP;
}
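
/*
 * Illustrative usage (hypothetical caller): balance the iocount taken
 * above with vnode_put() once the vnode is no longer needed.
 *
 *	vnode_t vp = proc_getexecutablevnode(p);
 *	if (vp != NULLVP) {
 *		// ... use vp ...
 *		vnode_put(vp);	// release the iocount
 *	}
 */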

int
proc_gettty(proc_t p, vnode_t *vp)
{
    if (!p || !vp) {
        return EINVAL;
    }

    struct session *procsp = proc_session(p);
    int err = EINVAL;

    if (procsp != SESSION_NULL) {
        session_lock(procsp);
        vnode_t ttyvp = procsp->s_ttyvp;
        int ttyvid = procsp->s_ttyvid;
        session_unlock(procsp);

        if (ttyvp) {
            if (vnode_getwithvid(ttyvp, ttyvid) == 0) {
                *vp = procsp->s_ttyvp;
                err = 0;
            }
        } else {
            err = ENOENT;
        }

        session_rele(procsp);
    }

    return err;
}

int
proc_gettty_dev(proc_t p, dev_t *dev)
{
    struct session *procsp = proc_session(p);
    boolean_t has_tty = FALSE;

    if (procsp != SESSION_NULL) {
        session_lock(procsp);

        struct tty * tp = SESSION_TP(procsp);
        if (tp != TTY_NULL) {
            *dev = tp->t_dev;
            has_tty = TRUE;
        }

        session_unlock(procsp);
        session_rele(procsp);
    }

    if (has_tty) {
        return 0;
    } else {
        return EINVAL;
    }
}

int
proc_selfexecutableargs(uint8_t *buf, size_t *buflen)
{
    proc_t p = current_proc();

    // buflen must always be provided
    if (buflen == NULL) {
        return EINVAL;
    }

    // If a buf is provided, there must be at least enough room to fit argc
    if (buf && *buflen < sizeof(p->p_argc)) {
        return EINVAL;
    }

    if (!p->user_stack) {
        return EINVAL;
    }

    if (buf == NULL) {
        *buflen = p->p_argslen + sizeof(p->p_argc);
        return 0;
    }

    // Copy in argc to the first 4 bytes
    memcpy(buf, &p->p_argc, sizeof(p->p_argc));

    if (*buflen > sizeof(p->p_argc) && p->p_argslen > 0) {
        // See memory layout comment in kern_exec.c:exec_copyout_strings()
        // We want to copy starting from `p_argslen` bytes away from top of stack
        return copyin(p->user_stack - p->p_argslen,
            buf + sizeof(p->p_argc),
            MIN(p->p_argslen, *buflen - sizeof(p->p_argc)));
    } else {
        return 0;
    }
}
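
/*
 * Hypothetical two-call pattern: a NULL buf stores the required size
 * through buflen; a second call with a buffer of that size fetches the
 * argc word followed by the argument strings.
 *
 *	size_t len = 0;
 *	proc_selfexecutableargs(NULL, &len);	// len = sizeof(argc) + args
 *	// ... allocate len bytes as argbuf ...
 *	proc_selfexecutableargs(argbuf, &len);
 */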

off_t
proc_getexecutableoffset(proc_t p)
{
    return p->p_textoff;
}

void
bsd_set_dependency_capable(task_t task)
{
    proc_t p = get_bsdtask_info(task);

    if (p) {
        OSBitOrAtomic(P_DEPENDENCY_CAPABLE, &p->p_flag);
    }
}


#ifndef __arm__
int
IS_64BIT_PROCESS(proc_t p)
{
    if (p && (p->p_flag & P_LP64)) {
        return 1;
    } else {
        return 0;
    }
}
#endif

/*
 * Locate a process by number
 */
proc_t
pfind_locked(pid_t pid)
{
    proc_t p;
#if DEBUG
    proc_t q;
#endif

    if (!pid) {
        return kernproc;
    }

    for (p = PIDHASH(pid)->lh_first; p != 0; p = p->p_hash.le_next) {
        if (p->p_pid == pid) {
#if DEBUG
            for (q = p->p_hash.le_next; q != 0; q = q->p_hash.le_next) {
                if ((p != q) && (q->p_pid == pid)) {
                    panic("two procs with same pid %p:%p:%d:%d\n", p, q, p->p_pid, q->p_pid);
                }
            }
#endif
            return p;
        }
    }
    return NULL;
}

/*
 * Locate a zombie by PID
 */
__private_extern__ proc_t
pzfind(pid_t pid)
{
    proc_t p;


    proc_list_lock();

    for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) {
        if (p->p_pid == pid) {
            break;
        }
    }

    proc_list_unlock();

    return p;
}

/*
 * Locate a process group by number
 */

struct pgrp *
pgfind(pid_t pgid)
{
    struct pgrp * pgrp;

    proc_list_lock();
    pgrp = pgfind_internal(pgid);
    if ((pgrp == NULL) || ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) != 0)) {
        pgrp = PGRP_NULL;
    } else {
        pgrp->pg_refcount++;
    }
    proc_list_unlock();
    return pgrp;
}



struct pgrp *
pgfind_internal(pid_t pgid)
{
    struct pgrp *pgrp;

    for (pgrp = PGRPHASH(pgid)->lh_first; pgrp != 0; pgrp = pgrp->pg_hash.le_next) {
        if (pgrp->pg_id == pgid) {
            return pgrp;
        }
    }
    return NULL;
}

void
pg_rele(struct pgrp * pgrp)
{
    if (pgrp == PGRP_NULL) {
        return;
    }
    pg_rele_dropref(pgrp);
}

void
pg_rele_dropref(struct pgrp * pgrp)
{
    proc_list_lock();
    if ((pgrp->pg_refcount == 1) && ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) == PGRP_FLAG_TERMINATE)) {
        proc_list_unlock();
        pgdelete_dropref(pgrp);
        return;
    }

    pgrp->pg_refcount--;
    proc_list_unlock();
}

struct session *
session_find_internal(pid_t sessid)
{
    struct session *sess;

    for (sess = SESSHASH(sessid)->lh_first; sess != 0; sess = sess->s_hash.le_next) {
        if (sess->s_sid == sessid) {
            return sess;
        }
    }
    return NULL;
}


/*
 * Make a new process ready to become a useful member of society by making it
 * visible in all the right places and initialize its own lists to empty.
 *
 * Parameters:	parent			The parent of the process to insert
 *		child			The child process to insert
 *
 * Returns:	(void)
 *
 * Notes:	Insert a child process into the parents process group, assign
 *		the child the parent process pointer and PPID of the parent,
 *		place it on the parents p_children list as a sibling,
 *		initialize its own child list, place it in the allproc list,
 *		insert it in the proper hash bucket, and initialize its
 *		event list.
 */
void
pinsertchild(proc_t parent, proc_t child)
{
    struct pgrp * pg;

    LIST_INIT(&child->p_children);
    TAILQ_INIT(&child->p_evlist);
    child->p_pptr = parent;
    child->p_ppid = parent->p_pid;
    child->p_original_ppid = parent->p_pid;
    child->p_puniqueid = parent->p_uniqueid;
    child->p_xhighbits = 0;

    pg = proc_pgrp(parent);
    pgrp_add(pg, parent, child);
    pg_rele(pg);

    proc_list_lock();

#if CONFIG_MEMORYSTATUS
    memorystatus_add(child, TRUE);
#endif

    parent->p_childrencnt++;
    LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);

    LIST_INSERT_HEAD(&allproc, child, p_list);
    /* mark the completion of proc creation */
    child->p_listflag &= ~P_LIST_INCREATE;

    proc_list_unlock();
}

/*
 * Move p to a new or existing process group (and session)
 *
 * Returns:	0			Success
 *		ESRCH			No such process
 */
int
enterpgrp(proc_t p, pid_t pgid, int mksess)
{
    struct pgrp *pgrp;
    struct pgrp *mypgrp;
    struct session * procsp;

    pgrp = pgfind(pgid);
    mypgrp = proc_pgrp(p);
    procsp = proc_session(p);

#if DIAGNOSTIC
    if (pgrp != NULL && mksess) {   /* firewalls */
        panic("enterpgrp: setsid into non-empty pgrp");
    }
    if (SESS_LEADER(p, procsp)) {
        panic("enterpgrp: session leader attempted setpgrp");
    }
#endif
    if (pgrp == PGRP_NULL) {
        pid_t savepid = p->p_pid;
        proc_t np = PROC_NULL;
        /*
         * new process group
         */
#if DIAGNOSTIC
        if (p->p_pid != pgid) {
            panic("enterpgrp: new pgrp and pid != pgid");
        }
#endif
        MALLOC_ZONE(pgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP,
            M_WAITOK);
        if (pgrp == NULL) {
            panic("enterpgrp: M_PGRP zone depleted");
        }
        if ((np = proc_find(savepid)) == NULL || np != p) {
            if (np != PROC_NULL) {
                proc_rele(np);
            }
            if (mypgrp != PGRP_NULL) {
                pg_rele(mypgrp);
            }
            if (procsp != SESSION_NULL) {
                session_rele(procsp);
            }
            FREE_ZONE(pgrp, sizeof(struct pgrp), M_PGRP);
            return ESRCH;
        }
        proc_rele(np);
        if (mksess) {
            struct session *sess;

            /*
             * new session
             */
            MALLOC_ZONE(sess, struct session *,
                sizeof(struct session), M_SESSION, M_WAITOK);
            if (sess == NULL) {
                panic("enterpgrp: M_SESSION zone depleted");
            }
            sess->s_leader = p;
            sess->s_sid = p->p_pid;
            sess->s_count = 1;
            sess->s_ttyvp = NULL;
            sess->s_ttyp = TTY_NULL;
            sess->s_flags = 0;
            sess->s_listflags = 0;
            sess->s_ttypgrpid = NO_PID;

            lck_mtx_init(&sess->s_mlock, proc_mlock_grp, proc_lck_attr);

            bcopy(procsp->s_login, sess->s_login,
                sizeof(sess->s_login));
            OSBitAndAtomic(~((uint32_t)P_CONTROLT), &p->p_flag);
            proc_list_lock();
            LIST_INSERT_HEAD(SESSHASH(sess->s_sid), sess, s_hash);
            proc_list_unlock();
            pgrp->pg_session = sess;
#if DIAGNOSTIC
            if (p != current_proc()) {
                panic("enterpgrp: mksession and p != curproc");
            }
#endif
        } else {
            proc_list_lock();
            pgrp->pg_session = procsp;

            if ((pgrp->pg_session->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
                panic("enterpgrp: providing ref to terminating session ");
            }
            pgrp->pg_session->s_count++;
            proc_list_unlock();
        }
        pgrp->pg_id = pgid;

        lck_mtx_init(&pgrp->pg_mlock, proc_mlock_grp, proc_lck_attr);

        LIST_INIT(&pgrp->pg_members);
        pgrp->pg_membercnt = 0;
        pgrp->pg_jobc = 0;
        proc_list_lock();
        pgrp->pg_refcount = 1;
        pgrp->pg_listflags = 0;
        LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
        proc_list_unlock();
    } else if (pgrp == mypgrp) {
        pg_rele(pgrp);
        if (mypgrp != NULL) {
            pg_rele(mypgrp);
        }
        if (procsp != SESSION_NULL) {
            session_rele(procsp);
        }
        return 0;
    }

    if (procsp != SESSION_NULL) {
        session_rele(procsp);
    }
    /*
     * Adjust eligibility of affected pgrps to participate in job control.
     * Increment eligibility counts before decrementing, otherwise we
     * could reach 0 spuriously during the first call.
     */
    fixjobc(p, pgrp, 1);
    fixjobc(p, mypgrp, 0);

    if (mypgrp != PGRP_NULL) {
        pg_rele(mypgrp);
    }
    pgrp_replace(p, pgrp);
    pg_rele(pgrp);

    return 0;
}

/*
 * remove process from process group
 */
int
leavepgrp(proc_t p)
{
    pgrp_remove(p);
    return 0;
}

/*
 * delete a process group
 */
static void
pgdelete_dropref(struct pgrp *pgrp)
{
    struct tty *ttyp;
    int emptypgrp = 1;
    struct session *sessp;


    pgrp_lock(pgrp);
    if (pgrp->pg_membercnt != 0) {
        emptypgrp = 0;
    }
    pgrp_unlock(pgrp);

    proc_list_lock();
    pgrp->pg_refcount--;
    if ((emptypgrp == 0) || (pgrp->pg_membercnt != 0)) {
        proc_list_unlock();
        return;
    }

    pgrp->pg_listflags |= PGRP_FLAG_TERMINATE;

    if (pgrp->pg_refcount > 0) {
        proc_list_unlock();
        return;
    }

    pgrp->pg_listflags |= PGRP_FLAG_DEAD;
    LIST_REMOVE(pgrp, pg_hash);

    proc_list_unlock();

    ttyp = SESSION_TP(pgrp->pg_session);
    if (ttyp != TTY_NULL) {
        if (ttyp->t_pgrp == pgrp) {
            tty_lock(ttyp);
            /* Re-check after acquiring the lock */
            if (ttyp->t_pgrp == pgrp) {
                ttyp->t_pgrp = NULL;
                pgrp->pg_session->s_ttypgrpid = NO_PID;
            }
            tty_unlock(ttyp);
        }
    }

    proc_list_lock();

    sessp = pgrp->pg_session;
    if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
        panic("pg_deleteref: manipulating refs of already terminating session");
    }
    if (--sessp->s_count == 0) {
        if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
            panic("pg_deleteref: terminating already terminated session");
        }
        sessp->s_listflags |= S_LIST_TERM;
        ttyp = SESSION_TP(sessp);
        LIST_REMOVE(sessp, s_hash);
        proc_list_unlock();
        if (ttyp != TTY_NULL) {
            tty_lock(ttyp);
            if (ttyp->t_session == sessp) {
                ttyp->t_session = NULL;
            }
            tty_unlock(ttyp);
        }
        proc_list_lock();
        sessp->s_listflags |= S_LIST_DEAD;
        if (sessp->s_count != 0) {
            panic("pg_deleteref: freeing session in use");
        }
        proc_list_unlock();
        lck_mtx_destroy(&sessp->s_mlock, proc_mlock_grp);

        FREE_ZONE(sessp, sizeof(struct session), M_SESSION);
    } else {
        proc_list_unlock();
    }
    lck_mtx_destroy(&pgrp->pg_mlock, proc_mlock_grp);
    FREE_ZONE(pgrp, sizeof(*pgrp), M_PGRP);
}


/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session). If that count reaches zero, the
 * process group becomes orphaned. Check both the specified process'
 * process group and that of its children.
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 */
int
fixjob_callback(proc_t p, void * arg)
{
    struct fixjob_iterargs *fp;
    struct pgrp * pg, *hispg;
    struct session * mysession, *hissess;
    int entering;

    fp = (struct fixjob_iterargs *)arg;
    pg = fp->pg;
    mysession = fp->mysession;
    entering = fp->entering;

    hispg = proc_pgrp(p);
    hissess = proc_session(p);

    if ((hispg != pg) &&
        (hissess == mysession)) {
        pgrp_lock(hispg);
        if (entering) {
            hispg->pg_jobc++;
            pgrp_unlock(hispg);
        } else if (--hispg->pg_jobc == 0) {
            pgrp_unlock(hispg);
            orphanpg(hispg);
        } else {
            pgrp_unlock(hispg);
        }
    }
    if (hissess != SESSION_NULL) {
        session_rele(hissess);
    }
    if (hispg != PGRP_NULL) {
        pg_rele(hispg);
    }

    return PROC_RETURNED;
}

void
fixjobc(proc_t p, struct pgrp *pgrp, int entering)
{
    struct pgrp *hispgrp = PGRP_NULL;
    struct session *hissess = SESSION_NULL;
    struct session *mysession = pgrp->pg_session;
    proc_t parent;
    struct fixjob_iterargs fjarg;
    boolean_t proc_parent_self;

    /*
     * Check if p's parent is the current proc; if so, there is no
     * need to take a ref. Calling proc_parent with the current proc
     * as the parent may deadlock if the current proc is exiting.
     */
    proc_parent_self = proc_parent_is_currentproc(p);
    if (proc_parent_self) {
        parent = current_proc();
    } else {
        parent = proc_parent(p);
    }

    if (parent != PROC_NULL) {
        hispgrp = proc_pgrp(parent);
        hissess = proc_session(parent);
        if (!proc_parent_self) {
            proc_rele(parent);
        }
    }


    /*
     * Check p's parent to see whether p qualifies its own process
     * group; if so, adjust count for p's process group.
     */
    if ((hispgrp != pgrp) &&
        (hissess == mysession)) {
        pgrp_lock(pgrp);
        if (entering) {
            pgrp->pg_jobc++;
            pgrp_unlock(pgrp);
        } else if (--pgrp->pg_jobc == 0) {
            pgrp_unlock(pgrp);
            orphanpg(pgrp);
        } else {
            pgrp_unlock(pgrp);
        }
    }

    if (hissess != SESSION_NULL) {
        session_rele(hissess);
    }
    if (hispgrp != PGRP_NULL) {
        pg_rele(hispgrp);
    }

    /*
     * Check this process' children to see whether they qualify
     * their process groups; if so, adjust counts for children's
     * process groups.
     */
    fjarg.pg = pgrp;
    fjarg.mysession = mysession;
    fjarg.entering = entering;
    proc_childrenwalk(p, fixjob_callback, &fjarg);
}

/*
 * The pidlist_* routines support the functions in this file that
 * walk lists of processes applying filters and callouts to the
 * elements of the list.
 *
 * A prior implementation used a single linear array, which can be
 * tricky to allocate on large systems. This implementation creates
 * an SLIST of modestly sized arrays of PIDS_PER_ENTRY elements.
 *
 * The array should be sized large enough to keep the overhead of
 * walking the list low, but small enough that blocking allocations of
 * pidlist_entry_t structures always succeed.
 */
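
/*
 * Typical use, as in orphanpg() below: pidlist_init() a stack-local
 * pidlist_t, grow it with pidlist_alloc() outside the relevant lock
 * until the capacity suffices, pidlist_set_active(), fill it with
 * pidlist_add_pid() under the lock, and pidlist_free() when done.
 */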

#define PIDS_PER_ENTRY 1021

typedef struct pidlist_entry {
    SLIST_ENTRY(pidlist_entry) pe_link;
    u_int pe_nused;
    pid_t pe_pid[PIDS_PER_ENTRY];
} pidlist_entry_t;

typedef struct {
    SLIST_HEAD(, pidlist_entry) pl_head;
    struct pidlist_entry *pl_active;
    u_int pl_nalloc;
} pidlist_t;

static __inline__ pidlist_t *
pidlist_init(pidlist_t *pl)
{
    SLIST_INIT(&pl->pl_head);
    pl->pl_active = NULL;
    pl->pl_nalloc = 0;
    return pl;
}

static u_int
pidlist_alloc(pidlist_t *pl, u_int needed)
{
    while (pl->pl_nalloc < needed) {
        pidlist_entry_t *pe = kalloc(sizeof(*pe));
        if (NULL == pe) {
            panic("no space for pidlist entry");
        }
        pe->pe_nused = 0;
        SLIST_INSERT_HEAD(&pl->pl_head, pe, pe_link);
        pl->pl_nalloc += (sizeof(pe->pe_pid) / sizeof(pe->pe_pid[0]));
    }
    return pl->pl_nalloc;
}

static void
pidlist_free(pidlist_t *pl)
{
    pidlist_entry_t *pe;
    while (NULL != (pe = SLIST_FIRST(&pl->pl_head))) {
        SLIST_FIRST(&pl->pl_head) = SLIST_NEXT(pe, pe_link);
        kfree(pe, sizeof(*pe));
    }
    pl->pl_nalloc = 0;
}

static __inline__ void
pidlist_set_active(pidlist_t *pl)
{
    pl->pl_active = SLIST_FIRST(&pl->pl_head);
    assert(pl->pl_active);
}

static void
pidlist_add_pid(pidlist_t *pl, pid_t pid)
{
    pidlist_entry_t *pe = pl->pl_active;
    if (pe->pe_nused >= sizeof(pe->pe_pid) / sizeof(pe->pe_pid[0])) {
        if (NULL == (pe = SLIST_NEXT(pe, pe_link))) {
            panic("pidlist allocation exhausted");
        }
        pl->pl_active = pe;
    }
    pe->pe_pid[pe->pe_nused++] = pid;
}

static __inline__ u_int
pidlist_nalloc(const pidlist_t *pl)
{
    return pl->pl_nalloc;
}

/*
 * A process group has become orphaned; if there are any stopped processes in
 * the group, hang up all processes in that group.
 */
static void
orphanpg(struct pgrp *pgrp)
{
    pidlist_t pid_list, *pl = pidlist_init(&pid_list);
    u_int pid_count_available = 0;
    proc_t p;

    /* allocate outside of the pgrp_lock */
    for (;;) {
        pgrp_lock(pgrp);

        boolean_t should_iterate = FALSE;
        pid_count_available = 0;

        PGMEMBERS_FOREACH(pgrp, p) {
            pid_count_available++;
            if (p->p_stat == SSTOP) {
                should_iterate = TRUE;
            }
        }
        if (pid_count_available == 0 || !should_iterate) {
            pgrp_unlock(pgrp);
            goto out; /* no orphaned processes OR nothing stopped */
        }
        if (pidlist_nalloc(pl) >= pid_count_available) {
            break;
        }
        pgrp_unlock(pgrp);

        pidlist_alloc(pl, pid_count_available);
    }
    pidlist_set_active(pl);

    u_int pid_count = 0;
    PGMEMBERS_FOREACH(pgrp, p) {
        pidlist_add_pid(pl, proc_pid(p));
        if (++pid_count >= pid_count_available) {
            break;
        }
    }
    pgrp_unlock(pgrp);

    const pidlist_entry_t *pe;
    SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
        for (u_int i = 0; i < pe->pe_nused; i++) {
            const pid_t pid = pe->pe_pid[i];
            if (0 == pid) {
                continue; /* skip kernproc */
            }
            p = proc_find(pid);
            if (!p) {
                continue;
            }
            proc_transwait(p, 0);
            pt_setrunnable(p);
            psignal(p, SIGHUP);
            psignal(p, SIGCONT);
            proc_rele(p);
        }
    }
out:
    pidlist_free(pl);
}

int
proc_is_classic(proc_t p __unused)
{
    return 0;
}

/* XXX Why does this function exist? Need to kill it off... */
proc_t
current_proc_EXTERNAL(void)
{
    return current_proc();
}

int
proc_is_forcing_hfs_case_sensitivity(proc_t p)
{
    return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY) ? 1 : 0;
}

#if CONFIG_COREDUMP
/*
 * proc_core_name(name, uid, pid)
 * Expand the name described in corefilename, using name, uid, and pid.
 * corefilename is a printf-like string, with three format specifiers:
 *	%N	name of process ("name")
 *	%P	process id (pid)
 *	%U	user id (uid)
 * For example, "%N.core" is the default; core files can be disabled
 * completely by using "/dev/null", or all core files can be stored in
 * "/cores/%U/%N-%P".
 * This is controlled by the sysctl variable kern.corefile (see above).
 */
__private_extern__ int
proc_core_name(const char *name, uid_t uid, pid_t pid, char *cf_name,
    size_t cf_name_len)
{
    const char *format, *appendstr;
    char id_buf[11];                /* Buffer for pid/uid -- max 4B */
    size_t i, l, n;

    if (cf_name == NULL) {
        goto toolong;
    }

    format = corefilename;
    for (i = 0, n = 0; n < cf_name_len && format[i]; i++) {
        switch (format[i]) {
        case '%':       /* Format character */
            i++;
            switch (format[i]) {
            case '%':
                appendstr = "%";
                break;
            case 'N':       /* process name */
                appendstr = name;
                break;
            case 'P':       /* process id */
                snprintf(id_buf, sizeof(id_buf), "%u", pid);
                appendstr = id_buf;
                break;
            case 'U':       /* user id */
                snprintf(id_buf, sizeof(id_buf), "%u", uid);
                appendstr = id_buf;
                break;
            case '\0':      /* format string ended in % symbol */
                goto endofstring;
            default:
                appendstr = "";
                log(LOG_ERR,
                    "Unknown format character %c in `%s'\n",
                    format[i], format);
            }
            l = strlen(appendstr);
            if ((n + l) >= cf_name_len) {
                goto toolong;
            }
            bcopy(appendstr, cf_name + n, l);
            n += l;
            break;
        default:
            cf_name[n++] = format[i];
        }
    }
    if (format[i] != '\0') {
        goto toolong;
    }
    return 0;
toolong:
    log(LOG_ERR, "pid %ld (%s), uid (%u): corename is too long\n",
        (long)pid, name, (uint32_t)uid);
    return 1;
endofstring:
    log(LOG_ERR, "pid %ld (%s), uid (%u): unexpected end of string after %% token\n",
        (long)pid, name, (uint32_t)uid);
    return 1;
}
#endif /* CONFIG_COREDUMP */

/* Code Signing related routines */

int
csops(__unused proc_t p, struct csops_args *uap, __unused int32_t *retval)
{
    return csops_internal(uap->pid, uap->ops, uap->useraddr,
               uap->usersize, USER_ADDR_NULL);
}

int
csops_audittoken(__unused proc_t p, struct csops_audittoken_args *uap, __unused int32_t *retval)
{
    if (uap->uaudittoken == USER_ADDR_NULL) {
        return EINVAL;
    }
    return csops_internal(uap->pid, uap->ops, uap->useraddr,
               uap->usersize, uap->uaudittoken);
}

static int
csops_copy_token(void *start, size_t length, user_size_t usize, user_addr_t uaddr)
{
    char fakeheader[8] = { 0 };
    int error;

    if (usize < sizeof(fakeheader)) {
        return ERANGE;
    }

    /* if no blob, fill in zero header */
    if (NULL == start) {
        start = fakeheader;
        length = sizeof(fakeheader);
    } else if (usize < length) {
        /* ... if input too short, copy out length of entitlement */
        uint32_t length32 = htonl((uint32_t)length);
        memcpy(&fakeheader[4], &length32, sizeof(length32));

        error = copyout(fakeheader, uaddr, sizeof(fakeheader));
        if (error == 0) {
            return ERANGE; /* input buffer too short, ERANGE signals that */
        }
        return error;
    }
    return copyout(start, uaddr, length);
}
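
/*
 * Illustrative userland pattern (hypothetical; not part of this file):
 * call csops(pid, CS_OPS_ENTITLEMENTS_BLOB, hdr, sizeof(hdr)) with an
 * 8-byte buffer; on ERANGE the second 32-bit word of hdr holds the blob
 * length in network byte order, so retry with a buffer of that size.
 */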
2238
2239 static int
2240 csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaudittoken)
2241 {
2242 size_t usize = (size_t)CAST_DOWN(size_t, usersize);
2243 proc_t pt;
2244 int forself;
2245 int error;
2246 vnode_t tvp;
2247 off_t toff;
2248 unsigned char cdhash[SHA1_RESULTLEN];
2249 audit_token_t token;
2250 unsigned int upid = 0, uidversion = 0;
2251
2252 forself = error = 0;
2253
2254 if (pid == 0) {
2255 pid = proc_selfpid();
2256 }
2257 if (pid == proc_selfpid()) {
2258 forself = 1;
2259 }
2260
2261
2262 switch (ops) {
2263 case CS_OPS_STATUS:
2264 case CS_OPS_CDHASH:
2265 case CS_OPS_PIDOFFSET:
2266 case CS_OPS_ENTITLEMENTS_BLOB:
2267 case CS_OPS_IDENTITY:
2268 case CS_OPS_BLOB:
2269 case CS_OPS_TEAMID:
2270 case CS_OPS_CLEAR_LV:
2271 break; /* not restricted to root */
2272 default:
2273 if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE) {
2274 return EPERM;
2275 }
2276 break;
2277 }
2278
2279 pt = proc_find(pid);
2280 if (pt == PROC_NULL) {
2281 return ESRCH;
2282 }
2283
2284 upid = pt->p_pid;
2285 uidversion = pt->p_idversion;
2286 if (uaudittoken != USER_ADDR_NULL) {
2287 error = copyin(uaudittoken, &token, sizeof(audit_token_t));
2288 if (error != 0) {
2289 goto out;
2290 }
2291 /* verify the audit token pid/idversion matches with proc */
2292 if ((token.val[5] != upid) || (token.val[7] != uidversion)) {
2293 error = ESRCH;
2294 goto out;
2295 }
2296 }
2297
2298 #if CONFIG_MACF
2299 switch (ops) {
2300 case CS_OPS_MARKINVALID:
2301 case CS_OPS_MARKHARD:
2302 case CS_OPS_MARKKILL:
2303 case CS_OPS_MARKRESTRICT:
2304 case CS_OPS_SET_STATUS:
2305 case CS_OPS_CLEARINSTALLER:
2306 case CS_OPS_CLEARPLATFORM:
2307 case CS_OPS_CLEAR_LV:
2308 if ((error = mac_proc_check_set_cs_info(current_proc(), pt, ops))) {
2309 goto out;
2310 }
2311 break;
2312 default:
2313 if ((error = mac_proc_check_get_cs_info(current_proc(), pt, ops))) {
2314 goto out;
2315 }
2316 }
2317 #endif
2318
2319 switch (ops) {
2320 case CS_OPS_STATUS: {
2321 uint32_t retflags;
2322
2323 proc_lock(pt);
2324 retflags = pt->p_csflags;
2325 if (cs_process_enforcement(pt)) {
2326 retflags |= CS_ENFORCEMENT;
2327 }
2328 if (csproc_get_platform_binary(pt)) {
2329 retflags |= CS_PLATFORM_BINARY;
2330 }
2331 if (csproc_get_platform_path(pt)) {
2332 retflags |= CS_PLATFORM_PATH;
2333 }
2334 //Don't return CS_REQUIRE_LV if we turned it on with CS_FORCED_LV but still report CS_FORCED_LV
2335 if ((pt->p_csflags & CS_FORCED_LV) == CS_FORCED_LV) {
2336 retflags &= (~CS_REQUIRE_LV);
2337 }
2338 proc_unlock(pt);
2339
2340 if (uaddr != USER_ADDR_NULL) {
2341 error = copyout(&retflags, uaddr, sizeof(uint32_t));
2342 }
2343 break;
2344 }
2345 case CS_OPS_MARKINVALID:
2346 proc_lock(pt);
2347 if ((pt->p_csflags & CS_VALID) == CS_VALID) { /* is currently valid */
2348 pt->p_csflags &= ~CS_VALID; /* set invalid */
2349 if ((pt->p_csflags & CS_KILL) == CS_KILL) {
2350 pt->p_csflags |= CS_KILLED;
2351 proc_unlock(pt);
2352 if (cs_debug) {
2353 printf("CODE SIGNING: marked invalid by pid %d: "
2354 "p=%d[%s] honoring CS_KILL, final status 0x%x\n",
2355 proc_selfpid(), pt->p_pid, pt->p_comm, pt->p_csflags);
2356 }
2357 psignal(pt, SIGKILL);
2358 } else {
2359 proc_unlock(pt);
2360 }
2361 } else {
2362 proc_unlock(pt);
2363 }
2364
2365 break;
2366
2367 case CS_OPS_MARKHARD:
2368 proc_lock(pt);
2369 pt->p_csflags |= CS_HARD;
2370 if ((pt->p_csflags & CS_VALID) == 0) {
2371 /* @@@ allow? reject? kill? @@@ */
2372 proc_unlock(pt);
2373 error = EINVAL;
2374 goto out;
2375 } else {
2376 proc_unlock(pt);
2377 }
2378 break;
2379
2380 case CS_OPS_MARKKILL:
2381 proc_lock(pt);
2382 pt->p_csflags |= CS_KILL;
2383 if ((pt->p_csflags & CS_VALID) == 0) {
2384 proc_unlock(pt);
2385 psignal(pt, SIGKILL);
2386 } else {
2387 proc_unlock(pt);
2388 }
2389 break;
2390
2391 case CS_OPS_PIDOFFSET:
2392 toff = pt->p_textoff;
2393 proc_rele(pt);
2394 error = copyout(&toff, uaddr, sizeof(toff));
2395 return error;
2396
2397 case CS_OPS_CDHASH:
2398
2399 /* pt already holds a reference on its p_textvp */
2400 tvp = pt->p_textvp;
2401 toff = pt->p_textoff;
2402
2403 if (tvp == NULLVP || usize != SHA1_RESULTLEN) {
2404 proc_rele(pt);
2405 return EINVAL;
2406 }
2407
2408 error = vn_getcdhash(tvp, toff, cdhash);
2409 proc_rele(pt);
2410
2411 if (error == 0) {
2412 error = copyout(cdhash, uaddr, sizeof(cdhash));
2413 }
2414
2415 return error;
2416
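/*
 * Usage sketch: CS_OPS_CDHASH requires the user buffer to be exactly
 * SHA1_RESULTLEN (20) bytes:
 *
 *	uint8_t cdhash[20]; // SHA1_RESULTLEN
 *	if (csops(pid, CS_OPS_CDHASH, cdhash, sizeof(cdhash)) == 0) {
 *		// cdhash now holds the code directory hash
 *	}
 */
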
2417 case CS_OPS_ENTITLEMENTS_BLOB: {
2418 void *start;
2419 size_t length;
2420
2421 proc_lock(pt);
2422
2423 if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
2424 proc_unlock(pt);
2425 error = EINVAL;
2426 break;
2427 }
2428
2429 error = cs_entitlements_blob_get(pt, &start, &length);
2430 proc_unlock(pt);
2431 if (error) {
2432 break;
2433 }
2434
2435 error = csops_copy_token(start, length, usize, uaddr);
2436 break;
2437 }
2438 case CS_OPS_MARKRESTRICT:
2439 proc_lock(pt);
2440 pt->p_csflags |= CS_RESTRICT;
2441 proc_unlock(pt);
2442 break;
2443
2444 case CS_OPS_SET_STATUS: {
2445 uint32_t flags;
2446
2447 if (usize < sizeof(flags)) {
2448 error = ERANGE;
2449 break;
2450 }
2451
2452 error = copyin(uaddr, &flags, sizeof(flags));
2453 if (error) {
2454 break;
2455 }
2456
2457 /* only allow setting a subset of all code sign flags */
2458 flags &=
2459 CS_HARD | CS_EXEC_SET_HARD |
2460 CS_KILL | CS_EXEC_SET_KILL |
2461 CS_RESTRICT |
2462 CS_REQUIRE_LV |
2463 CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT;
2464
2465 proc_lock(pt);
2466 if (pt->p_csflags & CS_VALID) {
2467 pt->p_csflags |= flags;
2468 } else {
2469 error = EINVAL;
2470 }
2471 proc_unlock(pt);
2472
2473 break;
2474 }
2475 case CS_OPS_CLEAR_LV: {
2476 /*
2477 * This option is used to remove library validation from
2478 * a running process. This is used in plugin architectures
2479 * when a program needs to load untrusted libraries. This
2480 * allows the process to maintain library validation as
2481 * long as possible, then drop it only when required.
2482 * Once a process has loaded the untrusted library,
2483 * relying on library validation in the future will
2484 * not be effective. An alternative is to re-exec
2485 * your application without library validation, or
2486 * fork an untrusted child.
2487 */
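/*
 * Usage sketch (hypothetical plugin host; assumes the caller holds
 * the entitlement named by CLEAR_LV_ENTITLEMENT, checked below):
 * drop library validation as late as possible, immediately before
 * loading the untrusted code.
 *
 *	#include <dlfcn.h>
 *
 *	if (csops(getpid(), CS_OPS_CLEAR_LV, NULL, 0) == 0) {
 *		void *handle = dlopen(plugin_path, RTLD_NOW);
 *		// ... use the plugin; LV cannot be meaningfully re-armed
 *	}
 */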
2488 #ifdef CONFIG_EMBEDDED
2489 // On embedded platforms, we don't support dropping LV
2490 error = ENOTSUP;
2491 #else
2492 /*
2493 * if we have the flag set, and the caller wants
2494 * to remove it, and they're entitled to, then
2495 * we remove it from the csflags
2496 *
2497 * NOTE: We are fine to poke into the task because
2498 * we get a ref to pt when we do the proc_find
2499 * at the beginning of this function.
2500 *
2501 * We also only allow altering ourselves.
2502 */
2503 if (forself == 1 && IOTaskHasEntitlement(pt->task, CLEAR_LV_ENTITLEMENT)) {
2504 proc_lock(pt);
2505 pt->p_csflags &= (~(CS_REQUIRE_LV | CS_FORCED_LV));
2506 proc_unlock(pt);
2507 error = 0;
2508 } else {
2509 error = EPERM;
2510 }
2511 #endif
2512 break;
2513 }
2514 case CS_OPS_BLOB: {
2515 void *start;
2516 size_t length;
2517
2518 proc_lock(pt);
2519 if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
2520 proc_unlock(pt);
2521 error = EINVAL;
2522 break;
2523 }
2524
2525 error = cs_blob_get(pt, &start, &length);
2526 proc_unlock(pt);
2527 if (error) {
2528 break;
2529 }
2530
2531 error = csops_copy_token(start, length, usize, uaddr);
2532 break;
2533 }
2534 case CS_OPS_IDENTITY:
2535 case CS_OPS_TEAMID: {
2536 const char *identity;
2537 uint8_t fakeheader[8];
2538 uint32_t idlen;
2539 size_t length;
2540
2541 /*
2542 * Make identity have a blob header to make it
2543 * easier on userland to guess the identity
2544 * length.
2545 */
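/*
 * Userland sketch of that layout (inferred from the code below, not
 * from a public header): bytes 4..7 of the 8-byte pseudo-header hold
 * the total length (header plus NUL-terminated identity) in network
 * byte order, so even a call that fails with ERANGE tells the caller
 * how much to allocate for the retry.
 *
 *	uint8_t hdr[8];
 *	uint32_t total;
 *	(void)csops(pid, CS_OPS_IDENTITY, hdr, sizeof(hdr)); // ERANGE if 8 bytes is too small
 *	memcpy(&total, &hdr[4], sizeof(total));
 *	total = ntohl(total);
 *	char *buf = malloc(total); // retry csops() with 'total' bytes
 */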
2546 if (usize < sizeof(fakeheader)) {
2547 error = ERANGE;
2548 break;
2549 }
2550 memset(fakeheader, 0, sizeof(fakeheader));
2551
2552 proc_lock(pt);
2553 if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
2554 proc_unlock(pt);
2555 error = EINVAL;
2556 break;
2557 }
2558
2559 identity = ops == CS_OPS_TEAMID ? csproc_get_teamid(pt) : cs_identity_get(pt);
2560 proc_unlock(pt);
2561 if (identity == NULL) {
2562 error = ENOENT;
2563 break;
2564 }
2565
2566 length = strlen(identity) + 1; /* include NUL */
2567 idlen = htonl(length + sizeof(fakeheader));
2568 memcpy(&fakeheader[4], &idlen, sizeof(idlen));
2569
2570 error = copyout(fakeheader, uaddr, sizeof(fakeheader));
2571 if (error) {
2572 break;
2573 }
2574
2575 if (usize < sizeof(fakeheader) + length) {
2576 error = ERANGE;
2577 } else if (usize > sizeof(fakeheader)) {
2578 error = copyout(identity, uaddr + sizeof(fakeheader), length);
2579 }
2580
2581 break;
2582 }
2583
2584 case CS_OPS_CLEARINSTALLER:
2585 proc_lock(pt);
2586 pt->p_csflags &= ~(CS_INSTALLER | CS_DATAVAULT_CONTROLLER | CS_EXEC_INHERIT_SIP);
2587 proc_unlock(pt);
2588 break;
2589
2590 case CS_OPS_CLEARPLATFORM:
2591 #if DEVELOPMENT || DEBUG
2592 if (cs_process_global_enforcement()) {
2593 error = ENOTSUP;
2594 break;
2595 }
2596
2597 #if CONFIG_CSR
2598 if (csr_check(CSR_ALLOW_APPLE_INTERNAL) != 0) {
2599 error = ENOTSUP;
2600 break;
2601 }
2602 #endif
2603
2604 proc_lock(pt);
2605 pt->p_csflags &= ~(CS_PLATFORM_BINARY | CS_PLATFORM_PATH);
2606 csproc_clear_platform_binary(pt);
2607 proc_unlock(pt);
2608 break;
2609 #else
2610 error = ENOTSUP;
2611 break;
2612 #endif /* DEVELOPMENT || DEBUG */
2613
2614 default:
2615 error = EINVAL;
2616 break;
2617 }
2618 out:
2619 proc_rele(pt);
2620 return error;
2621 }
2622
2623 void
2624 proc_iterate(
2625 unsigned int flags,
2626 proc_iterate_fn_t callout,
2627 void *arg,
2628 proc_iterate_fn_t filterfn,
2629 void *filterarg)
2630 {
2631 pidlist_t pid_list, *pl = pidlist_init(&pid_list);
2632 u_int pid_count_available = 0;
2633
2634 assert(callout != NULL);
2635
2636 /* allocate outside of the proc_list_lock: sample the count under the lock, grow the list unlocked, retry until it fits */
2637 for (;;) {
2638 proc_list_lock();
2639 pid_count_available = nprocs + 1; /* kernel_task not counted in nprocs */
2640 assert(pid_count_available > 0);
2641 if (pidlist_nalloc(pl) > pid_count_available) {
2642 break;
2643 }
2644 proc_list_unlock();
2645
2646 pidlist_alloc(pl, pid_count_available);
2647 }
2648 pidlist_set_active(pl);
2649
2650 /* filter pids into the pid_list */
2651
2652 u_int pid_count = 0;
2653 if (flags & PROC_ALLPROCLIST) {
2654 proc_t p;
2655 ALLPROC_FOREACH(p) {
2656 /* ignore processes that are being forked */
2657 if (p->p_stat == SIDL) {
2658 continue;
2659 }
2660 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
2661 continue;
2662 }
2663 pidlist_add_pid(pl, proc_pid(p));
2664 if (++pid_count >= pid_count_available) {
2665 break;
2666 }
2667 }
2668 }
2669
2670 if ((pid_count < pid_count_available) &&
2671 (flags & PROC_ZOMBPROCLIST)) {
2672 proc_t p;
2673 ZOMBPROC_FOREACH(p) {
2674 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
2675 continue;
2676 }
2677 pidlist_add_pid(pl, proc_pid(p));
2678 if (++pid_count >= pid_count_available) {
2679 break;
2680 }
2681 }
2682 }
2683
2684 proc_list_unlock();
2685
2686 /* call callout on processes in the pid_list */
2687
2688 const pidlist_entry_t *pe;
2689 SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
2690 for (u_int i = 0; i < pe->pe_nused; i++) {
2691 const pid_t pid = pe->pe_pid[i];
2692 proc_t p = proc_find(pid);
2693 if (p) {
2694 if ((flags & PROC_NOWAITTRANS) == 0) {
2695 proc_transwait(p, 0);
2696 }
2697 const int callout_ret = callout(p, arg);
2698
2699 switch (callout_ret) {
2700 case PROC_RETURNED_DONE:
2701 proc_rele(p);
2702 /* FALLTHROUGH */
2703 case PROC_CLAIMED_DONE:
2704 goto out;
2705
2706 case PROC_RETURNED:
2707 proc_rele(p);
2708 /* FALLTHROUGH */
2709 case PROC_CLAIMED:
2710 break;
2711 default:
2712 panic("%s: callout =%d for pid %d",
2713 __func__, callout_ret, pid);
2714 break;
2715 }
2716 } else if (flags & PROC_ZOMBPROCLIST) {
2717 p = proc_find_zombref(pid);
2718 if (!p) {
2719 continue;
2720 }
2721 const int callout_ret = callout(p, arg);
2722
2723 switch (callout_ret) {
2724 case PROC_RETURNED_DONE:
2725 proc_drop_zombref(p);
2726 /* FALLTHROUGH */
2727 case PROC_CLAIMED_DONE:
2728 goto out;
2729
2730 case PROC_RETURNED:
2731 proc_drop_zombref(p);
2732 /* FALLTHROUGH */
2733 case PROC_CLAIMED:
2734 break;
2735 default:
2736 panic("%s: callout =%d for zombie %d",
2737 __func__, callout_ret, pid);
2738 break;
2739 }
2740 }
2741 }
2742 }
2743 out:
2744 pidlist_free(pl);
2745 }
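
/*
 * Usage sketch (hypothetical callers, to illustrate the contract):
 * filterfn runs with proc_list_lock held and returns nonzero to
 * include a pid; callout runs with a proc ref held and returns
 * PROC_RETURNED to have that ref dropped here, PROC_CLAIMED if it
 * disposed of the ref itself, or the *_DONE variants to stop.
 *
 *	static int
 *	match_uid_filter(proc_t p, void *arg)
 *	{
 *		return kauth_cred_getuid(proc_ucred(p)) == *(uid_t *)arg;
 *	}
 *
 *	static int
 *	count_callout(__unused proc_t p, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return PROC_RETURNED;
 *	}
 *
 *	int count = 0;
 *	uid_t uid = 501;
 *	proc_iterate(PROC_ALLPROCLIST, count_callout, &count,
 *	    match_uid_filter, &uid);
 */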
2746
2747 void
2748 proc_rebootscan(
2749 proc_iterate_fn_t callout,
2750 void *arg,
2751 proc_iterate_fn_t filterfn,
2752 void *filterarg)
2753 {
2754 proc_t p;
2755
2756 assert(callout != NULL);
2757
2758 proc_shutdown_exitcount = 0;
2759
2760 restart_foreach:
2761
2762 proc_list_lock();
2763
2764 ALLPROC_FOREACH(p) {
2765 if ((filterfn != NULL) && filterfn(p, filterarg) == 0) {
2766 continue;
2767 }
2768 p = proc_ref_locked(p);
2769 if (!p) {
2770 continue;
2771 }
2772
2773 proc_list_unlock();
2774
2775 proc_transwait(p, 0);
2776 (void)callout(p, arg);
2777 proc_rele(p);
2778
2779 goto restart_foreach;
2780 }
2781
2782 proc_list_unlock();
2783 }
2784
2785 void
2786 proc_childrenwalk(
2787 proc_t parent,
2788 proc_iterate_fn_t callout,
2789 void *arg)
2790 {
2791 pidlist_t pid_list, *pl = pidlist_init(&pid_list);
2792 u_int pid_count_available = 0;
2793
2794 assert(parent != NULL);
2795 assert(callout != NULL);
2796
2797 for (;;) {
2798 proc_list_lock();
2799 pid_count_available = parent->p_childrencnt;
2800 if (pid_count_available == 0) {
2801 proc_list_unlock();
2802 goto out;
2803 }
2804 if (pidlist_nalloc(pl) > pid_count_available) {
2805 break;
2806 }
2807 proc_list_unlock();
2808
2809 pidlist_alloc(pl, pid_count_available);
2810 }
2811 pidlist_set_active(pl);
2812
2813 u_int pid_count = 0;
2814 proc_t p;
2815 PCHILDREN_FOREACH(parent, p) {
2816 if (p->p_stat == SIDL) {
2817 continue;
2818 }
2819 pidlist_add_pid(pl, proc_pid(p));
2820 if (++pid_count >= pid_count_available) {
2821 break;
2822 }
2823 }
2824
2825 proc_list_unlock();
2826
2827 const pidlist_entry_t *pe;
2828 SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
2829 for (u_int i = 0; i < pe->pe_nused; i++) {
2830 const pid_t pid = pe->pe_pid[i];
2831 p = proc_find(pid);
2832 if (!p) {
2833 continue;
2834 }
2835 const int callout_ret = callout(p, arg);
2836
2837 switch (callout_ret) {
2838 case PROC_RETURNED_DONE:
2839 proc_rele(p);
2840 /* FALLTHROUGH */
2841 case PROC_CLAIMED_DONE:
2842 goto out;
2843
2844 case PROC_RETURNED:
2845 proc_rele(p);
2846 /* FALLTHROUGH */
2847 case PROC_CLAIMED:
2848 break;
2849 default:
2850 panic("%s: callout =%d for pid %d",
2851 __func__, callout_ret, pid);
2852 break;
2853 }
2854 }
2855 }
2856 out:
2857 pidlist_free(pl);
2858 }
2859
2860 void
2861 pgrp_iterate(
2862 struct pgrp *pgrp,
2863 unsigned int flags,
2864 proc_iterate_fn_t callout,
2865 void * arg,
2866 proc_iterate_fn_t filterfn,
2867 void * filterarg)
2868 {
2869 pidlist_t pid_list, *pl = pidlist_init(&pid_list);
2870 u_int pid_count_available = 0;
2871
2872 assert(pgrp != NULL);
2873 assert(callout != NULL);
2874
2875 for (;;) {
2876 pgrp_lock(pgrp);
2877 pid_count_available = pgrp->pg_membercnt;
2878 if (pid_count_available == 0) {
2879 pgrp_unlock(pgrp);
2880 if (flags & PGRP_DROPREF) {
2881 pg_rele(pgrp);
2882 }
2883 goto out;
2884 }
2885 if (pidlist_nalloc(pl) > pid_count_available) {
2886 break;
2887 }
2888 pgrp_unlock(pgrp);
2889
2890 pidlist_alloc(pl, pid_count_available);
2891 }
2892 pidlist_set_active(pl);
2893
2894 const pid_t pgid = pgrp->pg_id;
2895 u_int pid_count = 0;
2896 proc_t p;
2897 PGMEMBERS_FOREACH(pgrp, p) {
2898 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
2899 continue;
2900 }
2901 pidlist_add_pid(pl, proc_pid(p));
2902 if (++pid_count >= pid_count_available) {
2903 break;
2904 }
2905 }
2906
2907 pgrp_unlock(pgrp);
2908
2909 if (flags & PGRP_DROPREF) {
2910 pg_rele(pgrp);
2911 }
2912
2913 const pidlist_entry_t *pe;
2914 SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
2915 for (u_int i = 0; i < pe->pe_nused; i++) {
2916 const pid_t pid = pe->pe_pid[i];
2917 if (0 == pid) {
2918 continue; /* skip kernproc */
2919 }
2920 p = proc_find(pid);
2921 if (!p) {
2922 continue;
2923 }
2924 if (p->p_pgrpid != pgid) {
2925 proc_rele(p);
2926 continue;
2927 }
2928 const int callout_ret = callout(p, arg);
2929
2930 switch (callout_ret) {
2931 case PROC_RETURNED:
2932 proc_rele(p);
2933 /* FALLTHROUGH */
2934 case PROC_CLAIMED:
2935 break;
2936 case PROC_RETURNED_DONE:
2937 proc_rele(p);
2938 /* FALLTHROUGH */
2939 case PROC_CLAIMED_DONE:
2940 goto out;
2941
2942 default:
2943 panic("%s: callout =%d for pid %d",
2944 __func__, callout_ret, pid);
2945 }
2946 }
2947 }
2948
2949 out:
2950 pidlist_free(pl);
2951 }
2952
2953 static void
2954 pgrp_add(struct pgrp * pgrp, struct proc * parent, struct proc * child)
2955 {
2956 proc_list_lock();
2957 child->p_pgrp = pgrp;
2958 child->p_pgrpid = pgrp->pg_id;
2959 child->p_listflag |= P_LIST_INPGRP;
2960 /*
2961 * While a pgrp is being freed, a process can still request
2962 * addition to it via setpgid (e.g. from bash when a login
2963 * session is terminated, the "login cycler"); return ESRCH.
2964 * Safe to hold the lock here due to the refcount on the pgrp.
2965 */
2966 if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
2967 pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
2968 }
2969
2970 if ((pgrp->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD) {
2971 panic("pgrp_add : pgrp is dead adding process");
2972 }
2973 proc_list_unlock();
2974
2975 pgrp_lock(pgrp);
2976 pgrp->pg_membercnt++;
2977 if (parent != PROC_NULL) {
2978 LIST_INSERT_AFTER(parent, child, p_pglist);
2979 } else {
2980 LIST_INSERT_HEAD(&pgrp->pg_members, child, p_pglist);
2981 }
2982 pgrp_unlock(pgrp);
2983
2984 proc_list_lock();
2985 if (((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (pgrp->pg_membercnt != 0)) {
2986 pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
2987 }
2988 proc_list_unlock();
2989 }
2990
2991 static void
2992 pgrp_remove(struct proc * p)
2993 {
2994 struct pgrp * pg;
2995
2996 pg = proc_pgrp(p);
2997
2998 proc_list_lock();
2999 #if __PROC_INTERNAL_DEBUG
3000 if ((p->p_listflag & P_LIST_INPGRP) == 0) {
3001 panic("removing from pglist but no named ref\n");
3002 }
3003 #endif
3004 p->p_pgrpid = PGRPID_DEAD;
3005 p->p_listflag &= ~P_LIST_INPGRP;
3006 p->p_pgrp = NULL;
3007 proc_list_unlock();
3008
3009 if (pg == PGRP_NULL) {
3010 panic("pgrp_remove: pg is NULL");
3011 }
3012 pgrp_lock(pg);
3013 pg->pg_membercnt--;
3014
3015 if (pg->pg_membercnt < 0) {
3016 panic("pgprp: -ve membercnt pgprp:%p p:%p\n", pg, p);
3017 }
3018
3019 LIST_REMOVE(p, p_pglist);
3020 if (pg->pg_members.lh_first == 0) {
3021 pgrp_unlock(pg);
3022 pgdelete_dropref(pg);
3023 } else {
3024 pgrp_unlock(pg);
3025 pg_rele(pg);
3026 }
3027 }
3028
3029
3030 /* cannot use proc_pgrp as it may be stalled */
3031 static void
3032 pgrp_replace(struct proc * p, struct pgrp * newpg)
3033 {
3034 struct pgrp * oldpg;
3035
3038 proc_list_lock();
3039
3040 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
3041 p->p_listflag |= P_LIST_PGRPTRWAIT;
3042 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
3043 }
3044
3045 p->p_listflag |= P_LIST_PGRPTRANS;
3046
3047 oldpg = p->p_pgrp;
3048 if (oldpg == PGRP_NULL) {
3049 panic("pgrp_replace: oldpg NULL");
3050 }
3051 oldpg->pg_refcount++;
3052 #if __PROC_INTERNAL_DEBUG
3053 if ((p->p_listflag & P_LIST_INPGRP) == 0) {
3054 panic("removing from pglist but no named ref\n");
3055 }
3056 #endif
3057 p->p_pgrpid = PGRPID_DEAD;
3058 p->p_listflag &= ~P_LIST_INPGRP;
3059 p->p_pgrp = NULL;
3060
3061 proc_list_unlock();
3062
3063 pgrp_lock(oldpg);
3064 oldpg->pg_membercnt--;
3065 if (oldpg->pg_membercnt < 0) {
3066 panic("pgprp: -ve membercnt pgprp:%p p:%p\n", oldpg, p);
3067 }
3068 LIST_REMOVE(p, p_pglist);
3069 if (oldpg->pg_members.lh_first == 0) {
3070 pgrp_unlock(oldpg);
3071 pgdelete_dropref(oldpg);
3072 } else {
3073 pgrp_unlock(oldpg);
3074 pg_rele(oldpg);
3075 }
3076
3077 proc_list_lock();
3078 p->p_pgrp = newpg;
3079 p->p_pgrpid = newpg->pg_id;
3080 p->p_listflag |= P_LIST_INPGRP;
3081 /*
3082 * While a pgrp is being freed, a process can still request
3083 * addition to it via setpgid (e.g. from bash when a login
3084 * session is terminated, the "login cycler"); return ESRCH.
3085 * Safe to hold the lock here due to the refcount on the pgrp.
3086 */
3087 if ((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
3088 newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
3089 }
3090
3091 if ((newpg->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD) {
3092 panic("pgrp_add : pgrp is dead adding process");
3093 }
3094 proc_list_unlock();
3095
3096 pgrp_lock(newpg);
3097 newpg->pg_membercnt++;
3098 LIST_INSERT_HEAD(&newpg->pg_members, p, p_pglist);
3099 pgrp_unlock(newpg);
3100
3101 proc_list_lock();
3102 if (((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (newpg->pg_membercnt != 0)) {
3103 newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
3104 }
3105
3106 p->p_listflag &= ~P_LIST_PGRPTRANS;
3107 if ((p->p_listflag & P_LIST_PGRPTRWAIT) == P_LIST_PGRPTRWAIT) {
3108 p->p_listflag &= ~P_LIST_PGRPTRWAIT;
3109 wakeup(&p->p_pgrpid);
3110 }
3111 proc_list_unlock();
3112 }
3113
3114 void
3115 pgrp_lock(struct pgrp * pgrp)
3116 {
3117 lck_mtx_lock(&pgrp->pg_mlock);
3118 }
3119
3120 void
3121 pgrp_unlock(struct pgrp * pgrp)
3122 {
3123 lck_mtx_unlock(&pgrp->pg_mlock);
3124 }
3125
3126 void
3127 session_lock(struct session * sess)
3128 {
3129 lck_mtx_lock(&sess->s_mlock);
3130 }
3131
3132
3133 void
3134 session_unlock(struct session * sess)
3135 {
3136 lck_mtx_unlock(&sess->s_mlock);
3137 }
3138
3139 struct pgrp *
3140 proc_pgrp(proc_t p)
3141 {
3142 struct pgrp * pgrp;
3143
3144 if (p == PROC_NULL) {
3145 return PGRP_NULL;
3146 }
3147 proc_list_lock();
3148
3149 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
3150 p->p_listflag |= P_LIST_PGRPTRWAIT;
3151 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
3152 }
3153
3154 pgrp = p->p_pgrp;
3155
3156 assert(pgrp != NULL);
3157
3158 if (pgrp != PGRP_NULL) {
3159 pgrp->pg_refcount++;
3160 if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) != 0) {
3161 panic("proc_pgrp: ref being povided for dead pgrp");
3162 }
3163 }
3164
3165 proc_list_unlock();
3166
3167 return pgrp;
3168 }
3169
3170 struct pgrp *
3171 tty_pgrp(struct tty * tp)
3172 {
3173 struct pgrp * pg = PGRP_NULL;
3174
3175 proc_list_lock();
3176 pg = tp->t_pgrp;
3177
3178 if (pg != PGRP_NULL) {
3179 if ((pg->pg_listflags & PGRP_FLAG_DEAD) != 0) {
3180 panic("tty_pgrp: ref being povided for dead pgrp");
3181 }
3182 pg->pg_refcount++;
3183 }
3184 proc_list_unlock();
3185
3186 return pg;
3187 }
3188
3189 struct session *
3190 proc_session(proc_t p)
3191 {
3192 struct session * sess = SESSION_NULL;
3193
3194 if (p == PROC_NULL) {
3195 return SESSION_NULL;
3196 }
3197
3198 proc_list_lock();
3199
3200 /* wait during transitions */
3201 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
3202 p->p_listflag |= P_LIST_PGRPTRWAIT;
3203 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
3204 }
3205
3206 if ((p->p_pgrp != PGRP_NULL) && ((sess = p->p_pgrp->pg_session) != SESSION_NULL)) {
3207 if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
3208 panic("proc_session:returning sesssion ref on terminating session");
3209 }
3210 sess->s_count++;
3211 }
3212 proc_list_unlock();
3213 return sess;
3214 }
3215
3216 void
3217 session_rele(struct session *sess)
3218 {
3219 proc_list_lock();
3220 if (--sess->s_count == 0) {
3221 if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
3222 panic("session_rele: terminating already terminated session");
3223 }
3224 sess->s_listflags |= S_LIST_TERM;
3225 LIST_REMOVE(sess, s_hash);
3226 sess->s_listflags |= S_LIST_DEAD;
3227 if (sess->s_count != 0) {
3228 panic("session_rele: freeing session in use");
3229 }
3230 proc_list_unlock();
3231 lck_mtx_destroy(&sess->s_mlock, proc_mlock_grp);
3232 FREE_ZONE(sess, sizeof(struct session), M_SESSION);
3233 } else {
3234 proc_list_unlock();
3235 }
3236 }
3237
3238 int
3239 proc_transstart(proc_t p, int locked, int non_blocking)
3240 {
3241 if (locked == 0) {
3242 proc_lock(p);
3243 }
3244 while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
3245 if (((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT) || non_blocking) {
3246 if (locked == 0) {
3247 proc_unlock(p);
3248 }
3249 return EDEADLK;
3250 }
3251 p->p_lflag |= P_LTRANSWAIT;
3252 msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
3253 }
3254 p->p_lflag |= P_LINTRANSIT;
3255 p->p_transholder = current_thread();
3256 if (locked == 0) {
3257 proc_unlock(p);
3258 }
3259 return 0;
3260 }
3261
3262 void
3263 proc_transcommit(proc_t p, int locked)
3264 {
3265 if (locked == 0) {
3266 proc_lock(p);
3267 }
3268
3269 assert((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT);
3270 assert(p->p_transholder == current_thread());
3271 p->p_lflag |= P_LTRANSCOMMIT;
3272
3273 if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
3274 p->p_lflag &= ~P_LTRANSWAIT;
3275 wakeup(&p->p_lflag);
3276 }
3277 if (locked == 0) {
3278 proc_unlock(p);
3279 }
3280 }
3281
3282 void
3283 proc_transend(proc_t p, int locked)
3284 {
3285 if (locked == 0) {
3286 proc_lock(p);
3287 }
3288
3289 p->p_lflag &= ~(P_LINTRANSIT | P_LTRANSCOMMIT);
3290 p->p_transholder = NULL;
3291
3292 if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
3293 p->p_lflag &= ~P_LTRANSWAIT;
3294 wakeup(&p->p_lflag);
3295 }
3296 if (locked == 0) {
3297 proc_unlock(p);
3298 }
3299 }
3300
3301 int
3302 proc_transwait(proc_t p, int locked)
3303 {
3304 if (locked == 0) {
3305 proc_lock(p);
3306 }
3307 while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
3308 if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT && current_proc() == p) {
3309 if (locked == 0) {
3310 proc_unlock(p);
3311 }
3312 return EDEADLK;
3313 }
3314 p->p_lflag |= P_LTRANSWAIT;
3315 msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
3316 }
3317 if (locked == 0) {
3318 proc_unlock(p);
3319 }
3320 return 0;
3321 }
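
/*
 * Sketch of the transition protocol (hypothetical caller, e.g.
 * exec-like code): bracket a proc-wide mutation so that other
 * threads either block in proc_transwait() or back off with EDEADLK.
 *
 *	if (proc_transstart(p, 0, 0) == 0) {
 *		// ... tear down / rebuild proc state ...
 *		proc_transcommit(p, 0);	// commit point; wakes waiters
 *		// ... finish the switch ...
 *		proc_transend(p, 0);	// clears P_LINTRANSIT, wakes waiters
 *	}
 */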
3322
3323 void
3324 proc_klist_lock(void)
3325 {
3326 lck_mtx_lock(proc_klist_mlock);
3327 }
3328
3329 void
3330 proc_klist_unlock(void)
3331 {
3332 lck_mtx_unlock(proc_klist_mlock);
3333 }
3334
3335 void
3336 proc_knote(struct proc * p, long hint)
3337 {
3338 proc_klist_lock();
3339 KNOTE(&p->p_klist, hint);
3340 proc_klist_unlock();
3341 }
3342
3343 void
3344 proc_knote_drain(struct proc *p)
3345 {
3346 struct knote *kn = NULL;
3347
3348 /*
3349 * Clear the proc's klist to avoid references after the proc is reaped.
3350 */
3351 proc_klist_lock();
3352 while ((kn = SLIST_FIRST(&p->p_klist))) {
3353 kn->kn_proc = PROC_NULL;
3354 KNOTE_DETACH(&p->p_klist, kn);
3355 }
3356 proc_klist_unlock();
3357 }
3358
3359 void
3360 proc_setregister(proc_t p)
3361 {
3362 proc_lock(p);
3363 p->p_lflag |= P_LREGISTER;
3364 proc_unlock(p);
3365 }
3366
3367 void
3368 proc_resetregister(proc_t p)
3369 {
3370 proc_lock(p);
3371 p->p_lflag &= ~P_LREGISTER;
3372 proc_unlock(p);
3373 }
3374
3375 pid_t
3376 proc_pgrpid(proc_t p)
3377 {
3378 return p->p_pgrpid;
3379 }
3380
3381 pid_t
3382 proc_sessionid(proc_t p)
3383 {
3384 pid_t sid = -1;
3385 struct session * sessp = proc_session(p);
3386
3387 if (sessp != SESSION_NULL) {
3388 sid = sessp->s_sid;
3389 session_rele(sessp);
3390 }
3391
3392 return sid;
3393 }
3394
3395 pid_t
3396 proc_selfpgrpid(void)
3397 {
3398 return current_proc()->p_pgrpid;
3399 }
3400
3401
3402 /* return control and action states */
3403 int
3404 proc_getpcontrol(int pid, int * pcontrolp)
3405 {
3406 proc_t p;
3407
3408 p = proc_find(pid);
3409 if (p == PROC_NULL) {
3410 return ESRCH;
3411 }
3412 if (pcontrolp != NULL) {
3413 *pcontrolp = p->p_pcaction;
3414 }
3415
3416 proc_rele(p);
3417 return 0;
3418 }
3419
3420 int
3421 proc_dopcontrol(proc_t p)
3422 {
3423 int pcontrol;
3424 os_reason_t kill_reason;
3425
3426 proc_lock(p);
3427
3428 pcontrol = PROC_CONTROL_STATE(p);
3429
3430 if (PROC_ACTION_STATE(p) == 0) {
3431 switch (pcontrol) {
3432 case P_PCTHROTTLE:
3433 PROC_SETACTION_STATE(p);
3434 proc_unlock(p);
3435 printf("low swap: throttling pid %d (%s)\n", p->p_pid, p->p_comm);
3436 break;
3437
3438 case P_PCSUSP:
3439 PROC_SETACTION_STATE(p);
3440 proc_unlock(p);
3441 printf("low swap: suspending pid %d (%s)\n", p->p_pid, p->p_comm);
3442 task_suspend(p->task);
3443 break;
3444
3445 case P_PCKILL:
3446 PROC_SETACTION_STATE(p);
3447 proc_unlock(p);
3448 printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
3449 kill_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_LOWSWAP);
3450 psignal_with_reason(p, SIGKILL, kill_reason);
3451 break;
3452
3453 default:
3454 proc_unlock(p);
3455 }
3456 } else {
3457 proc_unlock(p);
3458 }
3459
3460 return PROC_RETURNED;
3461 }
3462
3463
3464 /*
3465 * Resume a throttled or suspended process. This is an internal interface, used only
3466 * by the user-level code that presents the GUI when we run out of swap space, and
3467 * hence is restricted to processes with superuser privileges.
3468 */
3469
3470 int
3471 proc_resetpcontrol(int pid)
3472 {
3473 proc_t p;
3474 int pcontrol;
3475 int error;
3476 proc_t self = current_proc();
3477
3478 /* allow if the process has been validated to handle resource control, or if the caller has superuser credentials */
3479 if (((self->p_lflag & P_LVMRSRCOWNER) == 0) && (error = suser(kauth_cred_get(), 0))) {
3480 return error;
3481 }
3482
3483 p = proc_find(pid);
3484 if (p == PROC_NULL) {
3485 return ESRCH;
3486 }
3487
3488 proc_lock(p);
3489
3490 pcontrol = PROC_CONTROL_STATE(p);
3491
3492 if (PROC_ACTION_STATE(p) != 0) {
3493 switch (pcontrol) {
3494 case P_PCTHROTTLE:
3495 PROC_RESETACTION_STATE(p);
3496 proc_unlock(p);
3497 printf("low swap: unthrottling pid %d (%s)\n", p->p_pid, p->p_comm);
3498 break;
3499
3500 case P_PCSUSP:
3501 PROC_RESETACTION_STATE(p);
3502 proc_unlock(p);
3503 printf("low swap: resuming pid %d (%s)\n", p->p_pid, p->p_comm);
3504 task_resume(p->task);
3505 break;
3506
3507 case P_PCKILL:
3508 /* Huh? */
3509 PROC_SETACTION_STATE(p);
3510 proc_unlock(p);
3511 printf("low swap: attempt to unkill pid %d (%s) ignored\n", p->p_pid, p->p_comm);
3512 break;
3513
3514 default:
3515 proc_unlock(p);
3516 }
3517 } else {
3518 proc_unlock(p);
3519 }
3520
3521 proc_rele(p);
3522 return 0;
3523 }
3524
3525
3526
3527 struct no_paging_space {
3528 uint64_t pcs_max_size;
3529 uint64_t pcs_uniqueid;
3530 int pcs_pid;
3531 int pcs_proc_count;
3532 uint64_t pcs_total_size;
3533
3534 uint64_t npcs_max_size;
3535 uint64_t npcs_uniqueid;
3536 int npcs_pid;
3537 int npcs_proc_count;
3538 uint64_t npcs_total_size;
3539
3540 int apcs_proc_count;
3541 uint64_t apcs_total_size;
3542 };
3543
3544
3545 static int
3546 proc_pcontrol_filter(proc_t p, void *arg)
3547 {
3548 struct no_paging_space *nps;
3549 uint64_t compressed;
3550
3551 nps = (struct no_paging_space *)arg;
3552
3553 compressed = get_task_compressed(p->task);
3554
3555 if (PROC_CONTROL_STATE(p)) {
3556 if (PROC_ACTION_STATE(p) == 0) {
3557 if (compressed > nps->pcs_max_size) {
3558 nps->pcs_pid = p->p_pid;
3559 nps->pcs_uniqueid = p->p_uniqueid;
3560 nps->pcs_max_size = compressed;
3561 }
3562 nps->pcs_total_size += compressed;
3563 nps->pcs_proc_count++;
3564 } else {
3565 nps->apcs_total_size += compressed;
3566 nps->apcs_proc_count++;
3567 }
3568 } else {
3569 if (compressed > nps->npcs_max_size) {
3570 nps->npcs_pid = p->p_pid;
3571 nps->npcs_uniqueid = p->p_uniqueid;
3572 nps->npcs_max_size = compressed;
3573 }
3574 nps->npcs_total_size += compressed;
3575 nps->npcs_proc_count++;
3576 }
3577 return 0;
3578 }
3579
3580
3581 static int
3582 proc_pcontrol_null(__unused proc_t p, __unused void *arg)
3583 {
3584 return PROC_RETURNED;
3585 }
3586
3587
3588 /*
3589 * Deal with the low-on-compressor-pool-space condition... this function
3590 * gets called when we are approaching the limits of the compressor pool or
3591 * are unable to create a new swap file.
3592 * Since this eventually creates a memory deadlock situation, we need to take action to free up
3593 * memory resources (both compressed and uncompressed) in order to prevent the system from hanging completely.
3594 * There are two categories of processes to deal with: those that have an action
3595 * associated with them by the task itself, and those that do not. Actionable
3596 * tasks can have one of three actions specified: they can
3597 * be killed immediately, suspended, or
3598 * throttled. Processes that do not have an action associated with them are normally
3599 * ignored unless they are utilizing such a large percentage of the compressor pool (currently 50%)
3600 * that only by killing them can we hope to put the system back into a usable state.
3601 */
3602
3603 #define NO_PAGING_SPACE_DEBUG 0
3604
3605 extern uint64_t vm_compressor_pages_compressed(void);
3606
3607 struct timeval last_no_space_action = {.tv_sec = 0, .tv_usec = 0};
3608
3609 #if DEVELOPMENT || DEBUG
3610 extern boolean_t kill_on_no_paging_space;
3611 #endif /* DEVELOPMENT || DEBUG */
3612
3613 #define MB_SIZE (1024 * 1024ULL)
3614 boolean_t memorystatus_kill_on_VM_compressor_space_shortage(boolean_t);
3615
3616 extern int32_t max_kill_priority;
3617 extern int memorystatus_get_proccnt_upto_priority(int32_t max_bucket_index);
3618
3619 int
3620 no_paging_space_action()
3621 {
3622 proc_t p;
3623 struct no_paging_space nps;
3624 struct timeval now;
3625 os_reason_t kill_reason;
3626
3627 /*
3628 * Throttle how often we come through here. Once every 5 seconds should be plenty.
3629 */
3630 microtime(&now);
3631
3632 if (now.tv_sec <= last_no_space_action.tv_sec + 5) {
3633 return 0;
3634 }
3635
3636 /*
3637 * Examine all processes and find the biggest (biggest is based on the number of pages this
3638 * task has in the compressor pool) that has been marked to have some action
3639 * taken when swap space runs out... we also find the biggest that hasn't been marked for
3640 * action.
3641 *
3642 * If the biggest non-actionable task is over the "dangerously big" threshold (currently 50% of
3643 * the total number of pages held by the compressor), we go ahead and kill it since no other task
3644 * can have any real effect on the situation. Otherwise, we go after the actionable process.
3645 */
3646 bzero(&nps, sizeof(nps));
3647
3648 proc_iterate(PROC_ALLPROCLIST, proc_pcontrol_null, (void *)NULL, proc_pcontrol_filter, (void *)&nps);
3649
3650 #if NO_PAGING_SPACE_DEBUG
3651 printf("low swap: npcs_proc_count = %d, npcs_total_size = %qd, npcs_max_size = %qd\n",
3652 nps.npcs_proc_count, nps.npcs_total_size, nps.npcs_max_size);
3653 printf("low swap: pcs_proc_count = %d, pcs_total_size = %qd, pcs_max_size = %qd\n",
3654 nps.pcs_proc_count, nps.pcs_total_size, nps.pcs_max_size);
3655 printf("low swap: apcs_proc_count = %d, apcs_total_size = %qd\n",
3656 nps.apcs_proc_count, nps.apcs_total_size);
3657 #endif
3658 if (nps.npcs_max_size > (vm_compressor_pages_compressed() * 50) / 100) {
3659 /*
3660 * for now we'll knock out any task that has more than 50% of the pages
3661 * held by the compressor
3662 */
3663 if ((p = proc_find(nps.npcs_pid)) != PROC_NULL) {
3664 if (nps.npcs_uniqueid == p->p_uniqueid) {
3665 /*
3666 * verify this is still the same process
3667 * in case the proc exited and the pid got reused while
3668 * we were finishing the proc_iterate and getting to this point
3669 */
3670 last_no_space_action = now;
3671
3672 printf("low swap: killing largest compressed process with pid %d (%s) and size %llu MB\n", p->p_pid, p->p_comm, (nps.pcs_max_size / MB_SIZE));
3673 kill_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_LOWSWAP);
3674 psignal_with_reason(p, SIGKILL, kill_reason);
3675
3676 proc_rele(p);
3677
3678 return 0;
3679 }
3680
3681 proc_rele(p);
3682 }
3683 }
3684
3685 /*
3686 * We have some processes within our jetsam bands of consideration and hence can be killed.
3687 * So we will invoke the memorystatus thread to go ahead and kill something.
3688 */
3689 if (memorystatus_get_proccnt_upto_priority(max_kill_priority) > 0) {
3690 last_no_space_action = now;
3691 memorystatus_kill_on_VM_compressor_space_shortage(TRUE /* async */);
3692 return 1;
3693 }
3694
3695 /*
3696 * No eligible processes to kill. So let's suspend/kill the largest
3697 * process depending on its policy control specifications.
3698 */
3699
3700 if (nps.pcs_max_size > 0) {
3701 if ((p = proc_find(nps.pcs_pid)) != PROC_NULL) {
3702 if (nps.pcs_uniqueid == p->p_uniqueid) {
3703 /*
3704 * verify this is still the same process
3705 * in case the proc exited and the pid got reused while
3706 * we were finishing the proc_iterate and getting to this point
3707 */
3708 last_no_space_action = now;
3709
3710 proc_dopcontrol(p);
3711
3712 proc_rele(p);
3713
3714 return 1;
3715 }
3716
3717 proc_rele(p);
3718 }
3719 }
3720 last_no_space_action = now;
3721
3722 printf("low swap: unable to find any eligible processes to take action on\n");
3723
3724 return 0;
3725 }
3726
3727 int
3728 proc_trace_log(__unused proc_t p, struct proc_trace_log_args *uap, __unused int *retval)
3729 {
3730 int ret = 0;
3731 proc_t target_proc = PROC_NULL;
3732 pid_t target_pid = uap->pid;
3733 uint64_t target_uniqueid = uap->uniqueid;
3734 task_t target_task = NULL;
3735
3736 if (priv_check_cred(kauth_cred_get(), PRIV_PROC_TRACE_INSPECT, 0)) {
3737 ret = EPERM;
3738 goto out;
3739 }
3740 target_proc = proc_find(target_pid);
3741 if (target_proc != PROC_NULL) {
3742 if (target_uniqueid != proc_uniqueid(target_proc)) {
3743 ret = ENOENT;
3744 goto out;
3745 }
3746
3747 target_task = proc_task(target_proc);
3748 if (task_send_trace_memory(target_task, target_pid, target_uniqueid)) {
3749 ret = EINVAL;
3750 goto out;
3751 }
3752 } else {
3753 ret = ENOENT;
3754 }
3755
3756 out:
3757 if (target_proc != PROC_NULL) {
3758 proc_rele(target_proc);
3759 }
3760 return ret;
3761 }
3762
3763 #if VM_SCAN_FOR_SHADOW_CHAIN
3764 extern int vm_map_shadow_max(vm_map_t map);
3765 int proc_shadow_max(void);
3766 int
3767 proc_shadow_max(void)
3768 {
3769 int retval, max;
3770 proc_t p;
3771 task_t task;
3772 vm_map_t map;
3773
3774 max = 0;
3775 proc_list_lock();
3776 for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
3777 if (p->p_stat == SIDL) {
3778 continue;
3779 }
3780 task = p->task;
3781 if (task == NULL) {
3782 continue;
3783 }
3784 map = get_task_map(task);
3785 if (map == NULL) {
3786 continue;
3787 }
3788 retval = vm_map_shadow_max(map);
3789 if (retval > max) {
3790 max = retval;
3791 }
3792 }
3793 proc_list_unlock();
3794 return max;
3795 }
3796 #endif /* VM_SCAN_FOR_SHADOW_CHAIN */
3797
3798 void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid);
3799 void
3800 proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid)
3801 {
3802 if (target_proc != NULL) {
3803 target_proc->p_responsible_pid = responsible_pid;
3804 }
3805 return;
3806 }
3807
3808 int
3809 proc_chrooted(proc_t p)
3810 {
3811 int retval = 0;
3812
3813 if (p) {
3814 proc_fdlock(p);
3815 retval = (p->p_fd->fd_rdir != NULL) ? 1 : 0;
3816 proc_fdunlock(p);
3817 }
3818
3819 return retval;
3820 }
3821
3822 boolean_t
3823 proc_send_synchronous_EXC_RESOURCE(proc_t p)
3824 {
3825 if (p == PROC_NULL) {
3826 return FALSE;
3827 }
3828
3829 /* Send sync EXC_RESOURCE if the process is traced */
3830 if (ISSET(p->p_lflag, P_LTRACED)) {
3831 return TRUE;
3832 }
3833 return FALSE;
3834 }
3835
3836 size_t
3837 proc_get_syscall_filter_mask_size(int which)
3838 {
3839 if (which == SYSCALL_MASK_UNIX) {
3840 return nsysent;
3841 }
3842
3843 return 0;
3844 }
3845
3846 int
3847 proc_set_syscall_filter_mask(proc_t p, int which, unsigned char *maskptr, size_t masklen)
3848 {
3849 #if DEVELOPMENT || DEBUG
3850 if (syscallfilter_disable) {
3851 printf("proc_set_syscall_filter_mask: attempt to set policy for pid %d, but disabled by boot-arg\n", proc_pid(p));
3852 return KERN_SUCCESS;
3853 }
3854 #endif // DEVELOPMENT || DEBUG
3855
3856 if (which != SYSCALL_MASK_UNIX ||
3857 (maskptr != NULL && masklen != nsysent)) {
3858 return EINVAL;
3859 }
3860
3861 p->syscall_filter_mask = maskptr;
3862
3863 return KERN_SUCCESS;
3864 }
3865
3866 #ifdef CONFIG_32BIT_TELEMETRY
3867 void
3868 proc_log_32bit_telemetry(proc_t p)
3869 {
3870 /* Gather info */
3871 char signature_buf[MAX_32BIT_EXEC_SIG_SIZE] = { 0 };
3872 char * signature_cur_end = &signature_buf[0];
3873 char * signature_buf_end = &signature_buf[MAX_32BIT_EXEC_SIG_SIZE - 1];
3874 int bytes_printed = 0;
3875
3876 const char * teamid = NULL;
3877 const char * identity = NULL;
3878 struct cs_blob * csblob = NULL;
3879
3880 proc_list_lock();
3881
3882 /*
3883 * Get proc name and parent proc name; if the parent execs, we'll get a
3884 * garbled name.
3885 */
3886 bytes_printed = scnprintf(signature_cur_end,
3887 signature_buf_end - signature_cur_end,
3888 "%s,%s,", p->p_name,
3889 (p->p_pptr ? p->p_pptr->p_name : ""));
3890
3891 if (bytes_printed > 0) {
3892 signature_cur_end += bytes_printed;
3893 }
3894
3895 proc_list_unlock();
3896
3897 /* Get developer info. */
3898 vnode_t v = proc_getexecutablevnode(p);
3899
3900 if (v) {
3901 csblob = csvnode_get_blob(v, 0);
3902
3903 if (csblob) {
3904 teamid = csblob_get_teamid(csblob);
3905 identity = csblob_get_identity(csblob);
3906 }
3907 }
3908
3909 if (teamid == NULL) {
3910 teamid = "";
3911 }
3912
3913 if (identity == NULL) {
3914 identity = "";
3915 }
3916
3917 bytes_printed = scnprintf(signature_cur_end,
3918 signature_buf_end - signature_cur_end,
3919 "%s,%s", teamid, identity);
3920
3921 if (bytes_printed > 0) {
3922 signature_cur_end += bytes_printed;
3923 }
3924
3925 if (v) {
3926 vnode_put(v);
3927 }
3928
3929 /*
3930 * We may want to rate limit here, although the SUMMARIZE key should
3931 * help us aggregate events in userspace.
3932 */
3933
3934 /* Emit log */
3935 kern_asl_msg(LOG_DEBUG, "messagetracer", 3,
3936 /* 0 */ "com.apple.message.domain", "com.apple.kernel.32bit_exec",
3937 /* 1 */ "com.apple.message.signature", signature_buf,
3938 /* 2 */ "com.apple.message.summarize", "YES",
3939 NULL);
3940 }
3941 #endif /* CONFIG_32BIT_TELEMETRY */