/* bsd/kern/kern_proc.c (xnu-7195.101.1) */
/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_proc.c	8.4 (Berkeley) 1/4/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/* HISTORY
 *  04-Aug-97  Umesh Vaishampayan (umeshv@apple.com)
 *	Added current_proc_EXTERNAL() function for the use of kernel
 *	loadable modules.
 *
 *  05-Jun-95 Mac Gillon (mgillon) at NeXT
 *	New version based on 3.3NS and 4.4
 */


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc_internal.h>
#include <sys/acct.h>
#include <sys/wait.h>
#include <sys/file_internal.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/kauth.h>
#include <sys/codesign.h>
#include <sys/kernel_types.h>
#include <sys/ubc.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/coalition.h>
#include <sys/coalition.h>
#include <kern/assert.h>
#include <vm/vm_protos.h>
#include <vm/vm_map.h>          /* vm_map_switch_protect() */
#include <vm/vm_pageout.h>
#include <mach/task.h>
#include <mach/message.h>
#include <sys/priv.h>
#include <sys/proc_info.h>
#include <sys/bsdtask_info.h>
#include <sys/persona.h>
#include <sys/sysent.h>
#include <sys/reason.h>
#include <sys/proc_require.h>
#include <IOKit/IOBSD.h>        /* IOTaskHasEntitlement() */
#include <kern/ipc_kobject.h>   /* ipc_kobject_set_kobjidx() */

#ifdef CONFIG_32BIT_TELEMETRY
#include <sys/kasl.h>
#endif /* CONFIG_32BIT_TELEMETRY */

#if CONFIG_CSR
#include <sys/csr.h>
#endif

#if CONFIG_MEMORYSTATUS
#include <sys/kern_memorystatus.h>
#endif

#if CONFIG_MACF
#include <security/mac_framework.h>
#include <security/mac_mach_internal.h>
#endif

#include <libkern/crypto/sha1.h>

#ifdef CONFIG_32BIT_TELEMETRY
#define MAX_32BIT_EXEC_SIG_SIZE 160
#endif /* CONFIG_32BIT_TELEMETRY */

/*
 * Structure associated with user caching.
 */
struct uidinfo {
	LIST_ENTRY(uidinfo) ui_hash;
	uid_t   ui_uid;
	size_t  ui_proccnt;
};
#define UIHASH(uid)     (&uihashtbl[(uid) & uihash])
LIST_HEAD(uihashhead, uidinfo) * uihashtbl;
u_long uihash;          /* size of hash table - 1 */

/*
 * Other process lists
 */
struct pidhashhead *pidhashtbl;
u_long pidhash;
struct pgrphashhead *pgrphashtbl;
u_long pgrphash;
struct sesshashhead *sesshashtbl;
u_long sesshash;

struct proclist allproc;
struct proclist zombproc;
extern struct tty cons;

extern int cs_debug;

#if DEVELOPMENT || DEBUG
int syscallfilter_disable = 0;
#endif // DEVELOPMENT || DEBUG

#if DEBUG
#define __PROC_INTERNAL_DEBUG 1
#endif
#if CONFIG_COREDUMP
/* Name to give to core files */
#if defined(XNU_TARGET_OS_BRIDGE)
__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/private/var/internal/%N.core"};
#elif defined(XNU_TARGET_OS_OSX)
__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/cores/core.%P"};
#else
__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/private/var/cores/%N.core"};
#endif
#endif

#if PROC_REF_DEBUG
#include <kern/backtrace.h>
#endif

static LCK_MTX_DECLARE_ATTR(proc_klist_mlock, &proc_mlock_grp, &proc_lck_attr);

ZONE_DECLARE(pgrp_zone, "pgrp",
    sizeof(struct pgrp), ZC_ZFREE_CLEARMEM);
ZONE_DECLARE(session_zone, "session",
    sizeof(struct session), ZC_ZFREE_CLEARMEM);
/*
 * If you need accounting for KM_PROC consider using
 * ZONE_VIEW_DEFINE to define a zone view.
 */
#define KM_PROC KHEAP_DEFAULT

typedef uint64_t unaligned_u64 __attribute__((aligned(1)));

static void orphanpg(struct pgrp * pg);
void proc_name_kdp(task_t t, char * buf, int size);
boolean_t proc_binary_uuid_kdp(task_t task, uuid_t uuid);
int proc_threadname_kdp(void * uth, char * buf, size_t size);
void proc_starttime_kdp(void * p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unaligned_u64 *abstime);
void proc_archinfo_kdp(void* p, cpu_type_t* cputype, cpu_subtype_t* cpusubtype);
char * proc_name_address(void * p);
char * proc_longname_address(void *);

static void pgrp_add(struct pgrp * pgrp, proc_t parent, proc_t child);
static void pgrp_remove(proc_t p);
static void pgrp_replace(proc_t p, struct pgrp *pgrp);
static void pgdelete_dropref(struct pgrp *pgrp);
extern void pg_rele_dropref(struct pgrp * pgrp);
static int csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaddittoken);
static boolean_t proc_parent_is_currentproc(proc_t p);

struct fixjob_iterargs {
	struct pgrp * pg;
	struct session * mysession;
	int entering;
};

int fixjob_callback(proc_t, void *);

uint64_t
get_current_unique_pid(void)
{
	proc_t  p = current_proc();

	if (p) {
		return p->p_uniqueid;
	} else {
		return 0;
	}
}

/*
 * Initialize global process hashing structures.
 */
void
procinit(void)
{
	LIST_INIT(&allproc);
	LIST_INIT(&zombproc);
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
	sesshashtbl = hashinit(maxproc / 4, M_PROC, &sesshash);
	uihashtbl = hashinit(maxproc / 16, M_PROC, &uihash);
#if CONFIG_PERSONAS
	personas_bootstrap();
#endif
}

/*
 * Change the count of processes associated with a given user.
 * This routine protects the uihash table with the proc list lock.
 */
size_t
chgproccnt(uid_t uid, int diff)
{
	struct uidinfo *uip;
	struct uidinfo *newuip = NULL;
	struct uihashhead *uipp;
	size_t retval;

again:
	proc_list_lock();
	uipp = UIHASH(uid);
	for (uip = uipp->lh_first; uip != 0; uip = uip->ui_hash.le_next) {
		if (uip->ui_uid == uid) {
			break;
		}
	}
	if (uip) {
		uip->ui_proccnt += diff;
		if (uip->ui_proccnt > 0) {
			retval = uip->ui_proccnt;
			proc_list_unlock();
			goto out;
		}
		LIST_REMOVE(uip, ui_hash);
		retval = 0;
		proc_list_unlock();
		kheap_free(KM_PROC, uip, sizeof(struct uidinfo));
		goto out;
	}
	if (diff <= 0) {
		if (diff == 0) {
			retval = 0;
			proc_list_unlock();
			goto out;
		}
		panic("chgproccnt: lost user");
	}
	if (newuip != NULL) {
		uip = newuip;
		newuip = NULL;
		LIST_INSERT_HEAD(uipp, uip, ui_hash);
		uip->ui_uid = uid;
		uip->ui_proccnt = diff;
		retval = diff;
		proc_list_unlock();
		goto out;
	}
	proc_list_unlock();
	newuip = kheap_alloc(KM_PROC, sizeof(struct uidinfo), Z_WAITOK);
	if (newuip == NULL) {
		panic("chgproccnt: M_PROC zone depleted");
	}
	goto again;
out:
	kheap_free(KM_PROC, newuip, sizeof(struct uidinfo));
	return retval;
}
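
/*
 * Illustrative sketch (not part of the original file): how callers are
 * expected to pair chgproccnt() calls so the per-uid count stays balanced.
 * The real call sites live elsewhere (e.g. fork/exit paths); the uid value
 * here is hypothetical.
 */
#if 0 /* example only */
static void
example_proc_accounting(uid_t uid)
{
	/* On process creation, charge the new process to its real uid. */
	(void)chgproccnt(uid, 1);

	/* ... process runs ... */

	/* On exit (or a uid change), release the charge. */
	(void)chgproccnt(uid, -1);
}
#endif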

/*
 * Is p an inferior of the current process?
 */
int
inferior(proc_t p)
{
	int retval = 0;

	proc_list_lock();
	for (; p != current_proc(); p = p->p_pptr) {
		if (p->p_pid == 0) {
			goto out;
		}
	}
	retval = 1;
out:
	proc_list_unlock();
	return retval;
}

/*
 * Is p an inferior of t?
 */
int
isinferior(proc_t p, proc_t t)
{
	int retval = 0;
	int nchecked = 0;
	proc_t start = p;

	/* if p == t they are not inferior */
	if (p == t) {
		return 0;
	}

	proc_list_lock();
	for (; p != t; p = p->p_pptr) {
		nchecked++;

		/* Detect here if we're in a cycle */
		if ((p->p_pid == 0) || (p->p_pptr == start) || (nchecked >= nprocs)) {
			goto out;
		}
	}
	retval = 1;
out:
	proc_list_unlock();
	return retval;
}

int
proc_isinferior(int pid1, int pid2)
{
	proc_t p = PROC_NULL;
	proc_t t = PROC_NULL;
	int retval = 0;

	if (((p = proc_find(pid1)) != (proc_t)0) && ((t = proc_find(pid2)) != (proc_t)0)) {
		retval = isinferior(p, t);
	}

	if (p != PROC_NULL) {
		proc_rele(p);
	}
	if (t != PROC_NULL) {
		proc_rele(t);
	}

	return retval;
}

proc_t
proc_find(int pid)
{
	return proc_findinternal(pid, 0);
}

proc_t
proc_findinternal(int pid, int locked)
{
	proc_t p = PROC_NULL;

	if (locked == 0) {
		proc_list_lock();
	}

	p = pfind_locked(pid);
	if ((p == PROC_NULL) || (p != proc_ref_locked(p))) {
		p = PROC_NULL;
	}

	if (locked == 0) {
		proc_list_unlock();
	}

	return p;
}

proc_t
proc_findthread(thread_t thread)
{
	proc_t p = PROC_NULL;
	struct uthread *uth;

	proc_list_lock();
	uth = get_bsdthread_info(thread);
	if (uth && (uth->uu_flag & UT_VFORK)) {
		p = uth->uu_proc;
	} else {
		p = (proc_t)(get_bsdthreadtask_info(thread));
	}
	p = proc_ref_locked(p);
	proc_list_unlock();
	return p;
}

/*
 * Returns the process identity of a given process. Calling this function is
 * not racy for the current process, or if a reference to the process is held.
 */
struct proc_ident
proc_ident(proc_t p)
{
	struct proc_ident ident = {
		.p_pid = proc_pid(p),
		.p_uniqueid = proc_uniqueid(p),
		.p_idversion = proc_pidversion(p),
	};

	return ident;
}

proc_t
proc_find_ident(struct proc_ident const *ident)
{
	proc_t proc = PROC_NULL;

	proc = proc_find(ident->p_pid);
	if (proc == PROC_NULL) {
		return PROC_NULL;
	}

	if (proc_uniqueid(proc) != ident->p_uniqueid ||
	    proc_pidversion(proc) != ident->p_idversion) {
		proc_rele(proc);
		return PROC_NULL;
	}

	return proc;
}
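
/*
 * Illustrative sketch (not part of the original file): the intended
 * capture/re-resolve pattern for struct proc_ident. An ident taken while a
 * ref is held can later be turned back into a proc ref; the lookup fails if
 * the pid has been recycled (uniqueid/idversion mismatch).
 */
#if 0 /* example only */
static void
example_ident_usage(proc_t p)
{
	/* p is assumed to be held here (e.g. returned by proc_find()). */
	struct proc_ident ident = proc_ident(p);
	proc_rele(p);

	/* ... later, possibly after the process has exited ... */

	proc_t q = proc_find_ident(&ident);
	if (q != PROC_NULL) {
		/* same incarnation of the process; use it, then drop the ref */
		proc_rele(q);
	}
}
#endif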

void
uthread_reset_proc_refcount(void *uthread)
{
	uthread_t uth;

	uth = (uthread_t) uthread;
	uth->uu_proc_refcount = 0;

#if PROC_REF_DEBUG
	if (proc_ref_tracking_disabled) {
		return;
	}

	uth->uu_pindex = 0;
#endif
}

#if PROC_REF_DEBUG
int
uthread_get_proc_refcount(void *uthread)
{
	uthread_t uth;

	if (proc_ref_tracking_disabled) {
		return 0;
	}

	uth = (uthread_t) uthread;

	return uth->uu_proc_refcount;
}
#endif

static void
record_procref(proc_t p __unused, int count)
{
	uthread_t uth;

	uth = current_uthread();
	uth->uu_proc_refcount += count;

#if PROC_REF_DEBUG
	if (proc_ref_tracking_disabled) {
		return;
	}

	if (uth->uu_pindex < NUM_PROC_REFS_TO_TRACK) {
		backtrace((uintptr_t *) &uth->uu_proc_pcs[uth->uu_pindex],
		    PROC_REF_STACK_DEPTH, NULL);

		uth->uu_proc_ps[uth->uu_pindex] = p;
		uth->uu_pindex++;
	}
#endif
}

static boolean_t
uthread_needs_to_wait_in_proc_refwait(void)
{
	uthread_t uth = current_uthread();

	/*
	 * Allow only threads holding no proc refs to wait in proc_refwait;
	 * letting threads that already hold proc refs wait there causes
	 * deadlocks and makes proc_find non-reentrant.
	 */
	if (uth->uu_proc_refcount == 0) {
		return TRUE;
	}

	return FALSE;
}

int
proc_rele(proc_t p)
{
	proc_list_lock();
	proc_rele_locked(p);
	proc_list_unlock();

	return 0;
}
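
/*
 * Illustrative sketch (not part of the original file): the canonical
 * pairing of proc_find() with proc_rele(). Every successful find returns a
 * held ref that must be dropped exactly once.
 */
#if 0 /* example only */
static void
example_find_rele(pid_t pid)
{
	proc_t p = proc_find(pid);
	if (p != PROC_NULL) {
		/* ... inspect or signal p while the ref pins it ... */
		proc_rele(p);
	}
}
#endif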

proc_t
proc_self(void)
{
	struct proc * p;

	p = current_proc();

	proc_list_lock();
	if (p != proc_ref_locked(p)) {
		p = PROC_NULL;
	}
	proc_list_unlock();
	return p;
}


proc_t
proc_ref_locked(proc_t p)
{
	proc_t p1 = p;
	int pid = proc_pid(p);

retry:
	/*
	 * If the process is still being created, or the proc got recycled
	 * during msleep, return failure.
	 */
	if ((p == PROC_NULL) || (p1 != p) || ((p->p_listflag & P_LIST_INCREATE) != 0)) {
		return PROC_NULL;
	}

	/*
	 * Do not return a process marked for termination, or one in
	 * refdrain without refwait. If the process is in refdrain and the
	 * refwait flag is set, wait for proc_refdrain_with_refwait to
	 * complete, unless the current thread already holds a proc ref
	 * on any proc.
	 */
	if ((p->p_stat != SZOMB) &&
	    ((p->p_listflag & P_LIST_EXITED) == 0) &&
	    ((p->p_listflag & P_LIST_DEAD) == 0) &&
	    (((p->p_listflag & (P_LIST_DRAIN | P_LIST_DRAINWAIT)) == 0) ||
	    ((p->p_listflag & P_LIST_REFWAIT) != 0))) {
		if ((p->p_listflag & P_LIST_REFWAIT) != 0 && uthread_needs_to_wait_in_proc_refwait()) {
			msleep(&p->p_listflag, &proc_list_mlock, 0, "proc_refwait", 0);
			/*
			 * The proc might have been recycled since we dropped
			 * the proc list lock; get the proc again.
			 */
			p = pfind_locked(pid);
			goto retry;
		}
		p->p_refcount++;
		record_procref(p, 1);
	} else {
		p1 = PROC_NULL;
	}

	return p1;
}

void
proc_rele_locked(proc_t p)
{
	if (p->p_refcount > 0) {
		p->p_refcount--;
		record_procref(p, -1);
		if ((p->p_refcount == 0) && ((p->p_listflag & P_LIST_DRAINWAIT) == P_LIST_DRAINWAIT)) {
			p->p_listflag &= ~P_LIST_DRAINWAIT;
			wakeup(&p->p_refcount);
		}
	} else {
		panic("proc_rele_locked -ve ref\n");
	}
}

proc_t
proc_find_zombref(int pid)
{
	proc_t p;

	proc_list_lock();

again:
	p = pfind_locked(pid);

	/* should we bail? */
	if ((p == PROC_NULL)                                    /* not found */
	    || ((p->p_listflag & P_LIST_INCREATE) != 0)         /* not created yet */
	    || ((p->p_listflag & P_LIST_EXITED) == 0)) {        /* not started exit */
		proc_list_unlock();
		return PROC_NULL;
	}

	/* If someone else is controlling the (unreaped) zombie - wait */
	if ((p->p_listflag & P_LIST_WAITING) != 0) {
		(void)msleep(&p->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
		goto again;
	}
	p->p_listflag |= P_LIST_WAITING;

	proc_list_unlock();

	return p;
}

void
proc_drop_zombref(proc_t p)
{
	proc_list_lock();
	if ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
		p->p_listflag &= ~P_LIST_WAITING;
		wakeup(&p->p_stat);
	}
	proc_list_unlock();
}


void
proc_refdrain(proc_t p)
{
	proc_refdrain_with_refwait(p, FALSE);
}

proc_t
proc_refdrain_with_refwait(proc_t p, boolean_t get_ref_and_allow_wait)
{
	boolean_t initexec = FALSE;
	proc_list_lock();

	p->p_listflag |= P_LIST_DRAIN;
	if (get_ref_and_allow_wait) {
		/*
		 * All the calls to proc_ref_locked will wait
		 * for the flag to get cleared before returning a ref,
		 * unless the current thread already holds a proc ref
		 * on any proc.
		 */
		p->p_listflag |= P_LIST_REFWAIT;
		if (p == initproc) {
			initexec = TRUE;
		}
	}

	/* Do not wait in ref drain for launchd exec */
	while (p->p_refcount && !initexec) {
		p->p_listflag |= P_LIST_DRAINWAIT;
		msleep(&p->p_refcount, &proc_list_mlock, 0, "proc_refdrain", 0);
	}

	p->p_listflag &= ~P_LIST_DRAIN;
	if (!get_ref_and_allow_wait) {
		p->p_listflag |= P_LIST_DEAD;
	} else {
		/* Return a ref to the caller */
		p->p_refcount++;
		record_procref(p, 1);
	}

	proc_list_unlock();

	if (get_ref_and_allow_wait) {
		return p;
	}
	return NULL;
}

void
proc_refwake(proc_t p)
{
	proc_list_lock();
	p->p_listflag &= ~P_LIST_REFWAIT;
	wakeup(&p->p_listflag);
	proc_list_unlock();
}

proc_t
proc_parentholdref(proc_t p)
{
	proc_t parent = PROC_NULL;
	proc_t pp;
	int loopcnt = 0;


	proc_list_lock();
loop:
	pp = p->p_pptr;
	if ((pp == PROC_NULL) || (pp->p_stat == SZOMB) || ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED))) {
		parent = PROC_NULL;
		goto out;
	}

	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == P_LIST_CHILDDRSTART) {
		pp->p_listflag |= P_LIST_CHILDDRWAIT;
		msleep(&pp->p_childrencnt, &proc_list_mlock, 0, "proc_parent", 0);
		loopcnt++;
		if (loopcnt == 5) {
			parent = PROC_NULL;
			goto out;
		}
		goto loop;
	}

	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == 0) {
		pp->p_parentref++;
		parent = pp;
		goto out;
	}

out:
	proc_list_unlock();
	return parent;
}

int
proc_parentdropref(proc_t p, int listlocked)
{
	if (listlocked == 0) {
		proc_list_lock();
	}

	if (p->p_parentref > 0) {
		p->p_parentref--;
		if ((p->p_parentref == 0) && ((p->p_listflag & P_LIST_PARENTREFWAIT) == P_LIST_PARENTREFWAIT)) {
			p->p_listflag &= ~P_LIST_PARENTREFWAIT;
			wakeup(&p->p_parentref);
		}
	} else {
		panic("proc_parentdropref -ve ref\n");
	}
	if (listlocked == 0) {
		proc_list_unlock();
	}

	return 0;
}

void
proc_childdrainstart(proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_CHILDDRSTART) == P_LIST_CHILDDRSTART) {
		panic("proc_childdrainstart: childdrain already started\n");
	}
#endif
	p->p_listflag |= P_LIST_CHILDDRSTART;
	/* wait for all that hold parentrefs to drop */
	while (p->p_parentref > 0) {
		p->p_listflag |= P_LIST_PARENTREFWAIT;
		msleep(&p->p_parentref, &proc_list_mlock, 0, "proc_childdrainstart", 0);
	}
}


void
proc_childdrainend(proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if (p->p_childrencnt > 0) {
		panic("exiting: children still hanging around\n");
	}
#endif
	p->p_listflag |= P_LIST_CHILDDRAINED;
	if ((p->p_listflag & (P_LIST_CHILDLKWAIT | P_LIST_CHILDDRWAIT)) != 0) {
		p->p_listflag &= ~(P_LIST_CHILDLKWAIT | P_LIST_CHILDDRWAIT);
		wakeup(&p->p_childrencnt);
	}
}

void
proc_checkdeadrefs(__unused proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INHASH) != 0) {
		panic("proc being freed and still in hash %p: %u\n", p, p->p_listflag);
	}
	if (p->p_childrencnt != 0) {
		panic("proc being freed and pending children cnt %p:%d\n", p, p->p_childrencnt);
	}
	if (p->p_refcount != 0) {
		panic("proc being freed and pending refcount %p:%d\n", p, p->p_refcount);
	}
	if (p->p_parentref != 0) {
		panic("proc being freed and pending parentrefs %p:%d\n", p, p->p_parentref);
	}
#endif
}


__attribute__((always_inline, visibility("hidden")))
void
proc_require(proc_t proc, proc_require_flags_t flags)
{
	if ((flags & PROC_REQUIRE_ALLOW_NULL) && proc == PROC_NULL) {
		return;
	}
	if ((flags & PROC_REQUIRE_ALLOW_KERNPROC) && proc == &proc0) {
		return;
	}
	zone_id_require(ZONE_ID_PROC, sizeof(struct proc), proc);
}

int
proc_pid(proc_t p)
{
	if (p != NULL) {
		proc_require(p, PROC_REQUIRE_ALLOW_KERNPROC);
		return p->p_pid;
	}
	return -1;
}

int
proc_ppid(proc_t p)
{
	if (p != NULL) {
		proc_require(p, PROC_REQUIRE_ALLOW_KERNPROC);
		return p->p_ppid;
	}
	return -1;
}

int
proc_original_ppid(proc_t p)
{
	if (p != NULL) {
		proc_require(p, PROC_REQUIRE_ALLOW_KERNPROC);
		return p->p_original_ppid;
	}
	return -1;
}

int
proc_starttime(proc_t p, struct timeval *tv)
{
	if (p != NULL && tv != NULL) {
		tv->tv_sec = p->p_start.tv_sec;
		tv->tv_usec = p->p_start.tv_usec;
		return 0;
	}
	return EINVAL;
}

int
proc_selfpid(void)
{
	return current_proc()->p_pid;
}

int
proc_selfppid(void)
{
	return current_proc()->p_ppid;
}

uint64_t
proc_selfcsflags(void)
{
	return (uint64_t)current_proc()->p_csflags;
}

int
proc_csflags(proc_t p, uint64_t *flags)
{
	if (p && flags) {
		proc_require(p, PROC_REQUIRE_ALLOW_KERNPROC);
		*flags = (uint64_t)p->p_csflags;
		return 0;
	}
	return EINVAL;
}

uint32_t
proc_platform(const proc_t p)
{
	if (p != NULL) {
		return p->p_platform;
	}
	return (uint32_t)-1;
}

uint32_t
proc_min_sdk(proc_t p)
{
	if (p != NULL) {
		return p->p_min_sdk;
	}
	return (uint32_t)-1;
}

uint32_t
proc_sdk(proc_t p)
{
	if (p != NULL) {
		return p->p_sdk;
	}
	return (uint32_t)-1;
}

#if CONFIG_DTRACE
static proc_t
dtrace_current_proc_vforking(void)
{
	thread_t th = current_thread();
	struct uthread *ut = get_bsdthread_info(th);

	if (ut &&
	    ((ut->uu_flag & (UT_VFORK | UT_VFORKING)) == (UT_VFORK | UT_VFORKING))) {
		/*
		 * Handle the narrow window where we're in the vfork syscall,
		 * but we're not quite ready to claim (in particular, to DTrace)
		 * that we're running as the child.
		 */
		return get_bsdtask_info(get_threadtask(th));
	}
	return current_proc();
}

int
dtrace_proc_selfpid(void)
{
	return dtrace_current_proc_vforking()->p_pid;
}

int
dtrace_proc_selfppid(void)
{
	return dtrace_current_proc_vforking()->p_ppid;
}

uid_t
dtrace_proc_selfruid(void)
{
	return dtrace_current_proc_vforking()->p_ruid;
}
#endif /* CONFIG_DTRACE */

proc_t
proc_parent(proc_t p)
{
	proc_t parent;
	proc_t pp;

	proc_list_lock();
loop:
	pp = p->p_pptr;
	parent = proc_ref_locked(pp);
	if ((parent == PROC_NULL) && (pp != PROC_NULL) && (pp->p_stat != SZOMB) && ((pp->p_listflag & P_LIST_EXITED) != 0) && ((pp->p_listflag & P_LIST_CHILDDRAINED) == 0)) {
		pp->p_listflag |= P_LIST_CHILDLKWAIT;
		msleep(&pp->p_childrencnt, &proc_list_mlock, 0, "proc_parent", 0);
		goto loop;
	}
	proc_list_unlock();
	return parent;
}

static boolean_t
proc_parent_is_currentproc(proc_t p)
{
	boolean_t ret = FALSE;

	proc_list_lock();
	if (p->p_pptr == current_proc()) {
		ret = TRUE;
	}

	proc_list_unlock();
	return ret;
}

void
proc_name(int pid, char * buf, int size)
{
	proc_t p;

	if (size <= 0) {
		return;
	}

	bzero(buf, size);

	if ((p = proc_find(pid)) != PROC_NULL) {
		strlcpy(buf, &p->p_comm[0], size);
		proc_rele(p);
	}
}

void
proc_name_kdp(task_t t, char * buf, int size)
{
	proc_t p = get_bsdtask_info(t);
	if (p == PROC_NULL) {
		return;
	}

	if ((size_t)size > sizeof(p->p_comm)) {
		strlcpy(buf, &p->p_name[0], MIN((int)sizeof(p->p_name), size));
	} else {
		strlcpy(buf, &p->p_comm[0], MIN((int)sizeof(p->p_comm), size));
	}
}

boolean_t
proc_binary_uuid_kdp(task_t task, uuid_t uuid)
{
	proc_t p = get_bsdtask_info(task);
	if (p == PROC_NULL) {
		return FALSE;
	}

	proc_getexecutableuuid(p, uuid, sizeof(uuid_t));

	return TRUE;
}

int
proc_threadname_kdp(void * uth, char * buf, size_t size)
{
	if (size < MAXTHREADNAMESIZE) {
		/* this is really just a protective measure for the future in
		 * case the thread name size in stackshot gets out of sync with
		 * the BSD max thread name size. Note that bsd_getthreadname
		 * doesn't take input buffer size into account. */
		return -1;
	}

	if (uth != NULL) {
		bsd_getthreadname(uth, buf);
	}
	return 0;
}


/* Note that this function is generally going to be called from stackshot,
 * and the arguments will be coming from a struct which is declared packed;
 * thus the input arguments will in general be unaligned. We have to handle
 * that here. */
void
proc_starttime_kdp(void *p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unaligned_u64 *abstime)
{
	proc_t pp = (proc_t)p;
	if (pp != PROC_NULL) {
		if (tv_sec != NULL) {
			*tv_sec = pp->p_start.tv_sec;
		}
		if (tv_usec != NULL) {
			*tv_usec = pp->p_start.tv_usec;
		}
		if (abstime != NULL) {
			if (pp->p_stats != NULL) {
				*abstime = pp->p_stats->ps_start;
			} else {
				*abstime = 0;
			}
		}
	}
}
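
/*
 * Illustrative sketch (not part of the original file): why the
 * unaligned_u64 typedef matters above. Stackshot hands in pointers into a
 * packed structure, so a plain uint64_t store could fault on
 * alignment-strict targets; aligned(1) tells the compiler to emit safe
 * accesses. The struct below is hypothetical.
 */
#if 0 /* example only */
struct example_packed_times {
	uint8_t        tag;
	unaligned_u64  sec;     /* byte offset 1: not naturally aligned */
	unaligned_u64  usec;
} __attribute__((packed));
#endif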

void
proc_archinfo_kdp(void* p, cpu_type_t* cputype, cpu_subtype_t* cpusubtype)
{
	proc_t pp = (proc_t)p;
	if (pp != PROC_NULL) {
		*cputype = pp->p_cputype;
		*cpusubtype = pp->p_cpusubtype;
	}
}

char *
proc_name_address(void *p)
{
	return &((proc_t)p)->p_comm[0];
}

char *
proc_longname_address(void *p)
{
	return &((proc_t)p)->p_name[0];
}

char *
proc_best_name(proc_t p)
{
	if (p->p_name[0] != '\0') {
		return &p->p_name[0];
	}
	return &p->p_comm[0];
}

void
proc_selfname(char * buf, int size)
{
	proc_t p;

	if ((p = current_proc()) != (proc_t)0) {
		strlcpy(buf, &p->p_comm[0], size);
	}
}

void
proc_signal(int pid, int signum)
{
	proc_t p;

	if ((p = proc_find(pid)) != PROC_NULL) {
		psignal(p, signum);
		proc_rele(p);
	}
}

int
proc_issignal(int pid, sigset_t mask)
{
	proc_t p;
	int error = 0;

	if ((p = proc_find(pid)) != PROC_NULL) {
		error = proc_pendingsignals(p, mask);
		proc_rele(p);
	}

	return error;
}

int
proc_noremotehang(proc_t p)
{
	int retval = 0;

	if (p) {
		retval = p->p_flag & P_NOREMOTEHANG;
	}
	return retval ? 1 : 0;
}

int
proc_exiting(proc_t p)
{
	int retval = 0;

	if (p) {
		retval = p->p_lflag & P_LEXIT;
	}
	return retval ? 1 : 0;
}

int
proc_in_teardown(proc_t p)
{
	int retval = 0;

	if (p) {
		retval = p->p_lflag & P_LPEXIT;
	}
	return retval ? 1 : 0;
}

int
proc_forcequota(proc_t p)
{
	int retval = 0;

	if (p) {
		retval = p->p_flag & P_FORCEQUOTA;
	}
	return retval ? 1 : 0;
}

int
proc_suser(proc_t p)
{
	kauth_cred_t my_cred;
	int error;

	my_cred = kauth_cred_proc_ref(p);
	error = suser(my_cred, &p->p_acflag);
	kauth_cred_unref(&my_cred);
	return error;
}

task_t
proc_task(proc_t proc)
{
	return (task_t)proc->task;
}

/*
 * Obtain the first thread in a process
 *
 * XXX This is a bad thing to do; it exists predominantly to support the
 * XXX use of proc_t's in places that should really be using
 * XXX thread_t's instead.  This maintains historical behaviour, but really
 * XXX needs an audit of the context (proxy vs. not) to clean up.
 */
thread_t
proc_thread(proc_t proc)
{
	LCK_MTX_ASSERT(&proc->p_mlock, LCK_MTX_ASSERT_OWNED);

	uthread_t uth = TAILQ_FIRST(&proc->p_uthlist);

	if (uth != NULL) {
		return uth->uu_context.vc_thread;
	}

	return NULL;
}

kauth_cred_t
proc_ucred(proc_t p)
{
	return p->p_ucred;
}

struct uthread *
current_uthread(void)
{
	thread_t th = current_thread();

	return (struct uthread *)get_bsdthread_info(th);
}


int
proc_is64bit(proc_t p)
{
	return IS_64BIT_PROCESS(p);
}

int
proc_is64bit_data(proc_t p)
{
	assert(p->task);
	return (int)task_get_64bit_data(p->task);
}

int
proc_isinitproc(proc_t p)
{
	if (initproc == NULL) {
		return 0;
	}
	return p == initproc;
}

int
proc_pidversion(proc_t p)
{
	return p->p_idversion;
}

uint32_t
proc_persona_id(proc_t p)
{
	return (uint32_t)persona_id_from_proc(p);
}

uint32_t
proc_getuid(proc_t p)
{
	return p->p_uid;
}

uint32_t
proc_getgid(proc_t p)
{
	return p->p_gid;
}

uint64_t
proc_uniqueid(proc_t p)
{
	return p->p_uniqueid;
}

uint64_t
proc_puniqueid(proc_t p)
{
	return p->p_puniqueid;
}

void
proc_coalitionids(__unused proc_t p, __unused uint64_t ids[COALITION_NUM_TYPES])
{
#if CONFIG_COALITIONS
	task_coalition_ids(p->task, ids);
#else
	memset(ids, 0, sizeof(uint64_t[COALITION_NUM_TYPES]));
#endif
	return;
}

uint64_t
proc_was_throttled(proc_t p)
{
	return p->was_throttled;
}

uint64_t
proc_did_throttle(proc_t p)
{
	return p->did_throttle;
}

int
proc_getcdhash(proc_t p, unsigned char *cdhash)
{
	return vn_getcdhash(p->p_textvp, p->p_textoff, cdhash);
}

int
proc_exitstatus(proc_t p)
{
	return p->p_xstat & 0xffff;
}

void
proc_getexecutableuuid(proc_t p, unsigned char *uuidbuf, unsigned long size)
{
	if (size >= sizeof(p->p_uuid)) {
		memcpy(uuidbuf, p->p_uuid, sizeof(p->p_uuid));
	}
}

/* Return vnode for executable with an iocount. Must be released with vnode_put() */
vnode_t
proc_getexecutablevnode(proc_t p)
{
	vnode_t tvp = p->p_textvp;

	if (tvp != NULLVP) {
		if (vnode_getwithref(tvp) == 0) {
			return tvp;
		}
	}

	return NULLVP;
}

int
proc_gettty(proc_t p, vnode_t *vp)
{
	if (!p || !vp) {
		return EINVAL;
	}

	struct session *procsp = proc_session(p);
	int err = EINVAL;

	if (procsp != SESSION_NULL) {
		session_lock(procsp);
		vnode_t ttyvp = procsp->s_ttyvp;
		int ttyvid = procsp->s_ttyvid;
		session_unlock(procsp);

		if (ttyvp) {
			if (vnode_getwithvid(ttyvp, ttyvid) == 0) {
				*vp = ttyvp;
				err = 0;
			}
		} else {
			err = ENOENT;
		}

		session_rele(procsp);
	}

	return err;
}

int
proc_gettty_dev(proc_t p, dev_t *dev)
{
	struct session *procsp = proc_session(p);
	boolean_t has_tty = FALSE;

	if (procsp != SESSION_NULL) {
		session_lock(procsp);

		struct tty * tp = SESSION_TP(procsp);
		if (tp != TTY_NULL) {
			*dev = tp->t_dev;
			has_tty = TRUE;
		}

		session_unlock(procsp);
		session_rele(procsp);
	}

	if (has_tty) {
		return 0;
	} else {
		return EINVAL;
	}
}

int
proc_selfexecutableargs(uint8_t *buf, size_t *buflen)
{
	proc_t p = current_proc();

	// buflen must always be provided
	if (buflen == NULL) {
		return EINVAL;
	}

	// If a buf is provided, there must be at least enough room to fit argc
	if (buf && *buflen < sizeof(p->p_argc)) {
		return EINVAL;
	}

	if (!p->user_stack) {
		return EINVAL;
	}

	if (buf == NULL) {
		*buflen = p->p_argslen + sizeof(p->p_argc);
		return 0;
	}

	// Copy in argc to the first 4 bytes
	memcpy(buf, &p->p_argc, sizeof(p->p_argc));

	if (*buflen > sizeof(p->p_argc) && p->p_argslen > 0) {
		// See memory layout comment in kern_exec.c:exec_copyout_strings()
		// We want to copy starting from `p_argslen` bytes away from top of stack
		return copyin(p->user_stack - p->p_argslen,
		           buf + sizeof(p->p_argc),
		           MIN(p->p_argslen, *buflen - sizeof(p->p_argc)));
	} else {
		return 0;
	}
}
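
/*
 * Illustrative sketch (not part of the original file): the two-call
 * pattern this interface supports. First call with buf == NULL to learn the
 * required size, then call again with a buffer; the first 4 bytes hold
 * argc, followed by the packed argument strings. The helper name is
 * hypothetical.
 */
#if 0 /* example only */
static int
example_get_own_args(void)
{
	size_t len = 0;
	int err = proc_selfexecutableargs(NULL, &len);  /* size query */
	if (err != 0) {
		return err;
	}
	uint8_t *buf = kheap_alloc(KHEAP_TEMP, len, Z_WAITOK);
	if (buf == NULL) {
		return ENOMEM;
	}
	err = proc_selfexecutableargs(buf, &len);       /* fetch argc + args */
	/* ... parse: 4 bytes of argc, then NUL-separated strings ... */
	kheap_free(KHEAP_TEMP, buf, len);
	return err;
}
#endif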

off_t
proc_getexecutableoffset(proc_t p)
{
	return p->p_textoff;
}

void
bsd_set_dependency_capable(task_t task)
{
	proc_t p = get_bsdtask_info(task);

	if (p) {
		OSBitOrAtomic(P_DEPENDENCY_CAPABLE, &p->p_flag);
	}
}


#ifndef __arm__
int
IS_64BIT_PROCESS(proc_t p)
{
	if (p && (p->p_flag & P_LP64)) {
		return 1;
	} else {
		return 0;
	}
}
#endif

/*
 * Locate a process by number
 */
proc_t
pfind_locked(pid_t pid)
{
	proc_t p;
#if DEBUG
	proc_t q;
#endif

	if (!pid) {
		return kernproc;
	}

	for (p = PIDHASH(pid)->lh_first; p != 0; p = p->p_hash.le_next) {
		if (p->p_pid == pid) {
#if DEBUG
			for (q = p->p_hash.le_next; q != 0; q = q->p_hash.le_next) {
				if ((p != q) && (q->p_pid == pid)) {
					panic("two procs with same pid %p:%p:%d:%d\n", p, q, p->p_pid, q->p_pid);
				}
			}
#endif
			return p;
		}
	}
	return NULL;
}

/*
 * Locate a zombie by PID
 */
__private_extern__ proc_t
pzfind(pid_t pid)
{
	proc_t p;


	proc_list_lock();

	for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (p->p_pid == pid) {
			break;
		}
	}

	proc_list_unlock();

	return p;
}

/*
 * Locate a process group by number
 */

struct pgrp *
pgfind(pid_t pgid)
{
	struct pgrp * pgrp;

	proc_list_lock();
	pgrp = pgfind_internal(pgid);
	if ((pgrp == NULL) || ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) != 0)) {
		pgrp = PGRP_NULL;
	} else {
		pgrp->pg_refcount++;
	}
	proc_list_unlock();
	return pgrp;
}



struct pgrp *
pgfind_internal(pid_t pgid)
{
	struct pgrp *pgrp;

	for (pgrp = PGRPHASH(pgid)->lh_first; pgrp != 0; pgrp = pgrp->pg_hash.le_next) {
		if (pgrp->pg_id == pgid) {
			return pgrp;
		}
	}
	return NULL;
}

void
pg_rele(struct pgrp * pgrp)
{
	if (pgrp == PGRP_NULL) {
		return;
	}
	pg_rele_dropref(pgrp);
}

void
pg_rele_dropref(struct pgrp * pgrp)
{
	proc_list_lock();
	if ((pgrp->pg_refcount == 1) && ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) == PGRP_FLAG_TERMINATE)) {
		proc_list_unlock();
		pgdelete_dropref(pgrp);
		return;
	}

	pgrp->pg_refcount--;
	proc_list_unlock();
}

struct session *
session_find_internal(pid_t sessid)
{
	struct session *sess;

	for (sess = SESSHASH(sessid)->lh_first; sess != 0; sess = sess->s_hash.le_next) {
		if (sess->s_sid == sessid) {
			return sess;
		}
	}
	return NULL;
}


/*
 * Make a new process ready to become a useful member of society by making it
 * visible in all the right places and initializing its own lists to empty.
 *
 * Parameters:	parent			The parent of the process to insert
 *		child			The child process to insert
 *
 * Returns:	(void)
 *
 * Notes:	Insert a child process into the parent's process group, assign
 *		the child the parent process pointer and PPID of the parent,
 *		place it on the parent's p_children list as a sibling,
 *		initialize its own child list, place it in the allproc list,
 *		insert it in the proper hash bucket, and initialize its
 *		event list.
 */
void
pinsertchild(proc_t parent, proc_t child)
{
	struct pgrp * pg;

	LIST_INIT(&child->p_children);
	child->p_pptr = parent;
	child->p_ppid = parent->p_pid;
	child->p_original_ppid = parent->p_pid;
	child->p_puniqueid = parent->p_uniqueid;
	child->p_xhighbits = 0;

	pg = proc_pgrp(parent);
	pgrp_add(pg, parent, child);
	pg_rele(pg);

	proc_list_lock();

#if CONFIG_MEMORYSTATUS
	memorystatus_add(child, TRUE);
#endif

	parent->p_childrencnt++;
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);

	LIST_INSERT_HEAD(&allproc, child, p_list);
	/* mark the completion of proc creation */
	child->p_listflag &= ~P_LIST_INCREATE;

	proc_list_unlock();
}

/*
 * Move p to a new or existing process group (and session)
 *
 * Returns:	0			Success
 *		ESRCH			No such process
 */
int
enterpgrp(proc_t p, pid_t pgid, int mksess)
{
	struct pgrp *pgrp;
	struct pgrp *mypgrp;
	struct session * procsp;

	pgrp = pgfind(pgid);
	mypgrp = proc_pgrp(p);
	procsp = proc_session(p);

#if DIAGNOSTIC
	if (pgrp != NULL && mksess) {   /* firewalls */
		panic("enterpgrp: setsid into non-empty pgrp");
	}
	if (SESS_LEADER(p, procsp)) {
		panic("enterpgrp: session leader attempted setpgrp");
	}
#endif
	if (pgrp == PGRP_NULL) {
		pid_t savepid = p->p_pid;
		proc_t np = PROC_NULL;
		/*
		 * new process group
		 */
#if DIAGNOSTIC
		if (p->p_pid != pgid) {
			panic("enterpgrp: new pgrp and pid != pgid");
		}
#endif
		pgrp = zalloc_flags(pgrp_zone, Z_WAITOK | Z_ZERO);
		if ((np = proc_find(savepid)) == NULL || np != p) {
			if (np != PROC_NULL) {
				proc_rele(np);
			}
			if (mypgrp != PGRP_NULL) {
				pg_rele(mypgrp);
			}
			if (procsp != SESSION_NULL) {
				session_rele(procsp);
			}
			zfree(pgrp_zone, pgrp);
			return ESRCH;
		}
		proc_rele(np);
		if (mksess) {
			struct session *sess;

			/*
			 * new session
			 */
			sess = zalloc_flags(session_zone, Z_WAITOK | Z_ZERO);
			sess->s_leader = p;
			sess->s_sid = p->p_pid;
			sess->s_count = 1;
			sess->s_ttypgrpid = NO_PID;

			lck_mtx_init(&sess->s_mlock, &proc_mlock_grp, &proc_lck_attr);

			bcopy(procsp->s_login, sess->s_login,
			    sizeof(sess->s_login));
			OSBitAndAtomic(~((uint32_t)P_CONTROLT), &p->p_flag);
			proc_list_lock();
			LIST_INSERT_HEAD(SESSHASH(sess->s_sid), sess, s_hash);
			proc_list_unlock();
			pgrp->pg_session = sess;
			p->p_sessionid = sess->s_sid;
#if DIAGNOSTIC
			if (p != current_proc()) {
				panic("enterpgrp: mksession and p != curproc");
			}
#endif
		} else {
			proc_list_lock();
			pgrp->pg_session = procsp;
			p->p_sessionid = procsp->s_sid;

			if ((pgrp->pg_session->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
				panic("enterpgrp: providing ref to terminating session");
			}
			pgrp->pg_session->s_count++;
			proc_list_unlock();
		}
		pgrp->pg_id = pgid;

		lck_mtx_init(&pgrp->pg_mlock, &proc_mlock_grp, &proc_lck_attr);

		LIST_INIT(&pgrp->pg_members);
		proc_list_lock();
		pgrp->pg_refcount = 1;
		LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
		proc_list_unlock();
	} else if (pgrp == mypgrp) {
		pg_rele(pgrp);
		if (mypgrp != NULL) {
			pg_rele(mypgrp);
		}
		if (procsp != SESSION_NULL) {
			session_rele(procsp);
		}
		return 0;
	}

	if (procsp != SESSION_NULL) {
		session_rele(procsp);
	}
	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	fixjobc(p, mypgrp, 0);

	if (mypgrp != PGRP_NULL) {
		pg_rele(mypgrp);
	}
	pgrp_replace(p, pgrp);
	pg_rele(pgrp);

	return 0;
}

/*
 * remove process from process group
 */
int
leavepgrp(proc_t p)
{
	pgrp_remove(p);
	return 0;
}

/*
 * delete a process group
 */
static void
pgdelete_dropref(struct pgrp *pgrp)
{
	struct tty *ttyp;
	int emptypgrp = 1;
	struct session *sessp;


	pgrp_lock(pgrp);
	if (pgrp->pg_membercnt != 0) {
		emptypgrp = 0;
	}
	pgrp_unlock(pgrp);

	proc_list_lock();
	pgrp->pg_refcount--;
	if ((emptypgrp == 0) || (pgrp->pg_membercnt != 0)) {
		proc_list_unlock();
		return;
	}

	pgrp->pg_listflags |= PGRP_FLAG_TERMINATE;

	if (pgrp->pg_refcount > 0) {
		proc_list_unlock();
		return;
	}

	pgrp->pg_listflags |= PGRP_FLAG_DEAD;
	LIST_REMOVE(pgrp, pg_hash);

	proc_list_unlock();

	ttyp = SESSION_TP(pgrp->pg_session);
	if (ttyp != TTY_NULL) {
		if (ttyp->t_pgrp == pgrp) {
			tty_lock(ttyp);
			/* Re-check after acquiring the lock */
			if (ttyp->t_pgrp == pgrp) {
				ttyp->t_pgrp = NULL;
				pgrp->pg_session->s_ttypgrpid = NO_PID;
			}
			tty_unlock(ttyp);
		}
	}

	proc_list_lock();

	sessp = pgrp->pg_session;
	if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
		panic("pg_deleteref: manipulating refs of already terminating session");
	}
	if (--sessp->s_count == 0) {
		if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
			panic("pg_deleteref: terminating already terminated session");
		}
		sessp->s_listflags |= S_LIST_TERM;
		ttyp = SESSION_TP(sessp);
		LIST_REMOVE(sessp, s_hash);
		proc_list_unlock();
		if (ttyp != TTY_NULL) {
			tty_lock(ttyp);
			if (ttyp->t_session == sessp) {
				ttyp->t_session = NULL;
			}
			tty_unlock(ttyp);
		}
		proc_list_lock();
		sessp->s_listflags |= S_LIST_DEAD;
		if (sessp->s_count != 0) {
			panic("pg_deleteref: freeing session in use");
		}
		proc_list_unlock();
		lck_mtx_destroy(&sessp->s_mlock, &proc_mlock_grp);

		zfree(session_zone, sessp);
	} else {
		proc_list_unlock();
	}
	lck_mtx_destroy(&pgrp->pg_mlock, &proc_mlock_grp);
	zfree(pgrp_zone, pgrp);
}


/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 */
int
fixjob_callback(proc_t p, void * arg)
{
	struct fixjob_iterargs *fp;
	struct pgrp * pg, *hispg;
	struct session * mysession, *hissess;
	int entering;

	fp = (struct fixjob_iterargs *)arg;
	pg = fp->pg;
	mysession = fp->mysession;
	entering = fp->entering;

	hispg = proc_pgrp(p);
	hissess = proc_session(p);

	if ((hispg != pg) &&
	    (hissess == mysession)) {
		pgrp_lock(hispg);
		if (entering) {
			hispg->pg_jobc++;
			pgrp_unlock(hispg);
		} else if (--hispg->pg_jobc == 0) {
			pgrp_unlock(hispg);
			orphanpg(hispg);
		} else {
			pgrp_unlock(hispg);
		}
	}
	if (hissess != SESSION_NULL) {
		session_rele(hissess);
	}
	if (hispg != PGRP_NULL) {
		pg_rele(hispg);
	}

	return PROC_RETURNED;
}

void
fixjobc(proc_t p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp = PGRP_NULL;
	struct session *hissess = SESSION_NULL;
	struct session *mysession = pgrp->pg_session;
	proc_t parent;
	struct fixjob_iterargs fjarg;
	boolean_t proc_parent_self;

	/*
	 * Check if p's parent is the current proc; if so there is no need
	 * to take a ref, and calling proc_parent with the current proc as
	 * the parent may deadlock if the current proc is exiting.
	 */
	proc_parent_self = proc_parent_is_currentproc(p);
	if (proc_parent_self) {
		parent = current_proc();
	} else {
		parent = proc_parent(p);
	}

	if (parent != PROC_NULL) {
		hispgrp = proc_pgrp(parent);
		hissess = proc_session(parent);
		if (!proc_parent_self) {
			proc_rele(parent);
		}
	}


	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	if ((hispgrp != pgrp) &&
	    (hissess == mysession)) {
		pgrp_lock(pgrp);
		if (entering) {
			pgrp->pg_jobc++;
			pgrp_unlock(pgrp);
		} else if (--pgrp->pg_jobc == 0) {
			pgrp_unlock(pgrp);
			orphanpg(pgrp);
		} else {
			pgrp_unlock(pgrp);
		}
	}

	if (hissess != SESSION_NULL) {
		session_rele(hissess);
	}
	if (hispgrp != PGRP_NULL) {
		pg_rele(hispgrp);
	}

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	fjarg.pg = pgrp;
	fjarg.mysession = mysession;
	fjarg.entering = entering;
	proc_childrenwalk(p, fixjob_callback, &fjarg);
}

/*
 * The pidlist_* routines support the functions in this file that
 * walk lists of processes applying filters and callouts to the
 * elements of the list.
 *
 * A prior implementation used a single linear array, which can be
 * tricky to allocate on large systems.  This implementation creates
 * an SLIST of modestly sized arrays of PIDS_PER_ENTRY elements.
 *
 * The array should be sized large enough to keep the overhead of
 * walking the list low, but small enough that blocking allocations of
 * pidlist_entry_t structures always succeed.
 */

#define PIDS_PER_ENTRY 1021

typedef struct pidlist_entry {
	SLIST_ENTRY(pidlist_entry) pe_link;
	u_int pe_nused;
	pid_t pe_pid[PIDS_PER_ENTRY];
} pidlist_entry_t;

typedef struct {
	SLIST_HEAD(, pidlist_entry) pl_head;
	struct pidlist_entry *pl_active;
	u_int pl_nalloc;
} pidlist_t;

static __inline__ pidlist_t *
pidlist_init(pidlist_t *pl)
{
	SLIST_INIT(&pl->pl_head);
	pl->pl_active = NULL;
	pl->pl_nalloc = 0;
	return pl;
}

static u_int
pidlist_alloc(pidlist_t *pl, u_int needed)
{
	while (pl->pl_nalloc < needed) {
		pidlist_entry_t *pe = kheap_alloc(KHEAP_TEMP, sizeof(*pe),
		    Z_WAITOK | Z_ZERO);
		if (NULL == pe) {
			panic("no space for pidlist entry");
		}
		SLIST_INSERT_HEAD(&pl->pl_head, pe, pe_link);
		pl->pl_nalloc += (sizeof(pe->pe_pid) / sizeof(pe->pe_pid[0]));
	}
	return pl->pl_nalloc;
}

static void
pidlist_free(pidlist_t *pl)
{
	pidlist_entry_t *pe;
	while (NULL != (pe = SLIST_FIRST(&pl->pl_head))) {
		SLIST_FIRST(&pl->pl_head) = SLIST_NEXT(pe, pe_link);
		kheap_free(KHEAP_TEMP, pe, sizeof(*pe));
	}
	pl->pl_nalloc = 0;
}

static __inline__ void
pidlist_set_active(pidlist_t *pl)
{
	pl->pl_active = SLIST_FIRST(&pl->pl_head);
	assert(pl->pl_active);
}

static void
pidlist_add_pid(pidlist_t *pl, pid_t pid)
{
	pidlist_entry_t *pe = pl->pl_active;
	if (pe->pe_nused >= sizeof(pe->pe_pid) / sizeof(pe->pe_pid[0])) {
		if (NULL == (pe = SLIST_NEXT(pe, pe_link))) {
			panic("pidlist allocation exhausted");
		}
		pl->pl_active = pe;
	}
	pe->pe_pid[pe->pe_nused++] = pid;
}

static __inline__ u_int
pidlist_nalloc(const pidlist_t *pl)
{
	return pl->pl_nalloc;
}
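
/*
 * Illustrative sketch (not part of the original file): the intended
 * pidlist life cycle, mirroring orphanpg() below: size the list outside
 * the lock, fill it under the lock, then walk it after dropping the lock.
 * The function name and pid value are hypothetical.
 */
#if 0 /* example only */
static void
example_pidlist_usage(u_int needed)
{
	pidlist_t pid_list, *pl = pidlist_init(&pid_list);

	pidlist_alloc(pl, needed);      /* grow to capacity (may block) */
	pidlist_set_active(pl);         /* start filling at the first entry */

	pidlist_add_pid(pl, 1);         /* ... one call per pid collected */

	const pidlist_entry_t *pe;
	SLIST_FOREACH(pe, &pl->pl_head, pe_link) {
		for (u_int i = 0; i < pe->pe_nused; i++) {
			/* ... act on pe->pe_pid[i] ... */
		}
	}
	pidlist_free(pl);
}
#endif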

/*
 * A process group has become orphaned; if there are any stopped processes in
 * the group, hang up all processes in that group.
 */
static void
orphanpg(struct pgrp *pgrp)
{
	pidlist_t pid_list, *pl = pidlist_init(&pid_list);
	u_int pid_count_available = 0;
	proc_t p;

	/* allocate outside of the pgrp_lock */
	for (;;) {
		pgrp_lock(pgrp);

		boolean_t should_iterate = FALSE;
		pid_count_available = 0;

		PGMEMBERS_FOREACH(pgrp, p) {
			pid_count_available++;
			if (p->p_stat == SSTOP) {
				should_iterate = TRUE;
			}
		}
		if (pid_count_available == 0 || !should_iterate) {
			pgrp_unlock(pgrp);
			goto out;       /* no orphaned processes OR nothing stopped */
		}
		if (pidlist_nalloc(pl) >= pid_count_available) {
			break;
		}
		pgrp_unlock(pgrp);

		pidlist_alloc(pl, pid_count_available);
	}
	pidlist_set_active(pl);

	u_int pid_count = 0;
	PGMEMBERS_FOREACH(pgrp, p) {
		pidlist_add_pid(pl, proc_pid(p));
		if (++pid_count >= pid_count_available) {
			break;
		}
	}
	pgrp_unlock(pgrp);

	const pidlist_entry_t *pe;
	SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
		for (u_int i = 0; i < pe->pe_nused; i++) {
			const pid_t pid = pe->pe_pid[i];
			if (0 == pid) {
				continue; /* skip kernproc */
			}
			p = proc_find(pid);
			if (!p) {
				continue;
			}
			proc_transwait(p, 0);
			pt_setrunnable(p);
			psignal(p, SIGHUP);
			psignal(p, SIGCONT);
			proc_rele(p);
		}
	}
out:
	pidlist_free(pl);
}

boolean_t
proc_is_translated(proc_t p __unused)
{
	return 0;
}

int
proc_is_classic(proc_t p __unused)
{
	return 0;
}

bool
proc_is_exotic(
	proc_t p)
{
	if (p == NULL) {
		return false;
	}
	return task_is_exotic(proc_task(p));
}

bool
proc_is_alien(
	proc_t p)
{
	if (p == NULL) {
		return false;
	}
	return task_is_alien(proc_task(p));
}

/* XXX Why does this function exist?  Need to kill it off... */
proc_t
current_proc_EXTERNAL(void)
{
	return current_proc();
}

int
proc_is_forcing_hfs_case_sensitivity(proc_t p)
{
	return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY) ? 1 : 0;
}

bool
proc_ignores_content_protection(proc_t p)
{
	return os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_IGNORE_CONTENT_PROTECTION;
}

bool
proc_ignores_node_permissions(proc_t p)
{
	return os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_IGNORE_NODE_PERMISSIONS;
}

bool
proc_skip_mtime_update(proc_t p)
{
	return os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_SKIP_MTIME_UPDATE;
}

#if CONFIG_COREDUMP
/*
 * proc_core_name(name, uid, pid)
 * Expand the name described in corefilename, using name, uid, and pid.
 * corefilename is a printf-like string, with three format specifiers:
 *	%N	name of process ("name")
 *	%P	process id (pid)
 *	%U	user id (uid)
 * For example, "%N.core" is the default; core dumps can be disabled
 * completely by using "/dev/null", or all core files can be stored in
 * "/cores/%U/%N-%P".
 * This is controlled by the sysctl variable kern.corefile (see above).
 */
__private_extern__ int
proc_core_name(const char *name, uid_t uid, pid_t pid, char *cf_name,
    size_t cf_name_len)
{
	const char *format, *appendstr;
	char id_buf[11];                /* Buffer for pid/uid -- max 4B */
	size_t i, l, n;

	if (cf_name == NULL) {
		goto toolong;
	}

	format = corefilename;
	for (i = 0, n = 0; n < cf_name_len && format[i]; i++) {
		switch (format[i]) {
		case '%':       /* Format character */
			i++;
			switch (format[i]) {
			case '%':
				appendstr = "%";
				break;
			case 'N':       /* process name */
				appendstr = name;
				break;
			case 'P':       /* process id */
				snprintf(id_buf, sizeof(id_buf), "%u", pid);
				appendstr = id_buf;
				break;
			case 'U':       /* user id */
				snprintf(id_buf, sizeof(id_buf), "%u", uid);
				appendstr = id_buf;
				break;
			case '\0':      /* format string ended in % symbol */
				goto endofstring;
			default:
				appendstr = "";
				log(LOG_ERR,
				    "Unknown format character %c in `%s'\n",
				    format[i], format);
			}
			l = strlen(appendstr);
			if ((n + l) >= cf_name_len) {
				goto toolong;
			}
			bcopy(appendstr, cf_name + n, l);
			n += l;
			break;
		default:
			cf_name[n++] = format[i];
		}
	}
	if (format[i] != '\0') {
		goto toolong;
	}
	return 0;
toolong:
	log(LOG_ERR, "pid %ld (%s), uid (%u): corename is too long\n",
	    (long)pid, name, (uint32_t)uid);
	return 1;
endofstring:
	log(LOG_ERR, "pid %ld (%s), uid (%u): unexpected end of string after %% token\n",
	    (long)pid, name, (uint32_t)uid);
	return 1;
}
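
/*
 * Illustrative sketch (not part of the original file): sample expansions
 * of the corefilename template. The process names and ids are hypothetical.
 *
 *   template "/cores/core.%P",  name "launchd", pid 1,   uid 0
 *       -> "/cores/core.1"
 *   template "/cores/%U/%N-%P", name "Finder",  pid 321, uid 501
 *       -> "/cores/501/Finder-321"
 */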
#endif /* CONFIG_COREDUMP */

/* Code Signing related routines */

int
csops(__unused proc_t p, struct csops_args *uap, __unused int32_t *retval)
{
	return csops_internal(uap->pid, uap->ops, uap->useraddr,
	           uap->usersize, USER_ADDR_NULL);
}

int
csops_audittoken(__unused proc_t p, struct csops_audittoken_args *uap, __unused int32_t *retval)
{
	if (uap->uaudittoken == USER_ADDR_NULL) {
		return EINVAL;
	}
	return csops_internal(uap->pid, uap->ops, uap->useraddr,
	           uap->usersize, uap->uaudittoken);
}

static int
csops_copy_token(void *start, size_t length, user_size_t usize, user_addr_t uaddr)
{
	char fakeheader[8] = { 0 };
	int error;

	if (usize < sizeof(fakeheader)) {
		return ERANGE;
	}

	/* if no blob, fill in zero header */
	if (NULL == start) {
		start = fakeheader;
		length = sizeof(fakeheader);
	} else if (usize < length) {
		/* ... if input too short, copy out the length of the entitlement blob */
		uint32_t length32 = htonl((uint32_t)length);
		memcpy(&fakeheader[4], &length32, sizeof(length32));

		error = copyout(fakeheader, uaddr, sizeof(fakeheader));
		if (error == 0) {
			return ERANGE; /* input buffer too short; ERANGE signals that */
		}
		return error;
	}
	return copyout(start, uaddr, length);
}
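
/*
 * Illustrative sketch (not part of the original file): the 8-byte header
 * convention csops_copy_token() relies on. When the caller's buffer is too
 * small, only this header is copied out, with the blob's true length in
 * network byte order at offset 4, so the caller can retry with a buffer of
 * the right size.
 *
 *   byte:   0  1  2  3 | 4  5  6  7
 *   value:  0  0  0  0 | htonl(length)
 */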
2380
2381static int
2382csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaudittoken)
2383{
2384 size_t usize = (size_t)CAST_DOWN(size_t, usersize);
2385 proc_t pt;
2386 int forself;
2387 int error;
2388 vnode_t tvp;
2389 off_t toff;
2390 unsigned char cdhash[SHA1_RESULTLEN];
2391 audit_token_t token;
2392 unsigned int upid = 0, uidversion = 0;
2393
2394 forself = error = 0;
2395
2396 if (pid == 0) {
2397 pid = proc_selfpid();
2398 }
2399 if (pid == proc_selfpid()) {
2400 forself = 1;
2401 }
2402
2403
2404 switch (ops) {
2405 case CS_OPS_STATUS:
2406 case CS_OPS_CDHASH:
2407 case CS_OPS_PIDOFFSET:
2408 case CS_OPS_ENTITLEMENTS_BLOB:
2409 case CS_OPS_IDENTITY:
2410 case CS_OPS_BLOB:
2411 case CS_OPS_TEAMID:
2412 case CS_OPS_CLEAR_LV:
2413 break; /* not restricted to root */
2414 default:
2415 if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE) {
2416 return EPERM;
2417 }
2418 break;
2419 }
2420
2421 pt = proc_find(pid);
2422 if (pt == PROC_NULL) {
2423 return ESRCH;
2424 }
2425
2426 upid = pt->p_pid;
2427 uidversion = pt->p_idversion;
2428 if (uaudittoken != USER_ADDR_NULL) {
2429 error = copyin(uaudittoken, &token, sizeof(audit_token_t));
2430 if (error != 0) {
2431 goto out;
2432 }
2433		/* verify the audit token matches the proc: val[5] carries the pid, val[7] the pid generation (p_idversion) */
2434 if ((token.val[5] != upid) || (token.val[7] != uidversion)) {
2435 error = ESRCH;
2436 goto out;
2437 }
2438 }
2439
2440#if CONFIG_MACF
2441 switch (ops) {
2442 case CS_OPS_MARKINVALID:
2443 case CS_OPS_MARKHARD:
2444 case CS_OPS_MARKKILL:
2445 case CS_OPS_MARKRESTRICT:
2446 case CS_OPS_SET_STATUS:
2447 case CS_OPS_CLEARINSTALLER:
2448 case CS_OPS_CLEARPLATFORM:
2449 case CS_OPS_CLEAR_LV:
2450 if ((error = mac_proc_check_set_cs_info(current_proc(), pt, ops))) {
2451 goto out;
2452 }
2453 break;
2454 default:
2455 if ((error = mac_proc_check_get_cs_info(current_proc(), pt, ops))) {
2456 goto out;
2457 }
2458 }
2459#endif
2460
2461 switch (ops) {
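	/*
	 * CS_OPS_STATUS returns p_csflags augmented with synthesized bits.
	 * Hypothetical user-space query (sketch):
	 *
	 *	uint32_t csflags = 0;
	 *	if (csops(getpid(), CS_OPS_STATUS, &csflags, sizeof(csflags)) == 0 &&
	 *	    (csflags & CS_ENFORCEMENT)) {
	 *		// code-signing enforcement is active for this process
	 *	}
	 */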
2462 case CS_OPS_STATUS: {
2463 uint32_t retflags;
2464
2465 proc_lock(pt);
2466 retflags = pt->p_csflags;
2467 if (cs_process_enforcement(pt)) {
2468 retflags |= CS_ENFORCEMENT;
2469 }
2470 if (csproc_get_platform_binary(pt)) {
2471 retflags |= CS_PLATFORM_BINARY;
2472 }
2473 if (csproc_get_platform_path(pt)) {
2474 retflags |= CS_PLATFORM_PATH;
2475 }
2476		/* Don't report CS_REQUIRE_LV if it was set only via CS_FORCED_LV, but still report CS_FORCED_LV itself */
2477 if ((pt->p_csflags & CS_FORCED_LV) == CS_FORCED_LV) {
2478 retflags &= (~CS_REQUIRE_LV);
2479 }
2480 proc_unlock(pt);
2481
2482 if (uaddr != USER_ADDR_NULL) {
2483 error = copyout(&retflags, uaddr, sizeof(uint32_t));
2484 }
2485 break;
2486 }
2487 case CS_OPS_MARKINVALID:
2488 proc_lock(pt);
2489 if ((pt->p_csflags & CS_VALID) == CS_VALID) { /* is currently valid */
2490 pt->p_csflags &= ~CS_VALID; /* set invalid */
2491 cs_process_invalidated(pt);
2492 if ((pt->p_csflags & CS_KILL) == CS_KILL) {
2493 pt->p_csflags |= CS_KILLED;
2494 proc_unlock(pt);
2495 if (cs_debug) {
2496 printf("CODE SIGNING: marked invalid by pid %d: "
2497 "p=%d[%s] honoring CS_KILL, final status 0x%x\n",
2498 proc_selfpid(), pt->p_pid, pt->p_comm, pt->p_csflags);
2499 }
2500 psignal(pt, SIGKILL);
2501 } else {
2502 proc_unlock(pt);
2503 }
2504 } else {
2505 proc_unlock(pt);
2506 }
2507
2508 break;
2509
2510 case CS_OPS_MARKHARD:
2511 proc_lock(pt);
2512 pt->p_csflags |= CS_HARD;
2513 if ((pt->p_csflags & CS_VALID) == 0) {
2514 /* @@@ allow? reject? kill? @@@ */
2515 proc_unlock(pt);
2516 error = EINVAL;
2517 goto out;
2518 } else {
2519 proc_unlock(pt);
2520 }
2521 break;
2522
2523 case CS_OPS_MARKKILL:
2524 proc_lock(pt);
2525 pt->p_csflags |= CS_KILL;
2526 if ((pt->p_csflags & CS_VALID) == 0) {
2527 proc_unlock(pt);
2528 psignal(pt, SIGKILL);
2529 } else {
2530 proc_unlock(pt);
2531 }
2532 break;
2533
2534 case CS_OPS_PIDOFFSET:
2535 toff = pt->p_textoff;
2536 proc_rele(pt);
2537 error = copyout(&toff, uaddr, sizeof(toff));
2538 return error;
2539
2540 case CS_OPS_CDHASH:
2541
2542 /* pt already holds a reference on its p_textvp */
2543 tvp = pt->p_textvp;
2544 toff = pt->p_textoff;
2545
2546 if (tvp == NULLVP || usize != SHA1_RESULTLEN) {
2547 proc_rele(pt);
2548 return EINVAL;
2549 }
2550
2551 error = vn_getcdhash(tvp, toff, cdhash);
2552 proc_rele(pt);
2553
2554 if (error == 0) {
2555 error = copyout(cdhash, uaddr, sizeof(cdhash));
2556 }
2557
2558 return error;
2559
2560 case CS_OPS_ENTITLEMENTS_BLOB: {
2561 void *start;
2562 size_t length;
2563
2564 proc_lock(pt);
2565
2566 if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
2567 proc_unlock(pt);
2568 error = EINVAL;
2569 break;
2570 }
2571
2572 error = cs_entitlements_blob_get(pt, &start, &length);
2573 proc_unlock(pt);
2574 if (error) {
2575 break;
2576 }
2577
2578 error = csops_copy_token(start, length, usize, uaddr);
2579 break;
2580 }
2581 case CS_OPS_MARKRESTRICT:
2582 proc_lock(pt);
2583 pt->p_csflags |= CS_RESTRICT;
2584 proc_unlock(pt);
2585 break;
2586
2587 case CS_OPS_SET_STATUS: {
2588 uint32_t flags;
2589
2590 if (usize < sizeof(flags)) {
2591 error = ERANGE;
2592 break;
2593 }
2594
2595 error = copyin(uaddr, &flags, sizeof(flags));
2596 if (error) {
2597 break;
2598 }
2599
2600 /* only allow setting a subset of all code sign flags */
2601 flags &=
2602 CS_HARD | CS_EXEC_SET_HARD |
2603 CS_KILL | CS_EXEC_SET_KILL |
2604 CS_RESTRICT |
2605 CS_REQUIRE_LV |
2606 CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT;
2607
2608 proc_lock(pt);
2609 if (pt->p_csflags & CS_VALID) {
2610 if ((flags & CS_ENFORCEMENT) &&
2611 !(pt->p_csflags & CS_ENFORCEMENT)) {
2612 vm_map_cs_enforcement_set(get_task_map(pt->task), TRUE);
2613 }
2614 pt->p_csflags |= flags;
2615 } else {
2616 error = EINVAL;
2617 }
2618 proc_unlock(pt);
2619
2620 break;
2621 }
2622 case CS_OPS_CLEAR_LV: {
2623 /*
2624 * This option is used to remove library validation from
2625 * a running process. This is used in plugin architectures
2626 * when a program needs to load untrusted libraries. This
2627 * allows the process to maintain library validation as
2628 * long as possible, then drop it only when required.
2629 * Once a process has loaded the untrusted library,
2630 * relying on library validation in the future will
2631 * not be effective. An alternative is to re-exec
2632 * your application without library validation, or
2633 * fork an untrusted child.
2634 */
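	/*
	 * Hypothetical self-call from an entitled plugin host (sketch;
	 * uaddr/usize are unused for this op):
	 *
	 *	csops(getpid(), CS_OPS_CLEAR_LV, NULL, 0);
	 *
	 * after which loading libraries signed by other teams no longer
	 * trips library-validation failures.
	 */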
2635#if !defined(XNU_TARGET_OS_OSX)
2636 // We only support dropping library validation on macOS
2637 error = ENOTSUP;
2638#else
2639 /*
2640 * if we have the flag set, and the caller wants
2641 * to remove it, and they're entitled to, then
2642 * we remove it from the csflags
2643 *
2644 * NOTE: We are fine to poke into the task because
2645 * we get a ref to pt when we do the proc_find
2646 * at the beginning of this function.
2647 *
2648 * We also only allow altering ourselves.
2649 */
2650 if (forself == 1 && IOTaskHasEntitlement(pt->task, CLEAR_LV_ENTITLEMENT)) {
2651 proc_lock(pt);
2652 pt->p_csflags &= (~(CS_REQUIRE_LV | CS_FORCED_LV));
2653 proc_unlock(pt);
2654 error = 0;
2655 } else {
2656 error = EPERM;
2657 }
2658#endif
2659 break;
2660 }
2661 case CS_OPS_BLOB: {
2662 void *start;
2663 size_t length;
2664
2665 proc_lock(pt);
2666 if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
2667 proc_unlock(pt);
2668 error = EINVAL;
2669 break;
2670 }
2671
2672 error = cs_blob_get(pt, &start, &length);
2673 proc_unlock(pt);
2674 if (error) {
2675 break;
2676 }
2677
2678 error = csops_copy_token(start, length, usize, uaddr);
2679 break;
2680 }
2681 case CS_OPS_IDENTITY:
2682 case CS_OPS_TEAMID: {
2683 const char *identity;
2684 uint8_t fakeheader[8];
2685 uint32_t idlen;
2686 size_t length;
2687
2688 /*
2689		 * Prefix the identity with a blob-style header so that
2690		 * userland can determine the identity
2691		 * length.
2692 */
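		/*
		 * Resulting layout (sketch): the caller receives an 8-byte
		 * header whose second big-endian word is sizeof(header) +
		 * strlen(identity) + 1, followed by the NUL-terminated
		 * identity string itself.
		 */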
2693 if (usize < sizeof(fakeheader)) {
2694 error = ERANGE;
2695 break;
2696 }
2697 memset(fakeheader, 0, sizeof(fakeheader));
2698
2699 proc_lock(pt);
2700 if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
2701 proc_unlock(pt);
2702 error = EINVAL;
2703 break;
2704 }
2705
2706 identity = ops == CS_OPS_TEAMID ? csproc_get_teamid(pt) : cs_identity_get(pt);
2707 proc_unlock(pt);
2708 if (identity == NULL) {
2709 error = ENOENT;
2710 break;
2711 }
2712
2713 length = strlen(identity) + 1; /* include NUL */
2714 idlen = htonl((uint32_t)(length + sizeof(fakeheader)));
2715 memcpy(&fakeheader[4], &idlen, sizeof(idlen));
2716
2717 error = copyout(fakeheader, uaddr, sizeof(fakeheader));
2718 if (error) {
2719 break;
2720 }
2721
2722 if (usize < sizeof(fakeheader) + length) {
2723 error = ERANGE;
2724 } else if (usize > sizeof(fakeheader)) {
2725 error = copyout(identity, uaddr + sizeof(fakeheader), length);
2726 }
2727
2728 break;
2729 }
2730
2731 case CS_OPS_CLEARINSTALLER:
2732 proc_lock(pt);
2733 pt->p_csflags &= ~(CS_INSTALLER | CS_DATAVAULT_CONTROLLER | CS_EXEC_INHERIT_SIP);
2734 proc_unlock(pt);
2735 break;
2736
2737 case CS_OPS_CLEARPLATFORM:
2738#if DEVELOPMENT || DEBUG
2739 if (cs_process_global_enforcement()) {
2740 error = ENOTSUP;
2741 break;
2742 }
2743
2744#if CONFIG_CSR
2745 if (csr_check(CSR_ALLOW_APPLE_INTERNAL) != 0) {
2746 error = ENOTSUP;
2747 break;
2748 }
2749#endif
2750
2751 proc_lock(pt);
2752 pt->p_csflags &= ~(CS_PLATFORM_BINARY | CS_PLATFORM_PATH);
2753 csproc_clear_platform_binary(pt);
2754 proc_unlock(pt);
2755 break;
2756#else
2757 error = ENOTSUP;
2758 break;
2759#endif /* !DEVELOPMENT || DEBUG */
2760
2761 default:
2762 error = EINVAL;
2763 break;
2764 }
2765out:
2766 proc_rele(pt);
2767 return error;
2768}
2769
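/*
 * proc_iterate() snapshots candidate pids into a pidlist under
 * proc_list_lock, then drops the lock and invokes the callout on a
 * fresh proc_find() reference for each pid, so callouts may block
 * without stalling the proc list. A hypothetical callout (sketch):
 *
 *	static int
 *	count_procs_cb(proc_t p, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return PROC_RETURNED;	// let the iterator drop the ref
 *	}
 *
 *	int n = 0;
 *	proc_iterate(PROC_ALLPROCLIST, count_procs_cb, &n, NULL, NULL);
 */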
2770void
2771proc_iterate(
2772 unsigned int flags,
2773 proc_iterate_fn_t callout,
2774 void *arg,
2775 proc_iterate_fn_t filterfn,
2776 void *filterarg)
2777{
2778 pidlist_t pid_list, *pl = pidlist_init(&pid_list);
2779 u_int pid_count_available = 0;
2780
2781 assert(callout != NULL);
2782
2783 /* allocate outside of the proc_list_lock */
2784 for (;;) {
2785 proc_list_lock();
2786 pid_count_available = nprocs + 1; /* kernel_task not counted in nprocs */
2787 assert(pid_count_available > 0);
2788 if (pidlist_nalloc(pl) >= pid_count_available) {
2789 break;
2790 }
2791 proc_list_unlock();
2792
2793 pidlist_alloc(pl, pid_count_available);
2794 }
2795 pidlist_set_active(pl);
2796
2797 /* filter pids into the pid_list */
2798
2799 u_int pid_count = 0;
2800 if (flags & PROC_ALLPROCLIST) {
2801 proc_t p;
2802 ALLPROC_FOREACH(p) {
2803 /* ignore processes that are being forked */
2804 if (p->p_stat == SIDL) {
2805 continue;
2806 }
2807 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
2808 continue;
2809 }
2810 pidlist_add_pid(pl, proc_pid(p));
2811 if (++pid_count >= pid_count_available) {
2812 break;
2813 }
2814 }
2815 }
2816
2817 if ((pid_count < pid_count_available) &&
2818 (flags & PROC_ZOMBPROCLIST)) {
2819 proc_t p;
2820 ZOMBPROC_FOREACH(p) {
2821 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
2822 continue;
2823 }
2824 pidlist_add_pid(pl, proc_pid(p));
2825 if (++pid_count >= pid_count_available) {
2826 break;
2827 }
2828 }
2829 }
2830
2831 proc_list_unlock();
2832
2833 /* call callout on processes in the pid_list */
2834
2835 const pidlist_entry_t *pe;
2836 SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
2837 for (u_int i = 0; i < pe->pe_nused; i++) {
2838 const pid_t pid = pe->pe_pid[i];
2839 proc_t p = proc_find(pid);
2840 if (p) {
2841 if ((flags & PROC_NOWAITTRANS) == 0) {
2842 proc_transwait(p, 0);
2843 }
2844 const int callout_ret = callout(p, arg);
2845
2846 switch (callout_ret) {
2847 case PROC_RETURNED_DONE:
2848 proc_rele(p);
2849 OS_FALLTHROUGH;
2850 case PROC_CLAIMED_DONE:
2851 goto out;
2852
2853 case PROC_RETURNED:
2854 proc_rele(p);
2855 OS_FALLTHROUGH;
2856 case PROC_CLAIMED:
2857 break;
2858 default:
2859 panic("%s: callout =%d for pid %d",
2860 __func__, callout_ret, pid);
2861 break;
2862 }
2863 } else if (flags & PROC_ZOMBPROCLIST) {
2864 p = proc_find_zombref(pid);
2865 if (!p) {
2866 continue;
2867 }
2868 const int callout_ret = callout(p, arg);
2869
2870 switch (callout_ret) {
2871 case PROC_RETURNED_DONE:
2872 proc_drop_zombref(p);
2873 OS_FALLTHROUGH;
2874 case PROC_CLAIMED_DONE:
2875 goto out;
2876
2877 case PROC_RETURNED:
2878 proc_drop_zombref(p);
2879 OS_FALLTHROUGH;
2880 case PROC_CLAIMED:
2881 break;
2882 default:
2883 panic("%s: callout =%d for zombie %d",
2884 __func__, callout_ret, pid);
2885 break;
2886 }
2887 }
2888 }
2889 }
2890out:
2891 pidlist_free(pl);
2892}
2893
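/*
 * proc_rebootscan() is used during shutdown. Because the proc list
 * lock must be dropped around each callout, the scan restarts from
 * the head of allproc after every callout instead of trusting a
 * possibly stale list position.
 */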
2894void
2895proc_rebootscan(
2896 proc_iterate_fn_t callout,
2897 void *arg,
2898 proc_iterate_fn_t filterfn,
2899 void *filterarg)
2900{
2901 proc_t p;
2902
2903 assert(callout != NULL);
2904
2905 proc_shutdown_exitcount = 0;
2906
2907restart_foreach:
2908
2909 proc_list_lock();
2910
2911 ALLPROC_FOREACH(p) {
2912 if ((filterfn != NULL) && filterfn(p, filterarg) == 0) {
2913 continue;
2914 }
2915 p = proc_ref_locked(p);
2916 if (!p) {
2917 continue;
2918 }
2919
2920 proc_list_unlock();
2921
2922 proc_transwait(p, 0);
2923 (void)callout(p, arg);
2924 proc_rele(p);
2925
2926 goto restart_foreach;
2927 }
2928
2929 proc_list_unlock();
2930}
2931
2932void
2933proc_childrenwalk(
2934 proc_t parent,
2935 proc_iterate_fn_t callout,
2936 void *arg)
2937{
2938 pidlist_t pid_list, *pl = pidlist_init(&pid_list);
2939 u_int pid_count_available = 0;
2940
2941 assert(parent != NULL);
2942 assert(callout != NULL);
2943
2944 for (;;) {
2945 proc_list_lock();
2946 pid_count_available = parent->p_childrencnt;
2947 if (pid_count_available == 0) {
2948 proc_list_unlock();
2949 goto out;
2950 }
2951 if (pidlist_nalloc(pl) >= pid_count_available) {
2952 break;
2953 }
2954 proc_list_unlock();
2955
2956 pidlist_alloc(pl, pid_count_available);
2957 }
2958 pidlist_set_active(pl);
2959
2960 u_int pid_count = 0;
2961 proc_t p;
2962 PCHILDREN_FOREACH(parent, p) {
2963 if (p->p_stat == SIDL) {
2964 continue;
2965 }
2966 pidlist_add_pid(pl, proc_pid(p));
2967 if (++pid_count >= pid_count_available) {
2968 break;
2969 }
2970 }
2971
2972 proc_list_unlock();
2973
2974 const pidlist_entry_t *pe;
2975 SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
2976 for (u_int i = 0; i < pe->pe_nused; i++) {
2977 const pid_t pid = pe->pe_pid[i];
2978 p = proc_find(pid);
2979 if (!p) {
2980 continue;
2981 }
2982 const int callout_ret = callout(p, arg);
2983
2984 switch (callout_ret) {
2985 case PROC_RETURNED_DONE:
2986 proc_rele(p);
2987 OS_FALLTHROUGH;
2988 case PROC_CLAIMED_DONE:
2989 goto out;
2990
2991 case PROC_RETURNED:
2992 proc_rele(p);
2993 OS_FALLTHROUGH;
2994 case PROC_CLAIMED:
2995 break;
2996 default:
2997 panic("%s: callout =%d for pid %d",
2998 __func__, callout_ret, pid);
2999 break;
3000 }
3001 }
3002 }
3003out:
3004 pidlist_free(pl);
3005}
3006
3007void
3008pgrp_iterate(
3009 struct pgrp *pgrp,
3010 unsigned int flags,
3011 proc_iterate_fn_t callout,
3012 void * arg,
3013 proc_iterate_fn_t filterfn,
3014 void * filterarg)
3015{
3016 pidlist_t pid_list, *pl = pidlist_init(&pid_list);
3017 u_int pid_count_available = 0;
3018
3019 assert(pgrp != NULL);
3020 assert(callout != NULL);
3021
3022 for (;;) {
3023 pgrp_lock(pgrp);
3024 pid_count_available = pgrp->pg_membercnt;
3025 if (pid_count_available == 0) {
3026 pgrp_unlock(pgrp);
3027 if (flags & PGRP_DROPREF) {
3028 pg_rele(pgrp);
3029 }
3030 goto out;
3031 }
3032 if (pidlist_nalloc(pl) >= pid_count_available) {
3033 break;
3034 }
3035 pgrp_unlock(pgrp);
3036
3037 pidlist_alloc(pl, pid_count_available);
3038 }
3039 pidlist_set_active(pl);
3040
3041 const pid_t pgid = pgrp->pg_id;
3042 u_int pid_count = 0;
3043 proc_t p;
3044 PGMEMBERS_FOREACH(pgrp, p) {
3045 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
3046			continue;
3047 }
3048 pidlist_add_pid(pl, proc_pid(p));
3049 if (++pid_count >= pid_count_available) {
3050 break;
3051 }
3052 }
3053
3054 pgrp_unlock(pgrp);
3055
3056 if (flags & PGRP_DROPREF) {
3057 pg_rele(pgrp);
3058 }
3059
3060 const pidlist_entry_t *pe;
3061 SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
3062 for (u_int i = 0; i < pe->pe_nused; i++) {
3063 const pid_t pid = pe->pe_pid[i];
3064 if (0 == pid) {
3065 continue; /* skip kernproc */
3066 }
3067 p = proc_find(pid);
3068 if (!p) {
3069 continue;
3070 }
3071 if (p->p_pgrpid != pgid) {
3072 proc_rele(p);
3073 continue;
3074 }
3075 const int callout_ret = callout(p, arg);
3076
3077 switch (callout_ret) {
3078 case PROC_RETURNED:
3079 proc_rele(p);
3080 OS_FALLTHROUGH;
3081 case PROC_CLAIMED:
3082 break;
3083 case PROC_RETURNED_DONE:
3084 proc_rele(p);
3085 OS_FALLTHROUGH;
3086 case PROC_CLAIMED_DONE:
3087 goto out;
3088
3089 default:
3090 panic("%s: callout =%d for pid %d",
3091 __func__, callout_ret, pid);
3092 }
3093 }
3094 }
3095
3096out:
3097 pidlist_free(pl);
3098}
3099
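/*
 * pgrp membership invariants: p_pgrp/p_pgrpid are switched only under
 * proc_list_lock, the member list and count are protected by the pgrp
 * mutex, and the last member leaving tears the group down via
 * pgdelete_dropref().
 */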
3100static void
3101pgrp_add(struct pgrp * pgrp, struct proc * parent, struct proc * child)
3102{
3103 proc_list_lock();
3104 child->p_pgrp = pgrp;
3105 child->p_pgrpid = pgrp->pg_id;
3106 child->p_sessionid = pgrp->pg_session->s_sid;
3107 child->p_listflag |= P_LIST_INPGRP;
3108 /*
3109	 * While a pgrp is being freed, a process can still request
3110	 * addition via setpgid (e.g. from bash when a login is terminated
3111	 * by the login cycler), which would otherwise see ESRCH; clear
3112	 * TERMINATE so the add proceeds. Safe to hold the lock due to the refcount on the pgrp.
3113 */
3114 if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
3115 pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
3116 }
3117
3118 if ((pgrp->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD) {
3119		panic("pgrp_add: pgrp is dead adding process");
3120 }
3121 proc_list_unlock();
3122
3123 pgrp_lock(pgrp);
3124 pgrp->pg_membercnt++;
3125 if (parent != PROC_NULL) {
3126 LIST_INSERT_AFTER(parent, child, p_pglist);
3127 } else {
3128 LIST_INSERT_HEAD(&pgrp->pg_members, child, p_pglist);
3129 }
3130 pgrp_unlock(pgrp);
3131
3132 proc_list_lock();
3133 if (((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (pgrp->pg_membercnt != 0)) {
3134 pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
3135 }
3136 proc_list_unlock();
3137}
3138
3139static void
3140pgrp_remove(struct proc * p)
3141{
3142 struct pgrp * pg;
3143
3144 pg = proc_pgrp(p);
3145
3146 proc_list_lock();
3147#if __PROC_INTERNAL_DEBUG
3148 if ((p->p_listflag & P_LIST_INPGRP) == 0) {
3149 panic("removing from pglist but no named ref\n");
3150 }
3151#endif
3152 p->p_pgrpid = PGRPID_DEAD;
3153 p->p_listflag &= ~P_LIST_INPGRP;
3154 p->p_pgrp = NULL;
3155 proc_list_unlock();
3156
3157 if (pg == PGRP_NULL) {
3158 panic("pgrp_remove: pg is NULL");
3159 }
3160 pgrp_lock(pg);
3161 pg->pg_membercnt--;
3162
3163 if (pg->pg_membercnt < 0) {
3164		panic("pgrp_remove: negative membercnt pgrp:%p p:%p\n", pg, p);
3165 }
3166
3167 LIST_REMOVE(p, p_pglist);
3168 if (pg->pg_members.lh_first == 0) {
3169 pgrp_unlock(pg);
3170 pgdelete_dropref(pg);
3171 } else {
3172 pgrp_unlock(pg);
3173 pg_rele(pg);
3174 }
3175}
3176
3177
3178/* cannot use proc_pgrp here as it may be stalled */
3179static void
3180pgrp_replace(struct proc * p, struct pgrp * newpg)
3181{
3182 struct pgrp * oldpg;
3183
3184
3185
3186 proc_list_lock();
3187
3188 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
3189 p->p_listflag |= P_LIST_PGRPTRWAIT;
3190 (void)msleep(&p->p_pgrpid, &proc_list_mlock, 0, "proc_pgrp", 0);
3191 }
3192
3193 p->p_listflag |= P_LIST_PGRPTRANS;
3194
3195 oldpg = p->p_pgrp;
3196 if (oldpg == PGRP_NULL) {
3197 panic("pgrp_replace: oldpg NULL");
3198 }
3199 oldpg->pg_refcount++;
3200#if __PROC_INTERNAL_DEBUG
3201 if ((p->p_listflag & P_LIST_INPGRP) == 0) {
3202 panic("removing from pglist but no named ref\n");
3203 }
3204#endif
3205 p->p_pgrpid = PGRPID_DEAD;
3206 p->p_listflag &= ~P_LIST_INPGRP;
3207 p->p_pgrp = NULL;
3208
3209 proc_list_unlock();
3210
3211 pgrp_lock(oldpg);
3212 oldpg->pg_membercnt--;
3213 if (oldpg->pg_membercnt < 0) {
3214		panic("pgrp_replace: negative membercnt pgrp:%p p:%p\n", oldpg, p);
3215 }
3216 LIST_REMOVE(p, p_pglist);
3217 if (oldpg->pg_members.lh_first == 0) {
3218 pgrp_unlock(oldpg);
3219 pgdelete_dropref(oldpg);
3220 } else {
3221 pgrp_unlock(oldpg);
3222 pg_rele(oldpg);
3223 }
3224
3225 proc_list_lock();
3226 p->p_pgrp = newpg;
3227 p->p_pgrpid = newpg->pg_id;
3228 p->p_sessionid = newpg->pg_session->s_sid;
3229 p->p_listflag |= P_LIST_INPGRP;
3230 /*
3231	 * While a pgrp is being freed, a process can still request
3232	 * addition via setpgid (e.g. from bash when a login is terminated
3233	 * by the login cycler), which would otherwise see ESRCH; clear
3234	 * TERMINATE so the add proceeds. Safe to hold the lock due to the refcount on the pgrp.
3235 */
3236 if ((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
3237 newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
3238 }
3239
3240 if ((newpg->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD) {
3241		panic("pgrp_replace: pgrp is dead adding process");
3242 }
3243 proc_list_unlock();
3244
3245 pgrp_lock(newpg);
3246 newpg->pg_membercnt++;
3247 LIST_INSERT_HEAD(&newpg->pg_members, p, p_pglist);
3248 pgrp_unlock(newpg);
3249
3250 proc_list_lock();
3251 if (((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (newpg->pg_membercnt != 0)) {
3252 newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
3253 }
3254
3255 p->p_listflag &= ~P_LIST_PGRPTRANS;
3256 if ((p->p_listflag & P_LIST_PGRPTRWAIT) == P_LIST_PGRPTRWAIT) {
3257 p->p_listflag &= ~P_LIST_PGRPTRWAIT;
3258 wakeup(&p->p_pgrpid);
3259 }
3260 proc_list_unlock();
3261}
3262
3263void
3264pgrp_lock(struct pgrp * pgrp)
3265{
3266 lck_mtx_lock(&pgrp->pg_mlock);
3267}
3268
3269void
3270pgrp_unlock(struct pgrp * pgrp)
3271{
3272 lck_mtx_unlock(&pgrp->pg_mlock);
3273}
3274
3275void
3276session_lock(struct session * sess)
3277{
3278 lck_mtx_lock(&sess->s_mlock);
3279}
3280
3281
3282void
3283session_unlock(struct session * sess)
3284{
3285 lck_mtx_unlock(&sess->s_mlock);
3286}
3287
3288struct pgrp *
3289proc_pgrp(proc_t p)
3290{
3291 struct pgrp * pgrp;
3292
3293 if (p == PROC_NULL) {
3294 return PGRP_NULL;
3295 }
3296 proc_list_lock();
3297
3298 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
3299 p->p_listflag |= P_LIST_PGRPTRWAIT;
3300 (void)msleep(&p->p_pgrpid, &proc_list_mlock, 0, "proc_pgrp", 0);
3301 }
3302
3303 pgrp = p->p_pgrp;
3304
3305 assert(pgrp != NULL);
3306
3307 if (pgrp != PGRP_NULL) {
3308 pgrp->pg_refcount++;
3309 if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) != 0) {
3310			panic("proc_pgrp: ref being provided for dead pgrp");
3311 }
3312 }
3313
3314 proc_list_unlock();
3315
3316 return pgrp;
3317}
3318
3319struct pgrp *
3320tty_pgrp(struct tty * tp)
3321{
3322 struct pgrp * pg = PGRP_NULL;
3323
3324 proc_list_lock();
3325 pg = tp->t_pgrp;
3326
3327 if (pg != PGRP_NULL) {
3328 if ((pg->pg_listflags & PGRP_FLAG_DEAD) != 0) {
3329			panic("tty_pgrp: ref being provided for dead pgrp");
3330 }
3331 pg->pg_refcount++;
3332 }
3333 proc_list_unlock();
3334
3335 return pg;
3336}
3337
3338struct session *
3339proc_session(proc_t p)
3340{
3341 struct session * sess = SESSION_NULL;
3342
3343 if (p == PROC_NULL) {
3344 return SESSION_NULL;
3345 }
3346
3347 proc_list_lock();
3348
3349 /* wait during transitions */
3350 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
3351 p->p_listflag |= P_LIST_PGRPTRWAIT;
3352 (void)msleep(&p->p_pgrpid, &proc_list_mlock, 0, "proc_pgrp", 0);
3353 }
3354
3355 if ((p->p_pgrp != PGRP_NULL) && ((sess = p->p_pgrp->pg_session) != SESSION_NULL)) {
3356 if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
3357			panic("proc_session: returning session ref on terminating session");
3358 }
3359 sess->s_count++;
3360 }
3361 proc_list_unlock();
3362 return sess;
3363}
3364
3365void
3366session_rele(struct session *sess)
3367{
3368 proc_list_lock();
3369 if (--sess->s_count == 0) {
3370 if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
3371 panic("session_rele: terminating already terminated session");
3372 }
3373 sess->s_listflags |= S_LIST_TERM;
3374 LIST_REMOVE(sess, s_hash);
3375 sess->s_listflags |= S_LIST_DEAD;
3376 if (sess->s_count != 0) {
3377 panic("session_rele: freeing session in use");
3378 }
3379 proc_list_unlock();
3380 lck_mtx_destroy(&sess->s_mlock, &proc_mlock_grp);
3381 zfree(session_zone, sess);
3382 } else {
3383 proc_list_unlock();
3384 }
3385}
3386
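/*
 * Process "transition" protocol: proc_transstart() marks p as in
 * transition (P_LINTRANSIT) so that concurrent proc_transwait()
 * callers block until proc_transend(); proc_transcommit() marks the
 * point of no return, after which further transstart attempts return
 * EDEADLK instead of blocking. Hypothetical usage (sketch):
 *
 *	if (proc_transstart(p, 0, 0) == 0) {
 *		// ... mutate exec/fork-sensitive state ...
 *		proc_transend(p, 0);
 *	}
 */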
3387int
3388proc_transstart(proc_t p, int locked, int non_blocking)
3389{
3390 if (locked == 0) {
3391 proc_lock(p);
3392 }
3393 while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
3394 if (((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT) || non_blocking) {
3395 if (locked == 0) {
3396 proc_unlock(p);
3397 }
3398 return EDEADLK;
3399 }
3400 p->p_lflag |= P_LTRANSWAIT;
3401		msleep(&p->p_lflag, &p->p_mlock, 0, "proc_transstart", NULL);
3402 }
3403 p->p_lflag |= P_LINTRANSIT;
3404 p->p_transholder = current_thread();
3405 if (locked == 0) {
3406 proc_unlock(p);
3407 }
3408 return 0;
3409}
3410
3411void
3412proc_transcommit(proc_t p, int locked)
3413{
3414 if (locked == 0) {
3415 proc_lock(p);
3416 }
3417
3418 assert((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT);
3419 assert(p->p_transholder == current_thread());
3420 p->p_lflag |= P_LTRANSCOMMIT;
3421
3422 if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
3423 p->p_lflag &= ~P_LTRANSWAIT;
3424 wakeup(&p->p_lflag);
3425 }
3426 if (locked == 0) {
3427 proc_unlock(p);
3428 }
3429}
3430
3431void
3432proc_transend(proc_t p, int locked)
3433{
3434 if (locked == 0) {
3435 proc_lock(p);
3436 }
3437
3438 p->p_lflag &= ~(P_LINTRANSIT | P_LTRANSCOMMIT);
3439 p->p_transholder = NULL;
3440
3441 if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
3442 p->p_lflag &= ~P_LTRANSWAIT;
3443 wakeup(&p->p_lflag);
3444 }
3445 if (locked == 0) {
3446 proc_unlock(p);
3447 }
3448}
3449
3450int
3451proc_transwait(proc_t p, int locked)
3452{
3453 if (locked == 0) {
3454 proc_lock(p);
3455 }
3456 while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
3457 if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT && current_proc() == p) {
3458 if (locked == 0) {
3459 proc_unlock(p);
3460 }
3461 return EDEADLK;
3462 }
3463 p->p_lflag |= P_LTRANSWAIT;
3464		msleep(&p->p_lflag, &p->p_mlock, 0, "proc_transwait", NULL);
3465 }
3466 if (locked == 0) {
3467 proc_unlock(p);
3468 }
3469 return 0;
3470}
3471
3472void
3473proc_klist_lock(void)
3474{
3475 lck_mtx_lock(&proc_klist_mlock);
3476}
3477
3478void
3479proc_klist_unlock(void)
3480{
3481 lck_mtx_unlock(&proc_klist_mlock);
3482}
3483
3484void
3485proc_knote(struct proc * p, long hint)
3486{
3487 proc_klist_lock();
3488 KNOTE(&p->p_klist, hint);
3489 proc_klist_unlock();
3490}
3491
3492void
3493proc_knote_drain(struct proc *p)
3494{
3495 struct knote *kn = NULL;
3496
3497 /*
3498 * Clear the proc's klist to avoid references after the proc is reaped.
3499 */
3500 proc_klist_lock();
3501 while ((kn = SLIST_FIRST(&p->p_klist))) {
3502 kn->kn_proc = PROC_NULL;
3503 KNOTE_DETACH(&p->p_klist, kn);
3504 }
3505 proc_klist_unlock();
3506}
3507
3508void
3509proc_setregister(proc_t p)
3510{
3511 proc_lock(p);
3512 p->p_lflag |= P_LREGISTER;
3513 proc_unlock(p);
3514}
3515
3516void
3517proc_resetregister(proc_t p)
3518{
3519 proc_lock(p);
3520 p->p_lflag &= ~P_LREGISTER;
3521 proc_unlock(p);
3522}
3523
3524pid_t
3525proc_pgrpid(proc_t p)
3526{
3527 return p->p_pgrpid;
3528}
3529
3530pid_t
3531proc_sessionid(proc_t p)
3532{
3533 return p->p_sessionid;
3534}
3535
3536pid_t
3537proc_selfpgrpid(void)
3538{
3539 return current_proc()->p_pgrpid;
3540}
3541
3542
3543/* return control and action states */
3544int
3545proc_getpcontrol(int pid, int * pcontrolp)
3546{
3547 proc_t p;
3548
3549 p = proc_find(pid);
3550 if (p == PROC_NULL) {
3551 return ESRCH;
3552 }
3553 if (pcontrolp != NULL) {
3554 *pcontrolp = p->p_pcaction;
3555 }
3556
3557 proc_rele(p);
3558 return 0;
3559}
3560
3561int
3562proc_dopcontrol(proc_t p)
3563{
3564 int pcontrol;
3565 os_reason_t kill_reason;
3566
3567 proc_lock(p);
3568
3569 pcontrol = PROC_CONTROL_STATE(p);
3570
3571 if (PROC_ACTION_STATE(p) == 0) {
3572 switch (pcontrol) {
3573 case P_PCTHROTTLE:
3574 PROC_SETACTION_STATE(p);
3575 proc_unlock(p);
3576 printf("low swap: throttling pid %d (%s)\n", p->p_pid, p->p_comm);
3577 break;
3578
3579 case P_PCSUSP:
3580 PROC_SETACTION_STATE(p);
3581 proc_unlock(p);
3582 printf("low swap: suspending pid %d (%s)\n", p->p_pid, p->p_comm);
3583 task_suspend(p->task);
3584 break;
3585
3586 case P_PCKILL:
3587 PROC_SETACTION_STATE(p);
3588 proc_unlock(p);
3589 printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
3590 kill_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_LOWSWAP);
3591 psignal_with_reason(p, SIGKILL, kill_reason);
3592 break;
3593
3594 default:
3595 proc_unlock(p);
3596 }
3597 } else {
3598 proc_unlock(p);
3599 }
3600
3601 return PROC_RETURNED;
3602}
3603
3604
3605/*
3606 * Resume a throttled or suspended process. This is an internal interface that's only
3607 * used by the user level code that presents the GUI when we run out of swap space and
3608 * hence is restricted to processes with superuser privileges.
3609 */
3610
3611int
3612proc_resetpcontrol(int pid)
3613{
3614 proc_t p;
3615 int pcontrol;
3616 int error;
3617 proc_t self = current_proc();
3618
3619	/* allow only if the process has been validated to handle resource control, or the caller has superuser credentials */
3620 if (((self->p_lflag & P_LVMRSRCOWNER) == 0) && (error = suser(kauth_cred_get(), 0))) {
3621 return error;
3622 }
3623
3624 p = proc_find(pid);
3625 if (p == PROC_NULL) {
3626 return ESRCH;
3627 }
3628
3629 proc_lock(p);
3630
3631 pcontrol = PROC_CONTROL_STATE(p);
3632
3633 if (PROC_ACTION_STATE(p) != 0) {
3634 switch (pcontrol) {
3635 case P_PCTHROTTLE:
3636 PROC_RESETACTION_STATE(p);
3637 proc_unlock(p);
3638 printf("low swap: unthrottling pid %d (%s)\n", p->p_pid, p->p_comm);
3639 break;
3640
3641 case P_PCSUSP:
3642 PROC_RESETACTION_STATE(p);
3643 proc_unlock(p);
3644 printf("low swap: resuming pid %d (%s)\n", p->p_pid, p->p_comm);
3645 task_resume(p->task);
3646 break;
3647
3648 case P_PCKILL:
3649		/* a kill cannot be undone; note the attempt and ignore it */
3650 PROC_SETACTION_STATE(p);
3651 proc_unlock(p);
3652 printf("low swap: attempt to unkill pid %d (%s) ignored\n", p->p_pid, p->p_comm);
3653 break;
3654
3655 default:
3656 proc_unlock(p);
3657 }
3658 } else {
3659 proc_unlock(p);
3660 }
3661
3662 proc_rele(p);
3663 return 0;
3664}
3665
3666
3667
3668struct no_paging_space {
3669 uint64_t pcs_max_size;
3670 uint64_t pcs_uniqueid;
3671 int pcs_pid;
3672 int pcs_proc_count;
3673 uint64_t pcs_total_size;
3674
3675 uint64_t npcs_max_size;
3676 uint64_t npcs_uniqueid;
3677 int npcs_pid;
3678 int npcs_proc_count;
3679 uint64_t npcs_total_size;
3680
3681 int apcs_proc_count;
3682 uint64_t apcs_total_size;
3683};
3684
3685
3686static int
3687proc_pcontrol_filter(proc_t p, void *arg)
3688{
3689 struct no_paging_space *nps;
3690 uint64_t compressed;
3691
3692 nps = (struct no_paging_space *)arg;
3693
3694 compressed = get_task_compressed(p->task);
3695
3696 if (PROC_CONTROL_STATE(p)) {
3697 if (PROC_ACTION_STATE(p) == 0) {
3698 if (compressed > nps->pcs_max_size) {
3699 nps->pcs_pid = p->p_pid;
3700 nps->pcs_uniqueid = p->p_uniqueid;
3701 nps->pcs_max_size = compressed;
3702 }
3703 nps->pcs_total_size += compressed;
3704 nps->pcs_proc_count++;
3705 } else {
3706 nps->apcs_total_size += compressed;
3707 nps->apcs_proc_count++;
3708 }
3709 } else {
3710 if (compressed > nps->npcs_max_size) {
3711 nps->npcs_pid = p->p_pid;
3712 nps->npcs_uniqueid = p->p_uniqueid;
3713 nps->npcs_max_size = compressed;
3714 }
3715 nps->npcs_total_size += compressed;
3716 nps->npcs_proc_count++;
3717 }
3718 return 0;
3719}
3720
3721
3722static int
3723proc_pcontrol_null(__unused proc_t p, __unused void *arg)
3724{
3725 return PROC_RETURNED;
3726}
3727
3728
3729/*
3730 * Deal with a low-on-compressor-pool-space condition... this function
3731 * gets called when we are approaching the limits of the compressor pool or
3732 * we are unable to create a new swap file.
3733 * Since this eventually creates a memory deadlock situation, we need to take action to free up
3734 * memory resources (both compressed and uncompressed) in order to prevent the system from hanging completely.
3735 * There are two categories of processes to deal with: those that have an action
3736 * associated with them by the task itself and those that do not. Actionable
3737 * tasks can have one of three actions specified: ones that
3738 * can be killed immediately, ones that should be suspended, and ones that should
3739 * be throttled. Processes that do not have an action associated with them are normally
3740 * ignored unless they are utilizing such a large percentage of the compressor pool (currently 50%)
3741 * that only by killing them can we hope to put the system back into a usable state.
3742 */
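/*
 * Decision order below (sketch of the policy): first, if the largest
 * non-actionable process holds more than half of the compressed pages,
 * kill it outright; otherwise defer to the memorystatus (jetsam)
 * thread when it has eligible candidates; otherwise apply the largest
 * actionable process's own policy (throttle/suspend/kill) via
 * proc_dopcontrol().
 */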
3743
3744#define NO_PAGING_SPACE_DEBUG 0
3745
3746extern uint64_t vm_compressor_pages_compressed(void);
3747
3748struct timeval last_no_space_action = {.tv_sec = 0, .tv_usec = 0};
3749
3750#define MB_SIZE (1024 * 1024ULL)
3751boolean_t memorystatus_kill_on_VM_compressor_space_shortage(boolean_t);
3752
3753extern int32_t max_kill_priority;
3754extern int memorystatus_get_proccnt_upto_priority(int32_t max_bucket_index);
3755
3756int
3757no_paging_space_action(void)
3758{
3759 proc_t p;
3760 struct no_paging_space nps;
3761 struct timeval now;
3762 os_reason_t kill_reason;
3763
3764 /*
3765 * Throttle how often we come through here. Once every 5 seconds should be plenty.
3766 */
3767 microtime(&now);
3768
3769 if (now.tv_sec <= last_no_space_action.tv_sec + 5) {
3770 return 0;
3771 }
3772
3773 /*
3774 * Examine all processes and find the biggest (biggest is based on the number of pages this
3775 * task has in the compressor pool) that has been marked to have some action
3776 * taken when swap space runs out... we also find the biggest that hasn't been marked for
3777 * action.
3778 *
3779	 * If the biggest non-actionable task is over the "dangerously big" threshold (currently 50% of
3780	 * the total number of pages held by the compressor), we go ahead and kill it since no other task
3781 * can have any real effect on the situation. Otherwise, we go after the actionable process.
3782 */
3783 bzero(&nps, sizeof(nps));
3784
3785 proc_iterate(PROC_ALLPROCLIST, proc_pcontrol_null, (void *)NULL, proc_pcontrol_filter, (void *)&nps);
3786
3787#if NO_PAGING_SPACE_DEBUG
3788 printf("low swap: npcs_proc_count = %d, npcs_total_size = %qd, npcs_max_size = %qd\n",
3789 nps.npcs_proc_count, nps.npcs_total_size, nps.npcs_max_size);
3790 printf("low swap: pcs_proc_count = %d, pcs_total_size = %qd, pcs_max_size = %qd\n",
3791 nps.pcs_proc_count, nps.pcs_total_size, nps.pcs_max_size);
3792 printf("low swap: apcs_proc_count = %d, apcs_total_size = %qd\n",
3793 nps.apcs_proc_count, nps.apcs_total_size);
3794#endif
3795 if (nps.npcs_max_size > (vm_compressor_pages_compressed() * 50) / 100) {
3796 /*
3797		 * for now we'll knock out any task that has more than 50% of the pages
3798 * held by the compressor
3799 */
3800 if ((p = proc_find(nps.npcs_pid)) != PROC_NULL) {
3801 if (nps.npcs_uniqueid == p->p_uniqueid) {
3802 /*
3803 * verify this is still the same process
3804 * in case the proc exited and the pid got reused while
3805 * we were finishing the proc_iterate and getting to this point
3806 */
3807 last_no_space_action = now;
3808
3809			printf("low swap: killing largest compressed process with pid %d (%s) and size %llu MB\n", p->p_pid, p->p_comm, (nps.npcs_max_size / MB_SIZE));
3810 kill_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_LOWSWAP);
3811 psignal_with_reason(p, SIGKILL, kill_reason);
3812
3813 proc_rele(p);
3814
3815 return 0;
3816 }
3817
3818 proc_rele(p);
3819 }
3820 }
3821
3822 /*
3823	 * We have some processes within the jetsam bands under consideration that can therefore be killed.
3824 * So we will invoke the memorystatus thread to go ahead and kill something.
3825 */
3826 if (memorystatus_get_proccnt_upto_priority(max_kill_priority) > 0) {
3827 last_no_space_action = now;
3828 memorystatus_kill_on_VM_compressor_space_shortage(TRUE /* async */);
3829 return 1;
3830 }
3831
3832 /*
3833 * No eligible processes to kill. So let's suspend/kill the largest
3834 * process depending on its policy control specifications.
3835 */
3836
3837 if (nps.pcs_max_size > 0) {
3838 if ((p = proc_find(nps.pcs_pid)) != PROC_NULL) {
3839 if (nps.pcs_uniqueid == p->p_uniqueid) {
3840 /*
3841 * verify this is still the same process
3842 * in case the proc exited and the pid got reused while
3843 * we were finishing the proc_iterate and getting to this point
3844 */
3845 last_no_space_action = now;
3846
3847 proc_dopcontrol(p);
3848
3849 proc_rele(p);
3850
3851 return 1;
3852 }
3853
3854 proc_rele(p);
3855 }
3856 }
3857 last_no_space_action = now;
3858
3859 printf("low swap: unable to find any eligible processes to take action on\n");
3860
3861 return 0;
3862}
3863
3864int
3865proc_trace_log(__unused proc_t p, struct proc_trace_log_args *uap, __unused int *retval)
3866{
3867 int ret = 0;
3868 proc_t target_proc = PROC_NULL;
3869 pid_t target_pid = uap->pid;
3870 uint64_t target_uniqueid = uap->uniqueid;
3871 task_t target_task = NULL;
3872
3873 if (priv_check_cred(kauth_cred_get(), PRIV_PROC_TRACE_INSPECT, 0)) {
3874 ret = EPERM;
3875 goto out;
3876 }
3877 target_proc = proc_find(target_pid);
3878 if (target_proc != PROC_NULL) {
3879 if (target_uniqueid != proc_uniqueid(target_proc)) {
3880 ret = ENOENT;
3881 goto out;
3882 }
3883
3884 target_task = proc_task(target_proc);
3885 if (task_send_trace_memory(target_task, target_pid, target_uniqueid)) {
3886 ret = EINVAL;
3887 goto out;
3888 }
3889 } else {
3890 ret = ENOENT;
3891 }
3892
3893out:
3894 if (target_proc != PROC_NULL) {
3895 proc_rele(target_proc);
3896 }
3897 return ret;
3898}
3899
3900#if VM_SCAN_FOR_SHADOW_CHAIN
3901extern int vm_map_shadow_max(vm_map_t map);
3902int proc_shadow_max(void);
3903int
3904proc_shadow_max(void)
3905{
3906 int retval, max;
3907 proc_t p;
3908 task_t task;
3909 vm_map_t map;
3910
3911 max = 0;
3912 proc_list_lock();
3913 for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
3914 if (p->p_stat == SIDL) {
3915 continue;
3916 }
3917 task = p->task;
3918 if (task == NULL) {
3919 continue;
3920 }
3921 map = get_task_map(task);
3922 if (map == NULL) {
3923 continue;
3924 }
3925 retval = vm_map_shadow_max(map);
3926 if (retval > max) {
3927 max = retval;
3928 }
3929 }
3930 proc_list_unlock();
3931 return max;
3932}
3933#endif /* VM_SCAN_FOR_SHADOW_CHAIN */
3934
3935void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid);
3936void
3937proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid)
3938{
3939 if (target_proc != NULL) {
3940 target_proc->p_responsible_pid = responsible_pid;
3941 }
3942 return;
3943}
3944
3945int
3946proc_chrooted(proc_t p)
3947{
3948 int retval = 0;
3949
3950 if (p) {
3951 proc_fdlock(p);
3952 retval = (p->p_fd->fd_rdir != NULL) ? 1 : 0;
3953 proc_fdunlock(p);
3954 }
3955
3956 return retval;
3957}
3958
3959boolean_t
3960proc_send_synchronous_EXC_RESOURCE(proc_t p)
3961{
3962 if (p == PROC_NULL) {
3963 return FALSE;
3964 }
3965
3966 /* Send sync EXC_RESOURCE if the process is traced */
3967 if (ISSET(p->p_lflag, P_LTRACED)) {
3968 return TRUE;
3969 }
3970 return FALSE;
3971}
3972
3973#if CONFIG_MACF
3974size_t
3975proc_get_syscall_filter_mask_size(int which)
3976{
3977 switch (which) {
3978 case SYSCALL_MASK_UNIX:
3979 return nsysent;
3980 case SYSCALL_MASK_MACH:
3981 return mach_trap_count;
3982 case SYSCALL_MASK_KOBJ:
3983 return mach_kobj_count;
3984 default:
3985 return 0;
3986 }
3987}
3988
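/*
 * A filter mask must cover every entry of the corresponding syscall
 * table (see proc_get_syscall_filter_mask_size()); passing NULL clears
 * the filter. Hypothetical policy-side usage (sketch; the allocation
 * helper and index are illustrative only):
 *
 *	size_t len = proc_get_syscall_filter_mask_size(SYSCALL_MASK_UNIX);
 *	unsigned char *mask = make_zeroed_mask(len);	// hypothetical helper
 *	mask[SYS_read] = 1;	// per-entry semantics are defined by the MACF policy
 *	proc_set_syscall_filter_mask(p, SYSCALL_MASK_UNIX, mask, len);
 */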
3989int
3990proc_set_syscall_filter_mask(proc_t p, int which, unsigned char *maskptr, size_t masklen)
3991{
3992#if DEVELOPMENT || DEBUG
3993 if (syscallfilter_disable) {
3994 printf("proc_set_syscall_filter_mask: attempt to set policy for pid %d, but disabled by boot-arg\n", proc_pid(p));
3995 return 0;
3996 }
3997#endif // DEVELOPMENT || DEBUG
3998
3999 switch (which) {
4000 case SYSCALL_MASK_UNIX:
4001 if (maskptr != NULL && masklen != nsysent) {
4002 return EINVAL;
4003 }
4004 p->syscall_filter_mask = maskptr;
4005 break;
4006 case SYSCALL_MASK_MACH:
4007 if (maskptr != NULL && masklen != (size_t)mach_trap_count) {
4008 return EINVAL;
4009 }
4010 mac_task_set_mach_filter_mask(p->task, maskptr);
4011 break;
4012 case SYSCALL_MASK_KOBJ:
4013 if (maskptr != NULL && masklen != (size_t)mach_kobj_count) {
4014 return EINVAL;
4015 }
4016 mac_task_set_kobj_filter_mask(p->task, maskptr);
4017 break;
4018 default:
4019 return EINVAL;
4020 }
4021
4022 return 0;
4023}
4024
4025int
4026proc_set_syscall_filter_callbacks(syscall_filter_cbs_t cbs)
4027{
4028 if (cbs->version != SYSCALL_FILTER_CALLBACK_VERSION) {
4029 return EINVAL;
4030 }
4031
4032 /* XXX register unix filter callback instead of using MACF hook. */
4033
4034 if (cbs->mach_filter_cbfunc || cbs->kobj_filter_cbfunc) {
4035 if (mac_task_register_filter_callbacks(cbs->mach_filter_cbfunc,
4036 cbs->kobj_filter_cbfunc) != 0) {
4037 return EPERM;
4038 }
4039 }
4040
4041 return 0;
4042}
4043
4044int
4045proc_set_syscall_filter_index(int which, int num, int index)
4046{
4047 switch (which) {
4048 case SYSCALL_MASK_KOBJ:
4049 if (ipc_kobject_set_kobjidx(num, index) != 0) {
4050 return ENOENT;
4051 }
4052 break;
4053 default:
4054 return EINVAL;
4055 }
4056
4057 return 0;
4058}
4059#endif /* CONFIG_MACF */
4060
4061int
4062proc_set_filter_message_flag(proc_t p, boolean_t flag)
4063{
4064 if (p == PROC_NULL) {
4065 return EINVAL;
4066 }
4067
4068 task_set_filter_msg_flag(proc_task(p), flag);
4069
4070 return 0;
4071}
4072
4073int
4074proc_get_filter_message_flag(proc_t p, boolean_t *flag)
4075{
4076 if (p == PROC_NULL || flag == NULL) {
4077 return EINVAL;
4078 }
4079
4080 *flag = task_get_filter_msg_flag(proc_task(p));
4081
4082 return 0;
4083}
4084
4085bool
4086proc_is_traced(proc_t p)
4087{
4088	bool ret = false;
4089 assert(p != PROC_NULL);
4090 proc_lock(p);
4091 if (p->p_lflag & P_LTRACED) {
4092		ret = true;
4093 }
4094 proc_unlock(p);
4095 return ret;
4096}
4097
4098#ifdef CONFIG_32BIT_TELEMETRY
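/*
 * Emits a MessageTracer event whose signature is the comma-separated
 * tuple "procname,parentname,teamid,identity", e.g. (hypothetical)
 * "LegacyApp,launchd,ABCDE12345,com.example.legacyapp".
 */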
4099void
4100proc_log_32bit_telemetry(proc_t p)
4101{
4102 /* Gather info */
4103 char signature_buf[MAX_32BIT_EXEC_SIG_SIZE] = { 0 };
4104 char * signature_cur_end = &signature_buf[0];
4105 char * signature_buf_end = &signature_buf[MAX_32BIT_EXEC_SIG_SIZE - 1];
4106 int bytes_printed = 0;
4107
4108 const char * teamid = NULL;
4109 const char * identity = NULL;
4110 struct cs_blob * csblob = NULL;
4111
4112 proc_list_lock();
4113
4114 /*
4115 * Get proc name and parent proc name; if the parent execs, we'll get a
4116 * garbled name.
4117 */
4118 bytes_printed = scnprintf(signature_cur_end,
4119 signature_buf_end - signature_cur_end,
4120 "%s,%s,", p->p_name,
4121 (p->p_pptr ? p->p_pptr->p_name : ""));
4122
4123 if (bytes_printed > 0) {
4124 signature_cur_end += bytes_printed;
4125 }
4126
4127 proc_list_unlock();
4128
4129 /* Get developer info. */
4130 vnode_t v = proc_getexecutablevnode(p);
4131
4132 if (v) {
4133 csblob = csvnode_get_blob(v, 0);
4134
4135 if (csblob) {
4136 teamid = csblob_get_teamid(csblob);
4137 identity = csblob_get_identity(csblob);
4138 }
4139 }
4140
4141 if (teamid == NULL) {
4142 teamid = "";
4143 }
4144
4145 if (identity == NULL) {
4146 identity = "";
4147 }
4148
4149 bytes_printed = scnprintf(signature_cur_end,
4150 signature_buf_end - signature_cur_end,
4151 "%s,%s", teamid, identity);
4152
4153 if (bytes_printed > 0) {
4154 signature_cur_end += bytes_printed;
4155 }
4156
4157 if (v) {
4158 vnode_put(v);
4159 }
4160
4161 /*
4162 * We may want to rate limit here, although the SUMMARIZE key should
4163 * help us aggregate events in userspace.
4164 */
4165
4166 /* Emit log */
4167 kern_asl_msg(LOG_DEBUG, "messagetracer", 3,
4168 /* 0 */ "com.apple.message.domain", "com.apple.kernel.32bit_exec",
4169 /* 1 */ "com.apple.message.signature", signature_buf,
4170 /* 2 */ "com.apple.message.summarize", "YES",
4171 NULL);
4172}
4173#endif /* CONFIG_32BIT_TELEMETRY */