/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_proc.c	8.4 (Berkeley) 1/4/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/* HISTORY
 * 04-Aug-97 Umesh Vaishampayan (umeshv@apple.com)
 *	Added current_proc_EXTERNAL() function for the use of kernel
 *	loadable modules.
 *
 * 05-Jun-95 Mac Gillon (mgillon) at NeXT
 *	New version based on 3.3NS and 4.4
 */


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc_internal.h>
#include <sys/acct.h>
#include <sys/wait.h>
#include <sys/file_internal.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/kauth.h>
#include <sys/codesign.h>
#include <sys/kernel_types.h>
#include <sys/ubc.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/coalition.h>
#include <sys/coalition.h>
#include <kern/assert.h>
#include <vm/vm_protos.h>
#include <vm/vm_map.h>          /* vm_map_switch_protect() */
#include <vm/vm_pageout.h>
#include <mach/task.h>
#include <mach/message.h>
#include <sys/priv.h>
#include <sys/proc_info.h>
#include <sys/bsdtask_info.h>
#include <sys/persona.h>
#include <sys/sysent.h>
#include <sys/reason.h>
#include <sys/proc_require.h>
#include <IOKit/IOBSD.h>        /* IOTaskHasEntitlement() */
#include <kern/ipc_kobject.h>   /* ipc_kobject_set_kobjidx() */

#ifdef CONFIG_32BIT_TELEMETRY
#include <sys/kasl.h>
#endif /* CONFIG_32BIT_TELEMETRY */

#if CONFIG_CSR
#include <sys/csr.h>
#endif

#if CONFIG_MEMORYSTATUS
#include <sys/kern_memorystatus.h>
#endif

#if CONFIG_MACF
#include <security/mac_framework.h>
#include <security/mac_mach_internal.h>
#endif

#include <libkern/crypto/sha1.h>

#ifdef CONFIG_32BIT_TELEMETRY
#define MAX_32BIT_EXEC_SIG_SIZE 160
#endif /* CONFIG_32BIT_TELEMETRY */

/*
 * Structure associated with user caching.
 */
struct uidinfo {
	LIST_ENTRY(uidinfo) ui_hash;
	uid_t   ui_uid;
	size_t  ui_proccnt;
};
#define UIHASH(uid)     (&uihashtbl[(uid) & uihash])
LIST_HEAD(uihashhead, uidinfo) * uihashtbl;
u_long uihash;          /* size of hash table - 1 */

/*
 * Other process lists
 */
struct pidhashhead *pidhashtbl;
u_long pidhash;
struct pgrphashhead *pgrphashtbl;
u_long pgrphash;
struct sesshashhead *sesshashtbl;
u_long sesshash;

struct proclist allproc;
struct proclist zombproc;
extern struct tty cons;

extern int cs_debug;

#if DEVELOPMENT || DEBUG
int syscallfilter_disable = 0;
#endif // DEVELOPMENT || DEBUG

#if DEBUG
#define __PROC_INTERNAL_DEBUG 1
#endif
#if CONFIG_COREDUMP
/* Name to give to core files */
#if defined(XNU_TARGET_OS_BRIDGE)
__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/private/var/internal/%N.core"};
#elif defined(XNU_TARGET_OS_OSX)
__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/cores/core.%P"};
#else
__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/private/var/cores/%N.core"};
#endif
#endif

#if PROC_REF_DEBUG
#include <kern/backtrace.h>
#endif

ZONE_DECLARE(pgrp_zone, "pgrp",
    sizeof(struct pgrp), ZC_ZFREE_CLEARMEM);
ZONE_DECLARE(session_zone, "session",
    sizeof(struct session), ZC_ZFREE_CLEARMEM);

typedef uint64_t unaligned_u64 __attribute__((aligned(1)));

static void orphanpg(struct pgrp * pg);
void proc_name_kdp(task_t t, char * buf, int size);
boolean_t proc_binary_uuid_kdp(task_t task, uuid_t uuid);
int proc_threadname_kdp(void * uth, char * buf, size_t size);
void proc_starttime_kdp(void * p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unaligned_u64 *abstime);
void proc_archinfo_kdp(void* p, cpu_type_t* cputype, cpu_subtype_t* cpusubtype);
char * proc_name_address(void * p);
char * proc_longname_address(void *);

static void pgrp_add(struct pgrp * pgrp, proc_t parent, proc_t child);
static void pgrp_remove(proc_t p);
static void pgrp_replace(proc_t p, struct pgrp *pgrp);
static void pgdelete_dropref(struct pgrp *pgrp);
extern void pg_rele_dropref(struct pgrp * pgrp);
static int csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaddittoken);
static boolean_t proc_parent_is_currentproc(proc_t p);

struct fixjob_iterargs {
	struct pgrp * pg;
	struct session * mysession;
	int entering;
};

int fixjob_callback(proc_t, void *);

uint64_t
get_current_unique_pid(void)
{
	proc_t  p = current_proc();

	if (p) {
		return p->p_uniqueid;
	} else {
		return 0;
	}
}

/*
 * Initialize global process hashing structures.
 */
void
procinit(void)
{
	LIST_INIT(&allproc);
	LIST_INIT(&zombproc);
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
	sesshashtbl = hashinit(maxproc / 4, M_PROC, &sesshash);
	uihashtbl = hashinit(maxproc / 16, M_PROC, &uihash);
#if CONFIG_PERSONAS
	personas_bootstrap();
#endif
}

/*
 * Change the count of the number of processes a given user
 * has running. This routine protects the uihash with the
 * list lock.
 */
size_t
chgproccnt(uid_t uid, int diff)
{
	struct uidinfo *uip;
	struct uidinfo *newuip = NULL;
	struct uihashhead *uipp;
	size_t retval;

again:
	proc_list_lock();
	uipp = UIHASH(uid);
	for (uip = uipp->lh_first; uip != 0; uip = uip->ui_hash.le_next) {
		if (uip->ui_uid == uid) {
			break;
		}
	}
	if (uip) {
		uip->ui_proccnt += diff;
		if (uip->ui_proccnt > 0) {
			retval = uip->ui_proccnt;
			proc_list_unlock();
			goto out;
		}
		LIST_REMOVE(uip, ui_hash);
		retval = 0;
		proc_list_unlock();
		FREE(uip, M_PROC);
		goto out;
	}
	if (diff <= 0) {
		if (diff == 0) {
			retval = 0;
			proc_list_unlock();
			goto out;
		}
		panic("chgproccnt: lost user");
	}
	if (newuip != NULL) {
		uip = newuip;
		newuip = NULL;
		LIST_INSERT_HEAD(uipp, uip, ui_hash);
		uip->ui_uid = uid;
		uip->ui_proccnt = diff;
		retval = diff;
		proc_list_unlock();
		goto out;
	}
	proc_list_unlock();
	MALLOC(newuip, struct uidinfo *, sizeof(*uip), M_PROC, M_WAITOK);
	if (newuip == NULL) {
		panic("chgproccnt: M_PROC zone depleted");
	}
	goto again;
out:
	if (newuip != NULL) {
		FREE(newuip, M_PROC);
	}
	return retval;
}

/*
 * Is p an inferior of the current process?
 */
int
inferior(proc_t p)
{
	int retval = 0;

	proc_list_lock();
	for (; p != current_proc(); p = p->p_pptr) {
		if (p->p_pid == 0) {
			goto out;
		}
	}
	retval = 1;
out:
	proc_list_unlock();
	return retval;
}

/*
 * Is p an inferior of t ?
 */
int
isinferior(proc_t p, proc_t t)
{
	int retval = 0;
	int nchecked = 0;
	proc_t start = p;

	/* if p == t they are not inferior */
	if (p == t) {
		return 0;
	}

	proc_list_lock();
	for (; p != t; p = p->p_pptr) {
		nchecked++;

		/* Detect here if we're in a cycle */
		if ((p->p_pid == 0) || (p->p_pptr == start) || (nchecked >= nprocs)) {
			goto out;
		}
	}
	retval = 1;
out:
	proc_list_unlock();
	return retval;
}

int
proc_isinferior(int pid1, int pid2)
{
	proc_t p = PROC_NULL;
	proc_t t = PROC_NULL;
	int retval = 0;

	if (((p = proc_find(pid1)) != (proc_t)0) && ((t = proc_find(pid2)) != (proc_t)0)) {
		retval = isinferior(p, t);
	}

	if (p != PROC_NULL) {
		proc_rele(p);
	}
	if (t != PROC_NULL) {
		proc_rele(t);
	}

	return retval;
}

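/*
 * Look up a process by PID and take a reference on it; the reference
 * must be dropped with proc_rele(). Returns PROC_NULL if no matching
 * process exists or a reference could not be obtained.
 */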
proc_t
proc_find(int pid)
{
	return proc_findinternal(pid, 0);
}

proc_t
proc_findinternal(int pid, int locked)
{
	proc_t p = PROC_NULL;

	if (locked == 0) {
		proc_list_lock();
	}

	p = pfind_locked(pid);
	if ((p == PROC_NULL) || (p != proc_ref_locked(p))) {
		p = PROC_NULL;
	}

	if (locked == 0) {
		proc_list_unlock();
	}

	return p;
}

proc_t
proc_findthread(thread_t thread)
{
	proc_t p = PROC_NULL;
	struct uthread *uth;

	proc_list_lock();
	uth = get_bsdthread_info(thread);
	if (uth && (uth->uu_flag & UT_VFORK)) {
		p = uth->uu_proc;
	} else {
		p = (proc_t)(get_bsdthreadtask_info(thread));
	}
	p = proc_ref_locked(p);
	proc_list_unlock();
	return p;
}

/*
 * Returns process identity of a given process. Calling this function is not
 * racy for a current process or if a reference to the process is held.
 */
struct proc_ident
proc_ident(proc_t p)
{
	struct proc_ident ident = {
		.p_pid = proc_pid(p),
		.p_uniqueid = proc_uniqueid(p),
		.p_idversion = proc_pidversion(p),
	};

	return ident;
}

proc_t
proc_find_ident(struct proc_ident const *ident)
{
	proc_t proc = PROC_NULL;

	proc = proc_find(ident->p_pid);
	if (proc == PROC_NULL) {
		return PROC_NULL;
	}

	if (proc_uniqueid(proc) != ident->p_uniqueid ||
	    proc_pidversion(proc) != ident->p_idversion) {
		proc_rele(proc);
		return PROC_NULL;
	}

	return proc;
}

void
uthread_reset_proc_refcount(void *uthread)
{
	uthread_t uth;

	uth = (uthread_t) uthread;
	uth->uu_proc_refcount = 0;

#if PROC_REF_DEBUG
	if (proc_ref_tracking_disabled) {
		return;
	}

	uth->uu_pindex = 0;
#endif
}

#if PROC_REF_DEBUG
int
uthread_get_proc_refcount(void *uthread)
{
	uthread_t uth;

	if (proc_ref_tracking_disabled) {
		return 0;
	}

	uth = (uthread_t) uthread;

	return uth->uu_proc_refcount;
}
#endif

static void
record_procref(proc_t p __unused, int count)
{
	uthread_t uth;

	uth = current_uthread();
	uth->uu_proc_refcount += count;

#if PROC_REF_DEBUG
	if (proc_ref_tracking_disabled) {
		return;
	}

	if (uth->uu_pindex < NUM_PROC_REFS_TO_TRACK) {
		backtrace((uintptr_t *) &uth->uu_proc_pcs[uth->uu_pindex],
		    PROC_REF_STACK_DEPTH, NULL);

		uth->uu_proc_ps[uth->uu_pindex] = p;
		uth->uu_pindex++;
	}
#endif
}

static boolean_t
uthread_needs_to_wait_in_proc_refwait(void)
{
	uthread_t uth = current_uthread();

	/*
	 * Only allow threads holding no proc refs to wait in
	 * proc_refwait; letting a thread that already holds a proc
	 * ref wait there can deadlock and makes proc_find
	 * non-reentrant.
	 */
	if (uth->uu_proc_refcount == 0) {
		return TRUE;
	}

	return FALSE;
}

int
proc_rele(proc_t p)
{
	proc_list_lock();
	proc_rele_locked(p);
	proc_list_unlock();

	return 0;
}

proc_t
proc_self(void)
{
	struct proc * p;

	p = current_proc();

	proc_list_lock();
	if (p != proc_ref_locked(p)) {
		p = PROC_NULL;
	}
	proc_list_unlock();
	return p;
}

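/*
 * Take a reference on p. Expects the proc list lock to be held, and may
 * drop and retake it while waiting in proc_refwait. Returns the process
 * on success, or PROC_NULL if the process is still being created, has
 * exited, or is otherwise being torn down.
 */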
proc_t
proc_ref_locked(proc_t p)
{
	proc_t p1 = p;
	int pid = proc_pid(p);

retry:
	/*
	 * if process still in creation or proc got recycled
	 * during msleep then return failure.
	 */
	if ((p == PROC_NULL) || (p1 != p) || ((p->p_listflag & P_LIST_INCREATE) != 0)) {
		return PROC_NULL;
	}

	/*
	 * Do not return process marked for termination
	 * or proc_refdrain called without ref wait.
	 * Wait for proc_refdrain_with_refwait to complete if
	 * process in refdrain and refwait flag is set, unless
	 * the current thread is holding to a proc_ref
	 * for any proc.
	 */
	if ((p->p_stat != SZOMB) &&
	    ((p->p_listflag & P_LIST_EXITED) == 0) &&
	    ((p->p_listflag & P_LIST_DEAD) == 0) &&
	    (((p->p_listflag & (P_LIST_DRAIN | P_LIST_DRAINWAIT)) == 0) ||
	    ((p->p_listflag & P_LIST_REFWAIT) != 0))) {
		if ((p->p_listflag & P_LIST_REFWAIT) != 0 && uthread_needs_to_wait_in_proc_refwait()) {
			msleep(&p->p_listflag, proc_list_mlock, 0, "proc_refwait", 0);
			/*
			 * the proc might have been recycled since we dropped
			 * the proc list lock, get the proc again.
			 */
			p = pfind_locked(pid);
			goto retry;
		}
		p->p_refcount++;
		record_procref(p, 1);
	} else {
		p1 = PROC_NULL;
	}

	return p1;
}

void
proc_rele_locked(proc_t p)
{
	if (p->p_refcount > 0) {
		p->p_refcount--;
		record_procref(p, -1);
		if ((p->p_refcount == 0) && ((p->p_listflag & P_LIST_DRAINWAIT) == P_LIST_DRAINWAIT)) {
			p->p_listflag &= ~P_LIST_DRAINWAIT;
			wakeup(&p->p_refcount);
		}
	} else {
		panic("proc_rele_locked -ve ref\n");
	}
}

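/*
 * Look up an unreaped zombie by PID and take exclusive "waiting"
 * control of it (P_LIST_WAITING); release with proc_drop_zombref().
 * Returns PROC_NULL if the process is absent or has not begun exit.
 */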
proc_t
proc_find_zombref(int pid)
{
	proc_t p;

	proc_list_lock();

again:
	p = pfind_locked(pid);

	/* should we bail? */
	if ((p == PROC_NULL)                                    /* not found */
	    || ((p->p_listflag & P_LIST_INCREATE) != 0)         /* not created yet */
	    || ((p->p_listflag & P_LIST_EXITED) == 0)) {        /* not started exit */
		proc_list_unlock();
		return PROC_NULL;
	}

	/* If someone else is controlling the (unreaped) zombie - wait */
	if ((p->p_listflag & P_LIST_WAITING) != 0) {
		(void)msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
		goto again;
	}
	p->p_listflag |= P_LIST_WAITING;

	proc_list_unlock();

	return p;
}

void
proc_drop_zombref(proc_t p)
{
	proc_list_lock();
	if ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
		p->p_listflag &= ~P_LIST_WAITING;
		wakeup(&p->p_stat);
	}
	proc_list_unlock();
}


void
proc_refdrain(proc_t p)
{
	proc_refdrain_with_refwait(p, FALSE);
}

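/*
 * Drain outstanding references on p. With get_ref_and_allow_wait set,
 * new proc_ref_locked() callers are parked on P_LIST_REFWAIT and a
 * reference is returned to the caller (used for the initproc/launchd
 * exec path); otherwise the proc is marked P_LIST_DEAD and NULL is
 * returned.
 */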
proc_t
proc_refdrain_with_refwait(proc_t p, boolean_t get_ref_and_allow_wait)
{
	boolean_t initexec = FALSE;
	proc_list_lock();

	p->p_listflag |= P_LIST_DRAIN;
	if (get_ref_and_allow_wait) {
		/*
		 * All the calls to proc_ref_locked will wait
		 * for the flag to get cleared before returning a ref,
		 * unless the current thread is holding to a proc ref
		 * for any proc.
		 */
		p->p_listflag |= P_LIST_REFWAIT;
		if (p == initproc) {
			initexec = TRUE;
		}
	}

	/* Do not wait in ref drain for launchd exec */
	while (p->p_refcount && !initexec) {
		p->p_listflag |= P_LIST_DRAINWAIT;
		msleep(&p->p_refcount, proc_list_mlock, 0, "proc_refdrain", 0);
	}

	p->p_listflag &= ~P_LIST_DRAIN;
	if (!get_ref_and_allow_wait) {
		p->p_listflag |= P_LIST_DEAD;
	} else {
		/* Return a ref to the caller */
		p->p_refcount++;
		record_procref(p, 1);
	}

	proc_list_unlock();

	if (get_ref_and_allow_wait) {
		return p;
	}
	return NULL;
}

void
proc_refwake(proc_t p)
{
	proc_list_lock();
	p->p_listflag &= ~P_LIST_REFWAIT;
	wakeup(&p->p_listflag);
	proc_list_unlock();
}

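/*
 * Take a parent reference (p_parentref) on p's parent, or return
 * PROC_NULL if the parent is a zombie or has drained its children.
 * Drop the reference with proc_parentdropref().
 */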
proc_t
proc_parentholdref(proc_t p)
{
	proc_t parent = PROC_NULL;
	proc_t pp;
	int loopcnt = 0;


	proc_list_lock();
loop:
	pp = p->p_pptr;
	if ((pp == PROC_NULL) || (pp->p_stat == SZOMB) || ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED))) {
		parent = PROC_NULL;
		goto out;
	}

	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == P_LIST_CHILDDRSTART) {
		pp->p_listflag |= P_LIST_CHILDDRWAIT;
		msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
		loopcnt++;
		if (loopcnt == 5) {
			parent = PROC_NULL;
			goto out;
		}
		goto loop;
	}

	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == 0) {
		pp->p_parentref++;
		parent = pp;
		goto out;
	}

out:
	proc_list_unlock();
	return parent;
}

int
proc_parentdropref(proc_t p, int listlocked)
{
	if (listlocked == 0) {
		proc_list_lock();
	}

	if (p->p_parentref > 0) {
		p->p_parentref--;
		if ((p->p_parentref == 0) && ((p->p_listflag & P_LIST_PARENTREFWAIT) == P_LIST_PARENTREFWAIT)) {
			p->p_listflag &= ~P_LIST_PARENTREFWAIT;
			wakeup(&p->p_parentref);
		}
	} else {
		panic("proc_parentdropref -ve ref\n");
	}
	if (listlocked == 0) {
		proc_list_unlock();
	}

	return 0;
}

void
proc_childdrainstart(proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_CHILDDRSTART) == P_LIST_CHILDDRSTART) {
		panic("proc_childdrainstart: childdrain already started\n");
	}
#endif
	p->p_listflag |= P_LIST_CHILDDRSTART;
	/* wait for all that hold parentrefs to drop */
	while (p->p_parentref > 0) {
		p->p_listflag |= P_LIST_PARENTREFWAIT;
		msleep(&p->p_parentref, proc_list_mlock, 0, "proc_childdrainstart", 0);
	}
}


void
proc_childdrainend(proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if (p->p_childrencnt > 0) {
		panic("exiting: children still hanging around\n");
	}
#endif
	p->p_listflag |= P_LIST_CHILDDRAINED;
	if ((p->p_listflag & (P_LIST_CHILDLKWAIT | P_LIST_CHILDDRWAIT)) != 0) {
		p->p_listflag &= ~(P_LIST_CHILDLKWAIT | P_LIST_CHILDDRWAIT);
		wakeup(&p->p_childrencnt);
	}
}

void
proc_checkdeadrefs(__unused proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INHASH) != 0) {
		panic("proc being freed and still in hash %p: %u\n", p, p->p_listflag);
	}
	if (p->p_childrencnt != 0) {
		panic("proc being freed and pending children cnt %p:%d\n", p, p->p_childrencnt);
	}
	if (p->p_refcount != 0) {
		panic("proc being freed and pending refcount %p:%d\n", p, p->p_refcount);
	}
	if (p->p_parentref != 0) {
		panic("proc being freed and pending parentrefs %p:%d\n", p, p->p_parentref);
	}
#endif
}


__attribute__((always_inline, visibility("hidden")))
void
proc_require(proc_t proc, proc_require_flags_t flags)
{
	if ((flags & PROC_REQUIRE_ALLOW_NULL) && proc == PROC_NULL) {
		return;
	}
	if ((flags & PROC_REQUIRE_ALLOW_KERNPROC) && proc == &proc0) {
		return;
	}
	zone_id_require(ZONE_ID_PROC, sizeof(struct proc), proc);
}

int
proc_pid(proc_t p)
{
	if (p != NULL) {
		return p->p_pid;
	}
	return -1;
}

int
proc_ppid(proc_t p)
{
	if (p != NULL) {
		return p->p_ppid;
	}
	return -1;
}

int
proc_original_ppid(proc_t p)
{
	if (p != NULL) {
		return p->p_original_ppid;
	}
	return -1;
}

int
proc_starttime(proc_t p, struct timeval *tv)
{
	if (p != NULL && tv != NULL) {
		tv->tv_sec = p->p_start.tv_sec;
		tv->tv_usec = p->p_start.tv_usec;
		return 0;
	}
	return EINVAL;
}

int
proc_selfpid(void)
{
	return current_proc()->p_pid;
}

int
proc_selfppid(void)
{
	return current_proc()->p_ppid;
}

uint64_t
proc_selfcsflags(void)
{
	return (uint64_t)current_proc()->p_csflags;
}

int
proc_csflags(proc_t p, uint64_t *flags)
{
	if (p && flags) {
		*flags = (uint64_t)p->p_csflags;
		return 0;
	}
	return EINVAL;
}

uint32_t
proc_platform(const proc_t p)
{
	if (p != NULL) {
		return p->p_platform;
	}
	return (uint32_t)-1;
}

uint32_t
proc_min_sdk(proc_t p)
{
	if (p != NULL) {
		return p->p_min_sdk;
	}
	return (uint32_t)-1;
}

uint32_t
proc_sdk(proc_t p)
{
	if (p != NULL) {
		return p->p_sdk;
	}
	return (uint32_t)-1;
}

#if CONFIG_DTRACE
static proc_t
dtrace_current_proc_vforking(void)
{
	thread_t th = current_thread();
	struct uthread *ut = get_bsdthread_info(th);

	if (ut &&
	    ((ut->uu_flag & (UT_VFORK | UT_VFORKING)) == (UT_VFORK | UT_VFORKING))) {
		/*
		 * Handle the narrow window where we're in the vfork syscall,
		 * but we're not quite ready to claim (in particular, to DTrace)
		 * that we're running as the child.
		 */
		return get_bsdtask_info(get_threadtask(th));
	}
	return current_proc();
}

int
dtrace_proc_selfpid(void)
{
	return dtrace_current_proc_vforking()->p_pid;
}

int
dtrace_proc_selfppid(void)
{
	return dtrace_current_proc_vforking()->p_ppid;
}

uid_t
dtrace_proc_selfruid(void)
{
	return dtrace_current_proc_vforking()->p_ruid;
}
#endif /* CONFIG_DTRACE */

proc_t
proc_parent(proc_t p)
{
	proc_t parent;
	proc_t pp;

	proc_list_lock();
loop:
	pp = p->p_pptr;
	parent = proc_ref_locked(pp);
	if ((parent == PROC_NULL) && (pp != PROC_NULL) && (pp->p_stat != SZOMB) && ((pp->p_listflag & P_LIST_EXITED) != 0) && ((pp->p_listflag & P_LIST_CHILDDRAINED) == 0)) {
		pp->p_listflag |= P_LIST_CHILDLKWAIT;
		msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
		goto loop;
	}
	proc_list_unlock();
	return parent;
}

static boolean_t
proc_parent_is_currentproc(proc_t p)
{
	boolean_t ret = FALSE;

	proc_list_lock();
	if (p->p_pptr == current_proc()) {
		ret = TRUE;
	}

	proc_list_unlock();
	return ret;
}

void
proc_name(int pid, char * buf, int size)
{
	proc_t p;

	if (size <= 0) {
		return;
	}

	bzero(buf, size);

	if ((p = proc_find(pid)) != PROC_NULL) {
		strlcpy(buf, &p->p_comm[0], size);
		proc_rele(p);
	}
}

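/*
 * Copy the process name for the kernel debugger / stackshot path;
 * prefers the long name (p_name) when the destination can hold it.
 */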
void
proc_name_kdp(task_t t, char * buf, int size)
{
	proc_t p = get_bsdtask_info(t);
	if (p == PROC_NULL) {
		return;
	}

	if ((size_t)size > sizeof(p->p_comm)) {
		strlcpy(buf, &p->p_name[0], MIN((int)sizeof(p->p_name), size));
	} else {
		strlcpy(buf, &p->p_comm[0], MIN((int)sizeof(p->p_comm), size));
	}
}

boolean_t
proc_binary_uuid_kdp(task_t task, uuid_t uuid)
{
	proc_t p = get_bsdtask_info(task);
	if (p == PROC_NULL) {
		return FALSE;
	}

	proc_getexecutableuuid(p, uuid, sizeof(uuid_t));

	return TRUE;
}

int
proc_threadname_kdp(void * uth, char * buf, size_t size)
{
	if (size < MAXTHREADNAMESIZE) {
		/* this is really just a protective measure for the future in
		 * case the thread name size in stackshot gets out of sync with
		 * the BSD max thread name size. Note that bsd_getthreadname
		 * doesn't take input buffer size into account. */
		return -1;
	}

	if (uth != NULL) {
		bsd_getthreadname(uth, buf);
	}
	return 0;
}


/* note that this function is generally going to be called from stackshot,
 * and the arguments will be coming from a struct which is declared packed
 * thus the input arguments will in general be unaligned. We have to handle
 * that here. */
void
proc_starttime_kdp(void *p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unaligned_u64 *abstime)
{
	proc_t pp = (proc_t)p;
	if (pp != PROC_NULL) {
		if (tv_sec != NULL) {
			*tv_sec = pp->p_start.tv_sec;
		}
		if (tv_usec != NULL) {
			*tv_usec = pp->p_start.tv_usec;
		}
		if (abstime != NULL) {
			if (pp->p_stats != NULL) {
				*abstime = pp->p_stats->ps_start;
			} else {
				*abstime = 0;
			}
		}
	}
}

void
proc_archinfo_kdp(void* p, cpu_type_t* cputype, cpu_subtype_t* cpusubtype)
{
	proc_t pp = (proc_t)p;
	if (pp != PROC_NULL) {
		*cputype = pp->p_cputype;
		*cpusubtype = pp->p_cpusubtype;
	}
}

char *
proc_name_address(void *p)
{
	return &((proc_t)p)->p_comm[0];
}

char *
proc_longname_address(void *p)
{
	return &((proc_t)p)->p_name[0];
}

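/* Return the long name (p_name) if set, otherwise the short p_comm. */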
char *
proc_best_name(proc_t p)
{
	if (p->p_name[0] != '\0') {
		return &p->p_name[0];
	}
	return &p->p_comm[0];
}

void
proc_selfname(char * buf, int size)
{
	proc_t p;

	if ((p = current_proc()) != (proc_t)0) {
		strlcpy(buf, &p->p_comm[0], size);
	}
}

void
proc_signal(int pid, int signum)
{
	proc_t p;

	if ((p = proc_find(pid)) != PROC_NULL) {
		psignal(p, signum);
		proc_rele(p);
	}
}

int
proc_issignal(int pid, sigset_t mask)
{
	proc_t p;
	int error = 0;

	if ((p = proc_find(pid)) != PROC_NULL) {
		error = proc_pendingsignals(p, mask);
		proc_rele(p);
	}

	return error;
}

int
proc_noremotehang(proc_t p)
{
	int retval = 0;

	if (p) {
		retval = p->p_flag & P_NOREMOTEHANG;
	}
	return retval ? 1 : 0;
}

int
proc_exiting(proc_t p)
{
	int retval = 0;

	if (p) {
		retval = p->p_lflag & P_LEXIT;
	}
	return retval ? 1 : 0;
}

int
proc_in_teardown(proc_t p)
{
	int retval = 0;

	if (p) {
		retval = p->p_lflag & P_LPEXIT;
	}
	return retval ? 1 : 0;
}

int
proc_forcequota(proc_t p)
{
	int retval = 0;

	if (p) {
		retval = p->p_flag & P_FORCEQUOTA;
	}
	return retval ? 1 : 0;
}

int
proc_suser(proc_t p)
{
	kauth_cred_t my_cred;
	int error;

	my_cred = kauth_cred_proc_ref(p);
	error = suser(my_cred, &p->p_acflag);
	kauth_cred_unref(&my_cred);
	return error;
}

task_t
proc_task(proc_t proc)
{
	return (task_t)proc->task;
}

/*
 * Obtain the first thread in a process
 *
 * XXX This is a bad thing to do; it exists predominantly to support the
 * XXX use of proc_t's in places that should really be using
 * XXX thread_t's instead.  This maintains historical behaviour, but really
 * XXX needs an audit of the context (proxy vs. not) to clean up.
 */
thread_t
proc_thread(proc_t proc)
{
	LCK_MTX_ASSERT(&proc->p_mlock, LCK_MTX_ASSERT_OWNED);

	uthread_t uth = TAILQ_FIRST(&proc->p_uthlist);

	if (uth != NULL) {
		return uth->uu_context.vc_thread;
	}

	return NULL;
}

kauth_cred_t
proc_ucred(proc_t p)
{
	return p->p_ucred;
}

struct uthread *
current_uthread(void)
{
	thread_t th = current_thread();

	return (struct uthread *)get_bsdthread_info(th);
}


int
proc_is64bit(proc_t p)
{
	return IS_64BIT_PROCESS(p);
}

int
proc_is64bit_data(proc_t p)
{
	assert(p->task);
	return (int)task_get_64bit_data(p->task);
}

int
proc_isinitproc(proc_t p)
{
	if (initproc == NULL) {
		return 0;
	}
	return p == initproc;
}

int
proc_pidversion(proc_t p)
{
	return p->p_idversion;
}

uint32_t
proc_persona_id(proc_t p)
{
	return (uint32_t)persona_id_from_proc(p);
}

uint32_t
proc_getuid(proc_t p)
{
	return p->p_uid;
}

uint32_t
proc_getgid(proc_t p)
{
	return p->p_gid;
}

uint64_t
proc_uniqueid(proc_t p)
{
	return p->p_uniqueid;
}

uint64_t
proc_puniqueid(proc_t p)
{
	return p->p_puniqueid;
}

void
proc_coalitionids(__unused proc_t p, __unused uint64_t ids[COALITION_NUM_TYPES])
{
#if CONFIG_COALITIONS
	task_coalition_ids(p->task, ids);
#else
	memset(ids, 0, sizeof(uint64_t[COALITION_NUM_TYPES]));
#endif
	return;
}

uint64_t
proc_was_throttled(proc_t p)
{
	return p->was_throttled;
}

uint64_t
proc_did_throttle(proc_t p)
{
	return p->did_throttle;
}

int
proc_getcdhash(proc_t p, unsigned char *cdhash)
{
	return vn_getcdhash(p->p_textvp, p->p_textoff, cdhash);
}

int
proc_exitstatus(proc_t p)
{
	return p->p_xstat & 0xffff;
}

void
proc_getexecutableuuid(proc_t p, unsigned char *uuidbuf, unsigned long size)
{
	if (size >= sizeof(p->p_uuid)) {
		memcpy(uuidbuf, p->p_uuid, sizeof(p->p_uuid));
	}
}

/* Return vnode for executable with an iocount. Must be released with vnode_put() */
vnode_t
proc_getexecutablevnode(proc_t p)
{
	vnode_t tvp = p->p_textvp;

	if (tvp != NULLVP) {
		if (vnode_getwithref(tvp) == 0) {
			return tvp;
		}
	}

	return NULLVP;
}

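/*
 * Return the session's controlling tty vnode with an iocount held;
 * the caller must release it with vnode_put().
 */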
int
proc_gettty(proc_t p, vnode_t *vp)
{
	if (!p || !vp) {
		return EINVAL;
	}

	struct session *procsp = proc_session(p);
	int err = EINVAL;

	if (procsp != SESSION_NULL) {
		session_lock(procsp);
		vnode_t ttyvp = procsp->s_ttyvp;
		int ttyvid = procsp->s_ttyvid;
		session_unlock(procsp);

		if (ttyvp) {
			if (vnode_getwithvid(ttyvp, ttyvid) == 0) {
				*vp = ttyvp;
				err = 0;
			}
		} else {
			err = ENOENT;
		}

		session_rele(procsp);
	}

	return err;
}

int
proc_gettty_dev(proc_t p, dev_t *dev)
{
	struct session *procsp = proc_session(p);
	boolean_t has_tty = FALSE;

	if (procsp != SESSION_NULL) {
		session_lock(procsp);

		struct tty * tp = SESSION_TP(procsp);
		if (tp != TTY_NULL) {
			*dev = tp->t_dev;
			has_tty = TRUE;
		}

		session_unlock(procsp);
		session_rele(procsp);
	}

	if (has_tty) {
		return 0;
	} else {
		return EINVAL;
	}
}

int
proc_selfexecutableargs(uint8_t *buf, size_t *buflen)
{
	proc_t p = current_proc();

	// buflen must always be provided
	if (buflen == NULL) {
		return EINVAL;
	}

	// If a buf is provided, there must be at least enough room to fit argc
	if (buf && *buflen < sizeof(p->p_argc)) {
		return EINVAL;
	}

	if (!p->user_stack) {
		return EINVAL;
	}

	if (buf == NULL) {
		*buflen = p->p_argslen + sizeof(p->p_argc);
		return 0;
	}

	// Copy in argc to the first 4 bytes
	memcpy(buf, &p->p_argc, sizeof(p->p_argc));

	if (*buflen > sizeof(p->p_argc) && p->p_argslen > 0) {
		// See memory layout comment in kern_exec.c:exec_copyout_strings()
		// We want to copy starting from `p_argslen` bytes away from top of stack
		return copyin(p->user_stack - p->p_argslen,
		           buf + sizeof(p->p_argc),
		           MIN(p->p_argslen, *buflen - sizeof(p->p_argc)));
	} else {
		return 0;
	}
}

off_t
proc_getexecutableoffset(proc_t p)
{
	return p->p_textoff;
}

void
bsd_set_dependency_capable(task_t task)
{
	proc_t p = get_bsdtask_info(task);

	if (p) {
		OSBitOrAtomic(P_DEPENDENCY_CAPABLE, &p->p_flag);
	}
}


#ifndef __arm__
int
IS_64BIT_PROCESS(proc_t p)
{
	if (p && (p->p_flag & P_LP64)) {
		return 1;
	} else {
		return 0;
	}
}
#endif

/*
 * Locate a process by number
 */
proc_t
pfind_locked(pid_t pid)
{
	proc_t p;
#if DEBUG
	proc_t q;
#endif

	if (!pid) {
		return kernproc;
	}

	for (p = PIDHASH(pid)->lh_first; p != 0; p = p->p_hash.le_next) {
		if (p->p_pid == pid) {
#if DEBUG
			for (q = p->p_hash.le_next; q != 0; q = q->p_hash.le_next) {
				if ((p != q) && (q->p_pid == pid)) {
					panic("two procs with same pid %p:%p:%d:%d\n", p, q, p->p_pid, q->p_pid);
				}
			}
#endif
			return p;
		}
	}
	return NULL;
}

/*
 * Locate a zombie by PID
 */
__private_extern__ proc_t
pzfind(pid_t pid)
{
	proc_t p;


	proc_list_lock();

	for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (p->p_pid == pid) {
			break;
		}
	}

	proc_list_unlock();

	return p;
}

/*
 * Locate a process group by number
 */

struct pgrp *
pgfind(pid_t pgid)
{
	struct pgrp * pgrp;

	proc_list_lock();
	pgrp = pgfind_internal(pgid);
	if ((pgrp == NULL) || ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) != 0)) {
		pgrp = PGRP_NULL;
	} else {
		pgrp->pg_refcount++;
	}
	proc_list_unlock();
	return pgrp;
}



struct pgrp *
pgfind_internal(pid_t pgid)
{
	struct pgrp *pgrp;

	for (pgrp = PGRPHASH(pgid)->lh_first; pgrp != 0; pgrp = pgrp->pg_hash.le_next) {
		if (pgrp->pg_id == pgid) {
			return pgrp;
		}
	}
	return NULL;
}

void
pg_rele(struct pgrp * pgrp)
{
	if (pgrp == PGRP_NULL) {
		return;
	}
	pg_rele_dropref(pgrp);
}

void
pg_rele_dropref(struct pgrp * pgrp)
{
	proc_list_lock();
	if ((pgrp->pg_refcount == 1) && ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) == PGRP_FLAG_TERMINATE)) {
		proc_list_unlock();
		pgdelete_dropref(pgrp);
		return;
	}

	pgrp->pg_refcount--;
	proc_list_unlock();
}

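/*
 * Locate a session by ID. Like pgfind_internal(), this walks the hash
 * without taking any lock itself, so callers are expected to hold the
 * proc list lock.
 */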
struct session *
session_find_internal(pid_t sessid)
{
	struct session *sess;

	for (sess = SESSHASH(sessid)->lh_first; sess != 0; sess = sess->s_hash.le_next) {
		if (sess->s_sid == sessid) {
			return sess;
		}
	}
	return NULL;
}


/*
 * Make a new process ready to become a useful member of society by making it
 * visible in all the right places and initialize its own lists to empty.
 *
 * Parameters:	parent			The parent of the process to insert
 *		child			The child process to insert
 *
 * Returns:	(void)
 *
 * Notes:	Insert a child process into the parents process group, assign
 *		the child the parent process pointer and PPID of the parent,
 *		place it on the parents p_children list as a sibling,
 *		initialize its own child list, place it in the allproc list,
 *		insert it in the proper hash bucket, and initialize its
 *		event list.
 */
void
pinsertchild(proc_t parent, proc_t child)
{
	struct pgrp * pg;

	LIST_INIT(&child->p_children);
	child->p_pptr = parent;
	child->p_ppid = parent->p_pid;
	child->p_original_ppid = parent->p_pid;
	child->p_puniqueid = parent->p_uniqueid;
	child->p_xhighbits = 0;

	pg = proc_pgrp(parent);
	pgrp_add(pg, parent, child);
	pg_rele(pg);

	proc_list_lock();

#if CONFIG_MEMORYSTATUS
	memorystatus_add(child, TRUE);
#endif

	parent->p_childrencnt++;
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);

	LIST_INSERT_HEAD(&allproc, child, p_list);
	/* mark the completion of proc creation */
	child->p_listflag &= ~P_LIST_INCREATE;

	proc_list_unlock();
}

/*
 * Move p to a new or existing process group (and session)
 *
 * Returns:	0			Success
 *		ESRCH			No such process
 */
int
enterpgrp(proc_t p, pid_t pgid, int mksess)
{
	struct pgrp *pgrp;
	struct pgrp *mypgrp;
	struct session * procsp;

	pgrp = pgfind(pgid);
	mypgrp = proc_pgrp(p);
	procsp = proc_session(p);

#if DIAGNOSTIC
	if (pgrp != NULL && mksess) {   /* firewalls */
		panic("enterpgrp: setsid into non-empty pgrp");
	}
	if (SESS_LEADER(p, procsp)) {
		panic("enterpgrp: session leader attempted setpgrp");
	}
#endif
	if (pgrp == PGRP_NULL) {
		pid_t savepid = p->p_pid;
		proc_t np = PROC_NULL;
		/*
		 * new process group
		 */
#if DIAGNOSTIC
		if (p->p_pid != pgid) {
			panic("enterpgrp: new pgrp and pid != pgid");
		}
#endif
		pgrp = zalloc_flags(pgrp_zone, Z_WAITOK | Z_ZERO);
		if ((np = proc_find(savepid)) == NULL || np != p) {
			if (np != PROC_NULL) {
				proc_rele(np);
			}
			if (mypgrp != PGRP_NULL) {
				pg_rele(mypgrp);
			}
			if (procsp != SESSION_NULL) {
				session_rele(procsp);
			}
			zfree(pgrp_zone, pgrp);
			return ESRCH;
		}
		proc_rele(np);
		if (mksess) {
			struct session *sess;

			/*
			 * new session
			 */
			sess = zalloc_flags(session_zone, Z_WAITOK | Z_ZERO);
			sess->s_leader = p;
			sess->s_sid = p->p_pid;
			sess->s_count = 1;
			sess->s_ttypgrpid = NO_PID;

			lck_mtx_init(&sess->s_mlock, proc_mlock_grp, proc_lck_attr);

			bcopy(procsp->s_login, sess->s_login,
			    sizeof(sess->s_login));
			OSBitAndAtomic(~((uint32_t)P_CONTROLT), &p->p_flag);
			proc_list_lock();
			LIST_INSERT_HEAD(SESSHASH(sess->s_sid), sess, s_hash);
			proc_list_unlock();
			pgrp->pg_session = sess;
			p->p_sessionid = sess->s_sid;
#if DIAGNOSTIC
			if (p != current_proc()) {
				panic("enterpgrp: mksession and p != curproc");
			}
#endif
		} else {
			proc_list_lock();
			pgrp->pg_session = procsp;
			p->p_sessionid = procsp->s_sid;

			if ((pgrp->pg_session->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
				panic("enterpgrp: providing ref to terminating session");
			}
			pgrp->pg_session->s_count++;
			proc_list_unlock();
		}
		pgrp->pg_id = pgid;

		lck_mtx_init(&pgrp->pg_mlock, proc_mlock_grp, proc_lck_attr);

		LIST_INIT(&pgrp->pg_members);
		proc_list_lock();
		pgrp->pg_refcount = 1;
		LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
		proc_list_unlock();
	} else if (pgrp == mypgrp) {
		pg_rele(pgrp);
		if (mypgrp != NULL) {
			pg_rele(mypgrp);
		}
		if (procsp != SESSION_NULL) {
			session_rele(procsp);
		}
		return 0;
	}

	if (procsp != SESSION_NULL) {
		session_rele(procsp);
	}
	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	fixjobc(p, mypgrp, 0);

	if (mypgrp != PGRP_NULL) {
		pg_rele(mypgrp);
	}
	pgrp_replace(p, pgrp);
	pg_rele(pgrp);

	return 0;
}

/*
 * remove process from process group
 */
int
leavepgrp(proc_t p)
{
	pgrp_remove(p);
	return 0;
}

/*
 * delete a process group
 */
static void
pgdelete_dropref(struct pgrp *pgrp)
{
	struct tty *ttyp;
	int emptypgrp = 1;
	struct session *sessp;


	pgrp_lock(pgrp);
	if (pgrp->pg_membercnt != 0) {
		emptypgrp = 0;
	}
	pgrp_unlock(pgrp);

	proc_list_lock();
	pgrp->pg_refcount--;
	if ((emptypgrp == 0) || (pgrp->pg_membercnt != 0)) {
		proc_list_unlock();
		return;
	}

	pgrp->pg_listflags |= PGRP_FLAG_TERMINATE;

	if (pgrp->pg_refcount > 0) {
		proc_list_unlock();
		return;
	}

	pgrp->pg_listflags |= PGRP_FLAG_DEAD;
	LIST_REMOVE(pgrp, pg_hash);

	proc_list_unlock();

	ttyp = SESSION_TP(pgrp->pg_session);
	if (ttyp != TTY_NULL) {
		if (ttyp->t_pgrp == pgrp) {
			tty_lock(ttyp);
			/* Re-check after acquiring the lock */
			if (ttyp->t_pgrp == pgrp) {
				ttyp->t_pgrp = NULL;
				pgrp->pg_session->s_ttypgrpid = NO_PID;
			}
			tty_unlock(ttyp);
		}
	}

	proc_list_lock();

	sessp = pgrp->pg_session;
	if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
		panic("pg_deleteref: manipulating refs of already terminating session");
	}
	if (--sessp->s_count == 0) {
		if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
			panic("pg_deleteref: terminating already terminated session");
		}
		sessp->s_listflags |= S_LIST_TERM;
		ttyp = SESSION_TP(sessp);
		LIST_REMOVE(sessp, s_hash);
		proc_list_unlock();
		if (ttyp != TTY_NULL) {
			tty_lock(ttyp);
			if (ttyp->t_session == sessp) {
				ttyp->t_session = NULL;
			}
			tty_unlock(ttyp);
		}
		proc_list_lock();
		sessp->s_listflags |= S_LIST_DEAD;
		if (sessp->s_count != 0) {
			panic("pg_deleteref: freeing session in use");
		}
		proc_list_unlock();
		lck_mtx_destroy(&sessp->s_mlock, proc_mlock_grp);

		zfree(session_zone, sessp);
	} else {
		proc_list_unlock();
	}
	lck_mtx_destroy(&pgrp->pg_mlock, proc_mlock_grp);
	zfree(pgrp_zone, pgrp);
}


/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 */
int
fixjob_callback(proc_t p, void * arg)
{
	struct fixjob_iterargs *fp;
	struct pgrp * pg, *hispg;
	struct session * mysession, *hissess;
	int entering;

	fp = (struct fixjob_iterargs *)arg;
	pg = fp->pg;
	mysession = fp->mysession;
	entering = fp->entering;

	hispg = proc_pgrp(p);
	hissess = proc_session(p);

	if ((hispg != pg) &&
	    (hissess == mysession)) {
		pgrp_lock(hispg);
		if (entering) {
			hispg->pg_jobc++;
			pgrp_unlock(hispg);
		} else if (--hispg->pg_jobc == 0) {
			pgrp_unlock(hispg);
			orphanpg(hispg);
		} else {
			pgrp_unlock(hispg);
		}
	}
	if (hissess != SESSION_NULL) {
		session_rele(hissess);
	}
	if (hispg != PGRP_NULL) {
		pg_rele(hispg);
	}

	return PROC_RETURNED;
}

void
fixjobc(proc_t p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp = PGRP_NULL;
	struct session *hissess = SESSION_NULL;
	struct session *mysession = pgrp->pg_session;
	proc_t parent;
	struct fixjob_iterargs fjarg;
	boolean_t proc_parent_self;

	/*
	 * Check if p's parent is current proc, if yes then no need to take
	 * a ref; calling proc_parent with current proc as parent may
	 * deadlock if current proc is exiting.
	 */
	proc_parent_self = proc_parent_is_currentproc(p);
	if (proc_parent_self) {
		parent = current_proc();
	} else {
		parent = proc_parent(p);
	}

	if (parent != PROC_NULL) {
		hispgrp = proc_pgrp(parent);
		hissess = proc_session(parent);
		if (!proc_parent_self) {
			proc_rele(parent);
		}
	}


	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	if ((hispgrp != pgrp) &&
	    (hissess == mysession)) {
		pgrp_lock(pgrp);
		if (entering) {
			pgrp->pg_jobc++;
			pgrp_unlock(pgrp);
		} else if (--pgrp->pg_jobc == 0) {
			pgrp_unlock(pgrp);
			orphanpg(pgrp);
		} else {
			pgrp_unlock(pgrp);
		}
	}

	if (hissess != SESSION_NULL) {
		session_rele(hissess);
	}
	if (hispgrp != PGRP_NULL) {
		pg_rele(hispgrp);
	}

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	fjarg.pg = pgrp;
	fjarg.mysession = mysession;
	fjarg.entering = entering;
	proc_childrenwalk(p, fixjob_callback, &fjarg);
}

/*
 * The pidlist_* routines support the functions in this file that
 * walk lists of processes applying filters and callouts to the
 * elements of the list.
 *
 * A prior implementation used a single linear array, which can be
 * tricky to allocate on large systems. This implementation creates
 * an SLIST of modestly sized arrays of PIDS_PER_ENTRY elements.
 *
 * The array should be sized large enough to keep the overhead of
 * walking the list low, but small enough that blocking allocations of
 * pidlist_entry_t structures always succeed.
 */

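/*
 * A minimal usage sketch (hypothetical caller, following the pattern
 * used by orphanpg() below): size the list outside any lock, then fill
 * and walk it.
 *
 *	pidlist_t pid_list, *pl = pidlist_init(&pid_list);
 *	pidlist_alloc(pl, needed);        // grow to at least `needed` slots
 *	pidlist_set_active(pl);
 *	pidlist_add_pid(pl, proc_pid(p)); // repeat for each process
 *	// ... walk pl->pl_head with SLIST_FOREACH ...
 *	pidlist_free(pl);
 */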
#define PIDS_PER_ENTRY 1021

typedef struct pidlist_entry {
	SLIST_ENTRY(pidlist_entry) pe_link;
	u_int pe_nused;
	pid_t pe_pid[PIDS_PER_ENTRY];
} pidlist_entry_t;

typedef struct {
	SLIST_HEAD(, pidlist_entry) pl_head;
	struct pidlist_entry *pl_active;
	u_int pl_nalloc;
} pidlist_t;

static __inline__ pidlist_t *
pidlist_init(pidlist_t *pl)
{
	SLIST_INIT(&pl->pl_head);
	pl->pl_active = NULL;
	pl->pl_nalloc = 0;
	return pl;
}

static u_int
pidlist_alloc(pidlist_t *pl, u_int needed)
{
	while (pl->pl_nalloc < needed) {
		pidlist_entry_t *pe = kheap_alloc(KHEAP_TEMP, sizeof(*pe),
		    Z_WAITOK | Z_ZERO);
		if (NULL == pe) {
			panic("no space for pidlist entry");
		}
		SLIST_INSERT_HEAD(&pl->pl_head, pe, pe_link);
		pl->pl_nalloc += (sizeof(pe->pe_pid) / sizeof(pe->pe_pid[0]));
	}
	return pl->pl_nalloc;
}

static void
pidlist_free(pidlist_t *pl)
{
	pidlist_entry_t *pe;
	while (NULL != (pe = SLIST_FIRST(&pl->pl_head))) {
		SLIST_FIRST(&pl->pl_head) = SLIST_NEXT(pe, pe_link);
		kheap_free(KHEAP_TEMP, pe, sizeof(*pe));
	}
	pl->pl_nalloc = 0;
}

static __inline__ void
pidlist_set_active(pidlist_t *pl)
{
	pl->pl_active = SLIST_FIRST(&pl->pl_head);
	assert(pl->pl_active);
}

static void
pidlist_add_pid(pidlist_t *pl, pid_t pid)
{
	pidlist_entry_t *pe = pl->pl_active;
	if (pe->pe_nused >= sizeof(pe->pe_pid) / sizeof(pe->pe_pid[0])) {
		if (NULL == (pe = SLIST_NEXT(pe, pe_link))) {
			panic("pidlist allocation exhausted");
		}
		pl->pl_active = pe;
	}
	pe->pe_pid[pe->pe_nused++] = pid;
}

static __inline__ u_int
pidlist_nalloc(const pidlist_t *pl)
{
	return pl->pl_nalloc;
}

/*
 * A process group has become orphaned; if there are any stopped processes in
 * the group, hang up all processes in that group.
 */
static void
orphanpg(struct pgrp *pgrp)
{
	pidlist_t pid_list, *pl = pidlist_init(&pid_list);
	u_int pid_count_available = 0;
	proc_t p;

	/* allocate outside of the pgrp_lock */
	for (;;) {
		pgrp_lock(pgrp);

		boolean_t should_iterate = FALSE;
		pid_count_available = 0;

		PGMEMBERS_FOREACH(pgrp, p) {
			pid_count_available++;
			if (p->p_stat == SSTOP) {
				should_iterate = TRUE;
			}
		}
		if (pid_count_available == 0 || !should_iterate) {
			pgrp_unlock(pgrp);
			goto out; /* no orphaned processes OR nothing stopped */
		}
		if (pidlist_nalloc(pl) >= pid_count_available) {
			break;
		}
		pgrp_unlock(pgrp);

		pidlist_alloc(pl, pid_count_available);
	}
	pidlist_set_active(pl);

	u_int pid_count = 0;
	PGMEMBERS_FOREACH(pgrp, p) {
		pidlist_add_pid(pl, proc_pid(p));
		if (++pid_count >= pid_count_available) {
			break;
		}
	}
	pgrp_unlock(pgrp);

	const pidlist_entry_t *pe;
	SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
		for (u_int i = 0; i < pe->pe_nused; i++) {
			const pid_t pid = pe->pe_pid[i];
			if (0 == pid) {
				continue; /* skip kernproc */
			}
			p = proc_find(pid);
			if (!p) {
				continue;
			}
			proc_transwait(p, 0);
			pt_setrunnable(p);
			psignal(p, SIGHUP);
			psignal(p, SIGCONT);
			proc_rele(p);
		}
	}
out:
	pidlist_free(pl);
}

boolean_t
proc_is_translated(proc_t p __unused)
{
	return 0;
}

int
proc_is_classic(proc_t p __unused)
{
	return 0;
}

bool
proc_is_exotic(
	proc_t p)
{
	if (p == NULL) {
		return false;
	}
	return task_is_exotic(proc_task(p));
}

bool
proc_is_alien(
	proc_t p)
{
	if (p == NULL) {
		return false;
	}
	return task_is_alien(proc_task(p));
}

/* XXX Why does this function exist?  Need to kill it off... */
proc_t
current_proc_EXTERNAL(void)
{
	return current_proc();
}

int
proc_is_forcing_hfs_case_sensitivity(proc_t p)
{
	return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY) ? 1 : 0;
}

bool
proc_ignores_content_protection(proc_t p)
{
	return os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_IGNORE_CONTENT_PROTECTION;
}

#if CONFIG_COREDUMP
/*
 * proc_core_name(name, uid, pid)
 * Expand the name described in corefilename, using name, uid, and pid.
 * corefilename is a printf-like string, with three format specifiers:
 *	%N	name of process ("name")
 *	%P	process id (pid)
 *	%U	user id (uid)
 * For example, "%N.core" is the default; core dumps can be disabled entirely
 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
 * This is controlled by the sysctl variable kern.corefile (see above).
 */
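/*
 * Illustrative expansions: with corefilename "/cores/core.%P", a process
 * with pid 1234 dumps to "/cores/core.1234"; with "%N.core", a process
 * named "launchd" dumps to "launchd.core".
 */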
__private_extern__ int
proc_core_name(const char *name, uid_t uid, pid_t pid, char *cf_name,
    size_t cf_name_len)
{
	const char *format, *appendstr;
	char id_buf[11];                /* Buffer for pid/uid -- max 4B */
	size_t i, l, n;

	if (cf_name == NULL) {
		goto toolong;
	}

	format = corefilename;
	for (i = 0, n = 0; n < cf_name_len && format[i]; i++) {
		switch (format[i]) {
		case '%':       /* Format character */
			i++;
			switch (format[i]) {
			case '%':
				appendstr = "%";
				break;
			case 'N':       /* process name */
				appendstr = name;
				break;
			case 'P':       /* process id */
				snprintf(id_buf, sizeof(id_buf), "%u", pid);
				appendstr = id_buf;
				break;
			case 'U':       /* user id */
				snprintf(id_buf, sizeof(id_buf), "%u", uid);
				appendstr = id_buf;
				break;
			case '\0':      /* format string ended in % symbol */
				goto endofstring;
			default:
				appendstr = "";
				log(LOG_ERR,
				    "Unknown format character %c in `%s'\n",
				    format[i], format);
			}
			l = strlen(appendstr);
			if ((n + l) >= cf_name_len) {
				goto toolong;
			}
			bcopy(appendstr, cf_name + n, l);
			n += l;
			break;
		default:
			cf_name[n++] = format[i];
		}
	}
	if (format[i] != '\0') {
		goto toolong;
	}
	return 0;
toolong:
	log(LOG_ERR, "pid %ld (%s), uid (%u): corename is too long\n",
	    (long)pid, name, (uint32_t)uid);
	return 1;
endofstring:
	log(LOG_ERR, "pid %ld (%s), uid (%u): unexpected end of string after %% token\n",
	    (long)pid, name, (uint32_t)uid);
	return 1;
}
#endif /* CONFIG_COREDUMP */

/* Code Signing related routines */

int
csops(__unused proc_t p, struct csops_args *uap, __unused int32_t *retval)
{
	return csops_internal(uap->pid, uap->ops, uap->useraddr,
	           uap->usersize, USER_ADDR_NULL);
}

int
csops_audittoken(__unused proc_t p, struct csops_audittoken_args *uap, __unused int32_t *retval)
{
	if (uap->uaudittoken == USER_ADDR_NULL) {
		return EINVAL;
	}
	return csops_internal(uap->pid, uap->ops, uap->useraddr,
	           uap->usersize, uap->uaudittoken);
}

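/*
 * Copy a code-signing blob out to user space. If the user buffer is too
 * small, copy out a zero header carrying the required length and return
 * ERANGE so the caller can retry with a larger buffer.
 */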
static int
csops_copy_token(void *start, size_t length, user_size_t usize, user_addr_t uaddr)
{
	char fakeheader[8] = { 0 };
	int error;

	if (usize < sizeof(fakeheader)) {
		return ERANGE;
	}

	/* if no blob, fill in zero header */
	if (NULL == start) {
		start = fakeheader;
		length = sizeof(fakeheader);
	} else if (usize < length) {
		/* ... if input too short, copy out length of entitlement */
		uint32_t length32 = htonl((uint32_t)length);
		memcpy(&fakeheader[4], &length32, sizeof(length32));

		error = copyout(fakeheader, uaddr, sizeof(fakeheader));
		if (error == 0) {
			return ERANGE; /* input buffer too short; ERANGE signals that */
		}
		return error;
	}
	return copyout(start, uaddr, length);
}

static int
csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaudittoken)
{
	size_t usize = (size_t)CAST_DOWN(size_t, usersize);
	proc_t pt;
	int forself;
	int error;
	vnode_t tvp;
	off_t toff;
	unsigned char cdhash[SHA1_RESULTLEN];
	audit_token_t token;
	unsigned int upid = 0, uidversion = 0;

	forself = error = 0;

	if (pid == 0) {
		pid = proc_selfpid();
	}
	if (pid == proc_selfpid()) {
		forself = 1;
	}


	switch (ops) {
	case CS_OPS_STATUS:
	case CS_OPS_CDHASH:
	case CS_OPS_PIDOFFSET:
	case CS_OPS_ENTITLEMENTS_BLOB:
	case CS_OPS_IDENTITY:
	case CS_OPS_BLOB:
	case CS_OPS_TEAMID:
	case CS_OPS_CLEAR_LV:
		break;          /* not restricted to root */
	default:
		if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE) {
			return EPERM;
		}
		break;
	}

	pt = proc_find(pid);
	if (pt == PROC_NULL) {
		return ESRCH;
	}

	upid = pt->p_pid;
	uidversion = pt->p_idversion;
	if (uaudittoken != USER_ADDR_NULL) {
		error = copyin(uaudittoken, &token, sizeof(audit_token_t));
		if (error != 0) {
			goto out;
		}
		/* verify the audit token pid/idversion matches with proc */
		if ((token.val[5] != upid) || (token.val[7] != uidversion)) {
			error = ESRCH;
			goto out;
		}
	}

#if CONFIG_MACF
	switch (ops) {
	case CS_OPS_MARKINVALID:
	case CS_OPS_MARKHARD:
	case CS_OPS_MARKKILL:
	case CS_OPS_MARKRESTRICT:
	case CS_OPS_SET_STATUS:
	case CS_OPS_CLEARINSTALLER:
	case CS_OPS_CLEARPLATFORM:
	case CS_OPS_CLEAR_LV:
		if ((error = mac_proc_check_set_cs_info(current_proc(), pt, ops))) {
			goto out;
		}
		break;
	default:
		if ((error = mac_proc_check_get_cs_info(current_proc(), pt, ops))) {
			goto out;
		}
	}
#endif

	switch (ops) {
	case CS_OPS_STATUS: {
		uint32_t retflags;

		proc_lock(pt);
		retflags = pt->p_csflags;
		if (cs_process_enforcement(pt)) {
			retflags |= CS_ENFORCEMENT;
		}
		if (csproc_get_platform_binary(pt)) {
			retflags |= CS_PLATFORM_BINARY;
		}
		if (csproc_get_platform_path(pt)) {
			retflags |= CS_PLATFORM_PATH;
		}
2455 // Don't return CS_REQUIRE_LV if we turned it on with CS_FORCED_LV, but still report CS_FORCED_LV.
2456 if ((pt->p_csflags & CS_FORCED_LV) == CS_FORCED_LV) {
2457 retflags &= (~CS_REQUIRE_LV);
2458 }
2459 proc_unlock(pt);
2460
2461 if (uaddr != USER_ADDR_NULL) {
2462 error = copyout(&retflags, uaddr, sizeof(uint32_t));
2463 }
2464 break;
2465 }
2466 case CS_OPS_MARKINVALID:
2467 proc_lock(pt);
2468 if ((pt->p_csflags & CS_VALID) == CS_VALID) { /* is currently valid */
2469 pt->p_csflags &= ~CS_VALID; /* set invalid */
2470 cs_process_invalidated(pt);
2471 if ((pt->p_csflags & CS_KILL) == CS_KILL) {
2472 pt->p_csflags |= CS_KILLED;
2473 proc_unlock(pt);
2474 if (cs_debug) {
2475 printf("CODE SIGNING: marked invalid by pid %d: "
2476 "p=%d[%s] honoring CS_KILL, final status 0x%x\n",
2477 proc_selfpid(), pt->p_pid, pt->p_comm, pt->p_csflags);
2478 }
2479 psignal(pt, SIGKILL);
2480 } else {
2481 proc_unlock(pt);
2482 }
2483 } else {
2484 proc_unlock(pt);
2485 }
2486
2487 break;
2488
2489 case CS_OPS_MARKHARD:
2490 proc_lock(pt);
2491 pt->p_csflags |= CS_HARD;
2492 if ((pt->p_csflags & CS_VALID) == 0) {
2493 /* @@@ allow? reject? kill? @@@ */
2494 proc_unlock(pt);
2495 error = EINVAL;
2496 goto out;
2497 } else {
2498 proc_unlock(pt);
2499 }
2500 break;
2501
2502 case CS_OPS_MARKKILL:
2503 proc_lock(pt);
2504 pt->p_csflags |= CS_KILL;
2505 if ((pt->p_csflags & CS_VALID) == 0) {
2506 proc_unlock(pt);
2507 psignal(pt, SIGKILL);
2508 } else {
2509 proc_unlock(pt);
2510 }
2511 break;
2512
2513 case CS_OPS_PIDOFFSET:
2514 toff = pt->p_textoff;
2515 proc_rele(pt);
2516 error = copyout(&toff, uaddr, sizeof(toff));
2517 return error;
2518
2519 case CS_OPS_CDHASH:
2520
2521 /* pt already holds a reference on its p_textvp */
2522 tvp = pt->p_textvp;
2523 toff = pt->p_textoff;
2524
2525 if (tvp == NULLVP || usize != SHA1_RESULTLEN) {
2526 proc_rele(pt);
2527 return EINVAL;
2528 }
2529
2530 error = vn_getcdhash(tvp, toff, cdhash);
2531 proc_rele(pt);
2532
2533 if (error == 0) {
2534 error = copyout(cdhash, uaddr, sizeof(cdhash));
2535 }
2536
2537 return error;
2538
2539 case CS_OPS_ENTITLEMENTS_BLOB: {
2540 void *start;
2541 size_t length;
2542
2543 proc_lock(pt);
2544
2545 if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
2546 proc_unlock(pt);
2547 error = EINVAL;
2548 break;
2549 }
2550
2551 error = cs_entitlements_blob_get(pt, &start, &length);
2552 proc_unlock(pt);
2553 if (error) {
2554 break;
2555 }
2556
2557 error = csops_copy_token(start, length, usize, uaddr);
2558 break;
2559 }
2560 case CS_OPS_MARKRESTRICT:
2561 proc_lock(pt);
2562 pt->p_csflags |= CS_RESTRICT;
2563 proc_unlock(pt);
2564 break;
2565
2566 case CS_OPS_SET_STATUS: {
2567 uint32_t flags;
2568
2569 if (usize < sizeof(flags)) {
2570 error = ERANGE;
2571 break;
2572 }
2573
2574 error = copyin(uaddr, &flags, sizeof(flags));
2575 if (error) {
2576 break;
2577 }
2578
2579 /* only allow setting a subset of all code sign flags */
2580 flags &=
2581 CS_HARD | CS_EXEC_SET_HARD |
2582 CS_KILL | CS_EXEC_SET_KILL |
2583 CS_RESTRICT |
2584 CS_REQUIRE_LV |
2585 CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT;
2586
2587 proc_lock(pt);
2588 if (pt->p_csflags & CS_VALID) {
2589 if ((flags & CS_ENFORCEMENT) &&
2590 !(pt->p_csflags & CS_ENFORCEMENT)) {
2591 vm_map_cs_enforcement_set(get_task_map(pt->task), TRUE);
2592 }
2593 pt->p_csflags |= flags;
2594 } else {
2595 error = EINVAL;
2596 }
2597 proc_unlock(pt);
2598
2599 break;
2600 }
2601 case CS_OPS_CLEAR_LV: {
2602 /*
2603 * This option is used to remove library validation from
2604 * a running process. This is used in plugin architectures
2605 * when a program needs to load untrusted libraries. This
2606 * allows the process to maintain library validation as
2607 * long as possible, then drop it only when required.
2608 * Once a process has loaded the untrusted library,
2609 * relying on library validation in the future will
2610 * not be effective. An alternative is to re-exec
2611 * your application without library validation, or
2612 * fork an untrusted child.
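 * (An illustrative usage sketch follows this function.)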
2613 */
2614 #if !defined(XNU_TARGET_OS_OSX)
2615 // We only support dropping library validation on macOS
2616 error = ENOTSUP;
2617 #else
2618 /*
2619 * if we have the flag set, and the caller wants
2620 * to remove it, and they're entitled to, then
2621 * we remove it from the csflags
2622 *
2623 * NOTE: We are fine to poke into the task because
2624 * we get a ref to pt when we do the proc_find
2625 * at the beginning of this function.
2626 *
2627 * We also only allow altering ourselves.
2628 */
2629 if (forself == 1 && IOTaskHasEntitlement(pt->task, CLEAR_LV_ENTITLEMENT)) {
2630 proc_lock(pt);
2631 pt->p_csflags &= (~(CS_REQUIRE_LV | CS_FORCED_LV));
2632 proc_unlock(pt);
2633 error = 0;
2634 } else {
2635 error = EPERM;
2636 }
2637 #endif
2638 break;
2639 }
2640 case CS_OPS_BLOB: {
2641 void *start;
2642 size_t length;
2643
2644 proc_lock(pt);
2645 if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
2646 proc_unlock(pt);
2647 error = EINVAL;
2648 break;
2649 }
2650
2651 error = cs_blob_get(pt, &start, &length);
2652 proc_unlock(pt);
2653 if (error) {
2654 break;
2655 }
2656
2657 error = csops_copy_token(start, length, usize, uaddr);
2658 break;
2659 }
2660 case CS_OPS_IDENTITY:
2661 case CS_OPS_TEAMID: {
2662 const char *identity;
2663 uint8_t fakeheader[8];
2664 uint32_t idlen;
2665 size_t length;
2666
2667 /*
2668 * Make identity have a blob header to make it
2669 * easier on userland to guess the identity
2670 * length.
2671 */
2672 if (usize < sizeof(fakeheader)) {
2673 error = ERANGE;
2674 break;
2675 }
2676 memset(fakeheader, 0, sizeof(fakeheader));
2677
2678 proc_lock(pt);
2679 if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
2680 proc_unlock(pt);
2681 error = EINVAL;
2682 break;
2683 }
2684
2685 identity = ops == CS_OPS_TEAMID ? csproc_get_teamid(pt) : cs_identity_get(pt);
2686 proc_unlock(pt);
2687 if (identity == NULL) {
2688 error = ENOENT;
2689 break;
2690 }
2691
2692 length = strlen(identity) + 1; /* include NUL */
2693 idlen = htonl((uint32_t)(length + sizeof(fakeheader)));
2694 memcpy(&fakeheader[4], &idlen, sizeof(idlen));
2695
2696 error = copyout(fakeheader, uaddr, sizeof(fakeheader));
2697 if (error) {
2698 break;
2699 }
2700
2701 if (usize < sizeof(fakeheader) + length) {
2702 error = ERANGE;
2703 } else if (usize > sizeof(fakeheader)) {
2704 error = copyout(identity, uaddr + sizeof(fakeheader), length);
2705 }
2706
2707 break;
2708 }
2709
2710 case CS_OPS_CLEARINSTALLER:
2711 proc_lock(pt);
2712 pt->p_csflags &= ~(CS_INSTALLER | CS_DATAVAULT_CONTROLLER | CS_EXEC_INHERIT_SIP);
2713 proc_unlock(pt);
2714 break;
2715
2716 case CS_OPS_CLEARPLATFORM:
2717 #if DEVELOPMENT || DEBUG
2718 if (cs_process_global_enforcement()) {
2719 error = ENOTSUP;
2720 break;
2721 }
2722
2723 #if CONFIG_CSR
2724 if (csr_check(CSR_ALLOW_APPLE_INTERNAL) != 0) {
2725 error = ENOTSUP;
2726 break;
2727 }
2728 #endif
2729
2730 proc_lock(pt);
2731 pt->p_csflags &= ~(CS_PLATFORM_BINARY | CS_PLATFORM_PATH);
2732 csproc_clear_platform_binary(pt);
2733 proc_unlock(pt);
2734 break;
2735 #else
2736 error = ENOTSUP;
2737 break;
2738 #endif /* DEVELOPMENT || DEBUG */
2739
2740 default:
2741 error = EINVAL;
2742 break;
2743 }
2744 out:
2745 proc_rele(pt);
2746 return error;
2747 }
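/*
 * Editorial sketch (not part of the original source): a plugin host
 * holding CLEAR_LV_ENTITLEMENT might drop library validation on itself
 * right before loading an untrusted plugin, per the CS_OPS_CLEAR_LV
 * rationale above (path and ordering are hypothetical):
 *
 *	if (csops(0, CS_OPS_CLEAR_LV, NULL, 0) == 0) {
 *		void *handle = dlopen("untrusted_plugin.dylib", RTLD_NOW);
 *		// library validation cannot be meaningfully re-enabled now
 *	}
 */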
2748
2749 void
2750 proc_iterate(
2751 unsigned int flags,
2752 proc_iterate_fn_t callout,
2753 void *arg,
2754 proc_iterate_fn_t filterfn,
2755 void *filterarg)
2756 {
2757 pidlist_t pid_list, *pl = pidlist_init(&pid_list);
2758 u_int pid_count_available = 0;
2759
2760 assert(callout != NULL);
2761
2762 /* allocate outside of the proc_list_lock */
2763 for (;;) {
2764 proc_list_lock();
2765 pid_count_available = nprocs + 1; /* kernel_task not counted in nprocs */
2766 assert(pid_count_available > 0);
2767 if (pidlist_nalloc(pl) > pid_count_available) {
2768 break;
2769 }
2770 proc_list_unlock();
2771
2772 pidlist_alloc(pl, pid_count_available);
2773 }
2774 pidlist_set_active(pl);
2775
2776 /* filter pids into the pid_list */
2777
2778 u_int pid_count = 0;
2779 if (flags & PROC_ALLPROCLIST) {
2780 proc_t p;
2781 ALLPROC_FOREACH(p) {
2782 /* ignore processes that are being forked */
2783 if (p->p_stat == SIDL) {
2784 continue;
2785 }
2786 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
2787 continue;
2788 }
2789 pidlist_add_pid(pl, proc_pid(p));
2790 if (++pid_count >= pid_count_available) {
2791 break;
2792 }
2793 }
2794 }
2795
2796 if ((pid_count < pid_count_available) &&
2797 (flags & PROC_ZOMBPROCLIST)) {
2798 proc_t p;
2799 ZOMBPROC_FOREACH(p) {
2800 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
2801 continue;
2802 }
2803 pidlist_add_pid(pl, proc_pid(p));
2804 if (++pid_count >= pid_count_available) {
2805 break;
2806 }
2807 }
2808 }
2809
2810 proc_list_unlock();
2811
2812 /* call callout on processes in the pid_list */
2813
2814 const pidlist_entry_t *pe;
2815 SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
2816 for (u_int i = 0; i < pe->pe_nused; i++) {
2817 const pid_t pid = pe->pe_pid[i];
2818 proc_t p = proc_find(pid);
2819 if (p) {
2820 if ((flags & PROC_NOWAITTRANS) == 0) {
2821 proc_transwait(p, 0);
2822 }
2823 const int callout_ret = callout(p, arg);
2824
2825 switch (callout_ret) {
2826 case PROC_RETURNED_DONE:
2827 proc_rele(p);
2828 OS_FALLTHROUGH;
2829 case PROC_CLAIMED_DONE:
2830 goto out;
2831
2832 case PROC_RETURNED:
2833 proc_rele(p);
2834 OS_FALLTHROUGH;
2835 case PROC_CLAIMED:
2836 break;
2837 default:
2838 panic("%s: callout =%d for pid %d",
2839 __func__, callout_ret, pid);
2840 break;
2841 }
2842 } else if (flags & PROC_ZOMBPROCLIST) {
2843 p = proc_find_zombref(pid);
2844 if (!p) {
2845 continue;
2846 }
2847 const int callout_ret = callout(p, arg);
2848
2849 switch (callout_ret) {
2850 case PROC_RETURNED_DONE:
2851 proc_drop_zombref(p);
2852 OS_FALLTHROUGH;
2853 case PROC_CLAIMED_DONE:
2854 goto out;
2855
2856 case PROC_RETURNED:
2857 proc_drop_zombref(p);
2858 OS_FALLTHROUGH;
2859 case PROC_CLAIMED:
2860 break;
2861 default:
2862 panic("%s: callout =%d for zombie %d",
2863 __func__, callout_ret, pid);
2864 break;
2865 }
2866 }
2867 }
2868 }
2869 out:
2870 pidlist_free(pl);
2871 }
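/*
 * Editorial sketch (not part of the original source): a typical caller
 * pairs a filter, evaluated under the proc_list_lock, with a callout
 * that receives a referenced proc (hypothetical example):
 *
 *	static int
 *	count_filter(proc_t p, __unused void *arg)
 *	{
 *		return p->p_pid > 1;            // nonzero keeps the proc
 *	}
 *
 *	static int
 *	count_callout(__unused proc_t p, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return PROC_RETURNED;           // let the iterator drop the ref
 *	}
 *
 *	int count = 0;
 *	proc_iterate(PROC_ALLPROCLIST, count_callout, &count, count_filter, NULL);
 */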
2872
2873 void
2874 proc_rebootscan(
2875 proc_iterate_fn_t callout,
2876 void *arg,
2877 proc_iterate_fn_t filterfn,
2878 void *filterarg)
2879 {
2880 proc_t p;
2881
2882 assert(callout != NULL);
2883
2884 proc_shutdown_exitcount = 0;
2885
2886 restart_foreach:
2887
2888 proc_list_lock();
2889
2890 ALLPROC_FOREACH(p) {
2891 if ((filterfn != NULL) && filterfn(p, filterarg) == 0) {
2892 continue;
2893 }
2894 p = proc_ref_locked(p);
2895 if (!p) {
2896 continue;
2897 }
2898
2899 proc_list_unlock();
2900
2901 proc_transwait(p, 0);
2902 (void)callout(p, arg);
2903 proc_rele(p);
2904
2905 goto restart_foreach;
2906 }
2907
2908 proc_list_unlock();
2909 }
2910
2911 void
2912 proc_childrenwalk(
2913 proc_t parent,
2914 proc_iterate_fn_t callout,
2915 void *arg)
2916 {
2917 pidlist_t pid_list, *pl = pidlist_init(&pid_list);
2918 u_int pid_count_available = 0;
2919
2920 assert(parent != NULL);
2921 assert(callout != NULL);
2922
2923 for (;;) {
2924 proc_list_lock();
2925 pid_count_available = parent->p_childrencnt;
2926 if (pid_count_available == 0) {
2927 proc_list_unlock();
2928 goto out;
2929 }
2930 if (pidlist_nalloc(pl) > pid_count_available) {
2931 break;
2932 }
2933 proc_list_unlock();
2934
2935 pidlist_alloc(pl, pid_count_available);
2936 }
2937 pidlist_set_active(pl);
2938
2939 u_int pid_count = 0;
2940 proc_t p;
2941 PCHILDREN_FOREACH(parent, p) {
2942 if (p->p_stat == SIDL) {
2943 continue;
2944 }
2945 pidlist_add_pid(pl, proc_pid(p));
2946 if (++pid_count >= pid_count_available) {
2947 break;
2948 }
2949 }
2950
2951 proc_list_unlock();
2952
2953 const pidlist_entry_t *pe;
2954 SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
2955 for (u_int i = 0; i < pe->pe_nused; i++) {
2956 const pid_t pid = pe->pe_pid[i];
2957 p = proc_find(pid);
2958 if (!p) {
2959 continue;
2960 }
2961 const int callout_ret = callout(p, arg);
2962
2963 switch (callout_ret) {
2964 case PROC_RETURNED_DONE:
2965 proc_rele(p);
2966 OS_FALLTHROUGH;
2967 case PROC_CLAIMED_DONE:
2968 goto out;
2969
2970 case PROC_RETURNED:
2971 proc_rele(p);
2972 OS_FALLTHROUGH;
2973 case PROC_CLAIMED:
2974 break;
2975 default:
2976 panic("%s: callout =%d for pid %d",
2977 __func__, callout_ret, pid);
2978 break;
2979 }
2980 }
2981 }
2982 out:
2983 pidlist_free(pl);
2984 }
2985
2986 void
2987 pgrp_iterate(
2988 struct pgrp *pgrp,
2989 unsigned int flags,
2990 proc_iterate_fn_t callout,
2991 void * arg,
2992 proc_iterate_fn_t filterfn,
2993 void * filterarg)
2994 {
2995 pidlist_t pid_list, *pl = pidlist_init(&pid_list);
2996 u_int pid_count_available = 0;
2997
2998 assert(pgrp != NULL);
2999 assert(callout != NULL);
3000
3001 for (;;) {
3002 pgrp_lock(pgrp);
3003 pid_count_available = pgrp->pg_membercnt;
3004 if (pid_count_available == 0) {
3005 pgrp_unlock(pgrp);
3006 if (flags & PGRP_DROPREF) {
3007 pg_rele(pgrp);
3008 }
3009 goto out;
3010 }
3011 if (pidlist_nalloc(pl) > pid_count_available) {
3012 break;
3013 }
3014 pgrp_unlock(pgrp);
3015
3016 pidlist_alloc(pl, pid_count_available);
3017 }
3018 pidlist_set_active(pl);
3019
3020 const pid_t pgid = pgrp->pg_id;
3021 u_int pid_count = 0;
3022 proc_t p;
3023 PGMEMBERS_FOREACH(pgrp, p) {
3024 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
3025 continue;
3026 }
3027 pidlist_add_pid(pl, proc_pid(p));
3028 if (++pid_count >= pid_count_available) {
3029 break;
3030 }
3031 }
3032
3033 pgrp_unlock(pgrp);
3034
3035 if (flags & PGRP_DROPREF) {
3036 pg_rele(pgrp);
3037 }
3038
3039 const pidlist_entry_t *pe;
3040 SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
3041 for (u_int i = 0; i < pe->pe_nused; i++) {
3042 const pid_t pid = pe->pe_pid[i];
3043 if (0 == pid) {
3044 continue; /* skip kernproc */
3045 }
3046 p = proc_find(pid);
3047 if (!p) {
3048 continue;
3049 }
3050 if (p->p_pgrpid != pgid) {
3051 proc_rele(p);
3052 continue;
3053 }
3054 const int callout_ret = callout(p, arg);
3055
3056 switch (callout_ret) {
3057 case PROC_RETURNED:
3058 proc_rele(p);
3059 OS_FALLTHROUGH;
3060 case PROC_CLAIMED:
3061 break;
3062 case PROC_RETURNED_DONE:
3063 proc_rele(p);
3064 OS_FALLTHROUGH;
3065 case PROC_CLAIMED_DONE:
3066 goto out;
3067
3068 default:
3069 panic("%s: callout =%d for pid %d",
3070 __func__, callout_ret, pid);
3071 }
3072 }
3073 }
3074
3075 out:
3076 pidlist_free(pl);
3077 }
3078
3079 static void
3080 pgrp_add(struct pgrp * pgrp, struct proc * parent, struct proc * child)
3081 {
3082 proc_list_lock();
3083 child->p_pgrp = pgrp;
3084 child->p_pgrpid = pgrp->pg_id;
3085 child->p_sessionid = pgrp->pg_session->s_sid;
3086 child->p_listflag |= P_LIST_INPGRP;
3087 /*
3088 * While a pgrp is being freed, a process can still request
3089 * addition to it via setpgid() (e.g. from bash when a login
3090 * is terminated -- the login cycler); such requests return
3091 * ESRCH. Safe to hold the lock here due to the pgrp refcount.
3092 */
3093 if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
3094 pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
3095 }
3096
3097 if ((pgrp->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD) {
3098 panic("pgrp_add : pgrp is dead adding process");
3099 }
3100 proc_list_unlock();
3101
3102 pgrp_lock(pgrp);
3103 pgrp->pg_membercnt++;
3104 if (parent != PROC_NULL) {
3105 LIST_INSERT_AFTER(parent, child, p_pglist);
3106 } else {
3107 LIST_INSERT_HEAD(&pgrp->pg_members, child, p_pglist);
3108 }
3109 pgrp_unlock(pgrp);
3110
3111 proc_list_lock();
3112 if (((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (pgrp->pg_membercnt != 0)) {
3113 pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
3114 }
3115 proc_list_unlock();
3116 }
3117
3118 static void
3119 pgrp_remove(struct proc * p)
3120 {
3121 struct pgrp * pg;
3122
3123 pg = proc_pgrp(p);
3124
3125 proc_list_lock();
3126 #if __PROC_INTERNAL_DEBUG
3127 if ((p->p_listflag & P_LIST_INPGRP) == 0) {
3128 panic("removing from pglist but no named ref\n");
3129 }
3130 #endif
3131 p->p_pgrpid = PGRPID_DEAD;
3132 p->p_listflag &= ~P_LIST_INPGRP;
3133 p->p_pgrp = NULL;
3134 proc_list_unlock();
3135
3136 if (pg == PGRP_NULL) {
3137 panic("pgrp_remove: pg is NULL");
3138 }
3139 pgrp_lock(pg);
3140 pg->pg_membercnt--;
3141
3142 if (pg->pg_membercnt < 0) {
3143 panic("pgprp: -ve membercnt pgprp:%p p:%p\n", pg, p);
3144 }
3145
3146 LIST_REMOVE(p, p_pglist);
3147 if (pg->pg_members.lh_first == 0) {
3148 pgrp_unlock(pg);
3149 pgdelete_dropref(pg);
3150 } else {
3151 pgrp_unlock(pg);
3152 pg_rele(pg);
3153 }
3154 }
3155
3156
3157 /* cannot use proc_pgrp as it may be stalled */
3158 static void
3159 pgrp_replace(struct proc * p, struct pgrp * newpg)
3160 {
3161 struct pgrp * oldpg;
3162
3163
3164
3165 proc_list_lock();
3166
3167 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
3168 p->p_listflag |= P_LIST_PGRPTRWAIT;
3169 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
3170 }
3171
3172 p->p_listflag |= P_LIST_PGRPTRANS;
3173
3174 oldpg = p->p_pgrp;
3175 if (oldpg == PGRP_NULL) {
3176 panic("pgrp_replace: oldpg NULL");
3177 }
3178 oldpg->pg_refcount++;
3179 #if __PROC_INTERNAL_DEBUG
3180 if ((p->p_listflag & P_LIST_INPGRP) == 0) {
3181 panic("removing from pglist but no named ref\n");
3182 }
3183 #endif
3184 p->p_pgrpid = PGRPID_DEAD;
3185 p->p_listflag &= ~P_LIST_INPGRP;
3186 p->p_pgrp = NULL;
3187
3188 proc_list_unlock();
3189
3190 pgrp_lock(oldpg);
3191 oldpg->pg_membercnt--;
3192 if (oldpg->pg_membercnt < 0) {
3193 panic("pgprp: -ve membercnt pgprp:%p p:%p\n", oldpg, p);
3194 }
3195 LIST_REMOVE(p, p_pglist);
3196 if (oldpg->pg_members.lh_first == 0) {
3197 pgrp_unlock(oldpg);
3198 pgdelete_dropref(oldpg);
3199 } else {
3200 pgrp_unlock(oldpg);
3201 pg_rele(oldpg);
3202 }
3203
3204 proc_list_lock();
3205 p->p_pgrp = newpg;
3206 p->p_pgrpid = newpg->pg_id;
3207 p->p_sessionid = newpg->pg_session->s_sid;
3208 p->p_listflag |= P_LIST_INPGRP;
3209 /*
3210 * While a pgrp is being freed, a process can still request
3211 * addition to it via setpgid() (e.g. from bash when a login
3212 * is terminated -- the login cycler); such requests return
3213 * ESRCH. Safe to hold the lock here due to the pgrp refcount.
3214 */
3215 if ((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
3216 newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
3217 }
3218
3219 if ((newpg->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD) {
3220 panic("pgrp_add : pgrp is dead adding process");
3221 }
3222 proc_list_unlock();
3223
3224 pgrp_lock(newpg);
3225 newpg->pg_membercnt++;
3226 LIST_INSERT_HEAD(&newpg->pg_members, p, p_pglist);
3227 pgrp_unlock(newpg);
3228
3229 proc_list_lock();
3230 if (((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (newpg->pg_membercnt != 0)) {
3231 newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
3232 }
3233
3234 p->p_listflag &= ~P_LIST_PGRPTRANS;
3235 if ((p->p_listflag & P_LIST_PGRPTRWAIT) == P_LIST_PGRPTRWAIT) {
3236 p->p_listflag &= ~P_LIST_PGRPTRWAIT;
3237 wakeup(&p->p_pgrpid);
3238 }
3239 proc_list_unlock();
3240 }
3241
3242 void
3243 pgrp_lock(struct pgrp * pgrp)
3244 {
3245 lck_mtx_lock(&pgrp->pg_mlock);
3246 }
3247
3248 void
3249 pgrp_unlock(struct pgrp * pgrp)
3250 {
3251 lck_mtx_unlock(&pgrp->pg_mlock);
3252 }
3253
3254 void
3255 session_lock(struct session * sess)
3256 {
3257 lck_mtx_lock(&sess->s_mlock);
3258 }
3259
3260
3261 void
3262 session_unlock(struct session * sess)
3263 {
3264 lck_mtx_unlock(&sess->s_mlock);
3265 }
3266
3267 struct pgrp *
3268 proc_pgrp(proc_t p)
3269 {
3270 struct pgrp * pgrp;
3271
3272 if (p == PROC_NULL) {
3273 return PGRP_NULL;
3274 }
3275 proc_list_lock();
3276
3277 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
3278 p->p_listflag |= P_LIST_PGRPTRWAIT;
3279 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
3280 }
3281
3282 pgrp = p->p_pgrp;
3283
3284 assert(pgrp != NULL);
3285
3286 if (pgrp != PGRP_NULL) {
3287 pgrp->pg_refcount++;
3288 if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) != 0) {
3289 panic("proc_pgrp: ref being povided for dead pgrp");
3290 }
3291 }
3292
3293 proc_list_unlock();
3294
3295 return pgrp;
3296 }
3297
3298 struct pgrp *
3299 tty_pgrp(struct tty * tp)
3300 {
3301 struct pgrp * pg = PGRP_NULL;
3302
3303 proc_list_lock();
3304 pg = tp->t_pgrp;
3305
3306 if (pg != PGRP_NULL) {
3307 if ((pg->pg_listflags & PGRP_FLAG_DEAD) != 0) {
3308 panic("tty_pgrp: ref being povided for dead pgrp");
3309 }
3310 pg->pg_refcount++;
3311 }
3312 proc_list_unlock();
3313
3314 return pg;
3315 }
3316
3317 struct session *
3318 proc_session(proc_t p)
3319 {
3320 struct session * sess = SESSION_NULL;
3321
3322 if (p == PROC_NULL) {
3323 return SESSION_NULL;
3324 }
3325
3326 proc_list_lock();
3327
3328 /* wait during transitions */
3329 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
3330 p->p_listflag |= P_LIST_PGRPTRWAIT;
3331 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
3332 }
3333
3334 if ((p->p_pgrp != PGRP_NULL) && ((sess = p->p_pgrp->pg_session) != SESSION_NULL)) {
3335 if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
3336 panic("proc_session:returning sesssion ref on terminating session");
3337 }
3338 sess->s_count++;
3339 }
3340 proc_list_unlock();
3341 return sess;
3342 }
3343
3344 void
3345 session_rele(struct session *sess)
3346 {
3347 proc_list_lock();
3348 if (--sess->s_count == 0) {
3349 if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
3350 panic("session_rele: terminating already terminated session");
3351 }
3352 sess->s_listflags |= S_LIST_TERM;
3353 LIST_REMOVE(sess, s_hash);
3354 sess->s_listflags |= S_LIST_DEAD;
3355 if (sess->s_count != 0) {
3356 panic("session_rele: freeing session in use");
3357 }
3358 proc_list_unlock();
3359 lck_mtx_destroy(&sess->s_mlock, proc_mlock_grp);
3360 zfree(session_zone, sess);
3361 } else {
3362 proc_list_unlock();
3363 }
3364 }
3365
3366 int
3367 proc_transstart(proc_t p, int locked, int non_blocking)
3368 {
3369 if (locked == 0) {
3370 proc_lock(p);
3371 }
3372 while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
3373 if (((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT) || non_blocking) {
3374 if (locked == 0) {
3375 proc_unlock(p);
3376 }
3377 return EDEADLK;
3378 }
3379 p->p_lflag |= P_LTRANSWAIT;
3380 msleep(&p->p_lflag, &p->p_mlock, 0, "proc_transstart", NULL);
3381 }
3382 p->p_lflag |= P_LINTRANSIT;
3383 p->p_transholder = current_thread();
3384 if (locked == 0) {
3385 proc_unlock(p);
3386 }
3387 return 0;
3388 }
3389
3390 void
3391 proc_transcommit(proc_t p, int locked)
3392 {
3393 if (locked == 0) {
3394 proc_lock(p);
3395 }
3396
3397 assert((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT);
3398 assert(p->p_transholder == current_thread());
3399 p->p_lflag |= P_LTRANSCOMMIT;
3400
3401 if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
3402 p->p_lflag &= ~P_LTRANSWAIT;
3403 wakeup(&p->p_lflag);
3404 }
3405 if (locked == 0) {
3406 proc_unlock(p);
3407 }
3408 }
3409
3410 void
3411 proc_transend(proc_t p, int locked)
3412 {
3413 if (locked == 0) {
3414 proc_lock(p);
3415 }
3416
3417 p->p_lflag &= ~(P_LINTRANSIT | P_LTRANSCOMMIT);
3418 p->p_transholder = NULL;
3419
3420 if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
3421 p->p_lflag &= ~P_LTRANSWAIT;
3422 wakeup(&p->p_lflag);
3423 }
3424 if (locked == 0) {
3425 proc_unlock(p);
3426 }
3427 }
3428
3429 int
3430 proc_transwait(proc_t p, int locked)
3431 {
3432 if (locked == 0) {
3433 proc_lock(p);
3434 }
3435 while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
3436 if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT && current_proc() == p) {
3437 if (locked == 0) {
3438 proc_unlock(p);
3439 }
3440 return EDEADLK;
3441 }
3442 p->p_lflag |= P_LTRANSWAIT;
3443 msleep(&p->p_lflag, &p->p_mlock, 0, "proc_transwait", NULL);
3444 }
3445 if (locked == 0) {
3446 proc_unlock(p);
3447 }
3448 return 0;
3449 }
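/*
 * Editorial sketch (not part of the original source): the four routines
 * above bracket process transitions (e.g. exec) so that other operations
 * cannot interleave; a hypothetical user:
 *
 *	if (proc_transstart(p, 0, 0) == 0) {    // enter, proc unlocked
 *		// ... prepare the transition ...
 *		proc_transcommit(p, 0);         // past the point of no return
 *		// ... complete the transition ...
 *		proc_transend(p, 0);            // wake proc_transwait() sleepers
 *	}
 */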
3450
3451 void
3452 proc_klist_lock(void)
3453 {
3454 lck_mtx_lock(proc_klist_mlock);
3455 }
3456
3457 void
3458 proc_klist_unlock(void)
3459 {
3460 lck_mtx_unlock(proc_klist_mlock);
3461 }
3462
3463 void
3464 proc_knote(struct proc * p, long hint)
3465 {
3466 proc_klist_lock();
3467 KNOTE(&p->p_klist, hint);
3468 proc_klist_unlock();
3469 }
3470
3471 void
3472 proc_knote_drain(struct proc *p)
3473 {
3474 struct knote *kn = NULL;
3475
3476 /*
3477 * Clear the proc's klist to avoid references after the proc is reaped.
3478 */
3479 proc_klist_lock();
3480 while ((kn = SLIST_FIRST(&p->p_klist))) {
3481 kn->kn_proc = PROC_NULL;
3482 KNOTE_DETACH(&p->p_klist, kn);
3483 }
3484 proc_klist_unlock();
3485 }
3486
3487 void
3488 proc_setregister(proc_t p)
3489 {
3490 proc_lock(p);
3491 p->p_lflag |= P_LREGISTER;
3492 proc_unlock(p);
3493 }
3494
3495 void
3496 proc_resetregister(proc_t p)
3497 {
3498 proc_lock(p);
3499 p->p_lflag &= ~P_LREGISTER;
3500 proc_unlock(p);
3501 }
3502
3503 pid_t
3504 proc_pgrpid(proc_t p)
3505 {
3506 return p->p_pgrpid;
3507 }
3508
3509 pid_t
3510 proc_sessionid(proc_t p)
3511 {
3512 return p->p_sessionid;
3513 }
3514
3515 pid_t
3516 proc_selfpgrpid(void)
3517 {
3518 return current_proc()->p_pgrpid;
3519 }
3520
3521
3522 /* return control and action states */
3523 int
3524 proc_getpcontrol(int pid, int * pcontrolp)
3525 {
3526 proc_t p;
3527
3528 p = proc_find(pid);
3529 if (p == PROC_NULL) {
3530 return ESRCH;
3531 }
3532 if (pcontrolp != NULL) {
3533 *pcontrolp = p->p_pcaction;
3534 }
3535
3536 proc_rele(p);
3537 return 0;
3538 }
3539
3540 int
3541 proc_dopcontrol(proc_t p)
3542 {
3543 int pcontrol;
3544 os_reason_t kill_reason;
3545
3546 proc_lock(p);
3547
3548 pcontrol = PROC_CONTROL_STATE(p);
3549
3550 if (PROC_ACTION_STATE(p) == 0) {
3551 switch (pcontrol) {
3552 case P_PCTHROTTLE:
3553 PROC_SETACTION_STATE(p);
3554 proc_unlock(p);
3555 printf("low swap: throttling pid %d (%s)\n", p->p_pid, p->p_comm);
3556 break;
3557
3558 case P_PCSUSP:
3559 PROC_SETACTION_STATE(p);
3560 proc_unlock(p);
3561 printf("low swap: suspending pid %d (%s)\n", p->p_pid, p->p_comm);
3562 task_suspend(p->task);
3563 break;
3564
3565 case P_PCKILL:
3566 PROC_SETACTION_STATE(p);
3567 proc_unlock(p);
3568 printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
3569 kill_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_LOWSWAP);
3570 psignal_with_reason(p, SIGKILL, kill_reason);
3571 break;
3572
3573 default:
3574 proc_unlock(p);
3575 }
3576 } else {
3577 proc_unlock(p);
3578 }
3579
3580 return PROC_RETURNED;
3581 }
3582
3583
3584 /*
3585 * Resume a throttled or suspended process. This is an internal interface that's only
3586 * used by the user level code that presents the GUI when we run out of swap space and
3587 * hence is restricted to processes with superuser privileges.
3588 */
3589
3590 int
3591 proc_resetpcontrol(int pid)
3592 {
3593 proc_t p;
3594 int pcontrol;
3595 int error;
3596 proc_t self = current_proc();
3597
3598 /* allow if the process has been validated to handle resource control, or if the caller is superuser */
3599 if (((self->p_lflag & P_LVMRSRCOWNER) == 0) && (error = suser(kauth_cred_get(), 0))) {
3600 return error;
3601 }
3602
3603 p = proc_find(pid);
3604 if (p == PROC_NULL) {
3605 return ESRCH;
3606 }
3607
3608 proc_lock(p);
3609
3610 pcontrol = PROC_CONTROL_STATE(p);
3611
3612 if (PROC_ACTION_STATE(p) != 0) {
3613 switch (pcontrol) {
3614 case P_PCTHROTTLE:
3615 PROC_RESETACTION_STATE(p);
3616 proc_unlock(p);
3617 printf("low swap: unthrottling pid %d (%s)\n", p->p_pid, p->p_comm);
3618 break;
3619
3620 case P_PCSUSP:
3621 PROC_RESETACTION_STATE(p);
3622 proc_unlock(p);
3623 printf("low swap: resuming pid %d (%s)\n", p->p_pid, p->p_comm);
3624 task_resume(p->task);
3625 break;
3626
3627 case P_PCKILL:
3628 /* Huh? */
3629 PROC_SETACTION_STATE(p);
3630 proc_unlock(p);
3631 printf("low swap: attempt to unkill pid %d (%s) ignored\n", p->p_pid, p->p_comm);
3632 break;
3633
3634 default:
3635 proc_unlock(p);
3636 }
3637 } else {
3638 proc_unlock(p);
3639 }
3640
3641 proc_rele(p);
3642 return 0;
3643 }
3644
3645
3646
3647 struct no_paging_space {
3648 uint64_t pcs_max_size;
3649 uint64_t pcs_uniqueid;
3650 int pcs_pid;
3651 int pcs_proc_count;
3652 uint64_t pcs_total_size;
3653
3654 uint64_t npcs_max_size;
3655 uint64_t npcs_uniqueid;
3656 int npcs_pid;
3657 int npcs_proc_count;
3658 uint64_t npcs_total_size;
3659
3660 int apcs_proc_count;
3661 uint64_t apcs_total_size;
3662 };
3663
3664
3665 static int
3666 proc_pcontrol_filter(proc_t p, void *arg)
3667 {
3668 struct no_paging_space *nps;
3669 uint64_t compressed;
3670
3671 nps = (struct no_paging_space *)arg;
3672
3673 compressed = get_task_compressed(p->task);
3674
3675 if (PROC_CONTROL_STATE(p)) {
3676 if (PROC_ACTION_STATE(p) == 0) {
3677 if (compressed > nps->pcs_max_size) {
3678 nps->pcs_pid = p->p_pid;
3679 nps->pcs_uniqueid = p->p_uniqueid;
3680 nps->pcs_max_size = compressed;
3681 }
3682 nps->pcs_total_size += compressed;
3683 nps->pcs_proc_count++;
3684 } else {
3685 nps->apcs_total_size += compressed;
3686 nps->apcs_proc_count++;
3687 }
3688 } else {
3689 if (compressed > nps->npcs_max_size) {
3690 nps->npcs_pid = p->p_pid;
3691 nps->npcs_uniqueid = p->p_uniqueid;
3692 nps->npcs_max_size = compressed;
3693 }
3694 nps->npcs_total_size += compressed;
3695 nps->npcs_proc_count++;
3696 }
3697 return 0;
3698 }
3699
3700
3701 static int
3702 proc_pcontrol_null(__unused proc_t p, __unused void *arg)
3703 {
3704 return PROC_RETURNED;
3705 }
3706
3707
3708 /*
3709 * Deal with the low on compressor pool space condition... this function
3710 * gets called when we are approaching the limits of the compressor pool or
3711 * we are unable to create a new swap file.
3712 * Since this eventually creates a memory deadlock situation, we need to take action to free up
3713 * memory resources (both compressed and uncompressed) in order to prevent the system from hanging completely.
3714 * There are 2 categories of processes to deal with. Those that have an action
3715 * associated with them by the task itself and those that do not. Actionable
3716 * tasks can have one of three categories specified: ones that
3717 * can be killed immediately, ones that should be suspended, and ones that should
3718 * be throttled. Processes that do not have an action associated with them are normally
3719 * ignored unless they are utilizing such a large percentage of the compressor pool (currently 50%)
3720 * that only by killing them can we hope to put the system back into a usable state.
3721 */
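/*
 * Editorial example: if vm_compressor_pages_compressed() reports
 * 1,000,000 compressed pages system-wide, a non-actionable process
 * becomes "dangerously big" once it alone holds more than
 * (1000000 * 50) / 100 == 500,000 of them, and no_paging_space_action()
 * below kills it outright instead of throttling or suspending.
 */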
3722
3723 #define NO_PAGING_SPACE_DEBUG 0
3724
3725 extern uint64_t vm_compressor_pages_compressed(void);
3726
3727 struct timeval last_no_space_action = {.tv_sec = 0, .tv_usec = 0};
3728
3729 #define MB_SIZE (1024 * 1024ULL)
3730 boolean_t memorystatus_kill_on_VM_compressor_space_shortage(boolean_t);
3731
3732 extern int32_t max_kill_priority;
3733 extern int memorystatus_get_proccnt_upto_priority(int32_t max_bucket_index);
3734
3735 int
3736 no_paging_space_action()
3737 {
3738 proc_t p;
3739 struct no_paging_space nps;
3740 struct timeval now;
3741 os_reason_t kill_reason;
3742
3743 /*
3744 * Throttle how often we come through here. Once every 5 seconds should be plenty.
3745 */
3746 microtime(&now);
3747
3748 if (now.tv_sec <= last_no_space_action.tv_sec + 5) {
3749 return 0;
3750 }
3751
3752 /*
3753 * Examine all processes and find the biggest (biggest is based on the number of pages this
3754 * task has in the compressor pool) that has been marked to have some action
3755 * taken when swap space runs out... we also find the biggest that hasn't been marked for
3756 * action.
3757 *
3758 * If the biggest non-actionable task is over the "dangerously big" threshold (currently 50% of
3759 * the total number of pages held by the compressor), we go ahead and kill it since no other task
3760 * can have any real effect on the situation. Otherwise, we go after the actionable process.
3761 */
3762 bzero(&nps, sizeof(nps));
3763
3764 proc_iterate(PROC_ALLPROCLIST, proc_pcontrol_null, (void *)NULL, proc_pcontrol_filter, (void *)&nps);
3765
3766 #if NO_PAGING_SPACE_DEBUG
3767 printf("low swap: npcs_proc_count = %d, npcs_total_size = %qd, npcs_max_size = %qd\n",
3768 nps.npcs_proc_count, nps.npcs_total_size, nps.npcs_max_size);
3769 printf("low swap: pcs_proc_count = %d, pcs_total_size = %qd, pcs_max_size = %qd\n",
3770 nps.pcs_proc_count, nps.pcs_total_size, nps.pcs_max_size);
3771 printf("low swap: apcs_proc_count = %d, apcs_total_size = %qd\n",
3772 nps.apcs_proc_count, nps.apcs_total_size);
3773 #endif
3774 if (nps.npcs_max_size > (vm_compressor_pages_compressed() * 50) / 100) {
3775 /*
3776 * for now we'll knock out any task that has more than 50% of the pages
3777 * held by the compressor
3778 */
3779 if ((p = proc_find(nps.npcs_pid)) != PROC_NULL) {
3780 if (nps.npcs_uniqueid == p->p_uniqueid) {
3781 /*
3782 * verify this is still the same process
3783 * in case the proc exited and the pid got reused while
3784 * we were finishing the proc_iterate and getting to this point
3785 */
3786 last_no_space_action = now;
3787
3788 printf("low swap: killing largest compressed process with pid %d (%s) and size %llu MB\n", p->p_pid, p->p_comm, (nps.pcs_max_size / MB_SIZE));
3789 kill_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_LOWSWAP);
3790 psignal_with_reason(p, SIGKILL, kill_reason);
3791
3792 proc_rele(p);
3793
3794 return 0;
3795 }
3796
3797 proc_rele(p);
3798 }
3799 }
3800
3801 /*
3802 * We have some processes within our jetsam bands of consideration, which can therefore be killed.
3803 * So we will invoke the memorystatus thread to go ahead and kill something.
3804 */
3805 if (memorystatus_get_proccnt_upto_priority(max_kill_priority) > 0) {
3806 last_no_space_action = now;
3807 memorystatus_kill_on_VM_compressor_space_shortage(TRUE /* async */);
3808 return 1;
3809 }
3810
3811 /*
3812 * No eligible processes to kill. So let's suspend/kill the largest
3813 * process depending on its policy control specifications.
3814 */
3815
3816 if (nps.pcs_max_size > 0) {
3817 if ((p = proc_find(nps.pcs_pid)) != PROC_NULL) {
3818 if (nps.pcs_uniqueid == p->p_uniqueid) {
3819 /*
3820 * verify this is still the same process
3821 * in case the proc exited and the pid got reused while
3822 * we were finishing the proc_iterate and getting to this point
3823 */
3824 last_no_space_action = now;
3825
3826 proc_dopcontrol(p);
3827
3828 proc_rele(p);
3829
3830 return 1;
3831 }
3832
3833 proc_rele(p);
3834 }
3835 }
3836 last_no_space_action = now;
3837
3838 printf("low swap: unable to find any eligible processes to take action on\n");
3839
3840 return 0;
3841 }
3842
3843 int
3844 proc_trace_log(__unused proc_t p, struct proc_trace_log_args *uap, __unused int *retval)
3845 {
3846 int ret = 0;
3847 proc_t target_proc = PROC_NULL;
3848 pid_t target_pid = uap->pid;
3849 uint64_t target_uniqueid = uap->uniqueid;
3850 task_t target_task = NULL;
3851
3852 if (priv_check_cred(kauth_cred_get(), PRIV_PROC_TRACE_INSPECT, 0)) {
3853 ret = EPERM;
3854 goto out;
3855 }
3856 target_proc = proc_find(target_pid);
3857 if (target_proc != PROC_NULL) {
3858 if (target_uniqueid != proc_uniqueid(target_proc)) {
3859 ret = ENOENT;
3860 goto out;
3861 }
3862
3863 target_task = proc_task(target_proc);
3864 if (task_send_trace_memory(target_task, target_pid, target_uniqueid)) {
3865 ret = EINVAL;
3866 goto out;
3867 }
3868 } else {
3869 ret = ENOENT;
3870 }
3871
3872 out:
3873 if (target_proc != PROC_NULL) {
3874 proc_rele(target_proc);
3875 }
3876 return ret;
3877 }
3878
3879 #if VM_SCAN_FOR_SHADOW_CHAIN
3880 extern int vm_map_shadow_max(vm_map_t map);
3881 int proc_shadow_max(void);
3882 int
3883 proc_shadow_max(void)
3884 {
3885 int retval, max;
3886 proc_t p;
3887 task_t task;
3888 vm_map_t map;
3889
3890 max = 0;
3891 proc_list_lock();
3892 for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
3893 if (p->p_stat == SIDL) {
3894 continue;
3895 }
3896 task = p->task;
3897 if (task == NULL) {
3898 continue;
3899 }
3900 map = get_task_map(task);
3901 if (map == NULL) {
3902 continue;
3903 }
3904 retval = vm_map_shadow_max(map);
3905 if (retval > max) {
3906 max = retval;
3907 }
3908 }
3909 proc_list_unlock();
3910 return max;
3911 }
3912 #endif /* VM_SCAN_FOR_SHADOW_CHAIN */
3913
3914 void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid);
3915 void
3916 proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid)
3917 {
3918 if (target_proc != NULL) {
3919 target_proc->p_responsible_pid = responsible_pid;
3920 }
3921 return;
3922 }
3923
3924 int
3925 proc_chrooted(proc_t p)
3926 {
3927 int retval = 0;
3928
3929 if (p) {
3930 proc_fdlock(p);
3931 retval = (p->p_fd->fd_rdir != NULL) ? 1 : 0;
3932 proc_fdunlock(p);
3933 }
3934
3935 return retval;
3936 }
3937
3938 boolean_t
3939 proc_send_synchronous_EXC_RESOURCE(proc_t p)
3940 {
3941 if (p == PROC_NULL) {
3942 return FALSE;
3943 }
3944
3945 /* Send sync EXC_RESOURCE if the process is traced */
3946 if (ISSET(p->p_lflag, P_LTRACED)) {
3947 return TRUE;
3948 }
3949 return FALSE;
3950 }
3951
3952 #if CONFIG_MACF
3953 size_t
3954 proc_get_syscall_filter_mask_size(int which)
3955 {
3956 switch (which) {
3957 case SYSCALL_MASK_UNIX:
3958 return nsysent;
3959 case SYSCALL_MASK_MACH:
3960 return mach_trap_count;
3961 case SYSCALL_MASK_KOBJ:
3962 return mach_kobj_count;
3963 default:
3964 return 0;
3965 }
3966 }
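/*
 * Editorial sketch (not part of the original source): a MAC policy could
 * size and install a UNIX syscall filter mask along these lines
 * (allocation strategy and mask semantics are assumptions here):
 *
 *	size_t len = proc_get_syscall_filter_mask_size(SYSCALL_MASK_UNIX);
 *	unsigned char *mask = kheap_alloc(KHEAP_DATA_BUFFERS, len,
 *	    Z_WAITOK | Z_ZERO);
 *	if (mask != NULL) {
 *		mask[SYS_read] = 1;     // hypothetical: flag read(2) in the mask
 *		proc_set_syscall_filter_mask(p, SYSCALL_MASK_UNIX, mask, len);
 *	}
 */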
3967
3968 int
3969 proc_set_syscall_filter_mask(proc_t p, int which, unsigned char *maskptr, size_t masklen)
3970 {
3971 #if DEVELOPMENT || DEBUG
3972 if (syscallfilter_disable) {
3973 printf("proc_set_syscall_filter_mask: attempt to set policy for pid %d, but disabled by boot-arg\n", proc_pid(p));
3974 return 0;
3975 }
3976 #endif // DEVELOPMENT || DEBUG
3977
3978 switch (which) {
3979 case SYSCALL_MASK_UNIX:
3980 if (maskptr != NULL && masklen != nsysent) {
3981 return EINVAL;
3982 }
3983 p->syscall_filter_mask = maskptr;
3984 break;
3985 case SYSCALL_MASK_MACH:
3986 if (maskptr != NULL && masklen != (size_t)mach_trap_count) {
3987 return EINVAL;
3988 }
3989 mac_task_set_mach_filter_mask(p->task, maskptr);
3990 break;
3991 case SYSCALL_MASK_KOBJ:
3992 if (maskptr != NULL && masklen != (size_t)mach_kobj_count) {
3993 return EINVAL;
3994 }
3995 mac_task_set_kobj_filter_mask(p->task, maskptr);
3996 break;
3997 default:
3998 return EINVAL;
3999 }
4000
4001 return 0;
4002 }
4003
4004 int
4005 proc_set_syscall_filter_callbacks(syscall_filter_cbs_t cbs)
4006 {
4007 if (cbs->version != SYSCALL_FILTER_CALLBACK_VERSION) {
4008 return EINVAL;
4009 }
4010
4011 /* XXX register unix filter callback instead of using MACF hook. */
4012
4013 if (cbs->mach_filter_cbfunc || cbs->kobj_filter_cbfunc) {
4014 if (mac_task_register_filter_callbacks(cbs->mach_filter_cbfunc,
4015 cbs->kobj_filter_cbfunc) != 0) {
4016 return EPERM;
4017 }
4018 }
4019
4020 return 0;
4021 }
4022
4023 int
4024 proc_set_syscall_filter_index(int which, int num, int index)
4025 {
4026 switch (which) {
4027 case SYSCALL_MASK_KOBJ:
4028 if (ipc_kobject_set_kobjidx(num, index) != 0) {
4029 return ENOENT;
4030 }
4031 break;
4032 default:
4033 return EINVAL;
4034 }
4035
4036 return 0;
4037 }
4038 #endif /* CONFIG_MACF */
4039
4040 int
4041 proc_set_filter_message_flag(proc_t p, boolean_t flag)
4042 {
4043 if (p == PROC_NULL) {
4044 return EINVAL;
4045 }
4046
4047 task_set_filter_msg_flag(proc_task(p), flag);
4048
4049 return 0;
4050 }
4051
4052 int
4053 proc_get_filter_message_flag(proc_t p, boolean_t *flag)
4054 {
4055 if (p == PROC_NULL || flag == NULL) {
4056 return EINVAL;
4057 }
4058
4059 *flag = task_get_filter_msg_flag(proc_task(p));
4060
4061 return 0;
4062 }
4063
4064 bool
4065 proc_is_traced(proc_t p)
4066 {
4067 bool ret = false;
4068 assert(p != PROC_NULL);
4069 proc_lock(p);
4070 if (p->p_lflag & P_LTRACED) {
4071 ret = true;
4072 }
4073 proc_unlock(p);
4074 return ret;
4075 }
4076
4077 #ifdef CONFIG_32BIT_TELEMETRY
4078 void
4079 proc_log_32bit_telemetry(proc_t p)
4080 {
4081 /* Gather info */
4082 char signature_buf[MAX_32BIT_EXEC_SIG_SIZE] = { 0 };
4083 char * signature_cur_end = &signature_buf[0];
4084 char * signature_buf_end = &signature_buf[MAX_32BIT_EXEC_SIG_SIZE - 1];
4085 int bytes_printed = 0;
4086
4087 const char * teamid = NULL;
4088 const char * identity = NULL;
4089 struct cs_blob * csblob = NULL;
4090
4091 proc_list_lock();
4092
4093 /*
4094 * Get proc name and parent proc name; if the parent execs, we'll get a
4095 * garbled name.
4096 */
4097 bytes_printed = scnprintf(signature_cur_end,
4098 signature_buf_end - signature_cur_end,
4099 "%s,%s,", p->p_name,
4100 (p->p_pptr ? p->p_pptr->p_name : ""));
4101
4102 if (bytes_printed > 0) {
4103 signature_cur_end += bytes_printed;
4104 }
4105
4106 proc_list_unlock();
4107
4108 /* Get developer info. */
4109 vnode_t v = proc_getexecutablevnode(p);
4110
4111 if (v) {
4112 csblob = csvnode_get_blob(v, 0);
4113
4114 if (csblob) {
4115 teamid = csblob_get_teamid(csblob);
4116 identity = csblob_get_identity(csblob);
4117 }
4118 }
4119
4120 if (teamid == NULL) {
4121 teamid = "";
4122 }
4123
4124 if (identity == NULL) {
4125 identity = "";
4126 }
4127
4128 bytes_printed = scnprintf(signature_cur_end,
4129 signature_buf_end - signature_cur_end,
4130 "%s,%s", teamid, identity);
4131
4132 if (bytes_printed > 0) {
4133 signature_cur_end += bytes_printed;
4134 }
4135
4136 if (v) {
4137 vnode_put(v);
4138 }
4139
4140 /*
4141 * We may want to rate limit here, although the SUMMARIZE key should
4142 * help us aggregate events in userspace.
4143 */
4144
4145 /* Emit log */
4146 kern_asl_msg(LOG_DEBUG, "messagetracer", 3,
4147 /* 0 */ "com.apple.message.domain", "com.apple.kernel.32bit_exec",
4148 /* 1 */ "com.apple.message.signature", signature_buf,
4149 /* 2 */ "com.apple.message.summarize", "YES",
4150 NULL);
4151 }
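/*
 * Editorial example: for a 32-bit process "OldGame" spawned by "launchd"
 * and signed with team "ABCDE12345" as identity "com.example.oldgame"
 * (hypothetical values), the emitted signature would read
 * "OldGame,launchd,ABCDE12345,com.example.oldgame".
 */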
4152 #endif /* CONFIG_32BIT_TELEMETRY */