1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)kern_proc.c 8.4 (Berkeley) 1/4/94
62 */
63 /*
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
67 * Version 2.0.
68 */
69 /* HISTORY
70 * 04-Aug-97 Umesh Vaishampayan (umeshv@apple.com)
71 * Added current_proc_EXTERNAL() function for the use of kernel
72 * loadable modules.
73 *
74 * 05-Jun-95 Mac Gillon (mgillon) at NeXT
75 * New version based on 3.3NS and 4.4
76 */
77
78
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/kernel.h>
82 #include <sys/proc_internal.h>
83 #include <sys/acct.h>
84 #include <sys/wait.h>
85 #include <sys/file_internal.h>
86 #include <sys/uio.h>
87 #include <sys/malloc.h>
88 #include <sys/lock.h>
89 #include <sys/mbuf.h>
90 #include <sys/ioctl.h>
91 #include <sys/tty.h>
92 #include <sys/signalvar.h>
93 #include <sys/syslog.h>
94 #include <sys/sysctl.h>
95 #include <sys/sysproto.h>
96 #include <sys/kauth.h>
97 #include <sys/codesign.h>
98 #include <sys/kernel_types.h>
99 #include <sys/ubc.h>
100 #include <kern/kalloc.h>
101 #include <kern/task.h>
102 #include <kern/coalition.h>
103 #include <sys/coalition.h>
104 #include <kern/assert.h>
105 #include <vm/vm_protos.h>
106 #include <vm/vm_map.h> /* vm_map_switch_protect() */
107 #include <vm/vm_pageout.h>
108 #include <mach/task.h>
109 #include <mach/message.h>
110 #include <sys/priv.h>
111 #include <sys/proc_info.h>
112 #include <sys/bsdtask_info.h>
113 #include <sys/persona.h>
114 #include <sys/sysent.h>
115 #include <sys/reason.h>
116 #include <IOKit/IOBSD.h> /* IOTaskHasEntitlement() */
117
118 #ifdef CONFIG_32BIT_TELEMETRY
119 #include <sys/kasl.h>
120 #endif /* CONFIG_32BIT_TELEMETRY */
121
122 #if CONFIG_CSR
123 #include <sys/csr.h>
124 #endif
125
126 #if CONFIG_MEMORYSTATUS
127 #include <sys/kern_memorystatus.h>
128 #endif
129
130 #if CONFIG_MACF
131 #include <security/mac_framework.h>
132 #endif
133
134 #include <libkern/crypto/sha1.h>
135
136 #ifdef CONFIG_32BIT_TELEMETRY
137 #define MAX_32BIT_EXEC_SIG_SIZE 160
138 #endif /* CONFIG_32BIT_TELEMETRY */
139
140 /*
141 * Structure associated with user caching.
142 */
143 struct uidinfo {
144 LIST_ENTRY(uidinfo) ui_hash;
145 uid_t ui_uid;
146 long ui_proccnt;
147 };
148 #define UIHASH(uid) (&uihashtbl[(uid) & uihash])
149 LIST_HEAD(uihashhead, uidinfo) * uihashtbl;
150 u_long uihash; /* size of hash table - 1 */
151
152 /*
153 * Other process lists
154 */
155 struct pidhashhead *pidhashtbl;
156 u_long pidhash;
157 struct pgrphashhead *pgrphashtbl;
158 u_long pgrphash;
159 struct sesshashhead *sesshashtbl;
160 u_long sesshash;
161
162 struct proclist allproc;
163 struct proclist zombproc;
164 extern struct tty cons;
165
166 extern int cs_debug;
167
168 #if DEVELOPMENT || DEBUG
169 int syscallfilter_disable = 0;
170 #endif // DEVELOPMENT || DEBUG
171
172 #if DEBUG
173 #define __PROC_INTERNAL_DEBUG 1
174 #endif
175 #if CONFIG_COREDUMP
176 /* Name to give to core files */
177 #if defined(XNU_TARGET_OS_BRIDGE)
178 __XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/private/var/internal/%N.core"};
179 #elif CONFIG_EMBEDDED
180 __XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/private/var/cores/%N.core"};
181 #else
182 __XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/cores/core.%P"};
183 #endif
184 #endif
185
186 #if PROC_REF_DEBUG
187 #include <kern/backtrace.h>
188 #endif
189
190 typedef uint64_t unaligned_u64 __attribute__((aligned(1)));
191
192 static void orphanpg(struct pgrp * pg);
193 void proc_name_kdp(task_t t, char * buf, int size);
194 boolean_t proc_binary_uuid_kdp(task_t task, uuid_t uuid);
195 int proc_threadname_kdp(void * uth, char * buf, size_t size);
196 void proc_starttime_kdp(void * p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unaligned_u64 *abstime);
197 char * proc_name_address(void * p);
198
199 static void pgrp_add(struct pgrp * pgrp, proc_t parent, proc_t child);
200 static void pgrp_remove(proc_t p);
201 static void pgrp_replace(proc_t p, struct pgrp *pgrp);
202 static void pgdelete_dropref(struct pgrp *pgrp);
203 extern void pg_rele_dropref(struct pgrp * pgrp);
204 static int csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaddittoken);
205 static boolean_t proc_parent_is_currentproc(proc_t p);
206
207 struct fixjob_iterargs {
208 struct pgrp * pg;
209 struct session * mysession;
210 int entering;
211 };
212
213 int fixjob_callback(proc_t, void *);
214
215 uint64_t
216 get_current_unique_pid(void)
217 {
218 proc_t p = current_proc();
219
220 if (p) {
221 return p->p_uniqueid;
222 } else {
223 return 0;
224 }
225 }
226
227 /*
228 * Initialize global process hashing structures.
229 */
230 void
231 procinit(void)
232 {
233 LIST_INIT(&allproc);
234 LIST_INIT(&zombproc);
235 pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
236 pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
237 sesshashtbl = hashinit(maxproc / 4, M_PROC, &sesshash);
238 uihashtbl = hashinit(maxproc / 16, M_PROC, &uihash);
239 #if CONFIG_PERSONAS
240 personas_bootstrap();
241 #endif
242 }
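/*
 * Illustrative sketch (not part of the original file): hashinit() sizes
 * each table to a power of two and stores (size - 1) in the mask
 * out-parameter (see "size of hash table - 1" above), so the bucket
 * macros reduce a key with a cheap bitwise AND rather than a modulo:
 *
 *	struct uihashhead *bucket = UIHASH(501);
 *	// with uihash == 63, uid 501 lands in bucket 501 & 63 == 53
 */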
243
244 /*
245 * Change the count of processes associated with a given
246 * user. This routine protects the uihash table with the
247 * list lock.
248 */
249 int
250 chgproccnt(uid_t uid, int diff)
251 {
252 struct uidinfo *uip;
253 struct uidinfo *newuip = NULL;
254 struct uihashhead *uipp;
255 int retval;
256
257 again:
258 proc_list_lock();
259 uipp = UIHASH(uid);
260 for (uip = uipp->lh_first; uip != 0; uip = uip->ui_hash.le_next) {
261 if (uip->ui_uid == uid) {
262 break;
263 }
264 }
265 if (uip) {
266 uip->ui_proccnt += diff;
267 if (uip->ui_proccnt > 0) {
268 retval = uip->ui_proccnt;
269 proc_list_unlock();
270 goto out;
271 }
272 if (uip->ui_proccnt < 0) {
273 panic("chgproccnt: procs < 0");
274 }
275 LIST_REMOVE(uip, ui_hash);
276 retval = 0;
277 proc_list_unlock();
278 FREE_ZONE(uip, sizeof(*uip), M_PROC);
279 goto out;
280 }
281 if (diff <= 0) {
282 if (diff == 0) {
283 retval = 0;
284 proc_list_unlock();
285 goto out;
286 }
287 panic("chgproccnt: lost user");
288 }
289 if (newuip != NULL) {
290 uip = newuip;
291 newuip = NULL;
292 LIST_INSERT_HEAD(uipp, uip, ui_hash);
293 uip->ui_uid = uid;
294 uip->ui_proccnt = diff;
295 retval = diff;
296 proc_list_unlock();
297 goto out;
298 }
299 proc_list_unlock();
300 MALLOC_ZONE(newuip, struct uidinfo *, sizeof(*uip), M_PROC, M_WAITOK);
301 if (newuip == NULL) {
302 panic("chgproccnt: M_PROC zone depleted");
303 }
304 goto again;
305 out:
306 if (newuip != NULL) {
307 FREE_ZONE(newuip, sizeof(*uip), M_PROC);
308 }
309 return retval;
310 }
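/*
 * Usage sketch (illustrative; fork-time code in kern_fork.c is the real
 * caller): charge the parent's uid, then check the returned count
 * against a per-user limit, e.g.:
 *
 *	count = chgproccnt(uid, 1);		// charge one process to uid
 *	if (uid != 0 && count > limit) {	// limit: e.g. maxprocperuid
 *		(void)chgproccnt(uid, -1);	// over the limit: undo and fail
 *		return EAGAIN;
 *	}
 */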
311
312 /*
313 * Is p an inferior of the current process?
314 */
315 int
316 inferior(proc_t p)
317 {
318 int retval = 0;
319
320 proc_list_lock();
321 for (; p != current_proc(); p = p->p_pptr) {
322 if (p->p_pid == 0) {
323 goto out;
324 }
325 }
326 retval = 1;
327 out:
328 proc_list_unlock();
329 return retval;
330 }
331
332 /*
333 * Is p an inferior of t ?
334 */
335 int
336 isinferior(proc_t p, proc_t t)
337 {
338 int retval = 0;
339 int nchecked = 0;
340 proc_t start = p;
341
342 /* if p==t they are not inferior */
343 if (p == t) {
344 return 0;
345 }
346
347 proc_list_lock();
348 for (; p != t; p = p->p_pptr) {
349 nchecked++;
350
351 /* Detect here if we're in a cycle */
352 if ((p->p_pid == 0) || (p->p_pptr == start) || (nchecked >= nprocs)) {
353 goto out;
354 }
355 }
356 retval = 1;
357 out:
358 proc_list_unlock();
359 return retval;
360 }
361
362 int
363 proc_isinferior(int pid1, int pid2)
364 {
365 proc_t p = PROC_NULL;
366 proc_t t = PROC_NULL;
367 int retval = 0;
368
369 if (((p = proc_find(pid1)) != (proc_t)0) && ((t = proc_find(pid2)) != (proc_t)0)) {
370 retval = isinferior(p, t);
371 }
372
373 if (p != PROC_NULL) {
374 proc_rele(p);
375 }
376 if (t != PROC_NULL) {
377 proc_rele(t);
378 }
379
380 return retval;
381 }
382
383 proc_t
384 proc_find(int pid)
385 {
386 return proc_findinternal(pid, 0);
387 }
388
389 proc_t
390 proc_findinternal(int pid, int locked)
391 {
392 proc_t p = PROC_NULL;
393
394 if (locked == 0) {
395 proc_list_lock();
396 }
397
398 p = pfind_locked(pid);
399 if ((p == PROC_NULL) || (p != proc_ref_locked(p))) {
400 p = PROC_NULL;
401 }
402
403 if (locked == 0) {
404 proc_list_unlock();
405 }
406
407 return p;
408 }
409
410 proc_t
411 proc_findthread(thread_t thread)
412 {
413 proc_t p = PROC_NULL;
414 struct uthread *uth;
415
416 proc_list_lock();
417 uth = get_bsdthread_info(thread);
418 if (uth && (uth->uu_flag & UT_VFORK)) {
419 p = uth->uu_proc;
420 } else {
421 p = (proc_t)(get_bsdthreadtask_info(thread));
422 }
423 p = proc_ref_locked(p);
424 proc_list_unlock();
425 return p;
426 }
427
428 void
429 uthread_reset_proc_refcount(void *uthread)
430 {
431 uthread_t uth;
432
433 uth = (uthread_t) uthread;
434 uth->uu_proc_refcount = 0;
435
436 #if PROC_REF_DEBUG
437 if (proc_ref_tracking_disabled) {
438 return;
439 }
440
441 uth->uu_pindex = 0;
442 #endif
443 }
444
445 #if PROC_REF_DEBUG
446 int
447 uthread_get_proc_refcount(void *uthread)
448 {
449 uthread_t uth;
450
451 if (proc_ref_tracking_disabled) {
452 return 0;
453 }
454
455 uth = (uthread_t) uthread;
456
457 return uth->uu_proc_refcount;
458 }
459 #endif
460
461 static void
462 record_procref(proc_t p __unused, int count)
463 {
464 uthread_t uth;
465
466 uth = current_uthread();
467 uth->uu_proc_refcount += count;
468
469 #if PROC_REF_DEBUG
470 if (proc_ref_tracking_disabled) {
471 return;
472 }
473
474 if (uth->uu_pindex < NUM_PROC_REFS_TO_TRACK) {
475 backtrace((uintptr_t *) &uth->uu_proc_pcs[uth->uu_pindex],
476 PROC_REF_STACK_DEPTH, NULL);
477
478 uth->uu_proc_ps[uth->uu_pindex] = p;
479 uth->uu_pindex++;
480 }
481 #endif
482 }
483
484 static boolean_t
485 uthread_needs_to_wait_in_proc_refwait(void)
486 {
487 uthread_t uth = current_uthread();
488
489 /*
490 * Allow only threads holding no proc refs to wait
491 * in proc_refwait; letting threads that hold proc
492 * refs wait there causes deadlocks and makes
493 * proc_find non-reentrant.
494 */
495 if (uth->uu_proc_refcount == 0) {
496 return TRUE;
497 }
498
499 return FALSE;
500 }
501
502 int
503 proc_rele(proc_t p)
504 {
505 proc_list_lock();
506 proc_rele_locked(p);
507 proc_list_unlock();
508
509 return 0;
510 }
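/*
 * Illustrative sketch of the canonical ref pattern built on proc_find()
 * and proc_rele() (hypothetical caller, not part of the original file):
 *
 *	proc_t p = proc_find(pid);	// takes a ref, or returns PROC_NULL
 *	if (p != PROC_NULL) {
 *		// ... use p; the ref keeps the proc from being recycled ...
 *		proc_rele(p);		// drop the ref when done
 *	}
 */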
511
512 proc_t
513 proc_self(void)
514 {
515 struct proc * p;
516
517 p = current_proc();
518
519 proc_list_lock();
520 if (p != proc_ref_locked(p)) {
521 p = PROC_NULL;
522 }
523 proc_list_unlock();
524 return p;
525 }
526
527
528 proc_t
529 proc_ref_locked(proc_t p)
530 {
531 proc_t p1 = p;
532 int pid = proc_pid(p);
533
534 retry:
535 /*
536 * If the process is still being created, or the proc got
537 * recycled during msleep, return failure.
538 */
539 if ((p == PROC_NULL) || (p1 != p) || ((p->p_listflag & P_LIST_INCREATE) != 0)) {
540 return PROC_NULL;
541 }
542
543 /*
544 * Do not return a process marked for termination, or one
545 * on which proc_refdrain was called without refwait.
546 * Wait for proc_refdrain_with_refwait to complete if the
547 * process is in refdrain and the refwait flag is set,
548 * unless the current thread is already holding a proc ref
549 * on some proc.
550 */
551 if ((p->p_stat != SZOMB) &&
552 ((p->p_listflag & P_LIST_EXITED) == 0) &&
553 ((p->p_listflag & P_LIST_DEAD) == 0) &&
554 (((p->p_listflag & (P_LIST_DRAIN | P_LIST_DRAINWAIT)) == 0) ||
555 ((p->p_listflag & P_LIST_REFWAIT) != 0))) {
556 if ((p->p_listflag & P_LIST_REFWAIT) != 0 && uthread_needs_to_wait_in_proc_refwait()) {
557 msleep(&p->p_listflag, proc_list_mlock, 0, "proc_refwait", 0);
558 /*
559 * the proc might have been recycled since we dropped
560 * the proc list lock, get the proc again.
561 */
562 p = pfind_locked(pid);
563 goto retry;
564 }
565 p->p_refcount++;
566 record_procref(p, 1);
567 } else {
568 p1 = PROC_NULL;
569 }
570
571 return p1;
572 }
573
574 void
575 proc_rele_locked(proc_t p)
576 {
577 if (p->p_refcount > 0) {
578 p->p_refcount--;
579 record_procref(p, -1);
580 if ((p->p_refcount == 0) && ((p->p_listflag & P_LIST_DRAINWAIT) == P_LIST_DRAINWAIT)) {
581 p->p_listflag &= ~P_LIST_DRAINWAIT;
582 wakeup(&p->p_refcount);
583 }
584 } else {
585 panic("proc_rele_locked -ve ref\n");
586 }
587 }
588
589 proc_t
590 proc_find_zombref(int pid)
591 {
592 proc_t p;
593
594 proc_list_lock();
595
596 again:
597 p = pfind_locked(pid);
598
599 /* should we bail? */
600 if ((p == PROC_NULL) /* not found */
601 || ((p->p_listflag & P_LIST_INCREATE) != 0) /* not created yet */
602 || ((p->p_listflag & P_LIST_EXITED) == 0)) { /* not started exit */
603 proc_list_unlock();
604 return PROC_NULL;
605 }
606
607 /* If someone else is controlling the (unreaped) zombie - wait */
608 if ((p->p_listflag & P_LIST_WAITING) != 0) {
609 (void)msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
610 goto again;
611 }
612 p->p_listflag |= P_LIST_WAITING;
613
614 proc_list_unlock();
615
616 return p;
617 }
618
619 void
620 proc_drop_zombref(proc_t p)
621 {
622 proc_list_lock();
623 if ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
624 p->p_listflag &= ~P_LIST_WAITING;
625 wakeup(&p->p_stat);
626 }
627 proc_list_unlock();
628 }
629
630
631 void
632 proc_refdrain(proc_t p)
633 {
634 proc_refdrain_with_refwait(p, FALSE);
635 }
636
637 proc_t
638 proc_refdrain_with_refwait(proc_t p, boolean_t get_ref_and_allow_wait)
639 {
640 boolean_t initexec = FALSE;
641 proc_list_lock();
642
643 p->p_listflag |= P_LIST_DRAIN;
644 if (get_ref_and_allow_wait) {
645 /*
646 * All the calls to proc_ref_locked will wait
647 * for the flag to get cleared before returning a ref,
648 * unless the current thread is already holding a proc ref
649 * on some proc.
650 */
651 p->p_listflag |= P_LIST_REFWAIT;
652 if (p == initproc) {
653 initexec = TRUE;
654 }
655 }
656
657 /* Do not wait in ref drain for launchd exec */
658 while (p->p_refcount && !initexec) {
659 p->p_listflag |= P_LIST_DRAINWAIT;
660 msleep(&p->p_refcount, proc_list_mlock, 0, "proc_refdrain", 0);
661 }
662
663 p->p_listflag &= ~P_LIST_DRAIN;
664 if (!get_ref_and_allow_wait) {
665 p->p_listflag |= P_LIST_DEAD;
666 } else {
667 /* Return a ref to the caller */
668 p->p_refcount++;
669 record_procref(p, 1);
670 }
671
672 proc_list_unlock();
673
674 if (get_ref_and_allow_wait) {
675 return p;
676 }
677 return NULL;
678 }
679
680 void
681 proc_refwake(proc_t p)
682 {
683 proc_list_lock();
684 p->p_listflag &= ~P_LIST_REFWAIT;
685 wakeup(&p->p_listflag);
686 proc_list_unlock();
687 }
688
689 proc_t
690 proc_parentholdref(proc_t p)
691 {
692 proc_t parent = PROC_NULL;
693 proc_t pp;
694 int loopcnt = 0;
695
696
697 proc_list_lock();
698 loop:
699 pp = p->p_pptr;
700 if ((pp == PROC_NULL) || (pp->p_stat == SZOMB) || ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED))) {
701 parent = PROC_NULL;
702 goto out;
703 }
704
705 if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == P_LIST_CHILDDRSTART) {
706 pp->p_listflag |= P_LIST_CHILDDRWAIT;
707 msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
708 loopcnt++;
709 if (loopcnt == 5) {
710 parent = PROC_NULL;
711 goto out;
712 }
713 goto loop;
714 }
715
716 if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == 0) {
717 pp->p_parentref++;
718 parent = pp;
719 goto out;
720 }
721
722 out:
723 proc_list_unlock();
724 return parent;
725 }
726 int
727 proc_parentdropref(proc_t p, int listlocked)
728 {
729 if (listlocked == 0) {
730 proc_list_lock();
731 }
732
733 if (p->p_parentref > 0) {
734 p->p_parentref--;
735 if ((p->p_parentref == 0) && ((p->p_listflag & P_LIST_PARENTREFWAIT) == P_LIST_PARENTREFWAIT)) {
736 p->p_listflag &= ~P_LIST_PARENTREFWAIT;
737 wakeup(&p->p_parentref);
738 }
739 } else {
740 panic("proc_parentdropref -ve ref\n");
741 }
742 if (listlocked == 0) {
743 proc_list_unlock();
744 }
745
746 return 0;
747 }
748
749 void
750 proc_childdrainstart(proc_t p)
751 {
752 #if __PROC_INTERNAL_DEBUG
753 if ((p->p_listflag & P_LIST_CHILDDRSTART) == P_LIST_CHILDDRSTART) {
754 panic("proc_childdrainstart: childdrain already started\n");
755 }
756 #endif
757 p->p_listflag |= P_LIST_CHILDDRSTART;
758 /* wait for all that hold parentrefs to drop */
759 while (p->p_parentref > 0) {
760 p->p_listflag |= P_LIST_PARENTREFWAIT;
761 msleep(&p->p_parentref, proc_list_mlock, 0, "proc_childdrainstart", 0);
762 }
763 }
764
765
766 void
767 proc_childdrainend(proc_t p)
768 {
769 #if __PROC_INTERNAL_DEBUG
770 if (p->p_childrencnt > 0) {
771 panic("exiting: children stil hanging around\n");
772 }
773 #endif
774 p->p_listflag |= P_LIST_CHILDDRAINED;
775 if ((p->p_listflag & (P_LIST_CHILDLKWAIT | P_LIST_CHILDDRWAIT)) != 0) {
776 p->p_listflag &= ~(P_LIST_CHILDLKWAIT | P_LIST_CHILDDRWAIT);
777 wakeup(&p->p_childrencnt);
778 }
779 }
780
781 void
782 proc_checkdeadrefs(__unused proc_t p)
783 {
784 #if __PROC_INTERNAL_DEBUG
785 if ((p->p_listflag & P_LIST_INHASH) != 0) {
786 panic("proc being freed and still in hash %p: %u\n", p, p->p_listflag);
787 }
788 if (p->p_childrencnt != 0) {
789 panic("proc being freed and pending children cnt %p:%d\n", p, p->p_childrencnt);
790 }
791 if (p->p_refcount != 0) {
792 panic("proc being freed and pending refcount %p:%d\n", p, p->p_refcount);
793 }
794 if (p->p_parentref != 0) {
795 panic("proc being freed and pending parentrefs %p:%d\n", p, p->p_parentref);
796 }
797 #endif
798 }
799
800 int
801 proc_pid(proc_t p)
802 {
803 if (p != NULL) {
804 return p->p_pid;
805 }
806 return -1;
807 }
808
809 int
810 proc_ppid(proc_t p)
811 {
812 if (p != NULL) {
813 return p->p_ppid;
814 }
815 return -1;
816 }
817
818 int
819 proc_original_ppid(proc_t p)
820 {
821 if (p != NULL) {
822 return p->p_original_ppid;
823 }
824 return -1;
825 }
826
827 int
828 proc_selfpid(void)
829 {
830 return current_proc()->p_pid;
831 }
832
833 int
834 proc_selfppid(void)
835 {
836 return current_proc()->p_ppid;
837 }
838
839 int
840 proc_selfcsflags(void)
841 {
842 return current_proc()->p_csflags;
843 }
844
845 uint32_t
846 proc_platform(proc_t p)
847 {
848 if (p != NULL) {
849 return p->p_platform;
850 }
851 return (uint32_t)-1;
852 }
853
854 uint32_t
855 proc_sdk(proc_t p)
856 {
857 if (p != NULL) {
858 return p->p_sdk;
859 }
860 return (uint32_t)-1;
861 }
862
863 #if CONFIG_DTRACE
864 static proc_t
865 dtrace_current_proc_vforking(void)
866 {
867 thread_t th = current_thread();
868 struct uthread *ut = get_bsdthread_info(th);
869
870 if (ut &&
871 ((ut->uu_flag & (UT_VFORK | UT_VFORKING)) == (UT_VFORK | UT_VFORKING))) {
872 /*
873 * Handle the narrow window where we're in the vfork syscall,
874 * but we're not quite ready to claim (in particular, to DTrace)
875 * that we're running as the child.
876 */
877 return get_bsdtask_info(get_threadtask(th));
878 }
879 return current_proc();
880 }
881
882 int
883 dtrace_proc_selfpid(void)
884 {
885 return dtrace_current_proc_vforking()->p_pid;
886 }
887
888 int
889 dtrace_proc_selfppid(void)
890 {
891 return dtrace_current_proc_vforking()->p_ppid;
892 }
893
894 uid_t
895 dtrace_proc_selfruid(void)
896 {
897 return dtrace_current_proc_vforking()->p_ruid;
898 }
899 #endif /* CONFIG_DTRACE */
900
901 proc_t
902 proc_parent(proc_t p)
903 {
904 proc_t parent;
905 proc_t pp;
906
907 proc_list_lock();
908 loop:
909 pp = p->p_pptr;
910 parent = proc_ref_locked(pp);
911 if ((parent == PROC_NULL) && (pp != PROC_NULL) && (pp->p_stat != SZOMB) && ((pp->p_listflag & P_LIST_EXITED) != 0) && ((pp->p_listflag & P_LIST_CHILDDRAINED) == 0)) {
912 pp->p_listflag |= P_LIST_CHILDLKWAIT;
913 msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
914 goto loop;
915 }
916 proc_list_unlock();
917 return parent;
918 }
919
920 static boolean_t
921 proc_parent_is_currentproc(proc_t p)
922 {
923 boolean_t ret = FALSE;
924
925 proc_list_lock();
926 if (p->p_pptr == current_proc()) {
927 ret = TRUE;
928 }
929
930 proc_list_unlock();
931 return ret;
932 }
933
934 void
935 proc_name(int pid, char * buf, int size)
936 {
937 proc_t p;
938
939 if ((p = proc_find(pid)) != PROC_NULL) {
940 strlcpy(buf, &p->p_comm[0], size);
941 proc_rele(p);
942 }
943 }
944
945 void
946 proc_name_kdp(task_t t, char * buf, int size)
947 {
948 proc_t p = get_bsdtask_info(t);
949 if (p == PROC_NULL) {
950 return;
951 }
952
953 if ((size_t)size > sizeof(p->p_comm)) {
954 strlcpy(buf, &p->p_name[0], MIN((int)sizeof(p->p_name), size));
955 } else {
956 strlcpy(buf, &p->p_comm[0], MIN((int)sizeof(p->p_comm), size));
957 }
958 }
959
960 boolean_t
961 proc_binary_uuid_kdp(task_t task, uuid_t uuid)
962 {
963 proc_t p = get_bsdtask_info(task);
964 if (p == PROC_NULL) {
965 return FALSE;
966 }
967
968 proc_getexecutableuuid(p, uuid, sizeof(uuid_t));
969
970 return TRUE;
971 }
972
973 int
974 proc_threadname_kdp(void * uth, char * buf, size_t size)
975 {
976 if (size < MAXTHREADNAMESIZE) {
977 /* this is really just a protective measure for the future in
978 * case the thread name size in stackshot gets out of sync with
979 * the BSD max thread name size. Note that bsd_getthreadname
980 * doesn't take input buffer size into account. */
981 return -1;
982 }
983
984 if (uth != NULL) {
985 bsd_getthreadname(uth, buf);
986 }
987 return 0;
988 }
989
990
991 /* Note that this function is generally going to be called from stackshot,
992 * and the arguments will be coming from a struct which is declared packed;
993 * thus the input arguments will in general be unaligned, and we have to
994 * handle that here. */
995 void
996 proc_starttime_kdp(void *p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unaligned_u64 *abstime)
997 {
998 proc_t pp = (proc_t)p;
999 if (pp != PROC_NULL) {
1000 if (tv_sec != NULL) {
1001 *tv_sec = pp->p_start.tv_sec;
1002 }
1003 if (tv_usec != NULL) {
1004 *tv_usec = pp->p_start.tv_usec;
1005 }
1006 if (abstime != NULL) {
1007 if (pp->p_stats != NULL) {
1008 *abstime = pp->p_stats->ps_start;
1009 } else {
1010 *abstime = 0;
1011 }
1012 }
1013 }
1014 }
1015
1016 char *
1017 proc_name_address(void *p)
1018 {
1019 return &((proc_t)p)->p_comm[0];
1020 }
1021
1022 char *
1023 proc_best_name(proc_t p)
1024 {
1025 if (p->p_name[0] != 0) {
1026 return &p->p_name[0];
1027 }
1028 return &p->p_comm[0];
1029 }
1030
1031 void
1032 proc_selfname(char * buf, int size)
1033 {
1034 proc_t p;
1035
1036 if ((p = current_proc()) != (proc_t)0) {
1037 strlcpy(buf, &p->p_comm[0], size);
1038 }
1039 }
1040
1041 void
1042 proc_signal(int pid, int signum)
1043 {
1044 proc_t p;
1045
1046 if ((p = proc_find(pid)) != PROC_NULL) {
1047 psignal(p, signum);
1048 proc_rele(p);
1049 }
1050 }
1051
1052 int
1053 proc_issignal(int pid, sigset_t mask)
1054 {
1055 proc_t p;
1056 int error = 0;
1057
1058 if ((p = proc_find(pid)) != PROC_NULL) {
1059 error = proc_pendingsignals(p, mask);
1060 proc_rele(p);
1061 }
1062
1063 return error;
1064 }
1065
1066 int
1067 proc_noremotehang(proc_t p)
1068 {
1069 int retval = 0;
1070
1071 if (p) {
1072 retval = p->p_flag & P_NOREMOTEHANG;
1073 }
1074 return retval? 1: 0;
1075 }
1076
1077 int
1078 proc_exiting(proc_t p)
1079 {
1080 int retval = 0;
1081
1082 if (p) {
1083 retval = p->p_lflag & P_LEXIT;
1084 }
1085 return retval? 1: 0;
1086 }
1087
1088 int
1089 proc_in_teardown(proc_t p)
1090 {
1091 int retval = 0;
1092
1093 if (p) {
1094 retval = p->p_lflag & P_LPEXIT;
1095 }
1096 return retval? 1: 0;
1097 }
1098
1099 int
1100 proc_forcequota(proc_t p)
1101 {
1102 int retval = 0;
1103
1104 if (p) {
1105 retval = p->p_flag & P_FORCEQUOTA;
1106 }
1107 return retval? 1: 0;
1108 }
1109
1110 int
1111 proc_suser(proc_t p)
1112 {
1113 kauth_cred_t my_cred;
1114 int error;
1115
1116 my_cred = kauth_cred_proc_ref(p);
1117 error = suser(my_cred, &p->p_acflag);
1118 kauth_cred_unref(&my_cred);
1119 return error;
1120 }
1121
1122 task_t
1123 proc_task(proc_t proc)
1124 {
1125 return (task_t)proc->task;
1126 }
1127
1128 /*
1129 * Obtain the first thread in a process
1130 *
1131 * XXX This is a bad thing to do; it exists predominantly to support the
1132 * XXX use of proc_t's in places that should really be using
1133 * XXX thread_t's instead. This maintains historical behaviour, but really
1134 * XXX needs an audit of the context (proxy vs. not) to clean up.
1135 */
1136 thread_t
1137 proc_thread(proc_t proc)
1138 {
1139 uthread_t uth = TAILQ_FIRST(&proc->p_uthlist);
1140
1141 if (uth != NULL) {
1142 return uth->uu_context.vc_thread;
1143 }
1144
1145 return NULL;
1146 }
1147
1148 kauth_cred_t
1149 proc_ucred(proc_t p)
1150 {
1151 return p->p_ucred;
1152 }
1153
1154 struct uthread *
1155 current_uthread()
1156 {
1157 thread_t th = current_thread();
1158
1159 return (struct uthread *)get_bsdthread_info(th);
1160 }
1161
1162
1163 int
1164 proc_is64bit(proc_t p)
1165 {
1166 return IS_64BIT_PROCESS(p);
1167 }
1168
1169 int
1170 proc_is64bit_data(proc_t p)
1171 {
1172 assert(p->task);
1173 return (int)task_get_64bit_data(p->task);
1174 }
1175
1176 int
1177 proc_pidversion(proc_t p)
1178 {
1179 return p->p_idversion;
1180 }
1181
1182 uint32_t
1183 proc_persona_id(proc_t p)
1184 {
1185 return (uint32_t)persona_id_from_proc(p);
1186 }
1187
1188 uint32_t
1189 proc_getuid(proc_t p)
1190 {
1191 return p->p_uid;
1192 }
1193
1194 uint32_t
1195 proc_getgid(proc_t p)
1196 {
1197 return p->p_gid;
1198 }
1199
1200 uint64_t
1201 proc_uniqueid(proc_t p)
1202 {
1203 return p->p_uniqueid;
1204 }
1205
1206 uint64_t
1207 proc_puniqueid(proc_t p)
1208 {
1209 return p->p_puniqueid;
1210 }
1211
1212 void
1213 proc_coalitionids(__unused proc_t p, __unused uint64_t ids[COALITION_NUM_TYPES])
1214 {
1215 #if CONFIG_COALITIONS
1216 task_coalition_ids(p->task, ids);
1217 #else
1218 memset(ids, 0, sizeof(uint64_t[COALITION_NUM_TYPES]));
1219 #endif
1220 return;
1221 }
1222
1223 uint64_t
1224 proc_was_throttled(proc_t p)
1225 {
1226 return p->was_throttled;
1227 }
1228
1229 uint64_t
1230 proc_did_throttle(proc_t p)
1231 {
1232 return p->did_throttle;
1233 }
1234
1235 int
1236 proc_getcdhash(proc_t p, unsigned char *cdhash)
1237 {
1238 return vn_getcdhash(p->p_textvp, p->p_textoff, cdhash);
1239 }
1240
1241 int
1242 proc_exitstatus(proc_t p)
1243 {
1244 return p->p_xstat & 0xffff;
1245 }
1246
1247 void
1248 proc_getexecutableuuid(proc_t p, unsigned char *uuidbuf, unsigned long size)
1249 {
1250 if (size >= sizeof(p->p_uuid)) {
1251 memcpy(uuidbuf, p->p_uuid, sizeof(p->p_uuid));
1252 }
1253 }
1254
1255 /* Return vnode for executable with an iocount. Must be released with vnode_put() */
1256 vnode_t
1257 proc_getexecutablevnode(proc_t p)
1258 {
1259 vnode_t tvp = p->p_textvp;
1260
1261 if (tvp != NULLVP) {
1262 if (vnode_getwithref(tvp) == 0) {
1263 return tvp;
1264 }
1265 }
1266
1267 return NULLVP;
1268 }
1269
1270 int
1271 proc_selfexecutableargs(uint8_t *buf, size_t *buflen)
1272 {
1273 proc_t p = current_proc();
1274
1275 // buflen must always be provided
1276 if (buflen == NULL) {
1277 return EINVAL;
1278 }
1279
1280 // If a buf is provided, there must be at least enough room to fit argc
1281 if (buf && *buflen < sizeof(p->p_argc)) {
1282 return EINVAL;
1283 }
1284
1285 if (!p->user_stack) {
1286 return EINVAL;
1287 }
1288
1289 if (buf == NULL) {
1290 *buflen = p->p_argslen + sizeof(p->p_argc);
1291 return 0;
1292 }
1293
1294 // Copy argc into the first 4 bytes of the output buffer
1295 memcpy(buf, &p->p_argc, sizeof(p->p_argc));
1296
1297 if (*buflen > sizeof(p->p_argc) && p->p_argslen > 0) {
1298 // See memory layout comment in kern_exec.c:exec_copyout_strings()
1299 // We want to copy starting from `p_argslen` bytes away from top of stack
1300 return copyin(p->user_stack - p->p_argslen,
1301 buf + sizeof(p->p_argc),
1302 MIN(p->p_argslen, *buflen - sizeof(p->p_argc)));
1303 } else {
1304 return 0;
1305 }
1306 }
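/*
 * Illustrative two-call pattern for proc_selfexecutableargs()
 * (hypothetical caller, not part of the original file):
 *
 *	size_t len = 0;
 *	if (proc_selfexecutableargs(NULL, &len) == 0) {
 *		// first call reports the required size in len
 *		uint8_t *buf = <allocate len bytes>;
 *		if (buf != NULL && proc_selfexecutableargs(buf, &len) == 0) {
 *			// buf now holds argc (first 4 bytes) followed by
 *			// the argument strings copied from the user stack
 *		}
 *	}
 */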
1307
1308 off_t
1309 proc_getexecutableoffset(proc_t p)
1310 {
1311 return p->p_textoff;
1312 }
1313
1314 void
1315 bsd_set_dependency_capable(task_t task)
1316 {
1317 proc_t p = get_bsdtask_info(task);
1318
1319 if (p) {
1320 OSBitOrAtomic(P_DEPENDENCY_CAPABLE, &p->p_flag);
1321 }
1322 }
1323
1324
1325 #ifndef __arm__
1326 int
1327 IS_64BIT_PROCESS(proc_t p)
1328 {
1329 if (p && (p->p_flag & P_LP64)) {
1330 return 1;
1331 } else {
1332 return 0;
1333 }
1334 }
1335 #endif
1336
1337 /*
1338 * Locate a process by number
1339 */
1340 proc_t
1341 pfind_locked(pid_t pid)
1342 {
1343 proc_t p;
1344 #if DEBUG
1345 proc_t q;
1346 #endif
1347
1348 if (!pid) {
1349 return kernproc;
1350 }
1351
1352 for (p = PIDHASH(pid)->lh_first; p != 0; p = p->p_hash.le_next) {
1353 if (p->p_pid == pid) {
1354 #if DEBUG
1355 for (q = p->p_hash.le_next; q != 0; q = q->p_hash.le_next) {
1356 if ((p != q) && (q->p_pid == pid)) {
1357 panic("two procs with same pid %p:%p:%d:%d\n", p, q, p->p_pid, q->p_pid);
1358 }
1359 }
1360 #endif
1361 return p;
1362 }
1363 }
1364 return NULL;
1365 }
1366
1367 /*
1368 * Locate a zombie by PID
1369 */
1370 __private_extern__ proc_t
1371 pzfind(pid_t pid)
1372 {
1373 proc_t p;
1374
1375
1376 proc_list_lock();
1377
1378 for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) {
1379 if (p->p_pid == pid) {
1380 break;
1381 }
1382 }
1383
1384 proc_list_unlock();
1385
1386 return p;
1387 }
1388
1389 /*
1390 * Locate a process group by number
1391 */
1392
1393 struct pgrp *
1394 pgfind(pid_t pgid)
1395 {
1396 struct pgrp * pgrp;
1397
1398 proc_list_lock();
1399 pgrp = pgfind_internal(pgid);
1400 if ((pgrp == NULL) || ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) != 0)) {
1401 pgrp = PGRP_NULL;
1402 } else {
1403 pgrp->pg_refcount++;
1404 }
1405 proc_list_unlock();
1406 return pgrp;
1407 }
1408
1409
1410
1411 struct pgrp *
1412 pgfind_internal(pid_t pgid)
1413 {
1414 struct pgrp *pgrp;
1415
1416 for (pgrp = PGRPHASH(pgid)->lh_first; pgrp != 0; pgrp = pgrp->pg_hash.le_next) {
1417 if (pgrp->pg_id == pgid) {
1418 return pgrp;
1419 }
1420 }
1421 return NULL;
1422 }
1423
1424 void
1425 pg_rele(struct pgrp * pgrp)
1426 {
1427 if (pgrp == PGRP_NULL) {
1428 return;
1429 }
1430 pg_rele_dropref(pgrp);
1431 }
1432
1433 void
1434 pg_rele_dropref(struct pgrp * pgrp)
1435 {
1436 proc_list_lock();
1437 if ((pgrp->pg_refcount == 1) && ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) == PGRP_FLAG_TERMINATE)) {
1438 proc_list_unlock();
1439 pgdelete_dropref(pgrp);
1440 return;
1441 }
1442
1443 pgrp->pg_refcount--;
1444 proc_list_unlock();
1445 }
1446
1447 struct session *
1448 session_find_internal(pid_t sessid)
1449 {
1450 struct session *sess;
1451
1452 for (sess = SESSHASH(sessid)->lh_first; sess != 0; sess = sess->s_hash.le_next) {
1453 if (sess->s_sid == sessid) {
1454 return sess;
1455 }
1456 }
1457 return NULL;
1458 }
1459
1460
1461 /*
1462 * Make a new process ready to become a useful member of society by making it
1463 * visible in all the right places and initializing its own lists to empty.
1464 *
1465 * Parameters: parent The parent of the process to insert
1466 * child The child process to insert
1467 *
1468 * Returns: (void)
1469 *
1470 * Notes: Insert a child process into the parent's process group, assign
1471 * the child the parent process pointer and PPID of the parent,
1472 * place it on the parent's p_children list as a sibling,
1473 * initialize its own child list, place it in the allproc list,
1474 * insert it in the proper hash bucket, and initialize its
1475 * event list.
1476 */
1477 void
1478 pinsertchild(proc_t parent, proc_t child)
1479 {
1480 struct pgrp * pg;
1481
1482 LIST_INIT(&child->p_children);
1483 TAILQ_INIT(&child->p_evlist);
1484 child->p_pptr = parent;
1485 child->p_ppid = parent->p_pid;
1486 child->p_original_ppid = parent->p_pid;
1487 child->p_puniqueid = parent->p_uniqueid;
1488 child->p_xhighbits = 0;
1489
1490 pg = proc_pgrp(parent);
1491 pgrp_add(pg, parent, child);
1492 pg_rele(pg);
1493
1494 proc_list_lock();
1495
1496 #if CONFIG_MEMORYSTATUS
1497 memorystatus_add(child, TRUE);
1498 #endif
1499
1500 parent->p_childrencnt++;
1501 LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
1502
1503 LIST_INSERT_HEAD(&allproc, child, p_list);
1504 /* mark the completion of proc creation */
1505 child->p_listflag &= ~P_LIST_INCREATE;
1506
1507 proc_list_unlock();
1508 }
1509
1510 /*
1511 * Move p to a new or existing process group (and session)
1512 *
1513 * Returns: 0 Success
1514 * ESRCH No such process
1515 */
1516 int
1517 enterpgrp(proc_t p, pid_t pgid, int mksess)
1518 {
1519 struct pgrp *pgrp;
1520 struct pgrp *mypgrp;
1521 struct session * procsp;
1522
1523 pgrp = pgfind(pgid);
1524 mypgrp = proc_pgrp(p);
1525 procsp = proc_session(p);
1526
1527 #if DIAGNOSTIC
1528 if (pgrp != NULL && mksess) { /* firewalls */
1529 panic("enterpgrp: setsid into non-empty pgrp");
1530 }
1531 if (SESS_LEADER(p, procsp)) {
1532 panic("enterpgrp: session leader attempted setpgrp");
1533 }
1534 #endif
1535 if (pgrp == PGRP_NULL) {
1536 pid_t savepid = p->p_pid;
1537 proc_t np = PROC_NULL;
1538 /*
1539 * new process group
1540 */
1541 #if DIAGNOSTIC
1542 if (p->p_pid != pgid) {
1543 panic("enterpgrp: new pgrp and pid != pgid");
1544 }
1545 #endif
1546 MALLOC_ZONE(pgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP,
1547 M_WAITOK);
1548 if (pgrp == NULL) {
1549 panic("enterpgrp: M_PGRP zone depleted");
1550 }
1551 if ((np = proc_find(savepid)) == NULL || np != p) {
1552 if (np != PROC_NULL) {
1553 proc_rele(np);
1554 }
1555 if (mypgrp != PGRP_NULL) {
1556 pg_rele(mypgrp);
1557 }
1558 if (procsp != SESSION_NULL) {
1559 session_rele(procsp);
1560 }
1561 FREE_ZONE(pgrp, sizeof(struct pgrp), M_PGRP);
1562 return ESRCH;
1563 }
1564 proc_rele(np);
1565 if (mksess) {
1566 struct session *sess;
1567
1568 /*
1569 * new session
1570 */
1571 MALLOC_ZONE(sess, struct session *,
1572 sizeof(struct session), M_SESSION, M_WAITOK);
1573 if (sess == NULL) {
1574 panic("enterpgrp: M_SESSION zone depleted");
1575 }
1576 sess->s_leader = p;
1577 sess->s_sid = p->p_pid;
1578 sess->s_count = 1;
1579 sess->s_ttyvp = NULL;
1580 sess->s_ttyp = TTY_NULL;
1581 sess->s_flags = 0;
1582 sess->s_listflags = 0;
1583 sess->s_ttypgrpid = NO_PID;
1584
1585 lck_mtx_init(&sess->s_mlock, proc_mlock_grp, proc_lck_attr);
1586
1587 bcopy(procsp->s_login, sess->s_login,
1588 sizeof(sess->s_login));
1589 OSBitAndAtomic(~((uint32_t)P_CONTROLT), &p->p_flag);
1590 proc_list_lock();
1591 LIST_INSERT_HEAD(SESSHASH(sess->s_sid), sess, s_hash);
1592 proc_list_unlock();
1593 pgrp->pg_session = sess;
1594 #if DIAGNOSTIC
1595 if (p != current_proc()) {
1596 panic("enterpgrp: mksession and p != curproc");
1597 }
1598 #endif
1599 } else {
1600 proc_list_lock();
1601 pgrp->pg_session = procsp;
1602
1603 if ((pgrp->pg_session->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
1604 panic("enterpgrp: providing ref to terminating session ");
1605 }
1606 pgrp->pg_session->s_count++;
1607 proc_list_unlock();
1608 }
1609 pgrp->pg_id = pgid;
1610
1611 lck_mtx_init(&pgrp->pg_mlock, proc_mlock_grp, proc_lck_attr);
1612
1613 LIST_INIT(&pgrp->pg_members);
1614 pgrp->pg_membercnt = 0;
1615 pgrp->pg_jobc = 0;
1616 proc_list_lock();
1617 pgrp->pg_refcount = 1;
1618 pgrp->pg_listflags = 0;
1619 LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
1620 proc_list_unlock();
1621 } else if (pgrp == mypgrp) {
1622 pg_rele(pgrp);
1623 if (mypgrp != NULL) {
1624 pg_rele(mypgrp);
1625 }
1626 if (procsp != SESSION_NULL) {
1627 session_rele(procsp);
1628 }
1629 return 0;
1630 }
1631
1632 if (procsp != SESSION_NULL) {
1633 session_rele(procsp);
1634 }
1635 /*
1636 * Adjust eligibility of affected pgrps to participate in job control.
1637 * Increment eligibility counts before decrementing, otherwise we
1638 * could reach 0 spuriously during the first call.
1639 */
1640 fixjobc(p, pgrp, 1);
1641 fixjobc(p, mypgrp, 0);
1642
1643 if (mypgrp != PGRP_NULL) {
1644 pg_rele(mypgrp);
1645 }
1646 pgrp_replace(p, pgrp);
1647 pg_rele(pgrp);
1648
1649 return 0;
1650 }
1651
1652 /*
1653 * remove process from process group
1654 */
1655 int
1656 leavepgrp(proc_t p)
1657 {
1658 pgrp_remove(p);
1659 return 0;
1660 }
1661
1662 /*
1663 * delete a process group
1664 */
1665 static void
1666 pgdelete_dropref(struct pgrp *pgrp)
1667 {
1668 struct tty *ttyp;
1669 int emptypgrp = 1;
1670 struct session *sessp;
1671
1672
1673 pgrp_lock(pgrp);
1674 if (pgrp->pg_membercnt != 0) {
1675 emptypgrp = 0;
1676 }
1677 pgrp_unlock(pgrp);
1678
1679 proc_list_lock();
1680 pgrp->pg_refcount--;
1681 if ((emptypgrp == 0) || (pgrp->pg_membercnt != 0)) {
1682 proc_list_unlock();
1683 return;
1684 }
1685
1686 pgrp->pg_listflags |= PGRP_FLAG_TERMINATE;
1687
1688 if (pgrp->pg_refcount > 0) {
1689 proc_list_unlock();
1690 return;
1691 }
1692
1693 pgrp->pg_listflags |= PGRP_FLAG_DEAD;
1694 LIST_REMOVE(pgrp, pg_hash);
1695
1696 proc_list_unlock();
1697
1698 ttyp = SESSION_TP(pgrp->pg_session);
1699 if (ttyp != TTY_NULL) {
1700 if (ttyp->t_pgrp == pgrp) {
1701 tty_lock(ttyp);
1702 /* Re-check after acquiring the lock */
1703 if (ttyp->t_pgrp == pgrp) {
1704 ttyp->t_pgrp = NULL;
1705 pgrp->pg_session->s_ttypgrpid = NO_PID;
1706 }
1707 tty_unlock(ttyp);
1708 }
1709 }
1710
1711 proc_list_lock();
1712
1713 sessp = pgrp->pg_session;
1714 if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
1715 panic("pg_deleteref: manipulating refs of already terminating session");
1716 }
1717 if (--sessp->s_count == 0) {
1718 if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
1719 panic("pg_deleteref: terminating already terminated session");
1720 }
1721 sessp->s_listflags |= S_LIST_TERM;
1722 ttyp = SESSION_TP(sessp);
1723 LIST_REMOVE(sessp, s_hash);
1724 proc_list_unlock();
1725 if (ttyp != TTY_NULL) {
1726 tty_lock(ttyp);
1727 if (ttyp->t_session == sessp) {
1728 ttyp->t_session = NULL;
1729 }
1730 tty_unlock(ttyp);
1731 }
1732 proc_list_lock();
1733 sessp->s_listflags |= S_LIST_DEAD;
1734 if (sessp->s_count != 0) {
1735 panic("pg_deleteref: freeing session in use");
1736 }
1737 proc_list_unlock();
1738 lck_mtx_destroy(&sessp->s_mlock, proc_mlock_grp);
1739
1740 FREE_ZONE(sessp, sizeof(struct session), M_SESSION);
1741 } else {
1742 proc_list_unlock();
1743 }
1744 lck_mtx_destroy(&pgrp->pg_mlock, proc_mlock_grp);
1745 FREE_ZONE(pgrp, sizeof(*pgrp), M_PGRP);
1746 }
1747
1748
1749 /*
1750 * Adjust pgrp jobc counters when specified process changes process group.
1751 * We count the number of processes in each process group that "qualify"
1752 * the group for terminal job control (those with a parent in a different
1753 * process group of the same session). If that count reaches zero, the
1754 * process group becomes orphaned. Check both the specified process'
1755 * process group and that of its children.
1756 * entering == 0 => p is leaving specified group.
1757 * entering == 1 => p is entering specified group.
1758 */
1759 int
1760 fixjob_callback(proc_t p, void * arg)
1761 {
1762 struct fixjob_iterargs *fp;
1763 struct pgrp * pg, *hispg;
1764 struct session * mysession, *hissess;
1765 int entering;
1766
1767 fp = (struct fixjob_iterargs *)arg;
1768 pg = fp->pg;
1769 mysession = fp->mysession;
1770 entering = fp->entering;
1771
1772 hispg = proc_pgrp(p);
1773 hissess = proc_session(p);
1774
1775 if ((hispg != pg) &&
1776 (hissess == mysession)) {
1777 pgrp_lock(hispg);
1778 if (entering) {
1779 hispg->pg_jobc++;
1780 pgrp_unlock(hispg);
1781 } else if (--hispg->pg_jobc == 0) {
1782 pgrp_unlock(hispg);
1783 orphanpg(hispg);
1784 } else {
1785 pgrp_unlock(hispg);
1786 }
1787 }
1788 if (hissess != SESSION_NULL) {
1789 session_rele(hissess);
1790 }
1791 if (hispg != PGRP_NULL) {
1792 pg_rele(hispg);
1793 }
1794
1795 return PROC_RETURNED;
1796 }
1797
1798 void
1799 fixjobc(proc_t p, struct pgrp *pgrp, int entering)
1800 {
1801 struct pgrp *hispgrp = PGRP_NULL;
1802 struct session *hissess = SESSION_NULL;
1803 struct session *mysession = pgrp->pg_session;
1804 proc_t parent;
1805 struct fixjob_iterargs fjarg;
1806 boolean_t proc_parent_self;
1807
1808 /*
1809 * Check whether p's parent is the current proc; if so, there is no
1810 * need to take a ref, and calling proc_parent with the current proc
1811 * as parent may deadlock if the current proc is exiting.
1812 */
1813 proc_parent_self = proc_parent_is_currentproc(p);
1814 if (proc_parent_self) {
1815 parent = current_proc();
1816 } else {
1817 parent = proc_parent(p);
1818 }
1819
1820 if (parent != PROC_NULL) {
1821 hispgrp = proc_pgrp(parent);
1822 hissess = proc_session(parent);
1823 if (!proc_parent_self) {
1824 proc_rele(parent);
1825 }
1826 }
1827
1828
1829 /*
1830 * Check p's parent to see whether p qualifies its own process
1831 * group; if so, adjust count for p's process group.
1832 */
1833 if ((hispgrp != pgrp) &&
1834 (hissess == mysession)) {
1835 pgrp_lock(pgrp);
1836 if (entering) {
1837 pgrp->pg_jobc++;
1838 pgrp_unlock(pgrp);
1839 } else if (--pgrp->pg_jobc == 0) {
1840 pgrp_unlock(pgrp);
1841 orphanpg(pgrp);
1842 } else {
1843 pgrp_unlock(pgrp);
1844 }
1845 }
1846
1847 if (hissess != SESSION_NULL) {
1848 session_rele(hissess);
1849 }
1850 if (hispgrp != PGRP_NULL) {
1851 pg_rele(hispgrp);
1852 }
1853
1854 /*
1855 * Check this process' children to see whether they qualify
1856 * their process groups; if so, adjust counts for children's
1857 * process groups.
1858 */
1859 fjarg.pg = pgrp;
1860 fjarg.mysession = mysession;
1861 fjarg.entering = entering;
1862 proc_childrenwalk(p, fixjob_callback, &fjarg);
1863 }
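/*
 * Worked example (illustrative): a shell S in pgrp A starts a job C in a
 * new pgrp B of the same session. C's parent (S) is in a different pgrp
 * of the same session, so C qualifies pgrp B, and fixjobc(C, B, 1)
 * increments B's pg_jobc. When the qualifying relationship goes away,
 * the matching fixjobc(..., 0) call decrements pg_jobc, and a drop to
 * zero orphans the group via orphanpg().
 */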
1864
1865 /*
1866 * The pidlist_* routines support the functions in this file that
1867 * walk lists of processes applying filters and callouts to the
1868 * elements of the list.
1869 *
1870 * A prior implementation used a single linear array, which can be
1871 * tricky to allocate on large systems. This implementation creates
1872 * an SLIST of modestly sized arrays of PIDS_PER_ENTRY elements.
1873 *
1874 * The array should be sized large enough to keep the overhead of
1875 * walking the list low, but small enough that blocking allocations of
1876 * pidlist_entry_t structures always succeed.
1877 */
1878
1879 #define PIDS_PER_ENTRY 1021
1880
1881 typedef struct pidlist_entry {
1882 SLIST_ENTRY(pidlist_entry) pe_link;
1883 u_int pe_nused;
1884 pid_t pe_pid[PIDS_PER_ENTRY];
1885 } pidlist_entry_t;
1886
1887 typedef struct {
1888 SLIST_HEAD(, pidlist_entry) pl_head;
1889 struct pidlist_entry *pl_active;
1890 u_int pl_nalloc;
1891 } pidlist_t;
1892
1893 static __inline__ pidlist_t *
1894 pidlist_init(pidlist_t *pl)
1895 {
1896 SLIST_INIT(&pl->pl_head);
1897 pl->pl_active = NULL;
1898 pl->pl_nalloc = 0;
1899 return pl;
1900 }
1901
1902 static u_int
1903 pidlist_alloc(pidlist_t *pl, u_int needed)
1904 {
1905 while (pl->pl_nalloc < needed) {
1906 pidlist_entry_t *pe = kalloc(sizeof(*pe));
1907 if (NULL == pe) {
1908 panic("no space for pidlist entry");
1909 }
1910 pe->pe_nused = 0;
1911 SLIST_INSERT_HEAD(&pl->pl_head, pe, pe_link);
1912 pl->pl_nalloc += (sizeof(pe->pe_pid) / sizeof(pe->pe_pid[0]));
1913 }
1914 return pl->pl_nalloc;
1915 }
1916
1917 static void
1918 pidlist_free(pidlist_t *pl)
1919 {
1920 pidlist_entry_t *pe;
1921 while (NULL != (pe = SLIST_FIRST(&pl->pl_head))) {
1922 SLIST_FIRST(&pl->pl_head) = SLIST_NEXT(pe, pe_link);
1923 kfree(pe, sizeof(*pe));
1924 }
1925 pl->pl_nalloc = 0;
1926 }
1927
1928 static __inline__ void
1929 pidlist_set_active(pidlist_t *pl)
1930 {
1931 pl->pl_active = SLIST_FIRST(&pl->pl_head);
1932 assert(pl->pl_active);
1933 }
1934
1935 static void
1936 pidlist_add_pid(pidlist_t *pl, pid_t pid)
1937 {
1938 pidlist_entry_t *pe = pl->pl_active;
1939 if (pe->pe_nused >= sizeof(pe->pe_pid) / sizeof(pe->pe_pid[0])) {
1940 if (NULL == (pe = SLIST_NEXT(pe, pe_link))) {
1941 panic("pidlist allocation exhausted");
1942 }
1943 pl->pl_active = pe;
1944 }
1945 pe->pe_pid[pe->pe_nused++] = pid;
1946 }
1947
1948 static __inline__ u_int
1949 pidlist_nalloc(const pidlist_t *pl)
1950 {
1951 return pl->pl_nalloc;
1952 }
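/*
 * Usage sketch (illustrative; orphanpg() below is the in-tree example):
 *
 *	pidlist_t pid_list, *pl = pidlist_init(&pid_list);
 *	pidlist_alloc(pl, needed);	// grow while unlocked; may block
 *	pidlist_set_active(pl);
 *	pidlist_add_pid(pl, pid);	// fill while holding the list's lock
 *	// ... walk pl->pl_head applying the filter/callout ...
 *	pidlist_free(pl);
 */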
1953
1954 /*
1955 * A process group has become orphaned; if there are any stopped processes in
1956 * the group, hang up all processes in that group.
1957 */
1958 static void
1959 orphanpg(struct pgrp *pgrp)
1960 {
1961 pidlist_t pid_list, *pl = pidlist_init(&pid_list);
1962 u_int pid_count_available = 0;
1963 proc_t p;
1964
1965 /* allocate outside of the pgrp_lock */
1966 for (;;) {
1967 pgrp_lock(pgrp);
1968
1969 boolean_t should_iterate = FALSE;
1970 pid_count_available = 0;
1971
1972 PGMEMBERS_FOREACH(pgrp, p) {
1973 pid_count_available++;
1974 if (p->p_stat == SSTOP) {
1975 should_iterate = TRUE;
1976 }
1977 }
1978 if (pid_count_available == 0 || !should_iterate) {
1979 pgrp_unlock(pgrp);
1980 goto out; /* no orphaned processes OR nothing stopped */
1981 }
1982 if (pidlist_nalloc(pl) >= pid_count_available) {
1983 break;
1984 }
1985 pgrp_unlock(pgrp);
1986
1987 pidlist_alloc(pl, pid_count_available);
1988 }
1989 pidlist_set_active(pl);
1990
1991 u_int pid_count = 0;
1992 PGMEMBERS_FOREACH(pgrp, p) {
1993 pidlist_add_pid(pl, proc_pid(p));
1994 if (++pid_count >= pid_count_available) {
1995 break;
1996 }
1997 }
1998 pgrp_unlock(pgrp);
1999
2000 const pidlist_entry_t *pe;
2001 SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
2002 for (u_int i = 0; i < pe->pe_nused; i++) {
2003 const pid_t pid = pe->pe_pid[i];
2004 if (0 == pid) {
2005 continue; /* skip kernproc */
2006 }
2007 p = proc_find(pid);
2008 if (!p) {
2009 continue;
2010 }
2011 proc_transwait(p, 0);
2012 pt_setrunnable(p);
2013 psignal(p, SIGHUP);
2014 psignal(p, SIGCONT);
2015 proc_rele(p);
2016 }
2017 }
2018 out:
2019 pidlist_free(pl);
2020 }
2021
2022 int
2023 proc_is_classic(proc_t p __unused)
2024 {
2025 return 0;
2026 }
2027
2028 /* XXX Why does this function exist? Need to kill it off... */
2029 proc_t
2030 current_proc_EXTERNAL(void)
2031 {
2032 return current_proc();
2033 }
2034
2035 int
2036 proc_is_forcing_hfs_case_sensitivity(proc_t p)
2037 {
2038 return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY) ? 1 : 0;
2039 }
2040
2041 #if CONFIG_COREDUMP
2042 /*
2043 * proc_core_name(name, uid, pid)
2044 * Expand the name described in corefilename, using name, uid, and pid.
2045 * corefilename is a printf-like string, with three format specifiers:
2046 * %N name of process ("name")
2047 * %P process id (pid)
2048 * %U user id (uid)
2049 * For example, "%N.core" is the default; core files can be disabled completely
2050 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
2051 * This is controlled by the sysctl variable kern.corefile (see above).
2052 */
2053 __private_extern__ int
2054 proc_core_name(const char *name, uid_t uid, pid_t pid, char *cf_name,
2055 size_t cf_name_len)
2056 {
2057 const char *format, *appendstr;
2058 char id_buf[11]; /* Buffer for pid/uid -- max 4B */
2059 size_t i, l, n;
2060
2061 if (cf_name == NULL) {
2062 goto toolong;
2063 }
2064
2065 format = corefilename;
2066 for (i = 0, n = 0; n < cf_name_len && format[i]; i++) {
2067 switch (format[i]) {
2068 case '%': /* Format character */
2069 i++;
2070 switch (format[i]) {
2071 case '%':
2072 appendstr = "%";
2073 break;
2074 case 'N': /* process name */
2075 appendstr = name;
2076 break;
2077 case 'P': /* process id */
2078 snprintf(id_buf, sizeof(id_buf), "%u", pid);
2079 appendstr = id_buf;
2080 break;
2081 case 'U': /* user id */
2082 snprintf(id_buf, sizeof(id_buf), "%u", uid);
2083 appendstr = id_buf;
2084 break;
2085 case '\0': /* format string ended in % symbol */
2086 goto endofstring;
2087 default:
2088 appendstr = "";
2089 log(LOG_ERR,
2090 "Unknown format character %c in `%s'\n",
2091 format[i], format);
2092 }
2093 l = strlen(appendstr);
2094 if ((n + l) >= cf_name_len) {
2095 goto toolong;
2096 }
2097 bcopy(appendstr, cf_name + n, l);
2098 n += l;
2099 break;
2100 default:
2101 cf_name[n++] = format[i];
2102 }
2103 }
2104 if (format[i] != '\0') {
2105 goto toolong;
2106 }
2107 return 0;
2108 toolong:
2109 log(LOG_ERR, "pid %ld (%s), uid (%u): corename is too long\n",
2110 (long)pid, name, (uint32_t)uid);
2111 return 1;
2112 endofstring:
2113 log(LOG_ERR, "pid %ld (%s), uid (%u): unexpected end of string after %% token\n",
2114 (long)pid, name, (uint32_t)uid);
2115 return 1;
2116 }
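/*
 * Example expansions (illustrative): with corefilename "/cores/core.%P",
 * proc_core_name("launchd", 0, 1, buf, sizeof(buf)) produces
 * "/cores/core.1"; with "/cores/%U/%N-%P", uid 501 and pid 250 for
 * "mdworker" produce "/cores/501/mdworker-250".
 */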
2117 #endif /* CONFIG_COREDUMP */
2118
2119 /* Code Signing related routines */
2120
2121 int
2122 csops(__unused proc_t p, struct csops_args *uap, __unused int32_t *retval)
2123 {
2124 return csops_internal(uap->pid, uap->ops, uap->useraddr,
2125 uap->usersize, USER_ADDR_NULL);
2126 }
2127
2128 int
2129 csops_audittoken(__unused proc_t p, struct csops_audittoken_args *uap, __unused int32_t *retval)
2130 {
2131 if (uap->uaudittoken == USER_ADDR_NULL) {
2132 return EINVAL;
2133 }
2134 return csops_internal(uap->pid, uap->ops, uap->useraddr,
2135 uap->usersize, uap->uaudittoken);
2136 }
2137
2138 static int
2139 csops_copy_token(void *start, size_t length, user_size_t usize, user_addr_t uaddr)
2140 {
2141 char fakeheader[8] = { 0 };
2142 int error;
2143
2144 if (usize < sizeof(fakeheader)) {
2145 return ERANGE;
2146 }
2147
2148 /* if no blob, fill in zero header */
2149 if (NULL == start) {
2150 start = fakeheader;
2151 length = sizeof(fakeheader);
2152 } else if (usize < length) {
2153 /* ... if input too short, copy out length of entitlement */
2154 uint32_t length32 = htonl((uint32_t)length);
2155 memcpy(&fakeheader[4], &length32, sizeof(length32));
2156
2157 error = copyout(fakeheader, uaddr, sizeof(fakeheader));
2158 if (error == 0) {
2159 return ERANGE; /* input buffer too short; ERANGE signals that */
2160 }
2161 return error;
2162 }
2163 return copyout(start, uaddr, length);
2164 }
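/*
 * Illustrative userland sketch of the length-probe protocol implemented
 * above (hypothetical caller; csops(2) is declared in <sys/codesign.h>):
 *
 *	uint8_t hdr[8];
 *	// An undersized buffer gets back a fake header whose second word
 *	// is the real blob length (network byte order) and errno ERANGE.
 *	if (csops(pid, CS_OPS_ENTITLEMENTS_BLOB, hdr, sizeof(hdr)) == -1 &&
 *	    errno == ERANGE) {
 *		uint32_t len;
 *		memcpy(&len, hdr + 4, sizeof(len));
 *		len = ntohl(len);
 *		void *blob = malloc(len);
 *		if (blob != NULL &&
 *		    csops(pid, CS_OPS_ENTITLEMENTS_BLOB, blob, len) == 0) {
 *			// blob now holds the entitlements blob, if any
 *		}
 *	}
 */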
2165
2166 static int
2167 csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaudittoken)
2168 {
2169 size_t usize = (size_t)CAST_DOWN(size_t, usersize);
2170 proc_t pt;
2171 int forself;
2172 int error;
2173 vnode_t tvp;
2174 off_t toff;
2175 unsigned char cdhash[SHA1_RESULTLEN];
2176 audit_token_t token;
2177 unsigned int upid = 0, uidversion = 0;
2178
2179 forself = error = 0;
2180
2181 if (pid == 0) {
2182 pid = proc_selfpid();
2183 }
2184 if (pid == proc_selfpid()) {
2185 forself = 1;
2186 }
2187
2188
2189 switch (ops) {
2190 case CS_OPS_STATUS:
2191 case CS_OPS_CDHASH:
2192 case CS_OPS_PIDOFFSET:
2193 case CS_OPS_ENTITLEMENTS_BLOB:
2194 case CS_OPS_IDENTITY:
2195 case CS_OPS_BLOB:
2196 case CS_OPS_TEAMID:
2197 case CS_OPS_CLEAR_LV:
2198 break; /* not restricted to root */
2199 default:
2200 if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE) {
2201 return EPERM;
2202 }
2203 break;
2204 }
2205
2206 pt = proc_find(pid);
2207 if (pt == PROC_NULL) {
2208 return ESRCH;
2209 }
2210
2211 upid = pt->p_pid;
2212 uidversion = pt->p_idversion;
2213 if (uaudittoken != USER_ADDR_NULL) {
2214 error = copyin(uaudittoken, &token, sizeof(audit_token_t));
2215 if (error != 0) {
2216 goto out;
2217 }
2218 /* verify the audit token pid/idversion matches with proc */
2219 if ((token.val[5] != upid) || (token.val[7] != uidversion)) {
2220 error = ESRCH;
2221 goto out;
2222 }
2223 }
2224
2225 #if CONFIG_MACF
2226 switch (ops) {
2227 case CS_OPS_MARKINVALID:
2228 case CS_OPS_MARKHARD:
2229 case CS_OPS_MARKKILL:
2230 case CS_OPS_MARKRESTRICT:
2231 case CS_OPS_SET_STATUS:
2232 case CS_OPS_CLEARINSTALLER:
2233 case CS_OPS_CLEARPLATFORM:
2234 case CS_OPS_CLEAR_LV:
2235 if ((error = mac_proc_check_set_cs_info(current_proc(), pt, ops))) {
2236 goto out;
2237 }
2238 break;
2239 default:
2240 if ((error = mac_proc_check_get_cs_info(current_proc(), pt, ops))) {
2241 goto out;
2242 }
2243 }
2244 #endif
2245
2246 switch (ops) {
2247 case CS_OPS_STATUS: {
2248 uint32_t retflags;
2249
2250 proc_lock(pt);
2251 retflags = pt->p_csflags;
2252 if (cs_process_enforcement(pt)) {
2253 retflags |= CS_ENFORCEMENT;
2254 }
2255 if (csproc_get_platform_binary(pt)) {
2256 retflags |= CS_PLATFORM_BINARY;
2257 }
2258 if (csproc_get_platform_path(pt)) {
2259 retflags |= CS_PLATFORM_PATH;
2260 }
2261 // Don't return CS_REQUIRE_LV if we turned it on with CS_FORCED_LV, but still report CS_FORCED_LV
2262 if ((pt->p_csflags & CS_FORCED_LV) == CS_FORCED_LV) {
2263 retflags &= (~CS_REQUIRE_LV);
2264 }
2265 proc_unlock(pt);
2266
2267 if (uaddr != USER_ADDR_NULL) {
2268 error = copyout(&retflags, uaddr, sizeof(uint32_t));
2269 }
2270 break;
2271 }
2272 case CS_OPS_MARKINVALID:
2273 proc_lock(pt);
2274 if ((pt->p_csflags & CS_VALID) == CS_VALID) { /* is currently valid */
2275 pt->p_csflags &= ~CS_VALID; /* set invalid */
2276 if ((pt->p_csflags & CS_KILL) == CS_KILL) {
2277 pt->p_csflags |= CS_KILLED;
2278 proc_unlock(pt);
2279 if (cs_debug) {
2280 printf("CODE SIGNING: marked invalid by pid %d: "
2281 "p=%d[%s] honoring CS_KILL, final status 0x%x\n",
2282 proc_selfpid(), pt->p_pid, pt->p_comm, pt->p_csflags);
2283 }
2284 psignal(pt, SIGKILL);
2285 } else {
2286 proc_unlock(pt);
2287 }
2288 } else {
2289 proc_unlock(pt);
2290 }
2291
2292 break;
2293
2294 case CS_OPS_MARKHARD:
2295 proc_lock(pt);
2296 pt->p_csflags |= CS_HARD;
2297 if ((pt->p_csflags & CS_VALID) == 0) {
2298 /* @@@ allow? reject? kill? @@@ */
2299 proc_unlock(pt);
2300 error = EINVAL;
2301 goto out;
2302 } else {
2303 proc_unlock(pt);
2304 }
2305 break;
2306
2307 case CS_OPS_MARKKILL:
2308 proc_lock(pt);
2309 pt->p_csflags |= CS_KILL;
2310 if ((pt->p_csflags & CS_VALID) == 0) {
2311 proc_unlock(pt);
2312 psignal(pt, SIGKILL);
2313 } else {
2314 proc_unlock(pt);
2315 }
2316 break;
2317
2318 case CS_OPS_PIDOFFSET:
2319 toff = pt->p_textoff;
2320 proc_rele(pt);
2321 error = copyout(&toff, uaddr, sizeof(toff));
2322 return error;
2323
2324 case CS_OPS_CDHASH:
2325
2326 /* pt already holds a reference on its p_textvp */
2327 tvp = pt->p_textvp;
2328 toff = pt->p_textoff;
2329
2330 if (tvp == NULLVP || usize != SHA1_RESULTLEN) {
2331 proc_rele(pt);
2332 return EINVAL;
2333 }
2334
2335 error = vn_getcdhash(tvp, toff, cdhash);
2336 proc_rele(pt);
2337
2338 if (error == 0) {
2339 error = copyout(cdhash, uaddr, sizeof(cdhash));
2340 }
2341
2342 return error;
2343
2344 case CS_OPS_ENTITLEMENTS_BLOB: {
2345 void *start;
2346 size_t length;
2347
2348 proc_lock(pt);
2349
2350 if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
2351 proc_unlock(pt);
2352 error = EINVAL;
2353 break;
2354 }
2355
2356 error = cs_entitlements_blob_get(pt, &start, &length);
2357 proc_unlock(pt);
2358 if (error) {
2359 break;
2360 }
2361
2362 error = csops_copy_token(start, length, usize, uaddr);
2363 break;
2364 }
2365 case CS_OPS_MARKRESTRICT:
2366 proc_lock(pt);
2367 pt->p_csflags |= CS_RESTRICT;
2368 proc_unlock(pt);
2369 break;
2370
2371 case CS_OPS_SET_STATUS: {
2372 uint32_t flags;
2373
2374 if (usize < sizeof(flags)) {
2375 error = ERANGE;
2376 break;
2377 }
2378
2379 error = copyin(uaddr, &flags, sizeof(flags));
2380 if (error) {
2381 break;
2382 }
2383
2384 /* only allow setting a subset of all code sign flags */
2385 flags &=
2386 CS_HARD | CS_EXEC_SET_HARD |
2387 CS_KILL | CS_EXEC_SET_KILL |
2388 CS_RESTRICT |
2389 CS_REQUIRE_LV |
2390 CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT;
2391
2392 proc_lock(pt);
2393 if (pt->p_csflags & CS_VALID) {
2394 pt->p_csflags |= flags;
2395 } else {
2396 error = EINVAL;
2397 }
2398 proc_unlock(pt);
2399
2400 break;
2401 }
2402 case CS_OPS_CLEAR_LV: {
2403 /*
2404 * This option is used to remove library validation from
2405 * a running process. This is used in plugin architectures
2406 * when a program needs to load untrusted libraries. This
2407 * allows the process to maintain library validation as
2408 * long as possible, then drop it only when required.
2409 * Once a process has loaded the untrusted library,
2410 * relying on library validation in the future will
2411 * not be effective. An alternative is to re-exec
2412 * your application without library validation, or
2413 * fork an untrusted child.
2414 */
2415 #ifdef CONFIG_EMBEDDED
2416 // On embedded platforms, we don't support dropping LV
2417 error = ENOTSUP;
2418 #else
2419 /*
2420 * If the flag is set, the caller wants to remove it,
2421 * and the caller holds the entitlement, then remove it
2422 * from the csflags.
2423 *
2424 * NOTE: We are fine to poke into the task because
2425 * we get a ref to pt when we do the proc_find
2426 * at the beginning of this function.
2427 *
2428 * We also only allow altering ourselves.
2429 */
2430 if (forself == 1 && IOTaskHasEntitlement(pt->task, CLEAR_LV_ENTITLEMENT)) {
2431 proc_lock(pt);
2432 pt->p_csflags &= ~(CS_REQUIRE_LV | CS_FORCED_LV); /* clear both LV flags */
2433 proc_unlock(pt);
2434 error = 0;
2435 } else {
2436 error = EPERM;
2437 }
2438 #endif
2439 break;
2440 }
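/*
 * Illustrative sketch (hedged, not part of this file): the userland
 * pattern the comment above describes -- keep library validation for as
 * long as possible, then drop it (self only, entitlement required) just
 * before loading the untrusted plugin. The helper name and plugin path
 * are hypothetical.
 *
 *	#include <dlfcn.h>
 *	#include <unistd.h>
 *	#include <sys/codesign.h>	// CS_OPS_CLEAR_LV, csops()
 *
 *	static void *
 *	load_untrusted_plugin(const char *path)
 *	{
 *		// EPERM without CLEAR_LV_ENTITLEMENT; ENOTSUP on embedded.
 *		if (csops(getpid(), CS_OPS_CLEAR_LV, NULL, 0) != 0) {
 *			return NULL;
 *		}
 *		return dlopen(path, RTLD_NOW);
 *	}
 */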
2441 case CS_OPS_BLOB: {
2442 void *start;
2443 size_t length;
2444
2445 proc_lock(pt);
2446 if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
2447 proc_unlock(pt);
2448 error = EINVAL;
2449 break;
2450 }
2451
2452 error = cs_blob_get(pt, &start, &length);
2453 proc_unlock(pt);
2454 if (error) {
2455 break;
2456 }
2457
2458 error = csops_copy_token(start, length, usize, uaddr);
2459 break;
2460 }
2461 case CS_OPS_IDENTITY:
2462 case CS_OPS_TEAMID: {
2463 const char *identity;
2464 uint8_t fakeheader[8];
2465 uint32_t idlen;
2466 size_t length;
2467
2468 /*
2469 * Make identity have a blob header to make it
2470 * easier on userland to guess the identity
2471 * length.
2472 */
2473 if (usize < sizeof(fakeheader)) {
2474 error = ERANGE;
2475 break;
2476 }
2477 memset(fakeheader, 0, sizeof(fakeheader));
2478
2479 proc_lock(pt);
2480 if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
2481 proc_unlock(pt);
2482 error = EINVAL;
2483 break;
2484 }
2485
2486 identity = ops == CS_OPS_TEAMID ? csproc_get_teamid(pt) : cs_identity_get(pt);
2487 proc_unlock(pt);
2488 if (identity == NULL) {
2489 error = ENOENT;
2490 break;
2491 }
2492
2493 length = strlen(identity) + 1; /* include NUL */
2494 idlen = htonl(length + sizeof(fakeheader));
2495 memcpy(&fakeheader[4], &idlen, sizeof(idlen));
2496
2497 error = copyout(fakeheader, uaddr, sizeof(fakeheader));
2498 if (error) {
2499 break;
2500 }
2501
2502 if (usize < sizeof(fakeheader) + length) {
2503 error = ERANGE;
2504 } else if (usize > sizeof(fakeheader)) {
2505 error = copyout(identity, uaddr + sizeof(fakeheader), length);
2506 }
2507
2508 break;
2509 }
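/*
 * Illustrative sketch (hedged, not part of this file): consuming the
 * fake blob header emitted above. Bytes 4..7 carry the total length
 * (header plus NUL-terminated identity) in network byte order, so a
 * deliberately short first call sizes the buffer for the second.
 * copy_signing_identity() is a hypothetical helper.
 *
 *	#include <stdint.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <sys/types.h>		// pid_t
 *	#include <arpa/inet.h>		// ntohl()
 *	#include <sys/codesign.h>	// CS_OPS_IDENTITY, csops()
 *
 *	static char *
 *	copy_signing_identity(pid_t pid)
 *	{
 *		uint8_t hdr[8] = { 0 };
 *		uint32_t total;
 *		char *buf;
 *
 *		// Returns ERANGE, but the 8-byte header is copied out first.
 *		(void)csops(pid, CS_OPS_IDENTITY, hdr, sizeof(hdr));
 *		memcpy(&total, &hdr[4], sizeof(total));
 *		total = ntohl(total);
 *		if (total <= sizeof(hdr) || (buf = malloc(total)) == NULL) {
 *			return NULL;
 *		}
 *		if (csops(pid, CS_OPS_IDENTITY, buf, total) != 0) {
 *			free(buf);
 *			return NULL;
 *		}
 *		// Identity string begins right after the 8-byte header.
 *		memmove(buf, buf + sizeof(hdr), total - sizeof(hdr));
 *		return buf;	// caller frees
 *	}
 */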
2510
2511 case CS_OPS_CLEARINSTALLER:
2512 proc_lock(pt);
2513 pt->p_csflags &= ~(CS_INSTALLER | CS_DATAVAULT_CONTROLLER | CS_EXEC_INHERIT_SIP);
2514 proc_unlock(pt);
2515 break;
2516
2517 case CS_OPS_CLEARPLATFORM:
2518 #if DEVELOPMENT || DEBUG
2519 if (cs_process_global_enforcement()) {
2520 error = ENOTSUP;
2521 break;
2522 }
2523
2524 #if CONFIG_CSR
2525 if (csr_check(CSR_ALLOW_APPLE_INTERNAL) != 0) {
2526 error = ENOTSUP;
2527 break;
2528 }
2529 #endif
2530
2531 proc_lock(pt);
2532 pt->p_csflags &= ~(CS_PLATFORM_BINARY | CS_PLATFORM_PATH);
2533 csproc_clear_platform_binary(pt);
2534 proc_unlock(pt);
2535 break;
2536 #else
2537 error = ENOTSUP;
2538 break;
2539 #endif /* DEVELOPMENT || DEBUG */
2540
2541 default:
2542 error = EINVAL;
2543 break;
2544 }
2545 out:
2546 proc_rele(pt);
2547 return error;
2548 }
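/*
 * Illustrative sketch (hedged, not part of this file): the audit-token
 * variant. csops_audittoken(2) supplies a token whose val[5]/val[7]
 * must match the target's pid and pid-version (verified near the top
 * of this function), which guards against pid reuse between obtaining
 * the token and issuing the query. The token would typically come from
 * a mach message trailer; status_checked() is a hypothetical helper.
 *
 *	#include <stdint.h>
 *	#include <sys/codesign.h>	// CS_OPS_STATUS, csops_audittoken()
 *	#include <bsm/audit.h>		// audit_token_t
 *
 *	static int
 *	status_checked(pid_t pid, const audit_token_t *token, uint32_t *flags)
 *	{
 *		audit_token_t t = *token;
 *
 *		// Fails with ESRCH if pid was recycled since the token was minted.
 *		return csops_audittoken(pid, CS_OPS_STATUS, flags, sizeof(*flags), &t);
 *	}
 */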
2549
2550 void
2551 proc_iterate(
2552 unsigned int flags,
2553 proc_iterate_fn_t callout,
2554 void *arg,
2555 proc_iterate_fn_t filterfn,
2556 void *filterarg)
2557 {
2558 pidlist_t pid_list, *pl = pidlist_init(&pid_list);
2559 u_int pid_count_available = 0;
2560
2561 assert(callout != NULL);
2562
2563 /* allocate outside of the proc_list_lock */
2564 for (;;) {
2565 proc_list_lock();
2566 pid_count_available = nprocs + 1; /* kernel_task not counted in nprocs */
2567 assert(pid_count_available > 0);
2568 if (pidlist_nalloc(pl) > pid_count_available) {
2569 break;
2570 }
2571 proc_list_unlock();
2572
2573 pidlist_alloc(pl, pid_count_available);
2574 }
2575 pidlist_set_active(pl);
2576
2577 /* filter pids into the pid_list */
2578
2579 u_int pid_count = 0;
2580 if (flags & PROC_ALLPROCLIST) {
2581 proc_t p;
2582 ALLPROC_FOREACH(p) {
2583 /* ignore processes that are being forked */
2584 if (p->p_stat == SIDL) {
2585 continue;
2586 }
2587 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
2588 continue;
2589 }
2590 pidlist_add_pid(pl, proc_pid(p));
2591 if (++pid_count >= pid_count_available) {
2592 break;
2593 }
2594 }
2595 }
2596
2597 if ((pid_count < pid_count_available) &&
2598 (flags & PROC_ZOMBPROCLIST)) {
2599 proc_t p;
2600 ZOMBPROC_FOREACH(p) {
2601 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
2602 continue;
2603 }
2604 pidlist_add_pid(pl, proc_pid(p));
2605 if (++pid_count >= pid_count_available) {
2606 break;
2607 }
2608 }
2609 }
2610
2611 proc_list_unlock();
2612
2613 /* call callout on processes in the pid_list */
2614
2615 const pidlist_entry_t *pe;
2616 SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
2617 for (u_int i = 0; i < pe->pe_nused; i++) {
2618 const pid_t pid = pe->pe_pid[i];
2619 proc_t p = proc_find(pid);
2620 if (p) {
2621 if ((flags & PROC_NOWAITTRANS) == 0) {
2622 proc_transwait(p, 0);
2623 }
2624 const int callout_ret = callout(p, arg);
2625
2626 switch (callout_ret) {
2627 case PROC_RETURNED_DONE:
2628 proc_rele(p);
2629 /* FALLTHROUGH */
2630 case PROC_CLAIMED_DONE:
2631 goto out;
2632
2633 case PROC_RETURNED:
2634 proc_rele(p);
2635 /* FALLTHROUGH */
2636 case PROC_CLAIMED:
2637 break;
2638 default:
2639 panic("%s: callout =%d for pid %d",
2640 __func__, callout_ret, pid);
2641 break;
2642 }
2643 } else if (flags & PROC_ZOMBPROCLIST) {
2644 p = proc_find_zombref(pid);
2645 if (!p) {
2646 continue;
2647 }
2648 const int callout_ret = callout(p, arg);
2649
2650 switch (callout_ret) {
2651 case PROC_RETURNED_DONE:
2652 proc_drop_zombref(p);
2653 /* FALLTHROUGH */
2654 case PROC_CLAIMED_DONE:
2655 goto out;
2656
2657 case PROC_RETURNED:
2658 proc_drop_zombref(p);
2659 /* FALLTHROUGH */
2660 case PROC_CLAIMED:
2661 break;
2662 default:
2663 panic("%s: callout =%d for zombie %d",
2664 __func__, callout_ret, pid);
2665 break;
2666 }
2667 }
2668 }
2669 }
2670 out:
2671 pidlist_free(pl);
2672 }
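/*
 * Illustrative sketch (hedged, not part of this file): the filter and
 * callout conventions proc_iterate() expects. A filter runs with the
 * proc_list_lock held and returns nonzero to keep a process; a callout
 * returns PROC_RETURNED (or PROC_RETURNED_DONE to stop early) when the
 * iterator should drop the proc ref, or PROC_CLAIMED(_DONE) when the
 * callout consumed it. The two functions below are hypothetical.
 *
 *	static int
 *	uid_filter(proc_t p, void *arg)
 *	{
 *		return kauth_cred_getuid(proc_ucred(p)) == *(uid_t *)arg;
 *	}
 *
 *	static int
 *	count_callout(proc_t p __unused, void *arg)
 *	{
 *		(*(u_int *)arg)++;
 *		return PROC_RETURNED;	// iterator releases the proc ref
 *	}
 *
 *	// u_int n = 0;
 *	// uid_t uid = 501;
 *	// proc_iterate(PROC_ALLPROCLIST, count_callout, &n, uid_filter, &uid);
 */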
2673
2674 void
2675 proc_rebootscan(
2676 proc_iterate_fn_t callout,
2677 void *arg,
2678 proc_iterate_fn_t filterfn,
2679 void *filterarg)
2680 {
2681 proc_t p;
2682
2683 assert(callout != NULL);
2684
2685 proc_shutdown_exitcount = 0;
2686
2687 restart_foreach:
2688
2689 proc_list_lock();
2690
2691 ALLPROC_FOREACH(p) {
2692 if ((filterfn != NULL) && filterfn(p, filterarg) == 0) {
2693 continue;
2694 }
2695 p = proc_ref_locked(p);
2696 if (!p) {
2697 continue;
2698 }
2699
2700 proc_list_unlock();
2701
2702 proc_transwait(p, 0);
2703 (void)callout(p, arg);
2704 proc_rele(p);
2705
2706 goto restart_foreach;
2707 }
2708
2709 proc_list_unlock();
2710 }
2711
2712 void
2713 proc_childrenwalk(
2714 proc_t parent,
2715 proc_iterate_fn_t callout,
2716 void *arg)
2717 {
2718 pidlist_t pid_list, *pl = pidlist_init(&pid_list);
2719 u_int pid_count_available = 0;
2720
2721 assert(parent != NULL);
2722 assert(callout != NULL);
2723
2724 for (;;) {
2725 proc_list_lock();
2726 pid_count_available = parent->p_childrencnt;
2727 if (pid_count_available == 0) {
2728 proc_list_unlock();
2729 goto out;
2730 }
2731 if (pidlist_nalloc(pl) > pid_count_available) {
2732 break;
2733 }
2734 proc_list_unlock();
2735
2736 pidlist_alloc(pl, pid_count_available);
2737 }
2738 pidlist_set_active(pl);
2739
2740 u_int pid_count = 0;
2741 proc_t p;
2742 PCHILDREN_FOREACH(parent, p) {
2743 if (p->p_stat == SIDL) {
2744 continue;
2745 }
2746 pidlist_add_pid(pl, proc_pid(p));
2747 if (++pid_count >= pid_count_available) {
2748 break;
2749 }
2750 }
2751
2752 proc_list_unlock();
2753
2754 const pidlist_entry_t *pe;
2755 SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
2756 for (u_int i = 0; i < pe->pe_nused; i++) {
2757 const pid_t pid = pe->pe_pid[i];
2758 p = proc_find(pid);
2759 if (!p) {
2760 continue;
2761 }
2762 const int callout_ret = callout(p, arg);
2763
2764 switch (callout_ret) {
2765 case PROC_RETURNED_DONE:
2766 proc_rele(p);
2767 /* FALLTHROUGH */
2768 case PROC_CLAIMED_DONE:
2769 goto out;
2770
2771 case PROC_RETURNED:
2772 proc_rele(p);
2773 /* FALLTHROUGH */
2774 case PROC_CLAIMED:
2775 break;
2776 default:
2777 panic("%s: callout =%d for pid %d",
2778 __func__, callout_ret, pid);
2779 break;
2780 }
2781 }
2782 }
2783 out:
2784 pidlist_free(pl);
2785 }
2786
2787 void
2788 pgrp_iterate(
2789 struct pgrp *pgrp,
2790 unsigned int flags,
2791 proc_iterate_fn_t callout,
2792 void * arg,
2793 proc_iterate_fn_t filterfn,
2794 void * filterarg)
2795 {
2796 pidlist_t pid_list, *pl = pidlist_init(&pid_list);
2797 u_int pid_count_available = 0;
2798
2799 assert(pgrp != NULL);
2800 assert(callout != NULL);
2801
2802 for (;;) {
2803 pgrp_lock(pgrp);
2804 pid_count_available = pgrp->pg_membercnt;
2805 if (pid_count_available == 0) {
2806 pgrp_unlock(pgrp);
2807 if (flags & PGRP_DROPREF) {
2808 pg_rele(pgrp);
2809 }
2810 goto out;
2811 }
2812 if (pidlist_nalloc(pl) > pid_count_available) {
2813 break;
2814 }
2815 pgrp_unlock(pgrp);
2816
2817 pidlist_alloc(pl, pid_count_available);
2818 }
2819 pidlist_set_active(pl);
2820
2821 const pid_t pgid = pgrp->pg_id;
2822 u_int pid_count = 0;
2823 proc_t p;
2824 PGMEMBERS_FOREACH(pgrp, p) {
2825 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
2826 continue;
2827 }
2828 pidlist_add_pid(pl, proc_pid(p));
2829 if (++pid_count >= pid_count_available) {
2830 break;
2831 }
2832 }
2833
2834 pgrp_unlock(pgrp);
2835
2836 if (flags & PGRP_DROPREF) {
2837 pg_rele(pgrp);
2838 }
2839
2840 const pidlist_entry_t *pe;
2841 SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
2842 for (u_int i = 0; i < pe->pe_nused; i++) {
2843 const pid_t pid = pe->pe_pid[i];
2844 if (0 == pid) {
2845 continue; /* skip kernproc */
2846 }
2847 p = proc_find(pid);
2848 if (!p) {
2849 continue;
2850 }
2851 if (p->p_pgrpid != pgid) {
2852 proc_rele(p);
2853 continue;
2854 }
2855 const int callout_ret = callout(p, arg);
2856
2857 switch (callout_ret) {
2858 case PROC_RETURNED:
2859 proc_rele(p);
2860 /* FALLTHROUGH */
2861 case PROC_CLAIMED:
2862 break;
2863 case PROC_RETURNED_DONE:
2864 proc_rele(p);
2865 /* FALLTHROUGH */
2866 case PROC_CLAIMED_DONE:
2867 goto out;
2868
2869 default:
2870 panic("%s: callout =%d for pid %d",
2871 __func__, callout_ret, pid);
2872 }
2873 }
2874 }
2875
2876 out:
2877 pidlist_free(pl);
2878 }
2879
2880 static void
2881 pgrp_add(struct pgrp * pgrp, struct proc * parent, struct proc * child)
2882 {
2883 proc_list_lock();
2884 child->p_pgrp = pgrp;
2885 child->p_pgrpid = pgrp->pg_id;
2886 child->p_listflag |= P_LIST_INPGRP;
2887 /*
2888 * While the pgrp is being freed, a process can still request
2889 * addition to it via setpgid (e.g. from bash when a login is
2890 * terminated by the login cycler); such a request returns ESRCH.
2891 * It is safe to hold the lock here due to the refcount on the pgrp.
2892 */
2893 if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
2894 pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
2895 }
2896
2897 if ((pgrp->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD) {
2898 panic("pgrp_add : pgrp is dead adding process");
2899 }
2900 proc_list_unlock();
2901
2902 pgrp_lock(pgrp);
2903 pgrp->pg_membercnt++;
2904 if (parent != PROC_NULL) {
2905 LIST_INSERT_AFTER(parent, child, p_pglist);
2906 } else {
2907 LIST_INSERT_HEAD(&pgrp->pg_members, child, p_pglist);
2908 }
2909 pgrp_unlock(pgrp);
2910
2911 proc_list_lock();
2912 if (((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (pgrp->pg_membercnt != 0)) {
2913 pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
2914 }
2915 proc_list_unlock();
2916 }
2917
2918 static void
2919 pgrp_remove(struct proc * p)
2920 {
2921 struct pgrp * pg;
2922
2923 pg = proc_pgrp(p);
2924
2925 proc_list_lock();
2926 #if __PROC_INTERNAL_DEBUG
2927 if ((p->p_listflag & P_LIST_INPGRP) == 0) {
2928 panic("removing from pglist but no named ref\n");
2929 }
2930 #endif
2931 p->p_pgrpid = PGRPID_DEAD;
2932 p->p_listflag &= ~P_LIST_INPGRP;
2933 p->p_pgrp = NULL;
2934 proc_list_unlock();
2935
2936 if (pg == PGRP_NULL) {
2937 panic("pgrp_remove: pg is NULL");
2938 }
2939 pgrp_lock(pg);
2940 pg->pg_membercnt--;
2941
2942 if (pg->pg_membercnt < 0) {
2943 panic("pgprp: -ve membercnt pgprp:%p p:%p\n", pg, p);
2944 }
2945
2946 LIST_REMOVE(p, p_pglist);
2947 if (pg->pg_members.lh_first == 0) {
2948 pgrp_unlock(pg);
2949 pgdelete_dropref(pg);
2950 } else {
2951 pgrp_unlock(pg);
2952 pg_rele(pg);
2953 }
2954 }
2955
2956
2957 /* cannot use proc_pgrp as it may be stalled */
2958 static void
2959 pgrp_replace(struct proc * p, struct pgrp * newpg)
2960 {
2961 struct pgrp * oldpg;
2962
2963
2964
2965 proc_list_lock();
2966
2967 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2968 p->p_listflag |= P_LIST_PGRPTRWAIT;
2969 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2970 }
2971
2972 p->p_listflag |= P_LIST_PGRPTRANS;
2973
2974 oldpg = p->p_pgrp;
2975 if (oldpg == PGRP_NULL) {
2976 panic("pgrp_replace: oldpg NULL");
2977 }
2978 oldpg->pg_refcount++;
2979 #if __PROC_INTERNAL_DEBUG
2980 if ((p->p_listflag & P_LIST_INPGRP) == 0) {
2981 panic("removing from pglist but no named ref\n");
2982 }
2983 #endif
2984 p->p_pgrpid = PGRPID_DEAD;
2985 p->p_listflag &= ~P_LIST_INPGRP;
2986 p->p_pgrp = NULL;
2987
2988 proc_list_unlock();
2989
2990 pgrp_lock(oldpg);
2991 oldpg->pg_membercnt--;
2992 if (oldpg->pg_membercnt < 0) {
2993 panic("pgprp: -ve membercnt pgprp:%p p:%p\n", oldpg, p);
2994 }
2995 LIST_REMOVE(p, p_pglist);
2996 if (oldpg->pg_members.lh_first == 0) {
2997 pgrp_unlock(oldpg);
2998 pgdelete_dropref(oldpg);
2999 } else {
3000 pgrp_unlock(oldpg);
3001 pg_rele(oldpg);
3002 }
3003
3004 proc_list_lock();
3005 p->p_pgrp = newpg;
3006 p->p_pgrpid = newpg->pg_id;
3007 p->p_listflag |= P_LIST_INPGRP;
3008 /*
3009 * While the pgrp is being freed, a process can still request
3010 * addition to it via setpgid (e.g. from bash when a login is
3011 * terminated by the login cycler); such a request returns ESRCH.
3012 * It is safe to hold the lock here due to the refcount on the pgrp.
3013 */
3014 if ((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
3015 newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
3016 }
3017
3018 if ((newpg->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD) {
3019 panic("pgrp_add : pgrp is dead adding process");
3020 }
3021 proc_list_unlock();
3022
3023 pgrp_lock(newpg);
3024 newpg->pg_membercnt++;
3025 LIST_INSERT_HEAD(&newpg->pg_members, p, p_pglist);
3026 pgrp_unlock(newpg);
3027
3028 proc_list_lock();
3029 if (((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (newpg->pg_membercnt != 0)) {
3030 newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
3031 }
3032
3033 p->p_listflag &= ~P_LIST_PGRPTRANS;
3034 if ((p->p_listflag & P_LIST_PGRPTRWAIT) == P_LIST_PGRPTRWAIT) {
3035 p->p_listflag &= ~P_LIST_PGRPTRWAIT;
3036 wakeup(&p->p_pgrpid);
3037 }
3038 proc_list_unlock();
3039 }
3040
3041 void
3042 pgrp_lock(struct pgrp * pgrp)
3043 {
3044 lck_mtx_lock(&pgrp->pg_mlock);
3045 }
3046
3047 void
3048 pgrp_unlock(struct pgrp * pgrp)
3049 {
3050 lck_mtx_unlock(&pgrp->pg_mlock);
3051 }
3052
3053 void
3054 session_lock(struct session * sess)
3055 {
3056 lck_mtx_lock(&sess->s_mlock);
3057 }
3058
3059
3060 void
3061 session_unlock(struct session * sess)
3062 {
3063 lck_mtx_unlock(&sess->s_mlock);
3064 }
3065
3066 struct pgrp *
3067 proc_pgrp(proc_t p)
3068 {
3069 struct pgrp * pgrp;
3070
3071 if (p == PROC_NULL) {
3072 return PGRP_NULL;
3073 }
3074 proc_list_lock();
3075
3076 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
3077 p->p_listflag |= P_LIST_PGRPTRWAIT;
3078 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
3079 }
3080
3081 pgrp = p->p_pgrp;
3082
3083 assert(pgrp != NULL);
3084
3085 if (pgrp != PGRP_NULL) {
3086 pgrp->pg_refcount++;
3087 if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) != 0) {
3088 panic("proc_pgrp: ref being povided for dead pgrp");
3089 }
3090 }
3091
3092 proc_list_unlock();
3093
3094 return pgrp;
3095 }
3096
3097 struct pgrp *
3098 tty_pgrp(struct tty * tp)
3099 {
3100 struct pgrp * pg = PGRP_NULL;
3101
3102 proc_list_lock();
3103 pg = tp->t_pgrp;
3104
3105 if (pg != PGRP_NULL) {
3106 if ((pg->pg_listflags & PGRP_FLAG_DEAD) != 0) {
3107 panic("tty_pgrp: ref being povided for dead pgrp");
3108 }
3109 pg->pg_refcount++;
3110 }
3111 proc_list_unlock();
3112
3113 return pg;
3114 }
3115
3116 struct session *
3117 proc_session(proc_t p)
3118 {
3119 struct session * sess = SESSION_NULL;
3120
3121 if (p == PROC_NULL) {
3122 return SESSION_NULL;
3123 }
3124
3125 proc_list_lock();
3126
3127 /* wait during transitions */
3128 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
3129 p->p_listflag |= P_LIST_PGRPTRWAIT;
3130 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
3131 }
3132
3133 if ((p->p_pgrp != PGRP_NULL) && ((sess = p->p_pgrp->pg_session) != SESSION_NULL)) {
3134 if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
3135 panic("proc_session:returning sesssion ref on terminating session");
3136 }
3137 sess->s_count++;
3138 }
3139 proc_list_unlock();
3140 return sess;
3141 }
3142
3143 void
3144 session_rele(struct session *sess)
3145 {
3146 proc_list_lock();
3147 if (--sess->s_count == 0) {
3148 if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
3149 panic("session_rele: terminating already terminated session");
3150 }
3151 sess->s_listflags |= S_LIST_TERM;
3152 LIST_REMOVE(sess, s_hash);
3153 sess->s_listflags |= S_LIST_DEAD;
3154 if (sess->s_count != 0) {
3155 panic("session_rele: freeing session in use");
3156 }
3157 proc_list_unlock();
3158 lck_mtx_destroy(&sess->s_mlock, proc_mlock_grp);
3159 FREE_ZONE(sess, sizeof(struct session), M_SESSION);
3160 } else {
3161 proc_list_unlock();
3162 }
3163 }
3164
3165 int
3166 proc_transstart(proc_t p, int locked, int non_blocking)
3167 {
3168 if (locked == 0) {
3169 proc_lock(p);
3170 }
3171 while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
3172 if (((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT) || non_blocking) {
3173 if (locked == 0) {
3174 proc_unlock(p);
3175 }
3176 return EDEADLK;
3177 }
3178 p->p_lflag |= P_LTRANSWAIT;
3179 msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
3180 }
3181 p->p_lflag |= P_LINTRANSIT;
3182 p->p_transholder = current_thread();
3183 if (locked == 0) {
3184 proc_unlock(p);
3185 }
3186 return 0;
3187 }
3188
3189 void
3190 proc_transcommit(proc_t p, int locked)
3191 {
3192 if (locked == 0) {
3193 proc_lock(p);
3194 }
3195
3196 assert((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT);
3197 assert(p->p_transholder == current_thread());
3198 p->p_lflag |= P_LTRANSCOMMIT;
3199
3200 if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
3201 p->p_lflag &= ~P_LTRANSWAIT;
3202 wakeup(&p->p_lflag);
3203 }
3204 if (locked == 0) {
3205 proc_unlock(p);
3206 }
3207 }
3208
3209 void
3210 proc_transend(proc_t p, int locked)
3211 {
3212 if (locked == 0) {
3213 proc_lock(p);
3214 }
3215
3216 p->p_lflag &= ~(P_LINTRANSIT | P_LTRANSCOMMIT);
3217 p->p_transholder = NULL;
3218
3219 if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
3220 p->p_lflag &= ~P_LTRANSWAIT;
3221 wakeup(&p->p_lflag);
3222 }
3223 if (locked == 0) {
3224 proc_unlock(p);
3225 }
3226 }
3227
3228 int
3229 proc_transwait(proc_t p, int locked)
3230 {
3231 if (locked == 0) {
3232 proc_lock(p);
3233 }
3234 while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
3235 if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT && current_proc() == p) {
3236 if (locked == 0) {
3237 proc_unlock(p);
3238 }
3239 return EDEADLK;
3240 }
3241 p->p_lflag |= P_LTRANSWAIT;
3242 msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
3243 }
3244 if (locked == 0) {
3245 proc_unlock(p);
3246 }
3247 return 0;
3248 }
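/*
 * Illustrative sketch (hedged, not part of this file): how the
 * transition primitives above pair up. The thread driving an exec-like
 * transition brackets it with proc_transstart()/proc_transend(), and
 * may mark the point of no return with proc_transcommit(); other
 * threads needing a stable proc call proc_transwait(), as
 * proc_iterate() does above.
 *
 *	if (proc_transstart(p, 0, 0) == 0) {	// unlocked, blocking
 *		// ... mutate proc/task state ...
 *		proc_transcommit(p, 0);	// a self-wait now returns EDEADLK
 *		// ... finish the transition ...
 *		proc_transend(p, 0);	// wakes P_LTRANSWAIT sleepers
 *	}
 */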
3249
3250 void
3251 proc_klist_lock(void)
3252 {
3253 lck_mtx_lock(proc_klist_mlock);
3254 }
3255
3256 void
3257 proc_klist_unlock(void)
3258 {
3259 lck_mtx_unlock(proc_klist_mlock);
3260 }
3261
3262 void
3263 proc_knote(struct proc * p, long hint)
3264 {
3265 proc_klist_lock();
3266 KNOTE(&p->p_klist, hint);
3267 proc_klist_unlock();
3268 }
3269
3270 void
3271 proc_knote_drain(struct proc *p)
3272 {
3273 struct knote *kn = NULL;
3274
3275 /*
3276 * Clear the proc's klist to avoid references after the proc is reaped.
3277 */
3278 proc_klist_lock();
3279 while ((kn = SLIST_FIRST(&p->p_klist))) {
3280 kn->kn_proc = PROC_NULL;
3281 KNOTE_DETACH(&p->p_klist, kn);
3282 }
3283 proc_klist_unlock();
3284 }
3285
3286 void
3287 proc_setregister(proc_t p)
3288 {
3289 proc_lock(p);
3290 p->p_lflag |= P_LREGISTER;
3291 proc_unlock(p);
3292 }
3293
3294 void
3295 proc_resetregister(proc_t p)
3296 {
3297 proc_lock(p);
3298 p->p_lflag &= ~P_LREGISTER;
3299 proc_unlock(p);
3300 }
3301
3302 pid_t
3303 proc_pgrpid(proc_t p)
3304 {
3305 return p->p_pgrpid;
3306 }
3307
3308 pid_t
3309 proc_sessionid(proc_t p)
3310 {
3311 pid_t sid = -1;
3312 struct session * sessp = proc_session(p);
3313
3314 if (sessp != SESSION_NULL) {
3315 sid = sessp->s_sid;
3316 session_rele(sessp);
3317 }
3318
3319 return sid;
3320 }
3321
3322 pid_t
3323 proc_selfpgrpid(void)
3324 {
3325 return current_proc()->p_pgrpid;
3326 }
3327
3328
3329 /* return control and action states */
3330 int
3331 proc_getpcontrol(int pid, int * pcontrolp)
3332 {
3333 proc_t p;
3334
3335 p = proc_find(pid);
3336 if (p == PROC_NULL) {
3337 return ESRCH;
3338 }
3339 if (pcontrolp != NULL) {
3340 *pcontrolp = p->p_pcaction;
3341 }
3342
3343 proc_rele(p);
3344 return 0;
3345 }
3346
3347 int
3348 proc_dopcontrol(proc_t p)
3349 {
3350 int pcontrol;
3351 os_reason_t kill_reason;
3352
3353 proc_lock(p);
3354
3355 pcontrol = PROC_CONTROL_STATE(p);
3356
3357 if (PROC_ACTION_STATE(p) == 0) {
3358 switch (pcontrol) {
3359 case P_PCTHROTTLE:
3360 PROC_SETACTION_STATE(p);
3361 proc_unlock(p);
3362 printf("low swap: throttling pid %d (%s)\n", p->p_pid, p->p_comm);
3363 break;
3364
3365 case P_PCSUSP:
3366 PROC_SETACTION_STATE(p);
3367 proc_unlock(p);
3368 printf("low swap: suspending pid %d (%s)\n", p->p_pid, p->p_comm);
3369 task_suspend(p->task);
3370 break;
3371
3372 case P_PCKILL:
3373 PROC_SETACTION_STATE(p);
3374 proc_unlock(p);
3375 printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
3376 kill_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_LOWSWAP);
3377 psignal_with_reason(p, SIGKILL, kill_reason);
3378 break;
3379
3380 default:
3381 proc_unlock(p);
3382 }
3383 } else {
3384 proc_unlock(p);
3385 }
3386
3387 return PROC_RETURNED;
3388 }
3389
3390
3391 /*
3392 * Resume a throttled or suspended process. This is an internal interface that's only
3393 * used by the user level code that presents the GUI when we run out of swap space and
3394 * hence is restricted to processes with superuser privileges.
3395 */
3396
3397 int
3398 proc_resetpcontrol(int pid)
3399 {
3400 proc_t p;
3401 int pcontrol;
3402 int error;
3403 proc_t self = current_proc();
3404
3405 /* allow if the process has been validated to handle resource control, or the caller has superuser privileges */
3406 if (((self->p_lflag & P_LVMRSRCOWNER) == 0) && (error = suser(kauth_cred_get(), 0))) {
3407 return error;
3408 }
3409
3410 p = proc_find(pid);
3411 if (p == PROC_NULL) {
3412 return ESRCH;
3413 }
3414
3415 proc_lock(p);
3416
3417 pcontrol = PROC_CONTROL_STATE(p);
3418
3419 if (PROC_ACTION_STATE(p) != 0) {
3420 switch (pcontrol) {
3421 case P_PCTHROTTLE:
3422 PROC_RESETACTION_STATE(p);
3423 proc_unlock(p);
3424 printf("low swap: unthrottling pid %d (%s)\n", p->p_pid, p->p_comm);
3425 break;
3426
3427 case P_PCSUSP:
3428 PROC_RESETACTION_STATE(p);
3429 proc_unlock(p);
3430 printf("low swap: resuming pid %d (%s)\n", p->p_pid, p->p_comm);
3431 task_resume(p->task);
3432 break;
3433
3434 case P_PCKILL:
3435 /* Huh? */
3436 PROC_SETACTION_STATE(p);
3437 proc_unlock(p);
3438 printf("low swap: attempt to unkill pid %d (%s) ignored\n", p->p_pid, p->p_comm);
3439 break;
3440
3441 default:
3442 proc_unlock(p);
3443 }
3444 } else {
3445 proc_unlock(p);
3446 }
3447
3448 proc_rele(p);
3449 return 0;
3450 }
3451
3452
3453
3454 struct no_paging_space {
3455 uint64_t pcs_max_size;
3456 uint64_t pcs_uniqueid;
3457 int pcs_pid;
3458 int pcs_proc_count;
3459 uint64_t pcs_total_size;
3460
3461 uint64_t npcs_max_size;
3462 uint64_t npcs_uniqueid;
3463 int npcs_pid;
3464 int npcs_proc_count;
3465 uint64_t npcs_total_size;
3466
3467 int apcs_proc_count;
3468 uint64_t apcs_total_size;
3469 };
3470
3471
3472 static int
3473 proc_pcontrol_filter(proc_t p, void *arg)
3474 {
3475 struct no_paging_space *nps;
3476 uint64_t compressed;
3477
3478 nps = (struct no_paging_space *)arg;
3479
3480 compressed = get_task_compressed(p->task);
3481
3482 if (PROC_CONTROL_STATE(p)) {
3483 if (PROC_ACTION_STATE(p) == 0) {
3484 if (compressed > nps->pcs_max_size) {
3485 nps->pcs_pid = p->p_pid;
3486 nps->pcs_uniqueid = p->p_uniqueid;
3487 nps->pcs_max_size = compressed;
3488 }
3489 nps->pcs_total_size += compressed;
3490 nps->pcs_proc_count++;
3491 } else {
3492 nps->apcs_total_size += compressed;
3493 nps->apcs_proc_count++;
3494 }
3495 } else {
3496 if (compressed > nps->npcs_max_size) {
3497 nps->npcs_pid = p->p_pid;
3498 nps->npcs_uniqueid = p->p_uniqueid;
3499 nps->npcs_max_size = compressed;
3500 }
3501 nps->npcs_total_size += compressed;
3502 nps->npcs_proc_count++;
3503 }
3504 return 0;
3505 }
3506
3507
3508 static int
3509 proc_pcontrol_null(__unused proc_t p, __unused void *arg)
3510 {
3511 return PROC_RETURNED;
3512 }
3513
3514
3515 /*
3516 * Deal with the low on compressor pool space condition... this function
3517 * gets called when we are approaching the limits of the compressor pool or
3518 * we are unable to create a new swap file.
3519 * Since this eventually creates a memory deadlock situation, we need to take action to free up
3520 * memory resources (both compressed and uncompressed) in order to prevent the system from hanging completely.
3521 * There are 2 categories of processes to deal with. Those that have an action
3522 * associated with them by the task itself and those that do not. Actionable
3523 * tasks can have one of three categories specified: ones that
3524 * can be killed immediately, ones that should be suspended, and ones that should
3525 * be throttled. Processes that do not have an action associated with them are normally
3526 * ignored unless they are utilizing such a large percentage of the compressor pool (currently 50%)
3527 * that only by killing them can we hope to put the system back into a usable state.
3528 */
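/*
 * A worked example of the 50% rule implemented below (numbers are
 * hypothetical): if vm_compressor_pages_compressed() reports 200,000
 * compressed pages, the threshold is 100,000; a non-actionable process
 * holding npcs_max_size = 120,000 pages exceeds it and is killed
 * outright. Below the threshold, the memorystatus (jetsam) path is
 * tried first, then the pcontrol action on the largest actionable
 * process.
 */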
3529
3530 #define NO_PAGING_SPACE_DEBUG 0
3531
3532 extern uint64_t vm_compressor_pages_compressed(void);
3533
3534 struct timeval last_no_space_action = {.tv_sec = 0, .tv_usec = 0};
3535
3536 #if DEVELOPMENT || DEBUG
3537 extern boolean_t kill_on_no_paging_space;
3538 #endif /* DEVELOPMENT || DEBUG */
3539
3540 #define MB_SIZE (1024 * 1024ULL)
3541 boolean_t memorystatus_kill_on_VM_compressor_space_shortage(boolean_t);
3542
3543 extern int32_t max_kill_priority;
3544 extern int memorystatus_get_proccnt_upto_priority(int32_t max_bucket_index);
3545
3546 int
3547 no_paging_space_action(void)
3548 {
3549 proc_t p;
3550 struct no_paging_space nps;
3551 struct timeval now;
3552 os_reason_t kill_reason;
3553
3554 /*
3555 * Throttle how often we come through here. Once every 5 seconds should be plenty.
3556 */
3557 microtime(&now);
3558
3559 if (now.tv_sec <= last_no_space_action.tv_sec + 5) {
3560 return 0;
3561 }
3562
3563 /*
3564 * Examine all processes and find the biggest (biggest is based on the number of pages this
3565 * task has in the compressor pool) that has been marked to have some action
3566 * taken when swap space runs out... we also find the biggest that hasn't been marked for
3567 * action.
3568 *
3569 * If the biggest non-actionable task is over the "dangerously big" threshold (currently 50% of
3570 * the total number of pages held by the compressor), we go ahead and kill it since no other task
3571 * can have any real effect on the situation. Otherwise, we go after the actionable process.
3572 */
3573 bzero(&nps, sizeof(nps));
3574
3575 proc_iterate(PROC_ALLPROCLIST, proc_pcontrol_null, (void *)NULL, proc_pcontrol_filter, (void *)&nps);
3576
3577 #if NO_PAGING_SPACE_DEBUG
3578 printf("low swap: npcs_proc_count = %d, npcs_total_size = %qd, npcs_max_size = %qd\n",
3579 nps.npcs_proc_count, nps.npcs_total_size, nps.npcs_max_size);
3580 printf("low swap: pcs_proc_count = %d, pcs_total_size = %qd, pcs_max_size = %qd\n",
3581 nps.pcs_proc_count, nps.pcs_total_size, nps.pcs_max_size);
3582 printf("low swap: apcs_proc_count = %d, apcs_total_size = %qd\n",
3583 nps.apcs_proc_count, nps.apcs_total_size);
3584 #endif
3585 if (nps.npcs_max_size > (vm_compressor_pages_compressed() * 50) / 100) {
3586 /*
3587 * for now we'll knock out any task that has more than 50% of the pages
3588 * held by the compressor
3589 */
3590 if ((p = proc_find(nps.npcs_pid)) != PROC_NULL) {
3591 if (nps.npcs_uniqueid == p->p_uniqueid) {
3592 /*
3593 * verify this is still the same process
3594 * in case the proc exited and the pid got reused while
3595 * we were finishing the proc_iterate and getting to this point
3596 */
3597 last_no_space_action = now;
3598
3599 printf("low swap: killing largest compressed process with pid %d (%s) and size %llu MB\n", p->p_pid, p->p_comm, (nps.pcs_max_size / MB_SIZE));
3600 kill_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_LOWSWAP);
3601 psignal_with_reason(p, SIGKILL, kill_reason);
3602
3603 proc_rele(p);
3604
3605 return 0;
3606 }
3607
3608 proc_rele(p);
3609 }
3610 }
3611
3612 /*
3613 * There are processes within the jetsam bands under consideration, and hence candidates
3614 * that can be killed; invoke the memorystatus thread to go ahead and kill one.
3615 */
3616 if (memorystatus_get_proccnt_upto_priority(max_kill_priority) > 0) {
3617 last_no_space_action = now;
3618 memorystatus_kill_on_VM_compressor_space_shortage(TRUE /* async */);
3619 return 1;
3620 }
3621
3622 /*
3623 * No eligible processes to kill. So let's suspend/kill the largest
3624 * process depending on its policy control specifications.
3625 */
3626
3627 if (nps.pcs_max_size > 0) {
3628 if ((p = proc_find(nps.pcs_pid)) != PROC_NULL) {
3629 if (nps.pcs_uniqueid == p->p_uniqueid) {
3630 /*
3631 * verify this is still the same process
3632 * in case the proc exited and the pid got reused while
3633 * we were finishing the proc_iterate and getting to this point
3634 */
3635 last_no_space_action = now;
3636
3637 proc_dopcontrol(p);
3638
3639 proc_rele(p);
3640
3641 return 1;
3642 }
3643
3644 proc_rele(p);
3645 }
3646 }
3647 last_no_space_action = now;
3648
3649 printf("low swap: unable to find any eligible processes to take action on\n");
3650
3651 return 0;
3652 }
3653
3654 int
3655 proc_trace_log(__unused proc_t p, struct proc_trace_log_args *uap, __unused int *retval)
3656 {
3657 int ret = 0;
3658 proc_t target_proc = PROC_NULL;
3659 pid_t target_pid = uap->pid;
3660 uint64_t target_uniqueid = uap->uniqueid;
3661 task_t target_task = NULL;
3662
3663 if (priv_check_cred(kauth_cred_get(), PRIV_PROC_TRACE_INSPECT, 0)) {
3664 ret = EPERM;
3665 goto out;
3666 }
3667 target_proc = proc_find(target_pid);
3668 if (target_proc != PROC_NULL) {
3669 if (target_uniqueid != proc_uniqueid(target_proc)) {
3670 ret = ENOENT;
3671 goto out;
3672 }
3673
3674 target_task = proc_task(target_proc);
3675 if (task_send_trace_memory(target_task, target_pid, target_uniqueid)) {
3676 ret = EINVAL;
3677 goto out;
3678 }
3679 } else {
3680 ret = ENOENT;
3681 }
3682
3683 out:
3684 if (target_proc != PROC_NULL) {
3685 proc_rele(target_proc);
3686 }
3687 return ret;
3688 }
3689
3690 #if VM_SCAN_FOR_SHADOW_CHAIN
3691 extern int vm_map_shadow_max(vm_map_t map);
3692 int proc_shadow_max(void);
3693 int
3694 proc_shadow_max(void)
3695 {
3696 int retval, max;
3697 proc_t p;
3698 task_t task;
3699 vm_map_t map;
3700
3701 max = 0;
3702 proc_list_lock();
3703 for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
3704 if (p->p_stat == SIDL) {
3705 continue;
3706 }
3707 task = p->task;
3708 if (task == NULL) {
3709 continue;
3710 }
3711 map = get_task_map(task);
3712 if (map == NULL) {
3713 continue;
3714 }
3715 retval = vm_map_shadow_max(map);
3716 if (retval > max) {
3717 max = retval;
3718 }
3719 }
3720 proc_list_unlock();
3721 return max;
3722 }
3723 #endif /* VM_SCAN_FOR_SHADOW_CHAIN */
3724
3725 void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid);
3726 void
3727 proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid)
3728 {
3729 if (target_proc != NULL) {
3730 target_proc->p_responsible_pid = responsible_pid;
3731 }
3732 return;
3733 }
3734
3735 int
3736 proc_chrooted(proc_t p)
3737 {
3738 int retval = 0;
3739
3740 if (p) {
3741 proc_fdlock(p);
3742 retval = (p->p_fd->fd_rdir != NULL) ? 1 : 0;
3743 proc_fdunlock(p);
3744 }
3745
3746 return retval;
3747 }
3748
3749 boolean_t
3750 proc_send_synchronous_EXC_RESOURCE(proc_t p)
3751 {
3752 if (p == PROC_NULL) {
3753 return FALSE;
3754 }
3755
3756 /* Send sync EXC_RESOURCE if the process is traced */
3757 if (ISSET(p->p_lflag, P_LTRACED)) {
3758 return TRUE;
3759 }
3760 return FALSE;
3761 }
3762
3763 size_t
3764 proc_get_syscall_filter_mask_size(int which)
3765 {
3766 if (which == SYSCALL_MASK_UNIX) {
3767 return nsysent;
3768 }
3769
3770 return 0;
3771 }
3772
3773 int
3774 proc_set_syscall_filter_mask(proc_t p, int which, unsigned char *maskptr, size_t masklen)
3775 {
3776 #if DEVELOPMENT || DEBUG
3777 if (syscallfilter_disable) {
3778 printf("proc_set_syscall_filter_mask: attempt to set policy for pid %d, but disabled by boot-arg\n", proc_pid(p));
3779 return KERN_SUCCESS;
3780 }
3781 #endif // DEVELOPMENT || DEBUG
3782
3783 if (which != SYSCALL_MASK_UNIX ||
3784 (maskptr != NULL && masklen != nsysent)) {
3785 return EINVAL;
3786 }
3787
3788 p->syscall_filter_mask = maskptr;
3789
3790 return KERN_SUCCESS;
3791 }
3792
3793 #ifdef CONFIG_32BIT_TELEMETRY
3794 void
3795 proc_log_32bit_telemetry(proc_t p)
3796 {
3797 /* Gather info */
3798 char signature_buf[MAX_32BIT_EXEC_SIG_SIZE] = { 0 };
3799 char * signature_cur_end = &signature_buf[0];
3800 char * signature_buf_end = &signature_buf[MAX_32BIT_EXEC_SIG_SIZE - 1];
3801 int bytes_printed = 0;
3802
3803 const char * teamid = NULL;
3804 const char * identity = NULL;
3805 struct cs_blob * csblob = NULL;
3806
3807 proc_list_lock();
3808
3809 /*
3810 * Get proc name and parent proc name; if the parent execs, we'll get a
3811 * garbled name.
3812 */
3813 bytes_printed = scnprintf(signature_cur_end,
3814 signature_buf_end - signature_cur_end,
3815 "%s,%s,", p->p_name,
3816 (p->p_pptr ? p->p_pptr->p_name : ""));
3817
3818 if (bytes_printed > 0) {
3819 signature_cur_end += bytes_printed;
3820 }
3821
3822 proc_list_unlock();
3823
3824 /* Get developer info. */
3825 vnode_t v = proc_getexecutablevnode(p);
3826
3827 if (v) {
3828 csblob = csvnode_get_blob(v, 0);
3829
3830 if (csblob) {
3831 teamid = csblob_get_teamid(csblob);
3832 identity = csblob_get_identity(csblob);
3833 }
3834 }
3835
3836 if (teamid == NULL) {
3837 teamid = "";
3838 }
3839
3840 if (identity == NULL) {
3841 identity = "";
3842 }
3843
3844 bytes_printed = scnprintf(signature_cur_end,
3845 signature_buf_end - signature_cur_end,
3846 "%s,%s", teamid, identity);
3847
3848 if (bytes_printed > 0) {
3849 signature_cur_end += bytes_printed;
3850 }
3851
3852 if (v) {
3853 vnode_put(v);
3854 }
3855
3856 /*
3857 * We may want to rate limit here, although the SUMMARIZE key should
3858 * help us aggregate events in userspace.
3859 */
3860
3861 /* Emit log */
3862 kern_asl_msg(LOG_DEBUG, "messagetracer", 3,
3863 /* 0 */ "com.apple.message.domain", "com.apple.kernel.32bit_exec",
3864 /* 1 */ "com.apple.message.signature", signature_buf,
3865 /* 2 */ "com.apple.message.summarize", "YES",
3866 NULL);
3867 }
3868 #endif /* CONFIG_32BIT_TELEMETRY */