1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)kern_proc.c 8.4 (Berkeley) 1/4/94
62 */
63 /*
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
67 * Version 2.0.
68 */
69 /* HISTORY
70 * 04-Aug-97 Umesh Vaishampayan (umeshv@apple.com)
71 * Added current_proc_EXTERNAL() function for the use of kernel
72 * loadable modules.
73 *
74 * 05-Jun-95 Mac Gillon (mgillon) at NeXT
75 * New version based on 3.3NS and 4.4
76 */
77
78
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/kernel.h>
82 #include <sys/proc_internal.h>
83 #include <sys/acct.h>
84 #include <sys/wait.h>
85 #include <sys/file_internal.h>
86 #include <sys/uio.h>
87 #include <sys/malloc.h>
88 #include <sys/lock.h>
89 #include <sys/mbuf.h>
90 #include <sys/ioctl.h>
91 #include <sys/tty.h>
92 #include <sys/signalvar.h>
93 #include <sys/syslog.h>
94 #include <sys/sysctl.h>
95 #include <sys/sysproto.h>
96 #include <sys/kauth.h>
97 #include <sys/codesign.h>
98 #include <sys/kernel_types.h>
99 #include <sys/ubc.h>
100 #include <kern/kalloc.h>
101 #include <kern/task.h>
102 #include <kern/coalition.h>
103 #include <sys/coalition.h>
104 #include <kern/assert.h>
105 #include <vm/vm_protos.h>
106 #include <vm/vm_map.h> /* vm_map_switch_protect() */
107 #include <vm/vm_pageout.h>
108 #include <mach/task.h>
109 #include <mach/message.h>
110 #include <sys/priv.h>
111 #include <sys/proc_info.h>
112 #include <sys/bsdtask_info.h>
113 #include <sys/persona.h>
114 #include <sys/sysent.h>
115 #include <sys/reason.h>
116
117 #ifdef CONFIG_32BIT_TELEMETRY
118 #include <sys/kasl.h>
119 #endif /* CONFIG_32BIT_TELEMETRY */
120
121 #if CONFIG_CSR
122 #include <sys/csr.h>
123 #endif
124
125 #if CONFIG_MEMORYSTATUS
126 #include <sys/kern_memorystatus.h>
127 #endif
128
129 #if CONFIG_MACF
130 #include <security/mac_framework.h>
131 #endif
132
133 #include <libkern/crypto/sha1.h>
134
135 #ifdef CONFIG_32BIT_TELEMETRY
136 #define MAX_32BIT_EXEC_SIG_SIZE 160
137 #endif /* CONFIG_32BIT_TELEMETRY */
138
139 /*
140 * Structure associated with user caching.
141 */
142 struct uidinfo {
143 LIST_ENTRY(uidinfo) ui_hash;
144 uid_t ui_uid;
145 long ui_proccnt;
146 };
147 #define UIHASH(uid) (&uihashtbl[(uid) & uihash])
148 LIST_HEAD(uihashhead, uidinfo) * uihashtbl;
149 u_long uihash; /* size of hash table - 1 */
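/*
 * Illustrative sketch (not part of the original file): UIHASH masks the
 * uid with uihash (hashinit builds a power-of-two table, so uihash is
 * size - 1) to pick a bucket; a lookup then walks one short chain:
 *
 *	struct uihashhead *bucket = UIHASH(501);  // &uihashtbl[501 & uihash]
 *	struct uidinfo *uip;
 *	LIST_FOREACH(uip, bucket, ui_hash) {
 *		if (uip->ui_uid == 501) {
 *			break;  // found the per-uid process count
 *		}
 *	}
 */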
150
151 /*
152 * Other process lists
153 */
154 struct pidhashhead *pidhashtbl;
155 u_long pidhash;
156 struct pgrphashhead *pgrphashtbl;
157 u_long pgrphash;
158 struct sesshashhead *sesshashtbl;
159 u_long sesshash;
160
161 struct proclist allproc;
162 struct proclist zombproc;
163 extern struct tty cons;
164
165 extern int cs_debug;
166
167 #if DEVELOPMENT || DEBUG
168 int syscallfilter_disable = 0;
169 #endif // DEVELOPMENT || DEBUG
170
171 #if DEBUG
172 #define __PROC_INTERNAL_DEBUG 1
173 #endif
174 #if CONFIG_COREDUMP
175 /* Name to give to core files */
176 #if defined(XNU_TARGET_OS_BRIDGE)
177 __XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/private/var/internal/%N.core"};
178 #elif CONFIG_EMBEDDED
179 __XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/private/var/cores/%N.core"};
180 #else
181 __XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/cores/core.%P"};
182 #endif
183 #endif
184
185 #if PROC_REF_DEBUG
186 #include <kern/backtrace.h>
187 #endif
188
189 typedef uint64_t unaligned_u64 __attribute__((aligned(1)));
190
191 static void orphanpg(struct pgrp * pg);
192 void proc_name_kdp(task_t t, char * buf, int size);
193 boolean_t proc_binary_uuid_kdp(task_t task, uuid_t uuid);
194 int proc_threadname_kdp(void * uth, char * buf, size_t size);
195 void proc_starttime_kdp(void * p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unaligned_u64 *abstime);
196 char * proc_name_address(void * p);
197
198 static void pgrp_add(struct pgrp * pgrp, proc_t parent, proc_t child);
199 static void pgrp_remove(proc_t p);
200 static void pgrp_replace(proc_t p, struct pgrp *pgrp);
201 static void pgdelete_dropref(struct pgrp *pgrp);
202 extern void pg_rele_dropref(struct pgrp * pgrp);
203 static int csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaddittoken);
204 static boolean_t proc_parent_is_currentproc(proc_t p);
205
206 struct fixjob_iterargs {
207 struct pgrp * pg;
208 struct session * mysession;
209 int entering;
210 };
211
212 int fixjob_callback(proc_t, void *);
213
214 uint64_t
215 get_current_unique_pid(void)
216 {
217 proc_t p = current_proc();
218
219 if (p) {
220 return p->p_uniqueid;
221 } else {
222 return 0;
223 }
224 }
225
226 /*
227 * Initialize global process hashing structures.
228 */
229 void
230 procinit(void)
231 {
232 LIST_INIT(&allproc);
233 LIST_INIT(&zombproc);
234 pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
235 pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
236 sesshashtbl = hashinit(maxproc / 4, M_PROC, &sesshash);
237 uihashtbl = hashinit(maxproc / 16, M_PROC, &uihash);
238 #if CONFIG_PERSONAS
239 personas_bootstrap();
240 #endif
241 }
242
243 /*
244 * Change the count associated with the number of processes
245 * a given user is using. This routine protects the uihash
246 * with the list lock.
247 */
248 int
249 chgproccnt(uid_t uid, int diff)
250 {
251 struct uidinfo *uip;
252 struct uidinfo *newuip = NULL;
253 struct uihashhead *uipp;
254 int retval;
255
256 again:
257 proc_list_lock();
258 uipp = UIHASH(uid);
259 for (uip = uipp->lh_first; uip != 0; uip = uip->ui_hash.le_next) {
260 if (uip->ui_uid == uid) {
261 break;
262 }
263 }
264 if (uip) {
265 uip->ui_proccnt += diff;
266 if (uip->ui_proccnt > 0) {
267 retval = uip->ui_proccnt;
268 proc_list_unlock();
269 goto out;
270 }
271 if (uip->ui_proccnt < 0) {
272 panic("chgproccnt: procs < 0");
273 }
274 LIST_REMOVE(uip, ui_hash);
275 retval = 0;
276 proc_list_unlock();
277 FREE_ZONE(uip, sizeof(*uip), M_PROC);
278 goto out;
279 }
280 if (diff <= 0) {
281 if (diff == 0) {
282 retval = 0;
283 proc_list_unlock();
284 goto out;
285 }
286 panic("chgproccnt: lost user");
287 }
288 if (newuip != NULL) {
289 uip = newuip;
290 newuip = NULL;
291 LIST_INSERT_HEAD(uipp, uip, ui_hash);
292 uip->ui_uid = uid;
293 uip->ui_proccnt = diff;
294 retval = diff;
295 proc_list_unlock();
296 goto out;
297 }
298 proc_list_unlock();
299 MALLOC_ZONE(newuip, struct uidinfo *, sizeof(*uip), M_PROC, M_WAITOK);
300 if (newuip == NULL) {
301 panic("chgproccnt: M_PROC zone depleted");
302 }
303 goto again;
304 out:
305 if (newuip != NULL) {
306 FREE_ZONE(newuip, sizeof(*uip), M_PROC);
307 }
308 return retval;
309 }
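/*
 * Usage sketch (illustrative; the real caller is the fork path, which is
 * not in this file): the per-uid count is bumped +1/-1 and the result is
 * compared against the per-user process limit, roughly:
 *
 *	if (chgproccnt(uid, 1) > maxprocperuid) {
 *		(void)chgproccnt(uid, -1);	// back out: over the limit
 *		return EAGAIN;
 *	}
 */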
310
311 /*
312 * Is p an inferior of the current process?
313 */
314 int
315 inferior(proc_t p)
316 {
317 int retval = 0;
318
319 proc_list_lock();
320 for (; p != current_proc(); p = p->p_pptr) {
321 if (p->p_pid == 0) {
322 goto out;
323 }
324 }
325 retval = 1;
326 out:
327 proc_list_unlock();
328 return retval;
329 }
330
331 /*
332 * Is p an inferior of t ?
333 */
334 int
335 isinferior(proc_t p, proc_t t)
336 {
337 int retval = 0;
338 int nchecked = 0;
339 proc_t start = p;
340
341 /* if p==t they are not inferior */
342 if (p == t) {
343 return 0;
344 }
345
346 proc_list_lock();
347 for (; p != t; p = p->p_pptr) {
348 nchecked++;
349
350 /* Detect here if we're in a cycle */
351 if ((p->p_pid == 0) || (p->p_pptr == start) || (nchecked >= nprocs)) {
352 goto out;
353 }
354 }
355 retval = 1;
356 out:
357 proc_list_unlock();
358 return retval;
359 }
360
361 int
362 proc_isinferior(int pid1, int pid2)
363 {
364 proc_t p = PROC_NULL;
365 proc_t t = PROC_NULL;
366 int retval = 0;
367
368 if (((p = proc_find(pid1)) != (proc_t)0) && ((t = proc_find(pid2)) != (proc_t)0)) {
369 retval = isinferior(p, t);
370 }
371
372 if (p != PROC_NULL) {
373 proc_rele(p);
374 }
375 if (t != PROC_NULL) {
376 proc_rele(t);
377 }
378
379 return retval;
380 }
381
382 proc_t
383 proc_find(int pid)
384 {
385 return proc_findinternal(pid, 0);
386 }
387
388 proc_t
389 proc_findinternal(int pid, int locked)
390 {
391 proc_t p = PROC_NULL;
392
393 if (locked == 0) {
394 proc_list_lock();
395 }
396
397 p = pfind_locked(pid);
398 if ((p == PROC_NULL) || (p != proc_ref_locked(p))) {
399 p = PROC_NULL;
400 }
401
402 if (locked == 0) {
403 proc_list_unlock();
404 }
405
406 return p;
407 }
408
409 proc_t
410 proc_findthread(thread_t thread)
411 {
412 proc_t p = PROC_NULL;
413 struct uthread *uth;
414
415 proc_list_lock();
416 uth = get_bsdthread_info(thread);
417 if (uth && (uth->uu_flag & UT_VFORK)) {
418 p = uth->uu_proc;
419 } else {
420 p = (proc_t)(get_bsdthreadtask_info(thread));
421 }
422 p = proc_ref_locked(p);
423 proc_list_unlock();
424 return p;
425 }
426
427 void
428 uthread_reset_proc_refcount(void *uthread)
429 {
430 uthread_t uth;
431
432 uth = (uthread_t) uthread;
433 uth->uu_proc_refcount = 0;
434
435 #if PROC_REF_DEBUG
436 if (proc_ref_tracking_disabled) {
437 return;
438 }
439
440 uth->uu_pindex = 0;
441 #endif
442 }
443
444 #if PROC_REF_DEBUG
445 int
446 uthread_get_proc_refcount(void *uthread)
447 {
448 uthread_t uth;
449
450 if (proc_ref_tracking_disabled) {
451 return 0;
452 }
453
454 uth = (uthread_t) uthread;
455
456 return uth->uu_proc_refcount;
457 }
458 #endif
459
460 static void
461 record_procref(proc_t p __unused, int count)
462 {
463 uthread_t uth;
464
465 uth = current_uthread();
466 uth->uu_proc_refcount += count;
467
468 #if PROC_REF_DEBUG
469 if (proc_ref_tracking_disabled) {
470 return;
471 }
472
473 if (uth->uu_pindex < NUM_PROC_REFS_TO_TRACK) {
474 backtrace((uintptr_t *) &uth->uu_proc_pcs[uth->uu_pindex],
475 PROC_REF_STACK_DEPTH, NULL);
476
477 uth->uu_proc_ps[uth->uu_pindex] = p;
478 uth->uu_pindex++;
479 }
480 #endif
481 }
482
483 static boolean_t
484 uthread_needs_to_wait_in_proc_refwait(void)
485 {
486 uthread_t uth = current_uthread();
487
488 /*
489 * Only allow threads holding no proc refs to wait
490 * in proc_refwait; letting threads that hold
491 * proc refs wait there causes deadlocks and
492 * makes proc_find non-reentrant.
493 */
494 if (uth->uu_proc_refcount == 0) {
495 return TRUE;
496 }
497
498 return FALSE;
499 }
500
501 int
502 proc_rele(proc_t p)
503 {
504 proc_list_lock();
505 proc_rele_locked(p);
506 proc_list_unlock();
507
508 return 0;
509 }
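/*
 * Canonical caller pattern (sketch, not part of the original file): every
 * successful proc_find() must be balanced with proc_rele(); the reference
 * taken in proc_ref_locked() is what holds off proc_refdrain():
 *
 *	proc_t p = proc_find(pid);
 *	if (p != PROC_NULL) {
 *		// ...use p; it cannot be reaped while the ref is held...
 *		proc_rele(p);
 *	}
 */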
510
511 proc_t
512 proc_self(void)
513 {
514 struct proc * p;
515
516 p = current_proc();
517
518 proc_list_lock();
519 if (p != proc_ref_locked(p)) {
520 p = PROC_NULL;
521 }
522 proc_list_unlock();
523 return p;
524 }
525
526
527 proc_t
528 proc_ref_locked(proc_t p)
529 {
530 proc_t p1 = p;
531 int pid = proc_pid(p);
532
533 retry:
534 /*
535 * If the process is still in creation, or the proc got
536 * recycled during msleep, return failure.
537 */
538 if ((p == PROC_NULL) || (p1 != p) || ((p->p_listflag & P_LIST_INCREATE) != 0)) {
539 return PROC_NULL;
540 }
541
542 /*
543 * Do not return a process marked for termination,
544 * or one in proc_refdrain called without ref wait.
545 * Wait for proc_refdrain_with_refwait to complete if
546 * the process is in refdrain and the refwait flag is set,
547 * unless the current thread is already holding a proc ref
548 * on any proc.
549 */
550 if ((p->p_stat != SZOMB) &&
551 ((p->p_listflag & P_LIST_EXITED) == 0) &&
552 ((p->p_listflag & P_LIST_DEAD) == 0) &&
553 (((p->p_listflag & (P_LIST_DRAIN | P_LIST_DRAINWAIT)) == 0) ||
554 ((p->p_listflag & P_LIST_REFWAIT) != 0))) {
555 if ((p->p_listflag & P_LIST_REFWAIT) != 0 && uthread_needs_to_wait_in_proc_refwait()) {
556 msleep(&p->p_listflag, proc_list_mlock, 0, "proc_refwait", 0);
557 /*
558 * the proc might have been recycled since we dropped
559 * the proc list lock, get the proc again.
560 */
561 p = pfind_locked(pid);
562 goto retry;
563 }
564 p->p_refcount++;
565 record_procref(p, 1);
566 } else {
567 p1 = PROC_NULL;
568 }
569
570 return p1;
571 }
572
573 void
574 proc_rele_locked(proc_t p)
575 {
576 if (p->p_refcount > 0) {
577 p->p_refcount--;
578 record_procref(p, -1);
579 if ((p->p_refcount == 0) && ((p->p_listflag & P_LIST_DRAINWAIT) == P_LIST_DRAINWAIT)) {
580 p->p_listflag &= ~P_LIST_DRAINWAIT;
581 wakeup(&p->p_refcount);
582 }
583 } else {
584 panic("proc_rele_locked -ve ref\n");
585 }
586 }
587
588 proc_t
589 proc_find_zombref(int pid)
590 {
591 proc_t p;
592
593 proc_list_lock();
594
595 again:
596 p = pfind_locked(pid);
597
598 /* should we bail? */
599 if ((p == PROC_NULL) /* not found */
600 || ((p->p_listflag & P_LIST_INCREATE) != 0) /* not created yet */
601 || ((p->p_listflag & P_LIST_EXITED) == 0)) { /* not started exit */
602 proc_list_unlock();
603 return PROC_NULL;
604 }
605
606 /* If someone else is controlling the (unreaped) zombie - wait */
607 if ((p->p_listflag & P_LIST_WAITING) != 0) {
608 (void)msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
609 goto again;
610 }
611 p->p_listflag |= P_LIST_WAITING;
612
613 proc_list_unlock();
614
615 return p;
616 }
617
618 void
619 proc_drop_zombref(proc_t p)
620 {
621 proc_list_lock();
622 if ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
623 p->p_listflag &= ~P_LIST_WAITING;
624 wakeup(&p->p_stat);
625 }
626 proc_list_unlock();
627 }
628
629
630 void
631 proc_refdrain(proc_t p)
632 {
633 proc_refdrain_with_refwait(p, FALSE);
634 }
635
636 proc_t
637 proc_refdrain_with_refwait(proc_t p, boolean_t get_ref_and_allow_wait)
638 {
639 boolean_t initexec = FALSE;
640 proc_list_lock();
641
642 p->p_listflag |= P_LIST_DRAIN;
643 if (get_ref_and_allow_wait) {
644 /*
645 * All the calls to proc_ref_locked will wait
646 * for the flag to get cleared before returning a ref,
647 * unless the current thread is already holding a proc ref
648 * on any proc.
649 */
650 p->p_listflag |= P_LIST_REFWAIT;
651 if (p == initproc) {
652 initexec = TRUE;
653 }
654 }
655
656 /* Do not wait in ref drain for launchd exec */
657 while (p->p_refcount && !initexec) {
658 p->p_listflag |= P_LIST_DRAINWAIT;
659 msleep(&p->p_refcount, proc_list_mlock, 0, "proc_refdrain", 0);
660 }
661
662 p->p_listflag &= ~P_LIST_DRAIN;
663 if (!get_ref_and_allow_wait) {
664 p->p_listflag |= P_LIST_DEAD;
665 } else {
666 /* Return a ref to the caller */
667 p->p_refcount++;
668 record_procref(p, 1);
669 }
670
671 proc_list_unlock();
672
673 if (get_ref_and_allow_wait) {
674 return p;
675 }
676 return NULL;
677 }
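/*
 * Assumed exec-path usage (sketch; the real caller lives in kern_exec.c):
 * drain old references while keeping the proc findable, swap state, then
 * wake any waiters and drop the ref this routine returned:
 *
 *	p = proc_refdrain_with_refwait(p, TRUE);	// returns holding a ref
 *	// ...replace the process image...
 *	proc_refwake(p);				// clears P_LIST_REFWAIT
 *	proc_rele(p);
 */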
678
679 void
680 proc_refwake(proc_t p)
681 {
682 proc_list_lock();
683 p->p_listflag &= ~P_LIST_REFWAIT;
684 wakeup(&p->p_listflag);
685 proc_list_unlock();
686 }
687
688 proc_t
689 proc_parentholdref(proc_t p)
690 {
691 proc_t parent = PROC_NULL;
692 proc_t pp;
693 int loopcnt = 0;
694
695
696 proc_list_lock();
697 loop:
698 pp = p->p_pptr;
699 if ((pp == PROC_NULL) || (pp->p_stat == SZOMB) || ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED))) {
700 parent = PROC_NULL;
701 goto out;
702 }
703
704 if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == P_LIST_CHILDDRSTART) {
705 pp->p_listflag |= P_LIST_CHILDDRWAIT;
706 msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
707 loopcnt++;
708 if (loopcnt == 5) {
709 parent = PROC_NULL;
710 goto out;
711 }
712 goto loop;
713 }
714
715 if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == 0) {
716 pp->p_parentref++;
717 parent = pp;
718 goto out;
719 }
720
721 out:
722 proc_list_unlock();
723 return parent;
724 }
725 int
726 proc_parentdropref(proc_t p, int listlocked)
727 {
728 if (listlocked == 0) {
729 proc_list_lock();
730 }
731
732 if (p->p_parentref > 0) {
733 p->p_parentref--;
734 if ((p->p_parentref == 0) && ((p->p_listflag & P_LIST_PARENTREFWAIT) == P_LIST_PARENTREFWAIT)) {
735 p->p_listflag &= ~P_LIST_PARENTREFWAIT;
736 wakeup(&p->p_parentref);
737 }
738 } else {
739 panic("proc_parentdropref -ve ref\n");
740 }
741 if (listlocked == 0) {
742 proc_list_unlock();
743 }
744
745 return 0;
746 }
747
748 void
749 proc_childdrainstart(proc_t p)
750 {
751 #if __PROC_INTERNAL_DEBUG
752 if ((p->p_listflag & P_LIST_CHILDDRSTART) == P_LIST_CHILDDRSTART) {
753 panic("proc_childdrainstart: childdrain already started\n");
754 }
755 #endif
756 p->p_listflag |= P_LIST_CHILDDRSTART;
757 /* wait for all that hold parentrefs to drop */
758 while (p->p_parentref > 0) {
759 p->p_listflag |= P_LIST_PARENTREFWAIT;
760 msleep(&p->p_parentref, proc_list_mlock, 0, "proc_childdrainstart", 0);
761 }
762 }
763
764
765 void
766 proc_childdrainend(proc_t p)
767 {
768 #if __PROC_INTERNAL_DEBUG
769 if (p->p_childrencnt > 0) {
770 panic("exiting: children stil hanging around\n");
771 }
772 #endif
773 p->p_listflag |= P_LIST_CHILDDRAINED;
774 if ((p->p_listflag & (P_LIST_CHILDLKWAIT | P_LIST_CHILDDRWAIT)) != 0) {
775 p->p_listflag &= ~(P_LIST_CHILDLKWAIT | P_LIST_CHILDDRWAIT);
776 wakeup(&p->p_childrencnt);
777 }
778 }
779
780 void
781 proc_checkdeadrefs(__unused proc_t p)
782 {
783 #if __PROC_INTERNAL_DEBUG
784 if ((p->p_listflag & P_LIST_INHASH) != 0) {
785 panic("proc being freed and still in hash %p: %u\n", p, p->p_listflag);
786 }
787 if (p->p_childrencnt != 0) {
788 panic("proc being freed and pending children cnt %p:%d\n", p, p->p_childrencnt);
789 }
790 if (p->p_refcount != 0) {
791 panic("proc being freed and pending refcount %p:%d\n", p, p->p_refcount);
792 }
793 if (p->p_parentref != 0) {
794 panic("proc being freed and pending parentrefs %p:%d\n", p, p->p_parentref);
795 }
796 #endif
797 }
798
799 int
800 proc_pid(proc_t p)
801 {
802 if (p != NULL) {
803 return p->p_pid;
804 }
805 return -1;
806 }
807
808 int
809 proc_ppid(proc_t p)
810 {
811 if (p != NULL) {
812 return p->p_ppid;
813 }
814 return -1;
815 }
816
817 int
818 proc_original_ppid(proc_t p)
819 {
820 if (p != NULL) {
821 return p->p_original_ppid;
822 }
823 return -1;
824 }
825
826 int
827 proc_selfpid(void)
828 {
829 return current_proc()->p_pid;
830 }
831
832 int
833 proc_selfppid(void)
834 {
835 return current_proc()->p_ppid;
836 }
837
838 int
839 proc_selfcsflags(void)
840 {
841 return current_proc()->p_csflags;
842 }
843
844 uint32_t
845 proc_platform(proc_t p)
846 {
847 if (p != NULL) {
848 return p->p_platform;
849 }
850 return (uint32_t)-1;
851 }
852
853 uint32_t
854 proc_sdk(proc_t p)
855 {
856 if (p != NULL) {
857 return p->p_sdk;
858 }
859 return (uint32_t)-1;
860 }
861
862 #if CONFIG_DTRACE
863 static proc_t
864 dtrace_current_proc_vforking(void)
865 {
866 thread_t th = current_thread();
867 struct uthread *ut = get_bsdthread_info(th);
868
869 if (ut &&
870 ((ut->uu_flag & (UT_VFORK | UT_VFORKING)) == (UT_VFORK | UT_VFORKING))) {
871 /*
872 * Handle the narrow window where we're in the vfork syscall,
873 * but we're not quite ready to claim (in particular, to DTrace)
874 * that we're running as the child.
875 */
876 return get_bsdtask_info(get_threadtask(th));
877 }
878 return current_proc();
879 }
880
881 int
882 dtrace_proc_selfpid(void)
883 {
884 return dtrace_current_proc_vforking()->p_pid;
885 }
886
887 int
888 dtrace_proc_selfppid(void)
889 {
890 return dtrace_current_proc_vforking()->p_ppid;
891 }
892
893 uid_t
894 dtrace_proc_selfruid(void)
895 {
896 return dtrace_current_proc_vforking()->p_ruid;
897 }
898 #endif /* CONFIG_DTRACE */
899
900 proc_t
901 proc_parent(proc_t p)
902 {
903 proc_t parent;
904 proc_t pp;
905
906 proc_list_lock();
907 loop:
908 pp = p->p_pptr;
909 parent = proc_ref_locked(pp);
910 if ((parent == PROC_NULL) && (pp != PROC_NULL) && (pp->p_stat != SZOMB) && ((pp->p_listflag & P_LIST_EXITED) != 0) && ((pp->p_listflag & P_LIST_CHILDDRAINED) == 0)) {
911 pp->p_listflag |= P_LIST_CHILDLKWAIT;
912 msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
913 goto loop;
914 }
915 proc_list_unlock();
916 return parent;
917 }
918
919 static boolean_t
920 proc_parent_is_currentproc(proc_t p)
921 {
922 boolean_t ret = FALSE;
923
924 proc_list_lock();
925 if (p->p_pptr == current_proc()) {
926 ret = TRUE;
927 }
928
929 proc_list_unlock();
930 return ret;
931 }
932
933 void
934 proc_name(int pid, char * buf, int size)
935 {
936 proc_t p;
937
938 if ((p = proc_find(pid)) != PROC_NULL) {
939 strlcpy(buf, &p->p_comm[0], size);
940 proc_rele(p);
941 }
942 }
943
944 void
945 proc_name_kdp(task_t t, char * buf, int size)
946 {
947 proc_t p = get_bsdtask_info(t);
948 if (p == PROC_NULL) {
949 return;
950 }
951
952 if ((size_t)size > sizeof(p->p_comm)) {
953 strlcpy(buf, &p->p_name[0], MIN((int)sizeof(p->p_name), size));
954 } else {
955 strlcpy(buf, &p->p_comm[0], MIN((int)sizeof(p->p_comm), size));
956 }
957 }
958
959 boolean_t
960 proc_binary_uuid_kdp(task_t task, uuid_t uuid)
961 {
962 proc_t p = get_bsdtask_info(task);
963 if (p == PROC_NULL) {
964 return FALSE;
965 }
966
967 proc_getexecutableuuid(p, uuid, sizeof(uuid_t));
968
969 return TRUE;
970 }
971
972 int
973 proc_threadname_kdp(void * uth, char * buf, size_t size)
974 {
975 if (size < MAXTHREADNAMESIZE) {
976 /* this is really just a protective measure for the future in
977 * case the thread name size in stackshot gets out of sync with
978 * the BSD max thread name size. Note that bsd_getthreadname
979 * doesn't take input buffer size into account. */
980 return -1;
981 }
982
983 if (uth != NULL) {
984 bsd_getthreadname(uth, buf);
985 }
986 return 0;
987 }
988
989
990 /* Note that this function is generally going to be called from stackshot,
991 * and the arguments will be coming from a struct which is declared packed;
992 * thus the input arguments will in general be unaligned. We have to handle
993 * that here. */
994 void
995 proc_starttime_kdp(void *p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unaligned_u64 *abstime)
996 {
997 proc_t pp = (proc_t)p;
998 if (pp != PROC_NULL) {
999 if (tv_sec != NULL) {
1000 *tv_sec = pp->p_start.tv_sec;
1001 }
1002 if (tv_usec != NULL) {
1003 *tv_usec = pp->p_start.tv_usec;
1004 }
1005 if (abstime != NULL) {
1006 if (pp->p_stats != NULL) {
1007 *abstime = pp->p_stats->ps_start;
1008 } else {
1009 *abstime = 0;
1010 }
1011 }
1012 }
1013 }
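/*
 * Illustrative sketch (hypothetical struct, not from this file): stackshot
 * passes pointers into a packed record, so the u64 fields may be misaligned;
 * the unaligned_u64 typedef makes the stores above safe for such callers:
 *
 *	struct rec {
 *		uint8_t  tag;
 *		uint64_t start_sec;	// offset 1: misaligned when packed
 *	} __attribute__((packed)) r;
 *
 *	proc_starttime_kdp(p, (unaligned_u64 *)&r.start_sec, NULL, NULL);
 */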
1014
1015 char *
1016 proc_name_address(void *p)
1017 {
1018 return &((proc_t)p)->p_comm[0];
1019 }
1020
1021 char *
1022 proc_best_name(proc_t p)
1023 {
1024 if (p->p_name[0] != 0) {
1025 return &p->p_name[0];
1026 }
1027 return &p->p_comm[0];
1028 }
1029
1030 void
1031 proc_selfname(char * buf, int size)
1032 {
1033 proc_t p;
1034
1035 if ((p = current_proc()) != (proc_t)0) {
1036 strlcpy(buf, &p->p_comm[0], size);
1037 }
1038 }
1039
1040 void
1041 proc_signal(int pid, int signum)
1042 {
1043 proc_t p;
1044
1045 if ((p = proc_find(pid)) != PROC_NULL) {
1046 psignal(p, signum);
1047 proc_rele(p);
1048 }
1049 }
1050
1051 int
1052 proc_issignal(int pid, sigset_t mask)
1053 {
1054 proc_t p;
1055 int error = 0;
1056
1057 if ((p = proc_find(pid)) != PROC_NULL) {
1058 error = proc_pendingsignals(p, mask);
1059 proc_rele(p);
1060 }
1061
1062 return error;
1063 }
1064
1065 int
1066 proc_noremotehang(proc_t p)
1067 {
1068 int retval = 0;
1069
1070 if (p) {
1071 retval = p->p_flag & P_NOREMOTEHANG;
1072 }
1073 return retval? 1: 0;
1074 }
1075
1076 int
1077 proc_exiting(proc_t p)
1078 {
1079 int retval = 0;
1080
1081 if (p) {
1082 retval = p->p_lflag & P_LEXIT;
1083 }
1084 return retval? 1: 0;
1085 }
1086
1087 int
1088 proc_in_teardown(proc_t p)
1089 {
1090 int retval = 0;
1091
1092 if (p) {
1093 retval = p->p_lflag & P_LPEXIT;
1094 }
1095 return retval? 1: 0;
1096 }
1097
1098 int
1099 proc_forcequota(proc_t p)
1100 {
1101 int retval = 0;
1102
1103 if (p) {
1104 retval = p->p_flag & P_FORCEQUOTA;
1105 }
1106 return retval? 1: 0;
1107 }
1108
1109 int
1110 proc_suser(proc_t p)
1111 {
1112 kauth_cred_t my_cred;
1113 int error;
1114
1115 my_cred = kauth_cred_proc_ref(p);
1116 error = suser(my_cred, &p->p_acflag);
1117 kauth_cred_unref(&my_cred);
1118 return error;
1119 }
1120
1121 task_t
1122 proc_task(proc_t proc)
1123 {
1124 return (task_t)proc->task;
1125 }
1126
1127 /*
1128 * Obtain the first thread in a process
1129 *
1130 * XXX This is a bad thing to do; it exists predominantly to support the
1131 * XXX use of proc_t's in places that should really be using
1132 * XXX thread_t's instead. This maintains historical behaviour, but really
1133 * XXX needs an audit of the context (proxy vs. not) to clean up.
1134 */
1135 thread_t
1136 proc_thread(proc_t proc)
1137 {
1138 uthread_t uth = TAILQ_FIRST(&proc->p_uthlist);
1139
1140 if (uth != NULL) {
1141 return uth->uu_context.vc_thread;
1142 }
1143
1144 return NULL;
1145 }
1146
1147 kauth_cred_t
1148 proc_ucred(proc_t p)
1149 {
1150 return p->p_ucred;
1151 }
1152
1153 struct uthread *
1154 current_uthread()
1155 {
1156 thread_t th = current_thread();
1157
1158 return (struct uthread *)get_bsdthread_info(th);
1159 }
1160
1161
1162 int
1163 proc_is64bit(proc_t p)
1164 {
1165 return IS_64BIT_PROCESS(p);
1166 }
1167
1168 int
1169 proc_is64bit_data(proc_t p)
1170 {
1171 assert(p->task);
1172 return (int)task_get_64bit_data(p->task);
1173 }
1174
1175 int
1176 proc_pidversion(proc_t p)
1177 {
1178 return p->p_idversion;
1179 }
1180
1181 uint32_t
1182 proc_persona_id(proc_t p)
1183 {
1184 return (uint32_t)persona_id_from_proc(p);
1185 }
1186
1187 uint32_t
1188 proc_getuid(proc_t p)
1189 {
1190 return p->p_uid;
1191 }
1192
1193 uint32_t
1194 proc_getgid(proc_t p)
1195 {
1196 return p->p_gid;
1197 }
1198
1199 uint64_t
1200 proc_uniqueid(proc_t p)
1201 {
1202 return p->p_uniqueid;
1203 }
1204
1205 uint64_t
1206 proc_puniqueid(proc_t p)
1207 {
1208 return p->p_puniqueid;
1209 }
1210
1211 void
1212 proc_coalitionids(__unused proc_t p, __unused uint64_t ids[COALITION_NUM_TYPES])
1213 {
1214 #if CONFIG_COALITIONS
1215 task_coalition_ids(p->task, ids);
1216 #else
1217 memset(ids, 0, sizeof(uint64_t[COALITION_NUM_TYPES]));
1218 #endif
1219 return;
1220 }
1221
1222 uint64_t
1223 proc_was_throttled(proc_t p)
1224 {
1225 return p->was_throttled;
1226 }
1227
1228 uint64_t
1229 proc_did_throttle(proc_t p)
1230 {
1231 return p->did_throttle;
1232 }
1233
1234 int
1235 proc_getcdhash(proc_t p, unsigned char *cdhash)
1236 {
1237 return vn_getcdhash(p->p_textvp, p->p_textoff, cdhash);
1238 }
1239
1240 int
1241 proc_exitstatus(proc_t p)
1242 {
1243 return p->p_xstat & 0xffff;
1244 }
1245
1246 void
1247 proc_getexecutableuuid(proc_t p, unsigned char *uuidbuf, unsigned long size)
1248 {
1249 if (size >= sizeof(p->p_uuid)) {
1250 memcpy(uuidbuf, p->p_uuid, sizeof(p->p_uuid));
1251 }
1252 }
1253
1254 /* Return vnode for executable with an iocount. Must be released with vnode_put() */
1255 vnode_t
1256 proc_getexecutablevnode(proc_t p)
1257 {
1258 vnode_t tvp = p->p_textvp;
1259
1260 if (tvp != NULLVP) {
1261 if (vnode_getwithref(tvp) == 0) {
1262 return tvp;
1263 }
1264 }
1265
1266 return NULLVP;
1267 }
1268
1269 int
1270 proc_selfexecutableargs(uint8_t *buf, size_t *buflen)
1271 {
1272 proc_t p = current_proc();
1273
1274 // buflen must always be provided
1275 if (buflen == NULL) {
1276 return EINVAL;
1277 }
1278
1279 // If a buf is provided, there must be at least enough room to fit argc
1280 if (buf && *buflen < sizeof(p->p_argc)) {
1281 return EINVAL;
1282 }
1283
1284 if (!p->user_stack) {
1285 return EINVAL;
1286 }
1287
1288 if (buf == NULL) {
1289 *buflen = p->p_argslen + sizeof(p->p_argc);
1290 return 0;
1291 }
1292
1293 // Copy in argc to the first 4 bytes
1294 memcpy(buf, &p->p_argc, sizeof(p->p_argc));
1295
1296 if (*buflen > sizeof(p->p_argc) && p->p_argslen > 0) {
1297 // See memory layout comment in kern_exec.c:exec_copyout_strings()
1298 // We want to copy starting from `p_argslen` bytes away from top of stack
1299 return copyin(p->user_stack - p->p_argslen,
1300 buf + sizeof(p->p_argc),
1301 MIN(p->p_argslen, *buflen - sizeof(p->p_argc)));
1302 } else {
1303 return 0;
1304 }
1305 }
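/*
 * Caller sketch (illustrative, not part of the original file): the intended
 * protocol is two calls, first sizing with buf == NULL, then fetching argc
 * plus the packed argument strings:
 *
 *	size_t len = 0;
 *	if (proc_selfexecutableargs(NULL, &len) == 0) {
 *		uint8_t *buf = kalloc(len);
 *		if (buf != NULL && proc_selfexecutableargs(buf, &len) == 0) {
 *			// first 4 bytes hold argc; the strings follow
 *		}
 *	}
 */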
1306
1307 off_t
1308 proc_getexecutableoffset(proc_t p)
1309 {
1310 return p->p_textoff;
1311 }
1312
1313 void
1314 bsd_set_dependency_capable(task_t task)
1315 {
1316 proc_t p = get_bsdtask_info(task);
1317
1318 if (p) {
1319 OSBitOrAtomic(P_DEPENDENCY_CAPABLE, &p->p_flag);
1320 }
1321 }
1322
1323
1324 #ifndef __arm__
1325 int
1326 IS_64BIT_PROCESS(proc_t p)
1327 {
1328 if (p && (p->p_flag & P_LP64)) {
1329 return 1;
1330 } else {
1331 return 0;
1332 }
1333 }
1334 #endif
1335
1336 /*
1337 * Locate a process by number
1338 */
1339 proc_t
1340 pfind_locked(pid_t pid)
1341 {
1342 proc_t p;
1343 #if DEBUG
1344 proc_t q;
1345 #endif
1346
1347 if (!pid) {
1348 return kernproc;
1349 }
1350
1351 for (p = PIDHASH(pid)->lh_first; p != 0; p = p->p_hash.le_next) {
1352 if (p->p_pid == pid) {
1353 #if DEBUG
1354 for (q = p->p_hash.le_next; q != 0; q = q->p_hash.le_next) {
1355 if ((p != q) && (q->p_pid == pid)) {
1356 panic("two procs with same pid %p:%p:%d:%d\n", p, q, p->p_pid, q->p_pid);
1357 }
1358 }
1359 #endif
1360 return p;
1361 }
1362 }
1363 return NULL;
1364 }
1365
1366 /*
1367 * Locate a zombie by PID
1368 */
1369 __private_extern__ proc_t
1370 pzfind(pid_t pid)
1371 {
1372 proc_t p;
1373
1374
1375 proc_list_lock();
1376
1377 for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) {
1378 if (p->p_pid == pid) {
1379 break;
1380 }
1381 }
1382
1383 proc_list_unlock();
1384
1385 return p;
1386 }
1387
1388 /*
1389 * Locate a process group by number
1390 */
1391
1392 struct pgrp *
1393 pgfind(pid_t pgid)
1394 {
1395 struct pgrp * pgrp;
1396
1397 proc_list_lock();
1398 pgrp = pgfind_internal(pgid);
1399 if ((pgrp == NULL) || ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) != 0)) {
1400 pgrp = PGRP_NULL;
1401 } else {
1402 pgrp->pg_refcount++;
1403 }
1404 proc_list_unlock();
1405 return pgrp;
1406 }
1407
1408
1409
1410 struct pgrp *
1411 pgfind_internal(pid_t pgid)
1412 {
1413 struct pgrp *pgrp;
1414
1415 for (pgrp = PGRPHASH(pgid)->lh_first; pgrp != 0; pgrp = pgrp->pg_hash.le_next) {
1416 if (pgrp->pg_id == pgid) {
1417 return pgrp;
1418 }
1419 }
1420 return NULL;
1421 }
1422
1423 void
1424 pg_rele(struct pgrp * pgrp)
1425 {
1426 if (pgrp == PGRP_NULL) {
1427 return;
1428 }
1429 pg_rele_dropref(pgrp);
1430 }
1431
1432 void
1433 pg_rele_dropref(struct pgrp * pgrp)
1434 {
1435 proc_list_lock();
1436 if ((pgrp->pg_refcount == 1) && ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) == PGRP_FLAG_TERMINATE)) {
1437 proc_list_unlock();
1438 pgdelete_dropref(pgrp);
1439 return;
1440 }
1441
1442 pgrp->pg_refcount--;
1443 proc_list_unlock();
1444 }
1445
1446 struct session *
1447 session_find_internal(pid_t sessid)
1448 {
1449 struct session *sess;
1450
1451 for (sess = SESSHASH(sessid)->lh_first; sess != 0; sess = sess->s_hash.le_next) {
1452 if (sess->s_sid == sessid) {
1453 return sess;
1454 }
1455 }
1456 return NULL;
1457 }
1458
1459
1460 /*
1461 * Make a new process ready to become a useful member of society by making it
1462 * visible in all the right places and initializing its own lists to empty.
1463 *
1464 * Parameters: parent The parent of the process to insert
1465 * child The child process to insert
1466 *
1467 * Returns: (void)
1468 *
1469 * Notes: Insert a child process into the parent's process group, assign
1470 * the child the parent process pointer and PPID of the parent,
1471 * place it on the parent's p_children list as a sibling,
1472 * initialize its own child list, place it in the allproc list,
1473 * insert it in the proper hash bucket, and initialize its
1474 * event list.
1475 */
1476 void
1477 pinsertchild(proc_t parent, proc_t child)
1478 {
1479 struct pgrp * pg;
1480
1481 LIST_INIT(&child->p_children);
1482 TAILQ_INIT(&child->p_evlist);
1483 child->p_pptr = parent;
1484 child->p_ppid = parent->p_pid;
1485 child->p_original_ppid = parent->p_pid;
1486 child->p_puniqueid = parent->p_uniqueid;
1487 child->p_xhighbits = 0;
1488
1489 pg = proc_pgrp(parent);
1490 pgrp_add(pg, parent, child);
1491 pg_rele(pg);
1492
1493 proc_list_lock();
1494
1495 #if CONFIG_MEMORYSTATUS
1496 memorystatus_add(child, TRUE);
1497 #endif
1498
1499 parent->p_childrencnt++;
1500 LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
1501
1502 LIST_INSERT_HEAD(&allproc, child, p_list);
1503 /* mark the completion of proc creation */
1504 child->p_listflag &= ~P_LIST_INCREATE;
1505
1506 proc_list_unlock();
1507 }
1508
1509 /*
1510 * Move p to a new or existing process group (and session)
1511 *
1512 * Returns: 0 Success
1513 * ESRCH No such process
1514 */
1515 int
1516 enterpgrp(proc_t p, pid_t pgid, int mksess)
1517 {
1518 struct pgrp *pgrp;
1519 struct pgrp *mypgrp;
1520 struct session * procsp;
1521
1522 pgrp = pgfind(pgid);
1523 mypgrp = proc_pgrp(p);
1524 procsp = proc_session(p);
1525
1526 #if DIAGNOSTIC
1527 if (pgrp != NULL && mksess) { /* firewalls */
1528 panic("enterpgrp: setsid into non-empty pgrp");
1529 }
1530 if (SESS_LEADER(p, procsp)) {
1531 panic("enterpgrp: session leader attempted setpgrp");
1532 }
1533 #endif
1534 if (pgrp == PGRP_NULL) {
1535 pid_t savepid = p->p_pid;
1536 proc_t np = PROC_NULL;
1537 /*
1538 * new process group
1539 */
1540 #if DIAGNOSTIC
1541 if (p->p_pid != pgid) {
1542 panic("enterpgrp: new pgrp and pid != pgid");
1543 }
1544 #endif
1545 MALLOC_ZONE(pgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP,
1546 M_WAITOK);
1547 if (pgrp == NULL) {
1548 panic("enterpgrp: M_PGRP zone depleted");
1549 }
1550 if ((np = proc_find(savepid)) == NULL || np != p) {
1551 if (np != PROC_NULL) {
1552 proc_rele(np);
1553 }
1554 if (mypgrp != PGRP_NULL) {
1555 pg_rele(mypgrp);
1556 }
1557 if (procsp != SESSION_NULL) {
1558 session_rele(procsp);
1559 }
1560 FREE_ZONE(pgrp, sizeof(struct pgrp), M_PGRP);
1561 return ESRCH;
1562 }
1563 proc_rele(np);
1564 if (mksess) {
1565 struct session *sess;
1566
1567 /*
1568 * new session
1569 */
1570 MALLOC_ZONE(sess, struct session *,
1571 sizeof(struct session), M_SESSION, M_WAITOK);
1572 if (sess == NULL) {
1573 panic("enterpgrp: M_SESSION zone depleted");
1574 }
1575 sess->s_leader = p;
1576 sess->s_sid = p->p_pid;
1577 sess->s_count = 1;
1578 sess->s_ttyvp = NULL;
1579 sess->s_ttyp = TTY_NULL;
1580 sess->s_flags = 0;
1581 sess->s_listflags = 0;
1582 sess->s_ttypgrpid = NO_PID;
1583
1584 lck_mtx_init(&sess->s_mlock, proc_mlock_grp, proc_lck_attr);
1585
1586 bcopy(procsp->s_login, sess->s_login,
1587 sizeof(sess->s_login));
1588 OSBitAndAtomic(~((uint32_t)P_CONTROLT), &p->p_flag);
1589 proc_list_lock();
1590 LIST_INSERT_HEAD(SESSHASH(sess->s_sid), sess, s_hash);
1591 proc_list_unlock();
1592 pgrp->pg_session = sess;
1593 #if DIAGNOSTIC
1594 if (p != current_proc()) {
1595 panic("enterpgrp: mksession and p != curproc");
1596 }
1597 #endif
1598 } else {
1599 proc_list_lock();
1600 pgrp->pg_session = procsp;
1601
1602 if ((pgrp->pg_session->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
1603 panic("enterpgrp: providing ref to terminating session ");
1604 }
1605 pgrp->pg_session->s_count++;
1606 proc_list_unlock();
1607 }
1608 pgrp->pg_id = pgid;
1609
1610 lck_mtx_init(&pgrp->pg_mlock, proc_mlock_grp, proc_lck_attr);
1611
1612 LIST_INIT(&pgrp->pg_members);
1613 pgrp->pg_membercnt = 0;
1614 pgrp->pg_jobc = 0;
1615 proc_list_lock();
1616 pgrp->pg_refcount = 1;
1617 pgrp->pg_listflags = 0;
1618 LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
1619 proc_list_unlock();
1620 } else if (pgrp == mypgrp) {
1621 pg_rele(pgrp);
1622 if (mypgrp != NULL) {
1623 pg_rele(mypgrp);
1624 }
1625 if (procsp != SESSION_NULL) {
1626 session_rele(procsp);
1627 }
1628 return 0;
1629 }
1630
1631 if (procsp != SESSION_NULL) {
1632 session_rele(procsp);
1633 }
1634 /*
1635 * Adjust eligibility of affected pgrps to participate in job control.
1636 * Increment eligibility counts before decrementing, otherwise we
1637 * could reach 0 spuriously during the first call.
1638 */
1639 fixjobc(p, pgrp, 1);
1640 fixjobc(p, mypgrp, 0);
1641
1642 if (mypgrp != PGRP_NULL) {
1643 pg_rele(mypgrp);
1644 }
1645 pgrp_replace(p, pgrp);
1646 pg_rele(pgrp);
1647
1648 return 0;
1649 }
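/*
 * Assumed callers (sketch; setsid(2)/setpgid(2) live elsewhere in bsd/kern):
 * a session leader is made by naming the caller's own pid with mksess set,
 * while plain setpgid joins or creates a process group in the same session:
 *
 *	error = enterpgrp(p, p->p_pid, 1);	// setsid: new pgrp + session
 *	error = enterpgrp(p, pgid, 0);		// setpgid: new/existing pgrp
 */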
1650
1651 /*
1652 * remove process from process group
1653 */
1654 int
1655 leavepgrp(proc_t p)
1656 {
1657 pgrp_remove(p);
1658 return 0;
1659 }
1660
1661 /*
1662 * delete a process group
1663 */
1664 static void
1665 pgdelete_dropref(struct pgrp *pgrp)
1666 {
1667 struct tty *ttyp;
1668 int emptypgrp = 1;
1669 struct session *sessp;
1670
1671
1672 pgrp_lock(pgrp);
1673 if (pgrp->pg_membercnt != 0) {
1674 emptypgrp = 0;
1675 }
1676 pgrp_unlock(pgrp);
1677
1678 proc_list_lock();
1679 pgrp->pg_refcount--;
1680 if ((emptypgrp == 0) || (pgrp->pg_membercnt != 0)) {
1681 proc_list_unlock();
1682 return;
1683 }
1684
1685 pgrp->pg_listflags |= PGRP_FLAG_TERMINATE;
1686
1687 if (pgrp->pg_refcount > 0) {
1688 proc_list_unlock();
1689 return;
1690 }
1691
1692 pgrp->pg_listflags |= PGRP_FLAG_DEAD;
1693 LIST_REMOVE(pgrp, pg_hash);
1694
1695 proc_list_unlock();
1696
1697 ttyp = SESSION_TP(pgrp->pg_session);
1698 if (ttyp != TTY_NULL) {
1699 if (ttyp->t_pgrp == pgrp) {
1700 tty_lock(ttyp);
1701 /* Re-check after acquiring the lock */
1702 if (ttyp->t_pgrp == pgrp) {
1703 ttyp->t_pgrp = NULL;
1704 pgrp->pg_session->s_ttypgrpid = NO_PID;
1705 }
1706 tty_unlock(ttyp);
1707 }
1708 }
1709
1710 proc_list_lock();
1711
1712 sessp = pgrp->pg_session;
1713 if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
1714 panic("pg_deleteref: manipulating refs of already terminating session");
1715 }
1716 if (--sessp->s_count == 0) {
1717 if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
1718 panic("pg_deleteref: terminating already terminated session");
1719 }
1720 sessp->s_listflags |= S_LIST_TERM;
1721 ttyp = SESSION_TP(sessp);
1722 LIST_REMOVE(sessp, s_hash);
1723 proc_list_unlock();
1724 if (ttyp != TTY_NULL) {
1725 tty_lock(ttyp);
1726 if (ttyp->t_session == sessp) {
1727 ttyp->t_session = NULL;
1728 }
1729 tty_unlock(ttyp);
1730 }
1731 proc_list_lock();
1732 sessp->s_listflags |= S_LIST_DEAD;
1733 if (sessp->s_count != 0) {
1734 panic("pg_deleteref: freeing session in use");
1735 }
1736 proc_list_unlock();
1737 lck_mtx_destroy(&sessp->s_mlock, proc_mlock_grp);
1738
1739 FREE_ZONE(sessp, sizeof(struct session), M_SESSION);
1740 } else {
1741 proc_list_unlock();
1742 }
1743 lck_mtx_destroy(&pgrp->pg_mlock, proc_mlock_grp);
1744 FREE_ZONE(pgrp, sizeof(*pgrp), M_PGRP);
1745 }
1746
1747
1748 /*
1749 * Adjust pgrp jobc counters when specified process changes process group.
1750 * We count the number of processes in each process group that "qualify"
1751 * the group for terminal job control (those with a parent in a different
1752 * process group of the same session). If that count reaches zero, the
1753 * process group becomes orphaned. Check both the specified process'
1754 * process group and that of its children.
1755 * entering == 0 => p is leaving specified group.
1756 * entering == 1 => p is entering specified group.
1757 */
1758 int
1759 fixjob_callback(proc_t p, void * arg)
1760 {
1761 struct fixjob_iterargs *fp;
1762 struct pgrp * pg, *hispg;
1763 struct session * mysession, *hissess;
1764 int entering;
1765
1766 fp = (struct fixjob_iterargs *)arg;
1767 pg = fp->pg;
1768 mysession = fp->mysession;
1769 entering = fp->entering;
1770
1771 hispg = proc_pgrp(p);
1772 hissess = proc_session(p);
1773
1774 if ((hispg != pg) &&
1775 (hissess == mysession)) {
1776 pgrp_lock(hispg);
1777 if (entering) {
1778 hispg->pg_jobc++;
1779 pgrp_unlock(hispg);
1780 } else if (--hispg->pg_jobc == 0) {
1781 pgrp_unlock(hispg);
1782 orphanpg(hispg);
1783 } else {
1784 pgrp_unlock(hispg);
1785 }
1786 }
1787 if (hissess != SESSION_NULL) {
1788 session_rele(hissess);
1789 }
1790 if (hispg != PGRP_NULL) {
1791 pg_rele(hispg);
1792 }
1793
1794 return PROC_RETURNED;
1795 }
1796
1797 void
1798 fixjobc(proc_t p, struct pgrp *pgrp, int entering)
1799 {
1800 struct pgrp *hispgrp = PGRP_NULL;
1801 struct session *hissess = SESSION_NULL;
1802 struct session *mysession = pgrp->pg_session;
1803 proc_t parent;
1804 struct fixjob_iterargs fjarg;
1805 boolean_t proc_parent_self;
1806
1807 /*
1808 * Check if p's parent is the current proc; if so, there is no need to
1809 * take a ref. Calling proc_parent with the current proc as the parent
1810 * may deadlock if the current proc is exiting.
1811 */
1812 proc_parent_self = proc_parent_is_currentproc(p);
1813 if (proc_parent_self) {
1814 parent = current_proc();
1815 } else {
1816 parent = proc_parent(p);
1817 }
1818
1819 if (parent != PROC_NULL) {
1820 hispgrp = proc_pgrp(parent);
1821 hissess = proc_session(parent);
1822 if (!proc_parent_self) {
1823 proc_rele(parent);
1824 }
1825 }
1826
1827
1828 /*
1829 * Check p's parent to see whether p qualifies its own process
1830 * group; if so, adjust count for p's process group.
1831 */
1832 if ((hispgrp != pgrp) &&
1833 (hissess == mysession)) {
1834 pgrp_lock(pgrp);
1835 if (entering) {
1836 pgrp->pg_jobc++;
1837 pgrp_unlock(pgrp);
1838 } else if (--pgrp->pg_jobc == 0) {
1839 pgrp_unlock(pgrp);
1840 orphanpg(pgrp);
1841 } else {
1842 pgrp_unlock(pgrp);
1843 }
1844 }
1845
1846 if (hissess != SESSION_NULL) {
1847 session_rele(hissess);
1848 }
1849 if (hispgrp != PGRP_NULL) {
1850 pg_rele(hispgrp);
1851 }
1852
1853 /*
1854 * Check this process' children to see whether they qualify
1855 * their process groups; if so, adjust counts for children's
1856 * process groups.
1857 */
1858 fjarg.pg = pgrp;
1859 fjarg.mysession = mysession;
1860 fjarg.entering = entering;
1861 proc_childrenwalk(p, fixjob_callback, &fjarg);
1862 }
1863
1864 /*
1865 * The pidlist_* routines support the functions in this file that
1866 * walk lists of processes applying filters and callouts to the
1867 * elements of the list.
1868 *
1869 * A prior implementation used a single linear array, which can be
1870 * tricky to allocate on large systems. This implementation creates
1871 * an SLIST of modestly sized arrays of PIDS_PER_ENTRY elements.
1872 *
1873 * The array should be sized large enough to keep the overhead of
1874 * walking the list low, but small enough that blocking allocations of
1875 * pidlist_entry_t structures always succeed.
1876 */
1877
1878 #define PIDS_PER_ENTRY 1021
1879
1880 typedef struct pidlist_entry {
1881 SLIST_ENTRY(pidlist_entry) pe_link;
1882 u_int pe_nused;
1883 pid_t pe_pid[PIDS_PER_ENTRY];
1884 } pidlist_entry_t;
1885
1886 typedef struct {
1887 SLIST_HEAD(, pidlist_entry) pl_head;
1888 struct pidlist_entry *pl_active;
1889 u_int pl_nalloc;
1890 } pidlist_t;
1891
1892 static __inline__ pidlist_t *
1893 pidlist_init(pidlist_t *pl)
1894 {
1895 SLIST_INIT(&pl->pl_head);
1896 pl->pl_active = NULL;
1897 pl->pl_nalloc = 0;
1898 return pl;
1899 }
1900
1901 static u_int
1902 pidlist_alloc(pidlist_t *pl, u_int needed)
1903 {
1904 while (pl->pl_nalloc < needed) {
1905 pidlist_entry_t *pe = kalloc(sizeof(*pe));
1906 if (NULL == pe) {
1907 panic("no space for pidlist entry");
1908 }
1909 pe->pe_nused = 0;
1910 SLIST_INSERT_HEAD(&pl->pl_head, pe, pe_link);
1911 pl->pl_nalloc += (sizeof(pe->pe_pid) / sizeof(pe->pe_pid[0]));
1912 }
1913 return pl->pl_nalloc;
1914 }
1915
1916 static void
1917 pidlist_free(pidlist_t *pl)
1918 {
1919 pidlist_entry_t *pe;
1920 while (NULL != (pe = SLIST_FIRST(&pl->pl_head))) {
1921 SLIST_FIRST(&pl->pl_head) = SLIST_NEXT(pe, pe_link);
1922 kfree(pe, sizeof(*pe));
1923 }
1924 pl->pl_nalloc = 0;
1925 }
1926
1927 static __inline__ void
1928 pidlist_set_active(pidlist_t *pl)
1929 {
1930 pl->pl_active = SLIST_FIRST(&pl->pl_head);
1931 assert(pl->pl_active);
1932 }
1933
1934 static void
1935 pidlist_add_pid(pidlist_t *pl, pid_t pid)
1936 {
1937 pidlist_entry_t *pe = pl->pl_active;
1938 if (pe->pe_nused >= sizeof(pe->pe_pid) / sizeof(pe->pe_pid[0])) {
1939 if (NULL == (pe = SLIST_NEXT(pe, pe_link))) {
1940 panic("pidlist allocation exhausted");
1941 }
1942 pl->pl_active = pe;
1943 }
1944 pe->pe_pid[pe->pe_nused++] = pid;
1945 }
1946
1947 static __inline__ u_int
1948 pidlist_nalloc(const pidlist_t *pl)
1949 {
1950 return pl->pl_nalloc;
1951 }
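/*
 * Pattern sketch (condensed from orphanpg() below; no new API assumed):
 * size the list outside the lock, retrying until it is large enough, then
 * snapshot pids under the lock and do the real work unlocked:
 *
 *	pidlist_t pid_list, *pl = pidlist_init(&pid_list);
 *	for (;;) {
 *		// take lock; count members into `needed`
 *		if (pidlist_nalloc(pl) >= needed) {
 *			break;			// still holding the lock
 *		}
 *		// drop lock
 *		pidlist_alloc(pl, needed);
 *	}
 *	pidlist_set_active(pl);
 *	// pidlist_add_pid(pl, ...) per member, unlock, walk, pidlist_free(pl)
 */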
1952
1953 /*
1954 * A process group has become orphaned; if there are any stopped processes in
1955 * the group, hang up all processes in that group.
1956 */
1957 static void
1958 orphanpg(struct pgrp *pgrp)
1959 {
1960 pidlist_t pid_list, *pl = pidlist_init(&pid_list);
1961 u_int pid_count_available = 0;
1962 proc_t p;
1963
1964 /* allocate outside of the pgrp_lock */
1965 for (;;) {
1966 pgrp_lock(pgrp);
1967
1968 boolean_t should_iterate = FALSE;
1969 pid_count_available = 0;
1970
1971 PGMEMBERS_FOREACH(pgrp, p) {
1972 pid_count_available++;
1973 if (p->p_stat == SSTOP) {
1974 should_iterate = TRUE;
1975 }
1976 }
1977 if (pid_count_available == 0 || !should_iterate) {
1978 pgrp_unlock(pgrp);
1979 goto out; /* no orphaned processes OR nothing stopped */
1980 }
1981 if (pidlist_nalloc(pl) >= pid_count_available) {
1982 break;
1983 }
1984 pgrp_unlock(pgrp);
1985
1986 pidlist_alloc(pl, pid_count_available);
1987 }
1988 pidlist_set_active(pl);
1989
1990 u_int pid_count = 0;
1991 PGMEMBERS_FOREACH(pgrp, p) {
1992 pidlist_add_pid(pl, proc_pid(p));
1993 if (++pid_count >= pid_count_available) {
1994 break;
1995 }
1996 }
1997 pgrp_unlock(pgrp);
1998
1999 const pidlist_entry_t *pe;
2000 SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
2001 for (u_int i = 0; i < pe->pe_nused; i++) {
2002 const pid_t pid = pe->pe_pid[i];
2003 if (0 == pid) {
2004 continue; /* skip kernproc */
2005 }
2006 p = proc_find(pid);
2007 if (!p) {
2008 continue;
2009 }
2010 proc_transwait(p, 0);
2011 pt_setrunnable(p);
2012 psignal(p, SIGHUP);
2013 psignal(p, SIGCONT);
2014 proc_rele(p);
2015 }
2016 }
2017 out:
2018 pidlist_free(pl);
2019 }
2020
2021 int
2022 proc_is_classic(proc_t p __unused)
2023 {
2024 return 0;
2025 }
2026
2027 /* XXX Why does this function exist? Need to kill it off... */
2028 proc_t
2029 current_proc_EXTERNAL(void)
2030 {
2031 return current_proc();
2032 }
2033
2034 int
2035 proc_is_forcing_hfs_case_sensitivity(proc_t p)
2036 {
2037 return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY) ? 1 : 0;
2038 }
2039
2040 #if CONFIG_COREDUMP
2041 /*
2042 * proc_core_name(name, uid, pid)
2043 * Expand the name described in corefilename, using name, uid, and pid.
2044 * corefilename is a printf-like string, with three format specifiers:
2045 * %N name of process ("name")
2046 * %P process id (pid)
2047 * %U user id (uid)
2048 * For example, "/cores/core.%P" is the macOS default (see corefilename above);
2049 * cores can be disabled entirely with "/dev/null", or all stored in "/cores/%U/%N-%P".
2050 * This is controlled by the sysctl variable kern.corefile (see above).
2051 */
2052 __private_extern__ int
2053 proc_core_name(const char *name, uid_t uid, pid_t pid, char *cf_name,
2054 size_t cf_name_len)
2055 {
2056 const char *format, *appendstr;
2057 char id_buf[11]; /* Buffer for pid/uid -- max 4B */
2058 size_t i, l, n;
2059
2060 if (cf_name == NULL) {
2061 goto toolong;
2062 }
2063
2064 format = corefilename;
2065 for (i = 0, n = 0; n < cf_name_len && format[i]; i++) {
2066 switch (format[i]) {
2067 case '%': /* Format character */
2068 i++;
2069 switch (format[i]) {
2070 case '%':
2071 appendstr = "%";
2072 break;
2073 case 'N': /* process name */
2074 appendstr = name;
2075 break;
2076 case 'P': /* process id */
2077 snprintf(id_buf, sizeof(id_buf), "%u", pid);
2078 appendstr = id_buf;
2079 break;
2080 case 'U': /* user id */
2081 snprintf(id_buf, sizeof(id_buf), "%u", uid);
2082 appendstr = id_buf;
2083 break;
2084 case '\0': /* format string ended in % symbol */
2085 goto endofstring;
2086 default:
2087 appendstr = "";
2088 log(LOG_ERR,
2089 "Unknown format character %c in `%s'\n",
2090 format[i], format);
2091 }
2092 l = strlen(appendstr);
2093 if ((n + l) >= cf_name_len) {
2094 goto toolong;
2095 }
2096 bcopy(appendstr, cf_name + n, l);
2097 n += l;
2098 break;
2099 default:
2100 cf_name[n++] = format[i];
2101 }
2102 }
2103 if (format[i] != '\0') {
2104 goto toolong;
2105 }
2106 return 0;
2107 toolong:
2108 log(LOG_ERR, "pid %ld (%s), uid (%u): corename is too long\n",
2109 (long)pid, name, (uint32_t)uid);
2110 return 1;
2111 endofstring:
2112 log(LOG_ERR, "pid %ld (%s), uid (%u): unexpected end of string after %% token\n",
2113 (long)pid, name, (uint32_t)uid);
2114 return 1;
2115 }
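/*
 * Worked example (illustrative): with the macOS default corefilename of
 * "/cores/core.%P", a dump of pid 1 ("launchd", uid 0) expands like so:
 *
 *	char buf[MAXPATHLEN];
 *	if (proc_core_name("launchd", 0, 1, buf, sizeof(buf)) == 0) {
 *		// buf now holds "/cores/core.1"
 *	}
 */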
2116 #endif /* CONFIG_COREDUMP */
2117
2118 /* Code Signing related routines */
2119
2120 int
2121 csops(__unused proc_t p, struct csops_args *uap, __unused int32_t *retval)
2122 {
2123 return csops_internal(uap->pid, uap->ops, uap->useraddr,
2124 uap->usersize, USER_ADDR_NULL);
2125 }
2126
2127 int
2128 csops_audittoken(__unused proc_t p, struct csops_audittoken_args *uap, __unused int32_t *retval)
2129 {
2130 if (uap->uaudittoken == USER_ADDR_NULL) {
2131 return EINVAL;
2132 }
2133 return csops_internal(uap->pid, uap->ops, uap->useraddr,
2134 uap->usersize, uap->uaudittoken);
2135 }
2136
2137 static int
2138 csops_copy_token(void *start, size_t length, user_size_t usize, user_addr_t uaddr)
2139 {
2140 char fakeheader[8] = { 0 };
2141 int error;
2142
2143 if (usize < sizeof(fakeheader)) {
2144 return ERANGE;
2145 }
2146
2147 /* if no blob, fill in zero header */
2148 if (NULL == start) {
2149 start = fakeheader;
2150 length = sizeof(fakeheader);
2151 } else if (usize < length) {
2152 /* ... if input too short, copy out length of entitlement */
2153 uint32_t length32 = htonl((uint32_t)length);
2154 memcpy(&fakeheader[4], &length32, sizeof(length32));
2155
2156 error = copyout(fakeheader, uaddr, sizeof(fakeheader));
2157 if (error == 0) {
2158 return ERANGE; /* input buffer too short; ERANGE signals that */
2159 }
2160 return error;
2161 }
2162 return copyout(start, uaddr, length);
2163 }
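/*
 * Userland-side sketch (hypothetical caller, not part of this file): the
 * fake header lets a caller probe a blob's size with an 8-byte buffer; the
 * required length comes back in network byte order at offset 4:
 *
 *	uint8_t hdr[8];
 *	if (csops(pid, CS_OPS_ENTITLEMENTS_BLOB, hdr, sizeof(hdr)) == -1 &&
 *	    errno == ERANGE) {
 *		uint32_t need;
 *		memcpy(&need, &hdr[4], sizeof(need));
 *		need = ntohl(need);	// then retry with a `need`-byte buffer
 *	}
 */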
2164
2165 static int
2166 csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaudittoken)
2167 {
2168 size_t usize = (size_t)CAST_DOWN(size_t, usersize);
2169 proc_t pt;
2170 int forself;
2171 int error;
2172 vnode_t tvp;
2173 off_t toff;
2174 unsigned char cdhash[SHA1_RESULTLEN];
2175 audit_token_t token;
2176 unsigned int upid = 0, uidversion = 0;
2177
2178 forself = error = 0;
2179
2180 if (pid == 0) {
2181 pid = proc_selfpid();
2182 }
2183 if (pid == proc_selfpid()) {
2184 forself = 1;
2185 }
2186
2187
2188 switch (ops) {
2189 case CS_OPS_STATUS:
2190 case CS_OPS_CDHASH:
2191 case CS_OPS_PIDOFFSET:
2192 case CS_OPS_ENTITLEMENTS_BLOB:
2193 case CS_OPS_IDENTITY:
2194 case CS_OPS_BLOB:
2195 case CS_OPS_TEAMID:
2196 break; /* not restricted to root */
2197 default:
2198 if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE) {
2199 return EPERM;
2200 }
2201 break;
2202 }
2203
2204 pt = proc_find(pid);
2205 if (pt == PROC_NULL) {
2206 return ESRCH;
2207 }
2208
2209 upid = pt->p_pid;
2210 uidversion = pt->p_idversion;
2211 if (uaudittoken != USER_ADDR_NULL) {
2212 error = copyin(uaudittoken, &token, sizeof(audit_token_t));
2213 if (error != 0) {
2214 goto out;
2215 }
2216 /* verify that the audit token pid/idversion matches the proc */
2217 if ((token.val[5] != upid) || (token.val[7] != uidversion)) {
2218 error = ESRCH;
2219 goto out;
2220 }
2221 }
2222
2223 #if CONFIG_MACF
2224 switch (ops) {
2225 case CS_OPS_MARKINVALID:
2226 case CS_OPS_MARKHARD:
2227 case CS_OPS_MARKKILL:
2228 case CS_OPS_MARKRESTRICT:
2229 case CS_OPS_SET_STATUS:
2230 case CS_OPS_CLEARINSTALLER:
2231 case CS_OPS_CLEARPLATFORM:
2232 if ((error = mac_proc_check_set_cs_info(current_proc(), pt, ops))) {
2233 goto out;
2234 }
2235 break;
2236 default:
2237 if ((error = mac_proc_check_get_cs_info(current_proc(), pt, ops))) {
2238 goto out;
2239 }
2240 }
2241 #endif
2242
2243 switch (ops) {
2244 case CS_OPS_STATUS: {
2245 uint32_t retflags;
2246
2247 proc_lock(pt);
2248 retflags = pt->p_csflags;
2249 if (cs_process_enforcement(pt)) {
2250 retflags |= CS_ENFORCEMENT;
2251 }
2252 if (csproc_get_platform_binary(pt)) {
2253 retflags |= CS_PLATFORM_BINARY;
2254 }
2255 if (csproc_get_platform_path(pt)) {
2256 retflags |= CS_PLATFORM_PATH;
2257 }
2258 // Don't return CS_REQUIRE_LV if we turned it on with CS_FORCED_LV, but still report CS_FORCED_LV
2259 if ((pt->p_csflags & CS_FORCED_LV) == CS_FORCED_LV) {
2260 retflags &= (~CS_REQUIRE_LV);
2261 }
2262 proc_unlock(pt);
2263
2264 if (uaddr != USER_ADDR_NULL) {
2265 error = copyout(&retflags, uaddr, sizeof(uint32_t));
2266 }
2267 break;
2268 }
2269 case CS_OPS_MARKINVALID:
2270 proc_lock(pt);
2271 if ((pt->p_csflags & CS_VALID) == CS_VALID) { /* is currently valid */
2272 pt->p_csflags &= ~CS_VALID; /* set invalid */
2273 if ((pt->p_csflags & CS_KILL) == CS_KILL) {
2274 pt->p_csflags |= CS_KILLED;
2275 proc_unlock(pt);
2276 if (cs_debug) {
2277 printf("CODE SIGNING: marked invalid by pid %d: "
2278 "p=%d[%s] honoring CS_KILL, final status 0x%x\n",
2279 proc_selfpid(), pt->p_pid, pt->p_comm, pt->p_csflags);
2280 }
2281 psignal(pt, SIGKILL);
2282 } else {
2283 proc_unlock(pt);
2284 }
2285 } else {
2286 proc_unlock(pt);
2287 }
2288
2289 break;
2290
2291 case CS_OPS_MARKHARD:
2292 proc_lock(pt);
2293 pt->p_csflags |= CS_HARD;
2294 if ((pt->p_csflags & CS_VALID) == 0) {
2295 /* @@@ allow? reject? kill? @@@ */
2296 proc_unlock(pt);
2297 error = EINVAL;
2298 goto out;
2299 } else {
2300 proc_unlock(pt);
2301 }
2302 break;
2303
2304 case CS_OPS_MARKKILL:
2305 proc_lock(pt);
2306 pt->p_csflags |= CS_KILL;
2307 if ((pt->p_csflags & CS_VALID) == 0) {
2308 proc_unlock(pt);
2309 psignal(pt, SIGKILL);
2310 } else {
2311 proc_unlock(pt);
2312 }
2313 break;
2314
2315 case CS_OPS_PIDOFFSET:
2316 toff = pt->p_textoff;
2317 proc_rele(pt);
2318 error = copyout(&toff, uaddr, sizeof(toff));
2319 return error;
2320
2321 case CS_OPS_CDHASH:
2322
2323 /* pt already holds a reference on its p_textvp */
2324 tvp = pt->p_textvp;
2325 toff = pt->p_textoff;
2326
2327 if (tvp == NULLVP || usize != SHA1_RESULTLEN) {
2328 proc_rele(pt);
2329 return EINVAL;
2330 }
2331
2332 error = vn_getcdhash(tvp, toff, cdhash);
2333 proc_rele(pt);
2334
2335 if (error == 0) {
2336 error = copyout(cdhash, uaddr, sizeof(cdhash));
2337 }
2338
2339 return error;
2340
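/*
 * Illustrative sketch (not part of the original file): fetching the
 * cdhash from userland.  As enforced above, the buffer must be exactly
 * SHA1_RESULTLEN (20) bytes; CS_CDHASH_LEN from the codesign headers is
 * assumed to have the same value.
 */
#if 0 /* userland-side example only; excluded from the kernel build */
#include <sys/codesign.h>
#include <stdint.h>

static int
copy_cdhash(pid_t pid, uint8_t hash[CS_CDHASH_LEN])
{
	/* any other buffer size fails with EINVAL, per the check above */
	return csops(pid, CS_OPS_CDHASH, hash, CS_CDHASH_LEN);
}
#endif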
2341 case CS_OPS_ENTITLEMENTS_BLOB: {
2342 void *start;
2343 size_t length;
2344
2345 proc_lock(pt);
2346
2347 if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
2348 proc_unlock(pt);
2349 error = EINVAL;
2350 break;
2351 }
2352
2353 error = cs_entitlements_blob_get(pt, &start, &length);
2354 proc_unlock(pt);
2355 if (error) {
2356 break;
2357 }
2358
2359 error = csops_copy_token(start, length, usize, uaddr);
2360 break;
2361 }
2362 case CS_OPS_MARKRESTRICT:
2363 proc_lock(pt);
2364 pt->p_csflags |= CS_RESTRICT;
2365 proc_unlock(pt);
2366 break;
2367
2368 case CS_OPS_SET_STATUS: {
2369 uint32_t flags;
2370
2371 if (usize < sizeof(flags)) {
2372 error = ERANGE;
2373 break;
2374 }
2375
2376 error = copyin(uaddr, &flags, sizeof(flags));
2377 if (error) {
2378 break;
2379 }
2380
2381 /* only allow setting a subset of all code sign flags */
2382 flags &=
2383 CS_HARD | CS_EXEC_SET_HARD |
2384 CS_KILL | CS_EXEC_SET_KILL |
2385 CS_RESTRICT |
2386 CS_REQUIRE_LV |
2387 CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT;
2388
2389 proc_lock(pt);
2390 if (pt->p_csflags & CS_VALID) {
2391 pt->p_csflags |= flags;
2392 } else {
2393 error = EINVAL;
2394 }
2395 proc_unlock(pt);
2396
2397 break;
2398 }
2399 case CS_OPS_BLOB: {
2400 void *start;
2401 size_t length;
2402
2403 proc_lock(pt);
2404 if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
2405 proc_unlock(pt);
2406 error = EINVAL;
2407 break;
2408 }
2409
2410 error = cs_blob_get(pt, &start, &length);
2411 proc_unlock(pt);
2412 if (error) {
2413 break;
2414 }
2415
2416 error = csops_copy_token(start, length, usize, uaddr);
2417 break;
2418 }
2419 case CS_OPS_IDENTITY:
2420 case CS_OPS_TEAMID: {
2421 const char *identity;
2422 uint8_t fakeheader[8];
2423 uint32_t idlen;
2424 size_t length;
2425
2426 /*
2427 * Prefix the identity with a blob-style header
2428 * so that userland can easily determine the
2429 * identity length.
2430 */
2431 if (usize < sizeof(fakeheader)) {
2432 error = ERANGE;
2433 break;
2434 }
2435 memset(fakeheader, 0, sizeof(fakeheader));
2436
2437 proc_lock(pt);
2438 if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
2439 proc_unlock(pt);
2440 error = EINVAL;
2441 break;
2442 }
2443
2444 identity = ops == CS_OPS_TEAMID ? csproc_get_teamid(pt) : cs_identity_get(pt);
2445 proc_unlock(pt);
2446 if (identity == NULL) {
2447 error = ENOENT;
2448 break;
2449 }
2450
2451 length = strlen(identity) + 1; /* include NUL */
2452 idlen = htonl(length + sizeof(fakeheader));
2453 memcpy(&fakeheader[4], &idlen, sizeof(idlen));
2454
2455 error = copyout(fakeheader, uaddr, sizeof(fakeheader));
2456 if (error) {
2457 break;
2458 }
2459
2460 if (usize < sizeof(fakeheader) + length) {
2461 error = ERANGE;
2462 } else if (usize > sizeof(fakeheader)) {
2463 error = copyout(identity, uaddr + sizeof(fakeheader), length);
2464 }
2465
2466 break;
2467 }
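/*
 * Illustrative sketch (not part of the original file) of the two-call
 * pattern the fake header above enables.  The first, deliberately short
 * call fails with ERANGE, but the 8-byte header has already been copied
 * out; bytes 4..7 hold the total length in network byte order.
 */
#if 0 /* userland-side example only; excluded from the kernel build */
#include <sys/codesign.h>
#include <arpa/inet.h>
#include <stdlib.h>
#include <string.h>

static char *
copy_signing_identity(pid_t pid)
{
	uint8_t header[8];
	uint32_t total;
	char *buf;

	memset(header, 0, sizeof(header));
	(void)csops(pid, CS_OPS_IDENTITY, header, sizeof(header));
	memcpy(&total, &header[4], sizeof(total));
	total = ntohl(total);
	if (total <= sizeof(header) || (buf = malloc(total)) == NULL) {
		return NULL;
	}
	if (csops(pid, CS_OPS_IDENTITY, buf, total) != 0) {
		free(buf);
		return NULL;
	}
	/* drop the fake header; the identity string is NUL-terminated */
	memmove(buf, buf + sizeof(header), total - sizeof(header));
	return buf;
}
#endif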
2468
2469 case CS_OPS_CLEARINSTALLER:
2470 proc_lock(pt);
2471 pt->p_csflags &= ~(CS_INSTALLER | CS_DATAVAULT_CONTROLLER | CS_EXEC_INHERIT_SIP);
2472 proc_unlock(pt);
2473 break;
2474
2475 case CS_OPS_CLEARPLATFORM:
2476 #if DEVELOPMENT || DEBUG
2477 if (cs_process_global_enforcement()) {
2478 error = ENOTSUP;
2479 break;
2480 }
2481
2482 #if CONFIG_CSR
2483 if (csr_check(CSR_ALLOW_APPLE_INTERNAL) != 0) {
2484 error = ENOTSUP;
2485 break;
2486 }
2487 #endif
2488
2489 proc_lock(pt);
2490 pt->p_csflags &= ~(CS_PLATFORM_BINARY | CS_PLATFORM_PATH);
2491 csproc_clear_platform_binary(pt);
2492 proc_unlock(pt);
2493 break;
2494 #else
2495 error = ENOTSUP;
2496 break;
2497 #endif /* DEVELOPMENT || DEBUG */
2498
2499 default:
2500 error = EINVAL;
2501 break;
2502 }
2503 out:
2504 proc_rele(pt);
2505 return error;
2506 }
2507
2508 void
2509 proc_iterate(
2510 unsigned int flags,
2511 proc_iterate_fn_t callout,
2512 void *arg,
2513 proc_iterate_fn_t filterfn,
2514 void *filterarg)
2515 {
2516 pidlist_t pid_list, *pl = pidlist_init(&pid_list);
2517 u_int pid_count_available = 0;
2518
2519 assert(callout != NULL);
2520
2521 /* allocate outside of the proc_list_lock */
2522 for (;;) {
2523 proc_list_lock();
2524 pid_count_available = nprocs + 1; /* kernel_task not counted in nprocs */
2525 assert(pid_count_available > 0);
2526 if (pidlist_nalloc(pl) > pid_count_available) {
2527 break;
2528 }
2529 proc_list_unlock();
2530
2531 pidlist_alloc(pl, pid_count_available);
2532 }
2533 pidlist_set_active(pl);
2534
2535 /* filter pids into the pid_list */
2536
2537 u_int pid_count = 0;
2538 if (flags & PROC_ALLPROCLIST) {
2539 proc_t p;
2540 ALLPROC_FOREACH(p) {
2541 /* ignore processes that are being forked */
2542 if (p->p_stat == SIDL) {
2543 continue;
2544 }
2545 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
2546 continue;
2547 }
2548 pidlist_add_pid(pl, proc_pid(p));
2549 if (++pid_count >= pid_count_available) {
2550 break;
2551 }
2552 }
2553 }
2554
2555 if ((pid_count < pid_count_available) &&
2556 (flags & PROC_ZOMBPROCLIST)) {
2557 proc_t p;
2558 ZOMBPROC_FOREACH(p) {
2559 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
2560 continue;
2561 }
2562 pidlist_add_pid(pl, proc_pid(p));
2563 if (++pid_count >= pid_count_available) {
2564 break;
2565 }
2566 }
2567 }
2568
2569 proc_list_unlock();
2570
2571 /* call callout on processes in the pid_list */
2572
2573 const pidlist_entry_t *pe;
2574 SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
2575 for (u_int i = 0; i < pe->pe_nused; i++) {
2576 const pid_t pid = pe->pe_pid[i];
2577 proc_t p = proc_find(pid);
2578 if (p) {
2579 if ((flags & PROC_NOWAITTRANS) == 0) {
2580 proc_transwait(p, 0);
2581 }
2582 const int callout_ret = callout(p, arg);
2583
2584 switch (callout_ret) {
2585 case PROC_RETURNED_DONE:
2586 proc_rele(p);
2587 /* FALLTHROUGH */
2588 case PROC_CLAIMED_DONE:
2589 goto out;
2590
2591 case PROC_RETURNED:
2592 proc_rele(p);
2593 /* FALLTHROUGH */
2594 case PROC_CLAIMED:
2595 break;
2596 default:
2597 panic("%s: callout = %d for pid %d",
2598 __func__, callout_ret, pid);
2599 break;
2600 }
2601 } else if (flags & PROC_ZOMBPROCLIST) {
2602 p = proc_find_zombref(pid);
2603 if (!p) {
2604 continue;
2605 }
2606 const int callout_ret = callout(p, arg);
2607
2608 switch (callout_ret) {
2609 case PROC_RETURNED_DONE:
2610 proc_drop_zombref(p);
2611 /* FALLTHROUGH */
2612 case PROC_CLAIMED_DONE:
2613 goto out;
2614
2615 case PROC_RETURNED:
2616 proc_drop_zombref(p);
2617 /* FALLTHROUGH */
2618 case PROC_CLAIMED:
2619 break;
2620 default:
2621 panic("%s: callout = %d for zombie %d",
2622 __func__, callout_ret, pid);
2623 break;
2624 }
2625 }
2626 }
2627 }
2628 out:
2629 pidlist_free(pl);
2630 }
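/*
 * Illustrative sketch (not part of the original file) of the iteration
 * protocol: the filter runs under the proc_list_lock and must not
 * block, while the callout runs with a reference held on the proc.
 * PROC_RETURNED asks the iterator to drop that reference, PROC_CLAIMED
 * means the callout consumed it, and the *_DONE variants end the walk.
 */
#if 0 /* example only; excluded from the build */
static int
example_filter(proc_t p, void *arg)
{
	pid_t *pgid = arg;

	return p->p_pgrpid == *pgid;	/* nonzero selects the proc */
}

static int
example_callout(proc_t p, __unused void *arg)
{
	printf("pid %d (%s)\n", proc_pid(p), p->p_comm);
	return PROC_RETURNED;		/* iterator releases the ref */
}

/*
 * usage:
 *	pid_t pgid = ...;
 *	proc_iterate(PROC_ALLPROCLIST, example_callout, NULL,
 *	    example_filter, &pgid);
 */
#endif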
2631
2632 void
2633 proc_rebootscan(
2634 proc_iterate_fn_t callout,
2635 void *arg,
2636 proc_iterate_fn_t filterfn,
2637 void *filterarg)
2638 {
2639 proc_t p;
2640
2641 assert(callout != NULL);
2642
2643 proc_shutdown_exitcount = 0;
2644
2645 restart_foreach:
2646
2647 proc_list_lock();
2648
2649 ALLPROC_FOREACH(p) {
2650 if ((filterfn != NULL) && filterfn(p, filterarg) == 0) {
2651 continue;
2652 }
2653 p = proc_ref_locked(p);
2654 if (!p) {
2655 continue;
2656 }
2657
2658 proc_list_unlock();
2659
2660 proc_transwait(p, 0);
2661 (void)callout(p, arg);
2662 proc_rele(p);
2663
2664 goto restart_foreach;
2665 }
2666
2667 proc_list_unlock();
2668 }
2669
2670 void
2671 proc_childrenwalk(
2672 proc_t parent,
2673 proc_iterate_fn_t callout,
2674 void *arg)
2675 {
2676 pidlist_t pid_list, *pl = pidlist_init(&pid_list);
2677 u_int pid_count_available = 0;
2678
2679 assert(parent != NULL);
2680 assert(callout != NULL);
2681
2682 for (;;) {
2683 proc_list_lock();
2684 pid_count_available = parent->p_childrencnt;
2685 if (pid_count_available == 0) {
2686 proc_list_unlock();
2687 goto out;
2688 }
2689 if (pidlist_nalloc(pl) > pid_count_available) {
2690 break;
2691 }
2692 proc_list_unlock();
2693
2694 pidlist_alloc(pl, pid_count_available);
2695 }
2696 pidlist_set_active(pl);
2697
2698 u_int pid_count = 0;
2699 proc_t p;
2700 PCHILDREN_FOREACH(parent, p) {
2701 if (p->p_stat == SIDL) {
2702 continue;
2703 }
2704 pidlist_add_pid(pl, proc_pid(p));
2705 if (++pid_count >= pid_count_available) {
2706 break;
2707 }
2708 }
2709
2710 proc_list_unlock();
2711
2712 const pidlist_entry_t *pe;
2713 SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
2714 for (u_int i = 0; i < pe->pe_nused; i++) {
2715 const pid_t pid = pe->pe_pid[i];
2716 p = proc_find(pid);
2717 if (!p) {
2718 continue;
2719 }
2720 const int callout_ret = callout(p, arg);
2721
2722 switch (callout_ret) {
2723 case PROC_RETURNED_DONE:
2724 proc_rele(p);
2725 /* FALLTHROUGH */
2726 case PROC_CLAIMED_DONE:
2727 goto out;
2728
2729 case PROC_RETURNED:
2730 proc_rele(p);
2731 /* FALLTHROUGH */
2732 case PROC_CLAIMED:
2733 break;
2734 default:
2735 panic("%s: callout = %d for pid %d",
2736 __func__, callout_ret, pid);
2737 break;
2738 }
2739 }
2740 }
2741 out:
2742 pidlist_free(pl);
2743 }
2744
2745 void
2746 pgrp_iterate(
2747 struct pgrp *pgrp,
2748 unsigned int flags,
2749 proc_iterate_fn_t callout,
2750 void * arg,
2751 proc_iterate_fn_t filterfn,
2752 void * filterarg)
2753 {
2754 pidlist_t pid_list, *pl = pidlist_init(&pid_list);
2755 u_int pid_count_available = 0;
2756
2757 assert(pgrp != NULL);
2758 assert(callout != NULL);
2759
2760 for (;;) {
2761 pgrp_lock(pgrp);
2762 pid_count_available = pgrp->pg_membercnt;
2763 if (pid_count_available == 0) {
2764 pgrp_unlock(pgrp);
2765 if (flags & PGRP_DROPREF) {
2766 pg_rele(pgrp);
2767 }
2768 goto out;
2769 }
2770 if (pidlist_nalloc(pl) > pid_count_available) {
2771 break;
2772 }
2773 pgrp_unlock(pgrp);
2774
2775 pidlist_alloc(pl, pid_count_available);
2776 }
2777 pidlist_set_active(pl);
2778
2779 const pid_t pgid = pgrp->pg_id;
2780 u_int pid_count = 0;
2781 proc_t p;
2782 PGMEMBERS_FOREACH(pgrp, p) {
2783 if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
2784 continue;
2785 }
2786 pidlist_add_pid(pl, proc_pid(p));
2787 if (++pid_count >= pid_count_available) {
2788 break;
2789 }
2790 }
2791
2792 pgrp_unlock(pgrp);
2793
2794 if (flags & PGRP_DROPREF) {
2795 pg_rele(pgrp);
2796 }
2797
2798 const pidlist_entry_t *pe;
2799 SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
2800 for (u_int i = 0; i < pe->pe_nused; i++) {
2801 const pid_t pid = pe->pe_pid[i];
2802 if (0 == pid) {
2803 continue; /* skip kernproc */
2804 }
2805 p = proc_find(pid);
2806 if (!p) {
2807 continue;
2808 }
2809 if (p->p_pgrpid != pgid) {
2810 proc_rele(p);
2811 continue;
2812 }
2813 const int callout_ret = callout(p, arg);
2814
2815 switch (callout_ret) {
2816 case PROC_RETURNED:
2817 proc_rele(p);
2818 /* FALLTHROUGH */
2819 case PROC_CLAIMED:
2820 break;
2821 case PROC_RETURNED_DONE:
2822 proc_rele(p);
2823 /* FALLTHROUGH */
2824 case PROC_CLAIMED_DONE:
2825 goto out;
2826
2827 default:
2828 panic("%s: callout = %d for pid %d",
2829 __func__, callout_ret, pid);
2830 }
2831 }
2832 }
2833
2834 out:
2835 pidlist_free(pl);
2836 }
2837
2838 static void
2839 pgrp_add(struct pgrp * pgrp, struct proc * parent, struct proc * child)
2840 {
2841 proc_list_lock();
2842 child->p_pgrp = pgrp;
2843 child->p_pgrpid = pgrp->pg_id;
2844 child->p_listflag |= P_LIST_INPGRP;
2845 /*
2846 * When a pgrp is being freed, a process can still request
2847 * addition to it via setpgid() (e.g. from bash when login
2848 * is terminated by the login cycler) rather than getting ESRCH.
2849 * It is safe to hold the lock here due to the refcount on the pgrp.
2850 */
2851 if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
2852 pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
2853 }
2854
2855 if ((pgrp->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD) {
2856 panic("pgrp_add: pgrp is dead adding process");
2857 }
2858 proc_list_unlock();
2859
2860 pgrp_lock(pgrp);
2861 pgrp->pg_membercnt++;
2862 if (parent != PROC_NULL) {
2863 LIST_INSERT_AFTER(parent, child, p_pglist);
2864 } else {
2865 LIST_INSERT_HEAD(&pgrp->pg_members, child, p_pglist);
2866 }
2867 pgrp_unlock(pgrp);
2868
2869 proc_list_lock();
2870 if (((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (pgrp->pg_membercnt != 0)) {
2871 pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
2872 }
2873 proc_list_unlock();
2874 }
2875
2876 static void
2877 pgrp_remove(struct proc * p)
2878 {
2879 struct pgrp * pg;
2880
2881 pg = proc_pgrp(p);
2882
2883 proc_list_lock();
2884 #if __PROC_INTERNAL_DEBUG
2885 if ((p->p_listflag & P_LIST_INPGRP) == 0) {
2886 panic("removing from pglist but no named ref\n");
2887 }
2888 #endif
2889 p->p_pgrpid = PGRPID_DEAD;
2890 p->p_listflag &= ~P_LIST_INPGRP;
2891 p->p_pgrp = NULL;
2892 proc_list_unlock();
2893
2894 if (pg == PGRP_NULL) {
2895 panic("pgrp_remove: pg is NULL");
2896 }
2897 pgrp_lock(pg);
2898 pg->pg_membercnt--;
2899
2900 if (pg->pg_membercnt < 0) {
2901 panic("pgrp_remove: negative membercnt pgrp:%p p:%p\n", pg, p);
2902 }
2903
2904 LIST_REMOVE(p, p_pglist);
2905 if (pg->pg_members.lh_first == 0) {
2906 pgrp_unlock(pg);
2907 pgdelete_dropref(pg);
2908 } else {
2909 pgrp_unlock(pg);
2910 pg_rele(pg);
2911 }
2912 }
2913
2914
2915 /* cannot use proc_pgrp as it may be stalled */
2916 static void
2917 pgrp_replace(struct proc * p, struct pgrp * newpg)
2918 {
2919 struct pgrp * oldpg;
2920
2921
2922
2923 proc_list_lock();
2924
2925 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
2926 p->p_listflag |= P_LIST_PGRPTRWAIT;
2927 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
2928 }
2929
2930 p->p_listflag |= P_LIST_PGRPTRANS;
2931
2932 oldpg = p->p_pgrp;
2933 if (oldpg == PGRP_NULL) {
2934 panic("pgrp_replace: oldpg NULL");
2935 }
2936 oldpg->pg_refcount++;
2937 #if __PROC_INTERNAL_DEBUG
2938 if ((p->p_listflag & P_LIST_INPGRP) == 0) {
2939 panic("removing from pglist but no named ref\n");
2940 }
2941 #endif
2942 p->p_pgrpid = PGRPID_DEAD;
2943 p->p_listflag &= ~P_LIST_INPGRP;
2944 p->p_pgrp = NULL;
2945
2946 proc_list_unlock();
2947
2948 pgrp_lock(oldpg);
2949 oldpg->pg_membercnt--;
2950 if (oldpg->pg_membercnt < 0) {
2951 panic("pgrp_replace: negative membercnt pgrp:%p p:%p\n", oldpg, p);
2952 }
2953 LIST_REMOVE(p, p_pglist);
2954 if (oldpg->pg_members.lh_first == 0) {
2955 pgrp_unlock(oldpg);
2956 pgdelete_dropref(oldpg);
2957 } else {
2958 pgrp_unlock(oldpg);
2959 pg_rele(oldpg);
2960 }
2961
2962 proc_list_lock();
2963 p->p_pgrp = newpg;
2964 p->p_pgrpid = newpg->pg_id;
2965 p->p_listflag |= P_LIST_INPGRP;
2966 /*
2967 * When a pgrp is being freed, a process can still request
2968 * addition to it via setpgid() (e.g. from bash when login
2969 * is terminated by the login cycler) rather than getting ESRCH.
2970 * It is safe to hold the lock here due to the refcount on the pgrp.
2971 */
2972 if ((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
2973 newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
2974 }
2975
2976 if ((newpg->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD) {
2977 panic("pgrp_replace: pgrp is dead adding process");
2978 }
2979 proc_list_unlock();
2980
2981 pgrp_lock(newpg);
2982 newpg->pg_membercnt++;
2983 LIST_INSERT_HEAD(&newpg->pg_members, p, p_pglist);
2984 pgrp_unlock(newpg);
2985
2986 proc_list_lock();
2987 if (((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (newpg->pg_membercnt != 0)) {
2988 newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
2989 }
2990
2991 p->p_listflag &= ~P_LIST_PGRPTRANS;
2992 if ((p->p_listflag & P_LIST_PGRPTRWAIT) == P_LIST_PGRPTRWAIT) {
2993 p->p_listflag &= ~P_LIST_PGRPTRWAIT;
2994 wakeup(&p->p_pgrpid);
2995 }
2996 proc_list_unlock();
2997 }
2998
2999 void
3000 pgrp_lock(struct pgrp * pgrp)
3001 {
3002 lck_mtx_lock(&pgrp->pg_mlock);
3003 }
3004
3005 void
3006 pgrp_unlock(struct pgrp * pgrp)
3007 {
3008 lck_mtx_unlock(&pgrp->pg_mlock);
3009 }
3010
3011 void
3012 session_lock(struct session * sess)
3013 {
3014 lck_mtx_lock(&sess->s_mlock);
3015 }
3016
3017
3018 void
3019 session_unlock(struct session * sess)
3020 {
3021 lck_mtx_unlock(&sess->s_mlock);
3022 }
3023
3024 struct pgrp *
3025 proc_pgrp(proc_t p)
3026 {
3027 struct pgrp * pgrp;
3028
3029 if (p == PROC_NULL) {
3030 return PGRP_NULL;
3031 }
3032 proc_list_lock();
3033
3034 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
3035 p->p_listflag |= P_LIST_PGRPTRWAIT;
3036 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
3037 }
3038
3039 pgrp = p->p_pgrp;
3040
3041 assert(pgrp != NULL);
3042
3043 if (pgrp != PGRP_NULL) {
3044 pgrp->pg_refcount++;
3045 if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) != 0) {
3046 panic("proc_pgrp: ref being provided for dead pgrp");
3047 }
3048 }
3049
3050 proc_list_unlock();
3051
3052 return pgrp;
3053 }
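/*
 * Illustrative sketch (not part of the original file): proc_pgrp()
 * returns a referenced pgrp that the caller must drop with pg_rele(),
 * mirroring the proc_session()/session_rele() pairing below.
 */
#if 0 /* example only; excluded from the build */
static pid_t
example_pgid(proc_t p)
{
	struct pgrp *pg = proc_pgrp(p);
	pid_t pgid = -1;

	if (pg != PGRP_NULL) {
		pgid = pg->pg_id;
		pg_rele(pg);	/* drop the ref taken by proc_pgrp() */
	}
	return pgid;
}
#endif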
3054
3055 struct pgrp *
3056 tty_pgrp(struct tty * tp)
3057 {
3058 struct pgrp * pg = PGRP_NULL;
3059
3060 proc_list_lock();
3061 pg = tp->t_pgrp;
3062
3063 if (pg != PGRP_NULL) {
3064 if ((pg->pg_listflags & PGRP_FLAG_DEAD) != 0) {
3065 panic("tty_pgrp: ref being provided for dead pgrp");
3066 }
3067 pg->pg_refcount++;
3068 }
3069 proc_list_unlock();
3070
3071 return pg;
3072 }
3073
3074 struct session *
3075 proc_session(proc_t p)
3076 {
3077 struct session * sess = SESSION_NULL;
3078
3079 if (p == PROC_NULL) {
3080 return SESSION_NULL;
3081 }
3082
3083 proc_list_lock();
3084
3085 /* wait during transitions */
3086 while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
3087 p->p_listflag |= P_LIST_PGRPTRWAIT;
3088 (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
3089 }
3090
3091 if ((p->p_pgrp != PGRP_NULL) && ((sess = p->p_pgrp->pg_session) != SESSION_NULL)) {
3092 if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
3093 panic("proc_session: returning session ref on terminating session");
3094 }
3095 sess->s_count++;
3096 }
3097 proc_list_unlock();
3098 return sess;
3099 }
3100
3101 void
3102 session_rele(struct session *sess)
3103 {
3104 proc_list_lock();
3105 if (--sess->s_count == 0) {
3106 if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
3107 panic("session_rele: terminating already terminated session");
3108 }
3109 sess->s_listflags |= S_LIST_TERM;
3110 LIST_REMOVE(sess, s_hash);
3111 sess->s_listflags |= S_LIST_DEAD;
3112 if (sess->s_count != 0) {
3113 panic("session_rele: freeing session in use");
3114 }
3115 proc_list_unlock();
3116 lck_mtx_destroy(&sess->s_mlock, proc_mlock_grp);
3117 FREE_ZONE(sess, sizeof(struct session), M_SESSION);
3118 } else {
3119 proc_list_unlock();
3120 }
3121 }
3122
3123 int
3124 proc_transstart(proc_t p, int locked, int non_blocking)
3125 {
3126 if (locked == 0) {
3127 proc_lock(p);
3128 }
3129 while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
3130 if (((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT) || non_blocking) {
3131 if (locked == 0) {
3132 proc_unlock(p);
3133 }
3134 return EDEADLK;
3135 }
3136 p->p_lflag |= P_LTRANSWAIT;
3137 msleep(&p->p_lflag, &p->p_mlock, 0, "proc_transstart", NULL);
3138 }
3139 p->p_lflag |= P_LINTRANSIT;
3140 p->p_transholder = current_thread();
3141 if (locked == 0) {
3142 proc_unlock(p);
3143 }
3144 return 0;
3145 }
3146
3147 void
3148 proc_transcommit(proc_t p, int locked)
3149 {
3150 if (locked == 0) {
3151 proc_lock(p);
3152 }
3153
3154 assert((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT);
3155 assert(p->p_transholder == current_thread());
3156 p->p_lflag |= P_LTRANSCOMMIT;
3157
3158 if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
3159 p->p_lflag &= ~P_LTRANSWAIT;
3160 wakeup(&p->p_lflag);
3161 }
3162 if (locked == 0) {
3163 proc_unlock(p);
3164 }
3165 }
3166
3167 void
3168 proc_transend(proc_t p, int locked)
3169 {
3170 if (locked == 0) {
3171 proc_lock(p);
3172 }
3173
3174 p->p_lflag &= ~(P_LINTRANSIT | P_LTRANSCOMMIT);
3175 p->p_transholder = NULL;
3176
3177 if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
3178 p->p_lflag &= ~P_LTRANSWAIT;
3179 wakeup(&p->p_lflag);
3180 }
3181 if (locked == 0) {
3182 proc_unlock(p);
3183 }
3184 }
3185
3186 int
3187 proc_transwait(proc_t p, int locked)
3188 {
3189 if (locked == 0) {
3190 proc_lock(p);
3191 }
3192 while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
3193 if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT && current_proc() == p) {
3194 if (locked == 0) {
3195 proc_unlock(p);
3196 }
3197 return EDEADLK;
3198 }
3199 p->p_lflag |= P_LTRANSWAIT;
3200 msleep(&p->p_lflag, &p->p_mlock, 0, "proc_transwait", NULL);
3201 }
3202 if (locked == 0) {
3203 proc_unlock(p);
3204 }
3205 return 0;
3206 }
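/*
 * Illustrative sketch (not part of the original file) of the
 * transaction bracket built from the routines above; assumes the
 * caller already holds a proc_find() reference on p.
 */
#if 0 /* example only; excluded from the build */
static int
example_transaction(proc_t p)
{
	int error;

	/* locked == 0, blocking; EDEADLK if a commit is in flight */
	if ((error = proc_transstart(p, 0, 0)) != 0) {
		return error;
	}
	/* ... mutate the proc; concurrent callers proc_transwait() ... */
	proc_transend(p, 0);
	return 0;
}
#endif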
3207
3208 void
3209 proc_klist_lock(void)
3210 {
3211 lck_mtx_lock(proc_klist_mlock);
3212 }
3213
3214 void
3215 proc_klist_unlock(void)
3216 {
3217 lck_mtx_unlock(proc_klist_mlock);
3218 }
3219
3220 void
3221 proc_knote(struct proc * p, long hint)
3222 {
3223 proc_klist_lock();
3224 KNOTE(&p->p_klist, hint);
3225 proc_klist_unlock();
3226 }
3227
3228 void
3229 proc_knote_drain(struct proc *p)
3230 {
3231 struct knote *kn = NULL;
3232
3233 /*
3234 * Clear the proc's klist to avoid references after the proc is reaped.
3235 */
3236 proc_klist_lock();
3237 while ((kn = SLIST_FIRST(&p->p_klist))) {
3238 kn->kn_proc = PROC_NULL;
3239 KNOTE_DETACH(&p->p_klist, kn);
3240 }
3241 proc_klist_unlock();
3242 }
3243
3244 void
3245 proc_setregister(proc_t p)
3246 {
3247 proc_lock(p);
3248 p->p_lflag |= P_LREGISTER;
3249 proc_unlock(p);
3250 }
3251
3252 void
3253 proc_resetregister(proc_t p)
3254 {
3255 proc_lock(p);
3256 p->p_lflag &= ~P_LREGISTER;
3257 proc_unlock(p);
3258 }
3259
3260 pid_t
3261 proc_pgrpid(proc_t p)
3262 {
3263 return p->p_pgrpid;
3264 }
3265
3266 pid_t
3267 proc_sessionid(proc_t p)
3268 {
3269 pid_t sid = -1;
3270 struct session * sessp = proc_session(p);
3271
3272 if (sessp != SESSION_NULL) {
3273 sid = sessp->s_sid;
3274 session_rele(sessp);
3275 }
3276
3277 return sid;
3278 }
3279
3280 pid_t
3281 proc_selfpgrpid(void)
3282 {
3283 return current_proc()->p_pgrpid;
3284 }
3285
3286
3287 /* return control and action states */
3288 int
3289 proc_getpcontrol(int pid, int * pcontrolp)
3290 {
3291 proc_t p;
3292
3293 p = proc_find(pid);
3294 if (p == PROC_NULL) {
3295 return ESRCH;
3296 }
3297 if (pcontrolp != NULL) {
3298 *pcontrolp = p->p_pcaction;
3299 }
3300
3301 proc_rele(p);
3302 return 0;
3303 }
3304
3305 int
3306 proc_dopcontrol(proc_t p)
3307 {
3308 int pcontrol;
3309 os_reason_t kill_reason;
3310
3311 proc_lock(p);
3312
3313 pcontrol = PROC_CONTROL_STATE(p);
3314
3315 if (PROC_ACTION_STATE(p) == 0) {
3316 switch (pcontrol) {
3317 case P_PCTHROTTLE:
3318 PROC_SETACTION_STATE(p);
3319 proc_unlock(p);
3320 printf("low swap: throttling pid %d (%s)\n", p->p_pid, p->p_comm);
3321 break;
3322
3323 case P_PCSUSP:
3324 PROC_SETACTION_STATE(p);
3325 proc_unlock(p);
3326 printf("low swap: suspending pid %d (%s)\n", p->p_pid, p->p_comm);
3327 task_suspend(p->task);
3328 break;
3329
3330 case P_PCKILL:
3331 PROC_SETACTION_STATE(p);
3332 proc_unlock(p);
3333 printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
3334 kill_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_LOWSWAP);
3335 psignal_with_reason(p, SIGKILL, kill_reason);
3336 break;
3337
3338 default:
3339 proc_unlock(p);
3340 }
3341 } else {
3342 proc_unlock(p);
3343 }
3344
3345 return PROC_RETURNED;
3346 }
3347
3348
3349 /*
3350 * Resume a throttled or suspended process. This is an internal interface that is only
3351 * used by the user-level code that presents the GUI when we run out of swap space, and
3352 * hence it is restricted to processes with superuser privileges.
3353 */
3354
3355 int
3356 proc_resetpcontrol(int pid)
3357 {
3358 proc_t p;
3359 int pcontrol;
3360 int error;
3361 proc_t self = current_proc();
3362
3363 /* allow if the process has been validated to handle resource control, or if the caller has superuser credentials */
3364 if (((self->p_lflag & P_LVMRSRCOWNER) == 0) && (error = suser(kauth_cred_get(), 0))) {
3365 return error;
3366 }
3367
3368 p = proc_find(pid);
3369 if (p == PROC_NULL) {
3370 return ESRCH;
3371 }
3372
3373 proc_lock(p);
3374
3375 pcontrol = PROC_CONTROL_STATE(p);
3376
3377 if (PROC_ACTION_STATE(p) != 0) {
3378 switch (pcontrol) {
3379 case P_PCTHROTTLE:
3380 PROC_RESETACTION_STATE(p);
3381 proc_unlock(p);
3382 printf("low swap: unthrottling pid %d (%s)\n", p->p_pid, p->p_comm);
3383 break;
3384
3385 case P_PCSUSP:
3386 PROC_RESETACTION_STATE(p);
3387 proc_unlock(p);
3388 printf("low swap: resuming pid %d (%s)\n", p->p_pid, p->p_comm);
3389 task_resume(p->task);
3390 break;
3391
3392 case P_PCKILL:
3393 /* Cannot resurrect a killed process; log the attempt and ignore it. */
3394 PROC_SETACTION_STATE(p);
3395 proc_unlock(p);
3396 printf("low swap: attempt to unkill pid %d (%s) ignored\n", p->p_pid, p->p_comm);
3397 break;
3398
3399 default:
3400 proc_unlock(p);
3401 }
3402 } else {
3403 proc_unlock(p);
3404 }
3405
3406 proc_rele(p);
3407 return 0;
3408 }
3409
3410
3411
3412 struct no_paging_space {
3413 uint64_t pcs_max_size;
3414 uint64_t pcs_uniqueid;
3415 int pcs_pid;
3416 int pcs_proc_count;
3417 uint64_t pcs_total_size;
3418
3419 uint64_t npcs_max_size;
3420 uint64_t npcs_uniqueid;
3421 int npcs_pid;
3422 int npcs_proc_count;
3423 uint64_t npcs_total_size;
3424
3425 int apcs_proc_count;
3426 uint64_t apcs_total_size;
3427 };
3428
3429
3430 static int
3431 proc_pcontrol_filter(proc_t p, void *arg)
3432 {
3433 struct no_paging_space *nps;
3434 uint64_t compressed;
3435
3436 nps = (struct no_paging_space *)arg;
3437
3438 compressed = get_task_compressed(p->task);
3439
3440 if (PROC_CONTROL_STATE(p)) {
3441 if (PROC_ACTION_STATE(p) == 0) {
3442 if (compressed > nps->pcs_max_size) {
3443 nps->pcs_pid = p->p_pid;
3444 nps->pcs_uniqueid = p->p_uniqueid;
3445 nps->pcs_max_size = compressed;
3446 }
3447 nps->pcs_total_size += compressed;
3448 nps->pcs_proc_count++;
3449 } else {
3450 nps->apcs_total_size += compressed;
3451 nps->apcs_proc_count++;
3452 }
3453 } else {
3454 if (compressed > nps->npcs_max_size) {
3455 nps->npcs_pid = p->p_pid;
3456 nps->npcs_uniqueid = p->p_uniqueid;
3457 nps->npcs_max_size = compressed;
3458 }
3459 nps->npcs_total_size += compressed;
3460 nps->npcs_proc_count++;
3461 }
3462 return 0;
3463 }
3464
3465
3466 static int
3467 proc_pcontrol_null(__unused proc_t p, __unused void *arg)
3468 {
3469 return PROC_RETURNED;
3470 }
3471
3472
3473 /*
3474 * Deal with the low on compressor pool space condition... this function
3475 * gets called when we are approaching the limits of the compressor pool or
3476 * we are unable to create a new swap file.
3477 * Since this eventually creates a memory deadlock situation, we need to take action to free up
3478 * memory resources (both compressed and uncompressed) in order to prevent the system from hanging completely.
3479 * There are 2 categories of processes to deal with. Those that have an action
3480 * associated with them by the task itself and those that do not. Actionable
3481 * tasks can have one of three categories specified: ones that
3482 * can be killed immediately, ones that should be suspended, and ones that should
3483 * be throttled. Processes that do not have an action associated with them are normally
3484 * ignored unless they are utilizing such a large percentage of the compressor pool (currently 50%)
3485 * that only by killing them can we hope to put the system back into a usable state.
3486 */
3487
3488 #define NO_PAGING_SPACE_DEBUG 0
3489
3490 extern uint64_t vm_compressor_pages_compressed(void);
3491
3492 struct timeval last_no_space_action = {.tv_sec = 0, .tv_usec = 0};
3493
3494 #if DEVELOPMENT || DEBUG
3495 extern boolean_t kill_on_no_paging_space;
3496 #endif /* DEVELOPMENT || DEBUG */
3497
3498 #define MB_SIZE (1024 * 1024ULL)
3499 boolean_t memorystatus_kill_on_VM_compressor_space_shortage(boolean_t);
3500
3501 extern int32_t max_kill_priority;
3502 extern int memorystatus_get_proccnt_upto_priority(int32_t max_bucket_index);
3503
3504 int
3505 no_paging_space_action()
3506 {
3507 proc_t p;
3508 struct no_paging_space nps;
3509 struct timeval now;
3510 os_reason_t kill_reason;
3511
3512 /*
3513 * Throttle how often we come through here. Once every 5 seconds should be plenty.
3514 */
3515 microtime(&now);
3516
3517 if (now.tv_sec <= last_no_space_action.tv_sec + 5) {
3518 return 0;
3519 }
3520
3521 /*
3522 * Examine all processes and find the biggest (biggest is based on the number of pages this
3523 * task has in the compressor pool) that has been marked to have some action
3524 * taken when swap space runs out... we also find the biggest that hasn't been marked for
3525 * action.
3526 *
3527 * If the biggest non-actionable task is over the "dangerously big" threshold (currently 50% of
3528 * the total number of pages held by the compressor), we go ahead and kill it since no other task
3529 * can have any real effect on the situation. Otherwise, we go after the actionable process.
3530 */
3531 bzero(&nps, sizeof(nps));
3532
3533 proc_iterate(PROC_ALLPROCLIST, proc_pcontrol_null, (void *)NULL, proc_pcontrol_filter, (void *)&nps);
3534
3535 #if NO_PAGING_SPACE_DEBUG
3536 printf("low swap: npcs_proc_count = %d, npcs_total_size = %qd, npcs_max_size = %qd\n",
3537 nps.npcs_proc_count, nps.npcs_total_size, nps.npcs_max_size);
3538 printf("low swap: pcs_proc_count = %d, pcs_total_size = %qd, pcs_max_size = %qd\n",
3539 nps.pcs_proc_count, nps.pcs_total_size, nps.pcs_max_size);
3540 printf("low swap: apcs_proc_count = %d, apcs_total_size = %qd\n",
3541 nps.apcs_proc_count, nps.apcs_total_size);
3542 #endif
3543 if (nps.npcs_max_size > (vm_compressor_pages_compressed() * 50) / 100) {
3544 /*
3545 * for now we'll knock out any task that has more than 50% of the pages
3546 * held by the compressor
3547 */
3548 if ((p = proc_find(nps.npcs_pid)) != PROC_NULL) {
3549 if (nps.npcs_uniqueid == p->p_uniqueid) {
3550 /*
3551 * verify this is still the same process
3552 * in case the proc exited and the pid got reused while
3553 * we were finishing the proc_iterate and getting to this point
3554 */
3555 last_no_space_action = now;
3556
3557 printf("low swap: killing largest compressed process with pid %d (%s) and size %llu MB\n", p->p_pid, p->p_comm, (nps.npcs_max_size / MB_SIZE));
3558 kill_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_LOWSWAP);
3559 psignal_with_reason(p, SIGKILL, kill_reason);
3560
3561 proc_rele(p);
3562
3563 return 0;
3564 }
3565
3566 proc_rele(p);
3567 }
3568 }
3569
3570 /*
3571 * If there are processes within our jetsam bands of consideration, they can be killed,
3572 * so invoke the memorystatus thread to go ahead and kill something.
3573 */
3574 if (memorystatus_get_proccnt_upto_priority(max_kill_priority) > 0) {
3575 last_no_space_action = now;
3576 memorystatus_kill_on_VM_compressor_space_shortage(TRUE /* async */);
3577 return 1;
3578 }
3579
3580 /*
3581 * No eligible processes to kill. So let's suspend/kill the largest
3582 * process depending on its policy control specifications.
3583 */
3584
3585 if (nps.pcs_max_size > 0) {
3586 if ((p = proc_find(nps.pcs_pid)) != PROC_NULL) {
3587 if (nps.pcs_uniqueid == p->p_uniqueid) {
3588 /*
3589 * verify this is still the same process
3590 * in case the proc exited and the pid got reused while
3591 * we were finishing the proc_iterate and getting to this point
3592 */
3593 last_no_space_action = now;
3594
3595 proc_dopcontrol(p);
3596
3597 proc_rele(p);
3598
3599 return 1;
3600 }
3601
3602 proc_rele(p);
3603 }
3604 }
3605 last_no_space_action = now;
3606
3607 printf("low swap: unable to find any eligible processes to take action on\n");
3608
3609 return 0;
3610 }
3611
3612 int
3613 proc_trace_log(__unused proc_t p, struct proc_trace_log_args *uap, __unused int *retval)
3614 {
3615 int ret = 0;
3616 proc_t target_proc = PROC_NULL;
3617 pid_t target_pid = uap->pid;
3618 uint64_t target_uniqueid = uap->uniqueid;
3619 task_t target_task = NULL;
3620
3621 if (priv_check_cred(kauth_cred_get(), PRIV_PROC_TRACE_INSPECT, 0)) {
3622 ret = EPERM;
3623 goto out;
3624 }
3625 target_proc = proc_find(target_pid);
3626 if (target_proc != PROC_NULL) {
3627 if (target_uniqueid != proc_uniqueid(target_proc)) {
3628 ret = ENOENT;
3629 goto out;
3630 }
3631
3632 target_task = proc_task(target_proc);
3633 if (task_send_trace_memory(target_task, target_pid, target_uniqueid)) {
3634 ret = EINVAL;
3635 goto out;
3636 }
3637 } else {
3638 ret = ENOENT;
3639 }
3640
3641 out:
3642 if (target_proc != PROC_NULL) {
3643 proc_rele(target_proc);
3644 }
3645 return ret;
3646 }
3647
3648 #if VM_SCAN_FOR_SHADOW_CHAIN
3649 extern int vm_map_shadow_max(vm_map_t map);
3650 int proc_shadow_max(void);
3651 int
3652 proc_shadow_max(void)
3653 {
3654 int retval, max;
3655 proc_t p;
3656 task_t task;
3657 vm_map_t map;
3658
3659 max = 0;
3660 proc_list_lock();
3661 for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
3662 if (p->p_stat == SIDL) {
3663 continue;
3664 }
3665 task = p->task;
3666 if (task == NULL) {
3667 continue;
3668 }
3669 map = get_task_map(task);
3670 if (map == NULL) {
3671 continue;
3672 }
3673 retval = vm_map_shadow_max(map);
3674 if (retval > max) {
3675 max = retval;
3676 }
3677 }
3678 proc_list_unlock();
3679 return max;
3680 }
3681 #endif /* VM_SCAN_FOR_SHADOW_CHAIN */
3682
3683 void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid);
3684 void
3685 proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid)
3686 {
3687 if (target_proc != NULL) {
3688 target_proc->p_responsible_pid = responsible_pid;
3689 }
3690 return;
3691 }
3692
3693 int
3694 proc_chrooted(proc_t p)
3695 {
3696 int retval = 0;
3697
3698 if (p) {
3699 proc_fdlock(p);
3700 retval = (p->p_fd->fd_rdir != NULL) ? 1 : 0;
3701 proc_fdunlock(p);
3702 }
3703
3704 return retval;
3705 }
3706
3707 boolean_t
3708 proc_send_synchronous_EXC_RESOURCE(proc_t p)
3709 {
3710 if (p == PROC_NULL) {
3711 return FALSE;
3712 }
3713
3714 /* Send sync EXC_RESOURCE if the process is traced */
3715 if (ISSET(p->p_lflag, P_LTRACED)) {
3716 return TRUE;
3717 }
3718 return FALSE;
3719 }
3720
3721 size_t
3722 proc_get_syscall_filter_mask_size(int which)
3723 {
3724 if (which == SYSCALL_MASK_UNIX) {
3725 return nsysent;
3726 }
3727
3728 return 0;
3729 }
3730
3731 int
3732 proc_set_syscall_filter_mask(proc_t p, int which, unsigned char *maskptr, size_t masklen)
3733 {
3734 #if DEVELOPMENT || DEBUG
3735 if (syscallfilter_disable) {
3736 printf("proc_set_syscall_filter_mask: attempt to set policy for pid %d, but disabled by boot-arg\n", proc_pid(p));
3737 return KERN_SUCCESS;
3738 }
3739 #endif // DEVELOPMENT || DEBUG
3740
3741 if (which != SYSCALL_MASK_UNIX ||
3742 (maskptr != NULL && masklen != nsysent)) {
3743 return EINVAL;
3744 }
3745
3746 p->syscall_filter_mask = maskptr;
3747
3748 return KERN_SUCCESS;
3749 }
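/*
 * Illustrative sketch (not part of the original file): judging by the
 * masklen == nsysent check above, the mask is one byte per Unix
 * syscall.  kalloc() is assumed available here; the mask semantics are
 * enforced at syscall dispatch, not in this function.
 */
#if 0 /* example only; excluded from the build */
static int
example_install_filter(proc_t p)
{
	size_t len = proc_get_syscall_filter_mask_size(SYSCALL_MASK_UNIX);
	unsigned char *mask;

	if (len == 0 || (mask = kalloc(len)) == NULL) {
		return ENOMEM;
	}
	bzero(mask, len);	/* placeholder policy: all entries zeroed */
	return proc_set_syscall_filter_mask(p, SYSCALL_MASK_UNIX, mask, len);
}
#endif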
3750
3751 #ifdef CONFIG_32BIT_TELEMETRY
3752 void
3753 proc_log_32bit_telemetry(proc_t p)
3754 {
3755 /* Gather info */
3756 char signature_buf[MAX_32BIT_EXEC_SIG_SIZE] = { 0 };
3757 char * signature_cur_end = &signature_buf[0];
3758 char * signature_buf_end = &signature_buf[MAX_32BIT_EXEC_SIG_SIZE - 1];
3759 int bytes_printed = 0;
3760
3761 const char * teamid = NULL;
3762 const char * identity = NULL;
3763 struct cs_blob * csblob = NULL;
3764
3765 proc_list_lock();
3766
3767 /*
3768 * Get proc name and parent proc name; if the parent execs, we'll get a
3769 * garbled name.
3770 */
3771 bytes_printed = snprintf(signature_cur_end,
3772 signature_buf_end - signature_cur_end,
3773 "%s,%s,", p->p_name,
3774 (p->p_pptr ? p->p_pptr->p_name : ""));
3775
3776 if (bytes_printed > 0) {
3777 signature_cur_end += bytes_printed;
3778 }
3779
3780 proc_list_unlock();
3781
3782 /* Get developer info. */
3783 vnode_t v = proc_getexecutablevnode(p);
3784
3785 if (v) {
3786 csblob = csvnode_get_blob(v, 0);
3787
3788 if (csblob) {
3789 teamid = csblob_get_teamid(csblob);
3790 identity = csblob_get_identity(csblob);
3791 }
3792 }
3793
3794 if (teamid == NULL) {
3795 teamid = "";
3796 }
3797
3798 if (identity == NULL) {
3799 identity = "";
3800 }
3801
3802 bytes_printed = snprintf(signature_cur_end,
3803 signature_buf_end - signature_cur_end,
3804 "%s,%s", teamid, identity);
3805
3806 if (bytes_printed > 0) {
3807 signature_cur_end += bytes_printed;
3808 }
3809
3810 if (v) {
3811 vnode_put(v);
3812 }
3813
3814 /*
3815 * We may want to rate limit here, although the SUMMARIZE key should
3816 * help us aggregate events in userspace.
3817 */
3818
3819 /* Emit log */
3820 kern_asl_msg(LOG_DEBUG, "messagetracer", 3,
3821 /* 0 */ "com.apple.message.domain", "com.apple.kernel.32bit_exec",
3822 /* 1 */ "com.apple.message.signature", signature_buf,
3823 /* 2 */ "com.apple.message.summarize", "YES",
3824 NULL);
3825 }
3826 #endif /* CONFIG_32BIT_TELEMETRY */