/* bsd/kern/kern_resource.c — apple/xnu (web-scrape navigation header removed) */
1 /*
2 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
29 /*-
30 * Copyright (c) 1982, 1986, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kern_resource.c 8.5 (Berkeley) 1/21/94
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/sysctl.h>
78 #include <sys/kernel.h>
79 #include <sys/file_internal.h>
80 #include <sys/resourcevar.h>
81 #include <sys/malloc.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount_internal.h>
85 #include <sys/sysproto.h>
86
87 #include <security/audit/audit.h>
88
89 #include <machine/vmparam.h>
90
91 #include <mach/mach_types.h>
92 #include <mach/time_value.h>
93 #include <mach/task.h>
94 #include <mach/task_info.h>
95 #include <mach/vm_map.h>
96 #include <mach/mach_vm.h>
97 #include <mach/thread_act.h> /* for thread_policy_set( ) */
98 #include <kern/thread.h>
99 #include <kern/policy_internal.h>
100
101 #include <kern/task.h>
102 #include <kern/clock.h> /* for absolutetime_to_microtime() */
103 #include <netinet/in.h> /* for TRAFFIC_MGT_SO_* */
104 #include <sys/socketvar.h> /* for struct socket */
105 #if NECP
106 #include <net/necp.h>
107 #endif /* NECP */
108
109 #include <vm/vm_map.h>
110
111 #include <kern/assert.h>
112 #include <sys/resource.h>
113 #include <sys/priv.h>
114 #include <IOKit/IOBSD.h>
115
116 #if CONFIG_MACF
117 #include <security/mac_framework.h>
118 #endif
119
120 int donice(struct proc *curp, struct proc *chgp, int n);
121 int dosetrlimit(struct proc *p, u_int which, struct rlimit *limp);
122 int uthread_get_background_state(uthread_t);
123 static void do_background_socket(struct proc *p, thread_t thread);
124 static int do_background_thread(thread_t thread, int priority);
125 static int do_background_proc(struct proc *curp, struct proc *targetp, int priority);
126 static int set_gpudeny_proc(struct proc *curp, struct proc *targetp, int priority);
127 static int proc_set_darwin_role(proc_t curp, proc_t targetp, int priority);
128 static int proc_get_darwin_role(proc_t curp, proc_t targetp, int *priority);
129 static int get_background_proc(struct proc *curp, struct proc *targetp, int *priority);
130 int proc_pid_rusage(int pid, int flavor, user_addr_t buf, int32_t *retval);
131 void gather_rusage_info(proc_t p, rusage_info_current *ru, int flavor);
132 int fill_task_rusage(task_t task, rusage_info_current *ri);
133 void fill_task_billed_usage(task_t task, rusage_info_current *ri);
134 int fill_task_io_rusage(task_t task, rusage_info_current *ri);
135 int fill_task_qos_rusage(task_t task, rusage_info_current *ri);
136 uint64_t get_task_logical_writes(task_t task);
137 void fill_task_monotonic_rusage(task_t task, rusage_info_current *ri);
138
139 int proc_get_rusage(proc_t p, int flavor, user_addr_t buffer, __unused int is_zombie);
140
/* Hard caps applied by dosetrlimit() when clamping RLIMIT_DATA / RLIMIT_STACK */
rlim_t maxdmap = MAXDSIZ;       /* XXX */
rlim_t maxsmap = MAXSSIZ - PAGE_MAX_SIZE;       /* XXX */

/*
 * Limits on the number of open files per process, and the number
 * of child processes per process.
 *
 * Note: would be in kern/subr_param.c in FreeBSD.
 */
__private_extern__ int maxfilesperproc = OPEN_MAX;      /* per-proc open files limit */

/* kern.maxprocperuid: non-root clamp used by the RLIMIT_NPROC case below */
SYSCTL_INT(_kern, KERN_MAXPROCPERUID, maxprocperuid, CTLFLAG_RW | CTLFLAG_LOCKED,
    &maxprocperuid, 0, "Maximum processes allowed per userid" );

/* kern.maxfilesperproc: non-root clamp used by the RLIMIT_NOFILE case below */
SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW | CTLFLAG_LOCKED,
    &maxfilesperproc, 0, "Maximum files allowed open per process" );

/* Args and fn for proc_iteration callback used in setpriority */
struct puser_nice_args {
	proc_t curp;    /* calling process, for donice() permission checks */
	int prio;       /* requested nice value */
	id_t who;       /* target uid to match */
	int * foundp;   /* out: incremented per matching process */
	int * errorp;   /* out: last donice() result */
};
static int puser_donice_callback(proc_t p, void * arg);


/* Args and fn for proc_iteration callback used in setpriority */
struct ppgrp_nice_args {
	proc_t curp;    /* calling process, for donice() permission checks */
	int prio;       /* requested nice value */
	int * foundp;   /* out: incremented per group member visited */
	int * errorp;   /* out: last donice() result */
};
static int ppgrp_donice_callback(proc_t p, void * arg);
177
178 /*
179 * Resource controls and accounting.
180 */
/*
 * getpriority(2): return the lowest (most favorable) nice value matching
 * uap->which / uap->who, or a Darwin policy value for the extended
 * selectors PRIO_DARWIN_THREAD (current thread's DARWIN_BG state),
 * PRIO_DARWIN_PROCESS (target's background state via get_background_proc)
 * and PRIO_DARWIN_ROLE (target's task role via proc_get_darwin_role).
 *
 * Returns: 0      Success, *retval holds the value
 *          EINVAL invalid which, or who out of id_t range
 *          ESRCH  nothing matched who
 *          get_background_proc / proc_get_darwin_role errors
 */
int
getpriority(struct proc *curp, struct getpriority_args *uap, int32_t *retval)
{
	struct proc *p;
	int low = PRIO_MAX + 1;         /* sentinel: "no match found yet" */
	kauth_cred_t my_cred;
	int refheld = 0;
	int error = 0;

	/* would also test (uap->who < 0), but id_t is unsigned */
	if (uap->who > 0x7fffffff) {
		return EINVAL;
	}

	switch (uap->which) {
	case PRIO_PROCESS:
		/* who == 0 means the calling process */
		if (uap->who == 0) {
			p = curp;
			low = p->p_nice;
		} else {
			p = proc_find(uap->who);
			if (p == 0) {
				break;
			}
			low = p->p_nice;
			proc_rele(p);
		}
		break;

	case PRIO_PGRP: {
		struct pgrp *pg = PGRP_NULL;

		if (uap->who == 0) {
			/* returns the pgrp to ref */
			pg = proc_pgrp(curp);
		} else if ((pg = pgfind(uap->who)) == PGRP_NULL) {
			break;
		}
		/* No need for iteration as it is a simple scan */
		pgrp_lock(pg);
		PGMEMBERS_FOREACH(pg, p) {
			if (p->p_nice < low) {
				low = p->p_nice;
			}
		}
		pgrp_unlock(pg);
		pg_rele(pg);
		break;
	}

	case PRIO_USER:
		/* who == 0 means the caller's effective uid */
		if (uap->who == 0) {
			uap->who = kauth_cred_getuid(kauth_cred_get());
		}

		proc_list_lock();

		/* full allproc scan, tracking the minimum nice for that uid */
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			my_cred = kauth_cred_proc_ref(p);
			if (kauth_cred_getuid(my_cred) == uap->who &&
			    p->p_nice < low) {
				low = p->p_nice;
			}
			kauth_cred_unref(&my_cred);
		}

		proc_list_unlock();

		break;

	case PRIO_DARWIN_THREAD:
		/* we currently only support the current thread */
		if (uap->who != 0) {
			return EINVAL;
		}

		low = proc_get_thread_policy(current_thread(), TASK_POLICY_INTERNAL, TASK_POLICY_DARWIN_BG);

		break;

	case PRIO_DARWIN_PROCESS:
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == PROC_NULL) {
				break;
			}
			refheld = 1;
		}

		error = get_background_proc(curp, p, &low);

		if (refheld) {
			proc_rele(p);
		}
		if (error) {
			return error;
		}
		break;

	case PRIO_DARWIN_ROLE:
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == PROC_NULL) {
				break;
			}
			refheld = 1;
		}

		error = proc_get_darwin_role(curp, p, &low);

		if (refheld) {
			proc_rele(p);
		}
		if (error) {
			return error;
		}
		break;

	default:
		return EINVAL;
	}
	/* sentinel untouched => nothing matched who */
	if (low == PRIO_MAX + 1) {
		return ESRCH;
	}
	*retval = low;
	return 0;
}
312
313 /* call back function used for proc iteration in PRIO_USER */
314 static int
315 puser_donice_callback(proc_t p, void * arg)
316 {
317 int error, n;
318 struct puser_nice_args * pun = (struct puser_nice_args *)arg;
319 kauth_cred_t my_cred;
320
321 my_cred = kauth_cred_proc_ref(p);
322 if (kauth_cred_getuid(my_cred) == pun->who) {
323 error = donice(pun->curp, p, pun->prio);
324 if (pun->errorp != NULL) {
325 *pun->errorp = error;
326 }
327 if (pun->foundp != NULL) {
328 n = *pun->foundp;
329 *pun->foundp = n + 1;
330 }
331 }
332 kauth_cred_unref(&my_cred);
333
334 return PROC_RETURNED;
335 }
336
337 /* call back function used for proc iteration in PRIO_PGRP */
338 static int
339 ppgrp_donice_callback(proc_t p, void * arg)
340 {
341 int error;
342 struct ppgrp_nice_args * pun = (struct ppgrp_nice_args *)arg;
343 int n;
344
345 error = donice(pun->curp, p, pun->prio);
346 if (pun->errorp != NULL) {
347 *pun->errorp = error;
348 }
349 if (pun->foundp != NULL) {
350 n = *pun->foundp;
351 *pun->foundp = n + 1;
352 }
353
354 return PROC_RETURNED;
355 }
356
/*
 * setpriority(2): set the nice value or a Darwin policy for a process,
 * process group, user, the current thread, or a task role, per
 * uap->which.
 *
 * Returns: 0 Success
 *          EINVAL
 *          ESRCH   nothing matched who
 *          donice:EPERM
 *          donice:EACCES
 *
 * An EIDRM from do_background_thread (a QoS policy was stripped) is
 * reported to the caller as *retval == -2 with a 0 return, not as errno.
 */
/* ARGSUSED */
int
setpriority(struct proc *curp, struct setpriority_args *uap, int32_t *retval)
{
	struct proc *p;
	int found = 0, error = 0;
	int refheld = 0;

	AUDIT_ARG(cmd, uap->which);
	AUDIT_ARG(owner, uap->who, 0);
	AUDIT_ARG(value32, uap->prio);

	/* would also test (uap->who < 0), but id_t is unsigned */
	if (uap->who > 0x7fffffff) {
		return EINVAL;
	}

	switch (uap->which) {
	case PRIO_PROCESS:
		/* who == 0 targets the calling process */
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == 0) {
				break;
			}
			refheld = 1;
		}
		error = donice(curp, p, uap->prio);
		found++;
		if (refheld != 0) {
			proc_rele(p);
		}
		break;

	case PRIO_PGRP: {
		struct pgrp *pg = PGRP_NULL;
		struct ppgrp_nice_args ppgrp;

		if (uap->who == 0) {
			pg = proc_pgrp(curp);
		} else if ((pg = pgfind(uap->who)) == PGRP_NULL) {
			break;
		}

		ppgrp.curp = curp;
		ppgrp.prio = uap->prio;
		ppgrp.foundp = &found;
		ppgrp.errorp = &error;

		/* PGRP_DROPREF drops the reference on process group */
		pgrp_iterate(pg, PGRP_DROPREF, ppgrp_donice_callback, (void *)&ppgrp, NULL, NULL);

		break;
	}

	case PRIO_USER: {
		struct puser_nice_args punice;

		/* who == 0 means the caller's effective uid */
		if (uap->who == 0) {
			uap->who = kauth_cred_getuid(kauth_cred_get());
		}

		punice.curp = curp;
		punice.prio = uap->prio;
		punice.who = uap->who;
		punice.foundp = &found;
		error = 0;
		punice.errorp = &error;
		proc_iterate(PROC_ALLPROCLIST, puser_donice_callback, (void *)&punice, NULL, NULL);

		break;
	}

	case PRIO_DARWIN_THREAD: {
		/* we currently only support the current thread */
		if (uap->who != 0) {
			return EINVAL;
		}

		error = do_background_thread(current_thread(), uap->prio);
		found++;
		break;
	}

	case PRIO_DARWIN_PROCESS: {
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == 0) {
				break;
			}
			refheld = 1;
		}

		error = do_background_proc(curp, p, uap->prio);

		found++;
		if (refheld != 0) {
			proc_rele(p);
		}
		break;
	}

	case PRIO_DARWIN_GPU: {
		/* no "self" shorthand here; an explicit pid is required */
		if (uap->who == 0) {
			return EINVAL;
		}

		p = proc_find(uap->who);
		if (p == PROC_NULL) {
			break;
		}

		error = set_gpudeny_proc(curp, p, uap->prio);

		found++;
		proc_rele(p);
		break;
	}

	case PRIO_DARWIN_ROLE: {
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == PROC_NULL) {
				break;
			}
			refheld = 1;
		}

		error = proc_set_darwin_role(curp, p, uap->prio);

		found++;
		if (refheld != 0) {
			proc_rele(p);
		}
		break;
	}

	default:
		return EINVAL;
	}
	if (found == 0) {
		return ESRCH;
	}
	/* translate QoS-stripped side channel into the retval convention */
	if (error == EIDRM) {
		*retval = -2;
		error = 0;
	}
	return error;
}
518
519
/*
 * donice: apply nice value n to process chgp on behalf of curp.
 *
 * Non-superusers may only renice processes whose uid matches their own
 * effective or real uid, and may never lower a nice value (raise
 * priority).  n is clamped to [PRIO_MIN, PRIO_MAX] rather than rejected.
 *
 * Returns: 0 Success
 *          EPERM   uid mismatch and not superuser
 *          EACCES  tried to lower nice without superuser
 *          mac_check_proc_sched:???
 */
int
donice(struct proc *curp, struct proc *chgp, int n)
{
	int error = 0;
	kauth_cred_t ucred;
	kauth_cred_t my_cred;

	ucred = kauth_cred_proc_ref(curp);
	my_cred = kauth_cred_proc_ref(chgp);

	if (suser(ucred, NULL) && kauth_cred_getruid(ucred) &&
	    kauth_cred_getuid(ucred) != kauth_cred_getuid(my_cred) &&
	    kauth_cred_getruid(ucred) != kauth_cred_getuid(my_cred)) {
		error = EPERM;
		goto out;
	}
	/* clamp instead of failing, per historical BSD behavior */
	if (n > PRIO_MAX) {
		n = PRIO_MAX;
	}
	if (n < PRIO_MIN) {
		n = PRIO_MIN;
	}
	/* lowering nice requires superuser; this suser() call is handed
	 * &curp->p_acflag so the privilege use is recorded for accounting */
	if (n < chgp->p_nice && suser(ucred, &curp->p_acflag)) {
		error = EACCES;
		goto out;
	}
#if CONFIG_MACF
	error = mac_proc_check_sched(curp, chgp);
	if (error) {
		goto out;
	}
#endif
	proc_lock(chgp);
	chgp->p_nice = n;
	proc_unlock(chgp);
	(void)resetpriority(chgp);
out:
	kauth_cred_unref(&ucred);
	kauth_cred_unref(&my_cred);
	return error;
}
567
568 static int
569 set_gpudeny_proc(struct proc *curp, struct proc *targetp, int priority)
570 {
571 int error = 0;
572 kauth_cred_t ucred;
573 kauth_cred_t target_cred;
574
575 ucred = kauth_cred_get();
576 target_cred = kauth_cred_proc_ref(targetp);
577
578 /* TODO: Entitlement instead of uid check */
579
580 if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
581 kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
582 kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
583 error = EPERM;
584 goto out;
585 }
586
587 if (curp == targetp) {
588 error = EPERM;
589 goto out;
590 }
591
592 #if CONFIG_MACF
593 error = mac_proc_check_sched(curp, targetp);
594 if (error) {
595 goto out;
596 }
597 #endif
598
599 switch (priority) {
600 case PRIO_DARWIN_GPU_DENY:
601 task_set_gpu_denied(proc_task(targetp), TRUE);
602 break;
603 case PRIO_DARWIN_GPU_ALLOW:
604 task_set_gpu_denied(proc_task(targetp), FALSE);
605 break;
606 default:
607 error = EINVAL;
608 goto out;
609 }
610
611 out:
612 kauth_cred_unref(&target_cred);
613 return error;
614 }
615
/*
 * setpriority(PRIO_DARWIN_ROLE) backend: translate a PRIO_DARWIN_ROLE_*
 * value into a task role and apply it to the target task.
 *
 * Permission: superuser, matching effective/real uid, or the
 * PRIV_SETPRIORITY_DARWIN_ROLE privilege; plus a MACF scheduling check
 * when acting on another process.  Only processes flagged as
 * applications may have a role set (ENOTSUP otherwise).
 */
static int
proc_set_darwin_role(proc_t curp, proc_t targetp, int priority)
{
	int error = 0;
	uint32_t flagsp = 0;

	kauth_cred_t ucred, target_cred;

	ucred = kauth_cred_get();
	target_cred = kauth_cred_proc_ref(targetp);

	if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
	    kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
	    kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
		/* the privilege can override the uid-match requirement */
		if (priv_check_cred(ucred, PRIV_SETPRIORITY_DARWIN_ROLE, 0) != 0) {
			error = EPERM;
			goto out;
		}
	}

	if (curp != targetp) {
#if CONFIG_MACF
		if ((error = mac_proc_check_sched(curp, targetp))) {
			goto out;
		}
#endif
	}

	/* roles may only be set on processes marked as applications */
	proc_get_darwinbgstate(proc_task(targetp), &flagsp);
	if ((flagsp & PROC_FLAG_APPLICATION) != PROC_FLAG_APPLICATION) {
		error = ENOTSUP;
		goto out;
	}

	integer_t role = 0;

	if ((error = proc_darwin_role_to_task_role(priority, &role))) {
		goto out;
	}

	proc_set_task_policy(proc_task(targetp), TASK_POLICY_ATTRIBUTE,
	    TASK_POLICY_ROLE, role);

out:
	kauth_cred_unref(&target_cred);
	return error;
}
663
664 static int
665 proc_get_darwin_role(proc_t curp, proc_t targetp, int *priority)
666 {
667 int error = 0;
668 int role = 0;
669
670 kauth_cred_t ucred, target_cred;
671
672 ucred = kauth_cred_get();
673 target_cred = kauth_cred_proc_ref(targetp);
674
675 if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
676 kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
677 kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
678 error = EPERM;
679 goto out;
680 }
681
682 if (curp != targetp) {
683 #if CONFIG_MACF
684 if ((error = mac_proc_check_sched(curp, targetp))) {
685 goto out;
686 }
687 #endif
688 }
689
690 role = proc_get_task_policy(proc_task(targetp), TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE);
691
692 *priority = proc_task_role_to_darwin_role(role);
693
694 out:
695 kauth_cred_unref(&target_cred);
696 return error;
697 }
698
699
700 static int
701 get_background_proc(struct proc *curp, struct proc *targetp, int *priority)
702 {
703 int external = 0;
704 int error = 0;
705 kauth_cred_t ucred, target_cred;
706
707 ucred = kauth_cred_get();
708 target_cred = kauth_cred_proc_ref(targetp);
709
710 if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
711 kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
712 kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
713 error = EPERM;
714 goto out;
715 }
716
717 external = (curp == targetp) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL;
718
719 *priority = proc_get_task_policy(current_task(), external, TASK_POLICY_DARWIN_BG);
720
721 out:
722 kauth_cred_unref(&target_cred);
723 return error;
724 }
725
/*
 * setpriority(PRIO_DARWIN_PROCESS) backend: enable or disable the
 * DARWIN_BG task policy on the target process.
 *
 * priority == PRIO_DARWIN_BG enables backgrounding; PRIO_DARWIN_NONUI is
 * accepted but ignored for compatibility; any other value disables it.
 * The policy is tagged INTERNAL when self-applied, EXTERNAL otherwise.
 *
 * Returns: 0 Success
 *          EPERM  uid mismatch and not superuser
 *          mac_proc_check_sched:???
 */
static int
do_background_proc(struct proc *curp, struct proc *targetp, int priority)
{
#if !CONFIG_MACF
#pragma unused(curp)
#endif
	int error = 0;
	kauth_cred_t ucred;
	kauth_cred_t target_cred;
	int external;
	int enable;

	ucred = kauth_cred_get();
	target_cred = kauth_cred_proc_ref(targetp);

	if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
	    kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
	    kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
		error = EPERM;
		goto out;
	}

#if CONFIG_MACF
	error = mac_proc_check_sched(curp, targetp);
	if (error) {
		goto out;
	}
#endif

	external = (curp == targetp) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL;

	switch (priority) {
	case PRIO_DARWIN_BG:
		enable = TASK_POLICY_ENABLE;
		break;
	case PRIO_DARWIN_NONUI:
		/* ignored for compatibility */
		goto out;
	default:
		/* TODO: EINVAL if priority != 0 */
		enable = TASK_POLICY_DISABLE;
		break;
	}

	proc_set_task_policy(proc_task(targetp), external, TASK_POLICY_DARWIN_BG, enable);

out:
	kauth_cred_unref(&target_cred);
	return error;
}
776
/*
 * do_background_socket
 *
 * Propagate the effective ALL_SOCKETS_BG policy (of the given thread, or
 * of the whole task when thread is THREAD_NULL) onto this process's open
 * sockets by setting or clearing TRAFFIC_MGT_SO_BACKGROUND, and onto NECP
 * clients where built with NECP.  Walks the fd table under proc_fdlock.
 * No-op when built without SOCKETS.
 */
static void
do_background_socket(struct proc *p, thread_t thread)
{
#if SOCKETS
	struct filedesc *fdp;
	struct fileproc *fp;
	int i, background;

	proc_fdlock(p);

	/* thread-scoped query when a thread is given, task-scoped otherwise */
	if (thread != THREAD_NULL) {
		background = proc_get_effective_thread_policy(thread, TASK_POLICY_ALL_SOCKETS_BG);
	} else {
		background = proc_get_effective_task_policy(proc_task(p), TASK_POLICY_ALL_SOCKETS_BG);
	}

	if (background) {
		/*
		 * For PRIO_DARWIN_PROCESS (thread is NULL), simply mark
		 * the sockets with the background flag.  There's nothing
		 * to do here for the PRIO_DARWIN_THREAD case.
		 */
		if (thread == THREAD_NULL) {
			fdp = p->p_fd;

			for (i = 0; i < fdp->fd_nfiles; i++) {
				fp = fdp->fd_ofiles[i];
				/* skip empty and reserved (mid-allocation) slots */
				if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0) {
					continue;
				}
				if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_SOCKET) {
					struct socket *sockp = (struct socket *)fp->f_fglob->fg_data;
					socket_set_traffic_mgt_flags(sockp, TRAFFIC_MGT_SO_BACKGROUND);
					sockp->so_background_thread = NULL;
				}
#if NECP
				else if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_NETPOLICY) {
					necp_set_client_as_background(p, fp, background);
				}
#endif /* NECP */
			}
		}
	} else {
		/* disable networking IO throttle.
		 * NOTE - It is a known limitation of the current design that we
		 * could potentially clear TRAFFIC_MGT_SO_BACKGROUND bit for
		 * sockets created by other threads within this process.
		 */
		fdp = p->p_fd;
		for (i = 0; i < fdp->fd_nfiles; i++) {
			struct socket *sockp;

			fp = fdp->fd_ofiles[i];
			if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0) {
				continue;
			}
			if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_SOCKET) {
				sockp = (struct socket *)fp->f_fglob->fg_data;
				/* skip if only clearing this thread's sockets */
				if ((thread) && (sockp->so_background_thread != thread)) {
					continue;
				}
				socket_clear_traffic_mgt_flags(sockp, TRAFFIC_MGT_SO_BACKGROUND);
				sockp->so_background_thread = NULL;
			}
#if NECP
			else if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_NETPOLICY) {
				necp_set_client_as_background(p, fp, background);
			}
#endif /* NECP */
		}
	}

	proc_fdunlock(p);
#else
#pragma unused(p, thread)
#endif
}
855
856
857 /*
858 * do_background_thread
859 *
860 * Requires: thread reference
861 *
862 * Returns: 0 Success
863 * EPERM Tried to background while in vfork
864 * XXX - todo - does this need a MACF hook?
865 */
866 static int
867 do_background_thread(thread_t thread, int priority)
868 {
869 struct uthread *ut;
870 int enable, external;
871 int rv = 0;
872
873 ut = get_bsdthread_info(thread);
874
875 /* Backgrounding is unsupported for threads in vfork */
876 if ((ut->uu_flag & UT_VFORK) != 0) {
877 return EPERM;
878 }
879
880 /* Backgrounding is unsupported for workq threads */
881 if (thread_is_static_param(thread)) {
882 return EPERM;
883 }
884
885 /* Not allowed to combine QoS and DARWIN_BG, doing so strips the QoS */
886 if (thread_has_qos_policy(thread)) {
887 thread_remove_qos_policy(thread);
888 rv = EIDRM;
889 }
890
891 /* TODO: Fail if someone passes something besides 0 or PRIO_DARWIN_BG */
892 enable = (priority == PRIO_DARWIN_BG) ? TASK_POLICY_ENABLE : TASK_POLICY_DISABLE;
893 external = (current_thread() == thread) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL;
894
895 proc_set_thread_policy(thread, external, TASK_POLICY_DARWIN_BG, enable);
896
897 return rv;
898 }
899
900
901 /*
902 * Returns: 0 Success
903 * copyin:EFAULT
904 * dosetrlimit:
905 */
906 /* ARGSUSED */
907 int
908 setrlimit(struct proc *p, struct setrlimit_args *uap, __unused int32_t *retval)
909 {
910 struct rlimit alim;
911 int error;
912
913 if ((error = copyin(uap->rlp, (caddr_t)&alim,
914 sizeof(struct rlimit)))) {
915 return error;
916 }
917
918 return dosetrlimit(p, uap->which, &alim);
919 }
920
/*
 * dosetrlimit: validate and install a new resource limit for process p.
 *
 * which may carry _RLIMIT_POSIX_FLAG, which selects strict POSIX
 * behavior (reject out-of-range values with EINVAL) instead of the
 * historical clamp-to-maximum behavior for RLIMIT_STACK / RLIMIT_NOFILE.
 *
 * Returns: 0 Success
 *          EINVAL
 *          ENOMEM Cannot copy limit structure
 *          suser:EPERM
 *
 * Notes:  EINVAL is returned both for invalid arguments, and in the
 *         case that the current usage (e.g. RLIMIT_STACK) is already
 *         in excess of the requested limit.
 */
int
dosetrlimit(struct proc *p, u_int which, struct rlimit *limp)
{
	struct rlimit *alimp;
	int error;
	kern_return_t kr;
	int posix = (which & _RLIMIT_POSIX_FLAG) ? 1 : 0;

	/* Mask out POSIX flag, saved above */
	which &= ~_RLIMIT_POSIX_FLAG;

	if (which >= RLIM_NLIMITS) {
		return EINVAL;
	}

	alimp = &p->p_rlimit[which];
	if (limp->rlim_cur > limp->rlim_max) {
		return EINVAL;
	}

	/* raising either value above the current hard limit needs root */
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max) {
		if ((error = suser(kauth_cred_get(), &p->p_acflag))) {
			return error;
		}
	}

	proc_limitblock(p);

	if ((error = proc_limitreplace(p)) != 0) {
		proc_limitunblock(p);
		return error;
	}

	/* re-fetch: proc_limitreplace() may have swapped the limit block */
	alimp = &p->p_rlimit[which];

	switch (which) {
	case RLIMIT_CPU:
		if (limp->rlim_cur == RLIM_INFINITY) {
			task_vtimer_clear(p->task, TASK_VTIMER_RLIM);
			timerclear(&p->p_rlim_cpu);
		} else {
			task_absolutetime_info_data_t tinfo;
			mach_msg_type_number_t count;
			struct timeval ttv, tv;
			clock_sec_t tv_sec;
			clock_usec_t tv_usec;

			/* compute CPU time already consumed (user + system) */
			count = TASK_ABSOLUTETIME_INFO_COUNT;
			task_info(p->task, TASK_ABSOLUTETIME_INFO,
			    (task_info_t)&tinfo, &count);
			absolutetime_to_microtime(tinfo.total_user + tinfo.total_system,
			    &tv_sec, &tv_usec);
			ttv.tv_sec = tv_sec;
			ttv.tv_usec = tv_usec;

			/* remaining budget = new limit - consumed; limit clipped
			 * to INT_MAX seconds to fit the timeval arithmetic */
			tv.tv_sec = (limp->rlim_cur > __INT_MAX__ ? __INT_MAX__ : limp->rlim_cur);
			tv.tv_usec = 0;
			timersub(&tv, &ttv, &p->p_rlim_cpu);

			timerclear(&tv);
			if (timercmp(&p->p_rlim_cpu, &tv, >)) {
				/* budget remains: arm the vtimer */
				task_vtimer_set(p->task, TASK_VTIMER_RLIM);
			} else {
				/* already over the new limit: signal immediately */
				task_vtimer_clear(p->task, TASK_VTIMER_RLIM);

				timerclear(&p->p_rlim_cpu);

				psignal(p, SIGXCPU);
			}
		}
		break;

	case RLIMIT_DATA:
		/* silently clamp to the system-wide maximum */
		if (limp->rlim_cur > maxdmap) {
			limp->rlim_cur = maxdmap;
		}
		if (limp->rlim_max > maxdmap) {
			limp->rlim_max = maxdmap;
		}
		break;

	case RLIMIT_STACK:
		if (p->p_lflag & P_LCUSTOM_STACK) {
			/* Process has a custom stack set - rlimit cannot be used to change it */
			error = EINVAL;
			goto out;
		}

		/* Disallow illegal stack size instead of clipping */
		if (limp->rlim_cur > maxsmap ||
		    limp->rlim_max > maxsmap) {
			if (posix) {
				error = EINVAL;
				goto out;
			} else {
				/*
				 * 4797860 - workaround poorly written installers by
				 * doing previous implementation (< 10.5) when caller
				 * is non-POSIX conforming.
				 */
				if (limp->rlim_cur > maxsmap) {
					limp->rlim_cur = maxsmap;
				}
				if (limp->rlim_max > maxsmap) {
					limp->rlim_max = maxsmap;
				}
			}
		}

		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If stack limit is going
		 * up make more accessible, if going down make inaccessible.
		 */
		if (limp->rlim_cur > alimp->rlim_cur) {
			user_addr_t addr;
			user_size_t size;

			/* grow stack: make the newly-covered pages accessible */
			size = round_page_64(limp->rlim_cur);
			size -= round_page_64(alimp->rlim_cur);

			addr = p->user_stack - round_page_64(limp->rlim_cur);
			kr = mach_vm_protect(current_map(),
			    addr, size,
			    FALSE, VM_PROT_DEFAULT);
			if (kr != KERN_SUCCESS) {
				error = EINVAL;
				goto out;
			}
		} else if (limp->rlim_cur < alimp->rlim_cur) {
			user_addr_t addr;
			user_size_t size;
			user_addr_t cur_sp;

			/* shrink stack */

			/*
			 * First check if new stack limit would agree
			 * with current stack usage.
			 * Get the current thread's stack pointer...
			 */
			cur_sp = thread_adjuserstack(current_thread(),
			    0);
			if (cur_sp <= p->user_stack &&
			    cur_sp > (p->user_stack -
			    round_page_64(alimp->rlim_cur))) {
				/* stack pointer is in main stack */
				if (cur_sp <= (p->user_stack -
				    round_page_64(limp->rlim_cur))) {
					/*
					 * New limit would cause
					 * current usage to be invalid:
					 * reject new limit.
					 */
					error = EINVAL;
					goto out;
				}
			} else {
				/* not on the main stack: reject */
				error = EINVAL;
				goto out;
			}

			/* make the pages beyond the new limit inaccessible */
			size = round_page_64(alimp->rlim_cur);
			size -= round_page_64(limp->rlim_cur);

			addr = p->user_stack - round_page_64(alimp->rlim_cur);

			kr = mach_vm_protect(current_map(),
			    addr, size,
			    FALSE, VM_PROT_NONE);
			if (kr != KERN_SUCCESS) {
				error = EINVAL;
				goto out;
			}
		} else {
			/* no change ... */
		}
		break;

	case RLIMIT_NOFILE:
		/*
		 * Only root can set the maxfiles limits, as it is
		 * systemwide resource. If we are expecting POSIX behavior,
		 * instead of clamping the value, return EINVAL. We do this
		 * because historically, people have been able to attempt to
		 * set RLIM_INFINITY to get "whatever the maximum is".
		 */
		if (kauth_cred_issuser(kauth_cred_get())) {
			if (limp->rlim_cur != alimp->rlim_cur &&
			    limp->rlim_cur > (rlim_t)maxfiles) {
				if (posix) {
					error = EINVAL;
					goto out;
				}
				limp->rlim_cur = maxfiles;
			}
			if (limp->rlim_max != alimp->rlim_max &&
			    limp->rlim_max > (rlim_t)maxfiles) {
				limp->rlim_max = maxfiles;
			}
		} else {
			/* non-root is clamped to the per-process ceiling */
			if (limp->rlim_cur != alimp->rlim_cur &&
			    limp->rlim_cur > (rlim_t)maxfilesperproc) {
				if (posix) {
					error = EINVAL;
					goto out;
				}
				limp->rlim_cur = maxfilesperproc;
			}
			if (limp->rlim_max != alimp->rlim_max &&
			    limp->rlim_max > (rlim_t)maxfilesperproc) {
				limp->rlim_max = maxfilesperproc;
			}
		}
		break;

	case RLIMIT_NPROC:
		/*
		 * Only root can set to the maxproc limits, as it is
		 * systemwide resource; all others are limited to
		 * maxprocperuid (presumably less than maxproc).
		 */
		if (kauth_cred_issuser(kauth_cred_get())) {
			if (limp->rlim_cur > (rlim_t)maxproc) {
				limp->rlim_cur = maxproc;
			}
			if (limp->rlim_max > (rlim_t)maxproc) {
				limp->rlim_max = maxproc;
			}
		} else {
			if (limp->rlim_cur > (rlim_t)maxprocperuid) {
				limp->rlim_cur = maxprocperuid;
			}
			if (limp->rlim_max > (rlim_t)maxprocperuid) {
				limp->rlim_max = maxprocperuid;
			}
		}
		break;

	case RLIMIT_MEMLOCK:
		/*
		 * Tell the Mach VM layer about the new limit value.
		 */

		vm_map_set_user_wire_limit(current_map(), limp->rlim_cur);
		break;
	} /* switch... */
	proc_lock(p);
	*alimp = *limp;
	proc_unlock(p);
	error = 0;
out:
	proc_limitunblock(p);
	return error;
}
1189
1190 /* ARGSUSED */
1191 int
1192 getrlimit(struct proc *p, struct getrlimit_args *uap, __unused int32_t *retval)
1193 {
1194 struct rlimit lim = {};
1195
1196 /*
1197 * Take out flag now in case we need to use it to trigger variant
1198 * behaviour later.
1199 */
1200 uap->which &= ~_RLIMIT_POSIX_FLAG;
1201
1202 if (uap->which >= RLIM_NLIMITS) {
1203 return EINVAL;
1204 }
1205 proc_limitget(p, uap->which, &lim);
1206 return copyout((caddr_t)&lim,
1207 uap->rlp, sizeof(struct rlimit));
1208 }
1209
/*
 * Transform the running time and tick information in proc p into user,
 * system, and interrupt time usage.
 */
/* No lock on proc is held for this.. */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp, struct timeval *ip)
{
	task_t task;

	timerclear(up);
	timerclear(sp);
	/* ip is cleared here but never populated by this function. */
	if (ip != NULL) {
		timerclear(ip);
	}

	task = p->task;
	if (task) {
		mach_task_basic_info_data_t tinfo;
		task_thread_times_info_data_t ttimesinfo;
		task_events_info_data_t teventsinfo;
		mach_msg_type_number_t task_info_count, task_ttimes_count;
		mach_msg_type_number_t task_events_count;
		struct timeval ut, st;

		/*
		 * NOTE(review): the task_info() return values below are not
		 * checked; on failure the stale stack contents of
		 * tinfo/ttimesinfo/teventsinfo would be consumed.
		 */
		/* Accumulated times of terminated threads (basic info). */
		task_info_count = MACH_TASK_BASIC_INFO_COUNT;
		task_info(task, MACH_TASK_BASIC_INFO,
		    (task_info_t)&tinfo, &task_info_count);
		ut.tv_sec = tinfo.user_time.seconds;
		ut.tv_usec = tinfo.user_time.microseconds;
		st.tv_sec = tinfo.system_time.seconds;
		st.tv_usec = tinfo.system_time.microseconds;
		timeradd(&ut, up, up);
		timeradd(&st, sp, sp);

		/* Add the times of the still-live threads. */
		task_ttimes_count = TASK_THREAD_TIMES_INFO_COUNT;
		task_info(task, TASK_THREAD_TIMES_INFO,
		    (task_info_t)&ttimesinfo, &task_ttimes_count);

		ut.tv_sec = ttimesinfo.user_time.seconds;
		ut.tv_usec = ttimesinfo.user_time.microseconds;
		st.tv_sec = ttimesinfo.system_time.seconds;
		st.tv_usec = ttimesinfo.system_time.microseconds;
		timeradd(&ut, up, up);
		timeradd(&st, sp, sp);

		/* Fault/pagein/context-switch counters for the rusage stats. */
		task_events_count = TASK_EVENTS_INFO_COUNT;
		task_info(task, TASK_EVENTS_INFO,
		    (task_info_t)&teventsinfo, &task_events_count);

		/*
		 * No need to lock "p": these statistics only need to be
		 * approximately consistent, and torn updates are tolerated.
		 */
		/* Minor faults = total faults minus those that required a pagein. */
		p->p_stats->p_ru.ru_minflt = (teventsinfo.faults -
		    teventsinfo.pageins);
		p->p_stats->p_ru.ru_majflt = teventsinfo.pageins;
		/*
		 * Involuntary switches = total switches minus the voluntary
		 * count tracked elsewhere; clamp at zero since the two
		 * counters are not sampled atomically.
		 */
		p->p_stats->p_ru.ru_nivcsw = (teventsinfo.csw -
		    p->p_stats->p_ru.ru_nvcsw);
		if (p->p_stats->p_ru.ru_nivcsw < 0) {
			p->p_stats->p_ru.ru_nivcsw = 0;
		}

		p->p_stats->p_ru.ru_maxrss = tinfo.resident_size_max;
	}
}
1276
1277 __private_extern__ void munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p);
1278 __private_extern__ void munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p);
1279
/* ARGSUSED */
/*
 * getrusage system call.
 *
 * Snapshot either this process's own resource usage (RUSAGE_SELF,
 * refreshed from the Mach task via calcru()) or the accumulated usage of
 * its reaped children (RUSAGE_CHILDREN), munge it to the caller's ABI
 * (32- or 64-bit), and copy it out.
 *
 * Returns: 0 on success, EINVAL for an unknown 'who', or a copyout error.
 */
int
getrusage(struct proc *p, struct getrusage_args *uap, __unused int32_t *retval)
{
	struct rusage *rup, rubuf;
	struct user64_rusage rubuf64 = {};
	struct user32_rusage rubuf32 = {};
	size_t retsize = sizeof(rubuf); /* default: 32 bits */
	caddr_t retbuf = (caddr_t)&rubuf; /* default: 32 bits */
	struct timeval utime;
	struct timeval stime;


	switch (uap->who) {
	case RUSAGE_SELF:
		/* Refresh times from Mach before taking the lock (calcru needs no lock). */
		calcru(p, &utime, &stime, NULL);
		proc_lock(p);
		rup = &p->p_stats->p_ru;
		rup->ru_utime = utime;
		rup->ru_stime = stime;

		/* Copy to a local under the lock so the stats stay coherent. */
		rubuf = *rup;
		proc_unlock(p);

		break;

	case RUSAGE_CHILDREN:
		proc_lock(p);
		rup = &p->p_stats->p_cru;
		rubuf = *rup;
		proc_unlock(p);
		break;

	default:
		return EINVAL;
	}
	/* Convert the kernel rusage to the caller's user-space layout. */
	if (IS_64BIT_PROCESS(p)) {
		retsize = sizeof(rubuf64);
		retbuf = (caddr_t)&rubuf64;
		munge_user64_rusage(&rubuf, &rubuf64);
	} else {
		retsize = sizeof(rubuf32);
		retbuf = (caddr_t)&rubuf32;
		munge_user32_rusage(&rubuf, &rubuf32);
	}

	return copyout(retbuf, uap->rusage, retsize);
}
1328
1329 void
1330 ruadd(struct rusage *ru, struct rusage *ru2)
1331 {
1332 long *ip, *ip2;
1333 long i;
1334
1335 timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime);
1336 timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime);
1337 if (ru->ru_maxrss < ru2->ru_maxrss) {
1338 ru->ru_maxrss = ru2->ru_maxrss;
1339 }
1340 ip = &ru->ru_first; ip2 = &ru2->ru_first;
1341 for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--) {
1342 *ip++ += *ip2++;
1343 }
1344 }
1345
1346 /*
1347 * Add the rusage stats of child in parent.
1348 *
1349 * It adds rusage statistics of child process and statistics of all its
1350 * children to its parent.
1351 *
1352 * Note: proc lock of parent should be held while calling this function.
1353 */
1354 void
1355 update_rusage_info_child(struct rusage_info_child *ri, rusage_info_current *ri_current)
1356 {
1357 ri->ri_child_user_time += (ri_current->ri_user_time +
1358 ri_current->ri_child_user_time);
1359 ri->ri_child_system_time += (ri_current->ri_system_time +
1360 ri_current->ri_child_system_time);
1361 ri->ri_child_pkg_idle_wkups += (ri_current->ri_pkg_idle_wkups +
1362 ri_current->ri_child_pkg_idle_wkups);
1363 ri->ri_child_interrupt_wkups += (ri_current->ri_interrupt_wkups +
1364 ri_current->ri_child_interrupt_wkups);
1365 ri->ri_child_pageins += (ri_current->ri_pageins +
1366 ri_current->ri_child_pageins);
1367 ri->ri_child_elapsed_abstime += ((ri_current->ri_proc_exit_abstime -
1368 ri_current->ri_proc_start_abstime) + ri_current->ri_child_elapsed_abstime);
1369 }
1370
/*
 * proc_limitget
 *
 * Copy the current soft and hard limits for resource 'which' into *limp.
 * The proc list lock serializes against concurrent plimit swaps
 * (see proc_limitreplace()).
 */
void
proc_limitget(proc_t p, int which, struct rlimit * limp)
{
	proc_list_lock();
	limp->rlim_cur = p->p_rlimit[which].rlim_cur;
	limp->rlim_max = p->p_rlimit[which].rlim_max;
	proc_list_unlock();
}
1379
1380
/*
 * proc_limitdrop
 *
 * Drop this process's reference on its plimit structure (and, when
 * 'exiting' is non-zero, on the parked old structure p_olimit as well).
 * Whoever drops the last reference frees the structure — outside the
 * proc list lock.
 */
void
proc_limitdrop(proc_t p, int exiting)
{
	struct plimit * freelim = NULL;
	struct plimit * freeoldlim = NULL;

	proc_list_lock();

	/* Last reference out carries the structure off for freeing. */
	if (--p->p_limit->pl_refcnt == 0) {
		freelim = p->p_limit;
		p->p_limit = NULL;
	}
	/* The pre-replace limit is only released at process exit. */
	if ((exiting != 0) && (p->p_olimit != NULL) && (--p->p_olimit->pl_refcnt == 0)) {
		freeoldlim = p->p_olimit;
		p->p_olimit = NULL;
	}

	proc_list_unlock();
	/* Free with the list lock dropped. */
	if (freelim != NULL) {
		FREE_ZONE(freelim, sizeof *p->p_limit, M_PLIMIT);
	}
	if (freeoldlim != NULL) {
		FREE_ZONE(freeoldlim, sizeof *p->p_olimit, M_PLIMIT);
	}
}
1406
1407
/*
 * proc_limitfork
 *
 * At fork, the child shares the parent's plimit structure; just take an
 * extra reference.  The child starts with no old (pre-replace) limit.
 */
void
proc_limitfork(proc_t parent, proc_t child)
{
	proc_list_lock();
	child->p_limit = parent->p_limit;
	child->p_limit->pl_refcnt++;
	child->p_olimit = NULL;
	proc_list_unlock();
}
1417
/*
 * proc_limitblock
 *
 * Acquire the per-process limit-change "lock": wait until no other
 * thread holds P_LLIMCHANGE, then claim it.  Paired with
 * proc_limitunblock(); serializes resource-limit updates.
 */
void
proc_limitblock(proc_t p)
{
	proc_lock(p);
	while (p->p_lflag & P_LLIMCHANGE) {
		p->p_lflag |= P_LLIMWAIT;
		/* Sleep channel is &p->p_olimit; proc_limitunblock() wakes it. */
		msleep(&p->p_olimit, &p->p_mlock, 0, "proc_limitblock", NULL);
	}
	p->p_lflag |= P_LLIMCHANGE;
	proc_unlock(p);
}
1429
1430
/*
 * proc_limitunblock
 *
 * Release the limit-change "lock" taken by proc_limitblock() and wake
 * any threads waiting to take it.
 */
void
proc_limitunblock(proc_t p)
{
	proc_lock(p);
	p->p_lflag &= ~P_LLIMCHANGE;
	if (p->p_lflag & P_LLIMWAIT) {
		p->p_lflag &= ~P_LLIMWAIT;
		wakeup(&p->p_olimit);
	}
	proc_unlock(p);
}
1442
/* This is called behind serialization provided by proc_limitblock/unblock */
/*
 * proc_limitreplace
 *
 * Give the process a private copy of its plimit structure if the current
 * one is shared (refcnt > 1), so a subsequent limit update does not
 * affect other processes sharing it.  The old structure is parked on
 * p_olimit until exit (see proc_limitdrop()).
 *
 * Returns: 0 on success, ENOMEM if the copy cannot be allocated.
 */
int
proc_limitreplace(proc_t p)
{
	struct plimit *copy;


	proc_list_lock();

	/* Already unshared: nothing to do. */
	if (p->p_limit->pl_refcnt == 1) {
		proc_list_unlock();
		return 0;
	}

	proc_list_unlock();

	/* Allocate with the lock dropped, since M_WAITOK may block. */
	MALLOC_ZONE(copy, struct plimit *,
	    sizeof(struct plimit), M_PLIMIT, M_WAITOK);
	if (copy == NULL) {
		return ENOMEM;
	}

	proc_list_lock();
	bcopy(p->p_limit->pl_rlimit, copy->pl_rlimit,
	    sizeof(struct rlimit) * RLIM_NLIMITS);
	copy->pl_refcnt = 1;
	/* hang on to reference to old till process exits */
	p->p_olimit = p->p_limit;
	p->p_limit = copy;
	proc_list_unlock();

	return 0;
}
1476
1477 static int
1478 iopolicysys_disk(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
1479 static int
1480 iopolicysys_vfs_hfs_case_sensitivity(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
1481 static int
1482 iopolicysys_vfs_atime_updates(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
1483
1484 /*
1485 * iopolicysys
1486 *
1487 * Description: System call MUX for use in manipulating I/O policy attributes of the current process or thread
1488 *
1489 * Parameters: cmd Policy command
1490 * arg Pointer to policy arguments
1491 *
1492 * Returns: 0 Success
1493 * EINVAL Invalid command or invalid policy arguments
1494 *
1495 */
1496 int
1497 iopolicysys(struct proc *p, struct iopolicysys_args *uap, int32_t *retval)
1498 {
1499 int error = 0;
1500 struct _iopol_param_t iop_param;
1501
1502 if ((error = copyin(uap->arg, &iop_param, sizeof(iop_param))) != 0) {
1503 goto out;
1504 }
1505
1506 switch (iop_param.iop_iotype) {
1507 case IOPOL_TYPE_DISK:
1508 error = iopolicysys_disk(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
1509 if (error == EIDRM) {
1510 *retval = -2;
1511 error = 0;
1512 }
1513 if (error) {
1514 goto out;
1515 }
1516 break;
1517 case IOPOL_TYPE_VFS_HFS_CASE_SENSITIVITY:
1518 error = iopolicysys_vfs_hfs_case_sensitivity(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
1519 if (error) {
1520 goto out;
1521 }
1522 break;
1523 case IOPOL_TYPE_VFS_ATIME_UPDATES:
1524 error = iopolicysys_vfs_atime_updates(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
1525 if (error) {
1526 goto out;
1527 }
1528 break;
1529 default:
1530 error = EINVAL;
1531 goto out;
1532 }
1533
1534 /* Individual iotype handlers are expected to update iop_param, if requested with a GET command */
1535 if (uap->cmd == IOPOL_CMD_GET) {
1536 error = copyout((caddr_t)&iop_param, uap->arg, sizeof(iop_param));
1537 if (error) {
1538 goto out;
1539 }
1540 }
1541
1542 out:
1543 return error;
1544 }
1545
/*
 * iopolicysys_disk
 *
 * Get/set the disk I/O throttling policy for a process, the current
 * thread, or the process's Darwin-BG tier.  Validates the scope and (for
 * SET) the policy value, then forwards to the task/thread policy engine.
 *
 * Returns:	0		Success
 *		EIDRM		Marker: the thread's QoS class was stripped
 *				(translated to retval -2 by iopolicysys())
 *		EINVAL		Bad scope, policy, or command
 *		ENOTSUP		DARWIN_BG scope on embedded configurations
 */
static int
iopolicysys_disk(struct proc *p __unused, int cmd, int scope, int policy, struct _iopol_param_t *iop_param)
{
	int error = 0;
	thread_t thread;
	int policy_flavor;

	/* Validate scope */
	switch (scope) {
	case IOPOL_SCOPE_PROCESS:
		thread = THREAD_NULL;
		policy_flavor = TASK_POLICY_IOPOL;
		break;

	case IOPOL_SCOPE_THREAD:
		thread = current_thread();
		policy_flavor = TASK_POLICY_IOPOL;

		/* Not allowed to combine QoS and (non-PASSIVE) IO policy, doing so strips the QoS */
		if (cmd == IOPOL_CMD_SET && thread_has_qos_policy(thread)) {
			switch (policy) {
			case IOPOL_DEFAULT:
			case IOPOL_PASSIVE:
				break;
			case IOPOL_UTILITY:
			case IOPOL_THROTTLE:
			case IOPOL_IMPORTANT:
			case IOPOL_STANDARD:
				if (!thread_is_static_param(thread)) {
					thread_remove_qos_policy(thread);
					/*
					 * This is not an error case, this is to return a marker to user-space that
					 * we stripped the thread of its QoS class.
					 */
					error = EIDRM;
					break;
				}
			/* otherwise, fall through to the error case. */
			default:
				error = EINVAL;
				goto out;
			}
		}
		break;

	case IOPOL_SCOPE_DARWIN_BG:
#if CONFIG_EMBEDDED
		/* Embedded doesn't want this as BG is always IOPOL_THROTTLE */
		error = ENOTSUP;
		goto out;
#else /* CONFIG_EMBEDDED */
		thread = THREAD_NULL;
		policy_flavor = TASK_POLICY_DARWIN_BG_IOPOL;
		break;
#endif /* CONFIG_EMBEDDED */

	default:
		error = EINVAL;
		goto out;
	}

	/* Validate policy */
	if (cmd == IOPOL_CMD_SET) {
		switch (policy) {
		case IOPOL_DEFAULT:
			if (scope == IOPOL_SCOPE_DARWIN_BG) {
				/* the current default BG throttle level is UTILITY */
				policy = IOPOL_UTILITY;
			} else {
				policy = IOPOL_IMPORTANT;
			}
			break;
		case IOPOL_UTILITY:
		/* fall-through */
		case IOPOL_THROTTLE:
			/* These levels are OK */
			break;
		case IOPOL_IMPORTANT:
		/* fall-through */
		case IOPOL_STANDARD:
		/* fall-through */
		case IOPOL_PASSIVE:
			if (scope == IOPOL_SCOPE_DARWIN_BG) {
				/* These levels are invalid for BG */
				error = EINVAL;
				goto out;
			} else {
				/* OK for other scopes */
			}
			break;
		default:
			error = EINVAL;
			goto out;
		}
	}

	/* Perform command */
	switch (cmd) {
	case IOPOL_CMD_SET:
		/* THREAD_NULL means the policy applies task-wide. */
		if (thread != THREAD_NULL) {
			proc_set_thread_policy(thread, TASK_POLICY_INTERNAL, policy_flavor, policy);
		} else {
			proc_set_task_policy(current_task(), TASK_POLICY_INTERNAL, policy_flavor, policy);
		}
		break;
	case IOPOL_CMD_GET:
		if (thread != THREAD_NULL) {
			policy = proc_get_thread_policy(thread, TASK_POLICY_INTERNAL, policy_flavor);
		} else {
			policy = proc_get_task_policy(current_task(), TASK_POLICY_INTERNAL, policy_flavor);
		}
		/* Hand the queried value back via the caller's parameter block. */
		iop_param->iop_policy = policy;
		break;
	default:
		error = EINVAL; /* unknown command */
		break;
	}

out:
	return error;
}
1667
1668 static int
1669 iopolicysys_vfs_hfs_case_sensitivity(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param)
1670 {
1671 int error = 0;
1672
1673 /* Validate scope */
1674 switch (scope) {
1675 case IOPOL_SCOPE_PROCESS:
1676 /* Only process OK */
1677 break;
1678 default:
1679 error = EINVAL;
1680 goto out;
1681 }
1682
1683 /* Validate policy */
1684 if (cmd == IOPOL_CMD_SET) {
1685 switch (policy) {
1686 case IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT:
1687 /* fall-through */
1688 case IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE:
1689 /* These policies are OK */
1690 break;
1691 default:
1692 error = EINVAL;
1693 goto out;
1694 }
1695 }
1696
1697 /* Perform command */
1698 switch (cmd) {
1699 case IOPOL_CMD_SET:
1700 if (0 == kauth_cred_issuser(kauth_cred_get())) {
1701 /* If it's a non-root process, it needs to have the entitlement to set the policy */
1702 boolean_t entitled = FALSE;
1703 entitled = IOTaskHasEntitlement(current_task(), "com.apple.private.iopol.case_sensitivity");
1704 if (!entitled) {
1705 error = EPERM;
1706 goto out;
1707 }
1708 }
1709
1710 switch (policy) {
1711 case IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT:
1712 OSBitAndAtomic16(~((uint32_t)P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY), &p->p_vfs_iopolicy);
1713 break;
1714 case IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE:
1715 OSBitOrAtomic16((uint32_t)P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY, &p->p_vfs_iopolicy);
1716 break;
1717 default:
1718 error = EINVAL;
1719 goto out;
1720 }
1721
1722 break;
1723 case IOPOL_CMD_GET:
1724 iop_param->iop_policy = (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY)
1725 ? IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE
1726 : IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT;
1727 break;
1728 default:
1729 error = EINVAL; /* unknown command */
1730 break;
1731 }
1732
1733 out:
1734 return error;
1735 }
1736
1737 static inline int
1738 get_thread_atime_policy(struct uthread *ut)
1739 {
1740 return (ut->uu_flag & UT_ATIME_UPDATE) ? IOPOL_ATIME_UPDATES_OFF : IOPOL_ATIME_UPDATES_DEFAULT;
1741 }
1742
1743 static inline void
1744 set_thread_atime_policy(struct uthread *ut, int policy)
1745 {
1746 if (policy == IOPOL_ATIME_UPDATES_OFF) {
1747 ut->uu_flag |= UT_ATIME_UPDATE;
1748 } else {
1749 ut->uu_flag &= ~UT_ATIME_UPDATE;
1750 }
1751 }
1752
1753 static inline void
1754 set_task_atime_policy(struct proc *p, int policy)
1755 {
1756 if (policy == IOPOL_ATIME_UPDATES_OFF) {
1757 OSBitOrAtomic16((uint16_t)P_VFS_IOPOLICY_ATIME_UPDATES, &p->p_vfs_iopolicy);
1758 } else {
1759 OSBitAndAtomic16(~((uint16_t)P_VFS_IOPOLICY_ATIME_UPDATES), &p->p_vfs_iopolicy);
1760 }
1761 }
1762
1763 static inline int
1764 get_task_atime_policy(struct proc *p)
1765 {
1766 return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_ATIME_UPDATES) ? IOPOL_ATIME_UPDATES_OFF : IOPOL_ATIME_UPDATES_DEFAULT;
1767 }
1768
1769 static int
1770 iopolicysys_vfs_atime_updates(struct proc *p __unused, int cmd, int scope, int policy, struct _iopol_param_t *iop_param)
1771 {
1772 int error = 0;
1773 thread_t thread;
1774
1775 /* Validate scope */
1776 switch (scope) {
1777 case IOPOL_SCOPE_THREAD:
1778 thread = current_thread();
1779 break;
1780 case IOPOL_SCOPE_PROCESS:
1781 thread = THREAD_NULL;
1782 break;
1783 default:
1784 error = EINVAL;
1785 goto out;
1786 }
1787
1788 /* Validate policy */
1789 if (cmd == IOPOL_CMD_SET) {
1790 switch (policy) {
1791 case IOPOL_ATIME_UPDATES_DEFAULT:
1792 case IOPOL_ATIME_UPDATES_OFF:
1793 break;
1794 default:
1795 error = EINVAL;
1796 goto out;
1797 }
1798 }
1799
1800 /* Perform command */
1801 switch (cmd) {
1802 case IOPOL_CMD_SET:
1803 if (thread != THREAD_NULL) {
1804 set_thread_atime_policy(get_bsdthread_info(thread), policy);
1805 } else {
1806 set_task_atime_policy(p, policy);
1807 }
1808 break;
1809 case IOPOL_CMD_GET:
1810 if (thread != THREAD_NULL) {
1811 policy = get_thread_atime_policy(get_bsdthread_info(thread));
1812 } else {
1813 policy = get_task_atime_policy(p);
1814 }
1815 iop_param->iop_policy = policy;
1816 break;
1817 default:
1818 error = EINVAL; /* unknown command */
1819 break;
1820 }
1821
1822 out:
1823 return error;
1824 }
1825
1826 /* BSD call back function for task_policy networking changes */
1827 void
1828 proc_apply_task_networkbg(void * bsd_info, thread_t thread)
1829 {
1830 assert(bsd_info != PROC_NULL);
1831
1832 pid_t pid = proc_pid((proc_t)bsd_info);
1833
1834 proc_t p = proc_find(pid);
1835
1836 if (p != PROC_NULL) {
1837 assert(p == (proc_t)bsd_info);
1838
1839 do_background_socket(p, thread);
1840 proc_rele(p);
1841 }
1842 }
1843
/*
 * gather_rusage_info
 *
 * Fill *ru with live resource-usage information for proc p.  The switch
 * cascades deliberately: each newer flavor gathers its additional fields
 * and then falls through to collect everything the older flavors report,
 * so requesting RUSAGE_INFO_V4 yields V0..V4 data.
 */
void
gather_rusage_info(proc_t p, rusage_info_current *ru, int flavor)
{
	struct rusage_info_child *ri_child;

	assert(p->p_stats != NULL);
	memset(ru, 0, sizeof(*ru));
	switch (flavor) {
	case RUSAGE_INFO_V4:
		ru->ri_logical_writes = get_task_logical_writes(p->task);
		ru->ri_lifetime_max_phys_footprint = get_task_phys_footprint_lifetime_max(p->task);
#if CONFIG_LEDGER_INTERVAL_MAX
		/* FALSE: query only; do not reset the interval maximum. */
		ru->ri_interval_max_phys_footprint = get_task_phys_footprint_interval_max(p->task, FALSE);
#endif
		fill_task_monotonic_rusage(p->task, ru);
	/* fall through */

	case RUSAGE_INFO_V3:
		fill_task_qos_rusage(p->task, ru);
		fill_task_billed_usage(p->task, ru);
	/* fall through */

	case RUSAGE_INFO_V2:
		fill_task_io_rusage(p->task, ru);
	/* fall through */

	case RUSAGE_INFO_V1:
		/*
		 * p->p_stats->ri_child statistics are protected under proc lock.
		 */
		proc_lock(p);

		ri_child = &(p->p_stats->ri_child);
		ru->ri_child_user_time = ri_child->ri_child_user_time;
		ru->ri_child_system_time = ri_child->ri_child_system_time;
		ru->ri_child_pkg_idle_wkups = ri_child->ri_child_pkg_idle_wkups;
		ru->ri_child_interrupt_wkups = ri_child->ri_child_interrupt_wkups;
		ru->ri_child_pageins = ri_child->ri_child_pageins;
		ru->ri_child_elapsed_abstime = ri_child->ri_child_elapsed_abstime;

		proc_unlock(p);
	/* fall through */

	case RUSAGE_INFO_V0:
		proc_getexecutableuuid(p, (unsigned char *)&ru->ri_uuid, sizeof(ru->ri_uuid));
		fill_task_rusage(p->task, ru);
		ru->ri_proc_start_abstime = p->p_stats->ps_start;
	}
}
1893
1894 int
1895 proc_get_rusage(proc_t p, int flavor, user_addr_t buffer, __unused int is_zombie)
1896 {
1897 rusage_info_current ri_current = {};
1898
1899 int error = 0;
1900 size_t size = 0;
1901
1902 switch (flavor) {
1903 case RUSAGE_INFO_V0:
1904 size = sizeof(struct rusage_info_v0);
1905 break;
1906
1907 case RUSAGE_INFO_V1:
1908 size = sizeof(struct rusage_info_v1);
1909 break;
1910
1911 case RUSAGE_INFO_V2:
1912 size = sizeof(struct rusage_info_v2);
1913 break;
1914
1915 case RUSAGE_INFO_V3:
1916 size = sizeof(struct rusage_info_v3);
1917 break;
1918
1919 case RUSAGE_INFO_V4:
1920 size = sizeof(struct rusage_info_v4);
1921 break;
1922
1923 default:
1924 return EINVAL;
1925 }
1926
1927 if (size == 0) {
1928 return EINVAL;
1929 }
1930
1931 /*
1932 * If task is still alive, collect info from the live task itself.
1933 * Otherwise, look to the cached info in the zombie proc.
1934 */
1935 if (p->p_ru == NULL) {
1936 gather_rusage_info(p, &ri_current, flavor);
1937 ri_current.ri_proc_exit_abstime = 0;
1938 error = copyout(&ri_current, buffer, size);
1939 } else {
1940 ri_current = p->p_ru->ri;
1941 error = copyout(&p->p_ru->ri, buffer, size);
1942 }
1943
1944 return error;
1945 }
1946
1947 static int
1948 mach_to_bsd_rv(int mach_rv)
1949 {
1950 int bsd_rv = 0;
1951
1952 switch (mach_rv) {
1953 case KERN_SUCCESS:
1954 bsd_rv = 0;
1955 break;
1956 case KERN_INVALID_ARGUMENT:
1957 bsd_rv = EINVAL;
1958 break;
1959 default:
1960 panic("unknown error %#x", mach_rv);
1961 }
1962
1963 return bsd_rv;
1964 }
1965
/*
 * Resource limit controls
 *
 * uap->flavor available flavors:
 *
 *     RLIMIT_WAKEUPS_MONITOR
 *     RLIMIT_CPU_USAGE_MONITOR
 *     RLIMIT_THREAD_CPULIMITS
 *     RLIMIT_FOOTPRINT_INTERVAL
 */
int
proc_rlimit_control(__unused struct proc *p, struct proc_rlimit_control_args *uap, __unused int32_t *retval)
{
	proc_t  targetp;
	int     error = 0;
	struct  proc_rlimit_control_wakeupmon wakeupmon_args;
	uint32_t cpumon_flags;
	uint32_t cpulimits_flags;
	kauth_cred_t my_cred, target_cred;
#if CONFIG_LEDGER_INTERVAL_MAX
	uint32_t footprint_interval_flags;
	uint64_t interval_max_footprint;
#endif /* CONFIG_LEDGER_INTERVAL_MAX */

	/* -1 implicitly means our own process (perhaps even the current thread for per-thread attributes) */
	if (uap->pid == -1) {
		targetp = proc_self();
	} else {
		targetp = proc_find(uap->pid);
	}

	/* proc_self() can return NULL for an exiting process */
	if (targetp == PROC_NULL) {
		return ESRCH;
	}

	my_cred = kauth_cred_get();
	target_cred = kauth_cred_proc_ref(targetp);

	/*
	 * Permission check: allowed if caller is superuser, has real uid 0,
	 * or its effective/real uid matches the target's effective uid.
	 */
	if (!kauth_cred_issuser(my_cred) && kauth_cred_getruid(my_cred) &&
	    kauth_cred_getuid(my_cred) != kauth_cred_getuid(target_cred) &&
	    kauth_cred_getruid(my_cred) != kauth_cred_getuid(target_cred)) {
		proc_rele(targetp);
		kauth_cred_unref(&target_cred);
		return EACCES;
	}

	switch (uap->flavor) {
	case RLIMIT_WAKEUPS_MONITOR:
		/* Copy in the monitor parameters, apply, and report them back. */
		if ((error = copyin(uap->arg, &wakeupmon_args, sizeof(wakeupmon_args))) != 0) {
			break;
		}
		if ((error = mach_to_bsd_rv(task_wakeups_monitor_ctl(targetp->task, &wakeupmon_args.wm_flags,
		    &wakeupmon_args.wm_rate))) != 0) {
			break;
		}
		error = copyout(&wakeupmon_args, uap->arg, sizeof(wakeupmon_args));
		break;
	case RLIMIT_CPU_USAGE_MONITOR:
		cpumon_flags = uap->arg; // XXX temporarily stashing flags in argp (12592127)
		error = mach_to_bsd_rv(task_cpu_usage_monitor_ctl(targetp->task, &cpumon_flags));
		break;
	case RLIMIT_THREAD_CPULIMITS:
		cpulimits_flags = (uint32_t)uap->arg; // only need a limited set of bits, pass in void * argument

		/* Per-thread CPU limits may only be applied to the calling process. */
		if (uap->pid != -1) {
			error = EINVAL;
			break;
		}

		uint8_t percent = 0;
		uint32_t ms_refill = 0;
		uint64_t ns_refill;

		percent = (uint8_t)(cpulimits_flags & 0xffU);           /* low 8 bits for percent */
		ms_refill = (cpulimits_flags >> 8) & 0xffffff;          /* next 24 bits represent ms refill value */
		if (percent >= 100) {
			error = EINVAL;
			break;
		}

		ns_refill = ((uint64_t)ms_refill) * NSEC_PER_MSEC;

		error = mach_to_bsd_rv(thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, percent, ns_refill));
		break;

#if CONFIG_LEDGER_INTERVAL_MAX
	case RLIMIT_FOOTPRINT_INTERVAL:
		footprint_interval_flags = uap->arg; // XXX temporarily stashing flags in argp (12592127)
		/*
		 * There is currently only one option for this flavor.
		 */
		if ((footprint_interval_flags & FOOTPRINT_INTERVAL_RESET) == 0) {
			error = EINVAL;
			break;
		}
		/* TRUE: reset the interval maximum as a side effect of the query. */
		interval_max_footprint = get_task_phys_footprint_interval_max(targetp->task, TRUE);
		break;
#endif /* CONFIG_LEDGER_INTERVAL_MAX */
	default:
		error = EINVAL;
		break;
	}

	proc_rele(targetp);
	kauth_cred_unref(&target_cred);

	/*
	 * Return value from this function becomes errno to userland caller.
	 */
	return error;
}
2078
2079 /*
2080 * Return the current amount of CPU consumed by this thread (in either user or kernel mode)
2081 */
2082 int
2083 thread_selfusage(struct proc *p __unused, struct thread_selfusage_args *uap __unused, uint64_t *retval)
2084 {
2085 uint64_t runtime;
2086
2087 runtime = thread_get_runtime_self();
2088 *retval = runtime;
2089
2090 return 0;
2091 }
2092
#if !MONOTONIC
/*
 * Stub for the thread_selfcounts() syscall on kernels built without the
 * monotonic performance-counter subsystem: always unsupported.
 */
int
thread_selfcounts(__unused struct proc *p, __unused struct thread_selfcounts_args *uap, __unused int *ret_out)
{
	return ENOTSUP;
}
#endif /* !MONOTONIC */