]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * Copyright (c) 2000-2018 Apple Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
14 | * | |
15 | * Please obtain a copy of the License at | |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
25 | * | |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |
27 | */ | |
28 | /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ | |
29 | /*- | |
30 | * Copyright (c) 1982, 1986, 1991, 1993 | |
31 | * The Regents of the University of California. All rights reserved. | |
32 | * (c) UNIX System Laboratories, Inc. | |
33 | * All or some portions of this file are derived from material licensed | |
34 | * to the University of California by American Telephone and Telegraph | |
35 | * Co. or Unix System Laboratories, Inc. and are reproduced herein with | |
36 | * the permission of UNIX System Laboratories, Inc. | |
37 | * | |
38 | * Redistribution and use in source and binary forms, with or without | |
39 | * modification, are permitted provided that the following conditions | |
40 | * are met: | |
41 | * 1. Redistributions of source code must retain the above copyright | |
42 | * notice, this list of conditions and the following disclaimer. | |
43 | * 2. Redistributions in binary form must reproduce the above copyright | |
44 | * notice, this list of conditions and the following disclaimer in the | |
45 | * documentation and/or other materials provided with the distribution. | |
46 | * 3. All advertising materials mentioning features or use of this software | |
47 | * must display the following acknowledgement: | |
48 | * This product includes software developed by the University of | |
49 | * California, Berkeley and its contributors. | |
50 | * 4. Neither the name of the University nor the names of its contributors | |
51 | * may be used to endorse or promote products derived from this software | |
52 | * without specific prior written permission. | |
53 | * | |
54 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
55 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
56 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
57 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
58 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
59 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
60 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
61 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
62 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
63 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
64 | * SUCH DAMAGE. | |
65 | * | |
66 | * @(#)kern_resource.c 8.5 (Berkeley) 1/21/94 | |
67 | */ | |
68 | /* | |
69 | * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce | |
70 | * support for mandatory and extensible security protections. This notice | |
71 | * is included in support of clause 2.2 (b) of the Apple Public License, | |
72 | * Version 2.0. | |
73 | */ | |
74 | ||
75 | #include <sys/param.h> | |
76 | #include <sys/systm.h> | |
77 | #include <sys/sysctl.h> | |
78 | #include <sys/kernel.h> | |
79 | #include <sys/file_internal.h> | |
80 | #include <sys/resourcevar.h> | |
81 | #include <sys/malloc.h> | |
82 | #include <sys/proc_internal.h> | |
83 | #include <sys/kauth.h> | |
84 | #include <sys/mount_internal.h> | |
85 | #include <sys/sysproto.h> | |
86 | ||
87 | #include <security/audit/audit.h> | |
88 | ||
89 | #include <machine/vmparam.h> | |
90 | ||
91 | #include <mach/mach_types.h> | |
92 | #include <mach/time_value.h> | |
93 | #include <mach/task.h> | |
94 | #include <mach/task_info.h> | |
95 | #include <mach/vm_map.h> | |
96 | #include <mach/mach_vm.h> | |
97 | #include <mach/thread_act.h> /* for thread_policy_set( ) */ | |
98 | #include <kern/thread.h> | |
99 | #include <kern/policy_internal.h> | |
100 | ||
101 | #include <kern/task.h> | |
102 | #include <kern/clock.h> /* for absolutetime_to_microtime() */ | |
103 | #include <netinet/in.h> /* for TRAFFIC_MGT_SO_* */ | |
104 | #include <sys/socketvar.h> /* for struct socket */ | |
105 | #if NECP | |
106 | #include <net/necp.h> | |
107 | #endif /* NECP */ | |
108 | ||
109 | #include <vm/vm_map.h> | |
110 | ||
111 | #include <kern/assert.h> | |
112 | #include <sys/resource.h> | |
113 | #include <sys/priv.h> | |
114 | #include <IOKit/IOBSD.h> | |
115 | ||
116 | #if CONFIG_MACF | |
117 | #include <security/mac_framework.h> | |
118 | #endif | |
119 | ||
120 | int donice(struct proc *curp, struct proc *chgp, int n); | |
121 | int dosetrlimit(struct proc *p, u_int which, struct rlimit *limp); | |
122 | int uthread_get_background_state(uthread_t); | |
123 | static void do_background_socket(struct proc *p, thread_t thread); | |
124 | static int do_background_thread(thread_t thread, int priority); | |
125 | static int do_background_proc(struct proc *curp, struct proc *targetp, int priority); | |
126 | static int set_gpudeny_proc(struct proc *curp, struct proc *targetp, int priority); | |
127 | static int proc_set_darwin_role(proc_t curp, proc_t targetp, int priority); | |
128 | static int proc_get_darwin_role(proc_t curp, proc_t targetp, int *priority); | |
129 | static int get_background_proc(struct proc *curp, struct proc *targetp, int *priority); | |
130 | int proc_pid_rusage(int pid, int flavor, user_addr_t buf, int32_t *retval); | |
131 | void gather_rusage_info(proc_t p, rusage_info_current *ru, int flavor); | |
132 | int fill_task_rusage(task_t task, rusage_info_current *ri); | |
133 | void fill_task_billed_usage(task_t task, rusage_info_current *ri); | |
134 | int fill_task_io_rusage(task_t task, rusage_info_current *ri); | |
135 | int fill_task_qos_rusage(task_t task, rusage_info_current *ri); | |
136 | uint64_t get_task_logical_writes(task_t task, boolean_t external); | |
137 | void fill_task_monotonic_rusage(task_t task, rusage_info_current *ri); | |
138 | ||
139 | int proc_get_rusage(proc_t p, int flavor, user_addr_t buffer, __unused int is_zombie); | |
140 | ||
141 | rlim_t maxdmap = MAXDSIZ; /* XXX */ | |
142 | rlim_t maxsmap = MAXSSIZ - PAGE_MAX_SIZE; /* XXX */ | |
143 | ||
144 | /* | |
145 | * Limits on the number of open files per process, and the number | |
146 | * of child processes per process. | |
147 | * | |
148 | * Note: would be in kern/subr_param.c in FreeBSD. | |
149 | */ | |
150 | __private_extern__ int maxfilesperproc = OPEN_MAX; /* per-proc open files limit */ | |
151 | ||
152 | SYSCTL_INT(_kern, KERN_MAXPROCPERUID, maxprocperuid, CTLFLAG_RW | CTLFLAG_LOCKED, | |
153 | &maxprocperuid, 0, "Maximum processes allowed per userid" ); | |
154 | ||
155 | SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW | CTLFLAG_LOCKED, | |
156 | &maxfilesperproc, 0, "Maximum files allowed open per process" ); | |
157 | ||
158 | /* Args and fn for proc_iteration callback used in setpriority */ | |
struct puser_nice_args {
	proc_t curp;            /* calling process; donice() checks its creds */
	int prio;               /* nice value to apply to each match */
	id_t who;               /* target uid: renice all procs owned by it */
	int * foundp;           /* out (may be NULL): count of procs reniced */
	int * errorp;           /* out (may be NULL): last donice() result */
};
166 | static int puser_donice_callback(proc_t p, void * arg); | |
167 | ||
168 | ||
169 | /* Args and fn for proc_iteration callback used in setpriority */ | |
struct ppgrp_nice_args {
	proc_t curp;            /* calling process; donice() checks its creds */
	int prio;               /* nice value to apply to each group member */
	int * foundp;           /* out (may be NULL): count of procs visited */
	int * errorp;           /* out (may be NULL): last donice() result */
};
176 | static int ppgrp_donice_callback(proc_t p, void * arg); | |
177 | ||
178 | /* | |
179 | * Resource controls and accounting. | |
180 | */ | |
/*
 * getpriority system call.
 *
 * For the classic POSIX flavors (PRIO_PROCESS / PRIO_PGRP / PRIO_USER),
 * return the lowest (most favorable) nice value among the selected
 * processes.  For the Darwin extensions, return background/role state:
 *   PRIO_DARWIN_THREAD  - current thread's DARWIN_BG policy (who must be 0)
 *   PRIO_DARWIN_PROCESS - target process's DARWIN_BG state
 *   PRIO_DARWIN_ROLE    - target process's darwin role
 *
 * Returns:	0	Success, result stored through retval
 *		EINVAL	Bad 'which', or 'who' out of range
 *		ESRCH	No matching process/pgrp found
 *		get_background_proc:EPERM
 *		proc_get_darwin_role:EPERM
 */
int
getpriority(struct proc *curp, struct getpriority_args *uap, int32_t *retval)
{
	struct proc *p;
	/* sentinel: PRIO_MAX + 1 means "no process matched" (checked at end) */
	int low = PRIO_MAX + 1;
	kauth_cred_t my_cred;
	int refheld = 0;        /* nonzero when p carries a proc_find() ref */
	int error = 0;

	/* would also test (uap->who < 0), but id_t is unsigned */
	if (uap->who > 0x7fffffff) {
		return EINVAL;
	}

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			/* who == 0 means "the calling process" */
			p = curp;
			low = p->p_nice;
		} else {
			p = proc_find(uap->who);
			if (p == 0) {
				break;
			}
			low = p->p_nice;
			proc_rele(p);
		}
		break;

	case PRIO_PGRP: {
		struct pgrp *pg = PGRP_NULL;

		if (uap->who == 0) {
			/* returns the pgrp to ref */
			pg = proc_pgrp(curp);
		} else if ((pg = pgfind(uap->who)) == PGRP_NULL) {
			break;
		}
		/* No need for iteration as it is a simple scan */
		pgrp_lock(pg);
		PGMEMBERS_FOREACH(pg, p) {
			if (p->p_nice < low) {
				low = p->p_nice;
			}
		}
		pgrp_unlock(pg);
		pg_rele(pg);
		break;
	}

	case PRIO_USER:
		if (uap->who == 0) {
			/* who == 0 means "the caller's effective uid" */
			uap->who = kauth_cred_getuid(kauth_cred_get());
		}

		proc_list_lock();

		/* scan every process, tracking the minimum nice for this uid */
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			my_cred = kauth_cred_proc_ref(p);
			if (kauth_cred_getuid(my_cred) == uap->who &&
			    p->p_nice < low) {
				low = p->p_nice;
			}
			kauth_cred_unref(&my_cred);
		}

		proc_list_unlock();

		break;

	case PRIO_DARWIN_THREAD:
		/* we currently only support the current thread */
		if (uap->who != 0) {
			return EINVAL;
		}

		low = proc_get_thread_policy(current_thread(), TASK_POLICY_INTERNAL, TASK_POLICY_DARWIN_BG);

		break;

	case PRIO_DARWIN_PROCESS:
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == PROC_NULL) {
				break;
			}
			refheld = 1;
		}

		error = get_background_proc(curp, p, &low);

		if (refheld) {
			proc_rele(p);
		}
		if (error) {
			return error;
		}
		break;

	case PRIO_DARWIN_ROLE:
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == PROC_NULL) {
				break;
			}
			refheld = 1;
		}

		error = proc_get_darwin_role(curp, p, &low);

		if (refheld) {
			proc_rele(p);
		}
		if (error) {
			return error;
		}
		break;

	default:
		return EINVAL;
	}
	/* sentinel untouched => nothing matched the which/who selector */
	if (low == PRIO_MAX + 1) {
		return ESRCH;
	}
	*retval = low;
	return 0;
}
312 | ||
313 | /* call back function used for proc iteration in PRIO_USER */ | |
314 | static int | |
315 | puser_donice_callback(proc_t p, void * arg) | |
316 | { | |
317 | int error, n; | |
318 | struct puser_nice_args * pun = (struct puser_nice_args *)arg; | |
319 | kauth_cred_t my_cred; | |
320 | ||
321 | my_cred = kauth_cred_proc_ref(p); | |
322 | if (kauth_cred_getuid(my_cred) == pun->who) { | |
323 | error = donice(pun->curp, p, pun->prio); | |
324 | if (pun->errorp != NULL) { | |
325 | *pun->errorp = error; | |
326 | } | |
327 | if (pun->foundp != NULL) { | |
328 | n = *pun->foundp; | |
329 | *pun->foundp = n + 1; | |
330 | } | |
331 | } | |
332 | kauth_cred_unref(&my_cred); | |
333 | ||
334 | return PROC_RETURNED; | |
335 | } | |
336 | ||
337 | /* call back function used for proc iteration in PRIO_PGRP */ | |
338 | static int | |
339 | ppgrp_donice_callback(proc_t p, void * arg) | |
340 | { | |
341 | int error; | |
342 | struct ppgrp_nice_args * pun = (struct ppgrp_nice_args *)arg; | |
343 | int n; | |
344 | ||
345 | error = donice(pun->curp, p, pun->prio); | |
346 | if (pun->errorp != NULL) { | |
347 | *pun->errorp = error; | |
348 | } | |
349 | if (pun->foundp != NULL) { | |
350 | n = *pun->foundp; | |
351 | *pun->foundp = n + 1; | |
352 | } | |
353 | ||
354 | return PROC_RETURNED; | |
355 | } | |
356 | ||
357 | /* | |
358 | * Returns: 0 Success | |
359 | * EINVAL | |
360 | * ESRCH | |
361 | * donice:EPERM | |
362 | * donice:EACCES | |
363 | */ | |
364 | /* ARGSUSED */ | |
365 | int | |
366 | setpriority(struct proc *curp, struct setpriority_args *uap, int32_t *retval) | |
367 | { | |
368 | struct proc *p; | |
369 | int found = 0, error = 0; | |
370 | int refheld = 0; | |
371 | ||
372 | AUDIT_ARG(cmd, uap->which); | |
373 | AUDIT_ARG(owner, uap->who, 0); | |
374 | AUDIT_ARG(value32, uap->prio); | |
375 | ||
376 | /* would also test (uap->who < 0), but id_t is unsigned */ | |
377 | if (uap->who > 0x7fffffff) { | |
378 | return EINVAL; | |
379 | } | |
380 | ||
381 | switch (uap->which) { | |
382 | case PRIO_PROCESS: | |
383 | if (uap->who == 0) { | |
384 | p = curp; | |
385 | } else { | |
386 | p = proc_find(uap->who); | |
387 | if (p == 0) { | |
388 | break; | |
389 | } | |
390 | refheld = 1; | |
391 | } | |
392 | error = donice(curp, p, uap->prio); | |
393 | found++; | |
394 | if (refheld != 0) { | |
395 | proc_rele(p); | |
396 | } | |
397 | break; | |
398 | ||
399 | case PRIO_PGRP: { | |
400 | struct pgrp *pg = PGRP_NULL; | |
401 | struct ppgrp_nice_args ppgrp; | |
402 | ||
403 | if (uap->who == 0) { | |
404 | pg = proc_pgrp(curp); | |
405 | } else if ((pg = pgfind(uap->who)) == PGRP_NULL) { | |
406 | break; | |
407 | } | |
408 | ||
409 | ppgrp.curp = curp; | |
410 | ppgrp.prio = uap->prio; | |
411 | ppgrp.foundp = &found; | |
412 | ppgrp.errorp = &error; | |
413 | ||
414 | /* PGRP_DROPREF drops the reference on process group */ | |
415 | pgrp_iterate(pg, PGRP_DROPREF, ppgrp_donice_callback, (void *)&ppgrp, NULL, NULL); | |
416 | ||
417 | break; | |
418 | } | |
419 | ||
420 | case PRIO_USER: { | |
421 | struct puser_nice_args punice; | |
422 | ||
423 | if (uap->who == 0) { | |
424 | uap->who = kauth_cred_getuid(kauth_cred_get()); | |
425 | } | |
426 | ||
427 | punice.curp = curp; | |
428 | punice.prio = uap->prio; | |
429 | punice.who = uap->who; | |
430 | punice.foundp = &found; | |
431 | error = 0; | |
432 | punice.errorp = &error; | |
433 | proc_iterate(PROC_ALLPROCLIST, puser_donice_callback, (void *)&punice, NULL, NULL); | |
434 | ||
435 | break; | |
436 | } | |
437 | ||
438 | case PRIO_DARWIN_THREAD: { | |
439 | /* we currently only support the current thread */ | |
440 | if (uap->who != 0) { | |
441 | return EINVAL; | |
442 | } | |
443 | ||
444 | error = do_background_thread(current_thread(), uap->prio); | |
445 | found++; | |
446 | break; | |
447 | } | |
448 | ||
449 | case PRIO_DARWIN_PROCESS: { | |
450 | if (uap->who == 0) { | |
451 | p = curp; | |
452 | } else { | |
453 | p = proc_find(uap->who); | |
454 | if (p == 0) { | |
455 | break; | |
456 | } | |
457 | refheld = 1; | |
458 | } | |
459 | ||
460 | error = do_background_proc(curp, p, uap->prio); | |
461 | ||
462 | found++; | |
463 | if (refheld != 0) { | |
464 | proc_rele(p); | |
465 | } | |
466 | break; | |
467 | } | |
468 | ||
469 | case PRIO_DARWIN_GPU: { | |
470 | if (uap->who == 0) { | |
471 | return EINVAL; | |
472 | } | |
473 | ||
474 | p = proc_find(uap->who); | |
475 | if (p == PROC_NULL) { | |
476 | break; | |
477 | } | |
478 | ||
479 | error = set_gpudeny_proc(curp, p, uap->prio); | |
480 | ||
481 | found++; | |
482 | proc_rele(p); | |
483 | break; | |
484 | } | |
485 | ||
486 | case PRIO_DARWIN_ROLE: { | |
487 | if (uap->who == 0) { | |
488 | p = curp; | |
489 | } else { | |
490 | p = proc_find(uap->who); | |
491 | if (p == PROC_NULL) { | |
492 | break; | |
493 | } | |
494 | refheld = 1; | |
495 | } | |
496 | ||
497 | error = proc_set_darwin_role(curp, p, uap->prio); | |
498 | ||
499 | found++; | |
500 | if (refheld != 0) { | |
501 | proc_rele(p); | |
502 | } | |
503 | break; | |
504 | } | |
505 | ||
506 | default: | |
507 | return EINVAL; | |
508 | } | |
509 | if (found == 0) { | |
510 | return ESRCH; | |
511 | } | |
512 | if (error == EIDRM) { | |
513 | *retval = -2; | |
514 | error = 0; | |
515 | } | |
516 | return error; | |
517 | } | |
518 | ||
519 | ||
520 | /* | |
521 | * Returns: 0 Success | |
522 | * EPERM | |
523 | * EACCES | |
524 | * mac_check_proc_sched:??? | |
525 | */ | |
/*
 * donice
 *
 * Apply nice value 'n' (clamped to [PRIO_MIN, PRIO_MAX]) to process
 * 'chgp' on behalf of 'curp', enforcing the classic BSD permission
 * model: non-superusers may only renice processes they own, and only
 * superuser may lower a nice value.
 */
int
donice(struct proc *curp, struct proc *chgp, int n)
{
	int error = 0;
	kauth_cred_t ucred;     /* caller's credential */
	kauth_cred_t my_cred;   /* target's credential */

	ucred = kauth_cred_proc_ref(curp);
	my_cred = kauth_cred_proc_ref(chgp);

	/* non-root callers must match the target's uid by euid or ruid */
	if (suser(ucred, NULL) && kauth_cred_getruid(ucred) &&
	    kauth_cred_getuid(ucred) != kauth_cred_getuid(my_cred) &&
	    kauth_cred_getruid(ucred) != kauth_cred_getuid(my_cred)) {
		error = EPERM;
		goto out;
	}
	/* silently clamp out-of-range values rather than failing */
	if (n > PRIO_MAX) {
		n = PRIO_MAX;
	}
	if (n < PRIO_MIN) {
		n = PRIO_MIN;
	}
	/* only superuser may decrease (improve) a nice value */
	if (n < chgp->p_nice && suser(ucred, &curp->p_acflag)) {
		error = EACCES;
		goto out;
	}
#if CONFIG_MACF
	error = mac_proc_check_sched(curp, chgp);
	if (error) {
		goto out;
	}
#endif
	proc_lock(chgp);
	chgp->p_nice = n;
	proc_unlock(chgp);
	/* re-evaluate scheduling with the new nice value */
	(void)resetpriority(chgp);
out:
	kauth_cred_unref(&ucred);
	kauth_cred_unref(&my_cred);
	return error;
}
567 | ||
568 | static int | |
569 | set_gpudeny_proc(struct proc *curp, struct proc *targetp, int priority) | |
570 | { | |
571 | int error = 0; | |
572 | kauth_cred_t ucred; | |
573 | kauth_cred_t target_cred; | |
574 | ||
575 | ucred = kauth_cred_get(); | |
576 | target_cred = kauth_cred_proc_ref(targetp); | |
577 | ||
578 | /* TODO: Entitlement instead of uid check */ | |
579 | ||
580 | if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) && | |
581 | kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) && | |
582 | kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) { | |
583 | error = EPERM; | |
584 | goto out; | |
585 | } | |
586 | ||
587 | if (curp == targetp) { | |
588 | error = EPERM; | |
589 | goto out; | |
590 | } | |
591 | ||
592 | #if CONFIG_MACF | |
593 | error = mac_proc_check_sched(curp, targetp); | |
594 | if (error) { | |
595 | goto out; | |
596 | } | |
597 | #endif | |
598 | ||
599 | switch (priority) { | |
600 | case PRIO_DARWIN_GPU_DENY: | |
601 | task_set_gpu_denied(proc_task(targetp), TRUE); | |
602 | break; | |
603 | case PRIO_DARWIN_GPU_ALLOW: | |
604 | task_set_gpu_denied(proc_task(targetp), FALSE); | |
605 | break; | |
606 | default: | |
607 | error = EINVAL; | |
608 | goto out; | |
609 | } | |
610 | ||
611 | out: | |
612 | kauth_cred_unref(&target_cred); | |
613 | return error; | |
614 | } | |
615 | ||
/*
 * proc_set_darwin_role
 *
 * Translate a userspace PRIO_DARWIN_ROLE value into a task role and
 * apply it to the target's task.  Only processes marked as applications
 * may have a role set.
 *
 * Returns:	0	Success
 *		EPERM	Bad credentials and no SETPRIORITY_DARWIN_ROLE priv
 *		ENOTSUP	Target is not an application
 *		proc_darwin_role_to_task_role:EINVAL
 *		mac_proc_check_sched:???
 */
static int
proc_set_darwin_role(proc_t curp, proc_t targetp, int priority)
{
	int error = 0;
	uint32_t flagsp = 0;

	kauth_cred_t ucred, target_cred;

	ucred = kauth_cred_get();
	target_cred = kauth_cred_proc_ref(targetp);

	/* uid mismatch is allowed if the caller holds the explicit privilege */
	if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
	    kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
	    kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
		if (priv_check_cred(ucred, PRIV_SETPRIORITY_DARWIN_ROLE, 0) != 0) {
			error = EPERM;
			goto out;
		}
	}

	/* MAC check only applies to cross-process changes */
	if (curp != targetp) {
#if CONFIG_MACF
		if ((error = mac_proc_check_sched(curp, targetp))) {
			goto out;
		}
#endif
	}

	/* roles are only meaningful for application processes */
	proc_get_darwinbgstate(proc_task(targetp), &flagsp);
	if ((flagsp & PROC_FLAG_APPLICATION) != PROC_FLAG_APPLICATION) {
		error = ENOTSUP;
		goto out;
	}

	integer_t role = 0;

	if ((error = proc_darwin_role_to_task_role(priority, &role))) {
		goto out;
	}

	proc_set_task_policy(proc_task(targetp), TASK_POLICY_ATTRIBUTE,
	    TASK_POLICY_ROLE, role);

out:
	kauth_cred_unref(&target_cred);
	return error;
}
663 | ||
664 | static int | |
665 | proc_get_darwin_role(proc_t curp, proc_t targetp, int *priority) | |
666 | { | |
667 | int error = 0; | |
668 | int role = 0; | |
669 | ||
670 | kauth_cred_t ucred, target_cred; | |
671 | ||
672 | ucred = kauth_cred_get(); | |
673 | target_cred = kauth_cred_proc_ref(targetp); | |
674 | ||
675 | if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) && | |
676 | kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) && | |
677 | kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) { | |
678 | error = EPERM; | |
679 | goto out; | |
680 | } | |
681 | ||
682 | if (curp != targetp) { | |
683 | #if CONFIG_MACF | |
684 | if ((error = mac_proc_check_sched(curp, targetp))) { | |
685 | goto out; | |
686 | } | |
687 | #endif | |
688 | } | |
689 | ||
690 | role = proc_get_task_policy(proc_task(targetp), TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE); | |
691 | ||
692 | *priority = proc_task_role_to_darwin_role(role); | |
693 | ||
694 | out: | |
695 | kauth_cred_unref(&target_cred); | |
696 | return error; | |
697 | } | |
698 | ||
699 | ||
700 | static int | |
701 | get_background_proc(struct proc *curp, struct proc *targetp, int *priority) | |
702 | { | |
703 | int external = 0; | |
704 | int error = 0; | |
705 | kauth_cred_t ucred, target_cred; | |
706 | ||
707 | ucred = kauth_cred_get(); | |
708 | target_cred = kauth_cred_proc_ref(targetp); | |
709 | ||
710 | if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) && | |
711 | kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) && | |
712 | kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) { | |
713 | error = EPERM; | |
714 | goto out; | |
715 | } | |
716 | ||
717 | external = (curp == targetp) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL; | |
718 | ||
719 | *priority = proc_get_task_policy(current_task(), external, TASK_POLICY_DARWIN_BG); | |
720 | ||
721 | out: | |
722 | kauth_cred_unref(&target_cred); | |
723 | return error; | |
724 | } | |
725 | ||
/*
 * do_background_proc
 *
 * Enable or disable the DARWIN_BG task policy on the target process
 * (setpriority(PRIO_DARWIN_PROCESS)).  PRIO_DARWIN_BG enables it;
 * PRIO_DARWIN_NONUI is accepted but ignored for compatibility; any
 * other value disables it.
 *
 * Returns:	0	Success
 *		EPERM	Caller lacks credentials over the target
 *		mac_proc_check_sched:???
 */
static int
do_background_proc(struct proc *curp, struct proc *targetp, int priority)
{
#if !CONFIG_MACF
#pragma unused(curp)
#endif
	int error = 0;
	kauth_cred_t ucred;
	kauth_cred_t target_cred;
	int external;
	int enable;

	ucred = kauth_cred_get();
	target_cred = kauth_cred_proc_ref(targetp);

	if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
	    kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
	    kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
		error = EPERM;
		goto out;
	}

#if CONFIG_MACF
	error = mac_proc_check_sched(curp, targetp);
	if (error) {
		goto out;
	}
#endif

	/* self-changes are "internal" policy; cross-process are "external" */
	external = (curp == targetp) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL;

	switch (priority) {
	case PRIO_DARWIN_BG:
		enable = TASK_POLICY_ENABLE;
		break;
	case PRIO_DARWIN_NONUI:
		/* ignored for compatibility */
		goto out;
	default:
		/* TODO: EINVAL if priority != 0 */
		enable = TASK_POLICY_DISABLE;
		break;
	}

	proc_set_task_policy(proc_task(targetp), external, TASK_POLICY_DARWIN_BG, enable);

out:
	kauth_cred_unref(&target_cred);
	return error;
}
776 | ||
/*
 * do_background_socket
 *
 * Propagate the effective DARWIN_BG ("all sockets background") policy of
 * a process (thread == THREAD_NULL) or a single thread onto the open
 * socket (and NECP client) descriptors in the process's file table.
 * When backgrounding, every socket gets TRAFFIC_MGT_SO_BACKGROUND; when
 * un-backgrounding, the flag is cleared (for a thread, only on sockets
 * attributed to that thread).  Runs under proc_fdlock(); NECP client
 * updates are deferred until after the lock is dropped.
 */
static void
do_background_socket(struct proc *p, thread_t thread)
{
#if SOCKETS
	struct filedesc *fdp;
	struct fileproc *fp;
	int i = 0;
	int background = false;
#if NECP
	/* set when any NECP client changed state; triggers a global update */
	int update_necp = false;
#endif /* NECP */

	proc_fdlock(p);

	/* effective policy of the thread, or of the whole task */
	if (thread != THREAD_NULL) {
		background = proc_get_effective_thread_policy(thread, TASK_POLICY_ALL_SOCKETS_BG);
	} else {
		background = proc_get_effective_task_policy(proc_task(p), TASK_POLICY_ALL_SOCKETS_BG);
	}

	if (background) {
		/*
		 * For PRIO_DARWIN_PROCESS (thread is NULL), simply mark
		 * the sockets with the background flag. There's nothing
		 * to do here for the PRIO_DARWIN_THREAD case.
		 */
		if (thread == THREAD_NULL) {
			fdp = p->p_fd;

			for (i = 0; i < fdp->fd_nfiles; i++) {
				fp = fdp->fd_ofiles[i];
				/* skip empty and reserved (in-flux) slots */
				if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0) {
					continue;
				}
				if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_SOCKET) {
					struct socket *sockp = (struct socket *)fp->f_fglob->fg_data;
					socket_set_traffic_mgt_flags(sockp, TRAFFIC_MGT_SO_BACKGROUND);
					sockp->so_background_thread = NULL;
				}
#if NECP
				else if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_NETPOLICY) {
					if (necp_set_client_as_background(p, fp, background)) {
						update_necp = true;
					}
				}
#endif /* NECP */
			}
		}
	} else {
		/* disable networking IO throttle.
		 * NOTE - It is a known limitation of the current design that we
		 * could potentially clear TRAFFIC_MGT_SO_BACKGROUND bit for
		 * sockets created by other threads within this process.
		 */
		fdp = p->p_fd;
		for (i = 0; i < fdp->fd_nfiles; i++) {
			struct socket *sockp;

			fp = fdp->fd_ofiles[i];
			/* skip empty and reserved (in-flux) slots */
			if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0) {
				continue;
			}
			if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_SOCKET) {
				sockp = (struct socket *)fp->f_fglob->fg_data;
				/* skip if only clearing this thread's sockets */
				if ((thread) && (sockp->so_background_thread != thread)) {
					continue;
				}
				socket_clear_traffic_mgt_flags(sockp, TRAFFIC_MGT_SO_BACKGROUND);
				sockp->so_background_thread = NULL;
			}
#if NECP
			else if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_NETPOLICY) {
				if (necp_set_client_as_background(p, fp, background)) {
					update_necp = true;
				}
			}
#endif /* NECP */
		}
	}

	proc_fdunlock(p);

#if NECP
	/* notify NECP outside the fd lock */
	if (update_necp) {
		necp_update_all_clients();
	}
#endif /* NECP */
#else
#pragma unused(p, thread)
#endif
}
869 | ||
870 | ||
871 | /* | |
872 | * do_background_thread | |
873 | * | |
874 | * Requires: thread reference | |
875 | * | |
876 | * Returns: 0 Success | |
877 | * EPERM Tried to background while in vfork | |
878 | * XXX - todo - does this need a MACF hook? | |
879 | */ | |
880 | static int | |
881 | do_background_thread(thread_t thread, int priority) | |
882 | { | |
883 | struct uthread *ut; | |
884 | int enable, external; | |
885 | int rv = 0; | |
886 | ||
887 | ut = get_bsdthread_info(thread); | |
888 | ||
889 | /* Backgrounding is unsupported for threads in vfork */ | |
890 | if ((ut->uu_flag & UT_VFORK) != 0) { | |
891 | return EPERM; | |
892 | } | |
893 | ||
894 | /* Backgrounding is unsupported for workq threads */ | |
895 | if (thread_is_static_param(thread)) { | |
896 | return EPERM; | |
897 | } | |
898 | ||
899 | /* Not allowed to combine QoS and DARWIN_BG, doing so strips the QoS */ | |
900 | if (thread_has_qos_policy(thread)) { | |
901 | thread_remove_qos_policy(thread); | |
902 | rv = EIDRM; | |
903 | } | |
904 | ||
905 | /* TODO: Fail if someone passes something besides 0 or PRIO_DARWIN_BG */ | |
906 | enable = (priority == PRIO_DARWIN_BG) ? TASK_POLICY_ENABLE : TASK_POLICY_DISABLE; | |
907 | external = (current_thread() == thread) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL; | |
908 | ||
909 | proc_set_thread_policy(thread, external, TASK_POLICY_DARWIN_BG, enable); | |
910 | ||
911 | return rv; | |
912 | } | |
913 | ||
914 | ||
915 | /* | |
916 | * Returns: 0 Success | |
917 | * copyin:EFAULT | |
918 | * dosetrlimit: | |
919 | */ | |
920 | /* ARGSUSED */ | |
921 | int | |
922 | setrlimit(struct proc *p, struct setrlimit_args *uap, __unused int32_t *retval) | |
923 | { | |
924 | struct rlimit alim; | |
925 | int error; | |
926 | ||
927 | if ((error = copyin(uap->rlp, (caddr_t)&alim, | |
928 | sizeof(struct rlimit)))) { | |
929 | return error; | |
930 | } | |
931 | ||
932 | return dosetrlimit(p, uap->which, &alim); | |
933 | } | |
934 | ||
935 | /* | |
936 | * Returns: 0 Success | |
937 | * EINVAL | |
938 | * ENOMEM Cannot copy limit structure | |
939 | * suser:EPERM | |
940 | * | |
941 | * Notes: EINVAL is returned both for invalid arguments, and in the | |
942 | * case that the current usage (e.g. RLIMIT_STACK) is already | |
943 | * in excess of the requested limit. | |
944 | */ | |
int
dosetrlimit(struct proc *p, u_int which, struct rlimit *limp)
{
	struct rlimit *alimp;
	int error;
	kern_return_t kr;
	/* Remember whether the caller asked for strict POSIX semantics. */
	int posix = (which & _RLIMIT_POSIX_FLAG) ? 1 : 0;

	/* Mask out POSIX flag, saved above */
	which &= ~_RLIMIT_POSIX_FLAG;

	if (which >= RLIM_NLIMITS) {
		return EINVAL;
	}

	alimp = &p->p_rlimit[which];
	if (limp->rlim_cur > limp->rlim_max) {
		return EINVAL;
	}

	/* Raising either value above the current hard limit requires root. */
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max) {
		if ((error = suser(kauth_cred_get(), &p->p_acflag))) {
			return error;
		}
	}

	/* Serialize limit changes against concurrent setrlimit callers. */
	proc_limitblock(p);

	/* Get a private plimit copy if the current one is shared. */
	if ((error = proc_limitreplace(p)) != 0) {
		proc_limitunblock(p);
		return error;
	}

	/* proc_limitreplace() may have swapped p_limit; re-fetch the slot. */
	alimp = &p->p_rlimit[which];

	switch (which) {
	case RLIMIT_CPU:
		if (limp->rlim_cur == RLIM_INFINITY) {
			/* No CPU limit: disarm the vtimer and clear the deadline. */
			task_vtimer_clear(p->task, TASK_VTIMER_RLIM);
			timerclear(&p->p_rlim_cpu);
		} else {
			task_absolutetime_info_data_t tinfo;
			mach_msg_type_number_t count;
			struct timeval ttv, tv;
			clock_sec_t tv_sec;
			clock_usec_t tv_usec;

			/* Compute CPU time already consumed (user + system). */
			count = TASK_ABSOLUTETIME_INFO_COUNT;
			task_info(p->task, TASK_ABSOLUTETIME_INFO,
			    (task_info_t)&tinfo, &count);
			absolutetime_to_microtime(tinfo.total_user + tinfo.total_system,
			    &tv_sec, &tv_usec);
			ttv.tv_sec = tv_sec;
			ttv.tv_usec = tv_usec;

			/* Clamp so the limit fits in tv_sec. */
			tv.tv_sec = (limp->rlim_cur > __INT_MAX__ ? __INT_MAX__ : limp->rlim_cur);
			tv.tv_usec = 0;
			/* Remaining allowance = requested limit - already consumed. */
			timersub(&tv, &ttv, &p->p_rlim_cpu);

			timerclear(&tv);
			if (timercmp(&p->p_rlim_cpu, &tv, >)) {
				/* Time still remaining: arm the rlimit vtimer. */
				task_vtimer_set(p->task, TASK_VTIMER_RLIM);
			} else {
				/* Limit already exceeded: disarm and signal now. */
				task_vtimer_clear(p->task, TASK_VTIMER_RLIM);

				timerclear(&p->p_rlim_cpu);

				psignal(p, SIGXCPU);
			}
		}
		break;

	case RLIMIT_DATA:
		/* Silently clip the data-segment limit to the system maximum. */
		if (limp->rlim_cur > maxdmap) {
			limp->rlim_cur = maxdmap;
		}
		if (limp->rlim_max > maxdmap) {
			limp->rlim_max = maxdmap;
		}
		break;

	case RLIMIT_STACK:
		if (p->p_lflag & P_LCUSTOM_STACK) {
			/* Process has a custom stack set - rlimit cannot be used to change it */
			error = EINVAL;
			goto out;
		}

		/* Disallow illegal stack size instead of clipping */
		if (limp->rlim_cur > maxsmap ||
		    limp->rlim_max > maxsmap) {
			if (posix) {
				error = EINVAL;
				goto out;
			} else {
				/*
				 * 4797860 - workaround poorly written installers by
				 * doing previous implementation (< 10.5) when caller
				 * is non-POSIX conforming.
				 */
				if (limp->rlim_cur > maxsmap) {
					limp->rlim_cur = maxsmap;
				}
				if (limp->rlim_max > maxsmap) {
					limp->rlim_max = maxsmap;
				}
			}
		}

		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If stack limit is going
		 * up make more accessible, if going down make inaccessible.
		 */
		if (limp->rlim_cur > alimp->rlim_cur) {
			user_addr_t addr;
			user_size_t size;

			/* grow stack */
			size = round_page_64(limp->rlim_cur);
			size -= round_page_64(alimp->rlim_cur);

			/* Newly-accessible pages sit just below the old limit. */
			addr = p->user_stack - round_page_64(limp->rlim_cur);
			kr = mach_vm_protect(current_map(),
			    addr, size,
			    FALSE, VM_PROT_DEFAULT);
			if (kr != KERN_SUCCESS) {
				error = EINVAL;
				goto out;
			}
		} else if (limp->rlim_cur < alimp->rlim_cur) {
			user_addr_t addr;
			user_size_t size;
			user_addr_t cur_sp;

			/* shrink stack */

			/*
			 * First check if new stack limit would agree
			 * with current stack usage.
			 * Get the current thread's stack pointer...
			 */
			cur_sp = thread_adjuserstack(current_thread(),
			    0);
			if (cur_sp <= p->user_stack &&
			    cur_sp > (p->user_stack -
			    round_page_64(alimp->rlim_cur))) {
				/* stack pointer is in main stack */
				if (cur_sp <= (p->user_stack -
				    round_page_64(limp->rlim_cur))) {
					/*
					 * New limit would cause
					 * current usage to be invalid:
					 * reject new limit.
					 */
					error = EINVAL;
					goto out;
				}
			} else {
				/* not on the main stack: reject */
				error = EINVAL;
				goto out;
			}

			size = round_page_64(alimp->rlim_cur);
			size -= round_page_64(limp->rlim_cur);

			addr = p->user_stack - round_page_64(alimp->rlim_cur);

			/* Make the now-out-of-bounds pages inaccessible. */
			kr = mach_vm_protect(current_map(),
			    addr, size,
			    FALSE, VM_PROT_NONE);
			if (kr != KERN_SUCCESS) {
				error = EINVAL;
				goto out;
			}
		} else {
			/* no change ... */
		}
		break;

	case RLIMIT_NOFILE:
		/*
		 * Only root can set the maxfiles limits, as it is
		 * systemwide resource.  If we are expecting POSIX behavior,
		 * instead of clamping the value, return EINVAL.  We do this
		 * because historically, people have been able to attempt to
		 * set RLIM_INFINITY to get "whatever the maximum is".
		 */
		if (kauth_cred_issuser(kauth_cred_get())) {
			if (limp->rlim_cur != alimp->rlim_cur &&
			    limp->rlim_cur > (rlim_t)maxfiles) {
				if (posix) {
					error = EINVAL;
					goto out;
				}
				limp->rlim_cur = maxfiles;
			}
			if (limp->rlim_max != alimp->rlim_max &&
			    limp->rlim_max > (rlim_t)maxfiles) {
				limp->rlim_max = maxfiles;
			}
		} else {
			/* Non-root callers are capped at the per-process maximum. */
			if (limp->rlim_cur != alimp->rlim_cur &&
			    limp->rlim_cur > (rlim_t)maxfilesperproc) {
				if (posix) {
					error = EINVAL;
					goto out;
				}
				limp->rlim_cur = maxfilesperproc;
			}
			if (limp->rlim_max != alimp->rlim_max &&
			    limp->rlim_max > (rlim_t)maxfilesperproc) {
				limp->rlim_max = maxfilesperproc;
			}
		}
		break;

	case RLIMIT_NPROC:
		/*
		 * Only root can set to the maxproc limits, as it is
		 * systemwide resource; all others are limited to
		 * maxprocperuid (presumably less than maxproc).
		 */
		if (kauth_cred_issuser(kauth_cred_get())) {
			if (limp->rlim_cur > (rlim_t)maxproc) {
				limp->rlim_cur = maxproc;
			}
			if (limp->rlim_max > (rlim_t)maxproc) {
				limp->rlim_max = maxproc;
			}
		} else {
			if (limp->rlim_cur > (rlim_t)maxprocperuid) {
				limp->rlim_cur = maxprocperuid;
			}
			if (limp->rlim_max > (rlim_t)maxprocperuid) {
				limp->rlim_max = maxprocperuid;
			}
		}
		break;

	case RLIMIT_MEMLOCK:
		/*
		 * Tell the Mach VM layer about the new limit value.
		 */

		vm_map_set_user_wire_limit(current_map(), limp->rlim_cur);
		break;
	} /* switch... */
	proc_lock(p);
	/* Commit the (possibly clipped) new limits under the proc lock. */
	*alimp = *limp;
	proc_unlock(p);
	error = 0;
out:
	proc_limitunblock(p);
	return error;
}
1203 | ||
1204 | /* ARGSUSED */ | |
1205 | int | |
1206 | getrlimit(struct proc *p, struct getrlimit_args *uap, __unused int32_t *retval) | |
1207 | { | |
1208 | struct rlimit lim = {}; | |
1209 | ||
1210 | /* | |
1211 | * Take out flag now in case we need to use it to trigger variant | |
1212 | * behaviour later. | |
1213 | */ | |
1214 | uap->which &= ~_RLIMIT_POSIX_FLAG; | |
1215 | ||
1216 | if (uap->which >= RLIM_NLIMITS) { | |
1217 | return EINVAL; | |
1218 | } | |
1219 | proc_limitget(p, uap->which, &lim); | |
1220 | return copyout((caddr_t)&lim, | |
1221 | uap->rlp, sizeof(struct rlimit)); | |
1222 | } | |
1223 | ||
1224 | /* | |
1225 | * Transform the running time and tick information in proc p into user, | |
1226 | * system, and interrupt time usage. | |
1227 | */ | |
1228 | /* No lock on proc is held for this.. */ | |
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp, struct timeval *ip)
{
	task_t task;

	/* Start the accumulators at zero; interrupt time (ip) is optional. */
	timerclear(up);
	timerclear(sp);
	if (ip != NULL) {
		timerclear(ip);
	}

	task = p->task;
	if (task) {
		mach_task_basic_info_data_t tinfo;
		task_thread_times_info_data_t ttimesinfo;
		task_events_info_data_t teventsinfo;
		mach_msg_type_number_t task_info_count, task_ttimes_count;
		mach_msg_type_number_t task_events_count;
		struct timeval ut, st;

		/* Accumulate task-level user/system times from the basic info. */
		task_info_count = MACH_TASK_BASIC_INFO_COUNT;
		task_info(task, MACH_TASK_BASIC_INFO,
		    (task_info_t)&tinfo, &task_info_count);
		ut.tv_sec = tinfo.user_time.seconds;
		ut.tv_usec = tinfo.user_time.microseconds;
		st.tv_sec = tinfo.system_time.seconds;
		st.tv_usec = tinfo.system_time.microseconds;
		timeradd(&ut, up, up);
		timeradd(&st, sp, sp);

		/* Add the per-thread times reported separately by Mach. */
		task_ttimes_count = TASK_THREAD_TIMES_INFO_COUNT;
		task_info(task, TASK_THREAD_TIMES_INFO,
		    (task_info_t)&ttimesinfo, &task_ttimes_count);

		ut.tv_sec = ttimesinfo.user_time.seconds;
		ut.tv_usec = ttimesinfo.user_time.microseconds;
		st.tv_sec = ttimesinfo.system_time.seconds;
		st.tv_usec = ttimesinfo.system_time.microseconds;
		timeradd(&ut, up, up);
		timeradd(&st, sp, sp);

		/* Fetch fault/pagein/context-switch event counters. */
		task_events_count = TASK_EVENTS_INFO_COUNT;
		task_info(task, TASK_EVENTS_INFO,
		    (task_info_t)&teventsinfo, &task_events_count);

		/*
		 * No need to lock "p":  this does not need to be
		 * completely consistent, right ?
		 */
		/* Minor faults = total faults minus those that caused pageins. */
		p->p_stats->p_ru.ru_minflt = (teventsinfo.faults -
		    teventsinfo.pageins);
		p->p_stats->p_ru.ru_majflt = teventsinfo.pageins;
		/* Involuntary switches = total switches minus voluntary ones. */
		p->p_stats->p_ru.ru_nivcsw = (teventsinfo.csw -
		    p->p_stats->p_ru.ru_nvcsw);
		if (p->p_stats->p_ru.ru_nivcsw < 0) {
			/* Guard against a stale/racy ru_nvcsw exceeding csw. */
			p->p_stats->p_ru.ru_nivcsw = 0;
		}

		p->p_stats->p_ru.ru_maxrss = tinfo.resident_size_max;
	}
}
1290 | ||
1291 | __private_extern__ void munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p); | |
1292 | __private_extern__ void munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p); | |
1293 | ||
1294 | /* ARGSUSED */ | |
int
getrusage(struct proc *p, struct getrusage_args *uap, __unused int32_t *retval)
{
	struct rusage *rup, rubuf;
	struct user64_rusage rubuf64 = {};
	struct user32_rusage rubuf32 = {};
	size_t retsize = sizeof(rubuf);         /* default: 32 bits */
	caddr_t retbuf = (caddr_t)&rubuf;       /* default: 32 bits */
	struct timeval utime;
	struct timeval stime;


	switch (uap->who) {
	case RUSAGE_SELF:
		/* Refresh user/system times from Mach before snapshotting. */
		calcru(p, &utime, &stime, NULL);
		proc_lock(p);
		rup = &p->p_stats->p_ru;
		rup->ru_utime = utime;
		rup->ru_stime = stime;

		/* Snapshot under the proc lock so the copy is consistent. */
		rubuf = *rup;
		proc_unlock(p);

		break;

	case RUSAGE_CHILDREN:
		/* Accumulated usage of reaped children; snapshot under lock. */
		proc_lock(p);
		rup = &p->p_stats->p_cru;
		rubuf = *rup;
		proc_unlock(p);
		break;

	default:
		return EINVAL;
	}
	/* Convert the snapshot to the caller's ABI layout (64- or 32-bit). */
	if (IS_64BIT_PROCESS(p)) {
		retsize = sizeof(rubuf64);
		retbuf = (caddr_t)&rubuf64;
		munge_user64_rusage(&rubuf, &rubuf64);
	} else {
		retsize = sizeof(rubuf32);
		retbuf = (caddr_t)&rubuf32;
		munge_user32_rusage(&rubuf, &rubuf32);
	}

	return copyout(retbuf, uap->rusage, retsize);
}
1342 | ||
1343 | void | |
1344 | ruadd(struct rusage *ru, struct rusage *ru2) | |
1345 | { | |
1346 | long *ip, *ip2; | |
1347 | long i; | |
1348 | ||
1349 | timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime); | |
1350 | timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime); | |
1351 | if (ru->ru_maxrss < ru2->ru_maxrss) { | |
1352 | ru->ru_maxrss = ru2->ru_maxrss; | |
1353 | } | |
1354 | ip = &ru->ru_first; ip2 = &ru2->ru_first; | |
1355 | for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--) { | |
1356 | *ip++ += *ip2++; | |
1357 | } | |
1358 | } | |
1359 | ||
1360 | /* | |
1361 | * Add the rusage stats of child in parent. | |
1362 | * | |
1363 | * It adds rusage statistics of child process and statistics of all its | |
1364 | * children to its parent. | |
1365 | * | |
1366 | * Note: proc lock of parent should be held while calling this function. | |
1367 | */ | |
1368 | void | |
1369 | update_rusage_info_child(struct rusage_info_child *ri, rusage_info_current *ri_current) | |
1370 | { | |
1371 | ri->ri_child_user_time += (ri_current->ri_user_time + | |
1372 | ri_current->ri_child_user_time); | |
1373 | ri->ri_child_system_time += (ri_current->ri_system_time + | |
1374 | ri_current->ri_child_system_time); | |
1375 | ri->ri_child_pkg_idle_wkups += (ri_current->ri_pkg_idle_wkups + | |
1376 | ri_current->ri_child_pkg_idle_wkups); | |
1377 | ri->ri_child_interrupt_wkups += (ri_current->ri_interrupt_wkups + | |
1378 | ri_current->ri_child_interrupt_wkups); | |
1379 | ri->ri_child_pageins += (ri_current->ri_pageins + | |
1380 | ri_current->ri_child_pageins); | |
1381 | ri->ri_child_elapsed_abstime += ((ri_current->ri_proc_exit_abstime - | |
1382 | ri_current->ri_proc_start_abstime) + ri_current->ri_child_elapsed_abstime); | |
1383 | } | |
1384 | ||
1385 | void | |
1386 | proc_limitget(proc_t p, int which, struct rlimit * limp) | |
1387 | { | |
1388 | proc_list_lock(); | |
1389 | limp->rlim_cur = p->p_rlimit[which].rlim_cur; | |
1390 | limp->rlim_max = p->p_rlimit[which].rlim_max; | |
1391 | proc_list_unlock(); | |
1392 | } | |
1393 | ||
1394 | ||
/*
 * Drop this process's reference on its plimit structure(s), freeing
 * them when the last reference goes away.  "exiting" also releases
 * the saved old limit kept by proc_limitreplace().
 */
void
proc_limitdrop(proc_t p, int exiting)
{
	struct plimit * freelim = NULL;
	struct plimit * freeoldlim = NULL;

	proc_list_lock();

	/* Drop our reference; remember the plimit if we held the last one. */
	if (--p->p_limit->pl_refcnt == 0) {
		freelim = p->p_limit;
		p->p_limit = NULL;
	}
	/* On exit, also release the saved old limit (see proc_limitreplace). */
	if ((exiting != 0) && (p->p_olimit != NULL) && (--p->p_olimit->pl_refcnt == 0)) {
		freeoldlim = p->p_olimit;
		p->p_olimit = NULL;
	}

	proc_list_unlock();
	/* Free outside the proc-list lock. */
	if (freelim != NULL) {
		FREE_ZONE(freelim, sizeof *p->p_limit, M_PLIMIT);
	}
	if (freeoldlim != NULL) {
		FREE_ZONE(freeoldlim, sizeof *p->p_olimit, M_PLIMIT);
	}
}
1420 | ||
1421 | ||
/*
 * At fork time the child shares the parent's plimit structure;
 * take an additional reference on it under the proc-list lock.
 */
void
proc_limitfork(proc_t parent, proc_t child)
{
	proc_list_lock();
	child->p_limit = parent->p_limit;
	child->p_limit->pl_refcnt++;
	child->p_olimit = NULL;
	proc_list_unlock();
}
1431 | ||
/*
 * Block until no other thread has a limit change in progress for this
 * process, then mark one in progress (cleared by proc_limitunblock()).
 */
void
proc_limitblock(proc_t p)
{
	proc_lock(p);
	while (p->p_lflag & P_LLIMCHANGE) {
		/* Another changer is active: note that we're waiting and sleep. */
		p->p_lflag |= P_LLIMWAIT;
		msleep(&p->p_olimit, &p->p_mlock, 0, "proc_limitblock", NULL);
	}
	p->p_lflag |= P_LLIMCHANGE;
	proc_unlock(p);
}
1443 | ||
1444 | ||
/*
 * End a limit-change critical section started by proc_limitblock(),
 * waking any threads that were waiting to start their own change.
 */
void
proc_limitunblock(proc_t p)
{
	proc_lock(p);
	p->p_lflag &= ~P_LLIMCHANGE;
	if (p->p_lflag & P_LLIMWAIT) {
		/* Wake waiters sleeping on &p->p_olimit in proc_limitblock(). */
		p->p_lflag &= ~P_LLIMWAIT;
		wakeup(&p->p_olimit);
	}
	proc_unlock(p);
}
1456 | ||
1457 | /* This is called behind serialization provided by proc_limitblock/unlbock */ | |
/*
 * Give the process a private copy of its plimit if the current one is
 * shared with other processes (copy-on-write of the limit structure).
 *
 * Returns: 0        Success (already private, or copy installed)
 *          ENOMEM   Cannot allocate the copy
 */
int
proc_limitreplace(proc_t p)
{
	struct plimit *copy;


	proc_list_lock();

	/* Sole reference: we may modify the plimit in place, no copy needed. */
	if (p->p_limit->pl_refcnt == 1) {
		proc_list_unlock();
		return 0;
	}

	proc_list_unlock();

	/* Allocate outside the lock; M_WAITOK may block. */
	MALLOC_ZONE(copy, struct plimit *,
	    sizeof(struct plimit), M_PLIMIT, M_WAITOK);
	if (copy == NULL) {
		return ENOMEM;
	}

	proc_list_lock();
	bcopy(p->p_limit->pl_rlimit, copy->pl_rlimit,
	    sizeof(struct rlimit) * RLIM_NLIMITS);
	copy->pl_refcnt = 1;
	/* hang on to reference to old till process exits */
	p->p_olimit = p->p_limit;
	p->p_limit = copy;
	proc_list_unlock();

	return 0;
}
1490 | ||
1491 | static int | |
1492 | iopolicysys_disk(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param); | |
1493 | static int | |
1494 | iopolicysys_vfs_hfs_case_sensitivity(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param); | |
1495 | static int | |
1496 | iopolicysys_vfs_atime_updates(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param); | |
1497 | static int | |
1498 | iopolicysys_vfs_materialize_dataless_files(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param); | |
1499 | static int | |
1500 | iopolicysys_vfs_statfs_no_data_volume(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param); | |
1501 | ||
1502 | /* | |
1503 | * iopolicysys | |
1504 | * | |
1505 | * Description: System call MUX for use in manipulating I/O policy attributes of the current process or thread | |
1506 | * | |
1507 | * Parameters: cmd Policy command | |
1508 | * arg Pointer to policy arguments | |
1509 | * | |
1510 | * Returns: 0 Success | |
1511 | * EINVAL Invalid command or invalid policy arguments | |
1512 | * | |
1513 | */ | |
1514 | int | |
1515 | iopolicysys(struct proc *p, struct iopolicysys_args *uap, int32_t *retval) | |
1516 | { | |
1517 | int error = 0; | |
1518 | struct _iopol_param_t iop_param; | |
1519 | ||
1520 | if ((error = copyin(uap->arg, &iop_param, sizeof(iop_param))) != 0) { | |
1521 | goto out; | |
1522 | } | |
1523 | ||
1524 | switch (iop_param.iop_iotype) { | |
1525 | case IOPOL_TYPE_DISK: | |
1526 | error = iopolicysys_disk(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param); | |
1527 | if (error == EIDRM) { | |
1528 | *retval = -2; | |
1529 | error = 0; | |
1530 | } | |
1531 | if (error) { | |
1532 | goto out; | |
1533 | } | |
1534 | break; | |
1535 | case IOPOL_TYPE_VFS_HFS_CASE_SENSITIVITY: | |
1536 | error = iopolicysys_vfs_hfs_case_sensitivity(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param); | |
1537 | if (error) { | |
1538 | goto out; | |
1539 | } | |
1540 | break; | |
1541 | case IOPOL_TYPE_VFS_ATIME_UPDATES: | |
1542 | error = iopolicysys_vfs_atime_updates(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param); | |
1543 | if (error) { | |
1544 | goto out; | |
1545 | } | |
1546 | break; | |
1547 | case IOPOL_TYPE_VFS_MATERIALIZE_DATALESS_FILES: | |
1548 | error = iopolicysys_vfs_materialize_dataless_files(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param); | |
1549 | if (error) { | |
1550 | goto out; | |
1551 | } | |
1552 | break; | |
1553 | case IOPOL_TYPE_VFS_STATFS_NO_DATA_VOLUME: | |
1554 | error = iopolicysys_vfs_statfs_no_data_volume(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param); | |
1555 | if (error) { | |
1556 | goto out; | |
1557 | } | |
1558 | default: | |
1559 | error = EINVAL; | |
1560 | goto out; | |
1561 | } | |
1562 | ||
1563 | /* Individual iotype handlers are expected to update iop_param, if requested with a GET command */ | |
1564 | if (uap->cmd == IOPOL_CMD_GET) { | |
1565 | error = copyout((caddr_t)&iop_param, uap->arg, sizeof(iop_param)); | |
1566 | if (error) { | |
1567 | goto out; | |
1568 | } | |
1569 | } | |
1570 | ||
1571 | out: | |
1572 | return error; | |
1573 | } | |
1574 | ||
/*
 * Handle IOPOL_TYPE_DISK commands: get/set the disk I/O throttling
 * policy for the current process, current thread, or the process's
 * Darwin-background override.
 */
static int
iopolicysys_disk(struct proc *p __unused, int cmd, int scope, int policy, struct _iopol_param_t *iop_param)
{
	int error = 0;
	thread_t thread;
	int policy_flavor;

	/* Validate scope */
	switch (scope) {
	case IOPOL_SCOPE_PROCESS:
		/* THREAD_NULL selects the task-wide policy in the command step. */
		thread = THREAD_NULL;
		policy_flavor = TASK_POLICY_IOPOL;
		break;

	case IOPOL_SCOPE_THREAD:
		thread = current_thread();
		policy_flavor = TASK_POLICY_IOPOL;

		/* Not allowed to combine QoS and (non-PASSIVE) IO policy, doing so strips the QoS */
		if (cmd == IOPOL_CMD_SET && thread_has_qos_policy(thread)) {
			switch (policy) {
			case IOPOL_DEFAULT:
			case IOPOL_PASSIVE:
				break;
			case IOPOL_UTILITY:
			case IOPOL_THROTTLE:
			case IOPOL_IMPORTANT:
			case IOPOL_STANDARD:
				if (!thread_is_static_param(thread)) {
					thread_remove_qos_policy(thread);
					/*
					 * This is not an error case, this is to return a marker to user-space that
					 * we stripped the thread of its QoS class.
					 */
					error = EIDRM;
					break;
				}
			/* otherwise, fall through to the error case. */
			default:
				error = EINVAL;
				goto out;
			}
		}
		break;

	case IOPOL_SCOPE_DARWIN_BG:
#if CONFIG_EMBEDDED
		/* Embedded doesn't want this as BG is always IOPOL_THROTTLE */
		error = ENOTSUP;
		goto out;
#else /* CONFIG_EMBEDDED */
		thread = THREAD_NULL;
		policy_flavor = TASK_POLICY_DARWIN_BG_IOPOL;
		break;
#endif /* CONFIG_EMBEDDED */

	default:
		error = EINVAL;
		goto out;
	}

	/* Validate policy */
	if (cmd == IOPOL_CMD_SET) {
		switch (policy) {
		case IOPOL_DEFAULT:
			if (scope == IOPOL_SCOPE_DARWIN_BG) {
				/* the current default BG throttle level is UTILITY */
				policy = IOPOL_UTILITY;
			} else {
				policy = IOPOL_IMPORTANT;
			}
			break;
		case IOPOL_UTILITY:
		/* fall-through */
		case IOPOL_THROTTLE:
			/* These levels are OK */
			break;
		case IOPOL_IMPORTANT:
		/* fall-through */
		case IOPOL_STANDARD:
		/* fall-through */
		case IOPOL_PASSIVE:
			if (scope == IOPOL_SCOPE_DARWIN_BG) {
				/* These levels are invalid for BG */
				error = EINVAL;
				goto out;
			} else {
				/* OK for other scopes */
			}
			break;
		default:
			error = EINVAL;
			goto out;
		}
	}

	/* Perform command */
	switch (cmd) {
	case IOPOL_CMD_SET:
		if (thread != THREAD_NULL) {
			proc_set_thread_policy(thread, TASK_POLICY_INTERNAL, policy_flavor, policy);
		} else {
			proc_set_task_policy(current_task(), TASK_POLICY_INTERNAL, policy_flavor, policy);
		}
		break;
	case IOPOL_CMD_GET:
		if (thread != THREAD_NULL) {
			policy = proc_get_thread_policy(thread, TASK_POLICY_INTERNAL, policy_flavor);
		} else {
			policy = proc_get_task_policy(current_task(), TASK_POLICY_INTERNAL, policy_flavor);
		}
		/* Report the effective policy back to the caller's parameter block. */
		iop_param->iop_policy = policy;
		break;
	default:
		error = EINVAL;        /* unknown command */
		break;
	}

out:
	return error;
}
1696 | ||
/*
 * Handle IOPOL_TYPE_VFS_HFS_CASE_SENSITIVITY commands: get/set the
 * per-process "force HFS case-sensitive lookups" policy bit.
 */
static int
iopolicysys_vfs_hfs_case_sensitivity(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param)
{
	int error = 0;

	/* Validate scope */
	switch (scope) {
	case IOPOL_SCOPE_PROCESS:
		/* Only process OK */
		break;
	default:
		error = EINVAL;
		goto out;
	}

	/* Validate policy */
	if (cmd == IOPOL_CMD_SET) {
		switch (policy) {
		case IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT:
		/* fall-through */
		case IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE:
			/* These policies are OK */
			break;
		default:
			error = EINVAL;
			goto out;
		}
	}

	/* Perform command */
	switch (cmd) {
	case IOPOL_CMD_SET:
		if (0 == kauth_cred_issuser(kauth_cred_get())) {
			/* If it's a non-root process, it needs to have the entitlement to set the policy */
			boolean_t entitled = FALSE;
			entitled = IOTaskHasEntitlement(current_task(), "com.apple.private.iopol.case_sensitivity");
			if (!entitled) {
				error = EPERM;
				goto out;
			}
		}

		switch (policy) {
		case IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT:
			/* Clear the force-case-sensitive bit atomically. */
			OSBitAndAtomic16(~((uint32_t)P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY), &p->p_vfs_iopolicy);
			break;
		case IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE:
			/* Set the force-case-sensitive bit atomically. */
			OSBitOrAtomic16((uint32_t)P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY, &p->p_vfs_iopolicy);
			break;
		default:
			error = EINVAL;
			goto out;
		}

		break;
	case IOPOL_CMD_GET:
		/* Derive the reported policy from the per-process flag. */
		iop_param->iop_policy = (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY)
		    ? IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE
		    : IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT;
		break;
	default:
		error = EINVAL;        /* unknown command */
		break;
	}

out:
	return error;
}
1765 | ||
1766 | static inline int | |
1767 | get_thread_atime_policy(struct uthread *ut) | |
1768 | { | |
1769 | return (ut->uu_flag & UT_ATIME_UPDATE) ? IOPOL_ATIME_UPDATES_OFF : IOPOL_ATIME_UPDATES_DEFAULT; | |
1770 | } | |
1771 | ||
1772 | static inline void | |
1773 | set_thread_atime_policy(struct uthread *ut, int policy) | |
1774 | { | |
1775 | if (policy == IOPOL_ATIME_UPDATES_OFF) { | |
1776 | ut->uu_flag |= UT_ATIME_UPDATE; | |
1777 | } else { | |
1778 | ut->uu_flag &= ~UT_ATIME_UPDATE; | |
1779 | } | |
1780 | } | |
1781 | ||
1782 | static inline void | |
1783 | set_task_atime_policy(struct proc *p, int policy) | |
1784 | { | |
1785 | if (policy == IOPOL_ATIME_UPDATES_OFF) { | |
1786 | OSBitOrAtomic16((uint16_t)P_VFS_IOPOLICY_ATIME_UPDATES, &p->p_vfs_iopolicy); | |
1787 | } else { | |
1788 | OSBitAndAtomic16(~((uint16_t)P_VFS_IOPOLICY_ATIME_UPDATES), &p->p_vfs_iopolicy); | |
1789 | } | |
1790 | } | |
1791 | ||
1792 | static inline int | |
1793 | get_task_atime_policy(struct proc *p) | |
1794 | { | |
1795 | return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_ATIME_UPDATES) ? IOPOL_ATIME_UPDATES_OFF : IOPOL_ATIME_UPDATES_DEFAULT; | |
1796 | } | |
1797 | ||
/*
 * Handle IOPOL_TYPE_VFS_ATIME_UPDATES commands: get/set the
 * atime-update suppression policy at thread or process scope.
 */
static int
iopolicysys_vfs_atime_updates(struct proc *p __unused, int cmd, int scope, int policy, struct _iopol_param_t *iop_param)
{
	int error = 0;
	thread_t thread;

	/* Validate scope */
	switch (scope) {
	case IOPOL_SCOPE_THREAD:
		thread = current_thread();
		break;
	case IOPOL_SCOPE_PROCESS:
		/* THREAD_NULL selects the per-process policy below. */
		thread = THREAD_NULL;
		break;
	default:
		error = EINVAL;
		goto out;
	}

	/* Validate policy */
	if (cmd == IOPOL_CMD_SET) {
		switch (policy) {
		case IOPOL_ATIME_UPDATES_DEFAULT:
		case IOPOL_ATIME_UPDATES_OFF:
			break;
		default:
			error = EINVAL;
			goto out;
		}
	}

	/* Perform command */
	switch (cmd) {
	case IOPOL_CMD_SET:
		if (thread != THREAD_NULL) {
			set_thread_atime_policy(get_bsdthread_info(thread), policy);
		} else {
			set_task_atime_policy(p, policy);
		}
		break;
	case IOPOL_CMD_GET:
		if (thread != THREAD_NULL) {
			policy = get_thread_atime_policy(get_bsdthread_info(thread));
		} else {
			policy = get_task_atime_policy(p);
		}
		/* Return the effective policy in the caller's parameter block. */
		iop_param->iop_policy = policy;
		break;
	default:
		error = EINVAL;        /* unknown command */
		break;
	}

out:
	return error;
}
1854 | ||
1855 | static inline int | |
1856 | get_thread_materialize_policy(struct uthread *ut) | |
1857 | { | |
1858 | if (ut->uu_flag & UT_NSPACE_NODATALESSFAULTS) { | |
1859 | return IOPOL_MATERIALIZE_DATALESS_FILES_OFF; | |
1860 | } else if (ut->uu_flag & UT_NSPACE_FORCEDATALESSFAULTS) { | |
1861 | return IOPOL_MATERIALIZE_DATALESS_FILES_ON; | |
1862 | } | |
1863 | /* Default thread behavior is "inherit process behavior". */ | |
1864 | return IOPOL_MATERIALIZE_DATALESS_FILES_DEFAULT; | |
1865 | } | |
1866 | ||
1867 | static inline void | |
1868 | set_thread_materialize_policy(struct uthread *ut, int policy) | |
1869 | { | |
1870 | if (policy == IOPOL_MATERIALIZE_DATALESS_FILES_OFF) { | |
1871 | ut->uu_flag &= ~UT_NSPACE_FORCEDATALESSFAULTS; | |
1872 | ut->uu_flag |= UT_NSPACE_NODATALESSFAULTS; | |
1873 | } else if (policy == IOPOL_MATERIALIZE_DATALESS_FILES_ON) { | |
1874 | ut->uu_flag &= ~UT_NSPACE_NODATALESSFAULTS; | |
1875 | ut->uu_flag |= UT_NSPACE_FORCEDATALESSFAULTS; | |
1876 | } else { | |
1877 | ut->uu_flag &= ~(UT_NSPACE_NODATALESSFAULTS | UT_NSPACE_FORCEDATALESSFAULTS); | |
1878 | } | |
1879 | } | |
1880 | ||
1881 | static inline void | |
1882 | set_proc_materialize_policy(struct proc *p, int policy) | |
1883 | { | |
1884 | if (policy == IOPOL_MATERIALIZE_DATALESS_FILES_DEFAULT) { | |
1885 | /* | |
1886 | * Caller has specified "use the default policy". | |
1887 | * The default policy is to NOT materialize dataless | |
1888 | * files. | |
1889 | */ | |
1890 | policy = IOPOL_MATERIALIZE_DATALESS_FILES_OFF; | |
1891 | } | |
1892 | if (policy == IOPOL_MATERIALIZE_DATALESS_FILES_ON) { | |
1893 | OSBitOrAtomic16((uint16_t)P_VFS_IOPOLICY_MATERIALIZE_DATALESS_FILES, &p->p_vfs_iopolicy); | |
1894 | } else { | |
1895 | OSBitAndAtomic16(~((uint16_t)P_VFS_IOPOLICY_MATERIALIZE_DATALESS_FILES), &p->p_vfs_iopolicy); | |
1896 | } | |
1897 | } | |
1898 | ||
1899 | static int | |
1900 | get_proc_materialize_policy(struct proc *p) | |
1901 | { | |
1902 | return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_MATERIALIZE_DATALESS_FILES) ? IOPOL_MATERIALIZE_DATALESS_FILES_ON : IOPOL_MATERIALIZE_DATALESS_FILES_OFF; | |
1903 | } | |
1904 | ||
/*
 * Handle IOPOL_TYPE_VFS_MATERIALIZE_DATALESS_FILES commands: get/set
 * the dataless-file materialization policy at thread or process scope.
 */
static int
iopolicysys_vfs_materialize_dataless_files(struct proc *p __unused, int cmd, int scope, int policy, struct _iopol_param_t *iop_param)
{
	int error = 0;
	thread_t thread;

	/* Validate scope */
	switch (scope) {
	case IOPOL_SCOPE_THREAD:
		thread = current_thread();
		break;
	case IOPOL_SCOPE_PROCESS:
		/* THREAD_NULL selects the per-process policy below. */
		thread = THREAD_NULL;
		break;
	default:
		error = EINVAL;
		goto out;
	}

	/* Validate policy */
	if (cmd == IOPOL_CMD_SET) {
		switch (policy) {
		case IOPOL_MATERIALIZE_DATALESS_FILES_DEFAULT:
		case IOPOL_MATERIALIZE_DATALESS_FILES_OFF:
		case IOPOL_MATERIALIZE_DATALESS_FILES_ON:
			break;
		default:
			error = EINVAL;
			goto out;
		}
	}

	/* Perform command */
	switch (cmd) {
	case IOPOL_CMD_SET:
		if (thread != THREAD_NULL) {
			set_thread_materialize_policy(get_bsdthread_info(thread), policy);
		} else {
			set_proc_materialize_policy(p, policy);
		}
		break;
	case IOPOL_CMD_GET:
		if (thread != THREAD_NULL) {
			policy = get_thread_materialize_policy(get_bsdthread_info(thread));
		} else {
			policy = get_proc_materialize_policy(p);
		}
		/* Return the effective policy in the caller's parameter block. */
		iop_param->iop_policy = policy;
		break;
	default:
		error = EINVAL;        /* unknown command */
		break;
	}

out:
	return error;
}
1962 | ||
1963 | static int | |
1964 | iopolicysys_vfs_statfs_no_data_volume(struct proc *p __unused, int cmd, | |
1965 | int scope, int policy, struct _iopol_param_t *iop_param) | |
1966 | { | |
1967 | int error = 0; | |
1968 | ||
1969 | /* Validate scope */ | |
1970 | switch (scope) { | |
1971 | case IOPOL_SCOPE_PROCESS: | |
1972 | /* Only process OK */ | |
1973 | break; | |
1974 | default: | |
1975 | error = EINVAL; | |
1976 | goto out; | |
1977 | } | |
1978 | ||
1979 | /* Validate policy */ | |
1980 | if (cmd == IOPOL_CMD_SET) { | |
1981 | switch (policy) { | |
1982 | case IOPOL_VFS_STATFS_NO_DATA_VOLUME_DEFAULT: | |
1983 | /* fall-through */ | |
1984 | case IOPOL_VFS_STATFS_FORCE_NO_DATA_VOLUME: | |
1985 | /* These policies are OK */ | |
1986 | break; | |
1987 | default: | |
1988 | error = EINVAL; | |
1989 | goto out; | |
1990 | } | |
1991 | } | |
1992 | ||
1993 | /* Perform command */ | |
1994 | switch (cmd) { | |
1995 | case IOPOL_CMD_SET: | |
1996 | if (0 == kauth_cred_issuser(kauth_cred_get())) { | |
1997 | /* If it's a non-root process, it needs to have the entitlement to set the policy */ | |
1998 | boolean_t entitled = FALSE; | |
1999 | entitled = IOTaskHasEntitlement(current_task(), "com.apple.private.iopol.case_sensitivity"); | |
2000 | if (!entitled) { | |
2001 | error = EPERM; | |
2002 | goto out; | |
2003 | } | |
2004 | } | |
2005 | ||
2006 | switch (policy) { | |
2007 | case IOPOL_VFS_STATFS_NO_DATA_VOLUME_DEFAULT: | |
2008 | OSBitAndAtomic16(~((uint32_t)P_VFS_IOPOLICY_STATFS_NO_DATA_VOLUME), &p->p_vfs_iopolicy); | |
2009 | break; | |
2010 | case IOPOL_VFS_STATFS_FORCE_NO_DATA_VOLUME: | |
2011 | OSBitOrAtomic16((uint32_t)P_VFS_IOPOLICY_STATFS_NO_DATA_VOLUME, &p->p_vfs_iopolicy); | |
2012 | break; | |
2013 | default: | |
2014 | error = EINVAL; | |
2015 | goto out; | |
2016 | } | |
2017 | ||
2018 | break; | |
2019 | case IOPOL_CMD_GET: | |
2020 | iop_param->iop_policy = (p->p_vfs_iopolicy & P_VFS_IOPOLICY_STATFS_NO_DATA_VOLUME) | |
2021 | ? IOPOL_VFS_STATFS_FORCE_NO_DATA_VOLUME | |
2022 | : IOPOL_VFS_STATFS_NO_DATA_VOLUME_DEFAULT; | |
2023 | break; | |
2024 | default: | |
2025 | error = EINVAL; /* unknown command */ | |
2026 | break; | |
2027 | } | |
2028 | ||
2029 | out: | |
2030 | return error; | |
2031 | } | |
2032 | ||
2033 | /* BSD call back function for task_policy networking changes */ | |
2034 | void | |
2035 | proc_apply_task_networkbg(void * bsd_info, thread_t thread) | |
2036 | { | |
2037 | assert(bsd_info != PROC_NULL); | |
2038 | ||
2039 | pid_t pid = proc_pid((proc_t)bsd_info); | |
2040 | ||
2041 | proc_t p = proc_find(pid); | |
2042 | ||
2043 | if (p != PROC_NULL) { | |
2044 | assert(p == (proc_t)bsd_info); | |
2045 | ||
2046 | do_background_socket(p, thread); | |
2047 | proc_rele(p); | |
2048 | } | |
2049 | } | |
2050 | ||
/*
 * Gather resource-usage statistics for process 'p' into 'ru'.
 *
 * 'flavor' selects the rusage_info version (RUSAGE_INFO_V0..V4).  The
 * switch intentionally falls through from the newest flavor down to V0,
 * so each version's fields are a superset of the previous version's:
 * fields not covered by the requested flavor stay zero (ru is memset
 * first).  Child statistics (V1+) are copied under the proc lock.
 */
void
gather_rusage_info(proc_t p, rusage_info_current *ru, int flavor)
{
	struct rusage_info_child *ri_child;

	assert(p->p_stats != NULL);
	memset(ru, 0, sizeof(*ru));
	switch (flavor) {
	case RUSAGE_INFO_V4:
		ru->ri_logical_writes = get_task_logical_writes(p->task, FALSE);
		ru->ri_lifetime_max_phys_footprint = get_task_phys_footprint_lifetime_max(p->task);
#if CONFIG_LEDGER_INTERVAL_MAX
		/* FALSE: read the interval max without resetting it */
		ru->ri_interval_max_phys_footprint = get_task_phys_footprint_interval_max(p->task, FALSE);
#endif
		fill_task_monotonic_rusage(p->task, ru);
	/* fall through */

	case RUSAGE_INFO_V3:
		fill_task_qos_rusage(p->task, ru);
		fill_task_billed_usage(p->task, ru);
	/* fall through */

	case RUSAGE_INFO_V2:
		fill_task_io_rusage(p->task, ru);
	/* fall through */

	case RUSAGE_INFO_V1:
		/*
		 * p->p_stats->ri_child statistics are protected under proc lock.
		 */
		proc_lock(p);

		ri_child = &(p->p_stats->ri_child);
		ru->ri_child_user_time = ri_child->ri_child_user_time;
		ru->ri_child_system_time = ri_child->ri_child_system_time;
		ru->ri_child_pkg_idle_wkups = ri_child->ri_child_pkg_idle_wkups;
		ru->ri_child_interrupt_wkups = ri_child->ri_child_interrupt_wkups;
		ru->ri_child_pageins = ri_child->ri_child_pageins;
		ru->ri_child_elapsed_abstime = ri_child->ri_child_elapsed_abstime;

		proc_unlock(p);
	/* fall through */

	case RUSAGE_INFO_V0:
		proc_getexecutableuuid(p, (unsigned char *)&ru->ri_uuid, sizeof(ru->ri_uuid));
		fill_task_rusage(p->task, ru);
		ru->ri_proc_start_abstime = p->p_stats->ps_start;
	}
}
2100 | ||
2101 | int | |
2102 | proc_get_rusage(proc_t p, int flavor, user_addr_t buffer, __unused int is_zombie) | |
2103 | { | |
2104 | rusage_info_current ri_current = {}; | |
2105 | ||
2106 | int error = 0; | |
2107 | size_t size = 0; | |
2108 | ||
2109 | switch (flavor) { | |
2110 | case RUSAGE_INFO_V0: | |
2111 | size = sizeof(struct rusage_info_v0); | |
2112 | break; | |
2113 | ||
2114 | case RUSAGE_INFO_V1: | |
2115 | size = sizeof(struct rusage_info_v1); | |
2116 | break; | |
2117 | ||
2118 | case RUSAGE_INFO_V2: | |
2119 | size = sizeof(struct rusage_info_v2); | |
2120 | break; | |
2121 | ||
2122 | case RUSAGE_INFO_V3: | |
2123 | size = sizeof(struct rusage_info_v3); | |
2124 | break; | |
2125 | ||
2126 | case RUSAGE_INFO_V4: | |
2127 | size = sizeof(struct rusage_info_v4); | |
2128 | break; | |
2129 | ||
2130 | default: | |
2131 | return EINVAL; | |
2132 | } | |
2133 | ||
2134 | if (size == 0) { | |
2135 | return EINVAL; | |
2136 | } | |
2137 | ||
2138 | /* | |
2139 | * If task is still alive, collect info from the live task itself. | |
2140 | * Otherwise, look to the cached info in the zombie proc. | |
2141 | */ | |
2142 | if (p->p_ru == NULL) { | |
2143 | gather_rusage_info(p, &ri_current, flavor); | |
2144 | ri_current.ri_proc_exit_abstime = 0; | |
2145 | error = copyout(&ri_current, buffer, size); | |
2146 | } else { | |
2147 | ri_current = p->p_ru->ri; | |
2148 | error = copyout(&p->p_ru->ri, buffer, size); | |
2149 | } | |
2150 | ||
2151 | return error; | |
2152 | } | |
2153 | ||
2154 | static int | |
2155 | mach_to_bsd_rv(int mach_rv) | |
2156 | { | |
2157 | int bsd_rv = 0; | |
2158 | ||
2159 | switch (mach_rv) { | |
2160 | case KERN_SUCCESS: | |
2161 | bsd_rv = 0; | |
2162 | break; | |
2163 | case KERN_INVALID_ARGUMENT: | |
2164 | bsd_rv = EINVAL; | |
2165 | break; | |
2166 | default: | |
2167 | panic("unknown error %#x", mach_rv); | |
2168 | } | |
2169 | ||
2170 | return bsd_rv; | |
2171 | } | |
2172 | ||
/*
 * Resource limit controls
 *
 * uap->flavor available flavors:
 *
 *     RLIMIT_WAKEUPS_MONITOR      - get/set the wakeups monitor (arg is a
 *                                   struct proc_rlimit_control_wakeupmon,
 *                                   copied in and back out)
 *     RLIMIT_CPU_USAGE_MONITOR    - control the CPU usage monitor
 *     RLIMIT_THREAD_CPULIMITS     - set a per-thread CPU limit
 *                                   (current thread only, pid must be -1)
 *     RLIMIT_FOOTPRINT_INTERVAL   - reset the interval phys-footprint max
 *
 * The return value of this function becomes the caller's errno.
 */
int
proc_rlimit_control(__unused struct proc *p, struct proc_rlimit_control_args *uap, __unused int32_t *retval)
{
	proc_t targetp;
	int error = 0;
	struct proc_rlimit_control_wakeupmon wakeupmon_args;
	uint32_t cpumon_flags;
	uint32_t cpulimits_flags;
	kauth_cred_t my_cred, target_cred;
#if CONFIG_LEDGER_INTERVAL_MAX
	uint32_t footprint_interval_flags;
	uint64_t interval_max_footprint;
#endif /* CONFIG_LEDGER_INTERVAL_MAX */

	/* -1 implicitly means our own process (perhaps even the current thread for per-thread attributes) */
	if (uap->pid == -1) {
		targetp = proc_self();
	} else {
		targetp = proc_find(uap->pid);
	}

	/* proc_self() can return NULL for an exiting process */
	if (targetp == PROC_NULL) {
		return ESRCH;
	}

	my_cred = kauth_cred_get();
	target_cred = kauth_cred_proc_ref(targetp);

	/*
	 * Permission check: allow superuser, a real uid of 0, or a caller
	 * whose effective/real uid matches the target's effective uid.
	 */
	if (!kauth_cred_issuser(my_cred) && kauth_cred_getruid(my_cred) &&
	    kauth_cred_getuid(my_cred) != kauth_cred_getuid(target_cred) &&
	    kauth_cred_getruid(my_cred) != kauth_cred_getuid(target_cred)) {
		proc_rele(targetp);
		kauth_cred_unref(&target_cred);
		return EACCES;
	}

	switch (uap->flavor) {
	case RLIMIT_WAKEUPS_MONITOR:
		if ((error = copyin(uap->arg, &wakeupmon_args, sizeof(wakeupmon_args))) != 0) {
			break;
		}
		if ((error = mach_to_bsd_rv(task_wakeups_monitor_ctl(targetp->task, &wakeupmon_args.wm_flags,
		    &wakeupmon_args.wm_rate))) != 0) {
			break;
		}
		/* Copy the (possibly updated) flags/rate back to the caller. */
		error = copyout(&wakeupmon_args, uap->arg, sizeof(wakeupmon_args));
		break;
	case RLIMIT_CPU_USAGE_MONITOR:
		/* NOTE(review): uap->arg is a user_addr_t carrying flags, not a pointer; assignment truncates to 32 bits by design */
		cpumon_flags = uap->arg;        // XXX temporarily stashing flags in argp (12592127)
		error = mach_to_bsd_rv(task_cpu_usage_monitor_ctl(targetp->task, &cpumon_flags));
		break;
	case RLIMIT_THREAD_CPULIMITS:
		cpulimits_flags = (uint32_t)uap->arg;   // only need a limited set of bits, pass in void * argument

		/* Per-thread limits apply only to the calling thread. */
		if (uap->pid != -1) {
			error = EINVAL;
			break;
		}

		uint8_t percent = 0;
		uint32_t ms_refill = 0;
		uint64_t ns_refill;

		percent = (uint8_t)(cpulimits_flags & 0xffU);           /* low 8 bits for percent */
		ms_refill = (cpulimits_flags >> 8) & 0xffffff;          /* next 24 bits represent ms refill value */
		if (percent >= 100) {
			error = EINVAL;
			break;
		}

		ns_refill = ((uint64_t)ms_refill) * NSEC_PER_MSEC;

		error = mach_to_bsd_rv(thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, percent, ns_refill));
		break;

#if CONFIG_LEDGER_INTERVAL_MAX
	case RLIMIT_FOOTPRINT_INTERVAL:
		footprint_interval_flags = uap->arg;    // XXX temporarily stashing flags in argp (12592127)
		/*
		 * There is currently only one option for this flavor.
		 */
		if ((footprint_interval_flags & FOOTPRINT_INTERVAL_RESET) == 0) {
			error = EINVAL;
			break;
		}
		/* TRUE: read and reset the interval max; returned value is unused here */
		interval_max_footprint = get_task_phys_footprint_interval_max(targetp->task, TRUE);
		break;
#endif /* CONFIG_LEDGER_INTERVAL_MAX */
	default:
		error = EINVAL;
		break;
	}

	proc_rele(targetp);
	kauth_cred_unref(&target_cred);

	/*
	 * Return value from this function becomes errno to userland caller.
	 */
	return error;
}
2285 | ||
2286 | /* | |
2287 | * Return the current amount of CPU consumed by this thread (in either user or kernel mode) | |
2288 | */ | |
2289 | int | |
2290 | thread_selfusage(struct proc *p __unused, struct thread_selfusage_args *uap __unused, uint64_t *retval) | |
2291 | { | |
2292 | uint64_t runtime; | |
2293 | ||
2294 | runtime = thread_get_runtime_self(); | |
2295 | *retval = runtime; | |
2296 | ||
2297 | return 0; | |
2298 | } | |
2299 | ||
#if !MONOTONIC
/*
 * thread_selfcounts() stub for kernels built without MONOTONIC support;
 * the functional implementation is presumably provided elsewhere when
 * MONOTONIC is enabled.  Always fails with ENOTSUP.
 */
int
thread_selfcounts(__unused struct proc *p, __unused struct thread_selfcounts_args *uap, __unused int *ret_out)
{
	return ENOTSUP;
}
#endif /* !MONOTONIC */