1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
24 /*-
25 * Copyright (c) 1982, 1986, 1989, 1993
26 * The Regents of the University of California. All rights reserved.
27 *
28 * This code is derived from software contributed to Berkeley by
29 * Mike Karels at Berkeley Software Design, Inc.
30 *
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
33 * are met:
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
39 * 3. All advertising materials mentioning features or use of this software
40 * must display the following acknowledgement:
41 * This product includes software developed by the University of
42 * California, Berkeley and its contributors.
43 * 4. Neither the name of the University nor the names of its contributors
44 * may be used to endorse or promote products derived from this software
45 * without specific prior written permission.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
48 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57 * SUCH DAMAGE.
58 *
59 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
60 */
61
62 /*
63 * sysctl system call.
64 */
65
66 #include <sys/param.h>
67 #include <sys/systm.h>
68 #include <sys/kernel.h>
69 #include <sys/malloc.h>
70 #include <sys/proc_internal.h>
71 #include <sys/kauth.h>
72 #include <sys/file_internal.h>
73 #include <sys/vnode_internal.h>
74 #include <sys/unistd.h>
75 #include <sys/buf.h>
76 #include <sys/ioctl.h>
77 #include <sys/namei.h>
78 #include <sys/tty.h>
79 #include <sys/disklabel.h>
80 #include <sys/vm.h>
81 #include <sys/sysctl.h>
82 #include <sys/user.h>
83 #include <sys/aio_kern.h>
84
85 #include <bsm/audit_kernel.h>
86
87 #include <mach/machine.h>
88 #include <mach/mach_types.h>
89 #include <mach/vm_param.h>
90 #include <kern/task.h>
91 #include <kern/lock.h>
92 #include <vm/vm_kern.h>
93 #include <vm/vm_map.h>
94 #include <mach/host_info.h>
95
96 extern vm_map_t bsd_pageable_map;
97
98 #include <sys/mount_internal.h>
99 #include <sys/kdebug.h>
100 #include <sys/sysproto.h>
101
102 #include <IOKit/IOPlatformExpert.h>
103 #include <pexpert/pexpert.h>
104
105 #include <machine/machine_routines.h>
106
107 #include <vm/vm_protos.h>
108
109 sysctlfn kern_sysctl;
110 #ifdef DEBUG
111 sysctlfn debug_sysctl;
112 #endif
113 extern sysctlfn vm_sysctl;
114 extern sysctlfn vfs_sysctl;
115 extern sysctlfn net_sysctl;
116 extern sysctlfn cpu_sysctl;
117 extern int aio_max_requests;
118 extern int aio_max_requests_per_process;
119 extern int aio_worker_threads;
120 extern int maxprocperuid;
121 extern int maxfilesperproc;
122 extern int lowpri_IO_window_msecs;
123 extern int lowpri_IO_delay_msecs;
124
125 static void
126 fill_eproc(struct proc *p, struct eproc *ep);
127 static void
128 fill_externproc(struct proc *p, struct extern_proc *exp);
129 static void
130 fill_user_eproc(struct proc *p, struct user_eproc *ep);
131 static void
132 fill_user_proc(struct proc *p, struct user_kinfo_proc *kp);
133 static void
134 fill_user_externproc(struct proc *p, struct user_extern_proc *exp);
135 extern int
136 kdbg_control(int *name, u_int namelen, user_addr_t where, size_t * sizep);
137 int
138 kdebug_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep, struct proc *p);
139 #if NFSCLIENT
140 extern int
141 netboot_root(void);
142 #endif
143 int
144 pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
145 struct proc *p);
146 __private_extern__ kern_return_t
147 reset_vmobjectcache(unsigned int val1, unsigned int val2);
148 extern int
149 resize_namecache(u_int newsize);
150 static int
151 sysctl_aiomax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen);
152 static int
153 sysctl_aioprocmax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen);
154 static int
155 sysctl_aiothreads(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen);
156 extern int
157 sysctl_clockrate(user_addr_t where, size_t *sizep);
158 int
159 sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep);
160 int
161 sysctl_doprof(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
162 user_addr_t newp, size_t newlen);
163 int
164 sysctl_file(user_addr_t where, size_t *sizep);
165 static void
166 fill_proc(struct proc *p, struct kinfo_proc *kp);
167 static int
168 sysctl_maxfilesperproc(user_addr_t oldp, size_t *oldlenp,
169 user_addr_t newp, size_t newlen);
170 static int
171 sysctl_maxprocperuid(user_addr_t oldp, size_t *oldlenp,
172 user_addr_t newp, size_t newlen);
173 static int
174 sysctl_maxproc(user_addr_t oldp, size_t *oldlenp,
175 user_addr_t newp, size_t newlen);
176 int
177 sysctl_procargs(int *name, u_int namelen, user_addr_t where,
178 size_t *sizep, struct proc *cur_proc);
179 static int
180 sysctl_procargs2(int *name, u_int namelen, user_addr_t where, size_t *sizep,
181 struct proc *cur_proc);
182 static int
183 sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
184 struct proc *cur_proc, int argc_yes);
185 int
186 sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
187 size_t newlen, void *sp, int len);
188 extern int
189 sysctl_vnode(user_addr_t where, size_t *sizep);
190
191
192 /*
193 * temporary location for vm_sysctl. This should be machine independent
194 */
195
196 extern uint32_t mach_factor[3];
197
198 static void
199 loadavg32to64(struct loadavg *la32, struct user_loadavg *la64)
200 {
201 la64->ldavg[0] = la32->ldavg[0];
202 la64->ldavg[1] = la32->ldavg[1];
203 la64->ldavg[2] = la32->ldavg[2];
204 la64->fscale = (user_long_t)la32->fscale;
205 }
206
207 int
208 vm_sysctl(int *name, __unused u_int namelen, user_addr_t oldp, size_t *oldlenp,
209 user_addr_t newp, size_t newlen, struct proc *p)
210 {
211 struct loadavg loadinfo;
212
213 switch (name[0]) {
214 case VM_LOADAVG:
215 if (proc_is64bit(p)) {
216 struct user_loadavg loadinfo64;
217 loadavg32to64(&averunnable, &loadinfo64);
218 return (sysctl_struct(oldp, oldlenp, newp, newlen,
219 &loadinfo64, sizeof(loadinfo64)));
220 } else {
221 return (sysctl_struct(oldp, oldlenp, newp, newlen,
222 &averunnable, sizeof(struct loadavg)));
223 }
224 case VM_MACHFACTOR:
225 loadinfo.ldavg[0] = mach_factor[0];
226 loadinfo.ldavg[1] = mach_factor[1];
227 loadinfo.ldavg[2] = mach_factor[2];
228 loadinfo.fscale = LSCALE;
229 if (proc_is64bit(p)) {
230 struct user_loadavg loadinfo64;
231 loadavg32to64(&loadinfo, &loadinfo64);
232 return (sysctl_struct(oldp, oldlenp, newp, newlen,
233 &loadinfo64, sizeof(loadinfo64)));
234 } else {
235 return (sysctl_struct(oldp, oldlenp, newp, newlen,
236 &loadinfo, sizeof(struct loadavg)));
237 }
238 case VM_SWAPUSAGE: {
239 int error;
240 uint64_t swap_total;
241 uint64_t swap_avail;
242 uint32_t swap_pagesize;
243 boolean_t swap_encrypted;
244 struct xsw_usage xsu;
245
246 error = macx_swapinfo(&swap_total,
247 &swap_avail,
248 &swap_pagesize,
249 &swap_encrypted);
250 if (error)
251 return error;
252
253 xsu.xsu_total = swap_total;
254 xsu.xsu_avail = swap_avail;
255 xsu.xsu_used = swap_total - swap_avail;
256 xsu.xsu_pagesize = swap_pagesize;
257 xsu.xsu_encrypted = swap_encrypted;
258 return sysctl_struct(oldp, oldlenp, newp, newlen,
259 &xsu, sizeof (struct xsw_usage));
260 }
261 case VM_METER:
262 return (ENOTSUP);
263 case VM_MAXID:
264 return (ENOTSUP);
265 default:
266 return (ENOTSUP);
267 }
268 /* NOTREACHED */
269 return (ENOTSUP);
270 }
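/*
 * For reference, the VM_SWAPUSAGE node above is normally consumed from
 * userland roughly as follows.  This is a minimal sketch, shown for
 * illustration only, assuming the standard sysctl(3) interface and the
 * struct xsw_usage declaration from <sys/sysctl.h>:
 *
 *	#include <stdio.h>
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *
 *	int
 *	main(void)
 *	{
 *		int mib[2] = { CTL_VM, VM_SWAPUSAGE };
 *		struct xsw_usage xsu;
 *		size_t len = sizeof(xsu);
 *
 *		if (sysctl(mib, 2, &xsu, &len, NULL, 0) == -1) {
 *			perror("sysctl");
 *			return (1);
 *		}
 *		printf("swap: %llu total, %llu used, %llu free\n",
 *		    xsu.xsu_total, xsu.xsu_used, xsu.xsu_avail);
 *		return (0);
 *	}
 */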
271
272 /*
273 * Locking and stats
274 */
275 static struct sysctl_lock {
276 int sl_lock;
277 int sl_want;
278 int sl_locked;
279 } memlock;
280
281 int
282 __sysctl(struct proc *p, struct __sysctl_args *uap, __unused register_t *retval)
283 {
284 int error, dolock = 1;
285 size_t savelen = 0, oldlen = 0, newlen;
286 sysctlfn *fnp = NULL;
287 int name[CTL_MAXNAME];
288 int i;
289 int error1;
290
291 /*
292 * all top-level sysctl names are non-terminal
293 */
294 if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)
295 return (EINVAL);
296 error = copyin(uap->name, &name[0], uap->namelen * sizeof(int));
297 if (error)
298 return (error);
299
300 AUDIT_ARG(ctlname, name, uap->namelen);
301
302 if (proc_is64bit(p)) {
303 /* uap->newlen is a size_t value which grows to 64 bits
304 * when coming from a 64-bit process.  Since it's doubtful we'll
305 * have a sysctl newp buffer greater than 4GB, we shrink it to size_t.
306 */
307 newlen = CAST_DOWN(size_t, uap->newlen);
308 }
309 else {
310 newlen = uap->newlen;
311 }
312
313 /* CTL_UNSPEC is used to get oid to AUTO_OID */
314 if (uap->new != USER_ADDR_NULL
315 && ((name[0] == CTL_KERN
316 && !(name[1] == KERN_IPC || name[1] == KERN_PANICINFO || name[1] == KERN_PROCDELAYTERM ||
317 name[1] == KERN_PROC_LOW_PRI_IO))
318 || (name[0] == CTL_HW)
319 || (name[0] == CTL_VM)
320 || (name[0] == CTL_VFS))
321 && (error = suser(kauth_cred_get(), &p->p_acflag)))
322 return (error);
323
324 switch (name[0]) {
325 case CTL_KERN:
326 fnp = kern_sysctl;
327 if ((name[1] != KERN_VNODE) && (name[1] != KERN_FILE)
328 && (name[1] != KERN_PROC))
329 dolock = 0;
330 break;
331 case CTL_VM:
332 fnp = vm_sysctl;
333 break;
334
335 case CTL_VFS:
336 fnp = vfs_sysctl;
337 break;
338 #ifdef DEBUG
339 case CTL_DEBUG:
340 fnp = debug_sysctl;
341 break;
342 #endif
343 default:
344 fnp = NULL;
345 }
346
347 if (uap->oldlenp != USER_ADDR_NULL) {
348 uint64_t oldlen64 = fuulong(uap->oldlenp);
349
350 oldlen = CAST_DOWN(size_t, oldlen64);
351 /*
352 * If more than 4G, clamp to 4G - useracc() below will catch
353 * with an EFAULT, if it's actually necessary.
354 */
355 if (oldlen64 > 0x00000000ffffffffULL)
356 oldlen = 0xffffffffUL;
357 }
358
359 if (uap->old != USER_ADDR_NULL) {
360 if (!useracc(uap->old, (user_size_t)oldlen, B_WRITE))
361 return (EFAULT);
362
363 /* The pc sampling mechanism does not need to take this lock */
364 if ((name[1] != KERN_PCSAMPLES) &&
365 (!((name[1] == KERN_KDEBUG) && (name[2] == KERN_KDGETENTROPY)))) {
366 while (memlock.sl_lock) {
367 memlock.sl_want = 1;
368 sleep((caddr_t)&memlock, PRIBIO+1);
369 memlock.sl_locked++;
370 }
371 memlock.sl_lock = 1;
372 }
373
374 if (dolock && oldlen &&
375 (error = vslock(uap->old, (user_size_t)oldlen))) {
376 if ((name[1] != KERN_PCSAMPLES) &&
377 (! ((name[1] == KERN_KDEBUG) && (name[2] == KERN_KDGETENTROPY)))) {
378 memlock.sl_lock = 0;
379 if (memlock.sl_want) {
380 memlock.sl_want = 0;
381 wakeup((caddr_t)&memlock);
382 }
383 }
384 return(error);
385 }
386 savelen = oldlen;
387 }
388
389 if (fnp) {
390 error = (*fnp)(name + 1, uap->namelen - 1, uap->old,
391 &oldlen, uap->new, newlen, p);
392 }
393 else
394 error = ENOTSUP;
395
396 if ( (name[0] != CTL_VFS) && (error == ENOTSUP)) {
397 size_t tmp = oldlen;
398 error = userland_sysctl(p, name, uap->namelen, uap->old, &tmp,
399 1, uap->new, newlen, &oldlen);
400 }
401
402 if (uap->old != USER_ADDR_NULL) {
403 if (dolock && savelen) {
404 error1 = vsunlock(uap->old, (user_size_t)savelen, B_WRITE);
405 if (!error && error1)
406 error = error1;
407 }
408 if (name[1] != KERN_PCSAMPLES) {
409 memlock.sl_lock = 0;
410 if (memlock.sl_want) {
411 memlock.sl_want = 0;
412 wakeup((caddr_t)&memlock);
413 }
414 }
415 }
416 if ((error) && (error != ENOMEM))
417 return (error);
418
419 if (uap->oldlenp != USER_ADDR_NULL) {
420 i = suulong(uap->oldlenp, oldlen);
421 if (i)
422 return i;
423 }
424
425 return (error);
426 }
427
428 /*
429 * Attributes stored in the kernel.
430 */
431 extern char classichandler[32];
432 extern uint32_t classichandler_fsid;
433 extern long classichandler_fileid;
434 __private_extern__ char corefilename[MAXPATHLEN+1];
435 __private_extern__ int do_coredump;
436 __private_extern__ int sugid_coredump;
437
438
439 #ifdef INSECURE
440 int securelevel = -1;
441 #else
442 int securelevel;
443 #endif
444
445 static int
446 sysctl_affinity(
447 int *name,
448 u_int namelen,
449 user_addr_t oldBuf,
450 size_t *oldSize,
451 user_addr_t newBuf,
452 __unused size_t newSize,
453 struct proc *cur_proc)
454 {
455 if (namelen < 1)
456 return (ENOTSUP);
457
458 if (name[0] == 0 && 1 == namelen) {
459 return sysctl_rdint(oldBuf, oldSize, newBuf,
460 (cur_proc->p_flag & P_AFFINITY) ? 1 : 0);
461 } else if (name[0] == 1 && 2 == namelen) {
462 if (name[1] == 0) {
463 cur_proc->p_flag &= ~P_AFFINITY;
464 } else {
465 cur_proc->p_flag |= P_AFFINITY;
466 }
467 return 0;
468 }
469 return (ENOTSUP);
470 }
471
472 static int
473 sysctl_classic(
474 int *name,
475 u_int namelen,
476 user_addr_t oldBuf,
477 size_t *oldSize,
478 user_addr_t newBuf,
479 __unused size_t newSize,
480 struct proc *cur_proc)
481 {
482 struct proc *p;
483
484 if (namelen != 1)
485 return (ENOTSUP);
486
487 p = pfind(name[0]);
488 if (p == NULL)
489 return (EINVAL);
490
491 if ((kauth_cred_getuid(p->p_ucred) != kauth_cred_getuid(kauth_cred_get()))
492 && suser(kauth_cred_get(), &cur_proc->p_acflag))
493 return (EPERM);
494
495 return sysctl_rdint(oldBuf, oldSize, newBuf,
496 (p->p_flag & P_CLASSIC) ? 1 : 0);
497 }
498
499 static int
500 sysctl_classichandler(
501 __unused int *name,
502 __unused u_int namelen,
503 user_addr_t oldBuf,
504 size_t *oldSize,
505 user_addr_t newBuf,
506 size_t newSize,
507 struct proc *p)
508 {
509 int error;
510 size_t len;
511 struct nameidata nd;
512 struct vnode_attr va;
513 char handler[sizeof(classichandler)];
514 struct vfs_context context;
515
516 context.vc_proc = p;
517 context.vc_ucred = kauth_cred_get();
518
519 if (oldSize) {
520 len = strlen(classichandler) + 1;
521 if (oldBuf) {
522 if (*oldSize < len)
523 return (ENOMEM);
524 error = copyout(classichandler, oldBuf, len);
525 if (error)
526 return (error);
527 }
528 *oldSize = len - 1;
529 }
530 if (newBuf) {
531 error = suser(context.vc_ucred, &p->p_acflag);
532 if (error)
533 return (error);
534 if (newSize >= sizeof(classichandler))
535 return (ENAMETOOLONG);
536 error = copyin(newBuf, handler, newSize);
537 if (error)
538 return (error);
539 handler[newSize] = 0;
540
541 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE32,
542 CAST_USER_ADDR_T(handler), &context);
543 error = namei(&nd);
544 if (error)
545 return (error);
546 nameidone(&nd);
547
548 /* Check mount point */
549 if ((nd.ni_vp->v_mount->mnt_flag & MNT_NOEXEC) ||
550 (nd.ni_vp->v_type != VREG)) {
551 vnode_put(nd.ni_vp);
552 return (EACCES);
553 }
554
555 VATTR_INIT(&va);
556 VATTR_WANTED(&va, va_fsid);
557 VATTR_WANTED(&va, va_fileid);
558 error = vnode_getattr(nd.ni_vp, &va, &context);
559 if (error) {
560 vnode_put(nd.ni_vp);
561 return (error);
562 }
563 vnode_put(nd.ni_vp);
564
565 classichandler_fsid = va.va_fsid;
566 classichandler_fileid = (u_long)va.va_fileid;
567 strcpy(classichandler, handler);
568 }
569 return 0;
570 }
571
572
573 extern int get_kernel_symfile( struct proc *, char **);
574 __private_extern__ int
575 sysctl_dopanicinfo(int *, u_int, user_addr_t, size_t *, user_addr_t,
576 size_t, struct proc *);
577
578 /*
579 * kernel related system variables.
580 */
581 int
582 kern_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
583 user_addr_t newp, size_t newlen, struct proc *p)
584 {
585 int error, level, inthostid, tmp;
586 unsigned int oldval=0;
587 char *str;
588 /* all sysctl names not listed below are terminal at this level */
589 if (namelen != 1
590 && !(name[0] == KERN_PROC
591 || name[0] == KERN_PROF
592 || name[0] == KERN_KDEBUG
593 || name[0] == KERN_PROCARGS
594 || name[0] == KERN_PROCARGS2
595 || name[0] == KERN_PCSAMPLES
596 || name[0] == KERN_IPC
597 || name[0] == KERN_SYSV
598 || name[0] == KERN_AFFINITY
599 || name[0] == KERN_CLASSIC
600 || name[0] == KERN_PANICINFO
601 || name[0] == KERN_POSIX)
602 )
603 return (ENOTDIR); /* overloaded */
604
605 switch (name[0]) {
606 case KERN_OSTYPE:
607 return (sysctl_rdstring(oldp, oldlenp, newp, ostype));
608 case KERN_OSRELEASE:
609 return (sysctl_rdstring(oldp, oldlenp, newp, osrelease));
610 case KERN_OSREV:
611 return (sysctl_rdint(oldp, oldlenp, newp, BSD));
612 case KERN_VERSION:
613 return (sysctl_rdstring(oldp, oldlenp, newp, version));
614 case KERN_MAXVNODES:
615 oldval = desiredvnodes;
616 error = sysctl_int(oldp, oldlenp, newp,
617 newlen, &desiredvnodes);
618 reset_vmobjectcache(oldval, desiredvnodes);
619 resize_namecache(desiredvnodes);
620 return(error);
621 case KERN_MAXPROC:
622 return (sysctl_maxproc(oldp, oldlenp, newp, newlen));
623 case KERN_MAXFILES:
624 return (sysctl_int(oldp, oldlenp, newp, newlen, &maxfiles));
625 case KERN_MAXPROCPERUID:
626 return( sysctl_maxprocperuid( oldp, oldlenp, newp, newlen ) );
627 case KERN_MAXFILESPERPROC:
628 return( sysctl_maxfilesperproc( oldp, oldlenp, newp, newlen ) );
629 case KERN_ARGMAX:
630 return (sysctl_rdint(oldp, oldlenp, newp, ARG_MAX));
631 case KERN_SECURELVL:
632 level = securelevel;
633 if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &level)) ||
634 newp == USER_ADDR_NULL)
635 return (error);
636 if (level < securelevel && p->p_pid != 1)
637 return (EPERM);
638 securelevel = level;
639 return (0);
640 case KERN_HOSTNAME:
641 error = sysctl_trstring(oldp, oldlenp, newp, newlen,
642 hostname, sizeof(hostname));
643 if (newp && !error)
644 hostnamelen = newlen;
645 return (error);
646 case KERN_DOMAINNAME:
647 error = sysctl_string(oldp, oldlenp, newp, newlen,
648 domainname, sizeof(domainname));
649 if (newp && !error)
650 domainnamelen = newlen;
651 return (error);
652 case KERN_HOSTID:
653 inthostid = hostid; /* XXX assumes sizeof long <= sizeof int */
654 error = sysctl_int(oldp, oldlenp, newp, newlen, &inthostid);
655 hostid = inthostid;
656 return (error);
657 case KERN_CLOCKRATE:
658 return (sysctl_clockrate(oldp, oldlenp));
659 case KERN_BOOTTIME:
660 {
661 struct timeval t;
662
663 t.tv_sec = boottime_sec();
664 t.tv_usec = 0;
665
666 return (sysctl_rdstruct(oldp, oldlenp, newp, &t,
667 sizeof(struct timeval)));
668 }
669 case KERN_VNODE:
670 return (sysctl_vnode(oldp, oldlenp));
671 case KERN_PROC:
672 return (sysctl_doproc(name + 1, namelen - 1, oldp, oldlenp));
673 case KERN_FILE:
674 return (sysctl_file(oldp, oldlenp));
675 #ifdef GPROF
676 case KERN_PROF:
677 return (sysctl_doprof(name + 1, namelen - 1, oldp, oldlenp,
678 newp, newlen));
679 #endif
680 case KERN_POSIX1:
681 return (sysctl_rdint(oldp, oldlenp, newp, _POSIX_VERSION));
682 case KERN_NGROUPS:
683 return (sysctl_rdint(oldp, oldlenp, newp, NGROUPS_MAX));
684 case KERN_JOB_CONTROL:
685 return (sysctl_rdint(oldp, oldlenp, newp, 1));
686 case KERN_SAVED_IDS:
687 #ifdef _POSIX_SAVED_IDS
688 return (sysctl_rdint(oldp, oldlenp, newp, 1));
689 #else
690 return (sysctl_rdint(oldp, oldlenp, newp, 0));
691 #endif
692 case KERN_KDEBUG:
693 return (kdebug_ops(name + 1, namelen - 1, oldp, oldlenp, p));
694 case KERN_PCSAMPLES:
695 return (pcsamples_ops(name + 1, namelen - 1, oldp, oldlenp, p));
696 case KERN_PROCARGS:
697 /* new one as it does not use kinfo_proc */
698 return (sysctl_procargs(name + 1, namelen - 1, oldp, oldlenp, p));
699 case KERN_PROCARGS2:
700 /* new one as it does not use kinfo_proc */
701 return (sysctl_procargs2(name + 1, namelen - 1, oldp, oldlenp, p));
702 case KERN_SYMFILE:
703 error = get_kernel_symfile( p, &str );
704 if ( error )
705 return error;
706 return (sysctl_rdstring(oldp, oldlenp, newp, str));
707 #if NFSCLIENT
708 case KERN_NETBOOT:
709 return (sysctl_rdint(oldp, oldlenp, newp, netboot_root()));
710 #endif
711 case KERN_PANICINFO:
712 return(sysctl_dopanicinfo(name + 1, namelen - 1, oldp, oldlenp,
713 newp, newlen, p));
714 case KERN_AFFINITY:
715 return sysctl_affinity(name+1, namelen-1, oldp, oldlenp,
716 newp, newlen, p);
717 case KERN_CLASSIC:
718 return sysctl_classic(name+1, namelen-1, oldp, oldlenp,
719 newp, newlen, p);
720 case KERN_CLASSICHANDLER:
721 return sysctl_classichandler(name+1, namelen-1, oldp, oldlenp,
722 newp, newlen, p);
723 case KERN_AIOMAX:
724 return( sysctl_aiomax( oldp, oldlenp, newp, newlen ) );
725 case KERN_AIOPROCMAX:
726 return( sysctl_aioprocmax( oldp, oldlenp, newp, newlen ) );
727 case KERN_AIOTHREADS:
728 return( sysctl_aiothreads( oldp, oldlenp, newp, newlen ) );
729 case KERN_USRSTACK:
730 return (sysctl_rdint(oldp, oldlenp, newp, (uintptr_t)p->user_stack));
731 case KERN_USRSTACK64:
732 return (sysctl_rdquad(oldp, oldlenp, newp, p->user_stack));
733 case KERN_COREFILE:
734 error = sysctl_string(oldp, oldlenp, newp, newlen,
735 corefilename, sizeof(corefilename));
736 return (error);
737 case KERN_COREDUMP:
738 tmp = do_coredump;
739 error = sysctl_int(oldp, oldlenp, newp, newlen, &do_coredump);
740 if (!error && ((do_coredump < 0) || (do_coredump > 1))) {
741 do_coredump = tmp;
742 error = EINVAL;
743 }
744 return (error);
745 case KERN_SUGID_COREDUMP:
746 tmp = sugid_coredump;
747 error = sysctl_int(oldp, oldlenp, newp, newlen, &sugid_coredump);
748 if (!error && ((sugid_coredump < 0) || (sugid_coredump > 1))) {
749 sugid_coredump = tmp;
750 error = EINVAL;
751 }
752 return (error);
753 case KERN_PROCDELAYTERM:
754 {
755 int old_value, new_value;
756
757 error = 0;
758 if (oldp && *oldlenp < sizeof(int))
759 return (ENOMEM);
760 if ( newp && newlen != sizeof(int) )
761 return(EINVAL);
762 *oldlenp = sizeof(int);
763 old_value = (p->p_lflag & P_LDELAYTERM)? 1: 0;
764 if (oldp && (error = copyout( &old_value, oldp, sizeof(int))))
765 return(error);
766 if (error == 0 && newp )
767 error = copyin( newp, &new_value, sizeof(int) );
768 if (error == 0 && newp) {
769 if (new_value)
770 p->p_lflag |= P_LDELAYTERM;
771 else
772 p->p_lflag &= ~P_LDELAYTERM;
773 }
774 return(error);
775 }
776 case KERN_PROC_LOW_PRI_IO:
777 {
778 int old_value, new_value;
779
780 error = 0;
781 if (oldp && *oldlenp < sizeof(int))
782 return (ENOMEM);
783 if ( newp && newlen != sizeof(int) )
784 return(EINVAL);
785 *oldlenp = sizeof(int);
786
787 old_value = (p->p_lflag & P_LLOW_PRI_IO)? 0x01: 0;
788 if (p->p_lflag & P_LBACKGROUND_IO)
789 old_value |= 0x02;
790
791 if (oldp && (error = copyout( &old_value, oldp, sizeof(int))))
792 return(error);
793 if (error == 0 && newp )
794 error = copyin( newp, &new_value, sizeof(int) );
795 if (error == 0 && newp) {
796 if (new_value & 0x01)
797 p->p_lflag |= P_LLOW_PRI_IO;
798 else if (new_value & 0x02)
799 p->p_lflag |= P_LBACKGROUND_IO;
800 else if (new_value == 0)
801 p->p_lflag &= ~(P_LLOW_PRI_IO | P_LBACKGROUND_IO);
802 }
803 return(error);
804 }
805 case KERN_LOW_PRI_WINDOW:
806 {
807 int old_value, new_value;
808
809 error = 0;
810 if (oldp && *oldlenp < sizeof(old_value) )
811 return (ENOMEM);
812 if ( newp && newlen != sizeof(new_value) )
813 return(EINVAL);
814 *oldlenp = sizeof(old_value);
815
816 old_value = lowpri_IO_window_msecs;
817
818 if (oldp && (error = copyout( &old_value, oldp, *oldlenp)))
819 return(error);
820 if (error == 0 && newp )
821 error = copyin( newp, &new_value, sizeof(new_value) );
822 if (error == 0 && newp) {
823 lowpri_IO_window_msecs = new_value;
824 }
825 return(error);
826 }
827 case KERN_LOW_PRI_DELAY:
828 {
829 int old_value, new_value;
830
831 error = 0;
832 if (oldp && *oldlenp < sizeof(old_value) )
833 return (ENOMEM);
834 if ( newp && newlen != sizeof(new_value) )
835 return(EINVAL);
836 *oldlenp = sizeof(old_value);
837
838 old_value = lowpri_IO_delay_msecs;
839
840 if (oldp && (error = copyout( &old_value, oldp, *oldlenp)))
841 return(error);
842 if (error == 0 && newp )
843 error = copyin( newp, &new_value, sizeof(new_value) );
844 if (error == 0 && newp) {
845 lowpri_IO_delay_msecs = new_value;
846 }
847 return(error);
848 }
849 case KERN_SHREG_PRIVATIZABLE:
850 /* this kernel does implement shared_region_make_private_np() */
851 return (sysctl_rdint(oldp, oldlenp, newp, 1));
852 default:
853 return (ENOTSUP);
854 }
855 /* NOTREACHED */
856 }
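/*
 * Most of the int-valued KERN_* nodes handled above share one userland
 * calling convention.  A minimal sketch for KERN_MAXPROC, assuming the
 * standard sysctl(3) interface (illustration only; writing the value
 * goes through the suser() check in __sysctl above):
 *
 *	#include <stdio.h>
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *
 *	int
 *	main(void)
 *	{
 *		int mib[2] = { CTL_KERN, KERN_MAXPROC };
 *		int maxproc;
 *		size_t len = sizeof(maxproc);
 *
 *		if (sysctl(mib, 2, &maxproc, &len, NULL, 0) == -1) {
 *			perror("sysctl");
 *			return (1);
 *		}
 *		printf("kern.maxproc = %d\n", maxproc);
 *		return (0);
 *	}
 */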
857
858 #ifdef DEBUG
859 /*
860 * Debugging related system variables.
861 */
862 #if DIAGNOSTIC
863 extern
864 #endif /* DIAGNOSTIC */
865 struct ctldebug debug0, debug1;
866 struct ctldebug debug2, debug3, debug4;
867 struct ctldebug debug5, debug6, debug7, debug8, debug9;
868 struct ctldebug debug10, debug11, debug12, debug13, debug14;
869 struct ctldebug debug15, debug16, debug17, debug18, debug19;
870 static struct ctldebug *debugvars[CTL_DEBUG_MAXID] = {
871 &debug0, &debug1, &debug2, &debug3, &debug4,
872 &debug5, &debug6, &debug7, &debug8, &debug9,
873 &debug10, &debug11, &debug12, &debug13, &debug14,
874 &debug15, &debug16, &debug17, &debug18, &debug19,
875 };
876 int
877 debug_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
878 user_addr_t newp, size_t newlen, struct proc *p)
879 {
880 struct ctldebug *cdp;
881
882 /* all sysctl names at this level are name and field */
883 if (namelen != 2)
884 return (ENOTDIR); /* overloaded */
885 cdp = debugvars[name[0]];
886 if (cdp->debugname == 0)
887 return (ENOTSUP);
888 switch (name[1]) {
889 case CTL_DEBUG_NAME:
890 return (sysctl_rdstring(oldp, oldlenp, newp, cdp->debugname));
891 case CTL_DEBUG_VALUE:
892 return (sysctl_int(oldp, oldlenp, newp, newlen, cdp->debugvar));
893 default:
894 return (ENOTSUP);
895 }
896 /* NOTREACHED */
897 }
898 #endif /* DEBUG */
899
900 /*
901 * Validate parameters and get old / set new parameters
902 * for an integer-valued sysctl function.
903 */
904 int
905 sysctl_int(user_addr_t oldp, size_t *oldlenp,
906 user_addr_t newp, size_t newlen, int *valp)
907 {
908 int error = 0;
909
910 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
911 return (EFAULT);
912 if (oldp && *oldlenp < sizeof(int))
913 return (ENOMEM);
914 if (newp && newlen != sizeof(int))
915 return (EINVAL);
916 *oldlenp = sizeof(int);
917 if (oldp)
918 error = copyout(valp, oldp, sizeof(int));
919 if (error == 0 && newp) {
920 error = copyin(newp, valp, sizeof(int));
921 AUDIT_ARG(value, *valp);
922 }
923 return (error);
924 }
925
926 /*
927 * As above, but read-only.
928 */
929 int
930 sysctl_rdint(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, int val)
931 {
932 int error = 0;
933
934 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
935 return (EFAULT);
936 if (oldp && *oldlenp < sizeof(int))
937 return (ENOMEM);
938 if (newp)
939 return (EPERM);
940 *oldlenp = sizeof(int);
941 if (oldp)
942 error = copyout((caddr_t)&val, oldp, sizeof(int));
943 return (error);
944 }
945
946 /*
947 * Validate parameters and get old / set new parameters
948 * for a quad(64bit)-valued sysctl function.
949 */
950 int
951 sysctl_quad(user_addr_t oldp, size_t *oldlenp,
952 user_addr_t newp, size_t newlen, quad_t *valp)
953 {
954 int error = 0;
955
956 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
957 return (EFAULT);
958 if (oldp && *oldlenp < sizeof(quad_t))
959 return (ENOMEM);
960 if (newp && newlen != sizeof(quad_t))
961 return (EINVAL);
962 *oldlenp = sizeof(quad_t);
963 if (oldp)
964 error = copyout(valp, oldp, sizeof(quad_t));
965 if (error == 0 && newp)
966 error = copyin(newp, valp, sizeof(quad_t));
967 return (error);
968 }
969
970 /*
971 * As above, but read-only.
972 */
973 int
974 sysctl_rdquad(oldp, oldlenp, newp, val)
975 void *oldp;
976 size_t *oldlenp;
977 void *newp;
978 quad_t val;
979 {
980 int error = 0;
981
982 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
983 return (EFAULT);
984 if (oldp && *oldlenp < sizeof(quad_t))
985 return (ENOMEM);
986 if (newp)
987 return (EPERM);
988 *oldlenp = sizeof(quad_t);
989 if (oldp)
990 error = copyout((caddr_t)&val, CAST_USER_ADDR_T(oldp), sizeof(quad_t));
991 return (error);
992 }
993
994 /*
995 * Validate parameters and get old / set new parameters
996 * for a string-valued sysctl function. Unlike sysctl_string, if you
997 * give it a buffer that is too small (but larger than 0 bytes), instead of
998 * returning ENOMEM, it truncates the returned string to the buffer
999 * size. This preserves the semantics of some library routines
1000 * implemented via sysctl, which truncate their returned data, rather
1001 * than simply returning an error. The returned string is always NUL
1002 * terminated.
1003 */
1004 int
1005 sysctl_trstring(user_addr_t oldp, size_t *oldlenp,
1006 user_addr_t newp, size_t newlen, char *str, int maxlen)
1007 {
1008 int len, copylen, error = 0;
1009
1010 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1011 return (EFAULT);
1012 copylen = len = strlen(str) + 1;
1013 if (oldp && (len < 0 || *oldlenp < 1))
1014 return (ENOMEM);
1015 if (oldp && (*oldlenp < (size_t)len))
1016 copylen = *oldlenp + 1;
1017 if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
1018 return (EINVAL);
1019 *oldlenp = copylen - 1; /* deal with NULL strings correctly */
1020 if (oldp) {
1021 error = copyout(str, oldp, copylen);
1022 if (!error) {
1023 unsigned char c = 0;
1024 /* NUL terminate */
1025 oldp += *oldlenp;
1026 error = copyout((void *)&c, oldp, sizeof(char));
1027 }
1028 }
1029 if (error == 0 && newp) {
1030 error = copyin(newp, str, newlen);
1031 str[newlen] = 0;
1032 AUDIT_ARG(text, (char *)str);
1033 }
1034 return (error);
1035 }
1036
1037 /*
1038 * Validate parameters and get old / set new parameters
1039 * for a string-valued sysctl function.
1040 */
1041 int
1042 sysctl_string(user_addr_t oldp, size_t *oldlenp,
1043 user_addr_t newp, size_t newlen, char *str, int maxlen)
1044 {
1045 int len, error = 0;
1046
1047 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1048 return (EFAULT);
1049 len = strlen(str) + 1;
1050 if (oldp && (len < 0 || *oldlenp < (size_t)len))
1051 return (ENOMEM);
1052 if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
1053 return (EINVAL);
1054 *oldlenp = len -1; /* deal with NULL strings correctly */
1055 if (oldp) {
1056 error = copyout(str, oldp, len);
1057 }
1058 if (error == 0 && newp) {
1059 error = copyin(newp, str, newlen);
1060 str[newlen] = 0;
1061 AUDIT_ARG(text, (char *)str);
1062 }
1063 return (error);
1064 }
1065
1066 /*
1067 * As above, but read-only.
1068 */
1069 int
1070 sysctl_rdstring(user_addr_t oldp, size_t *oldlenp,
1071 user_addr_t newp, char *str)
1072 {
1073 int len, error = 0;
1074
1075 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1076 return (EFAULT);
1077 len = strlen(str) + 1;
1078 if (oldp && *oldlenp < (size_t)len)
1079 return (ENOMEM);
1080 if (newp)
1081 return (EPERM);
1082 *oldlenp = len;
1083 if (oldp)
1084 error = copyout(str, oldp, len);
1085 return (error);
1086 }
1087
1088 /*
1089 * Validate parameters and get old / set new parameters
1090 * for a structure oriented sysctl function.
1091 */
1092 int
1093 sysctl_struct(user_addr_t oldp, size_t *oldlenp,
1094 user_addr_t newp, size_t newlen, void *sp, int len)
1095 {
1096 int error = 0;
1097
1098 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1099 return (EFAULT);
1100 if (oldp && (len < 0 || *oldlenp < (size_t)len))
1101 return (ENOMEM);
1102 if (newp && (len < 0 || newlen > (size_t)len))
1103 return (EINVAL);
1104 if (oldp) {
1105 *oldlenp = len;
1106 error = copyout(sp, oldp, len);
1107 }
1108 if (error == 0 && newp)
1109 error = copyin(newp, sp, len);
1110 return (error);
1111 }
1112
1113 /*
1114 * Validate parameters and get old parameters
1115 * for a structure oriented sysctl function.
1116 */
1117 int
1118 sysctl_rdstruct(user_addr_t oldp, size_t *oldlenp,
1119 user_addr_t newp, void *sp, int len)
1120 {
1121 int error = 0;
1122
1123 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1124 return (EFAULT);
1125 if (oldp && (len < 0 || *oldlenp < (size_t)len))
1126 return (ENOMEM);
1127 if (newp)
1128 return (EPERM);
1129 *oldlenp = len;
1130 if (oldp)
1131 error = copyout(sp, oldp, len);
1132 return (error);
1133 }
1134
1135 /*
1136 * Get file structures.
1137 */
1138 int
1139 sysctl_file(user_addr_t where, size_t *sizep)
1140 {
1141 int buflen, error;
1142 struct fileglob *fg;
1143 user_addr_t start = where;
1144 struct extern_file nef;
1145
1146 buflen = *sizep;
1147 if (where == USER_ADDR_NULL) {
1148 /*
1149 * overestimate by 10 files
1150 */
1151 *sizep = sizeof(filehead) + (nfiles + 10) * sizeof(struct extern_file);
1152 return (0);
1153 }
1154
1155 /*
1156 * first copyout filehead
1157 */
1158 if (buflen < 0 || (size_t)buflen < sizeof(filehead)) {
1159 *sizep = 0;
1160 return (0);
1161 }
1162 error = copyout((caddr_t)&filehead, where, sizeof(filehead));
1163 if (error)
1164 return (error);
1165 buflen -= sizeof(filehead);
1166 where += sizeof(filehead);
1167
1168 /*
1169 * followed by an array of file structures
1170 */
1171 for (fg = filehead.lh_first; fg != 0; fg = fg->f_list.le_next) {
1172 if (buflen < 0 || (size_t)buflen < sizeof(struct extern_file)) {
1173 *sizep = where - start;
1174 return (ENOMEM);
1175 }
1176 nef.f_list.le_next = (struct extern_file *)fg->f_list.le_next;
1177 nef.f_list.le_prev = (struct extern_file **)fg->f_list.le_prev;
1178 nef.f_flag = (fg->fg_flag & FMASK);
1179 nef.f_type = fg->fg_type;
1180 nef.f_count = fg->fg_count;
1181 nef.f_msgcount = fg->fg_msgcount;
1182 nef.f_cred = fg->fg_cred;
1183 nef.f_ops = fg->fg_ops;
1184 nef.f_offset = fg->fg_offset;
1185 nef.f_data = fg->fg_data;
1186 error = copyout((caddr_t)&nef, where, sizeof (struct extern_file));
1187 if (error)
1188 return (error);
1189 buflen -= sizeof(struct extern_file);
1190 where += sizeof(struct extern_file);
1191 }
1192 *sizep = where - start;
1193 return (0);
1194 }
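/*
 * The USER_ADDR_NULL path above implements the usual size-then-fetch
 * idiom for KERN_FILE.  A minimal userland sketch, assuming the standard
 * sysctl(3) interface (illustration only; the buffer is filled with
 * filehead followed by an array of struct extern_file, as written out
 * above):
 *
 *	#include <err.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *
 *	int
 *	main(void)
 *	{
 *		int mib[2] = { CTL_KERN, KERN_FILE };
 *		size_t len = 0;
 *		char *buf;
 *
 *		if (sysctl(mib, 2, NULL, &len, NULL, 0) == -1)
 *			err(1, "sysctl size");
 *		if ((buf = malloc(len)) == NULL)
 *			err(1, "malloc");
 *		if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
 *			err(1, "sysctl fetch");
 *		printf("%zu bytes of file table data\n", len);
 *		free(buf);
 *		return (0);
 *	}
 */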
1195
1196 /*
1197 * try overestimating by 5 procs
1198 */
1199 #define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
1200
1201 int
1202 sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep)
1203 {
1204 struct proc *p;
1205 user_addr_t dp = where;
1206 size_t needed = 0;
1207 int buflen = where != USER_ADDR_NULL ? *sizep : 0;
1208 int doingzomb;
1209 int error = 0;
1210 boolean_t is_64_bit = FALSE;
1211 struct kinfo_proc kproc;
1212 struct user_kinfo_proc user_kproc;
1213 int sizeof_kproc;
1214 caddr_t kprocp;
1215
1216 if (namelen != 2 && !(namelen == 1 && name[0] == KERN_PROC_ALL))
1217 return (EINVAL);
1218 p = allproc.lh_first;
1219 doingzomb = 0;
1220 is_64_bit = proc_is64bit(current_proc());
1221 if (is_64_bit) {
1222 sizeof_kproc = sizeof(user_kproc);
1223 kprocp = (caddr_t) &user_kproc;
1224 }
1225 else {
1226 sizeof_kproc = sizeof(kproc);
1227 kprocp = (caddr_t) &kproc;
1228 }
1229 again:
1230 for (; p != 0; p = p->p_list.le_next) {
1231 /*
1232 * Skip embryonic processes.
1233 */
1234 if (p->p_stat == SIDL)
1235 continue;
1236 /*
1237 * TODO - make more efficient (see notes below).
1238 * do by session.
1239 */
1240 switch (name[0]) {
1241
1242 case KERN_PROC_PID:
1243 /* could do this with just a lookup */
1244 if (p->p_pid != (pid_t)name[1])
1245 continue;
1246 break;
1247
1248 case KERN_PROC_PGRP:
1249 /* could do this by traversing pgrp */
1250 if (p->p_pgrp->pg_id != (pid_t)name[1])
1251 continue;
1252 break;
1253
1254 case KERN_PROC_TTY:
1255 if ((p->p_flag & P_CONTROLT) == 0 ||
1256 (p->p_session == NULL) ||
1257 p->p_session->s_ttyp == NULL ||
1258 p->p_session->s_ttyp->t_dev != (dev_t)name[1])
1259 continue;
1260 break;
1261
1262 case KERN_PROC_UID:
1263 if ((p->p_ucred == NULL) ||
1264 (kauth_cred_getuid(p->p_ucred) != (uid_t)name[1]))
1265 continue;
1266 break;
1267
1268 case KERN_PROC_RUID:
1269 if ((p->p_ucred == NULL) ||
1270 (p->p_ucred->cr_ruid != (uid_t)name[1]))
1271 continue;
1272 break;
1273 }
1274 if (buflen >= sizeof_kproc) {
1275 bzero(kprocp, sizeof_kproc);
1276 if (is_64_bit) {
1277 fill_user_proc(p, (struct user_kinfo_proc *) kprocp);
1278 }
1279 else {
1280 fill_proc(p, (struct kinfo_proc *) kprocp);
1281 }
1282 error = copyout(kprocp, dp, sizeof_kproc);
1283 if (error)
1284 return (error);
1285 dp += sizeof_kproc;
1286 buflen -= sizeof_kproc;
1287 }
1288 needed += sizeof_kproc;
1289 }
1290 if (doingzomb == 0) {
1291 p = zombproc.lh_first;
1292 doingzomb++;
1293 goto again;
1294 }
1295 if (where != USER_ADDR_NULL) {
1296 *sizep = dp - where;
1297 if (needed > *sizep)
1298 return (ENOMEM);
1299 } else {
1300 needed += KERN_PROCSLOP;
1301 *sizep = needed;
1302 }
1303 return (0);
1304 }
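/*
 * A userland sketch of the common KERN_PROC_PID query served above,
 * assuming the standard sysctl(3) interface and the struct kinfo_proc
 * declaration from <sys/sysctl.h> (illustration only; a 64-bit caller is
 * given the user_kinfo_proc layout instead, as handled above):
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *
 *	int
 *	main(void)
 *	{
 *		int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid() };
 *		struct kinfo_proc kp;
 *		size_t len = sizeof(kp);
 *
 *		if (sysctl(mib, 4, &kp, &len, NULL, 0) == -1 || len == 0) {
 *			perror("sysctl");
 *			return (1);
 *		}
 *		printf("pid %d: comm=%s ppid=%d\n", kp.kp_proc.p_pid,
 *		    kp.kp_proc.p_comm, kp.kp_eproc.e_ppid);
 *		return (0);
 *	}
 */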
1305
1306 /*
1307 * Fill in an eproc structure for the specified process.
1308 */
1309 static void
1310 fill_eproc(p, ep)
1311 register struct proc *p;
1312 register struct eproc *ep;
1313 {
1314 register struct tty *tp;
1315
1316 ep->e_paddr = p;
1317 if (p->p_pgrp) {
1318 ep->e_sess = p->p_pgrp->pg_session;
1319 ep->e_pgid = p->p_pgrp->pg_id;
1320 ep->e_jobc = p->p_pgrp->pg_jobc;
1321 if (ep->e_sess && ep->e_sess->s_ttyvp)
1322 ep->e_flag = EPROC_CTTY;
1323 } else {
1324 ep->e_sess = (struct session *)0;
1325 ep->e_pgid = 0;
1326 ep->e_jobc = 0;
1327 }
1328 ep->e_ppid = (p->p_pptr) ? p->p_pptr->p_pid : 0;
1329 /* Pre-zero the fake historical pcred */
1330 bzero(&ep->e_pcred, sizeof(struct _pcred));
1331 if (p->p_ucred) {
1332 /* XXX not ref-counted */
1333
1334 /* A fake historical pcred */
1335 ep->e_pcred.p_ruid = p->p_ucred->cr_ruid;
1336 ep->e_pcred.p_svuid = p->p_ucred->cr_svuid;
1337 ep->e_pcred.p_rgid = p->p_ucred->cr_rgid;
1338 ep->e_pcred.p_svgid = p->p_ucred->cr_svgid;
1339
1340 /* A fake historical *kauth_cred_t */
1341 ep->e_ucred.cr_ref = p->p_ucred->cr_ref;
1342 ep->e_ucred.cr_uid = kauth_cred_getuid(p->p_ucred);
1343 ep->e_ucred.cr_ngroups = p->p_ucred->cr_ngroups;
1344 bcopy(p->p_ucred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));
1345
1346 }
1347 if (p->p_stat == SIDL || p->p_stat == SZOMB) {
1348 ep->e_vm.vm_tsize = 0;
1349 ep->e_vm.vm_dsize = 0;
1350 ep->e_vm.vm_ssize = 0;
1351 }
1352 ep->e_vm.vm_rssize = 0;
1353
1354 if ((p->p_flag & P_CONTROLT) && (ep->e_sess) &&
1355 (tp = ep->e_sess->s_ttyp)) {
1356 ep->e_tdev = tp->t_dev;
1357 ep->e_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
1358 ep->e_tsess = tp->t_session;
1359 } else
1360 ep->e_tdev = NODEV;
1361
1362 if (SESS_LEADER(p))
1363 ep->e_flag |= EPROC_SLEADER;
1364 if (p->p_wmesg)
1365 strncpy(ep->e_wmesg, p->p_wmesg, WMESGLEN);
1366 ep->e_xsize = ep->e_xrssize = 0;
1367 ep->e_xccount = ep->e_xswrss = 0;
1368 }
1369
1370 /*
1371 * Fill in an LP64 version of eproc structure for the specified process.
1372 */
1373 static void
1374 fill_user_eproc(register struct proc *p, register struct user_eproc *ep)
1375 {
1376 register struct tty *tp;
1377 struct session *sessionp = NULL;
1378
1379 ep->e_paddr = CAST_USER_ADDR_T(p);
1380 if (p->p_pgrp) {
1381 sessionp = p->p_pgrp->pg_session;
1382 ep->e_sess = CAST_USER_ADDR_T(sessionp);
1383 ep->e_pgid = p->p_pgrp->pg_id;
1384 ep->e_jobc = p->p_pgrp->pg_jobc;
1385 if (sessionp) {
1386 if (sessionp->s_ttyvp)
1387 ep->e_flag = EPROC_CTTY;
1388 }
1389 } else {
1390 ep->e_sess = USER_ADDR_NULL;
1391 ep->e_pgid = 0;
1392 ep->e_jobc = 0;
1393 }
1394 ep->e_ppid = (p->p_pptr) ? p->p_pptr->p_pid : 0;
1395 /* Pre-zero the fake historical pcred */
1396 bzero(&ep->e_pcred, sizeof(ep->e_pcred));
1397 if (p->p_ucred) {
1398 /* XXX not ref-counted */
1399
1400 /* A fake historical pcred */
1401 ep->e_pcred.p_ruid = p->p_ucred->cr_ruid;
1402 ep->e_pcred.p_svuid = p->p_ucred->cr_svuid;
1403 ep->e_pcred.p_rgid = p->p_ucred->cr_rgid;
1404 ep->e_pcred.p_svgid = p->p_ucred->cr_svgid;
1405
1406 /* A fake historical *kauth_cred_t */
1407 ep->e_ucred.cr_ref = p->p_ucred->cr_ref;
1408 ep->e_ucred.cr_uid = kauth_cred_getuid(p->p_ucred);
1409 ep->e_ucred.cr_ngroups = p->p_ucred->cr_ngroups;
1410 bcopy(p->p_ucred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));
1411
1412 }
1413 if (p->p_stat == SIDL || p->p_stat == SZOMB) {
1414 ep->e_vm.vm_tsize = 0;
1415 ep->e_vm.vm_dsize = 0;
1416 ep->e_vm.vm_ssize = 0;
1417 }
1418 ep->e_vm.vm_rssize = 0;
1419
1420 if ((p->p_flag & P_CONTROLT) && (sessionp) &&
1421 (tp = sessionp->s_ttyp)) {
1422 ep->e_tdev = tp->t_dev;
1423 ep->e_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
1424 ep->e_tsess = CAST_USER_ADDR_T(tp->t_session);
1425 } else
1426 ep->e_tdev = NODEV;
1427
1428 if (SESS_LEADER(p))
1429 ep->e_flag |= EPROC_SLEADER;
1430 if (p->p_wmesg)
1431 strncpy(ep->e_wmesg, p->p_wmesg, WMESGLEN);
1432 ep->e_xsize = ep->e_xrssize = 0;
1433 ep->e_xccount = ep->e_xswrss = 0;
1434 }
1435
1436 /*
1437 * Fill in an eproc structure for the specified process.
1438 */
1439 static void
1440 fill_externproc(p, exp)
1441 register struct proc *p;
1442 register struct extern_proc *exp;
1443 {
1444 exp->p_forw = exp->p_back = NULL;
1445 if (p->p_stats)
1446 exp->p_starttime = p->p_stats->p_start;
1447 exp->p_vmspace = NULL;
1448 exp->p_sigacts = p->p_sigacts;
1449 exp->p_flag = p->p_flag;
1450 exp->p_stat = p->p_stat ;
1451 exp->p_pid = p->p_pid ;
1452 exp->p_oppid = p->p_oppid ;
1453 exp->p_dupfd = p->p_dupfd ;
1454 /* Mach related */
1455 exp->user_stack = CAST_DOWN(caddr_t, p->user_stack);
1456 exp->exit_thread = p->exit_thread ;
1457 exp->p_debugger = p->p_debugger ;
1458 exp->sigwait = p->sigwait ;
1459 /* scheduling */
1460 exp->p_estcpu = p->p_estcpu ;
1461 exp->p_cpticks = p->p_cpticks ;
1462 exp->p_pctcpu = p->p_pctcpu ;
1463 exp->p_wchan = p->p_wchan ;
1464 exp->p_wmesg = p->p_wmesg ;
1465 exp->p_swtime = p->p_swtime ;
1466 exp->p_slptime = p->p_slptime ;
1467 bcopy(&p->p_realtimer, &exp->p_realtimer,sizeof(struct itimerval));
1468 bcopy(&p->p_rtime, &exp->p_rtime,sizeof(struct timeval));
1469 exp->p_uticks = p->p_uticks ;
1470 exp->p_sticks = p->p_sticks ;
1471 exp->p_iticks = p->p_iticks ;
1472 exp->p_traceflag = p->p_traceflag ;
1473 exp->p_tracep = p->p_tracep ;
1474 exp->p_siglist = 0 ; /* No longer relevant */
1475 exp->p_textvp = p->p_textvp ;
1476 exp->p_holdcnt = 0 ;
1477 exp->p_sigmask = 0 ; /* no longer available */
1478 exp->p_sigignore = p->p_sigignore ;
1479 exp->p_sigcatch = p->p_sigcatch ;
1480 exp->p_priority = p->p_priority ;
1481 exp->p_usrpri = p->p_usrpri ;
1482 exp->p_nice = p->p_nice ;
1483 bcopy(&p->p_comm, &exp->p_comm,MAXCOMLEN);
1484 exp->p_comm[MAXCOMLEN] = '\0';
1485 exp->p_pgrp = p->p_pgrp ;
1486 exp->p_addr = NULL;
1487 exp->p_xstat = p->p_xstat ;
1488 exp->p_acflag = p->p_acflag ;
1489 exp->p_ru = p->p_ru ; /* XXX may be NULL */
1490 }
1491
1492 /*
1493 * Fill in an LP64 version of extern_proc structure for the specified process.
1494 */
1495 static void
1496 fill_user_externproc(register struct proc *p, register struct user_extern_proc *exp)
1497 {
1498 exp->p_forw = exp->p_back = USER_ADDR_NULL;
1499 if (p->p_stats) {
1500 exp->p_starttime.tv_sec = p->p_stats->p_start.tv_sec;
1501 exp->p_starttime.tv_usec = p->p_stats->p_start.tv_usec;
1502 }
1503 exp->p_vmspace = USER_ADDR_NULL;
1504 exp->p_sigacts = CAST_USER_ADDR_T(p->p_sigacts);
1505 exp->p_flag = p->p_flag;
1506 exp->p_stat = p->p_stat ;
1507 exp->p_pid = p->p_pid ;
1508 exp->p_oppid = p->p_oppid ;
1509 exp->p_dupfd = p->p_dupfd ;
1510 /* Mach related */
1511 exp->user_stack = p->user_stack;
1512 exp->exit_thread = CAST_USER_ADDR_T(p->exit_thread);
1513 exp->p_debugger = p->p_debugger ;
1514 exp->sigwait = p->sigwait ;
1515 /* scheduling */
1516 exp->p_estcpu = p->p_estcpu ;
1517 exp->p_cpticks = p->p_cpticks ;
1518 exp->p_pctcpu = p->p_pctcpu ;
1519 exp->p_wchan = CAST_USER_ADDR_T(p->p_wchan);
1520 exp->p_wmesg = CAST_USER_ADDR_T(p->p_wmesg);
1521 exp->p_swtime = p->p_swtime ;
1522 exp->p_slptime = p->p_slptime ;
1523 exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
1524 exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;
1525 exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
1526 exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;
1527 exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
1528 exp->p_rtime.tv_usec = p->p_rtime.tv_usec;
1529 exp->p_uticks = p->p_uticks ;
1530 exp->p_sticks = p->p_sticks ;
1531 exp->p_iticks = p->p_iticks ;
1532 exp->p_traceflag = p->p_traceflag ;
1533 exp->p_tracep = CAST_USER_ADDR_T(p->p_tracep);
1534 exp->p_siglist = 0 ; /* No longer relevant */
1535 exp->p_textvp = CAST_USER_ADDR_T(p->p_textvp);
1536 exp->p_holdcnt = 0 ;
1537 exp->p_sigmask = 0 ; /* no longer available */
1538 exp->p_sigignore = p->p_sigignore ;
1539 exp->p_sigcatch = p->p_sigcatch ;
1540 exp->p_priority = p->p_priority ;
1541 exp->p_usrpri = p->p_usrpri ;
1542 exp->p_nice = p->p_nice ;
1543 bcopy(&p->p_comm, &exp->p_comm,MAXCOMLEN);
1544 exp->p_comm[MAXCOMLEN] = '\0';
1545 exp->p_pgrp = CAST_USER_ADDR_T(p->p_pgrp);
1546 exp->p_addr = USER_ADDR_NULL;
1547 exp->p_xstat = p->p_xstat ;
1548 exp->p_acflag = p->p_acflag ;
1549 exp->p_ru = CAST_USER_ADDR_T(p->p_ru); /* XXX may be NULL */
1550 }
1551
1552 static void
1553 fill_proc(p, kp)
1554 register struct proc *p;
1555 register struct kinfo_proc *kp;
1556 {
1557 fill_externproc(p, &kp->kp_proc);
1558 fill_eproc(p, &kp->kp_eproc);
1559 }
1560
1561 static void
1562 fill_user_proc(register struct proc *p, register struct user_kinfo_proc *kp)
1563 {
1564 fill_user_externproc(p, &kp->kp_proc);
1565 fill_user_eproc(p, &kp->kp_eproc);
1566 }
1567
1568 int
1569 kdebug_ops(int *name, u_int namelen, user_addr_t where,
1570 size_t *sizep, struct proc *p)
1571 {
1572 int ret=0;
1573
1574 ret = suser(kauth_cred_get(), &p->p_acflag);
1575 if (ret)
1576 return(ret);
1577
1578 switch(name[0]) {
1579 case KERN_KDEFLAGS:
1580 case KERN_KDDFLAGS:
1581 case KERN_KDENABLE:
1582 case KERN_KDGETBUF:
1583 case KERN_KDSETUP:
1584 case KERN_KDREMOVE:
1585 case KERN_KDSETREG:
1586 case KERN_KDGETREG:
1587 case KERN_KDREADTR:
1588 case KERN_KDPIDTR:
1589 case KERN_KDTHRMAP:
1590 case KERN_KDPIDEX:
1591 case KERN_KDSETRTCDEC:
1592 case KERN_KDSETBUF:
1593 case KERN_KDGETENTROPY:
1594 ret = kdbg_control(name, namelen, where, sizep);
1595 break;
1596 default:
1597 ret= ENOTSUP;
1598 break;
1599 }
1600 return(ret);
1601 }
1602
1603 extern int pcsamples_control(int *name, u_int namelen, user_addr_t where,
1604 size_t * sizep);
1605
1606 int
1607 pcsamples_ops(int *name, u_int namelen, user_addr_t where,
1608 size_t *sizep, struct proc *p)
1609 {
1610 int ret=0;
1611
1612 ret = suser(kauth_cred_get(), &p->p_acflag);
1613 if (ret)
1614 return(ret);
1615
1616 switch(name[0]) {
1617 case KERN_PCDISABLE:
1618 case KERN_PCGETBUF:
1619 case KERN_PCSETUP:
1620 case KERN_PCREMOVE:
1621 case KERN_PCREADBUF:
1622 case KERN_PCSETREG:
1623 case KERN_PCSETBUF:
1624 case KERN_PCCOMM:
1625 ret = pcsamples_control(name, namelen, where, sizep);
1626 break;
1627 default:
1628 ret= ENOTSUP;
1629 break;
1630 }
1631 return(ret);
1632 }
1633
1634 /*
1635 * Return the top *sizep bytes of the user stack, or the entire area of the
1636 * user stack down through the saved exec_path, whichever is smaller.
1637 */
1638 int
1639 sysctl_procargs(int *name, u_int namelen, user_addr_t where,
1640 size_t *sizep, struct proc *cur_proc)
1641 {
1642 return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 0);
1643 }
1644
1645 static int
1646 sysctl_procargs2(int *name, u_int namelen, user_addr_t where,
1647 size_t *sizep, struct proc *cur_proc)
1648 {
1649 return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 1);
1650 }
1651
1652 static int
1653 sysctl_procargsx(int *name, __unused u_int namelen, user_addr_t where,
1654 size_t *sizep, struct proc *cur_proc, int argc_yes)
1655 {
1656 struct proc *p;
1657 int buflen = where != USER_ADDR_NULL ? *sizep : 0;
1658 int error = 0;
1659 struct vm_map *proc_map;
1660 struct task * task;
1661 vm_map_copy_t tmp;
1662 user_addr_t arg_addr;
1663 size_t arg_size;
1664 caddr_t data;
1665 int size;
1666 vm_offset_t copy_start, copy_end;
1667 kern_return_t ret;
1668 int pid;
1669
1670 if (argc_yes)
1671 buflen -= sizeof(int); /* reserve first word to return argc */
1672
1673 /* we only care about buflen when where (oldp from sysctl) is not NULL. */
1674 /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl) */
1675 /* is not NULL, then the caller wants us to return the length needed to */
1676 /* hold the data we would return */
1677 if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
1678 return(EINVAL);
1679 }
1680 arg_size = buflen;
1681
1682 /*
1683 * Lookup process by pid
1684 */
1685 pid = name[0];
1686 p = pfind(pid);
1687 if (p == NULL) {
1688 return(EINVAL);
1689 }
1690
1691 /*
1692 * Copy the top N bytes of the stack.
1693 * On all machines we have so far, the stack grows
1694 * downwards.
1695 *
1696 * If the user expects no more than N bytes of
1697 * argument list, use that as a guess for the
1698 * size.
1699 */
1700
1701 if (!p->user_stack)
1702 return(EINVAL);
1703
1704 if (where == USER_ADDR_NULL) {
1705 /* caller only wants to know length of proc args data */
1706 if (sizep == NULL)
1707 return(EFAULT);
1708
1709 size = p->p_argslen;
1710 if (argc_yes) {
1711 size += sizeof(int);
1712 }
1713 else {
1714 /*
1715 * old PROCARGS will return the executable's path plus some
1716 * extra space for word alignment and data tags
1717 */
1718 size += PATH_MAX + (6 * sizeof(int));
1719 }
1720 size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
1721 *sizep = size;
1722 return (0);
1723 }
1724
1725 if ((kauth_cred_getuid(p->p_ucred) != kauth_cred_getuid(kauth_cred_get()))
1726 && suser(kauth_cred_get(), &cur_proc->p_acflag))
1727 return (EINVAL);
1728
1729 if ((u_int)arg_size > p->p_argslen)
1730 arg_size = round_page(p->p_argslen);
1731
1732 arg_addr = p->user_stack - arg_size;
1733
1734
1735 /*
1736 * Before we can block (any VM code), make another
1737 * reference to the map to keep it alive. We do
1738 * that by getting a reference on the task itself.
1739 */
1740 task = p->task;
1741 if (task == NULL)
1742 return(EINVAL);
1743
1744 /*
1745 * Once we have a task reference we can convert that into a
1746 * map reference, which we will use in the calls below. The
1747 * task/process may change its map after we take this reference
1748 * (see execve), but the worst that will happen then is a return
1749 * of stale info (which is always a possibility).
1750 */
1751 task_reference(task);
1752 proc_map = get_task_map_reference(task);
1753 task_deallocate(task);
1754 if (proc_map == NULL)
1755 return(EINVAL);
1756
1757
1758 ret = kmem_alloc(kernel_map, &copy_start, round_page(arg_size));
1759 if (ret != KERN_SUCCESS) {
1760 vm_map_deallocate(proc_map);
1761 return(ENOMEM);
1762 }
1763
1764 copy_end = round_page(copy_start + arg_size);
1765
1766 if( vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
1767 (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
1768 vm_map_deallocate(proc_map);
1769 kmem_free(kernel_map, copy_start,
1770 round_page(arg_size));
1771 return (EIO);
1772 }
1773
1774 /*
1775 * Now that we've done the copyin from the process'
1776 * map, we can release the reference to it.
1777 */
1778 vm_map_deallocate(proc_map);
1779
1780 if( vm_map_copy_overwrite(kernel_map,
1781 (vm_map_address_t)copy_start,
1782 tmp, FALSE) != KERN_SUCCESS) {
1783 kmem_free(kernel_map, copy_start,
1784 round_page(arg_size));
1785 return (EIO);
1786 }
1787
1788 if (arg_size > p->p_argslen) {
1789 data = (caddr_t) (copy_end - p->p_argslen);
1790 size = p->p_argslen;
1791 } else {
1792 data = (caddr_t) (copy_end - arg_size);
1793 size = arg_size;
1794 }
1795
1796 if (argc_yes) {
1797 /* Put the process's argc as the first word in the copyout buffer */
1798 suword(where, p->p_argc);
1799 error = copyout(data, (where + sizeof(int)), size);
1800 size += sizeof(int);
1801 } else {
1802 error = copyout(data, where, size);
1803
1804 /*
1805 * Make the old PROCARGS work to return the executable's path
1806 * But, only if there is enough space in the provided buffer
1807 *
1808 * on entry: data [possibily] points to the beginning of the path
1809 *
1810 * Note: we keep all pointers&sizes aligned to word boundries
1811 */
1812 if ( (! error) && (buflen > 0 && (u_int)buflen > p->p_argslen) )
1813 {
1814 int binPath_sz, alignedBinPath_sz = 0;
1815 int extraSpaceNeeded, addThis;
1816 user_addr_t placeHere;
1817 char * str = (char *) data;
1818 int max_len = size;
1819
1820 /* Some apps are really bad about messing up their stacks.
1821 So, we have to be extra careful about getting the length
1822 of the executing binary. If we encounter an error, we bail.
1823 */
1824
1825 /* Limit ourselves to PATH_MAX paths */
1826 if ( max_len > PATH_MAX ) max_len = PATH_MAX;
1827
1828 binPath_sz = 0;
1829
1830 while ( (binPath_sz < max_len-1) && (*str++ != 0) )
1831 binPath_sz++;
1832
1833 /* If we have a NUL terminator, copy it, too */
1834 if (binPath_sz < max_len-1) binPath_sz += 1;
1835
1836 /* Pre-flight the space requirements */
1837
1838 /* Account for the padding that fills out binPath to the next word */
1839 alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;
1840
1841 placeHere = where + size;
1842
1843 /* Account for the bytes needed to keep placeHere word aligned */
1844 addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;
1845
1846 /* Add up all the space that is needed */
1847 extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));
1848
1849 /* is there room to tack on argv[0]? */
1850 if ( (buflen & ~(sizeof(int)-1)) >= ( p->p_argslen + extraSpaceNeeded ))
1851 {
1852 placeHere += addThis;
1853 suword(placeHere, 0);
1854 placeHere += sizeof(int);
1855 suword(placeHere, 0xBFFF0000);
1856 placeHere += sizeof(int);
1857 suword(placeHere, 0);
1858 placeHere += sizeof(int);
1859 error = copyout(data, placeHere, binPath_sz);
1860 if ( ! error )
1861 {
1862 placeHere += binPath_sz;
1863 suword(placeHere, 0);
1864 size += extraSpaceNeeded;
1865 }
1866 }
1867 }
1868 }
1869
1870 if (copy_start != (vm_offset_t) 0) {
1871 kmem_free(kernel_map, copy_start, copy_end - copy_start);
1872 }
1873 if (error) {
1874 return(error);
1875 }
1876
1877 if (where != USER_ADDR_NULL)
1878 *sizep = size;
1879 return (0);
1880 }
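/*
 * A userland sketch of the KERN_PROCARGS2 variant implemented above: the
 * first int of the returned buffer is argc, followed by the saved exec
 * path and the argument/environment strings.  Minimal sketch, assuming
 * the standard sysctl(3) interface and the usual NULL-oldp size probe
 * (illustration only):
 *
 *	#include <err.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *
 *	int
 *	main(void)
 *	{
 *		int mib[3] = { CTL_KERN, KERN_PROCARGS2, getpid() };
 *		size_t len = 0;
 *		char *buf;
 *		int argc;
 *
 *		if (sysctl(mib, 3, NULL, &len, NULL, 0) == -1)
 *			err(1, "sysctl size");
 *		if ((buf = malloc(len)) == NULL)
 *			err(1, "malloc");
 *		if (sysctl(mib, 3, buf, &len, NULL, 0) == -1)
 *			err(1, "sysctl fetch");
 *		if (len < sizeof(argc))
 *			errx(1, "short buffer");
 *		memcpy(&argc, buf, sizeof(argc));
 *		printf("argc=%d exec_path=%s\n", argc, buf + sizeof(argc));
 *		free(buf);
 *		return (0);
 *	}
 */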
1881
1882
1883 /*
1884 * Validate parameters and get old / set new parameters
1885 * for max number of concurrent aio requests. Makes sure
1886 * the system wide limit is greater than the per process
1887 * limit.
1888 */
1889 static int
1890 sysctl_aiomax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen)
1891 {
1892 int error = 0;
1893 int new_value;
1894
1895 if ( oldp && *oldlenp < sizeof(int) )
1896 return (ENOMEM);
1897 if ( newp && newlen != sizeof(int) )
1898 return (EINVAL);
1899
1900 *oldlenp = sizeof(int);
1901 if ( oldp )
1902 error = copyout( &aio_max_requests, oldp, sizeof(int) );
1903 if ( error == 0 && newp )
1904 error = copyin( newp, &new_value, sizeof(int) );
1905 if ( error == 0 && newp ) {
1906 if ( new_value >= aio_max_requests_per_process )
1907 aio_max_requests = new_value;
1908 else
1909 error = EINVAL;
1910 }
1911 return( error );
1912
1913 } /* sysctl_aiomax */
1914
1915
1916 /*
1917 * Validate parameters and get old / set new parameters
1918 * for max number of concurrent aio requests per process.
1919 * Makes sure per process limit is less than the system wide
1920 * limit.
1921 */
1922 static int
1923 sysctl_aioprocmax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen )
1924 {
1925 int error = 0;
1926 int new_value = 0;
1927
1928 if ( oldp && *oldlenp < sizeof(int) )
1929 return (ENOMEM);
1930 if ( newp && newlen != sizeof(int) )
1931 return (EINVAL);
1932
1933 *oldlenp = sizeof(int);
1934 if ( oldp )
1935 error = copyout( &aio_max_requests_per_process, oldp, sizeof(int) );
1936 if ( error == 0 && newp )
1937 error = copyin( newp, &new_value, sizeof(int) );
1938 if ( error == 0 && newp ) {
1939 if ( new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX )
1940 aio_max_requests_per_process = new_value;
1941 else
1942 error = EINVAL;
1943 }
1944 return( error );
1945
1946 } /* sysctl_aioprocmax */
1947
1948
1949 /*
1950 * Validate parameters and get old / set new parameters
1951 * for max number of async IO worker threads.
1952 * We only allow an increase in the number of worker threads.
1953 */
1954 static int
1955 sysctl_aiothreads(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen)
1956 {
1957 int error = 0;
1958 int new_value;
1959
1960 if ( oldp && *oldlenp < sizeof(int) )
1961 return (ENOMEM);
1962 if ( newp && newlen != sizeof(int) )
1963 return (EINVAL);
1964
1965 *oldlenp = sizeof(int);
1966 if ( oldp )
1967 error = copyout( &aio_worker_threads, oldp, sizeof(int) );
1968 if ( error == 0 && newp )
1969 error = copyin( newp, &new_value, sizeof(int) );
1970 if ( error == 0 && newp ) {
1971 if (new_value > aio_worker_threads ) {
1972 _aio_create_worker_threads( (new_value - aio_worker_threads) );
1973 aio_worker_threads = new_value;
1974 }
1975 else
1976 error = EINVAL;
1977 }
1978 return( error );
1979
1980 } /* sysctl_aiothreads */
1981
1982
1983 /*
1984 * Validate parameters and get old / set new parameters
1985 * for max number of processes per UID.
1986 * Makes sure per UID limit is less than the system wide limit.
1987 */
1988 static int
1989 sysctl_maxprocperuid(user_addr_t oldp, size_t *oldlenp,
1990 user_addr_t newp, size_t newlen)
1991 {
1992 int error = 0;
1993 int new_value;
1994
1995 if ( oldp != USER_ADDR_NULL && *oldlenp < sizeof(int) )
1996 return (ENOMEM);
1997 if ( newp != USER_ADDR_NULL && newlen != sizeof(int) )
1998 return (EINVAL);
1999
2000 *oldlenp = sizeof(int);
2001 if ( oldp != USER_ADDR_NULL )
2002 error = copyout( &maxprocperuid, oldp, sizeof(int) );
2003 if ( error == 0 && newp != USER_ADDR_NULL ) {
2004 error = copyin( newp, &new_value, sizeof(int) );
2005 if ( error == 0 ) {
2006 AUDIT_ARG(value, new_value);
2007 if ( new_value <= maxproc && new_value > 0 )
2008 maxprocperuid = new_value;
2009 else
2010 error = EINVAL;
2011 }
2012 else
2013 error = EINVAL;
2014 }
2015 return( error );
2016
2017 } /* sysctl_maxprocperuid */
2018
2019
2020 /*
2021 * Validate parameters and get old / set new parameters
2022 * for max number of files per process.
2023 * Makes sure per process limit is less than the system-wide limit.
2024 */
2025 static int
2026 sysctl_maxfilesperproc(user_addr_t oldp, size_t *oldlenp,
2027 user_addr_t newp, size_t newlen)
2028 {
2029 int error = 0;
2030 int new_value;
2031
2032 if ( oldp != USER_ADDR_NULL && *oldlenp < sizeof(int) )
2033 return (ENOMEM);
2034 if ( newp != USER_ADDR_NULL && newlen != sizeof(int) )
2035 return (EINVAL);
2036
2037 *oldlenp = sizeof(int);
2038 if ( oldp != USER_ADDR_NULL )
2039 error = copyout( &maxfilesperproc, oldp, sizeof(int) );
2040 if ( error == 0 && newp != USER_ADDR_NULL ) {
2041 error = copyin( newp, &new_value, sizeof(int) );
2042 if ( error == 0 ) {
2043 AUDIT_ARG(value, new_value);
2044 if ( new_value < maxfiles && new_value > 0 )
2045 maxfilesperproc = new_value;
2046 else
2047 error = EINVAL;
2048 }
2049 else
2050 error = EINVAL;
2051 }
2052 return( error );
2053
2054 } /* sysctl_maxfilesperproc */
2055
2056
2057 /*
2058 * Validate parameters and get old / set new parameters
2059 * for the system-wide limit on the max number of processes.
2060 * Makes sure the system-wide limit is less than the configured hard
2061 * limit set at kernel compilation.
2062 */
2063 static int
2064 sysctl_maxproc(user_addr_t oldp, size_t *oldlenp,
2065 user_addr_t newp, size_t newlen )
2066 {
2067 int error = 0;
2068 int new_value;
2069
2070 if ( oldp != USER_ADDR_NULL && *oldlenp < sizeof(int) )
2071 return (ENOMEM);
2072 if ( newp != USER_ADDR_NULL && newlen != sizeof(int) )
2073 return (EINVAL);
2074
2075 *oldlenp = sizeof(int);
2076 if ( oldp != USER_ADDR_NULL )
2077 error = copyout( &maxproc, oldp, sizeof(int) );
2078 if ( error == 0 && newp != USER_ADDR_NULL ) {
2079 error = copyin( newp, &new_value, sizeof(int) );
2080 if ( error == 0 ) {
2081 AUDIT_ARG(value, new_value);
2082 if ( new_value <= hard_maxproc && new_value > 0 )
2083 maxproc = new_value;
2084 else
2085 error = EINVAL;
2086 }
2087 else
2088 error = EINVAL;
2089 }
2090 return( error );
2091
2092 } /* sysctl_maxproc */