/*
 * bsd/kern/kern_sysctl.c
 * (apple/xnu, commit b441586ec06a135af9d5879180939861a140471d)
 */
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
31 /*-
32 * Copyright (c) 1982, 1986, 1989, 1993
33 * The Regents of the University of California. All rights reserved.
34 *
35 * This code is derived from software contributed to Berkeley by
36 * Mike Karels at Berkeley Software Design, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
67 */
68
69 /*
70 * sysctl system call.
71 */
72
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/kernel.h>
76 #include <sys/malloc.h>
77 #include <sys/proc_internal.h>
78 #include <sys/kauth.h>
79 #include <sys/file_internal.h>
80 #include <sys/vnode_internal.h>
81 #include <sys/unistd.h>
82 #include <sys/buf.h>
83 #include <sys/ioctl.h>
84 #include <sys/namei.h>
85 #include <sys/tty.h>
86 #include <sys/disklabel.h>
87 #include <sys/vm.h>
88 #include <sys/sysctl.h>
89 #include <sys/user.h>
90 #include <sys/aio_kern.h>
91
92 #include <bsm/audit_kernel.h>
93
94 #include <mach/machine.h>
95 #include <mach/mach_types.h>
96 #include <mach/vm_param.h>
97 #include <kern/task.h>
98 #include <kern/lock.h>
99 #include <vm/vm_kern.h>
100 #include <vm/vm_map.h>
101 #include <mach/host_info.h>
102
103 extern vm_map_t bsd_pageable_map;
104
105 #include <sys/mount_internal.h>
106 #include <sys/kdebug.h>
107 #include <sys/sysproto.h>
108
109 #include <IOKit/IOPlatformExpert.h>
110 #include <pexpert/pexpert.h>
111
112 #include <machine/machine_routines.h>
113 #include <machine/exec.h>
114
115 #include <vm/vm_protos.h>
116
117 #ifdef __i386__
118 #include <i386/cpuid.h>
119 #endif
120
121 sysctlfn kern_sysctl;
122 #ifdef DEBUG
123 sysctlfn debug_sysctl;
124 #endif
125 extern sysctlfn vm_sysctl;
126 extern sysctlfn vfs_sysctl;
127 extern sysctlfn net_sysctl;
128 extern sysctlfn cpu_sysctl;
129 extern int aio_max_requests;
130 extern int aio_max_requests_per_process;
131 extern int aio_worker_threads;
132 extern int maxfilesperproc;
133 extern int lowpri_IO_window_msecs;
134 extern int lowpri_IO_delay_msecs;
135 extern int nx_enabled;
136
137 static void
138 fill_eproc(struct proc *p, struct eproc *ep);
139 static void
140 fill_externproc(struct proc *p, struct extern_proc *exp);
141 static void
142 fill_user_eproc(struct proc *p, struct user_eproc *ep);
143 static void
144 fill_user_proc(struct proc *p, struct user_kinfo_proc *kp);
145 static void
146 fill_user_externproc(struct proc *p, struct user_extern_proc *exp);
147 extern int
148 kdbg_control(int *name, u_int namelen, user_addr_t where, size_t * sizep);
149 int
150 kdebug_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep, struct proc *p);
151 #if NFSCLIENT
152 extern int
153 netboot_root(void);
154 #endif
155 int
156 pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
157 struct proc *p);
158 __private_extern__ kern_return_t
159 reset_vmobjectcache(unsigned int val1, unsigned int val2);
160 extern int
161 resize_namecache(u_int newsize);
162 static int
163 sysctl_aiomax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen);
164 static int
165 sysctl_aioprocmax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen);
166 static int
167 sysctl_aiothreads(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen);
168 extern int
169 sysctl_clockrate(user_addr_t where, size_t *sizep);
170 int
171 sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep);
172 int
173 sysctl_doprof(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
174 user_addr_t newp, size_t newlen);
175 int
176 sysctl_file(user_addr_t where, size_t *sizep);
177 static void
178 fill_proc(struct proc *p, struct kinfo_proc *kp);
179 static int
180 sysctl_maxfilesperproc(user_addr_t oldp, size_t *oldlenp,
181 user_addr_t newp, size_t newlen);
182 static int
183 sysctl_maxprocperuid(user_addr_t oldp, size_t *oldlenp,
184 user_addr_t newp, size_t newlen);
185 static int
186 sysctl_maxproc(user_addr_t oldp, size_t *oldlenp,
187 user_addr_t newp, size_t newlen);
188 int
189 sysctl_procargs(int *name, u_int namelen, user_addr_t where,
190 size_t *sizep, struct proc *cur_proc);
191 static int
192 sysctl_procargs2(int *name, u_int namelen, user_addr_t where, size_t *sizep,
193 struct proc *cur_proc);
194 static int
195 sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
196 struct proc *cur_proc, int argc_yes);
197 int
198 sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
199 size_t newlen, void *sp, int len);
200 extern int
201 sysctl_vnode(user_addr_t where, size_t *sizep);
202
203
204 /*
205 * temporary location for vm_sysctl. This should be machine independant
206 */
207
208 extern uint32_t mach_factor[3];
209
210 static void
211 loadavg32to64(struct loadavg *la32, struct user_loadavg *la64)
212 {
213 la64->ldavg[0] = la32->ldavg[0];
214 la64->ldavg[1] = la32->ldavg[1];
215 la64->ldavg[2] = la32->ldavg[2];
216 la64->fscale = (user_long_t)la32->fscale;
217 }
218
219 int
220 vm_sysctl(int *name, __unused u_int namelen, user_addr_t oldp, size_t *oldlenp,
221 user_addr_t newp, size_t newlen, __unused struct proc *p)
222 {
223 struct loadavg loadinfo;
224
225 switch (name[0]) {
226 case VM_LOADAVG:
227 if (proc_is64bit(p)) {
228 struct user_loadavg loadinfo64;
229 loadavg32to64(&averunnable, &loadinfo64);
230 return (sysctl_struct(oldp, oldlenp, newp, newlen,
231 &loadinfo64, sizeof(loadinfo64)));
232 } else {
233 return (sysctl_struct(oldp, oldlenp, newp, newlen,
234 &averunnable, sizeof(struct loadavg)));
235 }
236 case VM_MACHFACTOR:
237 loadinfo.ldavg[0] = mach_factor[0];
238 loadinfo.ldavg[1] = mach_factor[1];
239 loadinfo.ldavg[2] = mach_factor[2];
240 loadinfo.fscale = LSCALE;
241 if (proc_is64bit(p)) {
242 struct user_loadavg loadinfo64;
243 loadavg32to64(&loadinfo, &loadinfo64);
244 return (sysctl_struct(oldp, oldlenp, newp, newlen,
245 &loadinfo64, sizeof(loadinfo64)));
246 } else {
247 return (sysctl_struct(oldp, oldlenp, newp, newlen,
248 &loadinfo, sizeof(struct loadavg)));
249 }
250 case VM_SWAPUSAGE: {
251 int error;
252 uint64_t swap_total;
253 uint64_t swap_avail;
254 uint32_t swap_pagesize;
255 boolean_t swap_encrypted;
256 struct xsw_usage xsu;
257
258 error = macx_swapinfo(&swap_total,
259 &swap_avail,
260 &swap_pagesize,
261 &swap_encrypted);
262 if (error)
263 return error;
264
265 xsu.xsu_total = swap_total;
266 xsu.xsu_avail = swap_avail;
267 xsu.xsu_used = swap_total - swap_avail;
268 xsu.xsu_pagesize = swap_pagesize;
269 xsu.xsu_encrypted = swap_encrypted;
270 return sysctl_struct(oldp, oldlenp, newp, newlen,
271 &xsu, sizeof (struct xsw_usage));
272 }
273 case VM_METER:
274 return (ENOTSUP);
275 case VM_MAXID:
276 return (ENOTSUP);
277 default:
278 return (ENOTSUP);
279 }
280 /* NOTREACHED */
281 return (ENOTSUP);
282 }
283
/*
 * Locking and stats
 *
 * Serializes old-style sysctl requests that wire user memory (see
 * __sysctl()).  NOTE(review): this is a hand-rolled sleep/wakeup
 * lock, not a real mutex; sl_locked is only a contention counter.
 */
static struct sysctl_lock {
	int	sl_lock;	/* 1 while a request holds the lock */
	int	sl_want;	/* a sleeper is waiting for a wakeup */
	int	sl_locked;	/* times a sleeper was woken (stats only) */
} memlock;
292
/*
 * __sysctl system call entry point (old-style sysctl interface).
 *
 * Copies in the MIB name vector, applies a superuser check for most
 * writes, dispatches to the per-subtree handler (kern/vm/vfs/debug),
 * and falls back to the new-style OID table via userland_sysctl()
 * when the handler reports ENOTSUP.  When the caller supplies an
 * output buffer, the buffer may be wired (vslock) and the global
 * memlock sleep lock taken around the handler call.
 */
int
__sysctl(struct proc *p, struct __sysctl_args *uap, __unused register_t *retval)
{
	int error, dolock = 1;
	size_t savelen = 0, oldlen = 0, newlen;
	sysctlfn *fnp = NULL;
	int name[CTL_MAXNAME];
	int i;
	int error1;

	/*
	 * all top-level sysctl names are non-terminal
	 */
	if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)
		return (EINVAL);
	error = copyin(uap->name, &name[0], uap->namelen * sizeof(int));
	if (error)
		return (error);

	AUDIT_ARG(ctlname, name, uap->namelen);

	if (proc_is64bit(p)) {
		/* uap->newlen is a size_t value which grows to 64 bits
		 * when coming from a 64-bit process.  since it's doubtful we'll
		 * have a sysctl newp buffer greater than 4GB we shrink it to size_t
		 */
		newlen = CAST_DOWN(size_t, uap->newlen);
	}
	else {
		newlen = uap->newlen;
	}

	/* CTL_UNSPEC is used to get oid to AUTO_OID */
	if (uap->new != USER_ADDR_NULL
	    && ((name[0] == CTL_KERN
		&& !(name[1] == KERN_IPC || name[1] == KERN_PANICINFO || name[1] == KERN_PROCDELAYTERM ||
		     name[1] == KERN_PROC_LOW_PRI_IO || name[1] == KERN_PROCNAME || name[1] == KERN_THALTSTACK))
	    || (name[0] == CTL_HW)
	    || (name[0] == CTL_VM)
	    || (name[0] == CTL_VFS))
	    && (error = suser(kauth_cred_get(), &p->p_acflag)))
		return (error);

	switch (name[0]) {
	case CTL_KERN:
		fnp = kern_sysctl;
		/* only VNODE/FILE/PROC need the output buffer wired */
		if ((name[1] != KERN_VNODE) && (name[1] != KERN_FILE)
			&& (name[1] != KERN_PROC))
			dolock = 0;
		break;
	case CTL_VM:
		fnp = vm_sysctl;
		break;

	case CTL_VFS:
		fnp = vfs_sysctl;
		break;
#ifdef DEBUG
	case CTL_DEBUG:
		fnp = debug_sysctl;
		break;
#endif
	default:
		fnp = NULL;
	}

	if (uap->oldlenp != USER_ADDR_NULL) {
		uint64_t	oldlen64 = fuulong(uap->oldlenp);

		oldlen = CAST_DOWN(size_t, oldlen64);
		/*
		 * If more than 4G, clamp to 4G - useracc() below will catch
		 * with an EFAULT, if it's actually necessary.
		 */
		if (oldlen64 > 0x00000000ffffffffULL)
			oldlen = 0xffffffffUL;
	}

	if (uap->old != USER_ADDR_NULL) {
		if (!useracc(uap->old, (user_size_t)oldlen, B_WRITE))
			return (EFAULT);

		/* The pc sampling mechanism does not need to take this lock */
		/*
		 * NOTE(review): name[2] is read here even when namelen == 2,
		 * in which case it is uninitialized stack - verify callers.
		 */
		if ((name[1] != KERN_PCSAMPLES) &&
		    (!((name[1] == KERN_KDEBUG) && (name[2] == KERN_KDGETENTROPY)))) {
			while (memlock.sl_lock) {
				memlock.sl_want = 1;
				sleep((caddr_t)&memlock, PRIBIO+1);
				memlock.sl_locked++;
			}
			memlock.sl_lock = 1;
		}

		if (dolock && oldlen &&
		    (error = vslock(uap->old, (user_size_t)oldlen))) {
			/* wiring failed: undo the lock acquisition above */
			if ((name[1] != KERN_PCSAMPLES) &&
			    (! ((name[1] == KERN_KDEBUG) && (name[2] == KERN_KDGETENTROPY)))) {
				memlock.sl_lock = 0;
				if (memlock.sl_want) {
					memlock.sl_want = 0;
					wakeup((caddr_t)&memlock);
				}
			}
			return(error);
		}
		savelen = oldlen;
	}

	/* dispatch to the subtree handler; the handler updates oldlen */
	if (fnp) {
		error = (*fnp)(name + 1, uap->namelen - 1, uap->old,
			       &oldlen, uap->new, newlen, p);
	}
	else
		error = ENOTSUP;

	/* fall back to the new-style (OID table) sysctl mechanism */
	if ( (name[0] != CTL_VFS) && (error == ENOTSUP)) {
		size_t	tmp = oldlen;
		error = userland_sysctl(p, name, uap->namelen, uap->old, &tmp,
		                        1, uap->new, newlen, &oldlen);
	}

	if (uap->old != USER_ADDR_NULL) {
		if (dolock && savelen) {
			error1 = vsunlock(uap->old, (user_size_t)savelen, B_WRITE);
			if (!error && error1)
				error = error1;
		}
		/*
		 * NOTE(review): release tests only KERN_PCSAMPLES, but the
		 * acquisition above also exempted KERN_KDEBUG/KERN_KDGETENTROPY;
		 * that path clears sl_lock it never took - confirm intentional.
		 */
		if (name[1] != KERN_PCSAMPLES) {
			memlock.sl_lock = 0;
			if (memlock.sl_want) {
				memlock.sl_want = 0;
				wakeup((caddr_t)&memlock);
			}
		}
	}
	/* ENOMEM still reports the required length to the caller below */
	if ((error) && (error != ENOMEM))
		return (error);

	if (uap->oldlenp != USER_ADDR_NULL) {
		i = suulong(uap->oldlenp, oldlen);
		if (i)
			return i;
	}

	return (error);
}
439
440 /*
441 * Attributes stored in the kernel.
442 */
443 __private_extern__ char corefilename[MAXPATHLEN+1];
444 __private_extern__ int do_coredump;
445 __private_extern__ int sugid_coredump;
446
447
448 #ifdef INSECURE
449 int securelevel = -1;
450 #else
451 int securelevel;
452 #endif
453
454 static int
455 sysctl_affinity(
456 int *name,
457 u_int namelen,
458 user_addr_t oldBuf,
459 size_t *oldSize,
460 user_addr_t newBuf,
461 __unused size_t newSize,
462 struct proc *cur_proc)
463 {
464 if (namelen < 1)
465 return (ENOTSUP);
466
467 if (name[0] == 0 && 1 == namelen) {
468 return sysctl_rdint(oldBuf, oldSize, newBuf,
469 (cur_proc->p_flag & P_AFFINITY) ? 1 : 0);
470 } else if (name[0] == 1 && 2 == namelen) {
471 if (name[1] == 0) {
472 cur_proc->p_flag &= ~P_AFFINITY;
473 } else {
474 cur_proc->p_flag |= P_AFFINITY;
475 }
476 return 0;
477 }
478 return (ENOTSUP);
479 }
480
481
482 static int
483 sysctl_translate(
484 int *name,
485 u_int namelen,
486 user_addr_t oldBuf,
487 size_t *oldSize,
488 user_addr_t newBuf,
489 __unused size_t newSize,
490 struct proc *cur_proc)
491 {
492 struct proc *p;
493
494 if (namelen != 1)
495 return (ENOTSUP);
496
497 p = pfind(name[0]);
498 if (p == NULL)
499 return (EINVAL);
500
501 if ((kauth_cred_getuid(p->p_ucred) != kauth_cred_getuid(kauth_cred_get()))
502 && suser(kauth_cred_get(), &cur_proc->p_acflag))
503 return (EPERM);
504
505 return sysctl_rdint(oldBuf, oldSize, newBuf,
506 (p->p_flag & P_TRANSLATED) ? 1 : 0);
507 }
508
/*
 * Resolve and validate the configured architecture-handler binary
 * for 'arch', recording its fsid/fileid in exec_archhandler_ppc.
 *
 * Only CPU_TYPE_POWERPC is supported; anything else returns EBADARCH.
 * The path must resolve to a regular file on a mount that allows
 * execution, otherwise EACCES.  On success the vnode's fsid/fileid
 * are cached for later comparison at exec time.
 */
int
set_archhandler(struct proc *p, int arch)
{
	int error;
	struct nameidata nd;
	struct vnode_attr va;
	struct vfs_context context;
	char *archhandler;

	switch(arch) {
	case CPU_TYPE_POWERPC:
		archhandler = exec_archhandler_ppc.path;
		break;
	default:
		return (EBADARCH);
	}

	context.vc_proc = p;
	context.vc_ucred = kauth_cred_get();

	/* namei() takes a reference on the resulting vnode (nd.ni_vp) */
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE32,
		   CAST_USER_ADDR_T(archhandler), &context);
	error = namei(&nd);
	if (error)
		return (error);
	nameidone(&nd);

	/* Check mount point */
	if ((nd.ni_vp->v_mount->mnt_flag & MNT_NOEXEC) ||
		(nd.ni_vp->v_type != VREG)) {
		vnode_put(nd.ni_vp);
		return (EACCES);
	}

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_fsid);
	VATTR_WANTED(&va, va_fileid);
	error = vnode_getattr(nd.ni_vp, &va, &context);
	if (error) {
		vnode_put(nd.ni_vp);
		return (error);
	}
	/* done with the vnode; drop the namei() reference */
	vnode_put(nd.ni_vp);

	exec_archhandler_ppc.fsid = va.va_fsid;
	exec_archhandler_ppc.fileid = (u_long)va.va_fileid;
	return 0;
}
557
558 static int
559 sysctl_exec_archhandler_ppc(
560 __unused int *name,
561 __unused u_int namelen,
562 user_addr_t oldBuf,
563 size_t *oldSize,
564 user_addr_t newBuf,
565 size_t newSize,
566 struct proc *p)
567 {
568 int error;
569 size_t len;
570 struct nameidata nd;
571 struct vnode_attr va;
572 char handler[sizeof(exec_archhandler_ppc.path)];
573 struct vfs_context context;
574
575 context.vc_proc = p;
576 context.vc_ucred = kauth_cred_get();
577
578 if (oldSize) {
579 len = strlen(exec_archhandler_ppc.path) + 1;
580 if (oldBuf) {
581 if (*oldSize < len)
582 return (ENOMEM);
583 error = copyout(exec_archhandler_ppc.path, oldBuf, len);
584 if (error)
585 return (error);
586 }
587 *oldSize = len - 1;
588 }
589 if (newBuf) {
590 error = suser(context.vc_ucred, &p->p_acflag);
591 if (error)
592 return (error);
593 if (newSize >= sizeof(exec_archhandler_ppc.path))
594 return (ENAMETOOLONG);
595 error = copyin(newBuf, handler, newSize);
596 if (error)
597 return (error);
598 handler[newSize] = 0;
599 strcpy(exec_archhandler_ppc.path, handler);
600 error = set_archhandler(p, CPU_TYPE_POWERPC);
601 if (error)
602 return (error);
603 }
604 return 0;
605 }
606
607 SYSCTL_NODE(_kern, KERN_EXEC, exec, CTLFLAG_RD, 0, "");
608
609 SYSCTL_NODE(_kern_exec, OID_AUTO, archhandler, CTLFLAG_RD, 0, "");
610
611 SYSCTL_STRING(_kern_exec_archhandler, OID_AUTO, powerpc, CTLFLAG_RD,
612 exec_archhandler_ppc.path, 0, "");
613
614 extern int get_kernel_symfile( struct proc *, char **);
615 __private_extern__ int
616 sysctl_dopanicinfo(int *, u_int, user_addr_t, size_t *, user_addr_t,
617 size_t, struct proc *);
618
619 /*
620 * kernel related system variables.
621 */
622 int
623 kern_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
624 user_addr_t newp, size_t newlen, struct proc *p)
625 {
626 int error, level, inthostid, tmp;
627 unsigned int oldval=0;
628 char *str;
629 /* all sysctl names not listed below are terminal at this level */
630 if (namelen != 1
631 && !(name[0] == KERN_PROC
632 || name[0] == KERN_PROF
633 || name[0] == KERN_KDEBUG
634 || name[0] == KERN_PROCARGS
635 || name[0] == KERN_PROCARGS2
636 || name[0] == KERN_PCSAMPLES
637 || name[0] == KERN_IPC
638 || name[0] == KERN_SYSV
639 || name[0] == KERN_AFFINITY
640 || name[0] == KERN_TRANSLATE
641 || name[0] == KERN_EXEC
642 || name[0] == KERN_PANICINFO
643 || name[0] == KERN_POSIX
644 || name[0] == KERN_TFP)
645 )
646 return (ENOTDIR); /* overloaded */
647
648 switch (name[0]) {
649 case KERN_OSTYPE:
650 return (sysctl_rdstring(oldp, oldlenp, newp, ostype));
651 case KERN_OSRELEASE:
652 return (sysctl_rdstring(oldp, oldlenp, newp, osrelease));
653 case KERN_OSREV:
654 return (sysctl_rdint(oldp, oldlenp, newp, BSD));
655 case KERN_VERSION:
656 return (sysctl_rdstring(oldp, oldlenp, newp, version));
657 case KERN_MAXVNODES:
658 oldval = desiredvnodes;
659 error = sysctl_int(oldp, oldlenp, newp,
660 newlen, &desiredvnodes);
661 reset_vmobjectcache(oldval, desiredvnodes);
662 resize_namecache(desiredvnodes);
663 return(error);
664 case KERN_MAXPROC:
665 return (sysctl_maxproc(oldp, oldlenp, newp, newlen));
666 case KERN_MAXFILES:
667 return (sysctl_int(oldp, oldlenp, newp, newlen, &maxfiles));
668 case KERN_MAXPROCPERUID:
669 return( sysctl_maxprocperuid( oldp, oldlenp, newp, newlen ) );
670 case KERN_MAXFILESPERPROC:
671 return( sysctl_maxfilesperproc( oldp, oldlenp, newp, newlen ) );
672 case KERN_ARGMAX:
673 return (sysctl_rdint(oldp, oldlenp, newp, ARG_MAX));
674 case KERN_SECURELVL:
675 level = securelevel;
676 if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &level)) ||
677 newp == USER_ADDR_NULL)
678 return (error);
679 if (level < securelevel && p->p_pid != 1)
680 return (EPERM);
681 securelevel = level;
682 return (0);
683 case KERN_HOSTNAME:
684 error = sysctl_trstring(oldp, oldlenp, newp, newlen,
685 hostname, sizeof(hostname));
686 if (newp && !error)
687 hostnamelen = newlen;
688 return (error);
689 case KERN_DOMAINNAME:
690 error = sysctl_string(oldp, oldlenp, newp, newlen,
691 domainname, sizeof(domainname));
692 if (newp && !error)
693 domainnamelen = newlen;
694 return (error);
695 case KERN_HOSTID:
696 inthostid = hostid; /* XXX assumes sizeof long <= sizeof int */
697 error = sysctl_int(oldp, oldlenp, newp, newlen, &inthostid);
698 hostid = inthostid;
699 return (error);
700 case KERN_CLOCKRATE:
701 return (sysctl_clockrate(oldp, oldlenp));
702 case KERN_BOOTTIME:
703 {
704 struct timeval t;
705
706 t.tv_sec = boottime_sec();
707 t.tv_usec = 0;
708
709 return (sysctl_rdstruct(oldp, oldlenp, newp, &t,
710 sizeof(struct timeval)));
711 }
712 case KERN_VNODE:
713 return (sysctl_vnode(oldp, oldlenp));
714 case KERN_PROC:
715 return (sysctl_doproc(name + 1, namelen - 1, oldp, oldlenp));
716 case KERN_FILE:
717 return (sysctl_file(oldp, oldlenp));
718 #ifdef GPROF
719 case KERN_PROF:
720 return (sysctl_doprof(name + 1, namelen - 1, oldp, oldlenp,
721 newp, newlen));
722 #endif
723 case KERN_POSIX1:
724 return (sysctl_rdint(oldp, oldlenp, newp, _POSIX_VERSION));
725 case KERN_NGROUPS:
726 return (sysctl_rdint(oldp, oldlenp, newp, NGROUPS_MAX));
727 case KERN_JOB_CONTROL:
728 return (sysctl_rdint(oldp, oldlenp, newp, 1));
729 case KERN_SAVED_IDS:
730 #ifdef _POSIX_SAVED_IDS
731 return (sysctl_rdint(oldp, oldlenp, newp, 1));
732 #else
733 return (sysctl_rdint(oldp, oldlenp, newp, 0));
734 #endif
735 case KERN_KDEBUG:
736 return (kdebug_ops(name + 1, namelen - 1, oldp, oldlenp, p));
737 case KERN_PCSAMPLES:
738 return (pcsamples_ops(name + 1, namelen - 1, oldp, oldlenp, p));
739 case KERN_PROCARGS:
740 /* new one as it does not use kinfo_proc */
741 return (sysctl_procargs(name + 1, namelen - 1, oldp, oldlenp, p));
742 case KERN_PROCARGS2:
743 /* new one as it does not use kinfo_proc */
744 return (sysctl_procargs2(name + 1, namelen - 1, oldp, oldlenp, p));
745 case KERN_SYMFILE:
746 error = get_kernel_symfile( p, &str );
747 if ( error )
748 return error;
749 return (sysctl_rdstring(oldp, oldlenp, newp, str));
750 #if NFSCLIENT
751 case KERN_NETBOOT:
752 return (sysctl_rdint(oldp, oldlenp, newp, netboot_root()));
753 #endif
754 case KERN_PANICINFO:
755 return(sysctl_dopanicinfo(name + 1, namelen - 1, oldp, oldlenp,
756 newp, newlen, p));
757 case KERN_AFFINITY:
758 return sysctl_affinity(name+1, namelen-1, oldp, oldlenp,
759 newp, newlen, p);
760 case KERN_TRANSLATE:
761 return sysctl_translate(name+1, namelen-1, oldp, oldlenp, newp,
762 newlen, p);
763 case KERN_CLASSICHANDLER:
764 return sysctl_exec_archhandler_ppc(name+1, namelen-1, oldp,
765 oldlenp, newp, newlen, p);
766 case KERN_AIOMAX:
767 return( sysctl_aiomax( oldp, oldlenp, newp, newlen ) );
768 case KERN_AIOPROCMAX:
769 return( sysctl_aioprocmax( oldp, oldlenp, newp, newlen ) );
770 case KERN_AIOTHREADS:
771 return( sysctl_aiothreads( oldp, oldlenp, newp, newlen ) );
772 case KERN_USRSTACK:
773 return (sysctl_rdint(oldp, oldlenp, newp, (uintptr_t)p->user_stack));
774 case KERN_USRSTACK64:
775 return (sysctl_rdquad(oldp, oldlenp, newp, p->user_stack));
776 case KERN_COREFILE:
777 error = sysctl_string(oldp, oldlenp, newp, newlen,
778 corefilename, sizeof(corefilename));
779 return (error);
780 case KERN_COREDUMP:
781 tmp = do_coredump;
782 error = sysctl_int(oldp, oldlenp, newp, newlen, &do_coredump);
783 if (!error && ((do_coredump < 0) || (do_coredump > 1))) {
784 do_coredump = tmp;
785 error = EINVAL;
786 }
787 return (error);
788 case KERN_SUGID_COREDUMP:
789 tmp = sugid_coredump;
790 error = sysctl_int(oldp, oldlenp, newp, newlen, &sugid_coredump);
791 if (!error && ((sugid_coredump < 0) || (sugid_coredump > 1))) {
792 sugid_coredump = tmp;
793 error = EINVAL;
794 }
795 return (error);
796 case KERN_PROCDELAYTERM:
797 {
798 int old_value, new_value;
799
800 error = 0;
801 if (oldp && *oldlenp < sizeof(int))
802 return (ENOMEM);
803 if ( newp && newlen != sizeof(int) )
804 return(EINVAL);
805 *oldlenp = sizeof(int);
806 old_value = (p->p_lflag & P_LDELAYTERM)? 1: 0;
807 if (oldp && (error = copyout( &old_value, oldp, sizeof(int))))
808 return(error);
809 if (error == 0 && newp )
810 error = copyin( newp, &new_value, sizeof(int) );
811 if (error == 0 && newp) {
812 if (new_value)
813 p->p_lflag |= P_LDELAYTERM;
814 else
815 p->p_lflag &= ~P_LDELAYTERM;
816 }
817 return(error);
818 }
819 case KERN_PROC_LOW_PRI_IO:
820 {
821 int old_value, new_value;
822
823 error = 0;
824 if (oldp && *oldlenp < sizeof(int))
825 return (ENOMEM);
826 if ( newp && newlen != sizeof(int) )
827 return(EINVAL);
828 *oldlenp = sizeof(int);
829
830 old_value = (p->p_lflag & P_LLOW_PRI_IO)? 0x01: 0;
831 if (p->p_lflag & P_LBACKGROUND_IO)
832 old_value |= 0x02;
833
834 if (oldp && (error = copyout( &old_value, oldp, sizeof(int))))
835 return(error);
836 if (error == 0 && newp )
837 error = copyin( newp, &new_value, sizeof(int) );
838 if (error == 0 && newp) {
839 if (new_value & 0x01)
840 p->p_lflag |= P_LLOW_PRI_IO;
841 else if (new_value & 0x02)
842 p->p_lflag |= P_LBACKGROUND_IO;
843 else if (new_value == 0)
844 p->p_lflag &= ~(P_LLOW_PRI_IO | P_LBACKGROUND_IO);
845 }
846 return(error);
847 }
848 case KERN_LOW_PRI_WINDOW:
849 {
850 int old_value, new_value;
851
852 error = 0;
853 if (oldp && *oldlenp < sizeof(old_value) )
854 return (ENOMEM);
855 if ( newp && newlen != sizeof(new_value) )
856 return(EINVAL);
857 *oldlenp = sizeof(old_value);
858
859 old_value = lowpri_IO_window_msecs;
860
861 if (oldp && (error = copyout( &old_value, oldp, *oldlenp)))
862 return(error);
863 if (error == 0 && newp )
864 error = copyin( newp, &new_value, sizeof(newlen) );
865 if (error == 0 && newp) {
866 lowpri_IO_window_msecs = new_value;
867 }
868 return(error);
869 }
870 case KERN_LOW_PRI_DELAY:
871 {
872 int old_value, new_value;
873
874 error = 0;
875 if (oldp && *oldlenp < sizeof(old_value) )
876 return (ENOMEM);
877 if ( newp && newlen != sizeof(new_value) )
878 return(EINVAL);
879 *oldlenp = sizeof(old_value);
880
881 old_value = lowpri_IO_delay_msecs;
882
883 if (oldp && (error = copyout( &old_value, oldp, *oldlenp)))
884 return(error);
885 if (error == 0 && newp )
886 error = copyin( newp, &new_value, sizeof(newlen) );
887 if (error == 0 && newp) {
888 lowpri_IO_delay_msecs = new_value;
889 }
890 return(error);
891 }
892 case KERN_NX_PROTECTION:
893 {
894 int old_value, new_value;
895
896 error = 0;
897 if (oldp && *oldlenp < sizeof(old_value) )
898 return (ENOMEM);
899 if ( newp && newlen != sizeof(new_value) )
900 return(EINVAL);
901 *oldlenp = sizeof(old_value);
902
903 old_value = nx_enabled;
904
905 if (oldp && (error = copyout( &old_value, oldp, *oldlenp)))
906 return(error);
907 #ifdef __i386__
908 /*
909 * Only allow setting if NX is supported on the chip
910 */
911 if (cpuid_extfeatures() & CPUID_EXTFEATURE_XD) {
912 #endif
913 if (error == 0 && newp)
914 error = copyin(newp, &new_value,
915 sizeof(newlen));
916 if (error == 0 && newp)
917 nx_enabled = new_value;
918 #ifdef __i386__
919 } else if (newp) {
920 error = ENOTSUP;
921 }
922 #endif
923 return(error);
924 }
925 case KERN_SHREG_PRIVATIZABLE:
926 /* this kernel does implement shared_region_make_private_np() */
927 return (sysctl_rdint(oldp, oldlenp, newp, 1));
928 case KERN_PROCNAME:
929 error = sysctl_trstring(oldp, oldlenp, newp, newlen,
930 &p->p_name[0], (2*MAXCOMLEN+1));
931 return (error);
932 case KERN_THALTSTACK:
933 {
934 int old_value, new_value;
935
936 error = 0;
937 if (oldp && *oldlenp < sizeof(int))
938 return (ENOMEM);
939 if ( newp && newlen != sizeof(int) )
940 return(EINVAL);
941 *oldlenp = sizeof(int);
942 old_value = (p->p_lflag & P_LTHSIGSTACK)? 1: 0;
943 if (oldp && (error = copyout( &old_value, oldp, sizeof(int))))
944 return(error);
945 if (error == 0 && newp )
946 error = copyin( newp, &new_value, sizeof(int) );
947 if (error == 0 && newp) {
948 if (new_value) {
949 /* we cannot swich midstream if inuse */
950 if ((p->p_sigacts->ps_flags & SAS_ALTSTACK) == SAS_ALTSTACK)
951 return(EPERM);
952 p->p_lflag |= P_LTHSIGSTACK;
953 } else {
954 /* we cannot swich midstream */
955 if ((p->p_lflag & P_LTHSIGSTACK) == P_LTHSIGSTACK)
956 return(EPERM);
957 p->p_lflag &= ~P_LTHSIGSTACK;
958 }
959 }
960 return(error);
961 }
962 default:
963 return (ENOTSUP);
964 }
965 /* NOTREACHED */
966 }
967
968 #ifdef DEBUG
969 /*
970 * Debugging related system variables.
971 */
972 #if DIAGNOSTIC
973 extern
974 #endif /* DIAGNOSTIC */
975 struct ctldebug debug0, debug1;
976 struct ctldebug debug2, debug3, debug4;
977 struct ctldebug debug5, debug6, debug7, debug8, debug9;
978 struct ctldebug debug10, debug11, debug12, debug13, debug14;
979 struct ctldebug debug15, debug16, debug17, debug18, debug19;
980 static struct ctldebug *debugvars[CTL_DEBUG_MAXID] = {
981 &debug0, &debug1, &debug2, &debug3, &debug4,
982 &debug5, &debug6, &debug7, &debug8, &debug9,
983 &debug10, &debug11, &debug12, &debug13, &debug14,
984 &debug15, &debug16, &debug17, &debug18, &debug19,
985 };
986 int
987 debug_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
988 user_addr_t newp, size_t newlen, struct proc *p)
989 {
990 struct ctldebug *cdp;
991
992 /* all sysctl names at this level are name and field */
993 if (namelen != 2)
994 return (ENOTDIR); /* overloaded */
995 cdp = debugvars[name[0]];
996 if (cdp->debugname == 0)
997 return (ENOTSUP);
998 switch (name[1]) {
999 case CTL_DEBUG_NAME:
1000 return (sysctl_rdstring(oldp, oldlenp, newp, cdp->debugname));
1001 case CTL_DEBUG_VALUE:
1002 return (sysctl_int(oldp, oldlenp, newp, newlen, cdp->debugvar));
1003 default:
1004 return (ENOTSUP);
1005 }
1006 /* NOTREACHED */
1007 }
1008 #endif /* DEBUG */
1009
1010 /*
1011 * Validate parameters and get old / set new parameters
1012 * for an integer-valued sysctl function.
1013 */
1014 int
1015 sysctl_int(user_addr_t oldp, size_t *oldlenp,
1016 user_addr_t newp, size_t newlen, int *valp)
1017 {
1018 int error = 0;
1019
1020 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1021 return (EFAULT);
1022 if (oldp && *oldlenp < sizeof(int))
1023 return (ENOMEM);
1024 if (newp && newlen != sizeof(int))
1025 return (EINVAL);
1026 *oldlenp = sizeof(int);
1027 if (oldp)
1028 error = copyout(valp, oldp, sizeof(int));
1029 if (error == 0 && newp) {
1030 error = copyin(newp, valp, sizeof(int));
1031 AUDIT_ARG(value, *valp);
1032 }
1033 return (error);
1034 }
1035
1036 /*
1037 * As above, but read-only.
1038 */
1039 int
1040 sysctl_rdint(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, int val)
1041 {
1042 int error = 0;
1043
1044 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1045 return (EFAULT);
1046 if (oldp && *oldlenp < sizeof(int))
1047 return (ENOMEM);
1048 if (newp)
1049 return (EPERM);
1050 *oldlenp = sizeof(int);
1051 if (oldp)
1052 error = copyout((caddr_t)&val, oldp, sizeof(int));
1053 return (error);
1054 }
1055
1056 /*
1057 * Validate parameters and get old / set new parameters
1058 * for an quad(64bit)-valued sysctl function.
1059 */
1060 int
1061 sysctl_quad(user_addr_t oldp, size_t *oldlenp,
1062 user_addr_t newp, size_t newlen, quad_t *valp)
1063 {
1064 int error = 0;
1065
1066 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1067 return (EFAULT);
1068 if (oldp && *oldlenp < sizeof(quad_t))
1069 return (ENOMEM);
1070 if (newp && newlen != sizeof(quad_t))
1071 return (EINVAL);
1072 *oldlenp = sizeof(quad_t);
1073 if (oldp)
1074 error = copyout(valp, oldp, sizeof(quad_t));
1075 if (error == 0 && newp)
1076 error = copyin(newp, valp, sizeof(quad_t));
1077 return (error);
1078 }
1079
/*
 * As above, but read-only.
 *
 * NOTE(review): unlike its siblings this definition is K&R style and
 * takes oldp/newp as void * rather than user_addr_t; oldp is passed
 * through CAST_USER_ADDR_T below, so callers presumably already hand
 * in a user address value -- confirm before changing the signature.
 *
 * NOTE(review): *oldlenp is stored unconditionally; a caller passing
 * NULL for both oldp and oldlenp would fault here (same latent issue
 * as the other helpers).
 */
int
sysctl_rdquad(oldp, oldlenp, newp, val)
	void *oldp;
	size_t *oldlenp;
	void *newp;
	quad_t val;
{
	int error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	if (oldp && *oldlenp < sizeof(quad_t))
		return (ENOMEM);
	/* Read-only: refuse any attempt to set a new value. */
	if (newp)
		return (EPERM);
	*oldlenp = sizeof(quad_t);
	if (oldp)
		error = copyout((caddr_t)&val, CAST_USER_ADDR_T(oldp), sizeof(quad_t));
	return (error);
}
1103
1104 /*
1105 * Validate parameters and get old / set new parameters
1106 * for a string-valued sysctl function. Unlike sysctl_string, if you
1107 * give it a too small (but larger than 0 bytes) buffer, instead of
1108 * returning ENOMEM, it truncates the returned string to the buffer
1109 * size. This preserves the semantics of some library routines
1110 * implemented via sysctl, which truncate their returned data, rather
1111 * than simply returning an error. The returned string is always NUL
1112 * terminated.
1113 */
1114 int
1115 sysctl_trstring(user_addr_t oldp, size_t *oldlenp,
1116 user_addr_t newp, size_t newlen, char *str, int maxlen)
1117 {
1118 int len, copylen, error = 0;
1119
1120 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1121 return (EFAULT);
1122 copylen = len = strlen(str) + 1;
1123 if (oldp && (len < 0 || *oldlenp < 1))
1124 return (ENOMEM);
1125 if (oldp && (*oldlenp < (size_t)len))
1126 copylen = *oldlenp + 1;
1127 if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
1128 return (EINVAL);
1129 *oldlenp = copylen - 1; /* deal with NULL strings correctly */
1130 if (oldp) {
1131 error = copyout(str, oldp, copylen);
1132 if (!error) {
1133 unsigned char c = 0;
1134 /* NUL terminate */
1135 oldp += *oldlenp;
1136 error = copyout((void *)&c, oldp, sizeof(char));
1137 }
1138 }
1139 if (error == 0 && newp) {
1140 error = copyin(newp, str, newlen);
1141 str[newlen] = 0;
1142 AUDIT_ARG(text, (char *)str);
1143 }
1144 return (error);
1145 }
1146
1147 /*
1148 * Validate parameters and get old / set new parameters
1149 * for a string-valued sysctl function.
1150 */
1151 int
1152 sysctl_string(user_addr_t oldp, size_t *oldlenp,
1153 user_addr_t newp, size_t newlen, char *str, int maxlen)
1154 {
1155 int len, error = 0;
1156
1157 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1158 return (EFAULT);
1159 len = strlen(str) + 1;
1160 if (oldp && (len < 0 || *oldlenp < (size_t)len))
1161 return (ENOMEM);
1162 if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
1163 return (EINVAL);
1164 *oldlenp = len -1; /* deal with NULL strings correctly */
1165 if (oldp) {
1166 error = copyout(str, oldp, len);
1167 }
1168 if (error == 0 && newp) {
1169 error = copyin(newp, str, newlen);
1170 str[newlen] = 0;
1171 AUDIT_ARG(text, (char *)str);
1172 }
1173 return (error);
1174 }
1175
1176 /*
1177 * As above, but read-only.
1178 */
1179 int
1180 sysctl_rdstring(user_addr_t oldp, size_t *oldlenp,
1181 user_addr_t newp, char *str)
1182 {
1183 int len, error = 0;
1184
1185 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1186 return (EFAULT);
1187 len = strlen(str) + 1;
1188 if (oldp && *oldlenp < (size_t)len)
1189 return (ENOMEM);
1190 if (newp)
1191 return (EPERM);
1192 *oldlenp = len;
1193 if (oldp)
1194 error = copyout(str, oldp, len);
1195 return (error);
1196 }
1197
1198 /*
1199 * Validate parameters and get old / set new parameters
1200 * for a structure oriented sysctl function.
1201 */
1202 int
1203 sysctl_struct(user_addr_t oldp, size_t *oldlenp,
1204 user_addr_t newp, size_t newlen, void *sp, int len)
1205 {
1206 int error = 0;
1207
1208 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1209 return (EFAULT);
1210 if (oldp && (len < 0 || *oldlenp < (size_t)len))
1211 return (ENOMEM);
1212 if (newp && (len < 0 || newlen > (size_t)len))
1213 return (EINVAL);
1214 if (oldp) {
1215 *oldlenp = len;
1216 error = copyout(sp, oldp, len);
1217 }
1218 if (error == 0 && newp)
1219 error = copyin(newp, sp, len);
1220 return (error);
1221 }
1222
1223 /*
1224 * Validate parameters and get old parameters
1225 * for a structure oriented sysctl function.
1226 */
1227 int
1228 sysctl_rdstruct(user_addr_t oldp, size_t *oldlenp,
1229 user_addr_t newp, void *sp, int len)
1230 {
1231 int error = 0;
1232
1233 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1234 return (EFAULT);
1235 if (oldp && (len < 0 || *oldlenp < (size_t)len))
1236 return (ENOMEM);
1237 if (newp)
1238 return (EPERM);
1239 *oldlenp = len;
1240 if (oldp)
1241 error = copyout(sp, oldp, len);
1242 return (error);
1243 }
1244
/*
 * Get file structures.
 *
 * Copies the global list head 'filehead' followed by one extern_file
 * record per open fileglob into the user buffer at 'where'.  When
 * 'where' is NULL, only an estimate of the space required is returned
 * through *sizep (padded by 10 files to allow for growth between the
 * size probe and the actual copy).
 *
 * NOTE(review): the fileglob list is walked with no lock visible in
 * this function -- presumably the caller serializes against file
 * table changes; confirm before relying on it.
 */
int
sysctl_file(user_addr_t where, size_t *sizep)
{
	int buflen, error;
	struct fileglob *fg;
	user_addr_t start = where;
	struct extern_file nef;

	buflen = *sizep;
	if (where == USER_ADDR_NULL) {
		/*
		 * overestimate by 10 files
		 */
		*sizep = sizeof(filehead) + (nfiles + 10) * sizeof(struct extern_file);
		return (0);
	}

	/*
	 * first copyout filehead
	 */
	if (buflen < 0 || (size_t)buflen < sizeof(filehead)) {
		/* Buffer can't even hold the list head: report zero bytes. */
		*sizep = 0;
		return (0);
	}
	error = copyout((caddr_t)&filehead, where, sizeof(filehead));
	if (error)
		return (error);
	buflen -= sizeof(filehead);
	where += sizeof(filehead);

	/*
	 * followed by an array of file structures
	 */
	for (fg = filehead.lh_first; fg != 0; fg = fg->f_list.le_next) {
		if (buflen < 0 || (size_t)buflen < sizeof(struct extern_file)) {
			/* Ran out of room mid-list: report bytes written, fail. */
			*sizep = where - start;
			return (ENOMEM);
		}
		/* Translate the kernel fileglob into the exported layout. */
		nef.f_list.le_next = (struct extern_file *)fg->f_list.le_next;
		nef.f_list.le_prev = (struct extern_file **)fg->f_list.le_prev;
		nef.f_flag = (fg->fg_flag & FMASK);
		nef.f_type = fg->fg_type;
		nef.f_count = fg->fg_count;
		nef.f_msgcount = fg->fg_msgcount;
		nef.f_cred = fg->fg_cred;
		nef.f_ops = fg->fg_ops;
		nef.f_offset = fg->fg_offset;
		nef.f_data = fg->fg_data;
		error = copyout((caddr_t)&nef, where, sizeof (struct extern_file));
		if (error)
			return (error);
		buflen -= sizeof(struct extern_file);
		where += sizeof(struct extern_file);
	}
	*sizep = where - start;
	return (0);
}
1305
/*
 * try over estimating by 5 procs
 */
#define KERN_PROCSLOP	(5 * sizeof (struct kinfo_proc))

/*
 * KERN_PROC handler: copy out one kinfo_proc (32-bit callers) or
 * user_kinfo_proc (64-bit callers) record per process matching the
 * selector in name[0] / argument in name[1].  Walks the live process
 * list first, then the zombie list.  When 'where' is NULL, reports
 * the buffer size needed (padded by KERN_PROCSLOP) through *sizep.
 */
int
sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep)
{
	struct proc *p;
	user_addr_t dp = where;
	size_t needed = 0;
	int buflen = where != USER_ADDR_NULL ? *sizep : 0;
	int doingzomb;
	int error = 0;
	boolean_t is_64_bit = FALSE;
	struct kinfo_proc kproc;
	struct user_kinfo_proc user_kproc;
	int sizeof_kproc;
	caddr_t kprocp;

	/* Every selector except KERN_PROC_ALL carries one argument in name[1]. */
	if (namelen != 2 && !(namelen == 1 && name[0] == KERN_PROC_ALL))
		return (EINVAL);
	p = allproc.lh_first;
	doingzomb = 0;
	/* Record layout follows the *calling* process's ABI, not the target's. */
	is_64_bit = proc_is64bit(current_proc());
	if (is_64_bit) {
		sizeof_kproc = sizeof(user_kproc);
		kprocp = (caddr_t) &user_kproc;
	}
	else {
		sizeof_kproc = sizeof(kproc);
		kprocp = (caddr_t) &kproc;
	}
again:
	for (; p != 0; p = p->p_list.le_next) {
		/*
		 * Skip embryonic processes.
		 */
		if (p->p_stat == SIDL)
			continue;
		/*
		 * TODO - make more efficient (see notes below).
		 * do by session.
		 */
		switch (name[0]) {

		case KERN_PROC_PID:
			/* could do this with just a lookup */
			if (p->p_pid != (pid_t)name[1])
				continue;
			break;

		case KERN_PROC_PGRP:
			/* could do this by traversing pgrp */
			if (p->p_pgrp->pg_id != (pid_t)name[1])
				continue;
			break;

		case KERN_PROC_TTY:
			if ((p->p_flag & P_CONTROLT) == 0 ||
				(p->p_session == NULL) ||
				p->p_session->s_ttyp == NULL ||
				p->p_session->s_ttyp->t_dev != (dev_t)name[1])
				continue;
			break;

		case KERN_PROC_UID:
			if ((p->p_ucred == NULL) ||
				(kauth_cred_getuid(p->p_ucred) != (uid_t)name[1]))
				continue;
			break;

		case KERN_PROC_RUID:
			if ((p->p_ucred == NULL) ||
				(p->p_ucred->cr_ruid != (uid_t)name[1]))
				continue;
			break;
		}
		if (buflen >= sizeof_kproc) {
			/* Zero first: the fill routines don't touch every field. */
			bzero(kprocp, sizeof_kproc);
			if (is_64_bit) {
				fill_user_proc(p, (struct user_kinfo_proc *) kprocp);
			}
			else {
				fill_proc(p, (struct kinfo_proc *) kprocp);
			}
			error = copyout(kprocp, dp, sizeof_kproc);
			if (error)
				return (error);
			dp += sizeof_kproc;
			buflen -= sizeof_kproc;
		}
		/* Tally the full size even when skipping, so short buffers get ENOMEM. */
		needed += sizeof_kproc;
	}
	if (doingzomb == 0) {
		/* Second pass: same selector over the zombie list. */
		p = zombproc.lh_first;
		doingzomb++;
		goto again;
	}
	if (where != USER_ADDR_NULL) {
		*sizep = dp - where;
		if (needed > *sizep)
			return (ENOMEM);
	} else {
		/* Size probe: pad for processes forked before the real call. */
		needed += KERN_PROCSLOP;
		*sizep = needed;
	}
	return (0);
}
1415
/*
 * Fill in an eproc structure for the specified process.
 *
 * The eproc half of a kinfo_proc carries session/tty, credential and
 * (vestigial) VM accounting fields.  NOTE(review): e_flag is only
 * assigned on the controlling-tty and session-leader paths;
 * sysctl_doproc() bzero()s the record first, so any other caller must
 * pre-zero it too -- confirm before reusing this helper.
 */
static void
fill_eproc(p, ep)
	register struct proc *p;
	register struct eproc *ep;
{
	register struct tty *tp;

	ep->e_paddr = p;
	if (p->p_pgrp) {
		ep->e_sess = p->p_pgrp->pg_session;
		ep->e_pgid = p->p_pgrp->pg_id;
		ep->e_jobc = p->p_pgrp->pg_jobc;
		if (ep->e_sess && ep->e_sess->s_ttyvp)
			ep->e_flag = EPROC_CTTY;
	} else {
		ep->e_sess = (struct session *)0;
		ep->e_pgid = 0;
		ep->e_jobc = 0;
	}
	ep->e_ppid = (p->p_pptr) ? p->p_pptr->p_pid : 0;
	/* Pre-zero the fake historical pcred */
	bzero(&ep->e_pcred, sizeof(struct _pcred));
	if (p->p_ucred) {
		/* XXX not ref-counted */

		/* A fake historical pcred */
		ep->e_pcred.p_ruid = p->p_ucred->cr_ruid;
		ep->e_pcred.p_svuid = p->p_ucred->cr_svuid;
		ep->e_pcred.p_rgid = p->p_ucred->cr_rgid;
		ep->e_pcred.p_svgid = p->p_ucred->cr_svgid;

		/* A fake historical *kauth_cred_t */
		ep->e_ucred.cr_ref = p->p_ucred->cr_ref;
		ep->e_ucred.cr_uid = kauth_cred_getuid(p->p_ucred);
		ep->e_ucred.cr_ngroups = p->p_ucred->cr_ngroups;
		bcopy(p->p_ucred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));

	}
	/* VM sizes are not reported; zero them for embryos and zombies. */
	if (p->p_stat == SIDL || p->p_stat == SZOMB) {
		ep->e_vm.vm_tsize = 0;
		ep->e_vm.vm_dsize = 0;
		ep->e_vm.vm_ssize = 0;
	}
	ep->e_vm.vm_rssize = 0;

	if ((p->p_flag & P_CONTROLT) && (ep->e_sess) &&
	     (tp = ep->e_sess->s_ttyp)) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
		ep->e_tsess = tp->t_session;
	} else
		ep->e_tdev = NODEV;

	if (SESS_LEADER(p))
		ep->e_flag |= EPROC_SLEADER;
	if (p->p_wmesg)
		strncpy(ep->e_wmesg, p->p_wmesg, WMESGLEN);
	ep->e_xsize = ep->e_xrssize = 0;
	ep->e_xccount = ep->e_xswrss = 0;
}
1479
/*
 * Fill in an LP64 version of eproc structure for the specified process.
 *
 * Mirrors fill_eproc(), but kernel pointers are widened through
 * CAST_USER_ADDR_T for the 64-bit record layout.  NOTE(review): as in
 * fill_eproc, e_flag is only written on the controlling-tty and
 * session-leader paths, so the record must be pre-zeroed by the
 * caller (sysctl_doproc does this).
 */
static void
fill_user_eproc(register struct proc *p, register struct user_eproc *ep)
{
	register struct tty *tp;
	struct session *sessionp = NULL;

	ep->e_paddr = CAST_USER_ADDR_T(p);
	if (p->p_pgrp) {
		sessionp = p->p_pgrp->pg_session;
		ep->e_sess = CAST_USER_ADDR_T(sessionp);
		ep->e_pgid = p->p_pgrp->pg_id;
		ep->e_jobc = p->p_pgrp->pg_jobc;
		if (sessionp) {
			if (sessionp->s_ttyvp)
				ep->e_flag = EPROC_CTTY;
		}
	} else {
		ep->e_sess = USER_ADDR_NULL;
		ep->e_pgid = 0;
		ep->e_jobc = 0;
	}
	ep->e_ppid = (p->p_pptr) ? p->p_pptr->p_pid : 0;
	/* Pre-zero the fake historical pcred */
	bzero(&ep->e_pcred, sizeof(ep->e_pcred));
	if (p->p_ucred) {
		/* XXX not ref-counted */

		/* A fake historical pcred */
		ep->e_pcred.p_ruid = p->p_ucred->cr_ruid;
		ep->e_pcred.p_svuid = p->p_ucred->cr_svuid;
		ep->e_pcred.p_rgid = p->p_ucred->cr_rgid;
		ep->e_pcred.p_svgid = p->p_ucred->cr_svgid;

		/* A fake historical *kauth_cred_t */
		ep->e_ucred.cr_ref = p->p_ucred->cr_ref;
		ep->e_ucred.cr_uid = kauth_cred_getuid(p->p_ucred);
		ep->e_ucred.cr_ngroups = p->p_ucred->cr_ngroups;
		bcopy(p->p_ucred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));

	}
	/* VM sizes are not reported; zero them for embryos and zombies. */
	if (p->p_stat == SIDL || p->p_stat == SZOMB) {
		ep->e_vm.vm_tsize = 0;
		ep->e_vm.vm_dsize = 0;
		ep->e_vm.vm_ssize = 0;
	}
	ep->e_vm.vm_rssize = 0;

	if ((p->p_flag & P_CONTROLT) && (sessionp) &&
	     (tp = sessionp->s_ttyp)) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
		ep->e_tsess = CAST_USER_ADDR_T(tp->t_session);
	} else
		ep->e_tdev = NODEV;

	if (SESS_LEADER(p))
		ep->e_flag |= EPROC_SLEADER;
	if (p->p_wmesg)
		strncpy(ep->e_wmesg, p->p_wmesg, WMESGLEN);
	ep->e_xsize = ep->e_xrssize = 0;
	ep->e_xccount = ep->e_xswrss = 0;
}
1545
/*
 * Fill in the historical extern_proc structure for the specified
 * process (the kp_proc half of a 32-bit kinfo_proc).  Fields with no
 * modern equivalent are zeroed or left NULL.
 */
static void
fill_externproc(p, exp)
	register struct proc *p;
	register struct extern_proc *exp;
{
	exp->p_forw = exp->p_back = NULL;
	if (p->p_stats)
		exp->p_starttime = p->p_stats->p_start;
	exp->p_vmspace = NULL;
	exp->p_sigacts = p->p_sigacts;
	exp->p_flag = p->p_flag;
	exp->p_stat = p->p_stat ;
	exp->p_pid = p->p_pid ;
	exp->p_oppid = p->p_oppid ;
	exp->p_dupfd = p->p_dupfd ;
	/* Mach related */
	exp->user_stack = CAST_DOWN(caddr_t, p->user_stack);
	exp->exit_thread = p->exit_thread ;
	exp->p_debugger = p->p_debugger ;
	exp->sigwait = p->sigwait ;
	/* scheduling */
	exp->p_estcpu = p->p_estcpu ;
	exp->p_cpticks = p->p_cpticks ;
	exp->p_pctcpu = p->p_pctcpu ;
	exp->p_wchan = p->p_wchan ;
	exp->p_wmesg = p->p_wmesg ;
	exp->p_swtime = p->p_swtime ;
	exp->p_slptime = p->p_slptime ;
	bcopy(&p->p_realtimer, &exp->p_realtimer,sizeof(struct itimerval));
	bcopy(&p->p_rtime, &exp->p_rtime,sizeof(struct timeval));
	exp->p_uticks = p->p_uticks ;
	exp->p_sticks = p->p_sticks ;
	exp->p_iticks = p->p_iticks ;
	exp->p_traceflag = p->p_traceflag ;
	exp->p_tracep = p->p_tracep ;
	exp->p_siglist = 0 ;	/* No longer relevant */
	exp->p_textvp = p->p_textvp ;
	exp->p_holdcnt = 0 ;
	exp->p_sigmask = 0 ;	/* no longer avaialable */
	exp->p_sigignore = p->p_sigignore ;
	exp->p_sigcatch = p->p_sigcatch ;
	exp->p_priority = p->p_priority ;
	exp->p_usrpri = p->p_usrpri ;
	exp->p_nice = p->p_nice ;
	/* p_comm is copied then explicitly terminated (it may be full). */
	bcopy(&p->p_comm, &exp->p_comm,MAXCOMLEN);
	exp->p_comm[MAXCOMLEN] = '\0';
	exp->p_pgrp = p->p_pgrp ;
	exp->p_addr = NULL;
	exp->p_xstat = p->p_xstat ;
	exp->p_acflag = p->p_acflag ;
	exp->p_ru = p->p_ru ;		/* XXX may be NULL */
}
1601
/*
 * Fill in an LP64 version of extern_proc structure for the specified
 * process.  Mirrors fill_externproc(); kernel pointers are widened
 * through CAST_USER_ADDR_T and timevals are copied field-by-field
 * because the user_* layouts differ from the kernel ones.
 */
static void
fill_user_externproc(register struct proc *p, register struct user_extern_proc *exp)
{
	exp->p_forw = exp->p_back = USER_ADDR_NULL;
	if (p->p_stats) {
		exp->p_starttime.tv_sec = p->p_stats->p_start.tv_sec;
		exp->p_starttime.tv_usec = p->p_stats->p_start.tv_usec;
	}
	exp->p_vmspace = USER_ADDR_NULL;
	exp->p_sigacts = CAST_USER_ADDR_T(p->p_sigacts);
	exp->p_flag = p->p_flag;
	exp->p_stat = p->p_stat ;
	exp->p_pid = p->p_pid ;
	exp->p_oppid = p->p_oppid ;
	exp->p_dupfd = p->p_dupfd ;
	/* Mach related */
	exp->user_stack = p->user_stack;
	exp->exit_thread = CAST_USER_ADDR_T(p->exit_thread);
	exp->p_debugger = p->p_debugger ;
	exp->sigwait = p->sigwait ;
	/* scheduling */
	exp->p_estcpu = p->p_estcpu ;
	exp->p_cpticks = p->p_cpticks ;
	exp->p_pctcpu = p->p_pctcpu ;
	exp->p_wchan = CAST_USER_ADDR_T(p->p_wchan);
	exp->p_wmesg = CAST_USER_ADDR_T(p->p_wmesg);
	exp->p_swtime = p->p_swtime ;
	exp->p_slptime = p->p_slptime ;
	exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
	exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;
	exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
	exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;
	exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
	exp->p_rtime.tv_usec = p->p_rtime.tv_usec;
	exp->p_uticks = p->p_uticks ;
	exp->p_sticks = p->p_sticks ;
	exp->p_iticks = p->p_iticks ;
	exp->p_traceflag = p->p_traceflag ;
	exp->p_tracep = CAST_USER_ADDR_T(p->p_tracep);
	exp->p_siglist = 0 ;	/* No longer relevant */
	exp->p_textvp = CAST_USER_ADDR_T(p->p_textvp);
	exp->p_holdcnt = 0 ;
	exp->p_sigmask = 0 ;	/* no longer avaialable */
	exp->p_sigignore = p->p_sigignore ;
	exp->p_sigcatch = p->p_sigcatch ;
	exp->p_priority = p->p_priority ;
	exp->p_usrpri = p->p_usrpri ;
	exp->p_nice = p->p_nice ;
	/* p_comm is copied then explicitly terminated (it may be full). */
	bcopy(&p->p_comm, &exp->p_comm,MAXCOMLEN);
	exp->p_comm[MAXCOMLEN] = '\0';
	exp->p_pgrp = CAST_USER_ADDR_T(p->p_pgrp);
	exp->p_addr = USER_ADDR_NULL;
	exp->p_xstat = p->p_xstat ;
	exp->p_acflag = p->p_acflag ;
	exp->p_ru = CAST_USER_ADDR_T(p->p_ru);		/* XXX may be NULL */
}
1661
1662 static void
1663 fill_proc(p, kp)
1664 register struct proc *p;
1665 register struct kinfo_proc *kp;
1666 {
1667 fill_externproc(p, &kp->kp_proc);
1668 fill_eproc(p, &kp->kp_eproc);
1669 }
1670
1671 static void
1672 fill_user_proc(register struct proc *p, register struct user_kinfo_proc *kp)
1673 {
1674 fill_user_externproc(p, &kp->kp_proc);
1675 fill_user_eproc(p, &kp->kp_eproc);
1676 }
1677
1678 int
1679 kdebug_ops(int *name, u_int namelen, user_addr_t where,
1680 size_t *sizep, struct proc *p)
1681 {
1682 int ret=0;
1683
1684 ret = suser(kauth_cred_get(), &p->p_acflag);
1685 if (ret)
1686 return(ret);
1687
1688 switch(name[0]) {
1689 case KERN_KDEFLAGS:
1690 case KERN_KDDFLAGS:
1691 case KERN_KDENABLE:
1692 case KERN_KDGETBUF:
1693 case KERN_KDSETUP:
1694 case KERN_KDREMOVE:
1695 case KERN_KDSETREG:
1696 case KERN_KDGETREG:
1697 case KERN_KDREADTR:
1698 case KERN_KDPIDTR:
1699 case KERN_KDTHRMAP:
1700 case KERN_KDPIDEX:
1701 case KERN_KDSETRTCDEC:
1702 case KERN_KDSETBUF:
1703 case KERN_KDGETENTROPY:
1704 ret = kdbg_control(name, namelen, where, sizep);
1705 break;
1706 default:
1707 ret= ENOTSUP;
1708 break;
1709 }
1710 return(ret);
1711 }
1712
1713 extern int pcsamples_control(int *name, u_int namelen, user_addr_t where,
1714 size_t * sizep);
1715
1716 int
1717 pcsamples_ops(int *name, u_int namelen, user_addr_t where,
1718 size_t *sizep, struct proc *p)
1719 {
1720 int ret=0;
1721
1722 ret = suser(kauth_cred_get(), &p->p_acflag);
1723 if (ret)
1724 return(ret);
1725
1726 switch(name[0]) {
1727 case KERN_PCDISABLE:
1728 case KERN_PCGETBUF:
1729 case KERN_PCSETUP:
1730 case KERN_PCREMOVE:
1731 case KERN_PCREADBUF:
1732 case KERN_PCSETREG:
1733 case KERN_PCSETBUF:
1734 case KERN_PCCOMM:
1735 ret = pcsamples_control(name, namelen, where, sizep);
1736 break;
1737 default:
1738 ret= ENOTSUP;
1739 break;
1740 }
1741 return(ret);
1742 }
1743
1744 /*
1745 * Return the top *sizep bytes of the user stack, or the entire area of the
1746 * user stack down through the saved exec_path, whichever is smaller.
1747 */
1748 int
1749 sysctl_procargs(int *name, u_int namelen, user_addr_t where,
1750 size_t *sizep, struct proc *cur_proc)
1751 {
1752 return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 0);
1753 }
1754
1755 static int
1756 sysctl_procargs2(int *name, u_int namelen, user_addr_t where,
1757 size_t *sizep, struct proc *cur_proc)
1758 {
1759 return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 1);
1760 }
1761
/*
 * Common worker for KERN_PROCARGS / KERN_PROCARGS2 (argc_yes selects
 * the newer format, which prepends the argument count as the first
 * word).  Copies the saved argument/environment area from the top of
 * the target process's stack into the caller's buffer; when 'where'
 * is NULL, only reports the buffer size needed through *sizep.
 */
static int
sysctl_procargsx(int *name, __unused u_int namelen, user_addr_t where,
	size_t *sizep, struct proc *cur_proc, int argc_yes)
{
	struct proc *p;
	int buflen = where != USER_ADDR_NULL ? *sizep : 0;
	int error = 0;
	struct vm_map *proc_map;
	struct task * task;
	vm_map_copy_t	tmp;
	user_addr_t	arg_addr;
	size_t		arg_size;
	caddr_t data;
	int size;
	vm_offset_t	copy_start, copy_end;
	kern_return_t ret;
	int pid;

	if (argc_yes)
		buflen -= sizeof(int);		/* reserve first word to return argc */

	/* we only care about buflen when where (oldp from sysctl) is not NULL. */
	/* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
	/* is not NULL then the caller wants us to return the length needed to */
	/* hold the data we would return */
	if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
		return(EINVAL);
	}
	arg_size = buflen;

	/*
	 *	Lookup process by pid
	 */
	pid = name[0];
	p = pfind(pid);
	if (p == NULL) {
		return(EINVAL);
	}

	/*
	 *	Copy the top N bytes of the stack.
	 *	On all machines we have so far, the stack grows
	 *	downwards.
	 *
	 *	If the user expects no more than N bytes of
	 *	argument list, use that as a guess for the
	 *	size.
	 */

	if (!p->user_stack)
		return(EINVAL);

	if (where == USER_ADDR_NULL) {
		/* caller only wants to know length of proc args data */
		if (sizep == NULL)
			return(EFAULT);

		size = p->p_argslen;
		if (argc_yes) {
			size += sizeof(int);
		}
		else {
			/*
			 * old PROCARGS will return the executable's path and plus some
			 * extra space for work alignment and data tags
			 */
			size += PATH_MAX + (6 * sizeof(int));
		}
		/* Round the estimate up to the next word boundary. */
		size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
		*sizep = size;
		return (0);
	}

	/* Only the process owner (or the super-user) may read its arguments. */
	if ((kauth_cred_getuid(p->p_ucred) != kauth_cred_getuid(kauth_cred_get()))
		&& suser(kauth_cred_get(), &cur_proc->p_acflag))
		return (EINVAL);

	if ((u_int)arg_size > p->p_argslen)
		arg_size = round_page(p->p_argslen);

	/* The args live at the very top of the target's stack. */
	arg_addr = p->user_stack - arg_size;


	/*
	 *	Before we can block (any VM code), make another
	 *	reference to the map to keep it alive.  We do
	 *	that by getting a reference on the task itself.
	 */
	task = p->task;
	if (task == NULL)
		return(EINVAL);

	/*
	 * Once we have a task reference we can convert that into a
	 * map reference, which we will use in the calls below.  The
	 * task/process may change its map after we take this reference
	 * (see execve), but the worst that will happen then is a return
	 * of stale info (which is always a possibility).
	 */
	task_reference(task);
	proc_map = get_task_map_reference(task);
	task_deallocate(task);
	if (proc_map == NULL)
		return(EINVAL);


	/* Kernel-side staging buffer for the copied argument area. */
	ret = kmem_alloc(kernel_map, &copy_start, round_page(arg_size));
	if (ret != KERN_SUCCESS) {
		vm_map_deallocate(proc_map);
		return(ENOMEM);
	}

	copy_end = round_page(copy_start + arg_size);

	if( vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
			  (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
			vm_map_deallocate(proc_map);
			kmem_free(kernel_map, copy_start,
					round_page(arg_size));
			return (EIO);
	}

	/*
	 *	Now that we've done the copyin from the process'
	 *	map, we can release the reference to it.
	 */
	vm_map_deallocate(proc_map);

	if( vm_map_copy_overwrite(kernel_map,
				  (vm_map_address_t)copy_start,
				  tmp, FALSE) != KERN_SUCCESS) {
			kmem_free(kernel_map, copy_start,
					round_page(arg_size));
			return (EIO);
	}

	/* The args sit at the high end of the staging buffer. */
	if (arg_size > p->p_argslen) {
		data = (caddr_t) (copy_end - p->p_argslen);
		size = p->p_argslen;
	} else {
		data = (caddr_t) (copy_end - arg_size);
		size = arg_size;
	}

	if (argc_yes) {
		/* Put processes argc as the first word in the copyout buffer */
		suword(where, p->p_argc);
		error = copyout(data, (where + sizeof(int)), size);
		size += sizeof(int);
	} else {
		error = copyout(data, where, size);

		/*
		 * Make the old PROCARGS work to return the executable's path
		 * But, only if there is enough space in the provided buffer
		 *
		 * on entry: data [possibily] points to the beginning of the path
		 *
		 * Note: we keep all pointers&sizes aligned to word boundries
		 */
		if ( (! error) && (buflen > 0 && (u_int)buflen > p->p_argslen) )
		{
			int binPath_sz, alignedBinPath_sz = 0;
			int extraSpaceNeeded, addThis;
			user_addr_t placeHere;
			char * str = (char *) data;
			int max_len = size;

			/* Some apps are really bad about messing up their stacks
			   So, we have to be extra careful about getting the length
			   of the executing binary.  If we encounter an error, we bail.
			*/

			/* Limit ourselves to PATH_MAX paths */
			if ( max_len > PATH_MAX ) max_len = PATH_MAX;

			binPath_sz = 0;

			/* Measure the NUL-terminated path at the start of 'data'. */
			while ( (binPath_sz < max_len-1) && (*str++ != 0) )
				binPath_sz++;

			/* If we have a NUL terminator, copy it, too */
			if (binPath_sz < max_len-1) binPath_sz += 1;

			/* Pre-Flight the space requiremnts */

			/* Account for the padding that fills out binPath to the next word */
			alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;

			placeHere = where + size;

			/* Account for the bytes needed to keep placeHere word aligned */
			addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;

			/* Add up all the space that is needed */
			extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));

			/* is there is room to tack on argv[0]? */
			if ( (buflen & ~(sizeof(int)-1)) >= ( p->p_argslen + extraSpaceNeeded ))
			{
				placeHere += addThis;
				suword(placeHere, 0);
				placeHere += sizeof(int);
				suword(placeHere, 0xBFFF0000);
				placeHere += sizeof(int);
				suword(placeHere, 0);
				placeHere += sizeof(int);
				error = copyout(data, placeHere, binPath_sz);
				if ( ! error )
				{
					placeHere += binPath_sz;
					suword(placeHere, 0);
					size += extraSpaceNeeded;
				}
			}
		}
	}

	if (copy_start != (vm_offset_t) 0) {
		kmem_free(kernel_map, copy_start, copy_end - copy_start);
	}
	if (error) {
		return(error);
	}

	if (where != USER_ADDR_NULL)
		*sizep = size;
	return (0);
}
1991
1992
1993 /*
1994 * Validate parameters and get old / set new parameters
1995 * for max number of concurrent aio requests. Makes sure
1996 * the system wide limit is greater than the per process
1997 * limit.
1998 */
1999 static int
2000 sysctl_aiomax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen)
2001 {
2002 int error = 0;
2003 int new_value;
2004
2005 if ( oldp && *oldlenp < sizeof(int) )
2006 return (ENOMEM);
2007 if ( newp && newlen != sizeof(int) )
2008 return (EINVAL);
2009
2010 *oldlenp = sizeof(int);
2011 if ( oldp )
2012 error = copyout( &aio_max_requests, oldp, sizeof(int) );
2013 if ( error == 0 && newp )
2014 error = copyin( newp, &new_value, sizeof(int) );
2015 if ( error == 0 && newp ) {
2016 if ( new_value >= aio_max_requests_per_process )
2017 aio_max_requests = new_value;
2018 else
2019 error = EINVAL;
2020 }
2021 return( error );
2022
2023 } /* sysctl_aiomax */
2024
2025
2026 /*
2027 * Validate parameters and get old / set new parameters
2028 * for max number of concurrent aio requests per process.
2029 * Makes sure per process limit is less than the system wide
2030 * limit.
2031 */
2032 static int
2033 sysctl_aioprocmax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen )
2034 {
2035 int error = 0;
2036 int new_value = 0;
2037
2038 if ( oldp && *oldlenp < sizeof(int) )
2039 return (ENOMEM);
2040 if ( newp && newlen != sizeof(int) )
2041 return (EINVAL);
2042
2043 *oldlenp = sizeof(int);
2044 if ( oldp )
2045 error = copyout( &aio_max_requests_per_process, oldp, sizeof(int) );
2046 if ( error == 0 && newp )
2047 error = copyin( newp, &new_value, sizeof(int) );
2048 if ( error == 0 && newp ) {
2049 if ( new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX )
2050 aio_max_requests_per_process = new_value;
2051 else
2052 error = EINVAL;
2053 }
2054 return( error );
2055
2056 } /* sysctl_aioprocmax */
2057
2058
2059 /*
2060 * Validate parameters and get old / set new parameters
2061 * for max number of async IO worker threads.
2062 * We only allow an increase in the number of worker threads.
2063 */
2064 static int
2065 sysctl_aiothreads(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen)
2066 {
2067 int error = 0;
2068 int new_value;
2069
2070 if ( oldp && *oldlenp < sizeof(int) )
2071 return (ENOMEM);
2072 if ( newp && newlen != sizeof(int) )
2073 return (EINVAL);
2074
2075 *oldlenp = sizeof(int);
2076 if ( oldp )
2077 error = copyout( &aio_worker_threads, oldp, sizeof(int) );
2078 if ( error == 0 && newp )
2079 error = copyin( newp, &new_value, sizeof(int) );
2080 if ( error == 0 && newp ) {
2081 if (new_value > aio_worker_threads ) {
2082 _aio_create_worker_threads( (new_value - aio_worker_threads) );
2083 aio_worker_threads = new_value;
2084 }
2085 else
2086 error = EINVAL;
2087 }
2088 return( error );
2089
2090 } /* sysctl_aiothreads */
2091
2092
2093 /*
2094 * Validate parameters and get old / set new parameters
2095 * for max number of processes per UID.
2096 * Makes sure per UID limit is less than the system wide limit.
2097 */
2098 static int
2099 sysctl_maxprocperuid(user_addr_t oldp, size_t *oldlenp,
2100 user_addr_t newp, size_t newlen)
2101 {
2102 int error = 0;
2103 int new_value;
2104
2105 if ( oldp != USER_ADDR_NULL && *oldlenp < sizeof(int) )
2106 return (ENOMEM);
2107 if ( newp != USER_ADDR_NULL && newlen != sizeof(int) )
2108 return (EINVAL);
2109
2110 *oldlenp = sizeof(int);
2111 if ( oldp != USER_ADDR_NULL )
2112 error = copyout( &maxprocperuid, oldp, sizeof(int) );
2113 if ( error == 0 && newp != USER_ADDR_NULL ) {
2114 error = copyin( newp, &new_value, sizeof(int) );
2115 if ( error == 0 ) {
2116 AUDIT_ARG(value, new_value);
2117 if ( new_value <= maxproc && new_value > 0 )
2118 maxprocperuid = new_value;
2119 else
2120 error = EINVAL;
2121 }
2122 else
2123 error = EINVAL;
2124 }
2125 return( error );
2126
2127 } /* sysctl_maxprocperuid */
2128
2129
2130 /*
2131 * Validate parameters and get old / set new parameters
2132 * for max number of files per process.
2133 * Makes sure per process limit is less than the system-wide limit.
2134 */
2135 static int
2136 sysctl_maxfilesperproc(user_addr_t oldp, size_t *oldlenp,
2137 user_addr_t newp, size_t newlen)
2138 {
2139 int error = 0;
2140 int new_value;
2141
2142 if ( oldp != USER_ADDR_NULL && *oldlenp < sizeof(int) )
2143 return (ENOMEM);
2144 if ( newp != USER_ADDR_NULL && newlen != sizeof(int) )
2145 return (EINVAL);
2146
2147 *oldlenp = sizeof(int);
2148 if ( oldp != USER_ADDR_NULL )
2149 error = copyout( &maxfilesperproc, oldp, sizeof(int) );
2150 if ( error == 0 && newp != USER_ADDR_NULL ) {
2151 error = copyin( newp, &new_value, sizeof(int) );
2152 if ( error == 0 ) {
2153 AUDIT_ARG(value, new_value);
2154 if ( new_value < maxfiles && new_value > 0 )
2155 maxfilesperproc = new_value;
2156 else
2157 error = EINVAL;
2158 }
2159 else
2160 error = EINVAL;
2161 }
2162 return( error );
2163
2164 } /* sysctl_maxfilesperproc */
2165
2166
2167 /*
2168 * Validate parameters and get old / set new parameters
2169 * for the system-wide limit on the max number of processes.
2170 * Makes sure the system-wide limit is less than the configured hard
2171 * limit set at kernel compilation.
2172 */
2173 static int
2174 sysctl_maxproc(user_addr_t oldp, size_t *oldlenp,
2175 user_addr_t newp, size_t newlen )
2176 {
2177 int error = 0;
2178 int new_value;
2179
2180 if ( oldp != USER_ADDR_NULL && *oldlenp < sizeof(int) )
2181 return (ENOMEM);
2182 if ( newp != USER_ADDR_NULL && newlen != sizeof(int) )
2183 return (EINVAL);
2184
2185 *oldlenp = sizeof(int);
2186 if ( oldp != USER_ADDR_NULL )
2187 error = copyout( &maxproc, oldp, sizeof(int) );
2188 if ( error == 0 && newp != USER_ADDR_NULL ) {
2189 error = copyin( newp, &new_value, sizeof(int) );
2190 if ( error == 0 ) {
2191 AUDIT_ARG(value, new_value);
2192 if ( new_value <= hard_maxproc && new_value > 0 )
2193 maxproc = new_value;
2194 else
2195 error = EINVAL;
2196 }
2197 else
2198 error = EINVAL;
2199 }
2200 return( error );
2201
2202 } /* sysctl_maxproc */
2203
#if __i386__
/*
 * sysctl handler for sysctl.proc_exec_affinity (i386 kernels only).
 *
 * Reads and/or writes the calling process's exec-affinity flag, exposed
 * to userland as a cpu_type_t: P_AFFINITY set maps to CPU_TYPE_POWERPC,
 * clear maps to CPU_TYPE_I386.  Writing any other cpu type is rejected
 * with EINVAL.  CTLFLAG_ANYBODY: no privilege required, since a process
 * only ever modifies its own p_flag here (req->p).
 *
 * NOTE(review): p_flag is updated with a plain read-modify-write
 * (&= / |=) with no visible locking — presumably safe only because the
 * process mutates its own flag from its own context; confirm against
 * other p_flag writers.
 */
static int
sysctl_sysctl_exec_affinity SYSCTL_HANDLER_ARGS
{
	struct proc *cur_proc = req->p;
	int error;

	/* Old value requested: report current affinity as a cpu_type_t. */
	if (req->oldptr != USER_ADDR_NULL) {
		cpu_type_t oldcputype = (cur_proc->p_flag & P_AFFINITY) ? CPU_TYPE_POWERPC : CPU_TYPE_I386;
		if ((error = SYSCTL_OUT(req, &oldcputype, sizeof(oldcputype))))
			return error;
	}

	/* New value supplied: set or clear P_AFFINITY accordingly. */
	if (req->newptr != USER_ADDR_NULL) {
		cpu_type_t newcputype;
		if ((error = SYSCTL_IN(req, &newcputype, sizeof(newcputype))))
			return error;
		if (newcputype == CPU_TYPE_I386)
			cur_proc->p_flag &= ~P_AFFINITY;
		else if (newcputype == CPU_TYPE_POWERPC)
			cur_proc->p_flag |= P_AFFINITY;
		else
			return (EINVAL);
	}

	return 0;
}
SYSCTL_PROC(_sysctl, OID_AUTO, proc_exec_affinity, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, 0, 0, sysctl_sysctl_exec_affinity ,"I","proc_exec_affinity");
#endif
2233
/*
 * fetch_process_cputype
 *
 * Resolve the effective cpu_type_t of a target process and store it in
 * *cputype.  With namelen == 0 the target is the calling process
 * (cur_proc); with namelen == 1, name[0] is taken as a pid and looked up
 * via pfind().  Any other namelen is rejected with EINVAL.
 *
 * For a pid lookup, access is allowed if the target's uid matches the
 * caller's, or if the caller passes the suser() superuser check;
 * otherwise returns EPERM.
 *
 * Returns 0 on success, EINVAL on bad arguments / unknown pid, EPERM on
 * a failed permission check.
 *
 * NOTE(review): pfind() result is used without any visible
 * reference-taking or release, and p_ucred is read directly rather than
 * through a held credential reference — presumably relying on the
 * caller's context to keep the proc stable; confirm against this
 * kernel's proc locking rules.
 */
static int
fetch_process_cputype(
	struct proc *cur_proc,
	int *name,
	u_int namelen,
	cpu_type_t *cputype)
{
	struct proc *p = NULL;
	cpu_type_t ret = 0;

	if (namelen == 0)
		p = cur_proc;
	else if (namelen == 1) {
		p = pfind(name[0]);
		if (p == NULL)
			return (EINVAL);
		/* Same-uid or superuser may inspect another process. */
		if ((kauth_cred_getuid(p->p_ucred) != kauth_cred_getuid(kauth_cred_get()))
			&& suser(kauth_cred_get(), &cur_proc->p_acflag))
			return (EPERM);
	} else {
		return EINVAL;
	}

#if __i386__
	/* A translated (Rosetta) process reports as PowerPC on i386. */
	if (p->p_flag & P_TRANSLATED) {
		ret = CPU_TYPE_POWERPC;
	}
	else
#endif
	{
		/* Native: kernel cpu type, with the 64-bit ABI bit for LP64 procs. */
		ret = cpu_type();
		if (IS_64BIT_PROCESS(p))
			ret |= CPU_ARCH_ABI64;
	}
	*cputype = ret;

	return 0;
}
2272
2273 static int
2274 sysctl_sysctl_native SYSCTL_HANDLER_ARGS
2275 {
2276 int error;
2277 cpu_type_t proc_cputype = 0;
2278 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2279 return error;
2280 int res = 1;
2281 if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
2282 res = 0;
2283 return SYSCTL_OUT(req, &res, sizeof(res));
2284 }
2285 SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_native ,"I","proc_native");
2286
2287 static int
2288 sysctl_sysctl_cputype SYSCTL_HANDLER_ARGS
2289 {
2290 int error;
2291 cpu_type_t proc_cputype = 0;
2292 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2293 return error;
2294 return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
2295 }
2296 SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_cputype ,"I","proc_cputype");
2297