apple/xnu.git: bsd/kern/kern_sysctl.c (blob 292085e1d84f5dfba4bb0717f993b5274bd958d8)
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*-
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
65 */
66
67 /*
68 * sysctl system call.
69 */
70
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/kernel.h>
74 #include <sys/malloc.h>
75 #include <sys/proc_internal.h>
76 #include <sys/kauth.h>
77 #include <sys/file_internal.h>
78 #include <sys/vnode_internal.h>
79 #include <sys/unistd.h>
80 #include <sys/buf.h>
81 #include <sys/ioctl.h>
82 #include <sys/namei.h>
83 #include <sys/tty.h>
84 #include <sys/disklabel.h>
85 #include <sys/vm.h>
86 #include <sys/sysctl.h>
87 #include <sys/user.h>
88 #include <sys/aio_kern.h>
89
90 #include <bsm/audit_kernel.h>
91
92 #include <mach/machine.h>
93 #include <mach/mach_types.h>
94 #include <mach/vm_param.h>
95 #include <kern/task.h>
96 #include <kern/lock.h>
97 #include <vm/vm_kern.h>
98 #include <vm/vm_map.h>
99 #include <mach/host_info.h>
100
101 extern vm_map_t bsd_pageable_map;
102
103 #include <sys/mount_internal.h>
104 #include <sys/kdebug.h>
105 #include <sys/sysproto.h>
106
107 #include <IOKit/IOPlatformExpert.h>
108 #include <pexpert/pexpert.h>
109
110 #include <machine/machine_routines.h>
111 #include <machine/exec.h>
112
113 #include <vm/vm_protos.h>
114
115 #ifdef __i386__
116 #include <i386/cpuid.h>
117 #endif
118
119 sysctlfn kern_sysctl;
120 #ifdef DEBUG
121 sysctlfn debug_sysctl;
122 #endif
123 extern sysctlfn vm_sysctl;
124 extern sysctlfn vfs_sysctl;
125 extern sysctlfn net_sysctl;
126 extern sysctlfn cpu_sysctl;
127 extern int aio_max_requests;
128 extern int aio_max_requests_per_process;
129 extern int aio_worker_threads;
130 extern int maxfilesperproc;
131 extern int lowpri_IO_window_msecs;
132 extern int lowpri_IO_delay_msecs;
133 extern int nx_enabled;
134
135 static void
136 fill_eproc(struct proc *p, struct eproc *ep);
137 static void
138 fill_externproc(struct proc *p, struct extern_proc *exp);
139 static void
140 fill_user_eproc(struct proc *p, struct user_eproc *ep);
141 static void
142 fill_user_proc(struct proc *p, struct user_kinfo_proc *kp);
143 static void
144 fill_user_externproc(struct proc *p, struct user_extern_proc *exp);
145 extern int
146 kdbg_control(int *name, u_int namelen, user_addr_t where, size_t * sizep);
147 int
148 kdebug_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep, struct proc *p);
149 #if NFSCLIENT
150 extern int
151 netboot_root(void);
152 #endif
153 int
154 pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
155 struct proc *p);
156 __private_extern__ kern_return_t
157 reset_vmobjectcache(unsigned int val1, unsigned int val2);
158 extern int
159 resize_namecache(u_int newsize);
160 static int
161 sysctl_aiomax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen);
162 static int
163 sysctl_aioprocmax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen);
164 static int
165 sysctl_aiothreads(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen);
166 extern int
167 sysctl_clockrate(user_addr_t where, size_t *sizep);
168 int
169 sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep);
170 int
171 sysctl_doprof(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
172 user_addr_t newp, size_t newlen);
173 int
174 sysctl_file(user_addr_t where, size_t *sizep);
175 static void
176 fill_proc(struct proc *p, struct kinfo_proc *kp);
177 static int
178 sysctl_maxfilesperproc(user_addr_t oldp, size_t *oldlenp,
179 user_addr_t newp, size_t newlen);
180 static int
181 sysctl_maxprocperuid(user_addr_t oldp, size_t *oldlenp,
182 user_addr_t newp, size_t newlen);
183 static int
184 sysctl_maxproc(user_addr_t oldp, size_t *oldlenp,
185 user_addr_t newp, size_t newlen);
186 int
187 sysctl_procargs(int *name, u_int namelen, user_addr_t where,
188 size_t *sizep, struct proc *cur_proc);
189 static int
190 sysctl_procargs2(int *name, u_int namelen, user_addr_t where, size_t *sizep,
191 struct proc *cur_proc);
192 static int
193 sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
194 struct proc *cur_proc, int argc_yes);
195 int
196 sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
197 size_t newlen, void *sp, int len);
198 extern int
199 sysctl_vnode(user_addr_t where, size_t *sizep);
200
201
202 /*
203  * temporary location for vm_sysctl. This should be machine independent
204 */
205
206 extern uint32_t mach_factor[3];
207
208 static void
209 loadavg32to64(struct loadavg *la32, struct user_loadavg *la64)
210 {
211 la64->ldavg[0] = la32->ldavg[0];
212 la64->ldavg[1] = la32->ldavg[1];
213 la64->ldavg[2] = la32->ldavg[2];
214 la64->fscale = (user_long_t)la32->fscale;
215 }
216
217 int
218 vm_sysctl(int *name, __unused u_int namelen, user_addr_t oldp, size_t *oldlenp,
219           user_addr_t newp, size_t newlen, struct proc *p)
220 {
221 struct loadavg loadinfo;
222
223 switch (name[0]) {
224 case VM_LOADAVG:
225 if (proc_is64bit(p)) {
226 struct user_loadavg loadinfo64;
227 loadavg32to64(&averunnable, &loadinfo64);
228 return (sysctl_struct(oldp, oldlenp, newp, newlen,
229 &loadinfo64, sizeof(loadinfo64)));
230 } else {
231 return (sysctl_struct(oldp, oldlenp, newp, newlen,
232 &averunnable, sizeof(struct loadavg)));
233 }
234 case VM_MACHFACTOR:
235 loadinfo.ldavg[0] = mach_factor[0];
236 loadinfo.ldavg[1] = mach_factor[1];
237 loadinfo.ldavg[2] = mach_factor[2];
238 loadinfo.fscale = LSCALE;
239 if (proc_is64bit(p)) {
240 struct user_loadavg loadinfo64;
241 loadavg32to64(&loadinfo, &loadinfo64);
242 return (sysctl_struct(oldp, oldlenp, newp, newlen,
243 &loadinfo64, sizeof(loadinfo64)));
244 } else {
245 return (sysctl_struct(oldp, oldlenp, newp, newlen,
246 &loadinfo, sizeof(struct loadavg)));
247 }
248 case VM_SWAPUSAGE: {
249 int error;
250 uint64_t swap_total;
251 uint64_t swap_avail;
252 uint32_t swap_pagesize;
253 boolean_t swap_encrypted;
254 struct xsw_usage xsu;
255
256 error = macx_swapinfo(&swap_total,
257 &swap_avail,
258 &swap_pagesize,
259 &swap_encrypted);
260 if (error)
261 return error;
262
263 xsu.xsu_total = swap_total;
264 xsu.xsu_avail = swap_avail;
265 xsu.xsu_used = swap_total - swap_avail;
266 xsu.xsu_pagesize = swap_pagesize;
267 xsu.xsu_encrypted = swap_encrypted;
268 return sysctl_struct(oldp, oldlenp, newp, newlen,
269 &xsu, sizeof (struct xsw_usage));
270 }
271 case VM_METER:
272 return (ENOTSUP);
273 case VM_MAXID:
274 return (ENOTSUP);
275 default:
276 return (ENOTSUP);
277 }
278 /* NOTREACHED */
279 return (ENOTSUP);
280 }
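
/*
 * Illustrative userland sketch (not part of this file): querying the
 * VM_SWAPUSAGE node serviced above through sysctl(3), assuming the
 * standard <sys/types.h>, <sys/sysctl.h>, <err.h> and <stdio.h> headers.
 *
 *	int mib[2] = { CTL_VM, VM_SWAPUSAGE };
 *	struct xsw_usage xsu;
 *	size_t len = sizeof(xsu);
 *
 *	if (sysctl(mib, 2, &xsu, &len, NULL, 0) == -1)
 *		err(1, "sysctl vm.swapusage");
 *	printf("swap: %llu bytes total, %llu bytes used\n",
 *	    (unsigned long long)xsu.xsu_total,
 *	    (unsigned long long)xsu.xsu_used);
 */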
281
282 /*
283 * Locking and stats
284 */
285 static struct sysctl_lock {
286 int sl_lock;
287 int sl_want;
288 int sl_locked;
289 } memlock;
290
291 int
292 __sysctl(struct proc *p, struct __sysctl_args *uap, __unused register_t *retval)
293 {
294 int error, dolock = 1;
295 size_t savelen = 0, oldlen = 0, newlen;
296 sysctlfn *fnp = NULL;
297 int name[CTL_MAXNAME];
298 int i;
299 int error1;
300
301 /*
302 * all top-level sysctl names are non-terminal
303 */
304 if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)
305 return (EINVAL);
306 error = copyin(uap->name, &name[0], uap->namelen * sizeof(int));
307 if (error)
308 return (error);
309
310 AUDIT_ARG(ctlname, name, uap->namelen);
311
312 if (proc_is64bit(p)) {
313 		/* uap->newlen is a size_t value which grows to 64 bits
314 		 * when coming from a 64-bit process. Since it's doubtful we'll
315 		 * have a sysctl newp buffer greater than 4GB, we shrink it to size_t
316 */
317 newlen = CAST_DOWN(size_t, uap->newlen);
318 }
319 else {
320 newlen = uap->newlen;
321 }
322
323 /* CTL_UNSPEC is used to get oid to AUTO_OID */
324 if (uap->new != USER_ADDR_NULL
325 && ((name[0] == CTL_KERN
326 && !(name[1] == KERN_IPC || name[1] == KERN_PANICINFO || name[1] == KERN_PROCDELAYTERM ||
327 name[1] == KERN_PROC_LOW_PRI_IO || name[1] == KERN_PROCNAME || name[1] == KERN_THALTSTACK))
328 || (name[0] == CTL_HW)
329 || (name[0] == CTL_VM)
330 || (name[0] == CTL_VFS))
331 && (error = suser(kauth_cred_get(), &p->p_acflag)))
332 return (error);
333
334 switch (name[0]) {
335 case CTL_KERN:
336 fnp = kern_sysctl;
337 if ((name[1] != KERN_VNODE) && (name[1] != KERN_FILE)
338 && (name[1] != KERN_PROC))
339 dolock = 0;
340 break;
341 case CTL_VM:
342 fnp = vm_sysctl;
343 break;
344
345 case CTL_VFS:
346 fnp = vfs_sysctl;
347 break;
348 #ifdef DEBUG
349 case CTL_DEBUG:
350 fnp = debug_sysctl;
351 break;
352 #endif
353 default:
354 fnp = NULL;
355 }
356
357 if (uap->oldlenp != USER_ADDR_NULL) {
358 uint64_t oldlen64 = fuulong(uap->oldlenp);
359
360 oldlen = CAST_DOWN(size_t, oldlen64);
361 /*
362 * If more than 4G, clamp to 4G - useracc() below will catch
363 * with an EFAULT, if it's actually necessary.
364 */
365 if (oldlen64 > 0x00000000ffffffffULL)
366 oldlen = 0xffffffffUL;
367 }
368
369 if (uap->old != USER_ADDR_NULL) {
370 if (!useracc(uap->old, (user_size_t)oldlen, B_WRITE))
371 return (EFAULT);
372
373 /* The pc sampling mechanism does not need to take this lock */
374 if ((name[1] != KERN_PCSAMPLES) &&
375 (!((name[1] == KERN_KDEBUG) && (name[2] == KERN_KDGETENTROPY)))) {
376 while (memlock.sl_lock) {
377 memlock.sl_want = 1;
378 sleep((caddr_t)&memlock, PRIBIO+1);
379 memlock.sl_locked++;
380 }
381 memlock.sl_lock = 1;
382 }
383
384 if (dolock && oldlen &&
385 (error = vslock(uap->old, (user_size_t)oldlen))) {
386 if ((name[1] != KERN_PCSAMPLES) &&
387 (! ((name[1] == KERN_KDEBUG) && (name[2] == KERN_KDGETENTROPY)))) {
388 memlock.sl_lock = 0;
389 if (memlock.sl_want) {
390 memlock.sl_want = 0;
391 wakeup((caddr_t)&memlock);
392 }
393 }
394 return(error);
395 }
396 savelen = oldlen;
397 }
398
399 if (fnp) {
400 error = (*fnp)(name + 1, uap->namelen - 1, uap->old,
401 &oldlen, uap->new, newlen, p);
402 }
403 else
404 error = ENOTSUP;
405
406 if ( (name[0] != CTL_VFS) && (error == ENOTSUP)) {
407 size_t tmp = oldlen;
408 error = userland_sysctl(p, name, uap->namelen, uap->old, &tmp,
409 1, uap->new, newlen, &oldlen);
410 }
411
412 if (uap->old != USER_ADDR_NULL) {
413 if (dolock && savelen) {
414 error1 = vsunlock(uap->old, (user_size_t)savelen, B_WRITE);
415 if (!error && error1)
416 error = error1;
417 }
418 if (name[1] != KERN_PCSAMPLES) {
419 memlock.sl_lock = 0;
420 if (memlock.sl_want) {
421 memlock.sl_want = 0;
422 wakeup((caddr_t)&memlock);
423 }
424 }
425 }
426 if ((error) && (error != ENOMEM))
427 return (error);
428
429 if (uap->oldlenp != USER_ADDR_NULL) {
430 i = suulong(uap->oldlenp, oldlen);
431 if (i)
432 return i;
433 }
434
435 return (error);
436 }
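
/*
 * Illustrative userland sketch (not part of this file): a two-level MIB
 * such as { CTL_KERN, KERN_MAXPROC } enters through __sysctl() above,
 * which copies in the name, dispatches to kern_sysctl(), and writes the
 * result length back through uap->oldlenp.
 *
 *	int mib[2] = { CTL_KERN, KERN_MAXPROC };
 *	int maxproc;
 *	size_t len = sizeof(maxproc);
 *
 *	if (sysctl(mib, 2, &maxproc, &len, NULL, 0) == -1)
 *		err(1, "sysctl kern.maxproc");
 */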
437
438 /*
439 * Attributes stored in the kernel.
440 */
441 __private_extern__ char corefilename[MAXPATHLEN+1];
442 __private_extern__ int do_coredump;
443 __private_extern__ int sugid_coredump;
444
445
446 #ifdef INSECURE
447 int securelevel = -1;
448 #else
449 int securelevel;
450 #endif
451
452 static int
453 sysctl_affinity(
454 int *name,
455 u_int namelen,
456 user_addr_t oldBuf,
457 size_t *oldSize,
458 user_addr_t newBuf,
459 __unused size_t newSize,
460 struct proc *cur_proc)
461 {
462 if (namelen < 1)
463 return (ENOTSUP);
464
465 if (name[0] == 0 && 1 == namelen) {
466 return sysctl_rdint(oldBuf, oldSize, newBuf,
467 (cur_proc->p_flag & P_AFFINITY) ? 1 : 0);
468 } else if (name[0] == 1 && 2 == namelen) {
469 if (name[1] == 0) {
470 cur_proc->p_flag &= ~P_AFFINITY;
471 } else {
472 cur_proc->p_flag |= P_AFFINITY;
473 }
474 return 0;
475 }
476 return (ENOTSUP);
477 }
478
479
480 static int
481 sysctl_translate(
482 int *name,
483 u_int namelen,
484 user_addr_t oldBuf,
485 size_t *oldSize,
486 user_addr_t newBuf,
487 __unused size_t newSize,
488 struct proc *cur_proc)
489 {
490 struct proc *p;
491
492 if (namelen != 1)
493 return (ENOTSUP);
494
495 p = pfind(name[0]);
496 if (p == NULL)
497 return (EINVAL);
498
499 if ((kauth_cred_getuid(p->p_ucred) != kauth_cred_getuid(kauth_cred_get()))
500 && suser(kauth_cred_get(), &cur_proc->p_acflag))
501 return (EPERM);
502
503 return sysctl_rdint(oldBuf, oldSize, newBuf,
504 (p->p_flag & P_TRANSLATED) ? 1 : 0);
505 }
506
507 int
508 set_archhandler(struct proc *p, int arch)
509 {
510 int error;
511 struct nameidata nd;
512 struct vnode_attr va;
513 struct vfs_context context;
514 char *archhandler;
515
516 switch(arch) {
517 case CPU_TYPE_POWERPC:
518 archhandler = exec_archhandler_ppc.path;
519 break;
520 default:
521 return (EBADARCH);
522 }
523
524 context.vc_proc = p;
525 context.vc_ucred = kauth_cred_get();
526
527 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE32,
528 CAST_USER_ADDR_T(archhandler), &context);
529 error = namei(&nd);
530 if (error)
531 return (error);
532 nameidone(&nd);
533
534 /* Check mount point */
535 if ((nd.ni_vp->v_mount->mnt_flag & MNT_NOEXEC) ||
536 (nd.ni_vp->v_type != VREG)) {
537 vnode_put(nd.ni_vp);
538 return (EACCES);
539 }
540
541 VATTR_INIT(&va);
542 VATTR_WANTED(&va, va_fsid);
543 VATTR_WANTED(&va, va_fileid);
544 error = vnode_getattr(nd.ni_vp, &va, &context);
545 if (error) {
546 vnode_put(nd.ni_vp);
547 return (error);
548 }
549 vnode_put(nd.ni_vp);
550
551 exec_archhandler_ppc.fsid = va.va_fsid;
552 exec_archhandler_ppc.fileid = (u_long)va.va_fileid;
553 return 0;
554 }
555
556 static int
557 sysctl_exec_archhandler_ppc(
558 __unused int *name,
559 __unused u_int namelen,
560 user_addr_t oldBuf,
561 size_t *oldSize,
562 user_addr_t newBuf,
563 size_t newSize,
564 struct proc *p)
565 {
566 int error;
567 size_t len;
568 struct nameidata nd;
569 struct vnode_attr va;
570 char handler[sizeof(exec_archhandler_ppc.path)];
571 struct vfs_context context;
572
573 context.vc_proc = p;
574 context.vc_ucred = kauth_cred_get();
575
576 if (oldSize) {
577 len = strlen(exec_archhandler_ppc.path) + 1;
578 if (oldBuf) {
579 if (*oldSize < len)
580 return (ENOMEM);
581 error = copyout(exec_archhandler_ppc.path, oldBuf, len);
582 if (error)
583 return (error);
584 }
585 *oldSize = len - 1;
586 }
587 if (newBuf) {
588 error = suser(context.vc_ucred, &p->p_acflag);
589 if (error)
590 return (error);
591 if (newSize >= sizeof(exec_archhandler_ppc.path))
592 return (ENAMETOOLONG);
593 error = copyin(newBuf, handler, newSize);
594 if (error)
595 return (error);
596 handler[newSize] = 0;
597 strcpy(exec_archhandler_ppc.path, handler);
598 error = set_archhandler(p, CPU_TYPE_POWERPC);
599 if (error)
600 return (error);
601 }
602 return 0;
603 }
604
605 SYSCTL_NODE(_kern, KERN_EXEC, exec, CTLFLAG_RD, 0, "");
606
607 SYSCTL_NODE(_kern_exec, OID_AUTO, archhandler, CTLFLAG_RD, 0, "");
608
609 SYSCTL_STRING(_kern_exec_archhandler, OID_AUTO, powerpc, CTLFLAG_RD,
610 exec_archhandler_ppc.path, 0, "");
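
/*
 * Illustrative userland sketch (not part of this file): the read-only
 * OID registered above can be fetched by name via sysctlbyname(3).
 *
 *	char path[MAXPATHLEN];
 *	size_t len = sizeof(path);
 *
 *	if (sysctlbyname("kern.exec.archhandler.powerpc", path, &len,
 *	    NULL, 0) == 0)
 *		printf("%s\n", path);
 */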
611
612 extern int get_kernel_symfile( struct proc *, char **);
613 __private_extern__ int
614 sysctl_dopanicinfo(int *, u_int, user_addr_t, size_t *, user_addr_t,
615 size_t, struct proc *);
616
617 /*
618 * kernel related system variables.
619 */
620 int
621 kern_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
622 user_addr_t newp, size_t newlen, struct proc *p)
623 {
624 int error, level, inthostid, tmp;
625 unsigned int oldval=0;
626 char *str;
627 /* all sysctl names not listed below are terminal at this level */
628 if (namelen != 1
629 && !(name[0] == KERN_PROC
630 || name[0] == KERN_PROF
631 || name[0] == KERN_KDEBUG
632 || name[0] == KERN_PROCARGS
633 || name[0] == KERN_PROCARGS2
634 || name[0] == KERN_PCSAMPLES
635 || name[0] == KERN_IPC
636 || name[0] == KERN_SYSV
637 || name[0] == KERN_AFFINITY
638 || name[0] == KERN_TRANSLATE
639 || name[0] == KERN_EXEC
640 || name[0] == KERN_PANICINFO
641 || name[0] == KERN_POSIX
642 || name[0] == KERN_TFP)
643 )
644 return (ENOTDIR); /* overloaded */
645
646 switch (name[0]) {
647 case KERN_OSTYPE:
648 return (sysctl_rdstring(oldp, oldlenp, newp, ostype));
649 case KERN_OSRELEASE:
650 return (sysctl_rdstring(oldp, oldlenp, newp, osrelease));
651 case KERN_OSREV:
652 return (sysctl_rdint(oldp, oldlenp, newp, BSD));
653 case KERN_VERSION:
654 return (sysctl_rdstring(oldp, oldlenp, newp, version));
655 case KERN_MAXVNODES:
656 oldval = desiredvnodes;
657 error = sysctl_int(oldp, oldlenp, newp,
658 newlen, &desiredvnodes);
659 reset_vmobjectcache(oldval, desiredvnodes);
660 resize_namecache(desiredvnodes);
661 return(error);
662 case KERN_MAXPROC:
663 return (sysctl_maxproc(oldp, oldlenp, newp, newlen));
664 case KERN_MAXFILES:
665 return (sysctl_int(oldp, oldlenp, newp, newlen, &maxfiles));
666 case KERN_MAXPROCPERUID:
667 return( sysctl_maxprocperuid( oldp, oldlenp, newp, newlen ) );
668 case KERN_MAXFILESPERPROC:
669 return( sysctl_maxfilesperproc( oldp, oldlenp, newp, newlen ) );
670 case KERN_ARGMAX:
671 return (sysctl_rdint(oldp, oldlenp, newp, ARG_MAX));
672 case KERN_SECURELVL:
673 level = securelevel;
674 if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &level)) ||
675 newp == USER_ADDR_NULL)
676 return (error);
677 if (level < securelevel && p->p_pid != 1)
678 return (EPERM);
679 securelevel = level;
680 return (0);
681 case KERN_HOSTNAME:
682 error = sysctl_trstring(oldp, oldlenp, newp, newlen,
683 hostname, sizeof(hostname));
684 if (newp && !error)
685 hostnamelen = newlen;
686 return (error);
687 case KERN_DOMAINNAME:
688 error = sysctl_string(oldp, oldlenp, newp, newlen,
689 domainname, sizeof(domainname));
690 if (newp && !error)
691 domainnamelen = newlen;
692 return (error);
693 case KERN_HOSTID:
694 inthostid = hostid; /* XXX assumes sizeof long <= sizeof int */
695 error = sysctl_int(oldp, oldlenp, newp, newlen, &inthostid);
696 hostid = inthostid;
697 return (error);
698 case KERN_CLOCKRATE:
699 return (sysctl_clockrate(oldp, oldlenp));
700 case KERN_BOOTTIME:
701 {
702 struct timeval t;
703
704 t.tv_sec = boottime_sec();
705 t.tv_usec = 0;
706
707 return (sysctl_rdstruct(oldp, oldlenp, newp, &t,
708 sizeof(struct timeval)));
709 }
710 case KERN_VNODE:
711 return (sysctl_vnode(oldp, oldlenp));
712 case KERN_PROC:
713 return (sysctl_doproc(name + 1, namelen - 1, oldp, oldlenp));
714 case KERN_FILE:
715 return (sysctl_file(oldp, oldlenp));
716 #ifdef GPROF
717 case KERN_PROF:
718 return (sysctl_doprof(name + 1, namelen - 1, oldp, oldlenp,
719 newp, newlen));
720 #endif
721 case KERN_POSIX1:
722 return (sysctl_rdint(oldp, oldlenp, newp, _POSIX_VERSION));
723 case KERN_NGROUPS:
724 return (sysctl_rdint(oldp, oldlenp, newp, NGROUPS_MAX));
725 case KERN_JOB_CONTROL:
726 return (sysctl_rdint(oldp, oldlenp, newp, 1));
727 case KERN_SAVED_IDS:
728 #ifdef _POSIX_SAVED_IDS
729 return (sysctl_rdint(oldp, oldlenp, newp, 1));
730 #else
731 return (sysctl_rdint(oldp, oldlenp, newp, 0));
732 #endif
733 case KERN_KDEBUG:
734 return (kdebug_ops(name + 1, namelen - 1, oldp, oldlenp, p));
735 case KERN_PCSAMPLES:
736 return (pcsamples_ops(name + 1, namelen - 1, oldp, oldlenp, p));
737 case KERN_PROCARGS:
738 		/* newer interface; it does not use kinfo_proc */
739 return (sysctl_procargs(name + 1, namelen - 1, oldp, oldlenp, p));
740 case KERN_PROCARGS2:
741 		/* newer interface; it does not use kinfo_proc */
742 return (sysctl_procargs2(name + 1, namelen - 1, oldp, oldlenp, p));
743 case KERN_SYMFILE:
744 error = get_kernel_symfile( p, &str );
745 if ( error )
746 return error;
747 return (sysctl_rdstring(oldp, oldlenp, newp, str));
748 #if NFSCLIENT
749 case KERN_NETBOOT:
750 return (sysctl_rdint(oldp, oldlenp, newp, netboot_root()));
751 #endif
752 case KERN_PANICINFO:
753 return(sysctl_dopanicinfo(name + 1, namelen - 1, oldp, oldlenp,
754 newp, newlen, p));
755 case KERN_AFFINITY:
756 return sysctl_affinity(name+1, namelen-1, oldp, oldlenp,
757 newp, newlen, p);
758 case KERN_TRANSLATE:
759 return sysctl_translate(name+1, namelen-1, oldp, oldlenp, newp,
760 newlen, p);
761 case KERN_CLASSICHANDLER:
762 return sysctl_exec_archhandler_ppc(name+1, namelen-1, oldp,
763 oldlenp, newp, newlen, p);
764 case KERN_AIOMAX:
765 return( sysctl_aiomax( oldp, oldlenp, newp, newlen ) );
766 case KERN_AIOPROCMAX:
767 return( sysctl_aioprocmax( oldp, oldlenp, newp, newlen ) );
768 case KERN_AIOTHREADS:
769 return( sysctl_aiothreads( oldp, oldlenp, newp, newlen ) );
770 case KERN_USRSTACK:
771 return (sysctl_rdint(oldp, oldlenp, newp, (uintptr_t)p->user_stack));
772 case KERN_USRSTACK64:
773 return (sysctl_rdquad(oldp, oldlenp, newp, p->user_stack));
774 case KERN_COREFILE:
775 error = sysctl_string(oldp, oldlenp, newp, newlen,
776 corefilename, sizeof(corefilename));
777 return (error);
778 case KERN_COREDUMP:
779 tmp = do_coredump;
780 error = sysctl_int(oldp, oldlenp, newp, newlen, &do_coredump);
781 if (!error && ((do_coredump < 0) || (do_coredump > 1))) {
782 do_coredump = tmp;
783 error = EINVAL;
784 }
785 return (error);
786 case KERN_SUGID_COREDUMP:
787 tmp = sugid_coredump;
788 error = sysctl_int(oldp, oldlenp, newp, newlen, &sugid_coredump);
789 if (!error && ((sugid_coredump < 0) || (sugid_coredump > 1))) {
790 sugid_coredump = tmp;
791 error = EINVAL;
792 }
793 return (error);
794 case KERN_PROCDELAYTERM:
795 {
796 int old_value, new_value;
797
798 error = 0;
799 if (oldp && *oldlenp < sizeof(int))
800 return (ENOMEM);
801 if ( newp && newlen != sizeof(int) )
802 return(EINVAL);
803 *oldlenp = sizeof(int);
804 old_value = (p->p_lflag & P_LDELAYTERM)? 1: 0;
805 if (oldp && (error = copyout( &old_value, oldp, sizeof(int))))
806 return(error);
807 if (error == 0 && newp )
808 error = copyin( newp, &new_value, sizeof(int) );
809 if (error == 0 && newp) {
810 if (new_value)
811 p->p_lflag |= P_LDELAYTERM;
812 else
813 p->p_lflag &= ~P_LDELAYTERM;
814 }
815 return(error);
816 }
817 case KERN_PROC_LOW_PRI_IO:
818 {
819 int old_value, new_value;
820
821 error = 0;
822 if (oldp && *oldlenp < sizeof(int))
823 return (ENOMEM);
824 if ( newp && newlen != sizeof(int) )
825 return(EINVAL);
826 *oldlenp = sizeof(int);
827
828 old_value = (p->p_lflag & P_LLOW_PRI_IO)? 0x01: 0;
829 if (p->p_lflag & P_LBACKGROUND_IO)
830 old_value |= 0x02;
831
832 if (oldp && (error = copyout( &old_value, oldp, sizeof(int))))
833 return(error);
834 if (error == 0 && newp )
835 error = copyin( newp, &new_value, sizeof(int) );
836 if (error == 0 && newp) {
837 if (new_value & 0x01)
838 p->p_lflag |= P_LLOW_PRI_IO;
839 else if (new_value & 0x02)
840 p->p_lflag |= P_LBACKGROUND_IO;
841 else if (new_value == 0)
842 p->p_lflag &= ~(P_LLOW_PRI_IO | P_LBACKGROUND_IO);
843 }
844 return(error);
845 }
846 case KERN_LOW_PRI_WINDOW:
847 {
848 int old_value, new_value;
849
850 error = 0;
851 if (oldp && *oldlenp < sizeof(old_value) )
852 return (ENOMEM);
853 if ( newp && newlen != sizeof(new_value) )
854 return(EINVAL);
855 *oldlenp = sizeof(old_value);
856
857 old_value = lowpri_IO_window_msecs;
858
859 if (oldp && (error = copyout( &old_value, oldp, *oldlenp)))
860 return(error);
861 if (error == 0 && newp )
862 		        error = copyin( newp, &new_value, sizeof(new_value) );
863 if (error == 0 && newp) {
864 lowpri_IO_window_msecs = new_value;
865 }
866 return(error);
867 }
868 case KERN_LOW_PRI_DELAY:
869 {
870 int old_value, new_value;
871
872 error = 0;
873 if (oldp && *oldlenp < sizeof(old_value) )
874 return (ENOMEM);
875 if ( newp && newlen != sizeof(new_value) )
876 return(EINVAL);
877 *oldlenp = sizeof(old_value);
878
879 old_value = lowpri_IO_delay_msecs;
880
881 if (oldp && (error = copyout( &old_value, oldp, *oldlenp)))
882 return(error);
883 if (error == 0 && newp )
884 		        error = copyin( newp, &new_value, sizeof(new_value) );
885 if (error == 0 && newp) {
886 lowpri_IO_delay_msecs = new_value;
887 }
888 return(error);
889 }
890 case KERN_NX_PROTECTION:
891 {
892 int old_value, new_value;
893
894 error = 0;
895 if (oldp && *oldlenp < sizeof(old_value) )
896 return (ENOMEM);
897 if ( newp && newlen != sizeof(new_value) )
898 return(EINVAL);
899 *oldlenp = sizeof(old_value);
900
901 old_value = nx_enabled;
902
903 if (oldp && (error = copyout( &old_value, oldp, *oldlenp)))
904 return(error);
905 #ifdef __i386__
906 /*
907 * Only allow setting if NX is supported on the chip
908 */
909 if (cpuid_extfeatures() & CPUID_EXTFEATURE_XD) {
910 #endif
911 if (error == 0 && newp)
912 error = copyin(newp, &new_value,
913 				       sizeof(new_value));
914 if (error == 0 && newp)
915 nx_enabled = new_value;
916 #ifdef __i386__
917 } else if (newp) {
918 error = ENOTSUP;
919 }
920 #endif
921 return(error);
922 }
923 case KERN_SHREG_PRIVATIZABLE:
924 /* this kernel does implement shared_region_make_private_np() */
925 return (sysctl_rdint(oldp, oldlenp, newp, 1));
926 case KERN_PROCNAME:
927 error = sysctl_trstring(oldp, oldlenp, newp, newlen,
928 &p->p_name[0], (2*MAXCOMLEN+1));
929 return (error);
930 case KERN_THALTSTACK:
931 {
932 int old_value, new_value;
933
934 error = 0;
935 if (oldp && *oldlenp < sizeof(int))
936 return (ENOMEM);
937 if ( newp && newlen != sizeof(int) )
938 return(EINVAL);
939 *oldlenp = sizeof(int);
940 old_value = (p->p_lflag & P_LTHSIGSTACK)? 1: 0;
941 if (oldp && (error = copyout( &old_value, oldp, sizeof(int))))
942 return(error);
943 if (error == 0 && newp )
944 error = copyin( newp, &new_value, sizeof(int) );
945 if (error == 0 && newp) {
946 if (new_value) {
947 				/* we cannot switch midstream if in use */
948 if ((p->p_sigacts->ps_flags & SAS_ALTSTACK) == SAS_ALTSTACK)
949 return(EPERM);
950 p->p_lflag |= P_LTHSIGSTACK;
951 } else {
952 				/* we cannot switch midstream */
953 if ((p->p_lflag & P_LTHSIGSTACK) == P_LTHSIGSTACK)
954 return(EPERM);
955 p->p_lflag &= ~P_LTHSIGSTACK;
956 }
957 }
958 return(error);
959 }
960 default:
961 return (ENOTSUP);
962 }
963 /* NOTREACHED */
964 }
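
/*
 * Illustrative userland sketch (not part of this file): KERN_BOOTTIME
 * above is returned as a struct timeval by sysctl_rdstruct(), so a
 * caller built against this kernel version might read it as:
 *
 *	int mib[2] = { CTL_KERN, KERN_BOOTTIME };
 *	struct timeval boottime;
 *	size_t len = sizeof(boottime);
 *
 *	if (sysctl(mib, 2, &boottime, &len, NULL, 0) == 0)
 *		printf("booted at %ld\n", (long)boottime.tv_sec);
 */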
965
966 #ifdef DEBUG
967 /*
968 * Debugging related system variables.
969 */
970 #if DIAGNOSTIC
971 extern
972 #endif /* DIAGNOSTIC */
973 struct ctldebug debug0, debug1;
974 struct ctldebug debug2, debug3, debug4;
975 struct ctldebug debug5, debug6, debug7, debug8, debug9;
976 struct ctldebug debug10, debug11, debug12, debug13, debug14;
977 struct ctldebug debug15, debug16, debug17, debug18, debug19;
978 static struct ctldebug *debugvars[CTL_DEBUG_MAXID] = {
979 &debug0, &debug1, &debug2, &debug3, &debug4,
980 &debug5, &debug6, &debug7, &debug8, &debug9,
981 &debug10, &debug11, &debug12, &debug13, &debug14,
982 &debug15, &debug16, &debug17, &debug18, &debug19,
983 };
984 int
985 debug_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
986 user_addr_t newp, size_t newlen, struct proc *p)
987 {
988 struct ctldebug *cdp;
989
990 /* all sysctl names at this level are name and field */
991 if (namelen != 2)
992 return (ENOTDIR); /* overloaded */
993 cdp = debugvars[name[0]];
994 if (cdp->debugname == 0)
995 return (ENOTSUP);
996 switch (name[1]) {
997 case CTL_DEBUG_NAME:
998 return (sysctl_rdstring(oldp, oldlenp, newp, cdp->debugname));
999 case CTL_DEBUG_VALUE:
1000 return (sysctl_int(oldp, oldlenp, newp, newlen, cdp->debugvar));
1001 default:
1002 return (ENOTSUP);
1003 }
1004 /* NOTREACHED */
1005 }
1006 #endif /* DEBUG */
1007
1008 /*
1009 * Validate parameters and get old / set new parameters
1010 * for an integer-valued sysctl function.
1011 */
1012 int
1013 sysctl_int(user_addr_t oldp, size_t *oldlenp,
1014 user_addr_t newp, size_t newlen, int *valp)
1015 {
1016 int error = 0;
1017
1018 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1019 return (EFAULT);
1020 if (oldp && *oldlenp < sizeof(int))
1021 return (ENOMEM);
1022 if (newp && newlen != sizeof(int))
1023 return (EINVAL);
1024 *oldlenp = sizeof(int);
1025 if (oldp)
1026 error = copyout(valp, oldp, sizeof(int));
1027 if (error == 0 && newp) {
1028 error = copyin(newp, valp, sizeof(int));
1029 AUDIT_ARG(value, *valp);
1030 }
1031 return (error);
1032 }
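
/*
 * Minimal in-kernel sketch of how a handler wires an integer tunable
 * through sysctl_int(); the MIB name and variable are hypothetical and
 * shown for illustration only (KERN_MAXFILES above is a real example).
 *
 *	extern int my_tunable;
 *	...
 *	case KERN_MYTUNABLE:
 *		return (sysctl_int(oldp, oldlenp, newp, newlen, &my_tunable));
 */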
1033
1034 /*
1035 * As above, but read-only.
1036 */
1037 int
1038 sysctl_rdint(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, int val)
1039 {
1040 int error = 0;
1041
1042 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1043 return (EFAULT);
1044 if (oldp && *oldlenp < sizeof(int))
1045 return (ENOMEM);
1046 if (newp)
1047 return (EPERM);
1048 *oldlenp = sizeof(int);
1049 if (oldp)
1050 error = copyout((caddr_t)&val, oldp, sizeof(int));
1051 return (error);
1052 }
1053
1054 /*
1055 * Validate parameters and get old / set new parameters
1056  * for a quad (64-bit) valued sysctl function.
1057 */
1058 int
1059 sysctl_quad(user_addr_t oldp, size_t *oldlenp,
1060 user_addr_t newp, size_t newlen, quad_t *valp)
1061 {
1062 int error = 0;
1063
1064 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1065 return (EFAULT);
1066 if (oldp && *oldlenp < sizeof(quad_t))
1067 return (ENOMEM);
1068 if (newp && newlen != sizeof(quad_t))
1069 return (EINVAL);
1070 *oldlenp = sizeof(quad_t);
1071 if (oldp)
1072 error = copyout(valp, oldp, sizeof(quad_t));
1073 if (error == 0 && newp)
1074 error = copyin(newp, valp, sizeof(quad_t));
1075 return (error);
1076 }
1077
1078 /*
1079 * As above, but read-only.
1080 */
1081 int
1082 sysctl_rdquad(oldp, oldlenp, newp, val)
1083 void *oldp;
1084 size_t *oldlenp;
1085 void *newp;
1086 quad_t val;
1087 {
1088 int error = 0;
1089
1090 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1091 return (EFAULT);
1092 if (oldp && *oldlenp < sizeof(quad_t))
1093 return (ENOMEM);
1094 if (newp)
1095 return (EPERM);
1096 *oldlenp = sizeof(quad_t);
1097 if (oldp)
1098 error = copyout((caddr_t)&val, CAST_USER_ADDR_T(oldp), sizeof(quad_t));
1099 return (error);
1100 }
1101
1102 /*
1103 * Validate parameters and get old / set new parameters
1104 * for a string-valued sysctl function. Unlike sysctl_string, if you
1105 * give it a too small (but larger than 0 bytes) buffer, instead of
1106 * returning ENOMEM, it truncates the returned string to the buffer
1107 * size. This preserves the semantics of some library routines
1108 * implemented via sysctl, which truncate their returned data, rather
1109 * than simply returning an error. The returned string is always NUL
1110 * terminated.
1111 */
1112 int
1113 sysctl_trstring(user_addr_t oldp, size_t *oldlenp,
1114 user_addr_t newp, size_t newlen, char *str, int maxlen)
1115 {
1116 int len, copylen, error = 0;
1117
1118 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1119 return (EFAULT);
1120 copylen = len = strlen(str) + 1;
1121 if (oldp && (len < 0 || *oldlenp < 1))
1122 return (ENOMEM);
1123 if (oldp && (*oldlenp < (size_t)len))
1124 copylen = *oldlenp + 1;
1125 if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
1126 return (EINVAL);
1127 *oldlenp = copylen - 1; /* deal with NULL strings correctly */
1128 if (oldp) {
1129 error = copyout(str, oldp, copylen);
1130 if (!error) {
1131 unsigned char c = 0;
1132 /* NUL terminate */
1133 oldp += *oldlenp;
1134 error = copyout((void *)&c, oldp, sizeof(char));
1135 }
1136 }
1137 if (error == 0 && newp) {
1138 error = copyin(newp, str, newlen);
1139 str[newlen] = 0;
1140 AUDIT_ARG(text, (char *)str);
1141 }
1142 return (error);
1143 }
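
/*
 * Illustrative userland sketch (not part of this file): KERN_HOSTNAME is
 * served by sysctl_trstring() above, so reading it into a short buffer
 * yields a truncated, NUL-terminated string instead of ENOMEM.
 *
 *	int mib[2] = { CTL_KERN, KERN_HOSTNAME };
 *	char buf[16];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctl(mib, 2, buf, &len, NULL, 0) == 0)
 *		printf("%s\n", buf);
 */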
1144
1145 /*
1146 * Validate parameters and get old / set new parameters
1147 * for a string-valued sysctl function.
1148 */
1149 int
1150 sysctl_string(user_addr_t oldp, size_t *oldlenp,
1151 user_addr_t newp, size_t newlen, char *str, int maxlen)
1152 {
1153 int len, error = 0;
1154
1155 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1156 return (EFAULT);
1157 len = strlen(str) + 1;
1158 if (oldp && (len < 0 || *oldlenp < (size_t)len))
1159 return (ENOMEM);
1160 if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
1161 return (EINVAL);
1162 *oldlenp = len -1; /* deal with NULL strings correctly */
1163 if (oldp) {
1164 error = copyout(str, oldp, len);
1165 }
1166 if (error == 0 && newp) {
1167 error = copyin(newp, str, newlen);
1168 str[newlen] = 0;
1169 AUDIT_ARG(text, (char *)str);
1170 }
1171 return (error);
1172 }
1173
1174 /*
1175 * As above, but read-only.
1176 */
1177 int
1178 sysctl_rdstring(user_addr_t oldp, size_t *oldlenp,
1179 user_addr_t newp, char *str)
1180 {
1181 int len, error = 0;
1182
1183 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1184 return (EFAULT);
1185 len = strlen(str) + 1;
1186 if (oldp && *oldlenp < (size_t)len)
1187 return (ENOMEM);
1188 if (newp)
1189 return (EPERM);
1190 *oldlenp = len;
1191 if (oldp)
1192 error = copyout(str, oldp, len);
1193 return (error);
1194 }
1195
1196 /*
1197 * Validate parameters and get old / set new parameters
1198 * for a structure oriented sysctl function.
1199 */
1200 int
1201 sysctl_struct(user_addr_t oldp, size_t *oldlenp,
1202 user_addr_t newp, size_t newlen, void *sp, int len)
1203 {
1204 int error = 0;
1205
1206 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1207 return (EFAULT);
1208 if (oldp && (len < 0 || *oldlenp < (size_t)len))
1209 return (ENOMEM);
1210 if (newp && (len < 0 || newlen > (size_t)len))
1211 return (EINVAL);
1212 if (oldp) {
1213 *oldlenp = len;
1214 error = copyout(sp, oldp, len);
1215 }
1216 if (error == 0 && newp)
1217 error = copyin(newp, sp, len);
1218 return (error);
1219 }
1220
1221 /*
1222 * Validate parameters and get old parameters
1223 * for a structure oriented sysctl function.
1224 */
1225 int
1226 sysctl_rdstruct(user_addr_t oldp, size_t *oldlenp,
1227 user_addr_t newp, void *sp, int len)
1228 {
1229 int error = 0;
1230
1231 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1232 return (EFAULT);
1233 if (oldp && (len < 0 || *oldlenp < (size_t)len))
1234 return (ENOMEM);
1235 if (newp)
1236 return (EPERM);
1237 *oldlenp = len;
1238 if (oldp)
1239 error = copyout(sp, oldp, len);
1240 return (error);
1241 }
1242
1243 /*
1244 * Get file structures.
1245 */
1246 int
1247 sysctl_file(user_addr_t where, size_t *sizep)
1248 {
1249 int buflen, error;
1250 struct fileglob *fg;
1251 user_addr_t start = where;
1252 struct extern_file nef;
1253
1254 buflen = *sizep;
1255 if (where == USER_ADDR_NULL) {
1256 /*
1257 * overestimate by 10 files
1258 */
1259 *sizep = sizeof(filehead) + (nfiles + 10) * sizeof(struct extern_file);
1260 return (0);
1261 }
1262
1263 /*
1264 * first copyout filehead
1265 */
1266 if (buflen < 0 || (size_t)buflen < sizeof(filehead)) {
1267 *sizep = 0;
1268 return (0);
1269 }
1270 error = copyout((caddr_t)&filehead, where, sizeof(filehead));
1271 if (error)
1272 return (error);
1273 buflen -= sizeof(filehead);
1274 where += sizeof(filehead);
1275
1276 /*
1277 * followed by an array of file structures
1278 */
1279 for (fg = filehead.lh_first; fg != 0; fg = fg->f_list.le_next) {
1280 if (buflen < 0 || (size_t)buflen < sizeof(struct extern_file)) {
1281 *sizep = where - start;
1282 return (ENOMEM);
1283 }
1284 nef.f_list.le_next = (struct extern_file *)fg->f_list.le_next;
1285 nef.f_list.le_prev = (struct extern_file **)fg->f_list.le_prev;
1286 nef.f_flag = (fg->fg_flag & FMASK);
1287 nef.f_type = fg->fg_type;
1288 nef.f_count = fg->fg_count;
1289 nef.f_msgcount = fg->fg_msgcount;
1290 nef.f_cred = fg->fg_cred;
1291 nef.f_ops = fg->fg_ops;
1292 nef.f_offset = fg->fg_offset;
1293 nef.f_data = fg->fg_data;
1294 error = copyout((caddr_t)&nef, where, sizeof (struct extern_file));
1295 if (error)
1296 return (error);
1297 buflen -= sizeof(struct extern_file);
1298 where += sizeof(struct extern_file);
1299 }
1300 *sizep = where - start;
1301 return (0);
1302 }
1303
1304 /*
1305  * try overestimating by 5 procs
1306 */
1307 #define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
1308
1309 int
1310 sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep)
1311 {
1312 struct proc *p;
1313 user_addr_t dp = where;
1314 size_t needed = 0;
1315 int buflen = where != USER_ADDR_NULL ? *sizep : 0;
1316 int doingzomb;
1317 int error = 0;
1318 boolean_t is_64_bit = FALSE;
1319 struct kinfo_proc kproc;
1320 struct user_kinfo_proc user_kproc;
1321 int sizeof_kproc;
1322 caddr_t kprocp;
1323
1324 if (namelen != 2 && !(namelen == 1 && name[0] == KERN_PROC_ALL))
1325 return (EINVAL);
1326 p = allproc.lh_first;
1327 doingzomb = 0;
1328 is_64_bit = proc_is64bit(current_proc());
1329 if (is_64_bit) {
1330 sizeof_kproc = sizeof(user_kproc);
1331 kprocp = (caddr_t) &user_kproc;
1332 }
1333 else {
1334 sizeof_kproc = sizeof(kproc);
1335 kprocp = (caddr_t) &kproc;
1336 }
1337 again:
1338 for (; p != 0; p = p->p_list.le_next) {
1339 /*
1340 * Skip embryonic processes.
1341 */
1342 if (p->p_stat == SIDL)
1343 continue;
1344 /*
1345 * TODO - make more efficient (see notes below).
1346 * do by session.
1347 */
1348 switch (name[0]) {
1349
1350 case KERN_PROC_PID:
1351 /* could do this with just a lookup */
1352 if (p->p_pid != (pid_t)name[1])
1353 continue;
1354 break;
1355
1356 case KERN_PROC_PGRP:
1357 /* could do this by traversing pgrp */
1358 if (p->p_pgrp->pg_id != (pid_t)name[1])
1359 continue;
1360 break;
1361
1362 case KERN_PROC_TTY:
1363 if ((p->p_flag & P_CONTROLT) == 0 ||
1364 (p->p_session == NULL) ||
1365 p->p_session->s_ttyp == NULL ||
1366 p->p_session->s_ttyp->t_dev != (dev_t)name[1])
1367 continue;
1368 break;
1369
1370 case KERN_PROC_UID:
1371 if ((p->p_ucred == NULL) ||
1372 (kauth_cred_getuid(p->p_ucred) != (uid_t)name[1]))
1373 continue;
1374 break;
1375
1376 case KERN_PROC_RUID:
1377 if ((p->p_ucred == NULL) ||
1378 (p->p_ucred->cr_ruid != (uid_t)name[1]))
1379 continue;
1380 break;
1381 }
1382 if (buflen >= sizeof_kproc) {
1383 bzero(kprocp, sizeof_kproc);
1384 if (is_64_bit) {
1385 fill_user_proc(p, (struct user_kinfo_proc *) kprocp);
1386 }
1387 else {
1388 fill_proc(p, (struct kinfo_proc *) kprocp);
1389 }
1390 error = copyout(kprocp, dp, sizeof_kproc);
1391 if (error)
1392 return (error);
1393 dp += sizeof_kproc;
1394 buflen -= sizeof_kproc;
1395 }
1396 needed += sizeof_kproc;
1397 }
1398 if (doingzomb == 0) {
1399 p = zombproc.lh_first;
1400 doingzomb++;
1401 goto again;
1402 }
1403 if (where != USER_ADDR_NULL) {
1404 *sizep = dp - where;
1405 if (needed > *sizep)
1406 return (ENOMEM);
1407 } else {
1408 needed += KERN_PROCSLOP;
1409 *sizep = needed;
1410 }
1411 return (0);
1412 }
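
/*
 * Illustrative userland sketch (not part of this file) of the usual
 * two-pass pattern for the KERN_PROC_ALL case handled above: probe for
 * the needed size, allocate, then fetch the kinfo_proc array.  The
 * KERN_PROCSLOP padding covers processes created between the two calls.
 *
 *	int mib[3] = { CTL_KERN, KERN_PROC, KERN_PROC_ALL };
 *	size_t len = 0;
 *	struct kinfo_proc *procs;
 *
 *	if (sysctl(mib, 3, NULL, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 *	if ((procs = malloc(len)) == NULL)
 *		err(1, "malloc");
 *	if (sysctl(mib, 3, procs, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 *
 * On success, len / sizeof(struct kinfo_proc) entries are valid.
 */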
1413
1414 /*
1415 * Fill in an eproc structure for the specified process.
1416 */
1417 static void
1418 fill_eproc(p, ep)
1419 register struct proc *p;
1420 register struct eproc *ep;
1421 {
1422 register struct tty *tp;
1423
1424 ep->e_paddr = p;
1425 if (p->p_pgrp) {
1426 ep->e_sess = p->p_pgrp->pg_session;
1427 ep->e_pgid = p->p_pgrp->pg_id;
1428 ep->e_jobc = p->p_pgrp->pg_jobc;
1429 if (ep->e_sess && ep->e_sess->s_ttyvp)
1430 ep->e_flag = EPROC_CTTY;
1431 } else {
1432 ep->e_sess = (struct session *)0;
1433 ep->e_pgid = 0;
1434 ep->e_jobc = 0;
1435 }
1436 ep->e_ppid = (p->p_pptr) ? p->p_pptr->p_pid : 0;
1437 /* Pre-zero the fake historical pcred */
1438 bzero(&ep->e_pcred, sizeof(struct _pcred));
1439 if (p->p_ucred) {
1440 /* XXX not ref-counted */
1441
1442 /* A fake historical pcred */
1443 ep->e_pcred.p_ruid = p->p_ucred->cr_ruid;
1444 ep->e_pcred.p_svuid = p->p_ucred->cr_svuid;
1445 ep->e_pcred.p_rgid = p->p_ucred->cr_rgid;
1446 ep->e_pcred.p_svgid = p->p_ucred->cr_svgid;
1447
1448 /* A fake historical *kauth_cred_t */
1449 ep->e_ucred.cr_ref = p->p_ucred->cr_ref;
1450 ep->e_ucred.cr_uid = kauth_cred_getuid(p->p_ucred);
1451 ep->e_ucred.cr_ngroups = p->p_ucred->cr_ngroups;
1452 bcopy(p->p_ucred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));
1453
1454 }
1455 if (p->p_stat == SIDL || p->p_stat == SZOMB) {
1456 ep->e_vm.vm_tsize = 0;
1457 ep->e_vm.vm_dsize = 0;
1458 ep->e_vm.vm_ssize = 0;
1459 }
1460 ep->e_vm.vm_rssize = 0;
1461
1462 if ((p->p_flag & P_CONTROLT) && (ep->e_sess) &&
1463 (tp = ep->e_sess->s_ttyp)) {
1464 ep->e_tdev = tp->t_dev;
1465 ep->e_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
1466 ep->e_tsess = tp->t_session;
1467 } else
1468 ep->e_tdev = NODEV;
1469
1470 if (SESS_LEADER(p))
1471 ep->e_flag |= EPROC_SLEADER;
1472 if (p->p_wmesg)
1473 strncpy(ep->e_wmesg, p->p_wmesg, WMESGLEN);
1474 ep->e_xsize = ep->e_xrssize = 0;
1475 ep->e_xccount = ep->e_xswrss = 0;
1476 }
1477
1478 /*
1479 * Fill in an LP64 version of eproc structure for the specified process.
1480 */
1481 static void
1482 fill_user_eproc(register struct proc *p, register struct user_eproc *ep)
1483 {
1484 register struct tty *tp;
1485 struct session *sessionp = NULL;
1486
1487 ep->e_paddr = CAST_USER_ADDR_T(p);
1488 if (p->p_pgrp) {
1489 sessionp = p->p_pgrp->pg_session;
1490 ep->e_sess = CAST_USER_ADDR_T(sessionp);
1491 ep->e_pgid = p->p_pgrp->pg_id;
1492 ep->e_jobc = p->p_pgrp->pg_jobc;
1493 if (sessionp) {
1494 if (sessionp->s_ttyvp)
1495 ep->e_flag = EPROC_CTTY;
1496 }
1497 } else {
1498 ep->e_sess = USER_ADDR_NULL;
1499 ep->e_pgid = 0;
1500 ep->e_jobc = 0;
1501 }
1502 ep->e_ppid = (p->p_pptr) ? p->p_pptr->p_pid : 0;
1503 /* Pre-zero the fake historical pcred */
1504 bzero(&ep->e_pcred, sizeof(ep->e_pcred));
1505 if (p->p_ucred) {
1506 /* XXX not ref-counted */
1507
1508 /* A fake historical pcred */
1509 ep->e_pcred.p_ruid = p->p_ucred->cr_ruid;
1510 ep->e_pcred.p_svuid = p->p_ucred->cr_svuid;
1511 ep->e_pcred.p_rgid = p->p_ucred->cr_rgid;
1512 ep->e_pcred.p_svgid = p->p_ucred->cr_svgid;
1513
1514 /* A fake historical *kauth_cred_t */
1515 ep->e_ucred.cr_ref = p->p_ucred->cr_ref;
1516 ep->e_ucred.cr_uid = kauth_cred_getuid(p->p_ucred);
1517 ep->e_ucred.cr_ngroups = p->p_ucred->cr_ngroups;
1518 bcopy(p->p_ucred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));
1519
1520 }
1521 if (p->p_stat == SIDL || p->p_stat == SZOMB) {
1522 ep->e_vm.vm_tsize = 0;
1523 ep->e_vm.vm_dsize = 0;
1524 ep->e_vm.vm_ssize = 0;
1525 }
1526 ep->e_vm.vm_rssize = 0;
1527
1528 if ((p->p_flag & P_CONTROLT) && (sessionp) &&
1529 (tp = sessionp->s_ttyp)) {
1530 ep->e_tdev = tp->t_dev;
1531 ep->e_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
1532 ep->e_tsess = CAST_USER_ADDR_T(tp->t_session);
1533 } else
1534 ep->e_tdev = NODEV;
1535
1536 if (SESS_LEADER(p))
1537 ep->e_flag |= EPROC_SLEADER;
1538 if (p->p_wmesg)
1539 strncpy(ep->e_wmesg, p->p_wmesg, WMESGLEN);
1540 ep->e_xsize = ep->e_xrssize = 0;
1541 ep->e_xccount = ep->e_xswrss = 0;
1542 }
1543
1544 /*
1545 * Fill in an eproc structure for the specified process.
1546 */
1547 static void
1548 fill_externproc(p, exp)
1549 register struct proc *p;
1550 register struct extern_proc *exp;
1551 {
1552 exp->p_forw = exp->p_back = NULL;
1553 if (p->p_stats)
1554 exp->p_starttime = p->p_stats->p_start;
1555 exp->p_vmspace = NULL;
1556 exp->p_sigacts = p->p_sigacts;
1557 exp->p_flag = p->p_flag;
1558 exp->p_stat = p->p_stat ;
1559 exp->p_pid = p->p_pid ;
1560 exp->p_oppid = p->p_oppid ;
1561 exp->p_dupfd = p->p_dupfd ;
1562 /* Mach related */
1563 exp->user_stack = CAST_DOWN(caddr_t, p->user_stack);
1564 exp->exit_thread = p->exit_thread ;
1565 exp->p_debugger = p->p_debugger ;
1566 exp->sigwait = p->sigwait ;
1567 /* scheduling */
1568 exp->p_estcpu = p->p_estcpu ;
1569 exp->p_cpticks = p->p_cpticks ;
1570 exp->p_pctcpu = p->p_pctcpu ;
1571 exp->p_wchan = p->p_wchan ;
1572 exp->p_wmesg = p->p_wmesg ;
1573 exp->p_swtime = p->p_swtime ;
1574 exp->p_slptime = p->p_slptime ;
1575 bcopy(&p->p_realtimer, &exp->p_realtimer,sizeof(struct itimerval));
1576 bcopy(&p->p_rtime, &exp->p_rtime,sizeof(struct timeval));
1577 exp->p_uticks = p->p_uticks ;
1578 exp->p_sticks = p->p_sticks ;
1579 exp->p_iticks = p->p_iticks ;
1580 exp->p_traceflag = p->p_traceflag ;
1581 exp->p_tracep = p->p_tracep ;
1582 exp->p_siglist = 0 ; /* No longer relevant */
1583 exp->p_textvp = p->p_textvp ;
1584 exp->p_holdcnt = 0 ;
1585 	exp->p_sigmask = 0 ;	/* no longer available */
1586 exp->p_sigignore = p->p_sigignore ;
1587 exp->p_sigcatch = p->p_sigcatch ;
1588 exp->p_priority = p->p_priority ;
1589 exp->p_usrpri = p->p_usrpri ;
1590 exp->p_nice = p->p_nice ;
1591 bcopy(&p->p_comm, &exp->p_comm,MAXCOMLEN);
1592 exp->p_comm[MAXCOMLEN] = '\0';
1593 exp->p_pgrp = p->p_pgrp ;
1594 exp->p_addr = NULL;
1595 exp->p_xstat = p->p_xstat ;
1596 exp->p_acflag = p->p_acflag ;
1597 exp->p_ru = p->p_ru ; /* XXX may be NULL */
1598 }
1599
1600 /*
1601 * Fill in an LP64 version of extern_proc structure for the specified process.
1602 */
1603 static void
1604 fill_user_externproc(register struct proc *p, register struct user_extern_proc *exp)
1605 {
1606 exp->p_forw = exp->p_back = USER_ADDR_NULL;
1607 if (p->p_stats) {
1608 exp->p_starttime.tv_sec = p->p_stats->p_start.tv_sec;
1609 exp->p_starttime.tv_usec = p->p_stats->p_start.tv_usec;
1610 }
1611 exp->p_vmspace = USER_ADDR_NULL;
1612 exp->p_sigacts = CAST_USER_ADDR_T(p->p_sigacts);
1613 exp->p_flag = p->p_flag;
1614 exp->p_stat = p->p_stat ;
1615 exp->p_pid = p->p_pid ;
1616 exp->p_oppid = p->p_oppid ;
1617 exp->p_dupfd = p->p_dupfd ;
1618 /* Mach related */
1619 exp->user_stack = p->user_stack;
1620 exp->exit_thread = CAST_USER_ADDR_T(p->exit_thread);
1621 exp->p_debugger = p->p_debugger ;
1622 exp->sigwait = p->sigwait ;
1623 /* scheduling */
1624 exp->p_estcpu = p->p_estcpu ;
1625 exp->p_cpticks = p->p_cpticks ;
1626 exp->p_pctcpu = p->p_pctcpu ;
1627 exp->p_wchan = CAST_USER_ADDR_T(p->p_wchan);
1628 exp->p_wmesg = CAST_USER_ADDR_T(p->p_wmesg);
1629 exp->p_swtime = p->p_swtime ;
1630 exp->p_slptime = p->p_slptime ;
1631 exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
1632 exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;
1633 exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
1634 exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;
1635 exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
1636 exp->p_rtime.tv_usec = p->p_rtime.tv_usec;
1637 exp->p_uticks = p->p_uticks ;
1638 exp->p_sticks = p->p_sticks ;
1639 exp->p_iticks = p->p_iticks ;
1640 exp->p_traceflag = p->p_traceflag ;
1641 exp->p_tracep = CAST_USER_ADDR_T(p->p_tracep);
1642 exp->p_siglist = 0 ; /* No longer relevant */
1643 exp->p_textvp = CAST_USER_ADDR_T(p->p_textvp);
1644 exp->p_holdcnt = 0 ;
1645 	exp->p_sigmask = 0 ;	/* no longer available */
1646 exp->p_sigignore = p->p_sigignore ;
1647 exp->p_sigcatch = p->p_sigcatch ;
1648 exp->p_priority = p->p_priority ;
1649 exp->p_usrpri = p->p_usrpri ;
1650 exp->p_nice = p->p_nice ;
1651 bcopy(&p->p_comm, &exp->p_comm,MAXCOMLEN);
1652 exp->p_comm[MAXCOMLEN] = '\0';
1653 exp->p_pgrp = CAST_USER_ADDR_T(p->p_pgrp);
1654 exp->p_addr = USER_ADDR_NULL;
1655 exp->p_xstat = p->p_xstat ;
1656 exp->p_acflag = p->p_acflag ;
1657 exp->p_ru = CAST_USER_ADDR_T(p->p_ru); /* XXX may be NULL */
1658 }
1659
1660 static void
1661 fill_proc(p, kp)
1662 register struct proc *p;
1663 register struct kinfo_proc *kp;
1664 {
1665 fill_externproc(p, &kp->kp_proc);
1666 fill_eproc(p, &kp->kp_eproc);
1667 }
1668
1669 static void
1670 fill_user_proc(register struct proc *p, register struct user_kinfo_proc *kp)
1671 {
1672 fill_user_externproc(p, &kp->kp_proc);
1673 fill_user_eproc(p, &kp->kp_eproc);
1674 }
1675
1676 int
1677 kdebug_ops(int *name, u_int namelen, user_addr_t where,
1678 size_t *sizep, struct proc *p)
1679 {
1680 int ret=0;
1681
1682 ret = suser(kauth_cred_get(), &p->p_acflag);
1683 if (ret)
1684 return(ret);
1685
1686 switch(name[0]) {
1687 case KERN_KDEFLAGS:
1688 case KERN_KDDFLAGS:
1689 case KERN_KDENABLE:
1690 case KERN_KDGETBUF:
1691 case KERN_KDSETUP:
1692 case KERN_KDREMOVE:
1693 case KERN_KDSETREG:
1694 case KERN_KDGETREG:
1695 case KERN_KDREADTR:
1696 case KERN_KDPIDTR:
1697 case KERN_KDTHRMAP:
1698 case KERN_KDPIDEX:
1699 case KERN_KDSETRTCDEC:
1700 case KERN_KDSETBUF:
1701 case KERN_KDGETENTROPY:
1702 ret = kdbg_control(name, namelen, where, sizep);
1703 break;
1704 default:
1705 ret= ENOTSUP;
1706 break;
1707 }
1708 return(ret);
1709 }
1710
1711 extern int pcsamples_control(int *name, u_int namelen, user_addr_t where,
1712 size_t * sizep);
1713
1714 int
1715 pcsamples_ops(int *name, u_int namelen, user_addr_t where,
1716 size_t *sizep, struct proc *p)
1717 {
1718 int ret=0;
1719
1720 ret = suser(kauth_cred_get(), &p->p_acflag);
1721 if (ret)
1722 return(ret);
1723
1724 switch(name[0]) {
1725 case KERN_PCDISABLE:
1726 case KERN_PCGETBUF:
1727 case KERN_PCSETUP:
1728 case KERN_PCREMOVE:
1729 case KERN_PCREADBUF:
1730 case KERN_PCSETREG:
1731 case KERN_PCSETBUF:
1732 case KERN_PCCOMM:
1733 ret = pcsamples_control(name, namelen, where, sizep);
1734 break;
1735 default:
1736 ret= ENOTSUP;
1737 break;
1738 }
1739 return(ret);
1740 }
1741
1742 /*
1743 * Return the top *sizep bytes of the user stack, or the entire area of the
1744 * user stack down through the saved exec_path, whichever is smaller.
1745 */
1746 int
1747 sysctl_procargs(int *name, u_int namelen, user_addr_t where,
1748 size_t *sizep, struct proc *cur_proc)
1749 {
1750 return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 0);
1751 }
1752
1753 static int
1754 sysctl_procargs2(int *name, u_int namelen, user_addr_t where,
1755 size_t *sizep, struct proc *cur_proc)
1756 {
1757 return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 1);
1758 }
1759
1760 static int
1761 sysctl_procargsx(int *name, __unused u_int namelen, user_addr_t where,
1762 size_t *sizep, struct proc *cur_proc, int argc_yes)
1763 {
1764 struct proc *p;
1765 int buflen = where != USER_ADDR_NULL ? *sizep : 0;
1766 int error = 0;
1767 struct vm_map *proc_map;
1768 struct task * task;
1769 vm_map_copy_t tmp;
1770 user_addr_t arg_addr;
1771 size_t arg_size;
1772 caddr_t data;
1773 int size;
1774 vm_offset_t copy_start, copy_end;
1775 kern_return_t ret;
1776 int pid;
1777
1778 if (argc_yes)
1779 buflen -= sizeof(int); /* reserve first word to return argc */
1780
1781 /* we only care about buflen when where (oldp from sysctl) is not NULL. */
1782 	/* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl) */
1783 /* is not NULL then the caller wants us to return the length needed to */
1784 /* hold the data we would return */
1785 if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
1786 return(EINVAL);
1787 }
1788 arg_size = buflen;
1789
1790 /*
1791 * Lookup process by pid
1792 */
1793 pid = name[0];
1794 p = pfind(pid);
1795 if (p == NULL) {
1796 return(EINVAL);
1797 }
1798
1799 /*
1800 * Copy the top N bytes of the stack.
1801 * On all machines we have so far, the stack grows
1802 * downwards.
1803 *
1804 * If the user expects no more than N bytes of
1805 * argument list, use that as a guess for the
1806 * size.
1807 */
1808
1809 if (!p->user_stack)
1810 return(EINVAL);
1811
1812 if (where == USER_ADDR_NULL) {
1813 /* caller only wants to know length of proc args data */
1814 if (sizep == NULL)
1815 return(EFAULT);
1816
1817 size = p->p_argslen;
1818 if (argc_yes) {
1819 size += sizeof(int);
1820 }
1821 else {
1822 /*
1823 			 * old PROCARGS will return the executable's path plus some
1824 			 * extra space for word alignment and data tags
1825 */
1826 size += PATH_MAX + (6 * sizeof(int));
1827 }
1828 size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
1829 *sizep = size;
1830 return (0);
1831 }
1832
1833 if ((kauth_cred_getuid(p->p_ucred) != kauth_cred_getuid(kauth_cred_get()))
1834 && suser(kauth_cred_get(), &cur_proc->p_acflag))
1835 return (EINVAL);
1836
1837 if ((u_int)arg_size > p->p_argslen)
1838 arg_size = round_page(p->p_argslen);
1839
1840 arg_addr = p->user_stack - arg_size;
1841
1842
1843 /*
1844 * Before we can block (any VM code), make another
1845 * reference to the map to keep it alive. We do
1846 * that by getting a reference on the task itself.
1847 */
1848 task = p->task;
1849 if (task == NULL)
1850 return(EINVAL);
1851
1852 /*
1853 * Once we have a task reference we can convert that into a
1854 * map reference, which we will use in the calls below. The
1855 * task/process may change its map after we take this reference
1856 * (see execve), but the worst that will happen then is a return
1857 * of stale info (which is always a possibility).
1858 */
1859 task_reference(task);
1860 proc_map = get_task_map_reference(task);
1861 task_deallocate(task);
1862 if (proc_map == NULL)
1863 return(EINVAL);
1864
1865
1866 ret = kmem_alloc(kernel_map, &copy_start, round_page(arg_size));
1867 if (ret != KERN_SUCCESS) {
1868 vm_map_deallocate(proc_map);
1869 return(ENOMEM);
1870 }
1871
1872 copy_end = round_page(copy_start + arg_size);
1873
1874 if( vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
1875 (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
1876 vm_map_deallocate(proc_map);
1877 kmem_free(kernel_map, copy_start,
1878 round_page(arg_size));
1879 return (EIO);
1880 }
1881
1882 /*
1883 * Now that we've done the copyin from the process'
1884 * map, we can release the reference to it.
1885 */
1886 vm_map_deallocate(proc_map);
1887
1888 if( vm_map_copy_overwrite(kernel_map,
1889 (vm_map_address_t)copy_start,
1890 tmp, FALSE) != KERN_SUCCESS) {
1891 kmem_free(kernel_map, copy_start,
1892 round_page(arg_size));
1893 return (EIO);
1894 }
1895
1896 if (arg_size > p->p_argslen) {
1897 data = (caddr_t) (copy_end - p->p_argslen);
1898 size = p->p_argslen;
1899 } else {
1900 data = (caddr_t) (copy_end - arg_size);
1901 size = arg_size;
1902 }
1903
1904 if (argc_yes) {
1905 		/* Put the process's argc as the first word in the copyout buffer */
1906 suword(where, p->p_argc);
1907 error = copyout(data, (where + sizeof(int)), size);
1908 size += sizeof(int);
1909 } else {
1910 error = copyout(data, where, size);
1911
1912 /*
1913 * Make the old PROCARGS work to return the executable's path
1914 * But, only if there is enough space in the provided buffer
1915 *
1916 		 * on entry: data [possibly] points to the beginning of the path
1917 		 *
1918 		 * Note: we keep all pointers & sizes aligned to word boundaries
1919 */
1920 if ( (! error) && (buflen > 0 && (u_int)buflen > p->p_argslen) )
1921 {
1922 int binPath_sz, alignedBinPath_sz = 0;
1923 int extraSpaceNeeded, addThis;
1924 user_addr_t placeHere;
1925 char * str = (char *) data;
1926 int max_len = size;
1927
1928 			/* Some apps are really bad about messing up their stacks.
1929 So, we have to be extra careful about getting the length
1930 of the executing binary. If we encounter an error, we bail.
1931 */
1932
1933 /* Limit ourselves to PATH_MAX paths */
1934 if ( max_len > PATH_MAX ) max_len = PATH_MAX;
1935
1936 binPath_sz = 0;
1937
1938 while ( (binPath_sz < max_len-1) && (*str++ != 0) )
1939 binPath_sz++;
1940
1941 /* If we have a NUL terminator, copy it, too */
1942 if (binPath_sz < max_len-1) binPath_sz += 1;
1943
1944 /* Pre-flight the space requirements */
1945
1946 /* Account for the padding that fills out binPath to the next word */
1947 alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;
1948
1949 placeHere = where + size;
1950
1951 /* Account for the bytes needed to keep placeHere word aligned */
1952 addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;
1953
1954 /* Add up all the space that is needed */
1955 extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));
1956
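/*
 * Layout that the block below appends at the word-aligned placeHere
 * (reconstructed from the suword()/copyout() calls that follow):
 *
 *	[pad][0][0xBFFF0000][0][executable path, binPath_sz bytes][0]
 *
 * The three leading words plus the trailing zero word are what the
 * (4 * sizeof(int)) term in extraSpaceNeeded accounts for; the alignment
 * pads correspond to addThis and alignedBinPath_sz.
 */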
1957 /* Is there room to tack on argv[0]? */
1958 if ( (buflen & ~(sizeof(int)-1)) >= ( p->p_argslen + extraSpaceNeeded ))
1959 {
1960 placeHere += addThis;
1961 suword(placeHere, 0);
1962 placeHere += sizeof(int);
1963 suword(placeHere, 0xBFFF0000);
1964 placeHere += sizeof(int);
1965 suword(placeHere, 0);
1966 placeHere += sizeof(int);
1967 error = copyout(data, placeHere, binPath_sz);
1968 if ( ! error )
1969 {
1970 placeHere += binPath_sz;
1971 suword(placeHere, 0);
1972 size += extraSpaceNeeded;
1973 }
1974 }
1975 }
1976 }
1977
1978 if (copy_start != (vm_offset_t) 0) {
1979 kmem_free(kernel_map, copy_start, copy_end - copy_start);
1980 }
1981 if (error) {
1982 return(error);
1983 }
1984
1985 if (where != USER_ADDR_NULL)
1986 *sizep = size;
1987 return (0);
1988 }
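/*
 * Illustrative userspace sketch (not kernel code) of how this handler is
 * typically driven through the KERN_PROCARGS2 OID: probe the size with a
 * NULL buffer, then fetch the argument area. Per the argc_yes path above,
 * the first word of the result is argc. This is a hedged example, not part
 * of the build; it targets the calling process via getpid(), but any pid the
 * caller may inspect works. Needs <sys/sysctl.h>, <stdlib.h>, <string.h>,
 * and <unistd.h>; error handling trimmed.
 *
 *	pid_t pid = getpid();
 *	int mib[3] = { CTL_KERN, KERN_PROCARGS2, pid };
 *	size_t len = 0;
 *	if (sysctl(mib, 3, NULL, &len, NULL, 0) == 0) {
 *		char *buf = malloc(len);
 *		if (buf != NULL && sysctl(mib, 3, buf, &len, NULL, 0) == 0) {
 *			int argc;
 *			memcpy(&argc, buf, sizeof(argc));
 *		}
 *		free(buf);
 *	}
 */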
1989
1990
1991 /*
1992 * Validate parameters and get old / set new parameters
1993 * for the max number of concurrent aio requests. Makes sure
1994 * the system-wide limit is not less than the per-process
1995 * limit.
1996 */
1997 static int
1998 sysctl_aiomax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen)
1999 {
2000 int error = 0;
2001 int new_value;
2002
2003 if ( oldp && *oldlenp < sizeof(int) )
2004 return (ENOMEM);
2005 if ( newp && newlen != sizeof(int) )
2006 return (EINVAL);
2007
2008 *oldlenp = sizeof(int);
2009 if ( oldp )
2010 error = copyout( &aio_max_requests, oldp, sizeof(int) );
2011 if ( error == 0 && newp )
2012 error = copyin( newp, &new_value, sizeof(int) );
2013 if ( error == 0 && newp ) {
2014 if ( new_value >= aio_max_requests_per_process )
2015 aio_max_requests = new_value;
2016 else
2017 error = EINVAL;
2018 }
2019 return( error );
2020
2021 } /* sysctl_aiomax */
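/*
 * Hedged userspace sketch of exercising this handler, assuming the OID is
 * exported as "kern.aiomax" (needs <sys/sysctl.h>): read the current limit,
 * then write a new one. A new value smaller than the per-process limit is
 * rejected with EINVAL, per the check above. The value 512 below is only a
 * placeholder.
 *
 *	int cur = 0, desired = 512;
 *	size_t len = sizeof(cur);
 *	if (sysctlbyname("kern.aiomax", &cur, &len, NULL, 0) == 0)
 *		(void)sysctlbyname("kern.aiomax", NULL, NULL, &desired, sizeof(desired));
 */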
2022
2023
2024 /*
2025 * Validate parameters and get old / set new parameters
2026 * for the max number of concurrent aio requests per process.
2027 * Makes sure the per-process limit does not exceed the system-wide
2028 * limit and is at least AIO_LISTIO_MAX.
2029 */
2030 static int
2031 sysctl_aioprocmax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen )
2032 {
2033 int error = 0;
2034 int new_value = 0;
2035
2036 if ( oldp && *oldlenp < sizeof(int) )
2037 return (ENOMEM);
2038 if ( newp && newlen != sizeof(int) )
2039 return (EINVAL);
2040
2041 *oldlenp = sizeof(int);
2042 if ( oldp )
2043 error = copyout( &aio_max_requests_per_process, oldp, sizeof(int) );
2044 if ( error == 0 && newp )
2045 error = copyin( newp, &new_value, sizeof(int) );
2046 if ( error == 0 && newp ) {
2047 if ( new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX )
2048 aio_max_requests_per_process = new_value;
2049 else
2050 error = EINVAL;
2051 }
2052 return( error );
2053
2054 } /* sysctl_aioprocmax */
2055
2056
2057 /*
2058 * Validate parameters and get old / set new parameters
2059 * for the max number of async IO worker threads.
2060 * We only allow an increase in the number of worker threads.
2061 */
2062 static int
2063 sysctl_aiothreads(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen)
2064 {
2065 int error = 0;
2066 int new_value;
2067
2068 if ( oldp && *oldlenp < sizeof(int) )
2069 return (ENOMEM);
2070 if ( newp && newlen != sizeof(int) )
2071 return (EINVAL);
2072
2073 *oldlenp = sizeof(int);
2074 if ( oldp )
2075 error = copyout( &aio_worker_threads, oldp, sizeof(int) );
2076 if ( error == 0 && newp )
2077 error = copyin( newp, &new_value, sizeof(int) );
2078 if ( error == 0 && newp ) {
2079 if (new_value > aio_worker_threads ) {
2080 _aio_create_worker_threads( (new_value - aio_worker_threads) );
2081 aio_worker_threads = new_value;
2082 }
2083 else
2084 error = EINVAL;
2085 }
2086 return( error );
2087
2088 } /* sysctl_aiothreads */
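/*
 * Hedged sketch, assuming the OID is exported as "kern.aiothreads" (needs
 * <sys/sysctl.h>): the handler above only ever grows the worker-thread pool,
 * so asking for the same number or fewer threads fails with EINVAL. The +2
 * below is an arbitrary example increment.
 *
 *	int threads = 0;
 *	size_t len = sizeof(threads);
 *	if (sysctlbyname("kern.aiothreads", &threads, &len, NULL, 0) == 0) {
 *		int more = threads + 2;
 *		(void)sysctlbyname("kern.aiothreads", NULL, NULL, &more, sizeof(more));
 *	}
 */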
2089
2090
2091 /*
2092 * Validate parameters and get old / set new parameters
2093 * for the max number of processes per UID.
2094 * Makes sure the per-UID limit does not exceed the system-wide limit.
2095 */
2096 static int
2097 sysctl_maxprocperuid(user_addr_t oldp, size_t *oldlenp,
2098 user_addr_t newp, size_t newlen)
2099 {
2100 int error = 0;
2101 int new_value;
2102
2103 if ( oldp != USER_ADDR_NULL && *oldlenp < sizeof(int) )
2104 return (ENOMEM);
2105 if ( newp != USER_ADDR_NULL && newlen != sizeof(int) )
2106 return (EINVAL);
2107
2108 *oldlenp = sizeof(int);
2109 if ( oldp != USER_ADDR_NULL )
2110 error = copyout( &maxprocperuid, oldp, sizeof(int) );
2111 if ( error == 0 && newp != USER_ADDR_NULL ) {
2112 error = copyin( newp, &new_value, sizeof(int) );
2113 if ( error == 0 ) {
2114 AUDIT_ARG(value, new_value);
2115 if ( new_value <= maxproc && new_value > 0 )
2116 maxprocperuid = new_value;
2117 else
2118 error = EINVAL;
2119 }
2120 else
2121 error = EINVAL;
2122 }
2123 return( error );
2124
2125 } /* sysctl_maxprocperuid */
2126
2127
2128 /*
2129 * Validate parameters and get old / set new parameters
2130 * for the max number of files per process.
2131 * Makes sure the per-process limit is less than the system-wide limit.
2132 */
2133 static int
2134 sysctl_maxfilesperproc(user_addr_t oldp, size_t *oldlenp,
2135 user_addr_t newp, size_t newlen)
2136 {
2137 int error = 0;
2138 int new_value;
2139
2140 if ( oldp != USER_ADDR_NULL && *oldlenp < sizeof(int) )
2141 return (ENOMEM);
2142 if ( newp != USER_ADDR_NULL && newlen != sizeof(int) )
2143 return (EINVAL);
2144
2145 *oldlenp = sizeof(int);
2146 if ( oldp != USER_ADDR_NULL )
2147 error = copyout( &maxfilesperproc, oldp, sizeof(int) );
2148 if ( error == 0 && newp != USER_ADDR_NULL ) {
2149 error = copyin( newp, &new_value, sizeof(int) );
2150 if ( error == 0 ) {
2151 AUDIT_ARG(value, new_value);
2152 if ( new_value < maxfiles && new_value > 0 )
2153 maxfilesperproc = new_value;
2154 else
2155 error = EINVAL;
2156 }
2157 else
2158 error = EINVAL;
2159 }
2160 return( error );
2161
2162 } /* sysctl_maxfilesperproc */
2163
2164
2165 /*
2166 * Validate parameters and get old / set new parameters
2167 * for the system-wide limit on the max number of processes.
2168 * Makes sure the system-wide limit does not exceed the hard limit
2169 * configured at kernel compile time.
2170 */
2171 static int
2172 sysctl_maxproc(user_addr_t oldp, size_t *oldlenp,
2173 user_addr_t newp, size_t newlen )
2174 {
2175 int error = 0;
2176 int new_value;
2177
2178 if ( oldp != USER_ADDR_NULL && *oldlenp < sizeof(int) )
2179 return (ENOMEM);
2180 if ( newp != USER_ADDR_NULL && newlen != sizeof(int) )
2181 return (EINVAL);
2182
2183 *oldlenp = sizeof(int);
2184 if ( oldp != USER_ADDR_NULL )
2185 error = copyout( &maxproc, oldp, sizeof(int) );
2186 if ( error == 0 && newp != USER_ADDR_NULL ) {
2187 error = copyin( newp, &new_value, sizeof(int) );
2188 if ( error == 0 ) {
2189 AUDIT_ARG(value, new_value);
2190 if ( new_value <= hard_maxproc && new_value > 0 )
2191 maxproc = new_value;
2192 else
2193 error = EINVAL;
2194 }
2195 else
2196 error = EINVAL;
2197 }
2198 return( error );
2199
2200 } /* sysctl_maxproc */
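/*
 * Hedged sketch of the usual get/set pattern for this handler, assuming the
 * OID is exported as "kern.maxproc" (needs <sys/sysctl.h>). The new value
 * must be positive and must not exceed the compile-time hard limit
 * (hard_maxproc), otherwise EINVAL is returned. Writing back the value just
 * read is only a placeholder.
 *
 *	int maxp = 0;
 *	size_t len = sizeof(maxp);
 *	if (sysctlbyname("kern.maxproc", &maxp, &len, NULL, 0) == 0)
 *		(void)sysctlbyname("kern.maxproc", NULL, NULL, &maxp, sizeof(maxp));
 */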
2201
2202 #if __i386__
2203 static int
2204 sysctl_sysctl_exec_affinity SYSCTL_HANDLER_ARGS
2205 {
2206 struct proc *cur_proc = req->p;
2207 int error;
2208
2209 if (req->oldptr != USER_ADDR_NULL) {
2210 cpu_type_t oldcputype = (cur_proc->p_flag & P_AFFINITY) ? CPU_TYPE_POWERPC : CPU_TYPE_I386;
2211 if ((error = SYSCTL_OUT(req, &oldcputype, sizeof(oldcputype))))
2212 return error;
2213 }
2214
2215 if (req->newptr != USER_ADDR_NULL) {
2216 cpu_type_t newcputype;
2217 if ((error = SYSCTL_IN(req, &newcputype, sizeof(newcputype))))
2218 return error;
2219 if (newcputype == CPU_TYPE_I386)
2220 cur_proc->p_flag &= ~P_AFFINITY;
2221 else if (newcputype == CPU_TYPE_POWERPC)
2222 cur_proc->p_flag |= P_AFFINITY;
2223 else
2224 return (EINVAL);
2225 }
2226
2227 return 0;
2228 }
2229 SYSCTL_PROC(_sysctl, OID_AUTO, proc_exec_affinity, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, 0, 0, sysctl_sysctl_exec_affinity ,"I","proc_exec_affinity");
2230 #endif
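/*
 * Hedged userspace sketch, assuming this OID is exported as
 * "sysctl.proc_exec_affinity" (needs <sys/sysctl.h> and <mach/machine.h>):
 * per the handler above, writing CPU_TYPE_POWERPC sets P_AFFINITY on the
 * calling process, writing CPU_TYPE_I386 clears it, and any other value is
 * rejected with EINVAL.
 *
 *	cpu_type_t pref = CPU_TYPE_POWERPC;
 *	(void)sysctlbyname("sysctl.proc_exec_affinity", NULL, NULL, &pref, sizeof(pref));
 */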
2231
2232 static int
2233 fetch_process_cputype(
2234 struct proc *cur_proc,
2235 int *name,
2236 u_int namelen,
2237 cpu_type_t *cputype)
2238 {
2239 struct proc *p = NULL;
2240 cpu_type_t ret = 0;
2241
2242 if (namelen == 0)
2243 p = cur_proc;
2244 else if (namelen == 1) {
2245 p = pfind(name[0]);
2246 if (p == NULL)
2247 return (EINVAL);
2248 if ((kauth_cred_getuid(p->p_ucred) != kauth_cred_getuid(kauth_cred_get()))
2249 && suser(kauth_cred_get(), &cur_proc->p_acflag))
2250 return (EPERM);
2251 } else {
2252 return EINVAL;
2253 }
2254
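/* A process running translated (P_TRANSLATED, i.e. under Rosetta) reports
 * CPU_TYPE_POWERPC here rather than the host CPU type. */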
2255 #if __i386__
2256 if (p->p_flag & P_TRANSLATED) {
2257 ret = CPU_TYPE_POWERPC;
2258 }
2259 else
2260 #endif
2261 {
2262 ret = cpu_type();
2263 if (IS_64BIT_PROCESS(p))
2264 ret |= CPU_ARCH_ABI64;
2265 }
2266 *cputype = ret;
2267
2268 return 0;
2269 }
2270
2271 static int
2272 sysctl_sysctl_native SYSCTL_HANDLER_ARGS
2273 {
2274 int error;
2275 cpu_type_t proc_cputype = 0;
2276 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2277 return error;
2278 int res = 1;
2279 if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
2280 res = 0;
2281 return SYSCTL_OUT(req, &res, sizeof(res));
2282 }
2283 SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_native ,"I","proc_native");
2284
2285 static int
2286 sysctl_sysctl_cputype SYSCTL_HANDLER_ARGS
2287 {
2288 int error;
2289 cpu_type_t proc_cputype = 0;
2290 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2291 return error;
2292 return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
2293 }
2294 SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_cputype ,"I","proc_cputype");
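/*
 * Hedged userspace sketch of querying these OIDs, assuming they are exported
 * as "sysctl.proc_cputype" and "sysctl.proc_native" (needs <sys/sysctl.h>,
 * <mach/machine.h>, and <unistd.h>): both accept the target pid as a trailing
 * name element and fall back to the calling process when no pid is given.
 * The pid used below is the caller's own.
 *
 *	int mib[CTL_MAXNAME];
 *	size_t miblen = CTL_MAXNAME;
 *	cpu_type_t type = 0;
 *	size_t len = sizeof(type);
 *	if (sysctlnametomib("sysctl.proc_cputype", mib, &miblen) == 0) {
 *		mib[miblen] = getpid();
 *		(void)sysctl(mib, (u_int)miblen + 1, &type, &len, NULL, 0);
 *	}
 */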
2295