1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
23 /*-
24 * Copyright (c) 1982, 1986, 1989, 1993
25 * The Regents of the University of California. All rights reserved.
26 *
27 * This code is derived from software contributed to Berkeley by
28 * Mike Karels at Berkeley Software Design, Inc.
29 *
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions
32 * are met:
33 * 1. Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * 2. Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in the
37 * documentation and/or other materials provided with the distribution.
38 * 3. All advertising materials mentioning features or use of this software
39 * must display the following acknowledgement:
40 * This product includes software developed by the University of
41 * California, Berkeley and its contributors.
42 * 4. Neither the name of the University nor the names of its contributors
43 * may be used to endorse or promote products derived from this software
44 * without specific prior written permission.
45 *
46 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
47 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
50 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
51 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
52 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
55 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56 * SUCH DAMAGE.
57 *
58 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
59 */
60
61 /*
62 * sysctl system call.
63 */
64
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/kernel.h>
68 #include <sys/malloc.h>
69 #include <sys/proc_internal.h>
70 #include <sys/kauth.h>
71 #include <sys/file_internal.h>
72 #include <sys/vnode_internal.h>
73 #include <sys/unistd.h>
74 #include <sys/buf.h>
75 #include <sys/ioctl.h>
76 #include <sys/namei.h>
77 #include <sys/tty.h>
78 #include <sys/disklabel.h>
79 #include <sys/vm.h>
80 #include <sys/sysctl.h>
81 #include <sys/user.h>
82 #include <sys/aio_kern.h>
83
84 #include <bsm/audit_kernel.h>
85
86 #include <mach/machine.h>
87 #include <mach/mach_types.h>
88 #include <mach/vm_param.h>
89 #include <kern/task.h>
90 #include <kern/lock.h>
91 #include <vm/vm_kern.h>
92 #include <vm/vm_map.h>
93 #include <mach/host_info.h>
94
95 extern vm_map_t bsd_pageable_map;
96
97 #include <sys/mount_internal.h>
98 #include <sys/kdebug.h>
99 #include <sys/sysproto.h>
100
101 #include <IOKit/IOPlatformExpert.h>
102 #include <pexpert/pexpert.h>
103
104 #include <machine/machine_routines.h>
105 #include <machine/exec.h>
106
107 #include <vm/vm_protos.h>
108
109 #ifdef __i386__
110 #include <i386/cpuid.h>
111 #endif
112
113 sysctlfn kern_sysctl;
114 #ifdef DEBUG
115 sysctlfn debug_sysctl;
116 #endif
117 extern sysctlfn vm_sysctl;
118 extern sysctlfn vfs_sysctl;
119 extern sysctlfn net_sysctl;
120 extern sysctlfn cpu_sysctl;
121 extern int aio_max_requests;
122 extern int aio_max_requests_per_process;
123 extern int aio_worker_threads;
124 extern int maxfilesperproc;
125 extern int lowpri_IO_window_msecs;
126 extern int lowpri_IO_delay_msecs;
127 extern int nx_enabled;
128
129 static void
130 fill_eproc(struct proc *p, struct eproc *ep);
131 static void
132 fill_externproc(struct proc *p, struct extern_proc *exp);
133 static void
134 fill_user_eproc(struct proc *p, struct user_eproc *ep);
135 static void
136 fill_user_proc(struct proc *p, struct user_kinfo_proc *kp);
137 static void
138 fill_user_externproc(struct proc *p, struct user_extern_proc *exp);
139 extern int
140 kdbg_control(int *name, u_int namelen, user_addr_t where, size_t * sizep);
141 int
142 kdebug_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep, struct proc *p);
143 #if NFSCLIENT
144 extern int
145 netboot_root(void);
146 #endif
147 int
148 pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
149 struct proc *p);
150 __private_extern__ kern_return_t
151 reset_vmobjectcache(unsigned int val1, unsigned int val2);
152 extern int
153 resize_namecache(u_int newsize);
154 static int
155 sysctl_aiomax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen);
156 static int
157 sysctl_aioprocmax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen);
158 static int
159 sysctl_aiothreads(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen);
160 extern int
161 sysctl_clockrate(user_addr_t where, size_t *sizep);
162 int
163 sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep);
164 int
165 sysctl_doprof(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
166 user_addr_t newp, size_t newlen);
167 int
168 sysctl_file(user_addr_t where, size_t *sizep);
169 static void
170 fill_proc(struct proc *p, struct kinfo_proc *kp);
171 static int
172 sysctl_maxfilesperproc(user_addr_t oldp, size_t *oldlenp,
173 user_addr_t newp, size_t newlen);
174 static int
175 sysctl_maxprocperuid(user_addr_t oldp, size_t *oldlenp,
176 user_addr_t newp, size_t newlen);
177 static int
178 sysctl_maxproc(user_addr_t oldp, size_t *oldlenp,
179 user_addr_t newp, size_t newlen);
180 int
181 sysctl_procargs(int *name, u_int namelen, user_addr_t where,
182 size_t *sizep, struct proc *cur_proc);
183 static int
184 sysctl_procargs2(int *name, u_int namelen, user_addr_t where, size_t *sizep,
185 struct proc *cur_proc);
186 static int
187 sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
188 struct proc *cur_proc, int argc_yes);
189 int
190 sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
191 size_t newlen, void *sp, int len);
192 extern int
193 sysctl_vnode(user_addr_t where, size_t *sizep);
194
195
196 /*
197  * temporary location for vm_sysctl. This should be machine independent
198 */
199
200 extern uint32_t mach_factor[3];
201
202 static void
203 loadavg32to64(struct loadavg *la32, struct user_loadavg *la64)
204 {
205 la64->ldavg[0] = la32->ldavg[0];
206 la64->ldavg[1] = la32->ldavg[1];
207 la64->ldavg[2] = la32->ldavg[2];
208 la64->fscale = (user_long_t)la32->fscale;
209 }
210
211 int
212 vm_sysctl(int *name, __unused u_int namelen, user_addr_t oldp, size_t *oldlenp,
213 user_addr_t newp, size_t newlen, __unused struct proc *p)
214 {
215 struct loadavg loadinfo;
216
217 switch (name[0]) {
218 case VM_LOADAVG:
219 if (proc_is64bit(p)) {
220 struct user_loadavg loadinfo64;
221 loadavg32to64(&averunnable, &loadinfo64);
222 return (sysctl_struct(oldp, oldlenp, newp, newlen,
223 &loadinfo64, sizeof(loadinfo64)));
224 } else {
225 return (sysctl_struct(oldp, oldlenp, newp, newlen,
226 &averunnable, sizeof(struct loadavg)));
227 }
228 case VM_MACHFACTOR:
229 loadinfo.ldavg[0] = mach_factor[0];
230 loadinfo.ldavg[1] = mach_factor[1];
231 loadinfo.ldavg[2] = mach_factor[2];
232 loadinfo.fscale = LSCALE;
233 if (proc_is64bit(p)) {
234 struct user_loadavg loadinfo64;
235 loadavg32to64(&loadinfo, &loadinfo64);
236 return (sysctl_struct(oldp, oldlenp, newp, newlen,
237 &loadinfo64, sizeof(loadinfo64)));
238 } else {
239 return (sysctl_struct(oldp, oldlenp, newp, newlen,
240 &loadinfo, sizeof(struct loadavg)));
241 }
242 case VM_SWAPUSAGE: {
243 int error;
244 uint64_t swap_total;
245 uint64_t swap_avail;
246 uint32_t swap_pagesize;
247 boolean_t swap_encrypted;
248 struct xsw_usage xsu;
249
250 error = macx_swapinfo(&swap_total,
251 &swap_avail,
252 &swap_pagesize,
253 &swap_encrypted);
254 if (error)
255 return error;
256
257 xsu.xsu_total = swap_total;
258 xsu.xsu_avail = swap_avail;
259 xsu.xsu_used = swap_total - swap_avail;
260 xsu.xsu_pagesize = swap_pagesize;
261 xsu.xsu_encrypted = swap_encrypted;
262 return sysctl_struct(oldp, oldlenp, newp, newlen,
263 &xsu, sizeof (struct xsw_usage));
264 }
265 case VM_METER:
266 return (ENOTSUP);
267 case VM_MAXID:
268 return (ENOTSUP);
269 default:
270 return (ENOTSUP);
271 }
272 /* NOTREACHED */
273 return (ENOTSUP);
274 }
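/*
 * Illustrative userland sketch of reading the VM_SWAPUSAGE case above
 * (VM_LOADAVG is read the same way into a struct loadavg).  The error
 * handling and output formatting are assumptions made for the example,
 * not part of this file's interface.
 *
 *    #include <sys/types.h>
 *    #include <sys/sysctl.h>
 *    #include <stdio.h>
 *
 *    int
 *    main(void)
 *    {
 *        int mib[2] = { CTL_VM, VM_SWAPUSAGE };
 *        struct xsw_usage xsu;
 *        size_t len = sizeof(xsu);
 *
 *        if (sysctl(mib, 2, &xsu, &len, NULL, 0) == -1) {
 *            perror("sysctl");
 *            return 1;
 *        }
 *        printf("swap: %llu used of %llu bytes (page size %u%s)\n",
 *            (unsigned long long)xsu.xsu_used,
 *            (unsigned long long)xsu.xsu_total,
 *            xsu.xsu_pagesize,
 *            xsu.xsu_encrypted ? ", encrypted" : "");
 *        return 0;
 *    }
 */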
275
276 /*
277 * Locking and stats
278 */
279 static struct sysctl_lock {
280 int sl_lock;
281 int sl_want;
282 int sl_locked;
283 } memlock;
284
285 int
286 __sysctl(struct proc *p, struct __sysctl_args *uap, __unused register_t *retval)
287 {
288 int error, dolock = 1;
289 size_t savelen = 0, oldlen = 0, newlen;
290 sysctlfn *fnp = NULL;
291 int name[CTL_MAXNAME];
292 int i;
293 int error1;
294
295 /*
296 * all top-level sysctl names are non-terminal
297 */
298 if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)
299 return (EINVAL);
300 error = copyin(uap->name, &name[0], uap->namelen * sizeof(int));
301 if (error)
302 return (error);
303
304 AUDIT_ARG(ctlname, name, uap->namelen);
305
306 if (proc_is64bit(p)) {
307 /* uap->newlen is a size_t value which grows to 64 bits
308 	 * when coming from a 64-bit process. Since it's doubtful we'll
309 * have a sysctl newp buffer greater than 4GB we shrink it to size_t
310 */
311 newlen = CAST_DOWN(size_t, uap->newlen);
312 }
313 else {
314 newlen = uap->newlen;
315 }
316
317 /* CTL_UNSPEC is used to get oid to AUTO_OID */
318 if (uap->new != USER_ADDR_NULL
319 && ((name[0] == CTL_KERN
320 && !(name[1] == KERN_IPC || name[1] == KERN_PANICINFO || name[1] == KERN_PROCDELAYTERM ||
321 name[1] == KERN_PROC_LOW_PRI_IO || name[1] == KERN_PROCNAME || name[1] == KERN_THALTSTACK))
322 || (name[0] == CTL_HW)
323 || (name[0] == CTL_VM)
324 || (name[0] == CTL_VFS))
325 && (error = suser(kauth_cred_get(), &p->p_acflag)))
326 return (error);
327
328 switch (name[0]) {
329 case CTL_KERN:
330 fnp = kern_sysctl;
331 if ((name[1] != KERN_VNODE) && (name[1] != KERN_FILE)
332 && (name[1] != KERN_PROC))
333 dolock = 0;
334 break;
335 case CTL_VM:
336 fnp = vm_sysctl;
337 break;
338
339 case CTL_VFS:
340 fnp = vfs_sysctl;
341 break;
342 #ifdef DEBUG
343 case CTL_DEBUG:
344 fnp = debug_sysctl;
345 break;
346 #endif
347 default:
348 fnp = NULL;
349 }
350
351 if (uap->oldlenp != USER_ADDR_NULL) {
352 uint64_t oldlen64 = fuulong(uap->oldlenp);
353
354 oldlen = CAST_DOWN(size_t, oldlen64);
355 /*
356 * If more than 4G, clamp to 4G - useracc() below will catch
357 * with an EFAULT, if it's actually necessary.
358 */
359 if (oldlen64 > 0x00000000ffffffffULL)
360 oldlen = 0xffffffffUL;
361 }
362
363 if (uap->old != USER_ADDR_NULL) {
364 if (!useracc(uap->old, (user_size_t)oldlen, B_WRITE))
365 return (EFAULT);
366
367 /* The pc sampling mechanism does not need to take this lock */
368 if ((name[1] != KERN_PCSAMPLES) &&
369 (!((name[1] == KERN_KDEBUG) && (name[2] == KERN_KDGETENTROPY)))) {
370 while (memlock.sl_lock) {
371 memlock.sl_want = 1;
372 sleep((caddr_t)&memlock, PRIBIO+1);
373 memlock.sl_locked++;
374 }
375 memlock.sl_lock = 1;
376 }
377
378 if (dolock && oldlen &&
379 (error = vslock(uap->old, (user_size_t)oldlen))) {
380 if ((name[1] != KERN_PCSAMPLES) &&
381 (! ((name[1] == KERN_KDEBUG) && (name[2] == KERN_KDGETENTROPY)))) {
382 memlock.sl_lock = 0;
383 if (memlock.sl_want) {
384 memlock.sl_want = 0;
385 wakeup((caddr_t)&memlock);
386 }
387 }
388 return(error);
389 }
390 savelen = oldlen;
391 }
392
393 if (fnp) {
394 error = (*fnp)(name + 1, uap->namelen - 1, uap->old,
395 &oldlen, uap->new, newlen, p);
396 }
397 else
398 error = ENOTSUP;
399
400 if ( (name[0] != CTL_VFS) && (error == ENOTSUP)) {
401 size_t tmp = oldlen;
402 error = userland_sysctl(p, name, uap->namelen, uap->old, &tmp,
403 1, uap->new, newlen, &oldlen);
404 }
405
406 if (uap->old != USER_ADDR_NULL) {
407 if (dolock && savelen) {
408 error1 = vsunlock(uap->old, (user_size_t)savelen, B_WRITE);
409 if (!error && error1)
410 error = error1;
411 }
412 if (name[1] != KERN_PCSAMPLES) {
413 memlock.sl_lock = 0;
414 if (memlock.sl_want) {
415 memlock.sl_want = 0;
416 wakeup((caddr_t)&memlock);
417 }
418 }
419 }
420 if ((error) && (error != ENOMEM))
421 return (error);
422
423 if (uap->oldlenp != USER_ADDR_NULL) {
424 i = suulong(uap->oldlenp, oldlen);
425 if (i)
426 return i;
427 }
428
429 return (error);
430 }
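/*
 * Illustrative userland sketch of the calling convention enforced above:
 * a mib always carries at least two components, the first selecting the
 * subsystem (CTL_KERN, CTL_VM, ...) and the rest interpreted by that
 * subsystem's handler.  The buffer size chosen here is an assumption
 * made for the example.
 *
 *    #include <sys/types.h>
 *    #include <sys/sysctl.h>
 *    #include <stdio.h>
 *
 *    int
 *    main(void)
 *    {
 *        int mib[2] = { CTL_KERN, KERN_OSRELEASE };
 *        char rel[256];
 *        size_t len = sizeof(rel);
 *
 *        if (sysctl(mib, 2, rel, &len, NULL, 0) == -1) {
 *            perror("sysctl");
 *            return 1;
 *        }
 *        printf("kern.osrelease = %s\n", rel);
 *        return 0;
 *    }
 */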
431
432 /*
433 * Attributes stored in the kernel.
434 */
435 __private_extern__ char corefilename[MAXPATHLEN+1];
436 __private_extern__ int do_coredump;
437 __private_extern__ int sugid_coredump;
438
439
440 #ifdef INSECURE
441 int securelevel = -1;
442 #else
443 int securelevel;
444 #endif
445
446 static int
447 sysctl_affinity(
448 int *name,
449 u_int namelen,
450 user_addr_t oldBuf,
451 size_t *oldSize,
452 user_addr_t newBuf,
453 __unused size_t newSize,
454 struct proc *cur_proc)
455 {
456 if (namelen < 1)
457 return (ENOTSUP);
458
459 if (name[0] == 0 && 1 == namelen) {
460 return sysctl_rdint(oldBuf, oldSize, newBuf,
461 (cur_proc->p_flag & P_AFFINITY) ? 1 : 0);
462 } else if (name[0] == 1 && 2 == namelen) {
463 if (name[1] == 0) {
464 cur_proc->p_flag &= ~P_AFFINITY;
465 } else {
466 cur_proc->p_flag |= P_AFFINITY;
467 }
468 return 0;
469 }
470 return (ENOTSUP);
471 }
472
473
474 static int
475 sysctl_translate(
476 int *name,
477 u_int namelen,
478 user_addr_t oldBuf,
479 size_t *oldSize,
480 user_addr_t newBuf,
481 __unused size_t newSize,
482 struct proc *cur_proc)
483 {
484 struct proc *p;
485
486 if (namelen != 1)
487 return (ENOTSUP);
488
489 p = pfind(name[0]);
490 if (p == NULL)
491 return (EINVAL);
492
493 if ((kauth_cred_getuid(p->p_ucred) != kauth_cred_getuid(kauth_cred_get()))
494 && suser(kauth_cred_get(), &cur_proc->p_acflag))
495 return (EPERM);
496
497 return sysctl_rdint(oldBuf, oldSize, newBuf,
498 (p->p_flag & P_TRANSLATED) ? 1 : 0);
499 }
500
501 int
502 set_archhandler(struct proc *p, int arch)
503 {
504 int error;
505 struct nameidata nd;
506 struct vnode_attr va;
507 struct vfs_context context;
508 char *archhandler;
509
510 switch(arch) {
511 case CPU_TYPE_POWERPC:
512 archhandler = exec_archhandler_ppc.path;
513 break;
514 default:
515 return (EBADARCH);
516 }
517
518 context.vc_proc = p;
519 context.vc_ucred = kauth_cred_get();
520
521 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE32,
522 CAST_USER_ADDR_T(archhandler), &context);
523 error = namei(&nd);
524 if (error)
525 return (error);
526 nameidone(&nd);
527
528 /* Check mount point */
529 if ((nd.ni_vp->v_mount->mnt_flag & MNT_NOEXEC) ||
530 (nd.ni_vp->v_type != VREG)) {
531 vnode_put(nd.ni_vp);
532 return (EACCES);
533 }
534
535 VATTR_INIT(&va);
536 VATTR_WANTED(&va, va_fsid);
537 VATTR_WANTED(&va, va_fileid);
538 error = vnode_getattr(nd.ni_vp, &va, &context);
539 if (error) {
540 vnode_put(nd.ni_vp);
541 return (error);
542 }
543 vnode_put(nd.ni_vp);
544
545 exec_archhandler_ppc.fsid = va.va_fsid;
546 exec_archhandler_ppc.fileid = (u_long)va.va_fileid;
547 return 0;
548 }
549
550 static int
551 sysctl_exec_archhandler_ppc(
552 __unused int *name,
553 __unused u_int namelen,
554 user_addr_t oldBuf,
555 size_t *oldSize,
556 user_addr_t newBuf,
557 size_t newSize,
558 struct proc *p)
559 {
560 int error;
561 size_t len;
562 struct nameidata nd;
563 struct vnode_attr va;
564 char handler[sizeof(exec_archhandler_ppc.path)];
565 struct vfs_context context;
566
567 context.vc_proc = p;
568 context.vc_ucred = kauth_cred_get();
569
570 if (oldSize) {
571 len = strlen(exec_archhandler_ppc.path) + 1;
572 if (oldBuf) {
573 if (*oldSize < len)
574 return (ENOMEM);
575 error = copyout(exec_archhandler_ppc.path, oldBuf, len);
576 if (error)
577 return (error);
578 }
579 *oldSize = len - 1;
580 }
581 if (newBuf) {
582 error = suser(context.vc_ucred, &p->p_acflag);
583 if (error)
584 return (error);
585 if (newSize >= sizeof(exec_archhandler_ppc.path))
586 return (ENAMETOOLONG);
587 error = copyin(newBuf, handler, newSize);
588 if (error)
589 return (error);
590 handler[newSize] = 0;
591 strcpy(exec_archhandler_ppc.path, handler);
592 error = set_archhandler(p, CPU_TYPE_POWERPC);
593 if (error)
594 return (error);
595 }
596 return 0;
597 }
598
599 SYSCTL_NODE(_kern, KERN_EXEC, exec, CTLFLAG_RD, 0, "");
600
601 SYSCTL_NODE(_kern_exec, OID_AUTO, archhandler, CTLFLAG_RD, 0, "");
602
603 SYSCTL_STRING(_kern_exec_archhandler, OID_AUTO, powerpc, CTLFLAG_RD,
604 exec_archhandler_ppc.path, 0, "");
605
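/*
 * The string OID registered above should be reachable from user space by
 * name as "kern.exec.archhandler.powerpc".  A minimal sketch (the buffer
 * size is an assumption made for the example):
 *
 *    #include <sys/types.h>
 *    #include <sys/sysctl.h>
 *    #include <stdio.h>
 *
 *    int
 *    main(void)
 *    {
 *        char path[256];
 *        size_t len = sizeof(path);
 *
 *        if (sysctlbyname("kern.exec.archhandler.powerpc",
 *            path, &len, NULL, 0) == -1) {
 *            perror("sysctlbyname");
 *            return 1;
 *        }
 *        printf("%s\n", path);
 *        return 0;
 *    }
 */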
606 extern int get_kernel_symfile( struct proc *, char **);
607 __private_extern__ int
608 sysctl_dopanicinfo(int *, u_int, user_addr_t, size_t *, user_addr_t,
609 size_t, struct proc *);
610
611 /*
612 * kernel related system variables.
613 */
614 int
615 kern_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
616 user_addr_t newp, size_t newlen, struct proc *p)
617 {
618 int error, level, inthostid, tmp;
619 unsigned int oldval=0;
620 char *str;
621 /* all sysctl names not listed below are terminal at this level */
622 if (namelen != 1
623 && !(name[0] == KERN_PROC
624 || name[0] == KERN_PROF
625 || name[0] == KERN_KDEBUG
626 || name[0] == KERN_PROCARGS
627 || name[0] == KERN_PROCARGS2
628 || name[0] == KERN_PCSAMPLES
629 || name[0] == KERN_IPC
630 || name[0] == KERN_SYSV
631 || name[0] == KERN_AFFINITY
632 || name[0] == KERN_TRANSLATE
633 || name[0] == KERN_EXEC
634 || name[0] == KERN_PANICINFO
635 || name[0] == KERN_POSIX
636 || name[0] == KERN_TFP)
637 )
638 return (ENOTDIR); /* overloaded */
639
640 switch (name[0]) {
641 case KERN_OSTYPE:
642 return (sysctl_rdstring(oldp, oldlenp, newp, ostype));
643 case KERN_OSRELEASE:
644 return (sysctl_rdstring(oldp, oldlenp, newp, osrelease));
645 case KERN_OSREV:
646 return (sysctl_rdint(oldp, oldlenp, newp, BSD));
647 case KERN_VERSION:
648 return (sysctl_rdstring(oldp, oldlenp, newp, version));
649 case KERN_MAXVNODES:
650 oldval = desiredvnodes;
651 error = sysctl_int(oldp, oldlenp, newp,
652 newlen, &desiredvnodes);
653 reset_vmobjectcache(oldval, desiredvnodes);
654 resize_namecache(desiredvnodes);
655 return(error);
656 case KERN_MAXPROC:
657 return (sysctl_maxproc(oldp, oldlenp, newp, newlen));
658 case KERN_MAXFILES:
659 return (sysctl_int(oldp, oldlenp, newp, newlen, &maxfiles));
660 case KERN_MAXPROCPERUID:
661 return( sysctl_maxprocperuid( oldp, oldlenp, newp, newlen ) );
662 case KERN_MAXFILESPERPROC:
663 return( sysctl_maxfilesperproc( oldp, oldlenp, newp, newlen ) );
664 case KERN_ARGMAX:
665 return (sysctl_rdint(oldp, oldlenp, newp, ARG_MAX));
666 case KERN_SECURELVL:
667 level = securelevel;
668 if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &level)) ||
669 newp == USER_ADDR_NULL)
670 return (error);
671 if (level < securelevel && p->p_pid != 1)
672 return (EPERM);
673 securelevel = level;
674 return (0);
675 case KERN_HOSTNAME:
676 error = sysctl_trstring(oldp, oldlenp, newp, newlen,
677 hostname, sizeof(hostname));
678 if (newp && !error)
679 hostnamelen = newlen;
680 return (error);
681 case KERN_DOMAINNAME:
682 error = sysctl_string(oldp, oldlenp, newp, newlen,
683 domainname, sizeof(domainname));
684 if (newp && !error)
685 domainnamelen = newlen;
686 return (error);
687 case KERN_HOSTID:
688 inthostid = hostid; /* XXX assumes sizeof long <= sizeof int */
689 error = sysctl_int(oldp, oldlenp, newp, newlen, &inthostid);
690 hostid = inthostid;
691 return (error);
692 case KERN_CLOCKRATE:
693 return (sysctl_clockrate(oldp, oldlenp));
694 case KERN_BOOTTIME:
695 {
696 struct timeval t;
697
698 t.tv_sec = boottime_sec();
699 t.tv_usec = 0;
700
701 return (sysctl_rdstruct(oldp, oldlenp, newp, &t,
702 sizeof(struct timeval)));
703 }
704 case KERN_VNODE:
705 return (sysctl_vnode(oldp, oldlenp));
706 case KERN_PROC:
707 return (sysctl_doproc(name + 1, namelen - 1, oldp, oldlenp));
708 case KERN_FILE:
709 return (sysctl_file(oldp, oldlenp));
710 #ifdef GPROF
711 case KERN_PROF:
712 return (sysctl_doprof(name + 1, namelen - 1, oldp, oldlenp,
713 newp, newlen));
714 #endif
715 case KERN_POSIX1:
716 return (sysctl_rdint(oldp, oldlenp, newp, _POSIX_VERSION));
717 case KERN_NGROUPS:
718 return (sysctl_rdint(oldp, oldlenp, newp, NGROUPS_MAX));
719 case KERN_JOB_CONTROL:
720 return (sysctl_rdint(oldp, oldlenp, newp, 1));
721 case KERN_SAVED_IDS:
722 #ifdef _POSIX_SAVED_IDS
723 return (sysctl_rdint(oldp, oldlenp, newp, 1));
724 #else
725 return (sysctl_rdint(oldp, oldlenp, newp, 0));
726 #endif
727 case KERN_KDEBUG:
728 return (kdebug_ops(name + 1, namelen - 1, oldp, oldlenp, p));
729 case KERN_PCSAMPLES:
730 return (pcsamples_ops(name + 1, namelen - 1, oldp, oldlenp, p));
731 case KERN_PROCARGS:
732 /* new one as it does not use kinfo_proc */
733 return (sysctl_procargs(name + 1, namelen - 1, oldp, oldlenp, p));
734 case KERN_PROCARGS2:
735 /* new one as it does not use kinfo_proc */
736 return (sysctl_procargs2(name + 1, namelen - 1, oldp, oldlenp, p));
737 case KERN_SYMFILE:
738 error = get_kernel_symfile( p, &str );
739 if ( error )
740 return error;
741 return (sysctl_rdstring(oldp, oldlenp, newp, str));
742 #if NFSCLIENT
743 case KERN_NETBOOT:
744 return (sysctl_rdint(oldp, oldlenp, newp, netboot_root()));
745 #endif
746 case KERN_PANICINFO:
747 return(sysctl_dopanicinfo(name + 1, namelen - 1, oldp, oldlenp,
748 newp, newlen, p));
749 case KERN_AFFINITY:
750 return sysctl_affinity(name+1, namelen-1, oldp, oldlenp,
751 newp, newlen, p);
752 case KERN_TRANSLATE:
753 return sysctl_translate(name+1, namelen-1, oldp, oldlenp, newp,
754 newlen, p);
755 case KERN_CLASSICHANDLER:
756 return sysctl_exec_archhandler_ppc(name+1, namelen-1, oldp,
757 oldlenp, newp, newlen, p);
758 case KERN_AIOMAX:
759 return( sysctl_aiomax( oldp, oldlenp, newp, newlen ) );
760 case KERN_AIOPROCMAX:
761 return( sysctl_aioprocmax( oldp, oldlenp, newp, newlen ) );
762 case KERN_AIOTHREADS:
763 return( sysctl_aiothreads( oldp, oldlenp, newp, newlen ) );
764 case KERN_USRSTACK:
765 return (sysctl_rdint(oldp, oldlenp, newp, (uintptr_t)p->user_stack));
766 case KERN_USRSTACK64:
767 return (sysctl_rdquad(oldp, oldlenp, newp, p->user_stack));
768 case KERN_COREFILE:
769 error = sysctl_string(oldp, oldlenp, newp, newlen,
770 corefilename, sizeof(corefilename));
771 return (error);
772 case KERN_COREDUMP:
773 tmp = do_coredump;
774 error = sysctl_int(oldp, oldlenp, newp, newlen, &do_coredump);
775 if (!error && ((do_coredump < 0) || (do_coredump > 1))) {
776 do_coredump = tmp;
777 error = EINVAL;
778 }
779 return (error);
780 case KERN_SUGID_COREDUMP:
781 tmp = sugid_coredump;
782 error = sysctl_int(oldp, oldlenp, newp, newlen, &sugid_coredump);
783 if (!error && ((sugid_coredump < 0) || (sugid_coredump > 1))) {
784 sugid_coredump = tmp;
785 error = EINVAL;
786 }
787 return (error);
788 case KERN_PROCDELAYTERM:
789 {
790 int old_value, new_value;
791
792 error = 0;
793 if (oldp && *oldlenp < sizeof(int))
794 return (ENOMEM);
795 if ( newp && newlen != sizeof(int) )
796 return(EINVAL);
797 *oldlenp = sizeof(int);
798 old_value = (p->p_lflag & P_LDELAYTERM)? 1: 0;
799 if (oldp && (error = copyout( &old_value, oldp, sizeof(int))))
800 return(error);
801 if (error == 0 && newp )
802 error = copyin( newp, &new_value, sizeof(int) );
803 if (error == 0 && newp) {
804 if (new_value)
805 p->p_lflag |= P_LDELAYTERM;
806 else
807 p->p_lflag &= ~P_LDELAYTERM;
808 }
809 return(error);
810 }
811 case KERN_PROC_LOW_PRI_IO:
812 {
813 int old_value, new_value;
814
815 error = 0;
816 if (oldp && *oldlenp < sizeof(int))
817 return (ENOMEM);
818 if ( newp && newlen != sizeof(int) )
819 return(EINVAL);
820 *oldlenp = sizeof(int);
821
822 old_value = (p->p_lflag & P_LLOW_PRI_IO)? 0x01: 0;
823 if (p->p_lflag & P_LBACKGROUND_IO)
824 old_value |= 0x02;
825
826 if (oldp && (error = copyout( &old_value, oldp, sizeof(int))))
827 return(error);
828 if (error == 0 && newp )
829 error = copyin( newp, &new_value, sizeof(int) );
830 if (error == 0 && newp) {
831 if (new_value & 0x01)
832 p->p_lflag |= P_LLOW_PRI_IO;
833 else if (new_value & 0x02)
834 p->p_lflag |= P_LBACKGROUND_IO;
835 else if (new_value == 0)
836 p->p_lflag &= ~(P_LLOW_PRI_IO | P_LBACKGROUND_IO);
837 }
838 return(error);
839 }
840 case KERN_LOW_PRI_WINDOW:
841 {
842 int old_value, new_value;
843
844 error = 0;
845 if (oldp && *oldlenp < sizeof(old_value) )
846 return (ENOMEM);
847 if ( newp && newlen != sizeof(new_value) )
848 return(EINVAL);
849 *oldlenp = sizeof(old_value);
850
851 old_value = lowpri_IO_window_msecs;
852
853 if (oldp && (error = copyout( &old_value, oldp, *oldlenp)))
854 return(error);
855 if (error == 0 && newp )
856 		error = copyin( newp, &new_value, sizeof(new_value) );
857 if (error == 0 && newp) {
858 lowpri_IO_window_msecs = new_value;
859 }
860 return(error);
861 }
862 case KERN_LOW_PRI_DELAY:
863 {
864 int old_value, new_value;
865
866 error = 0;
867 if (oldp && *oldlenp < sizeof(old_value) )
868 return (ENOMEM);
869 if ( newp && newlen != sizeof(new_value) )
870 return(EINVAL);
871 *oldlenp = sizeof(old_value);
872
873 old_value = lowpri_IO_delay_msecs;
874
875 if (oldp && (error = copyout( &old_value, oldp, *oldlenp)))
876 return(error);
877 if (error == 0 && newp )
878 		error = copyin( newp, &new_value, sizeof(new_value) );
879 if (error == 0 && newp) {
880 lowpri_IO_delay_msecs = new_value;
881 }
882 return(error);
883 }
884 case KERN_NX_PROTECTION:
885 {
886 int old_value, new_value;
887
888 error = 0;
889 if (oldp && *oldlenp < sizeof(old_value) )
890 return (ENOMEM);
891 if ( newp && newlen != sizeof(new_value) )
892 return(EINVAL);
893 *oldlenp = sizeof(old_value);
894
895 old_value = nx_enabled;
896
897 if (oldp && (error = copyout( &old_value, oldp, *oldlenp)))
898 return(error);
899 #ifdef __i386__
900 /*
901 * Only allow setting if NX is supported on the chip
902 */
903 if (cpuid_extfeatures() & CPUID_EXTFEATURE_XD) {
904 #endif
905 if (error == 0 && newp)
906 error = copyin(newp, &new_value,
907 				       sizeof(new_value));
908 if (error == 0 && newp)
909 nx_enabled = new_value;
910 #ifdef __i386__
911 } else if (newp) {
912 error = ENOTSUP;
913 }
914 #endif
915 return(error);
916 }
917 case KERN_SHREG_PRIVATIZABLE:
918 /* this kernel does implement shared_region_make_private_np() */
919 return (sysctl_rdint(oldp, oldlenp, newp, 1));
920 case KERN_PROCNAME:
921 error = sysctl_trstring(oldp, oldlenp, newp, newlen,
922 &p->p_name[0], (2*MAXCOMLEN+1));
923 return (error);
924 case KERN_THALTSTACK:
925 {
926 int old_value, new_value;
927
928 error = 0;
929 if (oldp && *oldlenp < sizeof(int))
930 return (ENOMEM);
931 if ( newp && newlen != sizeof(int) )
932 return(EINVAL);
933 *oldlenp = sizeof(int);
934 old_value = (p->p_lflag & P_LTHSIGSTACK)? 1: 0;
935 if (oldp && (error = copyout( &old_value, oldp, sizeof(int))))
936 return(error);
937 if (error == 0 && newp )
938 error = copyin( newp, &new_value, sizeof(int) );
939 if (error == 0 && newp) {
940 if (new_value) {
941 				/* we cannot switch midstream if in use */
942 if ((p->p_sigacts->ps_flags & SAS_ALTSTACK) == SAS_ALTSTACK)
943 return(EPERM);
944 p->p_lflag |= P_LTHSIGSTACK;
945 } else {
946 				/* we cannot switch midstream */
947 if ((p->p_lflag & P_LTHSIGSTACK) == P_LTHSIGSTACK)
948 return(EPERM);
949 p->p_lflag &= ~P_LTHSIGSTACK;
950 }
951 }
952 return(error);
953 }
954 default:
955 return (ENOTSUP);
956 }
957 /* NOTREACHED */
958 }
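/*
 * Illustrative userland sketch of the per-process KERN_PROC_LOW_PRI_IO
 * case above: read the calling process' current setting and enable
 * low-priority I/O (0x01).  __sysctl() exempts this selector from the
 * superuser check, so no special privilege is needed; the output format
 * is an assumption made for the example.
 *
 *    #include <sys/types.h>
 *    #include <sys/sysctl.h>
 *    #include <stdio.h>
 *
 *    int
 *    main(void)
 *    {
 *        int mib[2] = { CTL_KERN, KERN_PROC_LOW_PRI_IO };
 *        int old_value = 0, new_value = 0x01;
 *        size_t len = sizeof(old_value);
 *
 *        if (sysctl(mib, 2, &old_value, &len, &new_value,
 *            sizeof(new_value)) == -1) {
 *            perror("sysctl");
 *            return 1;
 *        }
 *        printf("low-priority I/O was %d, now enabled\n", old_value);
 *        return 0;
 *    }
 */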
959
960 #ifdef DEBUG
961 /*
962 * Debugging related system variables.
963 */
964 #if DIAGNOSTIC
965 extern
966 #endif /* DIAGNOSTIC */
967 struct ctldebug debug0, debug1;
968 struct ctldebug debug2, debug3, debug4;
969 struct ctldebug debug5, debug6, debug7, debug8, debug9;
970 struct ctldebug debug10, debug11, debug12, debug13, debug14;
971 struct ctldebug debug15, debug16, debug17, debug18, debug19;
972 static struct ctldebug *debugvars[CTL_DEBUG_MAXID] = {
973 &debug0, &debug1, &debug2, &debug3, &debug4,
974 &debug5, &debug6, &debug7, &debug8, &debug9,
975 &debug10, &debug11, &debug12, &debug13, &debug14,
976 &debug15, &debug16, &debug17, &debug18, &debug19,
977 };
978 int
979 debug_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
980 user_addr_t newp, size_t newlen, struct proc *p)
981 {
982 struct ctldebug *cdp;
983
984 /* all sysctl names at this level are name and field */
985 if (namelen != 2)
986 return (ENOTDIR); /* overloaded */
987 cdp = debugvars[name[0]];
988 if (cdp->debugname == 0)
989 return (ENOTSUP);
990 switch (name[1]) {
991 case CTL_DEBUG_NAME:
992 return (sysctl_rdstring(oldp, oldlenp, newp, cdp->debugname));
993 case CTL_DEBUG_VALUE:
994 return (sysctl_int(oldp, oldlenp, newp, newlen, cdp->debugvar));
995 default:
996 return (ENOTSUP);
997 }
998 /* NOTREACHED */
999 }
1000 #endif /* DEBUG */
1001
1002 /*
1003 * Validate parameters and get old / set new parameters
1004 * for an integer-valued sysctl function.
1005 */
1006 int
1007 sysctl_int(user_addr_t oldp, size_t *oldlenp,
1008 user_addr_t newp, size_t newlen, int *valp)
1009 {
1010 int error = 0;
1011
1012 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1013 return (EFAULT);
1014 if (oldp && *oldlenp < sizeof(int))
1015 return (ENOMEM);
1016 if (newp && newlen != sizeof(int))
1017 return (EINVAL);
1018 *oldlenp = sizeof(int);
1019 if (oldp)
1020 error = copyout(valp, oldp, sizeof(int));
1021 if (error == 0 && newp) {
1022 error = copyin(newp, valp, sizeof(int));
1023 AUDIT_ARG(value, *valp);
1024 }
1025 return (error);
1026 }
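/*
 * Illustrative in-kernel sketch of the usual sysctl_int() pattern for a
 * writable integer with validation, mirroring the KERN_COREDUMP case in
 * kern_sysctl() above: save the old value, let sysctl_int() do the
 * copyout/copyin, and restore it if the written value is out of range.
 * KERN_EXAMPLE and example_tunable are hypothetical names used only for
 * this sketch.
 *
 *    static int example_tunable = 1;
 *
 *    case KERN_EXAMPLE:
 *        tmp = example_tunable;
 *        error = sysctl_int(oldp, oldlenp, newp, newlen,
 *            &example_tunable);
 *        if (!error && (example_tunable < 0 || example_tunable > 1)) {
 *            example_tunable = tmp;
 *            error = EINVAL;
 *        }
 *        return (error);
 */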
1027
1028 /*
1029 * As above, but read-only.
1030 */
1031 int
1032 sysctl_rdint(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, int val)
1033 {
1034 int error = 0;
1035
1036 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1037 return (EFAULT);
1038 if (oldp && *oldlenp < sizeof(int))
1039 return (ENOMEM);
1040 if (newp)
1041 return (EPERM);
1042 *oldlenp = sizeof(int);
1043 if (oldp)
1044 error = copyout((caddr_t)&val, oldp, sizeof(int));
1045 return (error);
1046 }
1047
1048 /*
1049 * Validate parameters and get old / set new parameters
1050  * for a quad (64-bit) valued sysctl function.
1051 */
1052 int
1053 sysctl_quad(user_addr_t oldp, size_t *oldlenp,
1054 user_addr_t newp, size_t newlen, quad_t *valp)
1055 {
1056 int error = 0;
1057
1058 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1059 return (EFAULT);
1060 if (oldp && *oldlenp < sizeof(quad_t))
1061 return (ENOMEM);
1062 if (newp && newlen != sizeof(quad_t))
1063 return (EINVAL);
1064 *oldlenp = sizeof(quad_t);
1065 if (oldp)
1066 error = copyout(valp, oldp, sizeof(quad_t));
1067 if (error == 0 && newp)
1068 error = copyin(newp, valp, sizeof(quad_t));
1069 return (error);
1070 }
1071
1072 /*
1073 * As above, but read-only.
1074 */
1075 int
1076 sysctl_rdquad(oldp, oldlenp, newp, val)
1077 void *oldp;
1078 size_t *oldlenp;
1079 void *newp;
1080 quad_t val;
1081 {
1082 int error = 0;
1083
1084 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1085 return (EFAULT);
1086 if (oldp && *oldlenp < sizeof(quad_t))
1087 return (ENOMEM);
1088 if (newp)
1089 return (EPERM);
1090 *oldlenp = sizeof(quad_t);
1091 if (oldp)
1092 error = copyout((caddr_t)&val, CAST_USER_ADDR_T(oldp), sizeof(quad_t));
1093 return (error);
1094 }
1095
1096 /*
1097 * Validate parameters and get old / set new parameters
1098 * for a string-valued sysctl function. Unlike sysctl_string, if you
1099 * give it a too small (but larger than 0 bytes) buffer, instead of
1100 * returning ENOMEM, it truncates the returned string to the buffer
1101 * size. This preserves the semantics of some library routines
1102 * implemented via sysctl, which truncate their returned data, rather
1103 * than simply returning an error. The returned string is always NUL
1104 * terminated.
1105 */
1106 int
1107 sysctl_trstring(user_addr_t oldp, size_t *oldlenp,
1108 user_addr_t newp, size_t newlen, char *str, int maxlen)
1109 {
1110 int len, copylen, error = 0;
1111
1112 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1113 return (EFAULT);
1114 copylen = len = strlen(str) + 1;
1115 if (oldp && (len < 0 || *oldlenp < 1))
1116 return (ENOMEM);
1117 if (oldp && (*oldlenp < (size_t)len))
1118 copylen = *oldlenp + 1;
1119 if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
1120 return (EINVAL);
1121 *oldlenp = copylen - 1; /* deal with NULL strings correctly */
1122 if (oldp) {
1123 error = copyout(str, oldp, copylen);
1124 if (!error) {
1125 unsigned char c = 0;
1126 /* NUL terminate */
1127 oldp += *oldlenp;
1128 error = copyout((void *)&c, oldp, sizeof(char));
1129 }
1130 }
1131 if (error == 0 && newp) {
1132 error = copyin(newp, str, newlen);
1133 str[newlen] = 0;
1134 AUDIT_ARG(text, (char *)str);
1135 }
1136 return (error);
1137 }
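/*
 * Illustrative userland sketch of the truncating behaviour described
 * above.  KERN_HOSTNAME is served by sysctl_trstring(), so an undersized
 * (but non-empty) buffer yields a NUL-terminated prefix instead of
 * ENOMEM.  Note that when truncating, the code above writes up to
 * *oldlenp + 1 bytes (the prefix plus its terminator), so the sketch
 * passes one byte less than the real buffer size; the buffer size itself
 * is an assumption made for the example.
 *
 *    #include <sys/types.h>
 *    #include <sys/sysctl.h>
 *    #include <stdio.h>
 *
 *    int
 *    main(void)
 *    {
 *        int mib[2] = { CTL_KERN, KERN_HOSTNAME };
 *        char buf[8];
 *        size_t len = sizeof(buf) - 1;
 *
 *        if (sysctl(mib, 2, buf, &len, NULL, 0) == -1) {
 *            perror("sysctl");
 *            return 1;
 *        }
 *        printf("hostname (possibly truncated): %s (%zu bytes)\n",
 *            buf, len);
 *        return 0;
 *    }
 */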
1138
1139 /*
1140 * Validate parameters and get old / set new parameters
1141 * for a string-valued sysctl function.
1142 */
1143 int
1144 sysctl_string(user_addr_t oldp, size_t *oldlenp,
1145 user_addr_t newp, size_t newlen, char *str, int maxlen)
1146 {
1147 int len, error = 0;
1148
1149 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1150 return (EFAULT);
1151 len = strlen(str) + 1;
1152 if (oldp && (len < 0 || *oldlenp < (size_t)len))
1153 return (ENOMEM);
1154 if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
1155 return (EINVAL);
1156 *oldlenp = len -1; /* deal with NULL strings correctly */
1157 if (oldp) {
1158 error = copyout(str, oldp, len);
1159 }
1160 if (error == 0 && newp) {
1161 error = copyin(newp, str, newlen);
1162 str[newlen] = 0;
1163 AUDIT_ARG(text, (char *)str);
1164 }
1165 return (error);
1166 }
1167
1168 /*
1169 * As above, but read-only.
1170 */
1171 int
1172 sysctl_rdstring(user_addr_t oldp, size_t *oldlenp,
1173 user_addr_t newp, char *str)
1174 {
1175 int len, error = 0;
1176
1177 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1178 return (EFAULT);
1179 len = strlen(str) + 1;
1180 if (oldp && *oldlenp < (size_t)len)
1181 return (ENOMEM);
1182 if (newp)
1183 return (EPERM);
1184 *oldlenp = len;
1185 if (oldp)
1186 error = copyout(str, oldp, len);
1187 return (error);
1188 }
1189
1190 /*
1191 * Validate parameters and get old / set new parameters
1192 * for a structure oriented sysctl function.
1193 */
1194 int
1195 sysctl_struct(user_addr_t oldp, size_t *oldlenp,
1196 user_addr_t newp, size_t newlen, void *sp, int len)
1197 {
1198 int error = 0;
1199
1200 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1201 return (EFAULT);
1202 if (oldp && (len < 0 || *oldlenp < (size_t)len))
1203 return (ENOMEM);
1204 if (newp && (len < 0 || newlen > (size_t)len))
1205 return (EINVAL);
1206 if (oldp) {
1207 *oldlenp = len;
1208 error = copyout(sp, oldp, len);
1209 }
1210 if (error == 0 && newp)
1211 error = copyin(newp, sp, len);
1212 return (error);
1213 }
1214
1215 /*
1216 * Validate parameters and get old parameters
1217 * for a structure oriented sysctl function.
1218 */
1219 int
1220 sysctl_rdstruct(user_addr_t oldp, size_t *oldlenp,
1221 user_addr_t newp, void *sp, int len)
1222 {
1223 int error = 0;
1224
1225 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1226 return (EFAULT);
1227 if (oldp && (len < 0 || *oldlenp < (size_t)len))
1228 return (ENOMEM);
1229 if (newp)
1230 return (EPERM);
1231 *oldlenp = len;
1232 if (oldp)
1233 error = copyout(sp, oldp, len);
1234 return (error);
1235 }
1236
1237 /*
1238 * Get file structures.
1239 */
1240 int
1241 sysctl_file(user_addr_t where, size_t *sizep)
1242 {
1243 int buflen, error;
1244 struct fileglob *fg;
1245 user_addr_t start = where;
1246 struct extern_file nef;
1247
1248 buflen = *sizep;
1249 if (where == USER_ADDR_NULL) {
1250 /*
1251 * overestimate by 10 files
1252 */
1253 *sizep = sizeof(filehead) + (nfiles + 10) * sizeof(struct extern_file);
1254 return (0);
1255 }
1256
1257 /*
1258 * first copyout filehead
1259 */
1260 if (buflen < 0 || (size_t)buflen < sizeof(filehead)) {
1261 *sizep = 0;
1262 return (0);
1263 }
1264 error = copyout((caddr_t)&filehead, where, sizeof(filehead));
1265 if (error)
1266 return (error);
1267 buflen -= sizeof(filehead);
1268 where += sizeof(filehead);
1269
1270 /*
1271 * followed by an array of file structures
1272 */
1273 for (fg = filehead.lh_first; fg != 0; fg = fg->f_list.le_next) {
1274 if (buflen < 0 || (size_t)buflen < sizeof(struct extern_file)) {
1275 *sizep = where - start;
1276 return (ENOMEM);
1277 }
1278 nef.f_list.le_next = (struct extern_file *)fg->f_list.le_next;
1279 nef.f_list.le_prev = (struct extern_file **)fg->f_list.le_prev;
1280 nef.f_flag = (fg->fg_flag & FMASK);
1281 nef.f_type = fg->fg_type;
1282 nef.f_count = fg->fg_count;
1283 nef.f_msgcount = fg->fg_msgcount;
1284 nef.f_cred = fg->fg_cred;
1285 nef.f_ops = fg->fg_ops;
1286 nef.f_offset = fg->fg_offset;
1287 nef.f_data = fg->fg_data;
1288 error = copyout((caddr_t)&nef, where, sizeof (struct extern_file));
1289 if (error)
1290 return (error);
1291 buflen -= sizeof(struct extern_file);
1292 where += sizeof(struct extern_file);
1293 }
1294 *sizep = where - start;
1295 return (0);
1296 }
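/*
 * Illustrative userland sketch of fetching the KERN_FILE buffer laid out
 * by sysctl_file() above: a copy of filehead followed by an array of
 * extern_file records.  The first call sizes the buffer (the kernel
 * overestimates by ten files, as noted above); the payload is treated as
 * opaque bytes here, and decoding it is assumed to be done with the
 * kernel's filehead and extern_file definitions.
 *
 *    #include <sys/types.h>
 *    #include <sys/sysctl.h>
 *    #include <stdio.h>
 *    #include <stdlib.h>
 *
 *    int
 *    main(void)
 *    {
 *        int mib[2] = { CTL_KERN, KERN_FILE };
 *        size_t len = 0;
 *        char *buf;
 *
 *        if (sysctl(mib, 2, NULL, &len, NULL, 0) == -1)
 *            return 1;
 *        if ((buf = malloc(len)) == NULL)
 *            return 1;
 *        if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
 *            return 1;
 *        printf("KERN_FILE returned %zu bytes\n", len);
 *        free(buf);
 *        return 0;
 *    }
 */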
1297
1298 /*
1299  * try overestimating by 5 procs
1300 */
1301 #define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
1302
1303 int
1304 sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep)
1305 {
1306 struct proc *p;
1307 user_addr_t dp = where;
1308 size_t needed = 0;
1309 int buflen = where != USER_ADDR_NULL ? *sizep : 0;
1310 int doingzomb;
1311 int error = 0;
1312 boolean_t is_64_bit = FALSE;
1313 struct kinfo_proc kproc;
1314 struct user_kinfo_proc user_kproc;
1315 int sizeof_kproc;
1316 caddr_t kprocp;
1317
1318 if (namelen != 2 && !(namelen == 1 && name[0] == KERN_PROC_ALL))
1319 return (EINVAL);
1320 p = allproc.lh_first;
1321 doingzomb = 0;
1322 is_64_bit = proc_is64bit(current_proc());
1323 if (is_64_bit) {
1324 sizeof_kproc = sizeof(user_kproc);
1325 kprocp = (caddr_t) &user_kproc;
1326 }
1327 else {
1328 sizeof_kproc = sizeof(kproc);
1329 kprocp = (caddr_t) &kproc;
1330 }
1331 again:
1332 for (; p != 0; p = p->p_list.le_next) {
1333 /*
1334 * Skip embryonic processes.
1335 */
1336 if (p->p_stat == SIDL)
1337 continue;
1338 /*
1339 * TODO - make more efficient (see notes below).
1340 * do by session.
1341 */
1342 switch (name[0]) {
1343
1344 case KERN_PROC_PID:
1345 /* could do this with just a lookup */
1346 if (p->p_pid != (pid_t)name[1])
1347 continue;
1348 break;
1349
1350 case KERN_PROC_PGRP:
1351 /* could do this by traversing pgrp */
1352 if (p->p_pgrp->pg_id != (pid_t)name[1])
1353 continue;
1354 break;
1355
1356 case KERN_PROC_TTY:
1357 if ((p->p_flag & P_CONTROLT) == 0 ||
1358 (p->p_session == NULL) ||
1359 p->p_session->s_ttyp == NULL ||
1360 p->p_session->s_ttyp->t_dev != (dev_t)name[1])
1361 continue;
1362 break;
1363
1364 case KERN_PROC_UID:
1365 if ((p->p_ucred == NULL) ||
1366 (kauth_cred_getuid(p->p_ucred) != (uid_t)name[1]))
1367 continue;
1368 break;
1369
1370 case KERN_PROC_RUID:
1371 if ((p->p_ucred == NULL) ||
1372 (p->p_ucred->cr_ruid != (uid_t)name[1]))
1373 continue;
1374 break;
1375 }
1376 if (buflen >= sizeof_kproc) {
1377 bzero(kprocp, sizeof_kproc);
1378 if (is_64_bit) {
1379 fill_user_proc(p, (struct user_kinfo_proc *) kprocp);
1380 }
1381 else {
1382 fill_proc(p, (struct kinfo_proc *) kprocp);
1383 }
1384 error = copyout(kprocp, dp, sizeof_kproc);
1385 if (error)
1386 return (error);
1387 dp += sizeof_kproc;
1388 buflen -= sizeof_kproc;
1389 }
1390 needed += sizeof_kproc;
1391 }
1392 if (doingzomb == 0) {
1393 p = zombproc.lh_first;
1394 doingzomb++;
1395 goto again;
1396 }
1397 if (where != USER_ADDR_NULL) {
1398 *sizep = dp - where;
1399 if (needed > *sizep)
1400 return (ENOMEM);
1401 } else {
1402 needed += KERN_PROCSLOP;
1403 *sizep = needed;
1404 }
1405 return (0);
1406 }
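/*
 * Illustrative userland sketch of the usual two-pass pattern against
 * sysctl_doproc() above: size the buffer with a NULL old pointer (the
 * KERN_PROCSLOP slop pads the estimate), then fetch an array of
 * kinfo_proc records.  The fields chosen for the printout are an
 * assumption made for the example.
 *
 *    #include <sys/types.h>
 *    #include <sys/sysctl.h>
 *    #include <stdio.h>
 *    #include <stdlib.h>
 *
 *    int
 *    main(void)
 *    {
 *        int mib[3] = { CTL_KERN, KERN_PROC, KERN_PROC_ALL };
 *        struct kinfo_proc *kp;
 *        size_t len = 0;
 *        int i, n;
 *
 *        if (sysctl(mib, 3, NULL, &len, NULL, 0) == -1)
 *            return 1;
 *        if ((kp = malloc(len)) == NULL)
 *            return 1;
 *        if (sysctl(mib, 3, kp, &len, NULL, 0) == -1)
 *            return 1;
 *        n = len / sizeof(struct kinfo_proc);
 *        for (i = 0; i < n; i++)
 *            printf("%d\t%s\n", kp[i].kp_proc.p_pid,
 *                kp[i].kp_proc.p_comm);
 *        free(kp);
 *        return 0;
 *    }
 */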
1407
1408 /*
1409 * Fill in an eproc structure for the specified process.
1410 */
1411 static void
1412 fill_eproc(p, ep)
1413 register struct proc *p;
1414 register struct eproc *ep;
1415 {
1416 register struct tty *tp;
1417
1418 ep->e_paddr = p;
1419 if (p->p_pgrp) {
1420 ep->e_sess = p->p_pgrp->pg_session;
1421 ep->e_pgid = p->p_pgrp->pg_id;
1422 ep->e_jobc = p->p_pgrp->pg_jobc;
1423 if (ep->e_sess && ep->e_sess->s_ttyvp)
1424 ep->e_flag = EPROC_CTTY;
1425 } else {
1426 ep->e_sess = (struct session *)0;
1427 ep->e_pgid = 0;
1428 ep->e_jobc = 0;
1429 }
1430 ep->e_ppid = (p->p_pptr) ? p->p_pptr->p_pid : 0;
1431 /* Pre-zero the fake historical pcred */
1432 bzero(&ep->e_pcred, sizeof(struct _pcred));
1433 if (p->p_ucred) {
1434 /* XXX not ref-counted */
1435
1436 /* A fake historical pcred */
1437 ep->e_pcred.p_ruid = p->p_ucred->cr_ruid;
1438 ep->e_pcred.p_svuid = p->p_ucred->cr_svuid;
1439 ep->e_pcred.p_rgid = p->p_ucred->cr_rgid;
1440 ep->e_pcred.p_svgid = p->p_ucred->cr_svgid;
1441
1442 /* A fake historical *kauth_cred_t */
1443 ep->e_ucred.cr_ref = p->p_ucred->cr_ref;
1444 ep->e_ucred.cr_uid = kauth_cred_getuid(p->p_ucred);
1445 ep->e_ucred.cr_ngroups = p->p_ucred->cr_ngroups;
1446 bcopy(p->p_ucred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));
1447
1448 }
1449 if (p->p_stat == SIDL || p->p_stat == SZOMB) {
1450 ep->e_vm.vm_tsize = 0;
1451 ep->e_vm.vm_dsize = 0;
1452 ep->e_vm.vm_ssize = 0;
1453 }
1454 ep->e_vm.vm_rssize = 0;
1455
1456 if ((p->p_flag & P_CONTROLT) && (ep->e_sess) &&
1457 (tp = ep->e_sess->s_ttyp)) {
1458 ep->e_tdev = tp->t_dev;
1459 ep->e_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
1460 ep->e_tsess = tp->t_session;
1461 } else
1462 ep->e_tdev = NODEV;
1463
1464 if (SESS_LEADER(p))
1465 ep->e_flag |= EPROC_SLEADER;
1466 if (p->p_wmesg)
1467 strncpy(ep->e_wmesg, p->p_wmesg, WMESGLEN);
1468 ep->e_xsize = ep->e_xrssize = 0;
1469 ep->e_xccount = ep->e_xswrss = 0;
1470 }
1471
1472 /*
1473 * Fill in an LP64 version of eproc structure for the specified process.
1474 */
1475 static void
1476 fill_user_eproc(register struct proc *p, register struct user_eproc *ep)
1477 {
1478 register struct tty *tp;
1479 struct session *sessionp = NULL;
1480
1481 ep->e_paddr = CAST_USER_ADDR_T(p);
1482 if (p->p_pgrp) {
1483 sessionp = p->p_pgrp->pg_session;
1484 ep->e_sess = CAST_USER_ADDR_T(sessionp);
1485 ep->e_pgid = p->p_pgrp->pg_id;
1486 ep->e_jobc = p->p_pgrp->pg_jobc;
1487 if (sessionp) {
1488 if (sessionp->s_ttyvp)
1489 ep->e_flag = EPROC_CTTY;
1490 }
1491 } else {
1492 ep->e_sess = USER_ADDR_NULL;
1493 ep->e_pgid = 0;
1494 ep->e_jobc = 0;
1495 }
1496 ep->e_ppid = (p->p_pptr) ? p->p_pptr->p_pid : 0;
1497 /* Pre-zero the fake historical pcred */
1498 bzero(&ep->e_pcred, sizeof(ep->e_pcred));
1499 if (p->p_ucred) {
1500 /* XXX not ref-counted */
1501
1502 /* A fake historical pcred */
1503 ep->e_pcred.p_ruid = p->p_ucred->cr_ruid;
1504 ep->e_pcred.p_svuid = p->p_ucred->cr_svuid;
1505 ep->e_pcred.p_rgid = p->p_ucred->cr_rgid;
1506 ep->e_pcred.p_svgid = p->p_ucred->cr_svgid;
1507
1508 /* A fake historical *kauth_cred_t */
1509 ep->e_ucred.cr_ref = p->p_ucred->cr_ref;
1510 ep->e_ucred.cr_uid = kauth_cred_getuid(p->p_ucred);
1511 ep->e_ucred.cr_ngroups = p->p_ucred->cr_ngroups;
1512 bcopy(p->p_ucred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));
1513
1514 }
1515 if (p->p_stat == SIDL || p->p_stat == SZOMB) {
1516 ep->e_vm.vm_tsize = 0;
1517 ep->e_vm.vm_dsize = 0;
1518 ep->e_vm.vm_ssize = 0;
1519 }
1520 ep->e_vm.vm_rssize = 0;
1521
1522 if ((p->p_flag & P_CONTROLT) && (sessionp) &&
1523 (tp = sessionp->s_ttyp)) {
1524 ep->e_tdev = tp->t_dev;
1525 ep->e_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
1526 ep->e_tsess = CAST_USER_ADDR_T(tp->t_session);
1527 } else
1528 ep->e_tdev = NODEV;
1529
1530 if (SESS_LEADER(p))
1531 ep->e_flag |= EPROC_SLEADER;
1532 if (p->p_wmesg)
1533 strncpy(ep->e_wmesg, p->p_wmesg, WMESGLEN);
1534 ep->e_xsize = ep->e_xrssize = 0;
1535 ep->e_xccount = ep->e_xswrss = 0;
1536 }
1537
1538 /*
1539 * Fill in an eproc structure for the specified process.
1540 */
1541 static void
1542 fill_externproc(p, exp)
1543 register struct proc *p;
1544 register struct extern_proc *exp;
1545 {
1546 exp->p_forw = exp->p_back = NULL;
1547 if (p->p_stats)
1548 exp->p_starttime = p->p_stats->p_start;
1549 exp->p_vmspace = NULL;
1550 exp->p_sigacts = p->p_sigacts;
1551 exp->p_flag = p->p_flag;
1552 exp->p_stat = p->p_stat ;
1553 exp->p_pid = p->p_pid ;
1554 exp->p_oppid = p->p_oppid ;
1555 exp->p_dupfd = p->p_dupfd ;
1556 /* Mach related */
1557 exp->user_stack = CAST_DOWN(caddr_t, p->user_stack);
1558 exp->exit_thread = p->exit_thread ;
1559 exp->p_debugger = p->p_debugger ;
1560 exp->sigwait = p->sigwait ;
1561 /* scheduling */
1562 exp->p_estcpu = p->p_estcpu ;
1563 exp->p_cpticks = p->p_cpticks ;
1564 exp->p_pctcpu = p->p_pctcpu ;
1565 exp->p_wchan = p->p_wchan ;
1566 exp->p_wmesg = p->p_wmesg ;
1567 exp->p_swtime = p->p_swtime ;
1568 exp->p_slptime = p->p_slptime ;
1569 bcopy(&p->p_realtimer, &exp->p_realtimer,sizeof(struct itimerval));
1570 bcopy(&p->p_rtime, &exp->p_rtime,sizeof(struct timeval));
1571 exp->p_uticks = p->p_uticks ;
1572 exp->p_sticks = p->p_sticks ;
1573 exp->p_iticks = p->p_iticks ;
1574 exp->p_traceflag = p->p_traceflag ;
1575 exp->p_tracep = p->p_tracep ;
1576 exp->p_siglist = 0 ; /* No longer relevant */
1577 exp->p_textvp = p->p_textvp ;
1578 exp->p_holdcnt = 0 ;
1579 	exp->p_sigmask = 0 ;		/* no longer available */
1580 exp->p_sigignore = p->p_sigignore ;
1581 exp->p_sigcatch = p->p_sigcatch ;
1582 exp->p_priority = p->p_priority ;
1583 exp->p_usrpri = p->p_usrpri ;
1584 exp->p_nice = p->p_nice ;
1585 bcopy(&p->p_comm, &exp->p_comm,MAXCOMLEN);
1586 exp->p_comm[MAXCOMLEN] = '\0';
1587 exp->p_pgrp = p->p_pgrp ;
1588 exp->p_addr = NULL;
1589 exp->p_xstat = p->p_xstat ;
1590 exp->p_acflag = p->p_acflag ;
1591 exp->p_ru = p->p_ru ; /* XXX may be NULL */
1592 }
1593
1594 /*
1595 * Fill in an LP64 version of extern_proc structure for the specified process.
1596 */
1597 static void
1598 fill_user_externproc(register struct proc *p, register struct user_extern_proc *exp)
1599 {
1600 exp->p_forw = exp->p_back = USER_ADDR_NULL;
1601 if (p->p_stats) {
1602 exp->p_starttime.tv_sec = p->p_stats->p_start.tv_sec;
1603 exp->p_starttime.tv_usec = p->p_stats->p_start.tv_usec;
1604 }
1605 exp->p_vmspace = USER_ADDR_NULL;
1606 exp->p_sigacts = CAST_USER_ADDR_T(p->p_sigacts);
1607 exp->p_flag = p->p_flag;
1608 exp->p_stat = p->p_stat ;
1609 exp->p_pid = p->p_pid ;
1610 exp->p_oppid = p->p_oppid ;
1611 exp->p_dupfd = p->p_dupfd ;
1612 /* Mach related */
1613 exp->user_stack = p->user_stack;
1614 exp->exit_thread = CAST_USER_ADDR_T(p->exit_thread);
1615 exp->p_debugger = p->p_debugger ;
1616 exp->sigwait = p->sigwait ;
1617 /* scheduling */
1618 exp->p_estcpu = p->p_estcpu ;
1619 exp->p_cpticks = p->p_cpticks ;
1620 exp->p_pctcpu = p->p_pctcpu ;
1621 exp->p_wchan = CAST_USER_ADDR_T(p->p_wchan);
1622 exp->p_wmesg = CAST_USER_ADDR_T(p->p_wmesg);
1623 exp->p_swtime = p->p_swtime ;
1624 exp->p_slptime = p->p_slptime ;
1625 exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
1626 exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;
1627 exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
1628 exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;
1629 exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
1630 exp->p_rtime.tv_usec = p->p_rtime.tv_usec;
1631 exp->p_uticks = p->p_uticks ;
1632 exp->p_sticks = p->p_sticks ;
1633 exp->p_iticks = p->p_iticks ;
1634 exp->p_traceflag = p->p_traceflag ;
1635 exp->p_tracep = CAST_USER_ADDR_T(p->p_tracep);
1636 exp->p_siglist = 0 ; /* No longer relevant */
1637 exp->p_textvp = CAST_USER_ADDR_T(p->p_textvp);
1638 exp->p_holdcnt = 0 ;
1639 	exp->p_sigmask = 0 ;		/* no longer available */
1640 exp->p_sigignore = p->p_sigignore ;
1641 exp->p_sigcatch = p->p_sigcatch ;
1642 exp->p_priority = p->p_priority ;
1643 exp->p_usrpri = p->p_usrpri ;
1644 exp->p_nice = p->p_nice ;
1645 bcopy(&p->p_comm, &exp->p_comm,MAXCOMLEN);
1646 exp->p_comm[MAXCOMLEN] = '\0';
1647 exp->p_pgrp = CAST_USER_ADDR_T(p->p_pgrp);
1648 exp->p_addr = USER_ADDR_NULL;
1649 exp->p_xstat = p->p_xstat ;
1650 exp->p_acflag = p->p_acflag ;
1651 exp->p_ru = CAST_USER_ADDR_T(p->p_ru); /* XXX may be NULL */
1652 }
1653
1654 static void
1655 fill_proc(p, kp)
1656 register struct proc *p;
1657 register struct kinfo_proc *kp;
1658 {
1659 fill_externproc(p, &kp->kp_proc);
1660 fill_eproc(p, &kp->kp_eproc);
1661 }
1662
1663 static void
1664 fill_user_proc(register struct proc *p, register struct user_kinfo_proc *kp)
1665 {
1666 fill_user_externproc(p, &kp->kp_proc);
1667 fill_user_eproc(p, &kp->kp_eproc);
1668 }
1669
1670 int
1671 kdebug_ops(int *name, u_int namelen, user_addr_t where,
1672 size_t *sizep, struct proc *p)
1673 {
1674 int ret=0;
1675
1676 ret = suser(kauth_cred_get(), &p->p_acflag);
1677 if (ret)
1678 return(ret);
1679
1680 switch(name[0]) {
1681 case KERN_KDEFLAGS:
1682 case KERN_KDDFLAGS:
1683 case KERN_KDENABLE:
1684 case KERN_KDGETBUF:
1685 case KERN_KDSETUP:
1686 case KERN_KDREMOVE:
1687 case KERN_KDSETREG:
1688 case KERN_KDGETREG:
1689 case KERN_KDREADTR:
1690 case KERN_KDPIDTR:
1691 case KERN_KDTHRMAP:
1692 case KERN_KDPIDEX:
1693 case KERN_KDSETRTCDEC:
1694 case KERN_KDSETBUF:
1695 case KERN_KDGETENTROPY:
1696 ret = kdbg_control(name, namelen, where, sizep);
1697 break;
1698 default:
1699 ret= ENOTSUP;
1700 break;
1701 }
1702 return(ret);
1703 }
1704
1705 extern int pcsamples_control(int *name, u_int namelen, user_addr_t where,
1706 size_t * sizep);
1707
1708 int
1709 pcsamples_ops(int *name, u_int namelen, user_addr_t where,
1710 size_t *sizep, struct proc *p)
1711 {
1712 int ret=0;
1713
1714 ret = suser(kauth_cred_get(), &p->p_acflag);
1715 if (ret)
1716 return(ret);
1717
1718 switch(name[0]) {
1719 case KERN_PCDISABLE:
1720 case KERN_PCGETBUF:
1721 case KERN_PCSETUP:
1722 case KERN_PCREMOVE:
1723 case KERN_PCREADBUF:
1724 case KERN_PCSETREG:
1725 case KERN_PCSETBUF:
1726 case KERN_PCCOMM:
1727 ret = pcsamples_control(name, namelen, where, sizep);
1728 break;
1729 default:
1730 ret= ENOTSUP;
1731 break;
1732 }
1733 return(ret);
1734 }
1735
1736 /*
1737 * Return the top *sizep bytes of the user stack, or the entire area of the
1738 * user stack down through the saved exec_path, whichever is smaller.
1739 */
1740 int
1741 sysctl_procargs(int *name, u_int namelen, user_addr_t where,
1742 size_t *sizep, struct proc *cur_proc)
1743 {
1744 return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 0);
1745 }
1746
1747 static int
1748 sysctl_procargs2(int *name, u_int namelen, user_addr_t where,
1749 size_t *sizep, struct proc *cur_proc)
1750 {
1751 return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 1);
1752 }
1753
1754 static int
1755 sysctl_procargsx(int *name, __unused u_int namelen, user_addr_t where,
1756 size_t *sizep, struct proc *cur_proc, int argc_yes)
1757 {
1758 struct proc *p;
1759 int buflen = where != USER_ADDR_NULL ? *sizep : 0;
1760 int error = 0;
1761 struct vm_map *proc_map;
1762 struct task * task;
1763 vm_map_copy_t tmp;
1764 user_addr_t arg_addr;
1765 size_t arg_size;
1766 caddr_t data;
1767 int size;
1768 vm_offset_t copy_start, copy_end;
1769 kern_return_t ret;
1770 int pid;
1771
1772 if (argc_yes)
1773 buflen -= sizeof(int); /* reserve first word to return argc */
1774
1775 /* we only care about buflen when where (oldp from sysctl) is not NULL. */
1776 	/* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl) */
1777 /* is not NULL then the caller wants us to return the length needed to */
1778 /* hold the data we would return */
1779 if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
1780 return(EINVAL);
1781 }
1782 arg_size = buflen;
1783
1784 /*
1785 * Lookup process by pid
1786 */
1787 pid = name[0];
1788 p = pfind(pid);
1789 if (p == NULL) {
1790 return(EINVAL);
1791 }
1792
1793 /*
1794 * Copy the top N bytes of the stack.
1795 * On all machines we have so far, the stack grows
1796 * downwards.
1797 *
1798 * If the user expects no more than N bytes of
1799 * argument list, use that as a guess for the
1800 * size.
1801 */
1802
1803 if (!p->user_stack)
1804 return(EINVAL);
1805
1806 if (where == USER_ADDR_NULL) {
1807 /* caller only wants to know length of proc args data */
1808 if (sizep == NULL)
1809 return(EFAULT);
1810
1811 size = p->p_argslen;
1812 if (argc_yes) {
1813 size += sizeof(int);
1814 }
1815 else {
1816 /*
1817 			 * old PROCARGS will return the executable's path plus some
1818 			 * extra space for word alignment and data tags
1819 */
1820 size += PATH_MAX + (6 * sizeof(int));
1821 }
1822 size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
1823 *sizep = size;
1824 return (0);
1825 }
1826
1827 if ((kauth_cred_getuid(p->p_ucred) != kauth_cred_getuid(kauth_cred_get()))
1828 && suser(kauth_cred_get(), &cur_proc->p_acflag))
1829 return (EINVAL);
1830
1831 if ((u_int)arg_size > p->p_argslen)
1832 arg_size = round_page(p->p_argslen);
1833
1834 arg_addr = p->user_stack - arg_size;
1835
1836
1837 /*
1838 * Before we can block (any VM code), make another
1839 * reference to the map to keep it alive. We do
1840 * that by getting a reference on the task itself.
1841 */
1842 task = p->task;
1843 if (task == NULL)
1844 return(EINVAL);
1845
1846 /*
1847 * Once we have a task reference we can convert that into a
1848 * map reference, which we will use in the calls below. The
1849 * task/process may change its map after we take this reference
1850 * (see execve), but the worst that will happen then is a return
1851 * of stale info (which is always a possibility).
1852 */
1853 task_reference(task);
1854 proc_map = get_task_map_reference(task);
1855 task_deallocate(task);
1856 if (proc_map == NULL)
1857 return(EINVAL);
1858
1859
1860 ret = kmem_alloc(kernel_map, &copy_start, round_page(arg_size));
1861 if (ret != KERN_SUCCESS) {
1862 vm_map_deallocate(proc_map);
1863 return(ENOMEM);
1864 }
1865
1866 copy_end = round_page(copy_start + arg_size);
1867
1868 if( vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
1869 (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
1870 vm_map_deallocate(proc_map);
1871 kmem_free(kernel_map, copy_start,
1872 round_page(arg_size));
1873 return (EIO);
1874 }
1875
1876 /*
1877 * Now that we've done the copyin from the process'
1878 * map, we can release the reference to it.
1879 */
1880 vm_map_deallocate(proc_map);
1881
1882 if( vm_map_copy_overwrite(kernel_map,
1883 (vm_map_address_t)copy_start,
1884 tmp, FALSE) != KERN_SUCCESS) {
1885 kmem_free(kernel_map, copy_start,
1886 round_page(arg_size));
1887 return (EIO);
1888 }
1889
1890 if (arg_size > p->p_argslen) {
1891 data = (caddr_t) (copy_end - p->p_argslen);
1892 size = p->p_argslen;
1893 } else {
1894 data = (caddr_t) (copy_end - arg_size);
1895 size = arg_size;
1896 }
1897
1898 if (argc_yes) {
1899 /* Put processes argc as the first word in the copyout buffer */
1900 suword(where, p->p_argc);
1901 error = copyout(data, (where + sizeof(int)), size);
1902 size += sizeof(int);
1903 } else {
1904 error = copyout(data, where, size);
1905
1906 /*
1907 		 * Make the old PROCARGS work to return the executable's path,
1908 		 * but only if there is enough space in the provided buffer.
1909 *
1910 		 * on entry: data [possibly] points to the beginning of the path
1911 *
1912 		 * Note: we keep all pointers & sizes aligned to word boundaries
1913 */
1914 if ( (! error) && (buflen > 0 && (u_int)buflen > p->p_argslen) )
1915 {
1916 int binPath_sz, alignedBinPath_sz = 0;
1917 int extraSpaceNeeded, addThis;
1918 user_addr_t placeHere;
1919 char * str = (char *) data;
1920 int max_len = size;
1921
1922 			/* Some apps are really bad about messing up their stacks.
1923 So, we have to be extra careful about getting the length
1924 of the executing binary. If we encounter an error, we bail.
1925 */
1926
1927 /* Limit ourselves to PATH_MAX paths */
1928 if ( max_len > PATH_MAX ) max_len = PATH_MAX;
1929
1930 binPath_sz = 0;
1931
1932 while ( (binPath_sz < max_len-1) && (*str++ != 0) )
1933 binPath_sz++;
1934
1935 /* If we have a NUL terminator, copy it, too */
1936 if (binPath_sz < max_len-1) binPath_sz += 1;
1937
1938 			/* Pre-flight the space requirements */
1939
1940 /* Account for the padding that fills out binPath to the next word */
1941 alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;
1942
1943 placeHere = where + size;
1944
1945 /* Account for the bytes needed to keep placeHere word aligned */
1946 addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;
1947
1948 /* Add up all the space that is needed */
1949 extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));
1950
1951 /* Is there room to tack on argv[0]? */
1952 if ( (buflen & ~(sizeof(int)-1)) >= ( p->p_argslen + extraSpaceNeeded ))
1953 {
1954 placeHere += addThis;
1955 suword(placeHere, 0);
1956 placeHere += sizeof(int);
1957 suword(placeHere, 0xBFFF0000);
1958 placeHere += sizeof(int);
1959 suword(placeHere, 0);
1960 placeHere += sizeof(int);
1961 error = copyout(data, placeHere, binPath_sz);
1962 if ( ! error )
1963 {
1964 placeHere += binPath_sz;
1965 suword(placeHere, 0);
1966 size += extraSpaceNeeded;
1967 }
1968 }
1969 }
1970 }
1971
1972 if (copy_start != (vm_offset_t) 0) {
1973 kmem_free(kernel_map, copy_start, copy_end - copy_start);
1974 }
1975 if (error) {
1976 return(error);
1977 }
1978
1979 if (where != USER_ADDR_NULL)
1980 *sizep = size;
1981 return (0);
1982 }
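/*
 * Usage sketch (illustrative only, not part of this file): how a userspace
 * caller typically reaches the routine above. The binding to KERN_PROCARGS2
 * is an assumption (the caller is not shown in this excerpt); that variant
 * places argc in the first word of the buffer, followed by the exec path.
 * Assumes the Darwin sysctl(3) interface and a buffer sized by KERN_ARGMAX.
 */
#if 0   /* userspace example -- do not compile into the kernel */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int
print_proc_args(pid_t pid)
{
        int mib[3];
        int argmax, argc;
        size_t size;
        char *buf;

        /* Ask the kernel how large the argument area can be. */
        mib[0] = CTL_KERN;
        mib[1] = KERN_ARGMAX;
        size = sizeof(argmax);
        if (sysctl(mib, 2, &argmax, &size, NULL, 0) != 0)
                return (-1);

        if ((buf = malloc((size_t)argmax)) == NULL)
                return (-1);

        /* KERN_PROCARGS2 returns argc in the first int, then the data. */
        mib[0] = CTL_KERN;
        mib[1] = KERN_PROCARGS2;
        mib[2] = (int)pid;
        size = (size_t)argmax;
        if (sysctl(mib, 3, buf, &size, NULL, 0) != 0) {
                free(buf);
                return (-1);
        }

        memcpy(&argc, buf, sizeof(argc));
        printf("argc = %d, exec path = %s\n", argc, buf + sizeof(argc));
        free(buf);
        return (0);
}
#endif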
1983
1984
1985 /*
1986 * Validate parameters and get old / set new parameters
1987 * for the max number of concurrent aio requests. Makes sure
1988 * the system-wide limit is not less than the per-process
1989 * limit.
1990 */
1991 static int
1992 sysctl_aiomax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen)
1993 {
1994 int error = 0;
1995 int new_value;
1996
1997 if ( oldp && *oldlenp < sizeof(int) )
1998 return (ENOMEM);
1999 if ( newp && newlen != sizeof(int) )
2000 return (EINVAL);
2001
2002 *oldlenp = sizeof(int);
2003 if ( oldp )
2004 error = copyout( &aio_max_requests, oldp, sizeof(int) );
2005 if ( error == 0 && newp )
2006 error = copyin( newp, &new_value, sizeof(int) );
2007 if ( error == 0 && newp ) {
2008 if ( new_value >= aio_max_requests_per_process )
2009 aio_max_requests = new_value;
2010 else
2011 error = EINVAL;
2012 }
2013 return( error );
2014
2015 } /* sysctl_aiomax */
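/*
 * Usage sketch (illustrative only): reading and raising the system-wide
 * aio request limit from userspace. The name "kern.aiomax" and the use of
 * sysctlbyname(3) are assumptions about the userspace interface; the same
 * pattern applies to the kern.aioprocmax handler below.
 */
#if 0   /* userspace example -- do not compile into the kernel */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

static int
bump_aiomax(int new_limit)
{
        int cur;
        size_t len = sizeof(cur);

        if (sysctlbyname("kern.aiomax", &cur, &len, NULL, 0) != 0)
                return (-1);
        printf("kern.aiomax is currently %d\n", cur);

        /* The handler above rejects values below the per-process limit. */
        if (new_limit > cur &&
            sysctlbyname("kern.aiomax", NULL, NULL,
                &new_limit, sizeof(new_limit)) != 0)
                return (-1);
        return (0);
}
#endif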
2016
2017
2018 /*
2019 * Validate parameters and get old / set new parameters
2020 * for the max number of concurrent aio requests per process.
2021 * Makes sure the per-process limit does not exceed the system-wide
2022 * limit and is at least AIO_LISTIO_MAX.
2023 */
2024 static int
2025 sysctl_aioprocmax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen )
2026 {
2027 int error = 0;
2028 int new_value = 0;
2029
2030 if ( oldp && *oldlenp < sizeof(int) )
2031 return (ENOMEM);
2032 if ( newp && newlen != sizeof(int) )
2033 return (EINVAL);
2034
2035 *oldlenp = sizeof(int);
2036 if ( oldp )
2037 error = copyout( &aio_max_requests_per_process, oldp, sizeof(int) );
2038 if ( error == 0 && newp )
2039 error = copyin( newp, &new_value, sizeof(int) );
2040 if ( error == 0 && newp ) {
2041 if ( new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX )
2042 aio_max_requests_per_process = new_value;
2043 else
2044 error = EINVAL;
2045 }
2046 return( error );
2047
2048 } /* sysctl_aioprocmax */
2049
2050
2051 /*
2052 * Validate parameters and get old / set new parameters
2053 * for the max number of async I/O worker threads.
2054 * We only allow an increase in the number of worker threads.
2055 */
2056 static int
2057 sysctl_aiothreads(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen)
2058 {
2059 int error = 0;
2060 int new_value;
2061
2062 if ( oldp && *oldlenp < sizeof(int) )
2063 return (ENOMEM);
2064 if ( newp && newlen != sizeof(int) )
2065 return (EINVAL);
2066
2067 *oldlenp = sizeof(int);
2068 if ( oldp )
2069 error = copyout( &aio_worker_threads, oldp, sizeof(int) );
2070 if ( error == 0 && newp )
2071 error = copyin( newp, &new_value, sizeof(int) );
2072 if ( error == 0 && newp ) {
2073 if (new_value > aio_worker_threads ) {
2074 _aio_create_worker_threads( (new_value - aio_worker_threads) );
2075 aio_worker_threads = new_value;
2076 }
2077 else
2078 error = EINVAL;
2079 }
2080 return( error );
2081
2082 } /* sysctl_aiothreads */
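/*
 * Usage sketch (illustrative only): the worker-thread pool can only grow.
 * The MIB {CTL_KERN, KERN_AIOTHREADS} is assumed to be the userspace name
 * for the handler above; shrinking the pool is expected to fail with
 * EINVAL, and setting requires appropriate privilege.
 */
#if 0   /* userspace example -- do not compile into the kernel */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

static void
grow_aio_threads(void)
{
        int mib[2] = { CTL_KERN, KERN_AIOTHREADS };
        int threads, smaller;
        size_t len = sizeof(threads);

        if (sysctl(mib, 2, &threads, &len, NULL, 0) != 0)
                return;

        /* Attempting to shrink the pool should be rejected. */
        smaller = threads - 1;
        if (sysctl(mib, 2, NULL, NULL, &smaller, sizeof(smaller)) != 0)
                printf("shrink rejected as expected: %s\n", strerror(errno));

        /* Growing the pool creates (new - old) additional worker threads. */
        threads += 1;
        (void)sysctl(mib, 2, NULL, NULL, &threads, sizeof(threads));
}
#endif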
2083
2084
2085 /*
2086 * Validate parameters and get old / set new parameters
2087 * for the max number of processes per UID.
2088 * Makes sure the per-UID limit does not exceed the system-wide limit.
2089 */
2090 static int
2091 sysctl_maxprocperuid(user_addr_t oldp, size_t *oldlenp,
2092 user_addr_t newp, size_t newlen)
2093 {
2094 int error = 0;
2095 int new_value;
2096
2097 if ( oldp != USER_ADDR_NULL && *oldlenp < sizeof(int) )
2098 return (ENOMEM);
2099 if ( newp != USER_ADDR_NULL && newlen != sizeof(int) )
2100 return (EINVAL);
2101
2102 *oldlenp = sizeof(int);
2103 if ( oldp != USER_ADDR_NULL )
2104 error = copyout( &maxprocperuid, oldp, sizeof(int) );
2105 if ( error == 0 && newp != USER_ADDR_NULL ) {
2106 error = copyin( newp, &new_value, sizeof(int) );
2107 if ( error == 0 ) {
2108 AUDIT_ARG(value, new_value);
2109 if ( new_value <= maxproc && new_value > 0 )
2110 maxprocperuid = new_value;
2111 else
2112 error = EINVAL;
2113 }
2114 else
2115 error = EINVAL;
2116 }
2117 return( error );
2118
2119 } /* sysctl_maxprocperuid */
2120
2121
2122 /*
2123 * Validate parameters and get old / set new parameters
2124 * for the max number of files per process.
2125 * Makes sure the per-process limit is less than the system-wide limit.
2126 */
2127 static int
2128 sysctl_maxfilesperproc(user_addr_t oldp, size_t *oldlenp,
2129 user_addr_t newp, size_t newlen)
2130 {
2131 int error = 0;
2132 int new_value;
2133
2134 if ( oldp != USER_ADDR_NULL && *oldlenp < sizeof(int) )
2135 return (ENOMEM);
2136 if ( newp != USER_ADDR_NULL && newlen != sizeof(int) )
2137 return (EINVAL);
2138
2139 *oldlenp = sizeof(int);
2140 if ( oldp != USER_ADDR_NULL )
2141 error = copyout( &maxfilesperproc, oldp, sizeof(int) );
2142 if ( error == 0 && newp != USER_ADDR_NULL ) {
2143 error = copyin( newp, &new_value, sizeof(int) );
2144 if ( error == 0 ) {
2145 AUDIT_ARG(value, new_value);
2146 if ( new_value < maxfiles && new_value > 0 )
2147 maxfilesperproc = new_value;
2148 else
2149 error = EINVAL;
2150 }
2151 else
2152 error = EINVAL;
2153 }
2154 return( error );
2155
2156 } /* sysctl_maxfilesperproc */
2157
2158
2159 /*
2160 * Validate parameters and get old / set new parameters
2161 * for the system-wide limit on the max number of processes.
2162 * Makes sure the system-wide limit does not exceed the hard limit
2163 * configured at kernel compile time.
2164 */
2165 static int
2166 sysctl_maxproc(user_addr_t oldp, size_t *oldlenp,
2167 user_addr_t newp, size_t newlen )
2168 {
2169 int error = 0;
2170 int new_value;
2171
2172 if ( oldp != USER_ADDR_NULL && *oldlenp < sizeof(int) )
2173 return (ENOMEM);
2174 if ( newp != USER_ADDR_NULL && newlen != sizeof(int) )
2175 return (EINVAL);
2176
2177 *oldlenp = sizeof(int);
2178 if ( oldp != USER_ADDR_NULL )
2179 error = copyout( &maxproc, oldp, sizeof(int) );
2180 if ( error == 0 && newp != USER_ADDR_NULL ) {
2181 error = copyin( newp, &new_value, sizeof(int) );
2182 if ( error == 0 ) {
2183 AUDIT_ARG(value, new_value);
2184 if ( new_value <= hard_maxproc && new_value > 0 )
2185 maxproc = new_value;
2186 else
2187 error = EINVAL;
2188 }
2189 else
2190 error = EINVAL;
2191 }
2192 return( error );
2193
2194 } /* sysctl_maxproc */
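/*
 * Usage sketch (illustrative only): the limits managed above are visible
 * from userspace as "kern.maxproc", "kern.maxprocperuid" and
 * "kern.maxfilesperproc" (names assumed); the setters keep the per-UID and
 * per-process values within the corresponding system-wide limits.
 */
#if 0   /* userspace example -- do not compile into the kernel */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <assert.h>

static void
check_limits(void)
{
        int maxproc, maxprocperuid, maxfiles, maxfilesperproc;
        size_t len;

        len = sizeof(maxproc);
        if (sysctlbyname("kern.maxproc", &maxproc, &len, NULL, 0) != 0)
                return;
        len = sizeof(maxprocperuid);
        if (sysctlbyname("kern.maxprocperuid", &maxprocperuid, &len, NULL, 0) != 0)
                return;
        len = sizeof(maxfiles);
        if (sysctlbyname("kern.maxfiles", &maxfiles, &len, NULL, 0) != 0)
                return;
        len = sizeof(maxfilesperproc);
        if (sysctlbyname("kern.maxfilesperproc", &maxfilesperproc, &len, NULL, 0) != 0)
                return;

        /* Invariants maintained by the setters above. */
        assert(maxprocperuid <= maxproc);
        assert(maxfilesperproc < maxfiles);
}
#endif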
2195
2196 #if __i386__
2197 static int
2198 sysctl_sysctl_exec_affinity SYSCTL_HANDLER_ARGS
2199 {
2200 struct proc *cur_proc = req->p;
2201 int error;
2202
2203 if (req->oldptr != USER_ADDR_NULL) {
2204 cpu_type_t oldcputype = (cur_proc->p_flag & P_AFFINITY) ? CPU_TYPE_POWERPC : CPU_TYPE_I386;
2205 if ((error = SYSCTL_OUT(req, &oldcputype, sizeof(oldcputype))))
2206 return error;
2207 }
2208
2209 if (req->newptr != USER_ADDR_NULL) {
2210 cpu_type_t newcputype;
2211 if ((error = SYSCTL_IN(req, &newcputype, sizeof(newcputype))))
2212 return error;
2213 if (newcputype == CPU_TYPE_I386)
2214 cur_proc->p_flag &= ~P_AFFINITY;
2215 else if (newcputype == CPU_TYPE_POWERPC)
2216 cur_proc->p_flag |= P_AFFINITY;
2217 else
2218 return (EINVAL);
2219 }
2220
2221 return 0;
2222 }
2223 SYSCTL_PROC(_sysctl, OID_AUTO, proc_exec_affinity, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, 0, 0, sysctl_sysctl_exec_affinity ,"I","proc_exec_affinity");
2224 #endif
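/*
 * Usage sketch (illustrative only, i386 builds only): the read/write OID
 * registered above is reachable by name as "sysctl.proc_exec_affinity"
 * (name derived from the SYSCTL_PROC declaration). Writing
 * CPU_TYPE_POWERPC sets P_AFFINITY on the calling process; writing
 * CPU_TYPE_I386 clears it.
 */
#if 0   /* userspace example -- do not compile into the kernel */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <mach/machine.h>

static int
set_exec_affinity_powerpc(void)
{
        cpu_type_t affinity = CPU_TYPE_POWERPC;

        return sysctlbyname("sysctl.proc_exec_affinity",
            NULL, NULL, &affinity, sizeof(affinity));
}
#endif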
2225
2226 static int
2227 fetch_process_cputype(
2228 struct proc *cur_proc,
2229 int *name,
2230 u_int namelen,
2231 cpu_type_t *cputype)
2232 {
2233 struct proc *p = NULL;
2234 cpu_type_t ret = 0;
2235
2236 if (namelen == 0)
2237 p = cur_proc;
2238 else if (namelen == 1) {
2239 p = pfind(name[0]);
2240 if (p == NULL)
2241 return (EINVAL);
2242 if ((kauth_cred_getuid(p->p_ucred) != kauth_cred_getuid(kauth_cred_get()))
2243 && suser(kauth_cred_get(), &cur_proc->p_acflag))
2244 return (EPERM);
2245 } else {
2246 return EINVAL;
2247 }
2248
2249 #if __i386__
2250 if (p->p_flag & P_TRANSLATED) {
2251 ret = CPU_TYPE_POWERPC;
2252 }
2253 else
2254 #endif
2255 {
2256 ret = cpu_type();
2257 if (IS_64BIT_PROCESS(p))
2258 ret |= CPU_ARCH_ABI64;
2259 }
2260 *cputype = ret;
2261
2262 return 0;
2263 }
2264
2265 static int
2266 sysctl_sysctl_native SYSCTL_HANDLER_ARGS
2267 {
2268 int error;
2269 cpu_type_t proc_cputype = 0;
2270 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2271 return error;
2272 int res = 1;
2273 if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
2274 res = 0;
2275 return SYSCTL_OUT(req, &res, sizeof(res));
2276 }
2277 SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_native ,"I","proc_native");
2278
2279 static int
2280 sysctl_sysctl_cputype SYSCTL_HANDLER_ARGS
2281 {
2282 int error;
2283 cpu_type_t proc_cputype = 0;
2284 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2285 return error;
2286 return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
2287 }
2288 SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_cputype ,"I","proc_cputype");
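/*
 * Usage sketch (illustrative only): both OIDs registered above take an
 * optional trailing pid. With no extra name component (namelen == 0 in
 * fetch_process_cputype) they report on the calling process; appending a
 * pid after sysctlnametomib(3) queries another process, subject to the
 * uid/suser check above. Names are taken from the SYSCTL_PROC
 * declarations; the sysctlnametomib pattern is an assumption.
 */
#if 0   /* userspace example -- do not compile into the kernel */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <mach/machine.h>
#include <stdio.h>

static void
show_cputype(pid_t pid)
{
        int mib[CTL_MAXNAME];
        size_t miblen = CTL_MAXNAME;
        cpu_type_t cputype;
        int native;
        size_t len;

        /* Current process: no pid component is needed. */
        len = sizeof(native);
        if (sysctlbyname("sysctl.proc_native", &native, &len, NULL, 0) == 0)
                printf("caller is %snative\n", native ? "" : "not ");

        /* Another process: resolve the prefix, then append the pid. */
        if (sysctlnametomib("sysctl.proc_cputype", mib, &miblen) != 0)
                return;
        mib[miblen] = (int)pid;
        len = sizeof(cputype);
        if (sysctl(mib, (u_int)miblen + 1, &cputype, &len, NULL, 0) == 0)
                printf("pid %d cputype 0x%x\n", (int)pid, (unsigned int)cputype);
}
#endif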
2289