]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kern_sysctl.c
xnu-1504.7.4.tar.gz
[apple/xnu.git] / bsd / kern / kern_sysctl.c
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*-
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
65 */
66 /*
67 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
68 * support for mandatory and extensible security protections. This notice
69 * is included in support of clause 2.2 (b) of the Apple Public License,
70 * Version 2.0.
71 */
72
73 /*
74 * DEPRECATED sysctl system call code
75 *
76 * Everything in this file is deprecated. Sysctls should be handled
77 * by the code in kern_newsysctl.c.
78 * The remaining "case" sections are supposed to be converted into
79 * SYSCTL_*-style definitions, and as soon as all of them are gone,
80 * this source file is supposed to die.
81 *
82 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
83 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
84 */
85
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/malloc.h>
90 #include <sys/proc_internal.h>
91 #include <sys/kauth.h>
92 #include <sys/file_internal.h>
93 #include <sys/vnode_internal.h>
94 #include <sys/unistd.h>
95 #include <sys/buf.h>
96 #include <sys/ioctl.h>
97 #include <sys/namei.h>
98 #include <sys/tty.h>
99 #include <sys/disklabel.h>
100 #include <sys/vm.h>
101 #include <sys/sysctl.h>
102 #include <sys/user.h>
103 #include <sys/aio_kern.h>
104 #include <sys/reboot.h>
105
106 #include <security/audit/audit.h>
107 #include <kern/kalloc.h>
108
109 #include <mach/machine.h>
110 #include <mach/mach_types.h>
111 #include <mach/vm_param.h>
112 #include <kern/mach_param.h>
113 #include <kern/task.h>
114 #include <kern/lock.h>
115 #include <kern/debug.h>
116 #include <vm/vm_kern.h>
117 #include <vm/vm_map.h>
118 #include <mach/host_info.h>
119
120 #include <sys/mount_internal.h>
121 #include <sys/kdebug.h>
122 #include <sys/sysproto.h>
123
124 #include <IOKit/IOPlatformExpert.h>
125 #include <pexpert/pexpert.h>
126
127 #include <machine/machine_routines.h>
128 #include <machine/exec.h>
129
130 #include <vm/vm_protos.h>
131
132 #if defined(__i386__) || defined(__x86_64__)
133 #include <i386/cpuid.h>
134 #endif
135
136 sysctlfn kern_sysctl;
137 #if DEBUG
138 sysctlfn debug_sysctl;
139 #endif
140 extern sysctlfn net_sysctl;
141 extern sysctlfn cpu_sysctl;
142 extern int aio_max_requests;
143 extern int aio_max_requests_per_process;
144 extern int aio_worker_threads;
145 extern int lowpri_IO_window_msecs;
146 extern int lowpri_IO_delay_msecs;
147 extern int nx_enabled;
148 extern int speculative_reads_disabled;
149 extern unsigned int preheat_pages_max;
150 extern unsigned int preheat_pages_min;
151 extern unsigned int preheat_pages_mult;
152 extern long numvnodes;
153
154 static void
155 fill_user32_eproc(proc_t p, struct user32_eproc *ep);
156 static void
157 fill_user32_externproc(proc_t p, struct user32_extern_proc *exp);
158 static void
159 fill_user64_eproc(proc_t p, struct user64_eproc *ep);
160 static void
161 fill_user64_proc(proc_t p, struct user64_kinfo_proc *kp);
162 static void
163 fill_user64_externproc(proc_t p, struct user64_extern_proc *exp);
164 extern int
165 kdbg_control(int *name, u_int namelen, user_addr_t where, size_t * sizep);
166 int
167 kdebug_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep, proc_t p);
168 #if NFSCLIENT
169 extern int
170 netboot_root(void);
171 #endif
172 int
173 pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
174 proc_t p);
175 __private_extern__ kern_return_t
176 reset_vmobjectcache(unsigned int val1, unsigned int val2);
177 int
178 sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep);
179 int
180 sysctl_doprof(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
181 user_addr_t newp, size_t newlen);
182 static void
183 fill_user32_proc(proc_t p, struct user32_kinfo_proc *kp);
184 int
185 sysctl_procargs(int *name, u_int namelen, user_addr_t where,
186 size_t *sizep, proc_t cur_proc);
187 static int
188 sysctl_procargs2(int *name, u_int namelen, user_addr_t where, size_t *sizep,
189 proc_t cur_proc);
190 static int
191 sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
192 proc_t cur_proc, int argc_yes);
193 int
194 sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
195 size_t newlen, void *sp, int len);
196
197 static int sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg);
198 static int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg);
199 static int sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg);
200 static int sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg);
201 static int sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg);
202 #if CONFIG_LCTX
203 static int sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg);
204 #endif
205 int sysdoproc_callback(proc_t p, void *arg);
206
207 static int __sysctl_funneled(proc_t p, struct __sysctl_args *uap, int32_t *retval);
208
209 extern void IORegistrySetOSBuildVersion(char * build_version);
210
211 static void
212 fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64)
213 {
214 la64->ldavg[0] = la->ldavg[0];
215 la64->ldavg[1] = la->ldavg[1];
216 la64->ldavg[2] = la->ldavg[2];
217 la64->fscale = (user64_long_t)la->fscale;
218 }
219
220 static void
221 fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32)
222 {
223 la32->ldavg[0] = la->ldavg[0];
224 la32->ldavg[1] = la->ldavg[1];
225 la32->ldavg[2] = la->ldavg[2];
226 la32->fscale = (user32_long_t)la->fscale;
227 }
228
229 /*
230 * Locking and stats
231 */
232 static struct sysctl_lock memlock;
233
234 /* sysctl() syscall */
235 int
236 __sysctl(proc_t p, struct __sysctl_args *uap, int32_t *retval)
237 {
238 boolean_t funnel_state;
239 int error;
240
241 funnel_state = thread_funnel_set(kernel_flock, TRUE);
242 error = __sysctl_funneled(p, uap, retval);
243 thread_funnel_set(kernel_flock, funnel_state);
244 return(error);
245 }
246
/*
 * Funneled body of the deprecated sysctl(2) syscall.
 *
 * Copies in the MIB name, audits it, performs superuser and (optionally)
 * MAC checks, wires the caller's "old" buffer where required, dispatches
 * to the legacy top-level handler (kern/vfs/debug), and finally falls
 * back to the new-style userland_sysctl() when the legacy path returns
 * ENOTSUP.  On success (or ENOMEM) the available/copied length is
 * written back through uap->oldlenp.
 */
static int
__sysctl_funneled(proc_t p, struct __sysctl_args *uap, __unused int32_t *retval)
{
	int error, dolock = 1;
	size_t savelen = 0, oldlen = 0, newlen;
	sysctlfn *fnp = NULL;
	int name[CTL_MAXNAME];
	int error1;
	boolean_t memlock_taken = FALSE;
	boolean_t vslock_taken = FALSE;
#if CONFIG_MACF
	kauth_cred_t my_cred;
#endif

	/*
	 * all top-level sysctl names are non-terminal
	 */
	if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)
		return (EINVAL);
	error = copyin(uap->name, &name[0], uap->namelen * sizeof(int));
	if (error)
		return (error);

	AUDIT_ARG(ctlname, name, uap->namelen);

	if (proc_is64bit(p)) {
		/* uap->newlen is a size_t value which grows to 64 bits
		 * when coming from a 64-bit process.  since it's doubtful we'll
		 * have a sysctl newp buffer greater than 4GB we shrink it to size_t
		 */
		newlen = CAST_DOWN(size_t, uap->newlen);
	}
	else {
		newlen = uap->newlen;
	}

	/* CTL_UNSPEC is used to get oid to AUTO_OID */
	/*
	 * Writes under CTL_KERN (except a whitelist of per-process names),
	 * CTL_HW and CTL_VM require superuser.
	 */
	if (uap->new != USER_ADDR_NULL
	    && ((name[0] == CTL_KERN
		&& !(name[1] == KERN_IPC || name[1] == KERN_PANICINFO || name[1] == KERN_PROCDELAYTERM ||
		     name[1] == KERN_PROCNAME || name[1] == KERN_RAGEVNODE || name[1] == KERN_CHECKOPENEVT || name[1] == KERN_THREADNAME))
		|| (name[0] == CTL_HW)
		|| (name[0] == CTL_VM))
	    && (error = suser(kauth_cred_get(), &p->p_acflag)))
		return (error);

	/* XXX: KERN, VFS and DEBUG are handled by their respective functions,
	 * but there is a fallback for all sysctls other than VFS to
	 * userland_sysctl() - KILL THIS! */
	switch (name[0]) {
	case CTL_KERN:
		fnp = kern_sysctl;
		/* only VNODE/FILE/PROC need the old buffer wired down */
		if ((name[1] != KERN_VNODE) && (name[1] != KERN_FILE)
			&& (name[1] != KERN_PROC))
			dolock = 0;
		break;
	case CTL_VFS:
		fnp = vfs_sysctl;
		break;
#if DEBUG
	case CTL_DEBUG:
		fnp = debug_sysctl;
		break;
#endif
	default:
		fnp = NULL;
	}

	if (uap->oldlenp != USER_ADDR_NULL) {
		uint64_t oldlen64 = fuulong(uap->oldlenp);

		oldlen = CAST_DOWN(size_t, oldlen64);
		/*
		 * If more than 4G, clamp to 4G - useracc() below will catch
		 * with an EFAULT, if it's actually necessary.
		 */
		if (oldlen64 > 0x00000000ffffffffULL)
			oldlen = 0xffffffffUL;
	}

	if (uap->old != USER_ADDR_NULL) {
		/* verify the user buffer is writable before doing any work */
		if (!useracc(uap->old, (user_size_t)oldlen, B_WRITE))
			return (EFAULT);
		/*
		 * The kernel debug mechanism does not need to take this lock, and
		 * we don't grab the memlock around calls to KERN_PROC because it is reentrant.
		 * Grabbing the lock for a KERN_PROC sysctl makes a deadlock possible 5024049.
		 */
		if (!((name[1] == KERN_KDEBUG) && (name[2] == KERN_KDGETENTROPY)) &&
		    !(name[1] == KERN_PROC)) {
			MEMLOCK_LOCK();
			memlock_taken = TRUE;
		}

		/* wire the destination so the handler can't fault mid-copy */
		if (dolock && oldlen) {
			if ((error = vslock(uap->old, (user_size_t)oldlen))) {
				if (memlock_taken == TRUE)
					MEMLOCK_UNLOCK();
				return(error);
			}
			savelen = oldlen;
			vslock_taken = TRUE;
		}
	}

#if CONFIG_MACF
	my_cred = kauth_cred_proc_ref(p);
	error = mac_system_check_sysctl(
	    my_cred,
	    (int *) name,
	    uap->namelen,
	    uap->old,
	    uap->oldlenp,
	    fnp == kern_sysctl ? 1 : 0,
	    uap->new,
	    newlen
	);
	kauth_cred_unref(&my_cred);
	if (!error) {
#endif
	/* legacy dispatch: handler sees the name with the top level stripped */
	if (fnp) {
		error = (*fnp)(name + 1, uap->namelen - 1, uap->old,
			&oldlen, uap->new, newlen, p);
	}
	else
		error = ENOTSUP;
#if CONFIG_MACF
	}
#endif

	/* undo wiring/locking in reverse order of acquisition */
	if (vslock_taken == TRUE) {
		error1 = vsunlock(uap->old, (user_size_t)savelen, B_WRITE);
		if (!error)
			error = error1;
	}
	if (memlock_taken == TRUE)
		MEMLOCK_UNLOCK();

	if ( (name[0] != CTL_VFS) && (error == ENOTSUP)) {
		size_t tmp = oldlen;
		boolean_t funnel_state;

		/*
		 * Drop the funnel when calling new sysctl code, which will conditionally
		 * grab the funnel if it really needs to.
		 */
		funnel_state = thread_funnel_set(kernel_flock, FALSE);

		error = userland_sysctl(p, name, uap->namelen, uap->old, &tmp,
		                        uap->new, newlen, &oldlen);

		thread_funnel_set(kernel_flock, funnel_state);
	}

	/* ENOMEM still reports the required length back to the caller */
	if ((error) && (error != ENOMEM))
		return (error);

	if (uap->oldlenp != USER_ADDR_NULL)
		error = suulong(uap->oldlenp, oldlen);

	return (error);
}
409
410 /*
411 * Attributes stored in the kernel.
412 */
413 __private_extern__ char corefilename[MAXPATHLEN+1];
414 __private_extern__ int do_coredump;
415 __private_extern__ int sugid_coredump;
416
417 #if COUNT_SYSCALLS
418 __private_extern__ int do_count_syscalls;
419 #endif
420
421 #ifdef INSECURE
422 int securelevel = -1;
423 #else
424 int securelevel;
425 #endif
426
427 static int
428 sysctl_affinity(
429 int *name,
430 u_int namelen,
431 user_addr_t oldBuf,
432 size_t *oldSize,
433 user_addr_t newBuf,
434 __unused size_t newSize,
435 proc_t cur_proc)
436 {
437 if (namelen < 1)
438 return (ENOTSUP);
439
440 if (name[0] == 0 && 1 == namelen) {
441 return sysctl_rdint(oldBuf, oldSize, newBuf,
442 (cur_proc->p_flag & P_AFFINITY) ? 1 : 0);
443 } else if (name[0] == 1 && 2 == namelen) {
444 if (name[1] == 0) {
445 OSBitAndAtomic(~((uint32_t)P_AFFINITY), &cur_proc->p_flag);
446 } else {
447 OSBitOrAtomic(P_AFFINITY, &cur_proc->p_flag);
448 }
449 return 0;
450 }
451 return (ENOTSUP);
452 }
453
/*
 * KERN_TRANSLATE handler: report whether the process named by name[0]
 * (a pid) has P_TRANSLATED set (i.e. runs translated), returned as 1/0
 * through sysctl_rdint().
 *
 * Only the owner of the target process (matching uid) or the superuser
 * may query; otherwise EPERM.  EINVAL if the pid does not exist.
 */
static int
sysctl_translate(
	int *name,
	u_int namelen,
	user_addr_t oldBuf,
	size_t *oldSize,
	user_addr_t newBuf,
	__unused size_t newSize,
	proc_t cur_proc)
{
	proc_t p;
	int istranslated = 0;
	kauth_cred_t my_cred;
	uid_t uid;

	if (namelen != 1)
		return (ENOTSUP);

	/* proc_find() takes a reference; every exit path must proc_rele() */
	p = proc_find(name[0]);
	if (p == NULL)
		return (EINVAL);

	/* snapshot the target's uid under a credential reference */
	my_cred = kauth_cred_proc_ref(p);
	uid = kauth_cred_getuid(my_cred);
	kauth_cred_unref(&my_cred);
	if ((uid != kauth_cred_getuid(kauth_cred_get()))
	    && suser(kauth_cred_get(), &cur_proc->p_acflag)) {
		proc_rele(p);
		return (EPERM);
	}

	istranslated = (p->p_flag & P_TRANSLATED);
	proc_rele(p);
	return sysctl_rdint(oldBuf, oldSize, newBuf,
		(istranslated != 0) ? 1 : 0);
}
490
/*
 * Resolve the configured architecture-handler binary for 'arch'
 * (currently only CPU_TYPE_POWERPC / the Rosetta handler) and record
 * its filesystem id and file id so exec can recognize it later.
 *
 * Returns EBADARCH for an unsupported architecture, any namei() or
 * vnode_getattr() error, or EACCES when the handler resides on a
 * noexec mount or is not a regular file.
 */
int
set_archhandler(__unused proc_t p, int arch)
{
	int error;
	struct nameidata nd;
	struct vnode_attr va;
	vfs_context_t ctx = vfs_context_current();
	struct exec_archhandler *archhandler;

	switch(arch) {
	case CPU_TYPE_POWERPC:
		archhandler = &exec_archhandler_ppc;
		break;
	default:
		return (EBADARCH);
	}

	/* look up the handler path; namei() returns the vnode referenced */
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
	       CAST_USER_ADDR_T(archhandler->path), ctx);
	error = namei(&nd);
	if (error)
		return (error);
	nameidone(&nd);

	/* Check mount point */
	if ((nd.ni_vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    (nd.ni_vp->v_type != VREG)) {
		vnode_put(nd.ni_vp);
		return (EACCES);
	}

	/* fetch only the attributes needed to identify the file */
	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_fsid);
	VATTR_WANTED(&va, va_fileid);
	error = vnode_getattr(nd.ni_vp, &va, ctx);
	if (error) {
		vnode_put(nd.ni_vp);
		return (error);
	}
	vnode_put(nd.ni_vp);

	archhandler->fsid = va.va_fsid;
	archhandler->fileid = (u_int32_t)va.va_fileid;
	return 0;
}
536
537 /* XXX remove once Rosetta is rev'ed */
538 /*****************************************************************************/
539 static int
540 sysctl_exec_archhandler_ppc(
541 __unused int *name,
542 __unused u_int namelen,
543 user_addr_t oldBuf,
544 size_t *oldSize,
545 user_addr_t newBuf,
546 size_t newSize,
547 proc_t p)
548 {
549 int error;
550 size_t len;
551 char handler[sizeof(exec_archhandler_ppc.path)];
552 vfs_context_t ctx = vfs_context_current();
553
554 if (oldSize) {
555 len = strlen(exec_archhandler_ppc.path) + 1;
556 if (oldBuf) {
557 if (*oldSize < len)
558 return (ENOMEM);
559 error = copyout(exec_archhandler_ppc.path, oldBuf, len);
560 if (error)
561 return (error);
562 }
563 *oldSize = len - 1;
564 }
565 if (newBuf) {
566 error = suser(vfs_context_ucred(ctx), &p->p_acflag);
567 if (error)
568 return (error);
569 if (newSize >= sizeof(exec_archhandler_ppc.path))
570 return (ENAMETOOLONG);
571 error = copyin(newBuf, handler, newSize);
572 if (error)
573 return (error);
574 handler[newSize] = 0;
575 strlcpy(exec_archhandler_ppc.path, handler, MAXPATHLEN);
576 error = set_archhandler(p, CPU_TYPE_POWERPC);
577 if (error)
578 return (error);
579 }
580 return 0;
581 }
582 /*****************************************************************************/
583
584 static int
585 sysctl_handle_exec_archhandler_ppc(struct sysctl_oid *oidp, void *arg1,
586 int arg2, struct sysctl_req *req)
587 {
588 int error = 0;
589
590 error = sysctl_handle_string(oidp, arg1, arg2, req);
591
592 if (error)
593 goto done;
594
595 if (req->newptr)
596 error = set_archhandler(req->p, CPU_TYPE_POWERPC);
597
598 done:
599 return error;
600
601 }
602
603 static int
604 sysctl_handle_kern_threadname( __unused struct sysctl_oid *oidp, __unused void *arg1,
605 __unused int arg2, struct sysctl_req *req)
606 {
607 int error;
608 struct uthread *ut = get_bsdthread_info(current_thread());
609 user_addr_t oldp=0, newp=0;
610 size_t *oldlenp=NULL;
611 size_t newlen=0;
612
613 oldp = req->oldptr;
614 oldlenp = &(req->oldlen);
615 newp = req->newptr;
616 newlen = req->newlen;
617
618 /* We want the current length, and maybe the string itself */
619 if(oldlenp) {
620 /* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */
621 size_t currlen = MAXTHREADNAMESIZE - 1;
622
623 if(ut->pth_name)
624 /* use length of current thread name */
625 currlen = strlen(ut->pth_name);
626 if(oldp) {
627 if(*oldlenp < currlen)
628 return ENOMEM;
629 /* NOTE - we do not copy the NULL terminator */
630 if(ut->pth_name) {
631 error = copyout(ut->pth_name,oldp,currlen);
632 if(error)
633 return error;
634 }
635 }
636 /* return length of thread name minus NULL terminator (just like strlen) */
637 req->oldidx = currlen;
638 }
639
640 /* We want to set the name to something */
641 if(newp)
642 {
643 if(newlen > (MAXTHREADNAMESIZE - 1))
644 return ENAMETOOLONG;
645 if(!ut->pth_name)
646 {
647 ut->pth_name = (char*)kalloc( MAXTHREADNAMESIZE );
648 if(!ut->pth_name)
649 return ENOMEM;
650 }
651 bzero(ut->pth_name, MAXTHREADNAMESIZE);
652 error = copyin(newp, ut->pth_name, newlen);
653 if(error)
654 return error;
655 }
656
657 return 0;
658 }
659
660 SYSCTL_PROC(_kern, KERN_THREADNAME, threadname, CTLFLAG_ANYBODY | CTLTYPE_STRING | CTLFLAG_RW, 0, 0, sysctl_handle_kern_threadname,"A","");
661
662 SYSCTL_NODE(_kern, KERN_EXEC, exec, CTLFLAG_RD|CTLFLAG_LOCKED, 0, "");
663
664 SYSCTL_NODE(_kern_exec, OID_AUTO, archhandler, CTLFLAG_RD|CTLFLAG_LOCKED, 0, "");
665
666 SYSCTL_PROC(_kern_exec_archhandler, OID_AUTO, powerpc,
667 CTLTYPE_STRING | CTLFLAG_RW, exec_archhandler_ppc.path, 0,
668 sysctl_handle_exec_archhandler_ppc, "A", "");
669
670 extern int get_kernel_symfile(proc_t, char **);
671 __private_extern__ int
672 sysctl_dopanicinfo(int *, u_int, user_addr_t, size_t *, user_addr_t,
673 size_t, proc_t);
674
/*
 * kernel related system variables.
 *
 * Legacy dispatcher for the CTL_KERN top level: validates that only the
 * listed names may carry more MIB components, then forwards to the
 * per-name handler.  Names not handled here fall through to ENOTSUP,
 * which makes the caller retry via the new-style userland_sysctl().
 */
int
kern_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, proc_t p)
{
	/* all sysctl names not listed below are terminal at this level */
	if (namelen != 1
		&& !(name[0] == KERN_PROC
			|| name[0] == KERN_PROF
			|| name[0] == KERN_KDEBUG
#if !CONFIG_EMBEDDED
			|| name[0] == KERN_PROCARGS
#endif
			|| name[0] == KERN_PROCARGS2
			|| name[0] == KERN_IPC
			|| name[0] == KERN_SYSV
			|| name[0] == KERN_AFFINITY
			|| name[0] == KERN_TRANSLATE
			|| name[0] == KERN_EXEC
			|| name[0] == KERN_PANICINFO
			|| name[0] == KERN_POSIX
			|| name[0] == KERN_TFP
			|| name[0] == KERN_TTY
#if CONFIG_LCTX
			|| name[0] == KERN_LCTX
#endif
			)
		)
		return (ENOTDIR);		/* overloaded */

	/* each handler receives the name with the KERN_* component stripped */
	switch (name[0]) {
	case KERN_PROC:
		return (sysctl_doproc(name + 1, namelen - 1, oldp, oldlenp));
#ifdef GPROF
	case KERN_PROF:
		return (sysctl_doprof(name + 1, namelen - 1, oldp, oldlenp,
		    newp, newlen));
#endif
	case KERN_KDEBUG:
		return (kdebug_ops(name + 1, namelen - 1, oldp, oldlenp, p));
#if !CONFIG_EMBEDDED
	case KERN_PROCARGS:
		/* new one as it does not use kinfo_proc */
		return (sysctl_procargs(name + 1, namelen - 1, oldp, oldlenp, p));
#endif
	case KERN_PROCARGS2:
		/* new one as it does not use kinfo_proc */
		return (sysctl_procargs2(name + 1, namelen - 1, oldp, oldlenp, p));
#if PANIC_INFO
	case KERN_PANICINFO:
		return(sysctl_dopanicinfo(name + 1, namelen - 1, oldp, oldlenp,
			newp, newlen, p));
#endif
	case KERN_AFFINITY:
		return sysctl_affinity(name+1, namelen-1, oldp, oldlenp,
					newp, newlen, p);
	case KERN_TRANSLATE:
		return sysctl_translate(name+1, namelen-1, oldp, oldlenp, newp,
				newlen, p);

	/* XXX remove once Rosetta has rev'ed */
	case KERN_EXEC:
		return sysctl_exec_archhandler_ppc(name+1, namelen-1, oldp,
						oldlenp, newp, newlen, p);
#if COUNT_SYSCALLS
	case KERN_COUNT_SYSCALLS:
	{
		/*
		 * NOTE(review): 'error' and 'tmp' are not declared anywhere in
		 * this function, so this branch looks like it cannot compile
		 * when COUNT_SYSCALLS is enabled — confirm against a build
		 * with that option set.
		 */
		/* valid values passed in:
		 * = 0 means don't keep called counts for each bsd syscall
		 * > 0 means keep called counts for each bsd syscall
		 * = 2 means dump current counts to the system log
		 * = 3 means reset all counts
		 * for example, to dump current counts:
		 * sysctl -w kern.count_calls=2
		 */
		error = sysctl_int(oldp, oldlenp, newp, newlen, &tmp);
		if ( error != 0 ) {
			return (error);
		}

		if ( tmp == 1 ) {
			do_count_syscalls = 1;
		}
		else if ( tmp == 0 || tmp == 2 || tmp == 3 ) {
			extern int nsysent;
			extern int syscalls_log[];
			extern const char * syscallnames[];
			int i;
			for ( i = 0; i < nsysent; i++ ) {
				if ( syscalls_log[i] != 0 ) {
					if ( tmp == 2 ) {
						printf("%d calls - name %s \n", syscalls_log[i], syscallnames[i]);
					}
					else {
						syscalls_log[i] = 0;
					}
				}
			}
			if ( tmp != 0 ) {
				do_count_syscalls = 1;
			}
		}
		return (0);
	}
#endif
	default:
		return (ENOTSUP);
	}
	/* NOTREACHED */
}
787
788 #if DEBUG
789 /*
790 * Debugging related system variables.
791 */
792 #if DIAGNOSTIC
793 extern
794 #endif /* DIAGNOSTIC */
795 struct ctldebug debug0, debug1;
796 struct ctldebug debug2, debug3, debug4;
797 struct ctldebug debug5, debug6, debug7, debug8, debug9;
798 struct ctldebug debug10, debug11, debug12, debug13, debug14;
799 struct ctldebug debug15, debug16, debug17, debug18, debug19;
800 static struct ctldebug *debugvars[CTL_DEBUG_MAXID] = {
801 &debug0, &debug1, &debug2, &debug3, &debug4,
802 &debug5, &debug6, &debug7, &debug8, &debug9,
803 &debug10, &debug11, &debug12, &debug13, &debug14,
804 &debug15, &debug16, &debug17, &debug18, &debug19,
805 };
806 int
807 debug_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
808 user_addr_t newp, size_t newlen, __unused proc_t p)
809 {
810 struct ctldebug *cdp;
811
812 /* all sysctl names at this level are name and field */
813 if (namelen != 2)
814 return (ENOTSUP); /* overloaded */
815 if (name[0] < 0 || name[0] >= CTL_DEBUG_MAXID)
816 return (ENOTSUP);
817 cdp = debugvars[name[0]];
818 if (cdp->debugname == 0)
819 return (ENOTSUP);
820 switch (name[1]) {
821 case CTL_DEBUG_NAME:
822 return (sysctl_rdstring(oldp, oldlenp, newp, cdp->debugname));
823 case CTL_DEBUG_VALUE:
824 return (sysctl_int(oldp, oldlenp, newp, newlen, cdp->debugvar));
825 default:
826 return (ENOTSUP);
827 }
828 /* NOTREACHED */
829 }
830 #endif /* DEBUG */
831
832 /*
833 * The following sysctl_* functions should not be used
834 * any more, as they can only cope with callers in
835 * user mode: Use new-style
836 * sysctl_io_number()
837 * sysctl_io_string()
838 * sysctl_io_opaque()
839 * instead.
840 */
841
842 /*
843 * Validate parameters and get old / set new parameters
844 * for an integer-valued sysctl function.
845 */
846 int
847 sysctl_int(user_addr_t oldp, size_t *oldlenp,
848 user_addr_t newp, size_t newlen, int *valp)
849 {
850 int error = 0;
851
852 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
853 return (EFAULT);
854 if (oldp && *oldlenp < sizeof(int))
855 return (ENOMEM);
856 if (newp && newlen != sizeof(int))
857 return (EINVAL);
858 *oldlenp = sizeof(int);
859 if (oldp)
860 error = copyout(valp, oldp, sizeof(int));
861 if (error == 0 && newp) {
862 error = copyin(newp, valp, sizeof(int));
863 AUDIT_ARG(value32, *valp);
864 }
865 return (error);
866 }
867
868 /*
869 * As above, but read-only.
870 */
871 int
872 sysctl_rdint(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, int val)
873 {
874 int error = 0;
875
876 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
877 return (EFAULT);
878 if (oldp && *oldlenp < sizeof(int))
879 return (ENOMEM);
880 if (newp)
881 return (EPERM);
882 *oldlenp = sizeof(int);
883 if (oldp)
884 error = copyout((caddr_t)&val, oldp, sizeof(int));
885 return (error);
886 }
887
888 /*
889 * Validate parameters and get old / set new parameters
890 * for an quad(64bit)-valued sysctl function.
891 */
892 int
893 sysctl_quad(user_addr_t oldp, size_t *oldlenp,
894 user_addr_t newp, size_t newlen, quad_t *valp)
895 {
896 int error = 0;
897
898 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
899 return (EFAULT);
900 if (oldp && *oldlenp < sizeof(quad_t))
901 return (ENOMEM);
902 if (newp && newlen != sizeof(quad_t))
903 return (EINVAL);
904 *oldlenp = sizeof(quad_t);
905 if (oldp)
906 error = copyout(valp, oldp, sizeof(quad_t));
907 if (error == 0 && newp)
908 error = copyin(newp, valp, sizeof(quad_t));
909 return (error);
910 }
911
912 /*
913 * As above, but read-only.
914 */
915 int
916 sysctl_rdquad(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, quad_t val)
917 {
918 int error = 0;
919
920 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
921 return (EFAULT);
922 if (oldp && *oldlenp < sizeof(quad_t))
923 return (ENOMEM);
924 if (newp)
925 return (EPERM);
926 *oldlenp = sizeof(quad_t);
927 if (oldp)
928 error = copyout((caddr_t)&val, oldp, sizeof(quad_t));
929 return (error);
930 }
931
932 /*
933 * Validate parameters and get old / set new parameters
934 * for a string-valued sysctl function. Unlike sysctl_string, if you
935 * give it a too small (but larger than 0 bytes) buffer, instead of
936 * returning ENOMEM, it truncates the returned string to the buffer
937 * size. This preserves the semantics of some library routines
938 * implemented via sysctl, which truncate their returned data, rather
939 * than simply returning an error. The returned string is always NUL
940 * terminated.
941 */
942 int
943 sysctl_trstring(user_addr_t oldp, size_t *oldlenp,
944 user_addr_t newp, size_t newlen, char *str, int maxlen)
945 {
946 int len, copylen, error = 0;
947
948 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
949 return (EFAULT);
950 copylen = len = strlen(str) + 1;
951 if (oldp && (len < 0 || *oldlenp < 1))
952 return (ENOMEM);
953 if (oldp && (*oldlenp < (size_t)len))
954 copylen = *oldlenp + 1;
955 if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
956 return (EINVAL);
957 *oldlenp = copylen - 1; /* deal with NULL strings correctly */
958 if (oldp) {
959 error = copyout(str, oldp, copylen);
960 if (!error) {
961 unsigned char c = 0;
962 /* NUL terminate */
963 oldp += *oldlenp;
964 error = copyout((void *)&c, oldp, sizeof(char));
965 }
966 }
967 if (error == 0 && newp) {
968 error = copyin(newp, str, newlen);
969 str[newlen] = 0;
970 AUDIT_ARG(text, (char *)str);
971 }
972 return (error);
973 }
974
975 /*
976 * Validate parameters and get old / set new parameters
977 * for a string-valued sysctl function.
978 */
979 int
980 sysctl_string(user_addr_t oldp, size_t *oldlenp,
981 user_addr_t newp, size_t newlen, char *str, int maxlen)
982 {
983 int len, error = 0;
984
985 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
986 return (EFAULT);
987 len = strlen(str) + 1;
988 if (oldp && (len < 0 || *oldlenp < (size_t)len))
989 return (ENOMEM);
990 if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
991 return (EINVAL);
992 *oldlenp = len -1; /* deal with NULL strings correctly */
993 if (oldp) {
994 error = copyout(str, oldp, len);
995 }
996 if (error == 0 && newp) {
997 error = copyin(newp, str, newlen);
998 str[newlen] = 0;
999 AUDIT_ARG(text, (char *)str);
1000 }
1001 return (error);
1002 }
1003
1004 /*
1005 * As above, but read-only.
1006 */
1007 int
1008 sysctl_rdstring(user_addr_t oldp, size_t *oldlenp,
1009 user_addr_t newp, char *str)
1010 {
1011 int len, error = 0;
1012
1013 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1014 return (EFAULT);
1015 len = strlen(str) + 1;
1016 if (oldp && *oldlenp < (size_t)len)
1017 return (ENOMEM);
1018 if (newp)
1019 return (EPERM);
1020 *oldlenp = len;
1021 if (oldp)
1022 error = copyout(str, oldp, len);
1023 return (error);
1024 }
1025
1026 /*
1027 * Validate parameters and get old / set new parameters
1028 * for a structure oriented sysctl function.
1029 */
1030 int
1031 sysctl_struct(user_addr_t oldp, size_t *oldlenp,
1032 user_addr_t newp, size_t newlen, void *sp, int len)
1033 {
1034 int error = 0;
1035
1036 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1037 return (EFAULT);
1038 if (oldp && (len < 0 || *oldlenp < (size_t)len))
1039 return (ENOMEM);
1040 if (newp && (len < 0 || newlen > (size_t)len))
1041 return (EINVAL);
1042 if (oldp) {
1043 *oldlenp = len;
1044 error = copyout(sp, oldp, len);
1045 }
1046 if (error == 0 && newp)
1047 error = copyin(newp, sp, len);
1048 return (error);
1049 }
1050
1051 /*
1052 * Validate parameters and get old parameters
1053 * for a structure oriented sysctl function.
1054 */
1055 int
1056 sysctl_rdstruct(user_addr_t oldp, size_t *oldlenp,
1057 user_addr_t newp, void *sp, int len)
1058 {
1059 int error = 0;
1060
1061 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1062 return (EFAULT);
1063 if (oldp && (len < 0 || *oldlenp < (size_t)len))
1064 return (ENOMEM);
1065 if (newp)
1066 return (EPERM);
1067 *oldlenp = len;
1068 if (oldp)
1069 error = copyout(sp, oldp, len);
1070 return (error);
1071 }
1072
/*
 * Get file structures.
 *
 * KERN_FILE handler: a size probe (NULL old pointer) returns an
 * overestimate of the space needed; otherwise the global filehead is
 * copied out followed by one extern_file record per open fileglob.
 *
 * NOTE(review): the filehead list is traversed here with no visible
 * locking — presumably the caller's funnel/memlock serializes this;
 * confirm before relying on it.
 */
static int
sysctl_file
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error;
	struct fileglob *fg;
	struct extern_file nef;

	if (req->oldptr == USER_ADDR_NULL) {
		/*
		 * overestimate by 10 files
		 */
		req->oldidx = sizeof(filehead) + (nfiles + 10) * sizeof(struct extern_file);
		return (0);
	}

	/*
	 * first copyout filehead
	 */
	error = SYSCTL_OUT(req, &filehead, sizeof(filehead));
	if (error)
		return (error);

	/*
	 * followed by an array of file structures
	 */
	for (fg = filehead.lh_first; fg != 0; fg = fg->f_list.le_next) {
		/* flatten each fileglob into the exported extern_file layout */
		nef.f_list.le_next = (struct extern_file *)fg->f_list.le_next;
		nef.f_list.le_prev = (struct extern_file **)fg->f_list.le_prev;
		nef.f_flag = (fg->fg_flag & FMASK);
		nef.f_type = fg->fg_type;
		nef.f_count = fg->fg_count;
		nef.f_msgcount = fg->fg_msgcount;
		nef.f_cred = fg->fg_cred;
		nef.f_ops = fg->fg_ops;
		nef.f_offset = fg->fg_offset;
		nef.f_data = fg->fg_data;
		error = SYSCTL_OUT(req, &nef, sizeof(nef));
		if (error)
			return (error);
	}
	return (0);
}
1119
/* kern.file: structured dump of the system open-file table (see sysctl_file). */
SYSCTL_PROC(_kern, KERN_FILE, file,
	    CTLTYPE_STRUCT | CTLFLAG_RW,
	    0, 0, sysctl_file, "S,filehead", "");
1123
1124 static int
1125 sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg)
1126 {
1127 if (p->p_pid != (pid_t)*(int*)arg)
1128 return(0);
1129 else
1130 return(1);
1131 }
1132
1133 static int
1134 sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg)
1135 {
1136 if (p->p_pgrpid != (pid_t)*(int*)arg)
1137 return(0);
1138 else
1139 return(1);
1140 }
1141
/*
 * Process filter: match processes whose controlling terminal device equals
 * the dev_t pointed to by arg.  Takes the kernel funnel around the session
 * walk; returns 1 on match, 0 otherwise.
 */
static int
sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg)
{
	boolean_t funnel_state;
	int retval;
	struct tty *tp;


	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	/* This is very racy but list lock is held.. Hmmm. */
	if ((p->p_flag & P_CONTROLT) == 0 ||
		(p->p_pgrp == NULL) || (p->p_pgrp->pg_session == NULL) ||
		(tp = SESSION_TP(p->p_pgrp->pg_session)) == TTY_NULL ||
		tp->t_dev != (dev_t)*(int*)arg)
		retval = 0;
	else
		retval = 1;

	thread_funnel_set(kernel_flock, funnel_state);

	return(retval);
}
1164
1165 static int
1166 sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg)
1167 {
1168 kauth_cred_t my_cred;
1169 uid_t uid;
1170
1171 if (p->p_ucred == NULL)
1172 return(0);
1173 my_cred = kauth_cred_proc_ref(p);
1174 uid = kauth_cred_getuid(my_cred);
1175 kauth_cred_unref(&my_cred);
1176
1177 if (uid != (uid_t)*(int*)arg)
1178 return(0);
1179 else
1180 return(1);
1181 }
1182
1183
1184 static int
1185 sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg)
1186 {
1187 kauth_cred_t my_cred;
1188 uid_t ruid;
1189
1190 if (p->p_ucred == NULL)
1191 return(0);
1192 my_cred = kauth_cred_proc_ref(p);
1193 ruid = my_cred->cr_ruid;
1194 kauth_cred_unref(&my_cred);
1195
1196 if (ruid != (uid_t)*(int*)arg)
1197 return(0);
1198 else
1199 return(1);
1200 }
1201
#if CONFIG_LCTX
/*
 * Process filter: match processes in the login context whose id is pointed
 * to by arg.  Returns 1 on match, 0 otherwise.
 */
static int
sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg)
{
	/* A process without a login context can never match. */
	if (p->p_lctx == NULL)
		return (0);
	return (p->p_lctx->lc_id == (pid_t)*(int *)arg) ? 1 : 0;
}
#endif
1213
/*
 * try over estimating by 5 procs
 */
#define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
/*
 * Shared state threaded from sysctl_doproc() through proc_iterate() into
 * sysdoproc_callback() for each candidate process.
 */
struct sysdoproc_args {
	int buflen;		/* remaining space in the user buffer */
	caddr_t kprocp;		/* scratch kinfo_proc (32- or 64-bit layout) */
	boolean_t is_64_bit;	/* TRUE if the requesting process is 64-bit */
	user_addr_t dp;		/* current copyout destination */
	size_t needed;		/* running total of bytes needed for all matches */
	int sizeof_kproc;	/* size of the record pointed to by kprocp */
	int * errorp;		/* out: first copyout error encountered */
	int uidcheck;		/* non-zero: filter by effective uid */
	int ruidcheck;		/* non-zero: filter by real uid */
	int ttycheck;		/* non-zero: filter by controlling tty device */
	int uidval;		/* filter key (uid, or dev_t for tty filtering) */
};
1231
1232 int
1233 sysdoproc_callback(proc_t p, void * arg)
1234 {
1235 struct sysdoproc_args * args = (struct sysdoproc_args *)arg;
1236 int error=0;
1237
1238 if (args->buflen >= args->sizeof_kproc) {
1239 if ((args->ruidcheck != 0) && (sysdoproc_filt_KERN_PROC_RUID(p, &args->uidval) == 0))
1240 return(PROC_RETURNED);
1241 if ((args->uidcheck != 0) && (sysdoproc_filt_KERN_PROC_UID(p, &args->uidval) == 0))
1242 return(PROC_RETURNED);
1243 if ((args->ttycheck != 0) && (sysdoproc_filt_KERN_PROC_TTY(p, &args->uidval) == 0))
1244 return(PROC_RETURNED);
1245
1246 bzero(args->kprocp, args->sizeof_kproc);
1247 if (args->is_64_bit) {
1248 fill_user64_proc(p, (struct user64_kinfo_proc *) args->kprocp);
1249 }
1250 else {
1251 fill_user32_proc(p, (struct user32_kinfo_proc *) args->kprocp);
1252 }
1253 error = copyout(args->kprocp, args->dp, args->sizeof_kproc);
1254 if (error) {
1255 *args->errorp = error;
1256 return(PROC_RETURNED_DONE);
1257 return (error);
1258 }
1259 args->dp += args->sizeof_kproc;
1260 args->buflen -= args->sizeof_kproc;
1261 }
1262 args->needed += args->sizeof_kproc;
1263 return(PROC_RETURNED);
1264 }
1265
/*
 * Backend for the KERN_PROC sysctl: copy out kinfo_proc records for all
 * processes matching name[0] (KERN_PROC_ALL / _PID / _PGRP / _TTY / _UID /
 * _RUID / _LCID, keyed by name[1]).  When where is NULL only the required
 * buffer size (padded by KERN_PROCSLOP) is reported via *sizep.
 */
int
sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep)
{
	user_addr_t dp = where;
	size_t needed = 0;
	int buflen = where != USER_ADDR_NULL ? *sizep : 0;
	int error = 0;
	boolean_t is_64_bit = FALSE;
	struct user32_kinfo_proc  user32_kproc;
	struct user64_kinfo_proc  user_kproc;
	int sizeof_kproc;
	caddr_t kprocp;
	int (*filterfn)(proc_t, void *) = 0;
	struct sysdoproc_args args;
	int uidcheck = 0;
	int ruidcheck = 0;
	int ttycheck = 0;

	/* KERN_PROC_ALL takes no key; every other selector needs name[1]. */
	if (namelen != 2 && !(namelen == 1 && name[0] == KERN_PROC_ALL))
		return (EINVAL);
	/* Pick the record layout matching the caller's address-space size. */
	is_64_bit = proc_is64bit(current_proc());
	if (is_64_bit) {
		sizeof_kproc = sizeof(user_kproc);
		kprocp = (caddr_t) &user_kproc;
	}
	else {
		sizeof_kproc = sizeof(user32_kproc);
		kprocp = (caddr_t) &user32_kproc;
	}


	/*
	 * PID/PGRP/LCID use proc_iterate()'s own filter hook; the uid, ruid
	 * and tty selectors are applied later inside sysdoproc_callback().
	 */
	switch (name[0]) {

		case KERN_PROC_PID:
			filterfn = sysdoproc_filt_KERN_PROC_PID;
			break;

		case KERN_PROC_PGRP:
			filterfn = sysdoproc_filt_KERN_PROC_PGRP;
			break;
	
		case KERN_PROC_TTY:
			ttycheck = 1;
			break;

		case KERN_PROC_UID:
			uidcheck = 1;
			break;

		case KERN_PROC_RUID:
			ruidcheck = 1;
			break;

#if CONFIG_LCTX
		case KERN_PROC_LCID:
			filterfn = sysdoproc_filt_KERN_PROC_LCID;
			break;
#endif
	}

	error = 0;
	args.buflen = buflen;
	args.kprocp = kprocp;
	args.is_64_bit = is_64_bit;
	args.dp = dp;
	args.needed = needed;
	args.errorp = &error;
	args.uidcheck = uidcheck;
	args.ruidcheck = ruidcheck;
	args.ttycheck = ttycheck;
	args.sizeof_kproc = sizeof_kproc;
	/* NOTE(review): with KERN_PROC_ALL (namelen == 1) name[1] is read
	 * here but never consulted by the callback — presumably benign. */
	args.uidval = name[1];

	proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST), sysdoproc_callback, &args, filterfn, &name[1]);

	if (error)
		return(error);

	dp = args.dp;
	needed = args.needed;
	
	if (where != USER_ADDR_NULL) {
		*sizep = dp - where;
		if (needed > *sizep)
			return (ENOMEM);
	} else {
		needed += KERN_PROCSLOP;
		*sizep = needed;
	}
	return (0);
}
1357
/*
 * Fill in an eproc structure for the specified process.
 * 32-bit layout: kernel pointers are truncated to 32 bits for the
 * historical userland structure.  Caller is expected to have zeroed *ep.
 */
static void
fill_user32_eproc(proc_t p, struct user32_eproc *ep)
{
	struct tty *tp;
	kauth_cred_t my_cred;
	struct pgrp * pg;
	struct session * sessp;

	/* Take refs on the process group and session for the duration. */
	pg = proc_pgrp(p);
	sessp = proc_session(p);

	ep->e_paddr = CAST_DOWN_EXPLICIT(uint32_t,p);

	if (pg != PGRP_NULL) {
		ep->e_sess = CAST_DOWN_EXPLICIT(uint32_t,sessp);
		ep->e_pgid = p->p_pgrpid;
		ep->e_jobc = pg->pg_jobc;
		if ((sessp != SESSION_NULL) && sessp->s_ttyvp)
			ep->e_flag = EPROC_CTTY;
	} else {
		ep->e_sess = 0;
		ep->e_pgid = 0;
		ep->e_jobc = 0;
	}
#if CONFIG_LCTX
	if (p->p_lctx) {
		ep->e_lcid = p->p_lctx->lc_id;
	} else {
		ep->e_lcid = 0;
	}
#endif
	ep->e_ppid = p->p_ppid;
	/* Pre-zero the fake historical pcred */
	bzero(&ep->e_pcred, sizeof(ep->e_pcred));
	if (p->p_ucred) {
		my_cred = kauth_cred_proc_ref(p);

		/* A fake historical pcred */
		ep->e_pcred.p_ruid = my_cred->cr_ruid;
		ep->e_pcred.p_svuid = my_cred->cr_svuid;
		ep->e_pcred.p_rgid = my_cred->cr_rgid;
		ep->e_pcred.p_svgid = my_cred->cr_svgid;
		/* A fake historical *kauth_cred_t */
		ep->e_ucred.cr_ref = my_cred->cr_ref;
		ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
		ep->e_ucred.cr_ngroups = my_cred->cr_ngroups;
		bcopy(my_cred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));

		kauth_cred_unref(&my_cred);
	}
	/* No meaningful VM sizes for embryonic or zombie processes. */
	if (p->p_stat == SIDL || p->p_stat == SZOMB) {
		ep->e_vm.vm_tsize = 0;
		ep->e_vm.vm_dsize = 0;
		ep->e_vm.vm_ssize = 0;
	}
	ep->e_vm.vm_rssize = 0;

	/* Controlling terminal info, when the process has one. */
	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
	     (tp = SESSION_TP(sessp))) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = sessp->s_ttypgrpid;
		ep->e_tsess = CAST_DOWN_EXPLICIT(uint32_t,tp->t_session);
	} else
		ep->e_tdev = NODEV;

	if (SESS_LEADER(p, sessp))
		ep->e_flag |= EPROC_SLEADER;
	bzero(&ep->e_wmesg[0], WMESGLEN+1);
	ep->e_xsize = ep->e_xrssize = 0;
	ep->e_xccount = ep->e_xswrss = 0;
	/* Drop the refs taken above. */
	if (sessp != SESSION_NULL)
		session_rele(sessp);
	if(pg != PGRP_NULL)
		pg_rele(pg);
}
1436
/*
 * Fill in an LP64 version of eproc structure for the specified process.
 * Same content as fill_user32_eproc, but kernel pointers are exported as
 * full-width user addresses.  Caller is expected to have zeroed *ep.
 */
static void
fill_user64_eproc(proc_t p, struct user64_eproc *ep)
{
	struct tty *tp;
	struct session *sessp = NULL;
	struct pgrp * pg;
	kauth_cred_t my_cred;

	/* Take refs on the process group and session for the duration. */
	pg = proc_pgrp(p);
	sessp = proc_session(p);

	ep->e_paddr = CAST_USER_ADDR_T(p);
	if (pg != PGRP_NULL) {
		ep->e_sess = CAST_USER_ADDR_T(sessp);
		ep->e_pgid = p->p_pgrpid;
		ep->e_jobc = pg->pg_jobc;
		if (sessp != SESSION_NULL) {
			if (sessp->s_ttyvp)
				ep->e_flag = EPROC_CTTY;
		}
	} else {
		ep->e_sess = USER_ADDR_NULL;
		ep->e_pgid = 0;
		ep->e_jobc = 0;
	}
#if CONFIG_LCTX
	if (p->p_lctx) {
		ep->e_lcid = p->p_lctx->lc_id;
	} else {
		ep->e_lcid = 0;
	}
#endif
	ep->e_ppid = p->p_ppid;
	/* Pre-zero the fake historical pcred */
	bzero(&ep->e_pcred, sizeof(ep->e_pcred));
	if (p->p_ucred) {
		my_cred = kauth_cred_proc_ref(p);

		/* A fake historical pcred */
		ep->e_pcred.p_ruid = my_cred->cr_ruid;
		ep->e_pcred.p_svuid = my_cred->cr_svuid;
		ep->e_pcred.p_rgid = my_cred->cr_rgid;
		ep->e_pcred.p_svgid = my_cred->cr_svgid;

		/* A fake historical *kauth_cred_t */
		ep->e_ucred.cr_ref = my_cred->cr_ref;
		ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
		ep->e_ucred.cr_ngroups = my_cred->cr_ngroups;
		bcopy(my_cred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));

		kauth_cred_unref(&my_cred);
	}
	/* No meaningful VM sizes for embryonic or zombie processes. */
	if (p->p_stat == SIDL || p->p_stat == SZOMB) {
		ep->e_vm.vm_tsize = 0;
		ep->e_vm.vm_dsize = 0;
		ep->e_vm.vm_ssize = 0;
	}
	ep->e_vm.vm_rssize = 0;

	/* Controlling terminal info, when the process has one. */
	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
	     (tp = SESSION_TP(sessp))) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = sessp->s_ttypgrpid;
		ep->e_tsess = CAST_USER_ADDR_T(tp->t_session);
	} else
		ep->e_tdev = NODEV;

	if (SESS_LEADER(p, sessp))
		ep->e_flag |= EPROC_SLEADER;
	bzero(&ep->e_wmesg[0], WMESGLEN+1);
	ep->e_xsize = ep->e_xrssize = 0;
	ep->e_xccount = ep->e_xswrss = 0;
	/* Drop the refs taken above. */
	if (sessp != SESSION_NULL)
		session_rele(sessp);
	if (pg != PGRP_NULL)
		pg_rele(pg);
}
1517
/*
 * Fill in an eproc structure for the specified process.
 * Populates the historical 32-bit extern_proc view; many scheduler and
 * accounting fields no longer exist in the kernel and are exported as 0.
 */
static void
fill_user32_externproc(proc_t p, struct user32_extern_proc *exp)
{
	exp->p_forw = exp->p_back = 0;
	exp->p_starttime.tv_sec = p->p_start.tv_sec;
	exp->p_starttime.tv_usec = p->p_start.tv_usec;
	exp->p_vmspace = 0;
	exp->p_sigacts = CAST_DOWN_EXPLICIT(uint32_t,p->p_sigacts);
	exp->p_flag  = p->p_flag;
	/* Recreate the historical p_flag bits from the modern p_lflag. */
	if (p->p_lflag & P_LTRACED)
		exp->p_flag |= P_TRACED;
	if (p->p_lflag & P_LPPWAIT)
		exp->p_flag |= P_PPWAIT;
	if (p->p_lflag & P_LEXIT)
		exp->p_flag |= P_WEXIT;
	exp->p_stat  = p->p_stat ;
	exp->p_pid  = p->p_pid ;
	exp->p_oppid  = p->p_oppid ;
	/* Mach related  */
	exp->user_stack  = p->user_stack;
	exp->exit_thread  = CAST_DOWN_EXPLICIT(uint32_t,p->exit_thread);
	exp->p_debugger  = p->p_debugger ;
	exp->sigwait  = p->sigwait ;
	/* scheduling */
#ifdef _PROC_HAS_SCHEDINFO_
	exp->p_estcpu  = p->p_estcpu ;
	exp->p_pctcpu  = p->p_pctcpu ;
	exp->p_slptime  = p->p_slptime ;
#else
	exp->p_estcpu  = 0 ;
	exp->p_pctcpu  = 0 ;
	exp->p_slptime = 0 ;
#endif
	exp->p_cpticks  = 0 ;
	exp->p_wchan  = 0 ;
	exp->p_wmesg  = 0 ;
	exp->p_swtime  = 0 ;
	bcopy(&p->p_realtimer, &exp->p_realtimer,sizeof(struct itimerval));
	bcopy(&p->p_rtime, &exp->p_rtime,sizeof(struct timeval));
	exp->p_uticks  = 0 ;
	exp->p_sticks  = 0 ;
	exp->p_iticks  = 0 ;
	exp->p_traceflag  = 0;
	exp->p_tracep  = 0 ;
	exp->p_siglist  = 0 ;	/* No longer relevant */
	exp->p_textvp  = CAST_DOWN_EXPLICIT(uint32_t,p->p_textvp) ;
	exp->p_holdcnt = 0 ;
	exp->p_sigmask  = 0 ;	/* no longer avaialable */
	exp->p_sigignore  = p->p_sigignore ;
	exp->p_sigcatch  = p->p_sigcatch ;
	exp->p_priority  = p->p_priority ;
	exp->p_usrpri  = 0 ;
	exp->p_nice  = p->p_nice ;
	bcopy(&p->p_comm, &exp->p_comm,MAXCOMLEN);
	exp->p_comm[MAXCOMLEN] = '\0';
	exp->p_pgrp  = CAST_DOWN_EXPLICIT(uint32_t,p->p_pgrp) ;
	exp->p_addr  = 0;
	exp->p_xstat  = p->p_xstat ;
	exp->p_acflag  = p->p_acflag ;
	exp->p_ru  = CAST_DOWN_EXPLICIT(uint32_t,p->p_ru) ;	/* XXX may be NULL */
}
1582
/*
 * Fill in an LP64 version of extern_proc structure for the specified process.
 * Same content as fill_user32_externproc, with pointers exported as full
 * user addresses; defunct fields are exported as 0.
 */
static void
fill_user64_externproc(proc_t p, struct user64_extern_proc *exp)
{
	exp->p_forw = exp->p_back = USER_ADDR_NULL;
	exp->p_starttime.tv_sec = p->p_start.tv_sec;
	exp->p_starttime.tv_usec = p->p_start.tv_usec;
	exp->p_vmspace = USER_ADDR_NULL;
	exp->p_sigacts = CAST_USER_ADDR_T(p->p_sigacts);
	exp->p_flag  = p->p_flag;
	/* Recreate the historical p_flag bits from the modern p_lflag. */
	if (p->p_lflag & P_LTRACED)
		exp->p_flag |= P_TRACED;
	if (p->p_lflag & P_LPPWAIT)
		exp->p_flag |= P_PPWAIT;
	if (p->p_lflag & P_LEXIT)
		exp->p_flag |= P_WEXIT;
	exp->p_stat  = p->p_stat ;
	exp->p_pid  = p->p_pid ;
	exp->p_oppid  = p->p_oppid ;
	/* Mach related  */
	exp->user_stack  = p->user_stack;
	exp->exit_thread  = CAST_USER_ADDR_T(p->exit_thread);
	exp->p_debugger  = p->p_debugger ;
	exp->sigwait  = p->sigwait ;
	/* scheduling */
#ifdef _PROC_HAS_SCHEDINFO_
	exp->p_estcpu  = p->p_estcpu ;
	exp->p_pctcpu  = p->p_pctcpu ;
	exp->p_slptime  = p->p_slptime ;
#else
	exp->p_estcpu  = 0 ;
	exp->p_pctcpu  = 0 ;
	exp->p_slptime = 0 ;
#endif
	exp->p_cpticks  = 0 ;
	exp->p_wchan = 0;
	exp->p_wmesg = 0;
	exp->p_swtime  = 0 ;
	/* Copy timers field-by-field: user64 layout differs from kernel's. */
	exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
	exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;
	exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
	exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;
	exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
	exp->p_rtime.tv_usec = p->p_rtime.tv_usec;
	exp->p_uticks  = 0 ;
	exp->p_sticks  = 0 ;
	exp->p_iticks  = 0 ;
	exp->p_traceflag  = 0 ;
	exp->p_tracep = 0;
	exp->p_siglist  = 0 ;	/* No longer relevant */
	exp->p_textvp  = CAST_USER_ADDR_T(p->p_textvp);
	exp->p_holdcnt = 0 ;
	exp->p_sigmask  = 0 ;	/* no longer avaialable */
	exp->p_sigignore  = p->p_sigignore ;
	exp->p_sigcatch  = p->p_sigcatch ;
	exp->p_priority  = p->p_priority ;
	exp->p_usrpri  = 0 ;
	exp->p_nice  = p->p_nice ;
	bcopy(&p->p_comm, &exp->p_comm,MAXCOMLEN);
	exp->p_comm[MAXCOMLEN] = '\0';
	exp->p_pgrp  = CAST_USER_ADDR_T(p->p_pgrp);
	exp->p_addr  = USER_ADDR_NULL;
	exp->p_xstat  = p->p_xstat ;
	exp->p_acflag  = p->p_acflag ;
	exp->p_ru  = CAST_USER_ADDR_T(p->p_ru);		/* XXX may be NULL */
}
1651
/* Populate a complete 32-bit kinfo_proc (extern_proc + eproc) for p. */
static void
fill_user32_proc(proc_t p, struct user32_kinfo_proc *kp)
{
	/* on a 64 bit kernel, 32 bit users will get some truncated information */
	fill_user32_externproc(p, &kp->kp_proc);
	fill_user32_eproc(p, &kp->kp_eproc);
}
1659
/* Populate a complete LP64 kinfo_proc (extern_proc + eproc) for p. */
static void
fill_user64_proc(proc_t p, struct user64_kinfo_proc *kp)
{
	fill_user64_externproc(p, &kp->kp_proc);
	fill_user64_eproc(p, &kp->kp_eproc);
}
1666
/*
 * Superuser-only dispatcher for the KERN_KDEBUG sysctl: forwards all
 * recognized kdebug selectors to kdbg_control(), rejecting everything else
 * with ENOTSUP.
 */
int
kdebug_ops(int *name, u_int namelen, user_addr_t where, 
           size_t *sizep, proc_t p)
{
	int ret=0;

	if (namelen == 0)
		return(ENOTSUP);
	
	/* kdebug control is a privileged operation. */
	ret = suser(kauth_cred_get(), &p->p_acflag);
	if (ret)
		return(ret);
	
	switch(name[0]) {
	case KERN_KDEFLAGS:
	case KERN_KDDFLAGS:
	case KERN_KDENABLE:
	case KERN_KDGETBUF:
	case KERN_KDSETUP:
	case KERN_KDREMOVE:
	case KERN_KDSETREG:
	case KERN_KDGETREG:
	case KERN_KDREADTR:
	case KERN_KDPIDTR:
	case KERN_KDTHRMAP:
	case KERN_KDPIDEX:
	case KERN_KDSETRTCDEC:
	case KERN_KDSETBUF:
	case KERN_KDGETENTROPY:
		ret = kdbg_control(name, namelen, where, sizep);
		break;
	default:
		ret= ENOTSUP;
		break;
	}
	return(ret);
}
1704
1705
/*
 * Return the top *sizep bytes of the user stack, or the entire area of the
 * user stack down through the saved exec_path, whichever is smaller.
 */
int
sysctl_procargs(int *name, u_int namelen, user_addr_t where, 
                size_t *sizep, proc_t cur_proc)
{
	/* argc_yes == 0: legacy KERN_PROCARGS layout (no leading argc word). */
	return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 0);
}
1716
/* KERN_PROCARGS2 variant: prepends the process' argc as the first word. */
static int
sysctl_procargs2(int *name, u_int namelen, user_addr_t where, 
                 size_t *sizep, proc_t cur_proc)
{
	return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 1);
}
1723
1724 static int
1725 sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
1726 size_t *sizep, proc_t cur_proc, int argc_yes)
1727 {
1728 proc_t p;
1729 int buflen = where != USER_ADDR_NULL ? *sizep : 0;
1730 int error = 0;
1731 struct _vm_map *proc_map;
1732 struct task * task;
1733 vm_map_copy_t tmp;
1734 user_addr_t arg_addr;
1735 size_t arg_size;
1736 caddr_t data;
1737 size_t argslen=0;
1738 int size;
1739 vm_offset_t copy_start, copy_end;
1740 kern_return_t ret;
1741 int pid;
1742 kauth_cred_t my_cred;
1743 uid_t uid;
1744
1745 if ( namelen < 1 )
1746 return(EINVAL);
1747
1748 if (argc_yes)
1749 buflen -= sizeof(int); /* reserve first word to return argc */
1750
1751 /* we only care about buflen when where (oldp from sysctl) is not NULL. */
1752 /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
1753 /* is not NULL then the caller wants us to return the length needed to */
1754 /* hold the data we would return */
1755 if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
1756 return(EINVAL);
1757 }
1758 arg_size = buflen;
1759
1760 /*
1761 * Lookup process by pid
1762 */
1763 pid = name[0];
1764 p = proc_find(pid);
1765 if (p == NULL) {
1766 return(EINVAL);
1767 }
1768
1769 /*
1770 * Copy the top N bytes of the stack.
1771 * On all machines we have so far, the stack grows
1772 * downwards.
1773 *
1774 * If the user expects no more than N bytes of
1775 * argument list, use that as a guess for the
1776 * size.
1777 */
1778
1779 if (!p->user_stack) {
1780 proc_rele(p);
1781 return(EINVAL);
1782 }
1783
1784 if (where == USER_ADDR_NULL) {
1785 /* caller only wants to know length of proc args data */
1786 if (sizep == NULL) {
1787 proc_rele(p);
1788 return(EFAULT);
1789 }
1790
1791 size = p->p_argslen;
1792 proc_rele(p);
1793 if (argc_yes) {
1794 size += sizeof(int);
1795 }
1796 else {
1797 /*
1798 * old PROCARGS will return the executable's path and plus some
1799 * extra space for work alignment and data tags
1800 */
1801 size += PATH_MAX + (6 * sizeof(int));
1802 }
1803 size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
1804 *sizep = size;
1805 return (0);
1806 }
1807
1808 my_cred = kauth_cred_proc_ref(p);
1809 uid = kauth_cred_getuid(my_cred);
1810 kauth_cred_unref(&my_cred);
1811
1812 if ((uid != kauth_cred_getuid(kauth_cred_get()))
1813 && suser(kauth_cred_get(), &cur_proc->p_acflag)) {
1814 proc_rele(p);
1815 return (EINVAL);
1816 }
1817
1818 if ((u_int)arg_size > p->p_argslen)
1819 arg_size = round_page(p->p_argslen);
1820
1821 arg_addr = p->user_stack - arg_size;
1822
1823
1824 /*
1825 * Before we can block (any VM code), make another
1826 * reference to the map to keep it alive. We do
1827 * that by getting a reference on the task itself.
1828 */
1829 task = p->task;
1830 if (task == NULL) {
1831 proc_rele(p);
1832 return(EINVAL);
1833 }
1834
1835 argslen = p->p_argslen;
1836 /*
1837 * Once we have a task reference we can convert that into a
1838 * map reference, which we will use in the calls below. The
1839 * task/process may change its map after we take this reference
1840 * (see execve), but the worst that will happen then is a return
1841 * of stale info (which is always a possibility).
1842 */
1843 task_reference(task);
1844 proc_rele(p);
1845 proc_map = get_task_map_reference(task);
1846 task_deallocate(task);
1847
1848 if (proc_map == NULL)
1849 return(EINVAL);
1850
1851
1852 ret = kmem_alloc(kernel_map, &copy_start, round_page(arg_size));
1853 if (ret != KERN_SUCCESS) {
1854 vm_map_deallocate(proc_map);
1855 return(ENOMEM);
1856 }
1857
1858 copy_end = round_page(copy_start + arg_size);
1859
1860 if( vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
1861 (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
1862 vm_map_deallocate(proc_map);
1863 kmem_free(kernel_map, copy_start,
1864 round_page(arg_size));
1865 return (EIO);
1866 }
1867
1868 /*
1869 * Now that we've done the copyin from the process'
1870 * map, we can release the reference to it.
1871 */
1872 vm_map_deallocate(proc_map);
1873
1874 if( vm_map_copy_overwrite(kernel_map,
1875 (vm_map_address_t)copy_start,
1876 tmp, FALSE) != KERN_SUCCESS) {
1877 kmem_free(kernel_map, copy_start,
1878 round_page(arg_size));
1879 return (EIO);
1880 }
1881
1882 if (arg_size > argslen) {
1883 data = (caddr_t) (copy_end - argslen);
1884 size = argslen;
1885 } else {
1886 data = (caddr_t) (copy_end - arg_size);
1887 size = arg_size;
1888 }
1889
1890 if (argc_yes) {
1891 /* Put processes argc as the first word in the copyout buffer */
1892 suword(where, p->p_argc);
1893 error = copyout(data, (where + sizeof(int)), size);
1894 size += sizeof(int);
1895 } else {
1896 error = copyout(data, where, size);
1897
1898 /*
1899 * Make the old PROCARGS work to return the executable's path
1900 * But, only if there is enough space in the provided buffer
1901 *
1902 * on entry: data [possibily] points to the beginning of the path
1903 *
1904 * Note: we keep all pointers&sizes aligned to word boundries
1905 */
1906 if ( (! error) && (buflen > 0 && (u_int)buflen > argslen) )
1907 {
1908 int binPath_sz, alignedBinPath_sz = 0;
1909 int extraSpaceNeeded, addThis;
1910 user_addr_t placeHere;
1911 char * str = (char *) data;
1912 int max_len = size;
1913
1914 /* Some apps are really bad about messing up their stacks
1915 So, we have to be extra careful about getting the length
1916 of the executing binary. If we encounter an error, we bail.
1917 */
1918
1919 /* Limit ourselves to PATH_MAX paths */
1920 if ( max_len > PATH_MAX ) max_len = PATH_MAX;
1921
1922 binPath_sz = 0;
1923
1924 while ( (binPath_sz < max_len-1) && (*str++ != 0) )
1925 binPath_sz++;
1926
1927 /* If we have a NUL terminator, copy it, too */
1928 if (binPath_sz < max_len-1) binPath_sz += 1;
1929
1930 /* Pre-Flight the space requiremnts */
1931
1932 /* Account for the padding that fills out binPath to the next word */
1933 alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;
1934
1935 placeHere = where + size;
1936
1937 /* Account for the bytes needed to keep placeHere word aligned */
1938 addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;
1939
1940 /* Add up all the space that is needed */
1941 extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));
1942
1943 /* is there is room to tack on argv[0]? */
1944 if ( (buflen & ~(sizeof(int)-1)) >= ( argslen + extraSpaceNeeded ))
1945 {
1946 placeHere += addThis;
1947 suword(placeHere, 0);
1948 placeHere += sizeof(int);
1949 suword(placeHere, 0xBFFF0000);
1950 placeHere += sizeof(int);
1951 suword(placeHere, 0);
1952 placeHere += sizeof(int);
1953 error = copyout(data, placeHere, binPath_sz);
1954 if ( ! error )
1955 {
1956 placeHere += binPath_sz;
1957 suword(placeHere, 0);
1958 size += extraSpaceNeeded;
1959 }
1960 }
1961 }
1962 }
1963
1964 if (copy_start != (vm_offset_t) 0) {
1965 kmem_free(kernel_map, copy_start, copy_end - copy_start);
1966 }
1967 if (error) {
1968 return(error);
1969 }
1970
1971 if (where != USER_ADDR_NULL)
1972 *sizep = size;
1973 return (0);
1974 }
1975
1976
1977 /*
1978 * Max number of concurrent aio requests
1979 */
1980 static int
1981 sysctl_aiomax
1982 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1983 {
1984 int new_value, changed;
1985 int error = sysctl_io_number(req, aio_max_requests, sizeof(int), &new_value, &changed);
1986 if (changed) {
1987 /* make sure the system-wide limit is greater than the per process limit */
1988 if (new_value >= aio_max_requests_per_process)
1989 aio_max_requests = new_value;
1990 else
1991 error = EINVAL;
1992 }
1993 return(error);
1994 }
1995
1996
1997 /*
1998 * Max number of concurrent aio requests per process
1999 */
2000 static int
2001 sysctl_aioprocmax
2002 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2003 {
2004 int new_value, changed;
2005 int error = sysctl_io_number(req, aio_max_requests_per_process, sizeof(int), &new_value, &changed);
2006 if (changed) {
2007 /* make sure per process limit is less than the system-wide limit */
2008 if (new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX)
2009 aio_max_requests_per_process = new_value;
2010 else
2011 error = EINVAL;
2012 }
2013 return(error);
2014 }
2015
2016
2017 /*
2018 * Max number of async IO worker threads
2019 */
2020 static int
2021 sysctl_aiothreads
2022 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2023 {
2024 int new_value, changed;
2025 int error = sysctl_io_number(req, aio_worker_threads, sizeof(int), &new_value, &changed);
2026 if (changed) {
2027 /* we only allow an increase in the number of worker threads */
2028 if (new_value > aio_worker_threads ) {
2029 _aio_create_worker_threads((new_value - aio_worker_threads));
2030 aio_worker_threads = new_value;
2031 }
2032 else
2033 error = EINVAL;
2034 }
2035 return(error);
2036 }
2037
2038
2039 /*
2040 * System-wide limit on the max number of processes
2041 */
2042 static int
2043 sysctl_maxproc
2044 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2045 {
2046 int new_value, changed;
2047 int error = sysctl_io_number(req, maxproc, sizeof(int), &new_value, &changed);
2048 if (changed) {
2049 AUDIT_ARG(value32, new_value);
2050 /* make sure the system-wide limit is less than the configured hard
2051 limit set at kernel compilation */
2052 if (new_value <= hard_maxproc && new_value > 0)
2053 maxproc = new_value;
2054 else
2055 error = EINVAL;
2056 }
2057 return(error);
2058 }
2059
/* Read-only kernel identification values exported under kern.*. */
SYSCTL_STRING(_kern, KERN_OSTYPE, ostype, 
		CTLFLAG_RD | CTLFLAG_KERN, 
		ostype, 0, "");
SYSCTL_STRING(_kern, KERN_OSRELEASE, osrelease, 
		CTLFLAG_RD | CTLFLAG_KERN, 
		osrelease, 0, "");
SYSCTL_INT(_kern, KERN_OSREV, osrevision, 
		CTLFLAG_RD | CTLFLAG_KERN, 
		(int *)NULL, BSD, "");
SYSCTL_STRING(_kern, KERN_VERSION, version, 
		CTLFLAG_RD | CTLFLAG_KERN, 
		version, 0, "");
2072
#if DEBUG
/* Non-zero enables kprintf() syscall tracing (debug kernels only). */
int debug_kprint_syscall = 0;
/* Optional process-name filter for the tracing above; empty = all. */
char debug_kprint_syscall_process[MAXCOMLEN+1];

SYSCTL_INT (_debug, OID_AUTO, kprint_syscall,
	   CTLFLAG_RW, &debug_kprint_syscall, 0, "kprintf syscall tracing");
SYSCTL_STRING(_debug, OID_AUTO, kprint_syscall_process, 
			  CTLFLAG_RW, debug_kprint_syscall_process, sizeof(debug_kprint_syscall_process),
			  "name of process for kprintf syscall tracing");

/*
 * Decide whether the current process' syscalls should be kprintf-traced.
 * Returns 1 to trace (optionally reporting the process name via *namep),
 * 0 otherwise.
 */
int debug_kprint_current_process(const char **namep)
{
	struct proc *p = current_proc();

	if (p == NULL) {
		return 0;
	}

	if (debug_kprint_syscall_process[0]) {
		/* user asked to scope tracing to a particular process name */
		if(0 == strncmp(debug_kprint_syscall_process,
						p->p_comm, sizeof(debug_kprint_syscall_process))) {
			/* no value in telling the user that we traced what they asked */
			if(namep) *namep = NULL;

			return 1;
		} else {
			return 0;
		}
	}

	/* trace all processes. Tell user what we traced */
	if (namep) {
		*namep = p->p_comm;
	}

	return 1;
}
#endif
2112
2113 /* PR-5293665: need to use a callback function for kern.osversion to set
2114 * osversion in IORegistry */
2115
2116 static int
2117 sysctl_osversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
2118 {
2119 int rval = 0;
2120
2121 rval = sysctl_handle_string(oidp, arg1, arg2, req);
2122
2123 if (req->newptr) {
2124 IORegistrySetOSBuildVersion((char *)arg1);
2125 }
2126
2127 return rval;
2128 }
2129
/* kern.osversion: OS build string, with IORegistry mirroring on set. */
SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
        CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING,
        osversion, 256 /* OSVERSIZE*/, 
        sysctl_osversion, "A", "");
2134
2135 static int
2136 sysctl_sysctl_bootargs
2137 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2138 {
2139 int error;
2140 char buf[256];
2141
2142 strlcpy(buf, PE_boot_args(), 256);
2143 error = sysctl_io_string(req, buf, 256, 0, NULL);
2144 return(error);
2145 }
2146
/* kern.bootargs: read-only boot-args snapshot (see sysctl_sysctl_bootargs). */
SYSCTL_PROC(_kern, OID_AUTO, bootargs,
	CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING,
	NULL, 0,
	sysctl_sysctl_bootargs, "A", "bootargs");

/* Assorted kernel limits and resource counters under kern.*. */
SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, 
		CTLFLAG_RW | CTLFLAG_KERN, 
		&maxfiles, 0, "");
SYSCTL_INT(_kern, KERN_ARGMAX, argmax, 
		CTLFLAG_RD | CTLFLAG_KERN, 
		(int *)NULL, ARG_MAX, "");
SYSCTL_INT(_kern, KERN_POSIX1, posix1version, 
		CTLFLAG_RD | CTLFLAG_KERN, 
		(int *)NULL, _POSIX_VERSION, "");
SYSCTL_INT(_kern, KERN_NGROUPS, ngroups, 
		CTLFLAG_RD | CTLFLAG_KERN, 
		(int *)NULL, NGROUPS_MAX, "");
SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control, 
		CTLFLAG_RD | CTLFLAG_KERN, 
		(int *)NULL, 1, "");
#if 1	/* _POSIX_SAVED_IDS from <unistd.h> */
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids, 
		CTLFLAG_RD | CTLFLAG_KERN, 
		(int *)NULL, 1, "");
#else
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids, 
		CTLFLAG_RD | CTLFLAG_KERN, 
		NULL, 0, "");
#endif
SYSCTL_INT(_kern, OID_AUTO, num_files, 
		CTLFLAG_RD, 
		&nfiles, 0, "");
SYSCTL_COMPAT_INT(_kern, OID_AUTO, num_vnodes, 
		CTLFLAG_RD, 
		&numvnodes, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_tasks, 
		CTLFLAG_RD, 
		&task_max, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_threads, 
		CTLFLAG_RD, 
		&thread_max, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_taskthreads, 
		CTLFLAG_RD, 
		&task_threadmax, 0, "");
2191
/*
 * Handler for kern.maxvnodes: read or update the desired vnode count.
 * sysctl_io_number stores the new value directly into desiredvnodes; when
 * it actually changed, resize the dependent VM object and name caches.
 */
static int
sysctl_maxvnodes (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int oldval = desiredvnodes;
	int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &desiredvnodes, NULL);

	if (oldval != desiredvnodes) {
		reset_vmobjectcache(oldval, desiredvnodes);
		resize_namecache(desiredvnodes);
	}

	return(error);
}
2205
2206 SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
2207 CTLTYPE_INT | CTLFLAG_RW,
2208 0, 0, sysctl_maxvnodes, "I", "");
2209
/* kern.maxproc: process-count limit, handled by sysctl_maxproc() (defined elsewhere in this file). */
SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_maxproc, "I", "");

/* POSIX AIO tunables; each is routed through its own handler defined elsewhere in this file. */
SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_aiomax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_aioprocmax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_aiothreads, "I", "");
2225
2226 static int
2227 sysctl_securelvl
2228 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2229 {
2230 int new_value, changed;
2231 int error = sysctl_io_number(req, securelevel, sizeof(int), &new_value, &changed);
2232 if (changed) {
2233 if (!(new_value < securelevel && req->p->p_pid != 1)) {
2234 proc_list_lock();
2235 securelevel = new_value;
2236 proc_list_unlock();
2237 } else {
2238 error = EPERM;
2239 }
2240 }
2241 return(error);
2242 }
2243
2244 SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
2245 CTLTYPE_INT | CTLFLAG_RW,
2246 0, 0, sysctl_securelvl, "I", "");
2247
2248
2249 static int
2250 sysctl_domainname
2251 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2252 {
2253 int error, changed;
2254 error = sysctl_io_string(req, domainname, sizeof(domainname), 0, &changed);
2255 if (changed) {
2256 domainnamelen = strlen(domainname);
2257 }
2258 return(error);
2259 }
2260
2261 SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
2262 CTLTYPE_STRING | CTLFLAG_RW,
2263 0, 0, sysctl_domainname, "A", "");
2264
/*
 * kern.hostid: the host identifier.  Exported via SYSCTL_COMPAT_INT;
 * NOTE(review): presumably because hostid is not declared plain int —
 * confirm its declaration before converting this to SYSCTL_INT.
 */
SYSCTL_COMPAT_INT(_kern, KERN_HOSTID, hostid,
	CTLFLAG_RW | CTLFLAG_KERN,
	&hostid, 0, "");
2268
2269 static int
2270 sysctl_hostname
2271 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2272 {
2273 int error, changed;
2274 error = sysctl_io_string(req, hostname, sizeof(hostname), 1, &changed);
2275 if (changed) {
2276 hostnamelen = req->newlen;
2277 }
2278 return(error);
2279 }
2280
2281
2282 SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
2283 CTLTYPE_STRING | CTLFLAG_RW,
2284 0, 0, sysctl_hostname, "A", "");
2285
2286 static int
2287 sysctl_procname
2288 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2289 {
2290 /* Original code allowed writing, I'm copying this, although this all makes
2291 no sense to me. Besides, this sysctl is never used. */
2292 return sysctl_io_string(req, &req->p->p_name[0], (2*MAXCOMLEN+1), 1, NULL);
2293 }
2294
2295 SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
2296 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY,
2297 0, 0, sysctl_procname, "A", "");
2298
/* kern.speculative_reads_disabled: toggle backing the speculative_reads_disabled global. */
SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
	CTLFLAG_RW | CTLFLAG_KERN,
	&speculative_reads_disabled, 0, "");

/*
 * Tunables backing the preheat_pages_{max,min,mult} globals —
 * presumably parameters of the speculative page pre-heat (read-ahead)
 * machinery; confirm against their consumers in the VM layer.
 */
SYSCTL_UINT(_kern, OID_AUTO, preheat_pages_max,
	CTLFLAG_RW | CTLFLAG_KERN,
	&preheat_pages_max, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, preheat_pages_min,
	CTLFLAG_RW | CTLFLAG_KERN,
	&preheat_pages_min, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, preheat_pages_mult,
	CTLFLAG_RW | CTLFLAG_KERN,
	&preheat_pages_mult, 0, "");
2314
2315 static int
2316 sysctl_boottime
2317 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2318 {
2319 time_t tv_sec = boottime_sec();
2320 struct proc *p = req->p;
2321
2322 if (proc_is64bit(p)) {
2323 struct user64_timeval t;
2324 t.tv_sec = tv_sec;
2325 t.tv_usec = 0;
2326 return sysctl_io_opaque(req, &t, sizeof(t), NULL);
2327 } else {
2328 struct user32_timeval t;
2329 t.tv_sec = tv_sec;
2330 t.tv_usec = 0;
2331 return sysctl_io_opaque(req, &t, sizeof(t), NULL);
2332 }
2333 }
2334
2335 SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
2336 CTLTYPE_STRUCT | CTLFLAG_RD,
2337 0, 0, sysctl_boottime, "S,timeval", "");
2338
2339 static int
2340 sysctl_symfile
2341 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2342 {
2343 char *str;
2344 int error = get_kernel_symfile(req->p, &str);
2345 if (error)
2346 return (error);
2347 return sysctl_io_string(req, str, 0, 0, NULL);
2348 }
2349
2350
2351 SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
2352 CTLTYPE_STRING | CTLFLAG_RD,
2353 0, 0, sysctl_symfile, "A", "");
2354
#if NFSCLIENT
/*
 * kern.netboot: reports the value of netboot_root() (read-only);
 * nonzero indicates a network-booted root.
 */
static int
sysctl_netboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int net_rooted = netboot_root();

	return sysctl_io_number(req, net_rooted, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
	CTLTYPE_INT | CTLFLAG_RD,
	0, 0, sysctl_netboot, "I", "");
#endif
2367
#ifdef CONFIG_IMGSRC_ACCESS
/*
 * kern.imgsrcdev: report the dev_t of the device backing the recorded
 * image-source root vnode.  Superuser only; ENOENT when no image-source
 * root has been recorded.
 */
static int
sysctl_imgsrcdev
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	vfs_context_t ctx = vfs_context_current();
	vnode_t devvp;
	int result;

	if (!vfs_context_issuser(ctx)) {
		return EPERM;
	}

	if (imgsrc_rootvnode == NULL) {
		return ENOENT;
	}

	/* Take an iocount on the root vnode before chasing its mount's device. */
	result = vnode_getwithref(imgsrc_rootvnode);
	if (result != 0) {
		return result;
	}

	/* Second iocount on the device vnode; both are dropped below in
	 * reverse order of acquisition. */
	devvp = vnode_mount(imgsrc_rootvnode)->mnt_devvp;
	result = vnode_getwithref(devvp);
	if (result != 0) {
		goto out;
	}

	result = sysctl_io_number(req, vnode_specrdev(devvp), sizeof(dev_t), NULL, NULL);

	vnode_put(devvp);
out:
	vnode_put(imgsrc_rootvnode);
	return result;
}

SYSCTL_PROC(_kern, OID_AUTO, imgsrcdev,
	CTLTYPE_INT | CTLFLAG_RD,
	0, 0, sysctl_imgsrcdev, "I", "");
#endif /* CONFIG_IMGSRC_ACCESS */
2408
2409 static int
2410 sysctl_usrstack
2411 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2412 {
2413 return sysctl_io_number(req, (int)req->p->user_stack, sizeof(int), NULL, NULL);
2414 }
2415
2416 SYSCTL_PROC(_kern, KERN_USRSTACK32, usrstack,
2417 CTLTYPE_INT | CTLFLAG_RD,
2418 0, 0, sysctl_usrstack, "I", "");
2419
2420 static int
2421 sysctl_usrstack64
2422 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2423 {
2424 return sysctl_io_number(req, req->p->user_stack, sizeof(req->p->user_stack), NULL, NULL);
2425 }
2426
2427 SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
2428 CTLTYPE_QUAD | CTLFLAG_RD,
2429 0, 0, sysctl_usrstack64, "Q", "");
2430
/* kern.corefile: writable string backing corefilename[], the path/pattern used for core files. */
SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
	CTLFLAG_RW | CTLFLAG_KERN,
	corefilename, sizeof(corefilename), "");
2434
2435 static int
2436 sysctl_coredump
2437 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2438 {
2439 #ifdef SECURE_KERNEL
2440 return (ENOTSUP);
2441 #endif
2442 int new_value, changed;
2443 int error = sysctl_io_number(req, do_coredump, sizeof(int), &new_value, &changed);
2444 if (changed) {
2445 if ((new_value == 0) || (new_value == 1))
2446 do_coredump = new_value;
2447 else
2448 error = EINVAL;
2449 }
2450 return(error);
2451 }
2452
2453 SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
2454 CTLTYPE_INT | CTLFLAG_RW,
2455 0, 0, sysctl_coredump, "I", "");
2456
2457 static int
2458 sysctl_suid_coredump
2459 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2460 {
2461 #ifdef SECURE_KERNEL
2462 return (ENOTSUP);
2463 #endif
2464 int new_value, changed;
2465 int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed);
2466 if (changed) {
2467 if ((new_value == 0) || (new_value == 1))
2468 sugid_coredump = new_value;
2469 else
2470 error = EINVAL;
2471 }
2472 return(error);
2473 }
2474
2475 SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
2476 CTLTYPE_INT | CTLFLAG_RW,
2477 0, 0, sysctl_suid_coredump, "I", "");
2478
2479 static int
2480 sysctl_delayterm
2481 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2482 {
2483 struct proc *p = req->p;
2484 int new_value, changed;
2485 int error = sysctl_io_number(req, (req->p->p_lflag & P_LDELAYTERM)? 1: 0, sizeof(int), &new_value, &changed);
2486 if (changed) {
2487 proc_lock(p);
2488 if (new_value)
2489 req->p->p_lflag |= P_LDELAYTERM;
2490 else
2491 req->p->p_lflag &= ~P_LDELAYTERM;
2492 proc_unlock(p);
2493 }
2494 return(error);
2495 }
2496
2497 SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
2498 CTLTYPE_INT | CTLFLAG_RW,
2499 0, 0, sysctl_delayterm, "I", "");
2500
2501
2502 static int
2503 sysctl_rage_vnode
2504 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2505 {
2506 struct proc *p = req->p;
2507 struct uthread *ut;
2508 int new_value, old_value, changed;
2509 int error;
2510
2511 ut = get_bsdthread_info(current_thread());
2512
2513 if (ut->uu_flag & UT_RAGE_VNODES)
2514 old_value = KERN_RAGE_THREAD;
2515 else if (p->p_lflag & P_LRAGE_VNODES)
2516 old_value = KERN_RAGE_PROC;
2517 else
2518 old_value = 0;
2519
2520 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2521
2522 if (error == 0) {
2523 switch (new_value) {
2524 case KERN_RAGE_PROC:
2525 proc_lock(p);
2526 p->p_lflag |= P_LRAGE_VNODES;
2527 proc_unlock(p);
2528 break;
2529 case KERN_UNRAGE_PROC:
2530 proc_lock(p);
2531 p->p_lflag &= ~P_LRAGE_VNODES;
2532 proc_unlock(p);
2533 break;
2534
2535 case KERN_RAGE_THREAD:
2536 ut->uu_flag |= UT_RAGE_VNODES;
2537 break;
2538 case KERN_UNRAGE_THREAD:
2539 ut = get_bsdthread_info(current_thread());
2540 ut->uu_flag &= ~UT_RAGE_VNODES;
2541 break;
2542 }
2543 }
2544 return(error);
2545 }
2546
2547 SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
2548 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY,
2549 0, 0, sysctl_rage_vnode, "I", "");
2550
2551
2552 static int
2553 sysctl_kern_check_openevt
2554 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2555 {
2556 struct proc *p = req->p;
2557 int new_value, old_value, changed;
2558 int error;
2559
2560 if (p->p_flag & P_CHECKOPENEVT) {
2561 old_value = KERN_OPENEVT_PROC;
2562 } else {
2563 old_value = 0;
2564 }
2565
2566 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2567
2568 if (error == 0) {
2569 switch (new_value) {
2570 case KERN_OPENEVT_PROC:
2571 OSBitOrAtomic(P_CHECKOPENEVT, &p->p_flag);
2572 break;
2573
2574 case KERN_UNOPENEVT_PROC:
2575 OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT), &p->p_flag);
2576 break;
2577
2578 default:
2579 error = EINVAL;
2580 }
2581 }
2582 return(error);
2583 }
2584
2585 SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY,
2586 0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");
2587
2588
2589
2590 static int
2591 sysctl_nx
2592 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2593 {
2594 #ifdef SECURE_KERNEL
2595 return ENOTSUP;
2596 #endif
2597 int new_value, changed;
2598 int error;
2599
2600 error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);
2601 if (error)
2602 return error;
2603
2604 if (changed) {
2605 #if defined(__i386__) || defined(__x86_64__)
2606 /*
2607 * Only allow setting if NX is supported on the chip
2608 */
2609 if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD))
2610 return ENOTSUP;
2611 #endif
2612 nx_enabled = new_value;
2613 }
2614 return(error);
2615 }
2616
2617
2618
2619 SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
2620 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN,
2621 0, 0, sysctl_nx, "I", "");
2622
2623 static int
2624 sysctl_loadavg
2625 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2626 {
2627 if (proc_is64bit(req->p)) {
2628 struct user64_loadavg loadinfo64;
2629 fill_loadavg64(&averunnable, &loadinfo64);
2630 return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);
2631 } else {
2632 struct user32_loadavg loadinfo32;
2633 fill_loadavg32(&averunnable, &loadinfo32);
2634 return sysctl_io_opaque(req, &loadinfo32, sizeof(loadinfo32), NULL);
2635 }
2636 }
2637
2638 SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
2639 CTLTYPE_STRUCT | CTLFLAG_RD,
2640 0, 0, sysctl_loadavg, "S,loadavg", "");
2641
2642 static int
2643 sysctl_swapusage
2644 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2645 {
2646 int error;
2647 uint64_t swap_total;
2648 uint64_t swap_avail;
2649 vm_size_t swap_pagesize;
2650 boolean_t swap_encrypted;
2651 struct xsw_usage xsu;
2652
2653 error = macx_swapinfo(&swap_total,
2654 &swap_avail,
2655 &swap_pagesize,
2656 &swap_encrypted);
2657 if (error)
2658 return error;
2659
2660 xsu.xsu_total = swap_total;
2661 xsu.xsu_avail = swap_avail;
2662 xsu.xsu_used = swap_total - swap_avail;
2663 xsu.xsu_pagesize = swap_pagesize;
2664 xsu.xsu_encrypted = swap_encrypted;
2665 return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL);
2666 }
2667
2668
2669
2670 SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
2671 CTLTYPE_STRUCT | CTLFLAG_RD,
2672 0, 0, sysctl_swapusage, "S,xsw_usage", "");
2673
2674
/* this kernel does NOT implement shared_region_make_private_np() */
/* kern.shreg_private: constant 0 advertising that fact to userspace. */
SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
	CTLFLAG_RD,
	(int *)NULL, 0, "");
2679
#if defined(__i386__) || defined(__x86_64__)
/*
 * sysctl.proc_exec_affinity: read or set the calling process's exec
 * affinity.  P_AFFINITY set reads back as CPU_TYPE_POWERPC, clear as
 * CPU_TYPE_I386; writes accept exactly those two cpu types.
 */
static int
sysctl_sysctl_exec_affinity(__unused struct sysctl_oid *oidp,
	   __unused void *arg1, __unused int arg2,
	   struct sysctl_req *req)
{
	proc_t cur_proc = req->p;
	int error;

	/* Copy out the current mapping when the caller asked to read. */
	if (req->oldptr != USER_ADDR_NULL) {
		cpu_type_t oldcputype = (cur_proc->p_flag & P_AFFINITY) ? CPU_TYPE_POWERPC : CPU_TYPE_I386;
		if ((error = SYSCTL_OUT(req, &oldcputype, sizeof(oldcputype))))
			return error;
	}

	/* Apply a new mapping when the caller supplied one. */
	if (req->newptr != USER_ADDR_NULL) {
		cpu_type_t newcputype;
		if ((error = SYSCTL_IN(req, &newcputype, sizeof(newcputype))))
			return error;
		if (newcputype == CPU_TYPE_I386)
			OSBitAndAtomic(~((uint32_t)P_AFFINITY), &cur_proc->p_flag);
		else if (newcputype == CPU_TYPE_POWERPC)
			OSBitOrAtomic(P_AFFINITY, &cur_proc->p_flag);
		else
			return (EINVAL);
	}

	return 0;
}
SYSCTL_PROC(_sysctl, OID_AUTO, proc_exec_affinity, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, 0, 0, sysctl_sysctl_exec_affinity ,"I","proc_exec_affinity");
#endif
2711
/*
 * Resolve the CPU type of a process for the sysctl.proc_native /
 * sysctl.proc_cputype handlers.
 *
 * namelen == 0 targets cur_proc; namelen == 1 looks up the pid in
 * name[0] (taking a proc ref that is released before returning); any
 * other length yields EINVAL.  On success *cputype is the machine's
 * cpu_type() with CPU_ARCH_ABI64 ORed in for 64-bit processes, except
 * that on x86 a P_TRANSLATED process reports CPU_TYPE_POWERPC.
 */
static int
fetch_process_cputype(
	proc_t cur_proc,
	int *name,
	u_int namelen,
	cpu_type_t *cputype)
{
	proc_t p = PROC_NULL;
	int refheld = 0;
	cpu_type_t ret = 0;
	int error = 0;

	if (namelen == 0)
		p = cur_proc;
	else if (namelen == 1) {
		p = proc_find(name[0]);
		if (p == NULL)
			return (EINVAL);
		refheld = 1;	/* drop this ref on exit */
	} else {
		error = EINVAL;
		goto out;
	}

#if defined(__i386__) || defined(__x86_64__)
	if (p->p_flag & P_TRANSLATED) {
		ret = CPU_TYPE_POWERPC;
	}
	else
#endif
	{
		ret = cpu_type();
		if (IS_64BIT_PROCESS(p))
			ret |= CPU_ARCH_ABI64;
	}
	*cputype = ret;

	if (refheld != 0)
		proc_rele(p);
out:
	return (error);
}
2754
2755 static int
2756 sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
2757 struct sysctl_req *req)
2758 {
2759 int error;
2760 cpu_type_t proc_cputype = 0;
2761 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2762 return error;
2763 int res = 1;
2764 if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
2765 res = 0;
2766 return SYSCTL_OUT(req, &res, sizeof(res));
2767 }
2768 SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_native ,"I","proc_native");
2769
2770 static int
2771 sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
2772 struct sysctl_req *req)
2773 {
2774 int error;
2775 cpu_type_t proc_cputype = 0;
2776 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2777 return error;
2778 return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
2779 }
2780 SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_cputype ,"I","proc_cputype");
2781
2782 static int
2783 sysctl_safeboot
2784 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2785 {
2786 return sysctl_io_number(req, boothowto & RB_SAFEBOOT ? 1 : 0, sizeof(int), NULL, NULL);
2787 }
2788
2789 SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
2790 CTLTYPE_INT | CTLFLAG_RD,
2791 0, 0, sysctl_safeboot, "I", "");
2792
2793 static int
2794 sysctl_singleuser
2795 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2796 {
2797 return sysctl_io_number(req, boothowto & RB_SINGLE ? 1 : 0, sizeof(int), NULL, NULL);
2798 }
2799
2800 SYSCTL_PROC(_kern, OID_AUTO, singleuser,
2801 CTLTYPE_INT | CTLFLAG_RD,
2802 0, 0, sysctl_singleuser, "I", "");
2803
/*
 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
 */
extern boolean_t affinity_sets_enabled;
extern int affinity_sets_mapping;

/* NOTE(review): the (int *) cast assumes boolean_t is int-sized on all
 * configurations — confirm against the osfmk definition. */
SYSCTL_INT (_kern, OID_AUTO, affinity_sets_enabled,
	    CTLFLAG_RW, (int *) &affinity_sets_enabled, 0, "hinting enabled");
SYSCTL_INT (_kern, OID_AUTO, affinity_sets_mapping,
	    CTLFLAG_RW, &affinity_sets_mapping, 0, "mapping policy");
2814
/*
 * Limit on total memory users can wire.
 *
 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
 *
 * vm_user_wire_limit - per address space limit on wired memory.  This puts a cap on the process's rlimit value.
 *
 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
 * kmem_init().
 *
 * All values are in bytes.
 */

vm_map_size_t	vm_global_no_user_wire_amount;
vm_map_size_t	vm_global_user_wire_limit;
vm_map_size_t	vm_user_wire_limit;

/*
 * There needs to be a more automatic/elegant way to do this
 */
/* NOTE(review): SYSCTL_QUAD exposes these as 64-bit quantities, which
 * assumes vm_map_size_t is 64 bits on every configuration — confirm. */
SYSCTL_QUAD(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW, &vm_global_no_user_wire_amount, "");
SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW, &vm_global_user_wire_limit, "");
SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW, &vm_user_wire_limit, "");
2839
2840
2841
/*
 * enable back trace events for thread blocks
 */

extern uint32_t kdebug_thread_block;

SYSCTL_INT (_kern, OID_AUTO, kdebug_thread_block,
	    CTLFLAG_RW, &kdebug_thread_block, 0, "kdebug thread_block");

/*
 * Kernel stack size and depth
 */
/* NOTE(review): the (int *) casts assume these osfmk globals are
 * int-sized — confirm their declarations before relying on the values. */
SYSCTL_INT (_kern, OID_AUTO, stack_size,
	    CTLFLAG_RD, (int *) &kernel_stack_size, 0, "Kernel stack size");
SYSCTL_INT (_kern, OID_AUTO, stack_depth_max,
	    CTLFLAG_RD, (int *) &kernel_stack_depth_max, 0, "Max kernel stack depth at interrupt or context switch");

/*
 * enable back trace for port allocations
 */
extern int ipc_portbt;

SYSCTL_INT(_kern, OID_AUTO, ipc_portbt,
	   CTLFLAG_RW | CTLFLAG_KERN,
	   &ipc_portbt, 0, "");
2867