]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kern_sysctl.c
27f0e09066f220aa3a25dbdeabf1317a372f7899
[apple/xnu.git] / bsd / kern / kern_sysctl.c
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*-
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
65 */
66 /*
67 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
68 * support for mandatory and extensible security protections. This notice
69 * is included in support of clause 2.2 (b) of the Apple Public License,
70 * Version 2.0.
71 */
72
73 /*
74 * DEPRECATED sysctl system call code
75 *
76 * Everything in this file is deprecated. Sysctls should be handled
77 * by the code in kern_newsysctl.c.
78 * The remaining "case" sections are supposed to be converted into
79 * SYSCTL_*-style definitions, and as soon as all of them are gone,
80 * this source file is supposed to die.
81 *
82 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
83 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
84 */
85
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/malloc.h>
90 #include <sys/proc_internal.h>
91 #include <sys/kauth.h>
92 #include <sys/file_internal.h>
93 #include <sys/vnode_internal.h>
94 #include <sys/unistd.h>
95 #include <sys/buf.h>
96 #include <sys/ioctl.h>
97 #include <sys/namei.h>
98 #include <sys/tty.h>
99 #include <sys/disklabel.h>
100 #include <sys/vm.h>
101 #include <sys/sysctl.h>
102 #include <sys/user.h>
103 #include <sys/aio_kern.h>
104 #include <sys/reboot.h>
105
106 #include <bsm/audit_kernel.h>
107
108 #include <mach/machine.h>
109 #include <mach/mach_types.h>
110 #include <mach/vm_param.h>
111 #include <kern/task.h>
112 #include <kern/lock.h>
113 #include <vm/vm_kern.h>
114 #include <vm/vm_map.h>
115 #include <mach/host_info.h>
116
117 #include <sys/mount_internal.h>
118 #include <sys/kdebug.h>
119 #include <sys/sysproto.h>
120
121 #include <IOKit/IOPlatformExpert.h>
122 #include <pexpert/pexpert.h>
123
124 #include <machine/machine_routines.h>
125 #include <machine/exec.h>
126
127 #include <vm/vm_protos.h>
128
129 #ifdef __i386__
130 #include <i386/cpuid.h>
131 #endif
132
133 sysctlfn kern_sysctl;
134 #ifdef DEBUG
135 sysctlfn debug_sysctl;
136 #endif
137 extern sysctlfn net_sysctl;
138 extern sysctlfn cpu_sysctl;
139 extern int aio_max_requests;
140 extern int aio_max_requests_per_process;
141 extern int aio_worker_threads;
142 extern int lowpri_IO_window_msecs;
143 extern int lowpri_IO_delay_msecs;
144 extern int nx_enabled;
145 extern int speculative_reads_disabled;
146
147 static void
148 fill_eproc(proc_t p, struct eproc *ep);
149 static void
150 fill_externproc(proc_t p, struct extern_proc *exp);
151 static void
152 fill_user_eproc(proc_t p, struct user_eproc *ep);
153 static void
154 fill_user_proc(proc_t p, struct user_kinfo_proc *kp);
155 static void
156 fill_user_externproc(proc_t p, struct user_extern_proc *exp);
157 extern int
158 kdbg_control(int *name, u_int namelen, user_addr_t where, size_t * sizep);
159 int
160 kdebug_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep, proc_t p);
161 #if NFSCLIENT
162 extern int
163 netboot_root(void);
164 #endif
165 int
166 pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
167 proc_t p);
168 __private_extern__ kern_return_t
169 reset_vmobjectcache(unsigned int val1, unsigned int val2);
170 int
171 sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep);
172 int
173 sysctl_doprof(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
174 user_addr_t newp, size_t newlen);
175 static void
176 fill_proc(proc_t p, struct kinfo_proc *kp);
177 int
178 sysctl_procargs(int *name, u_int namelen, user_addr_t where,
179 size_t *sizep, proc_t cur_proc);
180 static int
181 sysctl_procargs2(int *name, u_int namelen, user_addr_t where, size_t *sizep,
182 proc_t cur_proc);
183 static int
184 sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
185 proc_t cur_proc, int argc_yes);
186 int
187 sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
188 size_t newlen, void *sp, int len);
189
190 static int sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg);
191 static int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg);
192 static int sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg);
193 static int sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg);
194 static int sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg);
195 static int sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg);
196 int sysdoproc_callback(proc_t p, void *arg);
197
198 static int __sysctl_funneled(proc_t p, struct __sysctl_args *uap, register_t *retval);
199
200 extern void IORegistrySetOSBuildVersion(char * build_version);
201
202 static void
203 loadavg32to64(struct loadavg *la32, struct user_loadavg *la64)
204 {
205 la64->ldavg[0] = la32->ldavg[0];
206 la64->ldavg[1] = la32->ldavg[1];
207 la64->ldavg[2] = la32->ldavg[2];
208 la64->fscale = (user_long_t)la32->fscale;
209 }
210
211 /*
212 * Locking and stats
213 */
214 static struct sysctl_lock memlock;
215
216 /* sysctl() syscall */
217 int
218 __sysctl(proc_t p, struct __sysctl_args *uap, register_t *retval)
219 {
220 boolean_t funnel_state;
221 int error;
222
223 funnel_state = thread_funnel_set(kernel_flock, TRUE);
224 error = __sysctl_funneled(p, uap, retval);
225 thread_funnel_set(kernel_flock, funnel_state);
226 return(error);
227 }
228
/*
 * Worker for __sysctl(), called with the kernel funnel held.
 *
 * Copies in the MIB name, performs privilege and MAC checks, optionally
 * wires the user's "old" buffer, dispatches to the legacy per-subtree
 * handler (kern_sysctl / vfs_sysctl / debug_sysctl), and falls back to
 * the new-style userland_sysctl() for anything the legacy handlers
 * report as ENOTSUP (except CTL_VFS).  On success the consumed length
 * is written back through uap->oldlenp.
 *
 * Returns 0 or an errno.  'retval' is unused (sysctl returns its data
 * through the user buffers, not the syscall return register).
 */
static int
__sysctl_funneled(proc_t p, struct __sysctl_args *uap, __unused register_t *retval)
{
	int error, dolock = 1;	/* dolock: wire the old buffer before dispatch */
	size_t savelen = 0, oldlen = 0, newlen;
	sysctlfn *fnp = NULL;	/* legacy handler for name[0], if any */
	int name[CTL_MAXNAME];
	int error1;
	boolean_t memlock_taken = FALSE;
	boolean_t vslock_taken = FALSE;
#if CONFIG_MACF
	kauth_cred_t my_cred;
#endif

	/*
	 * all top-level sysctl names are non-terminal
	 */
	if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)
		return (EINVAL);
	error = copyin(uap->name, &name[0], uap->namelen * sizeof(int));
	if (error)
		return (error);

	AUDIT_ARG(ctlname, name, uap->namelen);

	if (proc_is64bit(p)) {
		/* uap->newlen is a size_t value which grows to 64 bits
		 * when coming from a 64-bit process.  since it's doubtful we'll
		 * have a sysctl newp buffer greater than 4GB we shrink it to size_t
		 */
		newlen = CAST_DOWN(size_t, uap->newlen);
	}
	else {
		newlen = uap->newlen;
	}

	/* CTL_UNSPEC is used to get oid to AUTO_OID */
	/*
	 * Writes into most of CTL_KERN, CTL_HW and CTL_VM require superuser;
	 * the listed KERN_* leaves are explicitly exempt from the check.
	 */
	if (uap->new != USER_ADDR_NULL
	    && ((name[0] == CTL_KERN
		&& !(name[1] == KERN_IPC || name[1] == KERN_PANICINFO || name[1] == KERN_PROCDELAYTERM ||
		     name[1] == KERN_PROC_LOW_PRI_IO || name[1] == KERN_PROCNAME || name[1] == KERN_RAGEVNODE || name[1] == KERN_CHECKOPENEVT))
	    || (name[0] == CTL_HW)
	    || (name[0] == CTL_VM))
	    && (error = suser(kauth_cred_get(), &p->p_acflag)))
		return (error);

	/* XXX: KERN, VFS and DEBUG are handled by their respective functions,
	 * but there is a fallback for all sysctls other than VFS to
	 * userland_sysctl() - KILL THIS! */
	switch (name[0]) {
	case CTL_KERN:
		fnp = kern_sysctl;
		/* only KERN_VNODE/KERN_FILE/KERN_PROC need the old buffer wired */
		if ((name[1] != KERN_VNODE) && (name[1] != KERN_FILE)
			&& (name[1] != KERN_PROC))
			dolock = 0;
		break;
	case CTL_VFS:
		fnp = vfs_sysctl;
		break;
#ifdef DEBUG
	case CTL_DEBUG:
		fnp = debug_sysctl;
		break;
#endif
	default:
		fnp = NULL;
	}

	if (uap->oldlenp != USER_ADDR_NULL) {
		uint64_t oldlen64 = fuulong(uap->oldlenp);

		oldlen = CAST_DOWN(size_t, oldlen64);
		/*
		 * If more than 4G, clamp to 4G - useracc() below will catch
		 * with an EFAULT, if it's actually necessary.
		 */
		if (oldlen64 > 0x00000000ffffffffULL)
			oldlen = 0xffffffffUL;
	}

	if (uap->old != USER_ADDR_NULL) {
		if (!useracc(uap->old, (user_size_t)oldlen, B_WRITE))
			return (EFAULT);
		/*
		 * The kernel debug mechanism does not need to take this lock, and
		 * we don't grab the memlock around calls to KERN_PROC because it is reentrant.
		 * Grabbing the lock for a KERN_PROC sysctl makes a deadlock possible 5024049.
		 */
		if (!((name[1] == KERN_KDEBUG) && (name[2] == KERN_KDGETENTROPY)) &&
		    !(name[1] == KERN_PROC)) {
			MEMLOCK_LOCK();
			memlock_taken = TRUE;
		}

		if (dolock && oldlen) {
			if ((error = vslock(uap->old, (user_size_t)oldlen))) {
				/* failed to wire; undo the memlock before bailing */
				if (memlock_taken == TRUE)
					MEMLOCK_UNLOCK();
				return(error);
			}
			/* remember the wired length for the matching vsunlock */
			savelen = oldlen;
			vslock_taken = TRUE;
		}
	}

#if CONFIG_MACF
	/* MAC policy gets a chance to veto the operation before dispatch */
	my_cred = kauth_cred_proc_ref(p);
	error = mac_system_check_sysctl(
	    my_cred,
	    (int *) name,
	    uap->namelen,
	    uap->old,
	    uap->oldlenp,
	    fnp == kern_sysctl ? 1 : 0,
	    uap->new,
	    newlen
	);
	kauth_cred_unref(&my_cred);
	if (!error) {
#endif
	if (fnp) {
		error = (*fnp)(name + 1, uap->namelen - 1, uap->old,
			       &oldlen, uap->new, newlen, p);
	}
	else
		error = ENOTSUP;
#if CONFIG_MACF
	}
#endif

	if (vslock_taken == TRUE) {
		error1 = vsunlock(uap->old, (user_size_t)savelen, B_WRITE);
		/* don't let an unlock error mask a handler error */
		if (!error)
			error = error1;
	}
	if (memlock_taken == TRUE)
		MEMLOCK_UNLOCK();

	if ( (name[0] != CTL_VFS) && (error == ENOTSUP)) {
		size_t tmp = oldlen;
		boolean_t funnel_state;

		/*
		 * Drop the funnel when calling new sysctl code, which will conditionally
		 * grab the funnel if it really needs to.
		 */
		funnel_state = thread_funnel_set(kernel_flock, FALSE);

		error = userland_sysctl(p, name, uap->namelen, uap->old, &tmp,
		                        1, uap->new, newlen, &oldlen);

		thread_funnel_set(kernel_flock, funnel_state);
	}

	/* ENOMEM still reports the required length back to the caller */
	if ((error) && (error != ENOMEM))
		return (error);

	if (uap->oldlenp != USER_ADDR_NULL)
		error = suulong(uap->oldlenp, oldlen);

	return (error);
}
391
392 /*
393 * Attributes stored in the kernel.
394 */
395 __private_extern__ char corefilename[MAXPATHLEN+1];
396 __private_extern__ int do_coredump;
397 __private_extern__ int sugid_coredump;
398
399 #if COUNT_SYSCALLS
400 __private_extern__ int do_count_syscalls;
401 #endif
402
403 #ifdef INSECURE
404 int securelevel = -1;
405 #else
406 int securelevel;
407 #endif
408
409 static int
410 sysctl_affinity(
411 int *name,
412 u_int namelen,
413 user_addr_t oldBuf,
414 size_t *oldSize,
415 user_addr_t newBuf,
416 __unused size_t newSize,
417 proc_t cur_proc)
418 {
419 if (namelen < 1)
420 return (ENOTSUP);
421
422 if (name[0] == 0 && 1 == namelen) {
423 return sysctl_rdint(oldBuf, oldSize, newBuf,
424 (cur_proc->p_flag & P_AFFINITY) ? 1 : 0);
425 } else if (name[0] == 1 && 2 == namelen) {
426 if (name[1] == 0) {
427 OSBitAndAtomic(~((uint32_t)P_AFFINITY), (UInt32 *)&cur_proc->p_flag);
428 } else {
429 OSBitOrAtomic(P_AFFINITY, (UInt32 *)&cur_proc->p_flag);
430 }
431 return 0;
432 }
433 return (ENOTSUP);
434 }
435
/*
 * kern.translate handler (read-only).
 *
 * name[0] is a pid; reports 1 if that process has P_TRANSLATED set
 * (i.e. it is running translated), else 0.  A caller may only query
 * processes owned by the same effective uid unless it is superuser.
 *
 * Returns EINVAL for a nonexistent pid, EPERM on the ownership check,
 * ENOTSUP for a malformed MIB.
 */
static int
sysctl_translate(
	int *name,
	u_int namelen,
	user_addr_t oldBuf,
	size_t *oldSize,
	user_addr_t newBuf,
	__unused size_t newSize,
	proc_t cur_proc)
{
	proc_t p;
	int istranslated = 0;
	kauth_cred_t my_cred;
	uid_t uid;

	if (namelen != 1)
		return (ENOTSUP);

	/* takes a ref on p; every exit path below must proc_rele() */
	p = proc_find(name[0]);
	if (p == NULL)
		return (EINVAL);

	/* snapshot the target's uid under a cred reference */
	my_cred = kauth_cred_proc_ref(p);
	uid = kauth_cred_getuid(my_cred);
	kauth_cred_unref(&my_cred);
	if ((uid != kauth_cred_getuid(kauth_cred_get()))
	    && suser(kauth_cred_get(), &cur_proc->p_acflag)) {
		proc_rele(p);
		return (EPERM);
	}

	istranslated = (p->p_flag & P_TRANSLATED);
	proc_rele(p);
	return sysctl_rdint(oldBuf, oldSize, newBuf,
	                    (istranslated != 0) ? 1 : 0);
}
472
/*
 * Validate the configured binary-translation handler path for 'arch'
 * and record its fsid/fileid so exec can later recognize the handler.
 *
 * Only CPU_TYPE_POWERPC (the Rosetta ppc handler) is supported; any
 * other arch returns EBADARCH.  The path must resolve to a regular
 * file on a mount that permits execution, otherwise EACCES.
 */
int
set_archhandler(__unused proc_t p, int arch)
{
	int error;
	struct nameidata nd;
	struct vnode_attr va;
	vfs_context_t ctx = vfs_context_current();
	struct exec_archhandler *archhandler;

	switch(arch) {
	case CPU_TYPE_POWERPC:
		archhandler = &exec_archhandler_ppc;
		break;
	default:
		return (EBADARCH);
	}

	/* resolve the handler path; namei returns nd.ni_vp with an iocount */
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE32,
	       CAST_USER_ADDR_T(archhandler->path), ctx);
	error = namei(&nd);
	if (error)
		return (error);
	nameidone(&nd);

	/* Check mount point */
	if ((nd.ni_vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    (nd.ni_vp->v_type != VREG)) {
		vnode_put(nd.ni_vp);
		return (EACCES);
	}

	/* fetch only the attributes needed to identify the file */
	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_fsid);
	VATTR_WANTED(&va, va_fileid);
	error = vnode_getattr(nd.ni_vp, &va, ctx);
	if (error) {
		vnode_put(nd.ni_vp);
		return (error);
	}
	vnode_put(nd.ni_vp);

	/* latch identity; exec compares against these to spot the handler */
	archhandler->fsid = va.va_fsid;
	archhandler->fileid = (u_long)va.va_fileid;
	return 0;
}
518
519 /* XXX remove once Rosetta is rev'ed */
520 /*****************************************************************************/
/*
 * Legacy-style handler for the ppc exec archhandler path.
 *
 * Read: copies out the current handler path (including NUL when a
 * buffer is supplied) and reports the string length (without NUL)
 * through *oldSize.  Write: superuser only; copies in a new path,
 * NUL-terminates it, stores it, and re-validates via set_archhandler().
 */
static int
sysctl_exec_archhandler_ppc(
	__unused int *name,
	__unused u_int namelen,
	user_addr_t oldBuf,
	size_t *oldSize,
	user_addr_t newBuf,
	size_t newSize,
	proc_t p)
{
	int error;
	size_t len;
	/* staging buffer so a failed copyin can't corrupt the live path */
	char handler[sizeof(exec_archhandler_ppc.path)];
	vfs_context_t ctx = vfs_context_current();

	if (oldSize) {
		len = strlen(exec_archhandler_ppc.path) + 1;
		if (oldBuf) {
			if (*oldSize < len)
				return (ENOMEM);
			error = copyout(exec_archhandler_ppc.path, oldBuf, len);
			if (error)
				return (error);
		}
		/* report string length without the terminating NUL */
		*oldSize = len - 1;
	}
	if (newBuf) {
		error = suser(vfs_context_ucred(ctx), &p->p_acflag);
		if (error)
			return (error);
		/* need room for the NUL we append below */
		if (newSize >= sizeof(exec_archhandler_ppc.path))
			return (ENAMETOOLONG);
		error = copyin(newBuf, handler, newSize);
		if (error)
			return (error);
		handler[newSize] = 0;
		strlcpy(exec_archhandler_ppc.path, handler, MAXPATHLEN);
		error = set_archhandler(p, CPU_TYPE_POWERPC);
		if (error)
			return (error);
	}
	return 0;
}
564 /*****************************************************************************/
565
566 static int
567 sysctl_handle_exec_archhandler_ppc(struct sysctl_oid *oidp, void *arg1,
568 int arg2, struct sysctl_req *req)
569 {
570 int error = 0;
571
572 error = sysctl_handle_string(oidp, arg1, arg2, req);
573
574 if (error)
575 goto done;
576
577 if (req->newptr)
578 error = set_archhandler(req->p, CPU_TYPE_POWERPC);
579
580 done:
581 return error;
582
583 }
584
585 SYSCTL_NODE(_kern, KERN_EXEC, exec, CTLFLAG_RD|CTLFLAG_LOCKED, 0, "");
586
587 SYSCTL_NODE(_kern_exec, OID_AUTO, archhandler, CTLFLAG_RD|CTLFLAG_LOCKED, 0, "");
588
589 SYSCTL_PROC(_kern_exec_archhandler, OID_AUTO, powerpc,
590 CTLTYPE_STRING | CTLFLAG_RW, exec_archhandler_ppc.path, 0,
591 sysctl_handle_exec_archhandler_ppc, "A", "");
592
593 extern int get_kernel_symfile(proc_t, char **);
594 __private_extern__ int
595 sysctl_dopanicinfo(int *, u_int, user_addr_t, size_t *, user_addr_t,
596 size_t, proc_t);
597
598 /*
599 * kernel related system variables.
600 */
601 int
602 kern_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
603 user_addr_t newp, size_t newlen, proc_t p)
604 {
605 /* all sysctl names not listed below are terminal at this level */
606 if (namelen != 1
607 && !(name[0] == KERN_PROC
608 || name[0] == KERN_PROF
609 || name[0] == KERN_KDEBUG
610 || name[0] == KERN_PROCARGS
611 || name[0] == KERN_PROCARGS2
612 || name[0] == KERN_IPC
613 || name[0] == KERN_SYSV
614 || name[0] == KERN_AFFINITY
615 || name[0] == KERN_TRANSLATE
616 || name[0] == KERN_EXEC
617 || name[0] == KERN_PANICINFO
618 || name[0] == KERN_POSIX
619 || name[0] == KERN_TFP
620 || name[0] == KERN_TTY
621 #if CONFIG_LCTX
622 || name[0] == KERN_LCTX
623 #endif
624 )
625 )
626 return (ENOTDIR); /* overloaded */
627
628 switch (name[0]) {
629 case KERN_PROC:
630 return (sysctl_doproc(name + 1, namelen - 1, oldp, oldlenp));
631 #ifdef GPROF
632 case KERN_PROF:
633 return (sysctl_doprof(name + 1, namelen - 1, oldp, oldlenp,
634 newp, newlen));
635 #endif
636 case KERN_KDEBUG:
637 return (kdebug_ops(name + 1, namelen - 1, oldp, oldlenp, p));
638 case KERN_PROCARGS:
639 /* new one as it does not use kinfo_proc */
640 return (sysctl_procargs(name + 1, namelen - 1, oldp, oldlenp, p));
641 case KERN_PROCARGS2:
642 /* new one as it does not use kinfo_proc */
643 return (sysctl_procargs2(name + 1, namelen - 1, oldp, oldlenp, p));
644 #if PANIC_INFO
645 case KERN_PANICINFO:
646 return(sysctl_dopanicinfo(name + 1, namelen - 1, oldp, oldlenp,
647 newp, newlen, p));
648 #endif
649 case KERN_AFFINITY:
650 return sysctl_affinity(name+1, namelen-1, oldp, oldlenp,
651 newp, newlen, p);
652 case KERN_TRANSLATE:
653 return sysctl_translate(name+1, namelen-1, oldp, oldlenp, newp,
654 newlen, p);
655
656 /* XXX remove once Rosetta has rev'ed */
657 case KERN_EXEC:
658 return sysctl_exec_archhandler_ppc(name+1, namelen-1, oldp,
659 oldlenp, newp, newlen, p);
660 #if COUNT_SYSCALLS
661 case KERN_COUNT_SYSCALLS:
662 {
663 /* valid values passed in:
664 * = 0 means don't keep called counts for each bsd syscall
665 * > 0 means keep called counts for each bsd syscall
666 * = 2 means dump current counts to the system log
667 * = 3 means reset all counts
668 * for example, to dump current counts:
669 * sysctl -w kern.count_calls=2
670 */
671 error = sysctl_int(oldp, oldlenp, newp, newlen, &tmp);
672 if ( error != 0 ) {
673 return (error);
674 }
675
676 if ( tmp == 1 ) {
677 do_count_syscalls = 1;
678 }
679 else if ( tmp == 0 || tmp == 2 || tmp == 3 ) {
680 extern int nsysent;
681 extern int syscalls_log[];
682 extern const char * syscallnames[];
683 int i;
684 for ( i = 0; i < nsysent; i++ ) {
685 if ( syscalls_log[i] != 0 ) {
686 if ( tmp == 2 ) {
687 printf("%d calls - name %s \n", syscalls_log[i], syscallnames[i]);
688 }
689 else {
690 syscalls_log[i] = 0;
691 }
692 }
693 }
694 if ( tmp != 0 ) {
695 do_count_syscalls = 1;
696 }
697 }
698 return (0);
699 }
700 #endif
701 default:
702 return (ENOTSUP);
703 }
704 /* NOTREACHED */
705 }
706
707 #ifdef DEBUG
708 /*
709 * Debugging related system variables.
710 */
711 #if DIAGNOSTIC
712 extern
713 #endif /* DIAGNOSTIC */
714 struct ctldebug debug0, debug1;
715 struct ctldebug debug2, debug3, debug4;
716 struct ctldebug debug5, debug6, debug7, debug8, debug9;
717 struct ctldebug debug10, debug11, debug12, debug13, debug14;
718 struct ctldebug debug15, debug16, debug17, debug18, debug19;
719 static struct ctldebug *debugvars[CTL_DEBUG_MAXID] = {
720 &debug0, &debug1, &debug2, &debug3, &debug4,
721 &debug5, &debug6, &debug7, &debug8, &debug9,
722 &debug10, &debug11, &debug12, &debug13, &debug14,
723 &debug15, &debug16, &debug17, &debug18, &debug19,
724 };
725 int
726 debug_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
727 user_addr_t newp, size_t newlen, __unused proc_t p)
728 {
729 struct ctldebug *cdp;
730
731 /* all sysctl names at this level are name and field */
732 if (namelen != 2)
733 return (ENOTDIR); /* overloaded */
734 cdp = debugvars[name[0]];
735 if (cdp->debugname == 0)
736 return (ENOTSUP);
737 switch (name[1]) {
738 case CTL_DEBUG_NAME:
739 return (sysctl_rdstring(oldp, oldlenp, newp, cdp->debugname));
740 case CTL_DEBUG_VALUE:
741 return (sysctl_int(oldp, oldlenp, newp, newlen, cdp->debugvar));
742 default:
743 return (ENOTSUP);
744 }
745 /* NOTREACHED */
746 }
747 #endif /* DEBUG */
748
749 /*
750 * The following sysctl_* functions should not be used
751 * any more, as they can only cope with callers in
752 * user mode: Use new-style
753 * sysctl_io_number()
754 * sysctl_io_string()
755 * sysctl_io_opaque()
756 * instead.
757 */
758
759 /*
760 * Validate parameters and get old / set new parameters
761 * for an integer-valued sysctl function.
762 */
763 int
764 sysctl_int(user_addr_t oldp, size_t *oldlenp,
765 user_addr_t newp, size_t newlen, int *valp)
766 {
767 int error = 0;
768
769 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
770 return (EFAULT);
771 if (oldp && *oldlenp < sizeof(int))
772 return (ENOMEM);
773 if (newp && newlen != sizeof(int))
774 return (EINVAL);
775 *oldlenp = sizeof(int);
776 if (oldp)
777 error = copyout(valp, oldp, sizeof(int));
778 if (error == 0 && newp) {
779 error = copyin(newp, valp, sizeof(int));
780 AUDIT_ARG(value, *valp);
781 }
782 return (error);
783 }
784
785 /*
786 * As above, but read-only.
787 */
788 int
789 sysctl_rdint(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, int val)
790 {
791 int error = 0;
792
793 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
794 return (EFAULT);
795 if (oldp && *oldlenp < sizeof(int))
796 return (ENOMEM);
797 if (newp)
798 return (EPERM);
799 *oldlenp = sizeof(int);
800 if (oldp)
801 error = copyout((caddr_t)&val, oldp, sizeof(int));
802 return (error);
803 }
804
805 /*
806 * Validate parameters and get old / set new parameters
807 * for an quad(64bit)-valued sysctl function.
808 */
809 int
810 sysctl_quad(user_addr_t oldp, size_t *oldlenp,
811 user_addr_t newp, size_t newlen, quad_t *valp)
812 {
813 int error = 0;
814
815 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
816 return (EFAULT);
817 if (oldp && *oldlenp < sizeof(quad_t))
818 return (ENOMEM);
819 if (newp && newlen != sizeof(quad_t))
820 return (EINVAL);
821 *oldlenp = sizeof(quad_t);
822 if (oldp)
823 error = copyout(valp, oldp, sizeof(quad_t));
824 if (error == 0 && newp)
825 error = copyin(newp, valp, sizeof(quad_t));
826 return (error);
827 }
828
829 /*
830 * As above, but read-only.
831 */
832 int
833 sysctl_rdquad(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, quad_t val)
834 {
835 int error = 0;
836
837 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
838 return (EFAULT);
839 if (oldp && *oldlenp < sizeof(quad_t))
840 return (ENOMEM);
841 if (newp)
842 return (EPERM);
843 *oldlenp = sizeof(quad_t);
844 if (oldp)
845 error = copyout((caddr_t)&val, oldp, sizeof(quad_t));
846 return (error);
847 }
848
/*
 * Validate parameters and get old / set new parameters
 * for a string-valued sysctl function.  Unlike sysctl_string, if you
 * give it a too small (but larger than 0 bytes) buffer, instead of
 * returning ENOMEM, it truncates the returned string to the buffer
 * size.  This preserves the semantics of some library routines
 * implemented via sysctl, which truncate their returned data, rather
 * than simply returning an error.  The returned string is always NUL
 * terminated.
 */
int
sysctl_trstring(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, char *str, int maxlen)
{
	int len, copylen, error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	copylen = len = strlen(str) + 1;
	if (oldp && (len < 0 || *oldlenp < 1))
		return (ENOMEM);
	/*
	 * Truncation case: the user's buffer is smaller than the string.
	 * NOTE(review): copylen becomes *oldlenp + 1 here, so the copyout
	 * below appears to write one byte more than the caller-declared
	 * buffer length before the NUL is placed at offset *oldlenp —
	 * looks like an off-by-one; confirm against callers before
	 * changing, since the final NUL overwrite lands inside copylen.
	 */
	if (oldp && (*oldlenp < (size_t)len))
		copylen = *oldlenp + 1;
	if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
		return (EINVAL);
	*oldlenp = copylen - 1; /* deal with NULL strings correctly */
	if (oldp) {
		error = copyout(str, oldp, copylen);
		if (!error) {
			unsigned char c = 0;
			/* NUL terminate */
			oldp += *oldlenp;
			error = copyout((void *)&c, oldp, sizeof(char));
		}
	}
	if (error == 0 && newp) {
		error = copyin(newp, str, newlen);
		str[newlen] = 0;
		AUDIT_ARG(text, (char *)str);
	}
	return (error);
}
891
892 /*
893 * Validate parameters and get old / set new parameters
894 * for a string-valued sysctl function.
895 */
896 int
897 sysctl_string(user_addr_t oldp, size_t *oldlenp,
898 user_addr_t newp, size_t newlen, char *str, int maxlen)
899 {
900 int len, error = 0;
901
902 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
903 return (EFAULT);
904 len = strlen(str) + 1;
905 if (oldp && (len < 0 || *oldlenp < (size_t)len))
906 return (ENOMEM);
907 if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
908 return (EINVAL);
909 *oldlenp = len -1; /* deal with NULL strings correctly */
910 if (oldp) {
911 error = copyout(str, oldp, len);
912 }
913 if (error == 0 && newp) {
914 error = copyin(newp, str, newlen);
915 str[newlen] = 0;
916 AUDIT_ARG(text, (char *)str);
917 }
918 return (error);
919 }
920
921 /*
922 * As above, but read-only.
923 */
924 int
925 sysctl_rdstring(user_addr_t oldp, size_t *oldlenp,
926 user_addr_t newp, char *str)
927 {
928 int len, error = 0;
929
930 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
931 return (EFAULT);
932 len = strlen(str) + 1;
933 if (oldp && *oldlenp < (size_t)len)
934 return (ENOMEM);
935 if (newp)
936 return (EPERM);
937 *oldlenp = len;
938 if (oldp)
939 error = copyout(str, oldp, len);
940 return (error);
941 }
942
943 /*
944 * Validate parameters and get old / set new parameters
945 * for a structure oriented sysctl function.
946 */
947 int
948 sysctl_struct(user_addr_t oldp, size_t *oldlenp,
949 user_addr_t newp, size_t newlen, void *sp, int len)
950 {
951 int error = 0;
952
953 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
954 return (EFAULT);
955 if (oldp && (len < 0 || *oldlenp < (size_t)len))
956 return (ENOMEM);
957 if (newp && (len < 0 || newlen > (size_t)len))
958 return (EINVAL);
959 if (oldp) {
960 *oldlenp = len;
961 error = copyout(sp, oldp, len);
962 }
963 if (error == 0 && newp)
964 error = copyin(newp, sp, len);
965 return (error);
966 }
967
968 /*
969 * Validate parameters and get old parameters
970 * for a structure oriented sysctl function.
971 */
972 int
973 sysctl_rdstruct(user_addr_t oldp, size_t *oldlenp,
974 user_addr_t newp, void *sp, int len)
975 {
976 int error = 0;
977
978 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
979 return (EFAULT);
980 if (oldp && (len < 0 || *oldlenp < (size_t)len))
981 return (ENOMEM);
982 if (newp)
983 return (EPERM);
984 *oldlenp = len;
985 if (oldp)
986 error = copyout(sp, oldp, len);
987 return (error);
988 }
989
/*
 * Get file structures (kern.file).
 *
 * Size probe (oldptr == NULL): returns an over-estimate of the space
 * needed — sizeof(filehead) plus room for nfiles + 10 records.
 * Otherwise copies out filehead followed by one struct extern_file
 * per fileglob on the global list.
 *
 * NOTE(review): no lock is visibly taken around the filehead
 * traversal here — presumably serialized by the funnel or a caller-
 * held lock; verify before relying on this under concurrency.
 */
static int
sysctl_file
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error;
	struct fileglob *fg;
	struct extern_file nef;

	if (req->oldptr == USER_ADDR_NULL) {
		/*
		 * overestimate by 10 files
		 */
		req->oldidx = sizeof(filehead) + (nfiles + 10) * sizeof(struct extern_file);
		return (0);
	}

	/*
	 * first copyout filehead
	 */
	error = SYSCTL_OUT(req, &filehead, sizeof(filehead));
	if (error)
		return (error);

	/*
	 * followed by an array of file structures
	 */
	for (fg = filehead.lh_first; fg != 0; fg = fg->f_list.le_next) {
		/* translate the kernel fileglob into the exported layout */
		nef.f_list.le_next = (struct extern_file *)fg->f_list.le_next;
		nef.f_list.le_prev = (struct extern_file **)fg->f_list.le_prev;
		nef.f_flag = (fg->fg_flag & FMASK);
		nef.f_type = fg->fg_type;
		nef.f_count = fg->fg_count;
		nef.f_msgcount = fg->fg_msgcount;
		nef.f_cred = fg->fg_cred;
		nef.f_ops = fg->fg_ops;
		nef.f_offset = fg->fg_offset;
		nef.f_data = fg->fg_data;
		error = SYSCTL_OUT(req, &nef, sizeof(nef));
		if (error)
			return (error);
	}
	return (0);
}
1036
1037 SYSCTL_PROC(_kern, KERN_FILE, file,
1038 CTLTYPE_STRUCT | CTLFLAG_RW,
1039 0, 0, sysctl_file, "S,filehead", "");
1040
1041 static int
1042 sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg)
1043 {
1044 if (p->p_pid != (pid_t)arg)
1045 return(0);
1046 else
1047 return(1);
1048 }
1049
1050 static int
1051 sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg)
1052 {
1053 if (p->p_pgrpid != (pid_t)arg)
1054 return(0);
1055 else
1056 return(1);
1057 }
1058
1059 static int
1060 sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg)
1061 {
1062 boolean_t funnel_state;
1063 int retval;
1064
1065
1066 funnel_state = thread_funnel_set(kernel_flock, TRUE);
1067 /* This is very racy but list lock is held.. Hmmm. */
1068 if ((p->p_flag & P_CONTROLT) == 0 ||
1069 (p->p_pgrp == NULL) || (p->p_pgrp->pg_session == NULL) ||
1070 p->p_pgrp->pg_session->s_ttyp == NULL ||
1071 p->p_pgrp->pg_session->s_ttyp->t_dev != (dev_t)arg)
1072 retval = 0;
1073 else
1074 retval = 1;
1075
1076 thread_funnel_set(kernel_flock, funnel_state);
1077
1078 return(retval);
1079 }
1080
1081 static int
1082 sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg)
1083 {
1084 kauth_cred_t my_cred;
1085 uid_t uid;
1086
1087 if (p->p_ucred == NULL)
1088 return(0);
1089 my_cred = kauth_cred_proc_ref(p);
1090 uid = kauth_cred_getuid(my_cred);
1091 kauth_cred_unref(&my_cred);
1092
1093 if (uid != (uid_t)arg)
1094 return(0);
1095 else
1096 return(1);
1097 }
1098
1099
1100 static int
1101 sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg)
1102 {
1103 kauth_cred_t my_cred;
1104 uid_t ruid;
1105
1106 if (p->p_ucred == NULL)
1107 return(0);
1108 my_cred = kauth_cred_proc_ref(p);
1109 ruid = my_cred->cr_ruid;
1110 kauth_cred_unref(&my_cred);
1111
1112 if (ruid != (uid_t)arg)
1113 return(0);
1114 else
1115 return(1);
1116 }
1117
1118 static int
1119 sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg)
1120 {
1121 if ((p->p_lctx == NULL) ||
1122 (p->p_lctx->lc_id != (pid_t)arg))
1123 return(0);
1124 else
1125 return(1);
1126 }
1127
1128 /*
1129 * try over estimating by 5 procs
1130 */
1131 #define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
1132 struct sysdoproc_args {
1133 int buflen;
1134 caddr_t kprocp;
1135 boolean_t is_64_bit;
1136 user_addr_t dp;
1137 size_t needed;
1138 int sizeof_kproc;
1139 int * errorp;
1140 int uidcheck;
1141 int ruidcheck;
1142 int ttycheck;
1143 int uidval;
1144 };
1145
1146 int
1147 sysdoproc_callback(proc_t p, void * arg)
1148 {
1149 struct sysdoproc_args * args = (struct sysdoproc_args *)arg;
1150 int error=0;
1151
1152 if (args->buflen >= args->sizeof_kproc) {
1153 if ((args->ruidcheck != 0) && (sysdoproc_filt_KERN_PROC_RUID(p, (void *)args->uidval) == 0))
1154 return(PROC_RETURNED);
1155 if ((args->uidcheck != 0) && (sysdoproc_filt_KERN_PROC_UID(p, (void *)args->uidval) == 0))
1156 return(PROC_RETURNED);
1157 if ((args->ttycheck != 0) && (sysdoproc_filt_KERN_PROC_TTY(p, (void *)args->uidval) == 0))
1158 return(PROC_RETURNED);
1159
1160 bzero(args->kprocp, args->sizeof_kproc);
1161 if (args->is_64_bit) {
1162 fill_user_proc(p, (struct user_kinfo_proc *) args->kprocp);
1163 }
1164 else {
1165 fill_proc(p, (struct kinfo_proc *) args->kprocp);
1166 }
1167 error = copyout(args->kprocp, args->dp, args->sizeof_kproc);
1168 if (error) {
1169 *args->errorp = error;
1170 return(PROC_RETURNED_DONE);
1171 return (error);
1172 }
1173 args->dp += args->sizeof_kproc;
1174 args->buflen -= args->sizeof_kproc;
1175 }
1176 args->needed += args->sizeof_kproc;
1177 return(PROC_RETURNED);
1178 }
1179
1180 int
1181 sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep)
1182 {
1183 user_addr_t dp = where;
1184 size_t needed = 0;
1185 int buflen = where != USER_ADDR_NULL ? *sizep : 0;
1186 int error = 0;
1187 boolean_t is_64_bit = FALSE;
1188 struct kinfo_proc kproc;
1189 struct user_kinfo_proc user_kproc;
1190 int sizeof_kproc;
1191 caddr_t kprocp;
1192 int (*filterfn)(proc_t, void *) = 0;
1193 struct sysdoproc_args args;
1194 int uidcheck = 0;
1195 int ruidcheck = 0;
1196 int ttycheck = 0;
1197
1198 if (namelen != 2 && !(namelen == 1 && name[0] == KERN_PROC_ALL))
1199 return (EINVAL);
1200 is_64_bit = proc_is64bit(current_proc());
1201 if (is_64_bit) {
1202 sizeof_kproc = sizeof(user_kproc);
1203 kprocp = (caddr_t) &user_kproc;
1204 }
1205 else {
1206 sizeof_kproc = sizeof(kproc);
1207 kprocp = (caddr_t) &kproc;
1208 }
1209
1210
1211 switch (name[0]) {
1212
1213 case KERN_PROC_PID:
1214 filterfn = sysdoproc_filt_KERN_PROC_PID;
1215 break;
1216
1217 case KERN_PROC_PGRP:
1218 filterfn = sysdoproc_filt_KERN_PROC_PGRP;
1219 break;
1220
1221 case KERN_PROC_TTY:
1222 ttycheck = 1;
1223 break;
1224
1225 case KERN_PROC_UID:
1226 uidcheck = 1;
1227 break;
1228
1229 case KERN_PROC_RUID:
1230 ruidcheck = 1;
1231 break;
1232
1233 #if CONFIG_LCTX
1234 case KERN_PROC_LCID:
1235 filterfn = sysdoproc_filt_KERN_PROC_LCID;
1236 break;
1237 #endif
1238 }
1239
1240 error = 0;
1241 args.buflen = buflen;
1242 args.kprocp = kprocp;
1243 args.is_64_bit = is_64_bit;
1244 args.dp = dp;
1245 args.needed = needed;
1246 args.errorp = &error;
1247 args.uidcheck = uidcheck;
1248 args.ruidcheck = ruidcheck;
1249 args.ttycheck = ttycheck;
1250 args.sizeof_kproc = sizeof_kproc;
1251 args.uidval = name[1];
1252
1253 proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST), sysdoproc_callback, &args, filterfn, (void *)name[1]);
1254
1255 if (error)
1256 return(error);
1257
1258 dp = args.dp;
1259 needed = args.needed;
1260
1261 if (where != USER_ADDR_NULL) {
1262 *sizep = dp - where;
1263 if (needed > *sizep)
1264 return (ENOMEM);
1265 } else {
1266 needed += KERN_PROCSLOP;
1267 *sizep = needed;
1268 }
1269 return (0);
1270 }
1271
1272 /*
1273 * Fill in an eproc structure for the specified process.
1274 */
/*
 * NOTE(review): assumes *ep was pre-zeroed by the caller (see the bzero
 * in sysdoproc_callback); e_flag is only assigned on the CTTY path and
 * OR-ed on the session-leader path below.
 */
static void
fill_eproc(proc_t p, struct eproc *ep)
{
	struct tty *tp;
	kauth_cred_t my_cred;
	struct pgrp * pg;
	struct session * sessp;

	/* Both lookups take references that are dropped at the end. */
	pg = proc_pgrp(p);
	sessp = proc_session(p);

	ep->e_paddr = p;

	if (pg != PGRP_NULL) {
		ep->e_sess = sessp;
		ep->e_pgid = p->p_pgrpid;
		ep->e_jobc = pg->pg_jobc;
		if ((sessp != SESSION_NULL) && sessp->s_ttyvp)
			ep->e_flag = EPROC_CTTY;
	} else {
		ep->e_sess = (struct session *)0;
		ep->e_pgid = 0;
		ep->e_jobc = 0;
	}
#if CONFIG_LCTX
	if (p->p_lctx) {
		ep->e_lcid = p->p_lctx->lc_id;
	} else {
		ep->e_lcid = 0;
	}
#endif
	ep->e_ppid = p->p_ppid;
	/* Pre-zero the fake historical pcred */
	bzero(&ep->e_pcred, sizeof(struct _pcred));
	if (p->p_ucred) {
		my_cred = kauth_cred_proc_ref(p);

		/* A fake historical pcred */
		ep->e_pcred.p_ruid = my_cred->cr_ruid;
		ep->e_pcred.p_svuid = my_cred->cr_svuid;
		ep->e_pcred.p_rgid = my_cred->cr_rgid;
		ep->e_pcred.p_svgid = my_cred->cr_svgid;
		/* A fake historical *kauth_cred_t */
		ep->e_ucred.cr_ref = my_cred->cr_ref;
		ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
		ep->e_ucred.cr_ngroups = my_cred->cr_ngroups;
		bcopy(my_cred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));

		kauth_cred_unref(&my_cred);
	}
	/* No VM stats are reported for embryonic or zombie processes. */
	if (p->p_stat == SIDL || p->p_stat == SZOMB) {
		ep->e_vm.vm_tsize = 0;
		ep->e_vm.vm_dsize = 0;
		ep->e_vm.vm_ssize = 0;
	}
	ep->e_vm.vm_rssize = 0;

	/* Controlling terminal info, when the process has one. */
	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
	     (tp = sessp->s_ttyp)) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = sessp->s_ttypgrpid;
		ep->e_tsess = tp->t_session;
	} else
		ep->e_tdev = NODEV;

	if (SESS_LEADER(p, sessp))
		ep->e_flag |= EPROC_SLEADER;
	/* No wait-channel message is captured for this snapshot. */
	bzero(&ep->e_wmesg[0], WMESGLEN+1);
	ep->e_xsize = ep->e_xrssize = 0;
	ep->e_xccount = ep->e_xswrss = 0;
	if (sessp != SESSION_NULL)
		session_rele(sessp);
	if(pg != PGRP_NULL)
		pg_rele(pg);
}
1350
1351 /*
1352 * Fill in an LP64 version of eproc structure for the specified process.
1353 */
static void
fill_user_eproc(proc_t p, struct user_eproc *ep)
{
	struct tty *tp;
	struct session *sessp = NULL;
	struct pgrp * pg;
	kauth_cred_t my_cred;

	/* Both lookups take references that are dropped at the end. */
	pg = proc_pgrp(p);
	sessp = proc_session(p);

	/* Kernel pointers are widened to user_addr_t for the LP64 layout. */
	ep->e_paddr = CAST_USER_ADDR_T(p);
	if (pg != PGRP_NULL) {
		ep->e_sess = CAST_USER_ADDR_T(sessp);
		ep->e_pgid = p->p_pgrpid;
		ep->e_jobc = pg->pg_jobc;
		if (sessp != SESSION_NULL) {
			if (sessp->s_ttyvp)
				ep->e_flag = EPROC_CTTY;
		}
	} else {
		ep->e_sess = USER_ADDR_NULL;
		ep->e_pgid = 0;
		ep->e_jobc = 0;
	}
#if CONFIG_LCTX
	if (p->p_lctx) {
		ep->e_lcid = p->p_lctx->lc_id;
	} else {
		ep->e_lcid = 0;
	}
#endif
	ep->e_ppid = p->p_ppid;
	/* Pre-zero the fake historical pcred */
	bzero(&ep->e_pcred, sizeof(ep->e_pcred));
	if (p->p_ucred) {
		my_cred = kauth_cred_proc_ref(p);

		/* A fake historical pcred */
		ep->e_pcred.p_ruid = my_cred->cr_ruid;
		ep->e_pcred.p_svuid = my_cred->cr_svuid;
		ep->e_pcred.p_rgid = my_cred->cr_rgid;
		ep->e_pcred.p_svgid = my_cred->cr_svgid;

		/* A fake historical *kauth_cred_t */
		ep->e_ucred.cr_ref = my_cred->cr_ref;
		ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
		ep->e_ucred.cr_ngroups = my_cred->cr_ngroups;
		bcopy(my_cred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));

		kauth_cred_unref(&my_cred);
	}
	/* No VM stats are reported for embryonic or zombie processes. */
	if (p->p_stat == SIDL || p->p_stat == SZOMB) {
		ep->e_vm.vm_tsize = 0;
		ep->e_vm.vm_dsize = 0;
		ep->e_vm.vm_ssize = 0;
	}
	ep->e_vm.vm_rssize = 0;

	/* Controlling terminal info, when the process has one. */
	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
	     (tp = sessp->s_ttyp)) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = sessp->s_ttypgrpid;
		ep->e_tsess = CAST_USER_ADDR_T(tp->t_session);
	} else
		ep->e_tdev = NODEV;

	if (SESS_LEADER(p, sessp))
		ep->e_flag |= EPROC_SLEADER;
	/* No wait-channel message is captured for this snapshot. */
	bzero(&ep->e_wmesg[0], WMESGLEN+1);
	ep->e_xsize = ep->e_xrssize = 0;
	ep->e_xccount = ep->e_xswrss = 0;
	if (sessp != SESSION_NULL)
		session_rele(sessp);
	if (pg != PGRP_NULL)
		pg_rele(pg);
}
1431
1432 /*
1433 * Fill in an eproc structure for the specified process.
1434 */
static void
fill_externproc(proc_t p, struct extern_proc *exp)
{
	/* Legacy userland snapshot: fields with no current kernel
	 * counterpart are zeroed / NULLed below. */
	exp->p_forw = exp->p_back = NULL;
	exp->p_starttime = p->p_start;
	exp->p_vmspace = NULL;
	exp->p_sigacts = p->p_sigacts;
	exp->p_flag = p->p_flag;
	/* Translate internal p_lflag bits back to historical p_flag names. */
	if (p->p_lflag & P_LTRACED)
		exp->p_flag |= P_TRACED;
	if (p->p_lflag & P_LPPWAIT)
		exp->p_flag |= P_PPWAIT;
	if (p->p_lflag & P_LEXIT)
		exp->p_flag |= P_WEXIT;
	exp->p_stat = p->p_stat ;
	exp->p_pid = p->p_pid ;
	exp->p_oppid = p->p_oppid ;
	/* Mach related  */
	exp->user_stack = CAST_DOWN(caddr_t, p->user_stack);
	exp->exit_thread = p->exit_thread ;
	exp->p_debugger = p->p_debugger ;
	exp->sigwait = p->sigwait ;
	/* scheduling */
#ifdef _PROC_HAS_SCHEDINFO_
	exp->p_estcpu = p->p_estcpu ;
	exp->p_pctcpu = p->p_pctcpu ;
	exp->p_slptime = p->p_slptime ;
#else
	exp->p_estcpu = 0 ;
	exp->p_pctcpu = 0 ;
	exp->p_slptime = 0 ;
#endif
	exp->p_cpticks = 0 ;
	exp->p_wchan = 0 ;
	exp->p_wmesg = 0 ;
	exp->p_swtime = 0 ;
	bcopy(&p->p_realtimer, &exp->p_realtimer,sizeof(struct itimerval));
	bcopy(&p->p_rtime, &exp->p_rtime,sizeof(struct timeval));
	exp->p_uticks = 0 ;
	exp->p_sticks = 0 ;
	exp->p_iticks = 0 ;
	exp->p_traceflag = 0;
	exp->p_tracep = 0 ;
	exp->p_siglist = 0 ;		/* No longer relevant */
	exp->p_textvp = p->p_textvp ;
	exp->p_holdcnt = 0 ;
	exp->p_sigmask = 0 ;		/* no longer avaialable */
	exp->p_sigignore = p->p_sigignore ;
	exp->p_sigcatch = p->p_sigcatch ;
	exp->p_priority = p->p_priority ;
	exp->p_usrpri = 0 ;
	exp->p_nice = p->p_nice ;
	/* p_comm is copied without its terminator; NUL-terminate explicitly. */
	bcopy(&p->p_comm, &exp->p_comm,MAXCOMLEN);
	exp->p_comm[MAXCOMLEN] = '\0';
	exp->p_pgrp = p->p_pgrp ;
	exp->p_addr = NULL;
	exp->p_xstat = p->p_xstat ;
	exp->p_acflag = p->p_acflag ;
	exp->p_ru = p->p_ru ;		/* XXX may be NULL */
}
1495
1496 /*
1497 * Fill in an LP64 version of extern_proc structure for the specified process.
1498 */
static void
fill_user_externproc(proc_t p, struct user_extern_proc *exp)
{
	/* LP64 flavor of fill_externproc(); kernel pointers are widened
	 * to user_addr_t and timevals copied field-by-field. */
	exp->p_forw = exp->p_back = USER_ADDR_NULL;
	exp->p_starttime.tv_sec = p->p_start.tv_sec;
	exp->p_starttime.tv_usec = p->p_start.tv_usec;
	exp->p_vmspace = USER_ADDR_NULL;
	exp->p_sigacts = CAST_USER_ADDR_T(p->p_sigacts);
	exp->p_flag = p->p_flag;
	/* Translate internal p_lflag bits back to historical p_flag names. */
	if (p->p_lflag & P_LTRACED)
		exp->p_flag |= P_TRACED;
	if (p->p_lflag & P_LPPWAIT)
		exp->p_flag |= P_PPWAIT;
	if (p->p_lflag & P_LEXIT)
		exp->p_flag |= P_WEXIT;
	exp->p_stat = p->p_stat ;
	exp->p_pid = p->p_pid ;
	exp->p_oppid = p->p_oppid ;
	/* Mach related  */
	exp->user_stack = p->user_stack;
	exp->exit_thread = CAST_USER_ADDR_T(p->exit_thread);
	exp->p_debugger = p->p_debugger ;
	exp->sigwait = p->sigwait ;
	/* scheduling */
#ifdef _PROC_HAS_SCHEDINFO_
	exp->p_estcpu = p->p_estcpu ;
	exp->p_pctcpu = p->p_pctcpu ;
	exp->p_slptime = p->p_slptime ;
#else
	exp->p_estcpu = 0 ;
	exp->p_pctcpu = 0 ;
	exp->p_slptime = 0 ;
#endif
	exp->p_cpticks = 0 ;
	exp->p_wchan = 0;
	exp->p_wmesg = 0;
	exp->p_swtime = 0 ;
	exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
	exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;
	exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
	exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;
	exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
	exp->p_rtime.tv_usec = p->p_rtime.tv_usec;
	exp->p_uticks = 0 ;
	exp->p_sticks = 0 ;
	exp->p_iticks = 0 ;
	exp->p_traceflag = 0 ;
	exp->p_tracep = 0;
	exp->p_siglist = 0 ;		/* No longer relevant */
	exp->p_textvp = CAST_USER_ADDR_T(p->p_textvp);
	exp->p_holdcnt = 0 ;
	exp->p_sigmask = 0 ;		/* no longer avaialable */
	exp->p_sigignore = p->p_sigignore ;
	exp->p_sigcatch = p->p_sigcatch ;
	exp->p_priority = p->p_priority ;
	exp->p_usrpri = 0 ;
	exp->p_nice = p->p_nice ;
	/* p_comm is copied without its terminator; NUL-terminate explicitly. */
	bcopy(&p->p_comm, &exp->p_comm,MAXCOMLEN);
	exp->p_comm[MAXCOMLEN] = '\0';
	exp->p_pgrp = CAST_USER_ADDR_T(p->p_pgrp);
	exp->p_addr = USER_ADDR_NULL;
	exp->p_xstat = p->p_xstat ;
	exp->p_acflag = p->p_acflag ;
	exp->p_ru = CAST_USER_ADDR_T(p->p_ru);		/* XXX may be NULL */
}
1564
/* Populate a 32-bit kinfo_proc: legacy proc fields plus the eproc extras. */
static void
fill_proc(proc_t p, struct kinfo_proc *kp)
{
	fill_externproc(p, &kp->kp_proc);
	fill_eproc(p, &kp->kp_eproc);
}
1571
/* Populate an LP64 kinfo_proc: legacy proc fields plus the eproc extras. */
static void
fill_user_proc(proc_t p, struct user_kinfo_proc *kp)
{
	fill_user_externproc(p, &kp->kp_proc);
	fill_user_eproc(p, &kp->kp_eproc);
}
1578
1579 int
1580 kdebug_ops(int *name, u_int namelen, user_addr_t where,
1581 size_t *sizep, proc_t p)
1582 {
1583 int ret=0;
1584
1585 ret = suser(kauth_cred_get(), &p->p_acflag);
1586 if (ret)
1587 return(ret);
1588
1589 switch(name[0]) {
1590 case KERN_KDEFLAGS:
1591 case KERN_KDDFLAGS:
1592 case KERN_KDENABLE:
1593 case KERN_KDGETBUF:
1594 case KERN_KDSETUP:
1595 case KERN_KDREMOVE:
1596 case KERN_KDSETREG:
1597 case KERN_KDGETREG:
1598 case KERN_KDREADTR:
1599 case KERN_KDPIDTR:
1600 case KERN_KDTHRMAP:
1601 case KERN_KDPIDEX:
1602 case KERN_KDSETRTCDEC:
1603 case KERN_KDSETBUF:
1604 case KERN_KDGETENTROPY:
1605 ret = kdbg_control(name, namelen, where, sizep);
1606 break;
1607 default:
1608 ret= ENOTSUP;
1609 break;
1610 }
1611 return(ret);
1612 }
1613
1614
1615 /*
1616 * Return the top *sizep bytes of the user stack, or the entire area of the
1617 * user stack down through the saved exec_path, whichever is smaller.
1618 */
int
sysctl_procargs(int *name, u_int namelen, user_addr_t where,
				size_t *sizep, proc_t cur_proc)
{
	/* argc_yes == 0: legacy KERN_PROCARGS layout (no leading argc word). */
	return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 0);
}
1625
/* KERN_PROCARGS2: like sysctl_procargs(), but prefixes the data with argc. */
static int
sysctl_procargs2(int *name, u_int namelen, user_addr_t where,
				size_t *sizep, proc_t cur_proc)
{
	/* argc_yes == 1: first word of the output buffer is the argc count. */
	return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 1);
}
1632
1633 static int
1634 sysctl_procargsx(int *name, __unused u_int namelen, user_addr_t where,
1635 size_t *sizep, proc_t cur_proc, int argc_yes)
1636 {
1637 proc_t p;
1638 int buflen = where != USER_ADDR_NULL ? *sizep : 0;
1639 int error = 0;
1640 struct _vm_map *proc_map;
1641 struct task * task;
1642 vm_map_copy_t tmp;
1643 user_addr_t arg_addr;
1644 size_t arg_size;
1645 caddr_t data;
1646 size_t argslen=0;
1647 int size;
1648 vm_offset_t copy_start, copy_end;
1649 kern_return_t ret;
1650 int pid;
1651 kauth_cred_t my_cred;
1652 uid_t uid;
1653
1654 if (argc_yes)
1655 buflen -= sizeof(int); /* reserve first word to return argc */
1656
1657 /* we only care about buflen when where (oldp from sysctl) is not NULL. */
1658 /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
1659 /* is not NULL then the caller wants us to return the length needed to */
1660 /* hold the data we would return */
1661 if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
1662 return(EINVAL);
1663 }
1664 arg_size = buflen;
1665
1666 /*
1667 * Lookup process by pid
1668 */
1669 pid = name[0];
1670 p = proc_find(pid);
1671 if (p == NULL) {
1672 return(EINVAL);
1673 }
1674
1675 /*
1676 * Copy the top N bytes of the stack.
1677 * On all machines we have so far, the stack grows
1678 * downwards.
1679 *
1680 * If the user expects no more than N bytes of
1681 * argument list, use that as a guess for the
1682 * size.
1683 */
1684
1685 if (!p->user_stack) {
1686 proc_rele(p);
1687 return(EINVAL);
1688 }
1689
1690 if (where == USER_ADDR_NULL) {
1691 /* caller only wants to know length of proc args data */
1692 if (sizep == NULL) {
1693 proc_rele(p);
1694 return(EFAULT);
1695 }
1696
1697 size = p->p_argslen;
1698 proc_rele(p);
1699 if (argc_yes) {
1700 size += sizeof(int);
1701 }
1702 else {
1703 /*
1704 * old PROCARGS will return the executable's path and plus some
1705 * extra space for work alignment and data tags
1706 */
1707 size += PATH_MAX + (6 * sizeof(int));
1708 }
1709 size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
1710 *sizep = size;
1711 return (0);
1712 }
1713
1714 my_cred = kauth_cred_proc_ref(p);
1715 uid = kauth_cred_getuid(my_cred);
1716 kauth_cred_unref(&my_cred);
1717
1718 if ((uid != kauth_cred_getuid(kauth_cred_get()))
1719 && suser(kauth_cred_get(), &cur_proc->p_acflag)) {
1720 proc_rele(p);
1721 return (EINVAL);
1722 }
1723
1724 if ((u_int)arg_size > p->p_argslen)
1725 arg_size = round_page(p->p_argslen);
1726
1727 arg_addr = p->user_stack - arg_size;
1728
1729
1730 /*
1731 * Before we can block (any VM code), make another
1732 * reference to the map to keep it alive. We do
1733 * that by getting a reference on the task itself.
1734 */
1735 task = p->task;
1736 if (task == NULL) {
1737 proc_rele(p);
1738 return(EINVAL);
1739 }
1740
1741 argslen = p->p_argslen;
1742 /*
1743 * Once we have a task reference we can convert that into a
1744 * map reference, which we will use in the calls below. The
1745 * task/process may change its map after we take this reference
1746 * (see execve), but the worst that will happen then is a return
1747 * of stale info (which is always a possibility).
1748 */
1749 task_reference(task);
1750 proc_rele(p);
1751 proc_map = get_task_map_reference(task);
1752 task_deallocate(task);
1753
1754 if (proc_map == NULL)
1755 return(EINVAL);
1756
1757
1758 ret = kmem_alloc(kernel_map, &copy_start, round_page(arg_size));
1759 if (ret != KERN_SUCCESS) {
1760 vm_map_deallocate(proc_map);
1761 return(ENOMEM);
1762 }
1763
1764 copy_end = round_page(copy_start + arg_size);
1765
1766 if( vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
1767 (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
1768 vm_map_deallocate(proc_map);
1769 kmem_free(kernel_map, copy_start,
1770 round_page(arg_size));
1771 return (EIO);
1772 }
1773
1774 /*
1775 * Now that we've done the copyin from the process'
1776 * map, we can release the reference to it.
1777 */
1778 vm_map_deallocate(proc_map);
1779
1780 if( vm_map_copy_overwrite(kernel_map,
1781 (vm_map_address_t)copy_start,
1782 tmp, FALSE) != KERN_SUCCESS) {
1783 kmem_free(kernel_map, copy_start,
1784 round_page(arg_size));
1785 return (EIO);
1786 }
1787
1788 if (arg_size > argslen) {
1789 data = (caddr_t) (copy_end - argslen);
1790 size = argslen;
1791 } else {
1792 data = (caddr_t) (copy_end - arg_size);
1793 size = arg_size;
1794 }
1795
1796 if (argc_yes) {
1797 /* Put processes argc as the first word in the copyout buffer */
1798 suword(where, p->p_argc);
1799 error = copyout(data, (where + sizeof(int)), size);
1800 size += sizeof(int);
1801 } else {
1802 error = copyout(data, where, size);
1803
1804 /*
1805 * Make the old PROCARGS work to return the executable's path
1806 * But, only if there is enough space in the provided buffer
1807 *
1808 * on entry: data [possibily] points to the beginning of the path
1809 *
1810 * Note: we keep all pointers&sizes aligned to word boundries
1811 */
1812 if ( (! error) && (buflen > 0 && (u_int)buflen > argslen) )
1813 {
1814 int binPath_sz, alignedBinPath_sz = 0;
1815 int extraSpaceNeeded, addThis;
1816 user_addr_t placeHere;
1817 char * str = (char *) data;
1818 int max_len = size;
1819
1820 /* Some apps are really bad about messing up their stacks
1821 So, we have to be extra careful about getting the length
1822 of the executing binary. If we encounter an error, we bail.
1823 */
1824
1825 /* Limit ourselves to PATH_MAX paths */
1826 if ( max_len > PATH_MAX ) max_len = PATH_MAX;
1827
1828 binPath_sz = 0;
1829
1830 while ( (binPath_sz < max_len-1) && (*str++ != 0) )
1831 binPath_sz++;
1832
1833 /* If we have a NUL terminator, copy it, too */
1834 if (binPath_sz < max_len-1) binPath_sz += 1;
1835
1836 /* Pre-Flight the space requiremnts */
1837
1838 /* Account for the padding that fills out binPath to the next word */
1839 alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;
1840
1841 placeHere = where + size;
1842
1843 /* Account for the bytes needed to keep placeHere word aligned */
1844 addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;
1845
1846 /* Add up all the space that is needed */
1847 extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));
1848
1849 /* is there is room to tack on argv[0]? */
1850 if ( (buflen & ~(sizeof(int)-1)) >= ( argslen + extraSpaceNeeded ))
1851 {
1852 placeHere += addThis;
1853 suword(placeHere, 0);
1854 placeHere += sizeof(int);
1855 suword(placeHere, 0xBFFF0000);
1856 placeHere += sizeof(int);
1857 suword(placeHere, 0);
1858 placeHere += sizeof(int);
1859 error = copyout(data, placeHere, binPath_sz);
1860 if ( ! error )
1861 {
1862 placeHere += binPath_sz;
1863 suword(placeHere, 0);
1864 size += extraSpaceNeeded;
1865 }
1866 }
1867 }
1868 }
1869
1870 if (copy_start != (vm_offset_t) 0) {
1871 kmem_free(kernel_map, copy_start, copy_end - copy_start);
1872 }
1873 if (error) {
1874 return(error);
1875 }
1876
1877 if (where != USER_ADDR_NULL)
1878 *sizep = size;
1879 return (0);
1880 }
1881
1882
1883 /*
1884 * Max number of concurrent aio requests
1885 */
1886 static int
1887 sysctl_aiomax
1888 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1889 {
1890 int new_value, changed;
1891 int error = sysctl_io_number(req, aio_max_requests, sizeof(int), &new_value, &changed);
1892 if (changed) {
1893 /* make sure the system-wide limit is greater than the per process limit */
1894 if (new_value >= aio_max_requests_per_process)
1895 aio_max_requests = new_value;
1896 else
1897 error = EINVAL;
1898 }
1899 return(error);
1900 }
1901
1902
1903 /*
1904 * Max number of concurrent aio requests per process
1905 */
1906 static int
1907 sysctl_aioprocmax
1908 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1909 {
1910 int new_value, changed;
1911 int error = sysctl_io_number(req, aio_max_requests_per_process, sizeof(int), &new_value, &changed);
1912 if (changed) {
1913 /* make sure per process limit is less than the system-wide limit */
1914 if (new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX)
1915 aio_max_requests_per_process = new_value;
1916 else
1917 error = EINVAL;
1918 }
1919 return(error);
1920 }
1921
1922
1923 /*
1924 * Max number of async IO worker threads
1925 */
1926 static int
1927 sysctl_aiothreads
1928 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1929 {
1930 int new_value, changed;
1931 int error = sysctl_io_number(req, aio_worker_threads, sizeof(int), &new_value, &changed);
1932 if (changed) {
1933 /* we only allow an increase in the number of worker threads */
1934 if (new_value > aio_worker_threads ) {
1935 _aio_create_worker_threads((new_value - aio_worker_threads));
1936 aio_worker_threads = new_value;
1937 }
1938 else
1939 error = EINVAL;
1940 }
1941 return(error);
1942 }
1943
1944
1945 /*
1946 * System-wide limit on the max number of processes
1947 */
1948 static int
1949 sysctl_maxproc
1950 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1951 {
1952 int new_value, changed;
1953 int error = sysctl_io_number(req, maxproc, sizeof(int), &new_value, &changed);
1954 if (changed) {
1955 AUDIT_ARG(value, new_value);
1956 /* make sure the system-wide limit is less than the configured hard
1957 limit set at kernel compilation */
1958 if (new_value <= hard_maxproc && new_value > 0)
1959 maxproc = new_value;
1960 else
1961 error = EINVAL;
1962 }
1963 return(error);
1964 }
1965
/* Read-only kernel identification strings and constants. */
SYSCTL_STRING(_kern, KERN_OSTYPE, ostype,
	CTLFLAG_RD | CTLFLAG_KERN,
	ostype, 0, "");
SYSCTL_STRING(_kern, KERN_OSRELEASE, osrelease,
	CTLFLAG_RD | CTLFLAG_KERN,
	osrelease, 0, "");
SYSCTL_INT(_kern, KERN_OSREV, osrevision,
	CTLFLAG_RD | CTLFLAG_KERN,
	NULL, BSD, "");
SYSCTL_STRING(_kern, KERN_VERSION, version,
	CTLFLAG_RD | CTLFLAG_KERN,
	version, 0, "");
1978
1979 /* PR-5293665: need to use a callback function for kern.osversion to set
1980 * osversion in IORegistry */
1981
1982 static int
1983 sysctl_osversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
1984 {
1985 int rval = 0;
1986
1987 rval = sysctl_handle_string(oidp, arg1, arg2, req);
1988
1989 if (req->newptr) {
1990 IORegistrySetOSBuildVersion((char *)arg1);
1991 }
1992
1993 return rval;
1994 }
1995
1996 SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
1997 CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING,
1998 osversion, 256 /* OSVERSIZE*/,
1999 sysctl_osversion, "A", "");
2000
2001 static int
2002 sysctl_sysctl_bootargs
2003 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2004 {
2005 int error;
2006 char buf[256];
2007
2008 strlcpy(buf, PE_boot_args(), 256);
2009 error = sysctl_io_string(req, buf, 256, 0, NULL);
2010 return(error);
2011 }
2012
2013 SYSCTL_PROC(_kern, OID_AUTO, bootargs,
2014 CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING,
2015 NULL, 0,
2016 sysctl_sysctl_bootargs, "A", "bootargs");
2017
/* Assorted kernel limits and POSIX feature constants. */
SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles,
	CTLFLAG_RW | CTLFLAG_KERN,
	&maxfiles, 0, "");
SYSCTL_INT(_kern, KERN_ARGMAX, argmax,
	CTLFLAG_RD | CTLFLAG_KERN,
	NULL, ARG_MAX, "");
SYSCTL_INT(_kern, KERN_POSIX1, posix1version,
	CTLFLAG_RD | CTLFLAG_KERN,
	NULL, _POSIX_VERSION, "");
SYSCTL_INT(_kern, KERN_NGROUPS, ngroups,
	CTLFLAG_RD | CTLFLAG_KERN,
	NULL, NGROUPS_MAX, "");
SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control,
	CTLFLAG_RD | CTLFLAG_KERN,
	NULL, 1, "");
#if 1	/* _POSIX_SAVED_IDS from <unistd.h> */
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
	CTLFLAG_RD | CTLFLAG_KERN,
	NULL, 1, "");
#else
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
	CTLFLAG_RD | CTLFLAG_KERN,
	NULL, 0, "");
#endif
2042
2043 static int
2044 sysctl_maxvnodes (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2045 {
2046 unsigned int oldval = desiredvnodes;
2047 int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &desiredvnodes, NULL);
2048 reset_vmobjectcache(oldval, desiredvnodes);
2049 resize_namecache(desiredvnodes);
2050 return(error);
2051 }
2052
2053 SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
2054 CTLTYPE_INT | CTLFLAG_RW,
2055 0, 0, sysctl_maxvnodes, "I", "");
2056
/* Registrations for the tunable handlers defined above. */
SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_maxproc, "I", "");

SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_aiomax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_aioprocmax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_aiothreads, "I", "");
2072
2073 static int
2074 sysctl_securelvl
2075 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2076 {
2077 int new_value, changed;
2078 int error = sysctl_io_number(req, securelevel, sizeof(int), &new_value, &changed);
2079 if (changed) {
2080 if (!(new_value < securelevel && req->p->p_pid != 1)) {
2081 proc_list_lock();
2082 securelevel = new_value;
2083 proc_list_unlock();
2084 } else {
2085 error = EPERM;
2086 }
2087 }
2088 return(error);
2089 }
2090
2091 SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
2092 CTLTYPE_INT | CTLFLAG_RW,
2093 0, 0, sysctl_securelvl, "I", "");
2094
2095
2096 static int
2097 sysctl_domainname
2098 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2099 {
2100 int error, changed;
2101 error = sysctl_io_string(req, domainname, sizeof(domainname), 0, &changed);
2102 if (changed) {
2103 domainnamelen = strlen(domainname);
2104 }
2105 return(error);
2106 }
2107
2108 SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
2109 CTLTYPE_STRING | CTLFLAG_RW,
2110 0, 0, sysctl_domainname, "A", "");
2111
2112 SYSCTL_INT(_kern, KERN_HOSTID, hostid,
2113 CTLFLAG_RW | CTLFLAG_KERN,
2114 &hostid, 0, "");
2115
2116 static int
2117 sysctl_hostname
2118 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2119 {
2120 int error, changed;
2121 error = sysctl_io_string(req, hostname, sizeof(hostname), 1, &changed);
2122 if (changed) {
2123 hostnamelen = req->newlen;
2124 }
2125 return(error);
2126 }
2127
2128
2129 SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
2130 CTLTYPE_STRING | CTLFLAG_RW,
2131 0, 0, sysctl_hostname, "A", "");
2132
static int
sysctl_procname
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	/* Reads (and, preserved from the original implementation, allows
	   writing) the calling process's p_name buffer.
	   NOTE(review): this sysctl appears unused; write support kept
	   only for compatibility. */
	return sysctl_io_string(req, &req->p->p_name[0], (2*MAXCOMLEN+1), 1, NULL);
}

SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
	CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY,
	0, 0, sysctl_procname, "A", "");

/* Exported tunable backing kern.speculative_reads_disabled. */
SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
	CTLFLAG_RW | CTLFLAG_KERN,
	&speculative_reads_disabled, 0, "");
2149
2150 static int
2151 sysctl_boottime
2152 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2153 {
2154 struct timeval t;
2155
2156 t.tv_sec = boottime_sec();
2157 t.tv_usec = 0;
2158
2159 return sysctl_io_opaque(req, &t, sizeof(t), NULL);
2160 }
2161
2162 SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
2163 CTLTYPE_STRUCT | CTLFLAG_RD,
2164 0, 0, sysctl_boottime, "S,timeval", "");
2165
2166 static int
2167 sysctl_symfile
2168 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2169 {
2170 char *str;
2171 int error = get_kernel_symfile(req->p, &str);
2172 if (error)
2173 return (error);
2174 return sysctl_io_string(req, str, 0, 0, NULL);
2175 }
2176
2177
2178 SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
2179 CTLTYPE_STRING | CTLFLAG_RD,
2180 0, 0, sysctl_symfile, "A", "");
2181
#if NFSCLIENT
/* kern.netboot: reports the value of netboot_root() (network root check). */
static int
sysctl_netboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, netboot_root(), sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
	CTLTYPE_INT | CTLFLAG_RD,
	0, 0, sysctl_netboot, "I", "");
#endif
2194
static int
sysctl_usrstack
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	/* Legacy 32-bit view of the caller's user stack address: user_stack
	 * is truncated to int here (see usrstack64 for the full width). */
	return sysctl_io_number(req, (int)req->p->user_stack, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack,
	CTLTYPE_INT | CTLFLAG_RD,
	0, 0, sysctl_usrstack, "I", "");
2205
/*
 * kern.usrstack64: the calling process's user stack address at full
 * width (64-bit variant of kern.usrstack above).
 */
static int
sysctl_usrstack64
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, req->p->user_stack, sizeof(req->p->user_stack), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
		CTLTYPE_QUAD | CTLFLAG_RD,
		0, 0, sysctl_usrstack64, "Q", "");
2216
/* kern.corefile: writable template for core file names (backed by corefilename[]). */
SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
		CTLFLAG_RW | CTLFLAG_KERN,
		corefilename, sizeof(corefilename), "");
2220
2221 static int
2222 sysctl_coredump
2223 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2224 {
2225 int new_value, changed;
2226 int error = sysctl_io_number(req, do_coredump, sizeof(int), &new_value, &changed);
2227 if (changed) {
2228 if ((new_value == 0) || (new_value == 1))
2229 do_coredump = new_value;
2230 else
2231 error = EINVAL;
2232 }
2233 return(error);
2234 }
2235
2236 SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
2237 CTLTYPE_INT | CTLFLAG_RW,
2238 0, 0, sysctl_coredump, "I", "");
2239
2240 static int
2241 sysctl_suid_coredump
2242 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2243 {
2244 int new_value, changed;
2245 int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed);
2246 if (changed) {
2247 if ((new_value == 0) || (new_value == 1))
2248 sugid_coredump = new_value;
2249 else
2250 error = EINVAL;
2251 }
2252 return(error);
2253 }
2254
2255 SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
2256 CTLTYPE_INT | CTLFLAG_RW,
2257 0, 0, sysctl_suid_coredump, "I", "");
2258
2259 static int
2260 sysctl_delayterm
2261 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2262 {
2263 struct proc *p = req->p;
2264 int new_value, changed;
2265 int error = sysctl_io_number(req, (req->p->p_lflag & P_LDELAYTERM)? 1: 0, sizeof(int), &new_value, &changed);
2266 if (changed) {
2267 proc_lock(p);
2268 if (new_value)
2269 req->p->p_lflag |= P_LDELAYTERM;
2270 else
2271 req->p->p_lflag &= ~P_LDELAYTERM;
2272 proc_unlock(p);
2273 }
2274 return(error);
2275 }
2276
2277 SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
2278 CTLTYPE_INT | CTLFLAG_RW,
2279 0, 0, sysctl_delayterm, "I", "");
2280
2281 static int
2282 sysctl_proc_low_pri_io
2283 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2284 {
2285 struct proc *p = req->p;
2286 int new_value, old_value, changed;
2287 int error;
2288
2289 proc_lock(p);
2290 switch (req->p->p_iopol_disk) {
2291 case IOPOL_DEFAULT:
2292 case IOPOL_NORMAL:
2293 old_value = 0;
2294 break;
2295 case IOPOL_THROTTLE:
2296 old_value = 1;
2297 break;
2298 case IOPOL_PASSIVE:
2299 old_value = 2;
2300 break;
2301 default:
2302 /*\ 5 this should never happen, but to be robust, return the default value */
2303 old_value = 0;
2304 break;
2305 }
2306 proc_unlock(p);
2307
2308 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2309 if (changed) {
2310 proc_lock(p);
2311 if (new_value & 0x01)
2312 req->p->p_iopol_disk = IOPOL_THROTTLE;
2313 else if (new_value & 0x02)
2314 req->p->p_iopol_disk = IOPOL_PASSIVE;
2315 else if (new_value == 0)
2316 req->p->p_iopol_disk = IOPOL_NORMAL;
2317 proc_unlock(p);
2318 }
2319 return(error);
2320 }
2321
2322 SYSCTL_PROC(_kern, KERN_PROC_LOW_PRI_IO, proc_low_pri_io,
2323 CTLTYPE_INT | CTLFLAG_RW,
2324 0, 0, sysctl_proc_low_pri_io, "I", "");
2325
2326 static int
2327 sysctl_rage_vnode
2328 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2329 {
2330 struct proc *p = req->p;
2331 struct uthread *ut;
2332 int new_value, old_value, changed;
2333 int error;
2334
2335 ut = get_bsdthread_info(current_thread());
2336
2337 if (ut->uu_flag & UT_RAGE_VNODES)
2338 old_value = KERN_RAGE_THREAD;
2339 else if (p->p_lflag & P_LRAGE_VNODES)
2340 old_value = KERN_RAGE_PROC;
2341 else
2342 old_value = 0;
2343
2344 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2345
2346 if (error == 0) {
2347 switch (new_value) {
2348 case KERN_RAGE_PROC:
2349 proc_lock(p);
2350 p->p_lflag |= P_LRAGE_VNODES;
2351 proc_unlock(p);
2352 break;
2353 case KERN_UNRAGE_PROC:
2354 proc_lock(p);
2355 p->p_lflag &= ~P_LRAGE_VNODES;
2356 proc_unlock(p);
2357 break;
2358
2359 case KERN_RAGE_THREAD:
2360 ut->uu_flag |= UT_RAGE_VNODES;
2361 break;
2362 case KERN_UNRAGE_THREAD:
2363 ut = get_bsdthread_info(current_thread());
2364 ut->uu_flag &= ~UT_RAGE_VNODES;
2365 break;
2366 }
2367 }
2368 return(error);
2369 }
2370
2371 SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
2372 CTLTYPE_INT | CTLFLAG_RW,
2373 0, 0, sysctl_rage_vnode, "I", "");
2374
2375
2376 static int
2377 sysctl_kern_check_openevt
2378 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2379 {
2380 struct proc *p = req->p;
2381 int new_value, old_value, changed;
2382 int error;
2383
2384 if (p->p_flag & P_CHECKOPENEVT) {
2385 old_value = KERN_OPENEVT_PROC;
2386 } else {
2387 old_value = 0;
2388 }
2389
2390 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2391
2392 if (error == 0) {
2393 switch (new_value) {
2394 case KERN_OPENEVT_PROC:
2395 OSBitOrAtomic(P_CHECKOPENEVT, (UInt32 *)&p->p_flag);
2396 break;
2397
2398 case KERN_UNOPENEVT_PROC:
2399 OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT), (UInt32 *)&p->p_flag);
2400 break;
2401
2402 default:
2403 error = EINVAL;
2404 }
2405 }
2406 return(error);
2407 }
2408
2409 SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY,
2410 0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");
2411
2412
2413
2414 static int
2415 sysctl_nx
2416 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2417 {
2418 #ifdef SECURE_KERNEL
2419 return ENOTSUP;
2420 #endif
2421 int new_value, changed;
2422 int error;
2423
2424 error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);
2425 if (error)
2426 return error;
2427
2428 if (changed) {
2429 #ifdef __i386__
2430 /*
2431 * Only allow setting if NX is supported on the chip
2432 */
2433 if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD))
2434 return ENOTSUP;
2435 #endif
2436 nx_enabled = new_value;
2437 }
2438 return(error);
2439 }
2440
2441
2442
2443 SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
2444 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN,
2445 0, 0, sysctl_nx, "I", "");
2446
2447 static int
2448 sysctl_loadavg
2449 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2450 {
2451 if (proc_is64bit(req->p)) {
2452 struct user_loadavg loadinfo64;
2453 loadavg32to64(&averunnable, &loadinfo64);
2454 return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);
2455 } else {
2456 return sysctl_io_opaque(req, &averunnable, sizeof(averunnable), NULL);
2457 }
2458 }
2459
2460 SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
2461 CTLTYPE_STRUCT | CTLFLAG_RD,
2462 0, 0, sysctl_loadavg, "S,loadavg", "");
2463
2464 static int
2465 sysctl_swapusage
2466 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2467 {
2468 int error;
2469 uint64_t swap_total;
2470 uint64_t swap_avail;
2471 uint32_t swap_pagesize;
2472 boolean_t swap_encrypted;
2473 struct xsw_usage xsu;
2474
2475 error = macx_swapinfo(&swap_total,
2476 &swap_avail,
2477 &swap_pagesize,
2478 &swap_encrypted);
2479 if (error)
2480 return error;
2481
2482 xsu.xsu_total = swap_total;
2483 xsu.xsu_avail = swap_avail;
2484 xsu.xsu_used = swap_total - swap_avail;
2485 xsu.xsu_pagesize = swap_pagesize;
2486 xsu.xsu_encrypted = swap_encrypted;
2487 return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL);
2488 }
2489
2490
2491
2492 SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
2493 CTLTYPE_STRUCT | CTLFLAG_RD,
2494 0, 0, sysctl_swapusage, "S,xsw_usage", "");
2495
2496
/* this kernel does NOT implement shared_region_make_private_np() */
/*
 * kern.shreg_privatizable: with a NULL backing pointer the constant
 * arg2 (0) is presumably what gets reported — advertising that
 * shared-region privatization is unavailable.  TODO(review): confirm
 * against the SYSCTL_INT handler's NULL-pointer behavior.
 */
SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
		CTLFLAG_RD,
		NULL, 0, "");
2501
#if __i386__
/*
 * sysctl.proc_exec_affinity: get/set the calling process's P_AFFINITY
 * flag, expressed as a cpu_type_t.  Reads report CPU_TYPE_POWERPC when
 * P_AFFINITY is set, else CPU_TYPE_I386.  Writes accept exactly those
 * two values (setting/clearing the flag atomically) and reject anything
 * else with EINVAL.  NOTE(review): P_AFFINITY presumably relates to
 * translated (Rosetta) execution preference — confirm at the points
 * where P_AFFINITY is consumed.
 */
static int
sysctl_sysctl_exec_affinity(__unused struct sysctl_oid *oidp,
			    __unused void *arg1, __unused int arg2,
			    struct sysctl_req *req)
{
	proc_t cur_proc = req->p;
	int error;

	if (req->oldptr != USER_ADDR_NULL) {
		/* Report the current affinity as a cpu type. */
		cpu_type_t oldcputype = (cur_proc->p_flag & P_AFFINITY) ? CPU_TYPE_POWERPC : CPU_TYPE_I386;
		if ((error = SYSCTL_OUT(req, &oldcputype, sizeof(oldcputype))))
			return error;
	}

	if (req->newptr != USER_ADDR_NULL) {
		cpu_type_t newcputype;
		if ((error = SYSCTL_IN(req, &newcputype, sizeof(newcputype))))
			return error;
		if (newcputype == CPU_TYPE_I386)
			/* Clear the flag atomically; p_flag is shared. */
			OSBitAndAtomic(~((uint32_t)P_AFFINITY), (UInt32 *)&cur_proc->p_flag);
		else if (newcputype == CPU_TYPE_POWERPC)
			OSBitOrAtomic(P_AFFINITY, (UInt32 *)&cur_proc->p_flag);
		else
			return (EINVAL);
	}

	return 0;
}
SYSCTL_PROC(_sysctl, OID_AUTO, proc_exec_affinity, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, 0, 0, sysctl_sysctl_exec_affinity ,"I","proc_exec_affinity");
#endif
2533
/*
 * Resolve the CPU type of a process, for the sysctl.proc_native and
 * sysctl.proc_cputype handlers below.
 *
 * cur_proc: fallback process when no MIB argument is given.
 * name/namelen: optional one-element MIB naming a pid to look up.
 * cputype: out parameter; on success receives the process's cpu type,
 *   with CPU_ARCH_ABI64 OR'ed in for 64-bit processes, or
 *   CPU_TYPE_POWERPC for translated processes on i386.
 *
 * Returns 0 on success, or EINVAL for an unknown pid or a namelen other
 * than 0 or 1.
 */
static int
fetch_process_cputype(
	proc_t cur_proc,
	int *name,
	u_int namelen,
	cpu_type_t *cputype)
{
	proc_t p = PROC_NULL;
	int refheld = 0;
	cpu_type_t ret = 0;
	int error = 0;

	if (namelen == 0)
		p = cur_proc;
	else if (namelen == 1) {
		p = proc_find(name[0]);
		if (p == NULL)
			return (EINVAL);
		/* proc_find() took a reference; dropped via proc_rele() below. */
		refheld = 1;
	} else {
		error = EINVAL;
		goto out;
	}

#if __i386__
	/* Translated processes report as PowerPC regardless of host type. */
	if (p->p_flag & P_TRANSLATED) {
		ret = CPU_TYPE_POWERPC;
	}
	else
#endif
	{
		ret = cpu_type();
		if (IS_64BIT_PROCESS(p))
			ret |= CPU_ARCH_ABI64;
	}
	*cputype = ret;

	if (refheld != 0)
		proc_rele(p);
out:
	return (error);
}
2576
2577 static int
2578 sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
2579 struct sysctl_req *req)
2580 {
2581 int error;
2582 cpu_type_t proc_cputype = 0;
2583 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2584 return error;
2585 int res = 1;
2586 if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
2587 res = 0;
2588 return SYSCTL_OUT(req, &res, sizeof(res));
2589 }
2590 SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_native ,"I","proc_native");
2591
2592 static int
2593 sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
2594 struct sysctl_req *req)
2595 {
2596 int error;
2597 cpu_type_t proc_cputype = 0;
2598 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2599 return error;
2600 return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
2601 }
2602 SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_cputype ,"I","proc_cputype");
2603
2604 static int
2605 sysctl_safeboot
2606 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2607 {
2608 return sysctl_io_number(req, boothowto & RB_SAFEBOOT ? 1 : 0, sizeof(int), NULL, NULL);
2609 }
2610
2611 SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
2612 CTLTYPE_INT | CTLFLAG_RD,
2613 0, 0, sysctl_safeboot, "I", "");
2614
2615 static int
2616 sysctl_singleuser
2617 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2618 {
2619 return sysctl_io_number(req, boothowto & RB_SINGLE ? 1 : 0, sizeof(int), NULL, NULL);
2620 }
2621
2622 SYSCTL_PROC(_kern, OID_AUTO, singleuser,
2623 CTLTYPE_INT | CTLFLAG_RD,
2624 0, 0, sysctl_singleuser, "I", "");
2625
/*
 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
 */
extern boolean_t affinity_sets_enabled;
extern int affinity_sets_mapping;

/* kern.affinity_sets_enabled: master switch for affinity-set hinting. */
SYSCTL_INT (_kern, OID_AUTO, affinity_sets_enabled,
	    CTLFLAG_RW, (int *) &affinity_sets_enabled, 0, "hinting enabled");
/* kern.affinity_sets_mapping: selects the mapping policy (semantics in affinity.c). */
SYSCTL_INT (_kern, OID_AUTO, affinity_sets_mapping,
	    CTLFLAG_RW, &affinity_sets_mapping, 0, "mapping policy");
2636
/*
 * Limit on total memory users can wire.
 *
 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
 *
 * vm_user_wire_limit - per address space limit on wired memory.  This puts a cap on the process's rlimit value.
 *
 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
 * kmem_init().
 *
 * All values are in bytes.
 */

vm_map_size_t	vm_global_user_wire_limit;
vm_map_size_t	vm_user_wire_limit;

/* vm.global_user_wire_limit / vm.user_wire_limit: writable 64-bit tunables for the limits above. */
SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW, &vm_global_user_wire_limit, "");
SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW, &vm_user_wire_limit, "");