/*
 * bsd/kern/kern_sysctl.c
 * (snapshot: git blob d1985d21d540fa589b10bbf8a283d9c45009c67e,
 *  apple/xnu.git, mirrored via git.saurik.com)
 */
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*-
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
65 */
66 /*
67 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
68 * support for mandatory and extensible security protections. This notice
69 * is included in support of clause 2.2 (b) of the Apple Public License,
70 * Version 2.0.
71 */
72
73 /*
74 * DEPRECATED sysctl system call code
75 *
76 * Everything in this file is deprecated. Sysctls should be handled
77 * by the code in kern_newsysctl.c.
78 * The remaining "case" sections are supposed to be converted into
79 * SYSCTL_*-style definitions, and as soon as all of them are gone,
80 * this source file is supposed to die.
81 *
82 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
83 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
84 */
85
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/malloc.h>
90 #include <sys/proc_internal.h>
91 #include <sys/kauth.h>
92 #include <sys/file_internal.h>
93 #include <sys/vnode_internal.h>
94 #include <sys/unistd.h>
95 #include <sys/buf.h>
96 #include <sys/ioctl.h>
97 #include <sys/namei.h>
98 #include <sys/tty.h>
99 #include <sys/disklabel.h>
100 #include <sys/vm.h>
101 #include <sys/sysctl.h>
102 #include <sys/user.h>
103 #include <sys/aio_kern.h>
104 #include <sys/reboot.h>
105
106 #include <bsm/audit_kernel.h>
107
108 #include <mach/machine.h>
109 #include <mach/mach_types.h>
110 #include <mach/vm_param.h>
111 #include <kern/task.h>
112 #include <kern/lock.h>
113 #include <vm/vm_kern.h>
114 #include <vm/vm_map.h>
115 #include <mach/host_info.h>
116
117 #include <sys/mount_internal.h>
118 #include <sys/kdebug.h>
119 #include <sys/sysproto.h>
120
121 #include <IOKit/IOPlatformExpert.h>
122 #include <pexpert/pexpert.h>
123
124 #include <machine/machine_routines.h>
125 #include <machine/exec.h>
126
127 #include <vm/vm_protos.h>
128
129 #ifdef __i386__
130 #include <i386/cpuid.h>
131 #endif
132
133 sysctlfn kern_sysctl;
134 #ifdef DEBUG
135 sysctlfn debug_sysctl;
136 #endif
137 extern sysctlfn net_sysctl;
138 extern sysctlfn cpu_sysctl;
139 extern int aio_max_requests;
140 extern int aio_max_requests_per_process;
141 extern int aio_worker_threads;
142 extern int lowpri_IO_window_msecs;
143 extern int lowpri_IO_delay_msecs;
144 extern int nx_enabled;
145 extern int speculative_reads_disabled;
146
147 static void
148 fill_eproc(proc_t p, struct eproc *ep);
149 static void
150 fill_externproc(proc_t p, struct extern_proc *exp);
151 static void
152 fill_user_eproc(proc_t p, struct user_eproc *ep);
153 static void
154 fill_user_proc(proc_t p, struct user_kinfo_proc *kp);
155 static void
156 fill_user_externproc(proc_t p, struct user_extern_proc *exp);
157 extern int
158 kdbg_control(int *name, u_int namelen, user_addr_t where, size_t * sizep);
159 int
160 kdebug_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep, proc_t p);
161 #if NFSCLIENT
162 extern int
163 netboot_root(void);
164 #endif
165 int
166 pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
167 proc_t p);
168 __private_extern__ kern_return_t
169 reset_vmobjectcache(unsigned int val1, unsigned int val2);
170 int
171 sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep);
172 int
173 sysctl_doprof(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
174 user_addr_t newp, size_t newlen);
175 static void
176 fill_proc(proc_t p, struct kinfo_proc *kp);
177 int
178 sysctl_procargs(int *name, u_int namelen, user_addr_t where,
179 size_t *sizep, proc_t cur_proc);
180 static int
181 sysctl_procargs2(int *name, u_int namelen, user_addr_t where, size_t *sizep,
182 proc_t cur_proc);
183 static int
184 sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
185 proc_t cur_proc, int argc_yes);
186 int
187 sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
188 size_t newlen, void *sp, int len);
189
190 static int sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg);
191 static int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg);
192 static int sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg);
193 static int sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg);
194 static int sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg);
195 static int sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg);
196 int sysdoproc_callback(proc_t p, void *arg);
197
198 static int __sysctl_funneled(proc_t p, struct __sysctl_args *uap, register_t *retval);
199
200 extern void IORegistrySetOSBuildVersion(char * build_version);
201
202 static void
203 loadavg32to64(struct loadavg *la32, struct user_loadavg *la64)
204 {
205 la64->ldavg[0] = la32->ldavg[0];
206 la64->ldavg[1] = la32->ldavg[1];
207 la64->ldavg[2] = la32->ldavg[2];
208 la64->fscale = (user_long_t)la32->fscale;
209 }
210
211 /*
212 * Locking and stats
213 */
214 static struct sysctl_lock memlock;
215
216 /* sysctl() syscall */
217 int
218 __sysctl(proc_t p, struct __sysctl_args *uap, register_t *retval)
219 {
220 boolean_t funnel_state;
221 int error;
222
223 funnel_state = thread_funnel_set(kernel_flock, TRUE);
224 error = __sysctl_funneled(p, uap, retval);
225 thread_funnel_set(kernel_flock, funnel_state);
226 return(error);
227 }
228
/*
 * Guts of the deprecated sysctl(2) syscall, entered with the kernel
 * funnel held.  Copies in the MIB name, enforces the legacy superuser
 * check for writes, dispatches to the old-style handler table
 * (kern_sysctl / vfs_sysctl / debug_sysctl), and falls back to the
 * new-style userland_sysctl() whenever the old path returns ENOTSUP.
 */
static int
__sysctl_funneled(proc_t p, struct __sysctl_args *uap, __unused register_t *retval)
{
	int error, dolock = 1;
	size_t savelen = 0, oldlen = 0, newlen;
	sysctlfn *fnp = NULL;
	int name[CTL_MAXNAME];
	int error1;
	boolean_t memlock_taken = FALSE;
	boolean_t vslock_taken = FALSE;
#if CONFIG_MACF
	kauth_cred_t my_cred;
#endif

	/*
	 * all top-level sysctl names are non-terminal
	 */
	if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)
		return (EINVAL);
	error = copyin(uap->name, &name[0], uap->namelen * sizeof(int));
	if (error)
		return (error);

	AUDIT_ARG(ctlname, name, uap->namelen);

	if (proc_is64bit(p)) {
		/* uap->newlen is a size_t value which grows to 64 bits
		 * when coming from a 64-bit process.  since it's doubtful we'll
		 * have a sysctl newp buffer greater than 4GB we shrink it to size_t
		 */
		newlen = CAST_DOWN(size_t, uap->newlen);
	}
	else {
		newlen = uap->newlen;
	}

	/* CTL_UNSPEC is used to get oid to AUTO_OID */
	/*
	 * Writes to most CTL_KERN names, and to all CTL_HW / CTL_VM names,
	 * require superuser; the listed KERN_* names are exempt.
	 */
	if (uap->new != USER_ADDR_NULL
	    && ((name[0] == CTL_KERN
		&& !(name[1] == KERN_IPC || name[1] == KERN_PANICINFO || name[1] == KERN_PROCDELAYTERM ||
		     name[1] == KERN_PROC_LOW_PRI_IO || name[1] == KERN_PROCNAME || name[1] == KERN_RAGEVNODE || name[1] == KERN_CHECKOPENEVT))
	    || (name[0] == CTL_HW)
	    || (name[0] == CTL_VM))
	    && (error = suser(kauth_cred_get(), &p->p_acflag)))
		return (error);

	/* XXX: KERN, VFS and DEBUG are handled by their respective functions,
	 * but there is a fallback for all sysctls other than VFS to
	 * userland_sysctl() - KILL THIS! */
	switch (name[0]) {
	case CTL_KERN:
		fnp = kern_sysctl;
		/* only KERN_VNODE/KERN_FILE/KERN_PROC need the old buffer wired */
		if ((name[1] != KERN_VNODE) && (name[1] != KERN_FILE)
			&& (name[1] != KERN_PROC))
			dolock = 0;
		break;
	case CTL_VFS:
		fnp = vfs_sysctl;
		break;
#ifdef DEBUG
	case CTL_DEBUG:
		fnp = debug_sysctl;
		break;
#endif
	default:
		fnp = NULL;
	}

	if (uap->oldlenp != USER_ADDR_NULL) {
		uint64_t oldlen64 = fuulong(uap->oldlenp);

		oldlen = CAST_DOWN(size_t, oldlen64);
		/*
		 * If more than 4G, clamp to 4G - useracc() below will catch
		 * with an EFAULT, if it's actually necessary.
		 */
		if (oldlen64 > 0x00000000ffffffffULL)
			oldlen = 0xffffffffUL;
	}

	if (uap->old != USER_ADDR_NULL) {
		/* verify the user's old-value buffer is writable before wiring */
		if (!useracc(uap->old, (user_size_t)oldlen, B_WRITE))
			return (EFAULT);
		/*
		 * The kernel debug mechanism does not need to take this lock, and
		 * we don't grab the memlock around calls to KERN_PROC because it is reentrant.
		 * Grabbing the lock for a KERN_PROC sysctl makes a deadlock possible 5024049.
		 */
		if (!((name[1] == KERN_KDEBUG) && (name[2] == KERN_KDGETENTROPY)) &&
		    !(name[1] == KERN_PROC)) {
			MEMLOCK_LOCK();
			memlock_taken = TRUE;
		}

		if (dolock && oldlen) {
			/* wire the buffer so the handler can copy out without faulting */
			if ((error = vslock(uap->old, (user_size_t)oldlen))) {
				if (memlock_taken == TRUE)
					MEMLOCK_UNLOCK();
				return(error);
			}
			savelen = oldlen;	/* remember wired length for vsunlock */
			vslock_taken = TRUE;
		}
	}

#if CONFIG_MACF
	/* MAC hook: policy may veto this sysctl before any handler runs */
	my_cred = kauth_cred_proc_ref(p);
	error = mac_system_check_sysctl(
	    my_cred,
	    (int *) name,
	    uap->namelen,
	    uap->old,
	    uap->oldlenp,
	    fnp == kern_sysctl ? 1 : 0,
	    uap->new,
	    newlen
	    );
	kauth_cred_unref(&my_cred);
	if (!error) {
#endif
	if (fnp) {
		/* old-style handler: the top-level name element is consumed here */
		error = (*fnp)(name + 1, uap->namelen - 1, uap->old,
			&oldlen, uap->new, newlen, p);
	}
	else
		error = ENOTSUP;
#if CONFIG_MACF
	}
#endif

	if (vslock_taken == TRUE) {
		error1 = vsunlock(uap->old, (user_size_t)savelen, B_WRITE);
		/* an unwire failure is reported only if the handler succeeded */
		if (!error)
			error = error1;
	}
	if (memlock_taken == TRUE)
		MEMLOCK_UNLOCK();

	if ( (name[0] != CTL_VFS) && (error == ENOTSUP)) {
		size_t tmp = oldlen;
		boolean_t funnel_state;

		/*
		 * Drop the funnel when calling new sysctl code, which will conditionally
		 * grab the funnel if it really needs to.
		 */
		funnel_state = thread_funnel_set(kernel_flock, FALSE);

		error = userland_sysctl(p, name, uap->namelen, uap->old, &tmp,
					1, uap->new, newlen, &oldlen);

		thread_funnel_set(kernel_flock, funnel_state);
	}

	if ((error) && (error != ENOMEM))
		return (error);

	/* report the size written (or needed) back to the caller */
	if (uap->oldlenp != USER_ADDR_NULL)
		error = suulong(uap->oldlenp, oldlen);

	return (error);
}
391
392 /*
393 * Attributes stored in the kernel.
394 */
395 __private_extern__ char corefilename[MAXPATHLEN+1];
396 __private_extern__ int do_coredump;
397 __private_extern__ int sugid_coredump;
398
399 #if COUNT_SYSCALLS
400 __private_extern__ int do_count_syscalls;
401 #endif
402
403 #ifdef INSECURE
404 int securelevel = -1;
405 #else
406 int securelevel;
407 #endif
408
409 static int
410 sysctl_affinity(
411 int *name,
412 u_int namelen,
413 user_addr_t oldBuf,
414 size_t *oldSize,
415 user_addr_t newBuf,
416 __unused size_t newSize,
417 proc_t cur_proc)
418 {
419 if (namelen < 1)
420 return (ENOTSUP);
421
422 if (name[0] == 0 && 1 == namelen) {
423 return sysctl_rdint(oldBuf, oldSize, newBuf,
424 (cur_proc->p_flag & P_AFFINITY) ? 1 : 0);
425 } else if (name[0] == 1 && 2 == namelen) {
426 if (name[1] == 0) {
427 OSBitAndAtomic(~((uint32_t)P_AFFINITY), (UInt32 *)&cur_proc->p_flag);
428 } else {
429 OSBitOrAtomic(P_AFFINITY, (UInt32 *)&cur_proc->p_flag);
430 }
431 return 0;
432 }
433 return (ENOTSUP);
434 }
435
/*
 * sysctl helper for KERN_TRANSLATE: report (as 0/1) whether the process
 * named by name[0] (a pid) has P_TRANSLATED set.  Allowed for the
 * target's own (effective) uid or the superuser.
 */
static int
sysctl_translate(
	int *name,
	u_int namelen,
	user_addr_t oldBuf,
	size_t *oldSize,
	user_addr_t newBuf,
	__unused size_t newSize,
	proc_t cur_proc)
{
	proc_t p;
	int istranslated = 0;
	kauth_cred_t my_cred;
	uid_t uid;

	/* exactly one name element (the target pid) is expected */
	if (namelen != 1)
		return (ENOTSUP);

	p = proc_find(name[0]);
	if (p == NULL)
		return (EINVAL);

	/* take our own reference on the cred so the uid read is stable */
	my_cred = kauth_cred_proc_ref(p);
	uid = kauth_cred_getuid(my_cred);
	kauth_cred_unref(&my_cred);

	/* same effective uid, or root, may query; otherwise EPERM */
	if ((uid != kauth_cred_getuid(kauth_cred_get()))
		&& suser(kauth_cred_get(), &cur_proc->p_acflag)) {
		proc_rele(p);
		return (EPERM);
	}

	istranslated = (p->p_flag & P_TRANSLATED);
	proc_rele(p);	/* drop the proc_find() reference before copyout */
	return sysctl_rdint(oldBuf, oldSize, newBuf,
		(istranslated != 0) ? 1 : 0);
}
472
/*
 * Resolve the configured architecture-translation helper binary
 * (currently only the PowerPC handler, exec_archhandler_ppc) and cache
 * its fsid/fileid so it can be identified later.
 *
 * Returns EBADARCH for an unsupported architecture, EACCES if the
 * handler is not a regular file or lives on a noexec mount, or any
 * error from namei()/vnode_getattr().
 */
int
set_archhandler(__unused proc_t p, int arch)
{
	int error;
	struct nameidata nd;
	struct vnode_attr va;
	vfs_context_t ctx = vfs_context_current();
	struct exec_archhandler *archhandler;

	switch(arch) {
	case CPU_TYPE_POWERPC:
		archhandler = &exec_archhandler_ppc;
		break;
	default:
		return (EBADARCH);
	}

	/* look up the handler's path; the string lives in kernel space */
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE32,
	       CAST_USER_ADDR_T(archhandler->path), ctx);
	error = namei(&nd);
	if (error)
		return (error);
	nameidone(&nd);

	/* Check mount point */
	if ((nd.ni_vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    (nd.ni_vp->v_type != VREG)) {
		vnode_put(nd.ni_vp);	/* drop the namei reference on failure */
		return (EACCES);
	}

	/* fetch only the fsid and fileid of the resolved vnode */
	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_fsid);
	VATTR_WANTED(&va, va_fileid);
	error = vnode_getattr(nd.ni_vp, &va, ctx);
	if (error) {
		vnode_put(nd.ni_vp);
		return (error);
	}
	vnode_put(nd.ni_vp);

	/* cache the identity of the handler binary */
	archhandler->fsid = va.va_fsid;
	archhandler->fileid = (u_long)va.va_fileid;
	return 0;
}
518
519 /* XXX remove once Rosetta is rev'ed */
520 /*****************************************************************************/
/*
 * Old-style sysctl handler for the PowerPC exec archhandler path:
 * read out the current path, and/or (superuser only) install a new one,
 * re-validating it via set_archhandler().
 */
static int
sysctl_exec_archhandler_ppc(
	__unused int *name,
	__unused u_int namelen,
	user_addr_t oldBuf,
	size_t *oldSize,
	user_addr_t newBuf,
	size_t newSize,
	proc_t p)
{
	int error;
	size_t len;
	char handler[sizeof(exec_archhandler_ppc.path)];
	vfs_context_t ctx = vfs_context_current();

	if (oldSize) {
		/* copy out the current path, including its NUL terminator */
		len = strlen(exec_archhandler_ppc.path) + 1;
		if (oldBuf) {
			if (*oldSize < len)
				return (ENOMEM);
			error = copyout(exec_archhandler_ppc.path, oldBuf, len);
			if (error)
				return (error);
		}
		*oldSize = len - 1;	/* report string length, not buffer length */
	}
	if (newBuf) {
		/* only root may repoint the translator binary */
		error = suser(vfs_context_ucred(ctx), &p->p_acflag);
		if (error)
			return (error);
		if (newSize >= sizeof(exec_archhandler_ppc.path))
			return (ENAMETOOLONG);
		error = copyin(newBuf, handler, newSize);
		if (error)
			return (error);
		handler[newSize] = 0;	/* user data may be unterminated */
		strlcpy(exec_archhandler_ppc.path, handler, MAXPATHLEN);
		/* validate the new path and cache its fsid/fileid */
		error = set_archhandler(p, CPU_TYPE_POWERPC);
		if (error)
			return (error);
	}
	return 0;
}
564 /*****************************************************************************/
565
566 static int
567 sysctl_handle_exec_archhandler_ppc(struct sysctl_oid *oidp, void *arg1,
568 int arg2, struct sysctl_req *req)
569 {
570 int error = 0;
571
572 error = sysctl_handle_string(oidp, arg1, arg2, req);
573
574 if (error)
575 goto done;
576
577 if (req->newptr)
578 error = set_archhandler(req->p, CPU_TYPE_POWERPC);
579
580 done:
581 return error;
582
583 }
584
585 SYSCTL_NODE(_kern, KERN_EXEC, exec, CTLFLAG_RD|CTLFLAG_LOCKED, 0, "");
586
587 SYSCTL_NODE(_kern_exec, OID_AUTO, archhandler, CTLFLAG_RD|CTLFLAG_LOCKED, 0, "");
588
589 SYSCTL_PROC(_kern_exec_archhandler, OID_AUTO, powerpc,
590 CTLTYPE_STRING | CTLFLAG_RW, exec_archhandler_ppc.path, 0,
591 sysctl_handle_exec_archhandler_ppc, "A", "");
592
593 extern int get_kernel_symfile(proc_t, char **);
594 __private_extern__ int
595 sysctl_dopanicinfo(int *, u_int, user_addr_t, size_t *, user_addr_t,
596 size_t, proc_t);
597
598 /*
599 * kernel related system variables.
600 */
601 int
602 kern_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
603 user_addr_t newp, size_t newlen, proc_t p)
604 {
605 /* all sysctl names not listed below are terminal at this level */
606 if (namelen != 1
607 && !(name[0] == KERN_PROC
608 || name[0] == KERN_PROF
609 || name[0] == KERN_KDEBUG
610 #if !CONFIG_EMBEDDED
611 || name[0] == KERN_PROCARGS
612 #endif
613 || name[0] == KERN_PROCARGS2
614 || name[0] == KERN_IPC
615 || name[0] == KERN_SYSV
616 || name[0] == KERN_AFFINITY
617 || name[0] == KERN_TRANSLATE
618 || name[0] == KERN_EXEC
619 || name[0] == KERN_PANICINFO
620 || name[0] == KERN_POSIX
621 || name[0] == KERN_TFP
622 || name[0] == KERN_TTY
623 #if CONFIG_LCTX
624 || name[0] == KERN_LCTX
625 #endif
626 )
627 )
628 return (ENOTDIR); /* overloaded */
629
630 switch (name[0]) {
631 case KERN_PROC:
632 return (sysctl_doproc(name + 1, namelen - 1, oldp, oldlenp));
633 #ifdef GPROF
634 case KERN_PROF:
635 return (sysctl_doprof(name + 1, namelen - 1, oldp, oldlenp,
636 newp, newlen));
637 #endif
638 case KERN_KDEBUG:
639 return (kdebug_ops(name + 1, namelen - 1, oldp, oldlenp, p));
640 #if !CONFIG_EMBEDDED
641 case KERN_PROCARGS:
642 /* new one as it does not use kinfo_proc */
643 return (sysctl_procargs(name + 1, namelen - 1, oldp, oldlenp, p));
644 #endif
645 case KERN_PROCARGS2:
646 /* new one as it does not use kinfo_proc */
647 return (sysctl_procargs2(name + 1, namelen - 1, oldp, oldlenp, p));
648 #if PANIC_INFO
649 case KERN_PANICINFO:
650 return(sysctl_dopanicinfo(name + 1, namelen - 1, oldp, oldlenp,
651 newp, newlen, p));
652 #endif
653 case KERN_AFFINITY:
654 return sysctl_affinity(name+1, namelen-1, oldp, oldlenp,
655 newp, newlen, p);
656 case KERN_TRANSLATE:
657 return sysctl_translate(name+1, namelen-1, oldp, oldlenp, newp,
658 newlen, p);
659
660 /* XXX remove once Rosetta has rev'ed */
661 case KERN_EXEC:
662 return sysctl_exec_archhandler_ppc(name+1, namelen-1, oldp,
663 oldlenp, newp, newlen, p);
664 #if COUNT_SYSCALLS
665 case KERN_COUNT_SYSCALLS:
666 {
667 /* valid values passed in:
668 * = 0 means don't keep called counts for each bsd syscall
669 * > 0 means keep called counts for each bsd syscall
670 * = 2 means dump current counts to the system log
671 * = 3 means reset all counts
672 * for example, to dump current counts:
673 * sysctl -w kern.count_calls=2
674 */
675 error = sysctl_int(oldp, oldlenp, newp, newlen, &tmp);
676 if ( error != 0 ) {
677 return (error);
678 }
679
680 if ( tmp == 1 ) {
681 do_count_syscalls = 1;
682 }
683 else if ( tmp == 0 || tmp == 2 || tmp == 3 ) {
684 extern int nsysent;
685 extern int syscalls_log[];
686 extern const char * syscallnames[];
687 int i;
688 for ( i = 0; i < nsysent; i++ ) {
689 if ( syscalls_log[i] != 0 ) {
690 if ( tmp == 2 ) {
691 printf("%d calls - name %s \n", syscalls_log[i], syscallnames[i]);
692 }
693 else {
694 syscalls_log[i] = 0;
695 }
696 }
697 }
698 if ( tmp != 0 ) {
699 do_count_syscalls = 1;
700 }
701 }
702 return (0);
703 }
704 #endif
705 default:
706 return (ENOTSUP);
707 }
708 /* NOTREACHED */
709 }
710
711 #ifdef DEBUG
712 /*
713 * Debugging related system variables.
714 */
715 #if DIAGNOSTIC
716 extern
717 #endif /* DIAGNOSTIC */
718 struct ctldebug debug0, debug1;
719 struct ctldebug debug2, debug3, debug4;
720 struct ctldebug debug5, debug6, debug7, debug8, debug9;
721 struct ctldebug debug10, debug11, debug12, debug13, debug14;
722 struct ctldebug debug15, debug16, debug17, debug18, debug19;
723 static struct ctldebug *debugvars[CTL_DEBUG_MAXID] = {
724 &debug0, &debug1, &debug2, &debug3, &debug4,
725 &debug5, &debug6, &debug7, &debug8, &debug9,
726 &debug10, &debug11, &debug12, &debug13, &debug14,
727 &debug15, &debug16, &debug17, &debug18, &debug19,
728 };
/*
 * Old-style handler for the CTL_DEBUG subtree.  Names are pairs:
 * name[0] selects a slot in debugvars[], name[1] is CTL_DEBUG_NAME to
 * read the variable's name or CTL_DEBUG_VALUE to read/write its value.
 */
int
debug_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
	     user_addr_t newp, size_t newlen, __unused proc_t p)
{
	struct ctldebug *cdp;

	/* all sysctl names at this level are name and field */
	if (namelen != 2)
		return (ENOTDIR);		/* overloaded */
	if (name[0] < 0 || name[0] >= CTL_DEBUG_MAXID)
		return (ENOTSUP);
	cdp = debugvars[name[0]];
	if (cdp->debugname == 0)	/* slot not registered */
		return (ENOTSUP);
	switch (name[1]) {
	case CTL_DEBUG_NAME:
		return (sysctl_rdstring(oldp, oldlenp, newp, cdp->debugname));
	case CTL_DEBUG_VALUE:
		return (sysctl_int(oldp, oldlenp, newp, newlen, cdp->debugvar));
	default:
		return (ENOTSUP);
	}
	/* NOTREACHED */
}
753 #endif /* DEBUG */
754
755 /*
756 * The following sysctl_* functions should not be used
757 * any more, as they can only cope with callers in
758 * user mode: Use new-style
759 * sysctl_io_number()
760 * sysctl_io_string()
761 * sysctl_io_opaque()
762 * instead.
763 */
764
765 /*
766 * Validate parameters and get old / set new parameters
767 * for an integer-valued sysctl function.
768 */
769 int
770 sysctl_int(user_addr_t oldp, size_t *oldlenp,
771 user_addr_t newp, size_t newlen, int *valp)
772 {
773 int error = 0;
774
775 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
776 return (EFAULT);
777 if (oldp && *oldlenp < sizeof(int))
778 return (ENOMEM);
779 if (newp && newlen != sizeof(int))
780 return (EINVAL);
781 *oldlenp = sizeof(int);
782 if (oldp)
783 error = copyout(valp, oldp, sizeof(int));
784 if (error == 0 && newp) {
785 error = copyin(newp, valp, sizeof(int));
786 AUDIT_ARG(value, *valp);
787 }
788 return (error);
789 }
790
791 /*
792 * As above, but read-only.
793 */
794 int
795 sysctl_rdint(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, int val)
796 {
797 int error = 0;
798
799 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
800 return (EFAULT);
801 if (oldp && *oldlenp < sizeof(int))
802 return (ENOMEM);
803 if (newp)
804 return (EPERM);
805 *oldlenp = sizeof(int);
806 if (oldp)
807 error = copyout((caddr_t)&val, oldp, sizeof(int));
808 return (error);
809 }
810
811 /*
812 * Validate parameters and get old / set new parameters
813 * for an quad(64bit)-valued sysctl function.
814 */
815 int
816 sysctl_quad(user_addr_t oldp, size_t *oldlenp,
817 user_addr_t newp, size_t newlen, quad_t *valp)
818 {
819 int error = 0;
820
821 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
822 return (EFAULT);
823 if (oldp && *oldlenp < sizeof(quad_t))
824 return (ENOMEM);
825 if (newp && newlen != sizeof(quad_t))
826 return (EINVAL);
827 *oldlenp = sizeof(quad_t);
828 if (oldp)
829 error = copyout(valp, oldp, sizeof(quad_t));
830 if (error == 0 && newp)
831 error = copyin(newp, valp, sizeof(quad_t));
832 return (error);
833 }
834
835 /*
836 * As above, but read-only.
837 */
838 int
839 sysctl_rdquad(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, quad_t val)
840 {
841 int error = 0;
842
843 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
844 return (EFAULT);
845 if (oldp && *oldlenp < sizeof(quad_t))
846 return (ENOMEM);
847 if (newp)
848 return (EPERM);
849 *oldlenp = sizeof(quad_t);
850 if (oldp)
851 error = copyout((caddr_t)&val, oldp, sizeof(quad_t));
852 return (error);
853 }
854
855 /*
856 * Validate parameters and get old / set new parameters
857 * for a string-valued sysctl function. Unlike sysctl_string, if you
858 * give it a too small (but larger than 0 bytes) buffer, instead of
859 * returning ENOMEM, it truncates the returned string to the buffer
860 * size. This preserves the semantics of some library routines
861 * implemented via sysctl, which truncate their returned data, rather
862 * than simply returning an error. The returned string is always NUL
863 * terminated.
864 */
/*
 * Validate parameters and get old / set new parameters
 * for a string-valued sysctl function.  Unlike sysctl_string, if you
 * give it a too small (but larger than 0 bytes) buffer, instead of
 * returning ENOMEM, it truncates the returned string to the buffer
 * size.  This preserves the semantics of some library routines
 * implemented via sysctl, which truncate their returned data, rather
 * than simply returning an error.  The returned string is always NUL
 * terminated.
 */
int
sysctl_trstring(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, char *str, int maxlen)
{
	int len, copylen, error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	/* length of the source string, including its NUL terminator */
	copylen = len = strlen(str) + 1;
	if (oldp && (len < 0 || *oldlenp < 1))
		return (ENOMEM);
	/*
	 * Truncation case: caller's buffer is smaller than the string.
	 * NOTE(review): copylen becomes *oldlenp + 1 here, and the copyout
	 * below then writes copylen bytes plus a NUL at oldp + *oldlenp --
	 * i.e. *oldlenp + 1 bytes total, one byte more than the size the
	 * caller declared.  Looks like a one-byte overwrite of the user
	 * buffer; verify the intended semantics before relying on this.
	 */
	if (oldp && (*oldlenp < (size_t)len))
		copylen = *oldlenp + 1;
	if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
		return (EINVAL);
	*oldlenp = copylen - 1; /* deal with NULL strings correctly */
	if (oldp) {
		error = copyout(str, oldp, copylen);
		if (!error) {
			unsigned char c = 0;
			/* NUL terminate */
			oldp += *oldlenp;
			error = copyout((void *)&c, oldp, sizeof(char));
		}
	}
	if (error == 0 && newp) {
		error = copyin(newp, str, newlen);
		str[newlen] = 0;	/* user data may be unterminated */
		AUDIT_ARG(text, (char *)str);
	}
	return (error);
}
897
898 /*
899 * Validate parameters and get old / set new parameters
900 * for a string-valued sysctl function.
901 */
902 int
903 sysctl_string(user_addr_t oldp, size_t *oldlenp,
904 user_addr_t newp, size_t newlen, char *str, int maxlen)
905 {
906 int len, error = 0;
907
908 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
909 return (EFAULT);
910 len = strlen(str) + 1;
911 if (oldp && (len < 0 || *oldlenp < (size_t)len))
912 return (ENOMEM);
913 if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
914 return (EINVAL);
915 *oldlenp = len -1; /* deal with NULL strings correctly */
916 if (oldp) {
917 error = copyout(str, oldp, len);
918 }
919 if (error == 0 && newp) {
920 error = copyin(newp, str, newlen);
921 str[newlen] = 0;
922 AUDIT_ARG(text, (char *)str);
923 }
924 return (error);
925 }
926
927 /*
928 * As above, but read-only.
929 */
930 int
931 sysctl_rdstring(user_addr_t oldp, size_t *oldlenp,
932 user_addr_t newp, char *str)
933 {
934 int len, error = 0;
935
936 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
937 return (EFAULT);
938 len = strlen(str) + 1;
939 if (oldp && *oldlenp < (size_t)len)
940 return (ENOMEM);
941 if (newp)
942 return (EPERM);
943 *oldlenp = len;
944 if (oldp)
945 error = copyout(str, oldp, len);
946 return (error);
947 }
948
949 /*
950 * Validate parameters and get old / set new parameters
951 * for a structure oriented sysctl function.
952 */
953 int
954 sysctl_struct(user_addr_t oldp, size_t *oldlenp,
955 user_addr_t newp, size_t newlen, void *sp, int len)
956 {
957 int error = 0;
958
959 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
960 return (EFAULT);
961 if (oldp && (len < 0 || *oldlenp < (size_t)len))
962 return (ENOMEM);
963 if (newp && (len < 0 || newlen > (size_t)len))
964 return (EINVAL);
965 if (oldp) {
966 *oldlenp = len;
967 error = copyout(sp, oldp, len);
968 }
969 if (error == 0 && newp)
970 error = copyin(newp, sp, len);
971 return (error);
972 }
973
974 /*
975 * Validate parameters and get old parameters
976 * for a structure oriented sysctl function.
977 */
978 int
979 sysctl_rdstruct(user_addr_t oldp, size_t *oldlenp,
980 user_addr_t newp, void *sp, int len)
981 {
982 int error = 0;
983
984 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
985 return (EFAULT);
986 if (oldp && (len < 0 || *oldlenp < (size_t)len))
987 return (ENOMEM);
988 if (newp)
989 return (EPERM);
990 *oldlenp = len;
991 if (oldp)
992 error = copyout(sp, oldp, len);
993 return (error);
994 }
995
996 /*
997 * Get file structures.
998 */
/*
 * New-style handler for kern.file: copy out the filehead list head
 * followed by one extern_file record per open fileglob.  A size probe
 * (NULL oldptr) returns an estimate padded by 10 entries.
 */
static int
sysctl_file
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error;
	struct fileglob *fg;
	struct extern_file nef;

	if (req->oldptr == USER_ADDR_NULL) {
		/*
		 * overestimate by 10 files
		 */
		req->oldidx = sizeof(filehead) + (nfiles + 10) * sizeof(struct extern_file);
		return (0);
	}

	/*
	 * first copyout filehead
	 */
	error = SYSCTL_OUT(req, &filehead, sizeof(filehead));
	if (error)
		return (error);

	/*
	 * followed by an array of file structures
	 */
	/*
	 * NOTE(review): the global file list is walked here with no lock
	 * visible in this function; presumably serialized elsewhere --
	 * verify against the file-list locking used by the rest of the file.
	 */
	for (fg = filehead.lh_first; fg != 0; fg = fg->f_list.le_next) {
		nef.f_list.le_next = (struct extern_file *)fg->f_list.le_next;
		nef.f_list.le_prev = (struct extern_file **)fg->f_list.le_prev;
		nef.f_flag = (fg->fg_flag & FMASK);	/* expose only FMASK bits */
		nef.f_type = fg->fg_type;
		nef.f_count = fg->fg_count;
		nef.f_msgcount = fg->fg_msgcount;
		nef.f_cred = fg->fg_cred;
		nef.f_ops = fg->fg_ops;
		nef.f_offset = fg->fg_offset;
		nef.f_data = fg->fg_data;
		error = SYSCTL_OUT(req, &nef, sizeof(nef));
		if (error)
			return (error);
	}
	return (0);
}
1042
1043 SYSCTL_PROC(_kern, KERN_FILE, file,
1044 CTLTYPE_STRUCT | CTLFLAG_RW,
1045 0, 0, sysctl_file, "S,filehead", "");
1046
1047 static int
1048 sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg)
1049 {
1050 if (p->p_pid != (pid_t)arg)
1051 return(0);
1052 else
1053 return(1);
1054 }
1055
1056 static int
1057 sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg)
1058 {
1059 if (p->p_pgrpid != (pid_t)arg)
1060 return(0);
1061 else
1062 return(1);
1063 }
1064
/*
 * Proc-list filter: match processes whose controlling terminal's device
 * number equals the dev_t packed into 'arg'.  Takes the kernel funnel
 * around the pgrp/session/tty pointer chain.
 */
static int
sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg)
{
	boolean_t funnel_state;
	int retval;


	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	/* This is very racy but list lock is held.. Hmmm. */
	if ((p->p_flag & P_CONTROLT) == 0 ||
		(p->p_pgrp == NULL) || (p->p_pgrp->pg_session == NULL) ||
		p->p_pgrp->pg_session->s_ttyp == NULL ||
		p->p_pgrp->pg_session->s_ttyp->t_dev != (dev_t)arg)
		retval = 0;
	else
		retval = 1;

	thread_funnel_set(kernel_flock, funnel_state);

	return(retval);
}
1086
1087 static int
1088 sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg)
1089 {
1090 kauth_cred_t my_cred;
1091 uid_t uid;
1092
1093 if (p->p_ucred == NULL)
1094 return(0);
1095 my_cred = kauth_cred_proc_ref(p);
1096 uid = kauth_cred_getuid(my_cred);
1097 kauth_cred_unref(&my_cred);
1098
1099 if (uid != (uid_t)arg)
1100 return(0);
1101 else
1102 return(1);
1103 }
1104
1105
1106 static int
1107 sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg)
1108 {
1109 kauth_cred_t my_cred;
1110 uid_t ruid;
1111
1112 if (p->p_ucred == NULL)
1113 return(0);
1114 my_cred = kauth_cred_proc_ref(p);
1115 ruid = my_cred->cr_ruid;
1116 kauth_cred_unref(&my_cred);
1117
1118 if (ruid != (uid_t)arg)
1119 return(0);
1120 else
1121 return(1);
1122 }
1123
1124 static int
1125 sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg)
1126 {
1127 if ((p->p_lctx == NULL) ||
1128 (p->p_lctx->lc_id != (pid_t)arg))
1129 return(0);
1130 else
1131 return(1);
1132 }
1133
/*
 * try over estimating by 5 procs
 */
#define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
/*
 * State bundle threaded through proc_iterate() to sysdoproc_callback()
 * while servicing a KERN_PROC sysctl request.
 */
struct sysdoproc_args {
    int buflen;             /* bytes remaining in the user buffer */
    caddr_t kprocp;         /* per-proc scratch record to fill and copy out */
    boolean_t is_64_bit;    /* caller is 64-bit: use user_kinfo_proc layout */
    user_addr_t dp;         /* current copyout position in the user buffer */
    size_t needed;          /* running total of bytes a full result needs */
    int sizeof_kproc;       /* size of one record (32- or 64-bit layout) */
    int * errorp;           /* where the callback reports a copyout error */
    int uidcheck;           /* nonzero: filter by effective uid == uidval */
    int ruidcheck;          /* nonzero: filter by real uid == uidval */
    int ttycheck;           /* nonzero: filter by controlling tty == uidval */
    int uidval;             /* uid or dev_t value used by the checks above */
};
1151
1152 int
1153 sysdoproc_callback(proc_t p, void * arg)
1154 {
1155 struct sysdoproc_args * args = (struct sysdoproc_args *)arg;
1156 int error=0;
1157
1158 if (args->buflen >= args->sizeof_kproc) {
1159 if ((args->ruidcheck != 0) && (sysdoproc_filt_KERN_PROC_RUID(p, (void *)args->uidval) == 0))
1160 return(PROC_RETURNED);
1161 if ((args->uidcheck != 0) && (sysdoproc_filt_KERN_PROC_UID(p, (void *)args->uidval) == 0))
1162 return(PROC_RETURNED);
1163 if ((args->ttycheck != 0) && (sysdoproc_filt_KERN_PROC_TTY(p, (void *)args->uidval) == 0))
1164 return(PROC_RETURNED);
1165
1166 bzero(args->kprocp, args->sizeof_kproc);
1167 if (args->is_64_bit) {
1168 fill_user_proc(p, (struct user_kinfo_proc *) args->kprocp);
1169 }
1170 else {
1171 fill_proc(p, (struct kinfo_proc *) args->kprocp);
1172 }
1173 error = copyout(args->kprocp, args->dp, args->sizeof_kproc);
1174 if (error) {
1175 *args->errorp = error;
1176 return(PROC_RETURNED_DONE);
1177 return (error);
1178 }
1179 args->dp += args->sizeof_kproc;
1180 args->buflen -= args->sizeof_kproc;
1181 }
1182 args->needed += args->sizeof_kproc;
1183 return(PROC_RETURNED);
1184 }
1185
/*
 * KERN_PROC sysctl backend: copy out one kinfo_proc record per matching
 * process.  name[0] selects the filter (ALL/PID/PGRP/TTY/UID/RUID/LCID)
 * and name[1] carries the filter value.  When 'where' is USER_ADDR_NULL
 * the caller is only probing for the required buffer size, which is
 * padded by KERN_PROCSLOP to absorb processes created in the meantime.
 */
int
sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep)
{
    user_addr_t dp = where;
    size_t needed = 0;
    int buflen = where != USER_ADDR_NULL ? *sizep : 0;
    int error = 0;
    boolean_t is_64_bit = FALSE;
    struct kinfo_proc       kproc;
    struct user_kinfo_proc  user_kproc;
    int sizeof_kproc;
    caddr_t kprocp;
    int (*filterfn)(proc_t, void *) = 0;
    struct sysdoproc_args args;
    int uidcheck = 0;
    int ruidcheck = 0;
    int ttycheck = 0;

    /* KERN_PROC_ALL takes no filter value; everything else needs name[1] */
    if (namelen != 2 && !(namelen == 1 && name[0] == KERN_PROC_ALL))
        return (EINVAL);
    /* record layout (and hence record size) depends on the caller's ABI */
    is_64_bit = proc_is64bit(current_proc());
    if (is_64_bit) {
        sizeof_kproc = sizeof(user_kproc);
        kprocp = (caddr_t) &user_kproc;
    }
    else {
        sizeof_kproc = sizeof(kproc);
        kprocp = (caddr_t) &kproc;
    }

    /*
     * PID/PGRP/LCID filters run inside proc_iterate(); the TTY/UID/RUID
     * checks are deferred to sysdoproc_callback() because they need to
     * take credential/tty references.
     */
    switch (name[0]) {

    case KERN_PROC_PID:
        filterfn = sysdoproc_filt_KERN_PROC_PID;
        break;

    case KERN_PROC_PGRP:
        filterfn = sysdoproc_filt_KERN_PROC_PGRP;
        break;

    case KERN_PROC_TTY:
        ttycheck = 1;
        break;

    case KERN_PROC_UID:
        uidcheck = 1;
        break;

    case KERN_PROC_RUID:
        ruidcheck = 1;
        break;

#if CONFIG_LCTX
    case KERN_PROC_LCID:
        filterfn = sysdoproc_filt_KERN_PROC_LCID;
        break;
#endif
    }

    error = 0;
    args.buflen = buflen;
    args.kprocp = kprocp;
    args.is_64_bit = is_64_bit;
    args.dp = dp;
    args.needed = needed;
    args.errorp = &error;
    args.uidcheck = uidcheck;
    args.ruidcheck = ruidcheck;
    args.ttycheck = ttycheck;
    args.sizeof_kproc = sizeof_kproc;
    args.uidval = name[1];

    /* name[1] is smuggled to the filter through the void * cookie */
    proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST), sysdoproc_callback, &args, filterfn, (void *)name[1]);

    if (error)
        return(error);

    dp = args.dp;
    needed = args.needed;

    if (where != USER_ADDR_NULL) {
        *sizep = dp - where;
        if (needed > *sizep)
            return (ENOMEM);
    } else {
        /* size probe: over-estimate so the caller's malloc suffices */
        needed += KERN_PROCSLOP;
        *sizep = needed;
    }
    return (0);
}
1277
/*
 * Fill in an eproc structure for the specified process.
 * Caller has pre-zeroed *ep (see sysdoproc_callback), so fields that are
 * only conditionally assigned here (e.g. e_flag) start out as 0.
 * Takes and releases its own pgrp/session/cred references.
 */
static void
fill_eproc(proc_t p, struct eproc *ep)
{
    struct tty *tp;
    kauth_cred_t my_cred;
    struct pgrp * pg;
    struct session * sessp;

    pg = proc_pgrp(p);
    sessp = proc_session(p);

    ep->e_paddr = p;

    if (pg != PGRP_NULL) {
        ep->e_sess = sessp;
        ep->e_pgid = p->p_pgrpid;
        ep->e_jobc = pg->pg_jobc;
        if ((sessp != SESSION_NULL) && sessp->s_ttyvp)
            ep->e_flag = EPROC_CTTY;
    } else {
        ep->e_sess = (struct session *)0;
        ep->e_pgid = 0;
        ep->e_jobc = 0;
    }
#if CONFIG_LCTX
    if (p->p_lctx) {
        ep->e_lcid = p->p_lctx->lc_id;
    } else {
        ep->e_lcid = 0;
    }
#endif
    ep->e_ppid = p->p_ppid;
    /* Pre-zero the fake historical pcred */
    bzero(&ep->e_pcred, sizeof(struct _pcred));
    if (p->p_ucred) {
        my_cred = kauth_cred_proc_ref(p);

        /* A fake historical pcred */
        ep->e_pcred.p_ruid = my_cred->cr_ruid;
        ep->e_pcred.p_svuid = my_cred->cr_svuid;
        ep->e_pcred.p_rgid = my_cred->cr_rgid;
        ep->e_pcred.p_svgid = my_cred->cr_svgid;
        /* A fake historical *kauth_cred_t */
        ep->e_ucred.cr_ref = my_cred->cr_ref;
        ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
        ep->e_ucred.cr_ngroups = my_cred->cr_ngroups;
        bcopy(my_cred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));

        kauth_cred_unref(&my_cred);
    }
    if (p->p_stat == SIDL || p->p_stat == SZOMB) {
        ep->e_vm.vm_tsize = 0;
        ep->e_vm.vm_dsize = 0;
        ep->e_vm.vm_ssize = 0;
    }
    ep->e_vm.vm_rssize = 0;

    /* note: assignment to tp inside the condition */
    if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
         (tp = sessp->s_ttyp)) {
        ep->e_tdev = tp->t_dev;
        ep->e_tpgid = sessp->s_ttypgrpid;
        ep->e_tsess = tp->t_session;
    } else
        ep->e_tdev = NODEV;

    if (SESS_LEADER(p, sessp))
        ep->e_flag |= EPROC_SLEADER;
    /* historical wait-channel message: always reported empty */
    bzero(&ep->e_wmesg[0], WMESGLEN+1);
    ep->e_xsize = ep->e_xrssize = 0;
    ep->e_xccount = ep->e_xswrss = 0;
    if (sessp != SESSION_NULL)
        session_rele(sessp);
    if(pg != PGRP_NULL)
        pg_rele(pg);
}
1356
/*
 * Fill in an LP64 version of eproc structure for the specified process.
 * Mirrors fill_eproc() field-for-field, widening kernel pointers with
 * CAST_USER_ADDR_T for 64-bit consumers.  Caller has pre-zeroed *ep.
 * Takes and releases its own pgrp/session/cred references.
 */
static void
fill_user_eproc(proc_t p, struct user_eproc *ep)
{
    struct tty *tp;
    struct session *sessp = NULL;
    struct pgrp * pg;
    kauth_cred_t my_cred;

    pg = proc_pgrp(p);
    sessp = proc_session(p);

    ep->e_paddr = CAST_USER_ADDR_T(p);
    if (pg != PGRP_NULL) {
        ep->e_sess = CAST_USER_ADDR_T(sessp);
        ep->e_pgid = p->p_pgrpid;
        ep->e_jobc = pg->pg_jobc;
        if (sessp != SESSION_NULL) {
            if (sessp->s_ttyvp)
                ep->e_flag = EPROC_CTTY;
        }
    } else {
        ep->e_sess = USER_ADDR_NULL;
        ep->e_pgid = 0;
        ep->e_jobc = 0;
    }
#if CONFIG_LCTX
    if (p->p_lctx) {
        ep->e_lcid = p->p_lctx->lc_id;
    } else {
        ep->e_lcid = 0;
    }
#endif
    ep->e_ppid = p->p_ppid;
    /* Pre-zero the fake historical pcred */
    bzero(&ep->e_pcred, sizeof(ep->e_pcred));
    if (p->p_ucred) {
        my_cred = kauth_cred_proc_ref(p);

        /* A fake historical pcred */
        ep->e_pcred.p_ruid = my_cred->cr_ruid;
        ep->e_pcred.p_svuid = my_cred->cr_svuid;
        ep->e_pcred.p_rgid = my_cred->cr_rgid;
        ep->e_pcred.p_svgid = my_cred->cr_svgid;

        /* A fake historical *kauth_cred_t */
        ep->e_ucred.cr_ref = my_cred->cr_ref;
        ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
        ep->e_ucred.cr_ngroups = my_cred->cr_ngroups;
        bcopy(my_cred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));

        kauth_cred_unref(&my_cred);
    }
    if (p->p_stat == SIDL || p->p_stat == SZOMB) {
        ep->e_vm.vm_tsize = 0;
        ep->e_vm.vm_dsize = 0;
        ep->e_vm.vm_ssize = 0;
    }
    ep->e_vm.vm_rssize = 0;

    /* note: assignment to tp inside the condition */
    if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
         (tp = sessp->s_ttyp)) {
        ep->e_tdev = tp->t_dev;
        ep->e_tpgid = sessp->s_ttypgrpid;
        ep->e_tsess = CAST_USER_ADDR_T(tp->t_session);
    } else
        ep->e_tdev = NODEV;

    if (SESS_LEADER(p, sessp))
        ep->e_flag |= EPROC_SLEADER;
    /* historical wait-channel message: always reported empty */
    bzero(&ep->e_wmesg[0], WMESGLEN+1);
    ep->e_xsize = ep->e_xrssize = 0;
    ep->e_xccount = ep->e_xswrss = 0;
    if (sessp != SESSION_NULL)
        session_rele(sessp);
    if (pg != PGRP_NULL)
        pg_rele(pg);
}
1437
/*
 * Fill in an extern_proc (the historical user-visible proc snapshot) for
 * the specified process.  Fields the kernel no longer tracks are reported
 * as 0/NULL for binary compatibility; legacy p_flag bits are synthesized
 * from the modern p_lflag equivalents.
 */
static void
fill_externproc(proc_t p, struct extern_proc *exp)
{
    exp->p_forw = exp->p_back = NULL;
    exp->p_starttime = p->p_start;
    exp->p_vmspace = NULL;
    exp->p_sigacts = p->p_sigacts;
    exp->p_flag = p->p_flag;
    /* re-create historical flag bits from their p_lflag successors */
    if (p->p_lflag & P_LTRACED)
        exp->p_flag |= P_TRACED;
    if (p->p_lflag & P_LPPWAIT)
        exp->p_flag |= P_PPWAIT;
    if (p->p_lflag & P_LEXIT)
        exp->p_flag |= P_WEXIT;
    exp->p_stat = p->p_stat ;
    exp->p_pid = p->p_pid ;
    exp->p_oppid = p->p_oppid ;
    /* Mach related  */
    exp->user_stack = CAST_DOWN(caddr_t, p->user_stack);
    exp->exit_thread = p->exit_thread ;
    exp->p_debugger = p->p_debugger ;
    exp->sigwait = p->sigwait ;
    /* scheduling */
#ifdef _PROC_HAS_SCHEDINFO_
    exp->p_estcpu = p->p_estcpu ;
    exp->p_pctcpu = p->p_pctcpu ;
    exp->p_slptime = p->p_slptime ;
#else
    exp->p_estcpu = 0 ;
    exp->p_pctcpu = 0 ;
    exp->p_slptime = 0 ;
#endif
    exp->p_cpticks = 0 ;
    exp->p_wchan = 0 ;
    exp->p_wmesg = 0 ;
    exp->p_swtime = 0 ;
    bcopy(&p->p_realtimer, &exp->p_realtimer,sizeof(struct itimerval));
    bcopy(&p->p_rtime, &exp->p_rtime,sizeof(struct timeval));
    exp->p_uticks = 0 ;
    exp->p_sticks = 0 ;
    exp->p_iticks = 0 ;
    exp->p_traceflag = 0;
    exp->p_tracep = 0 ;
    exp->p_siglist = 0 ;    /* No longer relevant */
    exp->p_textvp = p->p_textvp ;
    exp->p_holdcnt = 0 ;
    exp->p_sigmask = 0 ;    /* no longer avaialable */
    exp->p_sigignore = p->p_sigignore ;
    exp->p_sigcatch = p->p_sigcatch ;
    exp->p_priority = p->p_priority ;
    exp->p_usrpri = 0 ;
    exp->p_nice = p->p_nice ;
    /* fixed-width copy; explicitly NUL-terminate below */
    bcopy(&p->p_comm, &exp->p_comm,MAXCOMLEN);
    exp->p_comm[MAXCOMLEN] = '\0';
    exp->p_pgrp = p->p_pgrp ;
    exp->p_addr = NULL;
    exp->p_xstat = p->p_xstat ;
    exp->p_acflag = p->p_acflag ;
    exp->p_ru = p->p_ru ;       /* XXX may be NULL */
}
1501
/*
 * Fill in an LP64 version of extern_proc structure for the specified
 * process.  Mirrors fill_externproc(), widening kernel pointers with
 * CAST_USER_ADDR_T and copying timevals field-by-field for the 64-bit
 * structure layout.  Dead historical fields are reported as 0/NULL.
 */
static void
fill_user_externproc(proc_t p, struct user_extern_proc *exp)
{
    exp->p_forw = exp->p_back = USER_ADDR_NULL;
    exp->p_starttime.tv_sec = p->p_start.tv_sec;
    exp->p_starttime.tv_usec = p->p_start.tv_usec;
    exp->p_vmspace = USER_ADDR_NULL;
    exp->p_sigacts = CAST_USER_ADDR_T(p->p_sigacts);
    exp->p_flag = p->p_flag;
    /* re-create historical flag bits from their p_lflag successors */
    if (p->p_lflag & P_LTRACED)
        exp->p_flag |= P_TRACED;
    if (p->p_lflag & P_LPPWAIT)
        exp->p_flag |= P_PPWAIT;
    if (p->p_lflag & P_LEXIT)
        exp->p_flag |= P_WEXIT;
    exp->p_stat = p->p_stat ;
    exp->p_pid = p->p_pid ;
    exp->p_oppid = p->p_oppid ;
    /* Mach related  */
    exp->user_stack = p->user_stack;
    exp->exit_thread = CAST_USER_ADDR_T(p->exit_thread);
    exp->p_debugger = p->p_debugger ;
    exp->sigwait = p->sigwait ;
    /* scheduling */
#ifdef _PROC_HAS_SCHEDINFO_
    exp->p_estcpu = p->p_estcpu ;
    exp->p_pctcpu = p->p_pctcpu ;
    exp->p_slptime = p->p_slptime ;
#else
    exp->p_estcpu = 0 ;
    exp->p_pctcpu = 0 ;
    exp->p_slptime = 0 ;
#endif
    exp->p_cpticks = 0 ;
    exp->p_wchan = 0;
    exp->p_wmesg = 0;
    exp->p_swtime = 0 ;
    exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
    exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;
    exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
    exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;
    exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
    exp->p_rtime.tv_usec = p->p_rtime.tv_usec;
    exp->p_uticks = 0 ;
    exp->p_sticks = 0 ;
    exp->p_iticks = 0 ;
    exp->p_traceflag = 0 ;
    exp->p_tracep = 0;
    exp->p_siglist = 0 ;    /* No longer relevant */
    exp->p_textvp = CAST_USER_ADDR_T(p->p_textvp);
    exp->p_holdcnt = 0 ;
    exp->p_sigmask = 0 ;    /* no longer avaialable */
    exp->p_sigignore = p->p_sigignore ;
    exp->p_sigcatch = p->p_sigcatch ;
    exp->p_priority = p->p_priority ;
    exp->p_usrpri = 0 ;
    exp->p_nice = p->p_nice ;
    /* fixed-width copy; explicitly NUL-terminate below */
    bcopy(&p->p_comm, &exp->p_comm,MAXCOMLEN);
    exp->p_comm[MAXCOMLEN] = '\0';
    exp->p_pgrp = CAST_USER_ADDR_T(p->p_pgrp);
    exp->p_addr = USER_ADDR_NULL;
    exp->p_xstat = p->p_xstat ;
    exp->p_acflag = p->p_acflag ;
    exp->p_ru = CAST_USER_ADDR_T(p->p_ru);      /* XXX may be NULL */
}
1570
1571 static void
1572 fill_proc(proc_t p, struct kinfo_proc *kp)
1573 {
1574 fill_externproc(p, &kp->kp_proc);
1575 fill_eproc(p, &kp->kp_eproc);
1576 }
1577
1578 static void
1579 fill_user_proc(proc_t p, struct user_kinfo_proc *kp)
1580 {
1581 fill_user_externproc(p, &kp->kp_proc);
1582 fill_user_eproc(p, &kp->kp_eproc);
1583 }
1584
1585 int
1586 kdebug_ops(int *name, u_int namelen, user_addr_t where,
1587 size_t *sizep, proc_t p)
1588 {
1589 int ret=0;
1590
1591 if (namelen == 0)
1592 return(ENOTSUP);
1593
1594 ret = suser(kauth_cred_get(), &p->p_acflag);
1595 if (ret)
1596 return(ret);
1597
1598 switch(name[0]) {
1599 case KERN_KDEFLAGS:
1600 case KERN_KDDFLAGS:
1601 case KERN_KDENABLE:
1602 case KERN_KDGETBUF:
1603 case KERN_KDSETUP:
1604 case KERN_KDREMOVE:
1605 case KERN_KDSETREG:
1606 case KERN_KDGETREG:
1607 case KERN_KDREADTR:
1608 case KERN_KDPIDTR:
1609 case KERN_KDTHRMAP:
1610 case KERN_KDPIDEX:
1611 case KERN_KDSETRTCDEC:
1612 case KERN_KDSETBUF:
1613 case KERN_KDGETENTROPY:
1614 ret = kdbg_control(name, namelen, where, sizep);
1615 break;
1616 default:
1617 ret= ENOTSUP;
1618 break;
1619 }
1620 return(ret);
1621 }
1622
1623
1624 /*
1625 * Return the top *sizep bytes of the user stack, or the entire area of the
1626 * user stack down through the saved exec_path, whichever is smaller.
1627 */
1628 int
1629 sysctl_procargs(int *name, u_int namelen, user_addr_t where,
1630 size_t *sizep, proc_t cur_proc)
1631 {
1632 return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 0);
1633 }
1634
1635 static int
1636 sysctl_procargs2(int *name, u_int namelen, user_addr_t where,
1637 size_t *sizep, proc_t cur_proc)
1638 {
1639 return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 1);
1640 }
1641
1642 static int
1643 sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
1644 size_t *sizep, proc_t cur_proc, int argc_yes)
1645 {
1646 proc_t p;
1647 int buflen = where != USER_ADDR_NULL ? *sizep : 0;
1648 int error = 0;
1649 struct _vm_map *proc_map;
1650 struct task * task;
1651 vm_map_copy_t tmp;
1652 user_addr_t arg_addr;
1653 size_t arg_size;
1654 caddr_t data;
1655 size_t argslen=0;
1656 int size;
1657 vm_offset_t copy_start, copy_end;
1658 kern_return_t ret;
1659 int pid;
1660 kauth_cred_t my_cred;
1661 uid_t uid;
1662
1663 if ( namelen < 1 )
1664 return(EINVAL);
1665
1666 if (argc_yes)
1667 buflen -= sizeof(int); /* reserve first word to return argc */
1668
1669 /* we only care about buflen when where (oldp from sysctl) is not NULL. */
1670 /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
1671 /* is not NULL then the caller wants us to return the length needed to */
1672 /* hold the data we would return */
1673 if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
1674 return(EINVAL);
1675 }
1676 arg_size = buflen;
1677
1678 /*
1679 * Lookup process by pid
1680 */
1681 pid = name[0];
1682 p = proc_find(pid);
1683 if (p == NULL) {
1684 return(EINVAL);
1685 }
1686
1687 /*
1688 * Copy the top N bytes of the stack.
1689 * On all machines we have so far, the stack grows
1690 * downwards.
1691 *
1692 * If the user expects no more than N bytes of
1693 * argument list, use that as a guess for the
1694 * size.
1695 */
1696
1697 if (!p->user_stack) {
1698 proc_rele(p);
1699 return(EINVAL);
1700 }
1701
1702 if (where == USER_ADDR_NULL) {
1703 /* caller only wants to know length of proc args data */
1704 if (sizep == NULL) {
1705 proc_rele(p);
1706 return(EFAULT);
1707 }
1708
1709 size = p->p_argslen;
1710 proc_rele(p);
1711 if (argc_yes) {
1712 size += sizeof(int);
1713 }
1714 else {
1715 /*
1716 * old PROCARGS will return the executable's path and plus some
1717 * extra space for work alignment and data tags
1718 */
1719 size += PATH_MAX + (6 * sizeof(int));
1720 }
1721 size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
1722 *sizep = size;
1723 return (0);
1724 }
1725
1726 my_cred = kauth_cred_proc_ref(p);
1727 uid = kauth_cred_getuid(my_cred);
1728 kauth_cred_unref(&my_cred);
1729
1730 if ((uid != kauth_cred_getuid(kauth_cred_get()))
1731 && suser(kauth_cred_get(), &cur_proc->p_acflag)) {
1732 proc_rele(p);
1733 return (EINVAL);
1734 }
1735
1736 if ((u_int)arg_size > p->p_argslen)
1737 arg_size = round_page(p->p_argslen);
1738
1739 arg_addr = p->user_stack - arg_size;
1740
1741
1742 /*
1743 * Before we can block (any VM code), make another
1744 * reference to the map to keep it alive. We do
1745 * that by getting a reference on the task itself.
1746 */
1747 task = p->task;
1748 if (task == NULL) {
1749 proc_rele(p);
1750 return(EINVAL);
1751 }
1752
1753 argslen = p->p_argslen;
1754 /*
1755 * Once we have a task reference we can convert that into a
1756 * map reference, which we will use in the calls below. The
1757 * task/process may change its map after we take this reference
1758 * (see execve), but the worst that will happen then is a return
1759 * of stale info (which is always a possibility).
1760 */
1761 task_reference(task);
1762 proc_rele(p);
1763 proc_map = get_task_map_reference(task);
1764 task_deallocate(task);
1765
1766 if (proc_map == NULL)
1767 return(EINVAL);
1768
1769
1770 ret = kmem_alloc(kernel_map, &copy_start, round_page(arg_size));
1771 if (ret != KERN_SUCCESS) {
1772 vm_map_deallocate(proc_map);
1773 return(ENOMEM);
1774 }
1775
1776 copy_end = round_page(copy_start + arg_size);
1777
1778 if( vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
1779 (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
1780 vm_map_deallocate(proc_map);
1781 kmem_free(kernel_map, copy_start,
1782 round_page(arg_size));
1783 return (EIO);
1784 }
1785
1786 /*
1787 * Now that we've done the copyin from the process'
1788 * map, we can release the reference to it.
1789 */
1790 vm_map_deallocate(proc_map);
1791
1792 if( vm_map_copy_overwrite(kernel_map,
1793 (vm_map_address_t)copy_start,
1794 tmp, FALSE) != KERN_SUCCESS) {
1795 kmem_free(kernel_map, copy_start,
1796 round_page(arg_size));
1797 return (EIO);
1798 }
1799
1800 if (arg_size > argslen) {
1801 data = (caddr_t) (copy_end - argslen);
1802 size = argslen;
1803 } else {
1804 data = (caddr_t) (copy_end - arg_size);
1805 size = arg_size;
1806 }
1807
1808 if (argc_yes) {
1809 /* Put processes argc as the first word in the copyout buffer */
1810 suword(where, p->p_argc);
1811 error = copyout(data, (where + sizeof(int)), size);
1812 size += sizeof(int);
1813 } else {
1814 error = copyout(data, where, size);
1815
1816 /*
1817 * Make the old PROCARGS work to return the executable's path
1818 * But, only if there is enough space in the provided buffer
1819 *
1820 * on entry: data [possibily] points to the beginning of the path
1821 *
1822 * Note: we keep all pointers&sizes aligned to word boundries
1823 */
1824 if ( (! error) && (buflen > 0 && (u_int)buflen > argslen) )
1825 {
1826 int binPath_sz, alignedBinPath_sz = 0;
1827 int extraSpaceNeeded, addThis;
1828 user_addr_t placeHere;
1829 char * str = (char *) data;
1830 int max_len = size;
1831
1832 /* Some apps are really bad about messing up their stacks
1833 So, we have to be extra careful about getting the length
1834 of the executing binary. If we encounter an error, we bail.
1835 */
1836
1837 /* Limit ourselves to PATH_MAX paths */
1838 if ( max_len > PATH_MAX ) max_len = PATH_MAX;
1839
1840 binPath_sz = 0;
1841
1842 while ( (binPath_sz < max_len-1) && (*str++ != 0) )
1843 binPath_sz++;
1844
1845 /* If we have a NUL terminator, copy it, too */
1846 if (binPath_sz < max_len-1) binPath_sz += 1;
1847
1848 /* Pre-Flight the space requiremnts */
1849
1850 /* Account for the padding that fills out binPath to the next word */
1851 alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;
1852
1853 placeHere = where + size;
1854
1855 /* Account for the bytes needed to keep placeHere word aligned */
1856 addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;
1857
1858 /* Add up all the space that is needed */
1859 extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));
1860
1861 /* is there is room to tack on argv[0]? */
1862 if ( (buflen & ~(sizeof(int)-1)) >= ( argslen + extraSpaceNeeded ))
1863 {
1864 placeHere += addThis;
1865 suword(placeHere, 0);
1866 placeHere += sizeof(int);
1867 suword(placeHere, 0xBFFF0000);
1868 placeHere += sizeof(int);
1869 suword(placeHere, 0);
1870 placeHere += sizeof(int);
1871 error = copyout(data, placeHere, binPath_sz);
1872 if ( ! error )
1873 {
1874 placeHere += binPath_sz;
1875 suword(placeHere, 0);
1876 size += extraSpaceNeeded;
1877 }
1878 }
1879 }
1880 }
1881
1882 if (copy_start != (vm_offset_t) 0) {
1883 kmem_free(kernel_map, copy_start, copy_end - copy_start);
1884 }
1885 if (error) {
1886 return(error);
1887 }
1888
1889 if (where != USER_ADDR_NULL)
1890 *sizep = size;
1891 return (0);
1892 }
1893
1894
1895 /*
1896 * Max number of concurrent aio requests
1897 */
1898 static int
1899 sysctl_aiomax
1900 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1901 {
1902 int new_value, changed;
1903 int error = sysctl_io_number(req, aio_max_requests, sizeof(int), &new_value, &changed);
1904 if (changed) {
1905 /* make sure the system-wide limit is greater than the per process limit */
1906 if (new_value >= aio_max_requests_per_process)
1907 aio_max_requests = new_value;
1908 else
1909 error = EINVAL;
1910 }
1911 return(error);
1912 }
1913
1914
1915 /*
1916 * Max number of concurrent aio requests per process
1917 */
1918 static int
1919 sysctl_aioprocmax
1920 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1921 {
1922 int new_value, changed;
1923 int error = sysctl_io_number(req, aio_max_requests_per_process, sizeof(int), &new_value, &changed);
1924 if (changed) {
1925 /* make sure per process limit is less than the system-wide limit */
1926 if (new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX)
1927 aio_max_requests_per_process = new_value;
1928 else
1929 error = EINVAL;
1930 }
1931 return(error);
1932 }
1933
1934
1935 /*
1936 * Max number of async IO worker threads
1937 */
1938 static int
1939 sysctl_aiothreads
1940 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1941 {
1942 int new_value, changed;
1943 int error = sysctl_io_number(req, aio_worker_threads, sizeof(int), &new_value, &changed);
1944 if (changed) {
1945 /* we only allow an increase in the number of worker threads */
1946 if (new_value > aio_worker_threads ) {
1947 _aio_create_worker_threads((new_value - aio_worker_threads));
1948 aio_worker_threads = new_value;
1949 }
1950 else
1951 error = EINVAL;
1952 }
1953 return(error);
1954 }
1955
1956
1957 /*
1958 * System-wide limit on the max number of processes
1959 */
1960 static int
1961 sysctl_maxproc
1962 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1963 {
1964 int new_value, changed;
1965 int error = sysctl_io_number(req, maxproc, sizeof(int), &new_value, &changed);
1966 if (changed) {
1967 AUDIT_ARG(value, new_value);
1968 /* make sure the system-wide limit is less than the configured hard
1969 limit set at kernel compilation */
1970 if (new_value <= hard_maxproc && new_value > 0)
1971 maxproc = new_value;
1972 else
1973 error = EINVAL;
1974 }
1975 return(error);
1976 }
1977
/* Read-only kernel identity: kern.ostype, kern.osrelease, kern.osrevision,
 * kern.version. */
SYSCTL_STRING(_kern, KERN_OSTYPE, ostype,
    CTLFLAG_RD | CTLFLAG_KERN,
    ostype, 0, "");
SYSCTL_STRING(_kern, KERN_OSRELEASE, osrelease,
    CTLFLAG_RD | CTLFLAG_KERN,
    osrelease, 0, "");
SYSCTL_INT(_kern, KERN_OSREV, osrevision,
    CTLFLAG_RD | CTLFLAG_KERN,
    NULL, BSD, "");
SYSCTL_STRING(_kern, KERN_VERSION, version,
    CTLFLAG_RD | CTLFLAG_KERN,
    version, 0, "");
1990
1991 /* PR-5293665: need to use a callback function for kern.osversion to set
1992 * osversion in IORegistry */
1993
1994 static int
1995 sysctl_osversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
1996 {
1997 int rval = 0;
1998
1999 rval = sysctl_handle_string(oidp, arg1, arg2, req);
2000
2001 if (req->newptr) {
2002 IORegistrySetOSBuildVersion((char *)arg1);
2003 }
2004
2005 return rval;
2006 }
2007
/* kern.osversion: writable build string, mirrored into IORegistry on set. */
SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
    CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING,
    osversion, 256 /* OSVERSIZE*/,
    sysctl_osversion, "A", "");
2012
2013 static int
2014 sysctl_sysctl_bootargs
2015 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2016 {
2017 int error;
2018 char buf[256];
2019
2020 strlcpy(buf, PE_boot_args(), 256);
2021 error = sysctl_io_string(req, buf, 256, 0, NULL);
2022 return(error);
2023 }
2024
/* kern.bootargs plus assorted static limits and POSIX feature values. */
SYSCTL_PROC(_kern, OID_AUTO, bootargs,
    CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING,
    NULL, 0,
    sysctl_sysctl_bootargs, "A", "bootargs");

SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles,
    CTLFLAG_RW | CTLFLAG_KERN,
    &maxfiles, 0, "");
SYSCTL_INT(_kern, KERN_ARGMAX, argmax,
    CTLFLAG_RD | CTLFLAG_KERN,
    NULL, ARG_MAX, "");
SYSCTL_INT(_kern, KERN_POSIX1, posix1version,
    CTLFLAG_RD | CTLFLAG_KERN,
    NULL, _POSIX_VERSION, "");
SYSCTL_INT(_kern, KERN_NGROUPS, ngroups,
    CTLFLAG_RD | CTLFLAG_KERN,
    NULL, NGROUPS_MAX, "");
SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control,
    CTLFLAG_RD | CTLFLAG_KERN,
    NULL, 1, "");
#if 1   /* _POSIX_SAVED_IDS from <unistd.h> */
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
    CTLFLAG_RD | CTLFLAG_KERN,
    NULL, 1, "");
#else
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
    CTLFLAG_RD | CTLFLAG_KERN,
    NULL, 0, "");
#endif
2054
/*
 * kern.maxvnodes: resize the desired-vnode count, and the VM object
 * cache and namecache that are scaled from it.
 * NOTE(review): sysctl_io_number() writes the new value straight into the
 * global 'desiredvnodes' with no range validation, and the two resize
 * calls run even on a pure read (oldval == desiredvnodes) — confirm this
 * is intentional before changing.
 */
static int
sysctl_maxvnodes (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    unsigned int oldval = desiredvnodes;
    int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &desiredvnodes, NULL);
    reset_vmobjectcache(oldval, desiredvnodes);
    resize_namecache(desiredvnodes);
    return(error);
}
2064
/* Writable tunables backed by the handlers above. */
SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_maxvnodes, "I", "");

SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_maxproc, "I", "");

SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_aiomax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_aioprocmax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_aiothreads, "I", "");
2084
2085 static int
2086 sysctl_securelvl
2087 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2088 {
2089 int new_value, changed;
2090 int error = sysctl_io_number(req, securelevel, sizeof(int), &new_value, &changed);
2091 if (changed) {
2092 if (!(new_value < securelevel && req->p->p_pid != 1)) {
2093 proc_list_lock();
2094 securelevel = new_value;
2095 proc_list_unlock();
2096 } else {
2097 error = EPERM;
2098 }
2099 }
2100 return(error);
2101 }
2102
/* kern.securelevel: see sysctl_securelvl for the lowering restriction. */
SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_securelvl, "I", "");
2106
2107
2108 static int
2109 sysctl_domainname
2110 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2111 {
2112 int error, changed;
2113 error = sysctl_io_string(req, domainname, sizeof(domainname), 0, &changed);
2114 if (changed) {
2115 domainnamelen = strlen(domainname);
2116 }
2117 return(error);
2118 }
2119
/* kern.nisdomainname (via handler above) and the writable kern.hostid. */
SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
    CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_domainname, "A", "");

SYSCTL_INT(_kern, KERN_HOSTID, hostid,
    CTLFLAG_RW | CTLFLAG_KERN,
    &hostid, 0, "");
2127
2128 static int
2129 sysctl_hostname
2130 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2131 {
2132 int error, changed;
2133 error = sysctl_io_string(req, hostname, sizeof(hostname), 1, &changed);
2134 if (changed) {
2135 hostnamelen = req->newlen;
2136 }
2137 return(error);
2138 }
2139
2140
/* kern.hostname: see sysctl_hostname above. */
SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
    CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_hostname, "A", "");
2144
2145 static int
2146 sysctl_procname
2147 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2148 {
2149 /* Original code allowed writing, I'm copying this, although this all makes
2150 no sense to me. Besides, this sysctl is never used. */
2151 return sysctl_io_string(req, &req->p->p_name[0], (2*MAXCOMLEN+1), 1, NULL);
2152 }
2153
/* kern.procname (writable by anybody) and the speculative-read kill switch. */
SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY,
    0, 0, sysctl_procname, "A", "");

SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
    CTLFLAG_RW | CTLFLAG_KERN,
    &speculative_reads_disabled, 0, "");
2161
2162 static int
2163 sysctl_boottime
2164 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2165 {
2166 struct timeval t;
2167
2168 t.tv_sec = boottime_sec();
2169 t.tv_usec = 0;
2170
2171 return sysctl_io_opaque(req, &t, sizeof(t), NULL);
2172 }
2173
/* kern.boottime: opaque struct timeval, read-only. */
SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
    CTLTYPE_STRUCT | CTLFLAG_RD,
    0, 0, sysctl_boottime, "S,timeval", "");
2177
2178 static int
2179 sysctl_symfile
2180 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2181 {
2182 char *str;
2183 int error = get_kernel_symfile(req->p, &str);
2184 if (error)
2185 return (error);
2186 return sysctl_io_string(req, str, 0, 0, NULL);
2187 }
2188
2189
/* kern.symfile: read-only kernel symbol file path. */
SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
    CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, sysctl_symfile, "A", "");
2193
#if NFSCLIENT
/* kern.netboot: nonzero when the root filesystem is network-mounted. */
static int
sysctl_netboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    return sysctl_io_number(req, netboot_root(), sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
    CTLTYPE_INT | CTLFLAG_RD,
    0, 0, sysctl_netboot, "I", "");
#endif
2206
/*
 * kern.usrstack handler: reports the calling process's user stack address
 * as a 32-bit int.  NOTE(review): the (int) cast truncates user_stack for
 * 64-bit processes — this is the legacy interface; kern.usrstack64 below
 * carries the full value.
 */
static int
sysctl_usrstack
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, (int)req->p->user_stack, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack,
		CTLTYPE_INT | CTLFLAG_RD,
		0, 0, sysctl_usrstack, "I", "");
2217
/*
 * kern.usrstack64 handler: full-width (64-bit capable) variant of
 * kern.usrstack; reports user_stack at its native size.  Read-only.
 */
static int
sysctl_usrstack64
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, req->p->user_stack, sizeof(req->p->user_stack), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
		CTLTYPE_QUAD | CTLFLAG_RD,
		0, 0, sysctl_usrstack64, "Q", "");
2228
/* kern.corefile: pattern/path used when writing core dumps; root-writable. */
SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
		CTLFLAG_RW | CTLFLAG_KERN,
		corefilename, sizeof(corefilename), "");
2232
2233 static int
2234 sysctl_coredump
2235 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2236 {
2237 #ifdef SECURE_KERNEL
2238 return (ENOTSUP);
2239 #endif
2240 int new_value, changed;
2241 int error = sysctl_io_number(req, do_coredump, sizeof(int), &new_value, &changed);
2242 if (changed) {
2243 if ((new_value == 0) || (new_value == 1))
2244 do_coredump = new_value;
2245 else
2246 error = EINVAL;
2247 }
2248 return(error);
2249 }
2250
/* kern.coredump: 0/1 toggle for core dump generation. */
SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
		CTLTYPE_INT | CTLFLAG_RW,
		0, 0, sysctl_coredump, "I", "");
2254
2255 static int
2256 sysctl_suid_coredump
2257 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2258 {
2259 #ifdef SECURE_KERNEL
2260 return (ENOTSUP);
2261 #endif
2262 int new_value, changed;
2263 int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed);
2264 if (changed) {
2265 if ((new_value == 0) || (new_value == 1))
2266 sugid_coredump = new_value;
2267 else
2268 error = EINVAL;
2269 }
2270 return(error);
2271 }
2272
/* kern.sugid_coredump: 0/1 toggle for core dumps from set-id processes. */
SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
		CTLTYPE_INT | CTLFLAG_RW,
		0, 0, sysctl_suid_coredump, "I", "");
2276
2277 static int
2278 sysctl_delayterm
2279 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2280 {
2281 struct proc *p = req->p;
2282 int new_value, changed;
2283 int error = sysctl_io_number(req, (req->p->p_lflag & P_LDELAYTERM)? 1: 0, sizeof(int), &new_value, &changed);
2284 if (changed) {
2285 proc_lock(p);
2286 if (new_value)
2287 req->p->p_lflag |= P_LDELAYTERM;
2288 else
2289 req->p->p_lflag &= ~P_LDELAYTERM;
2290 proc_unlock(p);
2291 }
2292 return(error);
2293 }
2294
/* kern.delayterm: per-process delayed-termination flag. */
SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
		CTLTYPE_INT | CTLFLAG_RW,
		0, 0, sysctl_delayterm, "I", "");
2298
/*
 * kern.proc_low_pri_io handler: get/set the calling process's disk I/O
 * policy (p_iopol_disk), encoded for userspace as:
 *   0 = IOPOL_NORMAL (or IOPOL_DEFAULT), 1 = IOPOL_THROTTLE, 2 = IOPOL_PASSIVE.
 * Reads and writes of p_iopol_disk are performed under the proc lock.
 */
static int
sysctl_proc_low_pri_io
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	struct proc *p = req->p;
	int new_value, old_value, changed;
	int error;

	proc_lock(p);
	switch (req->p->p_iopol_disk) {
	case IOPOL_DEFAULT:
	case IOPOL_NORMAL:
		old_value = 0;
		break;
	case IOPOL_THROTTLE:
		old_value = 1;
		break;
	case IOPOL_PASSIVE:
		old_value = 2;
		break;
	default:
		/* this should never happen, but to be robust, return the default value */
		old_value = 0;
		break;
	}
	proc_unlock(p);

	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
	if (changed) {
		proc_lock(p);
		/*
		 * Bit 0 takes precedence over bit 1, so writing 3 selects
		 * IOPOL_THROTTLE.  Nonzero values with neither low bit set
		 * (e.g. 4) are silently ignored — the policy is unchanged
		 * and success is returned.
		 */
		if (new_value & 0x01)
			req->p->p_iopol_disk = IOPOL_THROTTLE;
		else if (new_value & 0x02)
			req->p->p_iopol_disk = IOPOL_PASSIVE;
		else if (new_value == 0)
			req->p->p_iopol_disk = IOPOL_NORMAL;
		proc_unlock(p);
	}
	return(error);
}

SYSCTL_PROC(_kern, KERN_PROC_LOW_PRI_IO, proc_low_pri_io,
		CTLTYPE_INT | CTLFLAG_RW,
		0, 0, sysctl_proc_low_pri_io, "I", "");
2343
2344 static int
2345 sysctl_rage_vnode
2346 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2347 {
2348 struct proc *p = req->p;
2349 struct uthread *ut;
2350 int new_value, old_value, changed;
2351 int error;
2352
2353 ut = get_bsdthread_info(current_thread());
2354
2355 if (ut->uu_flag & UT_RAGE_VNODES)
2356 old_value = KERN_RAGE_THREAD;
2357 else if (p->p_lflag & P_LRAGE_VNODES)
2358 old_value = KERN_RAGE_PROC;
2359 else
2360 old_value = 0;
2361
2362 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2363
2364 if (error == 0) {
2365 switch (new_value) {
2366 case KERN_RAGE_PROC:
2367 proc_lock(p);
2368 p->p_lflag |= P_LRAGE_VNODES;
2369 proc_unlock(p);
2370 break;
2371 case KERN_UNRAGE_PROC:
2372 proc_lock(p);
2373 p->p_lflag &= ~P_LRAGE_VNODES;
2374 proc_unlock(p);
2375 break;
2376
2377 case KERN_RAGE_THREAD:
2378 ut->uu_flag |= UT_RAGE_VNODES;
2379 break;
2380 case KERN_UNRAGE_THREAD:
2381 ut = get_bsdthread_info(current_thread());
2382 ut->uu_flag &= ~UT_RAGE_VNODES;
2383 break;
2384 }
2385 }
2386 return(error);
2387 }
2388
/* kern.rage_vnode: per-thread/per-process vnode rapid-aging control. */
SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
		CTLTYPE_INT | CTLFLAG_RW,
		0, 0, sysctl_rage_vnode, "I", "");
2392
2393
2394 static int
2395 sysctl_kern_check_openevt
2396 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2397 {
2398 struct proc *p = req->p;
2399 int new_value, old_value, changed;
2400 int error;
2401
2402 if (p->p_flag & P_CHECKOPENEVT) {
2403 old_value = KERN_OPENEVT_PROC;
2404 } else {
2405 old_value = 0;
2406 }
2407
2408 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2409
2410 if (error == 0) {
2411 switch (new_value) {
2412 case KERN_OPENEVT_PROC:
2413 OSBitOrAtomic(P_CHECKOPENEVT, (UInt32 *)&p->p_flag);
2414 break;
2415
2416 case KERN_UNOPENEVT_PROC:
2417 OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT), (UInt32 *)&p->p_flag);
2418 break;
2419
2420 default:
2421 error = EINVAL;
2422 }
2423 }
2424 return(error);
2425 }
2426
/* kern.check_openevt: per-process check-open-evt flag; writable by anybody. */
SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY,
		0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");
2429
2430
2431
/*
 * kern.nx handler: get/set the global nx_enabled (no-execute protection)
 * flag.  On SECURE_KERNEL builds the control always returns ENOTSUP (the
 * code below the #ifdef is compiled but unreachable there).  On i386,
 * enabling requires the CPU to advertise the XD/NX extended feature bit.
 */
static int
sysctl_nx
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
	return ENOTSUP;
#endif
	int new_value, changed;
	int error;

	error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);
	if (error)
		return error;

	if (changed) {
#ifdef __i386__
		/*
		 * Only allow setting if NX is supported on the chip
		 */
		if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD))
			return ENOTSUP;
#endif
		/* no value validation here: any int is stored as-is */
		nx_enabled = new_value;
	}
	/* error is necessarily 0 at this point (checked above) */
	return(error);
}



SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN,
		0, 0, sysctl_nx, "I", "");
2464
2465 static int
2466 sysctl_loadavg
2467 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2468 {
2469 if (proc_is64bit(req->p)) {
2470 struct user_loadavg loadinfo64;
2471 loadavg32to64(&averunnable, &loadinfo64);
2472 return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);
2473 } else {
2474 return sysctl_io_opaque(req, &averunnable, sizeof(averunnable), NULL);
2475 }
2476 }
2477
/* vm.loadavg: opaque "S,loadavg" struct, read-only. */
SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
		CTLTYPE_STRUCT | CTLFLAG_RD,
		0, 0, sysctl_loadavg, "S,loadavg", "");
2481
2482 static int
2483 sysctl_swapusage
2484 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2485 {
2486 int error;
2487 uint64_t swap_total;
2488 uint64_t swap_avail;
2489 uint32_t swap_pagesize;
2490 boolean_t swap_encrypted;
2491 struct xsw_usage xsu;
2492
2493 error = macx_swapinfo(&swap_total,
2494 &swap_avail,
2495 &swap_pagesize,
2496 &swap_encrypted);
2497 if (error)
2498 return error;
2499
2500 xsu.xsu_total = swap_total;
2501 xsu.xsu_avail = swap_avail;
2502 xsu.xsu_used = swap_total - swap_avail;
2503 xsu.xsu_pagesize = swap_pagesize;
2504 xsu.xsu_encrypted = swap_encrypted;
2505 return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL);
2506 }
2507
2508
2509
/* vm.swapusage: opaque "S,xsw_usage" struct, read-only. */
SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
		CTLTYPE_STRUCT | CTLFLAG_RD,
		0, 0, sysctl_swapusage, "S,xsw_usage", "");


/* this kernel does NOT implement shared_region_make_private_np() */
/* kern.shreg_private: NULL backing variable with arg2 == 0, so the
 * constant 0 is reported — presumably meaning "not privatizable". */
SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
		CTLFLAG_RD,
		NULL, 0, "");
2519
#if __i386__
/*
 * sysctl.proc_exec_affinity handler: get/set the calling process's
 * P_AFFINITY flag, expressed to userspace as a cpu_type_t.  The flag set
 * reads back as CPU_TYPE_POWERPC and clear as CPU_TYPE_I386.
 * NOTE(review): presumably this steers exec of fat binaries toward the
 * translated slice — confirm against the exec path.
 */
static int
sysctl_sysctl_exec_affinity(__unused struct sysctl_oid *oidp,
			   __unused void *arg1, __unused int arg2,
			   struct sysctl_req *req)
{
	proc_t cur_proc = req->p;
	int error;

	/* old-value request: report the current affinity as a cpu type */
	if (req->oldptr != USER_ADDR_NULL) {
		cpu_type_t oldcputype = (cur_proc->p_flag & P_AFFINITY) ? CPU_TYPE_POWERPC : CPU_TYPE_I386;
		if ((error = SYSCTL_OUT(req, &oldcputype, sizeof(oldcputype))))
			return error;
	}

	/* new-value request: only the two known cpu types are accepted */
	if (req->newptr != USER_ADDR_NULL) {
		cpu_type_t newcputype;
		if ((error = SYSCTL_IN(req, &newcputype, sizeof(newcputype))))
			return error;
		if (newcputype == CPU_TYPE_I386)
			OSBitAndAtomic(~((uint32_t)P_AFFINITY), (UInt32 *)&cur_proc->p_flag);
		else if (newcputype == CPU_TYPE_POWERPC)
			OSBitOrAtomic(P_AFFINITY, (UInt32 *)&cur_proc->p_flag);
		else
			return (EINVAL);
	}

	return 0;
}
SYSCTL_PROC(_sysctl, OID_AUTO, proc_exec_affinity, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, 0, 0, sysctl_sysctl_exec_affinity ,"I","proc_exec_affinity");
#endif
2551
/*
 * Resolve the CPU type of a process for the proc_native / proc_cputype
 * sysctls.
 *
 * namelen == 0: use cur_proc directly (no extra reference taken).
 * namelen == 1: name[0] is a pid looked up via proc_find(), which takes a
 *               reference; it is released before returning.
 * Any other namelen: EINVAL.
 *
 * On success *cputype is the host cpu_type(), with CPU_ARCH_ABI64 OR'd in
 * for 64-bit processes — except on i386, where a P_TRANSLATED process
 * reports CPU_TYPE_POWERPC instead.
 */
static int
fetch_process_cputype(
	proc_t cur_proc,
	int *name,
	u_int namelen,
	cpu_type_t *cputype)
{
	proc_t p = PROC_NULL;
	int refheld = 0;	/* nonzero iff p came from proc_find() and must be released */
	cpu_type_t ret = 0;
	int error = 0;

	if (namelen == 0)
		p = cur_proc;
	else if (namelen == 1) {
		p = proc_find(name[0]);
		if (p == NULL)
			return (EINVAL);
		refheld = 1;
	} else {
		error = EINVAL;
		goto out;
	}

#if __i386__
	if (p->p_flag & P_TRANSLATED) {
		ret = CPU_TYPE_POWERPC;
	}
	else
#endif
	{
		ret = cpu_type();
		if (IS_64BIT_PROCESS(p))
			ret |= CPU_ARCH_ABI64;
	}
	*cputype = ret;

	if (refheld != 0)
		proc_rele(p);
out:
	return (error);
}
2594
2595 static int
2596 sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
2597 struct sysctl_req *req)
2598 {
2599 int error;
2600 cpu_type_t proc_cputype = 0;
2601 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2602 return error;
2603 int res = 1;
2604 if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
2605 res = 0;
2606 return SYSCTL_OUT(req, &res, sizeof(res));
2607 }
/* sysctl.proc_native: 1 if the target process runs the host's native cpu type, else 0. */
SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_native ,"I","proc_native");
2609
/*
 * sysctl.proc_cputype handler: reports the cpu_type_t of the target
 * process (pid from the OID name vector, or the caller when none given),
 * as resolved by fetch_process_cputype().
 */
static int
sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
	struct sysctl_req *req)
{
	int error;
	cpu_type_t proc_cputype = 0;
	if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
		return error;
	return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
}
SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_cputype ,"I","proc_cputype");
2621
2622 static int
2623 sysctl_safeboot
2624 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2625 {
2626 return sysctl_io_number(req, boothowto & RB_SAFEBOOT ? 1 : 0, sizeof(int), NULL, NULL);
2627 }
2628
/* kern.safeboot: read-only safe-boot indicator. */
SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
		CTLTYPE_INT | CTLFLAG_RD,
		0, 0, sysctl_safeboot, "I", "");
2632
2633 static int
2634 sysctl_singleuser
2635 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2636 {
2637 return sysctl_io_number(req, boothowto & RB_SINGLE ? 1 : 0, sizeof(int), NULL, NULL);
2638 }
2639
/* kern.singleuser: read-only single-user-boot indicator. */
SYSCTL_PROC(_kern, OID_AUTO, singleuser,
		CTLTYPE_INT | CTLFLAG_RD,
		0, 0, sysctl_singleuser, "I", "");
2643
/*
 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
 */
extern boolean_t affinity_sets_enabled;
extern int affinity_sets_mapping;

/* NOTE(review): affinity_sets_enabled is a boolean_t exposed through an
 * (int *) cast — assumes boolean_t and int share a representation on all
 * supported targets; confirm. */
SYSCTL_INT (_kern, OID_AUTO, affinity_sets_enabled,
	    CTLFLAG_RW, (int *) &affinity_sets_enabled, 0, "hinting enabled");
SYSCTL_INT (_kern, OID_AUTO, affinity_sets_mapping,
	    CTLFLAG_RW, &affinity_sets_mapping, 0, "mapping policy");
2654
/*
 * Limit on total memory users can wire.
 *
 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
 *
 * vm_user_wire_limit - per address space limit on wired memory. This puts a cap on the process's rlimit value.
 *
 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
 * kmem_init().
 *
 * All values are in bytes.
 */

vm_map_size_t	vm_global_user_wire_limit;
vm_map_size_t	vm_user_wire_limit;

/* NOTE(review): both limits are writable with no range validation at the
 * sysctl layer; presumably their consumers tolerate arbitrary values. */
SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW, &vm_global_user_wire_limit, "");
SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW, &vm_user_wire_limit, "");