]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kern_sysctl.c
xnu-1228.9.59.tar.gz
[apple/xnu.git] / bsd / kern / kern_sysctl.c
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*-
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
65 */
66 /*
67 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
68 * support for mandatory and extensible security protections. This notice
69 * is included in support of clause 2.2 (b) of the Apple Public License,
70 * Version 2.0.
71 */
72
73 /*
74 * DEPRECATED sysctl system call code
75 *
76 * Everything in this file is deprecated. Sysctls should be handled
77 * by the code in kern_newsysctl.c.
78 * The remaining "case" sections are supposed to be converted into
79 * SYSCTL_*-style definitions, and as soon as all of them are gone,
80 * this source file is supposed to die.
81 *
82 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
83 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
84 */
85
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/malloc.h>
90 #include <sys/proc_internal.h>
91 #include <sys/kauth.h>
92 #include <sys/file_internal.h>
93 #include <sys/vnode_internal.h>
94 #include <sys/unistd.h>
95 #include <sys/buf.h>
96 #include <sys/ioctl.h>
97 #include <sys/namei.h>
98 #include <sys/tty.h>
99 #include <sys/disklabel.h>
100 #include <sys/vm.h>
101 #include <sys/sysctl.h>
102 #include <sys/user.h>
103 #include <sys/aio_kern.h>
104 #include <sys/reboot.h>
105
106 #include <bsm/audit_kernel.h>
107
108 #include <mach/machine.h>
109 #include <mach/mach_types.h>
110 #include <mach/vm_param.h>
111 #include <kern/task.h>
112 #include <kern/lock.h>
113 #include <vm/vm_kern.h>
114 #include <vm/vm_map.h>
115 #include <mach/host_info.h>
116
117 #include <sys/mount_internal.h>
118 #include <sys/kdebug.h>
119 #include <sys/sysproto.h>
120
121 #include <IOKit/IOPlatformExpert.h>
122 #include <pexpert/pexpert.h>
123
124 #include <machine/machine_routines.h>
125 #include <machine/exec.h>
126
127 #include <vm/vm_protos.h>
128
129 #ifdef __i386__
130 #include <i386/cpuid.h>
131 #endif
132
/* Legacy-style sysctl dispatch functions (old "sysctlfn" calling convention). */
sysctlfn kern_sysctl;
#ifdef DEBUG
sysctlfn debug_sysctl;
#endif
extern sysctlfn net_sysctl;
extern sysctlfn cpu_sysctl;

/* Tunables owned by other subsystems but exposed through sysctl here. */
extern int aio_max_requests;
extern int aio_max_requests_per_process;
extern int aio_worker_threads;
extern int lowpri_IO_window_msecs;
extern int lowpri_IO_delay_msecs;
extern int nx_enabled;
extern int speculative_reads_disabled;

/* Helpers that flatten a proc into the various kinfo_proc wire formats. */
static void
fill_eproc(proc_t p, struct eproc *ep);
static void
fill_externproc(proc_t p, struct extern_proc *exp);
static void
fill_user_eproc(proc_t p, struct user_eproc *ep);
static void
fill_user_proc(proc_t p, struct user_kinfo_proc *kp);
static void
fill_user_externproc(proc_t p, struct user_extern_proc *exp);

/* kdebug / profiling control entry points. */
extern int
kdbg_control(int *name, u_int namelen, user_addr_t where, size_t * sizep);
int
kdebug_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep, proc_t p);
#if NFSCLIENT
extern int
netboot_root(void);
#endif
int
pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
              proc_t p);
__private_extern__ kern_return_t
reset_vmobjectcache(unsigned int val1, unsigned int val2);

/* KERN_PROC / KERN_PROF handlers. */
int
sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep);
int
sysctl_doprof(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
              user_addr_t newp, size_t newlen);
static void
fill_proc(proc_t p, struct kinfo_proc *kp);

/* KERN_PROCARGS / KERN_PROCARGS2 handlers and their shared worker. */
int
sysctl_procargs(int *name, u_int namelen, user_addr_t where,
                size_t *sizep, proc_t cur_proc);
static int
sysctl_procargs2(int *name, u_int namelen, user_addr_t where, size_t *sizep,
                 proc_t cur_proc);
static int
sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
                 proc_t cur_proc, int argc_yes);
int
sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
              size_t newlen, void *sp, int len);

/* Per-criterion filters used when enumerating processes for KERN_PROC. */
static int sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg);
int sysdoproc_callback(proc_t p, void *arg);

/* Funneled worker behind the __sysctl() syscall entry point. */
static int __sysctl_funneled(proc_t p, struct __sysctl_args *uap, register_t *retval);

extern void IORegistrySetOSBuildVersion(char * build_version);
201
202 static void
203 loadavg32to64(struct loadavg *la32, struct user_loadavg *la64)
204 {
205 la64->ldavg[0] = la32->ldavg[0];
206 la64->ldavg[1] = la32->ldavg[1];
207 la64->ldavg[2] = la32->ldavg[2];
208 la64->fscale = (user_long_t)la32->fscale;
209 }
210
/*
 * Locking and stats
 */
/* Serializes legacy sysctl handlers that write through wired user memory. */
static struct sysctl_lock memlock;
215
216 /* sysctl() syscall */
217 int
218 __sysctl(proc_t p, struct __sysctl_args *uap, register_t *retval)
219 {
220 boolean_t funnel_state;
221 int error;
222
223 funnel_state = thread_funnel_set(kernel_flock, TRUE);
224 error = __sysctl_funneled(p, uap, retval);
225 thread_funnel_set(kernel_flock, funnel_state);
226 return(error);
227 }
228
/*
 * Worker for the sysctl() syscall, called with the kernel funnel held.
 *
 * Copies in the name vector, performs privilege and MAC checks, wires the
 * caller's "old" buffer where required, dispatches to the legacy handler
 * for the top-level name, and falls back to the new userland_sysctl()
 * path for anything the legacy code does not support.
 */
static int
__sysctl_funneled(proc_t p, struct __sysctl_args *uap, __unused register_t *retval)
{
	int error, dolock = 1;
	size_t savelen = 0, oldlen = 0, newlen;
	sysctlfn *fnp = NULL;
	int name[CTL_MAXNAME];
	int error1;
	boolean_t memlock_taken = FALSE;
	boolean_t vslock_taken = FALSE;
#if CONFIG_MACF
	kauth_cred_t my_cred;
#endif

	/*
	 * all top-level sysctl names are non-terminal
	 */
	if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)
		return (EINVAL);
	error = copyin(uap->name, &name[0], uap->namelen * sizeof(int));
	if (error)
		return (error);

	AUDIT_ARG(ctlname, name, uap->namelen);

	if (proc_is64bit(p)) {
		/* uap->newlen is a size_t value which grows to 64 bits
		 * when coming from a 64-bit process.  since it's doubtful we'll
		 * have a sysctl newp buffer greater than 4GB we shrink it to size_t
		 */
		newlen = CAST_DOWN(size_t, uap->newlen);
	}
	else {
		newlen = uap->newlen;
	}

	/* CTL_UNSPEC is used to get oid to AUTO_OID */
	/*
	 * Writes to most CTL_KERN names, and to anything under CTL_HW or
	 * CTL_VM, require superuser; the handful of KERN_* names excluded
	 * below do their own permission checks.
	 */
	if (uap->new != USER_ADDR_NULL
	    && ((name[0] == CTL_KERN
		&& !(name[1] == KERN_IPC || name[1] == KERN_PANICINFO || name[1] == KERN_PROCDELAYTERM ||
		     name[1] == KERN_PROC_LOW_PRI_IO || name[1] == KERN_PROCNAME || name[1] == KERN_RAGEVNODE || name[1] == KERN_CHECKOPENEVT))
		|| (name[0] == CTL_HW)
		|| (name[0] == CTL_VM))
	    && (error = suser(kauth_cred_get(), &p->p_acflag)))
		return (error);

	/* XXX: KERN, VFS and DEBUG are handled by their respective functions,
	 * but there is a fallback for all sysctls other than VFS to
	 * userland_sysctl() - KILL THIS! */
	switch (name[0]) {
	case CTL_KERN:
		fnp = kern_sysctl;
		/* Only KERN_VNODE/KERN_FILE/KERN_PROC need the old buffer wired. */
		if ((name[1] != KERN_VNODE) && (name[1] != KERN_FILE)
			&& (name[1] != KERN_PROC))
			dolock = 0;
		break;
	case CTL_VFS:
		fnp = vfs_sysctl;
		break;
#ifdef DEBUG
	case CTL_DEBUG:
		fnp = debug_sysctl;
		break;
#endif
	default:
		fnp = NULL;
	}

	if (uap->oldlenp != USER_ADDR_NULL) {
		uint64_t	oldlen64 = fuulong(uap->oldlenp);

		oldlen = CAST_DOWN(size_t, oldlen64);
		/*
		 * If more than 4G, clamp to 4G - useracc() below will catch
		 * with an EFAULT, if it's actually necessary.
		 */
		if (oldlen64 > 0x00000000ffffffffULL)
			oldlen = 0xffffffffUL;
	}

	if (uap->old != USER_ADDR_NULL) {
		if (!useracc(uap->old, (user_size_t)oldlen, B_WRITE))
			return (EFAULT);
		/*
		 * The kernel debug mechanism does not need to take this lock, and
		 * we don't grab the memlock around calls to KERN_PROC because it is reentrant.
		 * Grabbing the lock for a KERN_PROC sysctl makes a deadlock possible 5024049.
		 */
		if (!((name[1] == KERN_KDEBUG) && (name[2] == KERN_KDGETENTROPY)) &&
		    !(name[1] == KERN_PROC)) {
		        MEMLOCK_LOCK();
			memlock_taken = TRUE;
                }

		if (dolock && oldlen) {
			/* Wire the destination so the handler can't fault mid-copy. */
			if ((error = vslock(uap->old, (user_size_t)oldlen))) {
				if (memlock_taken == TRUE)
					MEMLOCK_UNLOCK();
				return(error);
			}
			savelen = oldlen;
			vslock_taken = TRUE;
		}
	}

#if CONFIG_MACF
	/* MAC hook: allow policy modules to veto this sysctl transaction. */
	my_cred = kauth_cred_proc_ref(p);
	error = mac_system_check_sysctl(
	    my_cred,
	    (int *) name,
	    uap->namelen,
	    uap->old,
	    uap->oldlenp,
	    fnp == kern_sysctl ? 1 : 0,
	    uap->new,
	    newlen
	    );
	kauth_cred_unref(&my_cred);
	if (!error) {
#endif
	if (fnp) {
		/* Legacy handler consumes name[0]; pass the remainder down. */
	        error = (*fnp)(name + 1, uap->namelen - 1, uap->old,
	                 &oldlen, uap->new, newlen, p);
	}
	else
	        error = ENOTSUP;
#if CONFIG_MACF
	}
#endif

	/* Undo wiring/locking in the reverse order it was acquired. */
	if (vslock_taken == TRUE) {
	        error1 = vsunlock(uap->old, (user_size_t)savelen, B_WRITE);
		if (!error)
		        error = error1;
        }
	if (memlock_taken == TRUE)
	        MEMLOCK_UNLOCK();

	if ( (name[0] != CTL_VFS) && (error == ENOTSUP)) {
	        size_t  tmp = oldlen;
		boolean_t funnel_state;

		/*
		 * Drop the funnel when calling new sysctl code, which will conditionally
		 * grab the funnel if it really needs to.
		 */
		funnel_state = thread_funnel_set(kernel_flock, FALSE);

		error = userland_sysctl(p, name, uap->namelen, uap->old, &tmp,
		                        1, uap->new, newlen, &oldlen);

		thread_funnel_set(kernel_flock, funnel_state);
	}

	/* ENOMEM still reports the needed length back to the caller. */
	if ((error) && (error != ENOMEM))
		return (error);

	if (uap->oldlenp != USER_ADDR_NULL)
	        error = suulong(uap->oldlenp, oldlen);

	return (error);
}
391
/*
 * Attributes stored in the kernel.
 */
/* Core-dump policy: target filename pattern and enable flags. */
__private_extern__ char corefilename[MAXPATHLEN+1];
__private_extern__ int do_coredump;
__private_extern__ int sugid_coredump;

#if COUNT_SYSCALLS
__private_extern__ int do_count_syscalls;
#endif

#ifdef INSECURE
int securelevel = -1;
#else
int securelevel;
#endif
408
409 static int
410 sysctl_affinity(
411 int *name,
412 u_int namelen,
413 user_addr_t oldBuf,
414 size_t *oldSize,
415 user_addr_t newBuf,
416 __unused size_t newSize,
417 proc_t cur_proc)
418 {
419 if (namelen < 1)
420 return (ENOTSUP);
421
422 if (name[0] == 0 && 1 == namelen) {
423 return sysctl_rdint(oldBuf, oldSize, newBuf,
424 (cur_proc->p_flag & P_AFFINITY) ? 1 : 0);
425 } else if (name[0] == 1 && 2 == namelen) {
426 if (name[1] == 0) {
427 OSBitAndAtomic(~((uint32_t)P_AFFINITY), (UInt32 *)&cur_proc->p_flag);
428 } else {
429 OSBitOrAtomic(P_AFFINITY, (UInt32 *)&cur_proc->p_flag);
430 }
431 return 0;
432 }
433 return (ENOTSUP);
434 }
435
/*
 * KERN_TRANSLATE handler: report (as 0/1) whether the process named by
 * name[0] is running translated (P_TRANSLATED, i.e. under Rosetta).
 *
 * Callers may only query processes with their own uid unless they pass
 * the superuser check.  The target's cred reference and proc reference
 * are both dropped on every exit path.
 */
static int
sysctl_translate(
	int *name,
	u_int namelen,
	user_addr_t oldBuf,
	size_t *oldSize,
	user_addr_t newBuf,
	__unused size_t newSize,
	proc_t cur_proc)
{
	proc_t p;
	int istranslated = 0;
	kauth_cred_t my_cred;
	uid_t uid;

	if (namelen != 1)
		return (ENOTSUP);

	/* Takes a reference on p; released before every return below. */
	p = proc_find(name[0]);
	if (p == NULL)
		return (EINVAL);

	/* Snapshot the target's uid under a cred reference of our own. */
	my_cred = kauth_cred_proc_ref(p);
	uid = kauth_cred_getuid(my_cred);
	kauth_cred_unref(&my_cred);
	if ((uid != kauth_cred_getuid(kauth_cred_get()))
		&& suser(kauth_cred_get(), &cur_proc->p_acflag)) {
		proc_rele(p);
		return (EPERM);
	}

	istranslated = (p->p_flag & P_TRANSLATED);
	proc_rele(p);
	return sysctl_rdint(oldBuf, oldSize, newBuf,
		(istranslated != 0) ? 1 : 0);
}
472
/*
 * Resolve the configured architecture-handler binary (currently only the
 * PowerPC/Rosetta handler) to an on-disk vnode and record its fsid and
 * fileid in the exec_archhandler structure, so exec can later recognize it.
 *
 * Returns EBADARCH for unknown architectures, EACCES if the path is not a
 * regular file or lives on a noexec mount, or any namei/getattr error.
 */
int
set_archhandler(__unused proc_t p, int arch)
{
	int error;
	struct nameidata nd;
	struct vnode_attr va;
	vfs_context_t ctx = vfs_context_current();
	struct exec_archhandler *archhandler;

	switch(arch) {
	case CPU_TYPE_POWERPC:
		archhandler = &exec_archhandler_ppc;
		break;
	default:
		return (EBADARCH);
	}

	/* Look up the handler path; on success we hold an iocount on ni_vp. */
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE32,
		   CAST_USER_ADDR_T(archhandler->path), ctx);
	error = namei(&nd);
	if (error)
		return (error);
	nameidone(&nd);

	/* Check mount point */
	if ((nd.ni_vp->v_mount->mnt_flag & MNT_NOEXEC) ||
		(nd.ni_vp->v_type != VREG)) {
		vnode_put(nd.ni_vp);
		return (EACCES);
	}

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_fsid);
	VATTR_WANTED(&va, va_fileid);
	error = vnode_getattr(nd.ni_vp, &va, ctx);
	if (error) {
		vnode_put(nd.ni_vp);
		return (error);
	}
	vnode_put(nd.ni_vp);

	/* Remember the identity of the handler file for exec-time matching. */
	archhandler->fsid = va.va_fsid;
	archhandler->fileid = (u_long)va.va_fileid;
	return 0;
}
518
/* XXX remove once Rosetta is rev'ed */
/*****************************************************************************/
/*
 * Legacy-style handler for the PowerPC exec-handler path sysctl.
 *
 * Read: copies out the current handler path (caller's buffer must fit the
 * full string or ENOMEM is returned); *oldSize is set to the string length
 * not counting the NUL.  Write: superuser only; copies in the new path,
 * stores it, and re-resolves the handler via set_archhandler().
 *
 * NOTE(review): exec_archhandler_ppc.path is read and written here without
 * any visible lock — presumably serialized by the sysctl memlock/funnel;
 * confirm before relying on concurrent access being safe.
 */
static int
sysctl_exec_archhandler_ppc(
	__unused int *name,
	__unused u_int namelen,
	user_addr_t oldBuf,
	size_t *oldSize,
	user_addr_t newBuf,
	size_t newSize,
	proc_t p)
{
	int error;
	size_t len;
	char handler[sizeof(exec_archhandler_ppc.path)];
	vfs_context_t ctx = vfs_context_current();

	if (oldSize) {
		len = strlen(exec_archhandler_ppc.path) + 1;
		if (oldBuf) {
			if (*oldSize < len)
				return (ENOMEM);
			error = copyout(exec_archhandler_ppc.path, oldBuf, len);
			if (error)
				return (error);
		}
		*oldSize = len - 1;
	}
	if (newBuf) {
		error = suser(vfs_context_ucred(ctx), &p->p_acflag);
		if (error)
			return (error);
		if (newSize >= sizeof(exec_archhandler_ppc.path))
			return (ENAMETOOLONG);
		error = copyin(newBuf, handler, newSize);
		if (error)
			return (error);
		/* copyin does not NUL-terminate; do it by hand. */
		handler[newSize] = 0;
		strlcpy(exec_archhandler_ppc.path, handler, MAXPATHLEN);
		error = set_archhandler(p, CPU_TYPE_POWERPC);
		if (error)
			return (error);
	}
	return 0;
}
/*****************************************************************************/
565
566 static int
567 sysctl_handle_exec_archhandler_ppc(struct sysctl_oid *oidp, void *arg1,
568 int arg2, struct sysctl_req *req)
569 {
570 int error = 0;
571
572 error = sysctl_handle_string(oidp, arg1, arg2, req);
573
574 if (error)
575 goto done;
576
577 if (req->newptr)
578 error = set_archhandler(req->p, CPU_TYPE_POWERPC);
579
580 done:
581 return error;
582
583 }
584
/* kern.exec and kern.exec.archhandler sysctl subtrees. */
SYSCTL_NODE(_kern, KERN_EXEC, exec, CTLFLAG_RD|CTLFLAG_LOCKED, 0, "");

SYSCTL_NODE(_kern_exec, OID_AUTO, archhandler, CTLFLAG_RD|CTLFLAG_LOCKED, 0, "");

/* kern.exec.archhandler.powerpc: path to the Rosetta handler binary. */
SYSCTL_PROC(_kern_exec_archhandler, OID_AUTO, powerpc,
	    CTLTYPE_STRING | CTLFLAG_RW, exec_archhandler_ppc.path, 0,
	    sysctl_handle_exec_archhandler_ppc, "A", "");

extern int get_kernel_symfile(proc_t, char **);
__private_extern__ int
sysctl_dopanicinfo(int *, u_int, user_addr_t, size_t *, user_addr_t,
		   size_t, proc_t);
597
/*
 * kernel related system variables.
 */
/*
 * Legacy dispatcher for the CTL_KERN subtree: validates that multi-level
 * names are only used with the nodes that accept them, then fans out to
 * the per-name handler.  Unhandled names return ENOTSUP so the caller can
 * fall through to the new userland_sysctl() path.
 */
int
kern_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
            user_addr_t newp, size_t newlen, proc_t p)
{
	/* all sysctl names not listed below are terminal at this level */
	if (namelen != 1
		&& !(name[0] == KERN_PROC
			|| name[0] == KERN_PROF
			|| name[0] == KERN_KDEBUG
#if !CONFIG_EMBEDDED
			|| name[0] == KERN_PROCARGS
#endif
			|| name[0] == KERN_PROCARGS2
			|| name[0] == KERN_IPC
			|| name[0] == KERN_SYSV
			|| name[0] == KERN_AFFINITY
			|| name[0] == KERN_TRANSLATE
			|| name[0] == KERN_EXEC
			|| name[0] == KERN_PANICINFO
			|| name[0] == KERN_POSIX
			|| name[0] == KERN_TFP
			|| name[0] == KERN_TTY
#if CONFIG_LCTX
			|| name[0] == KERN_LCTX
#endif
			)
		)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case KERN_PROC:
		return (sysctl_doproc(name + 1, namelen - 1, oldp, oldlenp));
#ifdef GPROF
	case KERN_PROF:
		return (sysctl_doprof(name + 1, namelen - 1, oldp, oldlenp,
		    newp, newlen));
#endif
	case KERN_KDEBUG:
		return (kdebug_ops(name + 1, namelen - 1, oldp, oldlenp, p));
#if !CONFIG_EMBEDDED
	case KERN_PROCARGS:
		/* new one as it does not use kinfo_proc */
		return (sysctl_procargs(name + 1, namelen - 1, oldp, oldlenp, p));
#endif
	case KERN_PROCARGS2:
		/* new one as it does not use kinfo_proc */
		return (sysctl_procargs2(name + 1, namelen - 1, oldp, oldlenp, p));
#if PANIC_INFO
	case KERN_PANICINFO:
		return(sysctl_dopanicinfo(name + 1, namelen - 1, oldp, oldlenp,
			newp, newlen, p));
#endif
	case KERN_AFFINITY:
		return sysctl_affinity(name+1, namelen-1, oldp, oldlenp,
					newp, newlen, p);
	case KERN_TRANSLATE:
		return sysctl_translate(name+1, namelen-1, oldp, oldlenp, newp,
				      newlen, p);

	/* XXX remove once Rosetta has rev'ed */
	case KERN_EXEC:
		return sysctl_exec_archhandler_ppc(name+1, namelen-1, oldp,
						   oldlenp, newp, newlen, p);
#if COUNT_SYSCALLS
	case KERN_COUNT_SYSCALLS:
	{
		/* valid values passed in:
		 * = 0 means don't keep called counts for each bsd syscall
		 * > 0 means keep called counts for each bsd syscall
		 * = 2 means dump current counts to the system log
		 * = 3 means reset all counts
		 * for example, to dump current counts:
		 *		sysctl -w kern.count_calls=2
		 */
		/*
		 * NOTE(review): `error` and `tmp` are not declared anywhere in
		 * this function, so this case cannot compile when
		 * COUNT_SYSCALLS is enabled — confirm and add local
		 * declarations if this configuration is ever built.
		 */
		error = sysctl_int(oldp, oldlenp, newp, newlen, &tmp);
		if ( error != 0 ) {
			return (error);
		}

		if ( tmp == 1 ) {
			do_count_syscalls = 1;
		}
		else if ( tmp == 0 || tmp == 2 || tmp == 3 ) {
			extern int 			nsysent;
			extern int			syscalls_log[];
			extern const char *	syscallnames[];
			int			i;
			for ( i = 0; i < nsysent; i++ ) {
				if ( syscalls_log[i] != 0 ) {
					if ( tmp == 2 ) {
						printf("%d calls - name %s \n", syscalls_log[i], syscallnames[i]);
					}
					else {
						syscalls_log[i] = 0;
					}
				}
			}
			if ( tmp != 0 ) {
				do_count_syscalls = 1;
			}
		}
		return (0);
	}
#endif
	default:
		return (ENOTSUP);
	}
	/* NOTREACHED */
}
710
#ifdef DEBUG
/*
 * Debugging related system variables.
 */
#if DIAGNOSTIC
extern
#endif /* DIAGNOSTIC */
struct ctldebug debug0, debug1;
struct ctldebug debug2, debug3, debug4;
struct ctldebug debug5, debug6, debug7, debug8, debug9;
struct ctldebug debug10, debug11, debug12, debug13, debug14;
struct ctldebug debug15, debug16, debug17, debug18, debug19;
/* Index table mapping CTL_DEBUG slot numbers to the debug variables above. */
static struct ctldebug *debugvars[CTL_DEBUG_MAXID] = {
	&debug0, &debug1, &debug2, &debug3, &debug4,
	&debug5, &debug6, &debug7, &debug8, &debug9,
	&debug10, &debug11, &debug12, &debug13, &debug14,
	&debug15, &debug16, &debug17, &debug18, &debug19,
};
/*
 * CTL_DEBUG dispatcher: name[0] selects a slot in debugvars[], name[1]
 * selects the slot's name (read-only) or its integer value (read/write).
 * Unregistered slots (NULL debugname) return ENOTSUP.
 */
int
debug_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
             user_addr_t newp, size_t newlen, __unused proc_t p)
{
	struct ctldebug *cdp;

	/* all sysctl names at this level are name and field */
	if (namelen != 2)
		return (ENOTDIR);		/* overloaded */
	if (name[0] < 0 || name[0] >= CTL_DEBUG_MAXID)
		return (ENOTSUP);
	cdp = debugvars[name[0]];
	if (cdp->debugname == 0)
		return (ENOTSUP);
	switch (name[1]) {
	case CTL_DEBUG_NAME:
		return (sysctl_rdstring(oldp, oldlenp, newp, cdp->debugname));
	case CTL_DEBUG_VALUE:
		return (sysctl_int(oldp, oldlenp, newp, newlen, cdp->debugvar));
	default:
		return (ENOTSUP);
	}
	/* NOTREACHED */
}
#endif /* DEBUG */
754
755 /*
756 * The following sysctl_* functions should not be used
757 * any more, as they can only cope with callers in
758 * user mode: Use new-style
759 * sysctl_io_number()
760 * sysctl_io_string()
761 * sysctl_io_opaque()
762 * instead.
763 */
764
765 /*
766 * Validate parameters and get old / set new parameters
767 * for an integer-valued sysctl function.
768 */
769 int
770 sysctl_int(user_addr_t oldp, size_t *oldlenp,
771 user_addr_t newp, size_t newlen, int *valp)
772 {
773 int error = 0;
774
775 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
776 return (EFAULT);
777 if (oldp && *oldlenp < sizeof(int))
778 return (ENOMEM);
779 if (newp && newlen != sizeof(int))
780 return (EINVAL);
781 *oldlenp = sizeof(int);
782 if (oldp)
783 error = copyout(valp, oldp, sizeof(int));
784 if (error == 0 && newp) {
785 error = copyin(newp, valp, sizeof(int));
786 AUDIT_ARG(value, *valp);
787 }
788 return (error);
789 }
790
791 /*
792 * As above, but read-only.
793 */
794 int
795 sysctl_rdint(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, int val)
796 {
797 int error = 0;
798
799 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
800 return (EFAULT);
801 if (oldp && *oldlenp < sizeof(int))
802 return (ENOMEM);
803 if (newp)
804 return (EPERM);
805 *oldlenp = sizeof(int);
806 if (oldp)
807 error = copyout((caddr_t)&val, oldp, sizeof(int));
808 return (error);
809 }
810
811 /*
812 * Validate parameters and get old / set new parameters
813 * for an quad(64bit)-valued sysctl function.
814 */
815 int
816 sysctl_quad(user_addr_t oldp, size_t *oldlenp,
817 user_addr_t newp, size_t newlen, quad_t *valp)
818 {
819 int error = 0;
820
821 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
822 return (EFAULT);
823 if (oldp && *oldlenp < sizeof(quad_t))
824 return (ENOMEM);
825 if (newp && newlen != sizeof(quad_t))
826 return (EINVAL);
827 *oldlenp = sizeof(quad_t);
828 if (oldp)
829 error = copyout(valp, oldp, sizeof(quad_t));
830 if (error == 0 && newp)
831 error = copyin(newp, valp, sizeof(quad_t));
832 return (error);
833 }
834
835 /*
836 * As above, but read-only.
837 */
838 int
839 sysctl_rdquad(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, quad_t val)
840 {
841 int error = 0;
842
843 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
844 return (EFAULT);
845 if (oldp && *oldlenp < sizeof(quad_t))
846 return (ENOMEM);
847 if (newp)
848 return (EPERM);
849 *oldlenp = sizeof(quad_t);
850 if (oldp)
851 error = copyout((caddr_t)&val, oldp, sizeof(quad_t));
852 return (error);
853 }
854
855 /*
856 * Validate parameters and get old / set new parameters
857 * for a string-valued sysctl function. Unlike sysctl_string, if you
858 * give it a too small (but larger than 0 bytes) buffer, instead of
859 * returning ENOMEM, it truncates the returned string to the buffer
860 * size. This preserves the semantics of some library routines
861 * implemented via sysctl, which truncate their returned data, rather
862 * than simply returning an error. The returned string is always NUL
863 * terminated.
864 */
865 int
866 sysctl_trstring(user_addr_t oldp, size_t *oldlenp,
867 user_addr_t newp, size_t newlen, char *str, int maxlen)
868 {
869 int len, copylen, error = 0;
870
871 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
872 return (EFAULT);
873 copylen = len = strlen(str) + 1;
874 if (oldp && (len < 0 || *oldlenp < 1))
875 return (ENOMEM);
876 if (oldp && (*oldlenp < (size_t)len))
877 copylen = *oldlenp + 1;
878 if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
879 return (EINVAL);
880 *oldlenp = copylen - 1; /* deal with NULL strings correctly */
881 if (oldp) {
882 error = copyout(str, oldp, copylen);
883 if (!error) {
884 unsigned char c = 0;
885 /* NUL terminate */
886 oldp += *oldlenp;
887 error = copyout((void *)&c, oldp, sizeof(char));
888 }
889 }
890 if (error == 0 && newp) {
891 error = copyin(newp, str, newlen);
892 str[newlen] = 0;
893 AUDIT_ARG(text, (char *)str);
894 }
895 return (error);
896 }
897
898 /*
899 * Validate parameters and get old / set new parameters
900 * for a string-valued sysctl function.
901 */
902 int
903 sysctl_string(user_addr_t oldp, size_t *oldlenp,
904 user_addr_t newp, size_t newlen, char *str, int maxlen)
905 {
906 int len, error = 0;
907
908 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
909 return (EFAULT);
910 len = strlen(str) + 1;
911 if (oldp && (len < 0 || *oldlenp < (size_t)len))
912 return (ENOMEM);
913 if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
914 return (EINVAL);
915 *oldlenp = len -1; /* deal with NULL strings correctly */
916 if (oldp) {
917 error = copyout(str, oldp, len);
918 }
919 if (error == 0 && newp) {
920 error = copyin(newp, str, newlen);
921 str[newlen] = 0;
922 AUDIT_ARG(text, (char *)str);
923 }
924 return (error);
925 }
926
927 /*
928 * As above, but read-only.
929 */
930 int
931 sysctl_rdstring(user_addr_t oldp, size_t *oldlenp,
932 user_addr_t newp, char *str)
933 {
934 int len, error = 0;
935
936 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
937 return (EFAULT);
938 len = strlen(str) + 1;
939 if (oldp && *oldlenp < (size_t)len)
940 return (ENOMEM);
941 if (newp)
942 return (EPERM);
943 *oldlenp = len;
944 if (oldp)
945 error = copyout(str, oldp, len);
946 return (error);
947 }
948
949 /*
950 * Validate parameters and get old / set new parameters
951 * for a structure oriented sysctl function.
952 */
953 int
954 sysctl_struct(user_addr_t oldp, size_t *oldlenp,
955 user_addr_t newp, size_t newlen, void *sp, int len)
956 {
957 int error = 0;
958
959 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
960 return (EFAULT);
961 if (oldp && (len < 0 || *oldlenp < (size_t)len))
962 return (ENOMEM);
963 if (newp && (len < 0 || newlen > (size_t)len))
964 return (EINVAL);
965 if (oldp) {
966 *oldlenp = len;
967 error = copyout(sp, oldp, len);
968 }
969 if (error == 0 && newp)
970 error = copyin(newp, sp, len);
971 return (error);
972 }
973
974 /*
975 * Validate parameters and get old parameters
976 * for a structure oriented sysctl function.
977 */
978 int
979 sysctl_rdstruct(user_addr_t oldp, size_t *oldlenp,
980 user_addr_t newp, void *sp, int len)
981 {
982 int error = 0;
983
984 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
985 return (EFAULT);
986 if (oldp && (len < 0 || *oldlenp < (size_t)len))
987 return (ENOMEM);
988 if (newp)
989 return (EPERM);
990 *oldlenp = len;
991 if (oldp)
992 error = copyout(sp, oldp, len);
993 return (error);
994 }
995
/*
 * Get file structures.
 */
/*
 * KERN_FILE handler: copies out the kernel file list head followed by one
 * struct extern_file per open file.  A size-probe request (NULL oldptr)
 * returns an estimate padded by ten files to absorb churn between the
 * probe and the real read.
 */
static int
sysctl_file
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error;
	struct fileglob *fg;
	struct extern_file nef;

	if (req->oldptr == USER_ADDR_NULL) {
		/*
		 * overestimate by 10 files
		 */
		req->oldidx = sizeof(filehead) + (nfiles + 10) * sizeof(struct extern_file);
		return (0);
	}

	/*
	 * first copyout filehead
	 */
	error = SYSCTL_OUT(req, &filehead, sizeof(filehead));
	if (error)
		return (error);

	/*
	 * followed by an array of file structures
	 */
	for (fg = filehead.lh_first; fg != 0; fg = fg->f_list.le_next) {
		/* Flatten the in-kernel fileglob into the exported layout. */
		nef.f_list.le_next = (struct extern_file *)fg->f_list.le_next;
		nef.f_list.le_prev = (struct extern_file **)fg->f_list.le_prev;
		nef.f_flag = (fg->fg_flag & FMASK);
		nef.f_type = fg->fg_type;
		nef.f_count = fg->fg_count;
		nef.f_msgcount = fg->fg_msgcount;
		nef.f_cred = fg->fg_cred;
		nef.f_ops = fg->fg_ops;
		nef.f_offset = fg->fg_offset;
		nef.f_data = fg->fg_data;
		error = SYSCTL_OUT(req, &nef, sizeof(nef));
		if (error)
			return (error);
	}
	return (0);
}
1042
/* kern.file: export the open-file table via sysctl_file() above. */
SYSCTL_PROC(_kern, KERN_FILE, file,
	    CTLTYPE_STRUCT | CTLFLAG_RW,
	    0, 0, sysctl_file, "S,filehead", "");
1046
1047 static int
1048 sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg)
1049 {
1050 if (p->p_pid != (pid_t)arg)
1051 return(0);
1052 else
1053 return(1);
1054 }
1055
1056 static int
1057 sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg)
1058 {
1059 if (p->p_pgrpid != (pid_t)arg)
1060 return(0);
1061 else
1062 return(1);
1063 }
1064
1065 static int
1066 sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg)
1067 {
1068 boolean_t funnel_state;
1069 int retval;
1070
1071
1072 funnel_state = thread_funnel_set(kernel_flock, TRUE);
1073 /* This is very racy but list lock is held.. Hmmm. */
1074 if ((p->p_flag & P_CONTROLT) == 0 ||
1075 (p->p_pgrp == NULL) || (p->p_pgrp->pg_session == NULL) ||
1076 p->p_pgrp->pg_session->s_ttyp == NULL ||
1077 p->p_pgrp->pg_session->s_ttyp->t_dev != (dev_t)arg)
1078 retval = 0;
1079 else
1080 retval = 1;
1081
1082 thread_funnel_set(kernel_flock, funnel_state);
1083
1084 return(retval);
1085 }
1086
/*
 * Filter for KERN_PROC_UID: accept only processes whose effective uid
 * matches the uid_t encoded in arg.
 */
static int
sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg)
{
	kauth_cred_t my_cred;
	uid_t uid;

	/* A process with no credential can never match. */
	if (p->p_ucred == NULL)
		return(0);
	/* Hold a credential reference so the uid read is stable. */
	my_cred = kauth_cred_proc_ref(p);
	uid = kauth_cred_getuid(my_cred);
	kauth_cred_unref(&my_cred);

	if (uid != (uid_t)arg)
		return(0);
	else
		return(1);
}
1104
1105
/*
 * Filter for KERN_PROC_RUID: accept only processes whose real uid
 * matches the uid_t encoded in arg.
 */
static int
sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg)
{
	kauth_cred_t my_cred;
	uid_t ruid;

	/* A process with no credential can never match. */
	if (p->p_ucred == NULL)
		return(0);
	/* Hold a credential reference so the ruid read is stable. */
	my_cred = kauth_cred_proc_ref(p);
	ruid = my_cred->cr_ruid;
	kauth_cred_unref(&my_cred);

	if (ruid != (uid_t)arg)
		return(0);
	else
		return(1);
}
1123
1124 static int
1125 sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg)
1126 {
1127 if ((p->p_lctx == NULL) ||
1128 (p->p_lctx->lc_id != (pid_t)arg))
1129 return(0);
1130 else
1131 return(1);
1132 }
1133
/*
 * try over estimating by 5 procs
 */
#define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
/*
 * State shared between sysctl_doproc() and the per-process
 * sysdoproc_callback() invoked through proc_iterate().
 */
struct sysdoproc_args {
	int	buflen;		/* remaining space in the user buffer */
	caddr_t	kprocp;		/* scratch kinfo_proc / user_kinfo_proc */
	boolean_t is_64_bit;	/* caller is a 64-bit process */
	user_addr_t	dp;	/* current user-space output cursor */
	size_t needed;		/* total bytes needed for all entries */
	int sizeof_kproc;	/* size of one entry (32- or 64-bit form) */
	int * errorp;		/* where to record a copyout error */
	int uidcheck;		/* non-zero: filter by effective uid */
	int ruidcheck;		/* non-zero: filter by real uid */
	int ttycheck;		/* non-zero: filter by controlling tty */
	int uidval;		/* value compared by the checks above */
};
1151
1152 int
1153 sysdoproc_callback(proc_t p, void * arg)
1154 {
1155 struct sysdoproc_args * args = (struct sysdoproc_args *)arg;
1156 int error=0;
1157
1158 if (args->buflen >= args->sizeof_kproc) {
1159 if ((args->ruidcheck != 0) && (sysdoproc_filt_KERN_PROC_RUID(p, (void *)args->uidval) == 0))
1160 return(PROC_RETURNED);
1161 if ((args->uidcheck != 0) && (sysdoproc_filt_KERN_PROC_UID(p, (void *)args->uidval) == 0))
1162 return(PROC_RETURNED);
1163 if ((args->ttycheck != 0) && (sysdoproc_filt_KERN_PROC_TTY(p, (void *)args->uidval) == 0))
1164 return(PROC_RETURNED);
1165
1166 bzero(args->kprocp, args->sizeof_kproc);
1167 if (args->is_64_bit) {
1168 fill_user_proc(p, (struct user_kinfo_proc *) args->kprocp);
1169 }
1170 else {
1171 fill_proc(p, (struct kinfo_proc *) args->kprocp);
1172 }
1173 error = copyout(args->kprocp, args->dp, args->sizeof_kproc);
1174 if (error) {
1175 *args->errorp = error;
1176 return(PROC_RETURNED_DONE);
1177 return (error);
1178 }
1179 args->dp += args->sizeof_kproc;
1180 args->buflen -= args->sizeof_kproc;
1181 }
1182 args->needed += args->sizeof_kproc;
1183 return(PROC_RETURNED);
1184 }
1185
1186 int
1187 sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep)
1188 {
1189 user_addr_t dp = where;
1190 size_t needed = 0;
1191 int buflen = where != USER_ADDR_NULL ? *sizep : 0;
1192 int error = 0;
1193 boolean_t is_64_bit = FALSE;
1194 struct kinfo_proc kproc;
1195 struct user_kinfo_proc user_kproc;
1196 int sizeof_kproc;
1197 caddr_t kprocp;
1198 int (*filterfn)(proc_t, void *) = 0;
1199 struct sysdoproc_args args;
1200 int uidcheck = 0;
1201 int ruidcheck = 0;
1202 int ttycheck = 0;
1203
1204 if (namelen != 2 && !(namelen == 1 && name[0] == KERN_PROC_ALL))
1205 return (EINVAL);
1206 is_64_bit = proc_is64bit(current_proc());
1207 if (is_64_bit) {
1208 sizeof_kproc = sizeof(user_kproc);
1209 kprocp = (caddr_t) &user_kproc;
1210 }
1211 else {
1212 sizeof_kproc = sizeof(kproc);
1213 kprocp = (caddr_t) &kproc;
1214 }
1215
1216
1217 switch (name[0]) {
1218
1219 case KERN_PROC_PID:
1220 filterfn = sysdoproc_filt_KERN_PROC_PID;
1221 break;
1222
1223 case KERN_PROC_PGRP:
1224 filterfn = sysdoproc_filt_KERN_PROC_PGRP;
1225 break;
1226
1227 case KERN_PROC_TTY:
1228 ttycheck = 1;
1229 break;
1230
1231 case KERN_PROC_UID:
1232 uidcheck = 1;
1233 break;
1234
1235 case KERN_PROC_RUID:
1236 ruidcheck = 1;
1237 break;
1238
1239 #if CONFIG_LCTX
1240 case KERN_PROC_LCID:
1241 filterfn = sysdoproc_filt_KERN_PROC_LCID;
1242 break;
1243 #endif
1244 }
1245
1246 error = 0;
1247 args.buflen = buflen;
1248 args.kprocp = kprocp;
1249 args.is_64_bit = is_64_bit;
1250 args.dp = dp;
1251 args.needed = needed;
1252 args.errorp = &error;
1253 args.uidcheck = uidcheck;
1254 args.ruidcheck = ruidcheck;
1255 args.ttycheck = ttycheck;
1256 args.sizeof_kproc = sizeof_kproc;
1257 args.uidval = name[1];
1258
1259 proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST), sysdoproc_callback, &args, filterfn, (void *)name[1]);
1260
1261 if (error)
1262 return(error);
1263
1264 dp = args.dp;
1265 needed = args.needed;
1266
1267 if (where != USER_ADDR_NULL) {
1268 *sizep = dp - where;
1269 if (needed > *sizep)
1270 return (ENOMEM);
1271 } else {
1272 needed += KERN_PROCSLOP;
1273 *sizep = needed;
1274 }
1275 return (0);
1276 }
1277
1278 /*
1279 * Fill in an eproc structure for the specified process.
1280 */
static void
fill_eproc(proc_t p, struct eproc *ep)
{
	struct tty *tp;
	kauth_cred_t my_cred;
	struct pgrp * pg;
	struct session * sessp;

	/* Take references on the pgrp and session; released at the end. */
	pg = proc_pgrp(p);
	sessp = proc_session(p);

	ep->e_paddr = p;

	if (pg != PGRP_NULL) {
		ep->e_sess = sessp;
		ep->e_pgid = p->p_pgrpid;
		ep->e_jobc = pg->pg_jobc;
		/* Flag sessions that have a controlling-terminal vnode. */
		if ((sessp != SESSION_NULL) && sessp->s_ttyvp)
			ep->e_flag = EPROC_CTTY;
	} else {
		/* No process group: report empty job-control state. */
		ep->e_sess = (struct session *)0;
		ep->e_pgid = 0;
		ep->e_jobc = 0;
	}
#if CONFIG_LCTX
	if (p->p_lctx) {
		ep->e_lcid = p->p_lctx->lc_id;
	} else {
		ep->e_lcid = 0;
	}
#endif
	ep->e_ppid = p->p_ppid;
	/* Pre-zero the fake historical pcred */
	bzero(&ep->e_pcred, sizeof(struct _pcred));
	if (p->p_ucred) {
		/* Hold a credential reference while copying identity fields. */
		my_cred = kauth_cred_proc_ref(p);

		/* A fake historical pcred */
		ep->e_pcred.p_ruid = my_cred->cr_ruid;
		ep->e_pcred.p_svuid = my_cred->cr_svuid;
		ep->e_pcred.p_rgid = my_cred->cr_rgid;
		ep->e_pcred.p_svgid = my_cred->cr_svgid;
		/* A fake historical *kauth_cred_t */
		ep->e_ucred.cr_ref = my_cred->cr_ref;
		ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
		ep->e_ucred.cr_ngroups = my_cred->cr_ngroups;
		bcopy(my_cred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));

		kauth_cred_unref(&my_cred);
	}
	/* VM sizes are not reported for embryonic or zombie processes. */
	if (p->p_stat == SIDL || p->p_stat == SZOMB) {
		ep->e_vm.vm_tsize = 0;
		ep->e_vm.vm_dsize = 0;
		ep->e_vm.vm_ssize = 0;
	}
	ep->e_vm.vm_rssize = 0;

	/* Controlling-terminal info, when one is attached. */
	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
	     (tp = sessp->s_ttyp)) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = sessp->s_ttypgrpid;
		ep->e_tsess = tp->t_session;
	} else
		ep->e_tdev = NODEV;

	if (SESS_LEADER(p, sessp))
		ep->e_flag |= EPROC_SLEADER;
	/* Wait message is no longer tracked; export an empty string. */
	bzero(&ep->e_wmesg[0], WMESGLEN+1);
	ep->e_xsize = ep->e_xrssize = 0;
	ep->e_xccount = ep->e_xswrss = 0;
	if (sessp != SESSION_NULL)
		session_rele(sessp);
	if(pg != PGRP_NULL)
		pg_rele(pg);
}
1356
1357 /*
1358 * Fill in an LP64 version of eproc structure for the specified process.
1359 */
static void
fill_user_eproc(proc_t p, struct user_eproc *ep)
{
	struct tty *tp;
	struct  session *sessp = NULL;
	struct pgrp * pg;
	kauth_cred_t my_cred;

	/* Take references on the pgrp and session; released at the end. */
	pg = proc_pgrp(p);
	sessp = proc_session(p);

	ep->e_paddr = CAST_USER_ADDR_T(p);
	if (pg != PGRP_NULL) {
		ep->e_sess = CAST_USER_ADDR_T(sessp);
		ep->e_pgid = p->p_pgrpid;
		ep->e_jobc = pg->pg_jobc;
		/* Flag sessions that have a controlling-terminal vnode. */
		if (sessp != SESSION_NULL) {
			if (sessp->s_ttyvp)
				ep->e_flag = EPROC_CTTY;
		}
	} else {
		/* No process group: report empty job-control state. */
		ep->e_sess = USER_ADDR_NULL;
		ep->e_pgid = 0;
		ep->e_jobc = 0;
	}
#if CONFIG_LCTX
	if (p->p_lctx) {
		ep->e_lcid = p->p_lctx->lc_id;
	} else {
		ep->e_lcid = 0;
	}
#endif
	ep->e_ppid = p->p_ppid;
	/* Pre-zero the fake historical pcred */
	bzero(&ep->e_pcred, sizeof(ep->e_pcred));
	if (p->p_ucred) {
		/* Hold a credential reference while copying identity fields. */
		my_cred = kauth_cred_proc_ref(p);

		/* A fake historical pcred */
		ep->e_pcred.p_ruid = my_cred->cr_ruid;
		ep->e_pcred.p_svuid = my_cred->cr_svuid;
		ep->e_pcred.p_rgid = my_cred->cr_rgid;
		ep->e_pcred.p_svgid = my_cred->cr_svgid;

		/* A fake historical *kauth_cred_t */
		ep->e_ucred.cr_ref = my_cred->cr_ref;
		ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
		ep->e_ucred.cr_ngroups = my_cred->cr_ngroups;
		bcopy(my_cred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));

		kauth_cred_unref(&my_cred);
	}
	/* VM sizes are not reported for embryonic or zombie processes. */
	if (p->p_stat == SIDL || p->p_stat == SZOMB) {
		ep->e_vm.vm_tsize = 0;
		ep->e_vm.vm_dsize = 0;
		ep->e_vm.vm_ssize = 0;
	}
	ep->e_vm.vm_rssize = 0;

	/* Controlling-terminal info, when one is attached. */
	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
	     (tp = sessp->s_ttyp)) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = sessp->s_ttypgrpid;
		ep->e_tsess = CAST_USER_ADDR_T(tp->t_session);
	} else
		ep->e_tdev = NODEV;

	if (SESS_LEADER(p, sessp))
		ep->e_flag |= EPROC_SLEADER;
	/* Wait message is no longer tracked; export an empty string. */
	bzero(&ep->e_wmesg[0], WMESGLEN+1);
	ep->e_xsize = ep->e_xrssize = 0;
	ep->e_xccount = ep->e_xswrss = 0;
	if (sessp != SESSION_NULL)
		session_rele(sessp);
	if (pg != PGRP_NULL)
		pg_rele(pg);
}
1437
1438 /*
1439 * Fill in an eproc structure for the specified process.
1440 */
static void
fill_externproc(proc_t p, struct extern_proc *exp)
{
	/* List linkage is meaningless to userspace; export NULLs. */
	exp->p_forw = exp->p_back = NULL;
	exp->p_starttime = p->p_start;
	exp->p_vmspace = NULL;
	exp->p_sigacts = p->p_sigacts;
	exp->p_flag  = p->p_flag;
	/* Translate internal p_lflag bits onto historical p_flag bits. */
	if (p->p_lflag & P_LTRACED)
		exp->p_flag |= P_TRACED;
	if (p->p_lflag & P_LPPWAIT)
		exp->p_flag |= P_PPWAIT;
	if (p->p_lflag & P_LEXIT)
		exp->p_flag |= P_WEXIT;
	exp->p_stat = p->p_stat ;
	exp->p_pid = p->p_pid ;
	exp->p_oppid = p->p_oppid ;
	/* Mach related  */
	exp->user_stack = CAST_DOWN(caddr_t, p->user_stack);
	exp->exit_thread = p->exit_thread ;
	exp->p_debugger = p->p_debugger ;
	exp->sigwait = p->sigwait ;
	/* scheduling */
#ifdef _PROC_HAS_SCHEDINFO_
	exp->p_estcpu = p->p_estcpu ;
	exp->p_pctcpu = p->p_pctcpu ;
	exp->p_slptime = p->p_slptime ;
#else
	/* Scheduling statistics are not maintained in this configuration. */
	exp->p_estcpu = 0 ;
	exp->p_pctcpu = 0 ;
	exp->p_slptime = 0 ;
#endif
	/* The zeroed fields below are kept only for ABI layout compatibility. */
	exp->p_cpticks = 0 ;
	exp->p_wchan = 0 ;
	exp->p_wmesg = 0 ;
	exp->p_swtime = 0 ;
	bcopy(&p->p_realtimer, &exp->p_realtimer,sizeof(struct itimerval));
	bcopy(&p->p_rtime, &exp->p_rtime,sizeof(struct timeval));
	exp->p_uticks = 0 ;
	exp->p_sticks = 0 ;
	exp->p_iticks = 0 ;
	exp->p_traceflag = 0;
	exp->p_tracep = 0 ;
	exp->p_siglist = 0 ;	/* No longer relevant */
	exp->p_textvp = p->p_textvp ;
	exp->p_holdcnt = 0 ;
	exp->p_sigmask = 0 ;	/* no longer avaialable */
	exp->p_sigignore = p->p_sigignore ;
	exp->p_sigcatch = p->p_sigcatch ;
	exp->p_priority = p->p_priority ;
	exp->p_usrpri = 0 ;
	exp->p_nice = p->p_nice ;
	/* Copy the command name and guarantee NUL termination. */
	bcopy(&p->p_comm, &exp->p_comm,MAXCOMLEN);
	exp->p_comm[MAXCOMLEN] = '\0';
	exp->p_pgrp = p->p_pgrp ;
	exp->p_addr = NULL;
	exp->p_xstat = p->p_xstat ;
	exp->p_acflag = p->p_acflag ;
	exp->p_ru = p->p_ru ;		/* XXX may be NULL */
}
1501
1502 /*
1503 * Fill in an LP64 version of extern_proc structure for the specified process.
1504 */
static void
fill_user_externproc(proc_t p, struct user_extern_proc *exp)
{
	/* List linkage is meaningless to userspace; export NULLs. */
	exp->p_forw = exp->p_back = USER_ADDR_NULL;
	exp->p_starttime.tv_sec = p->p_start.tv_sec;
	exp->p_starttime.tv_usec = p->p_start.tv_usec;
	exp->p_vmspace = USER_ADDR_NULL;
	exp->p_sigacts = CAST_USER_ADDR_T(p->p_sigacts);
	exp->p_flag  = p->p_flag;
	/* Translate internal p_lflag bits onto historical p_flag bits. */
	if (p->p_lflag & P_LTRACED)
		exp->p_flag |= P_TRACED;
	if (p->p_lflag & P_LPPWAIT)
		exp->p_flag |= P_PPWAIT;
	if (p->p_lflag & P_LEXIT)
		exp->p_flag |= P_WEXIT;
	exp->p_stat = p->p_stat ;
	exp->p_pid = p->p_pid ;
	exp->p_oppid = p->p_oppid ;
	/* Mach related  */
	exp->user_stack = p->user_stack;
	exp->exit_thread = CAST_USER_ADDR_T(p->exit_thread);
	exp->p_debugger = p->p_debugger ;
	exp->sigwait = p->sigwait ;
	/* scheduling */
#ifdef _PROC_HAS_SCHEDINFO_
	exp->p_estcpu = p->p_estcpu ;
	exp->p_pctcpu = p->p_pctcpu ;
	exp->p_slptime = p->p_slptime ;
#else
	/* Scheduling statistics are not maintained in this configuration. */
	exp->p_estcpu = 0 ;
	exp->p_pctcpu = 0 ;
	exp->p_slptime = 0 ;
#endif
	/* The zeroed fields below are kept only for ABI layout compatibility. */
	exp->p_cpticks = 0 ;
	exp->p_wchan = 0;
	exp->p_wmesg = 0;
	exp->p_swtime = 0 ;
	/* Widen the timers field-by-field for the LP64 layout. */
	exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
	exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;
	exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
	exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;
	exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
	exp->p_rtime.tv_usec = p->p_rtime.tv_usec;
	exp->p_uticks = 0 ;
	exp->p_sticks = 0 ;
	exp->p_iticks = 0 ;
	exp->p_traceflag = 0 ;
	exp->p_tracep = 0;
	exp->p_siglist = 0 ;	/* No longer relevant */
	exp->p_textvp = CAST_USER_ADDR_T(p->p_textvp);
	exp->p_holdcnt = 0 ;
	exp->p_sigmask = 0 ;	/* no longer avaialable */
	exp->p_sigignore = p->p_sigignore ;
	exp->p_sigcatch = p->p_sigcatch ;
	exp->p_priority = p->p_priority ;
	exp->p_usrpri = 0 ;
	exp->p_nice = p->p_nice ;
	/* Copy the command name and guarantee NUL termination. */
	bcopy(&p->p_comm, &exp->p_comm,MAXCOMLEN);
	exp->p_comm[MAXCOMLEN] = '\0';
	exp->p_pgrp = CAST_USER_ADDR_T(p->p_pgrp);
	exp->p_addr = USER_ADDR_NULL;
	exp->p_xstat = p->p_xstat ;
	exp->p_acflag = p->p_acflag ;
	exp->p_ru = CAST_USER_ADDR_T(p->p_ru);		/* XXX may be NULL */
}
1570
1571 static void
1572 fill_proc(proc_t p, struct kinfo_proc *kp)
1573 {
1574 fill_externproc(p, &kp->kp_proc);
1575 fill_eproc(p, &kp->kp_eproc);
1576 }
1577
1578 static void
1579 fill_user_proc(proc_t p, struct user_kinfo_proc *kp)
1580 {
1581 fill_user_externproc(p, &kp->kp_proc);
1582 fill_user_eproc(p, &kp->kp_eproc);
1583 }
1584
1585 int
1586 kdebug_ops(int *name, u_int namelen, user_addr_t where,
1587 size_t *sizep, proc_t p)
1588 {
1589 int ret=0;
1590
1591 ret = suser(kauth_cred_get(), &p->p_acflag);
1592 if (ret)
1593 return(ret);
1594
1595 switch(name[0]) {
1596 case KERN_KDEFLAGS:
1597 case KERN_KDDFLAGS:
1598 case KERN_KDENABLE:
1599 case KERN_KDGETBUF:
1600 case KERN_KDSETUP:
1601 case KERN_KDREMOVE:
1602 case KERN_KDSETREG:
1603 case KERN_KDGETREG:
1604 case KERN_KDREADTR:
1605 case KERN_KDPIDTR:
1606 case KERN_KDTHRMAP:
1607 case KERN_KDPIDEX:
1608 case KERN_KDSETRTCDEC:
1609 case KERN_KDSETBUF:
1610 case KERN_KDGETENTROPY:
1611 ret = kdbg_control(name, namelen, where, sizep);
1612 break;
1613 default:
1614 ret= ENOTSUP;
1615 break;
1616 }
1617 return(ret);
1618 }
1619
1620
1621 /*
1622 * Return the top *sizep bytes of the user stack, or the entire area of the
1623 * user stack down through the saved exec_path, whichever is smaller.
1624 */
1625 int
1626 sysctl_procargs(int *name, u_int namelen, user_addr_t where,
1627 size_t *sizep, proc_t cur_proc)
1628 {
1629 return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 0);
1630 }
1631
1632 static int
1633 sysctl_procargs2(int *name, u_int namelen, user_addr_t where,
1634 size_t *sizep, proc_t cur_proc)
1635 {
1636 return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 1);
1637 }
1638
1639 static int
1640 sysctl_procargsx(int *name, __unused u_int namelen, user_addr_t where,
1641 size_t *sizep, proc_t cur_proc, int argc_yes)
1642 {
1643 proc_t p;
1644 int buflen = where != USER_ADDR_NULL ? *sizep : 0;
1645 int error = 0;
1646 struct _vm_map *proc_map;
1647 struct task * task;
1648 vm_map_copy_t tmp;
1649 user_addr_t arg_addr;
1650 size_t arg_size;
1651 caddr_t data;
1652 size_t argslen=0;
1653 int size;
1654 vm_offset_t copy_start, copy_end;
1655 kern_return_t ret;
1656 int pid;
1657 kauth_cred_t my_cred;
1658 uid_t uid;
1659
1660 if (argc_yes)
1661 buflen -= sizeof(int); /* reserve first word to return argc */
1662
1663 /* we only care about buflen when where (oldp from sysctl) is not NULL. */
1664 /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
1665 /* is not NULL then the caller wants us to return the length needed to */
1666 /* hold the data we would return */
1667 if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
1668 return(EINVAL);
1669 }
1670 arg_size = buflen;
1671
1672 /*
1673 * Lookup process by pid
1674 */
1675 pid = name[0];
1676 p = proc_find(pid);
1677 if (p == NULL) {
1678 return(EINVAL);
1679 }
1680
1681 /*
1682 * Copy the top N bytes of the stack.
1683 * On all machines we have so far, the stack grows
1684 * downwards.
1685 *
1686 * If the user expects no more than N bytes of
1687 * argument list, use that as a guess for the
1688 * size.
1689 */
1690
1691 if (!p->user_stack) {
1692 proc_rele(p);
1693 return(EINVAL);
1694 }
1695
1696 if (where == USER_ADDR_NULL) {
1697 /* caller only wants to know length of proc args data */
1698 if (sizep == NULL) {
1699 proc_rele(p);
1700 return(EFAULT);
1701 }
1702
1703 size = p->p_argslen;
1704 proc_rele(p);
1705 if (argc_yes) {
1706 size += sizeof(int);
1707 }
1708 else {
1709 /*
1710 * old PROCARGS will return the executable's path and plus some
1711 * extra space for work alignment and data tags
1712 */
1713 size += PATH_MAX + (6 * sizeof(int));
1714 }
1715 size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
1716 *sizep = size;
1717 return (0);
1718 }
1719
1720 my_cred = kauth_cred_proc_ref(p);
1721 uid = kauth_cred_getuid(my_cred);
1722 kauth_cred_unref(&my_cred);
1723
1724 if ((uid != kauth_cred_getuid(kauth_cred_get()))
1725 && suser(kauth_cred_get(), &cur_proc->p_acflag)) {
1726 proc_rele(p);
1727 return (EINVAL);
1728 }
1729
1730 if ((u_int)arg_size > p->p_argslen)
1731 arg_size = round_page(p->p_argslen);
1732
1733 arg_addr = p->user_stack - arg_size;
1734
1735
1736 /*
1737 * Before we can block (any VM code), make another
1738 * reference to the map to keep it alive. We do
1739 * that by getting a reference on the task itself.
1740 */
1741 task = p->task;
1742 if (task == NULL) {
1743 proc_rele(p);
1744 return(EINVAL);
1745 }
1746
1747 argslen = p->p_argslen;
1748 /*
1749 * Once we have a task reference we can convert that into a
1750 * map reference, which we will use in the calls below. The
1751 * task/process may change its map after we take this reference
1752 * (see execve), but the worst that will happen then is a return
1753 * of stale info (which is always a possibility).
1754 */
1755 task_reference(task);
1756 proc_rele(p);
1757 proc_map = get_task_map_reference(task);
1758 task_deallocate(task);
1759
1760 if (proc_map == NULL)
1761 return(EINVAL);
1762
1763
1764 ret = kmem_alloc(kernel_map, &copy_start, round_page(arg_size));
1765 if (ret != KERN_SUCCESS) {
1766 vm_map_deallocate(proc_map);
1767 return(ENOMEM);
1768 }
1769
1770 copy_end = round_page(copy_start + arg_size);
1771
1772 if( vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
1773 (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
1774 vm_map_deallocate(proc_map);
1775 kmem_free(kernel_map, copy_start,
1776 round_page(arg_size));
1777 return (EIO);
1778 }
1779
1780 /*
1781 * Now that we've done the copyin from the process'
1782 * map, we can release the reference to it.
1783 */
1784 vm_map_deallocate(proc_map);
1785
1786 if( vm_map_copy_overwrite(kernel_map,
1787 (vm_map_address_t)copy_start,
1788 tmp, FALSE) != KERN_SUCCESS) {
1789 kmem_free(kernel_map, copy_start,
1790 round_page(arg_size));
1791 return (EIO);
1792 }
1793
1794 if (arg_size > argslen) {
1795 data = (caddr_t) (copy_end - argslen);
1796 size = argslen;
1797 } else {
1798 data = (caddr_t) (copy_end - arg_size);
1799 size = arg_size;
1800 }
1801
1802 if (argc_yes) {
1803 /* Put processes argc as the first word in the copyout buffer */
1804 suword(where, p->p_argc);
1805 error = copyout(data, (where + sizeof(int)), size);
1806 size += sizeof(int);
1807 } else {
1808 error = copyout(data, where, size);
1809
1810 /*
1811 * Make the old PROCARGS work to return the executable's path
1812 * But, only if there is enough space in the provided buffer
1813 *
1814 * on entry: data [possibily] points to the beginning of the path
1815 *
1816 * Note: we keep all pointers&sizes aligned to word boundries
1817 */
1818 if ( (! error) && (buflen > 0 && (u_int)buflen > argslen) )
1819 {
1820 int binPath_sz, alignedBinPath_sz = 0;
1821 int extraSpaceNeeded, addThis;
1822 user_addr_t placeHere;
1823 char * str = (char *) data;
1824 int max_len = size;
1825
1826 /* Some apps are really bad about messing up their stacks
1827 So, we have to be extra careful about getting the length
1828 of the executing binary. If we encounter an error, we bail.
1829 */
1830
1831 /* Limit ourselves to PATH_MAX paths */
1832 if ( max_len > PATH_MAX ) max_len = PATH_MAX;
1833
1834 binPath_sz = 0;
1835
1836 while ( (binPath_sz < max_len-1) && (*str++ != 0) )
1837 binPath_sz++;
1838
1839 /* If we have a NUL terminator, copy it, too */
1840 if (binPath_sz < max_len-1) binPath_sz += 1;
1841
1842 /* Pre-Flight the space requiremnts */
1843
1844 /* Account for the padding that fills out binPath to the next word */
1845 alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;
1846
1847 placeHere = where + size;
1848
1849 /* Account for the bytes needed to keep placeHere word aligned */
1850 addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;
1851
1852 /* Add up all the space that is needed */
1853 extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));
1854
1855 /* is there is room to tack on argv[0]? */
1856 if ( (buflen & ~(sizeof(int)-1)) >= ( argslen + extraSpaceNeeded ))
1857 {
1858 placeHere += addThis;
1859 suword(placeHere, 0);
1860 placeHere += sizeof(int);
1861 suword(placeHere, 0xBFFF0000);
1862 placeHere += sizeof(int);
1863 suword(placeHere, 0);
1864 placeHere += sizeof(int);
1865 error = copyout(data, placeHere, binPath_sz);
1866 if ( ! error )
1867 {
1868 placeHere += binPath_sz;
1869 suword(placeHere, 0);
1870 size += extraSpaceNeeded;
1871 }
1872 }
1873 }
1874 }
1875
1876 if (copy_start != (vm_offset_t) 0) {
1877 kmem_free(kernel_map, copy_start, copy_end - copy_start);
1878 }
1879 if (error) {
1880 return(error);
1881 }
1882
1883 if (where != USER_ADDR_NULL)
1884 *sizep = size;
1885 return (0);
1886 }
1887
1888
1889 /*
1890 * Max number of concurrent aio requests
1891 */
1892 static int
1893 sysctl_aiomax
1894 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1895 {
1896 int new_value, changed;
1897 int error = sysctl_io_number(req, aio_max_requests, sizeof(int), &new_value, &changed);
1898 if (changed) {
1899 /* make sure the system-wide limit is greater than the per process limit */
1900 if (new_value >= aio_max_requests_per_process)
1901 aio_max_requests = new_value;
1902 else
1903 error = EINVAL;
1904 }
1905 return(error);
1906 }
1907
1908
1909 /*
1910 * Max number of concurrent aio requests per process
1911 */
1912 static int
1913 sysctl_aioprocmax
1914 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1915 {
1916 int new_value, changed;
1917 int error = sysctl_io_number(req, aio_max_requests_per_process, sizeof(int), &new_value, &changed);
1918 if (changed) {
1919 /* make sure per process limit is less than the system-wide limit */
1920 if (new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX)
1921 aio_max_requests_per_process = new_value;
1922 else
1923 error = EINVAL;
1924 }
1925 return(error);
1926 }
1927
1928
1929 /*
1930 * Max number of async IO worker threads
1931 */
1932 static int
1933 sysctl_aiothreads
1934 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1935 {
1936 int new_value, changed;
1937 int error = sysctl_io_number(req, aio_worker_threads, sizeof(int), &new_value, &changed);
1938 if (changed) {
1939 /* we only allow an increase in the number of worker threads */
1940 if (new_value > aio_worker_threads ) {
1941 _aio_create_worker_threads((new_value - aio_worker_threads));
1942 aio_worker_threads = new_value;
1943 }
1944 else
1945 error = EINVAL;
1946 }
1947 return(error);
1948 }
1949
1950
1951 /*
1952 * System-wide limit on the max number of processes
1953 */
1954 static int
1955 sysctl_maxproc
1956 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1957 {
1958 int new_value, changed;
1959 int error = sysctl_io_number(req, maxproc, sizeof(int), &new_value, &changed);
1960 if (changed) {
1961 AUDIT_ARG(value, new_value);
1962 /* make sure the system-wide limit is less than the configured hard
1963 limit set at kernel compilation */
1964 if (new_value <= hard_maxproc && new_value > 0)
1965 maxproc = new_value;
1966 else
1967 error = EINVAL;
1968 }
1969 return(error);
1970 }
1971
/* Read-only identification strings/values: kern.ostype, kern.osrelease,
 * kern.osrevision and kern.version. */
SYSCTL_STRING(_kern, KERN_OSTYPE, ostype,
		CTLFLAG_RD | CTLFLAG_KERN,
		ostype, 0, "");
SYSCTL_STRING(_kern, KERN_OSRELEASE, osrelease,
		CTLFLAG_RD | CTLFLAG_KERN,
		osrelease, 0, "");
SYSCTL_INT(_kern, KERN_OSREV, osrevision,
		CTLFLAG_RD | CTLFLAG_KERN,
		NULL, BSD, "");
SYSCTL_STRING(_kern, KERN_VERSION, version,
		CTLFLAG_RD | CTLFLAG_KERN,
		version, 0, "");
1984
1985 /* PR-5293665: need to use a callback function for kern.osversion to set
1986 * osversion in IORegistry */
1987
1988 static int
1989 sysctl_osversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
1990 {
1991 int rval = 0;
1992
1993 rval = sysctl_handle_string(oidp, arg1, arg2, req);
1994
1995 if (req->newptr) {
1996 IORegistrySetOSBuildVersion((char *)arg1);
1997 }
1998
1999 return rval;
2000 }
2001
2002 SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
2003 CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING,
2004 osversion, 256 /* OSVERSIZE*/,
2005 sysctl_osversion, "A", "");
2006
2007 static int
2008 sysctl_sysctl_bootargs
2009 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2010 {
2011 int error;
2012 char buf[256];
2013
2014 strlcpy(buf, PE_boot_args(), 256);
2015 error = sysctl_io_string(req, buf, 256, 0, NULL);
2016 return(error);
2017 }
2018
2019 SYSCTL_PROC(_kern, OID_AUTO, bootargs,
2020 CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING,
2021 NULL, 0,
2022 sysctl_sysctl_bootargs, "A", "bootargs");
2023
/* Simple integer limits; only kern.maxfiles is writable. */
SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles,
		CTLFLAG_RW | CTLFLAG_KERN,
		&maxfiles, 0, "");
SYSCTL_INT(_kern, KERN_ARGMAX, argmax,
		CTLFLAG_RD | CTLFLAG_KERN,
		NULL, ARG_MAX, "");
SYSCTL_INT(_kern, KERN_POSIX1, posix1version,
		CTLFLAG_RD | CTLFLAG_KERN,
		NULL, _POSIX_VERSION, "");
SYSCTL_INT(_kern, KERN_NGROUPS, ngroups,
		CTLFLAG_RD | CTLFLAG_KERN,
		NULL, NGROUPS_MAX, "");
SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control,
		CTLFLAG_RD | CTLFLAG_KERN,
		NULL, 1, "");
#if 1	/* _POSIX_SAVED_IDS from <unistd.h> */
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
		CTLFLAG_RD | CTLFLAG_KERN,
		NULL, 1, "");
#else
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
		CTLFLAG_RD | CTLFLAG_KERN,
		NULL, 0, "");
#endif
2048
/*
 * kern.maxvnodes: target number of vnodes; resizes the VM object
 * cache and the name cache to match.
 * NOTE(review): sysctl_io_number's error is not checked before the
 * caches are resized, and desiredvnodes is written directly by the
 * helper -- a failed/partial set may still trigger a resize.  Verify
 * whether this is intentional before changing.
 */
static int
sysctl_maxvnodes (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	unsigned int oldval = desiredvnodes;
	int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &desiredvnodes, NULL);
	reset_vmobjectcache(oldval, desiredvnodes);
	resize_namecache(desiredvnodes);
	return(error);
}

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
		CTLTYPE_INT | CTLFLAG_RW,
		0, 0, sysctl_maxvnodes, "I", "");
2062
/* Registrations for the handlers defined above. */
SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
		CTLTYPE_INT | CTLFLAG_RW,
		0, 0, sysctl_maxproc, "I", "");

SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
		CTLTYPE_INT | CTLFLAG_RW,
		0, 0, sysctl_aiomax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
		CTLTYPE_INT | CTLFLAG_RW,
		0, 0, sysctl_aioprocmax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
		CTLTYPE_INT | CTLFLAG_RW,
		0, 0, sysctl_aiothreads, "I", "");
2078
2079 static int
2080 sysctl_securelvl
2081 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2082 {
2083 int new_value, changed;
2084 int error = sysctl_io_number(req, securelevel, sizeof(int), &new_value, &changed);
2085 if (changed) {
2086 if (!(new_value < securelevel && req->p->p_pid != 1)) {
2087 proc_list_lock();
2088 securelevel = new_value;
2089 proc_list_unlock();
2090 } else {
2091 error = EPERM;
2092 }
2093 }
2094 return(error);
2095 }
2096
2097 SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
2098 CTLTYPE_INT | CTLFLAG_RW,
2099 0, 0, sysctl_securelvl, "I", "");
2100
2101
2102 static int
2103 sysctl_domainname
2104 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2105 {
2106 int error, changed;
2107 error = sysctl_io_string(req, domainname, sizeof(domainname), 0, &changed);
2108 if (changed) {
2109 domainnamelen = strlen(domainname);
2110 }
2111 return(error);
2112 }
2113
2114 SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
2115 CTLTYPE_STRING | CTLFLAG_RW,
2116 0, 0, sysctl_domainname, "A", "");
2117
2118 SYSCTL_INT(_kern, KERN_HOSTID, hostid,
2119 CTLFLAG_RW | CTLFLAG_KERN,
2120 &hostid, 0, "");
2121
2122 static int
2123 sysctl_hostname
2124 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2125 {
2126 int error, changed;
2127 error = sysctl_io_string(req, hostname, sizeof(hostname), 1, &changed);
2128 if (changed) {
2129 hostnamelen = req->newlen;
2130 }
2131 return(error);
2132 }
2133
2134
2135 SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
2136 CTLTYPE_STRING | CTLFLAG_RW,
2137 0, 0, sysctl_hostname, "A", "");
2138
2139 static int
2140 sysctl_procname
2141 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2142 {
2143 /* Original code allowed writing, I'm copying this, although this all makes
2144 no sense to me. Besides, this sysctl is never used. */
2145 return sysctl_io_string(req, &req->p->p_name[0], (2*MAXCOMLEN+1), 1, NULL);
2146 }
2147
2148 SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
2149 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY,
2150 0, 0, sysctl_procname, "A", "");
2151
2152 SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
2153 CTLFLAG_RW | CTLFLAG_KERN,
2154 &speculative_reads_disabled, 0, "");
2155
2156 static int
2157 sysctl_boottime
2158 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2159 {
2160 struct timeval t;
2161
2162 t.tv_sec = boottime_sec();
2163 t.tv_usec = 0;
2164
2165 return sysctl_io_opaque(req, &t, sizeof(t), NULL);
2166 }
2167
2168 SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
2169 CTLTYPE_STRUCT | CTLFLAG_RD,
2170 0, 0, sysctl_boottime, "S,timeval", "");
2171
2172 static int
2173 sysctl_symfile
2174 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2175 {
2176 char *str;
2177 int error = get_kernel_symfile(req->p, &str);
2178 if (error)
2179 return (error);
2180 return sysctl_io_string(req, str, 0, 0, NULL);
2181 }
2182
2183
2184 SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
2185 CTLTYPE_STRING | CTLFLAG_RD,
2186 0, 0, sysctl_symfile, "A", "");
2187
#if NFSCLIENT
/* kern.netboot: reports the result of netboot_root() (network root). */
static int
sysctl_netboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, netboot_root(), sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
		CTLTYPE_INT | CTLFLAG_RD,
		0, 0, sysctl_netboot, "I", "");
#endif
2200
2201 static int
2202 sysctl_usrstack
2203 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2204 {
2205 return sysctl_io_number(req, (int)req->p->user_stack, sizeof(int), NULL, NULL);
2206 }
2207
2208 SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack,
2209 CTLTYPE_INT | CTLFLAG_RD,
2210 0, 0, sysctl_usrstack, "I", "");
2211
2212 static int
2213 sysctl_usrstack64
2214 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2215 {
2216 return sysctl_io_number(req, req->p->user_stack, sizeof(req->p->user_stack), NULL, NULL);
2217 }
2218
2219 SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
2220 CTLTYPE_QUAD | CTLFLAG_RD,
2221 0, 0, sysctl_usrstack64, "Q", "");
2222
/* kern.corefile: writable template path used when a core dump is written */
SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
		CTLFLAG_RW | CTLFLAG_KERN,
		corefilename, sizeof(corefilename), "");
2226
2227 static int
2228 sysctl_coredump
2229 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2230 {
2231 #ifdef SECURE_KERNEL
2232 return (ENOTSUP);
2233 #endif
2234 int new_value, changed;
2235 int error = sysctl_io_number(req, do_coredump, sizeof(int), &new_value, &changed);
2236 if (changed) {
2237 if ((new_value == 0) || (new_value == 1))
2238 do_coredump = new_value;
2239 else
2240 error = EINVAL;
2241 }
2242 return(error);
2243 }
2244
2245 SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
2246 CTLTYPE_INT | CTLFLAG_RW,
2247 0, 0, sysctl_coredump, "I", "");
2248
2249 static int
2250 sysctl_suid_coredump
2251 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2252 {
2253 #ifdef SECURE_KERNEL
2254 return (ENOTSUP);
2255 #endif
2256 int new_value, changed;
2257 int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed);
2258 if (changed) {
2259 if ((new_value == 0) || (new_value == 1))
2260 sugid_coredump = new_value;
2261 else
2262 error = EINVAL;
2263 }
2264 return(error);
2265 }
2266
2267 SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
2268 CTLTYPE_INT | CTLFLAG_RW,
2269 0, 0, sysctl_suid_coredump, "I", "");
2270
2271 static int
2272 sysctl_delayterm
2273 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2274 {
2275 struct proc *p = req->p;
2276 int new_value, changed;
2277 int error = sysctl_io_number(req, (req->p->p_lflag & P_LDELAYTERM)? 1: 0, sizeof(int), &new_value, &changed);
2278 if (changed) {
2279 proc_lock(p);
2280 if (new_value)
2281 req->p->p_lflag |= P_LDELAYTERM;
2282 else
2283 req->p->p_lflag &= ~P_LDELAYTERM;
2284 proc_unlock(p);
2285 }
2286 return(error);
2287 }
2288
2289 SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
2290 CTLTYPE_INT | CTLFLAG_RW,
2291 0, 0, sysctl_delayterm, "I", "");
2292
/*
 * kern.proc_low_pri_io: the calling process's disk I/O policy, exposed
 * as a small integer: 0 = normal/default, 1 = throttled, 2 = passive.
 * Reads translate p_iopol_disk to that encoding; writes translate back.
 */
static int
sysctl_proc_low_pri_io
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	struct proc *p = req->p;
	int new_value, old_value, changed;
	int error;

	/* snapshot the stored IOPOL_* value under the proc lock */
	proc_lock(p);
	switch (req->p->p_iopol_disk) {
	case IOPOL_DEFAULT:
	case IOPOL_NORMAL:
		old_value = 0;
		break;
	case IOPOL_THROTTLE:
		old_value = 1;
		break;
	case IOPOL_PASSIVE:
		old_value = 2;
		break;
	default:
		/* this should never happen, but to be robust, return the default value */
		old_value = 0;
		break;
	}
	proc_unlock(p);

	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
	if (changed) {
		/* bit 0 (throttle) takes precedence over bit 1 (passive) when both set;
		 * values with neither bit and non-zero are silently ignored */
		proc_lock(p);
		if (new_value & 0x01)
			req->p->p_iopol_disk = IOPOL_THROTTLE;
		else if (new_value & 0x02)
			req->p->p_iopol_disk = IOPOL_PASSIVE;
		else if (new_value == 0)
			req->p->p_iopol_disk = IOPOL_NORMAL;
		proc_unlock(p);
	}
	return(error);
}

SYSCTL_PROC(_kern, KERN_PROC_LOW_PRI_IO, proc_low_pri_io,
		CTLTYPE_INT | CTLFLAG_RW,
		0, 0, sysctl_proc_low_pri_io, "I", "");
2337
2338 static int
2339 sysctl_rage_vnode
2340 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2341 {
2342 struct proc *p = req->p;
2343 struct uthread *ut;
2344 int new_value, old_value, changed;
2345 int error;
2346
2347 ut = get_bsdthread_info(current_thread());
2348
2349 if (ut->uu_flag & UT_RAGE_VNODES)
2350 old_value = KERN_RAGE_THREAD;
2351 else if (p->p_lflag & P_LRAGE_VNODES)
2352 old_value = KERN_RAGE_PROC;
2353 else
2354 old_value = 0;
2355
2356 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2357
2358 if (error == 0) {
2359 switch (new_value) {
2360 case KERN_RAGE_PROC:
2361 proc_lock(p);
2362 p->p_lflag |= P_LRAGE_VNODES;
2363 proc_unlock(p);
2364 break;
2365 case KERN_UNRAGE_PROC:
2366 proc_lock(p);
2367 p->p_lflag &= ~P_LRAGE_VNODES;
2368 proc_unlock(p);
2369 break;
2370
2371 case KERN_RAGE_THREAD:
2372 ut->uu_flag |= UT_RAGE_VNODES;
2373 break;
2374 case KERN_UNRAGE_THREAD:
2375 ut = get_bsdthread_info(current_thread());
2376 ut->uu_flag &= ~UT_RAGE_VNODES;
2377 break;
2378 }
2379 }
2380 return(error);
2381 }
2382
2383 SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
2384 CTLTYPE_INT | CTLFLAG_RW,
2385 0, 0, sysctl_rage_vnode, "I", "");
2386
2387
2388 static int
2389 sysctl_kern_check_openevt
2390 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2391 {
2392 struct proc *p = req->p;
2393 int new_value, old_value, changed;
2394 int error;
2395
2396 if (p->p_flag & P_CHECKOPENEVT) {
2397 old_value = KERN_OPENEVT_PROC;
2398 } else {
2399 old_value = 0;
2400 }
2401
2402 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2403
2404 if (error == 0) {
2405 switch (new_value) {
2406 case KERN_OPENEVT_PROC:
2407 OSBitOrAtomic(P_CHECKOPENEVT, (UInt32 *)&p->p_flag);
2408 break;
2409
2410 case KERN_UNOPENEVT_PROC:
2411 OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT), (UInt32 *)&p->p_flag);
2412 break;
2413
2414 default:
2415 error = EINVAL;
2416 }
2417 }
2418 return(error);
2419 }
2420
2421 SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY,
2422 0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");
2423
2424
2425
2426 static int
2427 sysctl_nx
2428 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2429 {
2430 #ifdef SECURE_KERNEL
2431 return ENOTSUP;
2432 #endif
2433 int new_value, changed;
2434 int error;
2435
2436 error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);
2437 if (error)
2438 return error;
2439
2440 if (changed) {
2441 #ifdef __i386__
2442 /*
2443 * Only allow setting if NX is supported on the chip
2444 */
2445 if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD))
2446 return ENOTSUP;
2447 #endif
2448 nx_enabled = new_value;
2449 }
2450 return(error);
2451 }
2452
2453
2454
2455 SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
2456 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN,
2457 0, 0, sysctl_nx, "I", "");
2458
2459 static int
2460 sysctl_loadavg
2461 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2462 {
2463 if (proc_is64bit(req->p)) {
2464 struct user_loadavg loadinfo64;
2465 loadavg32to64(&averunnable, &loadinfo64);
2466 return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);
2467 } else {
2468 return sysctl_io_opaque(req, &averunnable, sizeof(averunnable), NULL);
2469 }
2470 }
2471
2472 SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
2473 CTLTYPE_STRUCT | CTLFLAG_RD,
2474 0, 0, sysctl_loadavg, "S,loadavg", "");
2475
2476 static int
2477 sysctl_swapusage
2478 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2479 {
2480 int error;
2481 uint64_t swap_total;
2482 uint64_t swap_avail;
2483 uint32_t swap_pagesize;
2484 boolean_t swap_encrypted;
2485 struct xsw_usage xsu;
2486
2487 error = macx_swapinfo(&swap_total,
2488 &swap_avail,
2489 &swap_pagesize,
2490 &swap_encrypted);
2491 if (error)
2492 return error;
2493
2494 xsu.xsu_total = swap_total;
2495 xsu.xsu_avail = swap_avail;
2496 xsu.xsu_used = swap_total - swap_avail;
2497 xsu.xsu_pagesize = swap_pagesize;
2498 xsu.xsu_encrypted = swap_encrypted;
2499 return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL);
2500 }
2501
2502
2503
2504 SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
2505 CTLTYPE_STRUCT | CTLFLAG_RD,
2506 0, 0, sysctl_swapusage, "S,xsw_usage", "");
2507

/* this kernel does NOT implement shared_region_make_private_np() */
/* NOTE(review): NULL value pointer with arg2 == 0 — presumably reports a
 * constant 0; confirm against the SYSCTL_INT handler's arg1/arg2 handling */
SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
		CTLFLAG_RD,
		NULL, 0, "");
2513
#if __i386__
/*
 * sysctl.proc_exec_affinity: read/write the calling process's exec
 * affinity, expressed as a cpu_type_t.  P_AFFINITY set maps to
 * CPU_TYPE_POWERPC, clear maps to CPU_TYPE_I386; writing any other
 * cpu type yields EINVAL.  The flag is updated with atomic bit ops,
 * so no proc lock is taken.
 */
static int
sysctl_sysctl_exec_affinity(__unused struct sysctl_oid *oidp,
			    __unused void *arg1, __unused int arg2,
			    struct sysctl_req *req)
{
	proc_t cur_proc = req->p;
	int error;

	/* old value is only copied out when the caller asked for one */
	if (req->oldptr != USER_ADDR_NULL) {
		cpu_type_t oldcputype = (cur_proc->p_flag & P_AFFINITY) ? CPU_TYPE_POWERPC : CPU_TYPE_I386;
		if ((error = SYSCTL_OUT(req, &oldcputype, sizeof(oldcputype))))
			return error;
	}

	/* new value is only read in when the caller supplied one */
	if (req->newptr != USER_ADDR_NULL) {
		cpu_type_t newcputype;
		if ((error = SYSCTL_IN(req, &newcputype, sizeof(newcputype))))
			return error;
		if (newcputype == CPU_TYPE_I386)
			OSBitAndAtomic(~((uint32_t)P_AFFINITY), (UInt32 *)&cur_proc->p_flag);
		else if (newcputype == CPU_TYPE_POWERPC)
			OSBitOrAtomic(P_AFFINITY, (UInt32 *)&cur_proc->p_flag);
		else
			return (EINVAL);
	}

	return 0;
}
SYSCTL_PROC(_sysctl, OID_AUTO, proc_exec_affinity, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, 0, 0, sysctl_sysctl_exec_affinity ,"I","proc_exec_affinity");
#endif
2545
/*
 * Resolve the cpu_type_t of a target process for the proc_native /
 * proc_cputype sysctls.
 *
 * With namelen == 0 the target is the calling process (no extra ref);
 * with namelen == 1, name[0] is a pid looked up via proc_find(), which
 * takes a reference that is dropped before returning.  Any other
 * namelen is EINVAL.
 *
 * On i386, a P_TRANSLATED (Rosetta) process reports CPU_TYPE_POWERPC;
 * otherwise the host cpu_type() is used, with CPU_ARCH_ABI64 OR'd in
 * for 64-bit processes.
 *
 * Returns 0 with *cputype filled in, or EINVAL.
 */
static int
fetch_process_cputype(
	proc_t cur_proc,
	int *name,
	u_int namelen,
	cpu_type_t *cputype)
{
	proc_t p = PROC_NULL;
	int refheld = 0;	/* non-zero when p came from proc_find() and must be released */
	cpu_type_t ret = 0;
	int error = 0;

	if (namelen == 0)
		p = cur_proc;
	else if (namelen == 1) {
		p = proc_find(name[0]);
		if (p == NULL)
			return (EINVAL);
		refheld = 1;
	} else {
		error = EINVAL;
		goto out;
	}

#if __i386__
	if (p->p_flag & P_TRANSLATED) {
		ret = CPU_TYPE_POWERPC;
	}
	else
#endif
	{
		ret = cpu_type();
		if (IS_64BIT_PROCESS(p))
			ret |= CPU_ARCH_ABI64;
	}
	*cputype = ret;

	if (refheld != 0)
		proc_rele(p);
out:
	return (error);
}
2588
2589 static int
2590 sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
2591 struct sysctl_req *req)
2592 {
2593 int error;
2594 cpu_type_t proc_cputype = 0;
2595 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2596 return error;
2597 int res = 1;
2598 if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
2599 res = 0;
2600 return SYSCTL_OUT(req, &res, sizeof(res));
2601 }
2602 SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_native ,"I","proc_native");
2603
2604 static int
2605 sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
2606 struct sysctl_req *req)
2607 {
2608 int error;
2609 cpu_type_t proc_cputype = 0;
2610 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2611 return error;
2612 return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
2613 }
2614 SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_cputype ,"I","proc_cputype");
2615
2616 static int
2617 sysctl_safeboot
2618 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2619 {
2620 return sysctl_io_number(req, boothowto & RB_SAFEBOOT ? 1 : 0, sizeof(int), NULL, NULL);
2621 }
2622
2623 SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
2624 CTLTYPE_INT | CTLFLAG_RD,
2625 0, 0, sysctl_safeboot, "I", "");
2626
2627 static int
2628 sysctl_singleuser
2629 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2630 {
2631 return sysctl_io_number(req, boothowto & RB_SINGLE ? 1 : 0, sizeof(int), NULL, NULL);
2632 }
2633
2634 SYSCTL_PROC(_kern, OID_AUTO, singleuser,
2635 CTLTYPE_INT | CTLFLAG_RD,
2636 0, 0, sysctl_singleuser, "I", "");
2637
/*
 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
 */
extern boolean_t	affinity_sets_enabled;
extern int		affinity_sets_mapping;

/* kern.affinity_sets_enabled: master on/off switch for affinity-set hinting */
SYSCTL_INT (_kern, OID_AUTO, affinity_sets_enabled,
	    CTLFLAG_RW, (int *) &affinity_sets_enabled, 0, "hinting enabled");
/* kern.affinity_sets_mapping: selects the affinity-set mapping policy */
SYSCTL_INT (_kern, OID_AUTO, affinity_sets_mapping,
	    CTLFLAG_RW, &affinity_sets_mapping, 0, "mapping policy");
2648
/*
 * Limit on total memory users can wire.
 *
 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
 *
 * vm_user_wire_limit - per address space limit on wired memory.  This puts a cap on the process's rlimit value.
 *
 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
 * kmem_init().
 *
 * All values are in bytes.
 */

vm_map_size_t	vm_global_user_wire_limit;
vm_map_size_t	vm_user_wire_limit;

/* vm.global_user_wire_limit / vm.user_wire_limit: writable quad-sized knobs
 * backed directly by the globals above */
SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW, &vm_global_user_wire_limit, "");
SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW, &vm_user_wire_limit, "");