/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Mike Karels at Berkeley Software Design, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sysctl.c	8.4 (Berkeley) 4/14/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 * DEPRECATED sysctl system call code
 *
 * Everything in this file is deprecated. Sysctls should be handled
 * by the code in kern_newsysctl.c.
 * The remaining "case" sections are supposed to be converted into
 * SYSCTL_*-style definitions, and as soon as all of them are gone,
 * this source file is supposed to die.
 *
 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
 */
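/*
 * Illustrative sketch of the new-style registration described above (not part
 * of the original file).  "example_value", "kern.example" and
 * "kern.example_checked" are hypothetical names used only to show the shape
 * of a SYSCTL_INT and a SYSCTL_PROC definition; the handler follows the same
 * sysctl_io_number() pattern used by the handlers later in this file.
 */
#if 0	/* example only */
static int example_value = 0;

/* Simple case: export the int directly; the framework does the copyin/copyout. */
SYSCTL_INT(_kern, OID_AUTO, example,
	CTLFLAG_RW | CTLFLAG_KERN,
	&example_value, 0, "example tunable");

/* Validated case: a handler can reject out-of-range values before storing them. */
static int
sysctl_example
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int error = sysctl_io_number(req, example_value, sizeof(int), &new_value, &changed);
	if (error == 0 && changed && new_value >= 0)
		example_value = new_value;
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, example_checked,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_example, "I", "");
#endif	/* example only */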
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/file_internal.h>
#include <sys/vnode_internal.h>
#include <sys/unistd.h>
#include <sys/ioctl.h>
#include <sys/namei.h>
#include <sys/disklabel.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <sys/aio_kern.h>
#include <sys/reboot.h>

#include <bsm/audit_kernel.h>

#include <mach/machine.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <kern/task.h>
#include <kern/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <mach/host_info.h>

#include <sys/mount_internal.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>

#include <IOKit/IOPlatformExpert.h>
#include <pexpert/pexpert.h>

#include <machine/machine_routines.h>
#include <machine/exec.h>

#include <vm/vm_protos.h>

#include <i386/cpuid.h>
sysctlfn kern_sysctl;
sysctlfn debug_sysctl;
extern sysctlfn net_sysctl;
extern sysctlfn cpu_sysctl;
extern int aio_max_requests;
extern int aio_max_requests_per_process;
extern int aio_worker_threads;
extern int lowpri_IO_window_msecs;
extern int lowpri_IO_delay_msecs;
extern int nx_enabled;
extern int speculative_reads_disabled;

fill_eproc(proc_t p, struct eproc *ep);
fill_externproc(proc_t p, struct extern_proc *exp);
fill_user_eproc(proc_t p, struct user_eproc *ep);
fill_user_proc(proc_t p, struct user_kinfo_proc *kp);
fill_user_externproc(proc_t p, struct user_extern_proc *exp);
kdbg_control(int *name, u_int namelen, user_addr_t where, size_t * sizep);
kdebug_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep, proc_t p);
pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
__private_extern__ kern_return_t
reset_vmobjectcache(unsigned int val1, unsigned int val2);
sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep);
sysctl_doprof(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen);
fill_proc(proc_t p, struct kinfo_proc *kp);
sysctl_procargs(int *name, u_int namelen, user_addr_t where,
	size_t *sizep, proc_t cur_proc);
sysctl_procargs2(int *name, u_int namelen, user_addr_t where, size_t *sizep,
sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
	proc_t cur_proc, int argc_yes);
sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
	size_t newlen, void *sp, int len);
static int sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg);
int sysdoproc_callback(proc_t p, void *arg);

static int __sysctl_funneled(proc_t p, struct __sysctl_args *uap, register_t *retval);

extern void IORegistrySetOSBuildVersion(char * build_version);
loadavg32to64(struct loadavg *la32, struct user_loadavg *la64)
	la64->ldavg[0] = la32->ldavg[0];
	la64->ldavg[1] = la32->ldavg[1];
	la64->ldavg[2] = la32->ldavg[2];
	la64->fscale   = (user_long_t)la32->fscale;

static struct sysctl_lock memlock;
/* sysctl() syscall */
__sysctl(proc_t p, struct __sysctl_args *uap, register_t *retval)
	boolean_t funnel_state;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	error = __sysctl_funneled(p, uap, retval);
	thread_funnel_set(kernel_flock, funnel_state);
__sysctl_funneled(proc_t p, struct __sysctl_args *uap, __unused register_t *retval)
	int error, dolock = 1;
	size_t savelen = 0, oldlen = 0, newlen;
	sysctlfn *fnp = NULL;
	int name[CTL_MAXNAME];
	boolean_t memlock_taken = FALSE;
	boolean_t vslock_taken = FALSE;
	kauth_cred_t my_cred;

	/*
	 * all top-level sysctl names are non-terminal
	 */
	if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)
	error = copyin(uap->name, &name[0], uap->namelen * sizeof(int));

	AUDIT_ARG(ctlname, name, uap->namelen);

	if (proc_is64bit(p)) {
		/* uap->newlen is a size_t value which grows to 64 bits
		 * when coming from a 64-bit process.  since it's doubtful we'll
		 * have a sysctl newp buffer greater than 4GB we shrink it to size_t
		 */
		newlen = CAST_DOWN(size_t, uap->newlen);
		newlen = uap->newlen;

	/* CTL_UNSPEC is used to get oid to AUTO_OID */
	if (uap->new != USER_ADDR_NULL
	    && ((name[0] == CTL_KERN
	    && !(name[1] == KERN_IPC || name[1] == KERN_PANICINFO || name[1] == KERN_PROCDELAYTERM ||
	    name[1] == KERN_PROC_LOW_PRI_IO || name[1] == KERN_PROCNAME || name[1] == KERN_RAGEVNODE || name[1] == KERN_CHECKOPENEVT))
	    || (name[0] == CTL_HW)
	    || (name[0] == CTL_VM))
	    && (error = suser(kauth_cred_get(), &p->p_acflag)))

	/* XXX: KERN, VFS and DEBUG are handled by their respective functions,
	 * but there is a fallback for all sysctls other than VFS to
	 * userland_sysctl() - KILL THIS! */
	if ((name[1] != KERN_VNODE) && (name[1] != KERN_FILE)
	    && (name[1] != KERN_PROC))

	if (uap->oldlenp != USER_ADDR_NULL) {
		uint64_t oldlen64 = fuulong(uap->oldlenp);

		oldlen = CAST_DOWN(size_t, oldlen64);
		/*
		 * If more than 4G, clamp to 4G - useracc() below will catch
		 * with an EFAULT, if it's actually necessary.
		 */
		if (oldlen64 > 0x00000000ffffffffULL)
			oldlen = 0xffffffffUL;

	if (uap->old != USER_ADDR_NULL) {
		if (!useracc(uap->old, (user_size_t)oldlen, B_WRITE))

		/*
		 * The kernel debug mechanism does not need to take this lock, and
		 * we don't grab the memlock around calls to KERN_PROC because it is reentrant.
		 * Grabbing the lock for a KERN_PROC sysctl makes a deadlock possible 5024049.
		 */
		if (!((name[1] == KERN_KDEBUG) && (name[2] == KERN_KDGETENTROPY)) &&
		    !(name[1] == KERN_PROC)) {
			memlock_taken = TRUE;

		if (dolock && oldlen) {
			if ((error = vslock(uap->old, (user_size_t)oldlen))) {
				if (memlock_taken == TRUE)

	my_cred = kauth_cred_proc_ref(p);
	error = mac_system_check_sysctl(
	    fnp == kern_sysctl ? 1 : 0,
	kauth_cred_unref(&my_cred);

	error = (*fnp)(name + 1, uap->namelen - 1, uap->old,
	    &oldlen, uap->new, newlen, p);

	if (vslock_taken == TRUE) {
		error1 = vsunlock(uap->old, (user_size_t)savelen, B_WRITE);
	if (memlock_taken == TRUE)

	if ( (name[0] != CTL_VFS) && (error == ENOTSUP)) {
		boolean_t funnel_state;

		/*
		 * Drop the funnel when calling new sysctl code, which will conditionally
		 * grab the funnel if it really needs to.
		 */
		funnel_state = thread_funnel_set(kernel_flock, FALSE);
		error = userland_sysctl(p, name, uap->namelen, uap->old, &tmp,
		    1, uap->new, newlen, &oldlen);
		thread_funnel_set(kernel_flock, funnel_state);

	if ((error) && (error != ENOMEM))
	if (uap->oldlenp != USER_ADDR_NULL)
		error = suulong(uap->oldlenp, oldlen);
/*
 * Attributes stored in the kernel.
 */
__private_extern__ char corefilename[MAXPATHLEN+1];
__private_extern__ int do_coredump;
__private_extern__ int sugid_coredump;

__private_extern__ int do_count_syscalls;

int securelevel = -1;
	__unused size_t newSize,
	if (name[0] == 0 && 1 == namelen) {
		return sysctl_rdint(oldBuf, oldSize, newBuf,
			(cur_proc->p_flag & P_AFFINITY) ? 1 : 0);
	} else if (name[0] == 1 && 2 == namelen) {
			OSBitAndAtomic(~((uint32_t)P_AFFINITY), (UInt32 *)&cur_proc->p_flag);
			OSBitOrAtomic(P_AFFINITY, (UInt32 *)&cur_proc->p_flag);

	__unused size_t newSize,
	int istranslated = 0;
	kauth_cred_t my_cred;

	p = proc_find(name[0]);
	my_cred = kauth_cred_proc_ref(p);
	uid = kauth_cred_getuid(my_cred);
	kauth_cred_unref(&my_cred);
	if ((uid != kauth_cred_getuid(kauth_cred_get()))
	    && suser(kauth_cred_get(), &cur_proc->p_acflag)) {

	istranslated = (p->p_flag & P_TRANSLATED);
	return sysctl_rdint(oldBuf, oldSize, newBuf,
		(istranslated != 0) ? 1 : 0);
set_archhandler(__unused proc_t p, int arch)
	struct vnode_attr va;
	vfs_context_t ctx = vfs_context_current();
	struct exec_archhandler *archhandler;

	case CPU_TYPE_POWERPC:
		archhandler = &exec_archhandler_ppc;

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE32,
	    CAST_USER_ADDR_T(archhandler->path), ctx);

	/* Check mount point */
	if ((nd.ni_vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    (nd.ni_vp->v_type != VREG)) {

	VATTR_WANTED(&va, va_fsid);
	VATTR_WANTED(&va, va_fileid);
	error = vnode_getattr(nd.ni_vp, &va, ctx);

	archhandler->fsid = va.va_fsid;
	archhandler->fileid = (u_long)va.va_fileid;
/* XXX remove once Rosetta is rev'ed */
/*****************************************************************************/
sysctl_exec_archhandler_ppc(
	__unused u_int namelen,
	char handler[sizeof(exec_archhandler_ppc.path)];
	vfs_context_t ctx = vfs_context_current();

	len = strlen(exec_archhandler_ppc.path) + 1;
	error = copyout(exec_archhandler_ppc.path, oldBuf, len);
	error = suser(vfs_context_ucred(ctx), &p->p_acflag);
	if (newSize >= sizeof(exec_archhandler_ppc.path))
		return (ENAMETOOLONG);
	error = copyin(newBuf, handler, newSize);
	handler[newSize] = 0;
	strlcpy(exec_archhandler_ppc.path, handler, MAXPATHLEN);
	error = set_archhandler(p, CPU_TYPE_POWERPC);
/*****************************************************************************/

sysctl_handle_exec_archhandler_ppc(struct sysctl_oid *oidp, void *arg1,
	int arg2, struct sysctl_req *req)
	error = sysctl_handle_string(oidp, arg1, arg2, req);
	error = set_archhandler(req->p, CPU_TYPE_POWERPC);

SYSCTL_NODE(_kern, KERN_EXEC, exec, CTLFLAG_RD|CTLFLAG_LOCKED, 0, "");

SYSCTL_NODE(_kern_exec, OID_AUTO, archhandler, CTLFLAG_RD|CTLFLAG_LOCKED, 0, "");

SYSCTL_PROC(_kern_exec_archhandler, OID_AUTO, powerpc,
	CTLTYPE_STRING | CTLFLAG_RW, exec_archhandler_ppc.path, 0,
	sysctl_handle_exec_archhandler_ppc, "A", "");

extern int get_kernel_symfile(proc_t, char **);
__private_extern__ int
sysctl_dopanicinfo(int *, u_int, user_addr_t, size_t *, user_addr_t,
/*
 * kernel related system variables.
 */
kern_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, proc_t p)
	/* all sysctl names not listed below are terminal at this level */
	    && !(name[0] == KERN_PROC
	    || name[0] == KERN_PROF
	    || name[0] == KERN_KDEBUG
	    || name[0] == KERN_PROCARGS
	    || name[0] == KERN_PROCARGS2
	    || name[0] == KERN_IPC
	    || name[0] == KERN_SYSV
	    || name[0] == KERN_AFFINITY
	    || name[0] == KERN_TRANSLATE
	    || name[0] == KERN_EXEC
	    || name[0] == KERN_PANICINFO
	    || name[0] == KERN_POSIX
	    || name[0] == KERN_TFP
	    || name[0] == KERN_TTY
	    || name[0] == KERN_LCTX
		return (ENOTDIR);		/* overloaded */

		return (sysctl_doproc(name + 1, namelen - 1, oldp, oldlenp));

		return (sysctl_doprof(name + 1, namelen - 1, oldp, oldlenp,

		return (kdebug_ops(name + 1, namelen - 1, oldp, oldlenp, p));

		/* new one as it does not use kinfo_proc */
		return (sysctl_procargs(name + 1, namelen - 1, oldp, oldlenp, p));

		/* new one as it does not use kinfo_proc */
		return (sysctl_procargs2(name + 1, namelen - 1, oldp, oldlenp, p));

		return(sysctl_dopanicinfo(name + 1, namelen - 1, oldp, oldlenp,

		return sysctl_affinity(name+1, namelen-1, oldp, oldlenp,

		return sysctl_translate(name+1, namelen-1, oldp, oldlenp, newp,

	/* XXX remove once Rosetta has rev'ed */
		return sysctl_exec_archhandler_ppc(name+1, namelen-1, oldp,
			oldlenp, newp, newlen, p);

	case KERN_COUNT_SYSCALLS:
		/* valid values passed in:
		 * = 0 means don't keep called counts for each bsd syscall
		 * > 0 means keep called counts for each bsd syscall
		 * = 2 means dump current counts to the system log
		 * = 3 means reset all counts
		 * for example, to dump current counts:
		 * sysctl -w kern.count_calls=2
		 */
		error = sysctl_int(oldp, oldlenp, newp, newlen, &tmp);
			do_count_syscalls = 1;
		else if ( tmp == 0 || tmp == 2 || tmp == 3 ) {
			extern int syscalls_log[];
			extern const char * syscallnames[];

			for ( i = 0; i < nsysent; i++ ) {
				if ( syscalls_log[i] != 0 ) {
					printf("%d calls - name %s \n", syscalls_log[i], syscallnames[i]);
			do_count_syscalls = 1;
/*
 * Debugging related system variables.
 */
#endif	/* DIAGNOSTIC */
struct ctldebug debug0, debug1;
struct ctldebug debug2, debug3, debug4;
struct ctldebug debug5, debug6, debug7, debug8, debug9;
struct ctldebug debug10, debug11, debug12, debug13, debug14;
struct ctldebug debug15, debug16, debug17, debug18, debug19;
static struct ctldebug *debugvars[CTL_DEBUG_MAXID] = {
	&debug0, &debug1, &debug2, &debug3, &debug4,
	&debug5, &debug6, &debug7, &debug8, &debug9,
	&debug10, &debug11, &debug12, &debug13, &debug14,
	&debug15, &debug16, &debug17, &debug18, &debug19,

debug_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, __unused proc_t p)
	struct ctldebug *cdp;

	/* all sysctl names at this level are name and field */
		return (ENOTDIR);		/* overloaded */
	if (name[0] < 0 || name[0] >= CTL_DEBUG_MAXID)
	cdp = debugvars[name[0]];
	if (cdp->debugname == 0)
		return (sysctl_rdstring(oldp, oldlenp, newp, cdp->debugname));
	case CTL_DEBUG_VALUE:
		return (sysctl_int(oldp, oldlenp, newp, newlen, cdp->debugvar));
/*
 * The following sysctl_* functions should not be used
 * any more, as they can only cope with callers in
 * user mode: Use new-style
 * SYSCTL_* definitions instead.
 */
/*
 * Validate parameters and get old / set new parameters
 * for an integer-valued sysctl function.
 */
sysctl_int(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, int *valp)
	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
	if (oldp && *oldlenp < sizeof(int))
	if (newp && newlen != sizeof(int))
	*oldlenp = sizeof(int);
		error = copyout(valp, oldp, sizeof(int));
	if (error == 0 && newp) {
		error = copyin(newp, valp, sizeof(int));
		AUDIT_ARG(value, *valp);

/*
 * As above, but read-only.
 */
sysctl_rdint(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, int val)
	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
	if (oldp && *oldlenp < sizeof(int))
	*oldlenp = sizeof(int);
		error = copyout((caddr_t)&val, oldp, sizeof(int));
/*
 * Validate parameters and get old / set new parameters
 * for a quad (64-bit) valued sysctl function.
 */
sysctl_quad(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, quad_t *valp)
	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
	if (oldp && *oldlenp < sizeof(quad_t))
	if (newp && newlen != sizeof(quad_t))
	*oldlenp = sizeof(quad_t);
		error = copyout(valp, oldp, sizeof(quad_t));
	if (error == 0 && newp)
		error = copyin(newp, valp, sizeof(quad_t));

/*
 * As above, but read-only.
 */
sysctl_rdquad(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, quad_t val)
	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
	if (oldp && *oldlenp < sizeof(quad_t))
	*oldlenp = sizeof(quad_t);
		error = copyout((caddr_t)&val, oldp, sizeof(quad_t));

/*
 * Validate parameters and get old / set new parameters
 * for a string-valued sysctl function.  Unlike sysctl_string, if you
 * give it a too small (but larger than 0 bytes) buffer, instead of
 * returning ENOMEM, it truncates the returned string to the buffer
 * size.  This preserves the semantics of some library routines
 * implemented via sysctl, which truncate their returned data, rather
 * than simply returning an error.  The returned string is always NUL
 * terminated.
 */
sysctl_trstring(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, char *str, int maxlen)
	int len, copylen, error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
	copylen = len = strlen(str) + 1;
	if (oldp && (len < 0 || *oldlenp < 1))
	if (oldp && (*oldlenp < (size_t)len))
		copylen = *oldlenp + 1;
	if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
	*oldlenp = copylen - 1; /* deal with NULL strings correctly */
		error = copyout(str, oldp, copylen);
			error = copyout((void *)&c, oldp, sizeof(char));
	if (error == 0 && newp) {
		error = copyin(newp, str, newlen);
		AUDIT_ARG(text, (char *)str);

/*
 * Validate parameters and get old / set new parameters
 * for a string-valued sysctl function.
 */
sysctl_string(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, char *str, int maxlen)
	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
	len = strlen(str) + 1;
	if (oldp && (len < 0 || *oldlenp < (size_t)len))
	if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
	*oldlenp = len -1; /* deal with NULL strings correctly */
		error = copyout(str, oldp, len);
	if (error == 0 && newp) {
		error = copyin(newp, str, newlen);
		AUDIT_ARG(text, (char *)str);

/*
 * As above, but read-only.
 */
sysctl_rdstring(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, char *str)
	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
	len = strlen(str) + 1;
	if (oldp && *oldlenp < (size_t)len)
		error = copyout(str, oldp, len);

/*
 * Validate parameters and get old / set new parameters
 * for a structure oriented sysctl function.
 */
sysctl_struct(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, void *sp, int len)
	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
	if (oldp && (len < 0 || *oldlenp < (size_t)len))
	if (newp && (len < 0 || newlen > (size_t)len))
		error = copyout(sp, oldp, len);
	if (error == 0 && newp)
		error = copyin(newp, sp, len);

/*
 * Validate parameters and get old parameters
 * for a structure oriented sysctl function.
 */
sysctl_rdstruct(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, void *sp, int len)
	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
	if (oldp && (len < 0 || *oldlenp < (size_t)len))
		error = copyout(sp, oldp, len);
/*
 * Get file structures.
 */
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	struct fileglob *fg;
	struct extern_file nef;

	if (req->oldptr == USER_ADDR_NULL) {
		/*
		 * overestimate by 10 files
		 */
		req->oldidx = sizeof(filehead) + (nfiles + 10) * sizeof(struct extern_file);

	/*
	 * first copyout filehead
	 */
	error = SYSCTL_OUT(req, &filehead, sizeof(filehead));

	/*
	 * followed by an array of file structures
	 */
	for (fg = filehead.lh_first; fg != 0; fg = fg->f_list.le_next) {
		nef.f_list.le_next = (struct extern_file *)fg->f_list.le_next;
		nef.f_list.le_prev = (struct extern_file **)fg->f_list.le_prev;
		nef.f_flag = (fg->fg_flag & FMASK);
		nef.f_type = fg->fg_type;
		nef.f_count = fg->fg_count;
		nef.f_msgcount = fg->fg_msgcount;
		nef.f_cred = fg->fg_cred;
		nef.f_ops = fg->fg_ops;
		nef.f_offset = fg->fg_offset;
		nef.f_data = fg->fg_data;
		error = SYSCTL_OUT(req, &nef, sizeof(nef));

SYSCTL_PROC(_kern, KERN_FILE, file,
	CTLTYPE_STRUCT | CTLFLAG_RW,
	0, 0, sysctl_file, "S,filehead", "");

sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg)
	if (p->p_pid != (pid_t)arg)

sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg)
	if (p->p_pgrpid != (pid_t)arg)

sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg)
	boolean_t funnel_state;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	/* This is very racy but list lock is held.. Hmmm. */
	if ((p->p_flag & P_CONTROLT) == 0 ||
	    (p->p_pgrp == NULL) || (p->p_pgrp->pg_session == NULL) ||
	    p->p_pgrp->pg_session->s_ttyp == NULL ||
	    p->p_pgrp->pg_session->s_ttyp->t_dev != (dev_t)arg)
	thread_funnel_set(kernel_flock, funnel_state);

sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg)
	kauth_cred_t my_cred;

	if (p->p_ucred == NULL)
	my_cred = kauth_cred_proc_ref(p);
	uid = kauth_cred_getuid(my_cred);
	kauth_cred_unref(&my_cred);

	if (uid != (uid_t)arg)

sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg)
	kauth_cred_t my_cred;

	if (p->p_ucred == NULL)
	my_cred = kauth_cred_proc_ref(p);
	ruid = my_cred->cr_ruid;
	kauth_cred_unref(&my_cred);

	if (ruid != (uid_t)arg)

sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg)
	if ((p->p_lctx == NULL) ||
	    (p->p_lctx->lc_id != (pid_t)arg))

/*
 * try over estimating by 5 procs
 */
#define KERN_PROCSLOP	(5 * sizeof (struct kinfo_proc))
struct sysdoproc_args {
	boolean_t is_64_bit;

sysdoproc_callback(proc_t p, void * arg)
	struct sysdoproc_args * args = (struct sysdoproc_args *)arg;

	if (args->buflen >= args->sizeof_kproc) {
		if ((args->ruidcheck != 0) && (sysdoproc_filt_KERN_PROC_RUID(p, (void *)args->uidval) == 0))
			return(PROC_RETURNED);
		if ((args->uidcheck != 0) && (sysdoproc_filt_KERN_PROC_UID(p, (void *)args->uidval) == 0))
			return(PROC_RETURNED);
		if ((args->ttycheck != 0) && (sysdoproc_filt_KERN_PROC_TTY(p, (void *)args->uidval) == 0))
			return(PROC_RETURNED);

		bzero(args->kprocp, args->sizeof_kproc);
		if (args->is_64_bit) {
			fill_user_proc(p, (struct user_kinfo_proc *) args->kprocp);
			fill_proc(p, (struct kinfo_proc *) args->kprocp);
		error = copyout(args->kprocp, args->dp, args->sizeof_kproc);
			*args->errorp = error;
			return(PROC_RETURNED_DONE);
		args->dp += args->sizeof_kproc;
		args->buflen -= args->sizeof_kproc;
	args->needed += args->sizeof_kproc;
	return(PROC_RETURNED);

sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep)
	user_addr_t dp = where;
	int buflen = where != USER_ADDR_NULL ? *sizep : 0;
	boolean_t is_64_bit = FALSE;
	struct kinfo_proc       kproc;
	struct user_kinfo_proc  user_kproc;
	int (*filterfn)(proc_t, void *) = 0;
	struct sysdoproc_args args;

	if (namelen != 2 && !(namelen == 1 && name[0] == KERN_PROC_ALL))
	is_64_bit = proc_is64bit(current_proc());
		sizeof_kproc = sizeof(user_kproc);
		kprocp = (caddr_t) &user_kproc;
		sizeof_kproc = sizeof(kproc);
		kprocp = (caddr_t) &kproc;

		filterfn = sysdoproc_filt_KERN_PROC_PID;

	case KERN_PROC_PGRP:
		filterfn = sysdoproc_filt_KERN_PROC_PGRP;

	case KERN_PROC_RUID:

	case KERN_PROC_LCID:
		filterfn = sysdoproc_filt_KERN_PROC_LCID;

	args.buflen = buflen;
	args.kprocp = kprocp;
	args.is_64_bit = is_64_bit;
	args.needed = needed;
	args.errorp = &error;
	args.uidcheck = uidcheck;
	args.ruidcheck = ruidcheck;
	args.ttycheck = ttycheck;
	args.sizeof_kproc = sizeof_kproc;
	args.uidval = name[1];

	proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST), sysdoproc_callback, &args, filterfn, (void *)name[1]);

	needed = args.needed;

	if (where != USER_ADDR_NULL) {
		*sizep = dp - where;
		if (needed > *sizep)
		needed += KERN_PROCSLOP;
/*
 * Fill in an eproc structure for the specified process.
 */
fill_eproc(proc_t p, struct eproc *ep)
	kauth_cred_t my_cred;
	struct session * sessp;

	sessp = proc_session(p);

	if (pg != PGRP_NULL) {
		ep->e_pgid = p->p_pgrpid;
		ep->e_jobc = pg->pg_jobc;
		if ((sessp != SESSION_NULL) && sessp->s_ttyvp)
			ep->e_flag = EPROC_CTTY;
		ep->e_sess = (struct session *)0;
		ep->e_lcid = p->p_lctx->lc_id;
	ep->e_ppid = p->p_ppid;
	/* Pre-zero the fake historical pcred */
	bzero(&ep->e_pcred, sizeof(struct _pcred));
	my_cred = kauth_cred_proc_ref(p);

	/* A fake historical pcred */
	ep->e_pcred.p_ruid = my_cred->cr_ruid;
	ep->e_pcred.p_svuid = my_cred->cr_svuid;
	ep->e_pcred.p_rgid = my_cred->cr_rgid;
	ep->e_pcred.p_svgid = my_cred->cr_svgid;
	/* A fake historical *kauth_cred_t */
	ep->e_ucred.cr_ref = my_cred->cr_ref;
	ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
	ep->e_ucred.cr_ngroups = my_cred->cr_ngroups;
	bcopy(my_cred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));

	kauth_cred_unref(&my_cred);

	if (p->p_stat == SIDL || p->p_stat == SZOMB) {
		ep->e_vm.vm_tsize = 0;
		ep->e_vm.vm_dsize = 0;
		ep->e_vm.vm_ssize = 0;
	ep->e_vm.vm_rssize = 0;

	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
	    (tp = sessp->s_ttyp)) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = sessp->s_ttypgrpid;
		ep->e_tsess = tp->t_session;

	if (SESS_LEADER(p, sessp))
		ep->e_flag |= EPROC_SLEADER;
	bzero(&ep->e_wmesg[0], WMESGLEN+1);
	ep->e_xsize = ep->e_xrssize = 0;
	ep->e_xccount = ep->e_xswrss = 0;
	if (sessp != SESSION_NULL)
		session_rele(sessp);

/*
 * Fill in an LP64 version of eproc structure for the specified process.
 */
fill_user_eproc(proc_t p, struct user_eproc *ep)
	struct session *sessp = NULL;
	kauth_cred_t my_cred;

	sessp = proc_session(p);

	ep->e_paddr = CAST_USER_ADDR_T(p);
	if (pg != PGRP_NULL) {
		ep->e_sess = CAST_USER_ADDR_T(sessp);
		ep->e_pgid = p->p_pgrpid;
		ep->e_jobc = pg->pg_jobc;
		if (sessp != SESSION_NULL) {
				ep->e_flag = EPROC_CTTY;
		ep->e_sess = USER_ADDR_NULL;
		ep->e_lcid = p->p_lctx->lc_id;
	ep->e_ppid = p->p_ppid;
	/* Pre-zero the fake historical pcred */
	bzero(&ep->e_pcred, sizeof(ep->e_pcred));
	my_cred = kauth_cred_proc_ref(p);

	/* A fake historical pcred */
	ep->e_pcred.p_ruid = my_cred->cr_ruid;
	ep->e_pcred.p_svuid = my_cred->cr_svuid;
	ep->e_pcred.p_rgid = my_cred->cr_rgid;
	ep->e_pcred.p_svgid = my_cred->cr_svgid;

	/* A fake historical *kauth_cred_t */
	ep->e_ucred.cr_ref = my_cred->cr_ref;
	ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
	ep->e_ucred.cr_ngroups = my_cred->cr_ngroups;
	bcopy(my_cred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));

	kauth_cred_unref(&my_cred);

	if (p->p_stat == SIDL || p->p_stat == SZOMB) {
		ep->e_vm.vm_tsize = 0;
		ep->e_vm.vm_dsize = 0;
		ep->e_vm.vm_ssize = 0;
	ep->e_vm.vm_rssize = 0;

	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
	    (tp = sessp->s_ttyp)) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = sessp->s_ttypgrpid;
		ep->e_tsess = CAST_USER_ADDR_T(tp->t_session);

	if (SESS_LEADER(p, sessp))
		ep->e_flag |= EPROC_SLEADER;
	bzero(&ep->e_wmesg[0], WMESGLEN+1);
	ep->e_xsize = ep->e_xrssize = 0;
	ep->e_xccount = ep->e_xswrss = 0;
	if (sessp != SESSION_NULL)
		session_rele(sessp);
	if (pg != PGRP_NULL)
/*
 * Fill in an extern_proc structure for the specified process.
 */
fill_externproc(proc_t p, struct extern_proc *exp)
	exp->p_forw = exp->p_back = NULL;
	exp->p_starttime = p->p_start;
	exp->p_vmspace = NULL;
	exp->p_sigacts = p->p_sigacts;
	exp->p_flag  = p->p_flag;
	if (p->p_lflag & P_LTRACED)
		exp->p_flag |= P_TRACED;
	if (p->p_lflag & P_LPPWAIT)
		exp->p_flag |= P_PPWAIT;
	if (p->p_lflag & P_LEXIT)
		exp->p_flag |= P_WEXIT;
	exp->p_stat = p->p_stat ;
	exp->p_pid = p->p_pid ;
	exp->p_oppid = p->p_oppid ;
	exp->user_stack = CAST_DOWN(caddr_t, p->user_stack);
	exp->exit_thread = p->exit_thread ;
	exp->p_debugger = p->p_debugger ;
	exp->sigwait = p->sigwait ;
#ifdef _PROC_HAS_SCHEDINFO_
	exp->p_estcpu = p->p_estcpu ;
	exp->p_pctcpu = p->p_pctcpu ;
	exp->p_slptime = p->p_slptime ;
	exp->p_slptime = 0 ;
	exp->p_cpticks = 0 ;
	bcopy(&p->p_realtimer, &exp->p_realtimer, sizeof(struct itimerval));
	bcopy(&p->p_rtime, &exp->p_rtime, sizeof(struct timeval));
	exp->p_traceflag = 0;
	exp->p_siglist = 0 ;	/* No longer relevant */
	exp->p_textvp = p->p_textvp ;
	exp->p_holdcnt = 0 ;
	exp->p_sigmask = 0 ;	/* no longer available */
	exp->p_sigignore = p->p_sigignore ;
	exp->p_sigcatch = p->p_sigcatch ;
	exp->p_priority = p->p_priority ;
	exp->p_nice = p->p_nice ;
	bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
	exp->p_comm[MAXCOMLEN] = '\0';
	exp->p_pgrp = p->p_pgrp ;
	exp->p_xstat = p->p_xstat ;
	exp->p_acflag = p->p_acflag ;
	exp->p_ru = p->p_ru ;		/* XXX may be NULL */

/*
 * Fill in an LP64 version of extern_proc structure for the specified process.
 */
fill_user_externproc(proc_t p, struct user_extern_proc *exp)
	exp->p_forw = exp->p_back = USER_ADDR_NULL;
	exp->p_starttime.tv_sec = p->p_start.tv_sec;
	exp->p_starttime.tv_usec = p->p_start.tv_usec;
	exp->p_vmspace = USER_ADDR_NULL;
	exp->p_sigacts = CAST_USER_ADDR_T(p->p_sigacts);
	exp->p_flag  = p->p_flag;
	if (p->p_lflag & P_LTRACED)
		exp->p_flag |= P_TRACED;
	if (p->p_lflag & P_LPPWAIT)
		exp->p_flag |= P_PPWAIT;
	if (p->p_lflag & P_LEXIT)
		exp->p_flag |= P_WEXIT;
	exp->p_stat = p->p_stat ;
	exp->p_pid = p->p_pid ;
	exp->p_oppid = p->p_oppid ;
	exp->user_stack = p->user_stack;
	exp->exit_thread = CAST_USER_ADDR_T(p->exit_thread);
	exp->p_debugger = p->p_debugger ;
	exp->sigwait = p->sigwait ;
#ifdef _PROC_HAS_SCHEDINFO_
	exp->p_estcpu = p->p_estcpu ;
	exp->p_pctcpu = p->p_pctcpu ;
	exp->p_slptime = p->p_slptime ;
	exp->p_slptime = 0 ;
	exp->p_cpticks = 0 ;
	exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
	exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;
	exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
	exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;
	exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
	exp->p_rtime.tv_usec = p->p_rtime.tv_usec;
	exp->p_traceflag = 0 ;
	exp->p_siglist = 0 ;	/* No longer relevant */
	exp->p_textvp = CAST_USER_ADDR_T(p->p_textvp);
	exp->p_holdcnt = 0 ;
	exp->p_sigmask = 0 ;	/* no longer available */
	exp->p_sigignore = p->p_sigignore ;
	exp->p_sigcatch = p->p_sigcatch ;
	exp->p_priority = p->p_priority ;
	exp->p_nice = p->p_nice ;
	bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
	exp->p_comm[MAXCOMLEN] = '\0';
	exp->p_pgrp = CAST_USER_ADDR_T(p->p_pgrp);
	exp->p_addr = USER_ADDR_NULL;
	exp->p_xstat = p->p_xstat ;
	exp->p_acflag = p->p_acflag ;
	exp->p_ru = CAST_USER_ADDR_T(p->p_ru);		/* XXX may be NULL */
fill_proc(proc_t p, struct kinfo_proc *kp)
	fill_externproc(p, &kp->kp_proc);
	fill_eproc(p, &kp->kp_eproc);

fill_user_proc(proc_t p, struct user_kinfo_proc *kp)
	fill_user_externproc(p, &kp->kp_proc);
	fill_user_eproc(p, &kp->kp_eproc);

kdebug_ops(int *name, u_int namelen, user_addr_t where,
	size_t *sizep, proc_t p)
	ret = suser(kauth_cred_get(), &p->p_acflag);

	case KERN_KDSETRTCDEC:
	case KERN_KDGETENTROPY:
		ret = kdbg_control(name, namelen, where, sizep);
/*
 * Return the top *sizep bytes of the user stack, or the entire area of the
 * user stack down through the saved exec_path, whichever is smaller.
 */
sysctl_procargs(int *name, u_int namelen, user_addr_t where,
	size_t *sizep, proc_t cur_proc)
	return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 0);

sysctl_procargs2(int *name, u_int namelen, user_addr_t where,
	size_t *sizep, proc_t cur_proc)
	return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 1);
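/*
 * Illustrative user-space sketch (not part of the original file): how the
 * KERN_PROCARGS2 data produced by the function above is typically consumed.
 * The saved argc comes back as the first int, followed by the raw argument
 * area copied from the top of the target's stack (executable path first).
 * "print_procargs2" is a hypothetical helper; error handling is minimal.
 */
#if 0	/* example only -- user-space code, not kernel code */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void
print_procargs2(pid_t pid)
{
	int mib[3] = { CTL_KERN, KERN_PROCARGS2, pid };
	size_t size = 0;

	/* A NULL buffer asks the kernel how much space is needed (see the
	 * "where == USER_ADDR_NULL" branch in sysctl_procargsx below). */
	if (sysctl(mib, 3, NULL, &size, NULL, 0) != 0)
		return;

	char *buf = malloc(size);
	if (buf == NULL || sysctl(mib, 3, buf, &size, NULL, 0) != 0) {
		free(buf);
		return;
	}

	int argc;
	memcpy(&argc, buf, sizeof(argc));
	printf("argc = %d, exec path = %s\n", argc, buf + sizeof(argc));
	free(buf);
}
#endif	/* example only */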
sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
	size_t *sizep, proc_t cur_proc, int argc_yes)
	int buflen = where != USER_ADDR_NULL ? *sizep : 0;
	struct _vm_map *proc_map;
	user_addr_t	arg_addr;
	vm_offset_t	copy_start, copy_end;
	kauth_cred_t my_cred;

		buflen -= sizeof(int);		/* reserve first word to return argc */

	/* we only care about buflen when where (oldp from sysctl) is not NULL. */
	/* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl) */
	/* is not NULL then the caller wants us to return the length needed to */
	/* hold the data we would return */
	if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {

	/*
	 * Lookup process by pid
	 */

	/*
	 * Copy the top N bytes of the stack.
	 * On all machines we have so far, the stack grows
	 * If the user expects no more than N bytes of
	 * argument list, use that as a guess for the
	 */

	if (!p->user_stack) {

	if (where == USER_ADDR_NULL) {
		/* caller only wants to know length of proc args data */
		if (sizep == NULL) {

		size = p->p_argslen;
			size += sizeof(int);
			/*
			 * old PROCARGS will return the executable's path plus some
			 * extra space for work alignment and data tags
			 */
			size += PATH_MAX + (6 * sizeof(int));
		size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;

	my_cred = kauth_cred_proc_ref(p);
	uid = kauth_cred_getuid(my_cred);
	kauth_cred_unref(&my_cred);

	if ((uid != kauth_cred_getuid(kauth_cred_get()))
	    && suser(kauth_cred_get(), &cur_proc->p_acflag)) {

	if ((u_int)arg_size > p->p_argslen)
		arg_size = round_page(p->p_argslen);

	arg_addr = p->user_stack - arg_size;

	/*
	 * Before we can block (any VM code), make another
	 * reference to the map to keep it alive.  We do
	 * that by getting a reference on the task itself.
	 */

	argslen = p->p_argslen;
	/*
	 * Once we have a task reference we can convert that into a
	 * map reference, which we will use in the calls below.  The
	 * task/process may change its map after we take this reference
	 * (see execve), but the worst that will happen then is a return
	 * of stale info (which is always a possibility).
	 */
	task_reference(task);
	proc_map = get_task_map_reference(task);
	task_deallocate(task);

	if (proc_map == NULL)

	ret = kmem_alloc(kernel_map, &copy_start, round_page(arg_size));
	if (ret != KERN_SUCCESS) {
		vm_map_deallocate(proc_map);

	copy_end = round_page(copy_start + arg_size);

	if( vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
	    (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
			vm_map_deallocate(proc_map);
			kmem_free(kernel_map, copy_start,
			    round_page(arg_size));

	/*
	 * Now that we've done the copyin from the process'
	 * map, we can release the reference to it.
	 */
	vm_map_deallocate(proc_map);

	if( vm_map_copy_overwrite(kernel_map,
	    (vm_map_address_t)copy_start,
	    tmp, FALSE) != KERN_SUCCESS) {
			kmem_free(kernel_map, copy_start,
			    round_page(arg_size));

	if (arg_size > argslen) {
		data = (caddr_t) (copy_end - argslen);
		data = (caddr_t) (copy_end - arg_size);

		/* Put processes argc as the first word in the copyout buffer */
		suword(where, p->p_argc);
		error = copyout(data, (where + sizeof(int)), size);
		size += sizeof(int);
		error = copyout(data, where, size);

	/*
	 * Make the old PROCARGS work to return the executable's path
	 * But, only if there is enough space in the provided buffer
	 *
	 * on entry: data [possibly] points to the beginning of the path
	 *
	 * Note: we keep all pointers & sizes aligned to word boundaries
	 */
	if ( (! error) && (buflen > 0 && (u_int)buflen > argslen) )
		int binPath_sz, alignedBinPath_sz = 0;
		int extraSpaceNeeded, addThis;
		user_addr_t placeHere;
		char * str = (char *) data;

		/* Some apps are really bad about messing up their stacks
		   So, we have to be extra careful about getting the length
		   of the executing binary.  If we encounter an error, we bail. */

		/* Limit ourselves to PATH_MAX paths */
		if ( max_len > PATH_MAX ) max_len = PATH_MAX;

		while ( (binPath_sz < max_len-1) && (*str++ != 0) )

		/* If we have a NUL terminator, copy it, too */
		if (binPath_sz < max_len-1) binPath_sz += 1;

		/* Pre-flight the space requirements */

		/* Account for the padding that fills out binPath to the next word */
		alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;

		placeHere = where + size;

		/* Account for the bytes needed to keep placeHere word aligned */
		addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;

		/* Add up all the space that is needed */
		extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));

		/* is there room to tack on argv[0]? */
		if ( (buflen & ~(sizeof(int)-1)) >= ( argslen + extraSpaceNeeded ))
			placeHere += addThis;
			suword(placeHere, 0);
			placeHere += sizeof(int);
			suword(placeHere, 0xBFFF0000);
			placeHere += sizeof(int);
			suword(placeHere, 0);
			placeHere += sizeof(int);
			error = copyout(data, placeHere, binPath_sz);
				placeHere += binPath_sz;
				suword(placeHere, 0);
				size += extraSpaceNeeded;

	if (copy_start != (vm_offset_t) 0) {
		kmem_free(kernel_map, copy_start, copy_end - copy_start);

	if (where != USER_ADDR_NULL)
/*
 * Max number of concurrent aio requests
 */
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	int new_value, changed;
	int error = sysctl_io_number(req, aio_max_requests, sizeof(int), &new_value, &changed);
		/* make sure the system-wide limit is greater than the per process limit */
		if (new_value >= aio_max_requests_per_process)
			aio_max_requests = new_value;

/*
 * Max number of concurrent aio requests per process
 */
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	int new_value, changed;
	int error = sysctl_io_number(req, aio_max_requests_per_process, sizeof(int), &new_value, &changed);
		/* make sure per process limit is less than the system-wide limit */
		if (new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX)
			aio_max_requests_per_process = new_value;

/*
 * Max number of async IO worker threads
 */
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	int new_value, changed;
	int error = sysctl_io_number(req, aio_worker_threads, sizeof(int), &new_value, &changed);
		/* we only allow an increase in the number of worker threads */
		if (new_value > aio_worker_threads) {
			_aio_create_worker_threads((new_value - aio_worker_threads));
			aio_worker_threads = new_value;

/*
 * System-wide limit on the max number of processes
 */
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	int new_value, changed;
	int error = sysctl_io_number(req, maxproc, sizeof(int), &new_value, &changed);
		AUDIT_ARG(value, new_value);
		/* make sure the system-wide limit is less than the configured hard
		   limit set at kernel compilation */
		if (new_value <= hard_maxproc && new_value > 0)
			maxproc = new_value;

SYSCTL_STRING(_kern, KERN_OSTYPE, ostype,
	CTLFLAG_RD | CTLFLAG_KERN,
SYSCTL_STRING(_kern, KERN_OSRELEASE, osrelease,
	CTLFLAG_RD | CTLFLAG_KERN,
SYSCTL_INT(_kern, KERN_OSREV, osrevision,
	CTLFLAG_RD | CTLFLAG_KERN,
SYSCTL_STRING(_kern, KERN_VERSION, version,
	CTLFLAG_RD | CTLFLAG_KERN,

/* PR-5293665: need to use a callback function for kern.osversion to set
 * osversion in IORegistry */
sysctl_osversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
	rval = sysctl_handle_string(oidp, arg1, arg2, req);
		IORegistrySetOSBuildVersion((char *)arg1);

SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
	CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING,
	osversion, 256 /* OSVERSIZE*/,
	sysctl_osversion, "A", "");

sysctl_sysctl_bootargs
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	strlcpy(buf, PE_boot_args(), 256);
	error = sysctl_io_string(req, buf, 256, 0, NULL);

SYSCTL_PROC(_kern, OID_AUTO, bootargs,
	CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING,
	sysctl_sysctl_bootargs, "A", "bootargs");

SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles,
	CTLFLAG_RW | CTLFLAG_KERN,
SYSCTL_INT(_kern, KERN_ARGMAX, argmax,
	CTLFLAG_RD | CTLFLAG_KERN,
SYSCTL_INT(_kern, KERN_POSIX1, posix1version,
	CTLFLAG_RD | CTLFLAG_KERN,
	NULL, _POSIX_VERSION, "");
SYSCTL_INT(_kern, KERN_NGROUPS, ngroups,
	CTLFLAG_RD | CTLFLAG_KERN,
	NULL, NGROUPS_MAX, "");
SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control,
	CTLFLAG_RD | CTLFLAG_KERN,
#if 1	/* _POSIX_SAVED_IDS from <unistd.h> */
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
	CTLFLAG_RD | CTLFLAG_KERN,
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
	CTLFLAG_RD | CTLFLAG_KERN,

sysctl_maxvnodes (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	unsigned int oldval = desiredvnodes;
	int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &desiredvnodes, NULL);
	reset_vmobjectcache(oldval, desiredvnodes);
	resize_namecache(desiredvnodes);

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_maxvnodes, "I", "");

SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_maxproc, "I", "");

SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_aiomax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_aioprocmax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_aiothreads, "I", "");
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	int new_value, changed;
	int error = sysctl_io_number(req, securelevel, sizeof(int), &new_value, &changed);
		if (!(new_value < securelevel && req->p->p_pid != 1)) {
			securelevel = new_value;

SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_securelvl, "I", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	error = sysctl_io_string(req, domainname, sizeof(domainname), 0, &changed);
		domainnamelen = strlen(domainname);

SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
	CTLTYPE_STRING | CTLFLAG_RW,
	0, 0, sysctl_domainname, "A", "");

SYSCTL_INT(_kern, KERN_HOSTID, hostid,
	CTLFLAG_RW | CTLFLAG_KERN,

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	error = sysctl_io_string(req, hostname, sizeof(hostname), 1, &changed);
		hostnamelen = req->newlen;

SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
	CTLTYPE_STRING | CTLFLAG_RW,
	0, 0, sysctl_hostname, "A", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	/* Original code allowed writing, I'm copying this, although this all makes
	   no sense to me. Besides, this sysctl is never used. */
	return sysctl_io_string(req, &req->p->p_name[0], (2*MAXCOMLEN+1), 1, NULL);

SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
	CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY,
	0, 0, sysctl_procname, "A", "");

SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
	CTLFLAG_RW | CTLFLAG_KERN,
	&speculative_reads_disabled, 0, "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	t.tv_sec = boottime_sec();
	return sysctl_io_opaque(req, &t, sizeof(t), NULL);

SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
	CTLTYPE_STRUCT | CTLFLAG_RD,
	0, 0, sysctl_boottime, "S,timeval", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	int error = get_kernel_symfile(req->p, &str);
	return sysctl_io_string(req, str, 0, 0, NULL);

SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
	CTLTYPE_STRING | CTLFLAG_RD,
	0, 0, sysctl_symfile, "A", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	return sysctl_io_number(req, netboot_root(), sizeof(int), NULL, NULL);

SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
	CTLTYPE_INT | CTLFLAG_RD,
	0, 0, sysctl_netboot, "I", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	return sysctl_io_number(req, (int)req->p->user_stack, sizeof(int), NULL, NULL);

SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack,
	CTLTYPE_INT | CTLFLAG_RD,
	0, 0, sysctl_usrstack, "I", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	return sysctl_io_number(req, req->p->user_stack, sizeof(req->p->user_stack), NULL, NULL);

SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
	CTLTYPE_QUAD | CTLFLAG_RD,
	0, 0, sysctl_usrstack64, "Q", "");

SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
	CTLFLAG_RW | CTLFLAG_KERN,
	corefilename, sizeof(corefilename), "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
#ifdef SECURE_KERNEL
	int new_value, changed;
	int error = sysctl_io_number(req, do_coredump, sizeof(int), &new_value, &changed);
		if ((new_value == 0) || (new_value == 1))
			do_coredump = new_value;

SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_coredump, "I", "");

sysctl_suid_coredump
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
#ifdef SECURE_KERNEL
	int new_value, changed;
	int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed);
		if ((new_value == 0) || (new_value == 1))
			sugid_coredump = new_value;

SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_suid_coredump, "I", "");
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	struct proc *p = req->p;
	int new_value, changed;
	int error = sysctl_io_number(req, (req->p->p_lflag & P_LDELAYTERM)? 1: 0, sizeof(int), &new_value, &changed);
			req->p->p_lflag |=  P_LDELAYTERM;
			req->p->p_lflag &= ~P_LDELAYTERM;

SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_delayterm, "I", "");

sysctl_proc_low_pri_io
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	struct proc *p = req->p;
	int new_value, old_value, changed;

	switch (req->p->p_iopol_disk) {
	case IOPOL_THROTTLE:
		/* this should never happen, but to be robust, return the default value */

	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
		if (new_value & 0x01)
			req->p->p_iopol_disk = IOPOL_THROTTLE;
		else if (new_value & 0x02)
			req->p->p_iopol_disk = IOPOL_PASSIVE;
		else if (new_value == 0)
			req->p->p_iopol_disk = IOPOL_NORMAL;

SYSCTL_PROC(_kern, KERN_PROC_LOW_PRI_IO, proc_low_pri_io,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_proc_low_pri_io, "I", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	struct proc *p = req->p;
	int new_value, old_value, changed;

	ut = get_bsdthread_info(current_thread());

	if (ut->uu_flag & UT_RAGE_VNODES)
		old_value = KERN_RAGE_THREAD;
	else if (p->p_lflag & P_LRAGE_VNODES)
		old_value = KERN_RAGE_PROC;

	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
		switch (new_value) {
		case KERN_RAGE_PROC:
			p->p_lflag |= P_LRAGE_VNODES;
		case KERN_UNRAGE_PROC:
			p->p_lflag &= ~P_LRAGE_VNODES;
		case KERN_RAGE_THREAD:
			ut->uu_flag |= UT_RAGE_VNODES;
		case KERN_UNRAGE_THREAD:
			ut = get_bsdthread_info(current_thread());
			ut->uu_flag &= ~UT_RAGE_VNODES;

SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_rage_vnode, "I", "");

sysctl_kern_check_openevt
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	struct proc *p = req->p;
	int new_value, old_value, changed;

	if (p->p_flag & P_CHECKOPENEVT) {
		old_value = KERN_OPENEVT_PROC;

	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
		switch (new_value) {
		case KERN_OPENEVT_PROC:
			OSBitOrAtomic(P_CHECKOPENEVT, (UInt32 *)&p->p_flag);
		case KERN_UNOPENEVT_PROC:
			OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT), (UInt32 *)&p->p_flag);

SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY,
	0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
#ifdef SECURE_KERNEL
	int new_value, changed;

	error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);
		/*
		 * Only allow setting if NX is supported on the chip
		 */
		if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD))
		nx_enabled = new_value;

SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN,
	0, 0, sysctl_nx, "I", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	if (proc_is64bit(req->p)) {
		struct user_loadavg loadinfo64;
		loadavg32to64(&averunnable, &loadinfo64);
		return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);
		return sysctl_io_opaque(req, &averunnable, sizeof(averunnable), NULL);

SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
	CTLTYPE_STRUCT | CTLFLAG_RD,
	0, 0, sysctl_loadavg, "S,loadavg", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	uint64_t swap_total;
	uint64_t swap_avail;
	uint32_t swap_pagesize;
	boolean_t swap_encrypted;
	struct xsw_usage xsu;

	error = macx_swapinfo(&swap_total,

	xsu.xsu_total = swap_total;
	xsu.xsu_avail = swap_avail;
	xsu.xsu_used = swap_total - swap_avail;
	xsu.xsu_pagesize = swap_pagesize;
	xsu.xsu_encrypted = swap_encrypted;
	return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL);

SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
	CTLTYPE_STRUCT | CTLFLAG_RD,
	0, 0, sysctl_swapusage, "S,xsw_usage", "");

/* this kernel does NOT implement shared_region_make_private_np() */
SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
sysctl_sysctl_exec_affinity(__unused struct sysctl_oid *oidp,
	__unused void *arg1, __unused int arg2,
	struct sysctl_req *req)
	proc_t cur_proc = req->p;

	if (req->oldptr != USER_ADDR_NULL) {
		cpu_type_t oldcputype = (cur_proc->p_flag & P_AFFINITY) ? CPU_TYPE_POWERPC : CPU_TYPE_I386;
		if ((error = SYSCTL_OUT(req, &oldcputype, sizeof(oldcputype))))

	if (req->newptr != USER_ADDR_NULL) {
		cpu_type_t newcputype;
		if ((error = SYSCTL_IN(req, &newcputype, sizeof(newcputype))))
		if (newcputype == CPU_TYPE_I386)
			OSBitAndAtomic(~((uint32_t)P_AFFINITY), (UInt32 *)&cur_proc->p_flag);
		else if (newcputype == CPU_TYPE_POWERPC)
			OSBitOrAtomic(P_AFFINITY, (UInt32 *)&cur_proc->p_flag);

SYSCTL_PROC(_sysctl, OID_AUTO, proc_exec_affinity, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, 0, 0, sysctl_sysctl_exec_affinity ,"I","proc_exec_affinity");

fetch_process_cputype(
	cpu_type_t *cputype)
	proc_t p = PROC_NULL;

	else if (namelen == 1) {
		p = proc_find(name[0]);

	if (p->p_flag & P_TRANSLATED) {
		ret = CPU_TYPE_POWERPC;

	if (IS_64BIT_PROCESS(p))
		ret |= CPU_ARCH_ABI64;

sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
	struct sysctl_req *req)
	cpu_type_t proc_cputype = 0;
	if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
	if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
	return SYSCTL_OUT(req, &res, sizeof(res));

SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_native ,"I","proc_native");

sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
	struct sysctl_req *req)
	cpu_type_t proc_cputype = 0;
	if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
	return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));

SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_cputype ,"I","proc_cputype");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	return sysctl_io_number(req, boothowto & RB_SAFEBOOT ? 1 : 0, sizeof(int), NULL, NULL);

SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
	CTLTYPE_INT | CTLFLAG_RD,
	0, 0, sysctl_safeboot, "I", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	return sysctl_io_number(req, boothowto & RB_SINGLE ? 1 : 0, sizeof(int), NULL, NULL);

SYSCTL_PROC(_kern, OID_AUTO, singleuser,
	CTLTYPE_INT | CTLFLAG_RD,
	0, 0, sysctl_singleuser, "I", "");

/*
 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
 */
extern boolean_t	affinity_sets_enabled;
extern int		affinity_sets_mapping;

SYSCTL_INT (_kern, OID_AUTO, affinity_sets_enabled,
	CTLFLAG_RW, (int *) &affinity_sets_enabled, 0, "hinting enabled");
SYSCTL_INT (_kern, OID_AUTO, affinity_sets_mapping,
	CTLFLAG_RW, &affinity_sets_mapping, 0, "mapping policy");

/*
 * Limit on total memory users can wire.
 *
 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
 *
 * vm_user_wire_limit - per address space limit on wired memory.  This puts a cap on the process's rlimit value.
 *
 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
 *
 * All values are in bytes.
 */

vm_map_size_t	vm_global_user_wire_limit;
vm_map_size_t	vm_user_wire_limit;

SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW, &vm_global_user_wire_limit, "");
SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW, &vm_user_wire_limit, "");