/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Mike Karels at Berkeley Software Design, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sysctl.c	8.4 (Berkeley) 4/14/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

/*
 * DEPRECATED sysctl system call code
 *
 * Everything in this file is deprecated. Sysctls should be handled
 * by the code in kern_newsysctl.c.
 * The remaining "case" sections are supposed to be converted into
 * SYSCTL_*-style definitions, and as soon as all of them are gone,
 * this source file is supposed to die.
 *
 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
 */
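/*
 * Illustrative sketch only (not part of the original file, never compiled):
 * the kind of SYSCTL_INT / SYSCTL_PROC definition the note above asks new
 * code to use instead of adding "case" sections here.  The names
 * "example_value", "sysctl_example", "example" and "example_proc" are
 * hypothetical.
 */
#if 0
static int example_value = 0;

/* Simple integer tunable published as kern.example. */
SYSCTL_INT(_kern, OID_AUTO, example,
	CTLFLAG_RW,
	&example_value, 0, "example tunable");

/* Callback-backed variant using the new-style request helpers. */
static int
sysctl_example
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int error = sysctl_io_number(req, example_value, sizeof(int),
	    &new_value, &changed);
	if (error == 0 && changed)
		example_value = new_value;
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, example_proc,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_example, "I", "");
#endif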
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/file_internal.h>
#include <sys/vnode_internal.h>
#include <sys/unistd.h>
#include <sys/ioctl.h>
#include <sys/namei.h>
#include <sys/disklabel.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <sys/aio_kern.h>
#include <sys/reboot.h>

#include <bsm/audit_kernel.h>

#include <mach/machine.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <kern/task.h>
#include <kern/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <mach/host_info.h>

#include <sys/mount_internal.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>

#include <IOKit/IOPlatformExpert.h>
#include <pexpert/pexpert.h>

#include <machine/machine_routines.h>
#include <machine/exec.h>

#include <vm/vm_protos.h>

#include <i386/cpuid.h>
sysctlfn kern_sysctl;
sysctlfn debug_sysctl;
extern sysctlfn net_sysctl;
extern sysctlfn cpu_sysctl;
extern int aio_max_requests;
extern int aio_max_requests_per_process;
extern int aio_worker_threads;
extern int lowpri_IO_window_msecs;
extern int lowpri_IO_delay_msecs;
extern int nx_enabled;
extern int speculative_reads_disabled;

fill_eproc(proc_t p, struct eproc *ep);
fill_externproc(proc_t p, struct extern_proc *exp);
fill_user_eproc(proc_t p, struct user_eproc *ep);
fill_user_proc(proc_t p, struct user_kinfo_proc *kp);
fill_user_externproc(proc_t p, struct user_extern_proc *exp);
kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep);
kdebug_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep, proc_t p);
pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
              proc_t p);
__private_extern__ kern_return_t
reset_vmobjectcache(unsigned int val1, unsigned int val2);
sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep);
sysctl_doprof(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
              user_addr_t newp, size_t newlen);
fill_proc(proc_t p, struct kinfo_proc *kp);
sysctl_procargs(int *name, u_int namelen, user_addr_t where,
                size_t *sizep, proc_t cur_proc);
sysctl_procargs2(int *name, u_int namelen, user_addr_t where, size_t *sizep,
                 proc_t cur_proc);
sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
                 proc_t cur_proc, int argc_yes);
sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
              size_t newlen, void *sp, int len);

static int sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg);
int sysdoproc_callback(proc_t p, void *arg);

static int __sysctl_funneled(proc_t p, struct __sysctl_args *uap, register_t *retval);

extern void IORegistrySetOSBuildVersion(char * build_version);

loadavg32to64(struct loadavg *la32, struct user_loadavg *la64)
{
    la64->ldavg[0] = la32->ldavg[0];
    la64->ldavg[1] = la32->ldavg[1];
    la64->ldavg[2] = la32->ldavg[2];
    la64->fscale = (user_long_t)la32->fscale;
}

static struct sysctl_lock memlock;
/* sysctl() syscall */
__sysctl(proc_t p, struct __sysctl_args *uap, register_t *retval)
{
    boolean_t funnel_state;
    int error;

    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    error = __sysctl_funneled(p, uap, retval);
    thread_funnel_set(kernel_flock, funnel_state);
    return (error);
}
__sysctl_funneled(proc_t p, struct __sysctl_args *uap, __unused register_t *retval)
    int error, dolock = 1;
    size_t savelen = 0, oldlen = 0, newlen;
    sysctlfn *fnp = NULL;
    int name[CTL_MAXNAME];
    boolean_t memlock_taken = FALSE;
    boolean_t vslock_taken = FALSE;
    kauth_cred_t my_cred;

    /*
     * all top-level sysctl names are non-terminal
     */
    if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)
    error = copyin(uap->name, &name[0], uap->namelen * sizeof(int));

    AUDIT_ARG(ctlname, name, uap->namelen);

    if (proc_is64bit(p)) {
        /* uap->newlen is a size_t value which grows to 64 bits
         * when coming from a 64-bit process.  since it's doubtful we'll
         * have a sysctl newp buffer greater than 4GB we shrink it to size_t
         */
        newlen = CAST_DOWN(size_t, uap->newlen);
        newlen = uap->newlen;

    /* CTL_UNSPEC is used to get oid to AUTO_OID */
    if (uap->new != USER_ADDR_NULL
        && ((name[0] == CTL_KERN
        && !(name[1] == KERN_IPC || name[1] == KERN_PANICINFO || name[1] == KERN_PROCDELAYTERM ||
             name[1] == KERN_PROC_LOW_PRI_IO || name[1] == KERN_PROCNAME || name[1] == KERN_RAGEVNODE || name[1] == KERN_CHECKOPENEVT))
        || (name[0] == CTL_HW)
        || (name[0] == CTL_VM))
        && (error = suser(kauth_cred_get(), &p->p_acflag)))

    /* XXX: KERN, VFS and DEBUG are handled by their respective functions,
     * but there is a fallback for all sysctls other than VFS to
     * userland_sysctl() - KILL THIS! */
    if ((name[1] != KERN_VNODE) && (name[1] != KERN_FILE)
        && (name[1] != KERN_PROC))

    if (uap->oldlenp != USER_ADDR_NULL) {
        uint64_t oldlen64 = fuulong(uap->oldlenp);

        oldlen = CAST_DOWN(size_t, oldlen64);
        /*
         * If more than 4G, clamp to 4G - useracc() below will catch
         * with an EFAULT, if it's actually necessary.
         */
        if (oldlen64 > 0x00000000ffffffffULL)
            oldlen = 0xffffffffUL;

    if (uap->old != USER_ADDR_NULL) {
        if (!useracc(uap->old, (user_size_t)oldlen, B_WRITE))
        /*
         * The kernel debug mechanism does not need to take this lock, and
         * we don't grab the memlock around calls to KERN_PROC because it is reentrant.
         * Grabbing the lock for a KERN_PROC sysctl makes a deadlock possible 5024049.
         */
        if (!((name[1] == KERN_KDEBUG) && (name[2] == KERN_KDGETENTROPY)) &&
            !(name[1] == KERN_PROC)) {
            memlock_taken = TRUE;

        if (dolock && oldlen) {
            if ((error = vslock(uap->old, (user_size_t)oldlen))) {
                if (memlock_taken == TRUE)

    my_cred = kauth_cred_proc_ref(p);
    error = mac_system_check_sysctl(
        fnp == kern_sysctl ? 1 : 0,
    kauth_cred_unref(&my_cred);

    error = (*fnp)(name + 1, uap->namelen - 1, uap->old,
                   &oldlen, uap->new, newlen, p);

    if (vslock_taken == TRUE) {
        error1 = vsunlock(uap->old, (user_size_t)savelen, B_WRITE);
    if (memlock_taken == TRUE)

    if ( (name[0] != CTL_VFS) && (error == ENOTSUP)) {
        boolean_t funnel_state;

        /*
         * Drop the funnel when calling new sysctl code, which will conditionally
         * grab the funnel if it really needs to.
         */
        funnel_state = thread_funnel_set(kernel_flock, FALSE);

        error = userland_sysctl(p, name, uap->namelen, uap->old, &tmp,
            1, uap->new, newlen, &oldlen);

        thread_funnel_set(kernel_flock, funnel_state);

    if ((error) && (error != ENOMEM))

    if (uap->oldlenp != USER_ADDR_NULL)
        error = suulong(uap->oldlenp, oldlen);
/*
 * Attributes stored in the kernel.
 */
__private_extern__ char corefilename[MAXPATHLEN+1];
__private_extern__ int do_coredump;
__private_extern__ int sugid_coredump;

__private_extern__ int do_count_syscalls;

int securelevel = -1;

    __unused size_t newSize,
    if (name[0] == 0 && 1 == namelen) {
        return sysctl_rdint(oldBuf, oldSize, newBuf,
            (cur_proc->p_flag & P_AFFINITY) ? 1 : 0);
    } else if (name[0] == 1 && 2 == namelen) {
        OSBitAndAtomic(~((uint32_t)P_AFFINITY), (UInt32 *)&cur_proc->p_flag);
        OSBitOrAtomic(P_AFFINITY, (UInt32 *)&cur_proc->p_flag);

    __unused size_t newSize,
    int istranslated = 0;
    kauth_cred_t my_cred;

    p = proc_find(name[0]);
    my_cred = kauth_cred_proc_ref(p);
    uid = kauth_cred_getuid(my_cred);
    kauth_cred_unref(&my_cred);
    if ((uid != kauth_cred_getuid(kauth_cred_get()))
        && suser(kauth_cred_get(), &cur_proc->p_acflag)) {

    istranslated = (p->p_flag & P_TRANSLATED);
    return sysctl_rdint(oldBuf, oldSize, newBuf,
        (istranslated != 0) ? 1 : 0);
set_archhandler(__unused proc_t p, int arch)
    struct vnode_attr va;
    vfs_context_t ctx = vfs_context_current();
    struct exec_archhandler *archhandler;

    case CPU_TYPE_POWERPC:
        archhandler = &exec_archhandler_ppc;

    NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE32,
        CAST_USER_ADDR_T(archhandler->path), ctx);

    /* Check mount point */
    if ((nd.ni_vp->v_mount->mnt_flag & MNT_NOEXEC) ||
        (nd.ni_vp->v_type != VREG)) {

    VATTR_WANTED(&va, va_fsid);
    VATTR_WANTED(&va, va_fileid);
    error = vnode_getattr(nd.ni_vp, &va, ctx);

    archhandler->fsid = va.va_fsid;
    archhandler->fileid = (u_long)va.va_fileid;

/* XXX remove once Rosetta is rev'ed */
/*****************************************************************************/
sysctl_exec_archhandler_ppc(
    __unused u_int namelen,
    char handler[sizeof(exec_archhandler_ppc.path)];
    vfs_context_t ctx = vfs_context_current();

    len = strlen(exec_archhandler_ppc.path) + 1;
    error = copyout(exec_archhandler_ppc.path, oldBuf, len);
    error = suser(vfs_context_ucred(ctx), &p->p_acflag);
    if (newSize >= sizeof(exec_archhandler_ppc.path))
        return (ENAMETOOLONG);
    error = copyin(newBuf, handler, newSize);
    handler[newSize] = 0;
    strlcpy(exec_archhandler_ppc.path, handler, MAXPATHLEN);
    error = set_archhandler(p, CPU_TYPE_POWERPC);
/*****************************************************************************/

sysctl_handle_exec_archhandler_ppc(struct sysctl_oid *oidp, void *arg1,
    int arg2, struct sysctl_req *req)
    error = sysctl_handle_string(oidp, arg1, arg2, req);
    error = set_archhandler(req->p, CPU_TYPE_POWERPC);

SYSCTL_NODE(_kern, KERN_EXEC, exec, CTLFLAG_RD|CTLFLAG_LOCKED, 0, "");

SYSCTL_NODE(_kern_exec, OID_AUTO, archhandler, CTLFLAG_RD|CTLFLAG_LOCKED, 0, "");

SYSCTL_PROC(_kern_exec_archhandler, OID_AUTO, powerpc,
    CTLTYPE_STRING | CTLFLAG_RW, exec_archhandler_ppc.path, 0,
    sysctl_handle_exec_archhandler_ppc, "A", "");

extern int get_kernel_symfile(proc_t, char **);
__private_extern__ int
sysctl_dopanicinfo(int *, u_int, user_addr_t, size_t *, user_addr_t,
    size_t, proc_t);
/*
 * kernel related system variables.
 */
kern_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
    user_addr_t newp, size_t newlen, proc_t p)
    /* all sysctl names not listed below are terminal at this level */
        && !(name[0] == KERN_PROC
          || name[0] == KERN_PROF
          || name[0] == KERN_KDEBUG
          || name[0] == KERN_PROCARGS
          || name[0] == KERN_PROCARGS2
          || name[0] == KERN_IPC
          || name[0] == KERN_SYSV
          || name[0] == KERN_AFFINITY
          || name[0] == KERN_TRANSLATE
          || name[0] == KERN_EXEC
          || name[0] == KERN_PANICINFO
          || name[0] == KERN_POSIX
          || name[0] == KERN_TFP
          || name[0] == KERN_TTY
          || name[0] == KERN_LCTX
        return (ENOTDIR);	/* overloaded */

        return (sysctl_doproc(name + 1, namelen - 1, oldp, oldlenp));

        return (sysctl_doprof(name + 1, namelen - 1, oldp, oldlenp,

        return (kdebug_ops(name + 1, namelen - 1, oldp, oldlenp, p));

        /* new one as it does not use kinfo_proc */
        return (sysctl_procargs(name + 1, namelen - 1, oldp, oldlenp, p));

        /* new one as it does not use kinfo_proc */
        return (sysctl_procargs2(name + 1, namelen - 1, oldp, oldlenp, p));

        return (sysctl_dopanicinfo(name + 1, namelen - 1, oldp, oldlenp,

        return sysctl_affinity(name+1, namelen-1, oldp, oldlenp,

        return sysctl_translate(name+1, namelen-1, oldp, oldlenp, newp,

        /* XXX remove once Rosetta has rev'ed */
        return sysctl_exec_archhandler_ppc(name+1, namelen-1, oldp,
            oldlenp, newp, newlen, p);

    case KERN_COUNT_SYSCALLS:
        /* valid values passed in:
         * = 0 means don't keep called counts for each bsd syscall
         * > 0 means keep called counts for each bsd syscall
         * = 2 means dump current counts to the system log
         * = 3 means reset all counts
         * for example, to dump current counts:
         * sysctl -w kern.count_calls=2
         */
        error = sysctl_int(oldp, oldlenp, newp, newlen, &tmp);

        do_count_syscalls = 1;
        else if ( tmp == 0 || tmp == 2 || tmp == 3 ) {
            extern int syscalls_log[];
            extern const char * syscallnames[];

            for ( i = 0; i < nsysent; i++ ) {
                if ( syscalls_log[i] != 0 ) {
                    printf("%d calls - name %s \n", syscalls_log[i], syscallnames[i]);

            do_count_syscalls = 1;
/*
 * Debugging related system variables.
 */
#endif /* DIAGNOSTIC */
struct ctldebug debug0, debug1;
struct ctldebug debug2, debug3, debug4;
struct ctldebug debug5, debug6, debug7, debug8, debug9;
struct ctldebug debug10, debug11, debug12, debug13, debug14;
struct ctldebug debug15, debug16, debug17, debug18, debug19;
static struct ctldebug *debugvars[CTL_DEBUG_MAXID] = {
    &debug0, &debug1, &debug2, &debug3, &debug4,
    &debug5, &debug6, &debug7, &debug8, &debug9,
    &debug10, &debug11, &debug12, &debug13, &debug14,
    &debug15, &debug16, &debug17, &debug18, &debug19,
};

debug_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
    user_addr_t newp, size_t newlen, __unused proc_t p)
    struct ctldebug *cdp;

    /* all sysctl names at this level are name and field */
        return (ENOTDIR);	/* overloaded */
    if (name[0] < 0 || name[0] >= CTL_DEBUG_MAXID)
    cdp = debugvars[name[0]];
    if (cdp->debugname == 0)
        return (sysctl_rdstring(oldp, oldlenp, newp, cdp->debugname));
    case CTL_DEBUG_VALUE:
        return (sysctl_int(oldp, oldlenp, newp, newlen, cdp->debugvar));

/*
 * The following sysctl_* functions should not be used
 * any more, as they can only cope with callers in
 * user mode: Use new-style
 *  sysctl_io_number()
 *  sysctl_io_string()
 *  sysctl_io_opaque()
 * instead.
 */
/*
 * Validate parameters and get old / set new parameters
 * for an integer-valued sysctl function.
 */
sysctl_int(user_addr_t oldp, size_t *oldlenp,
    user_addr_t newp, size_t newlen, int *valp)
    if (oldp != USER_ADDR_NULL && oldlenp == NULL)
    if (oldp && *oldlenp < sizeof(int))
    if (newp && newlen != sizeof(int))
    *oldlenp = sizeof(int);
    error = copyout(valp, oldp, sizeof(int));
    if (error == 0 && newp) {
        error = copyin(newp, valp, sizeof(int));
        AUDIT_ARG(value, *valp);

/*
 * As above, but read-only.
 */
sysctl_rdint(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, int val)
    if (oldp != USER_ADDR_NULL && oldlenp == NULL)
    if (oldp && *oldlenp < sizeof(int))
    *oldlenp = sizeof(int);
    error = copyout((caddr_t)&val, oldp, sizeof(int));

/*
 * Validate parameters and get old / set new parameters
 * for a quad(64bit)-valued sysctl function.
 */
sysctl_quad(user_addr_t oldp, size_t *oldlenp,
    user_addr_t newp, size_t newlen, quad_t *valp)
    if (oldp != USER_ADDR_NULL && oldlenp == NULL)
    if (oldp && *oldlenp < sizeof(quad_t))
    if (newp && newlen != sizeof(quad_t))
    *oldlenp = sizeof(quad_t);
    error = copyout(valp, oldp, sizeof(quad_t));
    if (error == 0 && newp)
        error = copyin(newp, valp, sizeof(quad_t));

/*
 * As above, but read-only.
 */
sysctl_rdquad(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, quad_t val)
    if (oldp != USER_ADDR_NULL && oldlenp == NULL)
    if (oldp && *oldlenp < sizeof(quad_t))
    *oldlenp = sizeof(quad_t);
    error = copyout((caddr_t)&val, oldp, sizeof(quad_t));

/*
 * Validate parameters and get old / set new parameters
 * for a string-valued sysctl function.  Unlike sysctl_string, if you
 * give it a too small (but larger than 0 bytes) buffer, instead of
 * returning ENOMEM, it truncates the returned string to the buffer
 * size.  This preserves the semantics of some library routines
 * implemented via sysctl, which truncate their returned data, rather
 * than simply returning an error.  The returned string is always NUL
 * terminated.
 */
sysctl_trstring(user_addr_t oldp, size_t *oldlenp,
    user_addr_t newp, size_t newlen, char *str, int maxlen)
    int len, copylen, error = 0;

    if (oldp != USER_ADDR_NULL && oldlenp == NULL)
    copylen = len = strlen(str) + 1;
    if (oldp && (len < 0 || *oldlenp < 1))
    if (oldp && (*oldlenp < (size_t)len))
        copylen = *oldlenp + 1;
    if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
    *oldlenp = copylen - 1;	/* deal with NULL strings correctly */
    error = copyout(str, oldp, copylen);
    error = copyout((void *)&c, oldp, sizeof(char));
    if (error == 0 && newp) {
        error = copyin(newp, str, newlen);
        AUDIT_ARG(text, (char *)str);

/*
 * Validate parameters and get old / set new parameters
 * for a string-valued sysctl function.
 */
sysctl_string(user_addr_t oldp, size_t *oldlenp,
    user_addr_t newp, size_t newlen, char *str, int maxlen)
    if (oldp != USER_ADDR_NULL && oldlenp == NULL)
    len = strlen(str) + 1;
    if (oldp && (len < 0 || *oldlenp < (size_t)len))
    if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
    *oldlenp = len - 1;	/* deal with NULL strings correctly */
    error = copyout(str, oldp, len);
    if (error == 0 && newp) {
        error = copyin(newp, str, newlen);
        AUDIT_ARG(text, (char *)str);

/*
 * As above, but read-only.
 */
sysctl_rdstring(user_addr_t oldp, size_t *oldlenp,
    user_addr_t newp, char *str)
    if (oldp != USER_ADDR_NULL && oldlenp == NULL)
    len = strlen(str) + 1;
    if (oldp && *oldlenp < (size_t)len)
    error = copyout(str, oldp, len);

/*
 * Validate parameters and get old / set new parameters
 * for a structure oriented sysctl function.
 */
sysctl_struct(user_addr_t oldp, size_t *oldlenp,
    user_addr_t newp, size_t newlen, void *sp, int len)
    if (oldp != USER_ADDR_NULL && oldlenp == NULL)
    if (oldp && (len < 0 || *oldlenp < (size_t)len))
    if (newp && (len < 0 || newlen > (size_t)len))
    error = copyout(sp, oldp, len);
    if (error == 0 && newp)
        error = copyin(newp, sp, len);

/*
 * Validate parameters and get old parameters
 * for a structure oriented sysctl function.
 */
sysctl_rdstruct(user_addr_t oldp, size_t *oldlenp,
    user_addr_t newp, void *sp, int len)
    if (oldp != USER_ADDR_NULL && oldlenp == NULL)
    if (oldp && (len < 0 || *oldlenp < (size_t)len))
    error = copyout(sp, oldp, len);
/*
 * Get file structures.
 */
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
    struct fileglob *fg;
    struct extern_file nef;

    if (req->oldptr == USER_ADDR_NULL) {
        /*
         * overestimate by 10 files
         */
        req->oldidx = sizeof(filehead) + (nfiles + 10) * sizeof(struct extern_file);

    /*
     * first copyout filehead
     */
    error = SYSCTL_OUT(req, &filehead, sizeof(filehead));

    /*
     * followed by an array of file structures
     */
    for (fg = filehead.lh_first; fg != 0; fg = fg->f_list.le_next) {
        nef.f_list.le_next = (struct extern_file *)fg->f_list.le_next;
        nef.f_list.le_prev = (struct extern_file **)fg->f_list.le_prev;
        nef.f_flag = (fg->fg_flag & FMASK);
        nef.f_type = fg->fg_type;
        nef.f_count = fg->fg_count;
        nef.f_msgcount = fg->fg_msgcount;
        nef.f_cred = fg->fg_cred;
        nef.f_ops = fg->fg_ops;
        nef.f_offset = fg->fg_offset;
        nef.f_data = fg->fg_data;
        error = SYSCTL_OUT(req, &nef, sizeof(nef));

SYSCTL_PROC(_kern, KERN_FILE, file,
    CTLTYPE_STRUCT | CTLFLAG_RW,
    0, 0, sysctl_file, "S,filehead", "");
sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg)
    if (p->p_pid != (pid_t)arg)

sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg)
    if (p->p_pgrpid != (pid_t)arg)

sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg)
    boolean_t funnel_state;

    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    /* This is very racy but list lock is held.. Hmmm. */
    if ((p->p_flag & P_CONTROLT) == 0 ||
        (p->p_pgrp == NULL) || (p->p_pgrp->pg_session == NULL) ||
        p->p_pgrp->pg_session->s_ttyp == NULL ||
        p->p_pgrp->pg_session->s_ttyp->t_dev != (dev_t)arg)

    thread_funnel_set(kernel_flock, funnel_state);

sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg)
    kauth_cred_t my_cred;

    if (p->p_ucred == NULL)
    my_cred = kauth_cred_proc_ref(p);
    uid = kauth_cred_getuid(my_cred);
    kauth_cred_unref(&my_cred);

    if (uid != (uid_t)arg)

sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg)
    kauth_cred_t my_cred;

    if (p->p_ucred == NULL)
    my_cred = kauth_cred_proc_ref(p);
    ruid = my_cred->cr_ruid;
    kauth_cred_unref(&my_cred);

    if (ruid != (uid_t)arg)

sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg)
    if ((p->p_lctx == NULL) ||
        (p->p_lctx->lc_id != (pid_t)arg))
/*
 * try over estimating by 5 procs
 */
#define KERN_PROCSLOP	(5 * sizeof (struct kinfo_proc))
struct sysdoproc_args {
    boolean_t is_64_bit;

sysdoproc_callback(proc_t p, void * arg)
    struct sysdoproc_args * args = (struct sysdoproc_args *)arg;

    if (args->buflen >= args->sizeof_kproc) {
        if ((args->ruidcheck != 0) && (sysdoproc_filt_KERN_PROC_RUID(p, (void *)args->uidval) == 0))
            return(PROC_RETURNED);
        if ((args->uidcheck != 0) && (sysdoproc_filt_KERN_PROC_UID(p, (void *)args->uidval) == 0))
            return(PROC_RETURNED);
        if ((args->ttycheck != 0) && (sysdoproc_filt_KERN_PROC_TTY(p, (void *)args->uidval) == 0))
            return(PROC_RETURNED);

        bzero(args->kprocp, args->sizeof_kproc);
        if (args->is_64_bit) {
            fill_user_proc(p, (struct user_kinfo_proc *) args->kprocp);
            fill_proc(p, (struct kinfo_proc *) args->kprocp);
        error = copyout(args->kprocp, args->dp, args->sizeof_kproc);
            *args->errorp = error;
            return(PROC_RETURNED_DONE);
        args->dp += args->sizeof_kproc;
        args->buflen -= args->sizeof_kproc;
    args->needed += args->sizeof_kproc;
    return(PROC_RETURNED);

sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep)
    user_addr_t dp = where;
    int buflen = where != USER_ADDR_NULL ? *sizep : 0;
    boolean_t is_64_bit = FALSE;
    struct kinfo_proc kproc;
    struct user_kinfo_proc user_kproc;
    int (*filterfn)(proc_t, void *) = 0;
    struct sysdoproc_args args;

    if (namelen != 2 && !(namelen == 1 && name[0] == KERN_PROC_ALL))
    is_64_bit = proc_is64bit(current_proc());
        sizeof_kproc = sizeof(user_kproc);
        kprocp = (caddr_t) &user_kproc;
        sizeof_kproc = sizeof(kproc);
        kprocp = (caddr_t) &kproc;

        filterfn = sysdoproc_filt_KERN_PROC_PID;

    case KERN_PROC_PGRP:
        filterfn = sysdoproc_filt_KERN_PROC_PGRP;

    case KERN_PROC_RUID:

    case KERN_PROC_LCID:
        filterfn = sysdoproc_filt_KERN_PROC_LCID;

    args.buflen = buflen;
    args.kprocp = kprocp;
    args.is_64_bit = is_64_bit;
    args.needed = needed;
    args.errorp = &error;
    args.uidcheck = uidcheck;
    args.ruidcheck = ruidcheck;
    args.ttycheck = ttycheck;
    args.sizeof_kproc = sizeof_kproc;
    args.uidval = name[1];

    proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST), sysdoproc_callback, &args, filterfn, (void *)name[1]);

    needed = args.needed;

    if (where != USER_ADDR_NULL) {
        *sizep = dp - where;
        if (needed > *sizep)
        needed += KERN_PROCSLOP;
/*
 * Fill in an eproc structure for the specified process.
 */
fill_eproc(proc_t p, struct eproc *ep)
    kauth_cred_t my_cred;
    struct session * sessp;

    sessp = proc_session(p);

    if (pg != PGRP_NULL) {
        ep->e_pgid = p->p_pgrpid;
        ep->e_jobc = pg->pg_jobc;
        if ((sessp != SESSION_NULL) && sessp->s_ttyvp)
            ep->e_flag = EPROC_CTTY;
        ep->e_sess = (struct session *)0;
    ep->e_lcid = p->p_lctx->lc_id;
    ep->e_ppid = p->p_ppid;
    /* Pre-zero the fake historical pcred */
    bzero(&ep->e_pcred, sizeof(struct _pcred));

    my_cred = kauth_cred_proc_ref(p);

    /* A fake historical pcred */
    ep->e_pcred.p_ruid = my_cred->cr_ruid;
    ep->e_pcred.p_svuid = my_cred->cr_svuid;
    ep->e_pcred.p_rgid = my_cred->cr_rgid;
    ep->e_pcred.p_svgid = my_cred->cr_svgid;
    /* A fake historical *kauth_cred_t */
    ep->e_ucred.cr_ref = my_cred->cr_ref;
    ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
    ep->e_ucred.cr_ngroups = my_cred->cr_ngroups;
    bcopy(my_cred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));

    kauth_cred_unref(&my_cred);

    if (p->p_stat == SIDL || p->p_stat == SZOMB) {
        ep->e_vm.vm_tsize = 0;
        ep->e_vm.vm_dsize = 0;
        ep->e_vm.vm_ssize = 0;
    ep->e_vm.vm_rssize = 0;

    if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
        (tp = sessp->s_ttyp)) {
        ep->e_tdev = tp->t_dev;
        ep->e_tpgid = sessp->s_ttypgrpid;
        ep->e_tsess = tp->t_session;

    if (SESS_LEADER(p, sessp))
        ep->e_flag |= EPROC_SLEADER;
    bzero(&ep->e_wmesg[0], WMESGLEN+1);
    ep->e_xsize = ep->e_xrssize = 0;
    ep->e_xccount = ep->e_xswrss = 0;
    if (sessp != SESSION_NULL)
        session_rele(sessp);
/*
 * Fill in an LP64 version of eproc structure for the specified process.
 */
fill_user_eproc(proc_t p, struct user_eproc *ep)
    struct session *sessp = NULL;
    kauth_cred_t my_cred;

    sessp = proc_session(p);

    ep->e_paddr = CAST_USER_ADDR_T(p);
    if (pg != PGRP_NULL) {
        ep->e_sess = CAST_USER_ADDR_T(sessp);
        ep->e_pgid = p->p_pgrpid;
        ep->e_jobc = pg->pg_jobc;
        if (sessp != SESSION_NULL) {
            ep->e_flag = EPROC_CTTY;
        ep->e_sess = USER_ADDR_NULL;
    ep->e_lcid = p->p_lctx->lc_id;
    ep->e_ppid = p->p_ppid;
    /* Pre-zero the fake historical pcred */
    bzero(&ep->e_pcred, sizeof(ep->e_pcred));

    my_cred = kauth_cred_proc_ref(p);

    /* A fake historical pcred */
    ep->e_pcred.p_ruid = my_cred->cr_ruid;
    ep->e_pcred.p_svuid = my_cred->cr_svuid;
    ep->e_pcred.p_rgid = my_cred->cr_rgid;
    ep->e_pcred.p_svgid = my_cred->cr_svgid;

    /* A fake historical *kauth_cred_t */
    ep->e_ucred.cr_ref = my_cred->cr_ref;
    ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
    ep->e_ucred.cr_ngroups = my_cred->cr_ngroups;
    bcopy(my_cred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));

    kauth_cred_unref(&my_cred);

    if (p->p_stat == SIDL || p->p_stat == SZOMB) {
        ep->e_vm.vm_tsize = 0;
        ep->e_vm.vm_dsize = 0;
        ep->e_vm.vm_ssize = 0;
    ep->e_vm.vm_rssize = 0;

    if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
        (tp = sessp->s_ttyp)) {
        ep->e_tdev = tp->t_dev;
        ep->e_tpgid = sessp->s_ttypgrpid;
        ep->e_tsess = CAST_USER_ADDR_T(tp->t_session);

    if (SESS_LEADER(p, sessp))
        ep->e_flag |= EPROC_SLEADER;
    bzero(&ep->e_wmesg[0], WMESGLEN+1);
    ep->e_xsize = ep->e_xrssize = 0;
    ep->e_xccount = ep->e_xswrss = 0;
    if (sessp != SESSION_NULL)
        session_rele(sessp);
    if (pg != PGRP_NULL)
/*
 * Fill in an eproc structure for the specified process.
 */
fill_externproc(proc_t p, struct extern_proc *exp)
    exp->p_forw = exp->p_back = NULL;
    exp->p_starttime = p->p_start;
    exp->p_vmspace = NULL;
    exp->p_sigacts = p->p_sigacts;
    exp->p_flag = p->p_flag;
    if (p->p_lflag & P_LTRACED)
        exp->p_flag |= P_TRACED;
    if (p->p_lflag & P_LPPWAIT)
        exp->p_flag |= P_PPWAIT;
    if (p->p_lflag & P_LEXIT)
        exp->p_flag |= P_WEXIT;
    exp->p_stat = p->p_stat;
    exp->p_pid = p->p_pid;
    exp->p_oppid = p->p_oppid;
    exp->user_stack = CAST_DOWN(caddr_t, p->user_stack);
    exp->exit_thread = p->exit_thread;
    exp->p_debugger = p->p_debugger;
    exp->sigwait = p->sigwait;
#ifdef _PROC_HAS_SCHEDINFO_
    exp->p_estcpu = p->p_estcpu;
    exp->p_pctcpu = p->p_pctcpu;
    exp->p_slptime = p->p_slptime;
    exp->p_slptime = 0;
    exp->p_cpticks = 0;
    bcopy(&p->p_realtimer, &exp->p_realtimer, sizeof(struct itimerval));
    bcopy(&p->p_rtime, &exp->p_rtime, sizeof(struct timeval));
    exp->p_traceflag = 0;
    exp->p_siglist = 0;		/* No longer relevant */
    exp->p_textvp = p->p_textvp;
    exp->p_holdcnt = 0;
    exp->p_sigmask = 0;		/* no longer available */
    exp->p_sigignore = p->p_sigignore;
    exp->p_sigcatch = p->p_sigcatch;
    exp->p_priority = p->p_priority;
    exp->p_nice = p->p_nice;
    bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
    exp->p_comm[MAXCOMLEN] = '\0';
    exp->p_pgrp = p->p_pgrp;
    exp->p_xstat = p->p_xstat;
    exp->p_acflag = p->p_acflag;
    exp->p_ru = p->p_ru;		/* XXX may be NULL */
/*
 * Fill in an LP64 version of extern_proc structure for the specified process.
 */
fill_user_externproc(proc_t p, struct user_extern_proc *exp)
    exp->p_forw = exp->p_back = USER_ADDR_NULL;
    exp->p_starttime.tv_sec = p->p_start.tv_sec;
    exp->p_starttime.tv_usec = p->p_start.tv_usec;
    exp->p_vmspace = USER_ADDR_NULL;
    exp->p_sigacts = CAST_USER_ADDR_T(p->p_sigacts);
    exp->p_flag = p->p_flag;
    if (p->p_lflag & P_LTRACED)
        exp->p_flag |= P_TRACED;
    if (p->p_lflag & P_LPPWAIT)
        exp->p_flag |= P_PPWAIT;
    if (p->p_lflag & P_LEXIT)
        exp->p_flag |= P_WEXIT;
    exp->p_stat = p->p_stat;
    exp->p_pid = p->p_pid;
    exp->p_oppid = p->p_oppid;
    exp->user_stack = p->user_stack;
    exp->exit_thread = CAST_USER_ADDR_T(p->exit_thread);
    exp->p_debugger = p->p_debugger;
    exp->sigwait = p->sigwait;
#ifdef _PROC_HAS_SCHEDINFO_
    exp->p_estcpu = p->p_estcpu;
    exp->p_pctcpu = p->p_pctcpu;
    exp->p_slptime = p->p_slptime;
    exp->p_slptime = 0;
    exp->p_cpticks = 0;
    exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
    exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;
    exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
    exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;
    exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
    exp->p_rtime.tv_usec = p->p_rtime.tv_usec;
    exp->p_traceflag = 0;
    exp->p_siglist = 0;		/* No longer relevant */
    exp->p_textvp = CAST_USER_ADDR_T(p->p_textvp);
    exp->p_holdcnt = 0;
    exp->p_sigmask = 0;		/* no longer available */
    exp->p_sigignore = p->p_sigignore;
    exp->p_sigcatch = p->p_sigcatch;
    exp->p_priority = p->p_priority;
    exp->p_nice = p->p_nice;
    bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
    exp->p_comm[MAXCOMLEN] = '\0';
    exp->p_pgrp = CAST_USER_ADDR_T(p->p_pgrp);
    exp->p_addr = USER_ADDR_NULL;
    exp->p_xstat = p->p_xstat;
    exp->p_acflag = p->p_acflag;
    exp->p_ru = CAST_USER_ADDR_T(p->p_ru);		/* XXX may be NULL */

fill_proc(proc_t p, struct kinfo_proc *kp)
{
    fill_externproc(p, &kp->kp_proc);
    fill_eproc(p, &kp->kp_eproc);
}

fill_user_proc(proc_t p, struct user_kinfo_proc *kp)
{
    fill_user_externproc(p, &kp->kp_proc);
    fill_user_eproc(p, &kp->kp_eproc);
}
kdebug_ops(int *name, u_int namelen, user_addr_t where,
    size_t *sizep, proc_t p)
    ret = suser(kauth_cred_get(), &p->p_acflag);

    case KERN_KDSETRTCDEC:
    case KERN_KDGETENTROPY:
        ret = kdbg_control(name, namelen, where, sizep);

/*
 * Return the top *sizep bytes of the user stack, or the entire area of the
 * user stack down through the saved exec_path, whichever is smaller.
 */
sysctl_procargs(int *name, u_int namelen, user_addr_t where,
    size_t *sizep, proc_t cur_proc)
{
    return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 0);
}

sysctl_procargs2(int *name, u_int namelen, user_addr_t where,
    size_t *sizep, proc_t cur_proc)
{
    return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 1);
}
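/*
 * Illustrative user-space sketch (not part of the original file, never
 * compiled): reading a process's argument area through KERN_PROCARGS2, the
 * MIB served by sysctl_procargs2()/sysctl_procargsx() below.  As the code
 * notes, the leading int of the returned buffer is argc, followed by the
 * saved exec path.  The helper name "show_exec_path" is hypothetical and
 * error handling is elided.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void
show_exec_path(pid_t pid)
{
    int mib[3] = { CTL_KERN, KERN_PROCARGS2, pid };
    size_t len = 0;
    char *buf;
    int argc;

    sysctl(mib, 3, NULL, &len, NULL, 0);	/* size probe */
    buf = malloc(len);
    sysctl(mib, 3, buf, &len, NULL, 0);
    memcpy(&argc, buf, sizeof(argc));	/* argc is the leading word */
    printf("argc=%d exec path=%s\n", argc, buf + sizeof(int));
    free(buf);
}
#endif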
sysctl_procargsx(int *name, __unused u_int namelen, user_addr_t where,
    size_t *sizep, proc_t cur_proc, int argc_yes)
    int buflen = where != USER_ADDR_NULL ? *sizep : 0;
    struct _vm_map *proc_map;
    user_addr_t arg_addr;
    vm_offset_t copy_start, copy_end;
    kauth_cred_t my_cred;

    buflen -= sizeof(int);	/* reserve first word to return argc */

    /* we only care about buflen when where (oldp from sysctl) is not NULL. */
    /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
    /* is not NULL then the caller wants us to return the length needed to */
    /* hold the data we would return */
    if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {

    /*
     * Lookup process by pid
     */

    /*
     * Copy the top N bytes of the stack.
     * On all machines we have so far, the stack grows
     * If the user expects no more than N bytes of
     * argument list, use that as a guess for the
     */
    if (!p->user_stack) {

    if (where == USER_ADDR_NULL) {
        /* caller only wants to know length of proc args data */
        if (sizep == NULL) {

        size = p->p_argslen;
            size += sizeof(int);

            /*
             * old PROCARGS will return the executable's path and plus some
             * extra space for work alignment and data tags
             */
            size += PATH_MAX + (6 * sizeof(int));
        size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;

    my_cred = kauth_cred_proc_ref(p);
    uid = kauth_cred_getuid(my_cred);
    kauth_cred_unref(&my_cred);

    if ((uid != kauth_cred_getuid(kauth_cred_get()))
        && suser(kauth_cred_get(), &cur_proc->p_acflag)) {

    if ((u_int)arg_size > p->p_argslen)
        arg_size = round_page(p->p_argslen);

    arg_addr = p->user_stack - arg_size;

    /*
     * Before we can block (any VM code), make another
     * reference to the map to keep it alive.  We do
     * that by getting a reference on the task itself.
     */

    argslen = p->p_argslen;
    /*
     * Once we have a task reference we can convert that into a
     * map reference, which we will use in the calls below.  The
     * task/process may change its map after we take this reference
     * (see execve), but the worst that will happen then is a return
     * of stale info (which is always a possibility).
     */
    task_reference(task);
    proc_map = get_task_map_reference(task);
    task_deallocate(task);

    if (proc_map == NULL)

    ret = kmem_alloc(kernel_map, &copy_start, round_page(arg_size));
    if (ret != KERN_SUCCESS) {
        vm_map_deallocate(proc_map);

    copy_end = round_page(copy_start + arg_size);

    if( vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
            (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
        vm_map_deallocate(proc_map);
        kmem_free(kernel_map, copy_start,
            round_page(arg_size));

    /*
     * Now that we've done the copyin from the process'
     * map, we can release the reference to it.
     */
    vm_map_deallocate(proc_map);

    if( vm_map_copy_overwrite(kernel_map,
            (vm_map_address_t)copy_start,
            tmp, FALSE) != KERN_SUCCESS) {
        kmem_free(kernel_map, copy_start,
            round_page(arg_size));

    if (arg_size > argslen) {
        data = (caddr_t) (copy_end - argslen);
        data = (caddr_t) (copy_end - arg_size);

        /* Put processes argc as the first word in the copyout buffer */
        suword(where, p->p_argc);
        error = copyout(data, (where + sizeof(int)), size);
        size += sizeof(int);
        error = copyout(data, where, size);

        /*
         * Make the old PROCARGS work to return the executable's path
         * But, only if there is enough space in the provided buffer
         * on entry: data [possibly] points to the beginning of the path
         * Note: we keep all pointers&sizes aligned to word boundaries
         */
        if ( (! error) && (buflen > 0 && (u_int)buflen > argslen) )
            int binPath_sz, alignedBinPath_sz = 0;
            int extraSpaceNeeded, addThis;
            user_addr_t placeHere;
            char * str = (char *) data;

            /* Some apps are really bad about messing up their stacks
               So, we have to be extra careful about getting the length
               of the executing binary.  If we encounter an error, we bail. */

            /* Limit ourselves to PATH_MAX paths */
            if ( max_len > PATH_MAX ) max_len = PATH_MAX;

            while ( (binPath_sz < max_len-1) && (*str++ != 0) )

            /* If we have a NUL terminator, copy it, too */
            if (binPath_sz < max_len-1) binPath_sz += 1;

            /* Pre-Flight the space requirements */

            /* Account for the padding that fills out binPath to the next word */
            alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;

            placeHere = where + size;

            /* Account for the bytes needed to keep placeHere word aligned */
            addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;

            /* Add up all the space that is needed */
            extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));

            /* is there room to tack on argv[0]? */
            if ( (buflen & ~(sizeof(int)-1)) >= ( argslen + extraSpaceNeeded ))
                placeHere += addThis;
                suword(placeHere, 0);
                placeHere += sizeof(int);
                suword(placeHere, 0xBFFF0000);
                placeHere += sizeof(int);
                suword(placeHere, 0);
                placeHere += sizeof(int);
                error = copyout(data, placeHere, binPath_sz);
                placeHere += binPath_sz;
                suword(placeHere, 0);
                size += extraSpaceNeeded;

    if (copy_start != (vm_offset_t) 0) {
        kmem_free(kernel_map, copy_start, copy_end - copy_start);

    if (where != USER_ADDR_NULL)
/*
 * Max number of concurrent aio requests
 */
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
    int new_value, changed;
    int error = sysctl_io_number(req, aio_max_requests, sizeof(int), &new_value, &changed);
    /* make sure the system-wide limit is greater than the per process limit */
    if (new_value >= aio_max_requests_per_process)
        aio_max_requests = new_value;

/*
 * Max number of concurrent aio requests per process
 */
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
    int new_value, changed;
    int error = sysctl_io_number(req, aio_max_requests_per_process, sizeof(int), &new_value, &changed);
    /* make sure per process limit is less than the system-wide limit */
    if (new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX)
        aio_max_requests_per_process = new_value;

/*
 * Max number of async IO worker threads
 */
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
    int new_value, changed;
    int error = sysctl_io_number(req, aio_worker_threads, sizeof(int), &new_value, &changed);
    /* we only allow an increase in the number of worker threads */
    if (new_value > aio_worker_threads) {
        _aio_create_worker_threads((new_value - aio_worker_threads));
        aio_worker_threads = new_value;

/*
 * System-wide limit on the max number of processes
 */
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
    int new_value, changed;
    int error = sysctl_io_number(req, maxproc, sizeof(int), &new_value, &changed);
    AUDIT_ARG(value, new_value);
    /* make sure the system-wide limit is less than the configured hard
       limit set at kernel compilation */
    if (new_value <= hard_maxproc && new_value > 0)
        maxproc = new_value;
SYSCTL_STRING(_kern, KERN_OSTYPE, ostype,
    CTLFLAG_RD | CTLFLAG_KERN,
SYSCTL_STRING(_kern, KERN_OSRELEASE, osrelease,
    CTLFLAG_RD | CTLFLAG_KERN,
SYSCTL_INT(_kern, KERN_OSREV, osrevision,
    CTLFLAG_RD | CTLFLAG_KERN,
SYSCTL_STRING(_kern, KERN_VERSION, version,
    CTLFLAG_RD | CTLFLAG_KERN,

/* PR-5293665: need to use a callback function for kern.osversion to set
 * osversion in IORegistry */
sysctl_osversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
    rval = sysctl_handle_string(oidp, arg1, arg2, req);
    IORegistrySetOSBuildVersion((char *)arg1);

SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
    CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING,
    osversion, 256 /* OSVERSIZE*/,
    sysctl_osversion, "A", "");

sysctl_sysctl_bootargs
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
    strlcpy(buf, PE_boot_args(), 256);
    error = sysctl_io_string(req, buf, 256, 0, NULL);

SYSCTL_PROC(_kern, OID_AUTO, bootargs,
    CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING,
    sysctl_sysctl_bootargs, "A", "bootargs");

SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles,
    CTLFLAG_RW | CTLFLAG_KERN,
SYSCTL_INT(_kern, KERN_ARGMAX, argmax,
    CTLFLAG_RD | CTLFLAG_KERN,
SYSCTL_INT(_kern, KERN_POSIX1, posix1version,
    CTLFLAG_RD | CTLFLAG_KERN,
    NULL, _POSIX_VERSION, "");
SYSCTL_INT(_kern, KERN_NGROUPS, ngroups,
    CTLFLAG_RD | CTLFLAG_KERN,
    NULL, NGROUPS_MAX, "");
SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control,
    CTLFLAG_RD | CTLFLAG_KERN,
#if 1	/* _POSIX_SAVED_IDS from <unistd.h> */
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
    CTLFLAG_RD | CTLFLAG_KERN,
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
    CTLFLAG_RD | CTLFLAG_KERN,

sysctl_maxvnodes (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
    unsigned int oldval = desiredvnodes;
    int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &desiredvnodes, NULL);
    reset_vmobjectcache(oldval, desiredvnodes);
    resize_namecache(desiredvnodes);

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_maxvnodes, "I", "");

SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_maxproc, "I", "");

SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_aiomax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_aioprocmax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_aiothreads, "I", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
    int new_value, changed;
    int error = sysctl_io_number(req, securelevel, sizeof(int), &new_value, &changed);
    if (!(new_value < securelevel && req->p->p_pid != 1)) {
        securelevel = new_value;

SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_securelvl, "I", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
    error = sysctl_io_string(req, domainname, sizeof(domainname), 0, &changed);
    domainnamelen = strlen(domainname);

SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
    CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_domainname, "A", "");

SYSCTL_INT(_kern, KERN_HOSTID, hostid,
    CTLFLAG_RW | CTLFLAG_KERN,

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
    error = sysctl_io_string(req, hostname, sizeof(hostname), 1, &changed);
    hostnamelen = req->newlen;

SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
    CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_hostname, "A", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
    /* Original code allowed writing, I'm copying this, although this all makes
       no sense to me. Besides, this sysctl is never used. */
    return sysctl_io_string(req, &req->p->p_name[0], (2*MAXCOMLEN+1), 1, NULL);

SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY,
    0, 0, sysctl_procname, "A", "");

SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
    CTLFLAG_RW | CTLFLAG_KERN,
    &speculative_reads_disabled, 0, "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
    t.tv_sec = boottime_sec();
    return sysctl_io_opaque(req, &t, sizeof(t), NULL);

SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
    CTLTYPE_STRUCT | CTLFLAG_RD,
    0, 0, sysctl_boottime, "S,timeval", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
    int error = get_kernel_symfile(req->p, &str);
    return sysctl_io_string(req, str, 0, 0, NULL);

SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
    CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, sysctl_symfile, "A", "");
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
    return sysctl_io_number(req, netboot_root(), sizeof(int), NULL, NULL);

SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
    CTLTYPE_INT | CTLFLAG_RD,
    0, 0, sysctl_netboot, "I", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
    return sysctl_io_number(req, (int)req->p->user_stack, sizeof(int), NULL, NULL);

SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack,
    CTLTYPE_INT | CTLFLAG_RD,
    0, 0, sysctl_usrstack, "I", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
    return sysctl_io_number(req, req->p->user_stack, sizeof(req->p->user_stack), NULL, NULL);

SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
    CTLTYPE_QUAD | CTLFLAG_RD,
    0, 0, sysctl_usrstack64, "Q", "");

SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
    CTLFLAG_RW | CTLFLAG_KERN,
    corefilename, sizeof(corefilename), "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
#ifdef SECURE_KERNEL
    int new_value, changed;
    int error = sysctl_io_number(req, do_coredump, sizeof(int), &new_value, &changed);
    if ((new_value == 0) || (new_value == 1))
        do_coredump = new_value;

SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_coredump, "I", "");

sysctl_suid_coredump
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
#ifdef SECURE_KERNEL
    int new_value, changed;
    int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed);
    if ((new_value == 0) || (new_value == 1))
        sugid_coredump = new_value;

SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_suid_coredump, "I", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
    struct proc *p = req->p;
    int new_value, changed;
    int error = sysctl_io_number(req, (req->p->p_lflag & P_LDELAYTERM)? 1: 0, sizeof(int), &new_value, &changed);
            req->p->p_lflag |= P_LDELAYTERM;
            req->p->p_lflag &= ~P_LDELAYTERM;

SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_delayterm, "I", "");

sysctl_proc_low_pri_io
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
    struct proc *p = req->p;
    int new_value, old_value, changed;

    switch (req->p->p_iopol_disk) {
    case IOPOL_THROTTLE:
        /* this should never happen, but to be robust, return the default value */

    error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
        if (new_value & 0x01)
            req->p->p_iopol_disk = IOPOL_THROTTLE;
        else if (new_value & 0x02)
            req->p->p_iopol_disk = IOPOL_PASSIVE;
        else if (new_value == 0)
            req->p->p_iopol_disk = IOPOL_NORMAL;

SYSCTL_PROC(_kern, KERN_PROC_LOW_PRI_IO, proc_low_pri_io,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_proc_low_pri_io, "I", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
    struct proc *p = req->p;
    int new_value, old_value, changed;

    ut = get_bsdthread_info(current_thread());

    if (ut->uu_flag & UT_RAGE_VNODES)
        old_value = KERN_RAGE_THREAD;
    else if (p->p_lflag & P_LRAGE_VNODES)
        old_value = KERN_RAGE_PROC;

    error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);

        switch (new_value) {
        case KERN_RAGE_PROC:
            p->p_lflag |= P_LRAGE_VNODES;
        case KERN_UNRAGE_PROC:
            p->p_lflag &= ~P_LRAGE_VNODES;

        case KERN_RAGE_THREAD:
            ut->uu_flag |= UT_RAGE_VNODES;
        case KERN_UNRAGE_THREAD:
            ut = get_bsdthread_info(current_thread());
            ut->uu_flag &= ~UT_RAGE_VNODES;
SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_rage_vnode, "I", "");

sysctl_kern_check_openevt
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
    struct proc *p = req->p;
    int new_value, old_value, changed;

    if (p->p_flag & P_CHECKOPENEVT) {
        old_value = KERN_OPENEVT_PROC;

    error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);

        switch (new_value) {
        case KERN_OPENEVT_PROC:
            OSBitOrAtomic(P_CHECKOPENEVT, (UInt32 *)&p->p_flag);

        case KERN_UNOPENEVT_PROC:
            OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT), (UInt32 *)&p->p_flag);

SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY,
    0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
#ifdef SECURE_KERNEL
    int new_value, changed;

    error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);

        /*
         * Only allow setting if NX is supported on the chip
         */
        if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD))
        nx_enabled = new_value;

SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN,
    0, 0, sysctl_nx, "I", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
    if (proc_is64bit(req->p)) {
        struct user_loadavg loadinfo64;
        loadavg32to64(&averunnable, &loadinfo64);
        return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);
        return sysctl_io_opaque(req, &averunnable, sizeof(averunnable), NULL);

SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
    CTLTYPE_STRUCT | CTLFLAG_RD,
    0, 0, sysctl_loadavg, "S,loadavg", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
    uint64_t swap_total;
    uint64_t swap_avail;
    uint32_t swap_pagesize;
    boolean_t swap_encrypted;
    struct xsw_usage xsu;

    error = macx_swapinfo(&swap_total,

    xsu.xsu_total = swap_total;
    xsu.xsu_avail = swap_avail;
    xsu.xsu_used = swap_total - swap_avail;
    xsu.xsu_pagesize = swap_pagesize;
    xsu.xsu_encrypted = swap_encrypted;
    return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL);

SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
    CTLTYPE_STRUCT | CTLFLAG_RD,
    0, 0, sysctl_swapusage, "S,xsw_usage", "");

/* this kernel does NOT implement shared_region_make_private_np() */
SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,

sysctl_sysctl_exec_affinity(__unused struct sysctl_oid *oidp,
    __unused void *arg1, __unused int arg2,
    struct sysctl_req *req)
    proc_t cur_proc = req->p;

    if (req->oldptr != USER_ADDR_NULL) {
        cpu_type_t oldcputype = (cur_proc->p_flag & P_AFFINITY) ? CPU_TYPE_POWERPC : CPU_TYPE_I386;
        if ((error = SYSCTL_OUT(req, &oldcputype, sizeof(oldcputype))))

    if (req->newptr != USER_ADDR_NULL) {
        cpu_type_t newcputype;
        if ((error = SYSCTL_IN(req, &newcputype, sizeof(newcputype))))
        if (newcputype == CPU_TYPE_I386)
            OSBitAndAtomic(~((uint32_t)P_AFFINITY), (UInt32 *)&cur_proc->p_flag);
        else if (newcputype == CPU_TYPE_POWERPC)
            OSBitOrAtomic(P_AFFINITY, (UInt32 *)&cur_proc->p_flag);

SYSCTL_PROC(_sysctl, OID_AUTO, proc_exec_affinity, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, 0, 0, sysctl_sysctl_exec_affinity, "I", "proc_exec_affinity");

fetch_process_cputype(
    cpu_type_t *cputype)
    proc_t p = PROC_NULL;

    else if (namelen == 1) {
        p = proc_find(name[0]);

    if (p->p_flag & P_TRANSLATED) {
        ret = CPU_TYPE_POWERPC;

    if (IS_64BIT_PROCESS(p))
        ret |= CPU_ARCH_ABI64;

sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
    struct sysctl_req *req)
    cpu_type_t proc_cputype = 0;
    if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
    if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
    return SYSCTL_OUT(req, &res, sizeof(res));

SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_native, "I", "proc_native");
sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
    struct sysctl_req *req)
    cpu_type_t proc_cputype = 0;
    if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
    return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));

SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_cputype, "I", "proc_cputype");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
    return sysctl_io_number(req, boothowto & RB_SAFEBOOT ? 1 : 0, sizeof(int), NULL, NULL);

SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
    CTLTYPE_INT | CTLFLAG_RD,
    0, 0, sysctl_safeboot, "I", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
    return sysctl_io_number(req, boothowto & RB_SINGLE ? 1 : 0, sizeof(int), NULL, NULL);

SYSCTL_PROC(_kern, OID_AUTO, singleuser,
    CTLTYPE_INT | CTLFLAG_RD,
    0, 0, sysctl_singleuser, "I", "");

/*
 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
 */
extern boolean_t affinity_sets_enabled;
extern int affinity_sets_mapping;

SYSCTL_INT (_kern, OID_AUTO, affinity_sets_enabled,
    CTLFLAG_RW, (int *) &affinity_sets_enabled, 0, "hinting enabled");
SYSCTL_INT (_kern, OID_AUTO, affinity_sets_mapping,
    CTLFLAG_RW, &affinity_sets_mapping, 0, "mapping policy");

/*
 * Limit on total memory users can wire.
 *
 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
 *
 * vm_user_wire_limit - per address space limit on wired memory.  This puts a cap on the process's rlimit value.
 *
 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
 *
 * All values are in bytes.
 */

vm_map_size_t	vm_global_user_wire_limit;
vm_map_size_t	vm_user_wire_limit;

SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW, &vm_global_user_wire_limit, "");
SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW, &vm_user_wire_limit, "");