/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Mike Karels at Berkeley Software Design, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sysctl.c	8.4 (Berkeley) 4/14/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 * DEPRECATED sysctl system call code
 *
 * Everything in this file is deprecated. Sysctls should be handled
 * by the code in kern_newsysctl.c.
 * The remaining "case" sections are supposed to be converted into
 * SYSCTL_*-style definitions, and as soon as all of them are gone,
 * this source file is supposed to die.
 *
 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
 */
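/*
 * Editorial sketch (not part of the original file): the preferred
 * SYSCTL_* style registers an OID declaratively instead of adding a
 * "case" below.  The variable and description here are hypothetical
 * placeholders, shown only to illustrate the pattern.
 */
#if 0	/* example only */
static int example_tunable = 0;
SYSCTL_INT(_kern, OID_AUTO, example_tunable,
	CTLFLAG_RW | CTLFLAG_KERN,
	&example_tunable, 0, "example tunable registered the new way");
#endif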
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/file_internal.h>
#include <sys/vnode_internal.h>
#include <sys/unistd.h>
#include <sys/ioctl.h>
#include <sys/namei.h>
#include <sys/disklabel.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <sys/aio_kern.h>
#include <sys/reboot.h>

#include <bsm/audit_kernel.h>

#include <mach/machine.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <kern/task.h>
#include <kern/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <mach/host_info.h>

#include <sys/mount_internal.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>

#include <IOKit/IOPlatformExpert.h>
#include <pexpert/pexpert.h>

#include <machine/machine_routines.h>
#include <machine/exec.h>

#include <vm/vm_protos.h>

#include <i386/cpuid.h>
static sysctlfn kern_sysctl;
static sysctlfn debug_sysctl;
extern sysctlfn net_sysctl;
extern sysctlfn cpu_sysctl;
extern int aio_max_requests;
extern int aio_max_requests_per_process;
extern int aio_worker_threads;
extern int lowpri_IO_window_msecs;
extern int lowpri_IO_delay_msecs;
extern int nx_enabled;
extern int speculative_reads_disabled;

static void
fill_eproc(proc_t p, struct eproc *ep);
static void
fill_externproc(proc_t p, struct extern_proc *exp);
static void
fill_user_eproc(proc_t p, struct user_eproc *ep);
static void
fill_user_proc(proc_t p, struct user_kinfo_proc *kp);
static void
fill_user_externproc(proc_t p, struct user_extern_proc *exp);
extern int
kdbg_control(int *name, u_int namelen, user_addr_t where, size_t * sizep);
int
kdebug_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep, proc_t p);
int
pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
	proc_t p);
__private_extern__ kern_return_t
reset_vmobjectcache(unsigned int val1, unsigned int val2);
int
sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep);
int
sysctl_doprof(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen);
static void
fill_proc(proc_t p, struct kinfo_proc *kp);
int
sysctl_procargs(int *name, u_int namelen, user_addr_t where,
	size_t *sizep, proc_t cur_proc);
static int
sysctl_procargs2(int *name, u_int namelen, user_addr_t where, size_t *sizep,
	proc_t cur_proc);
static int
sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
	proc_t cur_proc, int argc_yes);
int
sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
	size_t newlen, void *sp, int len);

static int sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg);
int sysdoproc_callback(proc_t p, void *arg);

static int __sysctl_funneled(proc_t p, struct __sysctl_args *uap, register_t *retval);

extern void IORegistrySetOSBuildVersion(char * build_version);
static void
loadavg32to64(struct loadavg *la32, struct user_loadavg *la64)
{
	la64->ldavg[0]	= la32->ldavg[0];
	la64->ldavg[1]	= la32->ldavg[1];
	la64->ldavg[2]	= la32->ldavg[2];
	la64->fscale	= (user_long_t)la32->fscale;
}

static struct sysctl_lock memlock;
/* sysctl() syscall */
int
__sysctl(proc_t p, struct __sysctl_args *uap, register_t *retval)
{
	boolean_t funnel_state;
	int error;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	error = __sysctl_funneled(p, uap, retval);
	thread_funnel_set(kernel_flock, funnel_state);
	return(error);
}
static int
__sysctl_funneled(proc_t p, struct __sysctl_args *uap, __unused register_t *retval)
{
	int error, dolock = 1;
	size_t savelen = 0, oldlen = 0, newlen;
	sysctlfn *fnp = NULL;
	int name[CTL_MAXNAME];
	int error1;
	boolean_t memlock_taken = FALSE;
	boolean_t vslock_taken = FALSE;
	kauth_cred_t my_cred;

	/*
	 * all top-level sysctl names are non-terminal
	 */
	if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)
		return (EINVAL);
	error = copyin(uap->name, &name[0], uap->namelen * sizeof(int));
	if (error)
		return (error);

	AUDIT_ARG(ctlname, name, uap->namelen);

	if (proc_is64bit(p)) {
		/* uap->newlen is a size_t value which grows to 64 bits
		 * when coming from a 64-bit process.  since it's doubtful we'll
		 * have a sysctl newp buffer greater than 4GB we shrink it to size_t
		 */
		newlen = CAST_DOWN(size_t, uap->newlen);
	}
	else {
		newlen = uap->newlen;
	}

	/* CTL_UNSPEC is used to get oid to AUTO_OID */
	if (uap->new != USER_ADDR_NULL
	    && ((name[0] == CTL_KERN
		&& !(name[1] == KERN_IPC || name[1] == KERN_PANICINFO || name[1] == KERN_PROCDELAYTERM ||
		     name[1] == KERN_PROC_LOW_PRI_IO || name[1] == KERN_PROCNAME || name[1] == KERN_RAGEVNODE || name[1] == KERN_CHECKOPENEVT))
	    || (name[0] == CTL_HW)
	    || (name[0] == CTL_VM))
	    && (error = suser(kauth_cred_get(), &p->p_acflag)))
		return (error);

	/* XXX: KERN, VFS and DEBUG are handled by their respective functions,
	 * but there is a fallback for all sysctls other than VFS to
	 * userland_sysctl() - KILL THIS! */
	switch (name[0]) {
	case CTL_KERN:
		fnp = kern_sysctl;
		if ((name[1] != KERN_VNODE) && (name[1] != KERN_FILE)
			&& (name[1] != KERN_PROC))
			dolock = 0;
		break;
	default:
		fnp = NULL;
	}

	if (uap->oldlenp != USER_ADDR_NULL) {
		uint64_t oldlen64 = fuulong(uap->oldlenp);

		oldlen = CAST_DOWN(size_t, oldlen64);
		/*
		 * If more than 4G, clamp to 4G - useracc() below will catch
		 * with an EFAULT, if it's actually necessary.
		 */
		if (oldlen64 > 0x00000000ffffffffULL)
			oldlen = 0xffffffffUL;
	}

	if (uap->old != USER_ADDR_NULL) {
		if (!useracc(uap->old, (user_size_t)oldlen, B_WRITE))
			return (EFAULT);

		/*
		 * The kernel debug mechanism does not need to take this lock, and
		 * we don't grab the memlock around calls to KERN_PROC because it is reentrant.
		 * Grabbing the lock for a KERN_PROC sysctl makes a deadlock possible 5024049.
		 */
		if (!((name[1] == KERN_KDEBUG) && (name[2] == KERN_KDGETENTROPY)) &&
		    !(name[1] == KERN_PROC)) {
			MEMLOCK_LOCK();
			memlock_taken = TRUE;
		}

		if (dolock && oldlen) {
			if ((error = vslock(uap->old, (user_size_t)oldlen))) {
				if (memlock_taken == TRUE)
					MEMLOCK_UNLOCK();
				return(error);
			}
			savelen = oldlen;
			vslock_taken = TRUE;
		}
	}

	my_cred = kauth_cred_proc_ref(p);
	error = mac_system_check_sysctl(
	    my_cred,
	    (int *) name,
	    uap->namelen,
	    uap->old,
	    uap->oldlenp,
	    fnp == kern_sysctl ? 1 : 0,
	    uap->new,
	    newlen
	);
	kauth_cred_unref(&my_cred);
	if (error)
		return (error);

	if (fnp) {
		error = (*fnp)(name + 1, uap->namelen - 1, uap->old,
			       &oldlen, uap->new, newlen, p);
	}
	else
		error = ENOTSUP;

	if (vslock_taken == TRUE) {
		error1 = vsunlock(uap->old, (user_size_t)savelen, B_WRITE);
		if (!error)
			error = error1;
	}
	if (memlock_taken == TRUE)
		MEMLOCK_UNLOCK();

	if ( (name[0] != CTL_VFS) && (error == ENOTSUP)) {
		size_t tmp = oldlen;
		boolean_t funnel_state;

		/*
		 * Drop the funnel when calling new sysctl code, which will conditionally
		 * grab the funnel if it really needs to.
		 */
		funnel_state = thread_funnel_set(kernel_flock, FALSE);

		error = userland_sysctl(p, name, uap->namelen, uap->old, &tmp,
		                        1, uap->new, newlen, &oldlen);

		thread_funnel_set(kernel_flock, funnel_state);
	}

	if ((error) && (error != ENOMEM))
		return (error);

	if (uap->oldlenp != USER_ADDR_NULL)
		error = suulong(uap->oldlenp, oldlen);

	return (error);
}
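/*
 * Editorial sketch (not part of the original file): what the userland
 * side of this deprecated syscall looks like.  A caller builds a MIB
 * array and passes old/new buffers; kern.maxproc is used here purely
 * as a familiar example.
 */
#if 0	/* example only -- userland code, not kernel code */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int mib[2] = { CTL_KERN, KERN_MAXPROC };
	int maxproc;
	size_t len = sizeof(maxproc);

	if (sysctl(mib, 2, &maxproc, &len, NULL, 0) == -1)
		return (1);
	printf("kern.maxproc = %d\n", maxproc);
	return (0);
}
#endif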
/*
 * Attributes stored in the kernel.
 */
__private_extern__ char corefilename[MAXPATHLEN+1];
__private_extern__ int do_coredump;
__private_extern__ int sugid_coredump;

__private_extern__ int do_count_syscalls;

int securelevel = -1;

static int
sysctl_affinity(
	int *name,
	u_int namelen,
	user_addr_t oldBuf,
	size_t *oldSize,
	user_addr_t newBuf,
	__unused size_t newSize,
	proc_t cur_proc)
{
	if (namelen < 1)
		return (ENOTSUP);

	if (name[0] == 0 && 1 == namelen) {
		return sysctl_rdint(oldBuf, oldSize, newBuf,
			(cur_proc->p_flag & P_AFFINITY) ? 1 : 0);
	} else if (name[0] == 1 && 2 == namelen) {
		if (name[1] == 0) {
			OSBitAndAtomic(~((uint32_t)P_AFFINITY), (UInt32 *)&cur_proc->p_flag);
		} else {
			OSBitOrAtomic(P_AFFINITY, (UInt32 *)&cur_proc->p_flag);
		}
		return 0;
	}
	return (ENOTSUP);
}

static int
sysctl_translate(
	int *name,
	u_int namelen,
	user_addr_t oldBuf,
	size_t *oldSize,
	user_addr_t newBuf,
	__unused size_t newSize,
	proc_t cur_proc)
{
	proc_t p;
	uid_t uid;
	int istranslated = 0;
	kauth_cred_t my_cred;

	if (namelen != 1)
		return (ENOTSUP);

	p = proc_find(name[0]);
	if (p == NULL)
		return (EINVAL);

	my_cred = kauth_cred_proc_ref(p);
	uid = kauth_cred_getuid(my_cred);
	kauth_cred_unref(&my_cred);
	if ((uid != kauth_cred_getuid(kauth_cred_get()))
		&& suser(kauth_cred_get(), &cur_proc->p_acflag)) {
		proc_rele(p);
		return (EPERM);
	}

	istranslated = (p->p_flag & P_TRANSLATED);
	proc_rele(p);
	return sysctl_rdint(oldBuf, oldSize, newBuf,
		(istranslated != 0) ? 1 : 0);
}
int
set_archhandler(__unused proc_t p, int arch)
{
	int error;
	struct nameidata nd;
	struct vnode_attr va;
	vfs_context_t ctx = vfs_context_current();
	struct exec_archhandler *archhandler;

	switch(arch) {
	case CPU_TYPE_POWERPC:
		archhandler = &exec_archhandler_ppc;
		break;
	default:
		return (EBADARCH);
	}

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE32,
	       CAST_USER_ADDR_T(archhandler->path), ctx);
	error = namei(&nd);
	if (error)
		return (error);
	nameidone(&nd);

	/* Check mount point */
	if ((nd.ni_vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    (nd.ni_vp->v_type != VREG)) {
		vnode_put(nd.ni_vp);
		return (EACCES);
	}

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_fsid);
	VATTR_WANTED(&va, va_fileid);
	error = vnode_getattr(nd.ni_vp, &va, ctx);
	vnode_put(nd.ni_vp);
	if (error)
		return (error);

	archhandler->fsid = va.va_fsid;
	archhandler->fileid = (u_long)va.va_fileid;
	return 0;
}

/* XXX remove once Rosetta is rev'ed */
/*****************************************************************************/
static int
sysctl_exec_archhandler_ppc(
	__unused int *name,
	__unused u_int namelen,
	user_addr_t oldBuf,
	size_t *oldSize,
	user_addr_t newBuf,
	size_t newSize,
	proc_t p)
{
	int error;
	size_t len;
	char handler[sizeof(exec_archhandler_ppc.path)];
	vfs_context_t ctx = vfs_context_current();

	if (oldSize) {
		len = strlen(exec_archhandler_ppc.path) + 1;
		if (oldBuf) {
			if (*oldSize < len)
				return (ENOMEM);
			error = copyout(exec_archhandler_ppc.path, oldBuf, len);
			if (error)
				return (error);
		}
		*oldSize = len - 1;
	}
	if (newBuf) {
		error = suser(vfs_context_ucred(ctx), &p->p_acflag);
		if (error)
			return (error);
		if (newSize >= sizeof(exec_archhandler_ppc.path))
			return (ENAMETOOLONG);
		error = copyin(newBuf, handler, newSize);
		if (error)
			return (error);
		handler[newSize] = 0;
		strlcpy(exec_archhandler_ppc.path, handler, MAXPATHLEN);
		error = set_archhandler(p, CPU_TYPE_POWERPC);
		if (error)
			return (error);
	}
	return 0;
}
/*****************************************************************************/

static int
sysctl_handle_exec_archhandler_ppc(struct sysctl_oid *oidp, void *arg1,
		int arg2, struct sysctl_req *req)
{
	int error = 0;

	error = sysctl_handle_string(oidp, arg1, arg2, req);

	if (error)
		goto done;

	error = set_archhandler(req->p, CPU_TYPE_POWERPC);

done:
	return error;
}

SYSCTL_NODE(_kern, KERN_EXEC, exec, CTLFLAG_RD|CTLFLAG_LOCKED, 0, "");

SYSCTL_NODE(_kern_exec, OID_AUTO, archhandler, CTLFLAG_RD|CTLFLAG_LOCKED, 0, "");

SYSCTL_PROC(_kern_exec_archhandler, OID_AUTO, powerpc,
	CTLTYPE_STRING | CTLFLAG_RW, exec_archhandler_ppc.path, 0,
	sysctl_handle_exec_archhandler_ppc, "A", "");

extern int get_kernel_symfile(proc_t, char **);
__private_extern__ int
sysctl_dopanicinfo(int *, u_int, user_addr_t, size_t *, user_addr_t,
	size_t, proc_t);
/*
 * kernel related system variables.
 */
int
kern_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, proc_t p)
{
	/* all sysctl names not listed below are terminal at this level */
	if (namelen != 1
		&& !(name[0] == KERN_PROC
			|| name[0] == KERN_PROF
			|| name[0] == KERN_KDEBUG
			|| name[0] == KERN_PROCARGS
			|| name[0] == KERN_PROCARGS2
			|| name[0] == KERN_IPC
			|| name[0] == KERN_SYSV
			|| name[0] == KERN_AFFINITY
			|| name[0] == KERN_TRANSLATE
			|| name[0] == KERN_EXEC
			|| name[0] == KERN_PANICINFO
			|| name[0] == KERN_POSIX
			|| name[0] == KERN_TFP
			|| name[0] == KERN_TTY
			|| name[0] == KERN_LCTX
		))
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case KERN_PROC:
		return (sysctl_doproc(name + 1, namelen - 1, oldp, oldlenp));
#ifdef GPROF
	case KERN_PROF:
		return (sysctl_doprof(name + 1, namelen - 1, oldp, oldlenp,
			newp, newlen));
#endif
	case KERN_KDEBUG:
		return (kdebug_ops(name + 1, namelen - 1, oldp, oldlenp, p));
	case KERN_PROCARGS:
		/* new one as it does not use kinfo_proc */
		return (sysctl_procargs(name + 1, namelen - 1, oldp, oldlenp, p));
	case KERN_PROCARGS2:
		/* new one as it does not use kinfo_proc */
		return (sysctl_procargs2(name + 1, namelen - 1, oldp, oldlenp, p));
	case KERN_PANICINFO:
		return(sysctl_dopanicinfo(name + 1, namelen - 1, oldp, oldlenp,
			newp, newlen, p));
	case KERN_AFFINITY:
		return sysctl_affinity(name+1, namelen-1, oldp, oldlenp,
			newp, newlen, p);
	case KERN_TRANSLATE:
		return sysctl_translate(name+1, namelen-1, oldp, oldlenp, newp,
			newlen, p);

	/* XXX remove once Rosetta has rev'ed */
	case KERN_EXEC:
		return sysctl_exec_archhandler_ppc(name+1, namelen-1, oldp,
			oldlenp, newp, newlen, p);
#if COUNT_SYSCALLS
	case KERN_COUNT_SYSCALLS:
	{
		/* valid values passed in:
		 * = 0 means don't keep called counts for each bsd syscall
		 * > 0 means keep called counts for each bsd syscall
		 * = 2 means dump current counts to the system log
		 * = 3 means reset all counts
		 * for example, to dump current counts:
		 *		sysctl -w kern.count_calls=2
		 */
		int error, tmp;

		error = sysctl_int(oldp, oldlenp, newp, newlen, &tmp);
		if (error != 0)
			return (error);

		if (tmp == 1) {
			do_count_syscalls = 1;
		}
		else if ( tmp == 0 || tmp == 2 || tmp == 3 ) {
			extern int syscalls_log[];
			extern const char * syscallnames[];
			int i;

			for ( i = 0; i < nsysent; i++ ) {
				if ( syscalls_log[i] != 0 ) {
					if (tmp == 2)
						printf("%d calls - name %s \n", syscalls_log[i], syscallnames[i]);
					else
						syscalls_log[i] = 0;
				}
			}
			if (tmp != 0)
				do_count_syscalls = 1;
		}
		return (0);
	}
#endif	/* COUNT_SYSCALLS */
	default:
		return (ENOTSUP);
	}
}
/*
 * Debugging related system variables.
 */
#if DIAGNOSTIC
extern
#endif /* DIAGNOSTIC */
struct ctldebug debug0, debug1;
struct ctldebug debug2, debug3, debug4;
struct ctldebug debug5, debug6, debug7, debug8, debug9;
struct ctldebug debug10, debug11, debug12, debug13, debug14;
struct ctldebug debug15, debug16, debug17, debug18, debug19;
static struct ctldebug *debugvars[CTL_DEBUG_MAXID] = {
	&debug0, &debug1, &debug2, &debug3, &debug4,
	&debug5, &debug6, &debug7, &debug8, &debug9,
	&debug10, &debug11, &debug12, &debug13, &debug14,
	&debug15, &debug16, &debug17, &debug18, &debug19,
};

int
debug_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, __unused proc_t p)
{
	struct ctldebug *cdp;

	/* all sysctl names at this level are name and field */
	if (namelen != 2)
		return (ENOTDIR);		/* overloaded */
	if (name[0] < 0 || name[0] >= CTL_DEBUG_MAXID)
		return (ENOTSUP);
	cdp = debugvars[name[0]];
	if (cdp->debugname == 0)
		return (ENOTSUP);
	switch (name[1]) {
	case CTL_DEBUG_NAME:
		return (sysctl_rdstring(oldp, oldlenp, newp, cdp->debugname));
	case CTL_DEBUG_VALUE:
		return (sysctl_int(oldp, oldlenp, newp, newlen, cdp->debugvar));
	default:
		return (ENOTSUP);
	}
}
/*
 * The following sysctl_* functions should not be used
 * any more, as they can only cope with callers in
 * user mode: Use new-style
 *  sysctl_io_number()
 *  sysctl_io_string()
 *  sysctl_io_opaque()
 * instead.
 */

/*
 * Validate parameters and get old / set new parameters
 * for an integer-valued sysctl function.
 */
int
sysctl_int(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, int *valp)
{
	int error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	if (oldp && *oldlenp < sizeof(int))
		return (ENOMEM);
	if (newp && newlen != sizeof(int))
		return (EINVAL);
	*oldlenp = sizeof(int);
	if (oldp)
		error = copyout(valp, oldp, sizeof(int));
	if (error == 0 && newp) {
		error = copyin(newp, valp, sizeof(int));
		AUDIT_ARG(value, *valp);
	}
	return (error);
}
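/*
 * Editorial sketch (not part of the original file): how a deprecated
 * "case"-style handler typically uses sysctl_int() -- it reports the
 * current value through oldp and, when newp is set, accepts a
 * replacement, in one call.  The variable example_knob is a
 * hypothetical placeholder.
 */
#if 0	/* example only */
static int example_knob = 0;

static int
example_old_style_handler(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen)
{
	/* copies example_knob out to oldp and, if newp is set, copies a new int in */
	return (sysctl_int(oldp, oldlenp, newp, newlen, &example_knob));
}
#endif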
/*
 * As above, but read-only.
 */
int
sysctl_rdint(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, int val)
{
	int error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	if (oldp && *oldlenp < sizeof(int))
		return (ENOMEM);
	if (newp)
		return (EPERM);
	*oldlenp = sizeof(int);
	if (oldp)
		error = copyout((caddr_t)&val, oldp, sizeof(int));
	return (error);
}

/*
 * Validate parameters and get old / set new parameters
 * for an quad(64bit)-valued sysctl function.
 */
int
sysctl_quad(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, quad_t *valp)
{
	int error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	if (oldp && *oldlenp < sizeof(quad_t))
		return (ENOMEM);
	if (newp && newlen != sizeof(quad_t))
		return (EINVAL);
	*oldlenp = sizeof(quad_t);
	if (oldp)
		error = copyout(valp, oldp, sizeof(quad_t));
	if (error == 0 && newp)
		error = copyin(newp, valp, sizeof(quad_t));
	return (error);
}

/*
 * As above, but read-only.
 */
int
sysctl_rdquad(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, quad_t val)
{
	int error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	if (oldp && *oldlenp < sizeof(quad_t))
		return (ENOMEM);
	if (newp)
		return (EPERM);
	*oldlenp = sizeof(quad_t);
	if (oldp)
		error = copyout((caddr_t)&val, oldp, sizeof(quad_t));
	return (error);
}
/*
 * Validate parameters and get old / set new parameters
 * for a string-valued sysctl function.  Unlike sysctl_string, if you
 * give it a too small (but larger than 0 bytes) buffer, instead of
 * returning ENOMEM, it truncates the returned string to the buffer
 * size.  This preserves the semantics of some library routines
 * implemented via sysctl, which truncate their returned data, rather
 * than simply returning an error.  The returned string is always NUL
 * terminated.
 */
int
sysctl_trstring(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, char *str, int maxlen)
{
	int len, copylen, error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	copylen = len = strlen(str) + 1;
	if (oldp && (len < 0 || *oldlenp < 1))
		return (ENOMEM);
	if (oldp && (*oldlenp < (size_t)len))
		copylen = *oldlenp + 1;
	if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
		return (EINVAL);
	*oldlenp = copylen - 1; /* deal with NULL strings correctly */
	if (oldp) {
		error = copyout(str, oldp, copylen);
		if (!error) {
			unsigned char c = 0;

			/* NUL terminate */
			oldp += *oldlenp;
			error = copyout((void *)&c, oldp, sizeof(char));
		}
	}
	if (error == 0 && newp) {
		error = copyin(newp, str, newlen);
		str[newlen] = 0;
		AUDIT_ARG(text, (char *)str);
	}
	return (error);
}
/*
 * Validate parameters and get old / set new parameters
 * for a string-valued sysctl function.
 */
int
sysctl_string(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, char *str, int maxlen)
{
	int len, error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	len = strlen(str) + 1;
	if (oldp && (len < 0 || *oldlenp < (size_t)len))
		return (ENOMEM);
	if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
		return (EINVAL);
	*oldlenp = len -1; /* deal with NULL strings correctly */
	if (oldp) {
		error = copyout(str, oldp, len);
	}
	if (error == 0 && newp) {
		error = copyin(newp, str, newlen);
		str[newlen] = 0;
		AUDIT_ARG(text, (char *)str);
	}
	return (error);
}

/*
 * As above, but read-only.
 */
int
sysctl_rdstring(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, char *str)
{
	int len, error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	len = strlen(str) + 1;
	if (oldp && *oldlenp < (size_t)len)
		return (ENOMEM);
	if (newp)
		return (EPERM);
	*oldlenp = len;
	if (oldp)
		error = copyout(str, oldp, len);
	return (error);
}

/*
 * Validate parameters and get old / set new parameters
 * for a structure oriented sysctl function.
 */
int
sysctl_struct(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, void *sp, int len)
{
	int error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	if (oldp && (len < 0 || *oldlenp < (size_t)len))
		return (ENOMEM);
	if (newp && (len < 0 || newlen > (size_t)len))
		return (EINVAL);
	if (oldp) {
		*oldlenp = len;
		error = copyout(sp, oldp, len);
	}
	if (error == 0 && newp)
		error = copyin(newp, sp, len);
	return (error);
}

/*
 * Validate parameters and get old parameters
 * for a structure oriented sysctl function.
 */
int
sysctl_rdstruct(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, void *sp, int len)
{
	int error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	if (oldp && (len < 0 || *oldlenp < (size_t)len))
		return (ENOMEM);
	if (newp)
		return (EPERM);
	*oldlenp = len;
	if (oldp)
		error = copyout(sp, oldp, len);
	return (error);
}
/*
 * Get file structures.
 */
static int
sysctl_file
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error;
	struct fileglob *fg;
	struct extern_file nef;

	if (req->oldptr == USER_ADDR_NULL) {
		/*
		 * overestimate by 10 files
		 */
		req->oldidx = sizeof(filehead) + (nfiles + 10) * sizeof(struct extern_file);
		return (0);
	}

	/*
	 * first copyout filehead
	 */
	error = SYSCTL_OUT(req, &filehead, sizeof(filehead));
	if (error)
		return (error);

	/*
	 * followed by an array of file structures
	 */
	for (fg = filehead.lh_first; fg != 0; fg = fg->f_list.le_next) {
		nef.f_list.le_next = (struct extern_file *)fg->f_list.le_next;
		nef.f_list.le_prev = (struct extern_file **)fg->f_list.le_prev;
		nef.f_flag = (fg->fg_flag & FMASK);
		nef.f_type = fg->fg_type;
		nef.f_count = fg->fg_count;
		nef.f_msgcount = fg->fg_msgcount;
		nef.f_cred = fg->fg_cred;
		nef.f_ops = fg->fg_ops;
		nef.f_offset = fg->fg_offset;
		nef.f_data = fg->fg_data;
		error = SYSCTL_OUT(req, &nef, sizeof(nef));
		if (error)
			return (error);
	}
	return (0);
}

SYSCTL_PROC(_kern, KERN_FILE, file,
	CTLTYPE_STRUCT | CTLFLAG_RW,
	0, 0, sysctl_file, "S,filehead", "");
static int
sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg)
{
	if (p->p_pid != (pid_t)arg)
		return(0);
	else
		return(1);
}

static int
sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg)
{
	if (p->p_pgrpid != (pid_t)arg)
		return(0);
	else
		return(1);
}

static int
sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg)
{
	boolean_t funnel_state;
	int retval;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	/* This is very racy but list lock is held.. Hmmm. */
	if ((p->p_flag & P_CONTROLT) == 0 ||
	    (p->p_pgrp == NULL) || (p->p_pgrp->pg_session == NULL) ||
	    p->p_pgrp->pg_session->s_ttyp == NULL ||
	    p->p_pgrp->pg_session->s_ttyp->t_dev != (dev_t)arg)
		retval = 0;
	else
		retval = 1;

	thread_funnel_set(kernel_flock, funnel_state);

	return(retval);
}

static int
sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg)
{
	kauth_cred_t my_cred;
	uid_t uid;

	if (p->p_ucred == NULL)
		return(0);
	my_cred = kauth_cred_proc_ref(p);
	uid = kauth_cred_getuid(my_cred);
	kauth_cred_unref(&my_cred);

	if (uid != (uid_t)arg)
		return(0);
	else
		return(1);
}

static int
sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg)
{
	kauth_cred_t my_cred;
	uid_t ruid;

	if (p->p_ucred == NULL)
		return(0);
	my_cred = kauth_cred_proc_ref(p);
	ruid = my_cred->cr_ruid;
	kauth_cred_unref(&my_cred);

	if (ruid != (uid_t)arg)
		return(0);
	else
		return(1);
}

static int
sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg)
{
	if ((p->p_lctx == NULL) ||
	    (p->p_lctx->lc_id != (pid_t)arg))
		return(0);
	else
		return(1);
}
/*
 * try over estimating by 5 procs
 */
#define KERN_PROCSLOP	(5 * sizeof (struct kinfo_proc))
struct sysdoproc_args {
	int	buflen;
	caddr_t	kprocp;
	boolean_t is_64_bit;
	user_addr_t	dp;
	size_t needed;
	int sizeof_kproc;
	int * errorp;
	int uidcheck;
	int ruidcheck;
	int ttycheck;
	int uidval;
};

int
sysdoproc_callback(proc_t p, void * arg)
{
	struct sysdoproc_args * args = (struct sysdoproc_args *)arg;
	int error = 0;

	if (args->buflen >= args->sizeof_kproc) {
		if ((args->ruidcheck != 0) && (sysdoproc_filt_KERN_PROC_RUID(p, (void *)args->uidval) == 0))
			return(PROC_RETURNED);
		if ((args->uidcheck != 0) && (sysdoproc_filt_KERN_PROC_UID(p, (void *)args->uidval) == 0))
			return(PROC_RETURNED);
		if ((args->ttycheck != 0) && (sysdoproc_filt_KERN_PROC_TTY(p, (void *)args->uidval) == 0))
			return(PROC_RETURNED);

		bzero(args->kprocp, args->sizeof_kproc);
		if (args->is_64_bit) {
			fill_user_proc(p, (struct user_kinfo_proc *) args->kprocp);
		}
		else {
			fill_proc(p, (struct kinfo_proc *) args->kprocp);
		}
		error = copyout(args->kprocp, args->dp, args->sizeof_kproc);
		if (error) {
			*args->errorp = error;
			return(PROC_RETURNED_DONE);
		}
		args->dp += args->sizeof_kproc;
		args->buflen -= args->sizeof_kproc;
	}
	args->needed += args->sizeof_kproc;
	return(PROC_RETURNED);
}
int
sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep)
{
	user_addr_t dp = where;
	size_t needed = 0;
	int buflen = where != USER_ADDR_NULL ? *sizep : 0;
	int error = 0;
	boolean_t is_64_bit = FALSE;
	struct kinfo_proc       kproc;
	struct user_kinfo_proc  user_kproc;
	int sizeof_kproc = 0;
	caddr_t kprocp = NULL;
	int (*filterfn)(proc_t, void *) = 0;
	struct sysdoproc_args args;
	int uidcheck = 0;
	int ruidcheck = 0;
	int ttycheck = 0;

	if (namelen != 2 && !(namelen == 1 && name[0] == KERN_PROC_ALL))
		return (EINVAL);
	is_64_bit = proc_is64bit(current_proc());
	if (is_64_bit) {
		sizeof_kproc = sizeof(user_kproc);
		kprocp = (caddr_t) &user_kproc;
	}
	else {
		sizeof_kproc = sizeof(kproc);
		kprocp = (caddr_t) &kproc;
	}

	switch (name[0]) {

	case KERN_PROC_PID:
		filterfn = sysdoproc_filt_KERN_PROC_PID;
		break;

	case KERN_PROC_PGRP:
		filterfn = sysdoproc_filt_KERN_PROC_PGRP;
		break;

	case KERN_PROC_TTY:
		ttycheck = 1;
		break;

	case KERN_PROC_UID:
		uidcheck = 1;
		break;

	case KERN_PROC_RUID:
		ruidcheck = 1;
		break;

	case KERN_PROC_LCID:
		filterfn = sysdoproc_filt_KERN_PROC_LCID;
		break;
	}

	/* set up the callback arguments */
	args.buflen = buflen;
	args.kprocp = kprocp;
	args.is_64_bit = is_64_bit;
	args.dp = dp;
	args.needed = needed;
	args.errorp = &error;
	args.uidcheck = uidcheck;
	args.ruidcheck = ruidcheck;
	args.ttycheck = ttycheck;
	args.sizeof_kproc = sizeof_kproc;
	args.uidval = name[1];

	proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST), sysdoproc_callback, &args, filterfn, (void *)name[1]);

	if (error)
		return(error);

	dp = args.dp;
	needed = args.needed;

	if (where != USER_ADDR_NULL) {
		*sizep = dp - where;
		if (needed > *sizep)
			return (ENOMEM);
	} else {
		needed += KERN_PROCSLOP;
		*sizep = needed;
	}
	return (0);
}
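/*
 * Editorial sketch (not part of the original file): how userland
 * typically drives this handler -- ask for the size first, allocate,
 * then fetch the kinfo_proc array.  Plain illustrative code with no
 * error handling beyond the minimum.
 */
#if 0	/* example only -- userland code, not kernel code */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdlib.h>

static struct kinfo_proc *
copy_process_list(size_t *count)
{
	int mib[3] = { CTL_KERN, KERN_PROC, KERN_PROC_ALL };
	size_t len = 0;
	struct kinfo_proc *procs;

	/* first call: kernel reports the (over-estimated) buffer size */
	if (sysctl(mib, 3, NULL, &len, NULL, 0) == -1)
		return (NULL);
	procs = malloc(len);
	if (procs == NULL)
		return (NULL);
	/* second call: kernel fills the buffer and trims len */
	if (sysctl(mib, 3, procs, &len, NULL, 0) == -1) {
		free(procs);
		return (NULL);
	}
	*count = len / sizeof(struct kinfo_proc);
	return (procs);
}
#endif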
/*
 * Fill in an eproc structure for the specified process.
 */
static void
fill_eproc(proc_t p, struct eproc *ep)
{
	struct tty *tp;
	kauth_cred_t my_cred;
	struct pgrp * pg;
	struct session * sessp;

	pg = proc_pgrp(p);
	sessp = proc_session(p);

	ep->e_paddr = p;

	if (pg != PGRP_NULL) {
		ep->e_sess = sessp;
		ep->e_pgid = p->p_pgrpid;
		ep->e_jobc = pg->pg_jobc;
		if ((sessp != SESSION_NULL) && sessp->s_ttyvp)
			ep->e_flag = EPROC_CTTY;
	} else {
		ep->e_sess = (struct session *)0;
		ep->e_pgid = 0;
		ep->e_jobc = 0;
	}
	if (p->p_lctx)
		ep->e_lcid = p->p_lctx->lc_id;
	else
		ep->e_lcid = 0;

	ep->e_ppid = p->p_ppid;
	/* Pre-zero the fake historical pcred */
	bzero(&ep->e_pcred, sizeof(struct _pcred));

	my_cred = kauth_cred_proc_ref(p);

	/* A fake historical pcred */
	ep->e_pcred.p_ruid = my_cred->cr_ruid;
	ep->e_pcred.p_svuid = my_cred->cr_svuid;
	ep->e_pcred.p_rgid = my_cred->cr_rgid;
	ep->e_pcred.p_svgid = my_cred->cr_svgid;
	/* A fake historical *kauth_cred_t */
	ep->e_ucred.cr_ref = my_cred->cr_ref;
	ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
	ep->e_ucred.cr_ngroups = my_cred->cr_ngroups;
	bcopy(my_cred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));

	kauth_cred_unref(&my_cred);

	if (p->p_stat == SIDL || p->p_stat == SZOMB) {
		ep->e_vm.vm_tsize = 0;
		ep->e_vm.vm_dsize = 0;
		ep->e_vm.vm_ssize = 0;
	}
	ep->e_vm.vm_rssize = 0;

	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
	    (tp = sessp->s_ttyp)) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = sessp->s_ttypgrpid;
		ep->e_tsess = tp->t_session;
	} else
		ep->e_tdev = NODEV;

	if (SESS_LEADER(p, sessp))
		ep->e_flag |= EPROC_SLEADER;
	bzero(&ep->e_wmesg[0], WMESGLEN+1);
	ep->e_xsize = ep->e_xrssize = 0;
	ep->e_xccount = ep->e_xswrss = 0;
	if (sessp != SESSION_NULL)
		session_rele(sessp);
	if (pg != PGRP_NULL)
		pg_rele(pg);
}
/*
 * Fill in an LP64 version of eproc structure for the specified process.
 */
static void
fill_user_eproc(proc_t p, struct user_eproc *ep)
{
	struct tty *tp;
	struct session *sessp = NULL;
	struct pgrp *pg;
	kauth_cred_t my_cred;

	pg = proc_pgrp(p);
	sessp = proc_session(p);

	ep->e_paddr = CAST_USER_ADDR_T(p);
	if (pg != PGRP_NULL) {
		ep->e_sess = CAST_USER_ADDR_T(sessp);
		ep->e_pgid = p->p_pgrpid;
		ep->e_jobc = pg->pg_jobc;
		if (sessp != SESSION_NULL) {
			if (sessp->s_ttyvp)
				ep->e_flag = EPROC_CTTY;
		}
	} else {
		ep->e_sess = USER_ADDR_NULL;
		ep->e_pgid = 0;
		ep->e_jobc = 0;
	}
	if (p->p_lctx)
		ep->e_lcid = p->p_lctx->lc_id;
	else
		ep->e_lcid = 0;

	ep->e_ppid = p->p_ppid;
	/* Pre-zero the fake historical pcred */
	bzero(&ep->e_pcred, sizeof(ep->e_pcred));

	my_cred = kauth_cred_proc_ref(p);

	/* A fake historical pcred */
	ep->e_pcred.p_ruid = my_cred->cr_ruid;
	ep->e_pcred.p_svuid = my_cred->cr_svuid;
	ep->e_pcred.p_rgid = my_cred->cr_rgid;
	ep->e_pcred.p_svgid = my_cred->cr_svgid;

	/* A fake historical *kauth_cred_t */
	ep->e_ucred.cr_ref = my_cred->cr_ref;
	ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
	ep->e_ucred.cr_ngroups = my_cred->cr_ngroups;
	bcopy(my_cred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));

	kauth_cred_unref(&my_cred);

	if (p->p_stat == SIDL || p->p_stat == SZOMB) {
		ep->e_vm.vm_tsize = 0;
		ep->e_vm.vm_dsize = 0;
		ep->e_vm.vm_ssize = 0;
	}
	ep->e_vm.vm_rssize = 0;

	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
	    (tp = sessp->s_ttyp)) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = sessp->s_ttypgrpid;
		ep->e_tsess = CAST_USER_ADDR_T(tp->t_session);
	} else
		ep->e_tdev = NODEV;

	if (SESS_LEADER(p, sessp))
		ep->e_flag |= EPROC_SLEADER;
	bzero(&ep->e_wmesg[0], WMESGLEN+1);
	ep->e_xsize = ep->e_xrssize = 0;
	ep->e_xccount = ep->e_xswrss = 0;
	if (sessp != SESSION_NULL)
		session_rele(sessp);
	if (pg != PGRP_NULL)
		pg_rele(pg);
}
/*
 * Fill in an eproc structure for the specified process.
 */
static void
fill_externproc(proc_t p, struct extern_proc *exp)
{
	exp->p_forw = exp->p_back = NULL;
	exp->p_starttime = p->p_start;
	exp->p_vmspace = NULL;
	exp->p_sigacts = p->p_sigacts;
	exp->p_flag = p->p_flag;
	if (p->p_lflag & P_LTRACED)
		exp->p_flag |= P_TRACED;
	if (p->p_lflag & P_LPPWAIT)
		exp->p_flag |= P_PPWAIT;
	if (p->p_lflag & P_LEXIT)
		exp->p_flag |= P_WEXIT;
	exp->p_stat = p->p_stat;
	exp->p_pid = p->p_pid;
	exp->p_oppid = p->p_oppid;
	/* Mach related */
	exp->user_stack = CAST_DOWN(caddr_t, p->user_stack);
	exp->exit_thread = p->exit_thread;
	exp->p_debugger = p->p_debugger;
	exp->sigwait = p->sigwait;
	/* scheduling */
#ifdef _PROC_HAS_SCHEDINFO_
	exp->p_estcpu = p->p_estcpu;
	exp->p_pctcpu = p->p_pctcpu;
	exp->p_slptime = p->p_slptime;
#else
	exp->p_estcpu = 0;
	exp->p_pctcpu = 0;
	exp->p_slptime = 0;
#endif
	exp->p_cpticks = 0;
	bcopy(&p->p_realtimer, &exp->p_realtimer, sizeof(struct itimerval));
	bcopy(&p->p_rtime, &exp->p_rtime, sizeof(struct timeval));
	exp->p_traceflag = 0;
	exp->p_siglist = 0;		/* No longer relevant */
	exp->p_textvp = p->p_textvp;
	exp->p_holdcnt = 0;
	exp->p_sigmask = 0;		/* no longer available */
	exp->p_sigignore = p->p_sigignore;
	exp->p_sigcatch = p->p_sigcatch;
	exp->p_priority = p->p_priority;
	exp->p_nice = p->p_nice;
	bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
	exp->p_comm[MAXCOMLEN] = '\0';
	exp->p_pgrp = p->p_pgrp;
	exp->p_xstat = p->p_xstat;
	exp->p_acflag = p->p_acflag;
	exp->p_ru = p->p_ru;		/* XXX may be NULL */
}
/*
 * Fill in an LP64 version of extern_proc structure for the specified process.
 */
static void
fill_user_externproc(proc_t p, struct user_extern_proc *exp)
{
	exp->p_forw = exp->p_back = USER_ADDR_NULL;
	exp->p_starttime.tv_sec = p->p_start.tv_sec;
	exp->p_starttime.tv_usec = p->p_start.tv_usec;
	exp->p_vmspace = USER_ADDR_NULL;
	exp->p_sigacts = CAST_USER_ADDR_T(p->p_sigacts);
	exp->p_flag = p->p_flag;
	if (p->p_lflag & P_LTRACED)
		exp->p_flag |= P_TRACED;
	if (p->p_lflag & P_LPPWAIT)
		exp->p_flag |= P_PPWAIT;
	if (p->p_lflag & P_LEXIT)
		exp->p_flag |= P_WEXIT;
	exp->p_stat = p->p_stat;
	exp->p_pid = p->p_pid;
	exp->p_oppid = p->p_oppid;
	/* Mach related */
	exp->user_stack = p->user_stack;
	exp->exit_thread = CAST_USER_ADDR_T(p->exit_thread);
	exp->p_debugger = p->p_debugger;
	exp->sigwait = p->sigwait;
	/* scheduling */
#ifdef _PROC_HAS_SCHEDINFO_
	exp->p_estcpu = p->p_estcpu;
	exp->p_pctcpu = p->p_pctcpu;
	exp->p_slptime = p->p_slptime;
#else
	exp->p_estcpu = 0;
	exp->p_pctcpu = 0;
	exp->p_slptime = 0;
#endif
	exp->p_cpticks = 0;
	exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
	exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;
	exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
	exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;
	exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
	exp->p_rtime.tv_usec = p->p_rtime.tv_usec;
	exp->p_traceflag = 0;
	exp->p_siglist = 0;		/* No longer relevant */
	exp->p_textvp = CAST_USER_ADDR_T(p->p_textvp);
	exp->p_holdcnt = 0;
	exp->p_sigmask = 0;		/* no longer available */
	exp->p_sigignore = p->p_sigignore;
	exp->p_sigcatch = p->p_sigcatch;
	exp->p_priority = p->p_priority;
	exp->p_nice = p->p_nice;
	bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
	exp->p_comm[MAXCOMLEN] = '\0';
	exp->p_pgrp = CAST_USER_ADDR_T(p->p_pgrp);
	exp->p_addr = USER_ADDR_NULL;
	exp->p_xstat = p->p_xstat;
	exp->p_acflag = p->p_acflag;
	exp->p_ru = CAST_USER_ADDR_T(p->p_ru);		/* XXX may be NULL */
}
static void
fill_proc(proc_t p, struct kinfo_proc *kp)
{
	fill_externproc(p, &kp->kp_proc);
	fill_eproc(p, &kp->kp_eproc);
}

static void
fill_user_proc(proc_t p, struct user_kinfo_proc *kp)
{
	fill_user_externproc(p, &kp->kp_proc);
	fill_user_eproc(p, &kp->kp_eproc);
}
int
kdebug_ops(int *name, u_int namelen, user_addr_t where,
	size_t *sizep, proc_t p)
{
	int ret = 0;

	ret = suser(kauth_cred_get(), &p->p_acflag);
	if (ret)
		return(ret);

	switch(name[0]) {
	case KERN_KDSETRTCDEC:
	case KERN_KDGETENTROPY:
		ret = kdbg_control(name, namelen, where, sizep);
		break;
	default:
		ret = ENOTSUP;
		break;
	}
	return(ret);
}
/*
 * Return the top *sizep bytes of the user stack, or the entire area of the
 * user stack down through the saved exec_path, whichever is smaller.
 */
int
sysctl_procargs(int *name, u_int namelen, user_addr_t where,
	size_t *sizep, proc_t cur_proc)
{
	return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 0);
}

static int
sysctl_procargs2(int *name, u_int namelen, user_addr_t where,
	size_t *sizep, proc_t cur_proc)
{
	return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 1);
}

static int
sysctl_procargsx(int *name, __unused u_int namelen, user_addr_t where,
	size_t *sizep, proc_t cur_proc, int argc_yes)
{
	proc_t p;
	int buflen = where != USER_ADDR_NULL ? *sizep : 0;
	int error = 0;
	struct _vm_map *proc_map;
	struct task * task;
	vm_map_copy_t	tmp;
	user_addr_t	arg_addr;
	size_t		arg_size;
	caddr_t data;
	size_t argslen = 0;
	int size;
	vm_offset_t	copy_start, copy_end;
	kern_return_t ret;
	int pid;
	kauth_cred_t my_cred;
	uid_t uid;

	if (argc_yes)
		buflen -= sizeof(int);		/* reserve first word to return argc */

	/* we only care about buflen when where (oldp from sysctl) is not NULL. */
	/* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl) */
	/* is not NULL then the caller wants us to return the length needed to */
	/* hold the data we would return */
	if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
		return(EINVAL);
	}

	/*
	 *	Lookup process by pid
	 */
	pid = name[0];
	p = proc_find(pid);
	if (p == NULL) {
		return(EINVAL);
	}

	/*
	 *	Copy the top N bytes of the stack.
	 *	On all machines we have so far, the stack grows
	 *	downwards.
	 *
	 *	If the user expects no more than N bytes of
	 *	argument list, use that as a guess for the
	 *	size.
	 */

	if (!p->user_stack) {
		proc_rele(p);
		return(EINVAL);
	}

	if (where == USER_ADDR_NULL) {
		/* caller only wants to know length of proc args data */
		if (sizep == NULL) {
			proc_rele(p);
			return(EFAULT);
		}

		size = p->p_argslen;
		proc_rele(p);
		if (argc_yes) {
			size += sizeof(int);
		} else {
			/*
			 * old PROCARGS will return the executable's path and plus some
			 * extra space for work alignment and data tags
			 */
			size += PATH_MAX + (6 * sizeof(int));
		}
		size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
		*sizep = size;
		return (0);
	}

	my_cred = kauth_cred_proc_ref(p);
	uid = kauth_cred_getuid(my_cred);
	kauth_cred_unref(&my_cred);

	if ((uid != kauth_cred_getuid(kauth_cred_get()))
		&& suser(kauth_cred_get(), &cur_proc->p_acflag)) {
		proc_rele(p);
		return (EINVAL);
	}

	arg_size = buflen;
	if ((u_int)arg_size > p->p_argslen)
		arg_size = round_page(p->p_argslen);

	arg_addr = p->user_stack - arg_size;

	/*
	 *	Before we can block (any VM code), make another
	 *	reference to the map to keep it alive.  We do
	 *	that by getting a reference on the task itself.
	 */
	task = p->task;
	if (task == NULL) {
		proc_rele(p);
		return(EINVAL);
	}

	argslen = p->p_argslen;
	/*
	 * Once we have a task reference we can convert that into a
	 * map reference, which we will use in the calls below.  The
	 * task/process may change its map after we take this reference
	 * (see execve), but the worst that will happen then is a return
	 * of stale info (which is always a possibility).
	 */
	task_reference(task);
	proc_rele(p);
	proc_map = get_task_map_reference(task);
	task_deallocate(task);

	if (proc_map == NULL)
		return(EINVAL);

	ret = kmem_alloc(kernel_map, &copy_start, round_page(arg_size));
	if (ret != KERN_SUCCESS) {
		vm_map_deallocate(proc_map);
		return(ENOMEM);
	}

	copy_end = round_page(copy_start + arg_size);

	if( vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
			  (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
		vm_map_deallocate(proc_map);
		kmem_free(kernel_map, copy_start,
			  round_page(arg_size));
		return (EIO);
	}

	/*
	 *	Now that we've done the copyin from the process'
	 *	map, we can release the reference to it.
	 */
	vm_map_deallocate(proc_map);

	if( vm_map_copy_overwrite(kernel_map,
				  (vm_map_address_t)copy_start,
				  tmp, FALSE) != KERN_SUCCESS) {
		kmem_free(kernel_map, copy_start,
			  round_page(arg_size));
		return (EIO);
	}

	if (arg_size > argslen) {
		data = (caddr_t) (copy_end - argslen);
		size = argslen;
	} else {
		data = (caddr_t) (copy_end - arg_size);
		size = arg_size;
	}

	if (argc_yes) {
		/* Put the process's argc as the first word in the copyout buffer */
		suword(where, p->p_argc);
		error = copyout(data, (where + sizeof(int)), size);
		size += sizeof(int);
	} else {
		error = copyout(data, where, size);

		/*
		 * Make the old PROCARGS work to return the executable's path,
		 * but only if there is enough space in the provided buffer.
		 *
		 * On entry: data [possibly] points to the beginning of the path.
		 *
		 * Note: we keep all pointers & sizes aligned to word boundaries.
		 */
		if ( (! error) && (buflen > 0 && (u_int)buflen > argslen) )
		{
			int binPath_sz, alignedBinPath_sz = 0;
			int extraSpaceNeeded, addThis;
			user_addr_t placeHere;
			char * str = (char *) data;
			int max_len = size;

			/* Some apps are really bad about messing up their stacks,
			   so we have to be extra careful about getting the length
			   of the executing binary.  If we encounter an error, we bail. */

			/* Limit ourselves to PATH_MAX paths */
			if ( max_len > PATH_MAX ) max_len = PATH_MAX;

			binPath_sz = 0;

			while ( (binPath_sz < max_len-1) && (*str++ != 0) )
				binPath_sz++;

			/* If we have a NUL terminator, copy it, too */
			if (binPath_sz < max_len-1) binPath_sz += 1;

			/* Pre-flight the space requirements */

			/* Account for the padding that fills out binPath to the next word */
			alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;

			placeHere = where + size;

			/* Account for the bytes needed to keep placeHere word aligned */
			addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;

			/* Add up all the space that is needed */
			extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));

			/* is there room to tack on argv[0]? */
			if ( (buflen & ~(sizeof(int)-1)) >= ( argslen + extraSpaceNeeded ))
			{
				placeHere += addThis;
				suword(placeHere, 0);
				placeHere += sizeof(int);
				suword(placeHere, 0xBFFF0000);
				placeHere += sizeof(int);
				suword(placeHere, 0);
				placeHere += sizeof(int);
				error = copyout(data, placeHere, binPath_sz);
				if ( ! error ) {
					placeHere += binPath_sz;
					suword(placeHere, 0);
					size += extraSpaceNeeded;
				}
			}
		}
	}

	if (copy_start != (vm_offset_t) 0) {
		kmem_free(kernel_map, copy_start, copy_end - copy_start);
	}
	if (error) {
		return(error);
	}

	if (where != USER_ADDR_NULL)
		*sizep = size;
	return (0);
}
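/*
 * Editorial sketch (not part of the original file): the KERN_PROCARGS2
 * variant above prepends argc as the first int of the returned buffer,
 * followed by the saved exec path and argument strings.  A caller
 * typically sizes the buffer with kern.argmax.  Illustrative only.
 */
#if 0	/* example only -- userland code, not kernel code */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int
print_argc_of_pid(pid_t pid)
{
	int mib[3] = { CTL_KERN, KERN_PROCARGS2, pid };
	int mib_argmax[2] = { CTL_KERN, KERN_ARGMAX };
	int argmax, argc;
	size_t len;
	char *buf;

	len = sizeof(argmax);
	if (sysctl(mib_argmax, 2, &argmax, &len, NULL, 0) == -1)
		return (-1);
	buf = malloc(argmax);
	if (buf == NULL)
		return (-1);
	len = argmax;
	if (sysctl(mib, 3, buf, &len, NULL, 0) == -1) {
		free(buf);
		return (-1);
	}
	memcpy(&argc, buf, sizeof(argc));	/* first word is argc */
	printf("pid %d has argc=%d\n", (int)pid, argc);
	free(buf);
	return (0);
}
#endif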
/*
 * Max number of concurrent aio requests
 */
static int
sysctl_aiomax
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int error = sysctl_io_number(req, aio_max_requests, sizeof(int), &new_value, &changed);
	if (changed) {
		/* make sure the system-wide limit is greater than the per process limit */
		if (new_value >= aio_max_requests_per_process)
			aio_max_requests = new_value;
		else
			error = EINVAL;
	}
	return(error);
}

/*
 * Max number of concurrent aio requests per process
 */
static int
sysctl_aioprocmax
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int error = sysctl_io_number(req, aio_max_requests_per_process, sizeof(int), &new_value, &changed);
	if (changed) {
		/* make sure per process limit is less than the system-wide limit */
		if (new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX)
			aio_max_requests_per_process = new_value;
		else
			error = EINVAL;
	}
	return(error);
}

/*
 * Max number of async IO worker threads
 */
static int
sysctl_aiothreads
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int error = sysctl_io_number(req, aio_worker_threads, sizeof(int), &new_value, &changed);
	if (changed) {
		/* we only allow an increase in the number of worker threads */
		if (new_value > aio_worker_threads) {
			_aio_create_worker_threads((new_value - aio_worker_threads));
			aio_worker_threads = new_value;
		}
		else
			error = EINVAL;
	}
	return(error);
}

/*
 * System-wide limit on the max number of processes
 */
static int
sysctl_maxproc
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int error = sysctl_io_number(req, maxproc, sizeof(int), &new_value, &changed);
	if (changed) {
		AUDIT_ARG(value, new_value);
		/* make sure the system-wide limit is less than the configured hard
		   limit set at kernel compilation */
		if (new_value <= hard_maxproc && new_value > 0)
			maxproc = new_value;
		else
			error = EINVAL;
	}
	return(error);
}
SYSCTL_STRING(_kern, KERN_OSTYPE, ostype,
	CTLFLAG_RD | CTLFLAG_KERN,
	ostype, 0, "");
SYSCTL_STRING(_kern, KERN_OSRELEASE, osrelease,
	CTLFLAG_RD | CTLFLAG_KERN,
	osrelease, 0, "");
SYSCTL_INT(_kern, KERN_OSREV, osrevision,
	CTLFLAG_RD | CTLFLAG_KERN,
	(int *)NULL, BSD, "");
SYSCTL_STRING(_kern, KERN_VERSION, version,
	CTLFLAG_RD | CTLFLAG_KERN,
	version, 0, "");
/* PR-5293665: need to use a callback function for kern.osversion to set
 * osversion in IORegistry */
static int
sysctl_osversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
{
	int rval = 0;

	rval = sysctl_handle_string(oidp, arg1, arg2, req);

	if (req->newptr) {
		IORegistrySetOSBuildVersion((char *)arg1);
	}

	return rval;
}

SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
	CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING,
	osversion, 256 /* OSVERSIZE*/,
	sysctl_osversion, "A", "");

static int
sysctl_sysctl_bootargs
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error;
	char buf[256];

	strlcpy(buf, PE_boot_args(), 256);
	error = sysctl_io_string(req, buf, 256, 0, NULL);
	return(error);
}

SYSCTL_PROC(_kern, OID_AUTO, bootargs,
	CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING,
	NULL, 0,
	sysctl_sysctl_bootargs, "A", "bootargs");
SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles,
	CTLFLAG_RW | CTLFLAG_KERN,
	&maxfiles, 0, "");
SYSCTL_INT(_kern, KERN_ARGMAX, argmax,
	CTLFLAG_RD | CTLFLAG_KERN,
	NULL, ARG_MAX, "");
SYSCTL_INT(_kern, KERN_POSIX1, posix1version,
	CTLFLAG_RD | CTLFLAG_KERN,
	NULL, _POSIX_VERSION, "");
SYSCTL_INT(_kern, KERN_NGROUPS, ngroups,
	CTLFLAG_RD | CTLFLAG_KERN,
	NULL, NGROUPS_MAX, "");
SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control,
	CTLFLAG_RD | CTLFLAG_KERN,
	NULL, 1, "");
#if 1	/* _POSIX_SAVED_IDS from <unistd.h> */
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
	CTLFLAG_RD | CTLFLAG_KERN,
	NULL, 1, "");
#else
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
	CTLFLAG_RD | CTLFLAG_KERN,
	NULL, 0, "");
#endif

static int
sysctl_maxvnodes (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	unsigned int oldval = desiredvnodes;
	int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &desiredvnodes, NULL);
	reset_vmobjectcache(oldval, desiredvnodes);
	resize_namecache(desiredvnodes);
	return(error);
}

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_maxvnodes, "I", "");

SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_maxproc, "I", "");

SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_aiomax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_aioprocmax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_aiothreads, "I", "");
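/*
 * Editorial sketch (not part of the original file): because the AIO
 * limits above are registered as plain integer OIDs, an administrator
 * can read or set them by name from userland.  Illustrative only.
 */
#if 0	/* example only -- userland code, not kernel code */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int aiomax = 0;
	size_t len = sizeof(aiomax);

	if (sysctlbyname("kern.aiomax", &aiomax, &len, NULL, 0) == 0)
		printf("kern.aiomax = %d\n", aiomax);
	return (0);
}
#endif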
static int
sysctl_securelvl
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int error = sysctl_io_number(req, securelevel, sizeof(int), &new_value, &changed);
	if (changed) {
		if (!(new_value < securelevel && req->p->p_pid != 1)) {
			proc_list_lock();
			securelevel = new_value;
			proc_list_unlock();
		} else {
			error = EPERM;
		}
	}
	return(error);
}

SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_securelvl, "I", "");

static int
sysctl_domainname
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error, changed;
	error = sysctl_io_string(req, domainname, sizeof(domainname), 0, &changed);
	if (changed) {
		domainnamelen = strlen(domainname);
	}
	return(error);
}

SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
	CTLTYPE_STRING | CTLFLAG_RW,
	0, 0, sysctl_domainname, "A", "");
SYSCTL_INT(_kern, KERN_HOSTID, hostid,
	CTLFLAG_RW | CTLFLAG_KERN,
	&hostid, 0, "");

static int
sysctl_hostname
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error, changed;
	error = sysctl_io_string(req, hostname, sizeof(hostname), 1, &changed);
	if (changed) {
		hostnamelen = req->newlen;
	}
	return(error);
}

SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
	CTLTYPE_STRING | CTLFLAG_RW,
	0, 0, sysctl_hostname, "A", "");
static int
sysctl_procname
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	/* Original code allowed writing, I'm copying this, although this all makes
	   no sense to me. Besides, this sysctl is never used. */
	return sysctl_io_string(req, &req->p->p_name[0], (2*MAXCOMLEN+1), 1, NULL);
}

SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
	CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY,
	0, 0, sysctl_procname, "A", "");
SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
	CTLFLAG_RW | CTLFLAG_KERN,
	&speculative_reads_disabled, 0, "");

static int
sysctl_boottime
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	struct timeval t;

	t.tv_sec = boottime_sec();
	t.tv_usec = 0;

	return sysctl_io_opaque(req, &t, sizeof(t), NULL);
}

SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
	CTLTYPE_STRUCT | CTLFLAG_RD,
	0, 0, sysctl_boottime, "S,timeval", "");
static int
sysctl_symfile
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	char *str;
	int error = get_kernel_symfile(req->p, &str);
	if (error)
		return (error);
	return sysctl_io_string(req, str, 0, 0, NULL);
}

SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
	CTLTYPE_STRING | CTLFLAG_RD,
	0, 0, sysctl_symfile, "A", "");

static int
sysctl_netboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, netboot_root(), sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
	CTLTYPE_INT | CTLFLAG_RD,
	0, 0, sysctl_netboot, "I", "");

static int
sysctl_usrstack
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, (int)req->p->user_stack, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack,
	CTLTYPE_INT | CTLFLAG_RD,
	0, 0, sysctl_usrstack, "I", "");

static int
sysctl_usrstack64
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, req->p->user_stack, sizeof(req->p->user_stack), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
	CTLTYPE_QUAD | CTLFLAG_RD,
	0, 0, sysctl_usrstack64, "Q", "");
SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
	CTLFLAG_RW | CTLFLAG_KERN,
	corefilename, sizeof(corefilename), "");

static int
sysctl_coredump
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int error = sysctl_io_number(req, do_coredump, sizeof(int), &new_value, &changed);
	if (changed) {
		if ((new_value == 0) || (new_value == 1))
			do_coredump = new_value;
		else
			error = EINVAL;
	}
	return(error);
}

SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_coredump, "I", "");

static int
sysctl_suid_coredump
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed);
	if (changed) {
		if ((new_value == 0) || (new_value == 1))
			sugid_coredump = new_value;
		else
			error = EINVAL;
	}
	return(error);
}

SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_suid_coredump, "I", "");
static int
sysctl_delayterm
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	struct proc *p = req->p;
	int new_value, changed;
	int error = sysctl_io_number(req, (req->p->p_lflag & P_LDELAYTERM)? 1: 0, sizeof(int), &new_value, &changed);
	if (changed) {
		proc_lock(p);
		if (new_value)
			req->p->p_lflag |= P_LDELAYTERM;
		else
			req->p->p_lflag &= ~P_LDELAYTERM;
		proc_unlock(p);
	}
	return(error);
}

SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_delayterm, "I", "");
static int
sysctl_proc_low_pri_io
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	struct proc *p = req->p;
	int new_value, old_value, changed;
	int error;

	switch (req->p->p_iopol_disk) {
	case IOPOL_DEFAULT:
	case IOPOL_NORMAL:
		old_value = 0;
		break;
	case IOPOL_THROTTLE:
		old_value = 1;
		break;
	case IOPOL_PASSIVE:
		old_value = 2;
		break;
	default:
		/* this should never happen, but to be robust, return the default value */
		old_value = 0;
		break;
	}

	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
	if (changed) {
		proc_lock(p);
		if (new_value & 0x01)
			req->p->p_iopol_disk = IOPOL_THROTTLE;
		else if (new_value & 0x02)
			req->p->p_iopol_disk = IOPOL_PASSIVE;
		else if (new_value == 0)
			req->p->p_iopol_disk = IOPOL_NORMAL;
		proc_unlock(p);
	}
	return(error);
}

SYSCTL_PROC(_kern, KERN_PROC_LOW_PRI_IO, proc_low_pri_io,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_proc_low_pri_io, "I", "");
static int
sysctl_rage_vnode
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	struct proc *p = req->p;
	struct uthread *ut;
	int new_value, old_value, changed;
	int error;

	ut = get_bsdthread_info(current_thread());

	if (ut->uu_flag & UT_RAGE_VNODES)
		old_value = KERN_RAGE_THREAD;
	else if (p->p_lflag & P_LRAGE_VNODES)
		old_value = KERN_RAGE_PROC;
	else
		old_value = 0;

	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);

	if (error == 0) {
		switch (new_value) {
		case KERN_RAGE_PROC:
			proc_lock(p);
			p->p_lflag |= P_LRAGE_VNODES;
			proc_unlock(p);
			break;
		case KERN_UNRAGE_PROC:
			proc_lock(p);
			p->p_lflag &= ~P_LRAGE_VNODES;
			proc_unlock(p);
			break;

		case KERN_RAGE_THREAD:
			ut->uu_flag |= UT_RAGE_VNODES;
			break;
		case KERN_UNRAGE_THREAD:
			ut = get_bsdthread_info(current_thread());
			ut->uu_flag &= ~UT_RAGE_VNODES;
			break;
		}
	}
	return(error);
}

SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
	CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_rage_vnode, "I", "");
static int
sysctl_kern_check_openevt
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	struct proc *p = req->p;
	int new_value, old_value, changed;
	int error;

	if (p->p_flag & P_CHECKOPENEVT) {
		old_value = KERN_OPENEVT_PROC;
	} else {
		old_value = 0;
	}

	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);

	if (error == 0) {
		switch (new_value) {
		case KERN_OPENEVT_PROC:
			OSBitOrAtomic(P_CHECKOPENEVT, (UInt32 *)&p->p_flag);
			break;

		case KERN_UNOPENEVT_PROC:
			OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT), (UInt32 *)&p->p_flag);
			break;

		default:
			error = EINVAL;
		}
	}
	return(error);
}

SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY,
	0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");
static int
sysctl_nx
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
	return ENOTSUP;
#else
	int new_value, changed;
	int error;

	error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);
	if (error)
		return error;

	if (changed) {
		/*
		 * Only allow setting if NX is supported on the chip
		 */
		if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD))
			return ENOTSUP;
		nx_enabled = new_value;
	}
	return(error);
#endif /* SECURE_KERNEL */
}

SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN,
	0, 0, sysctl_nx, "I", "");
static int
sysctl_loadavg
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	if (proc_is64bit(req->p)) {
		struct user_loadavg loadinfo64;
		loadavg32to64(&averunnable, &loadinfo64);
		return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);
	} else {
		return sysctl_io_opaque(req, &averunnable, sizeof(averunnable), NULL);
	}
}

SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
	CTLTYPE_STRUCT | CTLFLAG_RD,
	0, 0, sysctl_loadavg, "S,loadavg", "");
static int
sysctl_swapusage
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error;
	uint64_t swap_total;
	uint64_t swap_avail;
	uint32_t swap_pagesize;
	boolean_t swap_encrypted;
	struct xsw_usage xsu;

	error = macx_swapinfo(&swap_total,
			      &swap_avail,
			      &swap_pagesize,
			      &swap_encrypted);
	if (error)
		return error;

	xsu.xsu_total = swap_total;
	xsu.xsu_avail = swap_avail;
	xsu.xsu_used = swap_total - swap_avail;
	xsu.xsu_pagesize = swap_pagesize;
	xsu.xsu_encrypted = swap_encrypted;
	return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL);
}

SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
	CTLTYPE_STRUCT | CTLFLAG_RD,
	0, 0, sysctl_swapusage, "S,xsw_usage", "");
/* this kernel does NOT implement shared_region_make_private_np() */
SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
	CTLFLAG_RD, NULL, 0, "");
static int
sysctl_sysctl_exec_affinity(__unused struct sysctl_oid *oidp,
			    __unused void *arg1, __unused int arg2,
			    struct sysctl_req *req)
{
	proc_t cur_proc = req->p;
	int error;

	if (req->oldptr != USER_ADDR_NULL) {
		cpu_type_t oldcputype = (cur_proc->p_flag & P_AFFINITY) ? CPU_TYPE_POWERPC : CPU_TYPE_I386;
		if ((error = SYSCTL_OUT(req, &oldcputype, sizeof(oldcputype))))
			return error;
	}

	if (req->newptr != USER_ADDR_NULL) {
		cpu_type_t newcputype;
		if ((error = SYSCTL_IN(req, &newcputype, sizeof(newcputype))))
			return error;
		if (newcputype == CPU_TYPE_I386)
			OSBitAndAtomic(~((uint32_t)P_AFFINITY), (UInt32 *)&cur_proc->p_flag);
		else if (newcputype == CPU_TYPE_POWERPC)
			OSBitOrAtomic(P_AFFINITY, (UInt32 *)&cur_proc->p_flag);
		else
			return (EINVAL);
	}

	return 0;
}
SYSCTL_PROC(_sysctl, OID_AUTO, proc_exec_affinity, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, 0, 0, sysctl_sysctl_exec_affinity, "I", "proc_exec_affinity");
static int
fetch_process_cputype(
	proc_t cur_proc,
	int *name,
	u_int namelen,
	cpu_type_t *cputype)
{
	proc_t p = PROC_NULL;
	int refheld = 0;
	cpu_type_t ret = 0;
	int error = 0;

	if (namelen == 0)
		p = cur_proc;
	else if (namelen == 1) {
		p = proc_find(name[0]);
		if (p == NULL)
			return (EINVAL);
		refheld = 1;
	} else {
		error = EINVAL;
		goto out;
	}

	if (p->p_flag & P_TRANSLATED) {
		ret = CPU_TYPE_POWERPC;
	}
	else {
		ret = cpu_type();
	}
	if (IS_64BIT_PROCESS(p))
		ret |= CPU_ARCH_ABI64;

	*cputype = ret;

	if (refheld != 0)
		proc_rele(p);
out:
	return (error);
}
static int
sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
		struct sysctl_req *req)
{
	int error;
	cpu_type_t proc_cputype = 0;
	if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
		return error;
	int res = 1;
	if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
		res = 0;
	return SYSCTL_OUT(req, &res, sizeof(res));
}
SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_native, "I", "proc_native");

static int
sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
		struct sysctl_req *req)
{
	int error;
	cpu_type_t proc_cputype = 0;
	if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
		return error;
	return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
}
SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_cputype, "I", "proc_cputype");
static int
sysctl_safeboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, boothowto & RB_SAFEBOOT ? 1 : 0, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
	CTLTYPE_INT | CTLFLAG_RD,
	0, 0, sysctl_safeboot, "I", "");

static int
sysctl_singleuser
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, boothowto & RB_SINGLE ? 1 : 0, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, singleuser,
	CTLTYPE_INT | CTLFLAG_RD,
	0, 0, sysctl_singleuser, "I", "");
/*
 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
 */
extern boolean_t	affinity_sets_enabled;
extern int		affinity_sets_mapping;

SYSCTL_INT (_kern, OID_AUTO, affinity_sets_enabled,
	CTLFLAG_RW, (int *) &affinity_sets_enabled, 0, "hinting enabled");
SYSCTL_INT (_kern, OID_AUTO, affinity_sets_mapping,
	CTLFLAG_RW, &affinity_sets_mapping, 0, "mapping policy");
/*
 * Limit on total memory users can wire.
 *
 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
 *
 * vm_user_wire_limit - per address space limit on wired memory.  This puts a cap on the process's rlimit value.
 *
 * These values are initialized to reasonable defaults at boot time based on the available physical memory.
 *
 * All values are in bytes.
 */

vm_map_size_t	vm_global_user_wire_limit;
vm_map_size_t	vm_user_wire_limit;

SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW, &vm_global_user_wire_limit, "");
SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW, &vm_user_wire_limit, "");
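/*
 * Editorial sketch (not part of the original file): these two OIDs are
 * plain 64-bit values, so userland can read them with sysctlbyname().
 * Purely illustrative.
 */
#if 0	/* example only -- userland code, not kernel code */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	uint64_t limit = 0;
	size_t len = sizeof(limit);

	if (sysctlbyname("vm.user_wire_limit", &limit, &len, NULL, 0) == 0)
		printf("per-process wire limit: %llu bytes\n",
		    (unsigned long long)limit);
	return (0);
}
#endif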