/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Mike Karels at Berkeley Software Design, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sysctl.c	8.4 (Berkeley) 4/14/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

/*
 * DEPRECATED sysctl system call code
 *
 * Everything in this file is deprecated. Sysctls should be handled
 * by the code in kern_newsysctl.c.
 * The remaining "case" sections are supposed to be converted into
 * SYSCTL_*-style definitions, and as soon as all of them are gone,
 * this source file is supposed to die.
 *
 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
 */
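/*
 * Illustrative sketch (not part of the original file): the new-style
 * definition the notice above asks for has the same shape as the
 * SYSCTL_INT/SYSCTL_PROC declarations further down in this file, e.g.
 *
 *	static int example_value = 0;	// hypothetical name, for illustration only
 *	SYSCTL_INT(_kern, OID_AUTO, example_value, CTLFLAG_RW,
 *		&example_value, 0, "hypothetical example tunable");
 *
 * "example_value" does not exist anywhere in this file; it is shown only
 * to contrast with the deprecated "case"-based handlers below.
 */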
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/file_internal.h>
#include <sys/vnode_internal.h>
#include <sys/unistd.h>

#include <sys/ioctl.h>
#include <sys/namei.h>

#include <sys/disklabel.h>

#include <sys/sysctl.h>
#include <sys/user.h>
#include <sys/aio_kern.h>
#include <sys/reboot.h>

#include <bsm/audit_kernel.h>

#include <mach/machine.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <kern/task.h>
#include <kern/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <mach/host_info.h>

#include <sys/mount_internal.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>

#include <IOKit/IOPlatformExpert.h>
#include <pexpert/pexpert.h>

#include <machine/machine_routines.h>
#include <machine/exec.h>

#include <vm/vm_protos.h>

#include <i386/cpuid.h>
sysctlfn kern_sysctl;
sysctlfn debug_sysctl;
extern sysctlfn net_sysctl;
extern sysctlfn cpu_sysctl;
extern int aio_max_requests;
extern int aio_max_requests_per_process;
extern int aio_worker_threads;
extern int lowpri_IO_window_msecs;
extern int lowpri_IO_delay_msecs;
extern int nx_enabled;
extern int speculative_reads_disabled;

fill_eproc(proc_t p, struct eproc *ep);
fill_externproc(proc_t p, struct extern_proc *exp);
fill_user_eproc(proc_t p, struct user_eproc *ep);
fill_user_proc(proc_t p, struct user_kinfo_proc *kp);
fill_user_externproc(proc_t p, struct user_extern_proc *exp);
kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep);
kdebug_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep, proc_t p);
pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
__private_extern__ kern_return_t
reset_vmobjectcache(unsigned int val1, unsigned int val2);
sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep);
sysctl_doprof(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
    user_addr_t newp, size_t newlen);
fill_proc(proc_t p, struct kinfo_proc *kp);
sysctl_procargs(int *name, u_int namelen, user_addr_t where,
    size_t *sizep, proc_t cur_proc);
sysctl_procargs2(int *name, u_int namelen, user_addr_t where, size_t *sizep,
sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
    proc_t cur_proc, int argc_yes);
sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
    size_t newlen, void *sp, int len);

static int sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg);
int sysdoproc_callback(proc_t p, void *arg);

static int __sysctl_funneled(proc_t p, struct __sysctl_args *uap, register_t *retval);

extern void IORegistrySetOSBuildVersion(char * build_version);
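/*
 * loadavg32to64() below widens the fixed-point load averages from the
 * native 32-bit struct loadavg into struct user_loadavg, so that LP64
 * callers of vm.loadavg (see sysctl_loadavg() later in this file) get
 * fields of the size they expect.
 */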
loadavg32to64(struct loadavg *la32, struct user_loadavg *la64)
	la64->ldavg[0] = la32->ldavg[0];
	la64->ldavg[1] = la32->ldavg[1];
	la64->ldavg[2] = la32->ldavg[2];
	la64->fscale = (user_long_t)la32->fscale;

static struct sysctl_lock memlock;

/* sysctl() syscall */
__sysctl(proc_t p, struct __sysctl_args *uap, register_t *retval)
	boolean_t funnel_state;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	error = __sysctl_funneled(p, uap, retval);
	thread_funnel_set(kernel_flock, funnel_state);
__sysctl_funneled(proc_t p, struct __sysctl_args *uap, __unused register_t *retval)
	int error, dolock = 1;
	size_t savelen = 0, oldlen = 0, newlen;
	sysctlfn *fnp = NULL;
	int name[CTL_MAXNAME];
	boolean_t memlock_taken = FALSE;
	boolean_t vslock_taken = FALSE;
	kauth_cred_t my_cred;

	/*
	 * all top-level sysctl names are non-terminal
	 */
	if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)

	error = copyin(uap->name, &name[0], uap->namelen * sizeof(int));

	AUDIT_ARG(ctlname, name, uap->namelen);

	if (proc_is64bit(p)) {
		/* uap->newlen is a size_t value which grows to 64 bits
		 * when coming from a 64-bit process. since it's doubtful we'll
		 * have a sysctl newp buffer greater than 4GB we shrink it to size_t
		 */
		newlen = CAST_DOWN(size_t, uap->newlen);
		newlen = uap->newlen;

	/* CTL_UNSPEC is used to get oid to AUTO_OID */
	if (uap->new != USER_ADDR_NULL
	    && ((name[0] == CTL_KERN
	    && !(name[1] == KERN_IPC || name[1] == KERN_PANICINFO || name[1] == KERN_PROCDELAYTERM ||
	    name[1] == KERN_PROC_LOW_PRI_IO || name[1] == KERN_PROCNAME || name[1] == KERN_RAGEVNODE || name[1] == KERN_CHECKOPENEVT))
	    || (name[0] == CTL_HW)
	    || (name[0] == CTL_VM))
	    && (error = suser(kauth_cred_get(), &p->p_acflag)))

	/* XXX: KERN, VFS and DEBUG are handled by their respective functions,
	 * but there is a fallback for all sysctls other than VFS to
	 * userland_sysctl() - KILL THIS! */
	if ((name[1] != KERN_VNODE) && (name[1] != KERN_FILE)
	    && (name[1] != KERN_PROC))

	if (uap->oldlenp != USER_ADDR_NULL) {
		uint64_t oldlen64 = fuulong(uap->oldlenp);

		oldlen = CAST_DOWN(size_t, oldlen64);
		/*
		 * If more than 4G, clamp to 4G - useracc() below will catch
		 * with an EFAULT, if it's actually necessary.
		 */
		if (oldlen64 > 0x00000000ffffffffULL)
			oldlen = 0xffffffffUL;

	if (uap->old != USER_ADDR_NULL) {
		if (!useracc(uap->old, (user_size_t)oldlen, B_WRITE))

		/*
		 * The kernel debug mechanism does not need to take this lock, and
		 * we don't grab the memlock around calls to KERN_PROC because it is reentrant.
		 * Grabbing the lock for a KERN_PROC sysctl makes a deadlock possible 5024049.
		 */
		if (!((name[1] == KERN_KDEBUG) && (name[2] == KERN_KDGETENTROPY)) &&
		    !(name[1] == KERN_PROC)) {
			memlock_taken = TRUE;

		if (dolock && oldlen) {
			if ((error = vslock(uap->old, (user_size_t)oldlen))) {
				if (memlock_taken == TRUE)

	my_cred = kauth_cred_proc_ref(p);
	error = mac_system_check_sysctl(
	    fnp == kern_sysctl ? 1 : 0,
	kauth_cred_unref(&my_cred);

	error = (*fnp)(name + 1, uap->namelen - 1, uap->old,
	    &oldlen, uap->new, newlen, p);

	if (vslock_taken == TRUE) {
		error1 = vsunlock(uap->old, (user_size_t)savelen, B_WRITE);
	if (memlock_taken == TRUE)

	if ( (name[0] != CTL_VFS) && (error == ENOTSUP)) {
		boolean_t funnel_state;

		/*
		 * Drop the funnel when calling new sysctl code, which will conditionally
		 * grab the funnel if it really needs to.
		 */
		funnel_state = thread_funnel_set(kernel_flock, FALSE);

		error = userland_sysctl(p, name, uap->namelen, uap->old, &tmp,
		    1, uap->new, newlen, &oldlen);

		thread_funnel_set(kernel_flock, funnel_state);

	if ((error) && (error != ENOMEM))

	if (uap->oldlenp != USER_ADDR_NULL)
		error = suulong(uap->oldlenp, oldlen);
/*
 * Attributes stored in the kernel.
 */
__private_extern__ char corefilename[MAXPATHLEN+1];
__private_extern__ int do_coredump;
__private_extern__ int sugid_coredump;

__private_extern__ int do_count_syscalls;

int securelevel = -1;

	__unused size_t newSize,
	if (name[0] == 0 && 1 == namelen) {
		return sysctl_rdint(oldBuf, oldSize, newBuf,
		    (cur_proc->p_flag & P_AFFINITY) ? 1 : 0);
	} else if (name[0] == 1 && 2 == namelen) {
		OSBitAndAtomic(~((uint32_t)P_AFFINITY), (UInt32 *)&cur_proc->p_flag);
		OSBitOrAtomic(P_AFFINITY, (UInt32 *)&cur_proc->p_flag);

	__unused size_t newSize,
	int istranslated = 0;
	kauth_cred_t my_cred;

	p = proc_find(name[0]);
	my_cred = kauth_cred_proc_ref(p);
	uid = kauth_cred_getuid(my_cred);
	kauth_cred_unref(&my_cred);
	if ((uid != kauth_cred_getuid(kauth_cred_get()))
	    && suser(kauth_cred_get(), &cur_proc->p_acflag)) {

	istranslated = (p->p_flag & P_TRANSLATED);
	return sysctl_rdint(oldBuf, oldSize, newBuf,
	    (istranslated != 0) ? 1 : 0);
set_archhandler(__unused proc_t p, int arch)
	struct vnode_attr va;
	vfs_context_t ctx = vfs_context_current();
	struct exec_archhandler *archhandler;

	case CPU_TYPE_POWERPC:
		archhandler = &exec_archhandler_ppc;

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE32,
	    CAST_USER_ADDR_T(archhandler->path), ctx);

	/* Check mount point */
	if ((nd.ni_vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    (nd.ni_vp->v_type != VREG)) {

	VATTR_WANTED(&va, va_fsid);
	VATTR_WANTED(&va, va_fileid);
	error = vnode_getattr(nd.ni_vp, &va, ctx);

	archhandler->fsid = va.va_fsid;
	archhandler->fileid = (u_long)va.va_fileid;

/* XXX remove once Rosetta is rev'ed */
/*****************************************************************************/
sysctl_exec_archhandler_ppc(
    __unused u_int namelen,
	char handler[sizeof(exec_archhandler_ppc.path)];
	vfs_context_t ctx = vfs_context_current();

	len = strlen(exec_archhandler_ppc.path) + 1;
	error = copyout(exec_archhandler_ppc.path, oldBuf, len);

	error = suser(vfs_context_ucred(ctx), &p->p_acflag);

	if (newSize >= sizeof(exec_archhandler_ppc.path))
		return (ENAMETOOLONG);
	error = copyin(newBuf, handler, newSize);

	handler[newSize] = 0;
	strlcpy(exec_archhandler_ppc.path, handler, MAXPATHLEN);
	error = set_archhandler(p, CPU_TYPE_POWERPC);
/*****************************************************************************/

sysctl_handle_exec_archhandler_ppc(struct sysctl_oid *oidp, void *arg1,
    int arg2, struct sysctl_req *req)
	error = sysctl_handle_string(oidp, arg1, arg2, req);
	error = set_archhandler(req->p, CPU_TYPE_POWERPC);

SYSCTL_NODE(_kern, KERN_EXEC, exec, CTLFLAG_RD|CTLFLAG_LOCKED, 0, "");

SYSCTL_NODE(_kern_exec, OID_AUTO, archhandler, CTLFLAG_RD|CTLFLAG_LOCKED, 0, "");

SYSCTL_PROC(_kern_exec_archhandler, OID_AUTO, powerpc,
    CTLTYPE_STRING | CTLFLAG_RW, exec_archhandler_ppc.path, 0,
    sysctl_handle_exec_archhandler_ppc, "A", "");

extern int get_kernel_symfile(proc_t, char **);
__private_extern__ int
sysctl_dopanicinfo(int *, u_int, user_addr_t, size_t *, user_addr_t,
/*
 * kernel related system variables.
 */
kern_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
    user_addr_t newp, size_t newlen, proc_t p)
	/* all sysctl names not listed below are terminal at this level */
	    && !(name[0] == KERN_PROC
	    || name[0] == KERN_PROF
	    || name[0] == KERN_KDEBUG
	    || name[0] == KERN_PROCARGS
	    || name[0] == KERN_PROCARGS2
	    || name[0] == KERN_IPC
	    || name[0] == KERN_SYSV
	    || name[0] == KERN_AFFINITY
	    || name[0] == KERN_TRANSLATE
	    || name[0] == KERN_EXEC
	    || name[0] == KERN_PANICINFO
	    || name[0] == KERN_POSIX
	    || name[0] == KERN_TFP
	    || name[0] == KERN_TTY
	    || name[0] == KERN_LCTX
		return (ENOTDIR);		/* overloaded */

		return (sysctl_doproc(name + 1, namelen - 1, oldp, oldlenp));
		return (sysctl_doprof(name + 1, namelen - 1, oldp, oldlenp,
		return (kdebug_ops(name + 1, namelen - 1, oldp, oldlenp, p));
		/* new one as it does not use kinfo_proc */
		return (sysctl_procargs(name + 1, namelen - 1, oldp, oldlenp, p));
		/* new one as it does not use kinfo_proc */
		return (sysctl_procargs2(name + 1, namelen - 1, oldp, oldlenp, p));
		return(sysctl_dopanicinfo(name + 1, namelen - 1, oldp, oldlenp,
		return sysctl_affinity(name+1, namelen-1, oldp, oldlenp,
		return sysctl_translate(name+1, namelen-1, oldp, oldlenp, newp,
	/* XXX remove once Rosetta has rev'ed */
		return sysctl_exec_archhandler_ppc(name+1, namelen-1, oldp,
		    oldlenp, newp, newlen, p);
	case KERN_COUNT_SYSCALLS:
		/* valid values passed in:
		 * = 0 means don't keep called counts for each bsd syscall
		 * > 0 means keep called counts for each bsd syscall
		 * = 2 means dump current counts to the system log
		 * = 3 means reset all counts
		 * for example, to dump current counts:
		 * sysctl -w kern.count_calls=2
		 */
		error = sysctl_int(oldp, oldlenp, newp, newlen, &tmp);

			do_count_syscalls = 1;
		else if ( tmp == 0 || tmp == 2 || tmp == 3 ) {
			extern int syscalls_log[];
			extern const char * syscallnames[];

			for ( i = 0; i < nsysent; i++ ) {
				if ( syscalls_log[i] != 0 ) {
					printf("%d calls - name %s \n", syscalls_log[i], syscallnames[i]);
			do_count_syscalls = 1;

/*
 * Debugging related system variables.
 */
#endif /* DIAGNOSTIC */
struct ctldebug debug0, debug1;
struct ctldebug debug2, debug3, debug4;
struct ctldebug debug5, debug6, debug7, debug8, debug9;
struct ctldebug debug10, debug11, debug12, debug13, debug14;
struct ctldebug debug15, debug16, debug17, debug18, debug19;
static struct ctldebug *debugvars[CTL_DEBUG_MAXID] = {
	&debug0, &debug1, &debug2, &debug3, &debug4,
	&debug5, &debug6, &debug7, &debug8, &debug9,
	&debug10, &debug11, &debug12, &debug13, &debug14,
	&debug15, &debug16, &debug17, &debug18, &debug19,

debug_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
    user_addr_t newp, size_t newlen, __unused proc_t p)
	struct ctldebug *cdp;

	/* all sysctl names at this level are name and field */
		return (ENOTDIR);		/* overloaded */
	cdp = debugvars[name[0]];
	if (cdp->debugname == 0)
		return (sysctl_rdstring(oldp, oldlenp, newp, cdp->debugname));
	case CTL_DEBUG_VALUE:
		return (sysctl_int(oldp, oldlenp, newp, newlen, cdp->debugvar));
/*
 * The following sysctl_* functions should not be used
 * any more, as they can only cope with callers in
 * user mode: Use new-style
 */
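/*
 * Illustrative contrast (added comment, not original code): where the old
 * helpers are called as
 *
 *	error = sysctl_int(oldp, oldlenp, newp, newlen, &some_int);
 *
 * a new-style handler is built on sysctl_io_number()/sysctl_io_string()/
 * sysctl_io_opaque(), as the sysctl_maxproc() and sysctl_coredump()
 * handlers later in this file do; those helpers take a struct sysctl_req
 * and let the framework perform the copyin/copyout.  "some_int" above is
 * a placeholder name used only for illustration.
 */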
/*
 * Validate parameters and get old / set new parameters
 * for an integer-valued sysctl function.
 */
sysctl_int(user_addr_t oldp, size_t *oldlenp,
    user_addr_t newp, size_t newlen, int *valp)
	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
	if (oldp && *oldlenp < sizeof(int))
	if (newp && newlen != sizeof(int))
	*oldlenp = sizeof(int);
		error = copyout(valp, oldp, sizeof(int));
	if (error == 0 && newp) {
		error = copyin(newp, valp, sizeof(int));
		AUDIT_ARG(value, *valp);

/*
 * As above, but read-only.
 */
sysctl_rdint(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, int val)
	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
	if (oldp && *oldlenp < sizeof(int))
	*oldlenp = sizeof(int);
		error = copyout((caddr_t)&val, oldp, sizeof(int));

/*
 * Validate parameters and get old / set new parameters
 * for a quad (64-bit)-valued sysctl function.
 */
sysctl_quad(user_addr_t oldp, size_t *oldlenp,
    user_addr_t newp, size_t newlen, quad_t *valp)
	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
	if (oldp && *oldlenp < sizeof(quad_t))
	if (newp && newlen != sizeof(quad_t))
	*oldlenp = sizeof(quad_t);
		error = copyout(valp, oldp, sizeof(quad_t));
	if (error == 0 && newp)
		error = copyin(newp, valp, sizeof(quad_t));

/*
 * As above, but read-only.
 */
sysctl_rdquad(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, quad_t val)
	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
	if (oldp && *oldlenp < sizeof(quad_t))
	*oldlenp = sizeof(quad_t);
		error = copyout((caddr_t)&val, oldp, sizeof(quad_t));

/*
 * Validate parameters and get old / set new parameters
 * for a string-valued sysctl function.  Unlike sysctl_string, if you
 * give it a too small (but larger than 0 bytes) buffer, instead of
 * returning ENOMEM, it truncates the returned string to the buffer
 * size.  This preserves the semantics of some library routines
 * implemented via sysctl, which truncate their returned data, rather
 * than simply returning an error.  The returned string is always NUL
 * terminated.
 */
sysctl_trstring(user_addr_t oldp, size_t *oldlenp,
    user_addr_t newp, size_t newlen, char *str, int maxlen)
	int len, copylen, error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
	copylen = len = strlen(str) + 1;
	if (oldp && (len < 0 || *oldlenp < 1))
	if (oldp && (*oldlenp < (size_t)len))
		copylen = *oldlenp + 1;
	if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
	*oldlenp = copylen - 1;		/* deal with NULL strings correctly */
		error = copyout(str, oldp, copylen);
			error = copyout((void *)&c, oldp, sizeof(char));
	if (error == 0 && newp) {
		error = copyin(newp, str, newlen);
		AUDIT_ARG(text, (char *)str);

/*
 * Validate parameters and get old / set new parameters
 * for a string-valued sysctl function.
 */
sysctl_string(user_addr_t oldp, size_t *oldlenp,
    user_addr_t newp, size_t newlen, char *str, int maxlen)
	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
	len = strlen(str) + 1;
	if (oldp && (len < 0 || *oldlenp < (size_t)len))
	if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
	*oldlenp = len - 1;		/* deal with NULL strings correctly */
		error = copyout(str, oldp, len);
	if (error == 0 && newp) {
		error = copyin(newp, str, newlen);
		AUDIT_ARG(text, (char *)str);

/*
 * As above, but read-only.
 */
sysctl_rdstring(user_addr_t oldp, size_t *oldlenp,
    user_addr_t newp, char *str)
	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
	len = strlen(str) + 1;
	if (oldp && *oldlenp < (size_t)len)
		error = copyout(str, oldp, len);

/*
 * Validate parameters and get old / set new parameters
 * for a structure oriented sysctl function.
 */
sysctl_struct(user_addr_t oldp, size_t *oldlenp,
    user_addr_t newp, size_t newlen, void *sp, int len)
	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
	if (oldp && (len < 0 || *oldlenp < (size_t)len))
	if (newp && (len < 0 || newlen > (size_t)len))
		error = copyout(sp, oldp, len);
	if (error == 0 && newp)
		error = copyin(newp, sp, len);

/*
 * Validate parameters and get old parameters
 * for a structure oriented sysctl function.
 */
sysctl_rdstruct(user_addr_t oldp, size_t *oldlenp,
    user_addr_t newp, void *sp, int len)
	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
	if (oldp && (len < 0 || *oldlenp < (size_t)len))
		error = copyout(sp, oldp, len);
/*
 * Get file structures.
 */
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	struct extern_file nef;

	if (req->oldptr == USER_ADDR_NULL) {
		/*
		 * overestimate by 10 files
		 */
		req->oldidx = sizeof(filehead) + (nfiles + 10) * sizeof(struct extern_file);

	/*
	 * first copyout filehead
	 */
	error = SYSCTL_OUT(req, &filehead, sizeof(filehead));

	/*
	 * followed by an array of file structures
	 */
	for (fg = filehead.lh_first; fg != 0; fg = fg->f_list.le_next) {
		nef.f_list.le_next = (struct extern_file *)fg->f_list.le_next;
		nef.f_list.le_prev = (struct extern_file **)fg->f_list.le_prev;
		nef.f_flag = (fg->fg_flag & FMASK);
		nef.f_type = fg->fg_type;
		nef.f_count = fg->fg_count;
		nef.f_msgcount = fg->fg_msgcount;
		nef.f_cred = fg->fg_cred;
		nef.f_ops = fg->fg_ops;
		nef.f_offset = fg->fg_offset;
		nef.f_data = fg->fg_data;
		error = SYSCTL_OUT(req, &nef, sizeof(nef));

SYSCTL_PROC(_kern, KERN_FILE, file,
    CTLTYPE_STRUCT | CTLFLAG_RW,
    0, 0, sysctl_file, "S,filehead", "");

sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg)
	if (p->p_pid != (pid_t)arg)

sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg)
	if (p->p_pgrpid != (pid_t)arg)

sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg)
	boolean_t funnel_state;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	/* This is very racy but list lock is held.. Hmmm. */
	if ((p->p_flag & P_CONTROLT) == 0 ||
	    (p->p_pgrp == NULL) || (p->p_pgrp->pg_session == NULL) ||
	    p->p_pgrp->pg_session->s_ttyp == NULL ||
	    p->p_pgrp->pg_session->s_ttyp->t_dev != (dev_t)arg)
	thread_funnel_set(kernel_flock, funnel_state);

sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg)
	kauth_cred_t my_cred;

	if (p->p_ucred == NULL)
	my_cred = kauth_cred_proc_ref(p);
	uid = kauth_cred_getuid(my_cred);
	kauth_cred_unref(&my_cred);

	if (uid != (uid_t)arg)

sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg)
	kauth_cred_t my_cred;

	if (p->p_ucred == NULL)
	my_cred = kauth_cred_proc_ref(p);
	ruid = my_cred->cr_ruid;
	kauth_cred_unref(&my_cred);

	if (ruid != (uid_t)arg)

sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg)
	if ((p->p_lctx == NULL) ||
	    (p->p_lctx->lc_id != (pid_t)arg))
/*
 * try over estimating by 5 procs
 */
#define KERN_PROCSLOP	(5 * sizeof (struct kinfo_proc))
struct sysdoproc_args {
	boolean_t is_64_bit;
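/*
 * sysdoproc_callback() below is invoked once per process by proc_iterate():
 * it applies the optional ruid/uid/tty filters, fills a kinfo_proc (32- or
 * 64-bit flavor depending on is_64_bit), copies it out to the caller's
 * buffer at dp, and advances dp / shrinks buflen as it goes.
 */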
sysdoproc_callback(proc_t p, void * arg)
	struct sysdoproc_args * args = (struct sysdoproc_args *)arg;

	if (args->buflen >= args->sizeof_kproc) {
		if ((args->ruidcheck != 0) && (sysdoproc_filt_KERN_PROC_RUID(p, (void *)args->uidval) == 0))
			return(PROC_RETURNED);
		if ((args->uidcheck != 0) && (sysdoproc_filt_KERN_PROC_UID(p, (void *)args->uidval) == 0))
			return(PROC_RETURNED);
		if ((args->ttycheck != 0) && (sysdoproc_filt_KERN_PROC_TTY(p, (void *)args->uidval) == 0))
			return(PROC_RETURNED);

		bzero(args->kprocp, args->sizeof_kproc);
		if (args->is_64_bit) {
			fill_user_proc(p, (struct user_kinfo_proc *) args->kprocp);
			fill_proc(p, (struct kinfo_proc *) args->kprocp);
		error = copyout(args->kprocp, args->dp, args->sizeof_kproc);
			*args->errorp = error;
			return(PROC_RETURNED_DONE);
		args->dp += args->sizeof_kproc;
		args->buflen -= args->sizeof_kproc;
	args->needed += args->sizeof_kproc;
	return(PROC_RETURNED);
sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep)
	user_addr_t dp = where;
	int buflen = where != USER_ADDR_NULL ? *sizep : 0;
	boolean_t is_64_bit = FALSE;
	struct kinfo_proc kproc;
	struct user_kinfo_proc user_kproc;
	int (*filterfn)(proc_t, void *) = 0;
	struct sysdoproc_args args;

	if (namelen != 2 && !(namelen == 1 && name[0] == KERN_PROC_ALL))

	is_64_bit = proc_is64bit(current_proc());
		sizeof_kproc = sizeof(user_kproc);
		kprocp = (caddr_t) &user_kproc;
		sizeof_kproc = sizeof(kproc);
		kprocp = (caddr_t) &kproc;

		filterfn = sysdoproc_filt_KERN_PROC_PID;
	case KERN_PROC_PGRP:
		filterfn = sysdoproc_filt_KERN_PROC_PGRP;
	case KERN_PROC_RUID:
	case KERN_PROC_LCID:
		filterfn = sysdoproc_filt_KERN_PROC_LCID;

	args.buflen = buflen;
	args.kprocp = kprocp;
	args.is_64_bit = is_64_bit;
	args.needed = needed;
	args.errorp = &error;
	args.uidcheck = uidcheck;
	args.ruidcheck = ruidcheck;
	args.ttycheck = ttycheck;
	args.sizeof_kproc = sizeof_kproc;
	args.uidval = name[1];

	proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST), sysdoproc_callback, &args, filterfn, (void *)name[1]);

	needed = args.needed;

	if (where != USER_ADDR_NULL) {
		*sizep = dp - where;
		if (needed > *sizep)
		needed += KERN_PROCSLOP;
/*
 * Fill in an eproc structure for the specified process.
 */
fill_eproc(proc_t p, struct eproc *ep)
	kauth_cred_t my_cred;
	struct session * sessp;

	sessp = proc_session(p);

	if (pg != PGRP_NULL) {
		ep->e_pgid = p->p_pgrpid;
		ep->e_jobc = pg->pg_jobc;
		if ((sessp != SESSION_NULL) && sessp->s_ttyvp)
			ep->e_flag = EPROC_CTTY;
		ep->e_sess = (struct session *)0;
	ep->e_lcid = p->p_lctx->lc_id;
	ep->e_ppid = p->p_ppid;
	/* Pre-zero the fake historical pcred */
	bzero(&ep->e_pcred, sizeof(struct _pcred));

	my_cred = kauth_cred_proc_ref(p);

	/* A fake historical pcred */
	ep->e_pcred.p_ruid = my_cred->cr_ruid;
	ep->e_pcred.p_svuid = my_cred->cr_svuid;
	ep->e_pcred.p_rgid = my_cred->cr_rgid;
	ep->e_pcred.p_svgid = my_cred->cr_svgid;
	/* A fake historical *kauth_cred_t */
	ep->e_ucred.cr_ref = my_cred->cr_ref;
	ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
	ep->e_ucred.cr_ngroups = my_cred->cr_ngroups;
	bcopy(my_cred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));

	kauth_cred_unref(&my_cred);

	if (p->p_stat == SIDL || p->p_stat == SZOMB) {
		ep->e_vm.vm_tsize = 0;
		ep->e_vm.vm_dsize = 0;
		ep->e_vm.vm_ssize = 0;
	ep->e_vm.vm_rssize = 0;

	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
	    (tp = sessp->s_ttyp)) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = sessp->s_ttypgrpid;
		ep->e_tsess = tp->t_session;

	if (SESS_LEADER(p, sessp))
		ep->e_flag |= EPROC_SLEADER;
	bzero(&ep->e_wmesg[0], WMESGLEN+1);
	ep->e_xsize = ep->e_xrssize = 0;
	ep->e_xccount = ep->e_xswrss = 0;
	if (sessp != SESSION_NULL)
		session_rele(sessp);

/*
 * Fill in an LP64 version of eproc structure for the specified process.
 */
fill_user_eproc(proc_t p, struct user_eproc *ep)
	struct session *sessp = NULL;
	kauth_cred_t my_cred;

	sessp = proc_session(p);

	ep->e_paddr = CAST_USER_ADDR_T(p);
	if (pg != PGRP_NULL) {
		ep->e_sess = CAST_USER_ADDR_T(sessp);
		ep->e_pgid = p->p_pgrpid;
		ep->e_jobc = pg->pg_jobc;
		if (sessp != SESSION_NULL) {
			ep->e_flag = EPROC_CTTY;
		ep->e_sess = USER_ADDR_NULL;
	ep->e_lcid = p->p_lctx->lc_id;
	ep->e_ppid = p->p_ppid;
	/* Pre-zero the fake historical pcred */
	bzero(&ep->e_pcred, sizeof(ep->e_pcred));

	my_cred = kauth_cred_proc_ref(p);

	/* A fake historical pcred */
	ep->e_pcred.p_ruid = my_cred->cr_ruid;
	ep->e_pcred.p_svuid = my_cred->cr_svuid;
	ep->e_pcred.p_rgid = my_cred->cr_rgid;
	ep->e_pcred.p_svgid = my_cred->cr_svgid;

	/* A fake historical *kauth_cred_t */
	ep->e_ucred.cr_ref = my_cred->cr_ref;
	ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
	ep->e_ucred.cr_ngroups = my_cred->cr_ngroups;
	bcopy(my_cred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));

	kauth_cred_unref(&my_cred);

	if (p->p_stat == SIDL || p->p_stat == SZOMB) {
		ep->e_vm.vm_tsize = 0;
		ep->e_vm.vm_dsize = 0;
		ep->e_vm.vm_ssize = 0;
	ep->e_vm.vm_rssize = 0;

	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
	    (tp = sessp->s_ttyp)) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = sessp->s_ttypgrpid;
		ep->e_tsess = CAST_USER_ADDR_T(tp->t_session);

	if (SESS_LEADER(p, sessp))
		ep->e_flag |= EPROC_SLEADER;
	bzero(&ep->e_wmesg[0], WMESGLEN+1);
	ep->e_xsize = ep->e_xrssize = 0;
	ep->e_xccount = ep->e_xswrss = 0;
	if (sessp != SESSION_NULL)
		session_rele(sessp);
	if (pg != PGRP_NULL)
/*
 * Fill in an eproc structure for the specified process.
 */
fill_externproc(proc_t p, struct extern_proc *exp)
	exp->p_forw = exp->p_back = NULL;
	exp->p_starttime = p->p_start;
	exp->p_vmspace = NULL;
	exp->p_sigacts = p->p_sigacts;
	exp->p_flag = p->p_flag;
	if (p->p_lflag & P_LTRACED)
		exp->p_flag |= P_TRACED;
	if (p->p_lflag & P_LPPWAIT)
		exp->p_flag |= P_PPWAIT;
	if (p->p_lflag & P_LEXIT)
		exp->p_flag |= P_WEXIT;
	exp->p_stat = p->p_stat;
	exp->p_pid = p->p_pid;
	exp->p_oppid = p->p_oppid;
	exp->user_stack = CAST_DOWN(caddr_t, p->user_stack);
	exp->exit_thread = p->exit_thread;
	exp->p_debugger = p->p_debugger;
	exp->sigwait = p->sigwait;
#ifdef _PROC_HAS_SCHEDINFO_
	exp->p_estcpu = p->p_estcpu;
	exp->p_pctcpu = p->p_pctcpu;
	exp->p_slptime = p->p_slptime;
	exp->p_slptime = 0;
	exp->p_cpticks = 0;
	bcopy(&p->p_realtimer, &exp->p_realtimer, sizeof(struct itimerval));
	bcopy(&p->p_rtime, &exp->p_rtime, sizeof(struct timeval));
	exp->p_traceflag = 0;
	exp->p_siglist = 0;		/* No longer relevant */
	exp->p_textvp = p->p_textvp;
	exp->p_holdcnt = 0;
	exp->p_sigmask = 0;		/* no longer available */
	exp->p_sigignore = p->p_sigignore;
	exp->p_sigcatch = p->p_sigcatch;
	exp->p_priority = p->p_priority;
	exp->p_nice = p->p_nice;
	bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
	exp->p_comm[MAXCOMLEN] = '\0';
	exp->p_pgrp = p->p_pgrp;
	exp->p_xstat = p->p_xstat;
	exp->p_acflag = p->p_acflag;
	exp->p_ru = p->p_ru;		/* XXX may be NULL */

/*
 * Fill in an LP64 version of extern_proc structure for the specified process.
 */
fill_user_externproc(proc_t p, struct user_extern_proc *exp)
	exp->p_forw = exp->p_back = USER_ADDR_NULL;
	exp->p_starttime.tv_sec = p->p_start.tv_sec;
	exp->p_starttime.tv_usec = p->p_start.tv_usec;
	exp->p_vmspace = USER_ADDR_NULL;
	exp->p_sigacts = CAST_USER_ADDR_T(p->p_sigacts);
	exp->p_flag = p->p_flag;
	if (p->p_lflag & P_LTRACED)
		exp->p_flag |= P_TRACED;
	if (p->p_lflag & P_LPPWAIT)
		exp->p_flag |= P_PPWAIT;
	if (p->p_lflag & P_LEXIT)
		exp->p_flag |= P_WEXIT;
	exp->p_stat = p->p_stat;
	exp->p_pid = p->p_pid;
	exp->p_oppid = p->p_oppid;
	exp->user_stack = p->user_stack;
	exp->exit_thread = CAST_USER_ADDR_T(p->exit_thread);
	exp->p_debugger = p->p_debugger;
	exp->sigwait = p->sigwait;
#ifdef _PROC_HAS_SCHEDINFO_
	exp->p_estcpu = p->p_estcpu;
	exp->p_pctcpu = p->p_pctcpu;
	exp->p_slptime = p->p_slptime;
	exp->p_slptime = 0;
	exp->p_cpticks = 0;
	exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
	exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;
	exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
	exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;
	exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
	exp->p_rtime.tv_usec = p->p_rtime.tv_usec;
	exp->p_traceflag = 0;
	exp->p_siglist = 0;		/* No longer relevant */
	exp->p_textvp = CAST_USER_ADDR_T(p->p_textvp);
	exp->p_holdcnt = 0;
	exp->p_sigmask = 0;		/* no longer available */
	exp->p_sigignore = p->p_sigignore;
	exp->p_sigcatch = p->p_sigcatch;
	exp->p_priority = p->p_priority;
	exp->p_nice = p->p_nice;
	bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
	exp->p_comm[MAXCOMLEN] = '\0';
	exp->p_pgrp = CAST_USER_ADDR_T(p->p_pgrp);
	exp->p_addr = USER_ADDR_NULL;
	exp->p_xstat = p->p_xstat;
	exp->p_acflag = p->p_acflag;
	exp->p_ru = CAST_USER_ADDR_T(p->p_ru);		/* XXX may be NULL */

fill_proc(proc_t p, struct kinfo_proc *kp)
	fill_externproc(p, &kp->kp_proc);
	fill_eproc(p, &kp->kp_eproc);

fill_user_proc(proc_t p, struct user_kinfo_proc *kp)
	fill_user_externproc(p, &kp->kp_proc);
	fill_user_eproc(p, &kp->kp_eproc);

kdebug_ops(int *name, u_int namelen, user_addr_t where,
    size_t *sizep, proc_t p)
	ret = suser(kauth_cred_get(), &p->p_acflag);

	case KERN_KDSETRTCDEC:
	case KERN_KDGETENTROPY:
		ret = kdbg_control(name, namelen, where, sizep);
/*
 * Return the top *sizep bytes of the user stack, or the entire area of the
 * user stack down through the saved exec_path, whichever is smaller.
 */
sysctl_procargs(int *name, u_int namelen, user_addr_t where,
    size_t *sizep, proc_t cur_proc)
	return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 0);

sysctl_procargs2(int *name, u_int namelen, user_addr_t where,
    size_t *sizep, proc_t cur_proc)
	return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 1);
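/*
 * Both wrappers above funnel into sysctl_procargsx(); the only difference is
 * argc_yes: the KERN_PROCARGS2 path (argc_yes == 1) additionally prepends the
 * process argc as the first word of the copied-out buffer, as seen in the
 * suword(where, p->p_argc) call below.
 */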
sysctl_procargsx(int *name, __unused u_int namelen, user_addr_t where,
    size_t *sizep, proc_t cur_proc, int argc_yes)
	int buflen = where != USER_ADDR_NULL ? *sizep : 0;
	struct _vm_map *proc_map;
	user_addr_t arg_addr;
	vm_offset_t copy_start, copy_end;
	kauth_cred_t my_cred;

		buflen -= sizeof(int);		/* reserve first word to return argc */

	/* we only care about buflen when where (oldp from sysctl) is not NULL. */
	/* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl) */
	/* is not NULL then the caller wants us to return the length needed to */
	/* hold the data we would return */
	if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {

	/*
	 * Lookup process by pid
	 */

	/*
	 * Copy the top N bytes of the stack.
	 * On all machines we have so far, the stack grows
	 * downwards.
	 *
	 * If the user expects no more than N bytes of
	 * argument list, use that as a guess for the
	 * size.
	 */

	if (!p->user_stack) {

	if (where == USER_ADDR_NULL) {
		/* caller only wants to know length of proc args data */
		if (sizep == NULL) {

		size = p->p_argslen;
			size += sizeof(int);

			/*
			 * old PROCARGS will return the executable's path and plus some
			 * extra space for work alignment and data tags
			 */
			size += PATH_MAX + (6 * sizeof(int));
		size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;

	my_cred = kauth_cred_proc_ref(p);
	uid = kauth_cred_getuid(my_cred);
	kauth_cred_unref(&my_cred);

	if ((uid != kauth_cred_getuid(kauth_cred_get()))
	    && suser(kauth_cred_get(), &cur_proc->p_acflag)) {

	if ((u_int)arg_size > p->p_argslen)
		arg_size = round_page(p->p_argslen);

	arg_addr = p->user_stack - arg_size;

	/*
	 * Before we can block (any VM code), make another
	 * reference to the map to keep it alive.  We do
	 * that by getting a reference on the task itself.
	 */

	argslen = p->p_argslen;
	/*
	 * Once we have a task reference we can convert that into a
	 * map reference, which we will use in the calls below.  The
	 * task/process may change its map after we take this reference
	 * (see execve), but the worst that will happen then is a return
	 * of stale info (which is always a possibility).
	 */
	task_reference(task);
	proc_map = get_task_map_reference(task);
	task_deallocate(task);

	if (proc_map == NULL)

	ret = kmem_alloc(kernel_map, &copy_start, round_page(arg_size));
	if (ret != KERN_SUCCESS) {
		vm_map_deallocate(proc_map);

	copy_end = round_page(copy_start + arg_size);

	if( vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
	    (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
		vm_map_deallocate(proc_map);
		kmem_free(kernel_map, copy_start,
		    round_page(arg_size));

	/*
	 * Now that we've done the copyin from the process'
	 * map, we can release the reference to it.
	 */
	vm_map_deallocate(proc_map);

	if( vm_map_copy_overwrite(kernel_map,
	    (vm_map_address_t)copy_start,
	    tmp, FALSE) != KERN_SUCCESS) {
		kmem_free(kernel_map, copy_start,
		    round_page(arg_size));

	if (arg_size > argslen) {
		data = (caddr_t) (copy_end - argslen);
		data = (caddr_t) (copy_end - arg_size);

		/* Put processes argc as the first word in the copyout buffer */
		suword(where, p->p_argc);
		error = copyout(data, (where + sizeof(int)), size);
		size += sizeof(int);
		error = copyout(data, where, size);

	/*
	 * Make the old PROCARGS work to return the executable's path
	 * But, only if there is enough space in the provided buffer
	 *
	 * on entry: data [possibly] points to the beginning of the path
	 *
	 * Note: we keep all pointers&sizes aligned to word boundaries
	 */
	if ( (! error) && (buflen > 0 && (u_int)buflen > argslen) )
		int binPath_sz, alignedBinPath_sz = 0;
		int extraSpaceNeeded, addThis;
		user_addr_t placeHere;
		char * str = (char *) data;

		/* Some apps are really bad about messing up their stacks,
		   so we have to be extra careful about getting the length
		   of the executing binary.  If we encounter an error, we bail. */

		/* Limit ourselves to PATH_MAX paths */
		if ( max_len > PATH_MAX ) max_len = PATH_MAX;

		while ( (binPath_sz < max_len-1) && (*str++ != 0) )

		/* If we have a NUL terminator, copy it, too */
		if (binPath_sz < max_len-1) binPath_sz += 1;

		/* Pre-Flight the space requirements */

		/* Account for the padding that fills out binPath to the next word */
		alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;

		placeHere = where + size;

		/* Account for the bytes needed to keep placeHere word aligned */
		addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;

		/* Add up all the space that is needed */
		extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));

		/* is there room to tack on argv[0]? */
		if ( (buflen & ~(sizeof(int)-1)) >= ( argslen + extraSpaceNeeded ))
			placeHere += addThis;
			suword(placeHere, 0);
			placeHere += sizeof(int);
			suword(placeHere, 0xBFFF0000);
			placeHere += sizeof(int);
			suword(placeHere, 0);
			placeHere += sizeof(int);
			error = copyout(data, placeHere, binPath_sz);
				placeHere += binPath_sz;
				suword(placeHere, 0);
				size += extraSpaceNeeded;

	if (copy_start != (vm_offset_t) 0) {
		kmem_free(kernel_map, copy_start, copy_end - copy_start);

	if (where != USER_ADDR_NULL)
/*
 * Max number of concurrent aio requests
 */
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	int new_value, changed;
	int error = sysctl_io_number(req, aio_max_requests, sizeof(int), &new_value, &changed);

		/* make sure the system-wide limit is greater than the per process limit */
		if (new_value >= aio_max_requests_per_process)
			aio_max_requests = new_value;

/*
 * Max number of concurrent aio requests per process
 */
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	int new_value, changed;
	int error = sysctl_io_number(req, aio_max_requests_per_process, sizeof(int), &new_value, &changed);

		/* make sure per process limit is less than the system-wide limit */
		if (new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX)
			aio_max_requests_per_process = new_value;

/*
 * Max number of async IO worker threads
 */
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	int new_value, changed;
	int error = sysctl_io_number(req, aio_worker_threads, sizeof(int), &new_value, &changed);

		/* we only allow an increase in the number of worker threads */
		if (new_value > aio_worker_threads) {
			_aio_create_worker_threads((new_value - aio_worker_threads));
			aio_worker_threads = new_value;

/*
 * System-wide limit on the max number of processes
 */
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	int new_value, changed;
	int error = sysctl_io_number(req, maxproc, sizeof(int), &new_value, &changed);

		AUDIT_ARG(value, new_value);
		/* make sure the system-wide limit is less than the configured hard
		   limit set at kernel compilation */
		if (new_value <= hard_maxproc && new_value > 0)
			maxproc = new_value;

SYSCTL_STRING(_kern, KERN_OSTYPE, ostype,
    CTLFLAG_RD | CTLFLAG_KERN,
SYSCTL_STRING(_kern, KERN_OSRELEASE, osrelease,
    CTLFLAG_RD | CTLFLAG_KERN,
SYSCTL_INT(_kern, KERN_OSREV, osrevision,
    CTLFLAG_RD | CTLFLAG_KERN,
SYSCTL_STRING(_kern, KERN_VERSION, version,
    CTLFLAG_RD | CTLFLAG_KERN,

/* PR-5293665: need to use a callback function for kern.osversion to set
 * osversion in IORegistry */
sysctl_osversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
	rval = sysctl_handle_string(oidp, arg1, arg2, req);
		IORegistrySetOSBuildVersion((char *)arg1);

SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
    CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING,
    osversion, 256 /* OSVERSIZE*/,
    sysctl_osversion, "A", "");

sysctl_sysctl_bootargs
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	strlcpy(buf, PE_boot_args(), 256);
	error = sysctl_io_string(req, buf, 256, 0, NULL);

SYSCTL_PROC(_kern, OID_AUTO, bootargs,
    CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING,
    sysctl_sysctl_bootargs, "A", "bootargs");

SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles,
    CTLFLAG_RW | CTLFLAG_KERN,
SYSCTL_INT(_kern, KERN_ARGMAX, argmax,
    CTLFLAG_RD | CTLFLAG_KERN,
SYSCTL_INT(_kern, KERN_POSIX1, posix1version,
    CTLFLAG_RD | CTLFLAG_KERN,
    NULL, _POSIX_VERSION, "");
SYSCTL_INT(_kern, KERN_NGROUPS, ngroups,
    CTLFLAG_RD | CTLFLAG_KERN,
    NULL, NGROUPS_MAX, "");
SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control,
    CTLFLAG_RD | CTLFLAG_KERN,
#if 1	/* _POSIX_SAVED_IDS from <unistd.h> */
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
    CTLFLAG_RD | CTLFLAG_KERN,
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
    CTLFLAG_RD | CTLFLAG_KERN,

sysctl_maxvnodes (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	unsigned int oldval = desiredvnodes;
	int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &desiredvnodes, NULL);
	reset_vmobjectcache(oldval, desiredvnodes);
	resize_namecache(desiredvnodes);

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_maxvnodes, "I", "");

SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_maxproc, "I", "");

SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_aiomax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_aioprocmax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_aiothreads, "I", "");
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	int new_value, changed;
	int error = sysctl_io_number(req, securelevel, sizeof(int), &new_value, &changed);

		if (!(new_value < securelevel && req->p->p_pid != 1)) {
			securelevel = new_value;

SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_securelvl, "I", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	error = sysctl_io_string(req, domainname, sizeof(domainname), 0, &changed);
		domainnamelen = strlen(domainname);

SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
    CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_domainname, "A", "");

SYSCTL_INT(_kern, KERN_HOSTID, hostid,
    CTLFLAG_RW | CTLFLAG_KERN,

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	error = sysctl_io_string(req, hostname, sizeof(hostname), 1, &changed);
		hostnamelen = req->newlen;

SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
    CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_hostname, "A", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	/* Original code allowed writing, I'm copying this, although this all makes
	   no sense to me. Besides, this sysctl is never used. */
	return sysctl_io_string(req, &req->p->p_name[0], (2*MAXCOMLEN+1), 1, NULL);

SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY,
    0, 0, sysctl_procname, "A", "");

SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
    CTLFLAG_RW | CTLFLAG_KERN,
    &speculative_reads_disabled, 0, "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	t.tv_sec = boottime_sec();

	return sysctl_io_opaque(req, &t, sizeof(t), NULL);

SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
    CTLTYPE_STRUCT | CTLFLAG_RD,
    0, 0, sysctl_boottime, "S,timeval", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	int error = get_kernel_symfile(req->p, &str);
	return sysctl_io_string(req, str, 0, 0, NULL);

SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
    CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, sysctl_symfile, "A", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	return sysctl_io_number(req, netboot_root(), sizeof(int), NULL, NULL);

SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
    CTLTYPE_INT | CTLFLAG_RD,
    0, 0, sysctl_netboot, "I", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	return sysctl_io_number(req, (int)req->p->user_stack, sizeof(int), NULL, NULL);

SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack,
    CTLTYPE_INT | CTLFLAG_RD,
    0, 0, sysctl_usrstack, "I", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	return sysctl_io_number(req, req->p->user_stack, sizeof(req->p->user_stack), NULL, NULL);

SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
    CTLTYPE_QUAD | CTLFLAG_RD,
    0, 0, sysctl_usrstack64, "Q", "");

SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
    CTLFLAG_RW | CTLFLAG_KERN,
    corefilename, sizeof(corefilename), "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	int new_value, changed;
	int error = sysctl_io_number(req, do_coredump, sizeof(int), &new_value, &changed);

		if ((new_value == 0) || (new_value == 1))
			do_coredump = new_value;

SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_coredump, "I", "");

sysctl_suid_coredump
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	int new_value, changed;
	int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed);

		if ((new_value == 0) || (new_value == 1))
			sugid_coredump = new_value;

SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_suid_coredump, "I", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	struct proc *p = req->p;
	int new_value, changed;
	int error = sysctl_io_number(req, (req->p->p_lflag & P_LDELAYTERM)? 1: 0, sizeof(int), &new_value, &changed);

			req->p->p_lflag |= P_LDELAYTERM;
			req->p->p_lflag &= ~P_LDELAYTERM;

SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_delayterm, "I", "");
sysctl_proc_low_pri_io
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	struct proc *p = req->p;
	int new_value, old_value, changed;

	switch (req->p->p_iopol_disk) {
	case IOPOL_THROTTLE:
		/* this should never happen, but to be robust, return the default value */

	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);

		if (new_value & 0x01)
			req->p->p_iopol_disk = IOPOL_THROTTLE;
		else if (new_value & 0x02)
			req->p->p_iopol_disk = IOPOL_PASSIVE;
		else if (new_value == 0)
			req->p->p_iopol_disk = IOPOL_NORMAL;

SYSCTL_PROC(_kern, KERN_PROC_LOW_PRI_IO, proc_low_pri_io,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_proc_low_pri_io, "I", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	struct proc *p = req->p;
	int new_value, old_value, changed;

	ut = get_bsdthread_info(current_thread());

	if (ut->uu_flag & UT_RAGE_VNODES)
		old_value = KERN_RAGE_THREAD;
	else if (p->p_lflag & P_LRAGE_VNODES)
		old_value = KERN_RAGE_PROC;

	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);

		switch (new_value) {
		case KERN_RAGE_PROC:
			p->p_lflag |= P_LRAGE_VNODES;
		case KERN_UNRAGE_PROC:
			p->p_lflag &= ~P_LRAGE_VNODES;
		case KERN_RAGE_THREAD:
			ut->uu_flag |= UT_RAGE_VNODES;
		case KERN_UNRAGE_THREAD:
			ut = get_bsdthread_info(current_thread());
			ut->uu_flag &= ~UT_RAGE_VNODES;

SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
    CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_rage_vnode, "I", "");

sysctl_kern_check_openevt
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	struct proc *p = req->p;
	int new_value, old_value, changed;

	if (p->p_flag & P_CHECKOPENEVT) {
		old_value = KERN_OPENEVT_PROC;

	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);

		switch (new_value) {
		case KERN_OPENEVT_PROC:
			OSBitOrAtomic(P_CHECKOPENEVT, (UInt32 *)&p->p_flag);
		case KERN_UNOPENEVT_PROC:
			OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT), (UInt32 *)&p->p_flag);

SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY,
    0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
#ifdef SECURE_KERNEL
	int new_value, changed;

	error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);

		/*
		 * Only allow setting if NX is supported on the chip
		 */
		if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD))
		nx_enabled = new_value;

SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN,
    0, 0, sysctl_nx, "I", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	if (proc_is64bit(req->p)) {
		struct user_loadavg loadinfo64;
		loadavg32to64(&averunnable, &loadinfo64);
		return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);
	return sysctl_io_opaque(req, &averunnable, sizeof(averunnable), NULL);

SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
    CTLTYPE_STRUCT | CTLFLAG_RD,
    0, 0, sysctl_loadavg, "S,loadavg", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	uint64_t swap_total;
	uint64_t swap_avail;
	uint32_t swap_pagesize;
	boolean_t swap_encrypted;
	struct xsw_usage xsu;

	error = macx_swapinfo(&swap_total,

	xsu.xsu_total = swap_total;
	xsu.xsu_avail = swap_avail;
	xsu.xsu_used = swap_total - swap_avail;
	xsu.xsu_pagesize = swap_pagesize;
	xsu.xsu_encrypted = swap_encrypted;
	return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL);

SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
    CTLTYPE_STRUCT | CTLFLAG_RD,
    0, 0, sysctl_swapusage, "S,xsw_usage", "");

/* this kernel does NOT implement shared_region_make_private_np() */
SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
sysctl_sysctl_exec_affinity(__unused struct sysctl_oid *oidp,
    __unused void *arg1, __unused int arg2,
    struct sysctl_req *req)
	proc_t cur_proc = req->p;

	if (req->oldptr != USER_ADDR_NULL) {
		cpu_type_t oldcputype = (cur_proc->p_flag & P_AFFINITY) ? CPU_TYPE_POWERPC : CPU_TYPE_I386;
		if ((error = SYSCTL_OUT(req, &oldcputype, sizeof(oldcputype))))

	if (req->newptr != USER_ADDR_NULL) {
		cpu_type_t newcputype;
		if ((error = SYSCTL_IN(req, &newcputype, sizeof(newcputype))))
		if (newcputype == CPU_TYPE_I386)
			OSBitAndAtomic(~((uint32_t)P_AFFINITY), (UInt32 *)&cur_proc->p_flag);
		else if (newcputype == CPU_TYPE_POWERPC)
			OSBitOrAtomic(P_AFFINITY, (UInt32 *)&cur_proc->p_flag);

SYSCTL_PROC(_sysctl, OID_AUTO, proc_exec_affinity, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, 0, 0, sysctl_sysctl_exec_affinity, "I", "proc_exec_affinity");

fetch_process_cputype(
    cpu_type_t *cputype)
	proc_t p = PROC_NULL;

	else if (namelen == 1) {
		p = proc_find(name[0]);

	if (p->p_flag & P_TRANSLATED) {
		ret = CPU_TYPE_POWERPC;

	if (IS_64BIT_PROCESS(p))
		ret |= CPU_ARCH_ABI64;

sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
    struct sysctl_req *req)
	cpu_type_t proc_cputype = 0;
	if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
	if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
	return SYSCTL_OUT(req, &res, sizeof(res));

SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_native, "I", "proc_native");

sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
    struct sysctl_req *req)
	cpu_type_t proc_cputype = 0;
	if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
	return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));

SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_cputype, "I", "proc_cputype");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	return sysctl_io_number(req, boothowto & RB_SAFEBOOT ? 1 : 0, sizeof(int), NULL, NULL);

SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
    CTLTYPE_INT | CTLFLAG_RD,
    0, 0, sysctl_safeboot, "I", "");

(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	return sysctl_io_number(req, boothowto & RB_SINGLE ? 1 : 0, sizeof(int), NULL, NULL);

SYSCTL_PROC(_kern, OID_AUTO, singleuser,
    CTLTYPE_INT | CTLFLAG_RD,
    0, 0, sysctl_singleuser, "I", "");

/*
 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
 */
extern boolean_t affinity_sets_enabled;
extern int affinity_sets_mapping;

SYSCTL_INT (_kern, OID_AUTO, affinity_sets_enabled,
    CTLFLAG_RW, (int *) &affinity_sets_enabled, 0, "hinting enabled");
SYSCTL_INT (_kern, OID_AUTO, affinity_sets_mapping,
    CTLFLAG_RW, &affinity_sets_mapping, 0, "mapping policy");

/*
 * Limit on total memory users can wire.
 *
 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
 *
 * vm_user_wire_limit - per address space limit on wired memory.  This puts a cap on the process's rlimit value.
 *
 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
 *
 * All values are in bytes.
 */

vm_map_size_t	vm_global_user_wire_limit;
vm_map_size_t	vm_user_wire_limit;

SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW, &vm_global_user_wire_limit, "");
SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW, &vm_user_wire_limit, "");