/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Mike Karels at Berkeley Software Design, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sysctl.c	8.4 (Berkeley) 4/14/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 * DEPRECATED sysctl system call code
 *
 * Everything in this file is deprecated. Sysctls should be handled
 * by the code in kern_newsysctl.c.
 * The remaining "case" sections are supposed to be converted into
 * SYSCTL_*-style definitions, and as soon as all of them are gone,
 * this source file is supposed to die.
 *
 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
 */
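
/*
 * Illustrative sketch (not part of the original file): a new-style sysctl is
 * declared once with a SYSCTL_* macro instead of adding a "case" below.  The
 * name used here (my_tunable) is a hypothetical placeholder.
 *
 *	static int my_tunable = 0;
 *	SYSCTL_INT(_kern, OID_AUTO, my_tunable,
 *		CTLFLAG_RW | CTLFLAG_KERN,
 *		&my_tunable, 0, "example read/write integer tunable");
 */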
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/file_internal.h>
#include <sys/vnode_internal.h>
#include <sys/unistd.h>
#include <sys/ioctl.h>
#include <sys/namei.h>
#include <sys/disklabel.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <sys/aio_kern.h>
#include <sys/reboot.h>

#include <security/audit/audit.h>
#include <kern/kalloc.h>

#include <mach/machine.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/lock.h>
#include <kern/debug.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <mach/host_info.h>

#include <sys/mount_internal.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>

#include <IOKit/IOPlatformExpert.h>
#include <pexpert/pexpert.h>

#include <machine/machine_routines.h>
#include <machine/exec.h>

#include <vm/vm_protos.h>

#if defined(__i386__) || defined(__x86_64__)
#include <i386/cpuid.h>
#endif
sysctlfn kern_sysctl;
sysctlfn debug_sysctl;
extern sysctlfn net_sysctl;
extern sysctlfn cpu_sysctl;
extern int aio_max_requests;
extern int aio_max_requests_per_process;
extern int aio_worker_threads;
extern int lowpri_IO_window_msecs;
extern int lowpri_IO_delay_msecs;
extern int nx_enabled;
extern int speculative_reads_disabled;
extern unsigned int preheat_pages_max;
extern unsigned int preheat_pages_min;
extern unsigned int preheat_pages_mult;
extern long numvnodes;
static void
fill_user32_eproc(proc_t p, struct user32_eproc *ep);
static void
fill_user32_externproc(proc_t p, struct user32_extern_proc *exp);
static void
fill_user64_eproc(proc_t p, struct user64_eproc *ep);
static void
fill_user64_proc(proc_t p, struct user64_kinfo_proc *kp);
static void
fill_user64_externproc(proc_t p, struct user64_extern_proc *exp);
extern int
kdbg_control(int *name, u_int namelen, user_addr_t where, size_t * sizep);
int
kdebug_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep, proc_t p);
int
pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
	proc_t p);
__private_extern__ kern_return_t
reset_vmobjectcache(unsigned int val1, unsigned int val2);
int
sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep);
int
sysctl_doprof(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen);
static void
fill_user32_proc(proc_t p, struct user32_kinfo_proc *kp);
int
sysctl_procargs(int *name, u_int namelen, user_addr_t where,
	size_t *sizep, proc_t cur_proc);
static int
sysctl_procargs2(int *name, u_int namelen, user_addr_t where, size_t *sizep,
	proc_t cur_proc);
static int
sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
	proc_t cur_proc, int argc_yes);
int
sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
	size_t newlen, void *sp, int len);

static int sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg);
static int sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg);

int sysdoproc_callback(proc_t p, void *arg);

static int __sysctl_funneled(proc_t p, struct __sysctl_args *uap, int32_t *retval);

extern void IORegistrySetOSBuildVersion(char * build_version);
static void
fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64)
{
	la64->ldavg[0]	= la->ldavg[0];
	la64->ldavg[1]	= la->ldavg[1];
	la64->ldavg[2]	= la->ldavg[2];
	la64->fscale	= (user64_long_t)la->fscale;
}

static void
fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32)
{
	la32->ldavg[0]	= la->ldavg[0];
	la32->ldavg[1]	= la->ldavg[1];
	la32->ldavg[2]	= la->ldavg[2];
	la32->fscale	= (user32_long_t)la->fscale;
}
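
/*
 * Note added for clarity: the ldavg[] entries are fixed-point values scaled
 * by fscale, so a consumer recovers the conventional 1/5/15-minute load
 * averages with, for example:
 *
 *	double load1 = (double)la64->ldavg[0] / (double)la64->fscale;
 */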
static struct sysctl_lock memlock;

/* sysctl() syscall */
int
__sysctl(proc_t p, struct __sysctl_args *uap, int32_t *retval)
{
	boolean_t funnel_state;
	int error;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	error = __sysctl_funneled(p, uap, retval);
	thread_funnel_set(kernel_flock, funnel_state);
	return(error);
}
static int
__sysctl_funneled(proc_t p, struct __sysctl_args *uap, __unused int32_t *retval)
{
	int error, dolock = 1;
	size_t savelen = 0, oldlen = 0, newlen;
	sysctlfn *fnp = NULL;
	int name[CTL_MAXNAME];
	int error1;
	boolean_t memlock_taken = FALSE;
	boolean_t vslock_taken = FALSE;
258 kauth_cred_t my_cred
;
262 * all top-level sysctl names are non-terminal
264 if (uap
->namelen
> CTL_MAXNAME
|| uap
->namelen
< 2)
266 error
= copyin(uap
->name
, &name
[0], uap
->namelen
* sizeof(int));
270 AUDIT_ARG(ctlname
, name
, uap
->namelen
);
	if (proc_is64bit(p)) {
		/* uap->newlen is a size_t value which grows to 64 bits
		 * when coming from a 64-bit process.  since it's doubtful we'll
		 * have a sysctl newp buffer greater than 4GB we shrink it to size_t
		 */
		newlen = CAST_DOWN(size_t, uap->newlen);
	}
	else {
		newlen = uap->newlen;
	}
283 /* CTL_UNSPEC is used to get oid to AUTO_OID */
284 if (uap
->new != USER_ADDR_NULL
285 && ((name
[0] == CTL_KERN
286 && !(name
[1] == KERN_IPC
|| name
[1] == KERN_PANICINFO
|| name
[1] == KERN_PROCDELAYTERM
||
287 name
[1] == KERN_PROCNAME
|| name
[1] == KERN_RAGEVNODE
|| name
[1] == KERN_CHECKOPENEVT
|| name
[1] == KERN_THREADNAME
))
288 || (name
[0] == CTL_HW
)
289 || (name
[0] == CTL_VM
))
290 && (error
= suser(kauth_cred_get(), &p
->p_acflag
)))
293 /* XXX: KERN, VFS and DEBUG are handled by their respective functions,
294 * but there is a fallback for all sysctls other than VFS to
295 * userland_sysctl() - KILL THIS! */
299 if ((name
[1] != KERN_VNODE
) && (name
[1] != KERN_FILE
)
300 && (name
[1] != KERN_PROC
))
315 if (uap
->oldlenp
!= USER_ADDR_NULL
) {
316 uint64_t oldlen64
= fuulong(uap
->oldlenp
);
318 oldlen
= CAST_DOWN(size_t, oldlen64
);
320 * If more than 4G, clamp to 4G - useracc() below will catch
321 * with an EFAULT, if it's actually necessary.
323 if (oldlen64
> 0x00000000ffffffffULL
)
324 oldlen
= 0xffffffffUL
;
327 if (uap
->old
!= USER_ADDR_NULL
) {
328 if (!useracc(uap
->old
, (user_size_t
)oldlen
, B_WRITE
))
331 * The kernel debug mechanism does not need to take this lock, and
332 * we don't grab the memlock around calls to KERN_PROC because it is reentrant.
333 * Grabbing the lock for a KERN_PROC sysctl makes a deadlock possible 5024049.
335 if (!((name
[1] == KERN_KDEBUG
) && (name
[2] == KERN_KDGETENTROPY
)) &&
336 !(name
[1] == KERN_PROC
)) {
338 memlock_taken
= TRUE
;
341 if (dolock
&& oldlen
) {
342 if ((error
= vslock(uap
->old
, (user_size_t
)oldlen
))) {
343 if (memlock_taken
== TRUE
)
353 my_cred
= kauth_cred_proc_ref(p
);
354 error
= mac_system_check_sysctl(
360 fnp
== kern_sysctl
? 1 : 0,
364 kauth_cred_unref(&my_cred
);
368 error
= (*fnp
)(name
+ 1, uap
->namelen
- 1, uap
->old
,
369 &oldlen
, uap
->new, newlen
, p
);
377 if (vslock_taken
== TRUE
) {
378 error1
= vsunlock(uap
->old
, (user_size_t
)savelen
, B_WRITE
);
382 if (memlock_taken
== TRUE
)
385 if ( (name
[0] != CTL_VFS
) && (error
== ENOTSUP
)) {
387 boolean_t funnel_state
;
390 * Drop the funnel when calling new sysctl code, which will conditionally
391 * grab the funnel if it really needs to.
393 funnel_state
= thread_funnel_set(kernel_flock
, FALSE
);
395 error
= userland_sysctl(p
, name
, uap
->namelen
, uap
->old
, &tmp
,
396 uap
->new, newlen
, &oldlen
);
398 thread_funnel_set(kernel_flock
, funnel_state
);
401 if ((error
) && (error
!= ENOMEM
))
404 if (uap
->oldlenp
!= USER_ADDR_NULL
)
405 error
= suulong(uap
->oldlenp
, oldlen
);
/*
 * Attributes stored in the kernel.
 */
__private_extern__ char corefilename[MAXPATHLEN+1];
__private_extern__ int do_coredump;
__private_extern__ int sugid_coredump;

__private_extern__ int do_count_syscalls;

int securelevel = -1;
434 __unused
size_t newSize
,
440 if (name
[0] == 0 && 1 == namelen
) {
441 return sysctl_rdint(oldBuf
, oldSize
, newBuf
,
442 (cur_proc
->p_flag
& P_AFFINITY
) ? 1 : 0);
443 } else if (name
[0] == 1 && 2 == namelen
) {
445 OSBitAndAtomic(~((uint32_t)P_AFFINITY
), &cur_proc
->p_flag
);
447 OSBitOrAtomic(P_AFFINITY
, &cur_proc
->p_flag
);
461 __unused
size_t newSize
,
465 int istranslated
= 0;
466 kauth_cred_t my_cred
;
472 p
= proc_find(name
[0]);
476 my_cred
= kauth_cred_proc_ref(p
);
477 uid
= kauth_cred_getuid(my_cred
);
478 kauth_cred_unref(&my_cred
);
479 if ((uid
!= kauth_cred_getuid(kauth_cred_get()))
480 && suser(kauth_cred_get(), &cur_proc
->p_acflag
)) {
485 istranslated
= (p
->p_flag
& P_TRANSLATED
);
487 return sysctl_rdint(oldBuf
, oldSize
, newBuf
,
488 (istranslated
!= 0) ? 1 : 0);
492 set_archhandler(__unused proc_t p
, int arch
)
496 struct vnode_attr va
;
497 vfs_context_t ctx
= vfs_context_current();
498 struct exec_archhandler
*archhandler
;
501 case CPU_TYPE_POWERPC
:
502 archhandler
= &exec_archhandler_ppc
;
508 NDINIT(&nd
, LOOKUP
, FOLLOW
| LOCKLEAF
, UIO_SYSSPACE
,
509 CAST_USER_ADDR_T(archhandler
->path
), ctx
);
515 /* Check mount point */
516 if ((nd
.ni_vp
->v_mount
->mnt_flag
& MNT_NOEXEC
) ||
517 (nd
.ni_vp
->v_type
!= VREG
)) {
523 VATTR_WANTED(&va
, va_fsid
);
524 VATTR_WANTED(&va
, va_fileid
);
525 error
= vnode_getattr(nd
.ni_vp
, &va
, ctx
);
532 archhandler
->fsid
= va
.va_fsid
;
533 archhandler
->fileid
= (u_int32_t
)va
.va_fileid
;
537 /* XXX remove once Rosetta is rev'ed */
538 /*****************************************************************************/
540 sysctl_exec_archhandler_ppc(
542 __unused u_int namelen
,
551 char handler
[sizeof(exec_archhandler_ppc
.path
)];
552 vfs_context_t ctx
= vfs_context_current();
555 len
= strlen(exec_archhandler_ppc
.path
) + 1;
559 error
= copyout(exec_archhandler_ppc
.path
, oldBuf
, len
);
566 error
= suser(vfs_context_ucred(ctx
), &p
->p_acflag
);
569 if (newSize
>= sizeof(exec_archhandler_ppc
.path
))
570 return (ENAMETOOLONG
);
571 error
= copyin(newBuf
, handler
, newSize
);
574 handler
[newSize
] = 0;
575 strlcpy(exec_archhandler_ppc
.path
, handler
, MAXPATHLEN
);
576 error
= set_archhandler(p
, CPU_TYPE_POWERPC
);
582 /*****************************************************************************/
585 sysctl_handle_exec_archhandler_ppc(struct sysctl_oid
*oidp
, void *arg1
,
586 int arg2
, struct sysctl_req
*req
)
590 error
= sysctl_handle_string(oidp
, arg1
, arg2
, req
);
596 error
= set_archhandler(req
->p
, CPU_TYPE_POWERPC
);
604 sysctl_handle_kern_threadname( __unused
struct sysctl_oid
*oidp
, __unused
void *arg1
,
605 __unused
int arg2
, struct sysctl_req
*req
)
608 struct uthread
*ut
= get_bsdthread_info(current_thread());
609 user_addr_t oldp
=0, newp
=0;
610 size_t *oldlenp
=NULL
;
614 oldlenp
= &(req
->oldlen
);
616 newlen
= req
->newlen
;
618 /* We want the current length, and maybe the string itself */
620 /* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */
621 size_t currlen
= MAXTHREADNAMESIZE
- 1;
624 /* use length of current thread name */
625 currlen
= strlen(ut
->pth_name
);
627 if(*oldlenp
< currlen
)
629 /* NOTE - we do not copy the NULL terminator */
631 error
= copyout(ut
->pth_name
,oldp
,currlen
);
636 /* return length of thread name minus NULL terminator (just like strlen) */
637 req
->oldidx
= currlen
;
640 /* We want to set the name to something */
643 if(newlen
> (MAXTHREADNAMESIZE
- 1))
647 ut
->pth_name
= (char*)kalloc( MAXTHREADNAMESIZE
);
651 bzero(ut
->pth_name
, MAXTHREADNAMESIZE
);
652 error
= copyin(newp
, ut
->pth_name
, newlen
);
SYSCTL_PROC(_kern, KERN_THREADNAME, threadname, CTLFLAG_ANYBODY | CTLTYPE_STRING | CTLFLAG_RW, 0, 0, sysctl_handle_kern_threadname, "A", "");
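
/*
 * Illustrative sketch (not part of the original file): from user space the
 * handler above is typically reached through sysctlbyname(3).  The thread
 * name used here ("worker-0") is a hypothetical example value.
 *
 *	#include <sys/sysctl.h>
 *	#include <string.h>
 *
 *	static int
 *	example_set_threadname(void)
 *	{
 *		const char *name = "worker-0";
 *		return sysctlbyname("kern.threadname", NULL, NULL,
 *		    (void *)name, strlen(name));
 *	}
 */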
SYSCTL_NODE(_kern, KERN_EXEC, exec, CTLFLAG_RD|CTLFLAG_LOCKED, 0, "");

SYSCTL_NODE(_kern_exec, OID_AUTO, archhandler, CTLFLAG_RD|CTLFLAG_LOCKED, 0, "");

SYSCTL_PROC(_kern_exec_archhandler, OID_AUTO, powerpc,
	CTLTYPE_STRING | CTLFLAG_RW, exec_archhandler_ppc.path, 0,
	sysctl_handle_exec_archhandler_ppc, "A", "");
extern int get_kernel_symfile(proc_t, char **);
__private_extern__ int
sysctl_dopanicinfo(int *, u_int, user_addr_t, size_t *, user_addr_t,
	size_t, proc_t);
/*
 * kernel related system variables.
 */
int
kern_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, proc_t p)
{
682 /* all sysctl names not listed below are terminal at this level */
684 && !(name
[0] == KERN_PROC
685 || name
[0] == KERN_PROF
686 || name
[0] == KERN_KDEBUG
688 || name
[0] == KERN_PROCARGS
690 || name
[0] == KERN_PROCARGS2
691 || name
[0] == KERN_IPC
692 || name
[0] == KERN_SYSV
693 || name
[0] == KERN_AFFINITY
694 || name
[0] == KERN_TRANSLATE
695 || name
[0] == KERN_EXEC
696 || name
[0] == KERN_PANICINFO
697 || name
[0] == KERN_POSIX
698 || name
[0] == KERN_TFP
699 || name
[0] == KERN_TTY
701 || name
[0] == KERN_LCTX
705 return (ENOTDIR
); /* overloaded */
709 return (sysctl_doproc(name
+ 1, namelen
- 1, oldp
, oldlenp
));
712 return (sysctl_doprof(name
+ 1, namelen
- 1, oldp
, oldlenp
,
716 return (kdebug_ops(name
+ 1, namelen
- 1, oldp
, oldlenp
, p
));
719 /* new one as it does not use kinfo_proc */
720 return (sysctl_procargs(name
+ 1, namelen
- 1, oldp
, oldlenp
, p
));
723 /* new one as it does not use kinfo_proc */
724 return (sysctl_procargs2(name
+ 1, namelen
- 1, oldp
, oldlenp
, p
));
727 return(sysctl_dopanicinfo(name
+ 1, namelen
- 1, oldp
, oldlenp
,
731 return sysctl_affinity(name
+1, namelen
-1, oldp
, oldlenp
,
734 return sysctl_translate(name
+1, namelen
-1, oldp
, oldlenp
, newp
,
737 /* XXX remove once Rosetta has rev'ed */
739 return sysctl_exec_archhandler_ppc(name
+1, namelen
-1, oldp
,
740 oldlenp
, newp
, newlen
, p
);
742 case KERN_COUNT_SYSCALLS
:
744 /* valid values passed in:
745 * = 0 means don't keep called counts for each bsd syscall
746 * > 0 means keep called counts for each bsd syscall
747 * = 2 means dump current counts to the system log
748 * = 3 means reset all counts
749 * for example, to dump current counts:
750 * sysctl -w kern.count_calls=2
752 error
= sysctl_int(oldp
, oldlenp
, newp
, newlen
, &tmp
);
758 do_count_syscalls
= 1;
760 else if ( tmp
== 0 || tmp
== 2 || tmp
== 3 ) {
762 extern int syscalls_log
[];
763 extern const char * syscallnames
[];
765 for ( i
= 0; i
< nsysent
; i
++ ) {
766 if ( syscalls_log
[i
] != 0 ) {
768 printf("%d calls - name %s \n", syscalls_log
[i
], syscallnames
[i
]);
776 do_count_syscalls
= 1;
790 * Debugging related system variables.
794 #endif /* DIAGNOSTIC */
795 struct ctldebug debug0
, debug1
;
796 struct ctldebug debug2
, debug3
, debug4
;
797 struct ctldebug debug5
, debug6
, debug7
, debug8
, debug9
;
798 struct ctldebug debug10
, debug11
, debug12
, debug13
, debug14
;
799 struct ctldebug debug15
, debug16
, debug17
, debug18
, debug19
;
800 static struct ctldebug
*debugvars
[CTL_DEBUG_MAXID
] = {
801 &debug0
, &debug1
, &debug2
, &debug3
, &debug4
,
802 &debug5
, &debug6
, &debug7
, &debug8
, &debug9
,
803 &debug10
, &debug11
, &debug12
, &debug13
, &debug14
,
804 &debug15
, &debug16
, &debug17
, &debug18
, &debug19
,
807 debug_sysctl(int *name
, u_int namelen
, user_addr_t oldp
, size_t *oldlenp
,
808 user_addr_t newp
, size_t newlen
, __unused proc_t p
)
810 struct ctldebug
*cdp
;
812 /* all sysctl names at this level are name and field */
814 return (ENOTSUP
); /* overloaded */
815 if (name
[0] < 0 || name
[0] >= CTL_DEBUG_MAXID
)
817 cdp
= debugvars
[name
[0]];
818 if (cdp
->debugname
== 0)
822 return (sysctl_rdstring(oldp
, oldlenp
, newp
, cdp
->debugname
));
823 case CTL_DEBUG_VALUE
:
824 return (sysctl_int(oldp
, oldlenp
, newp
, newlen
, cdp
->debugvar
));
/*
 * The following sysctl_* functions should not be used
 * any more, as they can only cope with callers in
 * user mode: Use new-style
 * sysctl_io_number()/sysctl_io_string() etc. instead.
 */
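
/*
 * Illustrative sketch (not part of the original file): the preferred
 * replacement is a handler built on sysctl_io_number() and registered with
 * SYSCTL_PROC.  The names used here (example_value, sysctl_example) are
 * hypothetical placeholders.
 *
 *	static int example_value = 0;
 *
 *	static int
 *	sysctl_example(__unused struct sysctl_oid *oidp, __unused void *arg1,
 *	    __unused int arg2, struct sysctl_req *req)
 *	{
 *		int new_value, changed;
 *		int error = sysctl_io_number(req, example_value, sizeof(int),
 *		    &new_value, &changed);
 *		if (changed)
 *			example_value = new_value;
 *		return (error);
 *	}
 *
 *	SYSCTL_PROC(_kern, OID_AUTO, example,
 *		CTLTYPE_INT | CTLFLAG_RW,
 *		0, 0, sysctl_example, "I", "");
 */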
/*
 * Validate parameters and get old / set new parameters
 * for an integer-valued sysctl function.
 */
int
sysctl_int(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, int *valp)
{
	int error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	if (oldp && *oldlenp < sizeof(int))
		return (ENOMEM);
	if (newp && newlen != sizeof(int))
		return (EINVAL);
	*oldlenp = sizeof(int);
	if (oldp)
		error = copyout(valp, oldp, sizeof(int));
	if (error == 0 && newp) {
		error = copyin(newp, valp, sizeof(int));
		AUDIT_ARG(value32, *valp);
	}
	return (error);
}
/*
 * As above, but read-only.
 */
int
sysctl_rdint(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, int val)
{
	int error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	if (oldp && *oldlenp < sizeof(int))
		return (ENOMEM);
	if (newp)
		return (EPERM);
	*oldlenp = sizeof(int);
	if (oldp)
		error = copyout((caddr_t)&val, oldp, sizeof(int));
	return (error);
}
889 * Validate parameters and get old / set new parameters
890 * for an quad(64bit)-valued sysctl function.
893 sysctl_quad(user_addr_t oldp
, size_t *oldlenp
,
894 user_addr_t newp
, size_t newlen
, quad_t
*valp
)
898 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
900 if (oldp
&& *oldlenp
< sizeof(quad_t
))
902 if (newp
&& newlen
!= sizeof(quad_t
))
904 *oldlenp
= sizeof(quad_t
);
906 error
= copyout(valp
, oldp
, sizeof(quad_t
));
907 if (error
== 0 && newp
)
908 error
= copyin(newp
, valp
, sizeof(quad_t
));
913 * As above, but read-only.
916 sysctl_rdquad(user_addr_t oldp
, size_t *oldlenp
, user_addr_t newp
, quad_t val
)
920 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
922 if (oldp
&& *oldlenp
< sizeof(quad_t
))
926 *oldlenp
= sizeof(quad_t
);
928 error
= copyout((caddr_t
)&val
, oldp
, sizeof(quad_t
));
/*
 * Validate parameters and get old / set new parameters
 * for a string-valued sysctl function.  Unlike sysctl_string, if you
 * give it a too small (but larger than 0 bytes) buffer, instead of
 * returning ENOMEM, it truncates the returned string to the buffer
 * size.  This preserves the semantics of some library routines
 * implemented via sysctl, which truncate their returned data, rather
 * than simply returning an error.  The returned string is always NUL
 * terminated.
 */
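
/*
 * Illustrative note (not part of the original file): if the stored string is
 * "abcdef" and the caller supplies a buffer smaller than the seven bytes it
 * occupies, this routine hands back a NUL-terminated prefix that fits and
 * reports the shorter length, whereas sysctl_string below fails the same
 * request with ENOMEM.
 */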
943 sysctl_trstring(user_addr_t oldp
, size_t *oldlenp
,
944 user_addr_t newp
, size_t newlen
, char *str
, int maxlen
)
946 int len
, copylen
, error
= 0;
948 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
950 copylen
= len
= strlen(str
) + 1;
951 if (oldp
&& (len
< 0 || *oldlenp
< 1))
953 if (oldp
&& (*oldlenp
< (size_t)len
))
954 copylen
= *oldlenp
+ 1;
955 if (newp
&& (maxlen
< 0 || newlen
>= (size_t)maxlen
))
957 *oldlenp
= copylen
- 1; /* deal with NULL strings correctly */
959 error
= copyout(str
, oldp
, copylen
);
964 error
= copyout((void *)&c
, oldp
, sizeof(char));
967 if (error
== 0 && newp
) {
968 error
= copyin(newp
, str
, newlen
);
970 AUDIT_ARG(text
, (char *)str
);
976 * Validate parameters and get old / set new parameters
977 * for a string-valued sysctl function.
980 sysctl_string(user_addr_t oldp
, size_t *oldlenp
,
981 user_addr_t newp
, size_t newlen
, char *str
, int maxlen
)
985 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
987 len
= strlen(str
) + 1;
988 if (oldp
&& (len
< 0 || *oldlenp
< (size_t)len
))
990 if (newp
&& (maxlen
< 0 || newlen
>= (size_t)maxlen
))
992 *oldlenp
= len
-1; /* deal with NULL strings correctly */
994 error
= copyout(str
, oldp
, len
);
996 if (error
== 0 && newp
) {
997 error
= copyin(newp
, str
, newlen
);
999 AUDIT_ARG(text
, (char *)str
);
1005 * As above, but read-only.
1008 sysctl_rdstring(user_addr_t oldp
, size_t *oldlenp
,
1009 user_addr_t newp
, char *str
)
1013 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1015 len
= strlen(str
) + 1;
1016 if (oldp
&& *oldlenp
< (size_t)len
)
1022 error
= copyout(str
, oldp
, len
);
1027 * Validate parameters and get old / set new parameters
1028 * for a structure oriented sysctl function.
1031 sysctl_struct(user_addr_t oldp
, size_t *oldlenp
,
1032 user_addr_t newp
, size_t newlen
, void *sp
, int len
)
1036 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1038 if (oldp
&& (len
< 0 || *oldlenp
< (size_t)len
))
1040 if (newp
&& (len
< 0 || newlen
> (size_t)len
))
1044 error
= copyout(sp
, oldp
, len
);
1046 if (error
== 0 && newp
)
1047 error
= copyin(newp
, sp
, len
);
1052 * Validate parameters and get old parameters
1053 * for a structure oriented sysctl function.
1056 sysctl_rdstruct(user_addr_t oldp
, size_t *oldlenp
,
1057 user_addr_t newp
, void *sp
, int len
)
1061 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1063 if (oldp
&& (len
< 0 || *oldlenp
< (size_t)len
))
1069 error
= copyout(sp
, oldp
, len
);
1074 * Get file structures.
1078 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1081 struct fileglob
*fg
;
1082 struct extern_file nef
;
1084 if (req
->oldptr
== USER_ADDR_NULL
) {
1086 * overestimate by 10 files
1088 req
->oldidx
= sizeof(filehead
) + (nfiles
+ 10) * sizeof(struct extern_file
);
1093 * first copyout filehead
1095 error
= SYSCTL_OUT(req
, &filehead
, sizeof(filehead
));
1100 * followed by an array of file structures
1102 for (fg
= filehead
.lh_first
; fg
!= 0; fg
= fg
->f_list
.le_next
) {
1103 nef
.f_list
.le_next
= (struct extern_file
*)fg
->f_list
.le_next
;
1104 nef
.f_list
.le_prev
= (struct extern_file
**)fg
->f_list
.le_prev
;
1105 nef
.f_flag
= (fg
->fg_flag
& FMASK
);
1106 nef
.f_type
= fg
->fg_type
;
1107 nef
.f_count
= fg
->fg_count
;
1108 nef
.f_msgcount
= fg
->fg_msgcount
;
1109 nef
.f_cred
= fg
->fg_cred
;
1110 nef
.f_ops
= fg
->fg_ops
;
1111 nef
.f_offset
= fg
->fg_offset
;
1112 nef
.f_data
= fg
->fg_data
;
1113 error
= SYSCTL_OUT(req
, &nef
, sizeof(nef
));
SYSCTL_PROC(_kern, KERN_FILE, file,
	CTLTYPE_STRUCT | CTLFLAG_RW,
	0, 0, sysctl_file, "S,filehead", "");
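
/*
 * Illustrative sketch (not part of the original file): a userland consumer of
 * this legacy interface receives a struct filehead followed immediately by an
 * array of struct extern_file records, e.g.:
 *
 *	int mib[2] = { CTL_KERN, KERN_FILE };
 *	size_t len = 0;
 *	if (sysctl(mib, 2, NULL, &len, NULL, 0) == 0) {
 *		void *buf = malloc(len);
 *		if (buf != NULL && sysctl(mib, 2, buf, &len, NULL, 0) == 0) {
 *			// buf starts with the filehead list header; the
 *			// extern_file entries follow it.
 *		}
 *	}
 */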
1125 sysdoproc_filt_KERN_PROC_PID(proc_t p
, void * arg
)
1127 if (p
->p_pid
!= (pid_t
)*(int*)arg
)
1134 sysdoproc_filt_KERN_PROC_PGRP(proc_t p
, void * arg
)
1136 if (p
->p_pgrpid
!= (pid_t
)*(int*)arg
)
1143 sysdoproc_filt_KERN_PROC_TTY(proc_t p
, void * arg
)
1145 boolean_t funnel_state
;
1150 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
1151 /* This is very racy but list lock is held.. Hmmm. */
1152 if ((p
->p_flag
& P_CONTROLT
) == 0 ||
1153 (p
->p_pgrp
== NULL
) || (p
->p_pgrp
->pg_session
== NULL
) ||
1154 (tp
= SESSION_TP(p
->p_pgrp
->pg_session
)) == TTY_NULL
||
1155 tp
->t_dev
!= (dev_t
)*(int*)arg
)
1160 thread_funnel_set(kernel_flock
, funnel_state
);
1166 sysdoproc_filt_KERN_PROC_UID(proc_t p
, void * arg
)
1168 kauth_cred_t my_cred
;
1171 if (p
->p_ucred
== NULL
)
1173 my_cred
= kauth_cred_proc_ref(p
);
1174 uid
= kauth_cred_getuid(my_cred
);
1175 kauth_cred_unref(&my_cred
);
1177 if (uid
!= (uid_t
)*(int*)arg
)
1185 sysdoproc_filt_KERN_PROC_RUID(proc_t p
, void * arg
)
1187 kauth_cred_t my_cred
;
1190 if (p
->p_ucred
== NULL
)
1192 my_cred
= kauth_cred_proc_ref(p
);
1193 ruid
= my_cred
->cr_ruid
;
1194 kauth_cred_unref(&my_cred
);
1196 if (ruid
!= (uid_t
)*(int*)arg
)
1204 sysdoproc_filt_KERN_PROC_LCID(proc_t p
, void * arg
)
1206 if ((p
->p_lctx
== NULL
) ||
1207 (p
->p_lctx
->lc_id
!= (pid_t
)*(int*)arg
))
1215 * try over estimating by 5 procs
1217 #define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
1218 struct sysdoproc_args
{
1221 boolean_t is_64_bit
;
1233 sysdoproc_callback(proc_t p
, void * arg
)
1235 struct sysdoproc_args
* args
= (struct sysdoproc_args
*)arg
;
1238 if (args
->buflen
>= args
->sizeof_kproc
) {
1239 if ((args
->ruidcheck
!= 0) && (sysdoproc_filt_KERN_PROC_RUID(p
, &args
->uidval
) == 0))
1240 return(PROC_RETURNED
);
1241 if ((args
->uidcheck
!= 0) && (sysdoproc_filt_KERN_PROC_UID(p
, &args
->uidval
) == 0))
1242 return(PROC_RETURNED
);
1243 if ((args
->ttycheck
!= 0) && (sysdoproc_filt_KERN_PROC_TTY(p
, &args
->uidval
) == 0))
1244 return(PROC_RETURNED
);
1246 bzero(args
->kprocp
, args
->sizeof_kproc
);
1247 if (args
->is_64_bit
) {
1248 fill_user64_proc(p
, (struct user64_kinfo_proc
*) args
->kprocp
);
1251 fill_user32_proc(p
, (struct user32_kinfo_proc
*) args
->kprocp
);
1253 error
= copyout(args
->kprocp
, args
->dp
, args
->sizeof_kproc
);
1255 *args
->errorp
= error
;
1256 return(PROC_RETURNED_DONE
);
1259 args
->dp
+= args
->sizeof_kproc
;
1260 args
->buflen
-= args
->sizeof_kproc
;
1262 args
->needed
+= args
->sizeof_kproc
;
1263 return(PROC_RETURNED
);
1267 sysctl_doproc(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
)
1269 user_addr_t dp
= where
;
1271 int buflen
= where
!= USER_ADDR_NULL
? *sizep
: 0;
1273 boolean_t is_64_bit
= FALSE
;
1274 struct user32_kinfo_proc user32_kproc
;
1275 struct user64_kinfo_proc user_kproc
;
1278 int (*filterfn
)(proc_t
, void *) = 0;
1279 struct sysdoproc_args args
;
1284 if (namelen
!= 2 && !(namelen
== 1 && name
[0] == KERN_PROC_ALL
))
1286 is_64_bit
= proc_is64bit(current_proc());
1288 sizeof_kproc
= sizeof(user_kproc
);
1289 kprocp
= (caddr_t
) &user_kproc
;
1292 sizeof_kproc
= sizeof(user32_kproc
);
1293 kprocp
= (caddr_t
) &user32_kproc
;
1300 filterfn
= sysdoproc_filt_KERN_PROC_PID
;
1303 case KERN_PROC_PGRP
:
1304 filterfn
= sysdoproc_filt_KERN_PROC_PGRP
;
1315 case KERN_PROC_RUID
:
1320 case KERN_PROC_LCID
:
1321 filterfn
= sysdoproc_filt_KERN_PROC_LCID
;
1327 args
.buflen
= buflen
;
1328 args
.kprocp
= kprocp
;
1329 args
.is_64_bit
= is_64_bit
;
1331 args
.needed
= needed
;
1332 args
.errorp
= &error
;
1333 args
.uidcheck
= uidcheck
;
1334 args
.ruidcheck
= ruidcheck
;
1335 args
.ttycheck
= ttycheck
;
1336 args
.sizeof_kproc
= sizeof_kproc
;
1337 args
.uidval
= name
[1];
1339 proc_iterate((PROC_ALLPROCLIST
| PROC_ZOMBPROCLIST
), sysdoproc_callback
, &args
, filterfn
, &name
[1]);
1345 needed
= args
.needed
;
1347 if (where
!= USER_ADDR_NULL
) {
1348 *sizep
= dp
- where
;
1349 if (needed
> *sizep
)
1352 needed
+= KERN_PROCSLOP
;
1359 * Fill in an eproc structure for the specified process.
1362 fill_user32_eproc(proc_t p
, struct user32_eproc
*ep
)
1365 kauth_cred_t my_cred
;
1367 struct session
* sessp
;
1370 sessp
= proc_session(p
);
1372 ep
->e_paddr
= CAST_DOWN_EXPLICIT(uint32_t,p
);
1374 if (pg
!= PGRP_NULL
) {
1375 ep
->e_sess
= CAST_DOWN_EXPLICIT(uint32_t,sessp
);
1376 ep
->e_pgid
= p
->p_pgrpid
;
1377 ep
->e_jobc
= pg
->pg_jobc
;
1378 if ((sessp
!= SESSION_NULL
) && sessp
->s_ttyvp
)
1379 ep
->e_flag
= EPROC_CTTY
;
1387 ep
->e_lcid
= p
->p_lctx
->lc_id
;
1392 ep
->e_ppid
= p
->p_ppid
;
1393 /* Pre-zero the fake historical pcred */
1394 bzero(&ep
->e_pcred
, sizeof(ep
->e_pcred
));
1396 my_cred
= kauth_cred_proc_ref(p
);
1398 /* A fake historical pcred */
1399 ep
->e_pcred
.p_ruid
= my_cred
->cr_ruid
;
1400 ep
->e_pcred
.p_svuid
= my_cred
->cr_svuid
;
1401 ep
->e_pcred
.p_rgid
= my_cred
->cr_rgid
;
1402 ep
->e_pcred
.p_svgid
= my_cred
->cr_svgid
;
1403 /* A fake historical *kauth_cred_t */
1404 ep
->e_ucred
.cr_ref
= my_cred
->cr_ref
;
1405 ep
->e_ucred
.cr_uid
= kauth_cred_getuid(my_cred
);
1406 ep
->e_ucred
.cr_ngroups
= my_cred
->cr_ngroups
;
1407 bcopy(my_cred
->cr_groups
, ep
->e_ucred
.cr_groups
, NGROUPS
*sizeof(gid_t
));
1409 kauth_cred_unref(&my_cred
);
1411 if (p
->p_stat
== SIDL
|| p
->p_stat
== SZOMB
) {
1412 ep
->e_vm
.vm_tsize
= 0;
1413 ep
->e_vm
.vm_dsize
= 0;
1414 ep
->e_vm
.vm_ssize
= 0;
1416 ep
->e_vm
.vm_rssize
= 0;
1418 if ((p
->p_flag
& P_CONTROLT
) && (sessp
!= SESSION_NULL
) &&
1419 (tp
= SESSION_TP(sessp
))) {
1420 ep
->e_tdev
= tp
->t_dev
;
1421 ep
->e_tpgid
= sessp
->s_ttypgrpid
;
1422 ep
->e_tsess
= CAST_DOWN_EXPLICIT(uint32_t,tp
->t_session
);
1426 if (SESS_LEADER(p
, sessp
))
1427 ep
->e_flag
|= EPROC_SLEADER
;
1428 bzero(&ep
->e_wmesg
[0], WMESGLEN
+1);
1429 ep
->e_xsize
= ep
->e_xrssize
= 0;
1430 ep
->e_xccount
= ep
->e_xswrss
= 0;
1431 if (sessp
!= SESSION_NULL
)
1432 session_rele(sessp
);
1438 * Fill in an LP64 version of eproc structure for the specified process.
1441 fill_user64_eproc(proc_t p
, struct user64_eproc
*ep
)
1444 struct session
*sessp
= NULL
;
1446 kauth_cred_t my_cred
;
1449 sessp
= proc_session(p
);
1451 ep
->e_paddr
= CAST_USER_ADDR_T(p
);
1452 if (pg
!= PGRP_NULL
) {
1453 ep
->e_sess
= CAST_USER_ADDR_T(sessp
);
1454 ep
->e_pgid
= p
->p_pgrpid
;
1455 ep
->e_jobc
= pg
->pg_jobc
;
1456 if (sessp
!= SESSION_NULL
) {
1458 ep
->e_flag
= EPROC_CTTY
;
1461 ep
->e_sess
= USER_ADDR_NULL
;
1467 ep
->e_lcid
= p
->p_lctx
->lc_id
;
1472 ep
->e_ppid
= p
->p_ppid
;
1473 /* Pre-zero the fake historical pcred */
1474 bzero(&ep
->e_pcred
, sizeof(ep
->e_pcred
));
1476 my_cred
= kauth_cred_proc_ref(p
);
1478 /* A fake historical pcred */
1479 ep
->e_pcred
.p_ruid
= my_cred
->cr_ruid
;
1480 ep
->e_pcred
.p_svuid
= my_cred
->cr_svuid
;
1481 ep
->e_pcred
.p_rgid
= my_cred
->cr_rgid
;
1482 ep
->e_pcred
.p_svgid
= my_cred
->cr_svgid
;
1484 /* A fake historical *kauth_cred_t */
1485 ep
->e_ucred
.cr_ref
= my_cred
->cr_ref
;
1486 ep
->e_ucred
.cr_uid
= kauth_cred_getuid(my_cred
);
1487 ep
->e_ucred
.cr_ngroups
= my_cred
->cr_ngroups
;
1488 bcopy(my_cred
->cr_groups
, ep
->e_ucred
.cr_groups
, NGROUPS
*sizeof(gid_t
));
1490 kauth_cred_unref(&my_cred
);
1492 if (p
->p_stat
== SIDL
|| p
->p_stat
== SZOMB
) {
1493 ep
->e_vm
.vm_tsize
= 0;
1494 ep
->e_vm
.vm_dsize
= 0;
1495 ep
->e_vm
.vm_ssize
= 0;
1497 ep
->e_vm
.vm_rssize
= 0;
1499 if ((p
->p_flag
& P_CONTROLT
) && (sessp
!= SESSION_NULL
) &&
1500 (tp
= SESSION_TP(sessp
))) {
1501 ep
->e_tdev
= tp
->t_dev
;
1502 ep
->e_tpgid
= sessp
->s_ttypgrpid
;
1503 ep
->e_tsess
= CAST_USER_ADDR_T(tp
->t_session
);
1507 if (SESS_LEADER(p
, sessp
))
1508 ep
->e_flag
|= EPROC_SLEADER
;
1509 bzero(&ep
->e_wmesg
[0], WMESGLEN
+1);
1510 ep
->e_xsize
= ep
->e_xrssize
= 0;
1511 ep
->e_xccount
= ep
->e_xswrss
= 0;
1512 if (sessp
!= SESSION_NULL
)
1513 session_rele(sessp
);
1514 if (pg
!= PGRP_NULL
)
1519 * Fill in an eproc structure for the specified process.
1522 fill_user32_externproc(proc_t p
, struct user32_extern_proc
*exp
)
1524 exp
->p_forw
= exp
->p_back
= 0;
1525 exp
->p_starttime
.tv_sec
= p
->p_start
.tv_sec
;
1526 exp
->p_starttime
.tv_usec
= p
->p_start
.tv_usec
;
1528 exp
->p_sigacts
= CAST_DOWN_EXPLICIT(uint32_t,p
->p_sigacts
);
1529 exp
->p_flag
= p
->p_flag
;
1530 if (p
->p_lflag
& P_LTRACED
)
1531 exp
->p_flag
|= P_TRACED
;
1532 if (p
->p_lflag
& P_LPPWAIT
)
1533 exp
->p_flag
|= P_PPWAIT
;
1534 if (p
->p_lflag
& P_LEXIT
)
1535 exp
->p_flag
|= P_WEXIT
;
1536 exp
->p_stat
= p
->p_stat
;
1537 exp
->p_pid
= p
->p_pid
;
1538 exp
->p_oppid
= p
->p_oppid
;
1540 exp
->user_stack
= p
->user_stack
;
1541 exp
->exit_thread
= CAST_DOWN_EXPLICIT(uint32_t,p
->exit_thread
);
1542 exp
->p_debugger
= p
->p_debugger
;
1543 exp
->sigwait
= p
->sigwait
;
1545 #ifdef _PROC_HAS_SCHEDINFO_
1546 exp
->p_estcpu
= p
->p_estcpu
;
1547 exp
->p_pctcpu
= p
->p_pctcpu
;
1548 exp
->p_slptime
= p
->p_slptime
;
1552 exp
->p_slptime
= 0 ;
1554 exp
->p_cpticks
= 0 ;
1558 bcopy(&p
->p_realtimer
, &exp
->p_realtimer
,sizeof(struct itimerval
));
1559 bcopy(&p
->p_rtime
, &exp
->p_rtime
,sizeof(struct timeval
));
1563 exp
->p_traceflag
= 0;
1565 exp
->p_siglist
= 0 ; /* No longer relevant */
1566 exp
->p_textvp
= CAST_DOWN_EXPLICIT(uint32_t,p
->p_textvp
) ;
1567 exp
->p_holdcnt
= 0 ;
	exp->p_sigmask = 0 ;		/* no longer available */
1569 exp
->p_sigignore
= p
->p_sigignore
;
1570 exp
->p_sigcatch
= p
->p_sigcatch
;
1571 exp
->p_priority
= p
->p_priority
;
1573 exp
->p_nice
= p
->p_nice
;
1574 bcopy(&p
->p_comm
, &exp
->p_comm
,MAXCOMLEN
);
1575 exp
->p_comm
[MAXCOMLEN
] = '\0';
1576 exp
->p_pgrp
= CAST_DOWN_EXPLICIT(uint32_t,p
->p_pgrp
) ;
1578 exp
->p_xstat
= p
->p_xstat
;
1579 exp
->p_acflag
= p
->p_acflag
;
1580 exp
->p_ru
= CAST_DOWN_EXPLICIT(uint32_t,p
->p_ru
) ; /* XXX may be NULL */
1584 * Fill in an LP64 version of extern_proc structure for the specified process.
1587 fill_user64_externproc(proc_t p
, struct user64_extern_proc
*exp
)
1589 exp
->p_forw
= exp
->p_back
= USER_ADDR_NULL
;
1590 exp
->p_starttime
.tv_sec
= p
->p_start
.tv_sec
;
1591 exp
->p_starttime
.tv_usec
= p
->p_start
.tv_usec
;
1592 exp
->p_vmspace
= USER_ADDR_NULL
;
1593 exp
->p_sigacts
= CAST_USER_ADDR_T(p
->p_sigacts
);
1594 exp
->p_flag
= p
->p_flag
;
1595 if (p
->p_lflag
& P_LTRACED
)
1596 exp
->p_flag
|= P_TRACED
;
1597 if (p
->p_lflag
& P_LPPWAIT
)
1598 exp
->p_flag
|= P_PPWAIT
;
1599 if (p
->p_lflag
& P_LEXIT
)
1600 exp
->p_flag
|= P_WEXIT
;
1601 exp
->p_stat
= p
->p_stat
;
1602 exp
->p_pid
= p
->p_pid
;
1603 exp
->p_oppid
= p
->p_oppid
;
1605 exp
->user_stack
= p
->user_stack
;
1606 exp
->exit_thread
= CAST_USER_ADDR_T(p
->exit_thread
);
1607 exp
->p_debugger
= p
->p_debugger
;
1608 exp
->sigwait
= p
->sigwait
;
1610 #ifdef _PROC_HAS_SCHEDINFO_
1611 exp
->p_estcpu
= p
->p_estcpu
;
1612 exp
->p_pctcpu
= p
->p_pctcpu
;
1613 exp
->p_slptime
= p
->p_slptime
;
1617 exp
->p_slptime
= 0 ;
1619 exp
->p_cpticks
= 0 ;
1623 exp
->p_realtimer
.it_interval
.tv_sec
= p
->p_realtimer
.it_interval
.tv_sec
;
1624 exp
->p_realtimer
.it_interval
.tv_usec
= p
->p_realtimer
.it_interval
.tv_usec
;
1625 exp
->p_realtimer
.it_value
.tv_sec
= p
->p_realtimer
.it_value
.tv_sec
;
1626 exp
->p_realtimer
.it_value
.tv_usec
= p
->p_realtimer
.it_value
.tv_usec
;
1627 exp
->p_rtime
.tv_sec
= p
->p_rtime
.tv_sec
;
1628 exp
->p_rtime
.tv_usec
= p
->p_rtime
.tv_usec
;
1632 exp
->p_traceflag
= 0 ;
1634 exp
->p_siglist
= 0 ; /* No longer relevant */
1635 exp
->p_textvp
= CAST_USER_ADDR_T(p
->p_textvp
);
1636 exp
->p_holdcnt
= 0 ;
	exp->p_sigmask = 0 ;		/* no longer available */
1638 exp
->p_sigignore
= p
->p_sigignore
;
1639 exp
->p_sigcatch
= p
->p_sigcatch
;
1640 exp
->p_priority
= p
->p_priority
;
1642 exp
->p_nice
= p
->p_nice
;
1643 bcopy(&p
->p_comm
, &exp
->p_comm
,MAXCOMLEN
);
1644 exp
->p_comm
[MAXCOMLEN
] = '\0';
1645 exp
->p_pgrp
= CAST_USER_ADDR_T(p
->p_pgrp
);
1646 exp
->p_addr
= USER_ADDR_NULL
;
1647 exp
->p_xstat
= p
->p_xstat
;
1648 exp
->p_acflag
= p
->p_acflag
;
1649 exp
->p_ru
= CAST_USER_ADDR_T(p
->p_ru
); /* XXX may be NULL */
1653 fill_user32_proc(proc_t p
, struct user32_kinfo_proc
*kp
)
1655 /* on a 64 bit kernel, 32 bit users will get some truncated information */
1656 fill_user32_externproc(p
, &kp
->kp_proc
);
1657 fill_user32_eproc(p
, &kp
->kp_eproc
);
1661 fill_user64_proc(proc_t p
, struct user64_kinfo_proc
*kp
)
1663 fill_user64_externproc(p
, &kp
->kp_proc
);
1664 fill_user64_eproc(p
, &kp
->kp_eproc
);
1668 kdebug_ops(int *name
, u_int namelen
, user_addr_t where
,
1669 size_t *sizep
, proc_t p
)
1676 ret
= suser(kauth_cred_get(), &p
->p_acflag
);
1693 case KERN_KDSETRTCDEC
:
1695 case KERN_KDGETENTROPY
:
1696 ret
= kdbg_control(name
, namelen
, where
, sizep
);
/*
 * Return the top *sizep bytes of the user stack, or the entire area of the
 * user stack down through the saved exec_path, whichever is smaller.
 */
int
sysctl_procargs(int *name, u_int namelen, user_addr_t where,
	size_t *sizep, proc_t cur_proc)
{
	return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 0);
}

static int
sysctl_procargs2(int *name, u_int namelen, user_addr_t where,
	size_t *sizep, proc_t cur_proc)
{
	return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 1);
}
1725 sysctl_procargsx(int *name
, u_int namelen
, user_addr_t where
,
1726 size_t *sizep
, proc_t cur_proc
, int argc_yes
)
1729 int buflen
= where
!= USER_ADDR_NULL
? *sizep
: 0;
1731 struct _vm_map
*proc_map
;
1734 user_addr_t arg_addr
;
1739 vm_offset_t copy_start
, copy_end
;
1742 kauth_cred_t my_cred
;
1749 buflen
-= sizeof(int); /* reserve first word to return argc */
	/* we only care about buflen when where (oldp from sysctl) is not NULL. */
	/* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl) */
	/* is not NULL then the caller wants us to return the length needed to */
	/* hold the data we would return */
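	/*
	 * Illustrative sketch (not part of the original file): the usual userland
	 * calling pattern is therefore a two-step probe, shown here with sysctl(3);
	 * "pid" is a hypothetical process id chosen by the caller.
	 *
	 *	int mib[3] = { CTL_KERN, KERN_PROCARGS2, pid };
	 *	size_t len = 0;
	 *	if (sysctl(mib, 3, NULL, &len, NULL, 0) == 0) {      // length only
	 *		char *buf = malloc(len);
	 *		if (buf != NULL)
	 *			sysctl(mib, 3, buf, &len, NULL, 0);  // fetch the data
	 *	}
	 */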
1755 if (where
!= USER_ADDR_NULL
&& (buflen
<= 0 || buflen
> ARG_MAX
)) {
1761 * Lookup process by pid
1770 * Copy the top N bytes of the stack.
1771 * On all machines we have so far, the stack grows
1774 * If the user expects no more than N bytes of
1775 * argument list, use that as a guess for the
1779 if (!p
->user_stack
) {
1784 if (where
== USER_ADDR_NULL
) {
1785 /* caller only wants to know length of proc args data */
1786 if (sizep
== NULL
) {
1791 size
= p
->p_argslen
;
1794 size
+= sizeof(int);
			/*
			 * old PROCARGS will return the executable's path plus some
			 * extra space for word alignment and data tags
			 */
1801 size
+= PATH_MAX
+ (6 * sizeof(int));
1803 size
+= (size
& (sizeof(int) - 1)) ? (sizeof(int) - (size
& (sizeof(int) - 1))) : 0;
1808 my_cred
= kauth_cred_proc_ref(p
);
1809 uid
= kauth_cred_getuid(my_cred
);
1810 kauth_cred_unref(&my_cred
);
1812 if ((uid
!= kauth_cred_getuid(kauth_cred_get()))
1813 && suser(kauth_cred_get(), &cur_proc
->p_acflag
)) {
1818 if ((u_int
)arg_size
> p
->p_argslen
)
1819 arg_size
= round_page(p
->p_argslen
);
1821 arg_addr
= p
->user_stack
- arg_size
;
1825 * Before we can block (any VM code), make another
1826 * reference to the map to keep it alive. We do
1827 * that by getting a reference on the task itself.
1835 argslen
= p
->p_argslen
;
1837 * Once we have a task reference we can convert that into a
1838 * map reference, which we will use in the calls below. The
1839 * task/process may change its map after we take this reference
1840 * (see execve), but the worst that will happen then is a return
1841 * of stale info (which is always a possibility).
1843 task_reference(task
);
1845 proc_map
= get_task_map_reference(task
);
1846 task_deallocate(task
);
1848 if (proc_map
== NULL
)
1852 ret
= kmem_alloc(kernel_map
, ©_start
, round_page(arg_size
));
1853 if (ret
!= KERN_SUCCESS
) {
1854 vm_map_deallocate(proc_map
);
1858 copy_end
= round_page(copy_start
+ arg_size
);
1860 if( vm_map_copyin(proc_map
, (vm_map_address_t
)arg_addr
,
1861 (vm_map_size_t
)arg_size
, FALSE
, &tmp
) != KERN_SUCCESS
) {
1862 vm_map_deallocate(proc_map
);
1863 kmem_free(kernel_map
, copy_start
,
1864 round_page(arg_size
));
1869 * Now that we've done the copyin from the process'
1870 * map, we can release the reference to it.
1872 vm_map_deallocate(proc_map
);
1874 if( vm_map_copy_overwrite(kernel_map
,
1875 (vm_map_address_t
)copy_start
,
1876 tmp
, FALSE
) != KERN_SUCCESS
) {
1877 kmem_free(kernel_map
, copy_start
,
1878 round_page(arg_size
));
1882 if (arg_size
> argslen
) {
1883 data
= (caddr_t
) (copy_end
- argslen
);
1886 data
= (caddr_t
) (copy_end
- arg_size
);
1891 /* Put processes argc as the first word in the copyout buffer */
1892 suword(where
, p
->p_argc
);
1893 error
= copyout(data
, (where
+ sizeof(int)), size
);
1894 size
+= sizeof(int);
1896 error
= copyout(data
, where
, size
);
	/*
	 * Make the old PROCARGS work to return the executable's path
	 * But, only if there is enough space in the provided buffer
	 *
	 * on entry: data [possibly] points to the beginning of the path
	 *
	 * Note: we keep all pointers & sizes aligned to word boundaries
	 */
1906 if ( (! error
) && (buflen
> 0 && (u_int
)buflen
> argslen
) )
1908 int binPath_sz
, alignedBinPath_sz
= 0;
1909 int extraSpaceNeeded
, addThis
;
1910 user_addr_t placeHere
;
1911 char * str
= (char *) data
;
1914 /* Some apps are really bad about messing up their stacks
1915 So, we have to be extra careful about getting the length
1916 of the executing binary. If we encounter an error, we bail.
1919 /* Limit ourselves to PATH_MAX paths */
1920 if ( max_len
> PATH_MAX
) max_len
= PATH_MAX
;
1924 while ( (binPath_sz
< max_len
-1) && (*str
++ != 0) )
1927 /* If we have a NUL terminator, copy it, too */
1928 if (binPath_sz
< max_len
-1) binPath_sz
+= 1;
		/* Pre-Flight the space requirements */
1932 /* Account for the padding that fills out binPath to the next word */
1933 alignedBinPath_sz
+= (binPath_sz
& (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz
& (sizeof(int)-1))) : 0;
1935 placeHere
= where
+ size
;
1937 /* Account for the bytes needed to keep placeHere word aligned */
1938 addThis
= (placeHere
& (sizeof(int)-1)) ? (sizeof(int)-(placeHere
& (sizeof(int)-1))) : 0;
1940 /* Add up all the space that is needed */
1941 extraSpaceNeeded
= alignedBinPath_sz
+ addThis
+ binPath_sz
+ (4 * sizeof(int));
1943 /* is there is room to tack on argv[0]? */
1944 if ( (buflen
& ~(sizeof(int)-1)) >= ( argslen
+ extraSpaceNeeded
))
1946 placeHere
+= addThis
;
1947 suword(placeHere
, 0);
1948 placeHere
+= sizeof(int);
1949 suword(placeHere
, 0xBFFF0000);
1950 placeHere
+= sizeof(int);
1951 suword(placeHere
, 0);
1952 placeHere
+= sizeof(int);
1953 error
= copyout(data
, placeHere
, binPath_sz
);
1956 placeHere
+= binPath_sz
;
1957 suword(placeHere
, 0);
1958 size
+= extraSpaceNeeded
;
1964 if (copy_start
!= (vm_offset_t
) 0) {
1965 kmem_free(kernel_map
, copy_start
, copy_end
- copy_start
);
1971 if (where
!= USER_ADDR_NULL
)
1978 * Max number of concurrent aio requests
1982 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1984 int new_value
, changed
;
1985 int error
= sysctl_io_number(req
, aio_max_requests
, sizeof(int), &new_value
, &changed
);
1987 /* make sure the system-wide limit is greater than the per process limit */
1988 if (new_value
>= aio_max_requests_per_process
)
1989 aio_max_requests
= new_value
;
1998 * Max number of concurrent aio requests per process
2002 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2004 int new_value
, changed
;
2005 int error
= sysctl_io_number(req
, aio_max_requests_per_process
, sizeof(int), &new_value
, &changed
);
2007 /* make sure per process limit is less than the system-wide limit */
2008 if (new_value
<= aio_max_requests
&& new_value
>= AIO_LISTIO_MAX
)
2009 aio_max_requests_per_process
= new_value
;
2018 * Max number of async IO worker threads
2022 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2024 int new_value
, changed
;
2025 int error
= sysctl_io_number(req
, aio_worker_threads
, sizeof(int), &new_value
, &changed
);
2027 /* we only allow an increase in the number of worker threads */
2028 if (new_value
> aio_worker_threads
) {
2029 _aio_create_worker_threads((new_value
- aio_worker_threads
));
2030 aio_worker_threads
= new_value
;
/*
 * System-wide limit on the max number of processes
 */
static int
sysctl_maxproc
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int error = sysctl_io_number(req, maxproc, sizeof(int), &new_value, &changed);
	if (changed) {
		AUDIT_ARG(value32, new_value);
		/* make sure the system-wide limit is less than the configured hard
		   limit set at kernel compilation */
		if (new_value <= hard_maxproc && new_value > 0)
			maxproc = new_value;
		else
			error = EINVAL;
	}
	return(error);
}
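
/*
 * Illustrative sketch (not part of the original file): the handler above is
 * what services a read, or a privileged write, of kern.maxproc from user
 * space, e.g.:
 *
 *	int value = 0;
 *	size_t len = sizeof(value);
 *	sysctlbyname("kern.maxproc", &value, &len, NULL, 0);              // read
 *	sysctlbyname("kern.maxproc", NULL, NULL, &value, sizeof(value));  // write
 */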
2060 SYSCTL_STRING(_kern
, KERN_OSTYPE
, ostype
,
2061 CTLFLAG_RD
| CTLFLAG_KERN
,
2063 SYSCTL_STRING(_kern
, KERN_OSRELEASE
, osrelease
,
2064 CTLFLAG_RD
| CTLFLAG_KERN
,
2066 SYSCTL_INT(_kern
, KERN_OSREV
, osrevision
,
2067 CTLFLAG_RD
| CTLFLAG_KERN
,
2068 (int *)NULL
, BSD
, "");
2069 SYSCTL_STRING(_kern
, KERN_VERSION
, version
,
2070 CTLFLAG_RD
| CTLFLAG_KERN
,
2074 int debug_kprint_syscall
= 0;
2075 char debug_kprint_syscall_process
[MAXCOMLEN
+1];
2077 SYSCTL_INT (_debug
, OID_AUTO
, kprint_syscall
,
2078 CTLFLAG_RW
, &debug_kprint_syscall
, 0, "kprintf syscall tracing");
2079 SYSCTL_STRING(_debug
, OID_AUTO
, kprint_syscall_process
,
2080 CTLFLAG_RW
, debug_kprint_syscall_process
, sizeof(debug_kprint_syscall_process
),
2081 "name of process for kprintf syscall tracing");
2083 int debug_kprint_current_process(const char **namep
)
2085 struct proc
*p
= current_proc();
2091 if (debug_kprint_syscall_process
[0]) {
2092 /* user asked to scope tracing to a particular process name */
2093 if(0 == strncmp(debug_kprint_syscall_process
,
2094 p
->p_comm
, sizeof(debug_kprint_syscall_process
))) {
2095 /* no value in telling the user that we traced what they asked */
2096 if(namep
) *namep
= NULL
;
2104 /* trace all processes. Tell user what we traced */
2113 /* PR-5293665: need to use a callback function for kern.osversion to set
2114 * osversion in IORegistry */
2117 sysctl_osversion(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
)
2121 rval
= sysctl_handle_string(oidp
, arg1
, arg2
, req
);
2124 IORegistrySetOSBuildVersion((char *)arg1
);
2130 SYSCTL_PROC(_kern
, KERN_OSVERSION
, osversion
,
2131 CTLFLAG_RW
| CTLFLAG_KERN
| CTLTYPE_STRING
,
2132 osversion
, 256 /* OSVERSIZE*/,
2133 sysctl_osversion
, "A", "");
2136 sysctl_sysctl_bootargs
2137 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2142 strlcpy(buf
, PE_boot_args(), 256);
2143 error
= sysctl_io_string(req
, buf
, 256, 0, NULL
);
2147 SYSCTL_PROC(_kern
, OID_AUTO
, bootargs
,
2148 CTLFLAG_LOCKED
| CTLFLAG_RD
| CTLFLAG_KERN
| CTLTYPE_STRING
,
2150 sysctl_sysctl_bootargs
, "A", "bootargs");
2152 SYSCTL_INT(_kern
, KERN_MAXFILES
, maxfiles
,
2153 CTLFLAG_RW
| CTLFLAG_KERN
,
2155 SYSCTL_INT(_kern
, KERN_ARGMAX
, argmax
,
2156 CTLFLAG_RD
| CTLFLAG_KERN
,
2157 (int *)NULL
, ARG_MAX
, "");
2158 SYSCTL_INT(_kern
, KERN_POSIX1
, posix1version
,
2159 CTLFLAG_RD
| CTLFLAG_KERN
,
2160 (int *)NULL
, _POSIX_VERSION
, "");
2161 SYSCTL_INT(_kern
, KERN_NGROUPS
, ngroups
,
2162 CTLFLAG_RD
| CTLFLAG_KERN
,
2163 (int *)NULL
, NGROUPS_MAX
, "");
2164 SYSCTL_INT(_kern
, KERN_JOB_CONTROL
, job_control
,
2165 CTLFLAG_RD
| CTLFLAG_KERN
,
2166 (int *)NULL
, 1, "");
2167 #if 1 /* _POSIX_SAVED_IDS from <unistd.h> */
2168 SYSCTL_INT(_kern
, KERN_SAVED_IDS
, saved_ids
,
2169 CTLFLAG_RD
| CTLFLAG_KERN
,
2170 (int *)NULL
, 1, "");
2172 SYSCTL_INT(_kern
, KERN_SAVED_IDS
, saved_ids
,
2173 CTLFLAG_RD
| CTLFLAG_KERN
,
2176 SYSCTL_INT(_kern
, OID_AUTO
, num_files
,
2179 SYSCTL_COMPAT_INT(_kern
, OID_AUTO
, num_vnodes
,
2182 SYSCTL_INT(_kern
, OID_AUTO
, num_tasks
,
2185 SYSCTL_INT(_kern
, OID_AUTO
, num_threads
,
2187 &thread_max
, 0, "");
2188 SYSCTL_INT(_kern
, OID_AUTO
, num_taskthreads
,
2190 &task_threadmax
, 0, "");
2193 sysctl_maxvnodes (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2195 int oldval
= desiredvnodes
;
2196 int error
= sysctl_io_number(req
, desiredvnodes
, sizeof(int), &desiredvnodes
, NULL
);
2198 if (oldval
!= desiredvnodes
) {
2199 reset_vmobjectcache(oldval
, desiredvnodes
);
2200 resize_namecache(desiredvnodes
);
2206 SYSCTL_PROC(_kern
, KERN_MAXVNODES
, maxvnodes
,
2207 CTLTYPE_INT
| CTLFLAG_RW
,
2208 0, 0, sysctl_maxvnodes
, "I", "");
2210 SYSCTL_PROC(_kern
, KERN_MAXPROC
, maxproc
,
2211 CTLTYPE_INT
| CTLFLAG_RW
,
2212 0, 0, sysctl_maxproc
, "I", "");
2214 SYSCTL_PROC(_kern
, KERN_AIOMAX
, aiomax
,
2215 CTLTYPE_INT
| CTLFLAG_RW
,
2216 0, 0, sysctl_aiomax
, "I", "");
2218 SYSCTL_PROC(_kern
, KERN_AIOPROCMAX
, aioprocmax
,
2219 CTLTYPE_INT
| CTLFLAG_RW
,
2220 0, 0, sysctl_aioprocmax
, "I", "");
2222 SYSCTL_PROC(_kern
, KERN_AIOTHREADS
, aiothreads
,
2223 CTLTYPE_INT
| CTLFLAG_RW
,
2224 0, 0, sysctl_aiothreads
, "I", "");
2228 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2230 int new_value
, changed
;
2231 int error
= sysctl_io_number(req
, securelevel
, sizeof(int), &new_value
, &changed
);
2233 if (!(new_value
< securelevel
&& req
->p
->p_pid
!= 1)) {
2235 securelevel
= new_value
;
2244 SYSCTL_PROC(_kern
, KERN_SECURELVL
, securelevel
,
2245 CTLTYPE_INT
| CTLFLAG_RW
,
2246 0, 0, sysctl_securelvl
, "I", "");
2251 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2254 error
= sysctl_io_string(req
, domainname
, sizeof(domainname
), 0, &changed
);
2256 domainnamelen
= strlen(domainname
);
2261 SYSCTL_PROC(_kern
, KERN_DOMAINNAME
, nisdomainname
,
2262 CTLTYPE_STRING
| CTLFLAG_RW
,
2263 0, 0, sysctl_domainname
, "A", "");
2265 SYSCTL_COMPAT_INT(_kern
, KERN_HOSTID
, hostid
,
2266 CTLFLAG_RW
| CTLFLAG_KERN
,
2271 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2274 error
= sysctl_io_string(req
, hostname
, sizeof(hostname
), 1, &changed
);
2276 hostnamelen
= req
->newlen
;
2282 SYSCTL_PROC(_kern
, KERN_HOSTNAME
, hostname
,
2283 CTLTYPE_STRING
| CTLFLAG_RW
,
2284 0, 0, sysctl_hostname
, "A", "");
2288 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2290 /* Original code allowed writing, I'm copying this, although this all makes
2291 no sense to me. Besides, this sysctl is never used. */
2292 return sysctl_io_string(req
, &req
->p
->p_name
[0], (2*MAXCOMLEN
+1), 1, NULL
);
2295 SYSCTL_PROC(_kern
, KERN_PROCNAME
, procname
,
2296 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_ANYBODY
,
2297 0, 0, sysctl_procname
, "A", "");
2299 SYSCTL_INT(_kern
, KERN_SPECULATIVE_READS
, speculative_reads_disabled
,
2300 CTLFLAG_RW
| CTLFLAG_KERN
,
2301 &speculative_reads_disabled
, 0, "");
2303 SYSCTL_UINT(_kern
, OID_AUTO
, preheat_pages_max
,
2304 CTLFLAG_RW
| CTLFLAG_KERN
,
2305 &preheat_pages_max
, 0, "");
2307 SYSCTL_UINT(_kern
, OID_AUTO
, preheat_pages_min
,
2308 CTLFLAG_RW
| CTLFLAG_KERN
,
2309 &preheat_pages_min
, 0, "");
2311 SYSCTL_UINT(_kern
, OID_AUTO
, preheat_pages_mult
,
2312 CTLFLAG_RW
| CTLFLAG_KERN
,
2313 &preheat_pages_mult
, 0, "");
2317 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2319 time_t tv_sec
= boottime_sec();
2320 struct proc
*p
= req
->p
;
2322 if (proc_is64bit(p
)) {
2323 struct user64_timeval t
;
2326 return sysctl_io_opaque(req
, &t
, sizeof(t
), NULL
);
2328 struct user32_timeval t
;
2331 return sysctl_io_opaque(req
, &t
, sizeof(t
), NULL
);
2335 SYSCTL_PROC(_kern
, KERN_BOOTTIME
, boottime
,
2336 CTLTYPE_STRUCT
| CTLFLAG_RD
,
2337 0, 0, sysctl_boottime
, "S,timeval", "");
2341 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2344 int error
= get_kernel_symfile(req
->p
, &str
);
2347 return sysctl_io_string(req
, str
, 0, 0, NULL
);
2351 SYSCTL_PROC(_kern
, KERN_SYMFILE
, symfile
,
2352 CTLTYPE_STRING
| CTLFLAG_RD
,
2353 0, 0, sysctl_symfile
, "A", "");
2358 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2360 return sysctl_io_number(req
, netboot_root(), sizeof(int), NULL
, NULL
);
2363 SYSCTL_PROC(_kern
, KERN_NETBOOT
, netboot
,
2364 CTLTYPE_INT
| CTLFLAG_RD
,
2365 0, 0, sysctl_netboot
, "I", "");
2368 #ifdef CONFIG_IMGSRC_ACCESS
2371 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2373 vfs_context_t ctx
= vfs_context_current();
2377 if (!vfs_context_issuser(ctx
)) {
2381 if (imgsrc_rootvnode
== NULL
) {
2385 result
= vnode_getwithref(imgsrc_rootvnode
);
2390 devvp
= vnode_mount(imgsrc_rootvnode
)->mnt_devvp
;
2391 result
= vnode_getwithref(devvp
);
2396 result
= sysctl_io_number(req
, vnode_specrdev(devvp
), sizeof(dev_t
), NULL
, NULL
);
2400 vnode_put(imgsrc_rootvnode
);
2404 SYSCTL_PROC(_kern
, OID_AUTO
, imgsrcdev
,
2405 CTLTYPE_INT
| CTLFLAG_RD
,
2406 0, 0, sysctl_imgsrcdev
, "I", "");
2407 #endif /* CONFIG_IMGSRC_ACCESS */
2411 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2413 return sysctl_io_number(req
, (int)req
->p
->user_stack
, sizeof(int), NULL
, NULL
);
2416 SYSCTL_PROC(_kern
, KERN_USRSTACK32
, usrstack
,
2417 CTLTYPE_INT
| CTLFLAG_RD
,
2418 0, 0, sysctl_usrstack
, "I", "");
2422 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2424 return sysctl_io_number(req
, req
->p
->user_stack
, sizeof(req
->p
->user_stack
), NULL
, NULL
);
2427 SYSCTL_PROC(_kern
, KERN_USRSTACK64
, usrstack64
,
2428 CTLTYPE_QUAD
| CTLFLAG_RD
,
2429 0, 0, sysctl_usrstack64
, "Q", "");
2431 SYSCTL_STRING(_kern
, KERN_COREFILE
, corefile
,
2432 CTLFLAG_RW
| CTLFLAG_KERN
,
2433 corefilename
, sizeof(corefilename
), "");
2437 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2439 #ifdef SECURE_KERNEL
2442 int new_value
, changed
;
2443 int error
= sysctl_io_number(req
, do_coredump
, sizeof(int), &new_value
, &changed
);
2445 if ((new_value
== 0) || (new_value
== 1))
2446 do_coredump
= new_value
;
2453 SYSCTL_PROC(_kern
, KERN_COREDUMP
, coredump
,
2454 CTLTYPE_INT
| CTLFLAG_RW
,
2455 0, 0, sysctl_coredump
, "I", "");
static int
sysctl_suid_coredump
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
	return (ENOTSUP);
#else
	int new_value, changed;
	int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed);
	if (changed) {
		if ((new_value == 0) || (new_value == 1))
			sugid_coredump = new_value;
		else
			error = EINVAL;
	}
	return(error);
#endif
}
SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
		CTLTYPE_INT | CTLFLAG_RW,
		0, 0, sysctl_suid_coredump, "I", "");
static int
sysctl_delayterm
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	struct proc *p = req->p;
	int new_value, changed;
	int error = sysctl_io_number(req, (req->p->p_lflag & P_LDELAYTERM)? 1: 0, sizeof(int), &new_value, &changed);
	if (changed) {
		proc_lock(p);
		if (new_value)
			req->p->p_lflag |=  P_LDELAYTERM;
		else
			req->p->p_lflag &= ~P_LDELAYTERM;
		proc_unlock(p);
	}
	return(error);
}
SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
		CTLTYPE_INT | CTLFLAG_RW,
		0, 0, sysctl_delayterm, "I", "");
static int
sysctl_rage_vnode
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	struct proc *p = req->p;
	uthread_t ut;
	int new_value, old_value, changed;
	int error;

	ut = get_bsdthread_info(current_thread());

	if (ut->uu_flag & UT_RAGE_VNODES)
		old_value = KERN_RAGE_THREAD;
	else if (p->p_lflag & P_LRAGE_VNODES)
		old_value = KERN_RAGE_PROC;
	else
		old_value = 0;

	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);

	if (error == 0) {
		switch (new_value) {
		case KERN_RAGE_PROC:
			proc_lock(p);
			p->p_lflag |= P_LRAGE_VNODES;
			proc_unlock(p);
			break;
		case KERN_UNRAGE_PROC:
			proc_lock(p);
			p->p_lflag &= ~P_LRAGE_VNODES;
			proc_unlock(p);
			break;

		case KERN_RAGE_THREAD:
			ut->uu_flag |= UT_RAGE_VNODES;
			break;
		case KERN_UNRAGE_THREAD:
			ut = get_bsdthread_info(current_thread());
			ut->uu_flag &= ~UT_RAGE_VNODES;
			break;
		}
	}
	return(error);
}
SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY,
		0, 0, sysctl_rage_vnode, "I", "");
static int
sysctl_kern_check_openevt
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	struct proc *p = req->p;
	int new_value, old_value, changed;
	int error;

	if (p->p_flag & P_CHECKOPENEVT) {
		old_value = KERN_OPENEVT_PROC;
	} else {
		old_value = 0;
	}

	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);

	if (error == 0) {
		switch (new_value) {
		case KERN_OPENEVT_PROC:
			OSBitOrAtomic(P_CHECKOPENEVT, &p->p_flag);
			break;

		case KERN_UNOPENEVT_PROC:
			OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT), &p->p_flag);
			break;

		default:
			error = EINVAL;
		}
	}
	return(error);
}
SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY,
		0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");
static int
sysctl_nx
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
	return ENOTSUP;
#else
	int new_value, changed;
	int error;

	error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);
	if (error)
		return error;

	if (changed) {
#if defined(__i386__) || defined(__x86_64__)
		/*
		 * Only allow setting if NX is supported on the chip
		 */
		if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD))
			return ENOTSUP;
#endif
		nx_enabled = new_value;
	}
	return(error);
#endif /* SECURE_KERNEL */
}
SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN,
		0, 0, sysctl_nx, "I", "");
static int
sysctl_loadavg
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	if (proc_is64bit(req->p)) {
		struct user64_loadavg loadinfo64;
		fill_loadavg64(&averunnable, &loadinfo64);
		return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);
	} else {
		struct user32_loadavg loadinfo32;
		fill_loadavg32(&averunnable, &loadinfo32);
		return sysctl_io_opaque(req, &loadinfo32, sizeof(loadinfo32), NULL);
	}
}
SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
		CTLTYPE_STRUCT | CTLFLAG_RD,
		0, 0, sysctl_loadavg, "S,loadavg", "");
static int
sysctl_swapusage
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error;
	uint64_t swap_total;
	uint64_t swap_avail;
	vm_size_t swap_pagesize;
	boolean_t swap_encrypted;
	struct xsw_usage xsu;

	error = macx_swapinfo(&swap_total,
			      &swap_avail,
			      &swap_pagesize,
			      &swap_encrypted);
	if (error)
		return error;

	xsu.xsu_total = swap_total;
	xsu.xsu_avail = swap_avail;
	xsu.xsu_used = swap_total - swap_avail;
	xsu.xsu_pagesize = swap_pagesize;
	xsu.xsu_encrypted = swap_encrypted;
	return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL);
}
SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
		CTLTYPE_STRUCT | CTLFLAG_RD,
		0, 0, sysctl_swapusage, "S,xsw_usage", "");
/* this kernel does NOT implement shared_region_make_private_np() */
SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
		CTLFLAG_RD,
		(int *)NULL, 0, "");
#if defined(__i386__) || defined(__x86_64__)
static int
sysctl_sysctl_exec_affinity(__unused struct sysctl_oid *oidp,
			   __unused void *arg1, __unused int arg2,
			   struct sysctl_req *req)
{
	proc_t cur_proc = req->p;
	int error;

	if (req->oldptr != USER_ADDR_NULL) {
		cpu_type_t oldcputype = (cur_proc->p_flag & P_AFFINITY) ? CPU_TYPE_POWERPC : CPU_TYPE_I386;
		if ((error = SYSCTL_OUT(req, &oldcputype, sizeof(oldcputype))))
			return error;
	}

	if (req->newptr != USER_ADDR_NULL) {
		cpu_type_t newcputype;
		if ((error = SYSCTL_IN(req, &newcputype, sizeof(newcputype))))
			return error;
		if (newcputype == CPU_TYPE_I386)
			OSBitAndAtomic(~((uint32_t)P_AFFINITY), &cur_proc->p_flag);
		else if (newcputype == CPU_TYPE_POWERPC)
			OSBitOrAtomic(P_AFFINITY, &cur_proc->p_flag);
		else
			return (EINVAL);
	}

	return 0;
}
SYSCTL_PROC(_sysctl, OID_AUTO, proc_exec_affinity, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, 0, 0, sysctl_sysctl_exec_affinity ,"I","proc_exec_affinity");
#endif /* defined(__i386__) || defined(__x86_64__) */
static int
fetch_process_cputype(
	proc_t cur_proc,
	int *name,
	u_int namelen,
	cpu_type_t *cputype)
{
	proc_t p = PROC_NULL;
	int refheld = 0;
	cpu_type_t ret = 0;
	int error = 0;

	if (namelen == 0)
		p = cur_proc;
	else if (namelen == 1) {
		p = proc_find(name[0]);
		if (p == NULL)
			return (EINVAL);
		refheld = 1;
	} else {
		error = EINVAL;
		goto out;
	}

#if defined(__i386__) || defined(__x86_64__)
	if (p->p_flag & P_TRANSLATED) {
		ret = CPU_TYPE_POWERPC;
	}
	else
#endif
	{
		ret = cpu_type();
		if (IS_64BIT_PROCESS(p))
			ret |= CPU_ARCH_ABI64;
	}
	*cputype = ret;

	if (refheld != 0)
		proc_rele(p);
out:
	return (error);
}
static int
sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
		    struct sysctl_req *req)
{
	int error;
	int res = 1;
	cpu_type_t proc_cputype = 0;
	if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
		return error;
	if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
		res = 0;
	return SYSCTL_OUT(req, &res, sizeof(res));
}
SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_native ,"I","proc_native");
static int
sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
		     struct sysctl_req *req)
{
	int error;
	cpu_type_t proc_cputype = 0;
	if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
		return error;
	return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
}
SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_cputype ,"I","proc_cputype");
static int
sysctl_safeboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, boothowto & RB_SAFEBOOT ? 1 : 0, sizeof(int), NULL, NULL);
}
SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
		CTLTYPE_INT | CTLFLAG_RD,
		0, 0, sysctl_safeboot, "I", "");
static int
sysctl_singleuser
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, boothowto & RB_SINGLE ? 1 : 0, sizeof(int), NULL, NULL);
}
SYSCTL_PROC(_kern, OID_AUTO, singleuser,
		CTLTYPE_INT | CTLFLAG_RD,
		0, 0, sysctl_singleuser, "I", "");
/*
 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
 */
extern boolean_t	affinity_sets_enabled;
extern int		affinity_sets_mapping;

SYSCTL_INT (_kern, OID_AUTO, affinity_sets_enabled,
	    CTLFLAG_RW, (int *) &affinity_sets_enabled, 0, "hinting enabled");
SYSCTL_INT (_kern, OID_AUTO, affinity_sets_mapping,
	    CTLFLAG_RW, &affinity_sets_mapping, 0, "mapping policy");
/*
 * Limit on total memory users can wire.
 *
 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
 *
 * vm_user_wire_limit - per address space limit on wired memory.  This puts a cap on the process's rlimit value.
 *
 * These values are initialized to reasonable defaults at boot time based on the available physical memory.
 *
 * All values are in bytes.
 */

vm_map_size_t	vm_global_no_user_wire_amount;
vm_map_size_t	vm_global_user_wire_limit;
vm_map_size_t	vm_user_wire_limit;

/*
 * There needs to be a more automatic/elegant way to do this.
 */

SYSCTL_QUAD(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW, &vm_global_no_user_wire_amount, "");
SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW, &vm_global_user_wire_limit, "");
SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW, &vm_user_wire_limit, "");
/*
 * enable back trace events for thread blocks
 */

extern	uint32_t	kdebug_thread_block;

SYSCTL_INT (_kern, OID_AUTO, kdebug_thread_block,
	    CTLFLAG_RW, &kdebug_thread_block, 0, "kdebug thread_block");
/*
 * Kernel stack size and depth
 */

SYSCTL_INT (_kern, OID_AUTO, stack_size,
	    CTLFLAG_RD, (int *) &kernel_stack_size, 0, "Kernel stack size");
SYSCTL_INT (_kern, OID_AUTO, stack_depth_max,
	    CTLFLAG_RD, (int *) &kernel_stack_depth_max, 0, "Max kernel stack depth at interrupt or context switch");
/*
 * enable back trace for port allocations
 */
extern int ipc_portbt;

SYSCTL_INT(_kern, OID_AUTO, ipc_portbt,
		CTLFLAG_RW | CTLFLAG_KERN,
		&ipc_portbt, 0, "");