2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
30 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
32 * Copyright (c) 1982, 1986, 1989, 1993
33 * The Regents of the University of California. All rights reserved.
35 * This code is derived from software contributed to Berkeley by
36 * Mike Karels at Berkeley Software Design, Inc.
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/kernel.h>
76 #include <sys/malloc.h>
77 #include <sys/proc_internal.h>
78 #include <sys/kauth.h>
79 #include <sys/file_internal.h>
80 #include <sys/vnode_internal.h>
81 #include <sys/unistd.h>
83 #include <sys/ioctl.h>
84 #include <sys/namei.h>
86 #include <sys/disklabel.h>
88 #include <sys/sysctl.h>
90 #include <sys/aio_kern.h>
92 #include <bsm/audit_kernel.h>
94 #include <mach/machine.h>
95 #include <mach/mach_types.h>
96 #include <mach/vm_param.h>
97 #include <kern/task.h>
98 #include <kern/lock.h>
99 #include <vm/vm_kern.h>
100 #include <vm/vm_map.h>
101 #include <mach/host_info.h>
103 extern vm_map_t bsd_pageable_map
;
105 #include <sys/mount_internal.h>
106 #include <sys/kdebug.h>
107 #include <sys/sysproto.h>
109 #include <IOKit/IOPlatformExpert.h>
110 #include <pexpert/pexpert.h>
112 #include <machine/machine_routines.h>
113 #include <machine/exec.h>
115 #include <vm/vm_protos.h>
118 #include <i386/cpuid.h>
121 sysctlfn kern_sysctl
;
123 sysctlfn debug_sysctl
;
125 extern sysctlfn vm_sysctl
;
126 extern sysctlfn vfs_sysctl
;
127 extern sysctlfn net_sysctl
;
128 extern sysctlfn cpu_sysctl
;
129 extern int aio_max_requests
;
130 extern int aio_max_requests_per_process
;
131 extern int aio_worker_threads
;
132 extern int maxfilesperproc
;
133 extern int lowpri_IO_window_msecs
;
134 extern int lowpri_IO_delay_msecs
;
135 extern int nx_enabled
;
138 fill_eproc(struct proc
*p
, struct eproc
*ep
);
140 fill_externproc(struct proc
*p
, struct extern_proc
*exp
);
142 fill_user_eproc(struct proc
*p
, struct user_eproc
*ep
);
144 fill_user_proc(struct proc
*p
, struct user_kinfo_proc
*kp
);
146 fill_user_externproc(struct proc
*p
, struct user_extern_proc
*exp
);
148 kdbg_control(int *name
, u_int namelen
, user_addr_t where
, size_t * sizep
);
150 kdebug_ops(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
, struct proc
*p
);
156 pcsamples_ops(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
,
158 __private_extern__ kern_return_t
159 reset_vmobjectcache(unsigned int val1
, unsigned int val2
);
161 resize_namecache(u_int newsize
);
163 sysctl_aiomax(user_addr_t oldp
, size_t *oldlenp
, user_addr_t newp
, size_t newlen
);
165 sysctl_aioprocmax(user_addr_t oldp
, size_t *oldlenp
, user_addr_t newp
, size_t newlen
);
167 sysctl_aiothreads(user_addr_t oldp
, size_t *oldlenp
, user_addr_t newp
, size_t newlen
);
169 sysctl_clockrate(user_addr_t where
, size_t *sizep
);
171 sysctl_doproc(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
);
173 sysctl_doprof(int *name
, u_int namelen
, user_addr_t oldp
, size_t *oldlenp
,
174 user_addr_t newp
, size_t newlen
);
176 sysctl_file(user_addr_t where
, size_t *sizep
);
178 fill_proc(struct proc
*p
, struct kinfo_proc
*kp
);
180 sysctl_maxfilesperproc(user_addr_t oldp
, size_t *oldlenp
,
181 user_addr_t newp
, size_t newlen
);
183 sysctl_maxprocperuid(user_addr_t oldp
, size_t *oldlenp
,
184 user_addr_t newp
, size_t newlen
);
186 sysctl_maxproc(user_addr_t oldp
, size_t *oldlenp
,
187 user_addr_t newp
, size_t newlen
);
189 sysctl_procargs(int *name
, u_int namelen
, user_addr_t where
,
190 size_t *sizep
, struct proc
*cur_proc
);
192 sysctl_procargs2(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
,
193 struct proc
*cur_proc
);
195 sysctl_procargsx(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
,
196 struct proc
*cur_proc
, int argc_yes
);
198 sysctl_struct(user_addr_t oldp
, size_t *oldlenp
, user_addr_t newp
,
199 size_t newlen
, void *sp
, int len
);
201 sysctl_vnode(user_addr_t where
, size_t *sizep
);
205 * temporary location for vm_sysctl. This should be machine independent
208 extern uint32_t mach_factor
[3];
211 loadavg32to64(struct loadavg
*la32
, struct user_loadavg
*la64
)
213 la64
->ldavg
[0] = la32
->ldavg
[0];
214 la64
->ldavg
[1] = la32
->ldavg
[1];
215 la64
->ldavg
[2] = la32
->ldavg
[2];
216 la64
->fscale
= (user_long_t
)la32
->fscale
;
220 vm_sysctl(int *name
, __unused u_int namelen
, user_addr_t oldp
, size_t *oldlenp
,
221 user_addr_t newp
, size_t newlen
, __unused
struct proc
*p
)
223 struct loadavg loadinfo
;
227 if (proc_is64bit(p
)) {
228 struct user_loadavg loadinfo64
;
229 loadavg32to64(&averunnable
, &loadinfo64
);
230 return (sysctl_struct(oldp
, oldlenp
, newp
, newlen
,
231 &loadinfo64
, sizeof(loadinfo64
)));
233 return (sysctl_struct(oldp
, oldlenp
, newp
, newlen
,
234 &averunnable
, sizeof(struct loadavg
)));
237 loadinfo
.ldavg
[0] = mach_factor
[0];
238 loadinfo
.ldavg
[1] = mach_factor
[1];
239 loadinfo
.ldavg
[2] = mach_factor
[2];
240 loadinfo
.fscale
= LSCALE
;
241 if (proc_is64bit(p
)) {
242 struct user_loadavg loadinfo64
;
243 loadavg32to64(&loadinfo
, &loadinfo64
);
244 return (sysctl_struct(oldp
, oldlenp
, newp
, newlen
,
245 &loadinfo64
, sizeof(loadinfo64
)));
247 return (sysctl_struct(oldp
, oldlenp
, newp
, newlen
,
248 &loadinfo
, sizeof(struct loadavg
)));
254 uint32_t swap_pagesize
;
255 boolean_t swap_encrypted
;
256 struct xsw_usage xsu
;
258 error
= macx_swapinfo(&swap_total
,
265 xsu
.xsu_total
= swap_total
;
266 xsu
.xsu_avail
= swap_avail
;
267 xsu
.xsu_used
= swap_total
- swap_avail
;
268 xsu
.xsu_pagesize
= swap_pagesize
;
269 xsu
.xsu_encrypted
= swap_encrypted
;
270 return sysctl_struct(oldp
, oldlenp
, newp
, newlen
,
271 &xsu
, sizeof (struct xsw_usage
));
287 static struct sysctl_lock
{
294 __sysctl(struct proc
*p
, struct __sysctl_args
*uap
, __unused register_t
*retval
)
296 int error
, dolock
= 1;
297 size_t savelen
= 0, oldlen
= 0, newlen
;
298 sysctlfn
*fnp
= NULL
;
299 int name
[CTL_MAXNAME
];
304 * all top-level sysctl names are non-terminal
306 if (uap
->namelen
> CTL_MAXNAME
|| uap
->namelen
< 2)
308 error
= copyin(uap
->name
, &name
[0], uap
->namelen
* sizeof(int));
312 AUDIT_ARG(ctlname
, name
, uap
->namelen
);
314 if (proc_is64bit(p
)) {
315 /* uap->newlen is a size_t value which grows to 64 bits
316 * when coming from a 64-bit process. since it's doubtful we'll
317 * have a sysctl newp buffer greater than 4GB we shrink it to size_t
319 newlen
= CAST_DOWN(size_t, uap
->newlen
);
322 newlen
= uap
->newlen
;
325 /* CTL_UNSPEC is used to get oid to AUTO_OID */
326 if (uap
->new != USER_ADDR_NULL
327 && ((name
[0] == CTL_KERN
328 && !(name
[1] == KERN_IPC
|| name
[1] == KERN_PANICINFO
|| name
[1] == KERN_PROCDELAYTERM
||
329 name
[1] == KERN_PROC_LOW_PRI_IO
|| name
[1] == KERN_PROCNAME
|| name
[1] == KERN_THALTSTACK
))
330 || (name
[0] == CTL_HW
)
331 || (name
[0] == CTL_VM
)
332 || (name
[0] == CTL_VFS
))
333 && (error
= suser(kauth_cred_get(), &p
->p_acflag
)))
339 if ((name
[1] != KERN_VNODE
) && (name
[1] != KERN_FILE
)
340 && (name
[1] != KERN_PROC
))
359 if (uap
->oldlenp
!= USER_ADDR_NULL
) {
360 uint64_t oldlen64
= fuulong(uap
->oldlenp
);
362 oldlen
= CAST_DOWN(size_t, oldlen64
);
364 * If more than 4G, clamp to 4G - useracc() below will catch
365 * with an EFAULT, if it's actually necessary.
367 if (oldlen64
> 0x00000000ffffffffULL
)
368 oldlen
= 0xffffffffUL
;
371 if (uap
->old
!= USER_ADDR_NULL
) {
372 if (!useracc(uap
->old
, (user_size_t
)oldlen
, B_WRITE
))
375 /* The pc sampling mechanism does not need to take this lock */
376 if ((name
[1] != KERN_PCSAMPLES
) &&
377 (!((name
[1] == KERN_KDEBUG
) && (name
[2] == KERN_KDGETENTROPY
)))) {
378 while (memlock
.sl_lock
) {
380 sleep((caddr_t
)&memlock
, PRIBIO
+1);
386 if (dolock
&& oldlen
&&
387 (error
= vslock(uap
->old
, (user_size_t
)oldlen
))) {
388 if ((name
[1] != KERN_PCSAMPLES
) &&
389 (! ((name
[1] == KERN_KDEBUG
) && (name
[2] == KERN_KDGETENTROPY
)))) {
391 if (memlock
.sl_want
) {
393 wakeup((caddr_t
)&memlock
);
402 error
= (*fnp
)(name
+ 1, uap
->namelen
- 1, uap
->old
,
403 &oldlen
, uap
->new, newlen
, p
);
408 if ( (name
[0] != CTL_VFS
) && (error
== ENOTSUP
)) {
410 error
= userland_sysctl(p
, name
, uap
->namelen
, uap
->old
, &tmp
,
411 1, uap
->new, newlen
, &oldlen
);
414 if (uap
->old
!= USER_ADDR_NULL
) {
415 if (dolock
&& savelen
) {
416 error1
= vsunlock(uap
->old
, (user_size_t
)savelen
, B_WRITE
);
417 if (!error
&& error1
)
420 if (name
[1] != KERN_PCSAMPLES
) {
422 if (memlock
.sl_want
) {
424 wakeup((caddr_t
)&memlock
);
428 if ((error
) && (error
!= ENOMEM
))
431 if (uap
->oldlenp
!= USER_ADDR_NULL
) {
432 i
= suulong(uap
->oldlenp
, oldlen
);
441 * Attributes stored in the kernel.
443 __private_extern__
char corefilename
[MAXPATHLEN
+1];
444 __private_extern__
int do_coredump
;
445 __private_extern__
int sugid_coredump
;
449 int securelevel
= -1;
461 __unused
size_t newSize
,
462 struct proc
*cur_proc
)
467 if (name
[0] == 0 && 1 == namelen
) {
468 return sysctl_rdint(oldBuf
, oldSize
, newBuf
,
469 (cur_proc
->p_flag
& P_AFFINITY
) ? 1 : 0);
470 } else if (name
[0] == 1 && 2 == namelen
) {
472 cur_proc
->p_flag
&= ~P_AFFINITY
;
474 cur_proc
->p_flag
|= P_AFFINITY
;
489 __unused
size_t newSize
,
490 struct proc
*cur_proc
)
501 if ((kauth_cred_getuid(p
->p_ucred
) != kauth_cred_getuid(kauth_cred_get()))
502 && suser(kauth_cred_get(), &cur_proc
->p_acflag
))
505 return sysctl_rdint(oldBuf
, oldSize
, newBuf
,
506 (p
->p_flag
& P_TRANSLATED
) ? 1 : 0);
510 set_archhandler(struct proc
*p
, int arch
)
514 struct vnode_attr va
;
515 struct vfs_context context
;
519 case CPU_TYPE_POWERPC
:
520 archhandler
= exec_archhandler_ppc
.path
;
527 context
.vc_ucred
= kauth_cred_get();
529 NDINIT(&nd
, LOOKUP
, FOLLOW
| LOCKLEAF
, UIO_SYSSPACE32
,
530 CAST_USER_ADDR_T(archhandler
), &context
);
536 /* Check mount point */
537 if ((nd
.ni_vp
->v_mount
->mnt_flag
& MNT_NOEXEC
) ||
538 (nd
.ni_vp
->v_type
!= VREG
)) {
544 VATTR_WANTED(&va
, va_fsid
);
545 VATTR_WANTED(&va
, va_fileid
);
546 error
= vnode_getattr(nd
.ni_vp
, &va
, &context
);
553 exec_archhandler_ppc
.fsid
= va
.va_fsid
;
554 exec_archhandler_ppc
.fileid
= (u_long
)va
.va_fileid
;
559 sysctl_exec_archhandler_ppc(
561 __unused u_int namelen
,
571 struct vnode_attr va
;
572 char handler
[sizeof(exec_archhandler_ppc
.path
)];
573 struct vfs_context context
;
576 context
.vc_ucred
= kauth_cred_get();
579 len
= strlen(exec_archhandler_ppc
.path
) + 1;
583 error
= copyout(exec_archhandler_ppc
.path
, oldBuf
, len
);
590 error
= suser(context
.vc_ucred
, &p
->p_acflag
);
593 if (newSize
>= sizeof(exec_archhandler_ppc
.path
))
594 return (ENAMETOOLONG
);
595 error
= copyin(newBuf
, handler
, newSize
);
598 handler
[newSize
] = 0;
599 strcpy(exec_archhandler_ppc
.path
, handler
);
600 error
= set_archhandler(p
, CPU_TYPE_POWERPC
);
607 SYSCTL_NODE(_kern
, KERN_EXEC
, exec
, CTLFLAG_RD
, 0, "");
609 SYSCTL_NODE(_kern_exec
, OID_AUTO
, archhandler
, CTLFLAG_RD
, 0, "");
611 SYSCTL_STRING(_kern_exec_archhandler
, OID_AUTO
, powerpc
, CTLFLAG_RD
,
612 exec_archhandler_ppc
.path
, 0, "");
614 extern int get_kernel_symfile( struct proc
*, char **);
615 __private_extern__
int
616 sysctl_dopanicinfo(int *, u_int
, user_addr_t
, size_t *, user_addr_t
,
617 size_t, struct proc
*);
620 * kernel related system variables.
623 kern_sysctl(int *name
, u_int namelen
, user_addr_t oldp
, size_t *oldlenp
,
624 user_addr_t newp
, size_t newlen
, struct proc
*p
)
626 int error
, level
, inthostid
, tmp
;
627 unsigned int oldval
=0;
629 /* all sysctl names not listed below are terminal at this level */
631 && !(name
[0] == KERN_PROC
632 || name
[0] == KERN_PROF
633 || name
[0] == KERN_KDEBUG
634 || name
[0] == KERN_PROCARGS
635 || name
[0] == KERN_PROCARGS2
636 || name
[0] == KERN_PCSAMPLES
637 || name
[0] == KERN_IPC
638 || name
[0] == KERN_SYSV
639 || name
[0] == KERN_AFFINITY
640 || name
[0] == KERN_TRANSLATE
641 || name
[0] == KERN_EXEC
642 || name
[0] == KERN_PANICINFO
643 || name
[0] == KERN_POSIX
644 || name
[0] == KERN_TFP
)
646 return (ENOTDIR
); /* overloaded */
650 return (sysctl_rdstring(oldp
, oldlenp
, newp
, ostype
));
652 return (sysctl_rdstring(oldp
, oldlenp
, newp
, osrelease
));
654 return (sysctl_rdint(oldp
, oldlenp
, newp
, BSD
));
656 return (sysctl_rdstring(oldp
, oldlenp
, newp
, version
));
658 oldval
= desiredvnodes
;
659 error
= sysctl_int(oldp
, oldlenp
, newp
,
660 newlen
, &desiredvnodes
);
661 reset_vmobjectcache(oldval
, desiredvnodes
);
662 resize_namecache(desiredvnodes
);
665 return (sysctl_maxproc(oldp
, oldlenp
, newp
, newlen
));
667 return (sysctl_int(oldp
, oldlenp
, newp
, newlen
, &maxfiles
));
668 case KERN_MAXPROCPERUID
:
669 return( sysctl_maxprocperuid( oldp
, oldlenp
, newp
, newlen
) );
670 case KERN_MAXFILESPERPROC
:
671 return( sysctl_maxfilesperproc( oldp
, oldlenp
, newp
, newlen
) );
673 return (sysctl_rdint(oldp
, oldlenp
, newp
, ARG_MAX
));
676 if ((error
= sysctl_int(oldp
, oldlenp
, newp
, newlen
, &level
)) ||
677 newp
== USER_ADDR_NULL
)
679 if (level
< securelevel
&& p
->p_pid
!= 1)
684 error
= sysctl_trstring(oldp
, oldlenp
, newp
, newlen
,
685 hostname
, sizeof(hostname
));
687 hostnamelen
= newlen
;
689 case KERN_DOMAINNAME
:
690 error
= sysctl_string(oldp
, oldlenp
, newp
, newlen
,
691 domainname
, sizeof(domainname
));
693 domainnamelen
= newlen
;
696 inthostid
= hostid
; /* XXX assumes sizeof long <= sizeof int */
697 error
= sysctl_int(oldp
, oldlenp
, newp
, newlen
, &inthostid
);
701 return (sysctl_clockrate(oldp
, oldlenp
));
706 t
.tv_sec
= boottime_sec();
709 return (sysctl_rdstruct(oldp
, oldlenp
, newp
, &t
,
710 sizeof(struct timeval
)));
713 return (sysctl_vnode(oldp
, oldlenp
));
715 return (sysctl_doproc(name
+ 1, namelen
- 1, oldp
, oldlenp
));
717 return (sysctl_file(oldp
, oldlenp
));
720 return (sysctl_doprof(name
+ 1, namelen
- 1, oldp
, oldlenp
,
724 return (sysctl_rdint(oldp
, oldlenp
, newp
, _POSIX_VERSION
));
726 return (sysctl_rdint(oldp
, oldlenp
, newp
, NGROUPS_MAX
));
727 case KERN_JOB_CONTROL
:
728 return (sysctl_rdint(oldp
, oldlenp
, newp
, 1));
730 #ifdef _POSIX_SAVED_IDS
731 return (sysctl_rdint(oldp
, oldlenp
, newp
, 1));
733 return (sysctl_rdint(oldp
, oldlenp
, newp
, 0));
736 return (kdebug_ops(name
+ 1, namelen
- 1, oldp
, oldlenp
, p
));
738 return (pcsamples_ops(name
+ 1, namelen
- 1, oldp
, oldlenp
, p
));
740 /* new one as it does not use kinfo_proc */
741 return (sysctl_procargs(name
+ 1, namelen
- 1, oldp
, oldlenp
, p
));
743 /* new one as it does not use kinfo_proc */
744 return (sysctl_procargs2(name
+ 1, namelen
- 1, oldp
, oldlenp
, p
));
746 error
= get_kernel_symfile( p
, &str
);
749 return (sysctl_rdstring(oldp
, oldlenp
, newp
, str
));
752 return (sysctl_rdint(oldp
, oldlenp
, newp
, netboot_root()));
755 return(sysctl_dopanicinfo(name
+ 1, namelen
- 1, oldp
, oldlenp
,
758 return sysctl_affinity(name
+1, namelen
-1, oldp
, oldlenp
,
761 return sysctl_translate(name
+1, namelen
-1, oldp
, oldlenp
, newp
,
763 case KERN_CLASSICHANDLER
:
764 return sysctl_exec_archhandler_ppc(name
+1, namelen
-1, oldp
,
765 oldlenp
, newp
, newlen
, p
);
767 return( sysctl_aiomax( oldp
, oldlenp
, newp
, newlen
) );
768 case KERN_AIOPROCMAX
:
769 return( sysctl_aioprocmax( oldp
, oldlenp
, newp
, newlen
) );
770 case KERN_AIOTHREADS
:
771 return( sysctl_aiothreads( oldp
, oldlenp
, newp
, newlen
) );
773 return (sysctl_rdint(oldp
, oldlenp
, newp
, (uintptr_t)p
->user_stack
));
774 case KERN_USRSTACK64
:
775 return (sysctl_rdquad(oldp
, oldlenp
, newp
, p
->user_stack
));
777 error
= sysctl_string(oldp
, oldlenp
, newp
, newlen
,
778 corefilename
, sizeof(corefilename
));
782 error
= sysctl_int(oldp
, oldlenp
, newp
, newlen
, &do_coredump
);
783 if (!error
&& ((do_coredump
< 0) || (do_coredump
> 1))) {
788 case KERN_SUGID_COREDUMP
:
789 tmp
= sugid_coredump
;
790 error
= sysctl_int(oldp
, oldlenp
, newp
, newlen
, &sugid_coredump
);
791 if (!error
&& ((sugid_coredump
< 0) || (sugid_coredump
> 1))) {
792 sugid_coredump
= tmp
;
796 case KERN_PROCDELAYTERM
:
798 int old_value
, new_value
;
801 if (oldp
&& *oldlenp
< sizeof(int))
803 if ( newp
&& newlen
!= sizeof(int) )
805 *oldlenp
= sizeof(int);
806 old_value
= (p
->p_lflag
& P_LDELAYTERM
)? 1: 0;
807 if (oldp
&& (error
= copyout( &old_value
, oldp
, sizeof(int))))
809 if (error
== 0 && newp
)
810 error
= copyin( newp
, &new_value
, sizeof(int) );
811 if (error
== 0 && newp
) {
813 p
->p_lflag
|= P_LDELAYTERM
;
815 p
->p_lflag
&= ~P_LDELAYTERM
;
819 case KERN_PROC_LOW_PRI_IO
:
821 int old_value
, new_value
;
824 if (oldp
&& *oldlenp
< sizeof(int))
826 if ( newp
&& newlen
!= sizeof(int) )
828 *oldlenp
= sizeof(int);
830 old_value
= (p
->p_lflag
& P_LLOW_PRI_IO
)? 0x01: 0;
831 if (p
->p_lflag
& P_LBACKGROUND_IO
)
834 if (oldp
&& (error
= copyout( &old_value
, oldp
, sizeof(int))))
836 if (error
== 0 && newp
)
837 error
= copyin( newp
, &new_value
, sizeof(int) );
838 if (error
== 0 && newp
) {
839 if (new_value
& 0x01)
840 p
->p_lflag
|= P_LLOW_PRI_IO
;
841 else if (new_value
& 0x02)
842 p
->p_lflag
|= P_LBACKGROUND_IO
;
843 else if (new_value
== 0)
844 p
->p_lflag
&= ~(P_LLOW_PRI_IO
| P_LBACKGROUND_IO
);
848 case KERN_LOW_PRI_WINDOW
:
850 int old_value
, new_value
;
853 if (oldp
&& *oldlenp
< sizeof(old_value
) )
855 if ( newp
&& newlen
!= sizeof(new_value
) )
857 *oldlenp
= sizeof(old_value
);
859 old_value
= lowpri_IO_window_msecs
;
861 if (oldp
&& (error
= copyout( &old_value
, oldp
, *oldlenp
)))
863 if (error
== 0 && newp
)
864 error
= copyin( newp
, &new_value
, sizeof(newlen
) );
865 if (error
== 0 && newp
) {
866 lowpri_IO_window_msecs
= new_value
;
870 case KERN_LOW_PRI_DELAY
:
872 int old_value
, new_value
;
875 if (oldp
&& *oldlenp
< sizeof(old_value
) )
877 if ( newp
&& newlen
!= sizeof(new_value
) )
879 *oldlenp
= sizeof(old_value
);
881 old_value
= lowpri_IO_delay_msecs
;
883 if (oldp
&& (error
= copyout( &old_value
, oldp
, *oldlenp
)))
885 if (error
== 0 && newp
)
886 error
= copyin( newp
, &new_value
, sizeof(newlen
) );
887 if (error
== 0 && newp
) {
888 lowpri_IO_delay_msecs
= new_value
;
892 case KERN_NX_PROTECTION
:
894 int old_value
, new_value
;
897 if (oldp
&& *oldlenp
< sizeof(old_value
) )
899 if ( newp
&& newlen
!= sizeof(new_value
) )
901 *oldlenp
= sizeof(old_value
);
903 old_value
= nx_enabled
;
905 if (oldp
&& (error
= copyout( &old_value
, oldp
, *oldlenp
)))
909 * Only allow setting if NX is supported on the chip
911 if (cpuid_extfeatures() & CPUID_EXTFEATURE_XD
) {
913 if (error
== 0 && newp
)
914 error
= copyin(newp
, &new_value
,
916 if (error
== 0 && newp
)
917 nx_enabled
= new_value
;
925 case KERN_SHREG_PRIVATIZABLE
:
926 /* this kernel does implement shared_region_make_private_np() */
927 return (sysctl_rdint(oldp
, oldlenp
, newp
, 1));
929 error
= sysctl_trstring(oldp
, oldlenp
, newp
, newlen
,
930 &p
->p_name
[0], (2*MAXCOMLEN
+1));
932 case KERN_THALTSTACK
:
934 int old_value
, new_value
;
937 if (oldp
&& *oldlenp
< sizeof(int))
939 if ( newp
&& newlen
!= sizeof(int) )
941 *oldlenp
= sizeof(int);
942 old_value
= (p
->p_lflag
& P_LTHSIGSTACK
)? 1: 0;
943 if (oldp
&& (error
= copyout( &old_value
, oldp
, sizeof(int))))
945 if (error
== 0 && newp
)
946 error
= copyin( newp
, &new_value
, sizeof(int) );
947 if (error
== 0 && newp
) {
949 /* we cannot switch midstream if in use */
950 if ((p
->p_sigacts
->ps_flags
& SAS_ALTSTACK
) == SAS_ALTSTACK
)
952 p
->p_lflag
|= P_LTHSIGSTACK
;
954 /* we cannot switch midstream */
955 if ((p
->p_lflag
& P_LTHSIGSTACK
) == P_LTHSIGSTACK
)
957 p
->p_lflag
&= ~P_LTHSIGSTACK
;
970 * Debugging related system variables.
974 #endif /* DIAGNOSTIC */
975 struct ctldebug debug0
, debug1
;
976 struct ctldebug debug2
, debug3
, debug4
;
977 struct ctldebug debug5
, debug6
, debug7
, debug8
, debug9
;
978 struct ctldebug debug10
, debug11
, debug12
, debug13
, debug14
;
979 struct ctldebug debug15
, debug16
, debug17
, debug18
, debug19
;
980 static struct ctldebug
*debugvars
[CTL_DEBUG_MAXID
] = {
981 &debug0
, &debug1
, &debug2
, &debug3
, &debug4
,
982 &debug5
, &debug6
, &debug7
, &debug8
, &debug9
,
983 &debug10
, &debug11
, &debug12
, &debug13
, &debug14
,
984 &debug15
, &debug16
, &debug17
, &debug18
, &debug19
,
987 debug_sysctl(int *name
, u_int namelen
, user_addr_t oldp
, size_t *oldlenp
,
988 user_addr_t newp
, size_t newlen
, struct proc
*p
)
990 struct ctldebug
*cdp
;
992 /* all sysctl names at this level are name and field */
994 return (ENOTDIR
); /* overloaded */
995 cdp
= debugvars
[name
[0]];
996 if (cdp
->debugname
== 0)
1000 return (sysctl_rdstring(oldp
, oldlenp
, newp
, cdp
->debugname
));
1001 case CTL_DEBUG_VALUE
:
1002 return (sysctl_int(oldp
, oldlenp
, newp
, newlen
, cdp
->debugvar
));
1011 * Validate parameters and get old / set new parameters
1012 * for an integer-valued sysctl function.
1015 sysctl_int(user_addr_t oldp
, size_t *oldlenp
,
1016 user_addr_t newp
, size_t newlen
, int *valp
)
1020 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1022 if (oldp
&& *oldlenp
< sizeof(int))
1024 if (newp
&& newlen
!= sizeof(int))
1026 *oldlenp
= sizeof(int);
1028 error
= copyout(valp
, oldp
, sizeof(int));
1029 if (error
== 0 && newp
) {
1030 error
= copyin(newp
, valp
, sizeof(int));
1031 AUDIT_ARG(value
, *valp
);
1037 * As above, but read-only.
1040 sysctl_rdint(user_addr_t oldp
, size_t *oldlenp
, user_addr_t newp
, int val
)
1044 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1046 if (oldp
&& *oldlenp
< sizeof(int))
1050 *oldlenp
= sizeof(int);
1052 error
= copyout((caddr_t
)&val
, oldp
, sizeof(int));
1057 * Validate parameters and get old / set new parameters
1058 * for an quad(64bit)-valued sysctl function.
1061 sysctl_quad(user_addr_t oldp
, size_t *oldlenp
,
1062 user_addr_t newp
, size_t newlen
, quad_t
*valp
)
1066 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1068 if (oldp
&& *oldlenp
< sizeof(quad_t
))
1070 if (newp
&& newlen
!= sizeof(quad_t
))
1072 *oldlenp
= sizeof(quad_t
);
1074 error
= copyout(valp
, oldp
, sizeof(quad_t
));
1075 if (error
== 0 && newp
)
1076 error
= copyin(newp
, valp
, sizeof(quad_t
));
1081 * As above, but read-only.
1084 sysctl_rdquad(oldp
, oldlenp
, newp
, val
)
1092 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1094 if (oldp
&& *oldlenp
< sizeof(quad_t
))
1098 *oldlenp
= sizeof(quad_t
);
1100 error
= copyout((caddr_t
)&val
, CAST_USER_ADDR_T(oldp
), sizeof(quad_t
));
1105 * Validate parameters and get old / set new parameters
1106 * for a string-valued sysctl function. Unlike sysctl_string, if you
1107 * give it a too small (but larger than 0 bytes) buffer, instead of
1108 * returning ENOMEM, it truncates the returned string to the buffer
1109 * size. This preserves the semantics of some library routines
1110 * implemented via sysctl, which truncate their returned data, rather
1111 * than simply returning an error. The returned string is always NUL
1115 sysctl_trstring(user_addr_t oldp
, size_t *oldlenp
,
1116 user_addr_t newp
, size_t newlen
, char *str
, int maxlen
)
1118 int len
, copylen
, error
= 0;
1120 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1122 copylen
= len
= strlen(str
) + 1;
1123 if (oldp
&& (len
< 0 || *oldlenp
< 1))
1125 if (oldp
&& (*oldlenp
< (size_t)len
))
1126 copylen
= *oldlenp
+ 1;
1127 if (newp
&& (maxlen
< 0 || newlen
>= (size_t)maxlen
))
1129 *oldlenp
= copylen
- 1; /* deal with NULL strings correctly */
1131 error
= copyout(str
, oldp
, copylen
);
1133 unsigned char c
= 0;
1136 error
= copyout((void *)&c
, oldp
, sizeof(char));
1139 if (error
== 0 && newp
) {
1140 error
= copyin(newp
, str
, newlen
);
1142 AUDIT_ARG(text
, (char *)str
);
1148 * Validate parameters and get old / set new parameters
1149 * for a string-valued sysctl function.
1152 sysctl_string(user_addr_t oldp
, size_t *oldlenp
,
1153 user_addr_t newp
, size_t newlen
, char *str
, int maxlen
)
1157 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1159 len
= strlen(str
) + 1;
1160 if (oldp
&& (len
< 0 || *oldlenp
< (size_t)len
))
1162 if (newp
&& (maxlen
< 0 || newlen
>= (size_t)maxlen
))
1164 *oldlenp
= len
-1; /* deal with NULL strings correctly */
1166 error
= copyout(str
, oldp
, len
);
1168 if (error
== 0 && newp
) {
1169 error
= copyin(newp
, str
, newlen
);
1171 AUDIT_ARG(text
, (char *)str
);
1177 * As above, but read-only.
1180 sysctl_rdstring(user_addr_t oldp
, size_t *oldlenp
,
1181 user_addr_t newp
, char *str
)
1185 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1187 len
= strlen(str
) + 1;
1188 if (oldp
&& *oldlenp
< (size_t)len
)
1194 error
= copyout(str
, oldp
, len
);
1199 * Validate parameters and get old / set new parameters
1200 * for a structure oriented sysctl function.
1203 sysctl_struct(user_addr_t oldp
, size_t *oldlenp
,
1204 user_addr_t newp
, size_t newlen
, void *sp
, int len
)
1208 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1210 if (oldp
&& (len
< 0 || *oldlenp
< (size_t)len
))
1212 if (newp
&& (len
< 0 || newlen
> (size_t)len
))
1216 error
= copyout(sp
, oldp
, len
);
1218 if (error
== 0 && newp
)
1219 error
= copyin(newp
, sp
, len
);
1224 * Validate parameters and get old parameters
1225 * for a structure oriented sysctl function.
1228 sysctl_rdstruct(user_addr_t oldp
, size_t *oldlenp
,
1229 user_addr_t newp
, void *sp
, int len
)
1233 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1235 if (oldp
&& (len
< 0 || *oldlenp
< (size_t)len
))
1241 error
= copyout(sp
, oldp
, len
);
1246 * Get file structures.
1249 sysctl_file(user_addr_t where
, size_t *sizep
)
1252 struct fileglob
*fg
;
1253 user_addr_t start
= where
;
1254 struct extern_file nef
;
1257 if (where
== USER_ADDR_NULL
) {
1259 * overestimate by 10 files
1261 *sizep
= sizeof(filehead
) + (nfiles
+ 10) * sizeof(struct extern_file
);
1266 * first copyout filehead
1268 if (buflen
< 0 || (size_t)buflen
< sizeof(filehead
)) {
1272 error
= copyout((caddr_t
)&filehead
, where
, sizeof(filehead
));
1275 buflen
-= sizeof(filehead
);
1276 where
+= sizeof(filehead
);
1279 * followed by an array of file structures
1281 for (fg
= filehead
.lh_first
; fg
!= 0; fg
= fg
->f_list
.le_next
) {
1282 if (buflen
< 0 || (size_t)buflen
< sizeof(struct extern_file
)) {
1283 *sizep
= where
- start
;
1286 nef
.f_list
.le_next
= (struct extern_file
*)fg
->f_list
.le_next
;
1287 nef
.f_list
.le_prev
= (struct extern_file
**)fg
->f_list
.le_prev
;
1288 nef
.f_flag
= (fg
->fg_flag
& FMASK
);
1289 nef
.f_type
= fg
->fg_type
;
1290 nef
.f_count
= fg
->fg_count
;
1291 nef
.f_msgcount
= fg
->fg_msgcount
;
1292 nef
.f_cred
= fg
->fg_cred
;
1293 nef
.f_ops
= fg
->fg_ops
;
1294 nef
.f_offset
= fg
->fg_offset
;
1295 nef
.f_data
= fg
->fg_data
;
1296 error
= copyout((caddr_t
)&nef
, where
, sizeof (struct extern_file
));
1299 buflen
-= sizeof(struct extern_file
);
1300 where
+= sizeof(struct extern_file
);
1302 *sizep
= where
- start
;
1307 * try over estimating by 5 procs
1309 #define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
1312 sysctl_doproc(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
)
1315 user_addr_t dp
= where
;
1317 int buflen
= where
!= USER_ADDR_NULL
? *sizep
: 0;
1320 boolean_t is_64_bit
= FALSE
;
1321 struct kinfo_proc kproc
;
1322 struct user_kinfo_proc user_kproc
;
1326 if (namelen
!= 2 && !(namelen
== 1 && name
[0] == KERN_PROC_ALL
))
1328 p
= allproc
.lh_first
;
1330 is_64_bit
= proc_is64bit(current_proc());
1332 sizeof_kproc
= sizeof(user_kproc
);
1333 kprocp
= (caddr_t
) &user_kproc
;
1336 sizeof_kproc
= sizeof(kproc
);
1337 kprocp
= (caddr_t
) &kproc
;
1340 for (; p
!= 0; p
= p
->p_list
.le_next
) {
1342 * Skip embryonic processes.
1344 if (p
->p_stat
== SIDL
)
1347 * TODO - make more efficient (see notes below).
1353 /* could do this with just a lookup */
1354 if (p
->p_pid
!= (pid_t
)name
[1])
1358 case KERN_PROC_PGRP
:
1359 /* could do this by traversing pgrp */
1360 if (p
->p_pgrp
->pg_id
!= (pid_t
)name
[1])
1365 if ((p
->p_flag
& P_CONTROLT
) == 0 ||
1366 (p
->p_session
== NULL
) ||
1367 p
->p_session
->s_ttyp
== NULL
||
1368 p
->p_session
->s_ttyp
->t_dev
!= (dev_t
)name
[1])
1373 if ((p
->p_ucred
== NULL
) ||
1374 (kauth_cred_getuid(p
->p_ucred
) != (uid_t
)name
[1]))
1378 case KERN_PROC_RUID
:
1379 if ((p
->p_ucred
== NULL
) ||
1380 (p
->p_ucred
->cr_ruid
!= (uid_t
)name
[1]))
1384 if (buflen
>= sizeof_kproc
) {
1385 bzero(kprocp
, sizeof_kproc
);
1387 fill_user_proc(p
, (struct user_kinfo_proc
*) kprocp
);
1390 fill_proc(p
, (struct kinfo_proc
*) kprocp
);
1392 error
= copyout(kprocp
, dp
, sizeof_kproc
);
1396 buflen
-= sizeof_kproc
;
1398 needed
+= sizeof_kproc
;
1400 if (doingzomb
== 0) {
1401 p
= zombproc
.lh_first
;
1405 if (where
!= USER_ADDR_NULL
) {
1406 *sizep
= dp
- where
;
1407 if (needed
> *sizep
)
1410 needed
+= KERN_PROCSLOP
;
1417 * Fill in an eproc structure for the specified process.
1421 register struct proc
*p
;
1422 register struct eproc
*ep
;
1424 register struct tty
*tp
;
1428 ep
->e_sess
= p
->p_pgrp
->pg_session
;
1429 ep
->e_pgid
= p
->p_pgrp
->pg_id
;
1430 ep
->e_jobc
= p
->p_pgrp
->pg_jobc
;
1431 if (ep
->e_sess
&& ep
->e_sess
->s_ttyvp
)
1432 ep
->e_flag
= EPROC_CTTY
;
1434 ep
->e_sess
= (struct session
*)0;
1438 ep
->e_ppid
= (p
->p_pptr
) ? p
->p_pptr
->p_pid
: 0;
1439 /* Pre-zero the fake historical pcred */
1440 bzero(&ep
->e_pcred
, sizeof(struct _pcred
));
1442 /* XXX not ref-counted */
1444 /* A fake historical pcred */
1445 ep
->e_pcred
.p_ruid
= p
->p_ucred
->cr_ruid
;
1446 ep
->e_pcred
.p_svuid
= p
->p_ucred
->cr_svuid
;
1447 ep
->e_pcred
.p_rgid
= p
->p_ucred
->cr_rgid
;
1448 ep
->e_pcred
.p_svgid
= p
->p_ucred
->cr_svgid
;
1450 /* A fake historical *kauth_cred_t */
1451 ep
->e_ucred
.cr_ref
= p
->p_ucred
->cr_ref
;
1452 ep
->e_ucred
.cr_uid
= kauth_cred_getuid(p
->p_ucred
);
1453 ep
->e_ucred
.cr_ngroups
= p
->p_ucred
->cr_ngroups
;
1454 bcopy(p
->p_ucred
->cr_groups
, ep
->e_ucred
.cr_groups
, NGROUPS
*sizeof(gid_t
));
1457 if (p
->p_stat
== SIDL
|| p
->p_stat
== SZOMB
) {
1458 ep
->e_vm
.vm_tsize
= 0;
1459 ep
->e_vm
.vm_dsize
= 0;
1460 ep
->e_vm
.vm_ssize
= 0;
1462 ep
->e_vm
.vm_rssize
= 0;
1464 if ((p
->p_flag
& P_CONTROLT
) && (ep
->e_sess
) &&
1465 (tp
= ep
->e_sess
->s_ttyp
)) {
1466 ep
->e_tdev
= tp
->t_dev
;
1467 ep
->e_tpgid
= tp
->t_pgrp
? tp
->t_pgrp
->pg_id
: NO_PID
;
1468 ep
->e_tsess
= tp
->t_session
;
1473 ep
->e_flag
|= EPROC_SLEADER
;
1475 strncpy(ep
->e_wmesg
, p
->p_wmesg
, WMESGLEN
);
1476 ep
->e_xsize
= ep
->e_xrssize
= 0;
1477 ep
->e_xccount
= ep
->e_xswrss
= 0;
1481 * Fill in an LP64 version of eproc structure for the specified process.
1484 fill_user_eproc(register struct proc
*p
, register struct user_eproc
*ep
)
1486 register struct tty
*tp
;
1487 struct session
*sessionp
= NULL
;
1489 ep
->e_paddr
= CAST_USER_ADDR_T(p
);
1491 sessionp
= p
->p_pgrp
->pg_session
;
1492 ep
->e_sess
= CAST_USER_ADDR_T(sessionp
);
1493 ep
->e_pgid
= p
->p_pgrp
->pg_id
;
1494 ep
->e_jobc
= p
->p_pgrp
->pg_jobc
;
1496 if (sessionp
->s_ttyvp
)
1497 ep
->e_flag
= EPROC_CTTY
;
1500 ep
->e_sess
= USER_ADDR_NULL
;
1504 ep
->e_ppid
= (p
->p_pptr
) ? p
->p_pptr
->p_pid
: 0;
1505 /* Pre-zero the fake historical pcred */
1506 bzero(&ep
->e_pcred
, sizeof(ep
->e_pcred
));
1508 /* XXX not ref-counted */
1510 /* A fake historical pcred */
1511 ep
->e_pcred
.p_ruid
= p
->p_ucred
->cr_ruid
;
1512 ep
->e_pcred
.p_svuid
= p
->p_ucred
->cr_svuid
;
1513 ep
->e_pcred
.p_rgid
= p
->p_ucred
->cr_rgid
;
1514 ep
->e_pcred
.p_svgid
= p
->p_ucred
->cr_svgid
;
1516 /* A fake historical *kauth_cred_t */
1517 ep
->e_ucred
.cr_ref
= p
->p_ucred
->cr_ref
;
1518 ep
->e_ucred
.cr_uid
= kauth_cred_getuid(p
->p_ucred
);
1519 ep
->e_ucred
.cr_ngroups
= p
->p_ucred
->cr_ngroups
;
1520 bcopy(p
->p_ucred
->cr_groups
, ep
->e_ucred
.cr_groups
, NGROUPS
*sizeof(gid_t
));
1523 if (p
->p_stat
== SIDL
|| p
->p_stat
== SZOMB
) {
1524 ep
->e_vm
.vm_tsize
= 0;
1525 ep
->e_vm
.vm_dsize
= 0;
1526 ep
->e_vm
.vm_ssize
= 0;
1528 ep
->e_vm
.vm_rssize
= 0;
1530 if ((p
->p_flag
& P_CONTROLT
) && (sessionp
) &&
1531 (tp
= sessionp
->s_ttyp
)) {
1532 ep
->e_tdev
= tp
->t_dev
;
1533 ep
->e_tpgid
= tp
->t_pgrp
? tp
->t_pgrp
->pg_id
: NO_PID
;
1534 ep
->e_tsess
= CAST_USER_ADDR_T(tp
->t_session
);
1539 ep
->e_flag
|= EPROC_SLEADER
;
1541 strncpy(ep
->e_wmesg
, p
->p_wmesg
, WMESGLEN
);
1542 ep
->e_xsize
= ep
->e_xrssize
= 0;
1543 ep
->e_xccount
= ep
->e_xswrss
= 0;
1547 * Fill in an eproc structure for the specified process.
1550 fill_externproc(p
, exp
)
1551 register struct proc
*p
;
1552 register struct extern_proc
*exp
;
1554 exp
->p_forw
= exp
->p_back
= NULL
;
1556 exp
->p_starttime
= p
->p_stats
->p_start
;
1557 exp
->p_vmspace
= NULL
;
1558 exp
->p_sigacts
= p
->p_sigacts
;
1559 exp
->p_flag
= p
->p_flag
;
1560 exp
->p_stat
= p
->p_stat
;
1561 exp
->p_pid
= p
->p_pid
;
1562 exp
->p_oppid
= p
->p_oppid
;
1563 exp
->p_dupfd
= p
->p_dupfd
;
1565 exp
->user_stack
= CAST_DOWN(caddr_t
, p
->user_stack
);
1566 exp
->exit_thread
= p
->exit_thread
;
1567 exp
->p_debugger
= p
->p_debugger
;
1568 exp
->sigwait
= p
->sigwait
;
1570 exp
->p_estcpu
= p
->p_estcpu
;
1571 exp
->p_cpticks
= p
->p_cpticks
;
1572 exp
->p_pctcpu
= p
->p_pctcpu
;
1573 exp
->p_wchan
= p
->p_wchan
;
1574 exp
->p_wmesg
= p
->p_wmesg
;
1575 exp
->p_swtime
= p
->p_swtime
;
1576 exp
->p_slptime
= p
->p_slptime
;
1577 bcopy(&p
->p_realtimer
, &exp
->p_realtimer
,sizeof(struct itimerval
));
1578 bcopy(&p
->p_rtime
, &exp
->p_rtime
,sizeof(struct timeval
));
1579 exp
->p_uticks
= p
->p_uticks
;
1580 exp
->p_sticks
= p
->p_sticks
;
1581 exp
->p_iticks
= p
->p_iticks
;
1582 exp
->p_traceflag
= p
->p_traceflag
;
1583 exp
->p_tracep
= p
->p_tracep
;
1584 exp
->p_siglist
= 0 ; /* No longer relevant */
1585 exp
->p_textvp
= p
->p_textvp
;
1586 exp
->p_holdcnt
= 0 ;
1587 exp
->p_sigmask
= 0 ; /* no longer avaialable */
1588 exp
->p_sigignore
= p
->p_sigignore
;
1589 exp
->p_sigcatch
= p
->p_sigcatch
;
1590 exp
->p_priority
= p
->p_priority
;
1591 exp
->p_usrpri
= p
->p_usrpri
;
1592 exp
->p_nice
= p
->p_nice
;
1593 bcopy(&p
->p_comm
, &exp
->p_comm
,MAXCOMLEN
);
1594 exp
->p_comm
[MAXCOMLEN
] = '\0';
1595 exp
->p_pgrp
= p
->p_pgrp
;
1597 exp
->p_xstat
= p
->p_xstat
;
1598 exp
->p_acflag
= p
->p_acflag
;
1599 exp
->p_ru
= p
->p_ru
; /* XXX may be NULL */
1603 * Fill in an LP64 version of extern_proc structure for the specified process.
1606 fill_user_externproc(register struct proc
*p
, register struct user_extern_proc
*exp
)
1608 exp
->p_forw
= exp
->p_back
= USER_ADDR_NULL
;
1610 exp
->p_starttime
.tv_sec
= p
->p_stats
->p_start
.tv_sec
;
1611 exp
->p_starttime
.tv_usec
= p
->p_stats
->p_start
.tv_usec
;
1613 exp
->p_vmspace
= USER_ADDR_NULL
;
1614 exp
->p_sigacts
= CAST_USER_ADDR_T(p
->p_sigacts
);
1615 exp
->p_flag
= p
->p_flag
;
1616 exp
->p_stat
= p
->p_stat
;
1617 exp
->p_pid
= p
->p_pid
;
1618 exp
->p_oppid
= p
->p_oppid
;
1619 exp
->p_dupfd
= p
->p_dupfd
;
1621 exp
->user_stack
= p
->user_stack
;
1622 exp
->exit_thread
= CAST_USER_ADDR_T(p
->exit_thread
);
1623 exp
->p_debugger
= p
->p_debugger
;
1624 exp
->sigwait
= p
->sigwait
;
1626 exp
->p_estcpu
= p
->p_estcpu
;
1627 exp
->p_cpticks
= p
->p_cpticks
;
1628 exp
->p_pctcpu
= p
->p_pctcpu
;
1629 exp
->p_wchan
= CAST_USER_ADDR_T(p
->p_wchan
);
1630 exp
->p_wmesg
= CAST_USER_ADDR_T(p
->p_wmesg
);
1631 exp
->p_swtime
= p
->p_swtime
;
1632 exp
->p_slptime
= p
->p_slptime
;
1633 exp
->p_realtimer
.it_interval
.tv_sec
= p
->p_realtimer
.it_interval
.tv_sec
;
1634 exp
->p_realtimer
.it_interval
.tv_usec
= p
->p_realtimer
.it_interval
.tv_usec
;
1635 exp
->p_realtimer
.it_value
.tv_sec
= p
->p_realtimer
.it_value
.tv_sec
;
1636 exp
->p_realtimer
.it_value
.tv_usec
= p
->p_realtimer
.it_value
.tv_usec
;
1637 exp
->p_rtime
.tv_sec
= p
->p_rtime
.tv_sec
;
1638 exp
->p_rtime
.tv_usec
= p
->p_rtime
.tv_usec
;
1639 exp
->p_uticks
= p
->p_uticks
;
1640 exp
->p_sticks
= p
->p_sticks
;
1641 exp
->p_iticks
= p
->p_iticks
;
1642 exp
->p_traceflag
= p
->p_traceflag
;
1643 exp
->p_tracep
= CAST_USER_ADDR_T(p
->p_tracep
);
1644 exp
->p_siglist
= 0 ; /* No longer relevant */
1645 exp
->p_textvp
= CAST_USER_ADDR_T(p
->p_textvp
);
1646 exp
->p_holdcnt
= 0 ;
1647 exp
->p_sigmask
= 0 ; /* no longer avaialable */
1648 exp
->p_sigignore
= p
->p_sigignore
;
1649 exp
->p_sigcatch
= p
->p_sigcatch
;
1650 exp
->p_priority
= p
->p_priority
;
1651 exp
->p_usrpri
= p
->p_usrpri
;
1652 exp
->p_nice
= p
->p_nice
;
1653 bcopy(&p
->p_comm
, &exp
->p_comm
,MAXCOMLEN
);
1654 exp
->p_comm
[MAXCOMLEN
] = '\0';
1655 exp
->p_pgrp
= CAST_USER_ADDR_T(p
->p_pgrp
);
1656 exp
->p_addr
= USER_ADDR_NULL
;
1657 exp
->p_xstat
= p
->p_xstat
;
1658 exp
->p_acflag
= p
->p_acflag
;
1659 exp
->p_ru
= CAST_USER_ADDR_T(p
->p_ru
); /* XXX may be NULL */
1664 register struct proc
*p
;
1665 register struct kinfo_proc
*kp
;
1667 fill_externproc(p
, &kp
->kp_proc
);
1668 fill_eproc(p
, &kp
->kp_eproc
);
1672 fill_user_proc(register struct proc
*p
, register struct user_kinfo_proc
*kp
)
1674 fill_user_externproc(p
, &kp
->kp_proc
);
1675 fill_user_eproc(p
, &kp
->kp_eproc
);
/*
 * kdebug_ops: KERN_KDEBUG sysctl handler.  Performs a superuser check
 * on the caller's credential, then forwards the kernel-trace subcodes
 * (KERN_KDSETRTCDEC, KERN_KDGETENTROPY, ... are the visible cases) to
 * kdbg_control().
 * NOTE(review): this chunk was recovered from a mangled extract and
 * most interior lines of the switch are missing; the surviving code is
 * left byte-identical.
 */
1679 kdebug_ops(int *name
, u_int namelen
, user_addr_t where
,
1680 size_t *sizep
, struct proc
*p
)
/* privilege check: only the superuser may control kernel tracing */
1684 ret
= suser(kauth_cred_get(), &p
->p_acflag
);
1701 case KERN_KDSETRTCDEC
:
1703 case KERN_KDGETENTROPY
:
/* all trace subcodes funnel into the kdebug control routine */
1704 ret
= kdbg_control(name
, namelen
, where
, sizep
);
1713 extern int pcsamples_control(int *name
, u_int namelen
, user_addr_t where
,
/*
 * pcsamples_ops: KERN_PCSAMPLES sysctl handler.  Performs a superuser
 * check on the caller's credential, then forwards the PC-sampling
 * subcodes (KERN_PCDISABLE .. KERN_PCREADBUF are the visible cases) to
 * pcsamples_control(), declared extern just above.
 * NOTE(review): recovered from a mangled extract; interior lines of the
 * switch are missing, so the surviving code is left byte-identical.
 */
1717 pcsamples_ops(int *name
, u_int namelen
, user_addr_t where
,
1718 size_t *sizep
, struct proc
*p
)
/* privilege check: only the superuser may control PC sampling */
1722 ret
= suser(kauth_cred_get(), &p
->p_acflag
);
1727 case KERN_PCDISABLE
:
1731 case KERN_PCREADBUF
:
/* sampling subcodes funnel into the pcsamples control routine */
1735 ret
= pcsamples_control(name
, namelen
, where
, sizep
);
1745 * Return the top *sizep bytes of the user stack, or the entire area of the
1746 * user stack down through the saved exec_path, whichever is smaller.
1749 sysctl_procargs(int *name
, u_int namelen
, user_addr_t where
,
1750 size_t *sizep
, struct proc
*cur_proc
)
1752 return sysctl_procargsx( name
, namelen
, where
, sizep
, cur_proc
, 0);
1756 sysctl_procargs2(int *name
, u_int namelen
, user_addr_t where
,
1757 size_t *sizep
, struct proc
*cur_proc
)
1759 return sysctl_procargsx( name
, namelen
, where
, sizep
, cur_proc
, 1);
1763 sysctl_procargsx(int *name
, __unused u_int namelen
, user_addr_t where
,
1764 size_t *sizep
, struct proc
*cur_proc
, int argc_yes
)
1767 int buflen
= where
!= USER_ADDR_NULL
? *sizep
: 0;
1769 struct vm_map
*proc_map
;
1772 user_addr_t arg_addr
;
1776 vm_offset_t copy_start
, copy_end
;
1781 buflen
-= sizeof(int); /* reserve first word to return argc */
1783 /* we only care about buflen when where (oldp from sysctl) is not NULL. */
1784 /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
1785 /* is not NULL then the caller wants us to return the length needed to */
1786 /* hold the data we would return */
1787 if (where
!= USER_ADDR_NULL
&& (buflen
<= 0 || buflen
> ARG_MAX
)) {
1793 * Lookup process by pid
1802 * Copy the top N bytes of the stack.
1803 * On all machines we have so far, the stack grows
1806 * If the user expects no more than N bytes of
1807 * argument list, use that as a guess for the
1814 if (where
== USER_ADDR_NULL
) {
1815 /* caller only wants to know length of proc args data */
1819 size
= p
->p_argslen
;
1821 size
+= sizeof(int);
1825 * old PROCARGS will return the executable's path and plus some
1826 * extra space for work alignment and data tags
1828 size
+= PATH_MAX
+ (6 * sizeof(int));
1830 size
+= (size
& (sizeof(int) - 1)) ? (sizeof(int) - (size
& (sizeof(int) - 1))) : 0;
1835 if ((kauth_cred_getuid(p
->p_ucred
) != kauth_cred_getuid(kauth_cred_get()))
1836 && suser(kauth_cred_get(), &cur_proc
->p_acflag
))
1839 if ((u_int
)arg_size
> p
->p_argslen
)
1840 arg_size
= round_page(p
->p_argslen
);
1842 arg_addr
= p
->user_stack
- arg_size
;
1846 * Before we can block (any VM code), make another
1847 * reference to the map to keep it alive. We do
1848 * that by getting a reference on the task itself.
1855 * Once we have a task reference we can convert that into a
1856 * map reference, which we will use in the calls below. The
1857 * task/process may change its map after we take this reference
1858 * (see execve), but the worst that will happen then is a return
1859 * of stale info (which is always a possibility).
1861 task_reference(task
);
1862 proc_map
= get_task_map_reference(task
);
1863 task_deallocate(task
);
1864 if (proc_map
== NULL
)
1868 ret
= kmem_alloc(kernel_map
, ©_start
, round_page(arg_size
));
1869 if (ret
!= KERN_SUCCESS
) {
1870 vm_map_deallocate(proc_map
);
1874 copy_end
= round_page(copy_start
+ arg_size
);
1876 if( vm_map_copyin(proc_map
, (vm_map_address_t
)arg_addr
,
1877 (vm_map_size_t
)arg_size
, FALSE
, &tmp
) != KERN_SUCCESS
) {
1878 vm_map_deallocate(proc_map
);
1879 kmem_free(kernel_map
, copy_start
,
1880 round_page(arg_size
));
1885 * Now that we've done the copyin from the process'
1886 * map, we can release the reference to it.
1888 vm_map_deallocate(proc_map
);
1890 if( vm_map_copy_overwrite(kernel_map
,
1891 (vm_map_address_t
)copy_start
,
1892 tmp
, FALSE
) != KERN_SUCCESS
) {
1893 kmem_free(kernel_map
, copy_start
,
1894 round_page(arg_size
));
1898 if (arg_size
> p
->p_argslen
) {
1899 data
= (caddr_t
) (copy_end
- p
->p_argslen
);
1900 size
= p
->p_argslen
;
1902 data
= (caddr_t
) (copy_end
- arg_size
);
1907 /* Put processes argc as the first word in the copyout buffer */
1908 suword(where
, p
->p_argc
);
1909 error
= copyout(data
, (where
+ sizeof(int)), size
);
1910 size
+= sizeof(int);
1912 error
= copyout(data
, where
, size
);
1915 * Make the old PROCARGS work to return the executable's path
1916 * But, only if there is enough space in the provided buffer
1918 * on entry: data [possibily] points to the beginning of the path
1920 * Note: we keep all pointers&sizes aligned to word boundries
1922 if ( (! error
) && (buflen
> 0 && (u_int
)buflen
> p
->p_argslen
) )
1924 int binPath_sz
, alignedBinPath_sz
= 0;
1925 int extraSpaceNeeded
, addThis
;
1926 user_addr_t placeHere
;
1927 char * str
= (char *) data
;
1930 /* Some apps are really bad about messing up their stacks
1931 So, we have to be extra careful about getting the length
1932 of the executing binary. If we encounter an error, we bail.
1935 /* Limit ourselves to PATH_MAX paths */
1936 if ( max_len
> PATH_MAX
) max_len
= PATH_MAX
;
1940 while ( (binPath_sz
< max_len
-1) && (*str
++ != 0) )
1943 /* If we have a NUL terminator, copy it, too */
1944 if (binPath_sz
< max_len
-1) binPath_sz
+= 1;
1946 /* Pre-Flight the space requiremnts */
1948 /* Account for the padding that fills out binPath to the next word */
1949 alignedBinPath_sz
+= (binPath_sz
& (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz
& (sizeof(int)-1))) : 0;
1951 placeHere
= where
+ size
;
1953 /* Account for the bytes needed to keep placeHere word aligned */
1954 addThis
= (placeHere
& (sizeof(int)-1)) ? (sizeof(int)-(placeHere
& (sizeof(int)-1))) : 0;
1956 /* Add up all the space that is needed */
1957 extraSpaceNeeded
= alignedBinPath_sz
+ addThis
+ binPath_sz
+ (4 * sizeof(int));
1959 /* is there is room to tack on argv[0]? */
1960 if ( (buflen
& ~(sizeof(int)-1)) >= ( p
->p_argslen
+ extraSpaceNeeded
))
1962 placeHere
+= addThis
;
1963 suword(placeHere
, 0);
1964 placeHere
+= sizeof(int);
1965 suword(placeHere
, 0xBFFF0000);
1966 placeHere
+= sizeof(int);
1967 suword(placeHere
, 0);
1968 placeHere
+= sizeof(int);
1969 error
= copyout(data
, placeHere
, binPath_sz
);
1972 placeHere
+= binPath_sz
;
1973 suword(placeHere
, 0);
1974 size
+= extraSpaceNeeded
;
1980 if (copy_start
!= (vm_offset_t
) 0) {
1981 kmem_free(kernel_map
, copy_start
, copy_end
- copy_start
);
1987 if (where
!= USER_ADDR_NULL
)
1994 * Validate parameters and get old / set new parameters
1995 * for max number of concurrent aio requests. Makes sure
1996 * the system wide limit is greater than the per process
2000 sysctl_aiomax(user_addr_t oldp
, size_t *oldlenp
, user_addr_t newp
, size_t newlen
)
2005 if ( oldp
&& *oldlenp
< sizeof(int) )
2007 if ( newp
&& newlen
!= sizeof(int) )
2010 *oldlenp
= sizeof(int);
2012 error
= copyout( &aio_max_requests
, oldp
, sizeof(int) );
2013 if ( error
== 0 && newp
)
2014 error
= copyin( newp
, &new_value
, sizeof(int) );
2015 if ( error
== 0 && newp
) {
2016 if ( new_value
>= aio_max_requests_per_process
)
2017 aio_max_requests
= new_value
;
2023 } /* sysctl_aiomax */
2027 * Validate parameters and get old / set new parameters
2028 * for max number of concurrent aio requests per process.
2029 * Makes sure per process limit is less than the system wide
2033 sysctl_aioprocmax(user_addr_t oldp
, size_t *oldlenp
, user_addr_t newp
, size_t newlen
)
2038 if ( oldp
&& *oldlenp
< sizeof(int) )
2040 if ( newp
&& newlen
!= sizeof(int) )
2043 *oldlenp
= sizeof(int);
2045 error
= copyout( &aio_max_requests_per_process
, oldp
, sizeof(int) );
2046 if ( error
== 0 && newp
)
2047 error
= copyin( newp
, &new_value
, sizeof(int) );
2048 if ( error
== 0 && newp
) {
2049 if ( new_value
<= aio_max_requests
&& new_value
>= AIO_LISTIO_MAX
)
2050 aio_max_requests_per_process
= new_value
;
2056 } /* sysctl_aioprocmax */
2060 * Validate parameters and get old / set new parameters
2061 * for max number of async IO worker threads.
2062 * We only allow an increase in the number of worker threads.
2065 sysctl_aiothreads(user_addr_t oldp
, size_t *oldlenp
, user_addr_t newp
, size_t newlen
)
2070 if ( oldp
&& *oldlenp
< sizeof(int) )
2072 if ( newp
&& newlen
!= sizeof(int) )
2075 *oldlenp
= sizeof(int);
2077 error
= copyout( &aio_worker_threads
, oldp
, sizeof(int) );
2078 if ( error
== 0 && newp
)
2079 error
= copyin( newp
, &new_value
, sizeof(int) );
2080 if ( error
== 0 && newp
) {
2081 if (new_value
> aio_worker_threads
) {
2082 _aio_create_worker_threads( (new_value
- aio_worker_threads
) );
2083 aio_worker_threads
= new_value
;
2090 } /* sysctl_aiothreads */
2094 * Validate parameters and get old / set new parameters
2095 * for max number of processes per UID.
2096 * Makes sure per UID limit is less than the system wide limit.
2099 sysctl_maxprocperuid(user_addr_t oldp
, size_t *oldlenp
,
2100 user_addr_t newp
, size_t newlen
)
2105 if ( oldp
!= USER_ADDR_NULL
&& *oldlenp
< sizeof(int) )
2107 if ( newp
!= USER_ADDR_NULL
&& newlen
!= sizeof(int) )
2110 *oldlenp
= sizeof(int);
2111 if ( oldp
!= USER_ADDR_NULL
)
2112 error
= copyout( &maxprocperuid
, oldp
, sizeof(int) );
2113 if ( error
== 0 && newp
!= USER_ADDR_NULL
) {
2114 error
= copyin( newp
, &new_value
, sizeof(int) );
2116 AUDIT_ARG(value
, new_value
);
2117 if ( new_value
<= maxproc
&& new_value
> 0 )
2118 maxprocperuid
= new_value
;
2127 } /* sysctl_maxprocperuid */
2131 * Validate parameters and get old / set new parameters
2132 * for max number of files per process.
2133 * Makes sure per process limit is less than the system-wide limit.
2136 sysctl_maxfilesperproc(user_addr_t oldp
, size_t *oldlenp
,
2137 user_addr_t newp
, size_t newlen
)
2142 if ( oldp
!= USER_ADDR_NULL
&& *oldlenp
< sizeof(int) )
2144 if ( newp
!= USER_ADDR_NULL
&& newlen
!= sizeof(int) )
2147 *oldlenp
= sizeof(int);
2148 if ( oldp
!= USER_ADDR_NULL
)
2149 error
= copyout( &maxfilesperproc
, oldp
, sizeof(int) );
2150 if ( error
== 0 && newp
!= USER_ADDR_NULL
) {
2151 error
= copyin( newp
, &new_value
, sizeof(int) );
2153 AUDIT_ARG(value
, new_value
);
2154 if ( new_value
< maxfiles
&& new_value
> 0 )
2155 maxfilesperproc
= new_value
;
2164 } /* sysctl_maxfilesperproc */
2168 * Validate parameters and get old / set new parameters
2169 * for the system-wide limit on the max number of processes.
2170 * Makes sure the system-wide limit is less than the configured hard
2171 * limit set at kernel compilation.
2174 sysctl_maxproc(user_addr_t oldp
, size_t *oldlenp
,
2175 user_addr_t newp
, size_t newlen
)
2180 if ( oldp
!= USER_ADDR_NULL
&& *oldlenp
< sizeof(int) )
2182 if ( newp
!= USER_ADDR_NULL
&& newlen
!= sizeof(int) )
2185 *oldlenp
= sizeof(int);
2186 if ( oldp
!= USER_ADDR_NULL
)
2187 error
= copyout( &maxproc
, oldp
, sizeof(int) );
2188 if ( error
== 0 && newp
!= USER_ADDR_NULL
) {
2189 error
= copyin( newp
, &new_value
, sizeof(int) );
2191 AUDIT_ARG(value
, new_value
);
2192 if ( new_value
<= hard_maxproc
&& new_value
> 0 )
2193 maxproc
= new_value
;
2202 } /* sysctl_maxproc */
2206 sysctl_sysctl_exec_affinity SYSCTL_HANDLER_ARGS
2208 struct proc
*cur_proc
= req
->p
;
2211 if (req
->oldptr
!= USER_ADDR_NULL
) {
2212 cpu_type_t oldcputype
= (cur_proc
->p_flag
& P_AFFINITY
) ? CPU_TYPE_POWERPC
: CPU_TYPE_I386
;
2213 if ((error
= SYSCTL_OUT(req
, &oldcputype
, sizeof(oldcputype
))))
2217 if (req
->newptr
!= USER_ADDR_NULL
) {
2218 cpu_type_t newcputype
;
2219 if ((error
= SYSCTL_IN(req
, &newcputype
, sizeof(newcputype
))))
2221 if (newcputype
== CPU_TYPE_I386
)
2222 cur_proc
->p_flag
&= ~P_AFFINITY
;
2223 else if (newcputype
== CPU_TYPE_POWERPC
)
2224 cur_proc
->p_flag
|= P_AFFINITY
;
2231 SYSCTL_PROC(_sysctl
, OID_AUTO
, proc_exec_affinity
, CTLTYPE_INT
|CTLFLAG_RW
|CTLFLAG_ANYBODY
, 0, 0, sysctl_sysctl_exec_affinity
,"I","proc_exec_affinity");
/*
 * fetch_process_cputype: resolve the effective CPU type of a process
 * (the current process, or the pid in name[0] when namelen == 1, with a
 * uid-match-or-superuser permission check), reporting CPU_TYPE_POWERPC
 * for P_TRANSLATED (Rosetta) processes and OR-ing in CPU_ARCH_ABI64 for
 * 64-bit processes.
 * NOTE(review): recovered from a mangled extract; the architecture
 * #ifdef structure and several parameter/return lines are missing, so
 * the surviving code is left byte-identical.
 */
2235 fetch_process_cputype(
2236 struct proc
*cur_proc
,
2239 cpu_type_t
*cputype
)
2241 struct proc
*p
= NULL
;
/* namelen == 1: look up the target by pid (elided above) */
2246 else if (namelen
== 1) {
/* permission: owner uid must match, or caller must be superuser */
2250 if ((kauth_cred_getuid(p
->p_ucred
) != kauth_cred_getuid(kauth_cred_get()))
2251 && suser(kauth_cred_get(), &cur_proc
->p_acflag
))
/* translated (Rosetta) processes report the PowerPC CPU type */
2258 if (p
->p_flag
& P_TRANSLATED
) {
2259 ret
= CPU_TYPE_POWERPC
;
/* 64-bit processes additionally carry the LP64 ABI bit */
2265 if (IS_64BIT_PROCESS(p
))
2266 ret
|= CPU_ARCH_ABI64
;
2274 sysctl_sysctl_native SYSCTL_HANDLER_ARGS
2277 cpu_type_t proc_cputype
= 0;
2278 if ((error
= fetch_process_cputype(req
->p
, (int *)arg1
, arg2
, &proc_cputype
)) != 0)
2281 if ((proc_cputype
& ~CPU_ARCH_MASK
) != (cpu_type() & ~CPU_ARCH_MASK
))
2283 return SYSCTL_OUT(req
, &res
, sizeof(res
));
2285 SYSCTL_PROC(_sysctl
, OID_AUTO
, proc_native
, CTLTYPE_NODE
|CTLFLAG_RD
, 0, 0, sysctl_sysctl_native
,"I","proc_native");
2288 sysctl_sysctl_cputype SYSCTL_HANDLER_ARGS
2291 cpu_type_t proc_cputype
= 0;
2292 if ((error
= fetch_process_cputype(req
->p
, (int *)arg1
, arg2
, &proc_cputype
)) != 0)
2294 return SYSCTL_OUT(req
, &proc_cputype
, sizeof(proc_cputype
));
2296 SYSCTL_PROC(_sysctl
, OID_AUTO
, proc_cputype
, CTLTYPE_NODE
|CTLFLAG_RD
, 0, 0, sysctl_sysctl_cputype
,"I","proc_cputype");