/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Mike Karels at Berkeley Software Design, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sysctl.c	8.4 (Berkeley) 4/14/94
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/file_internal.h>
#include <sys/vnode_internal.h>
#include <sys/unistd.h>
#include <sys/ioctl.h>
#include <sys/namei.h>
#include <sys/disklabel.h>
#include <sys/sysctl.h>
#include <sys/aio_kern.h>

#include <bsm/audit_kernel.h>

#include <mach/machine.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <kern/task.h>
#include <kern/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <mach/host_info.h>

extern vm_map_t bsd_pageable_map;

#include <sys/mount_internal.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>

#include <IOKit/IOPlatformExpert.h>
#include <pexpert/pexpert.h>

#include <machine/machine_routines.h>
#include <machine/exec.h>

#include <vm/vm_protos.h>

#include <i386/cpuid.h>
sysctlfn kern_sysctl;
sysctlfn debug_sysctl;
extern sysctlfn vm_sysctl;
extern sysctlfn vfs_sysctl;
extern sysctlfn net_sysctl;
extern sysctlfn cpu_sysctl;
extern int aio_max_requests;
extern int aio_max_requests_per_process;
extern int aio_worker_threads;
extern int maxfilesperproc;
extern int lowpri_IO_window_msecs;
extern int lowpri_IO_delay_msecs;
extern int nx_enabled;
fill_eproc(struct proc *p, struct eproc *ep);
fill_externproc(struct proc *p, struct extern_proc *exp);
fill_user_eproc(struct proc *p, struct user_eproc *ep);
fill_user_proc(struct proc *p, struct user_kinfo_proc *kp);
fill_user_externproc(struct proc *p, struct user_extern_proc *exp);
kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep);
kdebug_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
	   struct proc *p);
pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
	      struct proc *p);
__private_extern__ kern_return_t
reset_vmobjectcache(unsigned int val1, unsigned int val2);
resize_namecache(u_int newsize);
sysctl_aiomax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen);
sysctl_aioprocmax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen);
sysctl_aiothreads(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen);
sysctl_clockrate(user_addr_t where, size_t *sizep);
sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep);
sysctl_doprof(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
	      user_addr_t newp, size_t newlen);
sysctl_file(user_addr_t where, size_t *sizep);
fill_proc(struct proc *p, struct kinfo_proc *kp);
sysctl_maxfilesperproc(user_addr_t oldp, size_t *oldlenp,
		       user_addr_t newp, size_t newlen);
sysctl_maxprocperuid(user_addr_t oldp, size_t *oldlenp,
		     user_addr_t newp, size_t newlen);
sysctl_maxproc(user_addr_t oldp, size_t *oldlenp,
	       user_addr_t newp, size_t newlen);
sysctl_procargs(int *name, u_int namelen, user_addr_t where,
		size_t *sizep, struct proc *cur_proc);
sysctl_procargs2(int *name, u_int namelen, user_addr_t where, size_t *sizep,
		 struct proc *cur_proc);
sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
		 struct proc *cur_proc, int argc_yes);
sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
	      size_t newlen, void *sp, int len);
sysctl_vnode(user_addr_t where, size_t *sizep);
/*
 * temporary location for vm_sysctl.  This should be machine independent.
 */

extern uint32_t mach_factor[3];
static void
loadavg32to64(struct loadavg *la32, struct user_loadavg *la64)
{
	la64->ldavg[0]	= la32->ldavg[0];
	la64->ldavg[1]	= la32->ldavg[1];
	la64->ldavg[2]	= la32->ldavg[2];
	la64->fscale	= (user_long_t)la32->fscale;
}
int
vm_sysctl(int *name, __unused u_int namelen, user_addr_t oldp, size_t *oldlenp,
	  user_addr_t newp, size_t newlen, __unused struct proc *p)
{
	struct loadavg loadinfo;

	switch (name[0]) {
	case VM_LOADAVG:
		if (proc_is64bit(p)) {
			struct user_loadavg loadinfo64;
			loadavg32to64(&averunnable, &loadinfo64);
			return (sysctl_struct(oldp, oldlenp, newp, newlen,
					      &loadinfo64, sizeof(loadinfo64)));
		} else {
			return (sysctl_struct(oldp, oldlenp, newp, newlen,
					      &averunnable, sizeof(struct loadavg)));
		}
	case VM_MACHFACTOR:
		loadinfo.ldavg[0] = mach_factor[0];
		loadinfo.ldavg[1] = mach_factor[1];
		loadinfo.ldavg[2] = mach_factor[2];
		loadinfo.fscale = LSCALE;
		if (proc_is64bit(p)) {
			struct user_loadavg loadinfo64;
			loadavg32to64(&loadinfo, &loadinfo64);
			return (sysctl_struct(oldp, oldlenp, newp, newlen,
					      &loadinfo64, sizeof(loadinfo64)));
		} else {
			return (sysctl_struct(oldp, oldlenp, newp, newlen,
					      &loadinfo, sizeof(struct loadavg)));
		}
	case VM_SWAPUSAGE:
	{
		int		error;
		uint64_t	swap_total;
		uint64_t	swap_avail;
		uint32_t	swap_pagesize;
		boolean_t	swap_encrypted;
		struct xsw_usage xsu;

		error = macx_swapinfo(&swap_total,
				      &swap_avail,
				      &swap_pagesize,
				      &swap_encrypted);
		if (error)
			return error;

		xsu.xsu_total = swap_total;
		xsu.xsu_avail = swap_avail;
		xsu.xsu_used = swap_total - swap_avail;
		xsu.xsu_pagesize = swap_pagesize;
		xsu.xsu_encrypted = swap_encrypted;
		return sysctl_struct(oldp, oldlenp, newp, newlen,
				     &xsu, sizeof (struct xsw_usage));
	}
	}
}
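/*
 * Illustrative userspace sketch (not part of the kernel build): the handler
 * above is reached through sysctl(3) with a CTL_VM name.  A 32-bit caller
 * receives a struct loadavg, a 64-bit caller a struct user_loadavg.
 *
 *	int mib[2] = { CTL_VM, VM_LOADAVG };
 *	struct loadavg la;
 *	size_t len = sizeof(la);
 *
 *	if (sysctl(mib, 2, &la, &len, NULL, 0) == 0)
 *		printf("load: %u %u %u (fscale %ld)\n",
 *		    la.ldavg[0], la.ldavg[1], la.ldavg[2], (long)la.fscale);
 *
 * VM_SWAPUSAGE works the same way with a struct xsw_usage and reports
 * xsu_total, xsu_avail, xsu_used, xsu_pagesize and xsu_encrypted.
 */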
static struct sysctl_lock {
	char	sl_lock;
	char	sl_want;
} memlock;
int
__sysctl(struct proc *p, struct __sysctl_args *uap, __unused register_t *retval)
{
	int error, dolock = 1;
	size_t savelen = 0, oldlen = 0, newlen;
	sysctlfn *fnp = NULL;
	int name[CTL_MAXNAME];
	int error1;
	int i;

	/*
	 * all top-level sysctl names are non-terminal
	 */
	if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)
		return (EINVAL);
	error = copyin(uap->name, &name[0], uap->namelen * sizeof(int));
	if (error)
		return (error);

	AUDIT_ARG(ctlname, name, uap->namelen);

	if (proc_is64bit(p)) {
		/* uap->newlen is a size_t value which grows to 64 bits
		 * when coming from a 64-bit process.  since it's doubtful we'll
		 * have a sysctl newp buffer greater than 4GB we shrink it to size_t
		 */
		newlen = CAST_DOWN(size_t, uap->newlen);
	} else {
		newlen = uap->newlen;
	}

	/* CTL_UNSPEC is used to get oid to AUTO_OID */
	if (uap->new != USER_ADDR_NULL
	    && ((name[0] == CTL_KERN
		 && !(name[1] == KERN_IPC || name[1] == KERN_PANICINFO || name[1] == KERN_PROCDELAYTERM ||
		      name[1] == KERN_PROC_LOW_PRI_IO || name[1] == KERN_PROCNAME || name[1] == KERN_THALTSTACK))
		|| (name[0] == CTL_HW)
		|| (name[0] == CTL_VM)
		|| (name[0] == CTL_VFS))
	    && (error = suser(kauth_cred_get(), &p->p_acflag)))
		return (error);

	if ((name[1] != KERN_VNODE) && (name[1] != KERN_FILE)
	    && (name[1] != KERN_PROC))
		dolock = 0;

	if (uap->oldlenp != USER_ADDR_NULL) {
		uint64_t oldlen64 = fuulong(uap->oldlenp);

		oldlen = CAST_DOWN(size_t, oldlen64);
		/*
		 * If more than 4G, clamp to 4G - useracc() below will catch
		 * with an EFAULT, if it's actually necessary.
		 */
		if (oldlen64 > 0x00000000ffffffffULL)
			oldlen = 0xffffffffUL;
	}

	if (uap->old != USER_ADDR_NULL) {
		if (!useracc(uap->old, (user_size_t)oldlen, B_WRITE))
			return (EFAULT);

		/* The pc sampling mechanism does not need to take this lock */
		if ((name[1] != KERN_PCSAMPLES) &&
		    (!((name[1] == KERN_KDEBUG) && (name[2] == KERN_KDGETENTROPY)))) {
			while (memlock.sl_lock) {
				sleep((caddr_t)&memlock, PRIBIO+1);
			}
		}

		if (dolock && oldlen &&
		    (error = vslock(uap->old, (user_size_t)oldlen))) {
			if ((name[1] != KERN_PCSAMPLES) &&
			    (! ((name[1] == KERN_KDEBUG) && (name[2] == KERN_KDGETENTROPY)))) {
				if (memlock.sl_want) {
					wakeup((caddr_t)&memlock);
				}
			}
			return (error);
		}
		savelen = oldlen;
	}

	error = (*fnp)(name + 1, uap->namelen - 1, uap->old,
		       &oldlen, uap->new, newlen, p);

	if ( (name[0] != CTL_VFS) && (error == ENOTSUP)) {
		size_t tmp = oldlen;
		error = userland_sysctl(p, name, uap->namelen, uap->old, &tmp,
					1, uap->new, newlen, &oldlen);
	}

	if (uap->old != USER_ADDR_NULL) {
		if (dolock && savelen) {
			error1 = vsunlock(uap->old, (user_size_t)savelen, B_WRITE);
			if (!error && error1)
				error = error1;
		}
		if (name[1] != KERN_PCSAMPLES) {
			if (memlock.sl_want) {
				wakeup((caddr_t)&memlock);
			}
		}
	}
	if ((error) && (error != ENOMEM))
		return (error);

	if (uap->oldlenp != USER_ADDR_NULL) {
		i = suulong(uap->oldlenp, oldlen);
	}

	return (error);
}
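/*
 * Illustrative userspace sketch (not part of the kernel build): the
 * old-style name/namelen interface handled above is what sysctl(3) hands to
 * this syscall.  Reading a terminal string node such as KERN_OSTYPE looks
 * roughly like:
 *
 *	int mib[2] = { CTL_KERN, KERN_OSTYPE };
 *	char buf[64];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctl(mib, 2, buf, &len, NULL, 0) == 0)
 *		printf("%s\n", buf);
 *
 * The oldlenp/newlen handling above mirrors the buf/len pair here: len is
 * both an input (buffer size) and an output (bytes returned).
 */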
/*
 * Attributes stored in the kernel.
 */
__private_extern__ char	corefilename[MAXPATHLEN+1];
__private_extern__ int	do_coredump;
__private_extern__ int	sugid_coredump;

int securelevel = -1;
static int
sysctl_affinity(int *name, u_int namelen, user_addr_t oldBuf, size_t *oldSize,
		user_addr_t newBuf, __unused size_t newSize,
		struct proc *cur_proc)
{
	if (name[0] == 0 && 1 == namelen) {
		return sysctl_rdint(oldBuf, oldSize, newBuf,
			(cur_proc->p_flag & P_AFFINITY) ? 1 : 0);
	} else if (name[0] == 1 && 2 == namelen) {
		if (name[1] == 0) {
			cur_proc->p_flag &= ~P_AFFINITY;
		} else {
			cur_proc->p_flag |= P_AFFINITY;
		}
		return 0;
	}
	return (ENOTSUP);
}
static int
sysctl_translate(int *name, u_int namelen, user_addr_t oldBuf, size_t *oldSize,
		 user_addr_t newBuf, __unused size_t newSize,
		 struct proc *cur_proc)
{
	struct proc *p;

	if (namelen != 1)
		return (ENOTSUP);

	p = pfind(name[0]);
	if (p == NULL)
		return (EINVAL);

	if ((kauth_cred_getuid(p->p_ucred) != kauth_cred_getuid(kauth_cred_get()))
	    && suser(kauth_cred_get(), &cur_proc->p_acflag))
		return (EPERM);

	return sysctl_rdint(oldBuf, oldSize, newBuf,
		(p->p_flag & P_TRANSLATED) ? 1 : 0);
}
static int
set_archhandler(struct proc *p, int arch)
{
	int error;
	struct nameidata nd;
	struct vnode_attr va;
	struct vfs_context context;
	char *archhandler;

	switch (arch) {
	case CPU_TYPE_POWERPC:
		archhandler = exec_archhandler_ppc.path;
		break;
	default:
		return (EBADARCH);
	}

	context.vc_proc = p;
	context.vc_ucred = kauth_cred_get();

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE32,
	       CAST_USER_ADDR_T(archhandler), &context);
	error = namei(&nd);
	if (error)
		return (error);
	nameidone(&nd);

	/* Check mount point */
	if ((nd.ni_vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    (nd.ni_vp->v_type != VREG)) {
		vnode_put(nd.ni_vp);
		return (EACCES);
	}

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_fsid);
	VATTR_WANTED(&va, va_fileid);
	error = vnode_getattr(nd.ni_vp, &va, &context);
	vnode_put(nd.ni_vp);
	if (error)
		return (error);

	exec_archhandler_ppc.fsid = va.va_fsid;
	exec_archhandler_ppc.fileid = (u_long)va.va_fileid;
	return 0;
}
static int
sysctl_exec_archhandler_ppc(__unused int *name, __unused u_int namelen,
	user_addr_t oldBuf, size_t *oldSize,
	user_addr_t newBuf, size_t newSize, struct proc *p)
{
	int error;
	size_t len;
	struct vnode_attr va;
	char handler[sizeof(exec_archhandler_ppc.path)];
	struct vfs_context context;

	context.vc_proc = p;
	context.vc_ucred = kauth_cred_get();

	if (oldSize) {
		len = strlen(exec_archhandler_ppc.path) + 1;
		if (oldBuf) {
			if (*oldSize < len)
				return (ENOMEM);
			error = copyout(exec_archhandler_ppc.path, oldBuf, len);
			if (error)
				return (error);
		}
		*oldSize = len - 1;
	}
	if (newBuf) {
		error = suser(context.vc_ucred, &p->p_acflag);
		if (error)
			return (error);
		if (newSize >= sizeof(exec_archhandler_ppc.path))
			return (ENAMETOOLONG);
		error = copyin(newBuf, handler, newSize);
		if (error)
			return (error);
		handler[newSize] = 0;
		strcpy(exec_archhandler_ppc.path, handler);
		error = set_archhandler(p, CPU_TYPE_POWERPC);
		if (error)
			return (error);
	}
	return 0;
}
SYSCTL_NODE(_kern, KERN_EXEC, exec, CTLFLAG_RD, 0, "");

SYSCTL_NODE(_kern_exec, OID_AUTO, archhandler, CTLFLAG_RD, 0, "");

SYSCTL_STRING(_kern_exec_archhandler, OID_AUTO, powerpc, CTLFLAG_RD,
	      exec_archhandler_ppc.path, 0, "");
extern int get_kernel_symfile(struct proc *, char **);
__private_extern__ int
sysctl_dopanicinfo(int *, u_int, user_addr_t, size_t *, user_addr_t,
		   size_t, struct proc *);
/*
 * kernel related system variables.
 */
int
kern_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
	    user_addr_t newp, size_t newlen, struct proc *p)
{
	int error, level, inthostid, tmp;
	unsigned int oldval = 0;
	char *str;

	/* all sysctl names not listed below are terminal at this level */
	if (namelen != 1
	    && !(name[0] == KERN_PROC
		 || name[0] == KERN_PROF
		 || name[0] == KERN_KDEBUG
		 || name[0] == KERN_PROCARGS
		 || name[0] == KERN_PROCARGS2
		 || name[0] == KERN_PCSAMPLES
		 || name[0] == KERN_IPC
		 || name[0] == KERN_SYSV
		 || name[0] == KERN_AFFINITY
		 || name[0] == KERN_TRANSLATE
		 || name[0] == KERN_EXEC
		 || name[0] == KERN_PANICINFO
		 || name[0] == KERN_POSIX
		 || name[0] == KERN_TFP))
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case KERN_OSTYPE:
		return (sysctl_rdstring(oldp, oldlenp, newp, ostype));
	case KERN_OSRELEASE:
		return (sysctl_rdstring(oldp, oldlenp, newp, osrelease));
	case KERN_OSREV:
		return (sysctl_rdint(oldp, oldlenp, newp, BSD));
	case KERN_VERSION:
		return (sysctl_rdstring(oldp, oldlenp, newp, version));
	case KERN_MAXVNODES:
		oldval = desiredvnodes;
		error = sysctl_int(oldp, oldlenp, newp,
				   newlen, &desiredvnodes);
		reset_vmobjectcache(oldval, desiredvnodes);
		resize_namecache(desiredvnodes);
		return (error);
	case KERN_MAXPROC:
		return (sysctl_maxproc(oldp, oldlenp, newp, newlen));
	case KERN_MAXFILES:
		return (sysctl_int(oldp, oldlenp, newp, newlen, &maxfiles));
	case KERN_MAXPROCPERUID:
		return( sysctl_maxprocperuid( oldp, oldlenp, newp, newlen ) );
	case KERN_MAXFILESPERPROC:
		return( sysctl_maxfilesperproc( oldp, oldlenp, newp, newlen ) );
	case KERN_ARGMAX:
		return (sysctl_rdint(oldp, oldlenp, newp, ARG_MAX));
	case KERN_SECURELVL:
		level = securelevel;
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &level)) ||
		    newp == USER_ADDR_NULL)
			return (error);
		if (level < securelevel && p->p_pid != 1)
			return (EPERM);
		securelevel = level;
		return (0);
	case KERN_HOSTNAME:
		error = sysctl_trstring(oldp, oldlenp, newp, newlen,
					hostname, sizeof(hostname));
		if (newp && !error)
			hostnamelen = newlen;
		return (error);
	case KERN_DOMAINNAME:
		error = sysctl_string(oldp, oldlenp, newp, newlen,
				      domainname, sizeof(domainname));
		if (newp && !error)
			domainnamelen = newlen;
		return (error);
	case KERN_HOSTID:
		inthostid = hostid;	/* XXX assumes sizeof long <= sizeof int */
		error = sysctl_int(oldp, oldlenp, newp, newlen, &inthostid);
		hostid = inthostid;
		return (error);
	case KERN_CLOCKRATE:
		return (sysctl_clockrate(oldp, oldlenp));
	case KERN_BOOTTIME:
	{
		struct timeval	t;

		t.tv_sec = boottime_sec();
		t.tv_usec = 0;

		return (sysctl_rdstruct(oldp, oldlenp, newp, &t,
					sizeof(struct timeval)));
	}
	case KERN_VNODE:
		return (sysctl_vnode(oldp, oldlenp));
	case KERN_PROC:
		return (sysctl_doproc(name + 1, namelen - 1, oldp, oldlenp));
	case KERN_FILE:
		return (sysctl_file(oldp, oldlenp));
	case KERN_PROF:
		return (sysctl_doprof(name + 1, namelen - 1, oldp, oldlenp,
				      newp, newlen));
	case KERN_POSIX1:
		return (sysctl_rdint(oldp, oldlenp, newp, _POSIX_VERSION));
	case KERN_NGROUPS:
		return (sysctl_rdint(oldp, oldlenp, newp, NGROUPS_MAX));
	case KERN_JOB_CONTROL:
		return (sysctl_rdint(oldp, oldlenp, newp, 1));
	case KERN_SAVED_IDS:
#ifdef _POSIX_SAVED_IDS
		return (sysctl_rdint(oldp, oldlenp, newp, 1));
#else
		return (sysctl_rdint(oldp, oldlenp, newp, 0));
#endif
	case KERN_KDEBUG:
		return (kdebug_ops(name + 1, namelen - 1, oldp, oldlenp, p));
	case KERN_PCSAMPLES:
		return (pcsamples_ops(name + 1, namelen - 1, oldp, oldlenp, p));
	case KERN_PROCARGS:
		/* new one as it does not use kinfo_proc */
		return (sysctl_procargs(name + 1, namelen - 1, oldp, oldlenp, p));
	case KERN_PROCARGS2:
		/* new one as it does not use kinfo_proc */
		return (sysctl_procargs2(name + 1, namelen - 1, oldp, oldlenp, p));
	case KERN_SYMFILE:
		error = get_kernel_symfile( p, &str );
		if (error)
			return (error);
		return (sysctl_rdstring(oldp, oldlenp, newp, str));
	case KERN_NETBOOT:
		return (sysctl_rdint(oldp, oldlenp, newp, netboot_root()));
	case KERN_PANICINFO:
		return(sysctl_dopanicinfo(name + 1, namelen - 1, oldp, oldlenp,
					  newp, newlen, p));
	case KERN_AFFINITY:
		return sysctl_affinity(name+1, namelen-1, oldp, oldlenp,
				       newp, newlen, p);
	case KERN_TRANSLATE:
		return sysctl_translate(name+1, namelen-1, oldp, oldlenp, newp,
					newlen, p);
	case KERN_CLASSICHANDLER:
		return sysctl_exec_archhandler_ppc(name+1, namelen-1, oldp,
						   oldlenp, newp, newlen, p);
	case KERN_AIOMAX:
		return( sysctl_aiomax( oldp, oldlenp, newp, newlen ) );
	case KERN_AIOPROCMAX:
		return( sysctl_aioprocmax( oldp, oldlenp, newp, newlen ) );
	case KERN_AIOTHREADS:
		return( sysctl_aiothreads( oldp, oldlenp, newp, newlen ) );
	case KERN_USRSTACK:
		return (sysctl_rdint(oldp, oldlenp, newp, (uintptr_t)p->user_stack));
	case KERN_USRSTACK64:
		return (sysctl_rdquad(oldp, oldlenp, newp, p->user_stack));
	case KERN_COREFILE:
		error = sysctl_string(oldp, oldlenp, newp, newlen,
				      corefilename, sizeof(corefilename));
		return (error);
	case KERN_COREDUMP:
		tmp = do_coredump;
		error = sysctl_int(oldp, oldlenp, newp, newlen, &do_coredump);
		if (!error && ((do_coredump < 0) || (do_coredump > 1))) {
			do_coredump = tmp;
			error = EINVAL;
		}
		return (error);
	case KERN_SUGID_COREDUMP:
		tmp = sugid_coredump;
		error = sysctl_int(oldp, oldlenp, newp, newlen, &sugid_coredump);
		if (!error && ((sugid_coredump < 0) || (sugid_coredump > 1))) {
			sugid_coredump = tmp;
			error = EINVAL;
		}
		return (error);
	case KERN_PROCDELAYTERM:
	{
		int old_value, new_value;

		error = 0;
		if (oldp && *oldlenp < sizeof(int))
			return (ENOMEM);
		if ( newp && newlen != sizeof(int) )
			return (EINVAL);
		*oldlenp = sizeof(int);
		old_value = (p->p_lflag & P_LDELAYTERM)? 1: 0;
		if (oldp && (error = copyout( &old_value, oldp, sizeof(int))))
			return (error);
		if (error == 0 && newp )
			error = copyin( newp, &new_value, sizeof(int) );
		if (error == 0 && newp) {
			if (new_value)
				p->p_lflag |= P_LDELAYTERM;
			else
				p->p_lflag &= ~P_LDELAYTERM;
		}
		return (error);
	}
	case KERN_PROC_LOW_PRI_IO:
	{
		int old_value, new_value;

		error = 0;
		if (oldp && *oldlenp < sizeof(int))
			return (ENOMEM);
		if ( newp && newlen != sizeof(int) )
			return (EINVAL);
		*oldlenp = sizeof(int);

		old_value = (p->p_lflag & P_LLOW_PRI_IO)? 0x01: 0;
		if (p->p_lflag & P_LBACKGROUND_IO)
			old_value |= 0x02;

		if (oldp && (error = copyout( &old_value, oldp, sizeof(int))))
			return (error);
		if (error == 0 && newp )
			error = copyin( newp, &new_value, sizeof(int) );
		if (error == 0 && newp) {
			if (new_value & 0x01)
				p->p_lflag |= P_LLOW_PRI_IO;
			else if (new_value & 0x02)
				p->p_lflag |= P_LBACKGROUND_IO;
			else if (new_value == 0)
				p->p_lflag &= ~(P_LLOW_PRI_IO | P_LBACKGROUND_IO);
		}
		return (error);
	}
	case KERN_LOW_PRI_WINDOW:
	{
		int old_value, new_value;

		error = 0;
		if (oldp && *oldlenp < sizeof(old_value) )
			return (ENOMEM);
		if ( newp && newlen != sizeof(new_value) )
			return (EINVAL);
		*oldlenp = sizeof(old_value);

		old_value = lowpri_IO_window_msecs;

		if (oldp && (error = copyout( &old_value, oldp, *oldlenp)))
			return (error);
		if (error == 0 && newp )
			error = copyin( newp, &new_value, sizeof(newlen) );
		if (error == 0 && newp) {
			lowpri_IO_window_msecs = new_value;
		}
		return (error);
	}
	case KERN_LOW_PRI_DELAY:
	{
		int old_value, new_value;

		error = 0;
		if (oldp && *oldlenp < sizeof(old_value) )
			return (ENOMEM);
		if ( newp && newlen != sizeof(new_value) )
			return (EINVAL);
		*oldlenp = sizeof(old_value);

		old_value = lowpri_IO_delay_msecs;

		if (oldp && (error = copyout( &old_value, oldp, *oldlenp)))
			return (error);
		if (error == 0 && newp )
			error = copyin( newp, &new_value, sizeof(newlen) );
		if (error == 0 && newp) {
			lowpri_IO_delay_msecs = new_value;
		}
		return (error);
	}
	case KERN_NX_PROTECTION:
	{
		int old_value, new_value;

		error = 0;
		if (oldp && *oldlenp < sizeof(old_value) )
			return (ENOMEM);
		if ( newp && newlen != sizeof(new_value) )
			return (EINVAL);
		*oldlenp = sizeof(old_value);

		old_value = nx_enabled;

		if (oldp && (error = copyout( &old_value, oldp, *oldlenp)))
			return (error);
		/*
		 * Only allow setting if NX is supported on the chip
		 */
		if (cpuid_extfeatures() & CPUID_EXTFEATURE_XD) {
			if (error == 0 && newp)
				error = copyin(newp, &new_value,
					       sizeof(newlen));
			if (error == 0 && newp)
				nx_enabled = new_value;
		}
		return (error);
	}
	case KERN_SHREG_PRIVATIZABLE:
		/* this kernel does implement shared_region_make_private_np() */
		return (sysctl_rdint(oldp, oldlenp, newp, 1));
	case KERN_PROCNAME:
		error = sysctl_trstring(oldp, oldlenp, newp, newlen,
					&p->p_name[0], (2*MAXCOMLEN+1));
		return (error);
	case KERN_THALTSTACK:
	{
		int old_value, new_value;

		error = 0;
		if (oldp && *oldlenp < sizeof(int))
			return (ENOMEM);
		if ( newp && newlen != sizeof(int) )
			return (EINVAL);
		*oldlenp = sizeof(int);
		old_value = (p->p_lflag & P_LTHSIGSTACK)? 1: 0;
		if (oldp && (error = copyout( &old_value, oldp, sizeof(int))))
			return (error);
		if (error == 0 && newp )
			error = copyin( newp, &new_value, sizeof(int) );
		if (error == 0 && newp) {
			if (new_value) {
				/* we cannot switch midstream if inuse */
				if ((p->p_sigacts->ps_flags & SAS_ALTSTACK) == SAS_ALTSTACK)
					return (EPERM);
				p->p_lflag |= P_LTHSIGSTACK;
			} else {
				/* we cannot switch midstream */
				if ((p->p_lflag & P_LTHSIGSTACK) == P_LTHSIGSTACK)
					return (EPERM);
				p->p_lflag &= ~P_LTHSIGSTACK;
			}
		}
		return (error);
	}
	default:
		return (ENOTSUP);
	}
	/* NOTREACHED */
}
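/*
 * Illustrative userspace sketch (not part of the kernel build): most of the
 * integer-valued cases above follow the same oldp/oldlenp, newp/newlen
 * convention.  Reading and (with privilege) raising kern.maxfilesperproc:
 *
 *	int mib[2] = { CTL_KERN, KERN_MAXFILESPERPROC };
 *	int cur, want = 20000;
 *	size_t len = sizeof(cur);
 *
 *	sysctl(mib, 2, &cur, &len, NULL, 0);		 // get
 *	sysctl(mib, 2, NULL, NULL, &want, sizeof(want)); // set (root only)
 *
 * Passing both an old and a new buffer in one call performs the get and the
 * set together, matching the copyout/copyin ordering in the handlers above.
 */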
/*
 * Debugging related system variables.
 */
#if DIAGNOSTIC
extern
#endif /* DIAGNOSTIC */
struct ctldebug debug0, debug1;
struct ctldebug debug2, debug3, debug4;
struct ctldebug debug5, debug6, debug7, debug8, debug9;
struct ctldebug debug10, debug11, debug12, debug13, debug14;
struct ctldebug debug15, debug16, debug17, debug18, debug19;
static struct ctldebug *debugvars[CTL_DEBUG_MAXID] = {
	&debug0, &debug1, &debug2, &debug3, &debug4,
	&debug5, &debug6, &debug7, &debug8, &debug9,
	&debug10, &debug11, &debug12, &debug13, &debug14,
	&debug15, &debug16, &debug17, &debug18, &debug19,
};
int
debug_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
	     user_addr_t newp, size_t newlen, struct proc *p)
{
	struct ctldebug *cdp;

	/* all sysctl names at this level are name and field */
	if (namelen != 2)
		return (ENOTDIR);		/* overloaded */
	cdp = debugvars[name[0]];
	if (cdp->debugname == 0)
		return (ENOTSUP);
	switch (name[1]) {
	case CTL_DEBUG_NAME:
		return (sysctl_rdstring(oldp, oldlenp, newp, cdp->debugname));
	case CTL_DEBUG_VALUE:
		return (sysctl_int(oldp, oldlenp, newp, newlen, cdp->debugvar));
	default:
		return (ENOTSUP);
	}
	/* NOTREACHED */
}
/*
 * Validate parameters and get old / set new parameters
 * for an integer-valued sysctl function.
 */
int
sysctl_int(user_addr_t oldp, size_t *oldlenp,
	   user_addr_t newp, size_t newlen, int *valp)
{
	int error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	if (oldp && *oldlenp < sizeof(int))
		return (ENOMEM);
	if (newp && newlen != sizeof(int))
		return (EINVAL);
	*oldlenp = sizeof(int);
	if (oldp)
		error = copyout(valp, oldp, sizeof(int));
	if (error == 0 && newp) {
		error = copyin(newp, valp, sizeof(int));
		AUDIT_ARG(value, *valp);
	}
	return (error);
}
/*
 * As above, but read-only.
 */
int
sysctl_rdint(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, int val)
{
	int error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	if (oldp && *oldlenp < sizeof(int))
		return (ENOMEM);
	if (newp)
		return (EPERM);
	*oldlenp = sizeof(int);
	if (oldp)
		error = copyout((caddr_t)&val, oldp, sizeof(int));
	return (error);
}
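/*
 * Minimal handler-side sketch (assumption: a hypothetical integer tunable
 * my_tunable; this is not a node defined in this file).  A terminal case in
 * kern_sysctl() would wire it up exactly like KERN_MAXFILES above:
 *
 *	case KERN_MYTUNABLE:			// hypothetical name
 *		return (sysctl_int(oldp, oldlenp, newp, newlen, &my_tunable));
 *
 * sysctl_rdint() is the read-only variant: it rejects any attempt to supply
 * a new value and only copies the current value out.
 */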
/*
 * Validate parameters and get old / set new parameters
 * for a quad(64bit)-valued sysctl function.
 */
int
sysctl_quad(user_addr_t oldp, size_t *oldlenp,
	    user_addr_t newp, size_t newlen, quad_t *valp)
{
	int error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	if (oldp && *oldlenp < sizeof(quad_t))
		return (ENOMEM);
	if (newp && newlen != sizeof(quad_t))
		return (EINVAL);
	*oldlenp = sizeof(quad_t);
	if (oldp)
		error = copyout(valp, oldp, sizeof(quad_t));
	if (error == 0 && newp)
		error = copyin(newp, valp, sizeof(quad_t));
	return (error);
}
/*
 * As above, but read-only.
 */
int
sysctl_rdquad(oldp, oldlenp, newp, val)
	void *oldp;
	size_t *oldlenp;
	void *newp;
	quad_t val;
{
	int error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	if (oldp && *oldlenp < sizeof(quad_t))
		return (ENOMEM);
	if (newp)
		return (EPERM);
	*oldlenp = sizeof(quad_t);
	if (oldp)
		error = copyout((caddr_t)&val, CAST_USER_ADDR_T(oldp), sizeof(quad_t));
	return (error);
}
/*
 * Validate parameters and get old / set new parameters
 * for a string-valued sysctl function.  Unlike sysctl_string, if you
 * give it a too small (but larger than 0 bytes) buffer, instead of
 * returning ENOMEM, it truncates the returned string to the buffer
 * size.  This preserves the semantics of some library routines
 * implemented via sysctl, which truncate their returned data, rather
 * than simply returning an error.  The returned string is always NUL
 * terminated.
 */
int
sysctl_trstring(user_addr_t oldp, size_t *oldlenp,
		user_addr_t newp, size_t newlen, char *str, int maxlen)
{
	int len, copylen, error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	copylen = len = strlen(str) + 1;
	if (oldp && (len < 0 || *oldlenp < 1))
		return (ENOMEM);
	if (oldp && (*oldlenp < (size_t)len))
		copylen = *oldlenp + 1;
	if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
		return (EINVAL);
	*oldlenp = copylen - 1; /* deal with NULL strings correctly */
	if (oldp) {
		error = copyout(str, oldp, copylen);

		if (!error) {
			unsigned char c = 0;

			/* NUL terminate */
			oldp += *oldlenp;
			error = copyout((void *)&c, oldp, sizeof(char));
		}
	}
	if (error == 0 && newp) {
		error = copyin(newp, str, newlen);
		AUDIT_ARG(text, (char *)str);
	}
	return (error);
}
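/*
 * Behavioral sketch (illustrative, not part of the kernel build): with
 * hostname "mymac.local", a caller that passes a 5-byte buffer gets back
 * the truncated, NUL-terminated string "myma" and success from this
 * routine, whereas sysctl_string() below would fail the same request with
 * ENOMEM.
 *
 *	int mib[2] = { CTL_KERN, KERN_HOSTNAME };
 *	char small[5];
 *	size_t len = sizeof(small);
 *
 *	sysctl(mib, 2, small, &len, NULL, 0);	// returns 0, small = "myma"
 */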
/*
 * Validate parameters and get old / set new parameters
 * for a string-valued sysctl function.
 */
int
sysctl_string(user_addr_t oldp, size_t *oldlenp,
	      user_addr_t newp, size_t newlen, char *str, int maxlen)
{
	int len, error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	len = strlen(str) + 1;
	if (oldp && (len < 0 || *oldlenp < (size_t)len))
		return (ENOMEM);
	if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
		return (EINVAL);
	*oldlenp = len - 1; /* deal with NULL strings correctly */
	if (oldp)
		error = copyout(str, oldp, len);
	if (error == 0 && newp) {
		error = copyin(newp, str, newlen);
		AUDIT_ARG(text, (char *)str);
	}
	return (error);
}
/*
 * As above, but read-only.
 */
int
sysctl_rdstring(user_addr_t oldp, size_t *oldlenp,
		user_addr_t newp, char *str)
{
	int len, error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	len = strlen(str) + 1;
	if (oldp && *oldlenp < (size_t)len)
		return (ENOMEM);
	if (newp)
		return (EPERM);
	*oldlenp = len;
	if (oldp)
		error = copyout(str, oldp, len);
	return (error);
}
/*
 * Validate parameters and get old / set new parameters
 * for a structure oriented sysctl function.
 */
int
sysctl_struct(user_addr_t oldp, size_t *oldlenp,
	      user_addr_t newp, size_t newlen, void *sp, int len)
{
	int error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	if (oldp && (len < 0 || *oldlenp < (size_t)len))
		return (ENOMEM);
	if (newp && (len < 0 || newlen > (size_t)len))
		return (EINVAL);
	if (oldp) {
		*oldlenp = len;
		error = copyout(sp, oldp, len);
	}
	if (error == 0 && newp)
		error = copyin(newp, sp, len);
	return (error);
}
/*
 * Validate parameters and get old parameters
 * for a structure oriented sysctl function.
 */
int
sysctl_rdstruct(user_addr_t oldp, size_t *oldlenp,
		user_addr_t newp, void *sp, int len)
{
	int error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	if (oldp && (len < 0 || *oldlenp < (size_t)len))
		return (ENOMEM);
	if (newp)
		return (EPERM);
	*oldlenp = len;
	if (oldp)
		error = copyout(sp, oldp, len);
	return (error);
}
/*
 * Get file structures.
 */
int
sysctl_file(user_addr_t where, size_t *sizep)
{
	int buflen, error;
	struct fileglob *fg;
	user_addr_t start = where;
	struct extern_file nef;

	buflen = *sizep;
	if (where == USER_ADDR_NULL) {
		/*
		 * overestimate by 10 files
		 */
		*sizep = sizeof(filehead) + (nfiles + 10) * sizeof(struct extern_file);
		return (0);
	}

	/*
	 * first copyout filehead
	 */
	if (buflen < 0 || (size_t)buflen < sizeof(filehead)) {
		*sizep = 0;
		return (0);
	}
	error = copyout((caddr_t)&filehead, where, sizeof(filehead));
	if (error)
		return (error);
	buflen -= sizeof(filehead);
	where += sizeof(filehead);

	/*
	 * followed by an array of file structures
	 */
	for (fg = filehead.lh_first; fg != 0; fg = fg->f_list.le_next) {
		if (buflen < 0 || (size_t)buflen < sizeof(struct extern_file)) {
			*sizep = where - start;
			return (ENOMEM);
		}
		nef.f_list.le_next = (struct extern_file *)fg->f_list.le_next;
		nef.f_list.le_prev = (struct extern_file **)fg->f_list.le_prev;
		nef.f_flag = (fg->fg_flag & FMASK);
		nef.f_type = fg->fg_type;
		nef.f_count = fg->fg_count;
		nef.f_msgcount = fg->fg_msgcount;
		nef.f_cred = fg->fg_cred;
		nef.f_ops = fg->fg_ops;
		nef.f_offset = fg->fg_offset;
		nef.f_data = fg->fg_data;
		error = copyout((caddr_t)&nef, where, sizeof (struct extern_file));
		if (error)
			return (error);
		buflen -= sizeof(struct extern_file);
		where += sizeof(struct extern_file);
	}
	*sizep = where - start;
	return (0);
}
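/*
 * Illustrative userspace sketch (not part of the kernel build): callers use
 * the two-step pattern this handler supports - a NULL old pointer first to
 * get a size estimate (padded by 10 files above), then the real fetch.
 *
 *	int mib[2] = { CTL_KERN, KERN_FILE };
 *	size_t len = 0;
 *
 *	sysctl(mib, 2, NULL, &len, NULL, 0);	// size estimate
 *	void *buf = malloc(len);
 *	sysctl(mib, 2, buf, &len, NULL, 0);	// filehead + extern_file array
 */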
/*
 * try over estimating by 5 procs
 */
#define KERN_PROCSLOP	(5 * sizeof (struct kinfo_proc))

int
sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep)
{
	user_addr_t dp = where;
	size_t needed = 0;
	int buflen = where != USER_ADDR_NULL ? *sizep : 0;
	int doingzomb = 0;
	struct proc *p;
	int error = 0;
	boolean_t is_64_bit = FALSE;
	struct kinfo_proc	kproc;
	struct user_kinfo_proc	user_kproc;
	int sizeof_kproc = 0;
	caddr_t kprocp = NULL;

	if (namelen != 2 && !(namelen == 1 && name[0] == KERN_PROC_ALL))
		return (EINVAL);
	p = allproc.lh_first;
	is_64_bit = proc_is64bit(current_proc());
	if (is_64_bit) {
		sizeof_kproc = sizeof(user_kproc);
		kprocp = (caddr_t) &user_kproc;
	} else {
		sizeof_kproc = sizeof(kproc);
		kprocp = (caddr_t) &kproc;
	}
again:
	for (; p != 0; p = p->p_list.le_next) {
		/*
		 * Skip embryonic processes.
		 */
		if (p->p_stat == SIDL)
			continue;
		/*
		 * TODO - make more efficient (see notes below).
		 */
		switch (name[0]) {

		case KERN_PROC_PID:
			/* could do this with just a lookup */
			if (p->p_pid != (pid_t)name[1])
				continue;
			break;

		case KERN_PROC_PGRP:
			/* could do this by traversing pgrp */
			if (p->p_pgrp->pg_id != (pid_t)name[1])
				continue;
			break;

		case KERN_PROC_TTY:
			if ((p->p_flag & P_CONTROLT) == 0 ||
			    (p->p_session == NULL) ||
			    p->p_session->s_ttyp == NULL ||
			    p->p_session->s_ttyp->t_dev != (dev_t)name[1])
				continue;
			break;

		case KERN_PROC_UID:
			if ((p->p_ucred == NULL) ||
			    (kauth_cred_getuid(p->p_ucred) != (uid_t)name[1]))
				continue;
			break;

		case KERN_PROC_RUID:
			if ((p->p_ucred == NULL) ||
			    (p->p_ucred->cr_ruid != (uid_t)name[1]))
				continue;
			break;
		}

		if (buflen >= sizeof_kproc) {
			bzero(kprocp, sizeof_kproc);
			if (is_64_bit) {
				fill_user_proc(p, (struct user_kinfo_proc *) kprocp);
			} else {
				fill_proc(p, (struct kinfo_proc *) kprocp);
			}
			error = copyout(kprocp, dp, sizeof_kproc);
			if (error)
				return (error);
			dp += sizeof_kproc;
			buflen -= sizeof_kproc;
		}
		needed += sizeof_kproc;
	}
	if (doingzomb == 0) {
		p = zombproc.lh_first;
		doingzomb++;
		goto again;
	}
	if (where != USER_ADDR_NULL) {
		*sizep = dp - where;
		if (needed > *sizep)
			return (ENOMEM);
	} else {
		needed += KERN_PROCSLOP;
		*sizep = needed;
	}
	return (0);
}
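/*
 * Illustrative userspace sketch (not part of the kernel build): fetching the
 * kinfo_proc for a single pid via the KERN_PROC_PID filter handled above.
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid() };
 *	struct kinfo_proc kp;
 *	size_t len = sizeof(kp);
 *
 *	if (sysctl(mib, 4, &kp, &len, NULL, 0) == 0 && len > 0)
 *		printf("%s\n", kp.kp_proc.p_comm);
 *
 * KERN_PROC_ALL takes no fourth element; the caller then sizes the buffer
 * with a NULL old pointer first, as with KERN_FILE.
 */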
/*
 * Fill in an eproc structure for the specified process.
 */
static void
fill_eproc(p, ep)
	register struct proc *p;
	register struct eproc *ep;
{
	register struct tty *tp;

	ep->e_paddr = p;
	if (p->p_pgrp) {
		ep->e_sess = p->p_pgrp->pg_session;
		ep->e_pgid = p->p_pgrp->pg_id;
		ep->e_jobc = p->p_pgrp->pg_jobc;
		if (ep->e_sess && ep->e_sess->s_ttyvp)
			ep->e_flag = EPROC_CTTY;
	} else
		ep->e_sess = (struct session *)0;
	ep->e_ppid = (p->p_pptr) ? p->p_pptr->p_pid : 0;
	/* Pre-zero the fake historical pcred */
	bzero(&ep->e_pcred, sizeof(struct _pcred));
	if (p->p_ucred) {
		/* XXX not ref-counted */

		/* A fake historical pcred */
		ep->e_pcred.p_ruid = p->p_ucred->cr_ruid;
		ep->e_pcred.p_svuid = p->p_ucred->cr_svuid;
		ep->e_pcred.p_rgid = p->p_ucred->cr_rgid;
		ep->e_pcred.p_svgid = p->p_ucred->cr_svgid;

		/* A fake historical *kauth_cred_t */
		ep->e_ucred.cr_ref = p->p_ucred->cr_ref;
		ep->e_ucred.cr_uid = kauth_cred_getuid(p->p_ucred);
		ep->e_ucred.cr_ngroups = p->p_ucred->cr_ngroups;
		bcopy(p->p_ucred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));
	}
	if (p->p_stat == SIDL || p->p_stat == SZOMB) {
		ep->e_vm.vm_tsize = 0;
		ep->e_vm.vm_dsize = 0;
		ep->e_vm.vm_ssize = 0;
	}
	ep->e_vm.vm_rssize = 0;

	if ((p->p_flag & P_CONTROLT) && (ep->e_sess) &&
	    (tp = ep->e_sess->s_ttyp)) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
		ep->e_tsess = tp->t_session;
	}
	if (SESS_LEADER(p))
		ep->e_flag |= EPROC_SLEADER;
	if (p->p_wmesg)
		strncpy(ep->e_wmesg, p->p_wmesg, WMESGLEN);
	ep->e_xsize = ep->e_xrssize = 0;
	ep->e_xccount = ep->e_xswrss = 0;
}
/*
 * Fill in an LP64 version of eproc structure for the specified process.
 */
static void
fill_user_eproc(register struct proc *p, register struct user_eproc *ep)
{
	register struct tty *tp;
	struct session *sessionp = NULL;

	ep->e_paddr = CAST_USER_ADDR_T(p);
	if (p->p_pgrp) {
		sessionp = p->p_pgrp->pg_session;
		ep->e_sess = CAST_USER_ADDR_T(sessionp);
		ep->e_pgid = p->p_pgrp->pg_id;
		ep->e_jobc = p->p_pgrp->pg_jobc;
		if (sessionp->s_ttyvp)
			ep->e_flag = EPROC_CTTY;
	} else
		ep->e_sess = USER_ADDR_NULL;
	ep->e_ppid = (p->p_pptr) ? p->p_pptr->p_pid : 0;
	/* Pre-zero the fake historical pcred */
	bzero(&ep->e_pcred, sizeof(ep->e_pcred));
	if (p->p_ucred) {
		/* XXX not ref-counted */

		/* A fake historical pcred */
		ep->e_pcred.p_ruid = p->p_ucred->cr_ruid;
		ep->e_pcred.p_svuid = p->p_ucred->cr_svuid;
		ep->e_pcred.p_rgid = p->p_ucred->cr_rgid;
		ep->e_pcred.p_svgid = p->p_ucred->cr_svgid;

		/* A fake historical *kauth_cred_t */
		ep->e_ucred.cr_ref = p->p_ucred->cr_ref;
		ep->e_ucred.cr_uid = kauth_cred_getuid(p->p_ucred);
		ep->e_ucred.cr_ngroups = p->p_ucred->cr_ngroups;
		bcopy(p->p_ucred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));
	}
	if (p->p_stat == SIDL || p->p_stat == SZOMB) {
		ep->e_vm.vm_tsize = 0;
		ep->e_vm.vm_dsize = 0;
		ep->e_vm.vm_ssize = 0;
	}
	ep->e_vm.vm_rssize = 0;

	if ((p->p_flag & P_CONTROLT) && (sessionp) &&
	    (tp = sessionp->s_ttyp)) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
		ep->e_tsess = CAST_USER_ADDR_T(tp->t_session);
	}
	if (SESS_LEADER(p))
		ep->e_flag |= EPROC_SLEADER;
	if (p->p_wmesg)
		strncpy(ep->e_wmesg, p->p_wmesg, WMESGLEN);
	ep->e_xsize = ep->e_xrssize = 0;
	ep->e_xccount = ep->e_xswrss = 0;
}
/*
 * Fill in an extern_proc structure for the specified process.
 */
static void
fill_externproc(p, exp)
	register struct proc *p;
	register struct extern_proc *exp;
{
	exp->p_forw = exp->p_back = NULL;
	exp->p_starttime = p->p_stats->p_start;
	exp->p_vmspace = NULL;
	exp->p_sigacts = p->p_sigacts;
	exp->p_flag = p->p_flag;
	exp->p_stat = p->p_stat;
	exp->p_pid = p->p_pid;
	exp->p_oppid = p->p_oppid;
	exp->p_dupfd = p->p_dupfd;
	exp->user_stack = CAST_DOWN(caddr_t, p->user_stack);
	exp->exit_thread = p->exit_thread;
	exp->p_debugger = p->p_debugger;
	exp->sigwait = p->sigwait;
	exp->p_estcpu = p->p_estcpu;
	exp->p_cpticks = p->p_cpticks;
	exp->p_pctcpu = p->p_pctcpu;
	exp->p_wchan = p->p_wchan;
	exp->p_wmesg = p->p_wmesg;
	exp->p_swtime = p->p_swtime;
	exp->p_slptime = p->p_slptime;
	bcopy(&p->p_realtimer, &exp->p_realtimer, sizeof(struct itimerval));
	bcopy(&p->p_rtime, &exp->p_rtime, sizeof(struct timeval));
	exp->p_uticks = p->p_uticks;
	exp->p_sticks = p->p_sticks;
	exp->p_iticks = p->p_iticks;
	exp->p_traceflag = p->p_traceflag;
	exp->p_tracep = p->p_tracep;
	exp->p_siglist = 0;	/* No longer relevant */
	exp->p_textvp = p->p_textvp;
	exp->p_holdcnt = 0;
	exp->p_sigmask = 0;	/* no longer available */
	exp->p_sigignore = p->p_sigignore;
	exp->p_sigcatch = p->p_sigcatch;
	exp->p_priority = p->p_priority;
	exp->p_usrpri = p->p_usrpri;
	exp->p_nice = p->p_nice;
	bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
	exp->p_comm[MAXCOMLEN] = '\0';
	exp->p_pgrp = p->p_pgrp;
	exp->p_xstat = p->p_xstat;
	exp->p_acflag = p->p_acflag;
	exp->p_ru = p->p_ru;	/* XXX may be NULL */
}
/*
 * Fill in an LP64 version of extern_proc structure for the specified process.
 */
static void
fill_user_externproc(register struct proc *p, register struct user_extern_proc *exp)
{
	exp->p_forw = exp->p_back = USER_ADDR_NULL;
	exp->p_starttime.tv_sec = p->p_stats->p_start.tv_sec;
	exp->p_starttime.tv_usec = p->p_stats->p_start.tv_usec;
	exp->p_vmspace = USER_ADDR_NULL;
	exp->p_sigacts = CAST_USER_ADDR_T(p->p_sigacts);
	exp->p_flag = p->p_flag;
	exp->p_stat = p->p_stat;
	exp->p_pid = p->p_pid;
	exp->p_oppid = p->p_oppid;
	exp->p_dupfd = p->p_dupfd;
	exp->user_stack = p->user_stack;
	exp->exit_thread = CAST_USER_ADDR_T(p->exit_thread);
	exp->p_debugger = p->p_debugger;
	exp->sigwait = p->sigwait;
	exp->p_estcpu = p->p_estcpu;
	exp->p_cpticks = p->p_cpticks;
	exp->p_pctcpu = p->p_pctcpu;
	exp->p_wchan = CAST_USER_ADDR_T(p->p_wchan);
	exp->p_wmesg = CAST_USER_ADDR_T(p->p_wmesg);
	exp->p_swtime = p->p_swtime;
	exp->p_slptime = p->p_slptime;
	exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
	exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;
	exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
	exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;
	exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
	exp->p_rtime.tv_usec = p->p_rtime.tv_usec;
	exp->p_uticks = p->p_uticks;
	exp->p_sticks = p->p_sticks;
	exp->p_iticks = p->p_iticks;
	exp->p_traceflag = p->p_traceflag;
	exp->p_tracep = CAST_USER_ADDR_T(p->p_tracep);
	exp->p_siglist = 0;	/* No longer relevant */
	exp->p_textvp = CAST_USER_ADDR_T(p->p_textvp);
	exp->p_holdcnt = 0;
	exp->p_sigmask = 0;	/* no longer available */
	exp->p_sigignore = p->p_sigignore;
	exp->p_sigcatch = p->p_sigcatch;
	exp->p_priority = p->p_priority;
	exp->p_usrpri = p->p_usrpri;
	exp->p_nice = p->p_nice;
	bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
	exp->p_comm[MAXCOMLEN] = '\0';
	exp->p_pgrp = CAST_USER_ADDR_T(p->p_pgrp);
	exp->p_addr = USER_ADDR_NULL;
	exp->p_xstat = p->p_xstat;
	exp->p_acflag = p->p_acflag;
	exp->p_ru = CAST_USER_ADDR_T(p->p_ru);	/* XXX may be NULL */
}
static void
fill_proc(p, kp)
	register struct proc *p;
	register struct kinfo_proc *kp;
{
	fill_externproc(p, &kp->kp_proc);
	fill_eproc(p, &kp->kp_eproc);
}

static void
fill_user_proc(register struct proc *p, register struct user_kinfo_proc *kp)
{
	fill_user_externproc(p, &kp->kp_proc);
	fill_user_eproc(p, &kp->kp_eproc);
}
int
kdebug_ops(int *name, u_int namelen, user_addr_t where,
	   size_t *sizep, struct proc *p)
{
	int ret = 0;

	ret = suser(kauth_cred_get(), &p->p_acflag);
	if (ret)
		return (ret);

	switch (name[0]) {
	case KERN_KDSETRTCDEC:
	case KERN_KDGETENTROPY:
		ret = kdbg_control(name, namelen, where, sizep);
		break;
	default:
		ret = ENOTSUP;
		break;
	}
	return (ret);
}
extern int pcsamples_control(int *name, u_int namelen, user_addr_t where,
			     size_t *sizep);

int
pcsamples_ops(int *name, u_int namelen, user_addr_t where,
	      size_t *sizep, struct proc *p)
{
	int ret = 0;

	ret = suser(kauth_cred_get(), &p->p_acflag);
	if (ret)
		return (ret);

	switch (name[0]) {
	case KERN_PCDISABLE:
	case KERN_PCREADBUF:
		ret = pcsamples_control(name, namelen, where, sizep);
		break;
	default:
		ret = ENOTSUP;
		break;
	}
	return (ret);
}
/*
 * Return the top *sizep bytes of the user stack, or the entire area of the
 * user stack down through the saved exec_path, whichever is smaller.
 */
int
sysctl_procargs(int *name, u_int namelen, user_addr_t where,
		size_t *sizep, struct proc *cur_proc)
{
	return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 0);
}

int
sysctl_procargs2(int *name, u_int namelen, user_addr_t where,
		 size_t *sizep, struct proc *cur_proc)
{
	return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 1);
}
static int
sysctl_procargsx(int *name, __unused u_int namelen, user_addr_t where,
		 size_t *sizep, struct proc *cur_proc, int argc_yes)
{
	struct proc *p;
	int buflen = where != USER_ADDR_NULL ? *sizep : 0;
	int error = 0;
	struct vm_map *proc_map;
	struct task *task;
	vm_map_copy_t	tmp;
	user_addr_t	arg_addr;
	size_t		arg_size;
	caddr_t data;
	int size;
	vm_offset_t	copy_start, copy_end;
	kern_return_t ret;

	if (argc_yes)
		buflen -= sizeof(int);	/* reserve first word to return argc */

	/* we only care about buflen when where (oldp from sysctl) is not NULL. */
	/* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
	/* is not NULL then the caller wants us to return the length needed to */
	/* hold the data we would return */
	if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
		return (EINVAL);
	}
	arg_size = buflen;

	/*
	 *	Lookup process by pid
	 */
	p = pfind(name[0]);
	if (p == NULL)
		return (EINVAL);

	/*
	 *	Copy the top N bytes of the stack.
	 *	On all machines we have so far, the stack grows
	 *	downwards.
	 *
	 *	If the user expects no more than N bytes of
	 *	argument list, use that as a guess for the
	 *	size.
	 */
	if (where == USER_ADDR_NULL) {
		/* caller only wants to know length of proc args data */
		size = p->p_argslen;
		if (argc_yes) {
			size += sizeof(int);
		} else {
			/*
			 * old PROCARGS will return the executable's path and plus some
			 * extra space for work alignment and data tags
			 */
			size += PATH_MAX + (6 * sizeof(int));
		}
		size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
		*sizep = size;
		return (0);
	}

	if ((kauth_cred_getuid(p->p_ucred) != kauth_cred_getuid(kauth_cred_get()))
	    && suser(kauth_cred_get(), &cur_proc->p_acflag))
		return (EINVAL);

	if ((u_int)arg_size > p->p_argslen)
		arg_size = round_page(p->p_argslen);

	arg_addr = p->user_stack - arg_size;

	/*
	 *	Before we can block (any VM code), make another
	 *	reference to the map to keep it alive.  We do
	 *	that by getting a reference on the task itself.
	 */
	task = p->task;
	if (task == NULL)
		return (EINVAL);

	/*
	 * Once we have a task reference we can convert that into a
	 * map reference, which we will use in the calls below.  The
	 * task/process may change its map after we take this reference
	 * (see execve), but the worst that will happen then is a return
	 * of stale info (which is always a possibility).
	 */
	task_reference(task);
	proc_map = get_task_map_reference(task);
	task_deallocate(task);
	if (proc_map == NULL)
		return (EINVAL);

	ret = kmem_alloc(kernel_map, &copy_start, round_page(arg_size));
	if (ret != KERN_SUCCESS) {
		vm_map_deallocate(proc_map);
		return (ENOMEM);
	}

	copy_end = round_page(copy_start + arg_size);

	if( vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
			  (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
		vm_map_deallocate(proc_map);
		kmem_free(kernel_map, copy_start,
			  round_page(arg_size));
		return (EIO);
	}

	/*
	 *	Now that we've done the copyin from the process'
	 *	map, we can release the reference to it.
	 */
	vm_map_deallocate(proc_map);

	if( vm_map_copy_overwrite(kernel_map,
				  (vm_map_address_t)copy_start,
				  tmp, FALSE) != KERN_SUCCESS) {
		kmem_free(kernel_map, copy_start,
			  round_page(arg_size));
		return (EIO);
	}

	if (arg_size > p->p_argslen) {
		data = (caddr_t) (copy_end - p->p_argslen);
		size = p->p_argslen;
	} else {
		data = (caddr_t) (copy_end - arg_size);
		size = arg_size;
	}

	if (argc_yes) {
		/* Put processes argc as the first word in the copyout buffer */
		suword(where, p->p_argc);
		error = copyout(data, (where + sizeof(int)), size);
		size += sizeof(int);
	} else {
		error = copyout(data, where, size);

		/*
		 * Make the old PROCARGS work to return the executable's path
		 * But, only if there is enough space in the provided buffer
		 *
		 * on entry: data [possibily] points to the beginning of the path
		 *
		 * Note: we keep all pointers&sizes aligned to word boundries
		 */
		if ( (! error) && (buflen > 0 && (u_int)buflen > p->p_argslen) )
		{
			int binPath_sz, alignedBinPath_sz = 0;
			int extraSpaceNeeded, addThis;
			user_addr_t placeHere;
			char * str = (char *) data;
			int max_len = size;

			/* Some apps are really bad about messing up their stacks
			   So, we have to be extra careful about getting the length
			   of the executing binary.  If we encounter an error, we bail.
			 */

			/* Limit ourselves to PATH_MAX paths */
			if ( max_len > PATH_MAX ) max_len = PATH_MAX;

			binPath_sz = 0;

			while ( (binPath_sz < max_len-1) && (*str++ != 0) )
				binPath_sz++;

			/* If we have a NUL terminator, copy it, too */
			if (binPath_sz < max_len-1) binPath_sz += 1;

			/* Pre-Flight the space requirements */

			/* Account for the padding that fills out binPath to the next word */
			alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;

			placeHere = where + size;

			/* Account for the bytes needed to keep placeHere word aligned */
			addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;

			/* Add up all the space that is needed */
			extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));

			/* is there room to tack on argv[0]? */
			if ( (buflen & ~(sizeof(int)-1)) >= ( p->p_argslen + extraSpaceNeeded ))
			{
				placeHere += addThis;
				suword(placeHere, 0);
				placeHere += sizeof(int);
				suword(placeHere, 0xBFFF0000);
				placeHere += sizeof(int);
				suword(placeHere, 0);
				placeHere += sizeof(int);
				error = copyout(data, placeHere, binPath_sz);
				if ( ! error )
				{
					placeHere += binPath_sz;
					suword(placeHere, 0);
					size += extraSpaceNeeded;
				}
			}
		}
	}

	if (copy_start != (vm_offset_t) 0) {
		kmem_free(kernel_map, copy_start, copy_end - copy_start);
	}
	if (error)
		return (error);

	if (where != USER_ADDR_NULL)
		*sizep = size;
	return (0);
}
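/*
 * Illustrative userspace sketch (not part of the kernel build): KERN_PROCARGS2
 * returns argc as the first word, followed by the saved exec path and the
 * argument/environment strings copied from the top of the target's stack.
 *
 *	int mib[3] = { CTL_KERN, KERN_PROCARGS2, pid };
 *	size_t len = 0;
 *
 *	sysctl(mib, 3, NULL, &len, NULL, 0);	// ask for the needed length
 *	char *buf = malloc(len);
 *	sysctl(mib, 3, buf, &len, NULL, 0);
 *	int argc;
 *	memcpy(&argc, buf, sizeof(argc));	// then parse NUL-separated strings
 */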
/*
 * Validate parameters and get old / set new parameters
 * for max number of concurrent aio requests.  Makes sure
 * the system wide limit is greater than the per process
 * limit.
 */
int
sysctl_aiomax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen)
{
	int	error = 0;
	int	new_value;

	if ( oldp && *oldlenp < sizeof(int) )
		return (ENOMEM);
	if ( newp && newlen != sizeof(int) )
		return (EINVAL);

	*oldlenp = sizeof(int);
	if ( oldp )
		error = copyout( &aio_max_requests, oldp, sizeof(int) );
	if ( error == 0 && newp )
		error = copyin( newp, &new_value, sizeof(int) );
	if ( error == 0 && newp ) {
		if ( new_value >= aio_max_requests_per_process )
			aio_max_requests = new_value;
		else
			error = EINVAL;
	}
	return( error );

} /* sysctl_aiomax */
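/*
 * Illustrative userspace sketch (not part of the kernel build): KERN_AIOMAX
 * follows the same int get/set convention; the guard above refuses any new
 * system-wide value below the per-process limit.
 *
 *	int mib[2] = { CTL_KERN, KERN_AIOMAX };
 *	int cur;
 *	size_t len = sizeof(cur);
 *	sysctl(mib, 2, &cur, &len, NULL, 0);
 */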
/*
 * Validate parameters and get old / set new parameters
 * for max number of concurrent aio requests per process.
 * Makes sure per process limit is less than the system wide
 * limit.
 */
int
sysctl_aioprocmax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen)
{
	int	error = 0;
	int	new_value;

	if ( oldp && *oldlenp < sizeof(int) )
		return (ENOMEM);
	if ( newp && newlen != sizeof(int) )
		return (EINVAL);

	*oldlenp = sizeof(int);
	if ( oldp )
		error = copyout( &aio_max_requests_per_process, oldp, sizeof(int) );
	if ( error == 0 && newp )
		error = copyin( newp, &new_value, sizeof(int) );
	if ( error == 0 && newp ) {
		if ( new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX )
			aio_max_requests_per_process = new_value;
		else
			error = EINVAL;
	}
	return( error );

} /* sysctl_aioprocmax */
/*
 * Validate parameters and get old / set new parameters
 * for max number of async IO worker threads.
 * We only allow an increase in the number of worker threads.
 */
int
sysctl_aiothreads(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen)
{
	int	error = 0;
	int	new_value;

	if ( oldp && *oldlenp < sizeof(int) )
		return (ENOMEM);
	if ( newp && newlen != sizeof(int) )
		return (EINVAL);

	*oldlenp = sizeof(int);
	if ( oldp )
		error = copyout( &aio_worker_threads, oldp, sizeof(int) );
	if ( error == 0 && newp )
		error = copyin( newp, &new_value, sizeof(int) );
	if ( error == 0 && newp ) {
		if (new_value > aio_worker_threads) {
			_aio_create_worker_threads( (new_value - aio_worker_threads) );
			aio_worker_threads = new_value;
		}
		else
			error = EINVAL;
	}
	return( error );

} /* sysctl_aiothreads */
/*
 * Validate parameters and get old / set new parameters
 * for max number of processes per UID.
 * Makes sure per UID limit is less than the system wide limit.
 */
int
sysctl_maxprocperuid(user_addr_t oldp, size_t *oldlenp,
		     user_addr_t newp, size_t newlen)
{
	int	error = 0;
	int	new_value;

	if ( oldp != USER_ADDR_NULL && *oldlenp < sizeof(int) )
		return (ENOMEM);
	if ( newp != USER_ADDR_NULL && newlen != sizeof(int) )
		return (EINVAL);

	*oldlenp = sizeof(int);
	if ( oldp != USER_ADDR_NULL )
		error = copyout( &maxprocperuid, oldp, sizeof(int) );
	if ( error == 0 && newp != USER_ADDR_NULL ) {
		error = copyin( newp, &new_value, sizeof(int) );
		if ( error == 0 ) {
			AUDIT_ARG(value, new_value);
			if ( new_value <= maxproc && new_value > 0 )
				maxprocperuid = new_value;
			else
				error = EINVAL;
		}
	}
	return( error );

} /* sysctl_maxprocperuid */
/*
 * Validate parameters and get old / set new parameters
 * for max number of files per process.
 * Makes sure per process limit is less than the system-wide limit.
 */
int
sysctl_maxfilesperproc(user_addr_t oldp, size_t *oldlenp,
		       user_addr_t newp, size_t newlen)
{
	int	error = 0;
	int	new_value;

	if ( oldp != USER_ADDR_NULL && *oldlenp < sizeof(int) )
		return (ENOMEM);
	if ( newp != USER_ADDR_NULL && newlen != sizeof(int) )
		return (EINVAL);

	*oldlenp = sizeof(int);
	if ( oldp != USER_ADDR_NULL )
		error = copyout( &maxfilesperproc, oldp, sizeof(int) );
	if ( error == 0 && newp != USER_ADDR_NULL ) {
		error = copyin( newp, &new_value, sizeof(int) );
		if ( error == 0 ) {
			AUDIT_ARG(value, new_value);
			if ( new_value < maxfiles && new_value > 0 )
				maxfilesperproc = new_value;
			else
				error = EINVAL;
		}
	}
	return( error );

} /* sysctl_maxfilesperproc */
/*
 * Validate parameters and get old / set new parameters
 * for the system-wide limit on the max number of processes.
 * Makes sure the system-wide limit is less than the configured hard
 * limit set at kernel compilation.
 */
int
sysctl_maxproc(user_addr_t oldp, size_t *oldlenp,
	       user_addr_t newp, size_t newlen)
{
	int	error = 0;
	int	new_value;

	if ( oldp != USER_ADDR_NULL && *oldlenp < sizeof(int) )
		return (ENOMEM);
	if ( newp != USER_ADDR_NULL && newlen != sizeof(int) )
		return (EINVAL);

	*oldlenp = sizeof(int);
	if ( oldp != USER_ADDR_NULL )
		error = copyout( &maxproc, oldp, sizeof(int) );
	if ( error == 0 && newp != USER_ADDR_NULL ) {
		error = copyin( newp, &new_value, sizeof(int) );
		if ( error == 0 ) {
			AUDIT_ARG(value, new_value);
			if ( new_value <= hard_maxproc && new_value > 0 )
				maxproc = new_value;
			else
				error = EINVAL;
		}
	}
	return( error );

} /* sysctl_maxproc */
static int
sysctl_sysctl_exec_affinity SYSCTL_HANDLER_ARGS
{
	struct proc *cur_proc = req->p;
	int error;

	if (req->oldptr != USER_ADDR_NULL) {
		cpu_type_t oldcputype = (cur_proc->p_flag & P_AFFINITY) ? CPU_TYPE_POWERPC : CPU_TYPE_I386;
		if ((error = SYSCTL_OUT(req, &oldcputype, sizeof(oldcputype))))
			return error;
	}

	if (req->newptr != USER_ADDR_NULL) {
		cpu_type_t newcputype;
		if ((error = SYSCTL_IN(req, &newcputype, sizeof(newcputype))))
			return error;
		if (newcputype == CPU_TYPE_I386)
			cur_proc->p_flag &= ~P_AFFINITY;
		else if (newcputype == CPU_TYPE_POWERPC)
			cur_proc->p_flag |= P_AFFINITY;
		else
			return (EINVAL);
	}

	return 0;
}
SYSCTL_PROC(_sysctl, OID_AUTO, proc_exec_affinity, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, 0, 0, sysctl_sysctl_exec_affinity, "I", "proc_exec_affinity");
static int
fetch_process_cputype(
	struct proc *cur_proc,
	int *name,
	u_int namelen,
	cpu_type_t *cputype)
{
	struct proc *p = NULL;
	cpu_type_t ret = 0;

	if (namelen == 0)
		p = cur_proc;
	else if (namelen == 1) {
		p = pfind(name[0]);
		if (p == NULL)
			return (EINVAL);
		if ((kauth_cred_getuid(p->p_ucred) != kauth_cred_getuid(kauth_cred_get()))
		    && suser(kauth_cred_get(), &cur_proc->p_acflag))
			return (EPERM);
	} else
		return (EINVAL);

	if (p->p_flag & P_TRANSLATED) {
		ret = CPU_TYPE_POWERPC;
	} else {
		ret = cpu_type() & ~CPU_ARCH_MASK;
		if (IS_64BIT_PROCESS(p))
			ret |= CPU_ARCH_ABI64;
	}

	*cputype = ret;
	return (0);
}
static int
sysctl_sysctl_native SYSCTL_HANDLER_ARGS
{
	int error;
	int res = 1;
	cpu_type_t proc_cputype = 0;

	if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
		return error;
	if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
		res = 0;
	return SYSCTL_OUT(req, &res, sizeof(res));
}
SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_native, "I", "proc_native");
static int
sysctl_sysctl_cputype SYSCTL_HANDLER_ARGS
{
	int error;
	cpu_type_t proc_cputype = 0;

	if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
		return error;
	return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
}
SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_cputype, "I", "proc_cputype");
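/*
 * Illustrative userspace sketch (not part of the kernel build): the OID_AUTO
 * nodes registered above are addressed by name rather than by a fixed MIB
 * number, presumably as "sysctl.proc_native" and "sysctl.proc_cputype" under
 * the _sysctl parent:
 *
 *	int native;
 *	size_t len = sizeof(native);
 *
 *	if (sysctlbyname("sysctl.proc_native", &native, &len, NULL, 0) == 0)
 *		printf("running natively: %d\n", native);
 */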