/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *     The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Mike Karels at Berkeley Software Design, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *     This product includes software developed by the University of
 *     California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *     @(#)kern_sysctl.c	8.4 (Berkeley) 4/14/94
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/file_internal.h>
#include <sys/vnode_internal.h>
#include <sys/unistd.h>
#include <sys/ioctl.h>
#include <sys/namei.h>
#include <sys/disklabel.h>
#include <sys/sysctl.h>
#include <sys/aio_kern.h>

#include <bsm/audit_kernel.h>

#include <mach/machine.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <kern/task.h>
#include <kern/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <mach/host_info.h>

extern vm_map_t bsd_pageable_map;

#include <sys/mount_internal.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>

#include <IOKit/IOPlatformExpert.h>
#include <pexpert/pexpert.h>

#include <machine/machine_routines.h>
#include <machine/exec.h>

#include <vm/vm_protos.h>

#include <i386/cpuid.h>

sysctlfn kern_sysctl;
sysctlfn debug_sysctl;
extern sysctlfn vm_sysctl;
extern sysctlfn vfs_sysctl;
extern sysctlfn net_sysctl;
extern sysctlfn cpu_sysctl;
extern int aio_max_requests;
extern int aio_max_requests_per_process;
extern int aio_worker_threads;
extern int maxfilesperproc;
extern int lowpri_IO_window_msecs;
extern int lowpri_IO_delay_msecs;
extern int nx_enabled;
static void
fill_eproc(struct proc *p, struct eproc *ep);
static void
fill_externproc(struct proc *p, struct extern_proc *exp);
static void
fill_user_eproc(struct proc *p, struct user_eproc *ep);
static void
fill_user_proc(struct proc *p, struct user_kinfo_proc *kp);
static void
fill_user_externproc(struct proc *p, struct user_extern_proc *exp);

extern int
kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep);
int
kdebug_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
           struct proc *p);
int
pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
              struct proc *p);
__private_extern__ kern_return_t
reset_vmobjectcache(unsigned int val1, unsigned int val2);
extern int
resize_namecache(u_int newsize);
static int
sysctl_aiomax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen);
static int
sysctl_aioprocmax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen);
static int
sysctl_aiothreads(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen);
extern int
sysctl_clockrate(user_addr_t where, size_t *sizep);
static int
sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep);
extern int
sysctl_doprof(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
              user_addr_t newp, size_t newlen);
static int
sysctl_file(user_addr_t where, size_t *sizep);
static void
fill_proc(struct proc *p, struct kinfo_proc *kp);
static int
sysctl_maxfilesperproc(user_addr_t oldp, size_t *oldlenp,
                       user_addr_t newp, size_t newlen);
static int
sysctl_maxprocperuid(user_addr_t oldp, size_t *oldlenp,
                     user_addr_t newp, size_t newlen);
static int
sysctl_maxproc(user_addr_t oldp, size_t *oldlenp,
               user_addr_t newp, size_t newlen);
static int
sysctl_procargs(int *name, u_int namelen, user_addr_t where,
                size_t *sizep, struct proc *cur_proc);
static int
sysctl_procargs2(int *name, u_int namelen, user_addr_t where, size_t *sizep,
                 struct proc *cur_proc);
static int
sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
                 struct proc *cur_proc, int argc_yes);
int
sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
              size_t newlen, void *sp, int len);
extern int
sysctl_vnode(user_addr_t where, size_t *sizep);

/*
 * temporary location for vm_sysctl.  This should be machine independent.
 */

extern uint32_t mach_factor[3];
static void
loadavg32to64(struct loadavg *la32, struct user_loadavg *la64)
{
    la64->ldavg[0] = la32->ldavg[0];
    la64->ldavg[1] = la32->ldavg[1];
    la64->ldavg[2] = la32->ldavg[2];
    la64->fscale   = (user_long_t)la32->fscale;
}

int
vm_sysctl(int *name, __unused u_int namelen, user_addr_t oldp, size_t *oldlenp,
          user_addr_t newp, size_t newlen, __unused struct proc *p)
{
    struct loadavg loadinfo;

    switch (name[0]) {
    case VM_LOADAVG:
        if (proc_is64bit(p)) {
            struct user_loadavg loadinfo64;
            loadavg32to64(&averunnable, &loadinfo64);
            return (sysctl_struct(oldp, oldlenp, newp, newlen,
                &loadinfo64, sizeof(loadinfo64)));
        } else {
            return (sysctl_struct(oldp, oldlenp, newp, newlen,
                &averunnable, sizeof(struct loadavg)));
        }
    case VM_MACHFACTOR:
        loadinfo.ldavg[0] = mach_factor[0];
        loadinfo.ldavg[1] = mach_factor[1];
        loadinfo.ldavg[2] = mach_factor[2];
        loadinfo.fscale = LSCALE;
        if (proc_is64bit(p)) {
            struct user_loadavg loadinfo64;
            loadavg32to64(&loadinfo, &loadinfo64);
            return (sysctl_struct(oldp, oldlenp, newp, newlen,
                &loadinfo64, sizeof(loadinfo64)));
        } else {
            return (sysctl_struct(oldp, oldlenp, newp, newlen,
                &loadinfo, sizeof(struct loadavg)));
        }
    case VM_SWAPUSAGE: {
        int              error;
        uint64_t         swap_total;
        uint64_t         swap_avail;
        uint32_t         swap_pagesize;
        boolean_t        swap_encrypted;
        struct xsw_usage xsu;

        error = macx_swapinfo(&swap_total,
                              &swap_avail,
                              &swap_pagesize,
                              &swap_encrypted);
        if (error)
            return error;

        xsu.xsu_total = swap_total;
        xsu.xsu_avail = swap_avail;
        xsu.xsu_used = swap_total - swap_avail;
        xsu.xsu_pagesize = swap_pagesize;
        xsu.xsu_encrypted = swap_encrypted;
        return sysctl_struct(oldp, oldlenp, newp, newlen,
                             &xsu, sizeof (struct xsw_usage));
    }
    default:
        return (ENOTSUP);
    }
    /* NOTREACHED */
}
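/*
 * Illustrative user-space sketch (not part of this kernel source): the
 * VM_SWAPUSAGE branch above is what answers a plain sysctl(2) request for
 * {CTL_VM, VM_SWAPUSAGE}.  The program below is an assumed usage example
 * built only on the public <sys/sysctl.h> interface.
 *
 *    #include <sys/types.h>
 *    #include <sys/sysctl.h>
 *    #include <stdio.h>
 *
 *    int
 *    main(void)
 *    {
 *        int mib[2] = { CTL_VM, VM_SWAPUSAGE };
 *        struct xsw_usage xsu;
 *        size_t len = sizeof(xsu);
 *
 *        if (sysctl(mib, 2, &xsu, &len, NULL, 0) == -1) {
 *            perror("sysctl vm.swapusage");
 *            return 1;
 *        }
 *        printf("swap total %llu, used %llu, page size %u%s\n",
 *            (unsigned long long)xsu.xsu_total,
 *            (unsigned long long)xsu.xsu_used,
 *            xsu.xsu_pagesize,
 *            xsu.xsu_encrypted ? " (encrypted)" : "");
 *        return 0;
 *    }
 */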
/* sysctl() old-buffer locking state */
static struct sysctl_lock {
    int sl_lock;
    int sl_want;
} memlock;

int
__sysctl(struct proc *p, struct __sysctl_args *uap, __unused register_t *retval)
{
    int error, dolock = 1;
    size_t savelen = 0, oldlen = 0, newlen;
    sysctlfn *fnp = NULL;
    int name[CTL_MAXNAME];
    int error1;
    int i;

    /*
     * all top-level sysctl names are non-terminal
     */
    if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)
        return (EINVAL);
    error = copyin(uap->name, &name[0], uap->namelen * sizeof(int));
    if (error)
        return (error);

    AUDIT_ARG(ctlname, name, uap->namelen);

    if (proc_is64bit(p)) {
        /* uap->newlen is a size_t value which grows to 64 bits
         * when coming from a 64-bit process.  since it's doubtful we'll
         * have a sysctl newp buffer greater than 4GB we shrink it to size_t
         */
        newlen = CAST_DOWN(size_t, uap->newlen);
    } else {
        newlen = uap->newlen;
    }

    /* CTL_UNSPEC is used to get oid to AUTO_OID */
    if (uap->new != USER_ADDR_NULL
        && ((name[0] == CTL_KERN
            && !(name[1] == KERN_IPC || name[1] == KERN_PANICINFO || name[1] == KERN_PROCDELAYTERM ||
                 name[1] == KERN_PROC_LOW_PRI_IO || name[1] == KERN_PROCNAME || name[1] == KERN_THALTSTACK))
            || (name[0] == CTL_HW)
            || (name[0] == CTL_VM)
            || (name[0] == CTL_VFS))
        && (error = suser(kauth_cred_get(), &p->p_acflag)))
        return (error);

    switch (name[0]) {
    case CTL_KERN:
        fnp = kern_sysctl;
        if ((name[1] != KERN_VNODE) && (name[1] != KERN_FILE)
            && (name[1] != KERN_PROC))
            dolock = 0;
        break;
    case CTL_VM:
        fnp = vm_sysctl;
        break;
    case CTL_VFS:
        fnp = vfs_sysctl;
        break;
    case CTL_DEBUG:
        fnp = debug_sysctl;
        break;
    default:
        fnp = NULL;
    }

    if (uap->oldlenp != USER_ADDR_NULL) {
        uint64_t oldlen64 = fuulong(uap->oldlenp);

        oldlen = CAST_DOWN(size_t, oldlen64);
        /*
         * If more than 4G, clamp to 4G - useracc() below will catch
         * with an EFAULT, if it's actually necessary.
         */
        if (oldlen64 > 0x00000000ffffffffULL)
            oldlen = 0xffffffffUL;
    }

    if (uap->old != USER_ADDR_NULL) {
        if (!useracc(uap->old, (user_size_t)oldlen, B_WRITE))
            return (EFAULT);

        /* The pc sampling mechanism does not need to take this lock */
        if ((name[1] != KERN_PCSAMPLES) &&
            (!((name[1] == KERN_KDEBUG) && (name[2] == KERN_KDGETENTROPY)))) {
            while (memlock.sl_lock) {
                memlock.sl_want = 1;
                sleep((caddr_t)&memlock, PRIBIO+1);
            }
            memlock.sl_lock = 1;
        }

        if (dolock && oldlen &&
            (error = vslock(uap->old, (user_size_t)oldlen))) {
            if ((name[1] != KERN_PCSAMPLES) &&
                (! ((name[1] == KERN_KDEBUG) && (name[2] == KERN_KDGETENTROPY)))) {
                memlock.sl_lock = 0;
                if (memlock.sl_want) {
                    memlock.sl_want = 0;
                    wakeup((caddr_t)&memlock);
                }
            }
            return (error);
        }
        savelen = oldlen;
    }

    if (fnp) {
        error = (*fnp)(name + 1, uap->namelen - 1, uap->old,
                       &oldlen, uap->new, newlen, p);
    } else {
        error = ENOTSUP;
    }

    if ( (name[0] != CTL_VFS) && (error == ENOTSUP)) {
        size_t tmp = oldlen;
        error = userland_sysctl(p, name, uap->namelen, uap->old, &tmp,
                                1, uap->new, newlen, &oldlen);
    }

    if (uap->old != USER_ADDR_NULL) {
        if (dolock && savelen) {
            error1 = vsunlock(uap->old, (user_size_t)savelen, B_WRITE);
            if (!error && error1)
                error = error1;
        }
        if (name[1] != KERN_PCSAMPLES) {
            memlock.sl_lock = 0;
            if (memlock.sl_want) {
                memlock.sl_want = 0;
                wakeup((caddr_t)&memlock);
            }
        }
    }
    if ((error) && (error != ENOMEM))
        return (error);

    if (uap->oldlenp != USER_ADDR_NULL) {
        i = suulong(uap->oldlenp, oldlen);
        if (i)
            return (i);
    }

    return (error);
}
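/*
 * Illustrative user-space sketch (not part of this kernel source): the
 * handler above is reached through the sysctl(2) library call.  The
 * two-step "query the size, then fetch" idiom relies on the in/out
 * oldlenp convention implemented here; names and headers used below are
 * the standard public ones.
 *
 *    #include <sys/types.h>
 *    #include <sys/sysctl.h>
 *    #include <stdio.h>
 *    #include <stdlib.h>
 *
 *    int
 *    main(void)
 *    {
 *        int mib[2] = { CTL_KERN, KERN_OSTYPE };
 *        size_t len = 0;
 *        char *buf;
 *
 *        if (sysctl(mib, 2, NULL, &len, NULL, 0) == -1)
 *            return 1;                        // ask for required size
 *        if ((buf = malloc(len)) == NULL)
 *            return 1;
 *        if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
 *            return 1;                        // fetch the string
 *        printf("kern.ostype = %s\n", buf);
 *        free(buf);
 *        return 0;
 *    }
 */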
/*
 * Attributes stored in the kernel.
 */
__private_extern__ char corefilename[MAXPATHLEN+1];
__private_extern__ int do_coredump;
__private_extern__ int sugid_coredump;

int securelevel = -1;
static int
sysctl_affinity(
    int *name,
    u_int namelen,
    user_addr_t oldBuf,
    size_t *oldSize,
    user_addr_t newBuf,
    __unused size_t newSize,
    struct proc *cur_proc)
{
    if (namelen < 1)
        return (ENOTSUP);

    if (name[0] == 0 && 1 == namelen) {
        return sysctl_rdint(oldBuf, oldSize, newBuf,
            (cur_proc->p_flag & P_AFFINITY) ? 1 : 0);
    } else if (name[0] == 1 && 2 == namelen) {
        if (name[1] == 0) {
            cur_proc->p_flag &= ~P_AFFINITY;
        } else {
            cur_proc->p_flag |= P_AFFINITY;
        }
        return 0;
    }
    return (ENOTSUP);
}

static int
sysctl_translate(
    int *name,
    u_int namelen,
    user_addr_t oldBuf,
    size_t *oldSize,
    user_addr_t newBuf,
    __unused size_t newSize,
    struct proc *cur_proc)
{
    struct proc *p;

    if (namelen != 1)
        return (ENOTSUP);

    p = pfind(name[0]);
    if (p == NULL)
        return (EINVAL);

    if ((kauth_cred_getuid(p->p_ucred) != kauth_cred_getuid(kauth_cred_get()))
        && suser(kauth_cred_get(), &cur_proc->p_acflag))
        return (EPERM);

    return sysctl_rdint(oldBuf, oldSize, newBuf,
        (p->p_flag & P_TRANSLATED) ? 1 : 0);
}
int
set_archhandler(struct proc *p, int arch)
{
    int error;
    struct nameidata nd;
    struct vnode_attr va;
    struct vfs_context context;
    char *archhandler;

    switch (arch) {
    case CPU_TYPE_POWERPC:
        archhandler = exec_archhandler_ppc.path;
        break;
    default:
        return (EBADARCH);
    }

    context.vc_proc = p;
    context.vc_ucred = kauth_cred_get();

    NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE32,
           CAST_USER_ADDR_T(archhandler), &context);
    error = namei(&nd);
    if (error)
        return (error);
    nameidone(&nd);

    /* Check mount point */
    if ((nd.ni_vp->v_mount->mnt_flag & MNT_NOEXEC) ||
        (nd.ni_vp->v_type != VREG)) {
        vnode_put(nd.ni_vp);
        return (EACCES);
    }

    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_fsid);
    VATTR_WANTED(&va, va_fileid);
    error = vnode_getattr(nd.ni_vp, &va, &context);
    if (error) {
        vnode_put(nd.ni_vp);
        return (error);
    }
    vnode_put(nd.ni_vp);

    exec_archhandler_ppc.fsid = va.va_fsid;
    exec_archhandler_ppc.fileid = (u_long)va.va_fileid;
    return 0;
}

static int
sysctl_exec_archhandler_ppc(
    __unused int *name,
    __unused u_int namelen,
    user_addr_t oldBuf,
    size_t *oldSize,
    user_addr_t newBuf,
    size_t newSize,
    struct proc *p)
{
    int error;
    size_t len;
    struct vnode_attr va;
    char handler[sizeof(exec_archhandler_ppc.path)];
    struct vfs_context context;

    context.vc_proc = p;
    context.vc_ucred = kauth_cred_get();

    if (oldSize) {
        len = strlen(exec_archhandler_ppc.path) + 1;
        if (oldBuf && *oldSize < len)
            return (ENOMEM);
        *oldSize = len - 1;
        if (oldBuf) {
            error = copyout(exec_archhandler_ppc.path, oldBuf, len);
            if (error)
                return (error);
        }
    }
    if (newBuf) {
        error = suser(context.vc_ucred, &p->p_acflag);
        if (error)
            return (error);
        if (newSize >= sizeof(exec_archhandler_ppc.path))
            return (ENAMETOOLONG);
        error = copyin(newBuf, handler, newSize);
        if (error)
            return (error);
        handler[newSize] = 0;
        strcpy(exec_archhandler_ppc.path, handler);
        error = set_archhandler(p, CPU_TYPE_POWERPC);
        if (error)
            return (error);
    }
    return 0;
}
SYSCTL_NODE(_kern, KERN_EXEC, exec, CTLFLAG_RD, 0, "");

SYSCTL_NODE(_kern_exec, OID_AUTO, archhandler, CTLFLAG_RD, 0, "");

SYSCTL_STRING(_kern_exec_archhandler, OID_AUTO, powerpc, CTLFLAG_RD,
              exec_archhandler_ppc.path, 0, "");

extern int get_kernel_symfile( struct proc *, char **);
__private_extern__ int
sysctl_dopanicinfo(int *, u_int, user_addr_t, size_t *, user_addr_t,
                   size_t, struct proc *);
/*
 * kernel related system variables.
 */
int
kern_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
            user_addr_t newp, size_t newlen, struct proc *p)
{
    int error, level, inthostid, tmp;
    unsigned int oldval = 0;
    char *str;

    /* all sysctl names not listed below are terminal at this level */
    if (namelen != 1
        && !(name[0] == KERN_PROC
            || name[0] == KERN_PROF
            || name[0] == KERN_KDEBUG
            || name[0] == KERN_PROCARGS
            || name[0] == KERN_PROCARGS2
            || name[0] == KERN_PCSAMPLES
            || name[0] == KERN_IPC
            || name[0] == KERN_SYSV
            || name[0] == KERN_AFFINITY
            || name[0] == KERN_TRANSLATE
            || name[0] == KERN_EXEC
            || name[0] == KERN_PANICINFO
            || name[0] == KERN_POSIX
            || name[0] == KERN_TFP))
        return (ENOTDIR);		/* overloaded */

    switch (name[0]) {
    case KERN_OSTYPE:
        return (sysctl_rdstring(oldp, oldlenp, newp, ostype));
    case KERN_OSRELEASE:
        return (sysctl_rdstring(oldp, oldlenp, newp, osrelease));
    case KERN_OSREV:
        return (sysctl_rdint(oldp, oldlenp, newp, BSD));
    case KERN_VERSION:
        return (sysctl_rdstring(oldp, oldlenp, newp, version));
    case KERN_MAXVNODES:
        oldval = desiredvnodes;
        error = sysctl_int(oldp, oldlenp, newp,
                newlen, &desiredvnodes);
        reset_vmobjectcache(oldval, desiredvnodes);
        resize_namecache(desiredvnodes);
        return (error);
    case KERN_MAXPROC:
        return (sysctl_maxproc(oldp, oldlenp, newp, newlen));
    case KERN_MAXFILES:
        return (sysctl_int(oldp, oldlenp, newp, newlen, &maxfiles));
    case KERN_MAXPROCPERUID:
        return( sysctl_maxprocperuid( oldp, oldlenp, newp, newlen ) );
    case KERN_MAXFILESPERPROC:
        return( sysctl_maxfilesperproc( oldp, oldlenp, newp, newlen ) );
    case KERN_ARGMAX:
        return (sysctl_rdint(oldp, oldlenp, newp, ARG_MAX));
    case KERN_SECURELVL:
        level = securelevel;
        if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &level)) ||
            newp == USER_ADDR_NULL)
            return (error);
        if (level < securelevel && p->p_pid != 1)
            return (EPERM);
        securelevel = level;
        return (0);
    case KERN_HOSTNAME:
        error = sysctl_trstring(oldp, oldlenp, newp, newlen,
            hostname, sizeof(hostname));
        if (newp && !error)
            hostnamelen = newlen;
        return (error);
    case KERN_DOMAINNAME:
        error = sysctl_string(oldp, oldlenp, newp, newlen,
            domainname, sizeof(domainname));
        if (newp && !error)
            domainnamelen = newlen;
        return (error);
    case KERN_HOSTID:
        inthostid = hostid;  /* XXX assumes sizeof long <= sizeof int */
        error = sysctl_int(oldp, oldlenp, newp, newlen, &inthostid);
        hostid = inthostid;
        return (error);
    case KERN_CLOCKRATE:
        return (sysctl_clockrate(oldp, oldlenp));
    case KERN_BOOTTIME:
    {
        struct timeval t;

        t.tv_sec = boottime_sec();
        t.tv_usec = 0;

        return (sysctl_rdstruct(oldp, oldlenp, newp, &t,
            sizeof(struct timeval)));
    }
    case KERN_VNODE:
        return (sysctl_vnode(oldp, oldlenp));
    case KERN_PROC:
        return (sysctl_doproc(name + 1, namelen - 1, oldp, oldlenp));
    case KERN_FILE:
        return (sysctl_file(oldp, oldlenp));
    case KERN_PROF:
        return (sysctl_doprof(name + 1, namelen - 1, oldp, oldlenp,
            newp, newlen));
    case KERN_POSIX1:
        return (sysctl_rdint(oldp, oldlenp, newp, _POSIX_VERSION));
    case KERN_NGROUPS:
        return (sysctl_rdint(oldp, oldlenp, newp, NGROUPS_MAX));
    case KERN_JOB_CONTROL:
        return (sysctl_rdint(oldp, oldlenp, newp, 1));
    case KERN_SAVED_IDS:
#ifdef _POSIX_SAVED_IDS
        return (sysctl_rdint(oldp, oldlenp, newp, 1));
#else
        return (sysctl_rdint(oldp, oldlenp, newp, 0));
#endif
    case KERN_KDEBUG:
        return (kdebug_ops(name + 1, namelen - 1, oldp, oldlenp, p));
    case KERN_PCSAMPLES:
        return (pcsamples_ops(name + 1, namelen - 1, oldp, oldlenp, p));
    case KERN_PROCARGS:
        /* new one as it does not use kinfo_proc */
        return (sysctl_procargs(name + 1, namelen - 1, oldp, oldlenp, p));
    case KERN_PROCARGS2:
        /* new one as it does not use kinfo_proc */
        return (sysctl_procargs2(name + 1, namelen - 1, oldp, oldlenp, p));
    case KERN_SYMFILE:
        error = get_kernel_symfile( p, &str );
        if (error)
            return (error);
        return (sysctl_rdstring(oldp, oldlenp, newp, str));
    case KERN_NETBOOT:
        return (sysctl_rdint(oldp, oldlenp, newp, netboot_root()));
    case KERN_PANICINFO:
        return(sysctl_dopanicinfo(name + 1, namelen - 1, oldp, oldlenp,
            newp, newlen, p));
    case KERN_AFFINITY:
        return sysctl_affinity(name+1, namelen-1, oldp, oldlenp,
            newp, newlen, p);
    case KERN_TRANSLATE:
        return sysctl_translate(name+1, namelen-1, oldp, oldlenp, newp,
            newlen, p);
    case KERN_CLASSICHANDLER:
        return sysctl_exec_archhandler_ppc(name+1, namelen-1, oldp,
            oldlenp, newp, newlen, p);
    case KERN_AIOMAX:
        return( sysctl_aiomax( oldp, oldlenp, newp, newlen ) );
    case KERN_AIOPROCMAX:
        return( sysctl_aioprocmax( oldp, oldlenp, newp, newlen ) );
    case KERN_AIOTHREADS:
        return( sysctl_aiothreads( oldp, oldlenp, newp, newlen ) );
    case KERN_USRSTACK:
        return (sysctl_rdint(oldp, oldlenp, newp, (uintptr_t)p->user_stack));
    case KERN_USRSTACK64:
        return (sysctl_rdquad(oldp, oldlenp, newp, p->user_stack));
    case KERN_COREFILE:
        error = sysctl_string(oldp, oldlenp, newp, newlen,
            corefilename, sizeof(corefilename));
        return (error);
    case KERN_COREDUMP:
        tmp = do_coredump;
        error = sysctl_int(oldp, oldlenp, newp, newlen, &do_coredump);
        if (!error && ((do_coredump < 0) || (do_coredump > 1))) {
            do_coredump = tmp;
            error = EINVAL;
        }
        return (error);
    case KERN_SUGID_COREDUMP:
        tmp = sugid_coredump;
        error = sysctl_int(oldp, oldlenp, newp, newlen, &sugid_coredump);
        if (!error && ((sugid_coredump < 0) || (sugid_coredump > 1))) {
            sugid_coredump = tmp;
            error = EINVAL;
        }
        return (error);
    case KERN_PROCDELAYTERM:
    {
        int old_value, new_value;

        error = 0;
        if (oldp && *oldlenp < sizeof(int))
            return (ENOMEM);
        if ( newp && newlen != sizeof(int) )
            return (EINVAL);
        *oldlenp = sizeof(int);
        old_value = (p->p_lflag & P_LDELAYTERM)? 1: 0;
        if (oldp && (error = copyout( &old_value, oldp, sizeof(int))))
            return (error);
        if (error == 0 && newp )
            error = copyin( newp, &new_value, sizeof(int) );
        if (error == 0 && newp) {
            if (new_value)
                p->p_lflag |= P_LDELAYTERM;
            else
                p->p_lflag &= ~P_LDELAYTERM;
        }
        return (error);
    }
    case KERN_PROC_LOW_PRI_IO:
    {
        int old_value, new_value;

        error = 0;
        if (oldp && *oldlenp < sizeof(int))
            return (ENOMEM);
        if ( newp && newlen != sizeof(int) )
            return (EINVAL);
        *oldlenp = sizeof(int);

        old_value = (p->p_lflag & P_LLOW_PRI_IO)? 0x01: 0;
        if (p->p_lflag & P_LBACKGROUND_IO)
            old_value |= 0x02;

        if (oldp && (error = copyout( &old_value, oldp, sizeof(int))))
            return (error);
        if (error == 0 && newp )
            error = copyin( newp, &new_value, sizeof(int) );
        if (error == 0 && newp) {
            if (new_value & 0x01)
                p->p_lflag |= P_LLOW_PRI_IO;
            else if (new_value & 0x02)
                p->p_lflag |= P_LBACKGROUND_IO;
            else if (new_value == 0)
                p->p_lflag &= ~(P_LLOW_PRI_IO | P_LBACKGROUND_IO);
        }
        return (error);
    }
    case KERN_LOW_PRI_WINDOW:
    {
        int old_value, new_value;

        error = 0;
        if (oldp && *oldlenp < sizeof(old_value) )
            return (ENOMEM);
        if ( newp && newlen != sizeof(new_value) )
            return (EINVAL);
        *oldlenp = sizeof(old_value);

        old_value = lowpri_IO_window_msecs;

        if (oldp && (error = copyout( &old_value, oldp, *oldlenp)))
            return (error);
        if (error == 0 && newp )
            error = copyin( newp, &new_value, sizeof(newlen) );
        if (error == 0 && newp) {
            lowpri_IO_window_msecs = new_value;
        }
        return (error);
    }
    case KERN_LOW_PRI_DELAY:
    {
        int old_value, new_value;

        error = 0;
        if (oldp && *oldlenp < sizeof(old_value) )
            return (ENOMEM);
        if ( newp && newlen != sizeof(new_value) )
            return (EINVAL);
        *oldlenp = sizeof(old_value);

        old_value = lowpri_IO_delay_msecs;

        if (oldp && (error = copyout( &old_value, oldp, *oldlenp)))
            return (error);
        if (error == 0 && newp )
            error = copyin( newp, &new_value, sizeof(newlen) );
        if (error == 0 && newp) {
            lowpri_IO_delay_msecs = new_value;
        }
        return (error);
    }
    case KERN_NX_PROTECTION:
    {
        int old_value, new_value;

        error = 0;
        if (oldp && *oldlenp < sizeof(old_value) )
            return (ENOMEM);
        if ( newp && newlen != sizeof(new_value) )
            return (EINVAL);
        *oldlenp = sizeof(old_value);

        old_value = nx_enabled;

        if (oldp && (error = copyout( &old_value, oldp, *oldlenp)))
            return (error);
        /*
         * Only allow setting if NX is supported on the chip
         */
        if (cpuid_extfeatures() & CPUID_EXTFEATURE_XD) {
            if (error == 0 && newp)
                error = copyin(newp, &new_value,
                    sizeof(newlen));
            if (error == 0 && newp)
                nx_enabled = new_value;
        }
        return (error);
    }
    case KERN_SHREG_PRIVATIZABLE:
        /* this kernel does implement shared_region_make_private_np() */
        return (sysctl_rdint(oldp, oldlenp, newp, 1));
    case KERN_PROCNAME:
        error = sysctl_trstring(oldp, oldlenp, newp, newlen,
            &p->p_name[0], (2*MAXCOMLEN+1));
        return (error);
    case KERN_THALTSTACK:
    {
        int old_value, new_value;

        error = 0;
        if (oldp && *oldlenp < sizeof(int))
            return (ENOMEM);
        if ( newp && newlen != sizeof(int) )
            return (EINVAL);
        *oldlenp = sizeof(int);
        old_value = (p->p_lflag & P_LTHSIGSTACK)? 1: 0;
        if (oldp && (error = copyout( &old_value, oldp, sizeof(int))))
            return (error);
        if (error == 0 && newp )
            error = copyin( newp, &new_value, sizeof(int) );
        if (error == 0 && newp) {
            if (new_value) {
                /* we cannot switch midstream if inuse */
                if ((p->p_sigacts->ps_flags & SAS_ALTSTACK) == SAS_ALTSTACK)
                    return (EPERM);
                p->p_lflag |= P_LTHSIGSTACK;
            } else {
                /* we cannot switch midstream */
                if ((p->p_lflag & P_LTHSIGSTACK) == P_LTHSIGSTACK)
                    return (EPERM);
                p->p_lflag &= ~P_LTHSIGSTACK;
            }
        }
        return (error);
    }
    default:
        return (ENOTSUP);
    }
    /* NOTREACHED */
}
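/*
 * Illustrative user-space sketch (not part of this kernel source): the
 * integer-valued names above (here KERN_MAXFILESPERPROC) follow the usual
 * read pattern from user space; writing most of them requires root, per
 * the suser() check in __sysctl().
 *
 *    #include <sys/types.h>
 *    #include <sys/sysctl.h>
 *    #include <stdio.h>
 *
 *    int
 *    main(void)
 *    {
 *        int mib[2] = { CTL_KERN, KERN_MAXFILESPERPROC };
 *        int val;
 *        size_t len = sizeof(val);
 *
 *        if (sysctl(mib, 2, &val, &len, NULL, 0) == -1) {
 *            perror("sysctl kern.maxfilesperproc");
 *            return 1;
 *        }
 *        printf("kern.maxfilesperproc = %d\n", val);
 *        return 0;
 *    }
 */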
/*
 * Debugging related system variables.
 */
#if DIAGNOSTIC
extern
#endif /* DIAGNOSTIC */
struct ctldebug debug0, debug1;
struct ctldebug debug2, debug3, debug4;
struct ctldebug debug5, debug6, debug7, debug8, debug9;
struct ctldebug debug10, debug11, debug12, debug13, debug14;
struct ctldebug debug15, debug16, debug17, debug18, debug19;
static struct ctldebug *debugvars[CTL_DEBUG_MAXID] = {
    &debug0, &debug1, &debug2, &debug3, &debug4,
    &debug5, &debug6, &debug7, &debug8, &debug9,
    &debug10, &debug11, &debug12, &debug13, &debug14,
    &debug15, &debug16, &debug17, &debug18, &debug19,
};

int
debug_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
             user_addr_t newp, size_t newlen, struct proc *p)
{
    struct ctldebug *cdp;

    /* all sysctl names at this level are name and field */
    if (namelen != 2)
        return (ENOTDIR);		/* overloaded */
    cdp = debugvars[name[0]];
    if (cdp->debugname == 0)
        return (ENOTSUP);
    switch (name[1]) {
    case CTL_DEBUG_NAME:
        return (sysctl_rdstring(oldp, oldlenp, newp, cdp->debugname));
    case CTL_DEBUG_VALUE:
        return (sysctl_int(oldp, oldlenp, newp, newlen, cdp->debugvar));
    default:
        return (ENOTSUP);
    }
    /* NOTREACHED */
}
/*
 * Validate parameters and get old / set new parameters
 * for an integer-valued sysctl function.
 */
int
sysctl_int(user_addr_t oldp, size_t *oldlenp,
           user_addr_t newp, size_t newlen, int *valp)
{
    int error = 0;

    if (oldp != USER_ADDR_NULL && oldlenp == NULL)
        return (EFAULT);
    if (oldp && *oldlenp < sizeof(int))
        return (ENOMEM);
    if (newp && newlen != sizeof(int))
        return (EINVAL);
    *oldlenp = sizeof(int);
    if (oldp)
        error = copyout(valp, oldp, sizeof(int));
    if (error == 0 && newp) {
        error = copyin(newp, valp, sizeof(int));
        AUDIT_ARG(value, *valp);
    }
    return (error);
}

/*
 * As above, but read-only.
 */
int
sysctl_rdint(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, int val)
{
    int error = 0;

    if (oldp != USER_ADDR_NULL && oldlenp == NULL)
        return (EFAULT);
    if (oldp && *oldlenp < sizeof(int))
        return (ENOMEM);
    if (newp)
        return (EPERM);
    *oldlenp = sizeof(int);
    if (oldp)
        error = copyout((caddr_t)&val, oldp, sizeof(int));
    return (error);
}

/*
 * Validate parameters and get old / set new parameters
 * for a quad (64-bit) valued sysctl function.
 */
int
sysctl_quad(user_addr_t oldp, size_t *oldlenp,
            user_addr_t newp, size_t newlen, quad_t *valp)
{
    int error = 0;

    if (oldp != USER_ADDR_NULL && oldlenp == NULL)
        return (EFAULT);
    if (oldp && *oldlenp < sizeof(quad_t))
        return (ENOMEM);
    if (newp && newlen != sizeof(quad_t))
        return (EINVAL);
    *oldlenp = sizeof(quad_t);
    if (oldp)
        error = copyout(valp, oldp, sizeof(quad_t));
    if (error == 0 && newp)
        error = copyin(newp, valp, sizeof(quad_t));
    return (error);
}
/*
 * As above, but read-only.
 */
int
sysctl_rdquad(oldp, oldlenp, newp, val)
    void *oldp;
    size_t *oldlenp;
    void *newp;
    quad_t val;
{
    int error = 0;

    if (oldp != USER_ADDR_NULL && oldlenp == NULL)
        return (EFAULT);
    if (oldp && *oldlenp < sizeof(quad_t))
        return (ENOMEM);
    if (newp)
        return (EPERM);
    *oldlenp = sizeof(quad_t);
    if (oldp)
        error = copyout((caddr_t)&val, CAST_USER_ADDR_T(oldp), sizeof(quad_t));
    return (error);
}
/*
 * Validate parameters and get old / set new parameters
 * for a string-valued sysctl function.  Unlike sysctl_string, if you
 * give it a too small (but larger than 0 bytes) buffer, instead of
 * returning ENOMEM, it truncates the returned string to the buffer
 * size.  This preserves the semantics of some library routines
 * implemented via sysctl, which truncate their returned data, rather
 * than simply returning an error.  The returned string is always NUL
 * terminated.
 */
int
sysctl_trstring(user_addr_t oldp, size_t *oldlenp,
                user_addr_t newp, size_t newlen, char *str, int maxlen)
{
    int len, copylen, error = 0;

    if (oldp != USER_ADDR_NULL && oldlenp == NULL)
        return (EFAULT);
    copylen = len = strlen(str) + 1;
    if (oldp && (len < 0 || *oldlenp < 1))
        return (ENOMEM);
    if (oldp && (*oldlenp < (size_t)len))
        copylen = *oldlenp + 1;
    if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
        return (EINVAL);
    *oldlenp = copylen - 1; /* deal with NULL strings correctly */
    if (oldp) {
        error = copyout(str, oldp, copylen);
        if (!error) {
            unsigned char c = 0;

            /* NUL terminate */
            oldp += *oldlenp;
            error = copyout((void *)&c, oldp, sizeof(char));
        }
    }
    if (error == 0 && newp) {
        error = copyin(newp, str, newlen);
        str[newlen] = 0;
        AUDIT_ARG(text, (char *)str);
    }
    return (error);
}
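/*
 * Illustrative user-space sketch (not part of this kernel source):
 * KERN_HOSTNAME is served through sysctl_trstring() above, so if the
 * caller's buffer were too small the result would come back truncated
 * but NUL terminated rather than failing with ENOMEM.  The snippet below
 * is an assumed, ordinary read of that name.
 *
 *    #include <sys/types.h>
 *    #include <sys/sysctl.h>
 *    #include <stdio.h>
 *
 *    int
 *    main(void)
 *    {
 *        int mib[2] = { CTL_KERN, KERN_HOSTNAME };
 *        char buf[256];
 *        size_t len = sizeof(buf);
 *
 *        if (sysctl(mib, 2, buf, &len, NULL, 0) == -1) {
 *            perror("sysctl kern.hostname");
 *            return 1;
 *        }
 *        printf("hostname: %s\n", buf);
 *        return 0;
 *    }
 */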
/*
 * Validate parameters and get old / set new parameters
 * for a string-valued sysctl function.
 */
int
sysctl_string(user_addr_t oldp, size_t *oldlenp,
              user_addr_t newp, size_t newlen, char *str, int maxlen)
{
    int len, error = 0;

    if (oldp != USER_ADDR_NULL && oldlenp == NULL)
        return (EFAULT);
    len = strlen(str) + 1;
    if (oldp && (len < 0 || *oldlenp < (size_t)len))
        return (ENOMEM);
    if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
        return (EINVAL);
    *oldlenp = len - 1; /* deal with NULL strings correctly */
    if (oldp)
        error = copyout(str, oldp, len);
    if (error == 0 && newp) {
        error = copyin(newp, str, newlen);
        str[newlen] = 0;
        AUDIT_ARG(text, (char *)str);
    }
    return (error);
}

/*
 * As above, but read-only.
 */
int
sysctl_rdstring(user_addr_t oldp, size_t *oldlenp,
                user_addr_t newp, char *str)
{
    int len, error = 0;

    if (oldp != USER_ADDR_NULL && oldlenp == NULL)
        return (EFAULT);
    len = strlen(str) + 1;
    if (oldp && *oldlenp < (size_t)len)
        return (ENOMEM);
    if (newp)
        return (EPERM);
    *oldlenp = len;
    if (oldp)
        error = copyout(str, oldp, len);
    return (error);
}

/*
 * Validate parameters and get old / set new parameters
 * for a structure oriented sysctl function.
 */
int
sysctl_struct(user_addr_t oldp, size_t *oldlenp,
              user_addr_t newp, size_t newlen, void *sp, int len)
{
    int error = 0;

    if (oldp != USER_ADDR_NULL && oldlenp == NULL)
        return (EFAULT);
    if (oldp && (len < 0 || *oldlenp < (size_t)len))
        return (ENOMEM);
    if (newp && (len < 0 || newlen > (size_t)len))
        return (EINVAL);
    if (oldp) {
        *oldlenp = len;
        error = copyout(sp, oldp, len);
    }
    if (error == 0 && newp)
        error = copyin(newp, sp, len);
    return (error);
}

/*
 * Validate parameters and get old parameters
 * for a structure oriented sysctl function.
 */
int
sysctl_rdstruct(user_addr_t oldp, size_t *oldlenp,
                user_addr_t newp, void *sp, int len)
{
    int error = 0;

    if (oldp != USER_ADDR_NULL && oldlenp == NULL)
        return (EFAULT);
    if (oldp && (len < 0 || *oldlenp < (size_t)len))
        return (ENOMEM);
    if (newp)
        return (EPERM);
    *oldlenp = len;
    if (oldp)
        error = copyout(sp, oldp, len);
    return (error);
}
/*
 * Get file structures.
 */
static int
sysctl_file(user_addr_t where, size_t *sizep)
{
    int buflen, error;
    struct fileglob *fg;
    user_addr_t start = where;
    struct extern_file nef;

    buflen = *sizep;
    if (where == USER_ADDR_NULL) {
        /*
         * overestimate by 10 files
         */
        *sizep = sizeof(filehead) + (nfiles + 10) * sizeof(struct extern_file);
        return (0);
    }

    /*
     * first copyout filehead
     */
    if (buflen < 0 || (size_t)buflen < sizeof(filehead)) {
        *sizep = 0;
        return (0);
    }
    error = copyout((caddr_t)&filehead, where, sizeof(filehead));
    if (error)
        return (error);
    buflen -= sizeof(filehead);
    where += sizeof(filehead);

    /*
     * followed by an array of file structures
     */
    for (fg = filehead.lh_first; fg != 0; fg = fg->f_list.le_next) {
        if (buflen < 0 || (size_t)buflen < sizeof(struct extern_file)) {
            *sizep = where - start;
            return (ENOMEM);
        }
        nef.f_list.le_next = (struct extern_file *)fg->f_list.le_next;
        nef.f_list.le_prev = (struct extern_file **)fg->f_list.le_prev;
        nef.f_flag = (fg->fg_flag & FMASK);
        nef.f_type = fg->fg_type;
        nef.f_count = fg->fg_count;
        nef.f_msgcount = fg->fg_msgcount;
        nef.f_cred = fg->fg_cred;
        nef.f_ops = fg->fg_ops;
        nef.f_offset = fg->fg_offset;
        nef.f_data = fg->fg_data;
        error = copyout((caddr_t)&nef, where, sizeof (struct extern_file));
        if (error)
            return (error);
        buflen -= sizeof(struct extern_file);
        where += sizeof(struct extern_file);
    }
    *sizep = where - start;
    return (0);
}
/*
 * try overestimating by 5 procs
 */
#define KERN_PROCSLOP	(5 * sizeof (struct kinfo_proc))

static int
sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep)
{
    register struct proc *p;
    user_addr_t dp = where;
    size_t needed = 0;
    int buflen = where != USER_ADDR_NULL ? *sizep : 0;
    int doingzomb = 0;
    int error = 0;
    boolean_t is_64_bit = FALSE;
    struct kinfo_proc       kproc;
    struct user_kinfo_proc  user_kproc;
    int sizeof_kproc = 0;
    caddr_t kprocp = NULL;

    if (namelen != 2 && !(namelen == 1 && name[0] == KERN_PROC_ALL))
        return (EINVAL);
    p = allproc.lh_first;
    is_64_bit = proc_is64bit(current_proc());
    if (is_64_bit) {
        sizeof_kproc = sizeof(user_kproc);
        kprocp = (caddr_t) &user_kproc;
    } else {
        sizeof_kproc = sizeof(kproc);
        kprocp = (caddr_t) &kproc;
    }
again:
    for (; p != 0; p = p->p_list.le_next) {
        /*
         * Skip embryonic processes.
         */
        if (p->p_stat == SIDL)
            continue;
        /*
         * TODO - make more efficient (see notes below).
         */
        switch (name[0]) {

        case KERN_PROC_PID:
            /* could do this with just a lookup */
            if (p->p_pid != (pid_t)name[1])
                continue;
            break;

        case KERN_PROC_PGRP:
            /* could do this by traversing pgrp */
            if (p->p_pgrp->pg_id != (pid_t)name[1])
                continue;
            break;

        case KERN_PROC_TTY:
            if ((p->p_flag & P_CONTROLT) == 0 ||
                (p->p_session == NULL) ||
                p->p_session->s_ttyp == NULL ||
                p->p_session->s_ttyp->t_dev != (dev_t)name[1])
                continue;
            break;

        case KERN_PROC_UID:
            if ((p->p_ucred == NULL) ||
                (kauth_cred_getuid(p->p_ucred) != (uid_t)name[1]))
                continue;
            break;

        case KERN_PROC_RUID:
            if ((p->p_ucred == NULL) ||
                (p->p_ucred->cr_ruid != (uid_t)name[1]))
                continue;
            break;
        }
        if (buflen >= sizeof_kproc) {
            bzero(kprocp, sizeof_kproc);
            if (is_64_bit) {
                fill_user_proc(p, (struct user_kinfo_proc *) kprocp);
            } else {
                fill_proc(p, (struct kinfo_proc *) kprocp);
            }
            error = copyout(kprocp, dp, sizeof_kproc);
            if (error)
                return (error);
            dp += sizeof_kproc;
            buflen -= sizeof_kproc;
        }
        needed += sizeof_kproc;
    }
    if (doingzomb == 0) {
        p = zombproc.lh_first;
        doingzomb++;
        goto again;
    }
    if (where != USER_ADDR_NULL) {
        *sizep = dp - where;
        if (needed > *sizep)
            return (ENOMEM);
    } else {
        needed += KERN_PROCSLOP;
        *sizep = needed;
    }
    return (0);
}
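/*
 * Illustrative user-space sketch (not part of this kernel source):
 * sysctl_doproc() above backs the familiar KERN_PROC queries.  The
 * program below is an assumed example fetching the kinfo_proc record for
 * the calling process.
 *
 *    #include <sys/types.h>
 *    #include <sys/sysctl.h>
 *    #include <stdio.h>
 *    #include <unistd.h>
 *
 *    int
 *    main(void)
 *    {
 *        int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid() };
 *        struct kinfo_proc kp;
 *        size_t len = sizeof(kp);
 *
 *        if (sysctl(mib, 4, &kp, &len, NULL, 0) == -1 || len == 0) {
 *            perror("sysctl kern.proc.pid");
 *            return 1;
 *        }
 *        printf("pid %d comm %s\n", kp.kp_proc.p_pid, kp.kp_proc.p_comm);
 *        return 0;
 *    }
 */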
/*
 * Fill in an eproc structure for the specified process.
 */
static void
fill_eproc(p, ep)
    register struct proc *p;
    register struct eproc *ep;
{
    register struct tty *tp;

    ep->e_paddr = p;
    if (p->p_pgrp) {
        ep->e_sess = p->p_pgrp->pg_session;
        ep->e_pgid = p->p_pgrp->pg_id;
        ep->e_jobc = p->p_pgrp->pg_jobc;
        if (ep->e_sess && ep->e_sess->s_ttyvp)
            ep->e_flag = EPROC_CTTY;
    } else {
        ep->e_sess = (struct session *)0;
        ep->e_pgid = 0;
        ep->e_jobc = 0;
    }
    ep->e_ppid = (p->p_pptr) ? p->p_pptr->p_pid : 0;
    /* Pre-zero the fake historical pcred */
    bzero(&ep->e_pcred, sizeof(struct _pcred));
    if (p->p_ucred) {
        /* XXX not ref-counted */

        /* A fake historical pcred */
        ep->e_pcred.p_ruid = p->p_ucred->cr_ruid;
        ep->e_pcred.p_svuid = p->p_ucred->cr_svuid;
        ep->e_pcred.p_rgid = p->p_ucred->cr_rgid;
        ep->e_pcred.p_svgid = p->p_ucred->cr_svgid;

        /* A fake historical *kauth_cred_t */
        ep->e_ucred.cr_ref = p->p_ucred->cr_ref;
        ep->e_ucred.cr_uid = kauth_cred_getuid(p->p_ucred);
        ep->e_ucred.cr_ngroups = p->p_ucred->cr_ngroups;
        bcopy(p->p_ucred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));
    }
    if (p->p_stat == SIDL || p->p_stat == SZOMB) {
        ep->e_vm.vm_tsize = 0;
        ep->e_vm.vm_dsize = 0;
        ep->e_vm.vm_ssize = 0;
    }
    ep->e_vm.vm_rssize = 0;

    if ((p->p_flag & P_CONTROLT) && (ep->e_sess) &&
        (tp = ep->e_sess->s_ttyp)) {
        ep->e_tdev = tp->t_dev;
        ep->e_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
        ep->e_tsess = tp->t_session;
    } else
        ep->e_tdev = NODEV;

    if (SESS_LEADER(p))
        ep->e_flag |= EPROC_SLEADER;
    if (p->p_wmesg)
        strncpy(ep->e_wmesg, p->p_wmesg, WMESGLEN);
    ep->e_xsize = ep->e_xrssize = 0;
    ep->e_xccount = ep->e_xswrss = 0;
}

/*
 * Fill in an LP64 version of eproc structure for the specified process.
 */
static void
fill_user_eproc(register struct proc *p, register struct user_eproc *ep)
{
    register struct tty *tp;
    struct session *sessionp = NULL;

    ep->e_paddr = CAST_USER_ADDR_T(p);
    if (p->p_pgrp) {
        sessionp = p->p_pgrp->pg_session;
        ep->e_sess = CAST_USER_ADDR_T(sessionp);
        ep->e_pgid = p->p_pgrp->pg_id;
        ep->e_jobc = p->p_pgrp->pg_jobc;
        if (sessionp->s_ttyvp)
            ep->e_flag = EPROC_CTTY;
    } else {
        ep->e_sess = USER_ADDR_NULL;
        ep->e_pgid = 0;
        ep->e_jobc = 0;
    }
    ep->e_ppid = (p->p_pptr) ? p->p_pptr->p_pid : 0;
    /* Pre-zero the fake historical pcred */
    bzero(&ep->e_pcred, sizeof(ep->e_pcred));
    if (p->p_ucred) {
        /* XXX not ref-counted */

        /* A fake historical pcred */
        ep->e_pcred.p_ruid = p->p_ucred->cr_ruid;
        ep->e_pcred.p_svuid = p->p_ucred->cr_svuid;
        ep->e_pcred.p_rgid = p->p_ucred->cr_rgid;
        ep->e_pcred.p_svgid = p->p_ucred->cr_svgid;

        /* A fake historical *kauth_cred_t */
        ep->e_ucred.cr_ref = p->p_ucred->cr_ref;
        ep->e_ucred.cr_uid = kauth_cred_getuid(p->p_ucred);
        ep->e_ucred.cr_ngroups = p->p_ucred->cr_ngroups;
        bcopy(p->p_ucred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));
    }
    if (p->p_stat == SIDL || p->p_stat == SZOMB) {
        ep->e_vm.vm_tsize = 0;
        ep->e_vm.vm_dsize = 0;
        ep->e_vm.vm_ssize = 0;
    }
    ep->e_vm.vm_rssize = 0;

    if ((p->p_flag & P_CONTROLT) && (sessionp) &&
        (tp = sessionp->s_ttyp)) {
        ep->e_tdev = tp->t_dev;
        ep->e_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
        ep->e_tsess = CAST_USER_ADDR_T(tp->t_session);
    } else
        ep->e_tdev = NODEV;

    if (SESS_LEADER(p))
        ep->e_flag |= EPROC_SLEADER;
    if (p->p_wmesg)
        strncpy(ep->e_wmesg, p->p_wmesg, WMESGLEN);
    ep->e_xsize = ep->e_xrssize = 0;
    ep->e_xccount = ep->e_xswrss = 0;
}
/*
 * Fill in an eproc structure for the specified process.
 */
static void
fill_externproc(p, exp)
    register struct proc *p;
    register struct extern_proc *exp;
{
    exp->p_forw = exp->p_back = NULL;
    exp->p_starttime = p->p_stats->p_start;
    exp->p_vmspace = NULL;
    exp->p_sigacts = p->p_sigacts;
    exp->p_flag = p->p_flag;
    exp->p_stat = p->p_stat;
    exp->p_pid = p->p_pid;
    exp->p_oppid = p->p_oppid;
    exp->p_dupfd = p->p_dupfd;
    exp->user_stack = CAST_DOWN(caddr_t, p->user_stack);
    exp->exit_thread = p->exit_thread;
    exp->p_debugger = p->p_debugger;
    exp->sigwait = p->sigwait;
    exp->p_estcpu = p->p_estcpu;
    exp->p_cpticks = p->p_cpticks;
    exp->p_pctcpu = p->p_pctcpu;
    exp->p_wchan = p->p_wchan;
    exp->p_wmesg = p->p_wmesg;
    exp->p_swtime = p->p_swtime;
    exp->p_slptime = p->p_slptime;
    bcopy(&p->p_realtimer, &exp->p_realtimer, sizeof(struct itimerval));
    bcopy(&p->p_rtime, &exp->p_rtime, sizeof(struct timeval));
    exp->p_uticks = p->p_uticks;
    exp->p_sticks = p->p_sticks;
    exp->p_iticks = p->p_iticks;
    exp->p_traceflag = p->p_traceflag;
    exp->p_tracep = p->p_tracep;
    exp->p_siglist = 0;		/* No longer relevant */
    exp->p_textvp = p->p_textvp;
    exp->p_holdcnt = 0;
    exp->p_sigmask = 0;		/* no longer available */
    exp->p_sigignore = p->p_sigignore;
    exp->p_sigcatch = p->p_sigcatch;
    exp->p_priority = p->p_priority;
    exp->p_usrpri = p->p_usrpri;
    exp->p_nice = p->p_nice;
    bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
    exp->p_comm[MAXCOMLEN] = '\0';
    exp->p_pgrp = p->p_pgrp;
    exp->p_xstat = p->p_xstat;
    exp->p_acflag = p->p_acflag;
    exp->p_ru = p->p_ru;		/* XXX may be NULL */
}

/*
 * Fill in an LP64 version of extern_proc structure for the specified process.
 */
static void
fill_user_externproc(register struct proc *p, register struct user_extern_proc *exp)
{
    exp->p_forw = exp->p_back = USER_ADDR_NULL;
    exp->p_starttime.tv_sec = p->p_stats->p_start.tv_sec;
    exp->p_starttime.tv_usec = p->p_stats->p_start.tv_usec;
    exp->p_vmspace = USER_ADDR_NULL;
    exp->p_sigacts = CAST_USER_ADDR_T(p->p_sigacts);
    exp->p_flag = p->p_flag;
    exp->p_stat = p->p_stat;
    exp->p_pid = p->p_pid;
    exp->p_oppid = p->p_oppid;
    exp->p_dupfd = p->p_dupfd;
    exp->user_stack = p->user_stack;
    exp->exit_thread = CAST_USER_ADDR_T(p->exit_thread);
    exp->p_debugger = p->p_debugger;
    exp->sigwait = p->sigwait;
    exp->p_estcpu = p->p_estcpu;
    exp->p_cpticks = p->p_cpticks;
    exp->p_pctcpu = p->p_pctcpu;
    exp->p_wchan = CAST_USER_ADDR_T(p->p_wchan);
    exp->p_wmesg = CAST_USER_ADDR_T(p->p_wmesg);
    exp->p_swtime = p->p_swtime;
    exp->p_slptime = p->p_slptime;
    exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
    exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;
    exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
    exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;
    exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
    exp->p_rtime.tv_usec = p->p_rtime.tv_usec;
    exp->p_uticks = p->p_uticks;
    exp->p_sticks = p->p_sticks;
    exp->p_iticks = p->p_iticks;
    exp->p_traceflag = p->p_traceflag;
    exp->p_tracep = CAST_USER_ADDR_T(p->p_tracep);
    exp->p_siglist = 0;		/* No longer relevant */
    exp->p_textvp = CAST_USER_ADDR_T(p->p_textvp);
    exp->p_holdcnt = 0;
    exp->p_sigmask = 0;		/* no longer available */
    exp->p_sigignore = p->p_sigignore;
    exp->p_sigcatch = p->p_sigcatch;
    exp->p_priority = p->p_priority;
    exp->p_usrpri = p->p_usrpri;
    exp->p_nice = p->p_nice;
    bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
    exp->p_comm[MAXCOMLEN] = '\0';
    exp->p_pgrp = CAST_USER_ADDR_T(p->p_pgrp);
    exp->p_addr = USER_ADDR_NULL;
    exp->p_xstat = p->p_xstat;
    exp->p_acflag = p->p_acflag;
    exp->p_ru = CAST_USER_ADDR_T(p->p_ru);		/* XXX may be NULL */
}
static void
fill_proc(p, kp)
    register struct proc *p;
    register struct kinfo_proc *kp;
{
    fill_externproc(p, &kp->kp_proc);
    fill_eproc(p, &kp->kp_eproc);
}

static void
fill_user_proc(register struct proc *p, register struct user_kinfo_proc *kp)
{
    fill_user_externproc(p, &kp->kp_proc);
    fill_user_eproc(p, &kp->kp_eproc);
}
int
kdebug_ops(int *name, u_int namelen, user_addr_t where,
           size_t *sizep, struct proc *p)
{
    int ret = 0;

    ret = suser(kauth_cred_get(), &p->p_acflag);
    if (ret)
        return (ret);

    switch (name[0]) {
    case KERN_KDEFLAGS:
    case KERN_KDDFLAGS:
    case KERN_KDENABLE:
    case KERN_KDSETBUF:
    case KERN_KDGETBUF:
    case KERN_KDSETUP:
    case KERN_KDREMOVE:
    case KERN_KDSETREG:
    case KERN_KDGETREG:
    case KERN_KDREADTR:
    case KERN_KDPIDTR:
    case KERN_KDTHRMAP:
    case KERN_KDPIDEX:
    case KERN_KDSETRTCDEC:
    case KERN_KDGETENTROPY:
        ret = kdbg_control(name, namelen, where, sizep);
        break;
    default:
        ret = ENOTSUP;
        break;
    }
    return (ret);
}

extern int pcsamples_control(int *name, u_int namelen, user_addr_t where,
                             size_t *sizep);

int
pcsamples_ops(int *name, u_int namelen, user_addr_t where,
              size_t *sizep, struct proc *p)
{
    int ret = 0;

    ret = suser(kauth_cred_get(), &p->p_acflag);
    if (ret)
        return (ret);

    switch (name[0]) {
    case KERN_PCDISABLE:
    case KERN_PCREADBUF:
        ret = pcsamples_control(name, namelen, where, sizep);
        break;
    default:
        ret = ENOTSUP;
        break;
    }
    return (ret);
}
/*
 * Return the top *sizep bytes of the user stack, or the entire area of the
 * user stack down through the saved exec_path, whichever is smaller.
 */
static int
sysctl_procargs(int *name, u_int namelen, user_addr_t where,
                size_t *sizep, struct proc *cur_proc)
{
    return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 0);
}

static int
sysctl_procargs2(int *name, u_int namelen, user_addr_t where, size_t *sizep,
                 struct proc *cur_proc)
{
    return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 1);
}

static int
sysctl_procargsx(int *name, __unused u_int namelen, user_addr_t where,
                 size_t *sizep, struct proc *cur_proc, int argc_yes)
{
    struct proc *p;
    int buflen = where != USER_ADDR_NULL ? *sizep : 0;
    int error = 0;
    struct vm_map *proc_map;
    struct task *task;
    vm_map_copy_t tmp;
    user_addr_t arg_addr;
    size_t arg_size;
    caddr_t data;
    int size;
    vm_offset_t copy_start, copy_end;
    kern_return_t ret;
    int pid;

    if (argc_yes)
        buflen -= sizeof(int);		/* reserve first word to return argc */

    /* we only care about buflen when where (oldp from sysctl) is not NULL. */
    /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
    /* is not NULL then the caller wants us to return the length needed to */
    /* hold the data we would return */
    if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
        return (EINVAL);
    }
    arg_size = buflen;

    /*
     * Lookup process by pid
     */
    pid = name[0];
    p = pfind(pid);
    if (p == NULL) {
        return (EINVAL);
    }

    /*
     * Copy the top N bytes of the stack.
     * On all machines we have so far, the stack grows
     * downwards.
     *
     * If the user expects no more than N bytes of
     * argument list, use that as a guess for the
     * size.
     */

    if (!p->user_stack) {
        return (EINVAL);
    }

    if (where == USER_ADDR_NULL) {
        /* caller only wants to know length of proc args data */
        if (sizep == NULL)
            return (EFAULT);

        size = p->p_argslen;
        if (argc_yes) {
            size += sizeof(int);
        } else {
            /*
             * old PROCARGS will return the executable's path and plus some
             * extra space for work alignment and data tags
             */
            size += PATH_MAX + (6 * sizeof(int));
        }
        size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
        *sizep = size;
        return (0);
    }

    if ((kauth_cred_getuid(p->p_ucred) != kauth_cred_getuid(kauth_cred_get()))
        && suser(kauth_cred_get(), &cur_proc->p_acflag))
        return (EINVAL);

    if ((u_int)arg_size > p->p_argslen)
        arg_size = round_page(p->p_argslen);

    arg_addr = p->user_stack - arg_size;

    /*
     * Before we can block (any VM code), make another
     * reference to the map to keep it alive.  We do
     * that by getting a reference on the task itself.
     */
    task = p->task;
    if (task == NULL)
        return (EINVAL);

    /*
     * Once we have a task reference we can convert that into a
     * map reference, which we will use in the calls below.  The
     * task/process may change its map after we take this reference
     * (see execve), but the worst that will happen then is a return
     * of stale info (which is always a possibility).
     */
    task_reference(task);
    proc_map = get_task_map_reference(task);
    task_deallocate(task);
    if (proc_map == NULL)
        return (EINVAL);

    ret = kmem_alloc(kernel_map, &copy_start, round_page(arg_size));
    if (ret != KERN_SUCCESS) {
        vm_map_deallocate(proc_map);
        return (ENOMEM);
    }

    copy_end = round_page(copy_start + arg_size);

    if( vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
                      (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
        vm_map_deallocate(proc_map);
        kmem_free(kernel_map, copy_start,
                  round_page(arg_size));
        return (EIO);
    }

    /*
     * Now that we've done the copyin from the process'
     * map, we can release the reference to it.
     */
    vm_map_deallocate(proc_map);

    if( vm_map_copy_overwrite(kernel_map,
                              (vm_map_address_t)copy_start,
                              tmp, FALSE) != KERN_SUCCESS) {
        kmem_free(kernel_map, copy_start,
                  round_page(arg_size));
        return (EIO);
    }

    if (arg_size > p->p_argslen) {
        data = (caddr_t) (copy_end - p->p_argslen);
        size = p->p_argslen;
    } else {
        data = (caddr_t) (copy_end - arg_size);
        size = arg_size;
    }

    if (argc_yes) {
        /* Put the process's argc as the first word in the copyout buffer */
        suword(where, p->p_argc);
        error = copyout(data, (where + sizeof(int)), size);
        size += sizeof(int);
    } else {
        error = copyout(data, where, size);

        /*
         * Make the old PROCARGS work to return the executable's path
         * But, only if there is enough space in the provided buffer
         *
         * on entry: data [possibly] points to the beginning of the path
         *
         * Note: we keep all pointers & sizes aligned to word boundaries
         */
        if ( (! error) && (buflen > 0 && (u_int)buflen > p->p_argslen) )
        {
            int binPath_sz, alignedBinPath_sz = 0;
            int extraSpaceNeeded, addThis;
            user_addr_t placeHere;
            char * str = (char *) data;
            int max_len = size;

            /* Some apps are really bad about messing up their stacks
               So, we have to be extra careful about getting the length
               of the executing binary.  If we encounter an error, we bail.
            */

            /* Limit ourselves to PATH_MAX paths */
            if ( max_len > PATH_MAX ) max_len = PATH_MAX;

            binPath_sz = 0;

            while ( (binPath_sz < max_len-1) && (*str++ != 0) )
                binPath_sz++;

            /* If we have a NUL terminator, copy it, too */
            if (binPath_sz < max_len-1) binPath_sz += 1;

            /* Pre-flight the space requirements */

            /* Account for the padding that fills out binPath to the next word */
            alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;

            placeHere = where + size;

            /* Account for the bytes needed to keep placeHere word aligned */
            addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;

            /* Add up all the space that is needed */
            extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));

            /* is there room to tack on argv[0]? */
            if ( (buflen & ~(sizeof(int)-1)) >= ( p->p_argslen + extraSpaceNeeded ))
            {
                placeHere += addThis;
                suword(placeHere, 0);
                placeHere += sizeof(int);
                suword(placeHere, 0xBFFF0000);
                placeHere += sizeof(int);
                suword(placeHere, 0);
                placeHere += sizeof(int);
                error = copyout(data, placeHere, binPath_sz);
                if ( ! error )
                {
                    placeHere += binPath_sz;
                    suword(placeHere, 0);
                    size += extraSpaceNeeded;
                }
            }
        }
    }

    if (copy_start != (vm_offset_t) 0) {
        kmem_free(kernel_map, copy_start, copy_end - copy_start);
    }
    if (error) {
        return (error);
    }

    if (where != USER_ADDR_NULL)
        *sizep = size;
    return (0);
}
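/*
 * Illustrative user-space sketch (not part of this kernel source):
 * sysctl_procargsx() above is what answers a {CTL_KERN, KERN_PROCARGS2,
 * pid} request.  The assumed layout used below (leading argc word, then
 * the saved executable path) follows the argc_yes branch of the copyout
 * code above.
 *
 *    #include <sys/types.h>
 *    #include <sys/sysctl.h>
 *    #include <stdio.h>
 *    #include <stdlib.h>
 *    #include <string.h>
 *    #include <unistd.h>
 *
 *    int
 *    main(void)
 *    {
 *        int mib[3] = { CTL_KERN, KERN_ARGMAX, 0 };
 *        int argmax, argc;
 *        size_t len = sizeof(argmax);
 *        char *buf;
 *
 *        if (sysctl(mib, 2, &argmax, &len, NULL, 0) == -1)
 *            return 1;
 *        if ((buf = malloc(argmax)) == NULL)
 *            return 1;
 *        mib[1] = KERN_PROCARGS2;
 *        mib[2] = getpid();
 *        len = argmax;
 *        if (sysctl(mib, 3, buf, &len, NULL, 0) == -1)
 *            return 1;
 *        memcpy(&argc, buf, sizeof(argc));        // first word is argc
 *        printf("argc = %d, exec path = %s\n", argc, buf + sizeof(int));
 *        free(buf);
 *        return 0;
 *    }
 */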
/*
 * Validate parameters and get old / set new parameters
 * for max number of concurrent aio requests.  Makes sure
 * the system wide limit is greater than the per process
 * limit.
 */
static int
sysctl_aiomax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen)
{
    int new_value;
    int error = 0;

    if ( oldp && *oldlenp < sizeof(int) )
        return (ENOMEM);
    if ( newp && newlen != sizeof(int) )
        return (EINVAL);

    *oldlenp = sizeof(int);
    if ( oldp )
        error = copyout( &aio_max_requests, oldp, sizeof(int) );
    if ( error == 0 && newp )
        error = copyin( newp, &new_value, sizeof(int) );
    if ( error == 0 && newp ) {
        if ( new_value >= aio_max_requests_per_process )
            aio_max_requests = new_value;
        else
            error = EINVAL;
    }
    return( error );

} /* sysctl_aiomax */

/*
 * Validate parameters and get old / set new parameters
 * for max number of concurrent aio requests per process.
 * Makes sure per process limit is less than the system wide
 * limit.
 */
static int
sysctl_aioprocmax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen)
{
    int new_value;
    int error = 0;

    if ( oldp && *oldlenp < sizeof(int) )
        return (ENOMEM);
    if ( newp && newlen != sizeof(int) )
        return (EINVAL);

    *oldlenp = sizeof(int);
    if ( oldp )
        error = copyout( &aio_max_requests_per_process, oldp, sizeof(int) );
    if ( error == 0 && newp )
        error = copyin( newp, &new_value, sizeof(int) );
    if ( error == 0 && newp ) {
        if ( new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX )
            aio_max_requests_per_process = new_value;
        else
            error = EINVAL;
    }
    return( error );

} /* sysctl_aioprocmax */

/*
 * Validate parameters and get old / set new parameters
 * for max number of async IO worker threads.
 * We only allow an increase in the number of worker threads.
 */
static int
sysctl_aiothreads(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen)
{
    int new_value;
    int error = 0;

    if ( oldp && *oldlenp < sizeof(int) )
        return (ENOMEM);
    if ( newp && newlen != sizeof(int) )
        return (EINVAL);

    *oldlenp = sizeof(int);
    if ( oldp )
        error = copyout( &aio_worker_threads, oldp, sizeof(int) );
    if ( error == 0 && newp )
        error = copyin( newp, &new_value, sizeof(int) );
    if ( error == 0 && newp ) {
        if (new_value > aio_worker_threads ) {
            _aio_create_worker_threads( (new_value - aio_worker_threads) );
            aio_worker_threads = new_value;
        }
        else
            error = EINVAL;
    }
    return( error );

} /* sysctl_aiothreads */
/*
 * Validate parameters and get old / set new parameters
 * for max number of processes per UID.
 * Makes sure per UID limit is less than the system wide limit.
 */
static int
sysctl_maxprocperuid(user_addr_t oldp, size_t *oldlenp,
                     user_addr_t newp, size_t newlen)
{
    int new_value;
    int error = 0;

    if ( oldp != USER_ADDR_NULL && *oldlenp < sizeof(int) )
        return (ENOMEM);
    if ( newp != USER_ADDR_NULL && newlen != sizeof(int) )
        return (EINVAL);

    *oldlenp = sizeof(int);
    if ( oldp != USER_ADDR_NULL )
        error = copyout( &maxprocperuid, oldp, sizeof(int) );
    if ( error == 0 && newp != USER_ADDR_NULL ) {
        error = copyin( newp, &new_value, sizeof(int) );
        if ( error == 0 ) {
            AUDIT_ARG(value, new_value);
            if ( new_value <= maxproc && new_value > 0 )
                maxprocperuid = new_value;
            else
                error = EINVAL;
        }
        else
            error = EINVAL;
    }
    return( error );

} /* sysctl_maxprocperuid */

/*
 * Validate parameters and get old / set new parameters
 * for max number of files per process.
 * Makes sure per process limit is less than the system-wide limit.
 */
static int
sysctl_maxfilesperproc(user_addr_t oldp, size_t *oldlenp,
                       user_addr_t newp, size_t newlen)
{
    int new_value;
    int error = 0;

    if ( oldp != USER_ADDR_NULL && *oldlenp < sizeof(int) )
        return (ENOMEM);
    if ( newp != USER_ADDR_NULL && newlen != sizeof(int) )
        return (EINVAL);

    *oldlenp = sizeof(int);
    if ( oldp != USER_ADDR_NULL )
        error = copyout( &maxfilesperproc, oldp, sizeof(int) );
    if ( error == 0 && newp != USER_ADDR_NULL ) {
        error = copyin( newp, &new_value, sizeof(int) );
        if ( error == 0 ) {
            AUDIT_ARG(value, new_value);
            if ( new_value < maxfiles && new_value > 0 )
                maxfilesperproc = new_value;
            else
                error = EINVAL;
        }
        else
            error = EINVAL;
    }
    return( error );

} /* sysctl_maxfilesperproc */

/*
 * Validate parameters and get old / set new parameters
 * for the system-wide limit on the max number of processes.
 * Makes sure the system-wide limit is less than the configured hard
 * limit set at kernel compilation.
 */
static int
sysctl_maxproc(user_addr_t oldp, size_t *oldlenp,
               user_addr_t newp, size_t newlen)
{
    int new_value;
    int error = 0;

    if ( oldp != USER_ADDR_NULL && *oldlenp < sizeof(int) )
        return (ENOMEM);
    if ( newp != USER_ADDR_NULL && newlen != sizeof(int) )
        return (EINVAL);

    *oldlenp = sizeof(int);
    if ( oldp != USER_ADDR_NULL )
        error = copyout( &maxproc, oldp, sizeof(int) );
    if ( error == 0 && newp != USER_ADDR_NULL ) {
        error = copyin( newp, &new_value, sizeof(int) );
        if ( error == 0 ) {
            AUDIT_ARG(value, new_value);
            if ( new_value <= hard_maxproc && new_value > 0 )
                maxproc = new_value;
            else
                error = EINVAL;
        }
        else
            error = EINVAL;
    }
    return( error );

} /* sysctl_maxproc */
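/*
 * Illustrative user-space sketch (not part of this kernel source): the
 * set path of sysctl_maxproc() above is exercised by passing a new value
 * through sysctl(2); without root it fails with EPERM from the suser()
 * check in __sysctl().
 *
 *    #include <sys/types.h>
 *    #include <sys/sysctl.h>
 *    #include <stdio.h>
 *
 *    int
 *    main(void)
 *    {
 *        int mib[2] = { CTL_KERN, KERN_MAXPROC };
 *        int cur;
 *        size_t len = sizeof(cur);
 *
 *        if (sysctl(mib, 2, &cur, &len, NULL, 0) == -1)
 *            return 1;
 *        printf("kern.maxproc = %d\n", cur);
 *
 *        // Writing the same value back exercises the range checks above.
 *        if (sysctl(mib, 2, NULL, NULL, &cur, sizeof(cur)) == -1)
 *            perror("set kern.maxproc");
 *        return 0;
 *    }
 */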
static int
sysctl_sysctl_exec_affinity SYSCTL_HANDLER_ARGS
{
    struct proc *cur_proc = req->p;
    int error;

    if (req->oldptr != USER_ADDR_NULL) {
        cpu_type_t oldcputype = (cur_proc->p_flag & P_AFFINITY) ? CPU_TYPE_POWERPC : CPU_TYPE_I386;
        if ((error = SYSCTL_OUT(req, &oldcputype, sizeof(oldcputype))))
            return error;
    }

    if (req->newptr != USER_ADDR_NULL) {
        cpu_type_t newcputype;
        if ((error = SYSCTL_IN(req, &newcputype, sizeof(newcputype))))
            return error;
        if (newcputype == CPU_TYPE_I386)
            cur_proc->p_flag &= ~P_AFFINITY;
        else if (newcputype == CPU_TYPE_POWERPC)
            cur_proc->p_flag |= P_AFFINITY;
        else
            return (EINVAL);
    }

    return 0;
}
SYSCTL_PROC(_sysctl, OID_AUTO, proc_exec_affinity, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, 0, 0, sysctl_sysctl_exec_affinity, "I", "proc_exec_affinity");
static int
fetch_process_cputype(
    struct proc *cur_proc,
    int *name,
    u_int namelen,
    cpu_type_t *cputype)
{
    struct proc *p = NULL;
    cpu_type_t ret = 0;

    if (namelen == 0)
        p = cur_proc;
    else if (namelen == 1) {
        p = pfind(name[0]);
        if (p == NULL)
            return (EINVAL);
        if ((kauth_cred_getuid(p->p_ucred) != kauth_cred_getuid(kauth_cred_get()))
            && suser(kauth_cred_get(), &cur_proc->p_acflag))
            return (EPERM);
    } else {
        return (EINVAL);
    }

    ret = cpu_type();
    if (p->p_flag & P_TRANSLATED) {
        ret = CPU_TYPE_POWERPC;
    }
    if (IS_64BIT_PROCESS(p))
        ret |= CPU_ARCH_ABI64;

    *cputype = ret;

    return (0);
}
static int
sysctl_sysctl_native SYSCTL_HANDLER_ARGS
{
    int error;
    int res = 1;
    cpu_type_t proc_cputype = 0;

    if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
        return error;
    if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
        res = 0;
    return SYSCTL_OUT(req, &res, sizeof(res));
}
SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_native, "I", "proc_native");

static int
sysctl_sysctl_cputype SYSCTL_HANDLER_ARGS
{
    int error;
    cpu_type_t proc_cputype = 0;

    if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
        return error;
    return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
}
SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE|CTLFLAG_RD, 0, 0, sysctl_sysctl_cputype, "I", "proc_cputype");