/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Mike Karels at Berkeley Software Design, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sysctl.c	8.4 (Berkeley) 4/14/94
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/file_internal.h>
#include <sys/vnode_internal.h>
#include <sys/unistd.h>
#include <sys/ioctl.h>
#include <sys/namei.h>
#include <sys/disklabel.h>
#include <sys/sysctl.h>
#include <sys/aio_kern.h>

#include <bsm/audit_kernel.h>

#include <mach/machine.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <kern/task.h>
#include <kern/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <mach/host_info.h>

extern vm_map_t bsd_pageable_map;

#include <sys/mount_internal.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>

#include <IOKit/IOPlatformExpert.h>
#include <pexpert/pexpert.h>

#include <machine/machine_routines.h>

#include <vm/vm_protos.h>
sysctlfn kern_sysctl;
sysctlfn debug_sysctl;
extern sysctlfn vm_sysctl;
extern sysctlfn vfs_sysctl;
extern sysctlfn net_sysctl;
extern sysctlfn cpu_sysctl;
extern int aio_max_requests;
extern int aio_max_requests_per_process;
extern int aio_worker_threads;
extern int maxprocperuid;
extern int maxfilesperproc;
extern int lowpri_IO_window_msecs;
extern int lowpri_IO_delay_msecs;
static void
fill_eproc(struct proc *p, struct eproc *ep);
static void
fill_externproc(struct proc *p, struct extern_proc *exp);
static void
fill_user_eproc(struct proc *p, struct user_eproc *ep);
static void
fill_user_proc(struct proc *p, struct user_kinfo_proc *kp);
static void
fill_user_externproc(struct proc *p, struct user_extern_proc *exp);
extern int
kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep);
int
kdebug_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
	struct proc *p);
int
pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
	struct proc *p);
__private_extern__ kern_return_t
reset_vmobjectcache(unsigned int val1, unsigned int val2);
extern int
resize_namecache(u_int newsize);
static int
sysctl_aiomax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen);
static int
sysctl_aioprocmax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen);
static int
sysctl_aiothreads(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen);
static int
sysctl_clockrate(user_addr_t where, size_t *sizep);
static int
sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep);
static int
sysctl_doprof(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen);
static int
sysctl_file(user_addr_t where, size_t *sizep);
static void
fill_proc(struct proc *p, struct kinfo_proc *kp);
static int
sysctl_maxfilesperproc(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen);
static int
sysctl_maxprocperuid(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen);
static int
sysctl_maxproc(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen);
static int
sysctl_procargs(int *name, u_int namelen, user_addr_t where,
	size_t *sizep, struct proc *cur_proc);
static int
sysctl_procargs2(int *name, u_int namelen, user_addr_t where, size_t *sizep,
	struct proc *cur_proc);
static int
sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
	struct proc *cur_proc, int argc_yes);
int
sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
	size_t newlen, void *sp, int len);
static int
sysctl_vnode(user_addr_t where, size_t *sizep);
/*
 * temporary location for vm_sysctl.  This should be machine independent.
 */

extern uint32_t mach_factor[3];

static void
loadavg32to64(struct loadavg *la32, struct user_loadavg *la64)
{
	la64->ldavg[0] = la32->ldavg[0];
	la64->ldavg[1] = la32->ldavg[1];
	la64->ldavg[2] = la32->ldavg[2];
	la64->fscale   = (user_long_t)la32->fscale;
}
int
vm_sysctl(int *name, __unused u_int namelen, user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, __unused struct proc *p)
{
	struct loadavg loadinfo;

	switch (name[0]) {
	case VM_LOADAVG:
		if (proc_is64bit(p)) {
			struct user_loadavg loadinfo64;
			loadavg32to64(&averunnable, &loadinfo64);
			return (sysctl_struct(oldp, oldlenp, newp, newlen,
				&loadinfo64, sizeof(loadinfo64)));
		} else {
			return (sysctl_struct(oldp, oldlenp, newp, newlen,
				&averunnable, sizeof(struct loadavg)));
		}
	case VM_MACHFACTOR:
		loadinfo.ldavg[0] = mach_factor[0];
		loadinfo.ldavg[1] = mach_factor[1];
		loadinfo.ldavg[2] = mach_factor[2];
		loadinfo.fscale = LSCALE;
		if (proc_is64bit(p)) {
			struct user_loadavg loadinfo64;
			loadavg32to64(&loadinfo, &loadinfo64);
			return (sysctl_struct(oldp, oldlenp, newp, newlen,
				&loadinfo64, sizeof(loadinfo64)));
		} else {
			return (sysctl_struct(oldp, oldlenp, newp, newlen,
				&loadinfo, sizeof(struct loadavg)));
		}
	case VM_SWAPUSAGE: {
		int			error;
		uint64_t		swap_total;
		uint64_t		swap_avail;
		uint32_t		swap_pagesize;
		boolean_t		swap_encrypted;
		struct xsw_usage	xsu;

		error = macx_swapinfo(&swap_total,
				      &swap_avail,
				      &swap_pagesize,
				      &swap_encrypted);
		if (error)
			return (error);

		xsu.xsu_total = swap_total;
		xsu.xsu_avail = swap_avail;
		xsu.xsu_used = swap_total - swap_avail;
		xsu.xsu_pagesize = swap_pagesize;
		xsu.xsu_encrypted = swap_encrypted;
		return sysctl_struct(oldp, oldlenp, newp, newlen,
				     &xsu, sizeof (struct xsw_usage));
	}
	default:
		break;
	}
	return (EOPNOTSUPP);
}
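/*
 * Illustrative only (not part of the original source): a minimal userland
 * sketch of how the VM_SWAPUSAGE branch above is typically exercised through
 * sysctl(2), assuming the standard <sys/sysctl.h> definitions of CTL_VM,
 * VM_SWAPUSAGE and struct xsw_usage:
 *
 *	int mib[2] = { CTL_VM, VM_SWAPUSAGE };
 *	struct xsw_usage xsu;
 *	size_t len = sizeof(xsu);
 *
 *	if (sysctl(mib, 2, &xsu, &len, NULL, 0) == 0)
 *		printf("swap: %llu used of %llu bytes\n",
 *		    (unsigned long long)xsu.xsu_used,
 *		    (unsigned long long)xsu.xsu_total);
 */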
static struct sysctl_lock {
	int	sl_lock;
	int	sl_want;
	int	sl_locked;
} memlock;
int
__sysctl(struct proc *p, struct __sysctl_args *uap, __unused register_t *retval)
{
	int error, dolock = 1;
	size_t savelen = 0, oldlen = 0, newlen;
	sysctlfn *fnp = NULL;
	int name[CTL_MAXNAME];
	int error1;

	/*
	 * all top-level sysctl names are non-terminal
	 */
	if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)
		return (EINVAL);
	error = copyin(uap->name, &name[0], uap->namelen * sizeof(int));
	if (error)
		return (error);

	AUDIT_ARG(ctlname, name, uap->namelen);

	if (proc_is64bit(p)) {
		/* uap->newlen is a size_t value which grows to 64 bits
		 * when coming from a 64-bit process.  since it's doubtful we'll
		 * have a sysctl newp buffer greater than 4GB we shrink it to size_t
		 */
		newlen = CAST_DOWN(size_t, uap->newlen);
	} else {
		newlen = uap->newlen;
	}

	/* CTL_UNSPEC is used to get oid to AUTO_OID */
	if (uap->new != USER_ADDR_NULL
	    && ((name[0] == CTL_KERN
		&& !(name[1] == KERN_IPC || name[1] == KERN_PANICINFO || name[1] == KERN_PROCDELAYTERM ||
		     name[1] == KERN_PROC_LOW_PRI_IO))
	    || (name[0] == CTL_HW)
	    || (name[0] == CTL_VM)
	    || (name[0] == CTL_VFS))
	    && (error = suser(kauth_cred_get(), &p->p_acflag)))
		return (error);

	switch (name[0]) {
	case CTL_KERN:
		fnp = kern_sysctl;
		if ((name[1] != KERN_VNODE) && (name[1] != KERN_FILE)
		    && (name[1] != KERN_PROC))
			dolock = 0;
		break;
	case CTL_VM:
		fnp = vm_sysctl;
		break;
	case CTL_VFS:
		fnp = vfs_sysctl;
		break;
	case CTL_DEBUG:
		fnp = debug_sysctl;
		break;
	default:
		fnp = NULL;
	}

	if (uap->oldlenp != USER_ADDR_NULL) {
		uint64_t oldlen64 = fuulong(uap->oldlenp);

		oldlen = CAST_DOWN(size_t, oldlen64);
		/*
		 * If more than 4G, clamp to 4G - useracc() below will catch
		 * with an EFAULT, if it's actually necessary.
		 */
		if (oldlen64 > 0x00000000ffffffffULL)
			oldlen = 0xffffffffUL;
	}

	if (uap->old != USER_ADDR_NULL) {
		if (!useracc(uap->old, (user_size_t)oldlen, B_WRITE))
			return (EFAULT);

		/* The pc sampling mechanism does not need to take this lock */
		if ((name[1] != KERN_PCSAMPLES) &&
		    (!((name[1] == KERN_KDEBUG) && (name[2] == KERN_KDGETENTROPY)))) {
			while (memlock.sl_lock) {
				memlock.sl_want = 1;
				sleep((caddr_t)&memlock, PRIBIO+1);
				memlock.sl_locked++;
			}
			memlock.sl_lock = 1;
		}

		if (dolock && oldlen &&
		    (error = vslock(uap->old, (user_size_t)oldlen))) {
			if ((name[1] != KERN_PCSAMPLES) &&
			    (! ((name[1] == KERN_KDEBUG) && (name[2] == KERN_KDGETENTROPY)))) {
				memlock.sl_lock = 0;
				if (memlock.sl_want) {
					memlock.sl_want = 0;
					wakeup((caddr_t)&memlock);
				}
			}
			return (error);
		}
		savelen = oldlen;
	}

	if (fnp) {
		error = (*fnp)(name + 1, uap->namelen - 1, uap->old,
			&oldlen, uap->new, newlen, p);
	} else
		error = ENOTSUP;

	if ( (name[0] != CTL_VFS) && (error == ENOTSUP)) {
		size_t tmp = oldlen;
		error = userland_sysctl(p, name, uap->namelen, uap->old, &tmp,
			1, uap->new, newlen, &oldlen);
	}

	if (uap->old != USER_ADDR_NULL) {
		if (dolock && savelen) {
			error1 = vsunlock(uap->old, (user_size_t)savelen, B_WRITE);
			if (!error && error1)
				error = error1;
		}
		if (name[1] != KERN_PCSAMPLES) {
			memlock.sl_lock = 0;
			if (memlock.sl_want) {
				memlock.sl_want = 0;
				wakeup((caddr_t)&memlock);
			}
		}
	}
	if ((error) && (error != ENOMEM))
		return (error);

	if (uap->oldlenp != USER_ADDR_NULL) {
		int i = suulong(uap->oldlenp, oldlen);
		if (i)
			return (i);
	}

	return (error);
}
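/*
 * Illustrative only: a minimal userland sketch of the call shape that reaches
 * __sysctl() above -- a two-level MIB read of a string-valued node, assuming
 * the standard <sys/sysctl.h> names:
 *
 *	int mib[2] = { CTL_KERN, KERN_OSRELEASE };
 *	char rel[256];
 *	size_t len = sizeof(rel);
 *
 *	if (sysctl(mib, 2, rel, &len, NULL, 0) == 0)
 *		printf("release: %s\n", rel);
 */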
/*
 * Attributes stored in the kernel.
 */
extern char classichandler[32];
extern uint32_t classichandler_fsid;
extern long classichandler_fileid;
__private_extern__ char corefilename[MAXPATHLEN+1];
__private_extern__ int do_coredump;
__private_extern__ int sugid_coredump;

int securelevel = -1;
static int
sysctl_affinity(
	int *name,
	u_int namelen,
	user_addr_t oldBuf,
	size_t *oldSize,
	user_addr_t newBuf,
	__unused size_t newSize,
	struct proc *cur_proc)
{
	if (namelen < 1)
		return (EOPNOTSUPP);

	if (name[0] == 0 && 1 == namelen) {
		return sysctl_rdint(oldBuf, oldSize, newBuf,
			(cur_proc->p_flag & P_AFFINITY) ? 1 : 0);
	} else if (name[0] == 1 && 2 == namelen) {
		if (name[1] == 0) {
			cur_proc->p_flag &= ~P_AFFINITY;
		} else {
			cur_proc->p_flag |= P_AFFINITY;
		}
		return (0);
	}
	return (EOPNOTSUPP);
}
static int
sysctl_classic(
	int *name,
	u_int namelen,
	user_addr_t oldBuf,
	size_t *oldSize,
	user_addr_t newBuf,
	__unused size_t newSize,
	struct proc *cur_proc)
{
	struct proc *p;

	if (namelen != 1)
		return (EOPNOTSUPP);

	p = pfind(name[0]);
	if (p == NULL)
		return (EINVAL);

	if ((kauth_cred_getuid(p->p_ucred) != kauth_cred_getuid(kauth_cred_get()))
	    && suser(kauth_cred_get(), &cur_proc->p_acflag))
		return (EPERM);

	return sysctl_rdint(oldBuf, oldSize, newBuf,
		(p->p_flag & P_CLASSIC) ? 1 : 0);
}
static int
sysctl_classichandler(
	__unused int *name,
	__unused u_int namelen,
	user_addr_t oldBuf,
	size_t *oldSize,
	user_addr_t newBuf,
	size_t newSize,
	struct proc *p)
{
	int error = 0;
	size_t len;
	struct nameidata nd;
	struct vnode_attr va;
	char handler[sizeof(classichandler)];
	struct vfs_context context;

	context.vc_proc = p;
	context.vc_ucred = kauth_cred_get();

	if (oldSize) {
		len = strlen(classichandler) + 1;
		if (oldBuf) {
			if (*oldSize < len)
				return (ENOMEM);
			error = copyout(classichandler, oldBuf, len);
			if (error)
				return (error);
		}
		*oldSize = len - 1;
	}
	if (newBuf) {
		error = suser(context.vc_ucred, &p->p_acflag);
		if (error)
			return (error);
		if (newSize >= sizeof(classichandler))
			return (ENAMETOOLONG);
		error = copyin(newBuf, handler, newSize);
		if (error)
			return (error);
		handler[newSize] = 0;

		NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE32,
		       CAST_USER_ADDR_T(handler), &context);
		error = namei(&nd);
		if (error)
			return (error);
		nameidone(&nd);

		/* Check mount point */
		if ((nd.ni_vp->v_mount->mnt_flag & MNT_NOEXEC) ||
		    (nd.ni_vp->v_type != VREG)) {
			vnode_put(nd.ni_vp);
			return (EPERM);
		}

		VATTR_INIT(&va);
		VATTR_WANTED(&va, va_fsid);
		VATTR_WANTED(&va, va_fileid);
		error = vnode_getattr(nd.ni_vp, &va, &context);
		vnode_put(nd.ni_vp);
		if (error)
			return (error);

		classichandler_fsid = va.va_fsid;
		classichandler_fileid = (u_long)va.va_fileid;
		strcpy(classichandler, handler);
	}
	return (error);
}
extern int get_kernel_symfile(struct proc *, char **);
__private_extern__ int
sysctl_dopanicinfo(int *, u_int, user_addr_t, size_t *, user_addr_t,
	size_t, struct proc *);
/*
 * kernel related system variables.
 */
int
kern_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, struct proc *p)
{
	int error, level, inthostid, tmp;
	unsigned int oldval = 0;
	char *str;

	/* all sysctl names not listed below are terminal at this level */
	if (namelen != 1
	    && !(name[0] == KERN_PROC
	    || name[0] == KERN_PROF
	    || name[0] == KERN_KDEBUG
	    || name[0] == KERN_PROCARGS
	    || name[0] == KERN_PROCARGS2
	    || name[0] == KERN_PCSAMPLES
	    || name[0] == KERN_IPC
	    || name[0] == KERN_SYSV
	    || name[0] == KERN_AFFINITY
	    || name[0] == KERN_CLASSIC
	    || name[0] == KERN_PANICINFO
	    || name[0] == KERN_POSIX))
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case KERN_OSTYPE:
		return (sysctl_rdstring(oldp, oldlenp, newp, ostype));
	case KERN_OSRELEASE:
		return (sysctl_rdstring(oldp, oldlenp, newp, osrelease));
	case KERN_OSREV:
		return (sysctl_rdint(oldp, oldlenp, newp, BSD));
	case KERN_VERSION:
		return (sysctl_rdstring(oldp, oldlenp, newp, version));
	case KERN_MAXVNODES:
		oldval = desiredvnodes;
		error = sysctl_int(oldp, oldlenp, newp,
				newlen, &desiredvnodes);
		reset_vmobjectcache(oldval, desiredvnodes);
		resize_namecache(desiredvnodes);
		return (error);
	case KERN_MAXPROC:
		return (sysctl_maxproc(oldp, oldlenp, newp, newlen));
	case KERN_MAXFILES:
		return (sysctl_int(oldp, oldlenp, newp, newlen, &maxfiles));
	case KERN_MAXPROCPERUID:
		return( sysctl_maxprocperuid( oldp, oldlenp, newp, newlen ) );
	case KERN_MAXFILESPERPROC:
		return( sysctl_maxfilesperproc( oldp, oldlenp, newp, newlen ) );
	case KERN_ARGMAX:
		return (sysctl_rdint(oldp, oldlenp, newp, ARG_MAX));
	case KERN_SECURELVL:
		level = securelevel;
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &level)) ||
		    newp == USER_ADDR_NULL)
			return (error);
		if (level < securelevel && p->p_pid != 1)
			return (EPERM);
		securelevel = level;
		return (0);
	case KERN_HOSTNAME:
		error = sysctl_trstring(oldp, oldlenp, newp, newlen,
				hostname, sizeof(hostname));
		if (newp && !error)
			hostnamelen = newlen;
		return (error);
	case KERN_DOMAINNAME:
		error = sysctl_string(oldp, oldlenp, newp, newlen,
				domainname, sizeof(domainname));
		if (newp && !error)
			domainnamelen = newlen;
		return (error);
	case KERN_HOSTID:
		inthostid = hostid;	/* XXX assumes sizeof long <= sizeof int */
		error = sysctl_int(oldp, oldlenp, newp, newlen, &inthostid);
		hostid = inthostid;
		return (error);
	case KERN_CLOCKRATE:
		return (sysctl_clockrate(oldp, oldlenp));
	case KERN_BOOTTIME:
	{
		struct timeval t;

		t.tv_sec = boottime_sec();
		t.tv_usec = 0;

		return (sysctl_rdstruct(oldp, oldlenp, newp, &t,
			sizeof(struct timeval)));
	}
	case KERN_VNODE:
		return (sysctl_vnode(oldp, oldlenp));
	case KERN_PROC:
		return (sysctl_doproc(name + 1, namelen - 1, oldp, oldlenp));
	case KERN_FILE:
		return (sysctl_file(oldp, oldlenp));
	case KERN_PROF:
		return (sysctl_doprof(name + 1, namelen - 1, oldp, oldlenp,
			newp, newlen));
	case KERN_POSIX1:
		return (sysctl_rdint(oldp, oldlenp, newp, _POSIX_VERSION));
	case KERN_NGROUPS:
		return (sysctl_rdint(oldp, oldlenp, newp, NGROUPS_MAX));
	case KERN_JOB_CONTROL:
		return (sysctl_rdint(oldp, oldlenp, newp, 1));
	case KERN_SAVED_IDS:
#ifdef _POSIX_SAVED_IDS
		return (sysctl_rdint(oldp, oldlenp, newp, 1));
#else
		return (sysctl_rdint(oldp, oldlenp, newp, 0));
#endif
	case KERN_KDEBUG:
		return (kdebug_ops(name + 1, namelen - 1, oldp, oldlenp, p));
	case KERN_PCSAMPLES:
		return (pcsamples_ops(name + 1, namelen - 1, oldp, oldlenp, p));
	case KERN_PROCARGS:
		/* new one as it does not use kinfo_proc */
		return (sysctl_procargs(name + 1, namelen - 1, oldp, oldlenp, p));
	case KERN_PROCARGS2:
		/* new one as it does not use kinfo_proc */
		return (sysctl_procargs2(name + 1, namelen - 1, oldp, oldlenp, p));
	case KERN_SYMFILE:
		error = get_kernel_symfile( p, &str );
		if (error)
			return (error);
		return (sysctl_rdstring(oldp, oldlenp, newp, str));
	case KERN_NETBOOT:
		return (sysctl_rdint(oldp, oldlenp, newp, netboot_root()));
	case KERN_PANICINFO:
		return(sysctl_dopanicinfo(name + 1, namelen - 1, oldp, oldlenp,
			newp, newlen, p));
	case KERN_AFFINITY:
		return sysctl_affinity(name+1, namelen-1, oldp, oldlenp,
			newp, newlen, p);
	case KERN_CLASSIC:
		return sysctl_classic(name+1, namelen-1, oldp, oldlenp,
			newp, newlen, p);
	case KERN_CLASSICHANDLER:
		return sysctl_classichandler(name+1, namelen-1, oldp, oldlenp,
			newp, newlen, p);
	case KERN_AIOMAX:
		return( sysctl_aiomax( oldp, oldlenp, newp, newlen ) );
	case KERN_AIOPROCMAX:
		return( sysctl_aioprocmax( oldp, oldlenp, newp, newlen ) );
	case KERN_AIOTHREADS:
		return( sysctl_aiothreads( oldp, oldlenp, newp, newlen ) );
	case KERN_USRSTACK:
		return (sysctl_rdint(oldp, oldlenp, newp, (uintptr_t)p->user_stack));
	case KERN_USRSTACK64:
		return (sysctl_rdquad(oldp, oldlenp, newp, p->user_stack));
	case KERN_COREFILE:
		error = sysctl_string(oldp, oldlenp, newp, newlen,
			corefilename, sizeof(corefilename));
		return (error);
	case KERN_COREDUMP:
		tmp = do_coredump;
		error = sysctl_int(oldp, oldlenp, newp, newlen, &do_coredump);
		if (!error && ((do_coredump < 0) || (do_coredump > 1))) {
			do_coredump = tmp;
			error = EINVAL;
		}
		return (error);
	case KERN_SUGID_COREDUMP:
		tmp = sugid_coredump;
		error = sysctl_int(oldp, oldlenp, newp, newlen, &sugid_coredump);
		if (!error && ((sugid_coredump < 0) || (sugid_coredump > 1))) {
			sugid_coredump = tmp;
			error = EINVAL;
		}
		return (error);
	case KERN_PROCDELAYTERM:
	{
		int old_value, new_value;

		error = 0;
		if (oldp && *oldlenp < sizeof(int))
			return (ENOMEM);
		if ( newp && newlen != sizeof(int) )
			return (EINVAL);
		*oldlenp = sizeof(int);
		old_value = (p->p_lflag & P_LDELAYTERM)? 1: 0;
		if (oldp && (error = copyout( &old_value, oldp, sizeof(int))))
			return (error);
		if (error == 0 && newp )
			error = copyin( newp, &new_value, sizeof(int) );
		if (error == 0 && newp) {
			if (new_value)
				p->p_lflag |= P_LDELAYTERM;
			else
				p->p_lflag &= ~P_LDELAYTERM;
		}
		return (error);
	}
	case KERN_PROC_LOW_PRI_IO:
	{
		int old_value, new_value;

		error = 0;
		if (oldp && *oldlenp < sizeof(int))
			return (ENOMEM);
		if ( newp && newlen != sizeof(int) )
			return (EINVAL);
		*oldlenp = sizeof(int);

		old_value = (p->p_lflag & P_LLOW_PRI_IO)? 0x01: 0;
		if (p->p_lflag & P_LBACKGROUND_IO)
			old_value |= 0x02;

		if (oldp && (error = copyout( &old_value, oldp, sizeof(int))))
			return (error);
		if (error == 0 && newp )
			error = copyin( newp, &new_value, sizeof(int) );
		if (error == 0 && newp) {
			if (new_value & 0x01)
				p->p_lflag |= P_LLOW_PRI_IO;
			else if (new_value & 0x02)
				p->p_lflag |= P_LBACKGROUND_IO;
			else if (new_value == 0)
				p->p_lflag &= ~(P_LLOW_PRI_IO | P_LBACKGROUND_IO);
		}
		return (error);
	}
	case KERN_LOW_PRI_WINDOW:
	{
		int old_value, new_value;

		error = 0;
		if (oldp && *oldlenp < sizeof(old_value) )
			return (ENOMEM);
		if ( newp && newlen != sizeof(new_value) )
			return (EINVAL);
		*oldlenp = sizeof(old_value);

		old_value = lowpri_IO_window_msecs;

		if (oldp && (error = copyout( &old_value, oldp, *oldlenp)))
			return (error);
		if (error == 0 && newp )
			error = copyin( newp, &new_value, sizeof(newlen) );
		if (error == 0 && newp) {
			lowpri_IO_window_msecs = new_value;
		}
		return (error);
	}
	case KERN_LOW_PRI_DELAY:
	{
		int old_value, new_value;

		error = 0;
		if (oldp && *oldlenp < sizeof(old_value) )
			return (ENOMEM);
		if ( newp && newlen != sizeof(new_value) )
			return (EINVAL);
		*oldlenp = sizeof(old_value);

		old_value = lowpri_IO_delay_msecs;

		if (oldp && (error = copyout( &old_value, oldp, *oldlenp)))
			return (error);
		if (error == 0 && newp )
			error = copyin( newp, &new_value, sizeof(newlen) );
		if (error == 0 && newp) {
			lowpri_IO_delay_msecs = new_value;
		}
		return (error);
	}
	case KERN_SHREG_PRIVATIZABLE:
		/* this kernel does implement shared_region_make_private_np() */
		return (sysctl_rdint(oldp, oldlenp, newp, 1));
	default:
		return (ENOTSUP);
	}
	/* NOTREACHED */
}
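/*
 * Illustrative only: a minimal userland sketch of one of the struct-valued
 * cases above (KERN_BOOTTIME), assuming the standard <sys/sysctl.h> names:
 *
 *	int mib[2] = { CTL_KERN, KERN_BOOTTIME };
 *	struct timeval bt;
 *	size_t len = sizeof(bt);
 *
 *	if (sysctl(mib, 2, &bt, &len, NULL, 0) == 0)
 *		printf("booted at %ld\n", (long)bt.tv_sec);
 */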
/*
 * Debugging related system variables.
 */
#if DIAGNOSTIC
extern
#endif /* DIAGNOSTIC */
struct ctldebug debug0, debug1;
struct ctldebug debug2, debug3, debug4;
struct ctldebug debug5, debug6, debug7, debug8, debug9;
struct ctldebug debug10, debug11, debug12, debug13, debug14;
struct ctldebug debug15, debug16, debug17, debug18, debug19;
static struct ctldebug *debugvars[CTL_DEBUG_MAXID] = {
	&debug0, &debug1, &debug2, &debug3, &debug4,
	&debug5, &debug6, &debug7, &debug8, &debug9,
	&debug10, &debug11, &debug12, &debug13, &debug14,
	&debug15, &debug16, &debug17, &debug18, &debug19,
};
int
debug_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, struct proc *p)
{
	struct ctldebug *cdp;

	/* all sysctl names at this level are name and field */
	if (namelen != 2)
		return (ENOTDIR);		/* overloaded */
	cdp = debugvars[name[0]];
	if (cdp->debugname == 0)
		return (EOPNOTSUPP);
	switch (name[1]) {
	case CTL_DEBUG_NAME:
		return (sysctl_rdstring(oldp, oldlenp, newp, cdp->debugname));
	case CTL_DEBUG_VALUE:
		return (sysctl_int(oldp, oldlenp, newp, newlen, cdp->debugvar));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
/*
 * Validate parameters and get old / set new parameters
 * for an integer-valued sysctl function.
 */
int
sysctl_int(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, int *valp)
{
	int error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	if (oldp && *oldlenp < sizeof(int))
		return (ENOMEM);
	if (newp && newlen != sizeof(int))
		return (EINVAL);
	*oldlenp = sizeof(int);
	if (oldp)
		error = copyout(valp, oldp, sizeof(int));
	if (error == 0 && newp) {
		error = copyin(newp, valp, sizeof(int));
		AUDIT_ARG(value, *valp);
	}
	return (error);
}
/*
 * As above, but read-only.
 */
int
sysctl_rdint(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, int val)
{
	int error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	if (oldp && *oldlenp < sizeof(int))
		return (ENOMEM);
	if (newp)
		return (EPERM);
	*oldlenp = sizeof(int);
	if (oldp)
		error = copyout((caddr_t)&val, oldp, sizeof(int));
	return (error);
}
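/*
 * Illustrative only: a minimal userland sketch of the oldp/oldlenp and
 * newp/newlen protocol these helpers implement, shown for an int-valued
 * node such as kern.maxproc (assumes the standard sysctl(2) interface):
 *
 *	int mib[2] = { CTL_KERN, KERN_MAXPROC };
 *	int cur, want = 1000;
 *	size_t len = sizeof(cur);
 *
 *	sysctl(mib, 2, &cur, &len, NULL, 0);			read old value
 *	sysctl(mib, 2, NULL, NULL, &want, sizeof(want));	set new value (root only)
 */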
/*
 * Validate parameters and get old / set new parameters
 * for a quad (64-bit) valued sysctl function.
 */
int
sysctl_quad(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, quad_t *valp)
{
	int error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	if (oldp && *oldlenp < sizeof(quad_t))
		return (ENOMEM);
	if (newp && newlen != sizeof(quad_t))
		return (EINVAL);
	*oldlenp = sizeof(quad_t);
	if (oldp)
		error = copyout(valp, oldp, sizeof(quad_t));
	if (error == 0 && newp)
		error = copyin(newp, valp, sizeof(quad_t));
	return (error);
}
/*
 * As above, but read-only.
 */
int
sysctl_rdquad(oldp, oldlenp, newp, val)
	void *oldp;
	size_t *oldlenp;
	void *newp;
	quad_t val;
{
	int error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	if (oldp && *oldlenp < sizeof(quad_t))
		return (ENOMEM);
	if (newp)
		return (EPERM);
	*oldlenp = sizeof(quad_t);
	if (oldp)
		error = copyout((caddr_t)&val, CAST_USER_ADDR_T(oldp), sizeof(quad_t));
	return (error);
}
/*
 * Validate parameters and get old / set new parameters
 * for a string-valued sysctl function.  Unlike sysctl_string, if you
 * give it a too small (but larger than 0 bytes) buffer, instead of
 * returning ENOMEM, it truncates the returned string to the buffer
 * size.  This preserves the semantics of some library routines
 * implemented via sysctl, which truncate their returned data, rather
 * than simply returning an error.  The returned string is always NUL
 * terminated.
 */
int
sysctl_trstring(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, char *str, int maxlen)
{
	int len, copylen, error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	copylen = len = strlen(str) + 1;
	if (oldp && (len < 0 || *oldlenp < 1))
		return (ENOMEM);
	if (oldp && (*oldlenp < (size_t)len))
		copylen = *oldlenp + 1;
	if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
		return (EINVAL);
	*oldlenp = copylen - 1;	/* deal with NULL strings correctly */
	if (oldp) {
		error = copyout(str, oldp, copylen);
		if (!error) {
			unsigned char c = 0;

			/* NUL terminate */
			oldp += *oldlenp;
			error = copyout((void *)&c, oldp, sizeof(char));
		}
	}
	if (error == 0 && newp) {
		error = copyin(newp, str, newlen);
		str[newlen] = 0;
		AUDIT_ARG(text, (char *)str);
	}
	return (error);
}
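/*
 * Illustrative only: the truncating behaviour above is what lets library
 * wrappers hand in a small (but non-empty) buffer and still get back a
 * NUL-terminated prefix instead of ENOMEM.  A hedged userland sketch:
 *
 *	char buf[8];
 *	size_t len = sizeof(buf);
 *	if (sysctlbyname("kern.hostname", buf, &len, NULL, 0) == 0)
 *		the call succeeds even when the hostname is longer than buf,
 *		returning a truncated, NUL-terminated copy.
 */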
/*
 * Validate parameters and get old / set new parameters
 * for a string-valued sysctl function.
 */
int
sysctl_string(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, char *str, int maxlen)
{
	int len, error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	len = strlen(str) + 1;
	if (oldp && (len < 0 || *oldlenp < (size_t)len))
		return (ENOMEM);
	if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
		return (EINVAL);
	*oldlenp = len - 1;	/* deal with NULL strings correctly */
	if (oldp)
		error = copyout(str, oldp, len);
	if (error == 0 && newp) {
		error = copyin(newp, str, newlen);
		str[newlen] = 0;
		AUDIT_ARG(text, (char *)str);
	}
	return (error);
}
/*
 * As above, but read-only.
 */
int
sysctl_rdstring(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, char *str)
{
	int len, error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	len = strlen(str) + 1;
	if (oldp && *oldlenp < (size_t)len)
		return (ENOMEM);
	if (newp)
		return (EPERM);
	*oldlenp = len;
	if (oldp)
		error = copyout(str, oldp, len);
	return (error);
}
/*
 * Validate parameters and get old / set new parameters
 * for a structure oriented sysctl function.
 */
int
sysctl_struct(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen, void *sp, int len)
{
	int error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	if (oldp && (len < 0 || *oldlenp < (size_t)len))
		return (ENOMEM);
	if (newp && (len < 0 || newlen > (size_t)len))
		return (EINVAL);
	if (oldp) {
		*oldlenp = len;
		error = copyout(sp, oldp, len);
	}
	if (error == 0 && newp)
		error = copyin(newp, sp, len);
	return (error);
}
/*
 * Validate parameters and get old parameters
 * for a structure oriented sysctl function.
 */
int
sysctl_rdstruct(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, void *sp, int len)
{
	int error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	if (oldp && (len < 0 || *oldlenp < (size_t)len))
		return (ENOMEM);
	if (newp)
		return (EPERM);
	*oldlenp = len;
	if (oldp)
		error = copyout(sp, oldp, len);
	return (error);
}
/*
 * Get file structures.
 */
static int
sysctl_file(user_addr_t where, size_t *sizep)
{
	int buflen, error;
	struct fileglob *fg;
	user_addr_t start = where;
	struct extern_file nef;

	buflen = *sizep;
	if (where == USER_ADDR_NULL) {
		/*
		 * overestimate by 10 files
		 */
		*sizep = sizeof(filehead) + (nfiles + 10) * sizeof(struct extern_file);
		return (0);
	}

	/*
	 * first copyout filehead
	 */
	if (buflen < 0 || (size_t)buflen < sizeof(filehead)) {
		*sizep = 0;
		return (0);
	}
	error = copyout((caddr_t)&filehead, where, sizeof(filehead));
	if (error)
		return (error);
	buflen -= sizeof(filehead);
	where += sizeof(filehead);

	/*
	 * followed by an array of file structures
	 */
	for (fg = filehead.lh_first; fg != 0; fg = fg->f_list.le_next) {
		if (buflen < 0 || (size_t)buflen < sizeof(struct extern_file)) {
			*sizep = where - start;
			return (ENOMEM);
		}
		nef.f_list.le_next = (struct extern_file *)fg->f_list.le_next;
		nef.f_list.le_prev = (struct extern_file **)fg->f_list.le_prev;
		nef.f_flag = (fg->fg_flag & FMASK);
		nef.f_type = fg->fg_type;
		nef.f_count = fg->fg_count;
		nef.f_msgcount = fg->fg_msgcount;
		nef.f_cred = fg->fg_cred;
		nef.f_ops = fg->fg_ops;
		nef.f_offset = fg->fg_offset;
		nef.f_data = fg->fg_data;
		error = copyout((caddr_t)&nef, where, sizeof (struct extern_file));
		if (error)
			return (error);
		buflen -= sizeof(struct extern_file);
		where += sizeof(struct extern_file);
	}
	*sizep = where - start;
	return (0);
}
/*
 * try over estimating by 5 procs
 */
#define KERN_PROCSLOP	(5 * sizeof (struct kinfo_proc))

int
sysctl_doproc(int *name, u_int namelen, user_addr_t where, size_t *sizep)
{
	register struct proc *p;
	user_addr_t dp = where;
	size_t needed = 0;
	int buflen = where != USER_ADDR_NULL ? *sizep : 0;
	int doingzomb;
	int error = 0;
	boolean_t is_64_bit = FALSE;
	struct kinfo_proc	kproc;
	struct user_kinfo_proc	user_kproc;
	int sizeof_kproc = 0;
	caddr_t kprocp = NULL;

	if (namelen != 2 && !(namelen == 1 && name[0] == KERN_PROC_ALL))
		return (EINVAL);
	p = allproc.lh_first;
	doingzomb = 0;
	is_64_bit = proc_is64bit(current_proc());
	if (is_64_bit) {
		sizeof_kproc = sizeof(user_kproc);
		kprocp = (caddr_t) &user_kproc;
	} else {
		sizeof_kproc = sizeof(kproc);
		kprocp = (caddr_t) &kproc;
	}
again:
	for (; p != 0; p = p->p_list.le_next) {
		/*
		 * Skip embryonic processes.
		 */
		if (p->p_stat == SIDL)
			continue;
		/*
		 * TODO - make more efficient (see notes below).
		 */
		switch (name[0]) {

		case KERN_PROC_PID:
			/* could do this with just a lookup */
			if (p->p_pid != (pid_t)name[1])
				continue;
			break;

		case KERN_PROC_PGRP:
			/* could do this by traversing pgrp */
			if (p->p_pgrp->pg_id != (pid_t)name[1])
				continue;
			break;

		case KERN_PROC_TTY:
			if ((p->p_flag & P_CONTROLT) == 0 ||
			    (p->p_session == NULL) ||
			    p->p_session->s_ttyp == NULL ||
			    p->p_session->s_ttyp->t_dev != (dev_t)name[1])
				continue;
			break;

		case KERN_PROC_UID:
			if ((p->p_ucred == NULL) ||
			    (kauth_cred_getuid(p->p_ucred) != (uid_t)name[1]))
				continue;
			break;

		case KERN_PROC_RUID:
			if ((p->p_ucred == NULL) ||
			    (p->p_ucred->cr_ruid != (uid_t)name[1]))
				continue;
			break;
		}

		if (buflen >= sizeof_kproc) {
			bzero(kprocp, sizeof_kproc);
			if (is_64_bit) {
				fill_user_proc(p, (struct user_kinfo_proc *) kprocp);
			} else {
				fill_proc(p, (struct kinfo_proc *) kprocp);
			}
			error = copyout(kprocp, dp, sizeof_kproc);
			if (error)
				return (error);
			dp += sizeof_kproc;
			buflen -= sizeof_kproc;
		}
		needed += sizeof_kproc;
	}
	if (doingzomb == 0) {
		p = zombproc.lh_first;
		doingzomb++;
		goto again;
	}
	if (where != USER_ADDR_NULL) {
		*sizep = dp - where;
		if (needed > *sizep)
			return (ENOMEM);
	} else {
		needed += KERN_PROCSLOP;
		*sizep = needed;
	}
	return (0);
}
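/*
 * Illustrative only: a minimal userland sketch of the KERN_PROC_PID path
 * above, assuming the standard <sys/sysctl.h> kinfo_proc definitions:
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid() };
 *	struct kinfo_proc kp;
 *	size_t len = sizeof(kp);
 *
 *	if (sysctl(mib, 4, &kp, &len, NULL, 0) == 0)
 *		printf("comm: %s\n", kp.kp_proc.p_comm);
 */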
/*
 * Fill in an eproc structure for the specified process.
 */
static void
fill_eproc(p, ep)
	register struct proc *p;
	register struct eproc *ep;
{
	register struct tty *tp;

	ep->e_paddr = p;
	if (p->p_pgrp) {
		ep->e_sess = p->p_pgrp->pg_session;
		ep->e_pgid = p->p_pgrp->pg_id;
		ep->e_jobc = p->p_pgrp->pg_jobc;
		if (ep->e_sess && ep->e_sess->s_ttyvp)
			ep->e_flag = EPROC_CTTY;
	} else {
		ep->e_sess = (struct session *)0;
		ep->e_pgid = 0;
		ep->e_jobc = 0;
	}
	ep->e_ppid = (p->p_pptr) ? p->p_pptr->p_pid : 0;
	/* Pre-zero the fake historical pcred */
	bzero(&ep->e_pcred, sizeof(struct _pcred));
	if (p->p_ucred) {
		/* XXX not ref-counted */

		/* A fake historical pcred */
		ep->e_pcred.p_ruid = p->p_ucred->cr_ruid;
		ep->e_pcred.p_svuid = p->p_ucred->cr_svuid;
		ep->e_pcred.p_rgid = p->p_ucred->cr_rgid;
		ep->e_pcred.p_svgid = p->p_ucred->cr_svgid;

		/* A fake historical *kauth_cred_t */
		ep->e_ucred.cr_ref = p->p_ucred->cr_ref;
		ep->e_ucred.cr_uid = kauth_cred_getuid(p->p_ucred);
		ep->e_ucred.cr_ngroups = p->p_ucred->cr_ngroups;
		bcopy(p->p_ucred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));
	}
	if (p->p_stat == SIDL || p->p_stat == SZOMB) {
		ep->e_vm.vm_tsize = 0;
		ep->e_vm.vm_dsize = 0;
		ep->e_vm.vm_ssize = 0;
	}
	ep->e_vm.vm_rssize = 0;

	if ((p->p_flag & P_CONTROLT) && (ep->e_sess) &&
	    (tp = ep->e_sess->s_ttyp)) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
		ep->e_tsess = tp->t_session;
	} else
		ep->e_tdev = NODEV;

	if (SESS_LEADER(p))
		ep->e_flag |= EPROC_SLEADER;
	if (p->p_wmesg)
		strncpy(ep->e_wmesg, p->p_wmesg, WMESGLEN);
	ep->e_xsize = ep->e_xrssize = 0;
	ep->e_xccount = ep->e_xswrss = 0;
}
/*
 * Fill in an LP64 version of eproc structure for the specified process.
 */
static void
fill_user_eproc(register struct proc *p, register struct user_eproc *ep)
{
	register struct tty *tp;
	struct session *sessionp = NULL;

	ep->e_paddr = CAST_USER_ADDR_T(p);
	if (p->p_pgrp) {
		sessionp = p->p_pgrp->pg_session;
		ep->e_sess = CAST_USER_ADDR_T(sessionp);
		ep->e_pgid = p->p_pgrp->pg_id;
		ep->e_jobc = p->p_pgrp->pg_jobc;
		if (sessionp->s_ttyvp)
			ep->e_flag = EPROC_CTTY;
	} else {
		ep->e_sess = USER_ADDR_NULL;
		ep->e_pgid = 0;
		ep->e_jobc = 0;
	}
	ep->e_ppid = (p->p_pptr) ? p->p_pptr->p_pid : 0;
	/* Pre-zero the fake historical pcred */
	bzero(&ep->e_pcred, sizeof(ep->e_pcred));
	if (p->p_ucred) {
		/* XXX not ref-counted */

		/* A fake historical pcred */
		ep->e_pcred.p_ruid = p->p_ucred->cr_ruid;
		ep->e_pcred.p_svuid = p->p_ucred->cr_svuid;
		ep->e_pcred.p_rgid = p->p_ucred->cr_rgid;
		ep->e_pcred.p_svgid = p->p_ucred->cr_svgid;

		/* A fake historical *kauth_cred_t */
		ep->e_ucred.cr_ref = p->p_ucred->cr_ref;
		ep->e_ucred.cr_uid = kauth_cred_getuid(p->p_ucred);
		ep->e_ucred.cr_ngroups = p->p_ucred->cr_ngroups;
		bcopy(p->p_ucred->cr_groups, ep->e_ucred.cr_groups, NGROUPS*sizeof(gid_t));
	}
	if (p->p_stat == SIDL || p->p_stat == SZOMB) {
		ep->e_vm.vm_tsize = 0;
		ep->e_vm.vm_dsize = 0;
		ep->e_vm.vm_ssize = 0;
	}
	ep->e_vm.vm_rssize = 0;

	if ((p->p_flag & P_CONTROLT) && (sessionp) &&
	    (tp = sessionp->s_ttyp)) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
		ep->e_tsess = CAST_USER_ADDR_T(tp->t_session);
	} else
		ep->e_tdev = NODEV;

	if (SESS_LEADER(p))
		ep->e_flag |= EPROC_SLEADER;
	if (p->p_wmesg)
		strncpy(ep->e_wmesg, p->p_wmesg, WMESGLEN);
	ep->e_xsize = ep->e_xrssize = 0;
	ep->e_xccount = ep->e_xswrss = 0;
}
/*
 * Fill in an extern_proc structure for the specified process.
 */
static void
fill_externproc(p, exp)
	register struct proc *p;
	register struct extern_proc *exp;
{
	exp->p_forw = exp->p_back = NULL;
	exp->p_starttime = p->p_stats->p_start;
	exp->p_vmspace = NULL;
	exp->p_sigacts = p->p_sigacts;
	exp->p_flag = p->p_flag;
	exp->p_stat = p->p_stat;
	exp->p_pid = p->p_pid;
	exp->p_oppid = p->p_oppid;
	exp->p_dupfd = p->p_dupfd;
	exp->user_stack = CAST_DOWN(caddr_t, p->user_stack);
	exp->exit_thread = p->exit_thread;
	exp->p_debugger = p->p_debugger;
	exp->sigwait = p->sigwait;
	exp->p_estcpu = p->p_estcpu;
	exp->p_cpticks = p->p_cpticks;
	exp->p_pctcpu = p->p_pctcpu;
	exp->p_wchan = p->p_wchan;
	exp->p_wmesg = p->p_wmesg;
	exp->p_swtime = p->p_swtime;
	exp->p_slptime = p->p_slptime;
	bcopy(&p->p_realtimer, &exp->p_realtimer, sizeof(struct itimerval));
	bcopy(&p->p_rtime, &exp->p_rtime, sizeof(struct timeval));
	exp->p_uticks = p->p_uticks;
	exp->p_sticks = p->p_sticks;
	exp->p_iticks = p->p_iticks;
	exp->p_traceflag = p->p_traceflag;
	exp->p_tracep = p->p_tracep;
	exp->p_siglist = 0;		/* No longer relevant */
	exp->p_textvp = p->p_textvp;
	exp->p_holdcnt = 0;
	exp->p_sigmask = 0;		/* no longer available */
	exp->p_sigignore = p->p_sigignore;
	exp->p_sigcatch = p->p_sigcatch;
	exp->p_priority = p->p_priority;
	exp->p_usrpri = p->p_usrpri;
	exp->p_nice = p->p_nice;
	bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
	exp->p_comm[MAXCOMLEN] = '\0';
	exp->p_pgrp = p->p_pgrp;
	exp->p_addr = NULL;
	exp->p_xstat = p->p_xstat;
	exp->p_acflag = p->p_acflag;
	exp->p_ru = p->p_ru;		/* XXX may be NULL */
}
/*
 * Fill in an LP64 version of extern_proc structure for the specified process.
 */
static void
fill_user_externproc(register struct proc *p, register struct user_extern_proc *exp)
{
	exp->p_forw = exp->p_back = USER_ADDR_NULL;
	exp->p_starttime.tv_sec = p->p_stats->p_start.tv_sec;
	exp->p_starttime.tv_usec = p->p_stats->p_start.tv_usec;
	exp->p_vmspace = USER_ADDR_NULL;
	exp->p_sigacts = CAST_USER_ADDR_T(p->p_sigacts);
	exp->p_flag = p->p_flag;
	exp->p_stat = p->p_stat;
	exp->p_pid = p->p_pid;
	exp->p_oppid = p->p_oppid;
	exp->p_dupfd = p->p_dupfd;
	exp->user_stack = p->user_stack;
	exp->exit_thread = CAST_USER_ADDR_T(p->exit_thread);
	exp->p_debugger = p->p_debugger;
	exp->sigwait = p->sigwait;
	exp->p_estcpu = p->p_estcpu;
	exp->p_cpticks = p->p_cpticks;
	exp->p_pctcpu = p->p_pctcpu;
	exp->p_wchan = CAST_USER_ADDR_T(p->p_wchan);
	exp->p_wmesg = CAST_USER_ADDR_T(p->p_wmesg);
	exp->p_swtime = p->p_swtime;
	exp->p_slptime = p->p_slptime;
	exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
	exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;
	exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
	exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;
	exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
	exp->p_rtime.tv_usec = p->p_rtime.tv_usec;
	exp->p_uticks = p->p_uticks;
	exp->p_sticks = p->p_sticks;
	exp->p_iticks = p->p_iticks;
	exp->p_traceflag = p->p_traceflag;
	exp->p_tracep = CAST_USER_ADDR_T(p->p_tracep);
	exp->p_siglist = 0;		/* No longer relevant */
	exp->p_textvp = CAST_USER_ADDR_T(p->p_textvp);
	exp->p_holdcnt = 0;
	exp->p_sigmask = 0;		/* no longer available */
	exp->p_sigignore = p->p_sigignore;
	exp->p_sigcatch = p->p_sigcatch;
	exp->p_priority = p->p_priority;
	exp->p_usrpri = p->p_usrpri;
	exp->p_nice = p->p_nice;
	bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
	exp->p_comm[MAXCOMLEN] = '\0';
	exp->p_pgrp = CAST_USER_ADDR_T(p->p_pgrp);
	exp->p_addr = USER_ADDR_NULL;
	exp->p_xstat = p->p_xstat;
	exp->p_acflag = p->p_acflag;
	exp->p_ru = CAST_USER_ADDR_T(p->p_ru);	/* XXX may be NULL */
}
static void
fill_proc(p, kp)
	register struct proc *p;
	register struct kinfo_proc *kp;
{
	fill_externproc(p, &kp->kp_proc);
	fill_eproc(p, &kp->kp_eproc);
}

static void
fill_user_proc(register struct proc *p, register struct user_kinfo_proc *kp)
{
	fill_user_externproc(p, &kp->kp_proc);
	fill_user_eproc(p, &kp->kp_eproc);
}
int
kdebug_ops(int *name, u_int namelen, user_addr_t where,
	size_t *sizep, struct proc *p)
{
	int ret = 0;

	ret = suser(kauth_cred_get(), &p->p_acflag);
	if (ret)
		return (ret);

	switch (name[0]) {
	case KERN_KDSETRTCDEC:
	case KERN_KDGETENTROPY:
		ret = kdbg_control(name, namelen, where, sizep);
		break;
	default:
		ret = ENOTSUP;
		break;
	}
	return (ret);
}
extern int pcsamples_control(int *name, u_int namelen, user_addr_t where,
	size_t *sizep);

int
pcsamples_ops(int *name, u_int namelen, user_addr_t where,
	size_t *sizep, struct proc *p)
{
	int ret = 0;

	ret = suser(kauth_cred_get(), &p->p_acflag);
	if (ret)
		return (ret);

	switch (name[0]) {
	case KERN_PCDISABLE:
	case KERN_PCREADBUF:
		ret = pcsamples_control(name, namelen, where, sizep);
		break;
	default:
		ret = ENOTSUP;
		break;
	}
	return (ret);
}
/*
 * Return the top *sizep bytes of the user stack, or the entire area of the
 * user stack down through the saved exec_path, whichever is smaller.
 */
static int
sysctl_procargs(int *name, u_int namelen, user_addr_t where,
	size_t *sizep, struct proc *cur_proc)
{
	return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 0);
}

static int
sysctl_procargs2(int *name, u_int namelen, user_addr_t where, size_t *sizep,
	struct proc *cur_proc)
{
	return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 1);
}
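/*
 * Illustrative only: a minimal userland sketch of the KERN_PROCARGS2 path,
 * which returns argc as the first word followed by the saved argument area,
 * assuming the standard <sys/sysctl.h> names:
 *
 *	int mib[3] = { CTL_KERN, KERN_PROCARGS2, getpid() };
 *	char buf[ARG_MAX];
 *	size_t len = sizeof(buf);
 *	int argc;
 *
 *	if (sysctl(mib, 3, buf, &len, NULL, 0) == 0) {
 *		memcpy(&argc, buf, sizeof(argc));
 *		the executable path and argv strings follow the leading int.
 *	}
 */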
static int
sysctl_procargsx(int *name, __unused u_int namelen, user_addr_t where,
	size_t *sizep, struct proc *cur_proc, int argc_yes)
{
	struct proc *p;
	int buflen = where != USER_ADDR_NULL ? *sizep : 0;
	int error = 0;
	struct vm_map *proc_map;
	struct task *task;
	vm_map_copy_t tmp;
	user_addr_t arg_addr;
	size_t arg_size;
	caddr_t data;
	int size;
	vm_offset_t copy_start, copy_end;
	kern_return_t ret;

	if (argc_yes)
		buflen -= sizeof(int);	/* reserve first word to return argc */

	/* we only care about buflen when where (oldp from sysctl) is not NULL. */
	/* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
	/* is not NULL then the caller wants us to return the length needed to */
	/* hold the data we would return */
	if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
		return (EINVAL);
	}
	arg_size = buflen;

	/*
	 *	Lookup process by pid
	 */
	p = pfind((pid_t)name[0]);
	if (p == NULL) {
		return (EINVAL);
	}

	/*
	 *	Copy the top N bytes of the stack.
	 *	On all machines we have so far, the stack grows
	 *	downwards.
	 *
	 *	If the user expects no more than N bytes of
	 *	argument list, use that as a guess for the
	 *	size.
	 */

	if (where == USER_ADDR_NULL) {
		/* caller only wants to know length of proc args data */
		size = p->p_argslen;
		if (argc_yes) {
			size += sizeof(int);
		} else {
			/*
			 * old PROCARGS will return the executable's path and plus some
			 * extra space for work alignment and data tags
			 */
			size += PATH_MAX + (6 * sizeof(int));
		}
		size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
		*sizep = size;
		return (0);
	}

	if ((kauth_cred_getuid(p->p_ucred) != kauth_cred_getuid(kauth_cred_get()))
	    && suser(kauth_cred_get(), &cur_proc->p_acflag))
		return (EINVAL);

	if ((u_int)arg_size > p->p_argslen)
		arg_size = round_page(p->p_argslen);

	arg_addr = p->user_stack - arg_size;

	/*
	 *	Before we can block (any VM code), make another
	 *	reference to the map to keep it alive.  We do
	 *	that by getting a reference on the task itself.
	 */
	task = p->task;
	if (task == NULL)
		return (EINVAL);

	/*
	 * Once we have a task reference we can convert that into a
	 * map reference, which we will use in the calls below.  The
	 * task/process may change its map after we take this reference
	 * (see execve), but the worst that will happen then is a return
	 * of stale info (which is always a possibility).
	 */
	task_reference(task);
	proc_map = get_task_map_reference(task);
	task_deallocate(task);
	if (proc_map == NULL)
		return (EINVAL);

	ret = kmem_alloc(kernel_map, &copy_start, round_page(arg_size));
	if (ret != KERN_SUCCESS) {
		vm_map_deallocate(proc_map);
		return (ENOMEM);
	}

	copy_end = round_page(copy_start + arg_size);

	if( vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
	    (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
		vm_map_deallocate(proc_map);
		kmem_free(kernel_map, copy_start,
			round_page(arg_size));
		return (EIO);
	}

	/*
	 *	Now that we've done the copyin from the process'
	 *	map, we can release the reference to it.
	 */
	vm_map_deallocate(proc_map);

	if( vm_map_copy_overwrite(kernel_map,
	    (vm_map_address_t)copy_start,
	    tmp, FALSE) != KERN_SUCCESS) {
		kmem_free(kernel_map, copy_start,
			round_page(arg_size));
		return (EIO);
	}

	if (arg_size > p->p_argslen) {
		data = (caddr_t) (copy_end - p->p_argslen);
		size = p->p_argslen;
	} else {
		data = (caddr_t) (copy_end - arg_size);
		size = arg_size;
	}

	if (argc_yes) {
		/* Put processes argc as the first word in the copyout buffer */
		suword(where, p->p_argc);
		error = copyout(data, (where + sizeof(int)), size);
		size += sizeof(int);
	} else {
		error = copyout(data, where, size);

		/*
		 * Make the old PROCARGS work to return the executable's path
		 * But, only if there is enough space in the provided buffer
		 *
		 * on entry: data [possibly] points to the beginning of the path
		 *
		 * Note: we keep all pointers&sizes aligned to word boundaries
		 */
		if ( (! error) && (buflen > 0 && (u_int)buflen > p->p_argslen) )
		{
			int binPath_sz, alignedBinPath_sz = 0;
			int extraSpaceNeeded, addThis;
			user_addr_t placeHere;
			char * str = (char *) data;
			int max_len = size;

			/* Some apps are really bad about messing up their stacks
			   So, we have to be extra careful about getting the length
			   of the executing binary.  If we encounter an error, we bail.
			*/

			/* Limit ourselves to PATH_MAX paths */
			if ( max_len > PATH_MAX ) max_len = PATH_MAX;

			binPath_sz = 0;

			while ( (binPath_sz < max_len-1) && (*str++ != 0) )
				binPath_sz++;

			/* If we have a NUL terminator, copy it, too */
			if (binPath_sz < max_len-1) binPath_sz += 1;

			/* Pre-flight the space requirements */

			/* Account for the padding that fills out binPath to the next word */
			alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;

			placeHere = where + size;

			/* Account for the bytes needed to keep placeHere word aligned */
			addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;

			/* Add up all the space that is needed */
			extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));

			/* is there room to tack on argv[0]? */
			if ( (buflen & ~(sizeof(int)-1)) >= ( p->p_argslen + extraSpaceNeeded ))
			{
				placeHere += addThis;
				suword(placeHere, 0);
				placeHere += sizeof(int);
				suword(placeHere, 0xBFFF0000);
				placeHere += sizeof(int);
				suword(placeHere, 0);
				placeHere += sizeof(int);
				error = copyout(data, placeHere, binPath_sz);
				if ( ! error ) {
					placeHere += binPath_sz;
					suword(placeHere, 0);
					size += extraSpaceNeeded;
				}
			}
		}
	}

	if (copy_start != (vm_offset_t) 0) {
		kmem_free(kernel_map, copy_start, copy_end - copy_start);
	}
	if (error) {
		return (error);
	}

	if (where != USER_ADDR_NULL)
		*sizep = size;
	return (0);
}
/*
 * Validate parameters and get old / set new parameters
 * for max number of concurrent aio requests.  Makes sure
 * the system wide limit is greater than the per process
 * limit.
 */
static int
sysctl_aiomax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen)
{
	int	error = 0;
	int	new_value;

	if ( oldp && *oldlenp < sizeof(int) )
		return (ENOMEM);
	if ( newp && newlen != sizeof(int) )
		return (EINVAL);

	*oldlenp = sizeof(int);
	if ( oldp )
		error = copyout( &aio_max_requests, oldp, sizeof(int) );
	if ( error == 0 && newp )
		error = copyin( newp, &new_value, sizeof(int) );
	if ( error == 0 && newp ) {
		if ( new_value >= aio_max_requests_per_process )
			aio_max_requests = new_value;
		else
			error = EINVAL;
	}
	return( error );

} /* sysctl_aiomax */
/*
 * Validate parameters and get old / set new parameters
 * for max number of concurrent aio requests per process.
 * Makes sure per process limit is less than the system wide
 * limit.
 */
static int
sysctl_aioprocmax(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen)
{
	int	error = 0;
	int	new_value;

	if ( oldp && *oldlenp < sizeof(int) )
		return (ENOMEM);
	if ( newp && newlen != sizeof(int) )
		return (EINVAL);

	*oldlenp = sizeof(int);
	if ( oldp )
		error = copyout( &aio_max_requests_per_process, oldp, sizeof(int) );
	if ( error == 0 && newp )
		error = copyin( newp, &new_value, sizeof(int) );
	if ( error == 0 && newp ) {
		if ( new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX )
			aio_max_requests_per_process = new_value;
		else
			error = EINVAL;
	}
	return( error );

} /* sysctl_aioprocmax */
/*
 * Validate parameters and get old / set new parameters
 * for max number of async IO worker threads.
 * We only allow an increase in the number of worker threads.
 */
static int
sysctl_aiothreads(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen)
{
	int	error = 0;
	int	new_value;

	if ( oldp && *oldlenp < sizeof(int) )
		return (ENOMEM);
	if ( newp && newlen != sizeof(int) )
		return (EINVAL);

	*oldlenp = sizeof(int);
	if ( oldp )
		error = copyout( &aio_worker_threads, oldp, sizeof(int) );
	if ( error == 0 && newp )
		error = copyin( newp, &new_value, sizeof(int) );
	if ( error == 0 && newp ) {
		if (new_value > aio_worker_threads) {
			_aio_create_worker_threads( (new_value - aio_worker_threads) );
			aio_worker_threads = new_value;
		} else
			error = EINVAL;
	}
	return( error );

} /* sysctl_aiothreads */
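/*
 * Illustrative only: the three handlers above keep the invariant
 * aio_max_requests >= aio_max_requests_per_process >= AIO_LISTIO_MAX,
 * so a userland tuning sketch (assuming the kern.aiomax / kern.aioprocmax
 * names) raises the system-wide limit before the per-process one:
 *
 *	int aiomax = 256, aioprocmax = 128;
 *	sysctlbyname("kern.aiomax", NULL, NULL, &aiomax, sizeof(aiomax));
 *	sysctlbyname("kern.aioprocmax", NULL, NULL, &aioprocmax, sizeof(aioprocmax));
 */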
/*
 * Validate parameters and get old / set new parameters
 * for max number of processes per UID.
 * Makes sure per UID limit is less than the system wide limit.
 */
static int
sysctl_maxprocperuid(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen)
{
	int	error = 0;
	int	new_value;

	if ( oldp != USER_ADDR_NULL && *oldlenp < sizeof(int) )
		return (ENOMEM);
	if ( newp != USER_ADDR_NULL && newlen != sizeof(int) )
		return (EINVAL);

	*oldlenp = sizeof(int);
	if ( oldp != USER_ADDR_NULL )
		error = copyout( &maxprocperuid, oldp, sizeof(int) );
	if ( error == 0 && newp != USER_ADDR_NULL ) {
		error = copyin( newp, &new_value, sizeof(int) );
		if ( error == 0 ) {
			AUDIT_ARG(value, new_value);
			if ( new_value <= maxproc && new_value > 0 )
				maxprocperuid = new_value;
			else
				error = EINVAL;
		}
	}
	return( error );

} /* sysctl_maxprocperuid */
/*
 * Validate parameters and get old / set new parameters
 * for max number of files per process.
 * Makes sure per process limit is less than the system-wide limit.
 */
static int
sysctl_maxfilesperproc(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen)
{
	int	error = 0;
	int	new_value;

	if ( oldp != USER_ADDR_NULL && *oldlenp < sizeof(int) )
		return (ENOMEM);
	if ( newp != USER_ADDR_NULL && newlen != sizeof(int) )
		return (EINVAL);

	*oldlenp = sizeof(int);
	if ( oldp != USER_ADDR_NULL )
		error = copyout( &maxfilesperproc, oldp, sizeof(int) );
	if ( error == 0 && newp != USER_ADDR_NULL ) {
		error = copyin( newp, &new_value, sizeof(int) );
		if ( error == 0 ) {
			AUDIT_ARG(value, new_value);
			if ( new_value < maxfiles && new_value > 0 )
				maxfilesperproc = new_value;
			else
				error = EINVAL;
		}
	}
	return( error );

} /* sysctl_maxfilesperproc */
/*
 * Validate parameters and get old / set new parameters
 * for the system-wide limit on the max number of processes.
 * Makes sure the system-wide limit is less than the configured hard
 * limit set at kernel compilation.
 */
static int
sysctl_maxproc(user_addr_t oldp, size_t *oldlenp,
	user_addr_t newp, size_t newlen)
{
	int	error = 0;
	int	new_value;

	if ( oldp != USER_ADDR_NULL && *oldlenp < sizeof(int) )
		return (ENOMEM);
	if ( newp != USER_ADDR_NULL && newlen != sizeof(int) )
		return (EINVAL);

	*oldlenp = sizeof(int);
	if ( oldp != USER_ADDR_NULL )
		error = copyout( &maxproc, oldp, sizeof(int) );
	if ( error == 0 && newp != USER_ADDR_NULL ) {
		error = copyin( newp, &new_value, sizeof(int) );
		if ( error == 0 ) {
			AUDIT_ARG(value, new_value);
			if ( new_value <= hard_maxproc && new_value > 0 )
				maxproc = new_value;
			else
				error = EINVAL;
		}
	}
	return( error );

} /* sysctl_maxproc */