/*
 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Mike Karels at Berkeley Software Design, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sysctl.c	8.4 (Berkeley) 4/14/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 * DEPRECATED sysctl system call code
 *
 * Everything in this file is deprecated.  Sysctls should be handled
 * by the code in kern_newsysctl.c.
 * The remaining "case" sections are supposed to be converted into
 * SYSCTL_*-style definitions, and as soon as all of them are gone,
 * this source file is supposed to die.
 *
 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
 */
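/*
 * Illustrative sketch only (not one of this file's registrations): a new
 * sysctl is declared in its own source file with the SYSCTL_* macros from
 * <sys/sysctl.h> rather than by adding a "case" here.  The global
 * "example_tunable" and the OID "kern.example_tunable" below are
 * hypothetical placeholders.
 *
 *	static int example_tunable = 0;
 *	SYSCTL_INT(_kern, OID_AUTO, example_tunable,
 *	    CTLFLAG_RW | CTLFLAG_LOCKED,
 *	    &example_tunable, 0, "hypothetical read/write integer tunable");
 */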
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/file_internal.h>
#include <sys/vnode_internal.h>
#include <sys/unistd.h>
#include <sys/ioctl.h>
#include <sys/namei.h>
#include <sys/disklabel.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <sys/aio_kern.h>
#include <sys/reboot.h>

#include <security/audit/audit.h>
#include <kern/kalloc.h>

#include <mach/machine.h>
#include <mach/mach_host.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/debug.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <mach/host_info.h>

#include <sys/mount_internal.h>
#include <sys/kdebug.h>

#include <IOKit/IOPlatformExpert.h>
#include <pexpert/pexpert.h>

#include <machine/machine_routines.h>
#include <machine/exec.h>

#include <vm/vm_protos.h>
#include <vm/vm_pageout.h>
#include <sys/imgsrc.h>
#include <kern/timer_call.h>

#if defined(__i386__) || defined(__x86_64__)
#include <i386/cpuid.h>
#endif

#include <sys/kern_memorystatus.h>

#include <kperf/kperf.h>

#include <kern/hv_support.h>
/*
 * deliberately setting max requests to really high number
 * so that runaway settings do not cause MALLOC overflows
 */
#define AIO_MAX_REQUESTS (128 * CONFIG_AIO_MAX)
extern int aio_max_requests;
extern int aio_max_requests_per_process;
extern int aio_worker_threads;
extern int lowpri_IO_window_msecs;
extern int lowpri_IO_delay_msecs;
extern int nx_enabled;
extern int speculative_reads_disabled;
extern int ignore_is_ssd;
extern unsigned int speculative_prefetch_max;
extern unsigned int speculative_prefetch_max_iosize;
extern unsigned int preheat_max_bytes;
extern unsigned int preheat_min_bytes;
extern long numvnodes;

extern uuid_string_t bootsessionuuid_string;

extern unsigned int vm_max_delayed_work_limit;
extern unsigned int vm_max_batch;

extern unsigned int vm_page_free_min;
extern unsigned int vm_page_free_target;
extern unsigned int vm_page_free_reserved;
extern unsigned int vm_page_speculative_percentage;
extern unsigned int vm_page_speculative_q_age_ms;
/*
 * Conditionally allow dtrace to see these functions for debugging purposes.
 */
#define STATIC static

extern boolean_t mach_timer_coalescing_enabled;

extern uint64_t timer_deadline_tracking_bin_1, timer_deadline_tracking_bin_2;
STATIC void
fill_user32_eproc(proc_t, struct user32_eproc *__restrict);
STATIC void
fill_user32_externproc(proc_t, struct user32_extern_proc *__restrict);
STATIC void
fill_user64_eproc(proc_t, struct user64_eproc *__restrict);
STATIC void
fill_user64_proc(proc_t, struct user64_kinfo_proc *__restrict);
STATIC void
fill_user64_externproc(proc_t, struct user64_extern_proc *__restrict);
STATIC void
fill_user32_proc(proc_t, struct user32_kinfo_proc *__restrict);

extern int
kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep);

extern int
pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
              proc_t p);

__private_extern__ kern_return_t
reset_vmobjectcache(unsigned int val1, unsigned int val2);

extern int
sysctl_procargs(int *name, u_int namelen, user_addr_t where,
                size_t *sizep, proc_t cur_proc);
STATIC int
sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
                 proc_t cur_proc, int argc_yes);
int
sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
              size_t newlen, void *sp, int len);

STATIC int sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg);
STATIC int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg);
STATIC int sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg);
STATIC int sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg);
STATIC int sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg);
STATIC int sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg);
int sysdoproc_callback(proc_t p, void *arg);
/* forward declarations for non-static STATIC */
STATIC void fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64);
STATIC void fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32);
STATIC int sysctl_handle_kern_threadname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_sched_stats(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_sched_stats_enable(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS;
#if COUNT_SYSCALLS
STATIC int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS;
#endif	/* COUNT_SYSCALLS */
STATIC int sysctl_doprocargs SYSCTL_HANDLER_ARGS;
STATIC int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS;
STATIC int sysctl_prochandle SYSCTL_HANDLER_ARGS;
STATIC int sysctl_aiomax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_aioprocmax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_aiothreads(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_maxproc(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_osversion(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_sysctl_bootargs(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_maxvnodes(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_securelvl(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_domainname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_hostname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_procname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_boottime(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_symfile(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_netboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
#ifdef CONFIG_IMGSRC_ACCESS
STATIC int sysctl_imgsrcdev(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
#endif
STATIC int sysctl_usrstack(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_usrstack64(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_suid_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_delayterm(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_rage_vnode(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_kern_check_openevt(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_nx(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_loadavg(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_vm_toggle_address_reuse(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_swapusage(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int fetch_process_cputype(proc_t cur_proc, int *name, u_int namelen, cpu_type_t *cputype);
STATIC int sysctl_sysctl_native(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_sysctl_cputype(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_safeboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_singleuser(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_slide(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);

extern void IORegistrySetOSBuildVersion(char * build_version);
STATIC void
fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64)
{
    la64->ldavg[0] = la->ldavg[0];
    la64->ldavg[1] = la->ldavg[1];
    la64->ldavg[2] = la->ldavg[2];
    la64->fscale = (user64_long_t)la->fscale;
}

STATIC void
fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32)
{
    la32->ldavg[0] = la->ldavg[0];
    la32->ldavg[1] = la->ldavg[1];
    la32->ldavg[2] = la->ldavg[2];
    la32->fscale = (user32_long_t)la->fscale;
}
/*
 * Attributes stored in the kernel.
 */
extern char corefilename[MAXPATHLEN+1];
extern int do_coredump;
extern int sugid_coredump;

extern int do_count_syscalls;

int securelevel = -1;
STATIC int
sysctl_handle_kern_threadname(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
    int error;
    struct uthread *ut = get_bsdthread_info(current_thread());
    user_addr_t oldp = 0, newp = 0;
    size_t *oldlenp = NULL;
    size_t newlen = 0;

    oldp = req->oldptr;
    oldlenp = &(req->oldlen);
    newlen = req->newlen;
    newp = req->newptr;

    /* We want the current length, and maybe the string itself */
    if (oldlenp) {
        /* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */
        size_t currlen = MAXTHREADNAMESIZE - 1;

        if (ut->pth_name)
            /* use length of current thread name */
            currlen = strlen(ut->pth_name);
        if (oldp) {
            if (*oldlenp < currlen)
                return ENOMEM;
            /* NOTE - we do not copy the NULL terminator */
            if (ut->pth_name) {
                error = copyout(ut->pth_name, oldp, currlen);
                if (error)
                    return error;
            }
        }
        /* return length of thread name minus NULL terminator (just like strlen) */
        req->oldidx = currlen;
    }

    /* We want to set the name to something */
    if (newp) {
        if (newlen > (MAXTHREADNAMESIZE - 1))
            return ENAMETOOLONG;

        if (!ut->pth_name) {
            ut->pth_name = (char*)kalloc( MAXTHREADNAMESIZE );
            if (!ut->pth_name)
                return ENOMEM;
        }
        bzero(ut->pth_name, MAXTHREADNAMESIZE);
        error = copyin(newp, ut->pth_name, newlen);
        if (error)
            return error;
    }

    return 0;
}

SYSCTL_PROC(_kern, KERN_THREADNAME, threadname, CTLFLAG_ANYBODY | CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_handle_kern_threadname, "A", "");
STATIC int
sysctl_sched_stats(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    host_basic_info_data_t hinfo;
    kern_return_t kret;
    uint32_t size;
    int changed;
    mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
    struct _processor_statistics_np *buf;
    int error;

    kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
    if (kret != KERN_SUCCESS) {
        return EINVAL;
    }

    size = sizeof(struct _processor_statistics_np) * (hinfo.logical_cpu_max + 2); /* One for RT Queue, One for Fair Share Queue */

    if (req->oldlen < size) {
        return EINVAL;
    }

    MALLOC(buf, struct _processor_statistics_np*, size, M_TEMP, M_ZERO | M_WAITOK);

    kret = get_sched_statistics(buf, &size);
    if (kret != KERN_SUCCESS) {
        error = EINVAL;
        goto out;
    }

    error = sysctl_io_opaque(req, buf, size, &changed);
    if (error) {
        goto out;
    }

    if (changed) {
        panic("Sched info changed?!");
    }
out:
    FREE(buf, M_TEMP);
    return error;
}

SYSCTL_PROC(_kern, OID_AUTO, sched_stats, CTLFLAG_LOCKED, 0, 0, sysctl_sched_stats, "-", "");
STATIC int
sysctl_sched_stats_enable(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
{
    boolean_t active;
    int res;

    if (req->newlen != sizeof(active)) {
        return EINVAL;
    }

    res = copyin(req->newptr, &active, sizeof(active));
    if (res != 0) {
        return res;
    }

    return set_sched_stats_active(active);
}

SYSCTL_PROC(_kern, OID_AUTO, sched_stats_enable, CTLFLAG_LOCKED | CTLFLAG_WR, 0, 0, sysctl_sched_stats_enable, "-", "");
extern int get_kernel_symfile(proc_t, char **);

#if COUNT_SYSCALLS
#define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)

extern int syscalls_log[];
extern const char *syscallnames[];
STATIC int
sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
{
    __unused int cmd = oidp->oid_arg2;	/* subcommand */
    __unused int *name = arg1;		/* oid element argument vector */
    __unused int namelen = arg2;	/* number of oid element arguments */
    user_addr_t oldp = req->oldptr;	/* user buffer copy out address */
    size_t *oldlenp = &req->oldlen;	/* user buffer copy out size */
    user_addr_t newp = req->newptr;	/* user buffer copy in address */
    size_t newlen = req->newlen;	/* user buffer copy in size */
    int error;
    int tmp;

    /* valid values passed in:
     * = 0 means don't keep called counts for each bsd syscall
     * > 0 means keep called counts for each bsd syscall
     * = 2 means dump current counts to the system log
     * = 3 means reset all counts
     * for example, to dump current counts:
     *		sysctl -w kern.count_calls=2
     */
    error = sysctl_int(oldp, oldlenp, newp, newlen, &tmp);
    if (error != 0) {
        return (error);
    }

    if (tmp == 1) {
        do_count_syscalls = 1;
    }
    else if (tmp == 0 || tmp == 2 || tmp == 3) {
        int i;
        for (i = 0; i < nsysent; i++) {
            if (syscalls_log[i] != 0) {
                if (tmp == 2) {
                    printf("%d calls - name %s \n", syscalls_log[i], syscallnames[i]);
                }
                else {
                    syscalls_log[i] = 0;
                }
            }
        }
        if (tmp != 0) {
            do_count_syscalls = 1;
        }
    }

    /* adjust index so we return the right required/consumed amount */
    if (!error)
        req->oldidx += req->oldlen;

    return (error);
}
SYSCTL_PROC(_kern, KERN_COUNT_SYSCALLS, count_syscalls, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
    0,				/* Pointer argument (arg1) */
    0,				/* Integer argument (arg2) */
    sysctl_docountsyscalls,	/* Handler function */
    NULL,			/* Data pointer */
    "");
#endif	/* COUNT_SYSCALLS */
/*
 * The following sysctl_* functions should not be used
 * any more, as they can only cope with callers in
 * user mode: Use new-style
 *  sysctl_io_number()
 *  sysctl_io_string()
 *  sysctl_io_opaque()
 * instead.
 */

/*
 * Validate parameters and get old / set new parameters
 * for an integer-valued sysctl function.
 */
int
sysctl_int(user_addr_t oldp, size_t *oldlenp,
           user_addr_t newp, size_t newlen, int *valp)
{
    int error = 0;

    if (oldp != USER_ADDR_NULL && oldlenp == NULL)
        return (EFAULT);
    if (oldp && *oldlenp < sizeof(int))
        return (ENOMEM);
    if (newp && newlen != sizeof(int))
        return (EINVAL);
    *oldlenp = sizeof(int);
    if (oldp)
        error = copyout(valp, oldp, sizeof(int));
    if (error == 0 && newp) {
        error = copyin(newp, valp, sizeof(int));
        AUDIT_ARG(value32, *valp);
    }
    return (error);
}
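/*
 * Sketch of the preferred replacement (illustrative only, mirroring the
 * sysctl_io_number() handlers later in this file): a new-style handler lets
 * sysctl_io_number() do the copyin/copyout and bounds checking that
 * sysctl_int() open-codes above.  "example_value" and "sysctl_example" are
 * hypothetical names.
 *
 *	STATIC int
 *	sysctl_example(__unused struct sysctl_oid *oidp, __unused void *arg1,
 *	    __unused int arg2, struct sysctl_req *req)
 *	{
 *		int new_value, changed;
 *		int error = sysctl_io_number(req, example_value, sizeof(int),
 *		    &new_value, &changed);
 *		if (changed)
 *			example_value = new_value;
 *		return (error);
 *	}
 */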
/*
 * Validate parameters and get old / set new parameters
 * for a quad(64-bit)-valued sysctl function.
 */
int
sysctl_quad(user_addr_t oldp, size_t *oldlenp,
            user_addr_t newp, size_t newlen, quad_t *valp)
{
    int error = 0;

    if (oldp != USER_ADDR_NULL && oldlenp == NULL)
        return (EFAULT);
    if (oldp && *oldlenp < sizeof(quad_t))
        return (ENOMEM);
    if (newp && newlen != sizeof(quad_t))
        return (EINVAL);
    *oldlenp = sizeof(quad_t);
    if (oldp)
        error = copyout(valp, oldp, sizeof(quad_t));
    if (error == 0 && newp)
        error = copyin(newp, valp, sizeof(quad_t));
    return (error);
}
STATIC int
sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg)
{
    if (p->p_pid != (pid_t)*(int*)arg)
        return(0);
    else
        return(1);
}

STATIC int
sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg)
{
    if (p->p_pgrpid != (pid_t)*(int*)arg)
        return(0);
    else
        return(1);
}

STATIC int
sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg)
{
    boolean_t retval;
    struct tty *tp;

    /* This is very racy but list lock is held.. Hmmm. */
    if ((p->p_flag & P_CONTROLT) == 0 ||
        (p->p_pgrp == NULL) || (p->p_pgrp->pg_session == NULL) ||
        (tp = SESSION_TP(p->p_pgrp->pg_session)) == TTY_NULL ||
        tp->t_dev != (dev_t)*(int*)arg)
        retval = 0;
    else
        retval = 1;

    return(retval);
}

STATIC int
sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg)
{
    kauth_cred_t my_cred;
    uid_t uid;

    if (p->p_ucred == NULL)
        return(0);
    my_cred = kauth_cred_proc_ref(p);
    uid = kauth_cred_getuid(my_cred);
    kauth_cred_unref(&my_cred);

    if (uid != (uid_t)*(int*)arg)
        return(0);
    else
        return(1);
}

STATIC int
sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg)
{
    kauth_cred_t my_cred;
    uid_t ruid;

    if (p->p_ucred == NULL)
        return(0);
    my_cred = kauth_cred_proc_ref(p);
    ruid = kauth_cred_getruid(my_cred);
    kauth_cred_unref(&my_cred);

    if (ruid != (uid_t)*(int*)arg)
        return(0);
    else
        return(1);
}

STATIC int
sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg)
{
    if ((p->p_lctx == NULL) ||
        (p->p_lctx->lc_id != (pid_t)*(int*)arg))
        return(0);
    else
        return(1);
}
/*
 * try over estimating by 5 procs
 */
#define KERN_PROCSLOP	(5 * sizeof (struct kinfo_proc))
struct sysdoproc_args {
    int buflen;
    void *kprocp;
    boolean_t is_64_bit;
    user_addr_t dp;
    size_t needed;
    int sizeof_kproc;
    int *errorp;
    int uidcheck;
    int ruidcheck;
    int ttycheck;
    int uidval;
};
int
sysdoproc_callback(proc_t p, void *arg)
{
    struct sysdoproc_args *args = arg;

    if (args->buflen >= args->sizeof_kproc) {
        if ((args->ruidcheck != 0) && (sysdoproc_filt_KERN_PROC_RUID(p, &args->uidval) == 0))
            return (PROC_RETURNED);
        if ((args->uidcheck != 0) && (sysdoproc_filt_KERN_PROC_UID(p, &args->uidval) == 0))
            return (PROC_RETURNED);
        if ((args->ttycheck != 0) && (sysdoproc_filt_KERN_PROC_TTY(p, &args->uidval) == 0))
            return (PROC_RETURNED);

        bzero(args->kprocp, args->sizeof_kproc);
        if (args->is_64_bit)
            fill_user64_proc(p, args->kprocp);
        else
            fill_user32_proc(p, args->kprocp);
        int error = copyout(args->kprocp, args->dp, args->sizeof_kproc);
        if (error) {
            *args->errorp = error;
            return (PROC_RETURNED_DONE);
        }
        args->dp += args->sizeof_kproc;
        args->buflen -= args->sizeof_kproc;
    }
    args->needed += args->sizeof_kproc;
    return (PROC_RETURNED);
}
SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD | CTLFLAG_LOCKED, 0, "");
STATIC int
sysctl_prochandle SYSCTL_HANDLER_ARGS
{
    int cmd = oidp->oid_arg2;		/* subcommand for multiple nodes */
    int *name = arg1;			/* oid element argument vector */
    int namelen = arg2;			/* number of oid element arguments */
    user_addr_t where = req->oldptr;	/* user buffer copy out address */

    user_addr_t dp = where;
    size_t needed = 0;
    int buflen = where != USER_ADDR_NULL ? req->oldlen : 0;
    int error = 0;
    boolean_t is_64_bit = proc_is64bit(current_proc());
    struct user32_kinfo_proc user32_kproc;
    struct user64_kinfo_proc user_kproc;
    int sizeof_kproc;
    void *kprocp;
    int (*filterfn)(proc_t, void *) = 0;
    struct sysdoproc_args args;
    int uidcheck = 0;
    int ruidcheck = 0;
    int ttycheck = 0;

    if (namelen != 1 && !(namelen == 0 && cmd == KERN_PROC_ALL))
        return (EINVAL);

    if (is_64_bit) {
        sizeof_kproc = sizeof(user_kproc);
        kprocp = &user_kproc;
    } else {
        sizeof_kproc = sizeof(user32_kproc);
        kprocp = &user32_kproc;
    }

    switch (cmd) {

    case KERN_PROC_PID:
        filterfn = sysdoproc_filt_KERN_PROC_PID;
        break;

    case KERN_PROC_PGRP:
        filterfn = sysdoproc_filt_KERN_PROC_PGRP;
        break;

    case KERN_PROC_TTY:
        ttycheck = 1;
        break;

    case KERN_PROC_UID:
        uidcheck = 1;
        break;

    case KERN_PROC_RUID:
        ruidcheck = 1;
        break;

    case KERN_PROC_LCID:
        filterfn = sysdoproc_filt_KERN_PROC_LCID;
        break;

    case KERN_PROC_ALL:
        break;

    default:
        /* must be kern.proc.<unknown> */
        return (ENOTSUP);
    }

    error = 0;
    args.buflen = buflen;
    args.kprocp = kprocp;
    args.is_64_bit = is_64_bit;
    args.dp = dp;
    args.needed = needed;
    args.errorp = &error;
    args.uidcheck = uidcheck;
    args.ruidcheck = ruidcheck;
    args.ttycheck = ttycheck;
    args.sizeof_kproc = sizeof_kproc;
    if (namelen)
        args.uidval = name[0];

    proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST),
        sysdoproc_callback, &args, filterfn, name);

    if (error)
        return (error);

    dp = args.dp;
    needed = args.needed;

    if (where != USER_ADDR_NULL) {
        req->oldlen = dp - where;
        if (needed > req->oldlen)
            return (ENOMEM);
    } else {
        needed += KERN_PROCSLOP;
        req->oldlen = needed;
    }
    /* adjust index so we return the right required/consumed amount */
    req->oldidx += req->oldlen;
    return (0);
}
/*
 * We specify the subcommand code for multiple nodes as the 'req->arg2' value
 * in the sysctl declaration itself, which comes into the handler function
 * as 'oidp->oid_arg2'.
 *
 * For these particular sysctls, since they have well known OIDs, we could
 * have just obtained it from the '((int *)arg1)[0]' parameter, but that would
 * not demonstrate how to handle multiple sysctls that used OID_AUTO instead
 * of a well known value with a common handler function.  This is desirable,
 * because we want well known values to "go away" at some future date.
 *
 * It should be noted that the value of '((int *)arg1)[1]' is used as an
 * integer parameter to the subcommand for many of these sysctls; we'd
 * rather have used '((int *)arg1)[0]' for that, or even better, an element
 * in a structure passed in as the 'newp' argument to sysctlbyname(3),
 * and then use leaf-node permissions enforcement, but that would have
 * necessitated modifying user space code to correspond to the interface
 * change, and we are striving for binary backward compatibility here; even
 * though these are SPI, and not intended for use by user space applications
 * which are not themselves system tools or libraries, some applications
 * have erroneously used them.
 */
SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
    0,				/* Pointer argument (arg1) */
    KERN_PROC_ALL,		/* Integer argument (arg2) */
    sysctl_prochandle,		/* Handler function */
    NULL,			/* Data is size variant on ILP32/LP64 */
    "");
SYSCTL_PROC(_kern_proc, KERN_PROC_PID, pid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
    0,				/* Pointer argument (arg1) */
    KERN_PROC_PID,		/* Integer argument (arg2) */
    sysctl_prochandle,		/* Handler function */
    NULL,			/* Data is size variant on ILP32/LP64 */
    "");
SYSCTL_PROC(_kern_proc, KERN_PROC_TTY, tty, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
    0,				/* Pointer argument (arg1) */
    KERN_PROC_TTY,		/* Integer argument (arg2) */
    sysctl_prochandle,		/* Handler function */
    NULL,			/* Data is size variant on ILP32/LP64 */
    "");
SYSCTL_PROC(_kern_proc, KERN_PROC_PGRP, pgrp, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
    0,				/* Pointer argument (arg1) */
    KERN_PROC_PGRP,		/* Integer argument (arg2) */
    sysctl_prochandle,		/* Handler function */
    NULL,			/* Data is size variant on ILP32/LP64 */
    "");
SYSCTL_PROC(_kern_proc, KERN_PROC_UID, uid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
    0,				/* Pointer argument (arg1) */
    KERN_PROC_UID,		/* Integer argument (arg2) */
    sysctl_prochandle,		/* Handler function */
    NULL,			/* Data is size variant on ILP32/LP64 */
    "");
SYSCTL_PROC(_kern_proc, KERN_PROC_RUID, ruid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
    0,				/* Pointer argument (arg1) */
    KERN_PROC_RUID,		/* Integer argument (arg2) */
    sysctl_prochandle,		/* Handler function */
    NULL,			/* Data is size variant on ILP32/LP64 */
    "");
SYSCTL_PROC(_kern_proc, KERN_PROC_LCID, lcid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
    0,				/* Pointer argument (arg1) */
    KERN_PROC_LCID,		/* Integer argument (arg2) */
    sysctl_prochandle,		/* Handler function */
    NULL,			/* Data is size variant on ILP32/LP64 */
    "");
/*
 * Fill in non-zero fields of an eproc structure for the specified process.
 */
STATIC void
fill_user32_eproc(proc_t p, struct user32_eproc *__restrict ep)
{
    struct tty *tp;
    struct pgrp *pg;
    struct session *sessp;
    kauth_cred_t my_cred;

    pg = proc_pgrp(p);
    sessp = proc_session(p);

    if (pg != PGRP_NULL) {
        ep->e_pgid = p->p_pgrpid;
        ep->e_jobc = pg->pg_jobc;
        if (sessp != SESSION_NULL && sessp->s_ttyvp)
            ep->e_flag = EPROC_CTTY;
    }
    if (p->p_lctx)
        ep->e_lcid = p->p_lctx->lc_id;

    ep->e_ppid = p->p_ppid;
    if (p->p_ucred) {
        my_cred = kauth_cred_proc_ref(p);

        /* A fake historical pcred */
        ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
        ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
        ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
        ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);

        /* A fake historical *kauth_cred_t */
        ep->e_ucred.cr_ref = my_cred->cr_ref;
        ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
        ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups;
        bcopy(posix_cred_get(my_cred)->cr_groups,
            ep->e_ucred.cr_groups, NGROUPS * sizeof (gid_t));

        kauth_cred_unref(&my_cred);
    }

    if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
        (tp = SESSION_TP(sessp))) {
        ep->e_tdev = tp->t_dev;
        ep->e_tpgid = sessp->s_ttypgrpid;
    } else
        ep->e_tdev = NODEV;

    if (sessp != SESSION_NULL) {
        if (SESS_LEADER(p, sessp))
            ep->e_flag |= EPROC_SLEADER;
        session_rele(sessp);
    }
    if (pg != PGRP_NULL)
        pg_rele(pg);
}
/*
 * Fill in non-zero fields of an LP64 eproc structure for the specified process.
 */
STATIC void
fill_user64_eproc(proc_t p, struct user64_eproc *__restrict ep)
{
    struct tty *tp;
    struct pgrp *pg;
    struct session *sessp;
    kauth_cred_t my_cred;

    pg = proc_pgrp(p);
    sessp = proc_session(p);

    if (pg != PGRP_NULL) {
        ep->e_pgid = p->p_pgrpid;
        ep->e_jobc = pg->pg_jobc;
        if (sessp != SESSION_NULL && sessp->s_ttyvp)
            ep->e_flag = EPROC_CTTY;
    }
    if (p->p_lctx)
        ep->e_lcid = p->p_lctx->lc_id;

    ep->e_ppid = p->p_ppid;
    if (p->p_ucred) {
        my_cred = kauth_cred_proc_ref(p);

        /* A fake historical pcred */
        ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
        ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
        ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
        ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);

        /* A fake historical *kauth_cred_t */
        ep->e_ucred.cr_ref = my_cred->cr_ref;
        ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
        ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups;
        bcopy(posix_cred_get(my_cred)->cr_groups,
            ep->e_ucred.cr_groups, NGROUPS * sizeof (gid_t));

        kauth_cred_unref(&my_cred);
    }

    if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
        (tp = SESSION_TP(sessp))) {
        ep->e_tdev = tp->t_dev;
        ep->e_tpgid = sessp->s_ttypgrpid;
    } else
        ep->e_tdev = NODEV;

    if (sessp != SESSION_NULL) {
        if (SESS_LEADER(p, sessp))
            ep->e_flag |= EPROC_SLEADER;
        session_rele(sessp);
    }
    if (pg != PGRP_NULL)
        pg_rele(pg);
}
/*
 * Fill in an eproc structure for the specified process.
 * bzeroed by our caller, so only set non-zero fields.
 */
STATIC void
fill_user32_externproc(proc_t p, struct user32_extern_proc *__restrict exp)
{
    exp->p_starttime.tv_sec = p->p_start.tv_sec;
    exp->p_starttime.tv_usec = p->p_start.tv_usec;
    exp->p_flag = p->p_flag;
    if (p->p_lflag & P_LTRACED)
        exp->p_flag |= P_TRACED;
    if (p->p_lflag & P_LPPWAIT)
        exp->p_flag |= P_PPWAIT;
    if (p->p_lflag & P_LEXIT)
        exp->p_flag |= P_WEXIT;
    exp->p_stat = p->p_stat;
    exp->p_pid = p->p_pid;
    exp->p_oppid = p->p_oppid;

    exp->user_stack = p->user_stack;
    exp->p_debugger = p->p_debugger;
    exp->sigwait = p->sigwait;

#ifdef _PROC_HAS_SCHEDINFO_
    exp->p_estcpu = p->p_estcpu;
    exp->p_pctcpu = p->p_pctcpu;
    exp->p_slptime = p->p_slptime;
#endif

    exp->p_realtimer.it_interval.tv_sec =
        (user32_time_t)p->p_realtimer.it_interval.tv_sec;
    exp->p_realtimer.it_interval.tv_usec =
        (__int32_t)p->p_realtimer.it_interval.tv_usec;

    exp->p_realtimer.it_value.tv_sec =
        (user32_time_t)p->p_realtimer.it_value.tv_sec;
    exp->p_realtimer.it_value.tv_usec =
        (__int32_t)p->p_realtimer.it_value.tv_usec;

    exp->p_rtime.tv_sec = (user32_time_t)p->p_rtime.tv_sec;
    exp->p_rtime.tv_usec = (__int32_t)p->p_rtime.tv_usec;

    exp->p_sigignore = p->p_sigignore;
    exp->p_sigcatch = p->p_sigcatch;
    exp->p_priority = p->p_priority;
    exp->p_nice = p->p_nice;
    bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
    exp->p_xstat = p->p_xstat;
    exp->p_acflag = p->p_acflag;
}
/*
 * Fill in an LP64 version of extern_proc structure for the specified process.
 */
STATIC void
fill_user64_externproc(proc_t p, struct user64_extern_proc *__restrict exp)
{
    exp->p_starttime.tv_sec = p->p_start.tv_sec;
    exp->p_starttime.tv_usec = p->p_start.tv_usec;
    exp->p_flag = p->p_flag;
    if (p->p_lflag & P_LTRACED)
        exp->p_flag |= P_TRACED;
    if (p->p_lflag & P_LPPWAIT)
        exp->p_flag |= P_PPWAIT;
    if (p->p_lflag & P_LEXIT)
        exp->p_flag |= P_WEXIT;
    exp->p_stat = p->p_stat;
    exp->p_pid = p->p_pid;
    exp->p_oppid = p->p_oppid;

    exp->user_stack = p->user_stack;
    exp->p_debugger = p->p_debugger;
    exp->sigwait = p->sigwait;

#ifdef _PROC_HAS_SCHEDINFO_
    exp->p_estcpu = p->p_estcpu;
    exp->p_pctcpu = p->p_pctcpu;
    exp->p_slptime = p->p_slptime;
#endif

    exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
    exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;

    exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
    exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;

    exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
    exp->p_rtime.tv_usec = p->p_rtime.tv_usec;

    exp->p_sigignore = p->p_sigignore;
    exp->p_sigcatch = p->p_sigcatch;
    exp->p_priority = p->p_priority;
    exp->p_nice = p->p_nice;
    bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
    exp->p_xstat = p->p_xstat;
    exp->p_acflag = p->p_acflag;
}
STATIC void
fill_user32_proc(proc_t p, struct user32_kinfo_proc *__restrict kp)
{
    /* on a 64 bit kernel, 32 bit users get some truncated information */
    fill_user32_externproc(p, &kp->kp_proc);
    fill_user32_eproc(p, &kp->kp_eproc);
}

STATIC void
fill_user64_proc(proc_t p, struct user64_kinfo_proc *__restrict kp)
{
    fill_user64_externproc(p, &kp->kp_proc);
    fill_user64_eproc(p, &kp->kp_eproc);
}
STATIC int
sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
{
    __unused int cmd = oidp->oid_arg2;	/* subcommand */
    int *name = arg1;			/* oid element argument vector */
    int namelen = arg2;			/* number of oid element arguments */
    user_addr_t oldp = req->oldptr;	/* user buffer copy out address */
    size_t *oldlenp = &req->oldlen;	/* user buffer copy out size */
//  user_addr_t newp = req->newptr;	/* user buffer copy in address */
//  size_t newlen = req->newlen;	/* user buffer copy in size */

    proc_t p = current_proc();
    int ret = 0;

    if (namelen == 0)
        return (ENOTSUP);

    ret = suser(kauth_cred_get(), &p->p_acflag);

    /* Non-root processes may be blessed by kperf to access data
     * logged into trace.
     */
    if (ret)
        ret = kperf_access_check();

    if (ret)
        return (ret);

    switch (name[0]) {
    case KERN_KDEFLAGS:
    case KERN_KDDFLAGS:
    case KERN_KDENABLE:
    case KERN_KDGETBUF:
    case KERN_KDSETUP:
    case KERN_KDREMOVE:
    case KERN_KDSETREG:
    case KERN_KDGETREG:
    case KERN_KDREADTR:
    case KERN_KDWRITETR:
    case KERN_KDWRITEMAP:
    case KERN_KDPIDTR:
    case KERN_KDTHRMAP:
    case KERN_KDPIDEX:
    case KERN_KDSETRTCDEC:
    case KERN_KDSETBUF:
    case KERN_KDGETENTROPY:
    case KERN_KDENABLE_BG_TRACE:
    case KERN_KDDISABLE_BG_TRACE:
    case KERN_KDREADCURTHRMAP:
    case KERN_KDSET_TYPEFILTER:
    case KERN_KDBUFWAIT:
        ret = kdbg_control(name, namelen, oldp, oldlenp);
        break;
    default:
        ret = ENOTSUP;
        break;
    }

    /* adjust index so we return the right required/consumed amount */
    if (!ret)
        req->oldidx += req->oldlen;

    return (ret);
}
SYSCTL_PROC(_kern, KERN_KDEBUG, kdebug, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
    0,				/* Pointer argument (arg1) */
    0,				/* Integer argument (arg2) */
    sysctl_kdebug_ops,		/* Handler function */
    NULL,			/* Data pointer */
    "");
/*
 * Return the top *sizep bytes of the user stack, or the entire area of the
 * user stack down through the saved exec_path, whichever is smaller.
 */
STATIC int
sysctl_doprocargs SYSCTL_HANDLER_ARGS
{
    __unused int cmd = oidp->oid_arg2;	/* subcommand */
    int *name = arg1;			/* oid element argument vector */
    int namelen = arg2;			/* number of oid element arguments */
    user_addr_t oldp = req->oldptr;	/* user buffer copy out address */
    size_t *oldlenp = &req->oldlen;	/* user buffer copy out size */
//  user_addr_t newp = req->newptr;	/* user buffer copy in address */
//  size_t newlen = req->newlen;	/* user buffer copy in size */
    int error;

    error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 0);

    /* adjust index so we return the right required/consumed amount */
    if (!error)
        req->oldidx += req->oldlen;

    return (error);
}
SYSCTL_PROC(_kern, KERN_PROCARGS, procargs, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
    0,				/* Pointer argument (arg1) */
    0,				/* Integer argument (arg2) */
    sysctl_doprocargs,		/* Handler function */
    NULL,			/* Data pointer */
    "");
STATIC int
sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
{
    __unused int cmd = oidp->oid_arg2;	/* subcommand */
    int *name = arg1;			/* oid element argument vector */
    int namelen = arg2;			/* number of oid element arguments */
    user_addr_t oldp = req->oldptr;	/* user buffer copy out address */
    size_t *oldlenp = &req->oldlen;	/* user buffer copy out size */
//  user_addr_t newp = req->newptr;	/* user buffer copy in address */
//  size_t newlen = req->newlen;	/* user buffer copy in size */
    int error;

    error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 1);

    /* adjust index so we return the right required/consumed amount */
    if (!error)
        req->oldidx += req->oldlen;

    return (error);
}
SYSCTL_PROC(_kern, KERN_PROCARGS2, procargs2, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
    0,				/* Pointer argument (arg1) */
    0,				/* Integer argument (arg2) */
    sysctl_doprocargs2,		/* Handler function */
    NULL,			/* Data pointer */
    "");
STATIC int
sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
                 size_t *sizep, proc_t cur_proc, int argc_yes)
{
    proc_t p;
    int buflen = where != USER_ADDR_NULL ? *sizep : 0;
    int error = 0;
    struct _vm_map *proc_map;
    struct task *task;
    vm_map_copy_t tmp;
    user_addr_t arg_addr;
    size_t arg_size;
    caddr_t data;
    size_t argslen = 0;
    int size;
    vm_offset_t copy_start, copy_end;
    kern_return_t ret;
    int pid;
    kauth_cred_t my_cred;
    uid_t uid;

    if (namelen < 1)
        return (EINVAL);

    if (argc_yes)
        buflen -= sizeof(int);		/* reserve first word to return argc */

    /* we only care about buflen when where (oldp from sysctl) is not NULL. */
    /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl) */
    /* is not NULL then the caller wants us to return the length needed to */
    /* hold the data we would return */
    if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
        return (EINVAL);
    }
    arg_size = buflen;

    /*
     *	Lookup process by pid
     */
    pid = name[0];
    p = proc_find(pid);
    if (p == NULL) {
        return (EINVAL);
    }

    /*
     *	Copy the top N bytes of the stack.
     *	On all machines we have so far, the stack grows
     *	downwards.
     *
     *	If the user expects no more than N bytes of
     *	argument list, use that as a guess for the
     *	size.
     */

    if (!p->user_stack) {
        proc_rele(p);
        return (EINVAL);
    }

    if (where == USER_ADDR_NULL) {
        /* caller only wants to know length of proc args data */
        if (sizep == NULL) {
            proc_rele(p);
            return (EFAULT);
        }

        size = p->p_argslen;
        proc_rele(p);
        if (argc_yes) {
            size += sizeof(int);
        } else {
            /*
             * old PROCARGS will return the executable's path and plus some
             * extra space for work alignment and data tags
             */
            size += PATH_MAX + (6 * sizeof(int));
        }
        size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
        *sizep = size;
        return (0);
    }

    my_cred = kauth_cred_proc_ref(p);
    uid = kauth_cred_getuid(my_cred);
    kauth_cred_unref(&my_cred);

    if ((uid != kauth_cred_getuid(kauth_cred_get()))
        && suser(kauth_cred_get(), &cur_proc->p_acflag)) {
        proc_rele(p);
        return (EINVAL);
    }

    if ((u_int)arg_size > p->p_argslen)
        arg_size = round_page(p->p_argslen);

    arg_addr = p->user_stack - arg_size;

    /*
     *	Before we can block (any VM code), make another
     *	reference to the map to keep it alive.  We do
     *	that by getting a reference on the task itself.
     */
    task = p->task;
    if (task == NULL) {
        proc_rele(p);
        return (EINVAL);
    }

    argslen = p->p_argslen;
    /*
     * Once we have a task reference we can convert that into a
     * map reference, which we will use in the calls below.  The
     * task/process may change its map after we take this reference
     * (see execve), but the worst that will happen then is a return
     * of stale info (which is always a possibility).
     */
    task_reference(task);
    proc_rele(p);
    proc_map = get_task_map_reference(task);
    task_deallocate(task);

    if (proc_map == NULL)
        return (EINVAL);

    ret = kmem_alloc(kernel_map, &copy_start, round_page(arg_size));
    if (ret != KERN_SUCCESS) {
        vm_map_deallocate(proc_map);
        return (ENOMEM);
    }

    copy_end = round_page(copy_start + arg_size);

    if (vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
        (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
        vm_map_deallocate(proc_map);
        kmem_free(kernel_map, copy_start,
            round_page(arg_size));
        return (EIO);
    }

    /*
     *	Now that we've done the copyin from the process'
     *	map, we can release the reference to it.
     */
    vm_map_deallocate(proc_map);

    if (vm_map_copy_overwrite(kernel_map,
        (vm_map_address_t)copy_start,
        tmp, FALSE) != KERN_SUCCESS) {
        kmem_free(kernel_map, copy_start,
            round_page(arg_size));
        return (EIO);
    }

    if (arg_size > argslen) {
        data = (caddr_t) (copy_end - argslen);
        size = argslen;
    } else {
        data = (caddr_t) (copy_end - arg_size);
        size = arg_size;
    }

    if (argc_yes) {
        /* Put processes argc as the first word in the copyout buffer */
        suword(where, p->p_argc);
        error = copyout(data, (where + sizeof(int)), size);
        size += sizeof(int);
    } else {
        error = copyout(data, where, size);

        /*
         * Make the old PROCARGS work to return the executable's path
         * But, only if there is enough space in the provided buffer
         *
         * on entry: data [possibly] points to the beginning of the path
         *
         * Note: we keep all pointers & sizes aligned to word boundaries
         */
        if ( (! error) && (buflen > 0 && (u_int)buflen > argslen) )
        {
            int binPath_sz, alignedBinPath_sz = 0;
            int extraSpaceNeeded, addThis;
            user_addr_t placeHere;
            char * str = (char *) data;
            int max_len = size;

            /* Some apps are really bad about messing up their stacks.
               So, we have to be extra careful about getting the length
               of the executing binary.  If we encounter an error, we bail.
            */

            /* Limit ourselves to PATH_MAX paths */
            if ( max_len > PATH_MAX ) max_len = PATH_MAX;

            binPath_sz = 0;

            while ( (binPath_sz < max_len-1) && (*str++ != 0) )
                binPath_sz++;

            /* If we have a NUL terminator, copy it, too */
            if (binPath_sz < max_len-1) binPath_sz += 1;

            /* Pre-flight the space requirements */

            /* Account for the padding that fills out binPath to the next word */
            alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;

            placeHere = where + size;

            /* Account for the bytes needed to keep placeHere word aligned */
            addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;

            /* Add up all the space that is needed */
            extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));

            /* is there room to tack on argv[0]? */
            if ( (buflen & ~(sizeof(int)-1)) >= ( argslen + extraSpaceNeeded ))
            {
                placeHere += addThis;
                suword(placeHere, 0);
                placeHere += sizeof(int);
                suword(placeHere, 0xBFFF0000);
                placeHere += sizeof(int);
                suword(placeHere, 0);
                placeHere += sizeof(int);
                error = copyout(data, placeHere, binPath_sz);
                if ( ! error )
                {
                    placeHere += binPath_sz;
                    suword(placeHere, 0);
                    size += extraSpaceNeeded;
                }
            }
        }
    }

    if (copy_start != (vm_offset_t) 0) {
        kmem_free(kernel_map, copy_start, copy_end - copy_start);
    }
    if (error) {
        return (error);
    }

    if (where != USER_ADDR_NULL)
        *sizep = size;
    return (0);
}
/*
 * Max number of concurrent aio requests
 */
STATIC int
sysctl_aiomax
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, aio_max_requests, sizeof(int), &new_value, &changed);
    if (changed) {
        /* make sure the system-wide limit is greater than the per process limit */
        if (new_value >= aio_max_requests_per_process && new_value <= AIO_MAX_REQUESTS)
            aio_max_requests = new_value;
        else
            error = EINVAL;
    }
    return(error);
}
/*
 * Max number of concurrent aio requests per process
 */
STATIC int
sysctl_aioprocmax
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, aio_max_requests_per_process, sizeof(int), &new_value, &changed);
    if (changed) {
        /* make sure per process limit is less than the system-wide limit */
        if (new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX)
            aio_max_requests_per_process = new_value;
        else
            error = EINVAL;
    }
    return(error);
}
/*
 * Max number of async IO worker threads
 */
STATIC int
sysctl_aiothreads
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, aio_worker_threads, sizeof(int), &new_value, &changed);
    if (changed) {
        /* we only allow an increase in the number of worker threads */
        if (new_value > aio_worker_threads) {
            _aio_create_worker_threads((new_value - aio_worker_threads));
            aio_worker_threads = new_value;
        }
        else
            error = EINVAL;
    }
    return(error);
}
/*
 * System-wide limit on the max number of processes
 */
STATIC int
sysctl_maxproc
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, maxproc, sizeof(int), &new_value, &changed);
    if (changed) {
        AUDIT_ARG(value32, new_value);
        /* make sure the system-wide limit is less than the configured hard
           limit set at kernel compilation */
        if (new_value <= hard_maxproc && new_value > 0)
            maxproc = new_value;
        else
            error = EINVAL;
    }
    return(error);
}
SYSCTL_STRING(_kern, KERN_OSTYPE, ostype,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    ostype, 0, "");
SYSCTL_STRING(_kern, KERN_OSRELEASE, osrelease,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    osrelease, 0, "");
SYSCTL_INT(_kern, KERN_OSREV, osrevision,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, BSD, "");
SYSCTL_STRING(_kern, KERN_VERSION, version,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    version, 0, "");
SYSCTL_STRING(_kern, OID_AUTO, uuid,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &kernel_uuid_string[0], 0, "");
int debug_kprint_syscall = 0;
char debug_kprint_syscall_process[MAXCOMLEN+1];

/* Thread safe: bits and string value are not used to reclaim state */
SYSCTL_INT (_debug, OID_AUTO, kprint_syscall,
    CTLFLAG_RW | CTLFLAG_LOCKED, &debug_kprint_syscall, 0, "kprintf syscall tracing");
SYSCTL_STRING(_debug, OID_AUTO, kprint_syscall_process,
    CTLFLAG_RW | CTLFLAG_LOCKED, debug_kprint_syscall_process, sizeof(debug_kprint_syscall_process),
    "name of process for kprintf syscall tracing");
int debug_kprint_current_process(const char **namep)
{
    struct proc *p = current_proc();

    if (p == NULL) {
        return 0;
    }

    if (debug_kprint_syscall_process[0]) {
        /* user asked to scope tracing to a particular process name */
        if (0 == strncmp(debug_kprint_syscall_process,
                p->p_comm, sizeof(debug_kprint_syscall_process))) {
            /* no value in telling the user that we traced what they asked */
            if (namep) *namep = NULL;

            return 1;
        } else {
            return 0;
        }
    }

    /* trace all processes. Tell user what we traced */
    if (namep) {
        *namep = p->p_comm;
    }

    return 1;
}
/* PR-5293665: need to use a callback function for kern.osversion to set
 * osversion in IORegistry */
STATIC int
sysctl_osversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
{
    int rval = 0;

    rval = sysctl_handle_string(oidp, arg1, arg2, req);

    if (req->newptr) {
        IORegistrySetOSBuildVersion((char *)arg1);
    }

    return rval;
}

SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
    CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
    osversion, 256 /* OSVERSIZE */,
    sysctl_osversion, "A", "");
STATIC int
sysctl_sysctl_bootargs
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int error;
    char buf[256];

    strlcpy(buf, PE_boot_args(), 256);
    error = sysctl_io_string(req, buf, 256, 0, NULL);
    return(error);
}

SYSCTL_PROC(_kern, OID_AUTO, bootargs,
    CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING,
    NULL, 0,
    sysctl_sysctl_bootargs, "A", "bootargs");
SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &maxfiles, 0, "");
SYSCTL_INT(_kern, KERN_ARGMAX, argmax,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, ARG_MAX, "");
SYSCTL_INT(_kern, KERN_POSIX1, posix1version,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, _POSIX_VERSION, "");
SYSCTL_INT(_kern, KERN_NGROUPS, ngroups,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, NGROUPS_MAX, "");
SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, 1, "");
#if 1	/* _POSIX_SAVED_IDS from <unistd.h> */
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, 1, "");
#else
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    NULL, 0, "");
#endif
SYSCTL_INT(_kern, OID_AUTO, num_files,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &nfiles, 0, "");
SYSCTL_COMPAT_INT(_kern, OID_AUTO, num_vnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &numvnodes, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_tasks,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &task_max, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_threads,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &thread_max, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_taskthreads,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &task_threadmax, 0, "");
STATIC int
sysctl_maxvnodes (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int oldval = desiredvnodes;
    int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &desiredvnodes, NULL);

    if (oldval != desiredvnodes) {
        reset_vmobjectcache(oldval, desiredvnodes);
        resize_namecache(desiredvnodes);
    }

    return(error);
}
SYSCTL_INT(_kern, OID_AUTO, namecache_disabled,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &nc_disabled, 0, "");

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_maxvnodes, "I", "");

SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_maxproc, "I", "");

SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_aiomax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_aioprocmax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_aiothreads, "I", "");
#if (DEVELOPMENT || DEBUG)
extern int sched_smt_balance;
SYSCTL_INT(_kern, OID_AUTO, sched_smt_balance,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_smt_balance, 0, "");
#endif
STATIC int
sysctl_securelvl
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, securelevel, sizeof(int), &new_value, &changed);
    if (changed) {
        if (!(new_value < securelevel && req->p->p_pid != 1)) {
            proc_list_lock();
            securelevel = new_value;
            proc_list_unlock();
        } else {
            error = EPERM;
        }
    }
    return(error);
}

SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_securelvl, "I", "");
STATIC int
sysctl_domainname
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int error, changed;
    error = sysctl_io_string(req, domainname, sizeof(domainname), 0, &changed);
    if (changed) {
        domainnamelen = strlen(domainname);
    }
    return(error);
}

SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_domainname, "A", "");

SYSCTL_COMPAT_INT(_kern, KERN_HOSTID, hostid,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &hostid, 0, "");
STATIC int
sysctl_hostname
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int error, changed;
    error = sysctl_io_string(req, hostname, sizeof(hostname), 1, &changed);
    if (changed) {
        hostnamelen = req->newlen;
    }
    return(error);
}

SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_hostname, "A", "");
STATIC int
sysctl_procname
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    /* Original code allowed writing; I'm copying this, although this all makes
       no sense to me.  Besides, this sysctl is never used. */
    return sysctl_io_string(req, &req->p->p_name[0], (2*MAXCOMLEN+1), 1, NULL);
}

SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, sysctl_procname, "A", "");
SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &speculative_reads_disabled, 0, "");

SYSCTL_INT(_kern, OID_AUTO, ignore_is_ssd,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &ignore_is_ssd, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, preheat_max_bytes,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &preheat_max_bytes, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, preheat_min_bytes,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &preheat_min_bytes, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &speculative_prefetch_max, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max_iosize,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &speculative_prefetch_max_iosize, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_target,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_free_target, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_min,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_free_min, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_reserved,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_free_reserved, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_percentage,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_speculative_percentage, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_q_age_ms,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_speculative_q_age_ms, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_max_delayed_work_limit,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_max_delayed_work_limit, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_max_batch,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_max_batch, 0, "");

SYSCTL_STRING(_kern, OID_AUTO, bootsessionuuid,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &bootsessionuuid_string, sizeof(bootsessionuuid_string), "");
STATIC int
sysctl_boottime
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    time_t tv_sec = boottime_sec();
    struct proc *p = req->p;

    if (proc_is64bit(p)) {
        struct user64_timeval t;
        t.tv_sec = tv_sec;
        t.tv_usec = 0;
        return sysctl_io_opaque(req, &t, sizeof(t), NULL);
    } else {
        struct user32_timeval t;
        t.tv_sec = tv_sec;
        t.tv_usec = 0;
        return sysctl_io_opaque(req, &t, sizeof(t), NULL);
    }
}

SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
    CTLTYPE_STRUCT | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_boottime, "S,timeval", "");
STATIC int
sysctl_symfile
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    char *str;
    int error = get_kernel_symfile(req->p, &str);
    if (error)
        return (error);
    return sysctl_io_string(req, str, 0, 0, NULL);
}

SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_symfile, "A", "");
STATIC int
sysctl_netboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    return sysctl_io_number(req, netboot_root(), sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_netboot, "I", "");
#ifdef CONFIG_IMGSRC_ACCESS
/*
 * Legacy--act as if only one layer of nesting is possible.
 */
STATIC int
sysctl_imgsrcdev
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    vfs_context_t ctx = vfs_context_current();
    vnode_t devvp;
    int result;

    if (!vfs_context_issuser(ctx)) {
        return EPERM;
    }

    if (imgsrc_rootvnodes[0] == NULL) {
        return ENOENT;
    }

    result = vnode_getwithref(imgsrc_rootvnodes[0]);
    if (result != 0) {
        return result;
    }

    devvp = vnode_mount(imgsrc_rootvnodes[0])->mnt_devvp;
    result = vnode_getwithref(devvp);
    if (result != 0) {
        goto out;
    }

    result = sysctl_io_number(req, vnode_specrdev(devvp), sizeof(dev_t), NULL, NULL);

    vnode_put(devvp);
out:
    vnode_put(imgsrc_rootvnodes[0]);
    return result;
}

SYSCTL_PROC(_kern, OID_AUTO, imgsrcdev,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_imgsrcdev, "I", "");
STATIC int
sysctl_imgsrcinfo
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    vnode_t rvp, devvp;
    int error;
    struct imgsrc_info info[MAX_IMAGEBOOT_NESTING];	/* 2 for now, no problem */
    uint32_t i;

    if (imgsrc_rootvnodes[0] == NULLVP) {
        return ENXIO;
    }

    for (i = 0; i < MAX_IMAGEBOOT_NESTING; i++) {
        /*
         * Go get the root vnode.
         */
        rvp = imgsrc_rootvnodes[i];
        if (rvp == NULLVP) {
            break;
        }

        error = vnode_get(rvp);
        if (error != 0) {
            return error;
        }

        /*
         * For now, no getting at a non-local volume.
         */
        devvp = vnode_mount(rvp)->mnt_devvp;
        if (devvp == NULL) {
            vnode_put(rvp);
            return EINVAL;
        }

        error = vnode_getwithref(devvp);
        if (error != 0) {
            vnode_put(rvp);
            return error;
        }

        info[i].ii_dev = vnode_specrdev(devvp);
        info[i].ii_flags = 0;
        info[i].ii_height = i;
        bzero(info[i].ii_reserved, sizeof(info[i].ii_reserved));

        vnode_put(devvp);
        vnode_put(rvp);
    }

    return sysctl_io_opaque(req, info, i * sizeof(info[0]), NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, imgsrcinfo,
    CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_imgsrcinfo, "I", "");

#endif /* CONFIG_IMGSRC_ACCESS */
SYSCTL_DECL(_kern_timer);
SYSCTL_NODE(_kern, OID_AUTO, timer, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "timer");

SYSCTL_INT(_kern_timer, OID_AUTO, coalescing_enabled,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &mach_timer_coalescing_enabled, 0, "");

SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_1,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &timer_deadline_tracking_bin_1, "");
SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_2,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &timer_deadline_tracking_bin_2, "");

SYSCTL_DECL(_kern_timer_longterm);
SYSCTL_NODE(_kern_timer, OID_AUTO, longterm, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "longterm");
/* Must match definition in osfmk/kern/timer_call.c */
enum {
    THRESHOLD, QCOUNT,
    ENQUEUES, DEQUEUES, ESCALATES, SCANS, PREEMPTS,
    LATENCY, LATENCY_MIN, LATENCY_MAX
};
extern uint64_t timer_sysctl_get(int);
extern int      timer_sysctl_set(int, uint64_t);
STATIC int
sysctl_timer
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int      oid = (int)arg1;
    uint64_t value = timer_sysctl_get(oid);
    uint64_t new_value;
    int      error;
    int      changed;

    error = sysctl_io_number(req, value, sizeof(value), &new_value, &changed);
    if (changed)
        error = timer_sysctl_set(oid, new_value);

    return error;
}
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, threshold,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    (void *) THRESHOLD, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, qlen,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) QCOUNT, 0, sysctl_timer, "Q", "");

SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, enqueues,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) ENQUEUES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, dequeues,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) DEQUEUES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, escalates,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) ESCALATES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scans,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) SCANS, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, preempts,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) PREEMPTS, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) LATENCY, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_min,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) LATENCY_MIN, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_max,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) LATENCY_MAX, 0, sysctl_timer, "Q", "");
STATIC int
sysctl_usrstack
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    return sysctl_io_number(req, (int)req->p->user_stack, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_USRSTACK32, usrstack,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_usrstack, "I", "");
STATIC int
sysctl_usrstack64
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    return sysctl_io_number(req, req->p->user_stack, sizeof(req->p->user_stack), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_usrstack64, "Q", "");
SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    corefilename, sizeof(corefilename), "");
STATIC int
sysctl_coredump
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
    return (ENOTSUP);
#else
    int new_value, changed;
    int error = sysctl_io_number(req, do_coredump, sizeof(int), &new_value, &changed);
    if (changed) {
        if ((new_value == 0) || (new_value == 1))
            do_coredump = new_value;
        else
            error = EINVAL;
    }
    return (error);
#endif
}

SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_coredump, "I", "");
STATIC int
sysctl_suid_coredump
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
    return (ENOTSUP);
#else
    int new_value, changed;
    int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed);
    if (changed) {
        if ((new_value == 0) || (new_value == 1))
            sugid_coredump = new_value;
        else
            error = EINVAL;
    }
    return (error);
#endif
}

SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_suid_coredump, "I", "");
STATIC int
sysctl_delayterm
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    struct proc *p = req->p;
    int new_value, changed;
    int error = sysctl_io_number(req, (req->p->p_lflag & P_LDELAYTERM) ? 1 : 0, sizeof(int), &new_value, &changed);
    if (changed) {
        proc_lock(p);
        if (new_value)
            req->p->p_lflag |= P_LDELAYTERM;
        else
            req->p->p_lflag &= ~P_LDELAYTERM;
        proc_unlock(p);
    }
    return (error);
}

SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_delayterm, "I", "");
STATIC int
sysctl_rage_vnode
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    struct proc *p = req->p;
    uthread_t ut;
    int new_value, old_value, changed;
    int error;

    ut = get_bsdthread_info(current_thread());

    if (ut->uu_flag & UT_RAGE_VNODES)
        old_value = KERN_RAGE_THREAD;
    else if (p->p_lflag & P_LRAGE_VNODES)
        old_value = KERN_RAGE_PROC;
    else
        old_value = 0;

    error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);

    if (error == 0) {
        switch (new_value) {
        case KERN_RAGE_PROC:
            proc_lock(p);
            p->p_lflag |= P_LRAGE_VNODES;
            proc_unlock(p);
            break;
        case KERN_UNRAGE_PROC:
            proc_lock(p);
            p->p_lflag &= ~P_LRAGE_VNODES;
            proc_unlock(p);
            break;

        case KERN_RAGE_THREAD:
            ut->uu_flag |= UT_RAGE_VNODES;
            break;
        case KERN_UNRAGE_THREAD:
            ut = get_bsdthread_info(current_thread());
            ut->uu_flag &= ~UT_RAGE_VNODES;
            break;
        }
    }
    return (error);
}

SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, sysctl_rage_vnode, "I", "");
/* XXX move this interface into libproc and remove this sysctl */
STATIC int
sysctl_setthread_cpupercent
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, old_value;
    int error = 0;
    kern_return_t kret = KERN_SUCCESS;
    uint8_t percent = 0;
    int ms_refill = 0;

    old_value = 0;

    if ((error = sysctl_io_number(req, old_value, sizeof(old_value), &new_value, NULL)) != 0)
        return (error);

    percent = new_value & 0xff;                     /* low 8 bits hold the percent */
    ms_refill = (new_value >> 8) & 0xffffff;        /* upper 24 bits hold the ms refill value */

    /*
     * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present.
     */
    if ((kret = thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, percent, ms_refill * (int)NSEC_PER_MSEC)) != 0)
        return (EIO);

    return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, setthread_cpupercent,
    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_ANYBODY,
    0, 0, sysctl_setthread_cpupercent, "I", "set thread cpu percentage limit");
2319 SYSCTL_PROC(_kern
, OID_AUTO
, setthread_cpupercent
,
2320 CTLTYPE_INT
| CTLFLAG_WR
| CTLFLAG_ANYBODY
,
2321 0, 0, sysctl_setthread_cpupercent
, "I", "set thread cpu percentage limit");
STATIC int
sysctl_kern_check_openevt
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    struct proc *p = req->p;
    int new_value, old_value, changed;
    int error;

    if (p->p_flag & P_CHECKOPENEVT) {
        old_value = KERN_OPENEVT_PROC;
    } else {
        old_value = 0;
    }

    error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);

    if (error == 0) {
        switch (new_value) {
        case KERN_OPENEVT_PROC:
            OSBitOrAtomic(P_CHECKOPENEVT, &p->p_flag);
            break;

        case KERN_UNOPENEVT_PROC:
            OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT), &p->p_flag);
            break;

        default:
            error = EINVAL;
        }
    }
    return (error);
}

SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");
STATIC int
sysctl_nx
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
    return ENOTSUP;
#else
    int new_value, changed;
    int error;

    error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);
    if (error)
        return error;

    if (changed) {
#if defined(__i386__) || defined(__x86_64__)
        /*
         * Only allow setting if NX is supported on the chip
         */
        if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD))
            return ENOTSUP;
#endif
        nx_enabled = new_value;
    }
    return (error);
#endif /* SECURE_KERNEL */
}

SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    0, 0, sysctl_nx, "I", "");
STATIC int
sysctl_loadavg
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    if (proc_is64bit(req->p)) {
        struct user64_loadavg loadinfo64;
        fill_loadavg64(&averunnable, &loadinfo64);
        return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);
    } else {
        struct user32_loadavg loadinfo32;
        fill_loadavg32(&averunnable, &loadinfo32);
        return sysctl_io_opaque(req, &loadinfo32, sizeof(loadinfo32), NULL);
    }
}

SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_loadavg, "S,loadavg", "");
/*
 * Note: Thread safe; vm_map_lock protects in vm_toggle_entry_reuse()
 */
STATIC int
sysctl_vm_toggle_address_reuse(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
    int old_value = 0, new_value = 0, error = 0;

    if (vm_toggle_entry_reuse(VM_TOGGLE_GETVALUE, &old_value))
        return (error);
    error = sysctl_io_number(req, old_value, sizeof(int), &new_value, NULL);
    if (error == 0) {
        return (vm_toggle_entry_reuse(new_value, NULL));
    }
    return (error);
}

SYSCTL_PROC(_debug, OID_AUTO, toggle_address_reuse, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_vm_toggle_address_reuse, "I", "");
STATIC int
sysctl_swapusage
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int error;
    uint64_t swap_total;
    uint64_t swap_avail;
    vm_size_t swap_pagesize;
    boolean_t swap_encrypted;
    struct xsw_usage xsu;

    error = macx_swapinfo(&swap_total,
                          &swap_avail,
                          &swap_pagesize,
                          &swap_encrypted);
    if (error)
        return error;

    xsu.xsu_total = swap_total;
    xsu.xsu_avail = swap_avail;
    xsu.xsu_used = swap_total - swap_avail;
    xsu.xsu_pagesize = swap_pagesize;
    xsu.xsu_encrypted = swap_encrypted;
    return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL);
}

SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_swapusage, "S,xsw_usage", "");
#if CONFIG_FREEZE
extern void vm_page_reactivate_all_throttled(void);

static int
sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int error, val = memorystatus_freeze_enabled ? 1 : 0;
    boolean_t disabled;

    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || !req->newptr)
        return (error);

    if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
        //assert(req->newptr);
        printf("Failed attempt to set vm.freeze_enabled sysctl\n");
        return EINVAL;
    }

    /*
     * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue.
     */
    disabled = (!val && memorystatus_freeze_enabled);

    memorystatus_freeze_enabled = val ? TRUE : FALSE;

    if (disabled) {
        vm_page_reactivate_all_throttled();
    }

    return (0);
}

SYSCTL_PROC(_vm, OID_AUTO, freeze_enabled, CTLTYPE_INT | CTLFLAG_RW, &memorystatus_freeze_enabled, 0, sysctl_freeze_enabled, "I", "");
#endif /* CONFIG_FREEZE */
/* this kernel does NOT implement shared_region_make_private_np() */
SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    (int *)NULL, 0, "");
STATIC int
fetch_process_cputype(
    proc_t cur_proc,
    int *name,
    u_int namelen,
    cpu_type_t *cputype)
{
    proc_t p = PROC_NULL;
    int refheld = 0;
    cpu_type_t ret = 0;
    int error = 0;

    if (namelen == 0)
        p = cur_proc;
    else if (namelen == 1) {
        p = proc_find(name[0]);
        if (p == NULL)
            return (EINVAL);
        refheld = 1;
    } else {
        return (EINVAL);
    }

    ret = cpu_type() & ~CPU_ARCH_MASK;
    if (IS_64BIT_PROCESS(p))
        ret |= CPU_ARCH_ABI64;

    *cputype = ret;

    if (refheld != 0)
        proc_rele(p);

    return (error);
}
STATIC int
sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
    struct sysctl_req *req)
{
    int error;
    int res = 1;
    cpu_type_t proc_cputype = 0;
    if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
        return error;
    if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
        res = 0;
    return SYSCTL_OUT(req, &res, sizeof(res));
}
SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_native, "I", "proc_native");
STATIC int
sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
    struct sysctl_req *req)
{
    int error;
    cpu_type_t proc_cputype = 0;
    if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
        return error;
    return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
}
SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_cputype, "I", "proc_cputype");
STATIC int
sysctl_safeboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    return sysctl_io_number(req, boothowto & RB_SAFEBOOT ? 1 : 0, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_safeboot, "I", "");
STATIC int
sysctl_singleuser
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    return sysctl_io_number(req, boothowto & RB_SINGLE ? 1 : 0, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, singleuser,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_singleuser, "I", "");
/*
 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
 */
extern boolean_t affinity_sets_enabled;
extern int       affinity_sets_mapping;

SYSCTL_INT(_kern, OID_AUTO, affinity_sets_enabled,
    CTLFLAG_RW | CTLFLAG_LOCKED, (int *) &affinity_sets_enabled, 0, "hinting enabled");
SYSCTL_INT(_kern, OID_AUTO, affinity_sets_mapping,
    CTLFLAG_RW | CTLFLAG_LOCKED, &affinity_sets_mapping, 0, "mapping policy");
/*
 * Boolean indicating if KASLR is active.
 */
STATIC int
sysctl_slide
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    uint32_t slide;

    slide = vm_kernel_slide ? 1 : 0;

    return sysctl_io_number(req, slide, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, slide,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_slide, "I", "");
/*
 * Limit on total memory users can wire.
 *
 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
 *
 * vm_user_wire_limit - per address space limit on wired memory. This puts a cap on the process's rlimit value.
 *
 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
 * kmem_init().
 *
 * All values are in bytes.
 */

vm_map_size_t vm_global_no_user_wire_amount;
vm_map_size_t vm_global_user_wire_limit;
vm_map_size_t vm_user_wire_limit;

/*
 * There needs to be a more automatic/elegant way to do this
 */
SYSCTL_QUAD(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_no_user_wire_amount, "");
SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, "");
SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_user_wire_limit, "");
extern int vm_map_copy_overwrite_aligned_src_not_internal;
extern int vm_map_copy_overwrite_aligned_src_not_symmetric;
extern int vm_map_copy_overwrite_aligned_src_large;
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_internal, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_symmetric, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_symmetric, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_large, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_large, 0, "");

extern uint32_t vm_page_external_count;
extern uint32_t vm_page_filecache_min;

SYSCTL_INT(_vm, OID_AUTO, vm_page_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_external_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_filecache_min, 0, "");
extern int vm_compressor_mode;
extern uint32_t swapout_target_age;
extern int64_t compressor_bytes_used;
extern uint32_t compressor_eval_period_in_msecs;
extern uint32_t compressor_sample_min_in_msecs;
extern uint32_t compressor_sample_max_in_msecs;
extern uint32_t compressor_thrashing_threshold_per_10msecs;
extern uint32_t compressor_thrashing_min_per_10msecs;
extern uint32_t vm_compressor_minorcompact_threshold_divisor;
extern uint32_t vm_compressor_majorcompact_threshold_divisor;
extern uint32_t vm_compressor_unthrottle_threshold_divisor;
extern uint32_t vm_compressor_catchup_threshold_divisor;

SYSCTL_INT(_vm, OID_AUTO, compressor_mode, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_mode, 0, "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_bytes_used, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_bytes_used, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_swapout_target_age, CTLFLAG_RD | CTLFLAG_LOCKED, &swapout_target_age, 0, "");

SYSCTL_INT(_vm, OID_AUTO, compressor_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_eval_period_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_sample_min_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_min_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_sample_max_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_max_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_threshold_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_threshold_per_10msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_min_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_min_per_10msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_minorcompact_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_minorcompact_threshold_divisor, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_majorcompact_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_majorcompact_threshold_divisor, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_unthrottle_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_unthrottle_threshold_divisor, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_catchup_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_catchup_threshold_divisor, 0, "");

SYSCTL_STRING(_vm, OID_AUTO, swapfileprefix, CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, swapfilename, sizeof(swapfilename) - SWAPFILENAME_INDEX_LEN, "");
#if CONFIG_PHANTOM_CACHE
extern uint32_t phantom_cache_thrashing_threshold;
extern uint32_t phantom_cache_eval_period_in_msecs;
extern uint32_t phantom_cache_thrashing_threshold_ssd;

SYSCTL_INT(_vm, OID_AUTO, phantom_cache_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_eval_period_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold_ssd, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold_ssd, 0, "");
#endif /* CONFIG_PHANTOM_CACHE */
/*
 * Enable tracing of voucher contents
 */
extern uint32_t ipc_voucher_trace_contents;

SYSCTL_INT(_kern, OID_AUTO, ipc_voucher_trace_contents,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ipc_voucher_trace_contents, 0, "Enable tracing voucher contents");

/*
 * Kernel stack size and depth
 */
SYSCTL_INT(_kern, OID_AUTO, stack_size,
    CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_size, 0, "Kernel stack size");
SYSCTL_INT(_kern, OID_AUTO, stack_depth_max,
    CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_depth_max, 0, "Max kernel stack depth at interrupt or context switch");

/*
 * enable back trace for port allocations
 */
extern int ipc_portbt;

SYSCTL_INT(_kern, OID_AUTO, ipc_portbt,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &ipc_portbt, 0, "");

/*
 * See osfmk/kern/sched_prim.c for the corresponding definition
 * in osfmk/. If either version changes, update the other.
 */
#define SCHED_STRING_MAX_LENGTH (48)

extern char sched_string[SCHED_STRING_MAX_LENGTH];
SYSCTL_STRING(_kern, OID_AUTO, sched,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    sched_string, sizeof(sched_string),
    "Timeshare scheduler implementation");
/*
 * Only support runtime modification on embedded platforms
 * with development config enabled
 */

/* Parameters related to timer coalescing tuning, to be replaced
 * with a dedicated system call in the future.
 */
/* Enable processing pending timers in the context of any other interrupt.
 * Coalescing tuning parameters for various thread/task attributes */
STATIC int
sysctl_timer_user_us_kernel_abstime SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp)
    int size = arg2;	/* subcommand */
    int error;
    int changed = 0;
    uint64_t old_value_ns;
    uint64_t new_value_ns;
    uint64_t value_abstime;

    if (size == sizeof(uint32_t))
        value_abstime = *((uint32_t *)arg1);
    else if (size == sizeof(uint64_t))
        value_abstime = *((uint64_t *)arg1);
    else
        return ENOTSUP;

    absolutetime_to_nanoseconds(value_abstime, &old_value_ns);
    error = sysctl_io_number(req, old_value_ns, sizeof(old_value_ns), &new_value_ns, &changed);
    if ((error) || (!changed))
        return error;

    nanoseconds_to_absolutetime(new_value_ns, &value_abstime);
    if (size == sizeof(uint32_t))
        *((uint32_t *)arg1) = (uint32_t)value_abstime;
    else
        *((uint64_t *)arg1) = value_abstime;

    return error;
}
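/*
 * Illustrative sketch: the handler above exposes an abstime-valued tunable in
 * nanoseconds, converting on the way out (read) and on the way in (write)
 * with the existing conversion primitives used right above:
 *
 *	uint64_t ns, abstime;
 *	absolutetime_to_nanoseconds(value_abstime, &ns);	// abstime -> ns for the reader
 *	nanoseconds_to_absolutetime(ns, &abstime);		// ns -> abstime after a write
 */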
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_bg_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_bg_shift, 0, "");
SYSCTL_PROC(_kern, OID_AUTO, timer_resort_threshold_ns,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_resort_threshold_abstime,
    sizeof(tcoal_prio_params.timer_resort_threshold_abstime),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");
SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_bg_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_bg_abstime_max,
    sizeof(tcoal_prio_params.timer_coalesce_bg_abstime_max),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_kt_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_kt_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_kt_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_kt_abstime_max,
    sizeof(tcoal_prio_params.timer_coalesce_kt_abstime_max),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_fp_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_fp_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_fp_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_fp_abstime_max,
    sizeof(tcoal_prio_params.timer_coalesce_fp_abstime_max),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_ts_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_ts_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_ts_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_ts_abstime_max,
    sizeof(tcoal_prio_params.timer_coalesce_ts_abstime_max),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier0_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[0], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier0_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[0],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[0]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier1_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[1], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier1_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[1],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[1]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier2_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[2], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier2_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[2],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[2]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier3_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[3], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier3_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[3],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[3]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier4_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[4], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier4_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[4],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[4]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier5_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[5], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier5_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[5],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[5]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");
/* Communicate the "user idle level" heuristic to the timer layer, and
 * potentially other layers in the future.
 */
static int
timer_user_idle_level(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) {
    int new_value = 0, old_value = 0, changed = 0, error;

    old_value = timer_get_user_idle_level();

    error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);

    if (error == 0 && changed) {
        if (timer_set_user_idle_level(new_value) != KERN_SUCCESS)
            return ERANGE;
    }

    return error;
}

SYSCTL_PROC(_machdep, OID_AUTO, user_idle_level,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0,
    timer_user_idle_level, "I", "User idle level heuristic, 0-128");
SYSCTL_INT(_kern, OID_AUTO, hv_support,
    CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
    &hv_support_available, 0, "");