2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
67 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
68 * support for mandatory and extensible security protections. This notice
69 * is included in support of clause 2.2 (b) of the Apple Public License,
74 * DEPRECATED sysctl system call code
76 * Everything in this file is deprecated. Sysctls should be handled
77 * by the code in kern_newsysctl.c.
78 * The remaining "case" sections are supposed to be converted into
79 * SYSCTL_*-style definitions, and as soon as all of them are gone,
80 * this source file is supposed to die.
82 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
83 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/malloc.h>
90 #include <sys/proc_internal.h>
91 #include <sys/kauth.h>
92 #include <sys/file_internal.h>
93 #include <sys/vnode_internal.h>
94 #include <sys/unistd.h>
96 #include <sys/ioctl.h>
97 #include <sys/namei.h>
99 #include <sys/disklabel.h>
101 #include <sys/sysctl.h>
102 #include <sys/user.h>
103 #include <sys/aio_kern.h>
104 #include <sys/reboot.h>
106 #include <security/audit/audit.h>
107 #include <kern/kalloc.h>
109 #include <mach/machine.h>
110 #include <mach/mach_host.h>
111 #include <mach/mach_types.h>
112 #include <mach/vm_param.h>
113 #include <kern/mach_param.h>
114 #include <kern/task.h>
115 #include <kern/thread.h>
116 #include <kern/processor.h>
117 #include <kern/debug.h>
118 #include <vm/vm_kern.h>
119 #include <vm/vm_map.h>
120 #include <mach/host_info.h>
122 #include <sys/mount_internal.h>
123 #include <sys/kdebug.h>
125 #include <IOKit/IOPlatformExpert.h>
126 #include <pexpert/pexpert.h>
128 #include <machine/machine_routines.h>
129 #include <machine/exec.h>
131 #include <vm/vm_protos.h>
132 #include <vm/vm_pageout.h>
133 #include <sys/imgsrc.h>
134 #include <kern/timer_call.h>
136 #if defined(__i386__) || defined(__x86_64__)
137 #include <i386/cpuid.h>
141 #include <sys/kern_memorystatus.h>
145 #include <kperf/kperf.h>
149 #include <kern/hv_support.h>
153 * deliberately setting max requests to really high number
154 * so that runaway settings do not cause MALLOC overflows
156 #define AIO_MAX_REQUESTS (128 * CONFIG_AIO_MAX)
158 extern int aio_max_requests
;
159 extern int aio_max_requests_per_process
;
160 extern int aio_worker_threads
;
161 extern int lowpri_IO_window_msecs
;
162 extern int lowpri_IO_delay_msecs
;
163 extern int nx_enabled
;
164 extern int speculative_reads_disabled
;
165 extern int ignore_is_ssd
;
166 extern unsigned int speculative_prefetch_max
;
167 extern unsigned int speculative_prefetch_max_iosize
;
168 extern unsigned int preheat_max_bytes
;
169 extern unsigned int preheat_min_bytes
;
170 extern long numvnodes
;
172 extern uuid_string_t bootsessionuuid_string
;
174 extern unsigned int vm_max_delayed_work_limit
;
175 extern unsigned int vm_max_batch
;
177 extern unsigned int vm_page_free_min
;
178 extern unsigned int vm_page_free_target
;
179 extern unsigned int vm_page_free_reserved
;
180 extern unsigned int vm_page_speculative_percentage
;
181 extern unsigned int vm_page_speculative_q_age_ms
;
183 #if (DEVELOPMENT || DEBUG)
184 extern uint32_t vm_page_creation_throttled_hard
;
185 extern uint32_t vm_page_creation_throttled_soft
;
186 #endif /* DEVELOPMENT || DEBUG */
189 * Conditionally allow dtrace to see these functions for debugging purposes.
197 #define STATIC static
200 extern boolean_t mach_timer_coalescing_enabled
;
202 extern uint64_t timer_deadline_tracking_bin_1
, timer_deadline_tracking_bin_2
;
205 fill_user32_eproc(proc_t
, struct user32_eproc
*__restrict
);
207 fill_user32_externproc(proc_t
, struct user32_extern_proc
*__restrict
);
209 fill_user64_eproc(proc_t
, struct user64_eproc
*__restrict
);
211 fill_user64_proc(proc_t
, struct user64_kinfo_proc
*__restrict
);
213 fill_user64_externproc(proc_t
, struct user64_extern_proc
*__restrict
);
215 fill_user32_proc(proc_t
, struct user32_kinfo_proc
*__restrict
);
218 kdbg_control(int *name
, u_int namelen
, user_addr_t where
, size_t * sizep
);
224 pcsamples_ops(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
,
226 __private_extern__ kern_return_t
227 reset_vmobjectcache(unsigned int val1
, unsigned int val2
);
229 sysctl_procargs(int *name
, u_int namelen
, user_addr_t where
,
230 size_t *sizep
, proc_t cur_proc
);
232 sysctl_procargsx(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
,
233 proc_t cur_proc
, int argc_yes
);
235 sysctl_struct(user_addr_t oldp
, size_t *oldlenp
, user_addr_t newp
,
236 size_t newlen
, void *sp
, int len
);
238 STATIC
int sysdoproc_filt_KERN_PROC_PID(proc_t p
, void * arg
);
239 STATIC
int sysdoproc_filt_KERN_PROC_PGRP(proc_t p
, void * arg
);
240 STATIC
int sysdoproc_filt_KERN_PROC_TTY(proc_t p
, void * arg
);
241 STATIC
int sysdoproc_filt_KERN_PROC_UID(proc_t p
, void * arg
);
242 STATIC
int sysdoproc_filt_KERN_PROC_RUID(proc_t p
, void * arg
);
244 STATIC
int sysdoproc_filt_KERN_PROC_LCID(proc_t p
, void * arg
);
246 int sysdoproc_callback(proc_t p
, void *arg
);
249 /* forward declarations for non-static STATIC */
250 STATIC
void fill_loadavg64(struct loadavg
*la
, struct user64_loadavg
*la64
);
251 STATIC
void fill_loadavg32(struct loadavg
*la
, struct user32_loadavg
*la32
);
252 STATIC
int sysctl_handle_kern_threadname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
253 STATIC
int sysctl_sched_stats(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
254 STATIC
int sysctl_sched_stats_enable(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
255 STATIC
int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
;
257 STATIC
int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
;
258 #endif /* COUNT_SYSCALLS */
259 STATIC
int sysctl_doprocargs SYSCTL_HANDLER_ARGS
;
260 STATIC
int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
;
261 STATIC
int sysctl_prochandle SYSCTL_HANDLER_ARGS
;
262 STATIC
int sysctl_aiomax(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
263 STATIC
int sysctl_aioprocmax(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
264 STATIC
int sysctl_aiothreads(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
265 STATIC
int sysctl_maxproc(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
266 STATIC
int sysctl_osversion(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
267 STATIC
int sysctl_sysctl_bootargs(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
268 STATIC
int sysctl_maxvnodes(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
269 STATIC
int sysctl_securelvl(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
270 STATIC
int sysctl_domainname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
271 STATIC
int sysctl_hostname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
272 STATIC
int sysctl_procname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
273 STATIC
int sysctl_boottime(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
274 STATIC
int sysctl_symfile(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
276 STATIC
int sysctl_netboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
278 #ifdef CONFIG_IMGSRC_ACCESS
279 STATIC
int sysctl_imgsrcdev(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
281 STATIC
int sysctl_usrstack(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
282 STATIC
int sysctl_usrstack64(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
283 STATIC
int sysctl_coredump(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
284 STATIC
int sysctl_suid_coredump(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
285 STATIC
int sysctl_delayterm(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
286 STATIC
int sysctl_rage_vnode(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
287 STATIC
int sysctl_kern_check_openevt(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
288 STATIC
int sysctl_nx(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
289 STATIC
int sysctl_loadavg(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
290 STATIC
int sysctl_vm_toggle_address_reuse(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
291 STATIC
int sysctl_swapusage(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
292 STATIC
int fetch_process_cputype( proc_t cur_proc
, int *name
, u_int namelen
, cpu_type_t
*cputype
);
293 STATIC
int sysctl_sysctl_native(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
294 STATIC
int sysctl_sysctl_cputype(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
295 STATIC
int sysctl_safeboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
296 STATIC
int sysctl_singleuser(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
297 STATIC
int sysctl_slide(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
300 extern void IORegistrySetOSBuildVersion(char * build_version
);
303 fill_loadavg64(struct loadavg
*la
, struct user64_loadavg
*la64
)
305 la64
->ldavg
[0] = la
->ldavg
[0];
306 la64
->ldavg
[1] = la
->ldavg
[1];
307 la64
->ldavg
[2] = la
->ldavg
[2];
308 la64
->fscale
= (user64_long_t
)la
->fscale
;
312 fill_loadavg32(struct loadavg
*la
, struct user32_loadavg
*la32
)
314 la32
->ldavg
[0] = la
->ldavg
[0];
315 la32
->ldavg
[1] = la
->ldavg
[1];
316 la32
->ldavg
[2] = la
->ldavg
[2];
317 la32
->fscale
= (user32_long_t
)la
->fscale
;
321 * Attributes stored in the kernel.
323 extern char corefilename
[MAXPATHLEN
+1];
324 extern int do_coredump
;
325 extern int sugid_coredump
;
328 extern int do_count_syscalls
;
332 int securelevel
= -1;
338 sysctl_handle_kern_threadname( __unused
struct sysctl_oid
*oidp
, __unused
void *arg1
,
339 __unused
int arg2
, struct sysctl_req
*req
)
342 struct uthread
*ut
= get_bsdthread_info(current_thread());
343 user_addr_t oldp
=0, newp
=0;
344 size_t *oldlenp
=NULL
;
348 oldlenp
= &(req
->oldlen
);
350 newlen
= req
->newlen
;
352 /* We want the current length, and maybe the string itself */
354 /* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */
355 size_t currlen
= MAXTHREADNAMESIZE
- 1;
358 /* use length of current thread name */
359 currlen
= strlen(ut
->pth_name
);
361 if(*oldlenp
< currlen
)
363 /* NOTE - we do not copy the NULL terminator */
365 error
= copyout(ut
->pth_name
,oldp
,currlen
);
370 /* return length of thread name minus NULL terminator (just like strlen) */
371 req
->oldidx
= currlen
;
374 /* We want to set the name to something */
377 if(newlen
> (MAXTHREADNAMESIZE
- 1))
381 ut
->pth_name
= (char*)kalloc( MAXTHREADNAMESIZE
);
385 bzero(ut
->pth_name
, MAXTHREADNAMESIZE
);
386 error
= copyin(newp
, ut
->pth_name
, newlen
);
394 SYSCTL_PROC(_kern
, KERN_THREADNAME
, threadname
, CTLFLAG_ANYBODY
| CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, 0, sysctl_handle_kern_threadname
,"A","");
398 sysctl_sched_stats(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
400 host_basic_info_data_t hinfo
;
404 mach_msg_type_number_t count
= HOST_BASIC_INFO_COUNT
;
405 struct _processor_statistics_np
*buf
;
408 kret
= host_info((host_t
)BSD_HOST
, HOST_BASIC_INFO
, (host_info_t
)&hinfo
, &count
);
409 if (kret
!= KERN_SUCCESS
) {
413 size
= sizeof(struct _processor_statistics_np
) * (hinfo
.logical_cpu_max
+ 2); /* One for RT Queue, One for Fair Share Queue */
415 if (req
->oldlen
< size
) {
419 MALLOC(buf
, struct _processor_statistics_np
*, size
, M_TEMP
, M_ZERO
| M_WAITOK
);
421 kret
= get_sched_statistics(buf
, &size
);
422 if (kret
!= KERN_SUCCESS
) {
427 error
= sysctl_io_opaque(req
, buf
, size
, &changed
);
433 panic("Sched info changed?!");
440 SYSCTL_PROC(_kern
, OID_AUTO
, sched_stats
, CTLFLAG_LOCKED
, 0, 0, sysctl_sched_stats
, "-", "");
443 sysctl_sched_stats_enable(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, __unused
struct sysctl_req
*req
)
448 if (req
->newlen
!= sizeof(active
)) {
452 res
= copyin(req
->newptr
, &active
, sizeof(active
));
457 return set_sched_stats_active(active
);
460 SYSCTL_PROC(_kern
, OID_AUTO
, sched_stats_enable
, CTLFLAG_LOCKED
| CTLFLAG_WR
, 0, 0, sysctl_sched_stats_enable
, "-", "");
462 extern int get_kernel_symfile(proc_t
, char **);
465 #define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)
468 extern int syscalls_log
[];
469 extern const char *syscallnames
[];
472 sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
474 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
475 __unused
int *name
= arg1
; /* oid element argument vector */
476 __unused
int namelen
= arg2
; /* number of oid element arguments */
477 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
478 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
479 user_addr_t newp
= req
->newptr
; /* user buffer copy in address */
480 size_t newlen
= req
->newlen
; /* user buffer copy in size */
485 /* valid values passed in:
486 * = 0 means don't keep called counts for each bsd syscall
487 * > 0 means keep called counts for each bsd syscall
488 * = 2 means dump current counts to the system log
489 * = 3 means reset all counts
490 * for example, to dump current counts:
491 * sysctl -w kern.count_calls=2
493 error
= sysctl_int(oldp
, oldlenp
, newp
, newlen
, &tmp
);
499 do_count_syscalls
= 1;
501 else if ( tmp
== 0 || tmp
== 2 || tmp
== 3 ) {
503 for ( i
= 0; i
< nsysent
; i
++ ) {
504 if ( syscalls_log
[i
] != 0 ) {
506 printf("%d calls - name %s \n", syscalls_log
[i
], syscallnames
[i
]);
514 do_count_syscalls
= 1;
518 /* adjust index so we return the right required/consumed amount */
520 req
->oldidx
+= req
->oldlen
;
524 SYSCTL_PROC(_kern
, KERN_COUNT_SYSCALLS
, count_syscalls
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
525 0, /* Pointer argument (arg1) */
526 0, /* Integer argument (arg2) */
527 sysctl_docountsyscalls
, /* Handler function */
528 NULL
, /* Data pointer */
530 #endif /* COUNT_SYSCALLS */
533 * The following sysctl_* functions should not be used
534 * any more, as they can only cope with callers in
535 * user mode: Use new-style
543 * Validate parameters and get old / set new parameters
544 * for an integer-valued sysctl function.
547 sysctl_int(user_addr_t oldp
, size_t *oldlenp
,
548 user_addr_t newp
, size_t newlen
, int *valp
)
552 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
554 if (oldp
&& *oldlenp
< sizeof(int))
556 if (newp
&& newlen
!= sizeof(int))
558 *oldlenp
= sizeof(int);
560 error
= copyout(valp
, oldp
, sizeof(int));
561 if (error
== 0 && newp
) {
562 error
= copyin(newp
, valp
, sizeof(int));
563 AUDIT_ARG(value32
, *valp
);
569 * Validate parameters and get old / set new parameters
570 * for an quad(64bit)-valued sysctl function.
573 sysctl_quad(user_addr_t oldp
, size_t *oldlenp
,
574 user_addr_t newp
, size_t newlen
, quad_t
*valp
)
578 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
580 if (oldp
&& *oldlenp
< sizeof(quad_t
))
582 if (newp
&& newlen
!= sizeof(quad_t
))
584 *oldlenp
= sizeof(quad_t
);
586 error
= copyout(valp
, oldp
, sizeof(quad_t
));
587 if (error
== 0 && newp
)
588 error
= copyin(newp
, valp
, sizeof(quad_t
));
593 sysdoproc_filt_KERN_PROC_PID(proc_t p
, void * arg
)
595 if (p
->p_pid
!= (pid_t
)*(int*)arg
)
602 sysdoproc_filt_KERN_PROC_PGRP(proc_t p
, void * arg
)
604 if (p
->p_pgrpid
!= (pid_t
)*(int*)arg
)
611 sysdoproc_filt_KERN_PROC_TTY(proc_t p
, void * arg
)
616 /* This is very racy but list lock is held.. Hmmm. */
617 if ((p
->p_flag
& P_CONTROLT
) == 0 ||
618 (p
->p_pgrp
== NULL
) || (p
->p_pgrp
->pg_session
== NULL
) ||
619 (tp
= SESSION_TP(p
->p_pgrp
->pg_session
)) == TTY_NULL
||
620 tp
->t_dev
!= (dev_t
)*(int*)arg
)
629 sysdoproc_filt_KERN_PROC_UID(proc_t p
, void * arg
)
631 kauth_cred_t my_cred
;
634 if (p
->p_ucred
== NULL
)
636 my_cred
= kauth_cred_proc_ref(p
);
637 uid
= kauth_cred_getuid(my_cred
);
638 kauth_cred_unref(&my_cred
);
640 if (uid
!= (uid_t
)*(int*)arg
)
648 sysdoproc_filt_KERN_PROC_RUID(proc_t p
, void * arg
)
650 kauth_cred_t my_cred
;
653 if (p
->p_ucred
== NULL
)
655 my_cred
= kauth_cred_proc_ref(p
);
656 ruid
= kauth_cred_getruid(my_cred
);
657 kauth_cred_unref(&my_cred
);
659 if (ruid
!= (uid_t
)*(int*)arg
)
667 sysdoproc_filt_KERN_PROC_LCID(proc_t p
, void * arg
)
669 if ((p
->p_lctx
== NULL
) ||
670 (p
->p_lctx
->lc_id
!= (pid_t
)*(int*)arg
))
678 * try over estimating by 5 procs
680 #define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
681 struct sysdoproc_args
{
696 sysdoproc_callback(proc_t p
, void *arg
)
698 struct sysdoproc_args
*args
= arg
;
700 if (args
->buflen
>= args
->sizeof_kproc
) {
701 if ((args
->ruidcheck
!= 0) && (sysdoproc_filt_KERN_PROC_RUID(p
, &args
->uidval
) == 0))
702 return (PROC_RETURNED
);
703 if ((args
->uidcheck
!= 0) && (sysdoproc_filt_KERN_PROC_UID(p
, &args
->uidval
) == 0))
704 return (PROC_RETURNED
);
705 if ((args
->ttycheck
!= 0) && (sysdoproc_filt_KERN_PROC_TTY(p
, &args
->uidval
) == 0))
706 return (PROC_RETURNED
);
708 bzero(args
->kprocp
, args
->sizeof_kproc
);
710 fill_user64_proc(p
, args
->kprocp
);
712 fill_user32_proc(p
, args
->kprocp
);
713 int error
= copyout(args
->kprocp
, args
->dp
, args
->sizeof_kproc
);
715 *args
->errorp
= error
;
716 return (PROC_RETURNED_DONE
);
718 args
->dp
+= args
->sizeof_kproc
;
719 args
->buflen
-= args
->sizeof_kproc
;
721 args
->needed
+= args
->sizeof_kproc
;
722 return (PROC_RETURNED
);
725 SYSCTL_NODE(_kern
, KERN_PROC
, proc
, CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, "");
727 sysctl_prochandle SYSCTL_HANDLER_ARGS
729 int cmd
= oidp
->oid_arg2
; /* subcommand for multiple nodes */
730 int *name
= arg1
; /* oid element argument vector */
731 int namelen
= arg2
; /* number of oid element arguments */
732 user_addr_t where
= req
->oldptr
;/* user buffer copy out address */
734 user_addr_t dp
= where
;
736 int buflen
= where
!= USER_ADDR_NULL
? req
->oldlen
: 0;
738 boolean_t is_64_bit
= proc_is64bit(current_proc());
739 struct user32_kinfo_proc user32_kproc
;
740 struct user64_kinfo_proc user_kproc
;
743 int (*filterfn
)(proc_t
, void *) = 0;
744 struct sysdoproc_args args
;
749 if (namelen
!= 1 && !(namelen
== 0 && cmd
== KERN_PROC_ALL
))
753 sizeof_kproc
= sizeof(user_kproc
);
754 kprocp
= &user_kproc
;
756 sizeof_kproc
= sizeof(user32_kproc
);
757 kprocp
= &user32_kproc
;
763 filterfn
= sysdoproc_filt_KERN_PROC_PID
;
767 filterfn
= sysdoproc_filt_KERN_PROC_PGRP
;
784 filterfn
= sysdoproc_filt_KERN_PROC_LCID
;
791 /* must be kern.proc.<unknown> */
796 args
.buflen
= buflen
;
797 args
.kprocp
= kprocp
;
798 args
.is_64_bit
= is_64_bit
;
800 args
.needed
= needed
;
801 args
.errorp
= &error
;
802 args
.uidcheck
= uidcheck
;
803 args
.ruidcheck
= ruidcheck
;
804 args
.ttycheck
= ttycheck
;
805 args
.sizeof_kproc
= sizeof_kproc
;
807 args
.uidval
= name
[0];
809 proc_iterate((PROC_ALLPROCLIST
| PROC_ZOMBPROCLIST
),
810 sysdoproc_callback
, &args
, filterfn
, name
);
816 needed
= args
.needed
;
818 if (where
!= USER_ADDR_NULL
) {
819 req
->oldlen
= dp
- where
;
820 if (needed
> req
->oldlen
)
823 needed
+= KERN_PROCSLOP
;
824 req
->oldlen
= needed
;
826 /* adjust index so we return the right required/consumed amount */
827 req
->oldidx
+= req
->oldlen
;
832 * We specify the subcommand code for multiple nodes as the 'req->arg2' value
833 * in the sysctl declaration itself, which comes into the handler function
834 * as 'oidp->oid_arg2'.
836 * For these particular sysctls, since they have well known OIDs, we could
837 * have just obtained it from the '((int *)arg1)[0]' parameter, but that would
838 * not demonstrate how to handle multiple sysctls that used OID_AUTO instead
839 * of a well known value with a common handler function. This is desirable,
840 * because we want well known values to "go away" at some future date.
842 * It should be noted that the value of '((int *)arg1)[1]' is used for many
843 * an integer parameter to the subcommand for many of these sysctls; we'd
844 * rather have used '((int *)arg1)[0]' for that, or even better, an element
845 * in a structure passed in as the the 'newp' argument to sysctlbyname(3),
846 * and then use leaf-node permissions enforcement, but that would have
847 * necessitated modifying user space code to correspond to the interface
848 * change, and we are striving for binary backward compatibility here; even
849 * though these are SPI, and not intended for use by user space applications
850 * which are not themselves system tools or libraries, some applications
851 * have erroneously used them.
853 SYSCTL_PROC(_kern_proc
, KERN_PROC_ALL
, all
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
854 0, /* Pointer argument (arg1) */
855 KERN_PROC_ALL
, /* Integer argument (arg2) */
856 sysctl_prochandle
, /* Handler function */
857 NULL
, /* Data is size variant on ILP32/LP64 */
859 SYSCTL_PROC(_kern_proc
, KERN_PROC_PID
, pid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
860 0, /* Pointer argument (arg1) */
861 KERN_PROC_PID
, /* Integer argument (arg2) */
862 sysctl_prochandle
, /* Handler function */
863 NULL
, /* Data is size variant on ILP32/LP64 */
865 SYSCTL_PROC(_kern_proc
, KERN_PROC_TTY
, tty
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
866 0, /* Pointer argument (arg1) */
867 KERN_PROC_TTY
, /* Integer argument (arg2) */
868 sysctl_prochandle
, /* Handler function */
869 NULL
, /* Data is size variant on ILP32/LP64 */
871 SYSCTL_PROC(_kern_proc
, KERN_PROC_PGRP
, pgrp
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
872 0, /* Pointer argument (arg1) */
873 KERN_PROC_PGRP
, /* Integer argument (arg2) */
874 sysctl_prochandle
, /* Handler function */
875 NULL
, /* Data is size variant on ILP32/LP64 */
877 SYSCTL_PROC(_kern_proc
, KERN_PROC_UID
, uid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
878 0, /* Pointer argument (arg1) */
879 KERN_PROC_UID
, /* Integer argument (arg2) */
880 sysctl_prochandle
, /* Handler function */
881 NULL
, /* Data is size variant on ILP32/LP64 */
883 SYSCTL_PROC(_kern_proc
, KERN_PROC_RUID
, ruid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
884 0, /* Pointer argument (arg1) */
885 KERN_PROC_RUID
, /* Integer argument (arg2) */
886 sysctl_prochandle
, /* Handler function */
887 NULL
, /* Data is size variant on ILP32/LP64 */
889 SYSCTL_PROC(_kern_proc
, KERN_PROC_LCID
, lcid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
890 0, /* Pointer argument (arg1) */
891 KERN_PROC_LCID
, /* Integer argument (arg2) */
892 sysctl_prochandle
, /* Handler function */
893 NULL
, /* Data is size variant on ILP32/LP64 */
898 * Fill in non-zero fields of an eproc structure for the specified process.
901 fill_user32_eproc(proc_t p
, struct user32_eproc
*__restrict ep
)
905 struct session
*sessp
;
906 kauth_cred_t my_cred
;
909 sessp
= proc_session(p
);
911 if (pg
!= PGRP_NULL
) {
912 ep
->e_pgid
= p
->p_pgrpid
;
913 ep
->e_jobc
= pg
->pg_jobc
;
914 if (sessp
!= SESSION_NULL
&& sessp
->s_ttyvp
)
915 ep
->e_flag
= EPROC_CTTY
;
919 ep
->e_lcid
= p
->p_lctx
->lc_id
;
921 ep
->e_ppid
= p
->p_ppid
;
923 my_cred
= kauth_cred_proc_ref(p
);
925 /* A fake historical pcred */
926 ep
->e_pcred
.p_ruid
= kauth_cred_getruid(my_cred
);
927 ep
->e_pcred
.p_svuid
= kauth_cred_getsvuid(my_cred
);
928 ep
->e_pcred
.p_rgid
= kauth_cred_getrgid(my_cred
);
929 ep
->e_pcred
.p_svgid
= kauth_cred_getsvgid(my_cred
);
931 /* A fake historical *kauth_cred_t */
932 ep
->e_ucred
.cr_ref
= my_cred
->cr_ref
;
933 ep
->e_ucred
.cr_uid
= kauth_cred_getuid(my_cred
);
934 ep
->e_ucred
.cr_ngroups
= posix_cred_get(my_cred
)->cr_ngroups
;
935 bcopy(posix_cred_get(my_cred
)->cr_groups
,
936 ep
->e_ucred
.cr_groups
, NGROUPS
* sizeof (gid_t
));
938 kauth_cred_unref(&my_cred
);
941 if ((p
->p_flag
& P_CONTROLT
) && (sessp
!= SESSION_NULL
) &&
942 (tp
= SESSION_TP(sessp
))) {
943 ep
->e_tdev
= tp
->t_dev
;
944 ep
->e_tpgid
= sessp
->s_ttypgrpid
;
948 if (sessp
!= SESSION_NULL
) {
949 if (SESS_LEADER(p
, sessp
))
950 ep
->e_flag
|= EPROC_SLEADER
;
958 * Fill in non-zero fields of an LP64 eproc structure for the specified process.
961 fill_user64_eproc(proc_t p
, struct user64_eproc
*__restrict ep
)
965 struct session
*sessp
;
966 kauth_cred_t my_cred
;
969 sessp
= proc_session(p
);
971 if (pg
!= PGRP_NULL
) {
972 ep
->e_pgid
= p
->p_pgrpid
;
973 ep
->e_jobc
= pg
->pg_jobc
;
974 if (sessp
!= SESSION_NULL
&& sessp
->s_ttyvp
)
975 ep
->e_flag
= EPROC_CTTY
;
979 ep
->e_lcid
= p
->p_lctx
->lc_id
;
981 ep
->e_ppid
= p
->p_ppid
;
983 my_cred
= kauth_cred_proc_ref(p
);
985 /* A fake historical pcred */
986 ep
->e_pcred
.p_ruid
= kauth_cred_getruid(my_cred
);
987 ep
->e_pcred
.p_svuid
= kauth_cred_getsvuid(my_cred
);
988 ep
->e_pcred
.p_rgid
= kauth_cred_getrgid(my_cred
);
989 ep
->e_pcred
.p_svgid
= kauth_cred_getsvgid(my_cred
);
991 /* A fake historical *kauth_cred_t */
992 ep
->e_ucred
.cr_ref
= my_cred
->cr_ref
;
993 ep
->e_ucred
.cr_uid
= kauth_cred_getuid(my_cred
);
994 ep
->e_ucred
.cr_ngroups
= posix_cred_get(my_cred
)->cr_ngroups
;
995 bcopy(posix_cred_get(my_cred
)->cr_groups
,
996 ep
->e_ucred
.cr_groups
, NGROUPS
* sizeof (gid_t
));
998 kauth_cred_unref(&my_cred
);
1001 if ((p
->p_flag
& P_CONTROLT
) && (sessp
!= SESSION_NULL
) &&
1002 (tp
= SESSION_TP(sessp
))) {
1003 ep
->e_tdev
= tp
->t_dev
;
1004 ep
->e_tpgid
= sessp
->s_ttypgrpid
;
1008 if (sessp
!= SESSION_NULL
) {
1009 if (SESS_LEADER(p
, sessp
))
1010 ep
->e_flag
|= EPROC_SLEADER
;
1011 session_rele(sessp
);
1013 if (pg
!= PGRP_NULL
)
1018 * Fill in an eproc structure for the specified process.
1019 * bzeroed by our caller, so only set non-zero fields.
1022 fill_user32_externproc(proc_t p
, struct user32_extern_proc
*__restrict exp
)
1024 exp
->p_starttime
.tv_sec
= p
->p_start
.tv_sec
;
1025 exp
->p_starttime
.tv_usec
= p
->p_start
.tv_usec
;
1026 exp
->p_flag
= p
->p_flag
;
1027 if (p
->p_lflag
& P_LTRACED
)
1028 exp
->p_flag
|= P_TRACED
;
1029 if (p
->p_lflag
& P_LPPWAIT
)
1030 exp
->p_flag
|= P_PPWAIT
;
1031 if (p
->p_lflag
& P_LEXIT
)
1032 exp
->p_flag
|= P_WEXIT
;
1033 exp
->p_stat
= p
->p_stat
;
1034 exp
->p_pid
= p
->p_pid
;
1035 exp
->p_oppid
= p
->p_oppid
;
1037 exp
->user_stack
= p
->user_stack
;
1038 exp
->p_debugger
= p
->p_debugger
;
1039 exp
->sigwait
= p
->sigwait
;
1041 #ifdef _PROC_HAS_SCHEDINFO_
1042 exp
->p_estcpu
= p
->p_estcpu
;
1043 exp
->p_pctcpu
= p
->p_pctcpu
;
1044 exp
->p_slptime
= p
->p_slptime
;
1046 exp
->p_realtimer
.it_interval
.tv_sec
=
1047 (user32_time_t
)p
->p_realtimer
.it_interval
.tv_sec
;
1048 exp
->p_realtimer
.it_interval
.tv_usec
=
1049 (__int32_t
)p
->p_realtimer
.it_interval
.tv_usec
;
1051 exp
->p_realtimer
.it_value
.tv_sec
=
1052 (user32_time_t
)p
->p_realtimer
.it_value
.tv_sec
;
1053 exp
->p_realtimer
.it_value
.tv_usec
=
1054 (__int32_t
)p
->p_realtimer
.it_value
.tv_usec
;
1056 exp
->p_rtime
.tv_sec
= (user32_time_t
)p
->p_rtime
.tv_sec
;
1057 exp
->p_rtime
.tv_usec
= (__int32_t
)p
->p_rtime
.tv_usec
;
1059 exp
->p_sigignore
= p
->p_sigignore
;
1060 exp
->p_sigcatch
= p
->p_sigcatch
;
1061 exp
->p_priority
= p
->p_priority
;
1062 exp
->p_nice
= p
->p_nice
;
1063 bcopy(&p
->p_comm
, &exp
->p_comm
, MAXCOMLEN
);
1064 exp
->p_xstat
= p
->p_xstat
;
1065 exp
->p_acflag
= p
->p_acflag
;
1069 * Fill in an LP64 version of extern_proc structure for the specified process.
1072 fill_user64_externproc(proc_t p
, struct user64_extern_proc
*__restrict exp
)
1074 exp
->p_starttime
.tv_sec
= p
->p_start
.tv_sec
;
1075 exp
->p_starttime
.tv_usec
= p
->p_start
.tv_usec
;
1076 exp
->p_flag
= p
->p_flag
;
1077 if (p
->p_lflag
& P_LTRACED
)
1078 exp
->p_flag
|= P_TRACED
;
1079 if (p
->p_lflag
& P_LPPWAIT
)
1080 exp
->p_flag
|= P_PPWAIT
;
1081 if (p
->p_lflag
& P_LEXIT
)
1082 exp
->p_flag
|= P_WEXIT
;
1083 exp
->p_stat
= p
->p_stat
;
1084 exp
->p_pid
= p
->p_pid
;
1085 exp
->p_oppid
= p
->p_oppid
;
1087 exp
->user_stack
= p
->user_stack
;
1088 exp
->p_debugger
= p
->p_debugger
;
1089 exp
->sigwait
= p
->sigwait
;
1091 #ifdef _PROC_HAS_SCHEDINFO_
1092 exp
->p_estcpu
= p
->p_estcpu
;
1093 exp
->p_pctcpu
= p
->p_pctcpu
;
1094 exp
->p_slptime
= p
->p_slptime
;
1096 exp
->p_realtimer
.it_interval
.tv_sec
= p
->p_realtimer
.it_interval
.tv_sec
;
1097 exp
->p_realtimer
.it_interval
.tv_usec
= p
->p_realtimer
.it_interval
.tv_usec
;
1099 exp
->p_realtimer
.it_value
.tv_sec
= p
->p_realtimer
.it_value
.tv_sec
;
1100 exp
->p_realtimer
.it_value
.tv_usec
= p
->p_realtimer
.it_value
.tv_usec
;
1102 exp
->p_rtime
.tv_sec
= p
->p_rtime
.tv_sec
;
1103 exp
->p_rtime
.tv_usec
= p
->p_rtime
.tv_usec
;
1105 exp
->p_sigignore
= p
->p_sigignore
;
1106 exp
->p_sigcatch
= p
->p_sigcatch
;
1107 exp
->p_priority
= p
->p_priority
;
1108 exp
->p_nice
= p
->p_nice
;
1109 bcopy(&p
->p_comm
, &exp
->p_comm
, MAXCOMLEN
);
1110 exp
->p_xstat
= p
->p_xstat
;
1111 exp
->p_acflag
= p
->p_acflag
;
1115 fill_user32_proc(proc_t p
, struct user32_kinfo_proc
*__restrict kp
)
1117 /* on a 64 bit kernel, 32 bit users get some truncated information */
1118 fill_user32_externproc(p
, &kp
->kp_proc
);
1119 fill_user32_eproc(p
, &kp
->kp_eproc
);
1123 fill_user64_proc(proc_t p
, struct user64_kinfo_proc
*__restrict kp
)
1125 fill_user64_externproc(p
, &kp
->kp_proc
);
1126 fill_user64_eproc(p
, &kp
->kp_eproc
);
/*
 * sysctl_kdebug_ops: kern.kdebug handler gating access to the kdebug
 * trace facility and forwarding subcommands to kdbg_control().
 *
 * NOTE(review): this extraction is missing many original lines (the
 * switch header, several KERN_KD* cases, the access-check control flow
 * and return paths); the comments below describe only what the visible
 * code shows — confirm against the original kern_sysctl.c.
 */
1130 sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
1132 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
1133 int *name
= arg1
; /* oid element argument vector */
1134 int namelen
= arg2
; /* number of oid element arguments */
1135 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1136 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1137 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1138 // size_t newlen = req->newlen; /* user buffer copy in size */
1140 proc_t p
= current_proc();
/* superuser check: only root normally gets at the trace facility */
1146 ret
= suser(kauth_cred_get(), &p
->p_acflag
);
1148 /* Non-root processes may be blessed by kperf to access data
1149 * logged into trace.
1152 ret
= kperf_access_check();
/* visible subset of the accepted KERN_KD* subcommands */
1167 case KERN_KDWRITETR
:
1168 case KERN_KDWRITEMAP
:
1172 case KERN_KDSETRTCDEC
:
1174 case KERN_KDGETENTROPY
:
1175 case KERN_KDENABLE_BG_TRACE
:
1176 case KERN_KDDISABLE_BG_TRACE
:
1177 case KERN_KDREADCURTHRMAP
:
1178 case KERN_KDSET_TYPEFILTER
:
1179 case KERN_KDBUFWAIT
:
/* all accepted subcommands are serviced by kdbg_control() */
1182 ret
= kdbg_control(name
, namelen
, oldp
, oldlenp
);
1189 /* adjust index so we return the right required/consumed amount */
1191 req
->oldidx
+= req
->oldlen
;
1195 SYSCTL_PROC(_kern
, KERN_KDEBUG
, kdebug
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1196 0, /* Pointer argument (arg1) */
1197 0, /* Integer argument (arg2) */
1198 sysctl_kdebug_ops
, /* Handler function */
1199 NULL
, /* Data pointer */
1204 * Return the top *sizep bytes of the user stack, or the entire area of the
1205 * user stack down through the saved exec_path, whichever is smaller.
/*
 * kern.procargs handler: delegates to sysctl_procargsx() for the current
 * process with argc_yes = 0 (old layout, no argc prefix), then accounts
 * the copied-out length into req->oldidx.
 */
1208 sysctl_doprocargs SYSCTL_HANDLER_ARGS
1210 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand */
1211 int *name
= arg1
; /* oid element argument vector */
1212 int namelen
= arg2
; /* number of oid element arguments */
1213 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1214 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1215 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1216 // size_t newlen = req->newlen; /* user buffer copy in size */
1219 error
= sysctl_procargsx( name
, namelen
, oldp
, oldlenp
, current_proc(), 0);
1221 /* adjust index so we return the right required/consumed amount */
1223 req
->oldidx
+= req
->oldlen
;
1227 SYSCTL_PROC(_kern
, KERN_PROCARGS
, procargs
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1228 0, /* Pointer argument (arg1) */
1229 0, /* Integer argument (arg2) */
1230 sysctl_doprocargs
, /* Handler function */
1231 NULL
, /* Data pointer */
/*
 * kern.procargs2 handler: identical to sysctl_doprocargs except it asks
 * sysctl_procargsx() for the newer layout (argc_yes = 1, argc prefixed).
 */
1235 sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
1237 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand */
1238 int *name
= arg1
; /* oid element argument vector */
1239 int namelen
= arg2
; /* number of oid element arguments */
1240 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1241 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1242 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1243 // size_t newlen = req->newlen; /* user buffer copy in size */
1246 error
= sysctl_procargsx( name
, namelen
, oldp
, oldlenp
, current_proc(), 1);
1248 /* adjust index so we return the right required/consumed amount */
1250 req
->oldidx
+= req
->oldlen
;
1254 SYSCTL_PROC(_kern
, KERN_PROCARGS2
, procargs2
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1255 0, /* Pointer argument (arg1) */
1256 0, /* Integer argument (arg2) */
1257 sysctl_doprocargs2
, /* Handler function */
1258 NULL
, /* Data pointer */
/*
 * Core worker for KERN_PROCARGS / KERN_PROCARGS2.  Validates the
 * caller's buffer, checks credentials (same uid, or superuser),
 * takes task and vm_map references on the target, copies the top of
 * the target's user stack into a kernel buffer via Mach VM
 * copyin/overwrite, then copies the argument area out to `where`
 * (optionally prefixed with argc when argc_yes is set).
 * NOTE(review): extraction artifact — statements are split across
 * lines and some interior lines (braces, returns, declarations) are
 * missing from this view; code text is kept verbatim.
 */
1262 sysctl_procargsx(int *name
, u_int namelen
, user_addr_t where
,
1263 size_t *sizep
, proc_t cur_proc
, int argc_yes
)
1266 int buflen
= where
!= USER_ADDR_NULL
? *sizep
: 0;
1268 struct _vm_map
*proc_map
;
1271 user_addr_t arg_addr
;
1276 vm_offset_t copy_start
, copy_end
;
1279 kauth_cred_t my_cred
;
1286 buflen
-= sizeof(int); /* reserve first word to return argc */
1288 /* we only care about buflen when where (oldp from sysctl) is not NULL. */
1289 /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
1290 /* is not NULL then the caller wants us to return the length needed to */
1291 /* hold the data we would return */
1292 if (where
!= USER_ADDR_NULL
&& (buflen
<= 0 || buflen
> ARG_MAX
)) {
1298 * Lookup process by pid
1307 * Copy the top N bytes of the stack.
1308 * On all machines we have so far, the stack grows
1311 * If the user expects no more than N bytes of
1312 * argument list, use that as a guess for the
1316 if (!p
->user_stack
) {
1321 if (where
== USER_ADDR_NULL
) {
1322 /* caller only wants to know length of proc args data */
1323 if (sizep
== NULL
) {
1328 size
= p
->p_argslen
;
1331 size
+= sizeof(int);
1335 * old PROCARGS will return the executable's path and plus some
1336 * extra space for work alignment and data tags
1338 size
+= PATH_MAX
+ (6 * sizeof(int));
/* Round the reported size up to the next int boundary. */
1340 size
+= (size
& (sizeof(int) - 1)) ? (sizeof(int) - (size
& (sizeof(int) - 1))) : 0;
/* Credential check: only same-uid callers or the superuser may read. */
1345 my_cred
= kauth_cred_proc_ref(p
);
1346 uid
= kauth_cred_getuid(my_cred
);
1347 kauth_cred_unref(&my_cred
);
1349 if ((uid
!= kauth_cred_getuid(kauth_cred_get()))
1350 && suser(kauth_cred_get(), &cur_proc
->p_acflag
)) {
1355 if ((u_int
)arg_size
> p
->p_argslen
)
1356 arg_size
= round_page(p
->p_argslen
);
1358 arg_addr
= p
->user_stack
- arg_size
;
1362 * Before we can block (any VM code), make another
1363 * reference to the map to keep it alive. We do
1364 * that by getting a reference on the task itself.
1372 argslen
= p
->p_argslen
;
1374 * Once we have a task reference we can convert that into a
1375 * map reference, which we will use in the calls below. The
1376 * task/process may change its map after we take this reference
1377 * (see execve), but the worst that will happen then is a return
1378 * of stale info (which is always a possibility).
1380 task_reference(task
);
1382 proc_map
= get_task_map_reference(task
);
1383 task_deallocate(task
);
1385 if (proc_map
== NULL
)
/* NOTE(review): "©_start" below is a mojibake artifact of "&copy_start". */
1389 ret
= kmem_alloc(kernel_map
, ©_start
, round_page(arg_size
));
1390 if (ret
!= KERN_SUCCESS
) {
1391 vm_map_deallocate(proc_map
);
1395 copy_end
= round_page(copy_start
+ arg_size
);
1397 if( vm_map_copyin(proc_map
, (vm_map_address_t
)arg_addr
,
1398 (vm_map_size_t
)arg_size
, FALSE
, &tmp
) != KERN_SUCCESS
) {
1399 vm_map_deallocate(proc_map
);
1400 kmem_free(kernel_map
, copy_start
,
1401 round_page(arg_size
));
1406 * Now that we've done the copyin from the process'
1407 * map, we can release the reference to it.
1409 vm_map_deallocate(proc_map
);
1411 if( vm_map_copy_overwrite(kernel_map
,
1412 (vm_map_address_t
)copy_start
,
1413 tmp
, FALSE
) != KERN_SUCCESS
) {
1414 kmem_free(kernel_map
, copy_start
,
1415 round_page(arg_size
));
/* Point `data` at the start of the argument area within the copy. */
1419 if (arg_size
> argslen
) {
1420 data
= (caddr_t
) (copy_end
- argslen
);
1423 data
= (caddr_t
) (copy_end
- arg_size
);
1428 /* Put processes argc as the first word in the copyout buffer */
1429 suword(where
, p
->p_argc
);
1430 error
= copyout(data
, (where
+ sizeof(int)), size
);
1431 size
+= sizeof(int);
1433 error
= copyout(data
, where
, size
);
1436 * Make the old PROCARGS work to return the executable's path
1437 * But, only if there is enough space in the provided buffer
1439 * on entry: data [possibly] points to the beginning of the path
1441 * Note: we keep all pointers&sizes aligned to word boundaries
1443 if ( (! error
) && (buflen
> 0 && (u_int
)buflen
> argslen
) )
1445 int binPath_sz
, alignedBinPath_sz
= 0;
1446 int extraSpaceNeeded
, addThis
;
1447 user_addr_t placeHere
;
1448 char * str
= (char *) data
;
1451 /* Some apps are really bad about messing up their stacks
1452 So, we have to be extra careful about getting the length
1453 of the executing binary. If we encounter an error, we bail.
1456 /* Limit ourselves to PATH_MAX paths */
1457 if ( max_len
> PATH_MAX
) max_len
= PATH_MAX
;
/* Measure the NUL-terminated path, bounded by max_len. */
1461 while ( (binPath_sz
< max_len
-1) && (*str
++ != 0) )
1464 /* If we have a NUL terminator, copy it, too */
1465 if (binPath_sz
< max_len
-1) binPath_sz
+= 1;
1467 /* Pre-flight the space requirements */
1469 /* Account for the padding that fills out binPath to the next word */
1470 alignedBinPath_sz
+= (binPath_sz
& (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz
& (sizeof(int)-1))) : 0;
1472 placeHere
= where
+ size
;
1474 /* Account for the bytes needed to keep placeHere word aligned */
1475 addThis
= (placeHere
& (sizeof(int)-1)) ? (sizeof(int)-(placeHere
& (sizeof(int)-1))) : 0;
1477 /* Add up all the space that is needed */
1478 extraSpaceNeeded
= alignedBinPath_sz
+ addThis
+ binPath_sz
+ (4 * sizeof(int));
1480 /* is there room to tack on argv[0]? */
1481 if ( (buflen
& ~(sizeof(int)-1)) >= ( argslen
+ extraSpaceNeeded
))
1483 placeHere
+= addThis
;
1484 suword(placeHere
, 0);
1485 placeHere
+= sizeof(int);
1486 suword(placeHere
, 0xBFFF0000);
1487 placeHere
+= sizeof(int);
1488 suword(placeHere
, 0);
1489 placeHere
+= sizeof(int);
1490 error
= copyout(data
, placeHere
, binPath_sz
);
1493 placeHere
+= binPath_sz
;
1494 suword(placeHere
, 0);
1495 size
+= extraSpaceNeeded
;
/* Free the kernel copy of the stack, if one was made. */
1501 if (copy_start
!= (vm_offset_t
) 0) {
1502 kmem_free(kernel_map
, copy_start
, copy_end
- copy_start
);
1508 if (where
!= USER_ADDR_NULL
)
1515 * Max number of concurrent aio requests
1519 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1521 int new_value
, changed
;
1522 int error
= sysctl_io_number(req
, aio_max_requests
, sizeof(int), &new_value
, &changed
);
1524 /* make sure the system-wide limit is greater than the per process limit */
1525 if (new_value
>= aio_max_requests_per_process
&& new_value
<= AIO_MAX_REQUESTS
)
1526 aio_max_requests
= new_value
;
1535 * Max number of concurrent aio requests per process
1539 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1541 int new_value
, changed
;
1542 int error
= sysctl_io_number(req
, aio_max_requests_per_process
, sizeof(int), &new_value
, &changed
);
1544 /* make sure per process limit is less than the system-wide limit */
1545 if (new_value
<= aio_max_requests
&& new_value
>= AIO_LISTIO_MAX
)
1546 aio_max_requests_per_process
= new_value
;
1555 * Max number of async IO worker threads
1559 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1561 int new_value
, changed
;
1562 int error
= sysctl_io_number(req
, aio_worker_threads
, sizeof(int), &new_value
, &changed
);
1564 /* we only allow an increase in the number of worker threads */
1565 if (new_value
> aio_worker_threads
) {
1566 _aio_create_worker_threads((new_value
- aio_worker_threads
));
1567 aio_worker_threads
= new_value
;
1577 * System-wide limit on the max number of processes
1581 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1583 int new_value
, changed
;
1584 int error
= sysctl_io_number(req
, maxproc
, sizeof(int), &new_value
, &changed
);
1586 AUDIT_ARG(value32
, new_value
);
1587 /* make sure the system-wide limit is less than the configured hard
1588 limit set at kernel compilation */
1589 if (new_value
<= hard_maxproc
&& new_value
> 0)
1590 maxproc
= new_value
;
1597 SYSCTL_STRING(_kern
, KERN_OSTYPE
, ostype
,
1598 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1600 SYSCTL_STRING(_kern
, KERN_OSRELEASE
, osrelease
,
1601 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1603 SYSCTL_INT(_kern
, KERN_OSREV
, osrevision
,
1604 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1605 (int *)NULL
, BSD
, "");
1606 SYSCTL_STRING(_kern
, KERN_VERSION
, version
,
1607 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1609 SYSCTL_STRING(_kern
, OID_AUTO
, uuid
,
1610 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1611 &kernel_uuid_string
[0], 0, "");
/* Master switch for kprintf syscall tracing (exposed via _debug sysctls). */
1614 int debug_kprint_syscall
= 0;
/* Optional process-name filter; an empty string scopes tracing to all processes. */
1615 char debug_kprint_syscall_process
[MAXCOMLEN
+1];
1617 /* Thread safe: bits and string value are not used to reclaim state */
1618 SYSCTL_INT (_debug
, OID_AUTO
, kprint_syscall
,
1619 CTLFLAG_RW
| CTLFLAG_LOCKED
, &debug_kprint_syscall
, 0, "kprintf syscall tracing");
1620 SYSCTL_STRING(_debug
, OID_AUTO
, kprint_syscall_process
,
1621 CTLFLAG_RW
| CTLFLAG_LOCKED
, debug_kprint_syscall_process
, sizeof(debug_kprint_syscall_process
),
1622 "name of process for kprintf syscall tracing");
/*
 * Decide whether kprintf syscall tracing applies to the current process.
 * When a filter name is set, only a matching p_comm is traced, and
 * *namep is cleared since the caller already knows which process it is.
 * NOTE(review): the function tail is elided in this extraction.
 */
1624 int debug_kprint_current_process(const char **namep
)
1626 struct proc
*p
= current_proc();
1632 if (debug_kprint_syscall_process
[0]) {
1633 /* user asked to scope tracing to a particular process name */
1634 if(0 == strncmp(debug_kprint_syscall_process
,
1635 p
->p_comm
, sizeof(debug_kprint_syscall_process
))) {
1636 /* no value in telling the user that we traced what they asked */
1637 if(namep
) *namep
= NULL
;
1645 /* trace all processes. Tell user what we traced */
1654 /* PR-5293665: need to use a callback function for kern.osversion to set
1655 * osversion in IORegistry */
/*
 * kern.osversion handler (PR-5293665): handle the string read/write via
 * sysctl_handle_string, then mirror the new value into the IORegistry.
 */
1658 sysctl_osversion(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
)
1662 rval
= sysctl_handle_string(oidp
, arg1
, arg2
, req
);
1665 IORegistrySetOSBuildVersion((char *)arg1
);
1671 SYSCTL_PROC(_kern
, KERN_OSVERSION
, osversion
,
1672 CTLFLAG_RW
| CTLFLAG_KERN
| CTLTYPE_STRING
| CTLFLAG_LOCKED
,
1673 osversion
, 256 /* OSVERSIZE*/,
1674 sysctl_osversion
, "A", "");
/*
 * kern.bootargs handler: copy the boot-args string from the platform
 * expert into a local buffer and return it to the caller (read-only).
 */
1677 sysctl_sysctl_bootargs
1678 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1683 strlcpy(buf
, PE_boot_args(), 256);
1684 error
= sysctl_io_string(req
, buf
, 256, 0, NULL
);
1688 SYSCTL_PROC(_kern
, OID_AUTO
, bootargs
,
1689 CTLFLAG_LOCKED
| CTLFLAG_RD
| CTLFLAG_KERN
| CTLTYPE_STRING
,
1691 sysctl_sysctl_bootargs
, "A", "bootargs");
1693 SYSCTL_INT(_kern
, KERN_MAXFILES
, maxfiles
,
1694 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1696 SYSCTL_INT(_kern
, KERN_ARGMAX
, argmax
,
1697 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1698 (int *)NULL
, ARG_MAX
, "");
1699 SYSCTL_INT(_kern
, KERN_POSIX1
, posix1version
,
1700 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1701 (int *)NULL
, _POSIX_VERSION
, "");
1702 SYSCTL_INT(_kern
, KERN_NGROUPS
, ngroups
,
1703 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1704 (int *)NULL
, NGROUPS_MAX
, "");
1705 SYSCTL_INT(_kern
, KERN_JOB_CONTROL
, job_control
,
1706 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1707 (int *)NULL
, 1, "");
1708 #if 1 /* _POSIX_SAVED_IDS from <unistd.h> */
1709 SYSCTL_INT(_kern
, KERN_SAVED_IDS
, saved_ids
,
1710 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1711 (int *)NULL
, 1, "");
1713 SYSCTL_INT(_kern
, KERN_SAVED_IDS
, saved_ids
,
1714 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1717 SYSCTL_INT(_kern
, OID_AUTO
, num_files
,
1718 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1720 SYSCTL_COMPAT_INT(_kern
, OID_AUTO
, num_vnodes
,
1721 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1723 SYSCTL_INT(_kern
, OID_AUTO
, num_tasks
,
1724 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1726 SYSCTL_INT(_kern
, OID_AUTO
, num_threads
,
1727 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1728 &thread_max
, 0, "");
1729 SYSCTL_INT(_kern
, OID_AUTO
, num_taskthreads
,
1730 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1731 &task_threadmax
, 0, "");
/*
 * kern.maxvnodes handler: sysctl_io_number writes straight into
 * desiredvnodes; if the value actually changed, resize the VM object
 * cache and the name cache to match.
 */
1734 sysctl_maxvnodes (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1736 int oldval
= desiredvnodes
;
1737 int error
= sysctl_io_number(req
, desiredvnodes
, sizeof(int), &desiredvnodes
, NULL
);
1739 if (oldval
!= desiredvnodes
) {
1740 reset_vmobjectcache(oldval
, desiredvnodes
);
1741 resize_namecache(desiredvnodes
);
1747 SYSCTL_INT(_kern
, OID_AUTO
, namecache_disabled
,
1748 CTLFLAG_RW
| CTLFLAG_LOCKED
,
1749 &nc_disabled
, 0, "");
1751 SYSCTL_PROC(_kern
, KERN_MAXVNODES
, maxvnodes
,
1752 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1753 0, 0, sysctl_maxvnodes
, "I", "");
1755 SYSCTL_PROC(_kern
, KERN_MAXPROC
, maxproc
,
1756 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1757 0, 0, sysctl_maxproc
, "I", "");
1759 SYSCTL_PROC(_kern
, KERN_AIOMAX
, aiomax
,
1760 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1761 0, 0, sysctl_aiomax
, "I", "");
1763 SYSCTL_PROC(_kern
, KERN_AIOPROCMAX
, aioprocmax
,
1764 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1765 0, 0, sysctl_aioprocmax
, "I", "");
1767 SYSCTL_PROC(_kern
, KERN_AIOTHREADS
, aiothreads
,
1768 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1769 0, 0, sysctl_aiothreads
, "I", "");
1771 #if (DEVELOPMENT || DEBUG)
1772 extern int sched_smt_balance
;
1773 SYSCTL_INT(_kern
, OID_AUTO
, sched_smt_balance
,
1774 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1775 &sched_smt_balance
, 0, "");
1780 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1782 int new_value
, changed
;
1783 int error
= sysctl_io_number(req
, securelevel
, sizeof(int), &new_value
, &changed
);
1785 if (!(new_value
< securelevel
&& req
->p
->p_pid
!= 1)) {
1787 securelevel
= new_value
;
1796 SYSCTL_PROC(_kern
, KERN_SECURELVL
, securelevel
,
1797 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1798 0, 0, sysctl_securelvl
, "I", "");
1803 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1806 error
= sysctl_io_string(req
, domainname
, sizeof(domainname
), 0, &changed
);
1808 domainnamelen
= strlen(domainname
);
1813 SYSCTL_PROC(_kern
, KERN_DOMAINNAME
, nisdomainname
,
1814 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1815 0, 0, sysctl_domainname
, "A", "");
1817 SYSCTL_COMPAT_INT(_kern
, KERN_HOSTID
, hostid
,
1818 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1823 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1826 error
= sysctl_io_string(req
, hostname
, sizeof(hostname
), 1, &changed
);
1828 hostnamelen
= req
->newlen
;
1834 SYSCTL_PROC(_kern
, KERN_HOSTNAME
, hostname
,
1835 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1836 0, 0, sysctl_hostname
, "A", "");
1840 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1842 /* Original code allowed writing, I'm copying this, although this all makes
1843 no sense to me. Besides, this sysctl is never used. */
1844 return sysctl_io_string(req
, &req
->p
->p_name
[0], (2*MAXCOMLEN
+1), 1, NULL
);
1847 SYSCTL_PROC(_kern
, KERN_PROCNAME
, procname
,
1848 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
1849 0, 0, sysctl_procname
, "A", "");
1851 SYSCTL_INT(_kern
, KERN_SPECULATIVE_READS
, speculative_reads_disabled
,
1852 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1853 &speculative_reads_disabled
, 0, "");
1855 SYSCTL_INT(_kern
, OID_AUTO
, ignore_is_ssd
,
1856 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1857 &ignore_is_ssd
, 0, "");
1859 SYSCTL_UINT(_kern
, OID_AUTO
, preheat_max_bytes
,
1860 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1861 &preheat_max_bytes
, 0, "");
1863 SYSCTL_UINT(_kern
, OID_AUTO
, preheat_min_bytes
,
1864 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1865 &preheat_min_bytes
, 0, "");
1867 SYSCTL_UINT(_kern
, OID_AUTO
, speculative_prefetch_max
,
1868 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1869 &speculative_prefetch_max
, 0, "");
1871 SYSCTL_UINT(_kern
, OID_AUTO
, speculative_prefetch_max_iosize
,
1872 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1873 &speculative_prefetch_max_iosize
, 0, "");
1875 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_free_target
,
1876 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1877 &vm_page_free_target
, 0, "");
1879 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_free_min
,
1880 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1881 &vm_page_free_min
, 0, "");
1883 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_free_reserved
,
1884 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1885 &vm_page_free_reserved
, 0, "");
1887 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_speculative_percentage
,
1888 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1889 &vm_page_speculative_percentage
, 0, "");
1891 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_speculative_q_age_ms
,
1892 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1893 &vm_page_speculative_q_age_ms
, 0, "");
1895 SYSCTL_UINT(_kern
, OID_AUTO
, vm_max_delayed_work_limit
,
1896 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1897 &vm_max_delayed_work_limit
, 0, "");
1899 SYSCTL_UINT(_kern
, OID_AUTO
, vm_max_batch
,
1900 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1901 &vm_max_batch
, 0, "");
1903 SYSCTL_STRING(_kern
, OID_AUTO
, bootsessionuuid
,
1904 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1905 &bootsessionuuid_string
, sizeof(bootsessionuuid_string
) , "");
1909 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1911 time_t tv_sec
= boottime_sec();
1912 struct proc
*p
= req
->p
;
1914 if (proc_is64bit(p
)) {
1915 struct user64_timeval t
;
1918 return sysctl_io_opaque(req
, &t
, sizeof(t
), NULL
);
1920 struct user32_timeval t
;
1923 return sysctl_io_opaque(req
, &t
, sizeof(t
), NULL
);
1927 SYSCTL_PROC(_kern
, KERN_BOOTTIME
, boottime
,
1928 CTLTYPE_STRUCT
| CTLFLAG_KERN
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1929 0, 0, sysctl_boottime
, "S,timeval", "");
1933 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1936 int error
= get_kernel_symfile(req
->p
, &str
);
1939 return sysctl_io_string(req
, str
, 0, 0, NULL
);
1943 SYSCTL_PROC(_kern
, KERN_SYMFILE
, symfile
,
1944 CTLTYPE_STRING
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1945 0, 0, sysctl_symfile
, "A", "");
1950 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1952 return sysctl_io_number(req
, netboot_root(), sizeof(int), NULL
, NULL
);
1955 SYSCTL_PROC(_kern
, KERN_NETBOOT
, netboot
,
1956 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1957 0, 0, sysctl_netboot
, "I", "");
1960 #ifdef CONFIG_IMGSRC_ACCESS
1962 * Legacy--act as if only one layer of nesting is possible.
1966 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1968 vfs_context_t ctx
= vfs_context_current();
1972 if (!vfs_context_issuser(ctx
)) {
1976 if (imgsrc_rootvnodes
[0] == NULL
) {
1980 result
= vnode_getwithref(imgsrc_rootvnodes
[0]);
1985 devvp
= vnode_mount(imgsrc_rootvnodes
[0])->mnt_devvp
;
1986 result
= vnode_getwithref(devvp
);
1991 result
= sysctl_io_number(req
, vnode_specrdev(devvp
), sizeof(dev_t
), NULL
, NULL
);
1995 vnode_put(imgsrc_rootvnodes
[0]);
1999 SYSCTL_PROC(_kern
, OID_AUTO
, imgsrcdev
,
2000 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2001 0, 0, sysctl_imgsrcdev
, "I", "");
2005 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2008 struct imgsrc_info info
[MAX_IMAGEBOOT_NESTING
]; /* 2 for now, no problem */
2012 if (imgsrc_rootvnodes
[0] == NULLVP
) {
2016 for (i
= 0; i
< MAX_IMAGEBOOT_NESTING
; i
++) {
2018 * Go get the root vnode.
2020 rvp
= imgsrc_rootvnodes
[i
];
2021 if (rvp
== NULLVP
) {
2025 error
= vnode_get(rvp
);
2031 * For now, no getting at a non-local volume.
2033 devvp
= vnode_mount(rvp
)->mnt_devvp
;
2034 if (devvp
== NULL
) {
2039 error
= vnode_getwithref(devvp
);
2048 info
[i
].ii_dev
= vnode_specrdev(devvp
);
2049 info
[i
].ii_flags
= 0;
2050 info
[i
].ii_height
= i
;
2051 bzero(info
[i
].ii_reserved
, sizeof(info
[i
].ii_reserved
));
2057 return sysctl_io_opaque(req
, info
, i
* sizeof(info
[0]), NULL
);
2060 SYSCTL_PROC(_kern
, OID_AUTO
, imgsrcinfo
,
2061 CTLTYPE_OPAQUE
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2062 0, 0, sysctl_imgsrcinfo
, "I", "");
2064 #endif /* CONFIG_IMGSRC_ACCESS */
2067 SYSCTL_DECL(_kern_timer
);
2068 SYSCTL_NODE(_kern
, OID_AUTO
, timer
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, "timer");
2071 SYSCTL_INT(_kern_timer
, OID_AUTO
, coalescing_enabled
,
2072 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2073 &mach_timer_coalescing_enabled
, 0, "");
2075 SYSCTL_QUAD(_kern_timer
, OID_AUTO
, deadline_tracking_bin_1
,
2076 CTLFLAG_RW
| CTLFLAG_LOCKED
,
2077 &timer_deadline_tracking_bin_1
, "");
2078 SYSCTL_QUAD(_kern_timer
, OID_AUTO
, deadline_tracking_bin_2
,
2079 CTLFLAG_RW
| CTLFLAG_LOCKED
,
2080 &timer_deadline_tracking_bin_2
, "");
2082 SYSCTL_DECL(_kern_timer_longterm
);
2083 SYSCTL_NODE(_kern_timer
, OID_AUTO
, longterm
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, "longterm");
2086 /* Must match definition in osfmk/kern/timer_call.c */
2089 ENQUEUES
, DEQUEUES
, ESCALATES
, SCANS
, PREEMPTS
,
2090 LATENCY
, LATENCY_MIN
, LATENCY_MAX
2092 extern uint64_t timer_sysctl_get(int);
2093 extern int timer_sysctl_set(int, uint64_t);
2097 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2099 int oid
= (int)arg1
;
2100 uint64_t value
= timer_sysctl_get(oid
);
2105 error
= sysctl_io_number(req
, value
, sizeof(value
), &new_value
, &changed
);
2107 error
= timer_sysctl_set(oid
, new_value
);
2112 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, threshold
,
2113 CTLTYPE_QUAD
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2114 (void *) THRESHOLD
, 0, sysctl_timer
, "Q", "");
2115 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, qlen
,
2116 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2117 (void *) QCOUNT
, 0, sysctl_timer
, "Q", "");
2119 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, enqueues
,
2120 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2121 (void *) ENQUEUES
, 0, sysctl_timer
, "Q", "");
2122 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, dequeues
,
2123 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2124 (void *) DEQUEUES
, 0, sysctl_timer
, "Q", "");
2125 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, escalates
,
2126 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2127 (void *) ESCALATES
, 0, sysctl_timer
, "Q", "");
2128 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, scans
,
2129 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2130 (void *) SCANS
, 0, sysctl_timer
, "Q", "");
2131 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, preempts
,
2132 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2133 (void *) PREEMPTS
, 0, sysctl_timer
, "Q", "");
2134 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, latency
,
2135 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2136 (void *) LATENCY
, 0, sysctl_timer
, "Q", "");
2137 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, latency_min
,
2138 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2139 (void *) LATENCY_MIN
, 0, sysctl_timer
, "Q", "");
2140 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, latency_max
,
2141 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2142 (void *) LATENCY_MAX
, 0, sysctl_timer
, "Q", "");
2147 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2149 return sysctl_io_number(req
, (int)req
->p
->user_stack
, sizeof(int), NULL
, NULL
);
2152 SYSCTL_PROC(_kern
, KERN_USRSTACK32
, usrstack
,
2153 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2154 0, 0, sysctl_usrstack
, "I", "");
2158 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2160 return sysctl_io_number(req
, req
->p
->user_stack
, sizeof(req
->p
->user_stack
), NULL
, NULL
);
2163 SYSCTL_PROC(_kern
, KERN_USRSTACK64
, usrstack64
,
2164 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2165 0, 0, sysctl_usrstack64
, "Q", "");
2167 SYSCTL_STRING(_kern
, KERN_COREFILE
, corefile
,
2168 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2169 corefilename
, sizeof(corefilename
), "");
2173 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2175 #ifdef SECURE_KERNEL
2179 int new_value
, changed
;
2180 int error
= sysctl_io_number(req
, do_coredump
, sizeof(int), &new_value
, &changed
);
2182 if ((new_value
== 0) || (new_value
== 1))
2183 do_coredump
= new_value
;
2191 SYSCTL_PROC(_kern
, KERN_COREDUMP
, coredump
,
2192 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2193 0, 0, sysctl_coredump
, "I", "");
/*
 * kern.sugid_coredump handler: toggles core dumps for set-id processes;
 * only 0 and 1 are accepted.  The SECURE_KERNEL branch body is elided
 * in this extraction — presumably it rejects changes; TODO confirm.
 */
2196 sysctl_suid_coredump
2197 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2199 #ifdef SECURE_KERNEL
2203 int new_value
, changed
;
2204 int error
= sysctl_io_number(req
, sugid_coredump
, sizeof(int), &new_value
, &changed
);
2206 if ((new_value
== 0) || (new_value
== 1))
2207 sugid_coredump
= new_value
;
2215 SYSCTL_PROC(_kern
, KERN_SUGID_COREDUMP
, sugid_coredump
,
2216 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2217 0, 0, sysctl_suid_coredump
, "I", "");
2221 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2223 struct proc
*p
= req
->p
;
2224 int new_value
, changed
;
2225 int error
= sysctl_io_number(req
, (req
->p
->p_lflag
& P_LDELAYTERM
)? 1: 0, sizeof(int), &new_value
, &changed
);
2229 req
->p
->p_lflag
|= P_LDELAYTERM
;
2231 req
->p
->p_lflag
&= ~P_LDELAYTERM
;
2237 SYSCTL_PROC(_kern
, KERN_PROCDELAYTERM
, delayterm
,
2238 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2239 0, 0, sysctl_delayterm
, "I", "");
2244 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2246 struct proc
*p
= req
->p
;
2248 int new_value
, old_value
, changed
;
2251 ut
= get_bsdthread_info(current_thread());
2253 if (ut
->uu_flag
& UT_RAGE_VNODES
)
2254 old_value
= KERN_RAGE_THREAD
;
2255 else if (p
->p_lflag
& P_LRAGE_VNODES
)
2256 old_value
= KERN_RAGE_PROC
;
2260 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
2263 switch (new_value
) {
2264 case KERN_RAGE_PROC
:
2266 p
->p_lflag
|= P_LRAGE_VNODES
;
2269 case KERN_UNRAGE_PROC
:
2271 p
->p_lflag
&= ~P_LRAGE_VNODES
;
2275 case KERN_RAGE_THREAD
:
2276 ut
->uu_flag
|= UT_RAGE_VNODES
;
2278 case KERN_UNRAGE_THREAD
:
2279 ut
= get_bsdthread_info(current_thread());
2280 ut
->uu_flag
&= ~UT_RAGE_VNODES
;
2287 SYSCTL_PROC(_kern
, KERN_RAGEVNODE
, rage_vnode
,
2288 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
2289 0, 0, sysctl_rage_vnode
, "I", "");
2291 /* XXX move this interface into libproc and remove this sysctl */
/*
 * kern.setthread_cpupercent handler: the written value packs a CPU
 * percentage in the low 8 bits and a refill period in milliseconds in
 * the next 24 bits; applies a THREAD_CPULIMIT_BLOCK limit to the
 * calling thread.  A percentage of 0 unsets any existing limit.
 */
2293 sysctl_setthread_cpupercent
2294 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2296 int new_value
, old_value
;
2298 kern_return_t kret
= KERN_SUCCESS
;
2299 uint8_t percent
= 0;
2307 if ((error
= sysctl_io_number(req
, old_value
, sizeof(old_value
), &new_value
, NULL
)) != 0)
2310 percent
= new_value
& 0xff; /* low 8 bits hold the percent */
2311 ms_refill
= (new_value
>> 8) & 0xffffff; /* upper 24 bits are the ms refill value */
2316 * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present.
2318 if ((kret
= thread_set_cpulimit(THREAD_CPULIMIT_BLOCK
, percent
, ms_refill
* (int)NSEC_PER_MSEC
)) != 0)
2324 SYSCTL_PROC(_kern
, OID_AUTO
, setthread_cpupercent
,
2325 CTLTYPE_INT
| CTLFLAG_WR
| CTLFLAG_ANYBODY
,
2326 0, 0, sysctl_setthread_cpupercent
, "I", "set thread cpu percentage limit");
/*
 * kern.check_openevt handler: reports whether the calling process has
 * P_CHECKOPENEVT set, and sets/clears that flag atomically
 * (OSBitOrAtomic / OSBitAndAtomic) on write.
 */
2330 sysctl_kern_check_openevt
2331 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2333 struct proc
*p
= req
->p
;
2334 int new_value
, old_value
, changed
;
2337 if (p
->p_flag
& P_CHECKOPENEVT
) {
2338 old_value
= KERN_OPENEVT_PROC
;
2343 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
2346 switch (new_value
) {
2347 case KERN_OPENEVT_PROC
:
2348 OSBitOrAtomic(P_CHECKOPENEVT
, &p
->p_flag
);
2351 case KERN_UNOPENEVT_PROC
:
2352 OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT
), &p
->p_flag
);
2362 SYSCTL_PROC(_kern
, KERN_CHECKOPENEVT
, check_openevt
, CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
2363 0, 0, sysctl_kern_check_openevt
, "I", "set the per-process check-open-evt flag");
2369 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2371 #ifdef SECURE_KERNEL
2375 int new_value
, changed
;
2378 error
= sysctl_io_number(req
, nx_enabled
, sizeof(nx_enabled
), &new_value
, &changed
);
2383 #if defined(__i386__) || defined(__x86_64__)
2385 * Only allow setting if NX is supported on the chip
2387 if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD
))
2390 nx_enabled
= new_value
;
2393 #endif /* SECURE_KERNEL */
2398 SYSCTL_PROC(_kern
, KERN_NX_PROTECTION
, nx
,
2399 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2400 0, 0, sysctl_nx
, "I", "");
2404 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2406 if (proc_is64bit(req
->p
)) {
2407 struct user64_loadavg loadinfo64
;
2408 fill_loadavg64(&averunnable
, &loadinfo64
);
2409 return sysctl_io_opaque(req
, &loadinfo64
, sizeof(loadinfo64
), NULL
);
2411 struct user32_loadavg loadinfo32
;
2412 fill_loadavg32(&averunnable
, &loadinfo32
);
2413 return sysctl_io_opaque(req
, &loadinfo32
, sizeof(loadinfo32
), NULL
);
2417 SYSCTL_PROC(_vm
, VM_LOADAVG
, loadavg
,
2418 CTLTYPE_STRUCT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2419 0, 0, sysctl_loadavg
, "S,loadavg", "");
2422 * Note: Thread safe; vm_map_lock protects in vm_toggle_entry_reuse()
2425 sysctl_vm_toggle_address_reuse(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
,
2426 __unused
int arg2
, struct sysctl_req
*req
)
2428 int old_value
=0, new_value
=0, error
=0;
2430 if(vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE
, &old_value
))
2432 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, NULL
);
2434 return (vm_toggle_entry_reuse(new_value
, NULL
));
2439 SYSCTL_PROC(_debug
, OID_AUTO
, toggle_address_reuse
, CTLFLAG_ANYBODY
| CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, 0, sysctl_vm_toggle_address_reuse
,"I","");
2443 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2446 uint64_t swap_total
;
2447 uint64_t swap_avail
;
2448 vm_size_t swap_pagesize
;
2449 boolean_t swap_encrypted
;
2450 struct xsw_usage xsu
;
2452 error
= macx_swapinfo(&swap_total
,
2459 xsu
.xsu_total
= swap_total
;
2460 xsu
.xsu_avail
= swap_avail
;
2461 xsu
.xsu_used
= swap_total
- swap_avail
;
2462 xsu
.xsu_pagesize
= swap_pagesize
;
2463 xsu
.xsu_encrypted
= swap_encrypted
;
2464 return sysctl_io_opaque(req
, &xsu
, sizeof(xsu
), NULL
);
2469 SYSCTL_PROC(_vm
, VM_SWAPUSAGE
, swapusage
,
2470 CTLTYPE_STRUCT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2471 0, 0, sysctl_swapusage
, "S,xsw_usage", "");
#if CONFIG_FREEZE
extern void vm_page_reactivate_all_throttled(void);

/*
 * Get/set vm.freeze_enabled.  Setting is refused when a compressed pager is
 * active; disabling freeze moves dirty pages from the throttled queue back
 * to the active queue.
 */
static int
sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error, val = memorystatus_freeze_enabled ? 1 : 0;
	boolean_t disabled;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
		//assert(req->newptr);
		printf("Failed attempt to set vm.freeze_enabled sysctl\n");
		return EINVAL;
	}

	/*
	 * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue.
	 */
	disabled = (!val && memorystatus_freeze_enabled);

	memorystatus_freeze_enabled = val ? TRUE : FALSE;

	if (disabled) {
		vm_page_reactivate_all_throttled();
	}

	return (0);
}

SYSCTL_PROC(_vm, OID_AUTO, freeze_enabled, CTLTYPE_INT | CTLFLAG_RW,
    &memorystatus_freeze_enabled, 0, sysctl_freeze_enabled, "I", "");

#endif /* CONFIG_FREEZE */
2510 /* this kernel does NOT implement shared_region_make_private_np() */
2511 SYSCTL_INT(_kern
, KERN_SHREG_PRIVATIZABLE
, shreg_private
,
2512 CTLFLAG_RD
| CTLFLAG_LOCKED
,
2513 (int *)NULL
, 0, "");
2516 fetch_process_cputype(
2520 cpu_type_t
*cputype
)
2522 proc_t p
= PROC_NULL
;
2529 else if (namelen
== 1) {
2530 p
= proc_find(name
[0]);
2539 ret
= cpu_type() & ~CPU_ARCH_MASK
;
2540 if (IS_64BIT_PROCESS(p
))
2541 ret
|= CPU_ARCH_ABI64
;
2552 sysctl_sysctl_native(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
,
2553 struct sysctl_req
*req
)
2556 cpu_type_t proc_cputype
= 0;
2557 if ((error
= fetch_process_cputype(req
->p
, (int *)arg1
, arg2
, &proc_cputype
)) != 0)
2560 if ((proc_cputype
& ~CPU_ARCH_MASK
) != (cpu_type() & ~CPU_ARCH_MASK
))
2562 return SYSCTL_OUT(req
, &res
, sizeof(res
));
2564 SYSCTL_PROC(_sysctl
, OID_AUTO
, proc_native
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, 0, sysctl_sysctl_native
,"I","proc_native");
2567 sysctl_sysctl_cputype(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
,
2568 struct sysctl_req
*req
)
2571 cpu_type_t proc_cputype
= 0;
2572 if ((error
= fetch_process_cputype(req
->p
, (int *)arg1
, arg2
, &proc_cputype
)) != 0)
2574 return SYSCTL_OUT(req
, &proc_cputype
, sizeof(proc_cputype
));
2576 SYSCTL_PROC(_sysctl
, OID_AUTO
, proc_cputype
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, 0, sysctl_sysctl_cputype
,"I","proc_cputype");
2580 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2582 return sysctl_io_number(req
, boothowto
& RB_SAFEBOOT
? 1 : 0, sizeof(int), NULL
, NULL
);
2585 SYSCTL_PROC(_kern
, KERN_SAFEBOOT
, safeboot
,
2586 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2587 0, 0, sysctl_safeboot
, "I", "");
2591 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2593 return sysctl_io_number(req
, boothowto
& RB_SINGLE
? 1 : 0, sizeof(int), NULL
, NULL
);
2596 SYSCTL_PROC(_kern
, OID_AUTO
, singleuser
,
2597 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2598 0, 0, sysctl_singleuser
, "I", "");
2601 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
2603 extern boolean_t affinity_sets_enabled
;
2604 extern int affinity_sets_mapping
;
2606 SYSCTL_INT (_kern
, OID_AUTO
, affinity_sets_enabled
,
2607 CTLFLAG_RW
| CTLFLAG_LOCKED
, (int *) &affinity_sets_enabled
, 0, "hinting enabled");
2608 SYSCTL_INT (_kern
, OID_AUTO
, affinity_sets_mapping
,
2609 CTLFLAG_RW
| CTLFLAG_LOCKED
, &affinity_sets_mapping
, 0, "mapping policy");
2612 * Boolean indicating if KASLR is active.
2616 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2620 slide
= vm_kernel_slide
? 1 : 0;
2622 return sysctl_io_number( req
, slide
, sizeof(int), NULL
, NULL
);
2625 SYSCTL_PROC(_kern
, OID_AUTO
, slide
,
2626 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2627 0, 0, sysctl_slide
, "I", "");
2630 * Limit on total memory users can wire.
2632 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
2634 * vm_user_wire_limit - per address space limit on wired memory. This puts a cap on the process's rlimit value.
2636 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
2639 * All values are in bytes.
2642 vm_map_size_t vm_global_no_user_wire_amount
;
2643 vm_map_size_t vm_global_user_wire_limit
;
2644 vm_map_size_t vm_user_wire_limit
;
2647 * There needs to be a more automatic/elegant way to do this
2649 SYSCTL_QUAD(_vm
, OID_AUTO
, global_no_user_wire_amount
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_global_no_user_wire_amount
, "");
2650 SYSCTL_QUAD(_vm
, OID_AUTO
, global_user_wire_limit
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_global_user_wire_limit
, "");
2651 SYSCTL_QUAD(_vm
, OID_AUTO
, user_wire_limit
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_user_wire_limit
, "");
2653 extern int vm_map_copy_overwrite_aligned_src_not_internal
;
2654 extern int vm_map_copy_overwrite_aligned_src_not_symmetric
;
2655 extern int vm_map_copy_overwrite_aligned_src_large
;
2656 SYSCTL_INT(_vm
, OID_AUTO
, vm_copy_src_not_internal
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_map_copy_overwrite_aligned_src_not_internal
, 0, "");
2657 SYSCTL_INT(_vm
, OID_AUTO
, vm_copy_src_not_symmetric
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_map_copy_overwrite_aligned_src_not_symmetric
, 0, "");
2658 SYSCTL_INT(_vm
, OID_AUTO
, vm_copy_src_large
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_map_copy_overwrite_aligned_src_large
, 0, "");
2661 extern uint32_t vm_page_external_count
;
2662 extern uint32_t vm_page_filecache_min
;
2664 SYSCTL_INT(_vm
, OID_AUTO
, vm_page_external_count
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_page_external_count
, 0, "");
2665 SYSCTL_INT(_vm
, OID_AUTO
, vm_page_filecache_min
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_page_filecache_min
, 0, "");
2667 extern int vm_compressor_mode
;
2668 extern int vm_compressor_is_active
;
2669 extern uint32_t swapout_target_age
;
2670 extern int64_t compressor_bytes_used
;
2671 extern uint32_t compressor_eval_period_in_msecs
;
2672 extern uint32_t compressor_sample_min_in_msecs
;
2673 extern uint32_t compressor_sample_max_in_msecs
;
2674 extern uint32_t compressor_thrashing_threshold_per_10msecs
;
2675 extern uint32_t compressor_thrashing_min_per_10msecs
;
2676 extern uint32_t vm_compressor_minorcompact_threshold_divisor
;
2677 extern uint32_t vm_compressor_majorcompact_threshold_divisor
;
2678 extern uint32_t vm_compressor_unthrottle_threshold_divisor
;
2679 extern uint32_t vm_compressor_catchup_threshold_divisor
;
2681 SYSCTL_INT(_vm
, OID_AUTO
, compressor_mode
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_compressor_mode
, 0, "");
2682 SYSCTL_INT(_vm
, OID_AUTO
, compressor_is_active
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_compressor_is_active
, 0, "");
2683 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_bytes_used
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_bytes_used
, "");
2684 SYSCTL_INT(_vm
, OID_AUTO
, compressor_swapout_target_age
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &swapout_target_age
, 0, "");
2686 SYSCTL_INT(_vm
, OID_AUTO
, compressor_eval_period_in_msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_eval_period_in_msecs
, 0, "");
2687 SYSCTL_INT(_vm
, OID_AUTO
, compressor_sample_min_in_msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_sample_min_in_msecs
, 0, "");
2688 SYSCTL_INT(_vm
, OID_AUTO
, compressor_sample_max_in_msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_sample_max_in_msecs
, 0, "");
2689 SYSCTL_INT(_vm
, OID_AUTO
, compressor_thrashing_threshold_per_10msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_thrashing_threshold_per_10msecs
, 0, "");
2690 SYSCTL_INT(_vm
, OID_AUTO
, compressor_thrashing_min_per_10msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_thrashing_min_per_10msecs
, 0, "");
2691 SYSCTL_INT(_vm
, OID_AUTO
, compressor_minorcompact_threshold_divisor
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_minorcompact_threshold_divisor
, 0, "");
2692 SYSCTL_INT(_vm
, OID_AUTO
, compressor_majorcompact_threshold_divisor
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_majorcompact_threshold_divisor
, 0, "");
2693 SYSCTL_INT(_vm
, OID_AUTO
, compressor_unthrottle_threshold_divisor
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_unthrottle_threshold_divisor
, 0, "");
2694 SYSCTL_INT(_vm
, OID_AUTO
, compressor_catchup_threshold_divisor
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_catchup_threshold_divisor
, 0, "");
2696 SYSCTL_STRING(_vm
, OID_AUTO
, swapfileprefix
, CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
, swapfilename
, sizeof(swapfilename
) - SWAPFILENAME_INDEX_LEN
, "");
#if CONFIG_PHANTOM_CACHE
extern uint32_t phantom_cache_thrashing_threshold;
extern uint32_t phantom_cache_eval_period_in_msecs;
extern uint32_t phantom_cache_thrashing_threshold_ssd;

SYSCTL_INT(_vm, OID_AUTO, phantom_cache_eval_period_in_msecs,
    CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_eval_period_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold,
    CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold_ssd,
    CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold_ssd, 0, "");
#endif /* CONFIG_PHANTOM_CACHE */
#if (DEVELOPMENT || DEBUG)

/* NOTE(review): externs reconstructed — confirm the counters are declared in the VM layer. */
extern uint32_t	vm_page_creation_throttled_hard;
extern uint32_t	vm_page_creation_throttled_soft;

SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_hard,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_creation_throttled_hard, 0, "");

SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_soft,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_creation_throttled_soft, 0, "");

#endif /* DEVELOPMENT || DEBUG */
2722 * Enable tracing of voucher contents
2724 extern uint32_t ipc_voucher_trace_contents
;
2726 SYSCTL_INT (_kern
, OID_AUTO
, ipc_voucher_trace_contents
,
2727 CTLFLAG_RW
| CTLFLAG_LOCKED
, &ipc_voucher_trace_contents
, 0, "Enable tracing voucher contents");
2730 * Kernel stack size and depth
2732 SYSCTL_INT (_kern
, OID_AUTO
, stack_size
,
2733 CTLFLAG_RD
| CTLFLAG_LOCKED
, (int *) &kernel_stack_size
, 0, "Kernel stack size");
2734 SYSCTL_INT (_kern
, OID_AUTO
, stack_depth_max
,
2735 CTLFLAG_RD
| CTLFLAG_LOCKED
, (int *) &kernel_stack_depth_max
, 0, "Max kernel stack depth at interrupt or context switch");
2738 * enable back trace for port allocations
2740 extern int ipc_portbt
;
2742 SYSCTL_INT(_kern
, OID_AUTO
, ipc_portbt
,
2743 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2744 &ipc_portbt
, 0, "");
2751 * See osfmk/kern/sched_prim.c for the corresponding definition
2752 * in osfmk/. If either version changes, update the other.
2754 #define SCHED_STRING_MAX_LENGTH (48)
2756 extern char sched_string
[SCHED_STRING_MAX_LENGTH
];
2757 SYSCTL_STRING(_kern
, OID_AUTO
, sched
,
2758 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2759 sched_string
, sizeof(sched_string
),
2760 "Timeshare scheduler implementation");
2763 * Only support runtime modification on embedded platforms
2764 * with development config enabled
2768 /* Parameters related to timer coalescing tuning, to be replaced
2769 * with a dedicated systemcall in the future.
2771 /* Enable processing pending timers in the context of any other interrupt
2772 * Coalescing tuning parameters for various thread/task attributes */
2774 sysctl_timer_user_us_kernel_abstime SYSCTL_HANDLER_ARGS
2776 #pragma unused(oidp)
2777 int size
= arg2
; /* subcommand*/
2780 uint64_t old_value_ns
;
2781 uint64_t new_value_ns
;
2782 uint64_t value_abstime
;
2783 if (size
== sizeof(uint32_t))
2784 value_abstime
= *((uint32_t *)arg1
);
2785 else if (size
== sizeof(uint64_t))
2786 value_abstime
= *((uint64_t *)arg1
);
2787 else return ENOTSUP
;
2789 absolutetime_to_nanoseconds(value_abstime
, &old_value_ns
);
2790 error
= sysctl_io_number(req
, old_value_ns
, sizeof(old_value_ns
), &new_value_ns
, &changed
);
2791 if ((error
) || (!changed
))
2794 nanoseconds_to_absolutetime(new_value_ns
, &value_abstime
);
2795 if (size
== sizeof(uint32_t))
2796 *((uint32_t *)arg1
) = (uint32_t)value_abstime
;
2798 *((uint64_t *)arg1
) = value_abstime
;
2802 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_bg_scale
,
2803 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2804 &tcoal_prio_params
.timer_coalesce_bg_shift
, 0, "");
2805 SYSCTL_PROC(_kern
, OID_AUTO
, timer_resort_threshold_ns
,
2806 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2807 &tcoal_prio_params
.timer_resort_threshold_abstime
,
2808 sizeof(tcoal_prio_params
.timer_resort_threshold_abstime
),
2809 sysctl_timer_user_us_kernel_abstime
,
2811 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_bg_ns_max
,
2812 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2813 &tcoal_prio_params
.timer_coalesce_bg_abstime_max
,
2814 sizeof(tcoal_prio_params
.timer_coalesce_bg_abstime_max
),
2815 sysctl_timer_user_us_kernel_abstime
,
2818 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_kt_scale
,
2819 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2820 &tcoal_prio_params
.timer_coalesce_kt_shift
, 0, "");
2822 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_kt_ns_max
,
2823 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2824 &tcoal_prio_params
.timer_coalesce_kt_abstime_max
,
2825 sizeof(tcoal_prio_params
.timer_coalesce_kt_abstime_max
),
2826 sysctl_timer_user_us_kernel_abstime
,
2829 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_fp_scale
,
2830 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2831 &tcoal_prio_params
.timer_coalesce_fp_shift
, 0, "");
2833 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_fp_ns_max
,
2834 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2835 &tcoal_prio_params
.timer_coalesce_fp_abstime_max
,
2836 sizeof(tcoal_prio_params
.timer_coalesce_fp_abstime_max
),
2837 sysctl_timer_user_us_kernel_abstime
,
2840 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_ts_scale
,
2841 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2842 &tcoal_prio_params
.timer_coalesce_ts_shift
, 0, "");
2844 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_ts_ns_max
,
2845 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2846 &tcoal_prio_params
.timer_coalesce_ts_abstime_max
,
2847 sizeof(tcoal_prio_params
.timer_coalesce_ts_abstime_max
),
2848 sysctl_timer_user_us_kernel_abstime
,
2851 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier0_scale
,
2852 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2853 &tcoal_prio_params
.latency_qos_scale
[0], 0, "");
2855 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier0_ns_max
,
2856 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2857 &tcoal_prio_params
.latency_qos_abstime_max
[0],
2858 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[0]),
2859 sysctl_timer_user_us_kernel_abstime
,
2862 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier1_scale
,
2863 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2864 &tcoal_prio_params
.latency_qos_scale
[1], 0, "");
2866 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier1_ns_max
,
2867 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2868 &tcoal_prio_params
.latency_qos_abstime_max
[1],
2869 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[1]),
2870 sysctl_timer_user_us_kernel_abstime
,
2873 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier2_scale
,
2874 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2875 &tcoal_prio_params
.latency_qos_scale
[2], 0, "");
2877 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier2_ns_max
,
2878 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2879 &tcoal_prio_params
.latency_qos_abstime_max
[2],
2880 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[2]),
2881 sysctl_timer_user_us_kernel_abstime
,
2884 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier3_scale
,
2885 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2886 &tcoal_prio_params
.latency_qos_scale
[3], 0, "");
2888 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier3_ns_max
,
2889 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2890 &tcoal_prio_params
.latency_qos_abstime_max
[3],
2891 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[3]),
2892 sysctl_timer_user_us_kernel_abstime
,
2895 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier4_scale
,
2896 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2897 &tcoal_prio_params
.latency_qos_scale
[4], 0, "");
2899 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier4_ns_max
,
2900 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2901 &tcoal_prio_params
.latency_qos_abstime_max
[4],
2902 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[4]),
2903 sysctl_timer_user_us_kernel_abstime
,
2906 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier5_scale
,
2907 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2908 &tcoal_prio_params
.latency_qos_scale
[5], 0, "");
2910 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier5_ns_max
,
2911 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2912 &tcoal_prio_params
.latency_qos_abstime_max
[5],
2913 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[5]),
2914 sysctl_timer_user_us_kernel_abstime
,
2917 /* Communicate the "user idle level" heuristic to the timer layer, and
2918 * potentially other layers in the future.
2922 timer_user_idle_level(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
) {
2923 int new_value
= 0, old_value
= 0, changed
= 0, error
;
2925 old_value
= timer_get_user_idle_level();
2927 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
2929 if (error
== 0 && changed
) {
2930 if (timer_set_user_idle_level(new_value
) != KERN_SUCCESS
)
2937 SYSCTL_PROC(_machdep
, OID_AUTO
, user_idle_level
,
2938 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2940 timer_user_idle_level
, "I", "User idle level heuristic, 0-128");
2943 SYSCTL_INT(_kern
, OID_AUTO
, hv_support
,
2944 CTLFLAG_KERN
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2945 &hv_support_available
, 0, "");