2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
67 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
68 * support for mandatory and extensible security protections. This notice
69 * is included in support of clause 2.2 (b) of the Apple Public License,
74 * DEPRECATED sysctl system call code
76 * Everything in this file is deprecated. Sysctls should be handled
77 * by the code in kern_newsysctl.c.
78 * The remaining "case" sections are supposed to be converted into
79 * SYSCTL_*-style definitions, and as soon as all of them are gone,
80 * this source file is supposed to die.
82 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
83 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/malloc.h>
90 #include <sys/proc_internal.h>
91 #include <sys/kauth.h>
92 #include <sys/file_internal.h>
93 #include <sys/vnode_internal.h>
94 #include <sys/unistd.h>
96 #include <sys/ioctl.h>
97 #include <sys/namei.h>
99 #include <sys/disklabel.h>
101 #include <sys/sysctl.h>
102 #include <sys/user.h>
103 #include <sys/aio_kern.h>
104 #include <sys/reboot.h>
105 #include <sys/memory_maintenance.h>
106 #include <sys/priv.h>
107 #include <stdatomic.h>
109 #include <security/audit/audit.h>
110 #include <kern/kalloc.h>
112 #include <machine/smp.h>
113 #include <mach/machine.h>
114 #include <mach/mach_host.h>
115 #include <mach/mach_types.h>
116 #include <mach/processor_info.h>
117 #include <mach/vm_param.h>
118 #include <kern/debug.h>
119 #include <kern/mach_param.h>
120 #include <kern/task.h>
121 #include <kern/thread.h>
122 #include <kern/thread_group.h>
123 #include <kern/processor.h>
124 #include <kern/cpu_number.h>
125 #include <kern/debug.h>
126 #include <kern/sched_prim.h>
127 #include <vm/vm_kern.h>
128 #include <vm/vm_map.h>
129 #include <mach/host_info.h>
131 #include <sys/mount_internal.h>
132 #include <sys/kdebug.h>
134 #include <IOKit/IOPlatformExpert.h>
135 #include <pexpert/pexpert.h>
137 #include <machine/machine_routines.h>
138 #include <machine/exec.h>
140 #include <vm/vm_protos.h>
141 #include <vm/vm_pageout.h>
142 #include <vm/vm_compressor_algorithms.h>
143 #include <sys/imgsrc.h>
144 #include <kern/timer_call.h>
146 #if defined(__i386__) || defined(__x86_64__)
147 #include <i386/cpuid.h>
151 #include <sys/kern_memorystatus.h>
155 #include <kperf/kperf.h>
159 #include <kern/hv_support.h>
163 * deliberately setting max requests to really high number
164 * so that runaway settings do not cause MALLOC overflows
166 #define AIO_MAX_REQUESTS (128 * CONFIG_AIO_MAX)
168 extern int aio_max_requests
;
169 extern int aio_max_requests_per_process
;
170 extern int aio_worker_threads
;
171 extern int lowpri_IO_window_msecs
;
172 extern int lowpri_IO_delay_msecs
;
173 extern int nx_enabled
;
174 extern int speculative_reads_disabled
;
175 extern unsigned int speculative_prefetch_max
;
176 extern unsigned int speculative_prefetch_max_iosize
;
177 extern unsigned int preheat_max_bytes
;
178 extern unsigned int preheat_min_bytes
;
179 extern long numvnodes
;
181 extern uuid_string_t bootsessionuuid_string
;
183 extern unsigned int vm_max_delayed_work_limit
;
184 extern unsigned int vm_max_batch
;
186 extern unsigned int vm_page_free_min
;
187 extern unsigned int vm_page_free_target
;
188 extern unsigned int vm_page_free_reserved
;
189 extern unsigned int vm_page_speculative_percentage
;
190 extern unsigned int vm_page_speculative_q_age_ms
;
192 #if (DEVELOPMENT || DEBUG)
193 extern uint32_t vm_page_creation_throttled_hard
;
194 extern uint32_t vm_page_creation_throttled_soft
;
195 #endif /* DEVELOPMENT || DEBUG */
198 * Conditionally allow dtrace to see these functions for debugging purposes.
206 #define STATIC static
209 extern boolean_t mach_timer_coalescing_enabled
;
211 extern uint64_t timer_deadline_tracking_bin_1
, timer_deadline_tracking_bin_2
;
214 fill_user32_eproc(proc_t
, struct user32_eproc
*__restrict
);
216 fill_user32_externproc(proc_t
, struct user32_extern_proc
*__restrict
);
218 fill_user64_eproc(proc_t
, struct user64_eproc
*__restrict
);
220 fill_user64_proc(proc_t
, struct user64_kinfo_proc
*__restrict
);
222 fill_user64_externproc(proc_t
, struct user64_extern_proc
*__restrict
);
224 fill_user32_proc(proc_t
, struct user32_kinfo_proc
*__restrict
);
227 kdbg_control(int *name
, u_int namelen
, user_addr_t where
, size_t * sizep
);
233 pcsamples_ops(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
,
236 sysctl_procargs(int *name
, u_int namelen
, user_addr_t where
,
237 size_t *sizep
, proc_t cur_proc
);
239 sysctl_procargsx(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
,
240 proc_t cur_proc
, int argc_yes
);
242 sysctl_struct(user_addr_t oldp
, size_t *oldlenp
, user_addr_t newp
,
243 size_t newlen
, void *sp
, int len
);
245 STATIC
int sysdoproc_filt_KERN_PROC_PID(proc_t p
, void * arg
);
246 STATIC
int sysdoproc_filt_KERN_PROC_PGRP(proc_t p
, void * arg
);
247 STATIC
int sysdoproc_filt_KERN_PROC_TTY(proc_t p
, void * arg
);
248 STATIC
int sysdoproc_filt_KERN_PROC_UID(proc_t p
, void * arg
);
249 STATIC
int sysdoproc_filt_KERN_PROC_RUID(proc_t p
, void * arg
);
250 int sysdoproc_callback(proc_t p
, void *arg
);
253 /* forward declarations for non-static STATIC */
254 STATIC
void fill_loadavg64(struct loadavg
*la
, struct user64_loadavg
*la64
);
255 STATIC
void fill_loadavg32(struct loadavg
*la
, struct user32_loadavg
*la32
);
256 STATIC
int sysctl_handle_kern_threadname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
257 STATIC
int sysctl_sched_stats(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
258 STATIC
int sysctl_sched_stats_enable(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
259 STATIC
int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
;
261 STATIC
int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
;
262 #endif /* COUNT_SYSCALLS */
264 STATIC
int sysctl_doprocargs SYSCTL_HANDLER_ARGS
;
265 #endif /* !CONFIG_EMBEDDED */
266 STATIC
int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
;
267 STATIC
int sysctl_prochandle SYSCTL_HANDLER_ARGS
;
268 STATIC
int sysctl_aiomax(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
269 STATIC
int sysctl_aioprocmax(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
270 STATIC
int sysctl_aiothreads(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
271 STATIC
int sysctl_maxproc(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
272 STATIC
int sysctl_osversion(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
273 STATIC
int sysctl_sysctl_bootargs(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
274 STATIC
int sysctl_maxvnodes(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
275 STATIC
int sysctl_securelvl(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
276 STATIC
int sysctl_domainname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
277 STATIC
int sysctl_hostname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
278 STATIC
int sysctl_procname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
279 STATIC
int sysctl_boottime(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
280 STATIC
int sysctl_symfile(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
282 STATIC
int sysctl_netboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
284 #ifdef CONFIG_IMGSRC_ACCESS
285 STATIC
int sysctl_imgsrcdev(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
287 STATIC
int sysctl_usrstack(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
288 STATIC
int sysctl_usrstack64(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
290 STATIC
int sysctl_coredump(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
291 STATIC
int sysctl_suid_coredump(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
293 STATIC
int sysctl_delayterm(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
294 STATIC
int sysctl_rage_vnode(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
295 STATIC
int sysctl_kern_check_openevt(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
296 STATIC
int sysctl_nx(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
297 STATIC
int sysctl_loadavg(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
298 STATIC
int sysctl_vm_toggle_address_reuse(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
299 STATIC
int sysctl_swapusage(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
300 STATIC
int fetch_process_cputype( proc_t cur_proc
, int *name
, u_int namelen
, cpu_type_t
*cputype
);
301 STATIC
int sysctl_sysctl_native(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
302 STATIC
int sysctl_sysctl_cputype(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
303 STATIC
int sysctl_safeboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
304 STATIC
int sysctl_singleuser(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
305 STATIC
int sysctl_minimalboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
306 STATIC
int sysctl_slide(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
309 extern void IORegistrySetOSBuildVersion(char * build_version
);
312 fill_loadavg64(struct loadavg
*la
, struct user64_loadavg
*la64
)
314 la64
->ldavg
[0] = la
->ldavg
[0];
315 la64
->ldavg
[1] = la
->ldavg
[1];
316 la64
->ldavg
[2] = la
->ldavg
[2];
317 la64
->fscale
= (user64_long_t
)la
->fscale
;
321 fill_loadavg32(struct loadavg
*la
, struct user32_loadavg
*la32
)
323 la32
->ldavg
[0] = la
->ldavg
[0];
324 la32
->ldavg
[1] = la
->ldavg
[1];
325 la32
->ldavg
[2] = la
->ldavg
[2];
326 la32
->fscale
= (user32_long_t
)la
->fscale
;
331 * Attributes stored in the kernel.
333 extern char corefilename
[MAXPATHLEN
+1];
334 extern int do_coredump
;
335 extern int sugid_coredump
;
339 extern int do_count_syscalls
;
343 int securelevel
= -1;
349 sysctl_handle_kern_threadname( __unused
struct sysctl_oid
*oidp
, __unused
void *arg1
,
350 __unused
int arg2
, struct sysctl_req
*req
)
353 struct uthread
*ut
= get_bsdthread_info(current_thread());
354 user_addr_t oldp
=0, newp
=0;
355 size_t *oldlenp
=NULL
;
359 oldlenp
= &(req
->oldlen
);
361 newlen
= req
->newlen
;
363 /* We want the current length, and maybe the string itself */
365 /* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */
366 size_t currlen
= MAXTHREADNAMESIZE
- 1;
369 /* use length of current thread name */
370 currlen
= strlen(ut
->pth_name
);
372 if(*oldlenp
< currlen
)
374 /* NOTE - we do not copy the NULL terminator */
376 error
= copyout(ut
->pth_name
,oldp
,currlen
);
381 /* return length of thread name minus NULL terminator (just like strlen) */
382 req
->oldidx
= currlen
;
385 /* We want to set the name to something */
388 if(newlen
> (MAXTHREADNAMESIZE
- 1))
392 ut
->pth_name
= (char*)kalloc( MAXTHREADNAMESIZE
);
396 kernel_debug_string_simple(TRACE_STRING_THREADNAME_PREV
, ut
->pth_name
);
398 bzero(ut
->pth_name
, MAXTHREADNAMESIZE
);
399 error
= copyin(newp
, ut
->pth_name
, newlen
);
404 kernel_debug_string_simple(TRACE_STRING_THREADNAME
, ut
->pth_name
);
410 SYSCTL_PROC(_kern
, KERN_THREADNAME
, threadname
, CTLFLAG_ANYBODY
| CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, 0, sysctl_handle_kern_threadname
,"A","");
414 sysctl_sched_stats(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
416 host_basic_info_data_t hinfo
;
420 mach_msg_type_number_t count
= HOST_BASIC_INFO_COUNT
;
421 struct _processor_statistics_np
*buf
;
424 kret
= host_info((host_t
)BSD_HOST
, HOST_BASIC_INFO
, (host_info_t
)&hinfo
, &count
);
425 if (kret
!= KERN_SUCCESS
) {
429 size
= sizeof(struct _processor_statistics_np
) * (hinfo
.logical_cpu_max
+ 2); /* One for RT Queue, One for Fair Share Queue */
431 if (req
->oldlen
< size
) {
435 MALLOC(buf
, struct _processor_statistics_np
*, size
, M_TEMP
, M_ZERO
| M_WAITOK
);
437 kret
= get_sched_statistics(buf
, &size
);
438 if (kret
!= KERN_SUCCESS
) {
443 error
= sysctl_io_opaque(req
, buf
, size
, &changed
);
449 panic("Sched info changed?!");
456 SYSCTL_PROC(_kern
, OID_AUTO
, sched_stats
, CTLFLAG_LOCKED
, 0, 0, sysctl_sched_stats
, "-", "");
459 sysctl_sched_stats_enable(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, __unused
struct sysctl_req
*req
)
464 if (req
->newlen
!= sizeof(active
)) {
468 res
= copyin(req
->newptr
, &active
, sizeof(active
));
473 return set_sched_stats_active(active
);
476 SYSCTL_PROC(_kern
, OID_AUTO
, sched_stats_enable
, CTLFLAG_LOCKED
| CTLFLAG_WR
, 0, 0, sysctl_sched_stats_enable
, "-", "");
478 extern uint32_t sched_debug_flags
;
479 SYSCTL_INT(_debug
, OID_AUTO
, sched
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &sched_debug_flags
, 0, "scheduler debug");
481 #if (DEBUG || DEVELOPMENT)
482 extern boolean_t doprnt_hide_pointers
;
483 SYSCTL_INT(_debug
, OID_AUTO
, hide_kernel_pointers
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &doprnt_hide_pointers
, 0, "hide kernel pointers from log");
486 extern int get_kernel_symfile(proc_t
, char **);
489 #define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)
491 extern unsigned int nsysent
;
492 extern int syscalls_log
[];
493 extern const char *syscallnames
[];
496 sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
498 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
499 __unused
int *name
= arg1
; /* oid element argument vector */
500 __unused
int namelen
= arg2
; /* number of oid element arguments */
501 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
502 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
503 user_addr_t newp
= req
->newptr
; /* user buffer copy in address */
504 size_t newlen
= req
->newlen
; /* user buffer copy in size */
509 /* valid values passed in:
510 * = 0 means don't keep called counts for each bsd syscall
511 * > 0 means keep called counts for each bsd syscall
512 * = 2 means dump current counts to the system log
513 * = 3 means reset all counts
514 * for example, to dump current counts:
515 * sysctl -w kern.count_calls=2
517 error
= sysctl_int(oldp
, oldlenp
, newp
, newlen
, &tmp
);
523 do_count_syscalls
= 1;
525 else if ( tmp
== 0 || tmp
== 2 || tmp
== 3 ) {
527 for ( i
= 0; i
< nsysent
; i
++ ) {
528 if ( syscalls_log
[i
] != 0 ) {
530 printf("%d calls - name %s \n", syscalls_log
[i
], syscallnames
[i
]);
538 do_count_syscalls
= 1;
542 /* adjust index so we return the right required/consumed amount */
544 req
->oldidx
+= req
->oldlen
;
548 SYSCTL_PROC(_kern
, KERN_COUNT_SYSCALLS
, count_syscalls
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
549 0, /* Pointer argument (arg1) */
550 0, /* Integer argument (arg2) */
551 sysctl_docountsyscalls
, /* Handler function */
552 NULL
, /* Data pointer */
554 #endif /* COUNT_SYSCALLS */
557 * The following sysctl_* functions should not be used
558 * any more, as they can only cope with callers in
559 * user mode: Use new-style
567 * Validate parameters and get old / set new parameters
568 * for an integer-valued sysctl function.
571 sysctl_int(user_addr_t oldp
, size_t *oldlenp
,
572 user_addr_t newp
, size_t newlen
, int *valp
)
576 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
578 if (oldp
&& *oldlenp
< sizeof(int))
580 if (newp
&& newlen
!= sizeof(int))
582 *oldlenp
= sizeof(int);
584 error
= copyout(valp
, oldp
, sizeof(int));
585 if (error
== 0 && newp
) {
586 error
= copyin(newp
, valp
, sizeof(int));
587 AUDIT_ARG(value32
, *valp
);
593 * Validate parameters and get old / set new parameters
594 * for an quad(64bit)-valued sysctl function.
597 sysctl_quad(user_addr_t oldp
, size_t *oldlenp
,
598 user_addr_t newp
, size_t newlen
, quad_t
*valp
)
602 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
604 if (oldp
&& *oldlenp
< sizeof(quad_t
))
606 if (newp
&& newlen
!= sizeof(quad_t
))
608 *oldlenp
= sizeof(quad_t
);
610 error
= copyout(valp
, oldp
, sizeof(quad_t
));
611 if (error
== 0 && newp
)
612 error
= copyin(newp
, valp
, sizeof(quad_t
));
617 sysdoproc_filt_KERN_PROC_PID(proc_t p
, void * arg
)
619 if (p
->p_pid
!= (pid_t
)*(int*)arg
)
626 sysdoproc_filt_KERN_PROC_PGRP(proc_t p
, void * arg
)
628 if (p
->p_pgrpid
!= (pid_t
)*(int*)arg
)
635 sysdoproc_filt_KERN_PROC_TTY(proc_t p
, void * arg
)
640 /* This is very racy but list lock is held.. Hmmm. */
641 if ((p
->p_flag
& P_CONTROLT
) == 0 ||
642 (p
->p_pgrp
== NULL
) || (p
->p_pgrp
->pg_session
== NULL
) ||
643 (tp
= SESSION_TP(p
->p_pgrp
->pg_session
)) == TTY_NULL
||
644 tp
->t_dev
!= (dev_t
)*(int*)arg
)
653 sysdoproc_filt_KERN_PROC_UID(proc_t p
, void * arg
)
655 kauth_cred_t my_cred
;
658 if (p
->p_ucred
== NULL
)
660 my_cred
= kauth_cred_proc_ref(p
);
661 uid
= kauth_cred_getuid(my_cred
);
662 kauth_cred_unref(&my_cred
);
664 if (uid
!= (uid_t
)*(int*)arg
)
672 sysdoproc_filt_KERN_PROC_RUID(proc_t p
, void * arg
)
674 kauth_cred_t my_cred
;
677 if (p
->p_ucred
== NULL
)
679 my_cred
= kauth_cred_proc_ref(p
);
680 ruid
= kauth_cred_getruid(my_cred
);
681 kauth_cred_unref(&my_cred
);
683 if (ruid
!= (uid_t
)*(int*)arg
)
690 * try over estimating by 5 procs
692 #define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
693 struct sysdoproc_args
{
708 sysdoproc_callback(proc_t p
, void *arg
)
710 struct sysdoproc_args
*args
= arg
;
712 if (args
->buflen
>= args
->sizeof_kproc
) {
713 if ((args
->ruidcheck
!= 0) && (sysdoproc_filt_KERN_PROC_RUID(p
, &args
->uidval
) == 0))
714 return (PROC_RETURNED
);
715 if ((args
->uidcheck
!= 0) && (sysdoproc_filt_KERN_PROC_UID(p
, &args
->uidval
) == 0))
716 return (PROC_RETURNED
);
717 if ((args
->ttycheck
!= 0) && (sysdoproc_filt_KERN_PROC_TTY(p
, &args
->uidval
) == 0))
718 return (PROC_RETURNED
);
720 bzero(args
->kprocp
, args
->sizeof_kproc
);
722 fill_user64_proc(p
, args
->kprocp
);
724 fill_user32_proc(p
, args
->kprocp
);
725 int error
= copyout(args
->kprocp
, args
->dp
, args
->sizeof_kproc
);
727 *args
->errorp
= error
;
728 return (PROC_RETURNED_DONE
);
730 args
->dp
+= args
->sizeof_kproc
;
731 args
->buflen
-= args
->sizeof_kproc
;
733 args
->needed
+= args
->sizeof_kproc
;
734 return (PROC_RETURNED
);
737 SYSCTL_NODE(_kern
, KERN_PROC
, proc
, CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, "");
739 sysctl_prochandle SYSCTL_HANDLER_ARGS
741 int cmd
= oidp
->oid_arg2
; /* subcommand for multiple nodes */
742 int *name
= arg1
; /* oid element argument vector */
743 int namelen
= arg2
; /* number of oid element arguments */
744 user_addr_t where
= req
->oldptr
;/* user buffer copy out address */
746 user_addr_t dp
= where
;
748 int buflen
= where
!= USER_ADDR_NULL
? req
->oldlen
: 0;
750 boolean_t is_64_bit
= proc_is64bit(current_proc());
751 struct user32_kinfo_proc user32_kproc
;
752 struct user64_kinfo_proc user_kproc
;
755 int (*filterfn
)(proc_t
, void *) = 0;
756 struct sysdoproc_args args
;
762 if (namelen
!= 1 && !(namelen
== 0 && cmd
== KERN_PROC_ALL
))
766 sizeof_kproc
= sizeof(user_kproc
);
767 kprocp
= &user_kproc
;
769 sizeof_kproc
= sizeof(user32_kproc
);
770 kprocp
= &user32_kproc
;
776 filterfn
= sysdoproc_filt_KERN_PROC_PID
;
780 filterfn
= sysdoproc_filt_KERN_PROC_PGRP
;
799 /* must be kern.proc.<unknown> */
804 args
.buflen
= buflen
;
805 args
.kprocp
= kprocp
;
806 args
.is_64_bit
= is_64_bit
;
808 args
.needed
= needed
;
809 args
.errorp
= &error
;
810 args
.uidcheck
= uidcheck
;
811 args
.ruidcheck
= ruidcheck
;
812 args
.ttycheck
= ttycheck
;
813 args
.sizeof_kproc
= sizeof_kproc
;
815 args
.uidval
= name
[0];
817 success
= proc_iterate((PROC_ALLPROCLIST
| PROC_ZOMBPROCLIST
),
818 sysdoproc_callback
, &args
, filterfn
, name
);
821 * rdar://problem/28433391: if we can't iterate over the processes,
822 * make sure to return an error.
832 needed
= args
.needed
;
834 if (where
!= USER_ADDR_NULL
) {
835 req
->oldlen
= dp
- where
;
836 if (needed
> req
->oldlen
)
839 needed
+= KERN_PROCSLOP
;
840 req
->oldlen
= needed
;
842 /* adjust index so we return the right required/consumed amount */
843 req
->oldidx
+= req
->oldlen
;
848 * We specify the subcommand code for multiple nodes as the 'req->arg2' value
849 * in the sysctl declaration itself, which comes into the handler function
850 * as 'oidp->oid_arg2'.
852 * For these particular sysctls, since they have well known OIDs, we could
853 * have just obtained it from the '((int *)arg1)[0]' parameter, but that would
854 * not demonstrate how to handle multiple sysctls that used OID_AUTO instead
855 * of a well known value with a common handler function. This is desirable,
856 * because we want well known values to "go away" at some future date.
858 * It should be noted that the value of '((int *)arg1)[1]' is used for many
859 * an integer parameter to the subcommand for many of these sysctls; we'd
860 * rather have used '((int *)arg1)[0]' for that, or even better, an element
861 * in a structure passed in as the the 'newp' argument to sysctlbyname(3),
862 * and then use leaf-node permissions enforcement, but that would have
863 * necessitated modifying user space code to correspond to the interface
864 * change, and we are striving for binary backward compatibility here; even
865 * though these are SPI, and not intended for use by user space applications
866 * which are not themselves system tools or libraries, some applications
867 * have erroneously used them.
869 SYSCTL_PROC(_kern_proc
, KERN_PROC_ALL
, all
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
870 0, /* Pointer argument (arg1) */
871 KERN_PROC_ALL
, /* Integer argument (arg2) */
872 sysctl_prochandle
, /* Handler function */
873 NULL
, /* Data is size variant on ILP32/LP64 */
875 SYSCTL_PROC(_kern_proc
, KERN_PROC_PID
, pid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
876 0, /* Pointer argument (arg1) */
877 KERN_PROC_PID
, /* Integer argument (arg2) */
878 sysctl_prochandle
, /* Handler function */
879 NULL
, /* Data is size variant on ILP32/LP64 */
881 SYSCTL_PROC(_kern_proc
, KERN_PROC_TTY
, tty
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
882 0, /* Pointer argument (arg1) */
883 KERN_PROC_TTY
, /* Integer argument (arg2) */
884 sysctl_prochandle
, /* Handler function */
885 NULL
, /* Data is size variant on ILP32/LP64 */
887 SYSCTL_PROC(_kern_proc
, KERN_PROC_PGRP
, pgrp
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
888 0, /* Pointer argument (arg1) */
889 KERN_PROC_PGRP
, /* Integer argument (arg2) */
890 sysctl_prochandle
, /* Handler function */
891 NULL
, /* Data is size variant on ILP32/LP64 */
893 SYSCTL_PROC(_kern_proc
, KERN_PROC_UID
, uid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
894 0, /* Pointer argument (arg1) */
895 KERN_PROC_UID
, /* Integer argument (arg2) */
896 sysctl_prochandle
, /* Handler function */
897 NULL
, /* Data is size variant on ILP32/LP64 */
899 SYSCTL_PROC(_kern_proc
, KERN_PROC_RUID
, ruid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
900 0, /* Pointer argument (arg1) */
901 KERN_PROC_RUID
, /* Integer argument (arg2) */
902 sysctl_prochandle
, /* Handler function */
903 NULL
, /* Data is size variant on ILP32/LP64 */
905 SYSCTL_PROC(_kern_proc
, KERN_PROC_LCID
, lcid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
906 0, /* Pointer argument (arg1) */
907 KERN_PROC_LCID
, /* Integer argument (arg2) */
908 sysctl_prochandle
, /* Handler function */
909 NULL
, /* Data is size variant on ILP32/LP64 */
914 * Fill in non-zero fields of an eproc structure for the specified process.
917 fill_user32_eproc(proc_t p
, struct user32_eproc
*__restrict ep
)
921 struct session
*sessp
;
922 kauth_cred_t my_cred
;
925 sessp
= proc_session(p
);
927 if (pg
!= PGRP_NULL
) {
928 ep
->e_pgid
= p
->p_pgrpid
;
929 ep
->e_jobc
= pg
->pg_jobc
;
930 if (sessp
!= SESSION_NULL
&& sessp
->s_ttyvp
)
931 ep
->e_flag
= EPROC_CTTY
;
933 ep
->e_ppid
= p
->p_ppid
;
935 my_cred
= kauth_cred_proc_ref(p
);
937 /* A fake historical pcred */
938 ep
->e_pcred
.p_ruid
= kauth_cred_getruid(my_cred
);
939 ep
->e_pcred
.p_svuid
= kauth_cred_getsvuid(my_cred
);
940 ep
->e_pcred
.p_rgid
= kauth_cred_getrgid(my_cred
);
941 ep
->e_pcred
.p_svgid
= kauth_cred_getsvgid(my_cred
);
943 /* A fake historical *kauth_cred_t */
944 ep
->e_ucred
.cr_ref
= my_cred
->cr_ref
;
945 ep
->e_ucred
.cr_uid
= kauth_cred_getuid(my_cred
);
946 ep
->e_ucred
.cr_ngroups
= posix_cred_get(my_cred
)->cr_ngroups
;
947 bcopy(posix_cred_get(my_cred
)->cr_groups
,
948 ep
->e_ucred
.cr_groups
, NGROUPS
* sizeof (gid_t
));
950 kauth_cred_unref(&my_cred
);
953 if ((p
->p_flag
& P_CONTROLT
) && (sessp
!= SESSION_NULL
) &&
954 (tp
= SESSION_TP(sessp
))) {
955 ep
->e_tdev
= tp
->t_dev
;
956 ep
->e_tpgid
= sessp
->s_ttypgrpid
;
960 if (sessp
!= SESSION_NULL
) {
961 if (SESS_LEADER(p
, sessp
))
962 ep
->e_flag
|= EPROC_SLEADER
;
970 * Fill in non-zero fields of an LP64 eproc structure for the specified process.
973 fill_user64_eproc(proc_t p
, struct user64_eproc
*__restrict ep
)
977 struct session
*sessp
;
978 kauth_cred_t my_cred
;
981 sessp
= proc_session(p
);
983 if (pg
!= PGRP_NULL
) {
984 ep
->e_pgid
= p
->p_pgrpid
;
985 ep
->e_jobc
= pg
->pg_jobc
;
986 if (sessp
!= SESSION_NULL
&& sessp
->s_ttyvp
)
987 ep
->e_flag
= EPROC_CTTY
;
989 ep
->e_ppid
= p
->p_ppid
;
991 my_cred
= kauth_cred_proc_ref(p
);
993 /* A fake historical pcred */
994 ep
->e_pcred
.p_ruid
= kauth_cred_getruid(my_cred
);
995 ep
->e_pcred
.p_svuid
= kauth_cred_getsvuid(my_cred
);
996 ep
->e_pcred
.p_rgid
= kauth_cred_getrgid(my_cred
);
997 ep
->e_pcred
.p_svgid
= kauth_cred_getsvgid(my_cred
);
999 /* A fake historical *kauth_cred_t */
1000 ep
->e_ucred
.cr_ref
= my_cred
->cr_ref
;
1001 ep
->e_ucred
.cr_uid
= kauth_cred_getuid(my_cred
);
1002 ep
->e_ucred
.cr_ngroups
= posix_cred_get(my_cred
)->cr_ngroups
;
1003 bcopy(posix_cred_get(my_cred
)->cr_groups
,
1004 ep
->e_ucred
.cr_groups
, NGROUPS
* sizeof (gid_t
));
1006 kauth_cred_unref(&my_cred
);
1009 if ((p
->p_flag
& P_CONTROLT
) && (sessp
!= SESSION_NULL
) &&
1010 (tp
= SESSION_TP(sessp
))) {
1011 ep
->e_tdev
= tp
->t_dev
;
1012 ep
->e_tpgid
= sessp
->s_ttypgrpid
;
1016 if (sessp
!= SESSION_NULL
) {
1017 if (SESS_LEADER(p
, sessp
))
1018 ep
->e_flag
|= EPROC_SLEADER
;
1019 session_rele(sessp
);
1021 if (pg
!= PGRP_NULL
)
1026 * Fill in an eproc structure for the specified process.
1027 * bzeroed by our caller, so only set non-zero fields.
1030 fill_user32_externproc(proc_t p
, struct user32_extern_proc
*__restrict exp
)
1032 exp
->p_starttime
.tv_sec
= p
->p_start
.tv_sec
;
1033 exp
->p_starttime
.tv_usec
= p
->p_start
.tv_usec
;
1034 exp
->p_flag
= p
->p_flag
;
1035 if (p
->p_lflag
& P_LTRACED
)
1036 exp
->p_flag
|= P_TRACED
;
1037 if (p
->p_lflag
& P_LPPWAIT
)
1038 exp
->p_flag
|= P_PPWAIT
;
1039 if (p
->p_lflag
& P_LEXIT
)
1040 exp
->p_flag
|= P_WEXIT
;
1041 exp
->p_stat
= p
->p_stat
;
1042 exp
->p_pid
= p
->p_pid
;
1043 exp
->p_oppid
= p
->p_oppid
;
1045 exp
->user_stack
= p
->user_stack
;
1046 exp
->p_debugger
= p
->p_debugger
;
1047 exp
->sigwait
= p
->sigwait
;
1049 #ifdef _PROC_HAS_SCHEDINFO_
1050 exp
->p_estcpu
= p
->p_estcpu
;
1051 exp
->p_pctcpu
= p
->p_pctcpu
;
1052 exp
->p_slptime
= p
->p_slptime
;
1054 exp
->p_realtimer
.it_interval
.tv_sec
=
1055 (user32_time_t
)p
->p_realtimer
.it_interval
.tv_sec
;
1056 exp
->p_realtimer
.it_interval
.tv_usec
=
1057 (__int32_t
)p
->p_realtimer
.it_interval
.tv_usec
;
1059 exp
->p_realtimer
.it_value
.tv_sec
=
1060 (user32_time_t
)p
->p_realtimer
.it_value
.tv_sec
;
1061 exp
->p_realtimer
.it_value
.tv_usec
=
1062 (__int32_t
)p
->p_realtimer
.it_value
.tv_usec
;
1064 exp
->p_rtime
.tv_sec
= (user32_time_t
)p
->p_rtime
.tv_sec
;
1065 exp
->p_rtime
.tv_usec
= (__int32_t
)p
->p_rtime
.tv_usec
;
1067 exp
->p_sigignore
= p
->p_sigignore
;
1068 exp
->p_sigcatch
= p
->p_sigcatch
;
1069 exp
->p_priority
= p
->p_priority
;
1070 exp
->p_nice
= p
->p_nice
;
1071 bcopy(&p
->p_comm
, &exp
->p_comm
, MAXCOMLEN
);
1072 exp
->p_xstat
= p
->p_xstat
;
1073 exp
->p_acflag
= p
->p_acflag
;
1077 * Fill in an LP64 version of extern_proc structure for the specified process.
1080 fill_user64_externproc(proc_t p
, struct user64_extern_proc
*__restrict exp
)
1082 exp
->p_starttime
.tv_sec
= p
->p_start
.tv_sec
;
1083 exp
->p_starttime
.tv_usec
= p
->p_start
.tv_usec
;
1084 exp
->p_flag
= p
->p_flag
;
1085 if (p
->p_lflag
& P_LTRACED
)
1086 exp
->p_flag
|= P_TRACED
;
1087 if (p
->p_lflag
& P_LPPWAIT
)
1088 exp
->p_flag
|= P_PPWAIT
;
1089 if (p
->p_lflag
& P_LEXIT
)
1090 exp
->p_flag
|= P_WEXIT
;
1091 exp
->p_stat
= p
->p_stat
;
1092 exp
->p_pid
= p
->p_pid
;
1093 exp
->p_oppid
= p
->p_oppid
;
1095 exp
->user_stack
= p
->user_stack
;
1096 exp
->p_debugger
= p
->p_debugger
;
1097 exp
->sigwait
= p
->sigwait
;
1099 #ifdef _PROC_HAS_SCHEDINFO_
1100 exp
->p_estcpu
= p
->p_estcpu
;
1101 exp
->p_pctcpu
= p
->p_pctcpu
;
1102 exp
->p_slptime
= p
->p_slptime
;
1104 exp
->p_realtimer
.it_interval
.tv_sec
= p
->p_realtimer
.it_interval
.tv_sec
;
1105 exp
->p_realtimer
.it_interval
.tv_usec
= p
->p_realtimer
.it_interval
.tv_usec
;
1107 exp
->p_realtimer
.it_value
.tv_sec
= p
->p_realtimer
.it_value
.tv_sec
;
1108 exp
->p_realtimer
.it_value
.tv_usec
= p
->p_realtimer
.it_value
.tv_usec
;
1110 exp
->p_rtime
.tv_sec
= p
->p_rtime
.tv_sec
;
1111 exp
->p_rtime
.tv_usec
= p
->p_rtime
.tv_usec
;
1113 exp
->p_sigignore
= p
->p_sigignore
;
1114 exp
->p_sigcatch
= p
->p_sigcatch
;
1115 exp
->p_priority
= p
->p_priority
;
1116 exp
->p_nice
= p
->p_nice
;
1117 bcopy(&p
->p_comm
, &exp
->p_comm
, MAXCOMLEN
);
1118 exp
->p_xstat
= p
->p_xstat
;
1119 exp
->p_acflag
= p
->p_acflag
;
1123 fill_user32_proc(proc_t p
, struct user32_kinfo_proc
*__restrict kp
)
1125 /* on a 64 bit kernel, 32 bit users get some truncated information */
1126 fill_user32_externproc(p
, &kp
->kp_proc
);
1127 fill_user32_eproc(p
, &kp
->kp_eproc
);
1131 fill_user64_proc(proc_t p
, struct user64_kinfo_proc
*__restrict kp
)
1133 fill_user64_externproc(p
, &kp
->kp_proc
);
1134 fill_user64_eproc(p
, &kp
->kp_eproc
);
/*
 * sysctl_kdebug_ops: sysctl handler for the kern.kdebug node.  Unpacks
 * the standard SYSCTL_HANDLER_ARGS into the old-style (name, namelen,
 * oldp, oldlenp) vector and forwards the listed KERN_KD* subcommands to
 * kdbg_control().  On return it advances req->oldidx by req->oldlen so
 * the sysctl layer reports the amount required/consumed.
 * NOTE(review): mangled extraction -- the enclosing switch statement,
 * other case labels, privilege checks and the return path are elided
 * here; only the visible fragments are documented.
 */
1138 sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
1140 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
1141 int *name
= arg1
; /* oid element argument vector */
1142 int namelen
= arg2
; /* number of oid element arguments */
1143 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1144 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1145 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1146 // size_t newlen = req->newlen; /* user buffer copy in size */
/* Subcommands that are serviced by kdbg_control(). */
1163 case KERN_KDWRITETR
:
1164 case KERN_KDWRITEMAP
:
1170 case KERN_KDREADCURTHRMAP
:
1171 case KERN_KDSET_TYPEFILTER
:
1172 case KERN_KDBUFWAIT
:
1174 case KERN_KDWRITEMAP_V3
:
1175 case KERN_KDWRITETR_V3
:
1176 ret
= kdbg_control(name
, namelen
, oldp
, oldlenp
);
1183 /* adjust index so we return the right required/consumed amount */
1185 req
->oldidx
+= req
->oldlen
;
/* Register kern.kdebug as a read-only node handled by sysctl_kdebug_ops.
 * NOTE(review): trailing arguments (format string / description) are
 * elided by the extraction. */
1189 SYSCTL_PROC(_kern
, KERN_KDEBUG
, kdebug
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1190 0, /* Pointer argument (arg1) */
1191 0, /* Integer argument (arg2) */
1192 sysctl_kdebug_ops
, /* Handler function */
1193 NULL
, /* Data pointer */
#if !CONFIG_EMBEDDED
/*
 * sysctl_doprocargs: handler for the legacy KERN_PROCARGS sysctl.
 * Unpacks SYSCTL_HANDLER_ARGS and calls sysctl_procargsx() with
 * argc_yes == 0 (old format: no argc word prepended), acting on
 * current_proc().  Afterwards advances req->oldidx by req->oldlen so the
 * sysctl layer reports required/consumed size.
 * NOTE(review): mangled extraction -- braces and the final return are
 * elided.
 */
1199 * Return the top *sizep bytes of the user stack, or the entire area of the
1200 * user stack down through the saved exec_path, whichever is smaller.
1203 sysctl_doprocargs SYSCTL_HANDLER_ARGS
1205 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
1206 int *name
= arg1
; /* oid element argument vector */
1207 int namelen
= arg2
; /* number of oid element arguments */
1208 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1209 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1210 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1211 // size_t newlen = req->newlen; /* user buffer copy in size */
1214 error
= sysctl_procargsx( name
, namelen
, oldp
, oldlenp
, current_proc(), 0);
1216 /* adjust index so we return the right required/consumed amount */
1218 req
->oldidx
+= req
->oldlen
;
/* Register kern.procargs (legacy) -> sysctl_doprocargs.  Trailing
 * registration arguments are elided by the extraction. */
1222 SYSCTL_PROC(_kern
, KERN_PROCARGS
, procargs
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1223 0, /* Pointer argument (arg1) */
1224 0, /* Integer argument (arg2) */
1225 sysctl_doprocargs
, /* Handler function */
1226 NULL
, /* Data pointer */
#endif /* !CONFIG_EMBEDDED */
/*
 * sysctl_doprocargs2: handler for KERN_PROCARGS2.  Identical to
 * sysctl_doprocargs except it passes argc_yes == 1 to sysctl_procargsx(),
 * which prepends the target's argc as the first word of the returned
 * buffer.  NOTE(review): mangled extraction -- braces/return elided.
 */
1231 sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
1233 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
1234 int *name
= arg1
; /* oid element argument vector */
1235 int namelen
= arg2
; /* number of oid element arguments */
1236 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1237 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1238 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1239 // size_t newlen = req->newlen; /* user buffer copy in size */
1242 error
= sysctl_procargsx( name
, namelen
, oldp
, oldlenp
, current_proc(), 1);
1244 /* adjust index so we return the right required/consumed amount */
1246 req
->oldidx
+= req
->oldlen
;
/* Register kern.procargs2 -> sysctl_doprocargs2.  Trailing registration
 * arguments are elided by the extraction. */
1250 SYSCTL_PROC(_kern
, KERN_PROCARGS2
, procargs2
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1251 0, /* Pointer argument (arg1) */
1252 0, /* Integer argument (arg2) */
1253 sysctl_doprocargs2
, /* Handler function */
1254 NULL
, /* Data pointer */
/*
 * sysctl_procargsx: shared worker for KERN_PROCARGS / KERN_PROCARGS2.
 * Copies the top of the target process's user stack (the argv/environ
 * strings area) into the caller's buffer.  argc_yes selects the newer
 * format in which argc is prepended as the first word.  Access is limited
 * to the owner uid or a superuser (suser check below).  The target's
 * stack pages are pulled in via task/map references and
 * vm_map_copyin/vm_map_copy_overwrite into a kernel buffer, then
 * copied out.  When `where` is NULL only the required size is reported.
 *
 * NOTE(review): mangled extraction -- the function's declaration
 * preamble, many error checks, braces, gotos and returns are elided;
 * only the surviving fragments are annotated.  Tokens are byte-identical,
 * including the mojibake "©_start" at the kmem_alloc call, which is
 * almost certainly a corrupted "&copy_start" -- restore from upstream
 * before compiling.
 */
1258 sysctl_procargsx(int *name
, u_int namelen
, user_addr_t where
,
1259 size_t *sizep
, proc_t cur_proc
, int argc_yes
)
/* buflen is only meaningful when the caller supplied an output buffer. */
1262 int buflen
= where
!= USER_ADDR_NULL
? *sizep
: 0;
1264 struct _vm_map
*proc_map
;
1267 user_addr_t arg_addr
;
1272 vm_offset_t copy_start
, copy_end
;
1275 kauth_cred_t my_cred
;
/* First word of the output buffer is reserved for argc (PROCARGS2). */
1283 buflen
-= sizeof(int); /* reserve first word to return argc */
1285 /* we only care about buflen when where (oldp from sysctl) is not NULL. */
1286 /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
1287 /* is not NULL then the caller wants us to return the length needed to */
1288 /* hold the data we would return */
1289 if (where
!= USER_ADDR_NULL
&& (buflen
<= 0 || buflen
> ARG_MAX
)) {
1295 * Lookup process by pid
1304 * Copy the top N bytes of the stack.
1305 * On all machines we have so far, the stack grows
1308 * If the user expects no more than N bytes of
1309 * argument list, use that as a guess for the
/* A process with no recorded user stack cannot be serviced. */
1313 if (!p
->user_stack
) {
1318 if (where
== USER_ADDR_NULL
) {
1319 /* caller only wants to know length of proc args data */
1320 if (sizep
== NULL
) {
1325 size
= p
->p_argslen
;
1328 size
+= sizeof(int);
1331 * old PROCARGS will return the executable's path and plus some
1332 * extra space for work alignment and data tags
1334 size
+= PATH_MAX
+ (6 * sizeof(int));
/* Round the reported size up to the next int boundary. */
1336 size
+= (size
& (sizeof(int) - 1)) ? (sizeof(int) - (size
& (sizeof(int) - 1))) : 0;
/* Permission: only the same uid or root may read another's args. */
1341 my_cred
= kauth_cred_proc_ref(p
);
1342 uid
= kauth_cred_getuid(my_cred
);
1343 kauth_cred_unref(&my_cred
);
1345 if ((uid
!= kauth_cred_getuid(kauth_cred_get()))
1346 && suser(kauth_cred_get(), &cur_proc
->p_acflag
)) {
/* Clamp the copy window to the recorded args length (page rounded). */
1351 if ((u_int
)arg_size
> p
->p_argslen
)
1352 arg_size
= round_page(p
->p_argslen
);
1354 arg_addr
= p
->user_stack
- arg_size
;
1357 * Before we can block (any VM code), make another
1358 * reference to the map to keep it alive. We do
1359 * that by getting a reference on the task itself.
1367 /* save off argc before releasing the proc */
1370 argslen
= p
->p_argslen
;
1372 * Once we have a task reference we can convert that into a
1373 * map reference, which we will use in the calls below. The
1374 * task/process may change its map after we take this reference
1375 * (see execve), but the worst that will happen then is a return
1376 * of stale info (which is always a possibility).
1378 task_reference(task
);
1380 proc_map
= get_task_map_reference(task
);
1381 task_deallocate(task
);
1383 if (proc_map
== NULL
)
/* NOTE(review): "©_start" below is mojibake for "&copy_start". */
1387 ret
= kmem_alloc(kernel_map
, ©_start
, round_page(arg_size
), VM_KERN_MEMORY_BSD
);
1388 if (ret
!= KERN_SUCCESS
) {
1389 vm_map_deallocate(proc_map
);
1393 copy_end
= round_page(copy_start
+ arg_size
);
/* Pull the target's stack pages into a vm_map_copy, then overwrite the
 * freshly allocated kernel buffer with them.  Each failure path releases
 * the map reference and the kernel allocation. */
1395 if( vm_map_copyin(proc_map
, (vm_map_address_t
)arg_addr
,
1396 (vm_map_size_t
)arg_size
, FALSE
, &tmp
) != KERN_SUCCESS
) {
1397 vm_map_deallocate(proc_map
);
1398 kmem_free(kernel_map
, copy_start
,
1399 round_page(arg_size
));
1404 * Now that we've done the copyin from the process'
1405 * map, we can release the reference to it.
1407 vm_map_deallocate(proc_map
);
1409 if( vm_map_copy_overwrite(kernel_map
,
1410 (vm_map_address_t
)copy_start
,
1411 tmp
, FALSE
) != KERN_SUCCESS
) {
1412 kmem_free(kernel_map
, copy_start
,
1413 round_page(arg_size
));
1414 vm_map_copy_discard(tmp
);
/* The strings live at the top of the copied window; compute their start. */
1418 if (arg_size
> argslen
) {
1419 data
= (caddr_t
) (copy_end
- argslen
);
1422 data
= (caddr_t
) (copy_end
- arg_size
);
1427 * When these sysctls were introduced, the first string in the strings
1428 * section was just the bare path of the executable. However, for security
1429 * reasons we now prefix this string with executable_path= so it can be
1430 * parsed getenv style. To avoid binary compatability issues with exising
1431 * callers of this sysctl, we strip it off here if present.
1432 * (rdar://problem/13746466)
1434 #define EXECUTABLE_KEY "executable_path="
1435 if (strncmp(EXECUTABLE_KEY
, data
, strlen(EXECUTABLE_KEY
)) == 0){
1436 data
+= strlen(EXECUTABLE_KEY
);
1437 size
-= strlen(EXECUTABLE_KEY
);
1441 /* Put processes argc as the first word in the copyout buffer */
1442 suword(where
, argc
);
1443 error
= copyout(data
, (where
+ sizeof(int)), size
);
1444 size
+= sizeof(int);
1446 error
= copyout(data
, where
, size
);
1449 * Make the old PROCARGS work to return the executable's path
1450 * But, only if there is enough space in the provided buffer
1452 * on entry: data [possibily] points to the beginning of the path
1454 * Note: we keep all pointers&sizes aligned to word boundries
1456 if ( (! error
) && (buflen
> 0 && (u_int
)buflen
> argslen
) )
1458 int binPath_sz
, alignedBinPath_sz
= 0;
1459 int extraSpaceNeeded
, addThis
;
1460 user_addr_t placeHere
;
1461 char * str
= (char *) data
;
1464 /* Some apps are really bad about messing up their stacks
1465 So, we have to be extra careful about getting the length
1466 of the executing binary. If we encounter an error, we bail.
1469 /* Limit ourselves to PATH_MAX paths */
1470 if ( max_len
> PATH_MAX
) max_len
= PATH_MAX
;
/* Bounded scan for the executable path's NUL terminator. */
1474 while ( (binPath_sz
< max_len
-1) && (*str
++ != 0) )
1477 /* If we have a NUL terminator, copy it, too */
1478 if (binPath_sz
< max_len
-1) binPath_sz
+= 1;
1480 /* Pre-Flight the space requiremnts */
1482 /* Account for the padding that fills out binPath to the next word */
1483 alignedBinPath_sz
+= (binPath_sz
& (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz
& (sizeof(int)-1))) : 0;
1485 placeHere
= where
+ size
;
1487 /* Account for the bytes needed to keep placeHere word aligned */
1488 addThis
= (placeHere
& (sizeof(int)-1)) ? (sizeof(int)-(placeHere
& (sizeof(int)-1))) : 0;
1490 /* Add up all the space that is needed */
1491 extraSpaceNeeded
= alignedBinPath_sz
+ addThis
+ binPath_sz
+ (4 * sizeof(int));
1493 /* is there is room to tack on argv[0]? */
1494 if ( (buflen
& ~(sizeof(int)-1)) >= ( argslen
+ extraSpaceNeeded
))
/* Emit the legacy trailer: zero word, 0xBFFF0000 marker, zero word,
 * then the path bytes and a final zero word. */
1496 placeHere
+= addThis
;
1497 suword(placeHere
, 0);
1498 placeHere
+= sizeof(int);
1499 suword(placeHere
, 0xBFFF0000);
1500 placeHere
+= sizeof(int);
1501 suword(placeHere
, 0);
1502 placeHere
+= sizeof(int);
1503 error
= copyout(data
, placeHere
, binPath_sz
);
1506 placeHere
+= binPath_sz
;
1507 suword(placeHere
, 0);
1508 size
+= extraSpaceNeeded
;
/* Common cleanup: free the kernel-side staging buffer if allocated. */
1514 if (copy_start
!= (vm_offset_t
) 0) {
1515 kmem_free(kernel_map
, copy_start
, copy_end
- copy_start
);
1521 if (where
!= USER_ADDR_NULL
)
/*
 * kern.aiomax handler: read/write the system-wide cap on concurrent aio
 * requests.  A new value is accepted only when it stays >= the per-process
 * limit and <= AIO_MAX_REQUESTS.  NOTE(review): mangled extraction -- the
 * function name line, braces, the error/changed check and the return are
 * elided.
 */
1528 * Max number of concurrent aio requests
1532 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1534 int new_value
, changed
;
1535 int error
= sysctl_io_number(req
, aio_max_requests
, sizeof(int), &new_value
, &changed
);
1537 /* make sure the system-wide limit is greater than the per process limit */
1538 if (new_value
>= aio_max_requests_per_process
&& new_value
<= AIO_MAX_REQUESTS
)
1539 aio_max_requests
= new_value
;
/*
 * kern.aioprocmax handler: read/write the per-process cap on concurrent
 * aio requests.  A new value must be <= the system-wide limit and
 * >= AIO_LISTIO_MAX.  NOTE(review): mangled extraction -- function name
 * line, braces and return elided.
 */
1548 * Max number of concurrent aio requests per process
1552 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1554 int new_value
, changed
;
1555 int error
= sysctl_io_number(req
, aio_max_requests_per_process
, sizeof(int), &new_value
, &changed
);
1557 /* make sure per process limit is less than the system-wide limit */
1558 if (new_value
<= aio_max_requests
&& new_value
>= AIO_LISTIO_MAX
)
1559 aio_max_requests_per_process
= new_value
;
/*
 * kern.aiothreads handler: read/write the number of aio worker threads.
 * Only increases are honored; the delta is materialized immediately via
 * _aio_create_worker_threads().  NOTE(review): mangled extraction --
 * function name line, braces and return elided.
 */
1568 * Max number of async IO worker threads
1572 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1574 int new_value
, changed
;
1575 int error
= sysctl_io_number(req
, aio_worker_threads
, sizeof(int), &new_value
, &changed
);
1577 /* we only allow an increase in the number of worker threads */
1578 if (new_value
> aio_worker_threads
) {
1579 _aio_create_worker_threads((new_value
- aio_worker_threads
));
1580 aio_worker_threads
= new_value
;
/*
 * kern.maxproc handler: read/write the system-wide process limit.  New
 * values are audited and accepted only in (0, hard_maxproc].
 * NOTE(review): mangled extraction -- function name line, braces and
 * return elided.
 */
1590 * System-wide limit on the max number of processes
1594 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1596 int new_value
, changed
;
1597 int error
= sysctl_io_number(req
, maxproc
, sizeof(int), &new_value
, &changed
);
1599 AUDIT_ARG(value32
, new_value
);
1600 /* make sure the system-wide limit is less than the configured hard
1601 limit set at kernel compilation */
1602 if (new_value
<= hard_maxproc
&& new_value
> 0)
1603 maxproc
= new_value
;
/*
 * Static sysctl registrations for kernel identity strings/ints
 * (kern.ostype, kern.osrelease, kern.osrevision, kern.version,
 * kern.uuid) and the debug.kprint_syscall tracing knobs.
 * NOTE(review): mangled extraction -- the backing-variable argument of
 * several entries is elided.
 */
1610 SYSCTL_STRING(_kern
, KERN_OSTYPE
, ostype
,
1611 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1613 SYSCTL_STRING(_kern
, KERN_OSRELEASE
, osrelease
,
1614 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1616 SYSCTL_INT(_kern
, KERN_OSREV
, osrevision
,
1617 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1618 (int *)NULL
, BSD
, "");
1619 SYSCTL_STRING(_kern
, KERN_VERSION
, version
,
1620 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1622 SYSCTL_STRING(_kern
, OID_AUTO
, uuid
,
1623 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1624 &kernel_uuid_string
[0], 0, "");
/* kprintf syscall tracing: global enable flag plus an optional process
 * name filter (compared against p_comm below). */
1627 int debug_kprint_syscall
= 0;
1628 char debug_kprint_syscall_process
[MAXCOMLEN
+1];
1630 /* Thread safe: bits and string value are not used to reclaim state */
1631 SYSCTL_INT (_debug
, OID_AUTO
, kprint_syscall
,
1632 CTLFLAG_RW
| CTLFLAG_LOCKED
, &debug_kprint_syscall
, 0, "kprintf syscall tracing");
1633 SYSCTL_STRING(_debug
, OID_AUTO
, kprint_syscall_process
,
1634 CTLFLAG_RW
| CTLFLAG_LOCKED
, debug_kprint_syscall_process
, sizeof(debug_kprint_syscall_process
),
1635 "name of process for kprintf syscall tracing");
/*
 * debug_kprint_current_process: decide whether kprintf syscall tracing
 * applies to the current process.  When a process-name filter is set,
 * only a p_comm match traces (and *namep is cleared since the caller
 * already knows the name); otherwise all processes trace.
 * NOTE(review): mangled extraction -- braces, early-outs and return
 * values are elided.
 */
1637 int debug_kprint_current_process(const char **namep
)
1639 struct proc
*p
= current_proc();
1645 if (debug_kprint_syscall_process
[0]) {
1646 /* user asked to scope tracing to a particular process name */
1647 if(0 == strncmp(debug_kprint_syscall_process
,
1648 p
->p_comm
, sizeof(debug_kprint_syscall_process
))) {
1649 /* no value in telling the user that we traced what they asked */
1650 if(namep
) *namep
= NULL
;
1658 /* trace all processes. Tell user what we traced */
/*
 * kern.osversion handler: standard string read/write via
 * sysctl_handle_string, plus (per PR-5293665) a push of the new value
 * into IORegistry on successful change.  Registered RW below against the
 * `osversion` buffer (256 bytes).  NOTE(review): mangled extraction --
 * braces, the rval check and return are elided.
 */
1667 /* PR-5293665: need to use a callback function for kern.osversion to set
1668 * osversion in IORegistry */
1671 sysctl_osversion(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
)
1675 rval
= sysctl_handle_string(oidp
, arg1
, arg2
, req
);
1678 IORegistrySetOSBuildVersion((char *)arg1
);
1684 SYSCTL_PROC(_kern
, KERN_OSVERSION
, osversion
,
1685 CTLFLAG_RW
| CTLFLAG_KERN
| CTLTYPE_STRING
| CTLFLAG_LOCKED
,
1686 osversion
, 256 /* OSVERSIZE*/,
1687 sysctl_osversion
, "A", "");
/*
 * kern.osvariant_status: opaque 64-bit OS-variant flags.  Writes are
 * restricted to pid 1 (launchd) and only while the value is still zero
 * (write-once at boot); reads go through sysctl_handle_quad.
 * NOTE(review): mangled extraction -- braces and the EPERM return in the
 * rejection path are elided.
 */
1689 static uint64_t osvariant_status
= 0;
1692 sysctl_osvariant_status(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
)
1694 if (req
->newptr
!= 0) {
1696 * Can only ever be set by launchd, and only once at boot.
1698 if (req
->p
->p_pid
!= 1 || osvariant_status
!= 0) {
1703 return sysctl_handle_quad(oidp
, arg1
, arg2
, req
);
1706 SYSCTL_PROC(_kern
, OID_AUTO
, osvariant_status
,
1707 CTLFLAG_RW
| CTLTYPE_QUAD
| CTLFLAG_LOCKED
| CTLFLAG_MASKED
,
1708 &osvariant_status
, sizeof(osvariant_status
),
1709 sysctl_osvariant_status
, "Q", "Opaque flags used to cache OS variant information");
/*
 * kern.bootargs handler: copy the boot-args string from PE_boot_args()
 * into a stack buffer and return it read-only.  Buffer size is platform
 * dependent (256 vs 1024 bytes -- selector elided by the extraction;
 * presumably an embedded/BOOT_LINE_LENGTH #if).  NOTE(review): the
 * buffer is a VLA sized by a runtime size_t; upstream uses a constant.
 */
1712 sysctl_sysctl_bootargs
1713 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1716 /* BOOT_LINE_LENGTH */
1718 size_t boot_args_len
= 256;
1720 size_t boot_args_len
= 1024;
1722 char buf
[boot_args_len
];
1724 strlcpy(buf
, PE_boot_args(), boot_args_len
);
1725 error
= sysctl_io_string(req
, buf
, boot_args_len
, 0, NULL
);
1729 SYSCTL_PROC(_kern
, OID_AUTO
, bootargs
,
1730 CTLFLAG_LOCKED
| CTLFLAG_RD
| CTLFLAG_KERN
| CTLTYPE_STRING
,
1732 sysctl_sysctl_bootargs
, "A", "bootargs");
/*
 * Static sysctl registrations for classic kernel limits and counters:
 * kern.maxfiles, kern.argmax, kern.posix1version, kern.ngroups,
 * kern.job_control, kern.saved_ids, kern.num_files / num_vnodes /
 * num_tasks / num_threads / num_taskthreads.  Several backing-variable
 * arguments are elided by the extraction.
 */
1734 SYSCTL_INT(_kern
, KERN_MAXFILES
, maxfiles
,
1735 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1737 SYSCTL_INT(_kern
, KERN_ARGMAX
, argmax
,
1738 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1739 (int *)NULL
, ARG_MAX
, "");
1740 SYSCTL_INT(_kern
, KERN_POSIX1
, posix1version
,
1741 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1742 (int *)NULL
, _POSIX_VERSION
, "");
1743 SYSCTL_INT(_kern
, KERN_NGROUPS
, ngroups
,
1744 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1745 (int *)NULL
, NGROUPS_MAX
, "");
1746 SYSCTL_INT(_kern
, KERN_JOB_CONTROL
, job_control
,
1747 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1748 (int *)NULL
, 1, "");
/* Two registrations for saved_ids; which one compiles depends on the
 * _POSIX_SAVED_IDS preprocessor branch below. */
1749 #if 1 /* _POSIX_SAVED_IDS from <unistd.h> */
1750 SYSCTL_INT(_kern
, KERN_SAVED_IDS
, saved_ids
,
1751 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1752 (int *)NULL
, 1, "");
1754 SYSCTL_INT(_kern
, KERN_SAVED_IDS
, saved_ids
,
1755 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1758 SYSCTL_INT(_kern
, OID_AUTO
, num_files
,
1759 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1761 SYSCTL_COMPAT_INT(_kern
, OID_AUTO
, num_vnodes
,
1762 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1764 SYSCTL_INT(_kern
, OID_AUTO
, num_tasks
,
1765 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1767 SYSCTL_INT(_kern
, OID_AUTO
, num_threads
,
1768 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1769 &thread_max
, 0, "");
1770 SYSCTL_INT(_kern
, OID_AUTO
, num_taskthreads
,
1771 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1772 &task_threadmax
, 0, "");
/*
 * kern.maxvnodes handler: read/write desiredvnodes directly through
 * sysctl_io_number; if the value actually changed, resize the name cache
 * to match.  NOTE(review): mangled extraction -- braces and the return
 * of `error` are elided.
 */
1775 sysctl_maxvnodes (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1777 int oldval
= desiredvnodes
;
1778 int error
= sysctl_io_number(req
, desiredvnodes
, sizeof(int), &desiredvnodes
, NULL
);
1780 if (oldval
!= desiredvnodes
) {
1781 resize_namecache(desiredvnodes
);
/*
 * Static registrations: kern.namecache_disabled (direct int) and the
 * PROC-backed tunables kern.maxvnodes, kern.maxproc, kern.aiomax,
 * kern.aioprocmax, kern.aiothreads wired to the handlers defined above.
 */
1787 SYSCTL_INT(_kern
, OID_AUTO
, namecache_disabled
,
1788 CTLFLAG_RW
| CTLFLAG_LOCKED
,
1789 &nc_disabled
, 0, "");
1791 SYSCTL_PROC(_kern
, KERN_MAXVNODES
, maxvnodes
,
1792 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1793 0, 0, sysctl_maxvnodes
, "I", "");
1795 SYSCTL_PROC(_kern
, KERN_MAXPROC
, maxproc
,
1796 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1797 0, 0, sysctl_maxproc
, "I", "");
1799 SYSCTL_PROC(_kern
, KERN_AIOMAX
, aiomax
,
1800 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1801 0, 0, sysctl_aiomax
, "I", "");
1803 SYSCTL_PROC(_kern
, KERN_AIOPROCMAX
, aioprocmax
,
1804 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1805 0, 0, sysctl_aioprocmax
, "I", "");
1807 SYSCTL_PROC(_kern
, KERN_AIOTHREADS
, aiothreads
,
1808 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1809 0, 0, sysctl_aiothreads
, "I", "");
/*
 * DEVELOPMENT/DEBUG-only scheduler knobs: kern.sched_smt_balance and,
 * on ARM, kern.sched_recommended_cores plus the
 * kern.perfcontrol_callout node and its stats_enabled toggle.  The
 * perfcontrol_callout_stat_avg() extern feeds the stat handler below.
 */
#if (DEVELOPMENT || DEBUG)
1812 extern int sched_smt_balance
;
1813 SYSCTL_INT(_kern
, OID_AUTO
, sched_smt_balance
,
1814 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1815 &sched_smt_balance
, 0, "");
1816 #if __arm__ || __arm64__
1817 extern uint32_t perfcontrol_requested_recommended_cores
;
1818 SYSCTL_UINT(_kern
, OID_AUTO
, sched_recommended_cores
,
1819 CTLFLAG_KERN
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1820 &perfcontrol_requested_recommended_cores
, 0, "");
1822 /* Scheduler perfcontrol callouts sysctls */
1823 SYSCTL_DECL(_kern_perfcontrol_callout
);
1824 SYSCTL_NODE(_kern
, OID_AUTO
, perfcontrol_callout
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0,
1825 "scheduler perfcontrol callouts");
1827 extern int perfcontrol_callout_stats_enabled
;
1828 SYSCTL_INT(_kern_perfcontrol_callout
, OID_AUTO
, stats_enabled
,
1829 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1830 &perfcontrol_callout_stats_enabled
, 0, "");
1832 extern uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type
,
1833 perfcontrol_callout_stat_t stat
);
/*
 * sysctl_perfcontrol_callout_stat: shared read-only handler for the
 * kern.perfcontrol_callout.* stat OIDs.  arg1 encodes the stat kind
 * (instructions/cycles), arg2 the callout type; the average is fetched
 * from perfcontrol_callout_stat_avg() and returned as an int.
 * NOTE(review): mangled extraction -- braces elided.
 */
1835 /* On-Core Callout */
1837 sysctl_perfcontrol_callout_stat
1838 (__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
)
1840 perfcontrol_callout_stat_t stat
= (perfcontrol_callout_stat_t
)arg1
;
1841 perfcontrol_callout_type_t type
= (perfcontrol_callout_type_t
)arg2
;
1842 return sysctl_io_number(req
, (int)perfcontrol_callout_stat_avg(type
, stat
),
1843 sizeof(int), NULL
, NULL
);
/*
 * Registrations for each (callout type x stat kind) pair under
 * kern.perfcontrol_callout: oncore/offcore/context/update x
 * instr/cycles, all routed through sysctl_perfcontrol_callout_stat with
 * (arg1 = stat, arg2 = callout type).
 */
1846 SYSCTL_PROC(_kern_perfcontrol_callout
, OID_AUTO
, oncore_instr
,
1847 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1848 (void *)PERFCONTROL_STAT_INSTRS
, PERFCONTROL_CALLOUT_ON_CORE
,
1849 sysctl_perfcontrol_callout_stat
, "I", "");
1850 SYSCTL_PROC(_kern_perfcontrol_callout
, OID_AUTO
, oncore_cycles
,
1851 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1852 (void *)PERFCONTROL_STAT_CYCLES
, PERFCONTROL_CALLOUT_ON_CORE
,
1853 sysctl_perfcontrol_callout_stat
, "I", "");
1854 SYSCTL_PROC(_kern_perfcontrol_callout
, OID_AUTO
, offcore_instr
,
1855 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1856 (void *)PERFCONTROL_STAT_INSTRS
, PERFCONTROL_CALLOUT_OFF_CORE
,
1857 sysctl_perfcontrol_callout_stat
, "I", "");
1858 SYSCTL_PROC(_kern_perfcontrol_callout
, OID_AUTO
, offcore_cycles
,
1859 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1860 (void *)PERFCONTROL_STAT_CYCLES
, PERFCONTROL_CALLOUT_OFF_CORE
,
1861 sysctl_perfcontrol_callout_stat
, "I", "");
1862 SYSCTL_PROC(_kern_perfcontrol_callout
, OID_AUTO
, context_instr
,
1863 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1864 (void *)PERFCONTROL_STAT_INSTRS
, PERFCONTROL_CALLOUT_CONTEXT
,
1865 sysctl_perfcontrol_callout_stat
, "I", "");
1866 SYSCTL_PROC(_kern_perfcontrol_callout
, OID_AUTO
, context_cycles
,
1867 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1868 (void *)PERFCONTROL_STAT_CYCLES
, PERFCONTROL_CALLOUT_CONTEXT
,
1869 sysctl_perfcontrol_callout_stat
, "I", "");
1870 SYSCTL_PROC(_kern_perfcontrol_callout
, OID_AUTO
, update_instr
,
1871 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1872 (void *)PERFCONTROL_STAT_INSTRS
, PERFCONTROL_CALLOUT_STATE_UPDATE
,
1873 sysctl_perfcontrol_callout_stat
, "I", "");
1874 SYSCTL_PROC(_kern_perfcontrol_callout
, OID_AUTO
, update_cycles
,
1875 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1876 (void *)PERFCONTROL_STAT_CYCLES
, PERFCONTROL_CALLOUT_STATE_UPDATE
,
1877 sysctl_perfcontrol_callout_stat
, "I", "");
1879 #endif /* __arm__ || __arm64__ */
1880 #endif /* (DEVELOPMENT || DEBUG) */
/*
 * kern.securelevel handler (name line elided; identified from the
 * SYSCTL_PROC registration below as sysctl_securelvl).  Reads/writes the
 * securelevel; a write that would LOWER the level is rejected unless the
 * caller is pid 1.  NOTE(review): mangled extraction -- the function
 * header, braces, the EPERM path and return are elided.
 */
1884 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1886 int new_value
, changed
;
1887 int error
= sysctl_io_number(req
, securelevel
, sizeof(int), &new_value
, &changed
);
1889 if (!(new_value
< securelevel
&& req
->p
->p_pid
!= 1)) {
1891 securelevel
= new_value
;
1900 SYSCTL_PROC(_kern
, KERN_SECURELVL
, securelevel
,
1901 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1902 0, 0, sysctl_securelvl
, "I", "");
/*
 * kern.nisdomainname handler (header elided; identified from the
 * registration below).  Reads/writes the global `domainname` buffer via
 * sysctl_io_string and refreshes the cached domainnamelen when changed.
 * NOTE(review): mangled extraction -- braces and return elided.
 */
1907 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1910 error
= sysctl_io_string(req
, domainname
, sizeof(domainname
), 0, &changed
);
1912 domainnamelen
= strlen(domainname
);
1917 SYSCTL_PROC(_kern
, KERN_DOMAINNAME
, nisdomainname
,
1918 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1919 0, 0, sysctl_domainname
, "A", "");
/* kern.hostid (compat int).  Backing-variable argument elided by the
 * extraction. */
1921 SYSCTL_COMPAT_INT(_kern
, KERN_HOSTID
, hostid
,
1922 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
/*
 * kern.hostname handler (header elided; identified from the registration
 * below).  Reads/writes the global `hostname` buffer; on change, records
 * the new length from req->newlen.  NOTE(review): mangled extraction --
 * braces and return elided.
 */
1927 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1930 error
= sysctl_io_string(req
, hostname
, sizeof(hostname
), 1, &changed
);
1932 hostnamelen
= req
->newlen
;
1938 SYSCTL_PROC(_kern
, KERN_HOSTNAME
, hostname
,
1939 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1940 0, 0, sysctl_hostname
, "A", "");
/*
 * kern.procname handler (header elided; identified from the registration
 * below).  Exposes the calling process's p_name buffer read/write via
 * sysctl_io_string; CTLFLAG_ANYBODY because it only touches the caller's
 * own proc.
 */
1944 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1946 /* Original code allowed writing, I'm copying this, although this all makes
1947 no sense to me. Besides, this sysctl is never used. */
1948 return sysctl_io_string(req
, &req
->p
->p_name
[0], (2*MAXCOMLEN
+1), 1, NULL
);
1951 SYSCTL_PROC(_kern
, KERN_PROCNAME
, procname
,
1952 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
1953 0, 0, sysctl_procname
, "A", "");
/*
 * Static registrations for speculative-I/O and VM paging tunables
 * (speculative_reads_disabled, preheat min/max, speculative prefetch
 * caps, vm_page free targets/thresholds, delayed-work/batch limits) and
 * the read-only kern.bootsessionuuid string.
 */
1955 SYSCTL_INT(_kern
, KERN_SPECULATIVE_READS
, speculative_reads_disabled
,
1956 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1957 &speculative_reads_disabled
, 0, "");
1959 SYSCTL_UINT(_kern
, OID_AUTO
, preheat_max_bytes
,
1960 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1961 &preheat_max_bytes
, 0, "");
1963 SYSCTL_UINT(_kern
, OID_AUTO
, preheat_min_bytes
,
1964 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1965 &preheat_min_bytes
, 0, "");
1967 SYSCTL_UINT(_kern
, OID_AUTO
, speculative_prefetch_max
,
1968 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1969 &speculative_prefetch_max
, 0, "");
1971 SYSCTL_UINT(_kern
, OID_AUTO
, speculative_prefetch_max_iosize
,
1972 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1973 &speculative_prefetch_max_iosize
, 0, "");
1975 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_free_target
,
1976 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1977 &vm_page_free_target
, 0, "");
1979 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_free_min
,
1980 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1981 &vm_page_free_min
, 0, "");
1983 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_free_reserved
,
1984 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1985 &vm_page_free_reserved
, 0, "");
1987 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_speculative_percentage
,
1988 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1989 &vm_page_speculative_percentage
, 0, "");
1991 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_speculative_q_age_ms
,
1992 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1993 &vm_page_speculative_q_age_ms
, 0, "");
1995 SYSCTL_UINT(_kern
, OID_AUTO
, vm_max_delayed_work_limit
,
1996 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1997 &vm_max_delayed_work_limit
, 0, "");
1999 SYSCTL_UINT(_kern
, OID_AUTO
, vm_max_batch
,
2000 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2001 &vm_max_batch
, 0, "");
2003 SYSCTL_STRING(_kern
, OID_AUTO
, bootsessionuuid
,
2004 CTLFLAG_RD
| CTLFLAG_LOCKED
,
2005 &bootsessionuuid_string
, sizeof(bootsessionuuid_string
) , "");
/*
 * kern.boottime handler (header elided; identified from the registration
 * below).  Fetches boot time via boottime_timeval() and returns it as a
 * struct user64_timeval or user32_timeval depending on the caller's
 * wordsize, via sysctl_io_opaque.
 */
2009 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2012 boottime_timeval(&tv
);
2013 struct proc
*p
= req
->p
;
2015 if (proc_is64bit(p
)) {
2016 struct user64_timeval t
= {};
2017 t
.tv_sec
= tv
.tv_sec
;
2018 t
.tv_usec
= tv
.tv_usec
;
2019 return sysctl_io_opaque(req
, &t
, sizeof(t
), NULL
);
/* 32-bit caller: same conversion into the narrower timeval layout. */
2021 struct user32_timeval t
= {};
2022 t
.tv_sec
= tv
.tv_sec
;
2023 t
.tv_usec
= tv
.tv_usec
;
2024 return sysctl_io_opaque(req
, &t
, sizeof(t
), NULL
);
2028 SYSCTL_PROC(_kern
, KERN_BOOTTIME
, boottime
,
2029 CTLTYPE_STRUCT
| CTLFLAG_KERN
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2030 0, 0, sysctl_boottime
, "S,timeval", "");
/*
 * kern.symfile handler (header elided; identified from the registration
 * below).  Obtains the kernel symbol file path via get_kernel_symfile()
 * and returns it read-only as a string.  NOTE(review): mangled
 * extraction -- the error check between the two calls is elided.
 */
2034 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2037 int error
= get_kernel_symfile(req
->p
, &str
);
2040 return sysctl_io_string(req
, str
, 0, 0, NULL
);
2044 SYSCTL_PROC(_kern
, KERN_SYMFILE
, symfile
,
2045 CTLTYPE_STRING
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2046 0, 0, sysctl_symfile
, "A", "");
/*
 * kern.netboot handler (header elided; identified from the registration
 * below).  Returns netboot_root() as a read-only int (nonzero when the
 * system is net-booted -- presumably; confirm against netboot_root()).
 */
2051 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2053 return sysctl_io_number(req
, netboot_root(), sizeof(int), NULL
, NULL
);
2056 SYSCTL_PROC(_kern
, KERN_NETBOOT
, netboot
,
2057 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2058 0, 0, sysctl_netboot
, "I", "");
#ifdef CONFIG_IMGSRC_ACCESS
/*
 * kern.imgsrcdev handler (legacy single-layer variant; header elided,
 * identified from the registration below).  Superuser-only: takes a ref
 * on the outermost image-boot root vnode, resolves its mount's device
 * vnode, and returns that device's dev_t.  NOTE(review): mangled
 * extraction -- error paths, vnode_put pairing and returns are elided.
 */
2063 * Legacy--act as if only one layer of nesting is possible.
2067 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2069 vfs_context_t ctx
= vfs_context_current();
2073 if (!vfs_context_issuser(ctx
)) {
2077 if (imgsrc_rootvnodes
[0] == NULL
) {
2081 result
= vnode_getwithref(imgsrc_rootvnodes
[0]);
2086 devvp
= vnode_mount(imgsrc_rootvnodes
[0])->mnt_devvp
;
2087 result
= vnode_getwithref(devvp
);
2092 result
= sysctl_io_number(req
, vnode_specrdev(devvp
), sizeof(dev_t
), NULL
, NULL
);
2096 vnode_put(imgsrc_rootvnodes
[0]);
2100 SYSCTL_PROC(_kern
, OID_AUTO
, imgsrcdev
,
2101 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2102 0, 0, sysctl_imgsrcdev
, "I", "");
/*
 * kern.imgsrcinfo handler (header elided; identified from the
 * registration below).  Walks up to MAX_IMAGEBOOT_NESTING image-boot
 * root vnodes, recording each layer's backing device, flags and nesting
 * height into an imgsrc_info array, then returns the populated prefix as
 * opaque data.  NOTE(review): mangled extraction -- vnode_put pairing,
 * error handling and loop exits are elided.
 */
2106 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2109 struct imgsrc_info info
[MAX_IMAGEBOOT_NESTING
] = {}; /* 2 for now, no problem */
2113 if (imgsrc_rootvnodes
[0] == NULLVP
) {
2117 for (i
= 0; i
< MAX_IMAGEBOOT_NESTING
; i
++) {
2119 * Go get the root vnode.
2121 rvp
= imgsrc_rootvnodes
[i
];
2122 if (rvp
== NULLVP
) {
2126 error
= vnode_get(rvp
);
2132 * For now, no getting at a non-local volume.
2134 devvp
= vnode_mount(rvp
)->mnt_devvp
;
2135 if (devvp
== NULL
) {
2140 error
= vnode_getwithref(devvp
);
/* Record this nesting layer's device and height. */
2149 info
[i
].ii_dev
= vnode_specrdev(devvp
);
2150 info
[i
].ii_flags
= 0;
2151 info
[i
].ii_height
= i
;
2152 bzero(info
[i
].ii_reserved
, sizeof(info
[i
].ii_reserved
));
/* Return only the i populated entries. */
2158 return sysctl_io_opaque(req
, info
, i
* sizeof(info
[0]), NULL
);
2161 SYSCTL_PROC(_kern
, OID_AUTO
, imgsrcinfo
,
2162 CTLTYPE_OPAQUE
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2163 0, 0, sysctl_imgsrcinfo
, "I", "");
2165 #endif /* CONFIG_IMGSRC_ACCESS */
/*
 * kern.timer node: coalescing toggle, two deadline-tracking quads, and
 * the kern.timer.longterm subtree.  The anonymous enum lists the
 * selector values passed (via arg1) to sysctl_timer below; the comment
 * notes it must stay in sync with osfmk/kern/timer_call.c.
 * NOTE(review): mangled extraction -- enum opening and some enumerators
 * (e.g. THRESHOLD, QCOUNT used below) are elided.
 */
2168 SYSCTL_DECL(_kern_timer
);
2169 SYSCTL_NODE(_kern
, OID_AUTO
, timer
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, "timer");
2172 SYSCTL_INT(_kern_timer
, OID_AUTO
, coalescing_enabled
,
2173 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2174 &mach_timer_coalescing_enabled
, 0, "");
2176 SYSCTL_QUAD(_kern_timer
, OID_AUTO
, deadline_tracking_bin_1
,
2177 CTLFLAG_RW
| CTLFLAG_LOCKED
,
2178 &timer_deadline_tracking_bin_1
, "");
2179 SYSCTL_QUAD(_kern_timer
, OID_AUTO
, deadline_tracking_bin_2
,
2180 CTLFLAG_RW
| CTLFLAG_LOCKED
,
2181 &timer_deadline_tracking_bin_2
, "");
2183 SYSCTL_DECL(_kern_timer_longterm
);
2184 SYSCTL_NODE(_kern_timer
, OID_AUTO
, longterm
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, "longterm");
2187 /* Must match definition in osfmk/kern/timer_call.c */
2190 ENQUEUES
, DEQUEUES
, ESCALATES
, SCANS
, PREEMPTS
,
2191 LATENCY
, LATENCY_MIN
, LATENCY_MAX
, SCAN_LIMIT
, PAUSES
2193 extern uint64_t timer_sysctl_get(int);
2194 extern int timer_sysctl_set(int, uint64_t);
/*
 * kern.timer.longterm.* handler (header elided; identified from the
 * registrations below as sysctl_timer).  arg1 carries the selector from
 * the enum above; reads go through timer_sysctl_get(), and a changed
 * write is applied with timer_sysctl_set().  NOTE(review): mangled
 * extraction -- braces, the `changed` check and return are elided.
 */
2198 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2200 int oid
= (int)arg1
;
2201 uint64_t value
= timer_sysctl_get(oid
);
2206 error
= sysctl_io_number(req
, value
, sizeof(value
), &new_value
, &changed
);
2208 error
= timer_sysctl_set(oid
, new_value
);
/*
 * kern.timer.longterm registrations, all routed through sysctl_timer
 * with the enum selector in arg1: RW threshold/scan_limit, and RO
 * counters (qlen, enqueues, dequeues, escalates, scans, preempts,
 * latency min/avg/max, scan_pauses).
 */
2213 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, threshold
,
2214 CTLTYPE_QUAD
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2215 (void *) THRESHOLD
, 0, sysctl_timer
, "Q", "");
2216 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, scan_limit
,
2217 CTLTYPE_QUAD
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2218 (void *) SCAN_LIMIT
, 0, sysctl_timer
, "Q", "");
2219 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, qlen
,
2220 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2221 (void *) QCOUNT
, 0, sysctl_timer
, "Q", "");
2223 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, enqueues
,
2224 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2225 (void *) ENQUEUES
, 0, sysctl_timer
, "Q", "");
2226 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, dequeues
,
2227 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2228 (void *) DEQUEUES
, 0, sysctl_timer
, "Q", "");
2229 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, escalates
,
2230 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2231 (void *) ESCALATES
, 0, sysctl_timer
, "Q", "");
2232 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, scans
,
2233 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2234 (void *) SCANS
, 0, sysctl_timer
, "Q", "");
2235 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, preempts
,
2236 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2237 (void *) PREEMPTS
, 0, sysctl_timer
, "Q", "");
2238 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, latency
,
2239 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2240 (void *) LATENCY
, 0, sysctl_timer
, "Q", "");
2241 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, latency_min
,
2242 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2243 (void *) LATENCY_MIN
, 0, sysctl_timer
, "Q", "");
2244 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, latency_max
,
2245 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2246 (void *) LATENCY_MAX
, 0, sysctl_timer
, "Q", "");
2247 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, scan_pauses
,
2248 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2249 (void *) PAUSES
, 0, sysctl_timer
, "Q", "");
/*
 * kern.usrstack handler (header elided; identified from the registration
 * below).  Returns the calling process's user_stack truncated to a
 * 32-bit int -- the legacy KERN_USRSTACK32 interface; the 64-bit variant
 * follows.
 */
2254 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2256 return sysctl_io_number(req
, (int)req
->p
->user_stack
, sizeof(int), NULL
, NULL
);
2259 SYSCTL_PROC(_kern
, KERN_USRSTACK32
, usrstack
,
2260 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2261 0, 0, sysctl_usrstack
, "I", "");
/*
 * kern.usrstack64 handler (header elided; identified from the
 * registration below).  Returns the calling process's full-width
 * user_stack value as a quad.
 */
2265 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2267 return sysctl_io_number(req
, req
->p
->user_stack
, sizeof(req
->p
->user_stack
), NULL
, NULL
);
2270 SYSCTL_PROC(_kern
, KERN_USRSTACK64
, usrstack64
,
2271 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2272 0, 0, sysctl_usrstack64
, "Q", "");
/* kern.corefile: RW template path for core dump files, backed by the
 * corefilename buffer. */
2276 SYSCTL_STRING(_kern
, KERN_COREFILE
, corefile
,
2277 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2278 corefilename
, sizeof(corefilename
), "");
/*
 * kern.coredump handler (header elided; identified from the registration
 * below).  On SECURE_KERNEL builds the write path is compiled out
 * (elided branch); otherwise reads/writes do_coredump, accepting only
 * 0 or 1.  NOTE(review): mangled extraction -- braces, the SECURE_KERNEL
 * alternative body and returns are elided.
 */
2282 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2284 #ifdef SECURE_KERNEL
2288 int new_value
, changed
;
2289 int error
= sysctl_io_number(req
, do_coredump
, sizeof(int), &new_value
, &changed
);
2291 if ((new_value
== 0) || (new_value
== 1))
2292 do_coredump
= new_value
;
2300 SYSCTL_PROC(_kern
, KERN_COREDUMP
, coredump
,
2301 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2302 0, 0, sysctl_coredump
, "I", "");
2305 sysctl_suid_coredump
2306 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2308 #ifdef SECURE_KERNEL
2312 int new_value
, changed
;
2313 int error
= sysctl_io_number(req
, sugid_coredump
, sizeof(int), &new_value
, &changed
);
2315 if ((new_value
== 0) || (new_value
== 1))
2316 sugid_coredump
= new_value
;
2324 SYSCTL_PROC(_kern
, KERN_SUGID_COREDUMP
, sugid_coredump
,
2325 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2326 0, 0, sysctl_suid_coredump
, "I", "");
2328 #endif /* CONFIG_COREDUMP */
2332 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2334 struct proc
*p
= req
->p
;
2335 int new_value
, changed
;
2336 int error
= sysctl_io_number(req
, (req
->p
->p_lflag
& P_LDELAYTERM
)? 1: 0, sizeof(int), &new_value
, &changed
);
2340 req
->p
->p_lflag
|= P_LDELAYTERM
;
2342 req
->p
->p_lflag
&= ~P_LDELAYTERM
;
2348 SYSCTL_PROC(_kern
, KERN_PROCDELAYTERM
, delayterm
,
2349 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2350 0, 0, sysctl_delayterm
, "I", "");
2355 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2357 struct proc
*p
= req
->p
;
2359 int new_value
, old_value
, changed
;
2362 ut
= get_bsdthread_info(current_thread());
2364 if (ut
->uu_flag
& UT_RAGE_VNODES
)
2365 old_value
= KERN_RAGE_THREAD
;
2366 else if (p
->p_lflag
& P_LRAGE_VNODES
)
2367 old_value
= KERN_RAGE_PROC
;
2371 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
2374 switch (new_value
) {
2375 case KERN_RAGE_PROC
:
2377 p
->p_lflag
|= P_LRAGE_VNODES
;
2380 case KERN_UNRAGE_PROC
:
2382 p
->p_lflag
&= ~P_LRAGE_VNODES
;
2386 case KERN_RAGE_THREAD
:
2387 ut
->uu_flag
|= UT_RAGE_VNODES
;
2389 case KERN_UNRAGE_THREAD
:
2390 ut
= get_bsdthread_info(current_thread());
2391 ut
->uu_flag
&= ~UT_RAGE_VNODES
;
2398 SYSCTL_PROC(_kern
, KERN_RAGEVNODE
, rage_vnode
,
2399 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
2400 0, 0, sysctl_rage_vnode
, "I", "");
2402 /* XXX move this interface into libproc and remove this sysctl */
2404 sysctl_setthread_cpupercent
2405 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2407 int new_value
, old_value
;
2409 kern_return_t kret
= KERN_SUCCESS
;
2410 uint8_t percent
= 0;
2418 if ((error
= sysctl_io_number(req
, old_value
, sizeof(old_value
), &new_value
, NULL
)) != 0)
2421 percent
= new_value
& 0xff; /* low 8 bytes for perent */
2422 ms_refill
= (new_value
>> 8) & 0xffffff; /* upper 24bytes represent ms refill value */
2427 * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present.
2429 if ((kret
= thread_set_cpulimit(THREAD_CPULIMIT_BLOCK
, percent
, ms_refill
* (int)NSEC_PER_MSEC
)) != 0)
2435 SYSCTL_PROC(_kern
, OID_AUTO
, setthread_cpupercent
,
2436 CTLTYPE_INT
| CTLFLAG_WR
| CTLFLAG_ANYBODY
,
2437 0, 0, sysctl_setthread_cpupercent
, "I", "set thread cpu percentage limit");
2441 sysctl_kern_check_openevt
2442 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2444 struct proc
*p
= req
->p
;
2445 int new_value
, old_value
, changed
;
2448 if (p
->p_flag
& P_CHECKOPENEVT
) {
2449 old_value
= KERN_OPENEVT_PROC
;
2454 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
2457 switch (new_value
) {
2458 case KERN_OPENEVT_PROC
:
2459 OSBitOrAtomic(P_CHECKOPENEVT
, &p
->p_flag
);
2462 case KERN_UNOPENEVT_PROC
:
2463 OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT
), &p
->p_flag
);
2473 SYSCTL_PROC(_kern
, KERN_CHECKOPENEVT
, check_openevt
, CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
2474 0, 0, sysctl_kern_check_openevt
, "I", "set the per-process check-open-evt flag");
2480 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2482 #ifdef SECURE_KERNEL
2486 int new_value
, changed
;
2489 error
= sysctl_io_number(req
, nx_enabled
, sizeof(nx_enabled
), &new_value
, &changed
);
2494 #if defined(__i386__) || defined(__x86_64__)
2496 * Only allow setting if NX is supported on the chip
2498 if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD
))
2501 nx_enabled
= new_value
;
2504 #endif /* SECURE_KERNEL */
2509 SYSCTL_PROC(_kern
, KERN_NX_PROTECTION
, nx
,
2510 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2511 0, 0, sysctl_nx
, "I", "");
2515 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2517 if (proc_is64bit(req
->p
)) {
2518 struct user64_loadavg loadinfo64
= {};
2519 fill_loadavg64(&averunnable
, &loadinfo64
);
2520 return sysctl_io_opaque(req
, &loadinfo64
, sizeof(loadinfo64
), NULL
);
2522 struct user32_loadavg loadinfo32
= {};
2523 fill_loadavg32(&averunnable
, &loadinfo32
);
2524 return sysctl_io_opaque(req
, &loadinfo32
, sizeof(loadinfo32
), NULL
);
2528 SYSCTL_PROC(_vm
, VM_LOADAVG
, loadavg
,
2529 CTLTYPE_STRUCT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2530 0, 0, sysctl_loadavg
, "S,loadavg", "");
2533 * Note: Thread safe; vm_map_lock protects in vm_toggle_entry_reuse()
2536 sysctl_vm_toggle_address_reuse(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
,
2537 __unused
int arg2
, struct sysctl_req
*req
)
2539 int old_value
=0, new_value
=0, error
=0;
2541 if(vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE
, &old_value
))
2543 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, NULL
);
2545 return (vm_toggle_entry_reuse(new_value
, NULL
));
2550 SYSCTL_PROC(_debug
, OID_AUTO
, toggle_address_reuse
, CTLFLAG_ANYBODY
| CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, 0, sysctl_vm_toggle_address_reuse
,"I","");
2555 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2558 uint64_t swap_total
;
2559 uint64_t swap_avail
;
2560 vm_size_t swap_pagesize
;
2561 boolean_t swap_encrypted
;
2562 struct xsw_usage xsu
= {};
2564 error
= macx_swapinfo(&swap_total
,
2571 xsu
.xsu_total
= swap_total
;
2572 xsu
.xsu_avail
= swap_avail
;
2573 xsu
.xsu_used
= swap_total
- swap_avail
;
2574 xsu
.xsu_pagesize
= swap_pagesize
;
2575 xsu
.xsu_encrypted
= swap_encrypted
;
2576 return sysctl_io_opaque(req
, &xsu
, sizeof(xsu
), NULL
);
2581 SYSCTL_PROC(_vm
, VM_SWAPUSAGE
, swapusage
,
2582 CTLTYPE_STRUCT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2583 0, 0, sysctl_swapusage
, "S,xsw_usage", "");
2586 extern void vm_page_reactivate_all_throttled(void);
2589 sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
2591 #pragma unused(arg1, arg2)
2592 int error
, val
= memorystatus_freeze_enabled
? 1 : 0;
2595 error
= sysctl_handle_int(oidp
, &val
, 0, req
);
2596 if (error
|| !req
->newptr
)
2599 if (VM_CONFIG_COMPRESSOR_IS_ACTIVE
) {
2600 //assert(req->newptr);
2601 printf("Failed attempt to set vm.freeze_enabled sysctl\n");
2606 * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue.
2608 disabled
= (!val
&& memorystatus_freeze_enabled
);
2610 memorystatus_freeze_enabled
= val
? TRUE
: FALSE
;
2613 vm_page_reactivate_all_throttled();
2619 SYSCTL_PROC(_vm
, OID_AUTO
, freeze_enabled
, CTLTYPE_INT
|CTLFLAG_RW
, &memorystatus_freeze_enabled
, 0, sysctl_freeze_enabled
, "I", "");
2620 #endif /* CONFIG_FREEZE */
2622 /* this kernel does NOT implement shared_region_make_private_np() */
2623 SYSCTL_INT(_kern
, KERN_SHREG_PRIVATIZABLE
, shreg_private
,
2624 CTLFLAG_RD
| CTLFLAG_LOCKED
,
2625 (int *)NULL
, 0, "");
2628 fetch_process_cputype(
2632 cpu_type_t
*cputype
)
2634 proc_t p
= PROC_NULL
;
2641 else if (namelen
== 1) {
2642 p
= proc_find(name
[0]);
2651 ret
= cpu_type() & ~CPU_ARCH_MASK
;
2652 if (IS_64BIT_PROCESS(p
))
2653 ret
|= CPU_ARCH_ABI64
;
2664 sysctl_sysctl_native(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
,
2665 struct sysctl_req
*req
)
2668 cpu_type_t proc_cputype
= 0;
2669 if ((error
= fetch_process_cputype(req
->p
, (int *)arg1
, arg2
, &proc_cputype
)) != 0)
2672 if ((proc_cputype
& ~CPU_ARCH_MASK
) != (cpu_type() & ~CPU_ARCH_MASK
))
2674 return SYSCTL_OUT(req
, &res
, sizeof(res
));
2676 SYSCTL_PROC(_sysctl
, OID_AUTO
, proc_native
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, 0, sysctl_sysctl_native
,"I","proc_native");
2679 sysctl_sysctl_cputype(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
,
2680 struct sysctl_req
*req
)
2683 cpu_type_t proc_cputype
= 0;
2684 if ((error
= fetch_process_cputype(req
->p
, (int *)arg1
, arg2
, &proc_cputype
)) != 0)
2686 return SYSCTL_OUT(req
, &proc_cputype
, sizeof(proc_cputype
));
2688 SYSCTL_PROC(_sysctl
, OID_AUTO
, proc_cputype
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, 0, sysctl_sysctl_cputype
,"I","proc_cputype");
2692 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2694 return sysctl_io_number(req
, boothowto
& RB_SAFEBOOT
? 1 : 0, sizeof(int), NULL
, NULL
);
2697 SYSCTL_PROC(_kern
, KERN_SAFEBOOT
, safeboot
,
2698 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2699 0, 0, sysctl_safeboot
, "I", "");
2703 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2705 return sysctl_io_number(req
, boothowto
& RB_SINGLE
? 1 : 0, sizeof(int), NULL
, NULL
);
2708 SYSCTL_PROC(_kern
, OID_AUTO
, singleuser
,
2709 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2710 0, 0, sysctl_singleuser
, "I", "");
2712 STATIC
int sysctl_minimalboot
2713 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2715 return sysctl_io_number(req
, minimalboot
, sizeof(int), NULL
, NULL
);
2718 SYSCTL_PROC(_kern
, OID_AUTO
, minimalboot
,
2719 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2720 0, 0, sysctl_minimalboot
, "I", "");
2723 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
2725 extern boolean_t affinity_sets_enabled
;
2726 extern int affinity_sets_mapping
;
2728 SYSCTL_INT (_kern
, OID_AUTO
, affinity_sets_enabled
,
2729 CTLFLAG_RW
| CTLFLAG_LOCKED
, (int *) &affinity_sets_enabled
, 0, "hinting enabled");
2730 SYSCTL_INT (_kern
, OID_AUTO
, affinity_sets_mapping
,
2731 CTLFLAG_RW
| CTLFLAG_LOCKED
, &affinity_sets_mapping
, 0, "mapping policy");
2734 * Boolean indicating if KASLR is active.
2738 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2742 slide
= vm_kernel_slide
? 1 : 0;
2744 return sysctl_io_number( req
, slide
, sizeof(int), NULL
, NULL
);
2747 SYSCTL_PROC(_kern
, OID_AUTO
, slide
,
2748 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2749 0, 0, sysctl_slide
, "I", "");
2752 * Limit on total memory users can wire.
2754 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
2756 * vm_user_wire_limit - per address space limit on wired memory. This puts a cap on the process's rlimit value.
2758 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
2761 * All values are in bytes.
2764 vm_map_size_t vm_global_no_user_wire_amount
;
2765 vm_map_size_t vm_global_user_wire_limit
;
2766 vm_map_size_t vm_user_wire_limit
;
2769 * There needs to be a more automatic/elegant way to do this
2771 #if defined(__ARM__)
2772 SYSCTL_INT(_vm
, OID_AUTO
, global_no_user_wire_amount
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_global_no_user_wire_amount
, 0, "");
2773 SYSCTL_INT(_vm
, OID_AUTO
, global_user_wire_limit
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_global_user_wire_limit
, 0, "");
2774 SYSCTL_INT(_vm
, OID_AUTO
, user_wire_limit
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_user_wire_limit
, 0, "");
2776 SYSCTL_QUAD(_vm
, OID_AUTO
, global_no_user_wire_amount
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_global_no_user_wire_amount
, "");
2777 SYSCTL_QUAD(_vm
, OID_AUTO
, global_user_wire_limit
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_global_user_wire_limit
, "");
2778 SYSCTL_QUAD(_vm
, OID_AUTO
, user_wire_limit
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_user_wire_limit
, "");
2781 extern int vm_map_copy_overwrite_aligned_src_not_internal
;
2782 extern int vm_map_copy_overwrite_aligned_src_not_symmetric
;
2783 extern int vm_map_copy_overwrite_aligned_src_large
;
2784 SYSCTL_INT(_vm
, OID_AUTO
, vm_copy_src_not_internal
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_map_copy_overwrite_aligned_src_not_internal
, 0, "");
2785 SYSCTL_INT(_vm
, OID_AUTO
, vm_copy_src_not_symmetric
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_map_copy_overwrite_aligned_src_not_symmetric
, 0, "");
2786 SYSCTL_INT(_vm
, OID_AUTO
, vm_copy_src_large
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_map_copy_overwrite_aligned_src_large
, 0, "");
2789 extern uint32_t vm_page_external_count
;
2790 extern uint32_t vm_page_filecache_min
;
2792 SYSCTL_INT(_vm
, OID_AUTO
, vm_page_external_count
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_page_external_count
, 0, "");
2793 SYSCTL_INT(_vm
, OID_AUTO
, vm_page_filecache_min
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_page_filecache_min
, 0, "");
2795 extern int vm_compressor_mode
;
2796 extern int vm_compressor_is_active
;
2797 extern int vm_compressor_available
;
2798 extern uint32_t vm_ripe_target_age
;
2799 extern uint32_t swapout_target_age
;
2800 extern int64_t compressor_bytes_used
;
2801 extern int64_t c_segment_input_bytes
;
2802 extern int64_t c_segment_compressed_bytes
;
2803 extern uint32_t compressor_eval_period_in_msecs
;
2804 extern uint32_t compressor_sample_min_in_msecs
;
2805 extern uint32_t compressor_sample_max_in_msecs
;
2806 extern uint32_t compressor_thrashing_threshold_per_10msecs
;
2807 extern uint32_t compressor_thrashing_min_per_10msecs
;
2808 extern uint32_t vm_compressor_minorcompact_threshold_divisor
;
2809 extern uint32_t vm_compressor_majorcompact_threshold_divisor
;
2810 extern uint32_t vm_compressor_unthrottle_threshold_divisor
;
2811 extern uint32_t vm_compressor_catchup_threshold_divisor
;
2812 extern uint32_t vm_compressor_time_thread
;
2813 #if DEVELOPMENT || DEBUG
2814 extern vmct_stats_t vmct_stats
;
2817 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_input_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &c_segment_input_bytes
, "");
2818 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_compressed_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &c_segment_compressed_bytes
, "");
2819 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_bytes_used
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_bytes_used
, "");
2821 SYSCTL_INT(_vm
, OID_AUTO
, compressor_mode
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_compressor_mode
, 0, "");
2822 SYSCTL_INT(_vm
, OID_AUTO
, compressor_is_active
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_compressor_is_active
, 0, "");
2823 SYSCTL_INT(_vm
, OID_AUTO
, compressor_swapout_target_age
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &swapout_target_age
, 0, "");
2824 SYSCTL_INT(_vm
, OID_AUTO
, compressor_available
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_compressor_available
, 0, "");
2826 SYSCTL_INT(_vm
, OID_AUTO
, vm_ripe_target_age_in_secs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_ripe_target_age
, 0, "");
2828 SYSCTL_INT(_vm
, OID_AUTO
, compressor_eval_period_in_msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_eval_period_in_msecs
, 0, "");
2829 SYSCTL_INT(_vm
, OID_AUTO
, compressor_sample_min_in_msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_sample_min_in_msecs
, 0, "");
2830 SYSCTL_INT(_vm
, OID_AUTO
, compressor_sample_max_in_msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_sample_max_in_msecs
, 0, "");
2831 SYSCTL_INT(_vm
, OID_AUTO
, compressor_thrashing_threshold_per_10msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_thrashing_threshold_per_10msecs
, 0, "");
2832 SYSCTL_INT(_vm
, OID_AUTO
, compressor_thrashing_min_per_10msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_thrashing_min_per_10msecs
, 0, "");
2833 SYSCTL_INT(_vm
, OID_AUTO
, compressor_minorcompact_threshold_divisor
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_minorcompact_threshold_divisor
, 0, "");
2834 SYSCTL_INT(_vm
, OID_AUTO
, compressor_majorcompact_threshold_divisor
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_majorcompact_threshold_divisor
, 0, "");
2835 SYSCTL_INT(_vm
, OID_AUTO
, compressor_unthrottle_threshold_divisor
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_unthrottle_threshold_divisor
, 0, "");
2836 SYSCTL_INT(_vm
, OID_AUTO
, compressor_catchup_threshold_divisor
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_catchup_threshold_divisor
, 0, "");
2838 SYSCTL_STRING(_vm
, OID_AUTO
, swapfileprefix
, CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
, swapfilename
, sizeof(swapfilename
) - SWAPFILENAME_INDEX_LEN
, "");
2840 SYSCTL_INT(_vm
, OID_AUTO
, compressor_timing_enabled
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_time_thread
, 0, "");
2842 #if DEVELOPMENT || DEBUG
2843 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_thread_runtime0
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vmct_stats
.vmct_runtimes
[0], "");
2844 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_thread_runtime1
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vmct_stats
.vmct_runtimes
[1], "");
2846 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_threads_total
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vmct_stats
.vmct_cthreads_total
, "");
2848 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_thread_pages0
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vmct_stats
.vmct_pages
[0], "");
2849 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_thread_pages1
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vmct_stats
.vmct_pages
[1], "");
2851 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_thread_iterations0
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vmct_stats
.vmct_iterations
[0], "");
2852 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_thread_iterations1
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vmct_stats
.vmct_iterations
[1], "");
2854 SYSCTL_INT(_vm
, OID_AUTO
, compressor_thread_minpages0
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vmct_stats
.vmct_minpages
[0], 0, "");
2855 SYSCTL_INT(_vm
, OID_AUTO
, compressor_thread_minpages1
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vmct_stats
.vmct_minpages
[1], 0, "");
2857 SYSCTL_INT(_vm
, OID_AUTO
, compressor_thread_maxpages0
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vmct_stats
.vmct_maxpages
[0], 0, "");
2858 SYSCTL_INT(_vm
, OID_AUTO
, compressor_thread_maxpages1
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vmct_stats
.vmct_maxpages
[1], 0, "");
2862 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_compressions
, "");
2863 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_compression_failures
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_compression_failures
, "");
2864 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_compressed_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_compressed_bytes
, "");
2865 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_wk_compression_delta
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_wk_compression_delta
, "");
2866 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_wk_compression_negative_delta
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_wk_compression_negative_delta
, "");
2868 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_decompressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_decompressions
, "");
2869 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_decompressed_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_decompressed_bytes
, "");
2871 SYSCTL_QUAD(_vm
, OID_AUTO
, uc_decompressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.uc_decompressions
, "");
2873 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_compressions
, "");
2875 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_catime
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_cabstime
, "");
2877 SYSCTL_QUAD(_vm
, OID_AUTO
, wkh_catime
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wkh_cabstime
, "");
2878 SYSCTL_QUAD(_vm
, OID_AUTO
, wkh_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wkh_compressions
, "");
2880 SYSCTL_QUAD(_vm
, OID_AUTO
, wks_catime
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wks_cabstime
, "");
2881 SYSCTL_QUAD(_vm
, OID_AUTO
, wks_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wks_compressions
, "");
2883 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_compressions_exclusive
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_compressions_exclusive
, "");
2884 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_sv_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_sv_compressions
, "");
2885 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_mzv_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_mzv_compressions
, "");
2886 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_compression_failures
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_compression_failures
, "");
2887 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_compressed_bytes_exclusive
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_compressed_bytes_exclusive
, "");
2888 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_compressed_bytes_total
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_compressed_bytes_total
, "");
2890 SYSCTL_QUAD(_vm
, OID_AUTO
, wks_compressed_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wks_compressed_bytes
, "");
2891 SYSCTL_QUAD(_vm
, OID_AUTO
, wks_compression_failures
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wks_compression_failures
, "");
2892 SYSCTL_QUAD(_vm
, OID_AUTO
, wks_sv_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wks_sv_compressions
, "");
2895 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_decompressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_decompressions
, "");
2897 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_datime
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_dabstime
, "");
2899 SYSCTL_QUAD(_vm
, OID_AUTO
, wkh_datime
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wkh_dabstime
, "");
2900 SYSCTL_QUAD(_vm
, OID_AUTO
, wkh_decompressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wkh_decompressions
, "");
2902 SYSCTL_QUAD(_vm
, OID_AUTO
, wks_datime
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wks_dabstime
, "");
2903 SYSCTL_QUAD(_vm
, OID_AUTO
, wks_decompressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wks_decompressions
, "");
2905 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_decompressed_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_decompressed_bytes
, "");
2906 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_sv_decompressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_sv_decompressions
, "");
2908 SYSCTL_INT(_vm
, OID_AUTO
, lz4_threshold
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_threshold
, 0, "");
2909 SYSCTL_INT(_vm
, OID_AUTO
, wkdm_reeval_threshold
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.wkdm_reeval_threshold
, 0, "");
2910 SYSCTL_INT(_vm
, OID_AUTO
, lz4_max_failure_skips
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_max_failure_skips
, 0, "");
2911 SYSCTL_INT(_vm
, OID_AUTO
, lz4_max_failure_run_length
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_max_failure_run_length
, 0, "");
2912 SYSCTL_INT(_vm
, OID_AUTO
, lz4_max_preselects
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_max_preselects
, 0, "");
2913 SYSCTL_INT(_vm
, OID_AUTO
, lz4_run_preselection_threshold
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_run_preselection_threshold
, 0, "");
2914 SYSCTL_INT(_vm
, OID_AUTO
, lz4_run_continue_bytes
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_run_continue_bytes
, 0, "");
2915 SYSCTL_INT(_vm
, OID_AUTO
, lz4_profitable_bytes
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_profitable_bytes
, 0, "");
#if DEVELOPMENT || DEBUG
/* Development-only compressor codec selection / test knobs.
 * NOTE(review): the closing `#endif` was dropped by the extraction and
 * restored here — verify upstream. */
extern int vm_compressor_current_codec;
extern int vm_compressor_test_seg_wp;
extern boolean_t vm_compressor_force_sw_wkdm;
SYSCTL_INT(_vm, OID_AUTO, compressor_codec, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_current_codec, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_test_wp, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_test_seg_wp, 0, "");

SYSCTL_INT(_vm, OID_AUTO, wksw_force, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_force_sw_wkdm, 0, "");
extern int precompy, wkswhw;

SYSCTL_INT(_vm, OID_AUTO, precompy, CTLFLAG_RW | CTLFLAG_LOCKED, &precompy, 0, "");
SYSCTL_INT(_vm, OID_AUTO, wkswhw, CTLFLAG_RW | CTLFLAG_LOCKED, &wkswhw, 0, "");
extern unsigned int vm_ktrace_enabled;
SYSCTL_INT(_vm, OID_AUTO, vm_ktrace, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_ktrace_enabled, 0, "");
#endif /* DEVELOPMENT || DEBUG */
#if CONFIG_PHANTOM_CACHE
/* Phantom-cache thrashing tunables.
 * NOTE(review): closing `#endif` was dropped by the extraction; restored. */
extern uint32_t phantom_cache_thrashing_threshold;
extern uint32_t phantom_cache_eval_period_in_msecs;
extern uint32_t phantom_cache_thrashing_threshold_ssd;

SYSCTL_INT(_vm, OID_AUTO, phantom_cache_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_eval_period_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold_ssd, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold_ssd, 0, "");
#endif /* CONFIG_PHANTOM_CACHE */
#if CONFIG_BACKGROUND_QUEUE
/* Background page-queue counters and tunables.
 * NOTE(review): closing `#endif` was dropped by the extraction; restored. */
extern uint32_t	vm_page_background_count;
extern uint32_t	vm_page_background_target;
extern uint32_t	vm_page_background_internal_count;
extern uint32_t	vm_page_background_external_count;
extern uint32_t	vm_page_background_mode;
extern uint32_t	vm_page_background_exclude_external;
extern uint64_t	vm_page_background_promoted_count;
extern uint64_t vm_pageout_considered_bq_internal;
extern uint64_t vm_pageout_considered_bq_external;
extern uint64_t vm_pageout_rejected_bq_internal;
extern uint64_t vm_pageout_rejected_bq_external;

SYSCTL_INT(_vm, OID_AUTO, vm_page_background_mode, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_mode, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_exclude_external, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_exclude_external, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_target, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_target, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_internal_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_internal_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_external_count, 0, "");

SYSCTL_QUAD(_vm, OID_AUTO, vm_page_background_promoted_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_promoted_count, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_considered_bq_internal, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_considered_bq_external, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_internal, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_external, "");
#endif /* CONFIG_BACKGROUND_QUEUE */
#if (DEVELOPMENT || DEBUG)

SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_hard,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_creation_throttled_hard, 0, "");

SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_soft,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_creation_throttled_soft, 0, "");

extern uint32_t vm_pageout_memorystatus_fb_factor_nr;
extern uint32_t vm_pageout_memorystatus_fb_factor_dr;
SYSCTL_INT(_vm, OID_AUTO, vm_pageout_memorystatus_fb_factor_nr, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_memorystatus_fb_factor_nr, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_pageout_memorystatus_fb_factor_dr, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_memorystatus_fb_factor_dr, 0, "");

extern uint32_t vm_grab_anon_overrides;
extern uint32_t vm_grab_anon_nops;

SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_overrides, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_grab_anon_overrides, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_nops, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_grab_anon_nops, 0, "");

/* log message counters for persistence mode */
extern uint32_t oslog_p_total_msgcount;
extern uint32_t oslog_p_metadata_saved_msgcount;
extern uint32_t oslog_p_metadata_dropped_msgcount;
extern uint32_t oslog_p_error_count;
extern uint32_t oslog_p_saved_msgcount;
extern uint32_t oslog_p_dropped_msgcount;
extern uint32_t oslog_p_boot_dropped_msgcount;

/* log message counters for streaming mode */
extern uint32_t oslog_s_total_msgcount;
extern uint32_t oslog_s_metadata_msgcount;
extern uint32_t oslog_s_error_count;
extern uint32_t oslog_s_streamed_msgcount;
extern uint32_t oslog_s_dropped_msgcount;

SYSCTL_UINT(_debug, OID_AUTO, oslog_p_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_total_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_saved_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_dropped_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_error_count, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_saved_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_dropped_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_boot_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_boot_dropped_msgcount, 0, "");

SYSCTL_UINT(_debug, OID_AUTO, oslog_s_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_total_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_metadata_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_metadata_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_error_count, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_streamed_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_streamed_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_dropped_msgcount, 0, "");

#endif /* DEVELOPMENT || DEBUG */
3027 * Enable tracing of voucher contents
3029 extern uint32_t ipc_voucher_trace_contents
;
3031 SYSCTL_INT (_kern
, OID_AUTO
, ipc_voucher_trace_contents
,
3032 CTLFLAG_RW
| CTLFLAG_LOCKED
, &ipc_voucher_trace_contents
, 0, "Enable tracing voucher contents");
3035 * Kernel stack size and depth
3037 SYSCTL_INT (_kern
, OID_AUTO
, stack_size
,
3038 CTLFLAG_RD
| CTLFLAG_LOCKED
, (int *) &kernel_stack_size
, 0, "Kernel stack size");
3039 SYSCTL_INT (_kern
, OID_AUTO
, stack_depth_max
,
3040 CTLFLAG_RD
| CTLFLAG_LOCKED
, (int *) &kernel_stack_depth_max
, 0, "Max kernel stack depth at interrupt or context switch");
3042 extern unsigned int kern_feature_overrides
;
3043 SYSCTL_INT (_kern
, OID_AUTO
, kern_feature_overrides
,
3044 CTLFLAG_RD
| CTLFLAG_LOCKED
, &kern_feature_overrides
, 0, "Kernel feature override mask");
3047 * enable back trace for port allocations
3049 extern int ipc_portbt
;
3051 SYSCTL_INT(_kern
, OID_AUTO
, ipc_portbt
,
3052 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
3053 &ipc_portbt
, 0, "");
3059 SYSCTL_STRING(_kern
, OID_AUTO
, sched
,
3060 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
3061 sched_string
, sizeof(sched_string
),
3062 "Timeshare scheduler implementation");
3065 * Only support runtime modification on embedded platforms
3066 * with development config enabled
3070 extern int precise_user_kernel_time
;
3071 SYSCTL_INT(_kern
, OID_AUTO
, precise_user_kernel_time
,
3072 CTLFLAG_RW
| CTLFLAG_LOCKED
,
3073 &precise_user_kernel_time
, 0, "Precise accounting of kernel vs. user time");
/* Parameters related to timer coalescing tuning, to be replaced
 * with a dedicated systemcall in the future.
 */
/* Enable processing pending timers in the context of any other interrupt */
/* Coalescing tuning parameters for various thread/task attributes */
3084 sysctl_timer_user_us_kernel_abstime SYSCTL_HANDLER_ARGS
3086 #pragma unused(oidp)
3087 int size
= arg2
; /* subcommand*/
3090 uint64_t old_value_ns
;
3091 uint64_t new_value_ns
;
3092 uint64_t value_abstime
;
3093 if (size
== sizeof(uint32_t))
3094 value_abstime
= *((uint32_t *)arg1
);
3095 else if (size
== sizeof(uint64_t))
3096 value_abstime
= *((uint64_t *)arg1
);
3097 else return ENOTSUP
;
3099 absolutetime_to_nanoseconds(value_abstime
, &old_value_ns
);
3100 error
= sysctl_io_number(req
, old_value_ns
, sizeof(old_value_ns
), &new_value_ns
, &changed
);
3101 if ((error
) || (!changed
))
3104 nanoseconds_to_absolutetime(new_value_ns
, &value_abstime
);
3105 if (size
== sizeof(uint32_t))
3106 *((uint32_t *)arg1
) = (uint32_t)value_abstime
;
3108 *((uint64_t *)arg1
) = value_abstime
;
3112 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_bg_scale
,
3113 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3114 &tcoal_prio_params
.timer_coalesce_bg_shift
, 0, "");
3115 SYSCTL_PROC(_kern
, OID_AUTO
, timer_resort_threshold_ns
,
3116 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3117 &tcoal_prio_params
.timer_resort_threshold_abstime
,
3118 sizeof(tcoal_prio_params
.timer_resort_threshold_abstime
),
3119 sysctl_timer_user_us_kernel_abstime
,
3121 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_bg_ns_max
,
3122 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3123 &tcoal_prio_params
.timer_coalesce_bg_abstime_max
,
3124 sizeof(tcoal_prio_params
.timer_coalesce_bg_abstime_max
),
3125 sysctl_timer_user_us_kernel_abstime
,
3128 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_kt_scale
,
3129 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3130 &tcoal_prio_params
.timer_coalesce_kt_shift
, 0, "");
3132 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_kt_ns_max
,
3133 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3134 &tcoal_prio_params
.timer_coalesce_kt_abstime_max
,
3135 sizeof(tcoal_prio_params
.timer_coalesce_kt_abstime_max
),
3136 sysctl_timer_user_us_kernel_abstime
,
3139 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_fp_scale
,
3140 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3141 &tcoal_prio_params
.timer_coalesce_fp_shift
, 0, "");
3143 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_fp_ns_max
,
3144 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3145 &tcoal_prio_params
.timer_coalesce_fp_abstime_max
,
3146 sizeof(tcoal_prio_params
.timer_coalesce_fp_abstime_max
),
3147 sysctl_timer_user_us_kernel_abstime
,
3150 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_ts_scale
,
3151 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3152 &tcoal_prio_params
.timer_coalesce_ts_shift
, 0, "");
3154 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_ts_ns_max
,
3155 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3156 &tcoal_prio_params
.timer_coalesce_ts_abstime_max
,
3157 sizeof(tcoal_prio_params
.timer_coalesce_ts_abstime_max
),
3158 sysctl_timer_user_us_kernel_abstime
,
3161 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier0_scale
,
3162 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3163 &tcoal_prio_params
.latency_qos_scale
[0], 0, "");
3165 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier0_ns_max
,
3166 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3167 &tcoal_prio_params
.latency_qos_abstime_max
[0],
3168 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[0]),
3169 sysctl_timer_user_us_kernel_abstime
,
3172 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier1_scale
,
3173 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3174 &tcoal_prio_params
.latency_qos_scale
[1], 0, "");
3176 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier1_ns_max
,
3177 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3178 &tcoal_prio_params
.latency_qos_abstime_max
[1],
3179 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[1]),
3180 sysctl_timer_user_us_kernel_abstime
,
3183 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier2_scale
,
3184 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3185 &tcoal_prio_params
.latency_qos_scale
[2], 0, "");
3187 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier2_ns_max
,
3188 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3189 &tcoal_prio_params
.latency_qos_abstime_max
[2],
3190 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[2]),
3191 sysctl_timer_user_us_kernel_abstime
,
3194 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier3_scale
,
3195 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3196 &tcoal_prio_params
.latency_qos_scale
[3], 0, "");
3198 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier3_ns_max
,
3199 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3200 &tcoal_prio_params
.latency_qos_abstime_max
[3],
3201 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[3]),
3202 sysctl_timer_user_us_kernel_abstime
,
3205 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier4_scale
,
3206 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3207 &tcoal_prio_params
.latency_qos_scale
[4], 0, "");
3209 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier4_ns_max
,
3210 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3211 &tcoal_prio_params
.latency_qos_abstime_max
[4],
3212 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[4]),
3213 sysctl_timer_user_us_kernel_abstime
,
3216 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier5_scale
,
3217 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3218 &tcoal_prio_params
.latency_qos_scale
[5], 0, "");
3220 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier5_ns_max
,
3221 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3222 &tcoal_prio_params
.latency_qos_abstime_max
[5],
3223 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[5]),
3224 sysctl_timer_user_us_kernel_abstime
,
/* Communicate the "user idle level" heuristic to the timer layer, and
 * potentially other layers in the future.
 */
3232 timer_user_idle_level(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
) {
3233 int new_value
= 0, old_value
= 0, changed
= 0, error
;
3235 old_value
= timer_get_user_idle_level();
3237 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
3239 if (error
== 0 && changed
) {
3240 if (timer_set_user_idle_level(new_value
) != KERN_SUCCESS
)
3247 SYSCTL_PROC(_machdep
, OID_AUTO
, user_idle_level
,
3248 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3250 timer_user_idle_level
, "I", "User idle level heuristic, 0-128");
3253 SYSCTL_INT(_kern
, OID_AUTO
, hv_support
,
3254 CTLFLAG_KERN
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
3255 &hv_support_available
, 0, "");
3260 sysctl_darkboot SYSCTL_HANDLER_ARGS
3262 int err
= 0, value
= 0;
3263 #pragma unused(oidp, arg1, arg2, err, value, req)
3266 * Handle the sysctl request.
3268 * If this is a read, the function will set the value to the current darkboot value. Otherwise,
3269 * we'll get the request identifier into "value" and then we can honor it.
3271 if ((err
= sysctl_io_number(req
, darkboot
, sizeof(int), &value
, NULL
)) != 0) {
3275 /* writing requested, let's process the request */
3277 /* writing is protected by an entitlement */
3278 if (priv_check_cred(kauth_cred_get(), PRIV_DARKBOOT
, 0) != 0) {
3284 case MEMORY_MAINTENANCE_DARK_BOOT_UNSET
:
3286 * If the darkboot sysctl is unset, the NVRAM variable
3287 * must be unset too. If that's not the case, it means
3288 * someone is doing something crazy and not supported.
3290 if (darkboot
!= 0) {
3291 int ret
= PERemoveNVRAMProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME
);
3299 case MEMORY_MAINTENANCE_DARK_BOOT_SET
:
3302 case MEMORY_MAINTENANCE_DARK_BOOT_SET_PERSISTENT
: {
3304 * Set the NVRAM and update 'darkboot' in case
3305 * of success. Otherwise, do not update
3306 * 'darkboot' and report the failure.
3308 if (PEWriteNVRAMBooleanProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME
, TRUE
)) {
3325 SYSCTL_PROC(_kern
, OID_AUTO
, darkboot
,
3326 CTLFLAG_KERN
| CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
| CTLFLAG_ANYBODY
,
3327 0, 0, sysctl_darkboot
, "I", "");
/*
 * This is set by core audio to tell tailspin (ie background tracing) how long
 * its smallest buffer is. Background tracing can then try to make a reasonable
 * decisions to try to avoid introducing so much latency that the buffers will
 * be dropped.
 */
int min_audio_buffer_usec;
3340 sysctl_audio_buffer SYSCTL_HANDLER_ARGS
3342 #pragma unused(oidp, arg1, arg2)
3343 int err
= 0, value
= 0, changed
= 0;
3344 err
= sysctl_io_number(req
, min_audio_buffer_usec
, sizeof(int), &value
, &changed
);
3348 /* writing is protected by an entitlement */
3349 if (priv_check_cred(kauth_cred_get(), PRIV_AUDIO_LATENCY
, 0) != 0) {
3353 min_audio_buffer_usec
= value
;
3359 SYSCTL_PROC(_kern
, OID_AUTO
, min_audio_buffer_usec
, CTLFLAG_RW
| CTLFLAG_ANYBODY
, 0, 0, sysctl_audio_buffer
, "I", "Minimum audio buffer size, in microseconds");
3361 #if DEVELOPMENT || DEBUG
3362 #include <sys/sysent.h>
/* This should result in a fatal exception, verifying that "sysent" is
 * write-protected.
 */
3367 kern_sysent_write(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
) {
3368 uint64_t new_value
= 0, old_value
= 0;
3369 int changed
= 0, error
;
3371 error
= sysctl_io_number(req
, old_value
, sizeof(uint64_t), &new_value
, &changed
);
3372 if ((error
== 0) && changed
) {
3373 volatile uint32_t *wraddr
= (uint32_t *) &sysent
[0];
3375 printf("sysent[0] write succeeded\n");
3380 SYSCTL_PROC(_kern
, OID_AUTO
, sysent_const_check
,
3381 CTLTYPE_QUAD
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3383 kern_sysent_write
, "I", "Attempt sysent[0] write");
3387 #if DEVELOPMENT || DEBUG
3388 SYSCTL_COMPAT_INT(_kern
, OID_AUTO
, development
, CTLFLAG_RD
| CTLFLAG_MASKED
, NULL
, 1, "");
3390 SYSCTL_COMPAT_INT(_kern
, OID_AUTO
, development
, CTLFLAG_RD
| CTLFLAG_MASKED
, NULL
, 0, "");
3394 #if DEVELOPMENT || DEBUG
3397 sysctl_panic_test SYSCTL_HANDLER_ARGS
3399 #pragma unused(arg1, arg2)
3401 char str
[32] = "entry prelog postlog postcore";
3403 rval
= sysctl_handle_string(oidp
, str
, sizeof(str
), req
);
3405 if (rval
== 0 && req
->newptr
) {
3406 if (strncmp("entry", str
, strlen("entry")) == 0) {
3407 panic_with_options(0, NULL
, DEBUGGER_OPTION_RECURPANIC_ENTRY
, "test recursive panic at entry");
3408 } else if (strncmp("prelog", str
, strlen("prelog")) == 0) {
3409 panic_with_options(0, NULL
, DEBUGGER_OPTION_RECURPANIC_PRELOG
, "test recursive panic prior to writing a paniclog");
3410 } else if (strncmp("postlog", str
, strlen("postlog")) == 0) {
3411 panic_with_options(0, NULL
, DEBUGGER_OPTION_RECURPANIC_POSTLOG
, "test recursive panic subsequent to paniclog");
3412 } else if (strncmp("postcore", str
, strlen("postcore")) == 0) {
3413 panic_with_options(0, NULL
, DEBUGGER_OPTION_RECURPANIC_POSTCORE
, "test recursive panic subsequent to on-device core");
3421 sysctl_debugger_test SYSCTL_HANDLER_ARGS
3423 #pragma unused(arg1, arg2)
3425 char str
[32] = "entry prelog postlog postcore";
3427 rval
= sysctl_handle_string(oidp
, str
, sizeof(str
), req
);
3429 if (rval
== 0 && req
->newptr
) {
3430 if (strncmp("entry", str
, strlen("entry")) == 0) {
3431 DebuggerWithContext(0, NULL
, "test recursive panic via debugger at entry", DEBUGGER_OPTION_RECURPANIC_ENTRY
);
3432 } else if (strncmp("prelog", str
, strlen("prelog")) == 0) {
3433 DebuggerWithContext(0, NULL
, "test recursive panic via debugger prior to writing a paniclog", DEBUGGER_OPTION_RECURPANIC_PRELOG
);
3434 } else if (strncmp("postlog", str
, strlen("postlog")) == 0) {
3435 DebuggerWithContext(0, NULL
, "test recursive panic via debugger subsequent to paniclog", DEBUGGER_OPTION_RECURPANIC_POSTLOG
);
3436 } else if (strncmp("postcore", str
, strlen("postcore")) == 0) {
3437 DebuggerWithContext(0, NULL
, "test recursive panic via debugger subsequent to on-device core", DEBUGGER_OPTION_RECURPANIC_POSTCORE
);
3444 decl_lck_spin_data(, spinlock_panic_test_lock
)
3446 __attribute__((noreturn
))
3448 spinlock_panic_test_acquire_spinlock(void * arg __unused
, wait_result_t wres __unused
)
3450 lck_spin_lock(&spinlock_panic_test_lock
);
3455 sysctl_spinlock_panic_test SYSCTL_HANDLER_ARGS
3457 #pragma unused(oidp, arg1, arg2)
3458 if (req
->newlen
== 0)
3461 thread_t panic_spinlock_thread
;
3462 /* Initialize panic spinlock */
3463 lck_grp_t
* panic_spinlock_grp
;
3464 lck_grp_attr_t
* panic_spinlock_grp_attr
;
3465 lck_attr_t
* panic_spinlock_attr
;
3467 panic_spinlock_grp_attr
= lck_grp_attr_alloc_init();
3468 panic_spinlock_grp
= lck_grp_alloc_init("panic_spinlock", panic_spinlock_grp_attr
);
3469 panic_spinlock_attr
= lck_attr_alloc_init();
3471 lck_spin_init(&spinlock_panic_test_lock
, panic_spinlock_grp
, panic_spinlock_attr
);
3474 /* Create thread to acquire spinlock */
3475 if (kernel_thread_start(spinlock_panic_test_acquire_spinlock
, NULL
, &panic_spinlock_thread
) != KERN_SUCCESS
) {
3479 /* Try to acquire spinlock -- should panic eventually */
3480 lck_spin_lock(&spinlock_panic_test_lock
);
3484 __attribute__((noreturn
))
3486 simultaneous_panic_worker
3487 (void * arg
, wait_result_t wres __unused
)
3489 atomic_int
*start_panic
= (atomic_int
*)arg
;
3491 while (!atomic_load(start_panic
)) { ; }
3492 panic("SIMULTANEOUS PANIC TEST: INITIATING PANIC FROM CPU %d", cpu_number());
3493 __builtin_unreachable();
3497 sysctl_simultaneous_panic_test SYSCTL_HANDLER_ARGS
3499 #pragma unused(oidp, arg1, arg2)
3500 if (req
->newlen
== 0)
3503 int i
= 0, threads_to_create
= 2 * processor_count
;
3504 atomic_int start_panic
= 0;
3505 unsigned int threads_created
= 0;
3506 thread_t new_panic_thread
;
3508 for (i
= threads_to_create
; i
> 0; i
--) {
3509 if (kernel_thread_start(simultaneous_panic_worker
, (void *) &start_panic
, &new_panic_thread
) == KERN_SUCCESS
) {
3514 /* FAIL if we couldn't create at least processor_count threads */
3515 if (threads_created
< processor_count
) {
3516 panic("SIMULTANEOUS PANIC TEST: FAILED TO CREATE ENOUGH THREADS, ONLY CREATED %d (of %d)",
3517 threads_created
, threads_to_create
);
3520 atomic_exchange(&start_panic
, 1);
3524 SYSCTL_PROC(_debug
, OID_AUTO
, panic_test
, CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_MASKED
, 0, 0, sysctl_panic_test
, "A", "panic test");
3525 SYSCTL_PROC(_debug
, OID_AUTO
, debugger_test
, CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_MASKED
, 0, 0, sysctl_debugger_test
, "A", "debugger test");
3526 SYSCTL_PROC(_debug
, OID_AUTO
, spinlock_panic_test
, CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_MASKED
, 0, 0, sysctl_spinlock_panic_test
, "A", "spinlock panic test");
3527 SYSCTL_PROC(_debug
, OID_AUTO
, simultaneous_panic_test
, CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_MASKED
, 0, 0, sysctl_simultaneous_panic_test
, "A", "simultaneous panic test");
3530 #endif /* DEVELOPMENT || DEBUG */
3532 const uint32_t thread_groups_supported
= 0;
3535 sysctl_thread_groups_supported (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
3537 int value
= thread_groups_supported
;
3538 return sysctl_io_number(req
, value
, sizeof(value
), NULL
, NULL
);
3541 SYSCTL_PROC(_kern
, OID_AUTO
, thread_groups_supported
, CTLFLAG_RD
| CTLFLAG_LOCKED
| CTLFLAG_KERN
,
3542 0, 0, &sysctl_thread_groups_supported
, "I", "thread groups supported");
3545 sysctl_grade_cputype SYSCTL_HANDLER_ARGS
3547 #pragma unused(arg1, arg2, oidp)
3549 int type_tuple
[2] = {};
3550 int return_value
= 0;
3552 error
= SYSCTL_IN(req
, &type_tuple
, sizeof(type_tuple
));
3558 return_value
= grade_binary(type_tuple
[0], type_tuple
[1]);
3560 error
= SYSCTL_OUT(req
, &return_value
, sizeof(return_value
));
3569 SYSCTL_PROC(_kern
, OID_AUTO
, grade_cputype
,
3570 CTLFLAG_RW
|CTLFLAG_ANYBODY
|CTLFLAG_MASKED
|CTLFLAG_LOCKED
|CTLTYPE_OPAQUE
,
3571 0, 0, &sysctl_grade_cputype
, "S",
3572 "grade value of cpu_type_t+cpu_sub_type_t");