2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
67 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
68 * support for mandatory and extensible security protections. This notice
69 * is included in support of clause 2.2 (b) of the Apple Public License,
74 * DEPRECATED sysctl system call code
76 * Everything in this file is deprecated. Sysctls should be handled
77 * by the code in kern_newsysctl.c.
78 * The remaining "case" sections are supposed to be converted into
79 * SYSCTL_*-style definitions, and as soon as all of them are gone,
80 * this source file is supposed to die.
82 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
83 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/malloc.h>
90 #include <sys/proc_internal.h>
91 #include <sys/kauth.h>
92 #include <sys/file_internal.h>
93 #include <sys/vnode_internal.h>
94 #include <sys/unistd.h>
96 #include <sys/ioctl.h>
97 #include <sys/namei.h>
99 #include <sys/disklabel.h>
101 #include <sys/sysctl.h>
102 #include <sys/user.h>
103 #include <sys/aio_kern.h>
104 #include <sys/reboot.h>
105 #include <sys/memory_maintenance.h>
106 #include <sys/priv.h>
107 #include <stdatomic.h>
109 #include <security/audit/audit.h>
110 #include <kern/kalloc.h>
112 #include <machine/smp.h>
113 #include <mach/machine.h>
114 #include <mach/mach_host.h>
115 #include <mach/mach_types.h>
116 #include <mach/processor_info.h>
117 #include <mach/vm_param.h>
118 #include <kern/debug.h>
119 #include <kern/mach_param.h>
120 #include <kern/task.h>
121 #include <kern/thread.h>
122 #include <kern/thread_group.h>
123 #include <kern/processor.h>
124 #include <kern/cpu_number.h>
125 #include <kern/debug.h>
126 #include <kern/sched_prim.h>
127 #include <vm/vm_kern.h>
128 #include <vm/vm_map.h>
129 #include <mach/host_info.h>
131 #include <sys/mount_internal.h>
132 #include <sys/kdebug.h>
134 #include <IOKit/IOPlatformExpert.h>
135 #include <pexpert/pexpert.h>
137 #include <machine/machine_routines.h>
138 #include <machine/exec.h>
140 #include <vm/vm_protos.h>
141 #include <vm/vm_pageout.h>
142 #include <vm/vm_compressor_algorithms.h>
143 #include <sys/imgsrc.h>
144 #include <kern/timer_call.h>
146 #if defined(__i386__) || defined(__x86_64__)
147 #include <i386/cpuid.h>
151 #include <sys/kern_memorystatus.h>
155 #include <kperf/kperf.h>
159 #include <kern/hv_support.h>
163 * deliberately setting max requests to really high number
164 * so that runaway settings do not cause MALLOC overflows
166 #define AIO_MAX_REQUESTS (128 * CONFIG_AIO_MAX)
168 extern int aio_max_requests
;
169 extern int aio_max_requests_per_process
;
170 extern int aio_worker_threads
;
171 extern int lowpri_IO_window_msecs
;
172 extern int lowpri_IO_delay_msecs
;
173 extern int nx_enabled
;
174 extern int speculative_reads_disabled
;
175 extern unsigned int speculative_prefetch_max
;
176 extern unsigned int speculative_prefetch_max_iosize
;
177 extern unsigned int preheat_max_bytes
;
178 extern unsigned int preheat_min_bytes
;
179 extern long numvnodes
;
181 extern uuid_string_t bootsessionuuid_string
;
183 extern unsigned int vm_max_delayed_work_limit
;
184 extern unsigned int vm_max_batch
;
186 extern unsigned int vm_page_free_min
;
187 extern unsigned int vm_page_free_target
;
188 extern unsigned int vm_page_free_reserved
;
189 extern unsigned int vm_page_speculative_percentage
;
190 extern unsigned int vm_page_speculative_q_age_ms
;
192 #if (DEVELOPMENT || DEBUG)
193 extern uint32_t vm_page_creation_throttled_hard
;
194 extern uint32_t vm_page_creation_throttled_soft
;
195 #endif /* DEVELOPMENT || DEBUG */
198 * Conditionally allow dtrace to see these functions for debugging purposes.
206 #define STATIC static
209 extern boolean_t mach_timer_coalescing_enabled
;
211 extern uint64_t timer_deadline_tracking_bin_1
, timer_deadline_tracking_bin_2
;
214 fill_user32_eproc(proc_t
, struct user32_eproc
*__restrict
);
216 fill_user32_externproc(proc_t
, struct user32_extern_proc
*__restrict
);
218 fill_user64_eproc(proc_t
, struct user64_eproc
*__restrict
);
220 fill_user64_proc(proc_t
, struct user64_kinfo_proc
*__restrict
);
222 fill_user64_externproc(proc_t
, struct user64_extern_proc
*__restrict
);
224 fill_user32_proc(proc_t
, struct user32_kinfo_proc
*__restrict
);
227 kdbg_control(int *name
, u_int namelen
, user_addr_t where
, size_t * sizep
);
233 pcsamples_ops(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
,
236 sysctl_procargs(int *name
, u_int namelen
, user_addr_t where
,
237 size_t *sizep
, proc_t cur_proc
);
239 sysctl_procargsx(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
,
240 proc_t cur_proc
, int argc_yes
);
242 sysctl_struct(user_addr_t oldp
, size_t *oldlenp
, user_addr_t newp
,
243 size_t newlen
, void *sp
, int len
);
245 STATIC
int sysdoproc_filt_KERN_PROC_PID(proc_t p
, void * arg
);
246 STATIC
int sysdoproc_filt_KERN_PROC_PGRP(proc_t p
, void * arg
);
247 STATIC
int sysdoproc_filt_KERN_PROC_TTY(proc_t p
, void * arg
);
248 STATIC
int sysdoproc_filt_KERN_PROC_UID(proc_t p
, void * arg
);
249 STATIC
int sysdoproc_filt_KERN_PROC_RUID(proc_t p
, void * arg
);
250 int sysdoproc_callback(proc_t p
, void *arg
);
253 /* forward declarations for non-static STATIC */
254 STATIC
void fill_loadavg64(struct loadavg
*la
, struct user64_loadavg
*la64
);
255 STATIC
void fill_loadavg32(struct loadavg
*la
, struct user32_loadavg
*la32
);
256 STATIC
int sysctl_handle_kern_threadname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
257 STATIC
int sysctl_sched_stats(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
258 STATIC
int sysctl_sched_stats_enable(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
259 STATIC
int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
;
261 STATIC
int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
;
262 #endif /* COUNT_SYSCALLS */
264 STATIC
int sysctl_doprocargs SYSCTL_HANDLER_ARGS
;
265 #endif /* !CONFIG_EMBEDDED */
266 STATIC
int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
;
267 STATIC
int sysctl_prochandle SYSCTL_HANDLER_ARGS
;
268 STATIC
int sysctl_aiomax(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
269 STATIC
int sysctl_aioprocmax(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
270 STATIC
int sysctl_aiothreads(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
271 STATIC
int sysctl_maxproc(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
272 STATIC
int sysctl_osversion(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
273 STATIC
int sysctl_sysctl_bootargs(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
274 STATIC
int sysctl_maxvnodes(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
275 STATIC
int sysctl_securelvl(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
276 STATIC
int sysctl_domainname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
277 STATIC
int sysctl_hostname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
278 STATIC
int sysctl_procname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
279 STATIC
int sysctl_boottime(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
280 STATIC
int sysctl_symfile(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
282 STATIC
int sysctl_netboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
284 #ifdef CONFIG_IMGSRC_ACCESS
285 STATIC
int sysctl_imgsrcdev(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
287 STATIC
int sysctl_usrstack(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
288 STATIC
int sysctl_usrstack64(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
290 STATIC
int sysctl_coredump(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
291 STATIC
int sysctl_suid_coredump(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
293 STATIC
int sysctl_delayterm(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
294 STATIC
int sysctl_rage_vnode(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
295 STATIC
int sysctl_kern_check_openevt(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
296 STATIC
int sysctl_nx(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
297 STATIC
int sysctl_loadavg(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
298 STATIC
int sysctl_vm_toggle_address_reuse(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
299 STATIC
int sysctl_swapusage(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
300 STATIC
int fetch_process_cputype( proc_t cur_proc
, int *name
, u_int namelen
, cpu_type_t
*cputype
);
301 STATIC
int sysctl_sysctl_native(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
302 STATIC
int sysctl_sysctl_cputype(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
303 STATIC
int sysctl_safeboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
304 STATIC
int sysctl_singleuser(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
305 STATIC
int sysctl_minimalboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
306 STATIC
int sysctl_slide(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
309 extern void IORegistrySetOSBuildVersion(char * build_version
);
312 fill_loadavg64(struct loadavg
*la
, struct user64_loadavg
*la64
)
314 la64
->ldavg
[0] = la
->ldavg
[0];
315 la64
->ldavg
[1] = la
->ldavg
[1];
316 la64
->ldavg
[2] = la
->ldavg
[2];
317 la64
->fscale
= (user64_long_t
)la
->fscale
;
321 fill_loadavg32(struct loadavg
*la
, struct user32_loadavg
*la32
)
323 la32
->ldavg
[0] = la
->ldavg
[0];
324 la32
->ldavg
[1] = la
->ldavg
[1];
325 la32
->ldavg
[2] = la
->ldavg
[2];
326 la32
->fscale
= (user32_long_t
)la
->fscale
;
331 * Attributes stored in the kernel.
333 extern char corefilename
[MAXPATHLEN
+1];
334 extern int do_coredump
;
335 extern int sugid_coredump
;
339 extern int do_count_syscalls
;
343 int securelevel
= -1;
349 sysctl_handle_kern_threadname( __unused
struct sysctl_oid
*oidp
, __unused
void *arg1
,
350 __unused
int arg2
, struct sysctl_req
*req
)
353 struct uthread
*ut
= get_bsdthread_info(current_thread());
354 user_addr_t oldp
=0, newp
=0;
355 size_t *oldlenp
=NULL
;
359 oldlenp
= &(req
->oldlen
);
361 newlen
= req
->newlen
;
363 /* We want the current length, and maybe the string itself */
365 /* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */
366 size_t currlen
= MAXTHREADNAMESIZE
- 1;
369 /* use length of current thread name */
370 currlen
= strlen(ut
->pth_name
);
372 if(*oldlenp
< currlen
)
374 /* NOTE - we do not copy the NULL terminator */
376 error
= copyout(ut
->pth_name
,oldp
,currlen
);
381 /* return length of thread name minus NULL terminator (just like strlen) */
382 req
->oldidx
= currlen
;
385 /* We want to set the name to something */
388 if(newlen
> (MAXTHREADNAMESIZE
- 1))
392 ut
->pth_name
= (char*)kalloc( MAXTHREADNAMESIZE
);
396 kernel_debug_string_simple(TRACE_STRING_THREADNAME_PREV
, ut
->pth_name
);
398 bzero(ut
->pth_name
, MAXTHREADNAMESIZE
);
399 error
= copyin(newp
, ut
->pth_name
, newlen
);
404 kernel_debug_string_simple(TRACE_STRING_THREADNAME
, ut
->pth_name
);
410 SYSCTL_PROC(_kern
, KERN_THREADNAME
, threadname
, CTLFLAG_ANYBODY
| CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, 0, sysctl_handle_kern_threadname
,"A","");
414 sysctl_sched_stats(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
416 host_basic_info_data_t hinfo
;
420 mach_msg_type_number_t count
= HOST_BASIC_INFO_COUNT
;
421 struct _processor_statistics_np
*buf
;
424 kret
= host_info((host_t
)BSD_HOST
, HOST_BASIC_INFO
, (host_info_t
)&hinfo
, &count
);
425 if (kret
!= KERN_SUCCESS
) {
429 size
= sizeof(struct _processor_statistics_np
) * (hinfo
.logical_cpu_max
+ 2); /* One for RT Queue, One for Fair Share Queue */
431 if (req
->oldlen
< size
) {
435 MALLOC(buf
, struct _processor_statistics_np
*, size
, M_TEMP
, M_ZERO
| M_WAITOK
);
437 kret
= get_sched_statistics(buf
, &size
);
438 if (kret
!= KERN_SUCCESS
) {
443 error
= sysctl_io_opaque(req
, buf
, size
, &changed
);
449 panic("Sched info changed?!");
456 SYSCTL_PROC(_kern
, OID_AUTO
, sched_stats
, CTLFLAG_LOCKED
, 0, 0, sysctl_sched_stats
, "-", "");
459 sysctl_sched_stats_enable(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, __unused
struct sysctl_req
*req
)
464 if (req
->newlen
!= sizeof(active
)) {
468 res
= copyin(req
->newptr
, &active
, sizeof(active
));
473 return set_sched_stats_active(active
);
476 SYSCTL_PROC(_kern
, OID_AUTO
, sched_stats_enable
, CTLFLAG_LOCKED
| CTLFLAG_WR
, 0, 0, sysctl_sched_stats_enable
, "-", "");
478 extern uint32_t sched_debug_flags
;
479 SYSCTL_INT(_debug
, OID_AUTO
, sched
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &sched_debug_flags
, 0, "scheduler debug");
481 #if (DEBUG || DEVELOPMENT)
482 extern boolean_t doprnt_hide_pointers
;
483 SYSCTL_INT(_debug
, OID_AUTO
, hide_kernel_pointers
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &doprnt_hide_pointers
, 0, "hide kernel pointers from log");
486 extern int get_kernel_symfile(proc_t
, char **);
489 #define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)
491 extern unsigned int nsysent
;
492 extern int syscalls_log
[];
493 extern const char *syscallnames
[];
496 sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
498 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
499 __unused
int *name
= arg1
; /* oid element argument vector */
500 __unused
int namelen
= arg2
; /* number of oid element arguments */
501 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
502 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
503 user_addr_t newp
= req
->newptr
; /* user buffer copy in address */
504 size_t newlen
= req
->newlen
; /* user buffer copy in size */
509 /* valid values passed in:
510 * = 0 means don't keep called counts for each bsd syscall
511 * > 0 means keep called counts for each bsd syscall
512 * = 2 means dump current counts to the system log
513 * = 3 means reset all counts
514 * for example, to dump current counts:
515 * sysctl -w kern.count_calls=2
517 error
= sysctl_int(oldp
, oldlenp
, newp
, newlen
, &tmp
);
523 do_count_syscalls
= 1;
525 else if ( tmp
== 0 || tmp
== 2 || tmp
== 3 ) {
527 for ( i
= 0; i
< nsysent
; i
++ ) {
528 if ( syscalls_log
[i
] != 0 ) {
530 printf("%d calls - name %s \n", syscalls_log
[i
], syscallnames
[i
]);
538 do_count_syscalls
= 1;
542 /* adjust index so we return the right required/consumed amount */
544 req
->oldidx
+= req
->oldlen
;
548 SYSCTL_PROC(_kern
, KERN_COUNT_SYSCALLS
, count_syscalls
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
549 0, /* Pointer argument (arg1) */
550 0, /* Integer argument (arg2) */
551 sysctl_docountsyscalls
, /* Handler function */
552 NULL
, /* Data pointer */
554 #endif /* COUNT_SYSCALLS */
557 * The following sysctl_* functions should not be used
558 * any more, as they can only cope with callers in
559 * user mode: Use new-style
567 * Validate parameters and get old / set new parameters
568 * for an integer-valued sysctl function.
571 sysctl_int(user_addr_t oldp
, size_t *oldlenp
,
572 user_addr_t newp
, size_t newlen
, int *valp
)
576 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
578 if (oldp
&& *oldlenp
< sizeof(int))
580 if (newp
&& newlen
!= sizeof(int))
582 *oldlenp
= sizeof(int);
584 error
= copyout(valp
, oldp
, sizeof(int));
585 if (error
== 0 && newp
) {
586 error
= copyin(newp
, valp
, sizeof(int));
587 AUDIT_ARG(value32
, *valp
);
593 * Validate parameters and get old / set new parameters
594 * for an quad(64bit)-valued sysctl function.
597 sysctl_quad(user_addr_t oldp
, size_t *oldlenp
,
598 user_addr_t newp
, size_t newlen
, quad_t
*valp
)
602 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
604 if (oldp
&& *oldlenp
< sizeof(quad_t
))
606 if (newp
&& newlen
!= sizeof(quad_t
))
608 *oldlenp
= sizeof(quad_t
);
610 error
= copyout(valp
, oldp
, sizeof(quad_t
));
611 if (error
== 0 && newp
)
612 error
= copyin(newp
, valp
, sizeof(quad_t
));
617 sysdoproc_filt_KERN_PROC_PID(proc_t p
, void * arg
)
619 if (p
->p_pid
!= (pid_t
)*(int*)arg
)
626 sysdoproc_filt_KERN_PROC_PGRP(proc_t p
, void * arg
)
628 if (p
->p_pgrpid
!= (pid_t
)*(int*)arg
)
635 sysdoproc_filt_KERN_PROC_TTY(proc_t p
, void * arg
)
640 /* This is very racy but list lock is held.. Hmmm. */
641 if ((p
->p_flag
& P_CONTROLT
) == 0 ||
642 (p
->p_pgrp
== NULL
) || (p
->p_pgrp
->pg_session
== NULL
) ||
643 (tp
= SESSION_TP(p
->p_pgrp
->pg_session
)) == TTY_NULL
||
644 tp
->t_dev
!= (dev_t
)*(int*)arg
)
653 sysdoproc_filt_KERN_PROC_UID(proc_t p
, void * arg
)
655 kauth_cred_t my_cred
;
658 if (p
->p_ucred
== NULL
)
660 my_cred
= kauth_cred_proc_ref(p
);
661 uid
= kauth_cred_getuid(my_cred
);
662 kauth_cred_unref(&my_cred
);
664 if (uid
!= (uid_t
)*(int*)arg
)
672 sysdoproc_filt_KERN_PROC_RUID(proc_t p
, void * arg
)
674 kauth_cred_t my_cred
;
677 if (p
->p_ucred
== NULL
)
679 my_cred
= kauth_cred_proc_ref(p
);
680 ruid
= kauth_cred_getruid(my_cred
);
681 kauth_cred_unref(&my_cred
);
683 if (ruid
!= (uid_t
)*(int*)arg
)
690 * try over estimating by 5 procs
692 #define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
693 struct sysdoproc_args
{
708 sysdoproc_callback(proc_t p
, void *arg
)
710 struct sysdoproc_args
*args
= arg
;
712 if (args
->buflen
>= args
->sizeof_kproc
) {
713 if ((args
->ruidcheck
!= 0) && (sysdoproc_filt_KERN_PROC_RUID(p
, &args
->uidval
) == 0))
714 return (PROC_RETURNED
);
715 if ((args
->uidcheck
!= 0) && (sysdoproc_filt_KERN_PROC_UID(p
, &args
->uidval
) == 0))
716 return (PROC_RETURNED
);
717 if ((args
->ttycheck
!= 0) && (sysdoproc_filt_KERN_PROC_TTY(p
, &args
->uidval
) == 0))
718 return (PROC_RETURNED
);
720 bzero(args
->kprocp
, args
->sizeof_kproc
);
722 fill_user64_proc(p
, args
->kprocp
);
724 fill_user32_proc(p
, args
->kprocp
);
725 int error
= copyout(args
->kprocp
, args
->dp
, args
->sizeof_kproc
);
727 *args
->errorp
= error
;
728 return (PROC_RETURNED_DONE
);
730 args
->dp
+= args
->sizeof_kproc
;
731 args
->buflen
-= args
->sizeof_kproc
;
733 args
->needed
+= args
->sizeof_kproc
;
734 return (PROC_RETURNED
);
737 SYSCTL_NODE(_kern
, KERN_PROC
, proc
, CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, "");
739 sysctl_prochandle SYSCTL_HANDLER_ARGS
741 int cmd
= oidp
->oid_arg2
; /* subcommand for multiple nodes */
742 int *name
= arg1
; /* oid element argument vector */
743 int namelen
= arg2
; /* number of oid element arguments */
744 user_addr_t where
= req
->oldptr
;/* user buffer copy out address */
746 user_addr_t dp
= where
;
748 int buflen
= where
!= USER_ADDR_NULL
? req
->oldlen
: 0;
750 boolean_t is_64_bit
= proc_is64bit(current_proc());
751 struct user32_kinfo_proc user32_kproc
;
752 struct user64_kinfo_proc user_kproc
;
755 int (*filterfn
)(proc_t
, void *) = 0;
756 struct sysdoproc_args args
;
762 if (namelen
!= 1 && !(namelen
== 0 && cmd
== KERN_PROC_ALL
))
766 sizeof_kproc
= sizeof(user_kproc
);
767 kprocp
= &user_kproc
;
769 sizeof_kproc
= sizeof(user32_kproc
);
770 kprocp
= &user32_kproc
;
776 filterfn
= sysdoproc_filt_KERN_PROC_PID
;
780 filterfn
= sysdoproc_filt_KERN_PROC_PGRP
;
799 /* must be kern.proc.<unknown> */
804 args
.buflen
= buflen
;
805 args
.kprocp
= kprocp
;
806 args
.is_64_bit
= is_64_bit
;
808 args
.needed
= needed
;
809 args
.errorp
= &error
;
810 args
.uidcheck
= uidcheck
;
811 args
.ruidcheck
= ruidcheck
;
812 args
.ttycheck
= ttycheck
;
813 args
.sizeof_kproc
= sizeof_kproc
;
815 args
.uidval
= name
[0];
817 success
= proc_iterate((PROC_ALLPROCLIST
| PROC_ZOMBPROCLIST
),
818 sysdoproc_callback
, &args
, filterfn
, name
);
821 * rdar://problem/28433391: if we can't iterate over the processes,
822 * make sure to return an error.
832 needed
= args
.needed
;
834 if (where
!= USER_ADDR_NULL
) {
835 req
->oldlen
= dp
- where
;
836 if (needed
> req
->oldlen
)
839 needed
+= KERN_PROCSLOP
;
840 req
->oldlen
= needed
;
842 /* adjust index so we return the right required/consumed amount */
843 req
->oldidx
+= req
->oldlen
;
848 * We specify the subcommand code for multiple nodes as the 'req->arg2' value
849 * in the sysctl declaration itself, which comes into the handler function
850 * as 'oidp->oid_arg2'.
852 * For these particular sysctls, since they have well known OIDs, we could
853 * have just obtained it from the '((int *)arg1)[0]' parameter, but that would
854 * not demonstrate how to handle multiple sysctls that used OID_AUTO instead
855 * of a well known value with a common handler function. This is desirable,
856 * because we want well known values to "go away" at some future date.
858 * It should be noted that the value of '((int *)arg1)[1]' is used for many
859 * an integer parameter to the subcommand for many of these sysctls; we'd
860 * rather have used '((int *)arg1)[0]' for that, or even better, an element
861 * in a structure passed in as the 'newp' argument to sysctlbyname(3),
862 * and then use leaf-node permissions enforcement, but that would have
863 * necessitated modifying user space code to correspond to the interface
864 * change, and we are striving for binary backward compatibility here; even
865 * though these are SPI, and not intended for use by user space applications
866 * which are not themselves system tools or libraries, some applications
867 * have erroneously used them.
869 SYSCTL_PROC(_kern_proc
, KERN_PROC_ALL
, all
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
870 0, /* Pointer argument (arg1) */
871 KERN_PROC_ALL
, /* Integer argument (arg2) */
872 sysctl_prochandle
, /* Handler function */
873 NULL
, /* Data is size variant on ILP32/LP64 */
875 SYSCTL_PROC(_kern_proc
, KERN_PROC_PID
, pid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
876 0, /* Pointer argument (arg1) */
877 KERN_PROC_PID
, /* Integer argument (arg2) */
878 sysctl_prochandle
, /* Handler function */
879 NULL
, /* Data is size variant on ILP32/LP64 */
881 SYSCTL_PROC(_kern_proc
, KERN_PROC_TTY
, tty
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
882 0, /* Pointer argument (arg1) */
883 KERN_PROC_TTY
, /* Integer argument (arg2) */
884 sysctl_prochandle
, /* Handler function */
885 NULL
, /* Data is size variant on ILP32/LP64 */
887 SYSCTL_PROC(_kern_proc
, KERN_PROC_PGRP
, pgrp
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
888 0, /* Pointer argument (arg1) */
889 KERN_PROC_PGRP
, /* Integer argument (arg2) */
890 sysctl_prochandle
, /* Handler function */
891 NULL
, /* Data is size variant on ILP32/LP64 */
893 SYSCTL_PROC(_kern_proc
, KERN_PROC_UID
, uid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
894 0, /* Pointer argument (arg1) */
895 KERN_PROC_UID
, /* Integer argument (arg2) */
896 sysctl_prochandle
, /* Handler function */
897 NULL
, /* Data is size variant on ILP32/LP64 */
899 SYSCTL_PROC(_kern_proc
, KERN_PROC_RUID
, ruid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
900 0, /* Pointer argument (arg1) */
901 KERN_PROC_RUID
, /* Integer argument (arg2) */
902 sysctl_prochandle
, /* Handler function */
903 NULL
, /* Data is size variant on ILP32/LP64 */
905 SYSCTL_PROC(_kern_proc
, KERN_PROC_LCID
, lcid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
906 0, /* Pointer argument (arg1) */
907 KERN_PROC_LCID
, /* Integer argument (arg2) */
908 sysctl_prochandle
, /* Handler function */
909 NULL
, /* Data is size variant on ILP32/LP64 */
914 * Fill in non-zero fields of an eproc structure for the specified process.
917 fill_user32_eproc(proc_t p
, struct user32_eproc
*__restrict ep
)
921 struct session
*sessp
;
922 kauth_cred_t my_cred
;
925 sessp
= proc_session(p
);
927 if (pg
!= PGRP_NULL
) {
928 ep
->e_pgid
= p
->p_pgrpid
;
929 ep
->e_jobc
= pg
->pg_jobc
;
930 if (sessp
!= SESSION_NULL
&& sessp
->s_ttyvp
)
931 ep
->e_flag
= EPROC_CTTY
;
933 ep
->e_ppid
= p
->p_ppid
;
935 my_cred
= kauth_cred_proc_ref(p
);
937 /* A fake historical pcred */
938 ep
->e_pcred
.p_ruid
= kauth_cred_getruid(my_cred
);
939 ep
->e_pcred
.p_svuid
= kauth_cred_getsvuid(my_cred
);
940 ep
->e_pcred
.p_rgid
= kauth_cred_getrgid(my_cred
);
941 ep
->e_pcred
.p_svgid
= kauth_cred_getsvgid(my_cred
);
943 /* A fake historical *kauth_cred_t */
944 ep
->e_ucred
.cr_ref
= my_cred
->cr_ref
;
945 ep
->e_ucred
.cr_uid
= kauth_cred_getuid(my_cred
);
946 ep
->e_ucred
.cr_ngroups
= posix_cred_get(my_cred
)->cr_ngroups
;
947 bcopy(posix_cred_get(my_cred
)->cr_groups
,
948 ep
->e_ucred
.cr_groups
, NGROUPS
* sizeof (gid_t
));
950 kauth_cred_unref(&my_cred
);
953 if ((p
->p_flag
& P_CONTROLT
) && (sessp
!= SESSION_NULL
) &&
954 (tp
= SESSION_TP(sessp
))) {
955 ep
->e_tdev
= tp
->t_dev
;
956 ep
->e_tpgid
= sessp
->s_ttypgrpid
;
960 if (sessp
!= SESSION_NULL
) {
961 if (SESS_LEADER(p
, sessp
))
962 ep
->e_flag
|= EPROC_SLEADER
;
970 * Fill in non-zero fields of an LP64 eproc structure for the specified process.
973 fill_user64_eproc(proc_t p
, struct user64_eproc
*__restrict ep
)
977 struct session
*sessp
;
978 kauth_cred_t my_cred
;
981 sessp
= proc_session(p
);
983 if (pg
!= PGRP_NULL
) {
984 ep
->e_pgid
= p
->p_pgrpid
;
985 ep
->e_jobc
= pg
->pg_jobc
;
986 if (sessp
!= SESSION_NULL
&& sessp
->s_ttyvp
)
987 ep
->e_flag
= EPROC_CTTY
;
989 ep
->e_ppid
= p
->p_ppid
;
991 my_cred
= kauth_cred_proc_ref(p
);
993 /* A fake historical pcred */
994 ep
->e_pcred
.p_ruid
= kauth_cred_getruid(my_cred
);
995 ep
->e_pcred
.p_svuid
= kauth_cred_getsvuid(my_cred
);
996 ep
->e_pcred
.p_rgid
= kauth_cred_getrgid(my_cred
);
997 ep
->e_pcred
.p_svgid
= kauth_cred_getsvgid(my_cred
);
999 /* A fake historical *kauth_cred_t */
1000 ep
->e_ucred
.cr_ref
= my_cred
->cr_ref
;
1001 ep
->e_ucred
.cr_uid
= kauth_cred_getuid(my_cred
);
1002 ep
->e_ucred
.cr_ngroups
= posix_cred_get(my_cred
)->cr_ngroups
;
1003 bcopy(posix_cred_get(my_cred
)->cr_groups
,
1004 ep
->e_ucred
.cr_groups
, NGROUPS
* sizeof (gid_t
));
1006 kauth_cred_unref(&my_cred
);
1009 if ((p
->p_flag
& P_CONTROLT
) && (sessp
!= SESSION_NULL
) &&
1010 (tp
= SESSION_TP(sessp
))) {
1011 ep
->e_tdev
= tp
->t_dev
;
1012 ep
->e_tpgid
= sessp
->s_ttypgrpid
;
1016 if (sessp
!= SESSION_NULL
) {
1017 if (SESS_LEADER(p
, sessp
))
1018 ep
->e_flag
|= EPROC_SLEADER
;
1019 session_rele(sessp
);
1021 if (pg
!= PGRP_NULL
)
1026 * Fill in an eproc structure for the specified process.
1027 * bzeroed by our caller, so only set non-zero fields.
1030 fill_user32_externproc(proc_t p
, struct user32_extern_proc
*__restrict exp
)
1032 exp
->p_starttime
.tv_sec
= p
->p_start
.tv_sec
;
1033 exp
->p_starttime
.tv_usec
= p
->p_start
.tv_usec
;
1034 exp
->p_flag
= p
->p_flag
;
1035 if (p
->p_lflag
& P_LTRACED
)
1036 exp
->p_flag
|= P_TRACED
;
1037 if (p
->p_lflag
& P_LPPWAIT
)
1038 exp
->p_flag
|= P_PPWAIT
;
1039 if (p
->p_lflag
& P_LEXIT
)
1040 exp
->p_flag
|= P_WEXIT
;
1041 exp
->p_stat
= p
->p_stat
;
1042 exp
->p_pid
= p
->p_pid
;
1043 exp
->p_oppid
= p
->p_oppid
;
1045 exp
->user_stack
= p
->user_stack
;
1046 exp
->p_debugger
= p
->p_debugger
;
1047 exp
->sigwait
= p
->sigwait
;
1049 #ifdef _PROC_HAS_SCHEDINFO_
1050 exp
->p_estcpu
= p
->p_estcpu
;
1051 exp
->p_pctcpu
= p
->p_pctcpu
;
1052 exp
->p_slptime
= p
->p_slptime
;
1054 exp
->p_realtimer
.it_interval
.tv_sec
=
1055 (user32_time_t
)p
->p_realtimer
.it_interval
.tv_sec
;
1056 exp
->p_realtimer
.it_interval
.tv_usec
=
1057 (__int32_t
)p
->p_realtimer
.it_interval
.tv_usec
;
1059 exp
->p_realtimer
.it_value
.tv_sec
=
1060 (user32_time_t
)p
->p_realtimer
.it_value
.tv_sec
;
1061 exp
->p_realtimer
.it_value
.tv_usec
=
1062 (__int32_t
)p
->p_realtimer
.it_value
.tv_usec
;
1064 exp
->p_rtime
.tv_sec
= (user32_time_t
)p
->p_rtime
.tv_sec
;
1065 exp
->p_rtime
.tv_usec
= (__int32_t
)p
->p_rtime
.tv_usec
;
1067 exp
->p_sigignore
= p
->p_sigignore
;
1068 exp
->p_sigcatch
= p
->p_sigcatch
;
1069 exp
->p_priority
= p
->p_priority
;
1070 exp
->p_nice
= p
->p_nice
;
1071 bcopy(&p
->p_comm
, &exp
->p_comm
, MAXCOMLEN
);
1072 exp
->p_xstat
= p
->p_xstat
;
1073 exp
->p_acflag
= p
->p_acflag
;
1077 * Fill in an LP64 version of extern_proc structure for the specified process.
1080 fill_user64_externproc(proc_t p
, struct user64_extern_proc
*__restrict exp
)
1082 exp
->p_starttime
.tv_sec
= p
->p_start
.tv_sec
;
1083 exp
->p_starttime
.tv_usec
= p
->p_start
.tv_usec
;
1084 exp
->p_flag
= p
->p_flag
;
1085 if (p
->p_lflag
& P_LTRACED
)
1086 exp
->p_flag
|= P_TRACED
;
1087 if (p
->p_lflag
& P_LPPWAIT
)
1088 exp
->p_flag
|= P_PPWAIT
;
1089 if (p
->p_lflag
& P_LEXIT
)
1090 exp
->p_flag
|= P_WEXIT
;
1091 exp
->p_stat
= p
->p_stat
;
1092 exp
->p_pid
= p
->p_pid
;
1093 exp
->p_oppid
= p
->p_oppid
;
1095 exp
->user_stack
= p
->user_stack
;
1096 exp
->p_debugger
= p
->p_debugger
;
1097 exp
->sigwait
= p
->sigwait
;
1099 #ifdef _PROC_HAS_SCHEDINFO_
1100 exp
->p_estcpu
= p
->p_estcpu
;
1101 exp
->p_pctcpu
= p
->p_pctcpu
;
1102 exp
->p_slptime
= p
->p_slptime
;
1104 exp
->p_realtimer
.it_interval
.tv_sec
= p
->p_realtimer
.it_interval
.tv_sec
;
1105 exp
->p_realtimer
.it_interval
.tv_usec
= p
->p_realtimer
.it_interval
.tv_usec
;
1107 exp
->p_realtimer
.it_value
.tv_sec
= p
->p_realtimer
.it_value
.tv_sec
;
1108 exp
->p_realtimer
.it_value
.tv_usec
= p
->p_realtimer
.it_value
.tv_usec
;
1110 exp
->p_rtime
.tv_sec
= p
->p_rtime
.tv_sec
;
1111 exp
->p_rtime
.tv_usec
= p
->p_rtime
.tv_usec
;
1113 exp
->p_sigignore
= p
->p_sigignore
;
1114 exp
->p_sigcatch
= p
->p_sigcatch
;
1115 exp
->p_priority
= p
->p_priority
;
1116 exp
->p_nice
= p
->p_nice
;
1117 bcopy(&p
->p_comm
, &exp
->p_comm
, MAXCOMLEN
);
1118 exp
->p_xstat
= p
->p_xstat
;
1119 exp
->p_acflag
= p
->p_acflag
;
1123 fill_user32_proc(proc_t p
, struct user32_kinfo_proc
*__restrict kp
)
1125 /* on a 64 bit kernel, 32 bit users get some truncated information */
1126 fill_user32_externproc(p
, &kp
->kp_proc
);
1127 fill_user32_eproc(p
, &kp
->kp_eproc
);
1131 fill_user64_proc(proc_t p
, struct user64_kinfo_proc
*__restrict kp
)
1133 fill_user64_externproc(p
, &kp
->kp_proc
);
1134 fill_user64_eproc(p
, &kp
->kp_eproc
);
/*
 * sysctl handler for the kern.kdebug node: unpacks the oid request and
 * dispatches supported kdebug subcommands to kdbg_control().
 * NOTE(review): extraction gaps -- the switch statement opening and
 * several case labels (lines 1147-1162, 1165-1169) are missing here.
 */
1138 sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
1140 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
1141 int *name
= arg1
; /* oid element argument vector */
1142 int namelen
= arg2
; /* number of oid element arguments */
1143 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1144 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1145 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1146 // size_t newlen = req->newlen; /* user buffer copy in size */
/* Allowed subcommands fall through to a single kdbg_control() call. */
1163 case KERN_KDWRITETR
:
1164 case KERN_KDWRITEMAP
:
1170 case KERN_KDREADCURTHRMAP
:
1171 case KERN_KDSET_TYPEFILTER
:
1172 case KERN_KDBUFWAIT
:
1174 case KERN_KDWRITEMAP_V3
:
1175 case KERN_KDWRITETR_V3
:
1176 ret
= kdbg_control(name
, namelen
, oldp
, oldlenp
);
1183 /* adjust index so we return the right required/consumed amount */
1185 req
->oldidx
+= req
->oldlen
;
/* Registers kern.kdebug as a read-only node handled by sysctl_kdebug_ops.
 * (Extraction gap: trailing format/description arguments missing.) */
1189 SYSCTL_PROC(_kern
, KERN_KDEBUG
, kdebug
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1190 0, /* Pointer argument (arg1) */
1191 0, /* Integer argument (arg2) */
1192 sysctl_kdebug_ops
, /* Handler function */
1193 NULL
, /* Data pointer */
/*
 * kern.procargs (legacy, non-embedded only): returns the current process's
 * argument area via sysctl_procargsx() with argc_yes == 0.
 */
1197 #if !CONFIG_EMBEDDED
1199 * Return the top *sizep bytes of the user stack, or the entire area of the
1200 * user stack down through the saved exec_path, whichever is smaller.
1203 sysctl_doprocargs SYSCTL_HANDLER_ARGS
1205 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
1206 int *name
= arg1
; /* oid element argument vector */
1207 int namelen
= arg2
; /* number of oid element arguments */
1208 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1209 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1210 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1211 // size_t newlen = req->newlen; /* user buffer copy in size */
/* argc_yes == 0: old-style result without the leading argc word. */
1214 error
= sysctl_procargsx( name
, namelen
, oldp
, oldlenp
, current_proc(), 0);
1216 /* adjust index so we return the right required/consumed amount */
1218 req
->oldidx
+= req
->oldlen
;
1222 SYSCTL_PROC(_kern
, KERN_PROCARGS
, procargs
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1223 0, /* Pointer argument (arg1) */
1224 0, /* Integer argument (arg2) */
1225 sysctl_doprocargs
, /* Handler function */
1226 NULL
, /* Data pointer */
1228 #endif /* !CONFIG_EMBEDDED */
/*
 * kern.procargs2: same as kern.procargs but argc_yes == 1, so the result is
 * prefixed with the process's argc as an int.
 */
1231 sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
1233 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
1234 int *name
= arg1
; /* oid element argument vector */
1235 int namelen
= arg2
; /* number of oid element arguments */
1236 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1237 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1238 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1239 // size_t newlen = req->newlen; /* user buffer copy in size */
/* argc_yes == 1: result carries argc as its first word. */
1242 error
= sysctl_procargsx( name
, namelen
, oldp
, oldlenp
, current_proc(), 1);
1244 /* adjust index so we return the right required/consumed amount */
1246 req
->oldidx
+= req
->oldlen
;
1250 SYSCTL_PROC(_kern
, KERN_PROCARGS2
, procargs2
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1251 0, /* Pointer argument (arg1) */
1252 0, /* Integer argument (arg2) */
1253 sysctl_doprocargs2
, /* Handler function */
1254 NULL
, /* Data pointer */
/*
 * Workhorse behind kern.procargs/procargs2: copies the target process's
 * argument area (top of its user stack) into the caller's buffer, after a
 * uid/suser permission check, via a task map reference + vm_map_copyin into
 * kernel_map.  argc_yes selects whether argc is prepended.
 * NOTE(review): heavily gapped extraction -- many lines (pid lookup, error
 * paths, returns, closing braces) are missing; code left byte-identical.
 */
1258 sysctl_procargsx(int *name
, u_int namelen
, user_addr_t where
,
1259 size_t *sizep
, proc_t cur_proc
, int argc_yes
)
/* buflen is only meaningful when the caller supplied an output buffer. */
1262 int buflen
= where
!= USER_ADDR_NULL
? *sizep
: 0;
1264 struct _vm_map
*proc_map
;
1267 user_addr_t arg_addr
;
1272 vm_offset_t copy_start
, copy_end
;
1275 kauth_cred_t my_cred
;
1283 buflen
-= sizeof(int); /* reserve first word to return argc */
1285 /* we only care about buflen when where (oldp from sysctl) is not NULL. */
1286 /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
1287 /* is not NULL then the caller wants us to return the length needed to */
1288 /* hold the data we would return */
1289 if (where
!= USER_ADDR_NULL
&& (buflen
<= 0 || buflen
> ARG_MAX
)) {
1295 * Lookup process by pid
1304 * Copy the top N bytes of the stack.
1305 * On all machines we have so far, the stack grows
1308 * If the user expects no more than N bytes of
1309 * argument list, use that as a guess for the
/* A process with no recorded user stack has no args to report. */
1313 if (!p
->user_stack
) {
1318 if (where
== USER_ADDR_NULL
) {
1319 /* caller only wants to know length of proc args data */
1320 if (sizep
== NULL
) {
1325 size
= p
->p_argslen
;
1328 size
+= sizeof(int);
1331 * old PROCARGS will return the executable's path and plus some
1332 * extra space for work alignment and data tags
1334 size
+= PATH_MAX
+ (6 * sizeof(int));
/* Round the advertised size up to an int boundary. */
1336 size
+= (size
& (sizeof(int) - 1)) ? (sizeof(int) - (size
& (sizeof(int) - 1))) : 0;
/* Permission check: only same-uid callers or superuser may read another
 * process's argument area. */
1341 my_cred
= kauth_cred_proc_ref(p
);
1342 uid
= kauth_cred_getuid(my_cred
);
1343 kauth_cred_unref(&my_cred
);
1345 if ((uid
!= kauth_cred_getuid(kauth_cred_get()))
1346 && suser(kauth_cred_get(), &cur_proc
->p_acflag
)) {
1351 if ((u_int
)arg_size
> p
->p_argslen
)
1352 arg_size
= round_page(p
->p_argslen
);
/* Args live at the top of the user stack, growing down. */
1354 arg_addr
= p
->user_stack
- arg_size
;
1357 * Before we can block (any VM code), make another
1358 * reference to the map to keep it alive. We do
1359 * that by getting a reference on the task itself.
1367 /* save off argc before releasing the proc */
1370 argslen
= p
->p_argslen
;
1372 * Once we have a task reference we can convert that into a
1373 * map reference, which we will use in the calls below. The
1374 * task/process may change its map after we take this reference
1375 * (see execve), but the worst that will happen then is a return
1376 * of stale info (which is always a possibility).
1378 task_reference(task
);
1380 proc_map
= get_task_map_reference(task
);
1381 task_deallocate(task
);
1383 if (proc_map
== NULL
)
/* NOTE(review): "©_start" below is mojibake for "&copy_start"
 * ("&copy" swallowed as the HTML entity &copy;) -- restore on
 * re-integration. */
1387 ret
= kmem_alloc(kernel_map
, ©_start
, round_page(arg_size
), VM_KERN_MEMORY_BSD
);
1388 if (ret
!= KERN_SUCCESS
) {
1389 vm_map_deallocate(proc_map
);
1393 copy_end
= round_page(copy_start
+ arg_size
);
/* Pull the argument pages out of the target's map... */
1395 if( vm_map_copyin(proc_map
, (vm_map_address_t
)arg_addr
,
1396 (vm_map_size_t
)arg_size
, FALSE
, &tmp
) != KERN_SUCCESS
) {
1397 vm_map_deallocate(proc_map
);
1398 kmem_free(kernel_map
, copy_start
,
1399 round_page(arg_size
));
1404 * Now that we've done the copyin from the process'
1405 * map, we can release the reference to it.
1407 vm_map_deallocate(proc_map
);
/* ...and land them in the kernel buffer we just allocated. */
1409 if( vm_map_copy_overwrite(kernel_map
,
1410 (vm_map_address_t
)copy_start
,
1411 tmp
, FALSE
) != KERN_SUCCESS
) {
1412 kmem_free(kernel_map
, copy_start
,
1413 round_page(arg_size
));
1414 vm_map_copy_discard(tmp
);
/* Point 'data' at the start of the real args within the copied pages. */
1418 if (arg_size
> argslen
) {
1419 data
= (caddr_t
) (copy_end
- argslen
);
1422 data
= (caddr_t
) (copy_end
- arg_size
);
1427 * When these sysctls were introduced, the first string in the strings
1428 * section was just the bare path of the executable. However, for security
1429 * reasons we now prefix this string with executable_path= so it can be
1430 * parsed getenv style. To avoid binary compatability issues with exising
1431 * callers of this sysctl, we strip it off here if present.
1432 * (rdar://problem/13746466)
1434 #define EXECUTABLE_KEY "executable_path="
1435 if (strncmp(EXECUTABLE_KEY
, data
, strlen(EXECUTABLE_KEY
)) == 0){
1436 data
+= strlen(EXECUTABLE_KEY
);
1437 size
-= strlen(EXECUTABLE_KEY
);
1441 /* Put processes argc as the first word in the copyout buffer */
1442 suword(where
, argc
);
1443 error
= copyout(data
, (where
+ sizeof(int)), size
);
1444 size
+= sizeof(int);
1446 error
= copyout(data
, where
, size
);
1449 * Make the old PROCARGS work to return the executable's path
1450 * But, only if there is enough space in the provided buffer
1452 * on entry: data [possibily] points to the beginning of the path
1454 * Note: we keep all pointers&sizes aligned to word boundries
1456 if ( (! error
) && (buflen
> 0 && (u_int
)buflen
> argslen
) )
1458 int binPath_sz
, alignedBinPath_sz
= 0;
1459 int extraSpaceNeeded
, addThis
;
1460 user_addr_t placeHere
;
1461 char * str
= (char *) data
;
1464 /* Some apps are really bad about messing up their stacks
1465 So, we have to be extra careful about getting the length
1466 of the executing binary. If we encounter an error, we bail.
1469 /* Limit ourselves to PATH_MAX paths */
1470 if ( max_len
> PATH_MAX
) max_len
= PATH_MAX
;
/* Bounded scan for the NUL terminator of the executable path. */
1474 while ( (binPath_sz
< max_len
-1) && (*str
++ != 0) )
1477 /* If we have a NUL terminator, copy it, too */
1478 if (binPath_sz
< max_len
-1) binPath_sz
+= 1;
1480 /* Pre-Flight the space requiremnts */
1482 /* Account for the padding that fills out binPath to the next word */
1483 alignedBinPath_sz
+= (binPath_sz
& (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz
& (sizeof(int)-1))) : 0;
1485 placeHere
= where
+ size
;
1487 /* Account for the bytes needed to keep placeHere word aligned */
1488 addThis
= (placeHere
& (sizeof(int)-1)) ? (sizeof(int)-(placeHere
& (sizeof(int)-1))) : 0;
1490 /* Add up all the space that is needed */
1491 extraSpaceNeeded
= alignedBinPath_sz
+ addThis
+ binPath_sz
+ (4 * sizeof(int));
1493 /* is there is room to tack on argv[0]? */
1494 if ( (buflen
& ~(sizeof(int)-1)) >= ( argslen
+ extraSpaceNeeded
))
/* Lay down the legacy trailer: two sentinel words around a fake
 * 0xBFFF0000 stack marker, then the path, then a final NUL word. */
1496 placeHere
+= addThis
;
1497 suword(placeHere
, 0);
1498 placeHere
+= sizeof(int);
1499 suword(placeHere
, 0xBFFF0000);
1500 placeHere
+= sizeof(int);
1501 suword(placeHere
, 0);
1502 placeHere
+= sizeof(int);
1503 error
= copyout(data
, placeHere
, binPath_sz
);
1506 placeHere
+= binPath_sz
;
1507 suword(placeHere
, 0);
1508 size
+= extraSpaceNeeded
;
/* Release the kernel-side staging buffer. */
1514 if (copy_start
!= (vm_offset_t
) 0) {
1515 kmem_free(kernel_map
, copy_start
, copy_end
- copy_start
);
1521 if (where
!= USER_ADDR_NULL
)
/*
 * kern.aiomax handler: reads/writes the system-wide cap on concurrent
 * aio requests, accepting only values within
 * [aio_max_requests_per_process, AIO_MAX_REQUESTS].
 * (Extraction gap: the if(changed)/else-EINVAL/return lines are missing.)
 */
1528 * Max number of concurrent aio requests
1532 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1534 int new_value
, changed
;
1535 int error
= sysctl_io_number(req
, aio_max_requests
, sizeof(int), &new_value
, &changed
);
1537 /* make sure the system-wide limit is greater than the per process limit */
1538 if (new_value
>= aio_max_requests_per_process
&& new_value
<= AIO_MAX_REQUESTS
)
1539 aio_max_requests
= new_value
;
/*
 * kern.aioprocmax handler: per-process aio request cap; must stay within
 * [AIO_LISTIO_MAX, aio_max_requests].
 */
1548 * Max number of concurrent aio requests per process
1552 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1554 int new_value
, changed
;
1555 int error
= sysctl_io_number(req
, aio_max_requests_per_process
, sizeof(int), &new_value
, &changed
);
1557 /* make sure per process limit is less than the system-wide limit */
1558 if (new_value
<= aio_max_requests
&& new_value
>= AIO_LISTIO_MAX
)
1559 aio_max_requests_per_process
= new_value
;
/*
 * kern.aiothreads handler: the aio worker-thread pool can only grow;
 * an increase spawns the additional workers immediately.
 */
1568 * Max number of async IO worker threads
1572 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1574 int new_value
, changed
;
1575 int error
= sysctl_io_number(req
, aio_worker_threads
, sizeof(int), &new_value
, &changed
);
1577 /* we only allow an increase in the number of worker threads */
1578 if (new_value
> aio_worker_threads
) {
1579 _aio_create_worker_threads((new_value
- aio_worker_threads
));
1580 aio_worker_threads
= new_value
;
/*
 * kern.maxproc handler: new value must be positive and no greater than the
 * compile-time hard_maxproc; the write is audited.
 */
1590 * System-wide limit on the max number of processes
1594 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1596 int new_value
, changed
;
1597 int error
= sysctl_io_number(req
, maxproc
, sizeof(int), &new_value
, &changed
);
1599 AUDIT_ARG(value32
, new_value
);
1600 /* make sure the system-wide limit is less than the configured hard
1601 limit set at kernel compilation */
1602 if (new_value
<= hard_maxproc
&& new_value
> 0)
1603 maxproc
= new_value
;
/*
 * Static OS-identity sysctls (kern.ostype/osrelease/osrevision/version/uuid)
 * plus the debug.kprint_syscall tracing knobs.
 * (Extraction gaps: the backing-variable arguments of several entries are
 * missing from this chunk.)
 */
1610 SYSCTL_STRING(_kern
, KERN_OSTYPE
, ostype
,
1611 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1613 SYSCTL_STRING(_kern
, KERN_OSRELEASE
, osrelease
,
1614 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1616 SYSCTL_INT(_kern
, KERN_OSREV
, osrevision
,
1617 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1618 (int *)NULL
, BSD
, "");
1619 SYSCTL_STRING(_kern
, KERN_VERSION
, version
,
1620 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1622 SYSCTL_STRING(_kern
, OID_AUTO
, uuid
,
1623 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1624 &kernel_uuid_string
[0], 0, "");
/* Globals backing debug.kprint_syscall{,_process} below. */
1632 int debug_kprint_syscall
= 0;
1633 char debug_kprint_syscall_process
[MAXCOMLEN
+1];
1635 /* Thread safe: bits and string value are not used to reclaim state */
1636 SYSCTL_INT (_debug
, OID_AUTO
, kprint_syscall
,
1637 CTLFLAG_RW
| CTLFLAG_LOCKED
, &debug_kprint_syscall
, 0, "kprintf syscall tracing");
1638 SYSCTL_STRING(_debug
, OID_AUTO
, kprint_syscall_process
,
1639 CTLFLAG_RW
| CTLFLAG_LOCKED
, debug_kprint_syscall_process
, sizeof(debug_kprint_syscall_process
),
1640 "name of process for kprintf syscall tracing");
/*
 * Decide whether kprintf syscall tracing applies to the current process.
 * If debug.kprint_syscall_process is set, only a p_comm match traces (and
 * *namep is cleared since the caller already knows the name); otherwise all
 * processes trace.  (Extraction gaps: early-return lines missing.)
 */
1642 int debug_kprint_current_process(const char **namep
)
1644 struct proc
*p
= current_proc();
1650 if (debug_kprint_syscall_process
[0]) {
1651 /* user asked to scope tracing to a particular process name */
1652 if(0 == strncmp(debug_kprint_syscall_process
,
1653 p
->p_comm
, sizeof(debug_kprint_syscall_process
))) {
1654 /* no value in telling the user that we traced what they asked */
1655 if(namep
) *namep
= NULL
;
1663 /* trace all processes. Tell user what we traced */
/*
 * kern.osversion handler: standard string read/write, plus mirroring any
 * newly written value into the IORegistry build-version property.
 */
1672 /* PR-5293665: need to use a callback function for kern.osversion to set
1673 * osversion in IORegistry */
1676 sysctl_osversion(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
)
1680 rval
= sysctl_handle_string(oidp
, arg1
, arg2
, req
);
/* Propagate the (possibly updated) version string to IOKit. */
1683 IORegistrySetOSBuildVersion((char *)arg1
);
1689 SYSCTL_PROC(_kern
, KERN_OSVERSION
, osversion
,
1690 CTLFLAG_RW
| CTLFLAG_KERN
| CTLTYPE_STRING
| CTLFLAG_LOCKED
,
1691 osversion
, 256 /* OSVERSIZE*/,
1692 sysctl_osversion
, "A", "");
/*
 * kern.osvariant_status: opaque 64-bit flag word cached from launchd.
 * Writes are permitted only from pid 1 and only while the value is still
 * zero (write-once at boot); reads go through sysctl_handle_quad.
 */
1694 static uint64_t osvariant_status
= 0;
1697 sysctl_osvariant_status(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
)
1699 if (req
->newptr
!= 0) {
1701 * Can only ever be set by launchd, and only once at boot.
1703 if (req
->p
->p_pid
!= 1 || osvariant_status
!= 0) {
1708 return sysctl_handle_quad(oidp
, arg1
, arg2
, req
);
1711 SYSCTL_PROC(_kern
, OID_AUTO
, osvariant_status
,
1712 CTLFLAG_RW
| CTLTYPE_QUAD
| CTLFLAG_LOCKED
| CTLFLAG_MASKED
,
1713 &osvariant_status
, sizeof(osvariant_status
),
1714 sysctl_osvariant_status
, "Q", "Opaque flags used to cache OS variant information");
/*
 * kern.bootargs: read-only copy of the boot-args string from PE_boot_args().
 * Buffer size is platform-conditional (256 vs 1024, per BOOT_LINE_LENGTH).
 */
1717 sysctl_sysctl_bootargs
1718 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1721 /* BOOT_LINE_LENGTH */
1723 size_t boot_args_len
= 256;
1725 size_t boot_args_len
= 1024;
/* VLA sized by the platform's boot-line length above. */
1727 char buf
[boot_args_len
];
1729 strlcpy(buf
, PE_boot_args(), boot_args_len
);
1730 error
= sysctl_io_string(req
, buf
, boot_args_len
, 0, NULL
);
1734 SYSCTL_PROC(_kern
, OID_AUTO
, bootargs
,
1735 CTLFLAG_LOCKED
| CTLFLAG_RD
| CTLFLAG_KERN
| CTLTYPE_STRING
,
1737 sysctl_sysctl_bootargs
, "A", "bootargs");
/*
 * Simple integer sysctls exporting POSIX limits, feature flags, and live
 * object counters (files, vnodes, tasks, threads).
 * (Extraction gaps: some backing-variable argument lines are missing.)
 */
1739 SYSCTL_INT(_kern
, KERN_MAXFILES
, maxfiles
,
1740 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1742 SYSCTL_INT(_kern
, KERN_ARGMAX
, argmax
,
1743 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1744 (int *)NULL
, ARG_MAX
, "");
1745 SYSCTL_INT(_kern
, KERN_POSIX1
, posix1version
,
1746 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1747 (int *)NULL
, _POSIX_VERSION
, "");
1748 SYSCTL_INT(_kern
, KERN_NGROUPS
, ngroups
,
1749 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1750 (int *)NULL
, NGROUPS_MAX
, "");
1751 SYSCTL_INT(_kern
, KERN_JOB_CONTROL
, job_control
,
1752 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1753 (int *)NULL
, 1, "");
/* saved_ids is reported as 1 or 0 depending on _POSIX_SAVED_IDS support. */
1754 #if 1 /* _POSIX_SAVED_IDS from <unistd.h> */
1755 SYSCTL_INT(_kern
, KERN_SAVED_IDS
, saved_ids
,
1756 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1757 (int *)NULL
, 1, "");
1759 SYSCTL_INT(_kern
, KERN_SAVED_IDS
, saved_ids
,
1760 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
/* Live kernel object counters. */
1763 SYSCTL_INT(_kern
, OID_AUTO
, num_files
,
1764 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1766 SYSCTL_COMPAT_INT(_kern
, OID_AUTO
, num_vnodes
,
1767 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1769 SYSCTL_INT(_kern
, OID_AUTO
, num_tasks
,
1770 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1772 SYSCTL_INT(_kern
, OID_AUTO
, num_threads
,
1773 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1774 &thread_max
, 0, "");
1775 SYSCTL_INT(_kern
, OID_AUTO
, num_taskthreads
,
1776 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1777 &task_threadmax
, 0, "");
/*
 * kern.maxvnodes handler: writes desiredvnodes directly via sysctl_io_number
 * and resizes the name cache whenever the value actually changed.
 */
1780 sysctl_maxvnodes (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1782 int oldval
= desiredvnodes
;
1783 int error
= sysctl_io_number(req
, desiredvnodes
, sizeof(int), &desiredvnodes
, NULL
);
1785 if (oldval
!= desiredvnodes
) {
1786 resize_namecache(desiredvnodes
);
1792 SYSCTL_INT(_kern
, OID_AUTO
, namecache_disabled
,
1793 CTLFLAG_RW
| CTLFLAG_LOCKED
,
1794 &nc_disabled
, 0, "");
1796 SYSCTL_PROC(_kern
, KERN_MAXVNODES
, maxvnodes
,
1797 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1798 0, 0, sysctl_maxvnodes
, "I", "");
/* Registration of the handler-backed limit sysctls defined above. */
1800 SYSCTL_PROC(_kern
, KERN_MAXPROC
, maxproc
,
1801 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1802 0, 0, sysctl_maxproc
, "I", "");
1804 SYSCTL_PROC(_kern
, KERN_AIOMAX
, aiomax
,
1805 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1806 0, 0, sysctl_aiomax
, "I", "");
1808 SYSCTL_PROC(_kern
, KERN_AIOPROCMAX
, aioprocmax
,
1809 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1810 0, 0, sysctl_aioprocmax
, "I", "");
1812 SYSCTL_PROC(_kern
, KERN_AIOTHREADS
, aiothreads
,
1813 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1814 0, 0, sysctl_aiothreads
, "I", "");
/*
 * DEVELOPMENT/DEBUG-only scheduler knobs; on ARM, additional perfcontrol
 * callout statistics under kern.perfcontrol_callout.*, all funneled through
 * sysctl_perfcontrol_callout_stat with (stat, callout-type) packed into
 * (arg1, arg2).
 */
1816 #if (DEVELOPMENT || DEBUG)
1817 extern int sched_smt_balance
;
1818 SYSCTL_INT(_kern
, OID_AUTO
, sched_smt_balance
,
1819 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1820 &sched_smt_balance
, 0, "");
1821 #if __arm__ || __arm64__
1822 extern uint32_t perfcontrol_requested_recommended_cores
;
1823 SYSCTL_UINT(_kern
, OID_AUTO
, sched_recommended_cores
,
1824 CTLFLAG_KERN
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1825 &perfcontrol_requested_recommended_cores
, 0, "");
1827 /* Scheduler perfcontrol callouts sysctls */
1828 SYSCTL_DECL(_kern_perfcontrol_callout
);
1829 SYSCTL_NODE(_kern
, OID_AUTO
, perfcontrol_callout
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0,
1830 "scheduler perfcontrol callouts");
1832 extern int perfcontrol_callout_stats_enabled
;
1833 SYSCTL_INT(_kern_perfcontrol_callout
, OID_AUTO
, stats_enabled
,
1834 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1835 &perfcontrol_callout_stats_enabled
, 0, "");
1837 extern uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type
,
1838 perfcontrol_callout_stat_t stat
);
1840 /* On-Core Callout */
/* Shared handler: arg1 = which stat, arg2 = which callout type. */
1842 sysctl_perfcontrol_callout_stat
1843 (__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
)
1845 perfcontrol_callout_stat_t stat
= (perfcontrol_callout_stat_t
)arg1
;
1846 perfcontrol_callout_type_t type
= (perfcontrol_callout_type_t
)arg2
;
1847 return sysctl_io_number(req
, (int)perfcontrol_callout_stat_avg(type
, stat
),
1848 sizeof(int), NULL
, NULL
);
/* One read-only OID per (instrs|cycles) x (oncore|offcore|context|update). */
1851 SYSCTL_PROC(_kern_perfcontrol_callout
, OID_AUTO
, oncore_instr
,
1852 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1853 (void *)PERFCONTROL_STAT_INSTRS
, PERFCONTROL_CALLOUT_ON_CORE
,
1854 sysctl_perfcontrol_callout_stat
, "I", "");
1855 SYSCTL_PROC(_kern_perfcontrol_callout
, OID_AUTO
, oncore_cycles
,
1856 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1857 (void *)PERFCONTROL_STAT_CYCLES
, PERFCONTROL_CALLOUT_ON_CORE
,
1858 sysctl_perfcontrol_callout_stat
, "I", "");
1859 SYSCTL_PROC(_kern_perfcontrol_callout
, OID_AUTO
, offcore_instr
,
1860 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1861 (void *)PERFCONTROL_STAT_INSTRS
, PERFCONTROL_CALLOUT_OFF_CORE
,
1862 sysctl_perfcontrol_callout_stat
, "I", "");
1863 SYSCTL_PROC(_kern_perfcontrol_callout
, OID_AUTO
, offcore_cycles
,
1864 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1865 (void *)PERFCONTROL_STAT_CYCLES
, PERFCONTROL_CALLOUT_OFF_CORE
,
1866 sysctl_perfcontrol_callout_stat
, "I", "");
1867 SYSCTL_PROC(_kern_perfcontrol_callout
, OID_AUTO
, context_instr
,
1868 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1869 (void *)PERFCONTROL_STAT_INSTRS
, PERFCONTROL_CALLOUT_CONTEXT
,
1870 sysctl_perfcontrol_callout_stat
, "I", "");
1871 SYSCTL_PROC(_kern_perfcontrol_callout
, OID_AUTO
, context_cycles
,
1872 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1873 (void *)PERFCONTROL_STAT_CYCLES
, PERFCONTROL_CALLOUT_CONTEXT
,
1874 sysctl_perfcontrol_callout_stat
, "I", "");
1875 SYSCTL_PROC(_kern_perfcontrol_callout
, OID_AUTO
, update_instr
,
1876 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1877 (void *)PERFCONTROL_STAT_INSTRS
, PERFCONTROL_CALLOUT_STATE_UPDATE
,
1878 sysctl_perfcontrol_callout_stat
, "I", "");
1879 SYSCTL_PROC(_kern_perfcontrol_callout
, OID_AUTO
, update_cycles
,
1880 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1881 (void *)PERFCONTROL_STAT_CYCLES
, PERFCONTROL_CALLOUT_STATE_UPDATE
,
1882 sysctl_perfcontrol_callout_stat
, "I", "");
1884 #endif /* __arm__ || __arm64__ */
1885 #endif /* (DEVELOPMENT || DEBUG) */
/*
 * kern.securelevel handler (signature line lost in extraction; identified by
 * the SYSCTL_PROC below).  The securelevel may only be lowered by pid 1;
 * anyone may raise it.
 */
1889 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1891 int new_value
, changed
;
1892 int error
= sysctl_io_number(req
, securelevel
, sizeof(int), &new_value
, &changed
);
/* Reject a decrease unless the requester is pid 1 (launchd/init). */
1894 if (!(new_value
< securelevel
&& req
->p
->p_pid
!= 1)) {
1896 securelevel
= new_value
;
1905 SYSCTL_PROC(_kern
, KERN_SECURELVL
, securelevel
,
1906 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1907 0, 0, sysctl_securelvl
, "I", "");
/*
 * kern.nisdomainname handler: read/write the NIS domain name and keep the
 * cached domainnamelen in sync on change.  Also registers the legacy
 * kern.hostid compat integer.
 */
1912 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1915 error
= sysctl_io_string(req
, domainname
, sizeof(domainname
), 0, &changed
);
1917 domainnamelen
= strlen(domainname
);
1922 SYSCTL_PROC(_kern
, KERN_DOMAINNAME
, nisdomainname
,
1923 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1924 0, 0, sysctl_domainname
, "A", "");
1926 SYSCTL_COMPAT_INT(_kern
, KERN_HOSTID
, hostid
,
1927 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
/*
 * kern.hostname handler: read/write the hostname; on a write the cached
 * hostnamelen is taken from the request's new-value length.
 */
1932 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1935 error
= sysctl_io_string(req
, hostname
, sizeof(hostname
), 1, &changed
);
1937 hostnamelen
= req
->newlen
;
1943 SYSCTL_PROC(_kern
, KERN_HOSTNAME
, hostname
,
1944 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1945 0, 0, sysctl_hostname
, "A", "");
/*
 * kern.procname handler: exposes the requesting process's own p_name buffer
 * (read/write, any user -- see CTLFLAG_ANYBODY below).
 */
1949 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1951 /* Original code allowed writing, I'm copying this, although this all makes
1952 no sense to me. Besides, this sysctl is never used. */
1953 return sysctl_io_string(req
, &req
->p
->p_name
[0], (2*MAXCOMLEN
+1), 1, NULL
);
1956 SYSCTL_PROC(_kern
, KERN_PROCNAME
, procname
,
1957 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
1958 0, 0, sysctl_procname
, "A", "");
/*
 * Direct-variable sysctls for speculative-read / page-cache / VM pageout
 * tuning, plus the boot session UUID string.
 */
1960 SYSCTL_INT(_kern
, KERN_SPECULATIVE_READS
, speculative_reads_disabled
,
1961 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1962 &speculative_reads_disabled
, 0, "");
1964 SYSCTL_UINT(_kern
, OID_AUTO
, preheat_max_bytes
,
1965 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1966 &preheat_max_bytes
, 0, "");
1968 SYSCTL_UINT(_kern
, OID_AUTO
, preheat_min_bytes
,
1969 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1970 &preheat_min_bytes
, 0, "");
1972 SYSCTL_UINT(_kern
, OID_AUTO
, speculative_prefetch_max
,
1973 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1974 &speculative_prefetch_max
, 0, "");
1976 SYSCTL_UINT(_kern
, OID_AUTO
, speculative_prefetch_max_iosize
,
1977 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1978 &speculative_prefetch_max_iosize
, 0, "");
1980 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_free_target
,
1981 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1982 &vm_page_free_target
, 0, "");
1984 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_free_min
,
1985 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1986 &vm_page_free_min
, 0, "");
1988 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_free_reserved
,
1989 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1990 &vm_page_free_reserved
, 0, "");
1992 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_speculative_percentage
,
1993 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1994 &vm_page_speculative_percentage
, 0, "");
1996 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_speculative_q_age_ms
,
1997 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1998 &vm_page_speculative_q_age_ms
, 0, "");
2000 SYSCTL_UINT(_kern
, OID_AUTO
, vm_max_delayed_work_limit
,
2001 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2002 &vm_max_delayed_work_limit
, 0, "");
2004 SYSCTL_UINT(_kern
, OID_AUTO
, vm_max_batch
,
2005 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2006 &vm_max_batch
, 0, "");
2008 SYSCTL_STRING(_kern
, OID_AUTO
, bootsessionuuid
,
2009 CTLFLAG_RD
| CTLFLAG_LOCKED
,
2010 &bootsessionuuid_string
, sizeof(bootsessionuuid_string
) , "");
/*
 * kern.boottime handler: copies the boot timeval out in the ABI-correct
 * struct (user64_timeval vs user32_timeval) for the requesting process.
 */
2014 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2017 boottime_timeval(&tv
);
2018 struct proc
*p
= req
->p
;
2020 if (proc_is64bit(p
)) {
2021 struct user64_timeval t
= {};
2022 t
.tv_sec
= tv
.tv_sec
;
2023 t
.tv_usec
= tv
.tv_usec
;
2024 return sysctl_io_opaque(req
, &t
, sizeof(t
), NULL
);
/* 32-bit caller: same values, narrower struct layout. */
2026 struct user32_timeval t
= {};
2027 t
.tv_sec
= tv
.tv_sec
;
2028 t
.tv_usec
= tv
.tv_usec
;
2029 return sysctl_io_opaque(req
, &t
, sizeof(t
), NULL
);
2033 SYSCTL_PROC(_kern
, KERN_BOOTTIME
, boottime
,
2034 CTLTYPE_STRUCT
| CTLFLAG_KERN
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2035 0, 0, sysctl_boottime
, "S,timeval", "");
/*
 * kern.symfile handler: returns the kernel symbol-file path obtained from
 * get_kernel_symfile() for the requesting process.
 */
2039 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2042 int error
= get_kernel_symfile(req
->p
, &str
);
2045 return sysctl_io_string(req
, str
, 0, 0, NULL
);
2049 SYSCTL_PROC(_kern
, KERN_SYMFILE
, symfile
,
2050 CTLTYPE_STRING
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2051 0, 0, sysctl_symfile
, "A", "");
/* kern.netboot handler: reports netboot_root() as a read-only int. */
2056 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2058 return sysctl_io_number(req
, netboot_root(), sizeof(int), NULL
, NULL
);
2061 SYSCTL_PROC(_kern
, KERN_NETBOOT
, netboot
,
2062 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2063 0, 0, sysctl_netboot
, "I", "");
/*
 * kern.imgsrcdev (CONFIG_IMGSRC_ACCESS, legacy single-level variant):
 * superuser-only; reports the dev_t of the device backing the first
 * image-boot root vnode.  Takes and drops vnode refs around the read.
 */
2066 #ifdef CONFIG_IMGSRC_ACCESS
2068 * Legacy--act as if only one layer of nesting is possible.
2072 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2074 vfs_context_t ctx
= vfs_context_current();
2078 if (!vfs_context_issuser(ctx
)) {
2082 if (imgsrc_rootvnodes
[0] == NULL
) {
2086 result
= vnode_getwithref(imgsrc_rootvnodes
[0]);
2091 devvp
= vnode_mount(imgsrc_rootvnodes
[0])->mnt_devvp
;
2092 result
= vnode_getwithref(devvp
);
2097 result
= sysctl_io_number(req
, vnode_specrdev(devvp
), sizeof(dev_t
), NULL
, NULL
);
2101 vnode_put(imgsrc_rootvnodes
[0]);
2105 SYSCTL_PROC(_kern
, OID_AUTO
, imgsrcdev
,
2106 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2107 0, 0, sysctl_imgsrcdev
, "I", "");
/*
 * kern.imgsrcinfo handler: walks up to MAX_IMAGEBOOT_NESTING image-boot root
 * vnodes, recording each one's backing dev_t and nesting height into an
 * imgsrc_info array which is copied out as opaque data (i entries).
 */
2111 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2114 struct imgsrc_info info
[MAX_IMAGEBOOT_NESTING
] = {}; /* 2 for now, no problem */
2118 if (imgsrc_rootvnodes
[0] == NULLVP
) {
2122 for (i
= 0; i
< MAX_IMAGEBOOT_NESTING
; i
++) {
2124 * Go get the root vnode.
2126 rvp
= imgsrc_rootvnodes
[i
];
2127 if (rvp
== NULLVP
) {
2131 error
= vnode_get(rvp
);
2137 * For now, no getting at a non-local volume.
2139 devvp
= vnode_mount(rvp
)->mnt_devvp
;
2140 if (devvp
== NULL
) {
2145 error
= vnode_getwithref(devvp
);
/* Record this nesting level's device and height. */
2154 info
[i
].ii_dev
= vnode_specrdev(devvp
);
2155 info
[i
].ii_flags
= 0;
2156 info
[i
].ii_height
= i
;
2157 bzero(info
[i
].ii_reserved
, sizeof(info
[i
].ii_reserved
));
2163 return sysctl_io_opaque(req
, info
, i
* sizeof(info
[0]), NULL
);
2166 SYSCTL_PROC(_kern
, OID_AUTO
, imgsrcinfo
,
2167 CTLTYPE_OPAQUE
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2168 0, 0, sysctl_imgsrcinfo
, "I", "");
2170 #endif /* CONFIG_IMGSRC_ACCESS */
/*
 * kern.timer node: coalescing enable flag, deadline-tracking bins, and the
 * kern.timer.longterm subtree.  The enum below indexes the longterm timer
 * statistics and must stay in sync with osfmk/kern/timer_call.c.
 */
2173 SYSCTL_DECL(_kern_timer
);
2174 SYSCTL_NODE(_kern
, OID_AUTO
, timer
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, "timer");
2177 SYSCTL_INT(_kern_timer
, OID_AUTO
, coalescing_enabled
,
2178 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2179 &mach_timer_coalescing_enabled
, 0, "");
2181 SYSCTL_QUAD(_kern_timer
, OID_AUTO
, deadline_tracking_bin_1
,
2182 CTLFLAG_RW
| CTLFLAG_LOCKED
,
2183 &timer_deadline_tracking_bin_1
, "");
2184 SYSCTL_QUAD(_kern_timer
, OID_AUTO
, deadline_tracking_bin_2
,
2185 CTLFLAG_RW
| CTLFLAG_LOCKED
,
2186 &timer_deadline_tracking_bin_2
, "");
2188 SYSCTL_DECL(_kern_timer_longterm
);
2189 SYSCTL_NODE(_kern_timer
, OID_AUTO
, longterm
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, "longterm");
2192 /* Must match definition in osfmk/kern/timer_call.c */
2195 ENQUEUES
, DEQUEUES
, ESCALATES
, SCANS
, PREEMPTS
,
2196 LATENCY
, LATENCY_MIN
, LATENCY_MAX
, SCAN_LIMIT
, PAUSES
2198 extern uint64_t timer_sysctl_get(int);
2199 extern int timer_sysctl_set(int, uint64_t);
2199 extern int timer_sysctl_set(int, uint64_t);
2203 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2205 int oid
= (int)arg1
;
2206 uint64_t value
= timer_sysctl_get(oid
);
2211 error
= sysctl_io_number(req
, value
, sizeof(value
), &new_value
, &changed
);
2213 error
= timer_sysctl_set(oid
, new_value
);
/*
 * kern.timer.longterm.* OIDs, all routed through sysctl_timer with the
 * selector (THRESHOLD/SCAN_LIMIT/QCOUNT/stat enum value) in arg1.
 * threshold and scan_limit are writable; the statistics are read-only.
 */
2218 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, threshold
,
2219 CTLTYPE_QUAD
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2220 (void *) THRESHOLD
, 0, sysctl_timer
, "Q", "");
2221 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, scan_limit
,
2222 CTLTYPE_QUAD
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2223 (void *) SCAN_LIMIT
, 0, sysctl_timer
, "Q", "");
2224 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, qlen
,
2225 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2226 (void *) QCOUNT
, 0, sysctl_timer
, "Q", "");
2228 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, enqueues
,
2229 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2230 (void *) ENQUEUES
, 0, sysctl_timer
, "Q", "");
2231 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, dequeues
,
2232 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2233 (void *) DEQUEUES
, 0, sysctl_timer
, "Q", "");
2234 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, escalates
,
2235 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2236 (void *) ESCALATES
, 0, sysctl_timer
, "Q", "");
2237 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, scans
,
2238 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2239 (void *) SCANS
, 0, sysctl_timer
, "Q", "");
2240 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, preempts
,
2241 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2242 (void *) PREEMPTS
, 0, sysctl_timer
, "Q", "");
2243 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, latency
,
2244 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2245 (void *) LATENCY
, 0, sysctl_timer
, "Q", "");
2246 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, latency_min
,
2247 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2248 (void *) LATENCY_MIN
, 0, sysctl_timer
, "Q", "");
2249 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, latency_max
,
2250 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2251 (void *) LATENCY_MAX
, 0, sysctl_timer
, "Q", "");
2252 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, scan_pauses
,
2253 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2254 (void *) PAUSES
, 0, sysctl_timer
, "Q", "");
/*
 * kern.usrstack (32-bit view): returns the requesting process's user stack
 * address truncated to int.
 */
2259 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2261 return sysctl_io_number(req
, (int)req
->p
->user_stack
, sizeof(int), NULL
, NULL
);
2264 SYSCTL_PROC(_kern
, KERN_USRSTACK32
, usrstack
,
2265 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2266 0, 0, sysctl_usrstack
, "I", "");
/*
 * kern.usrstack64: full-width user stack address of the requesting process,
 * plus the kern.corefile path string.
 */
2270 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2272 return sysctl_io_number(req
, req
->p
->user_stack
, sizeof(req
->p
->user_stack
), NULL
, NULL
);
2275 SYSCTL_PROC(_kern
, KERN_USRSTACK64
, usrstack64
,
2276 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2277 0, 0, sysctl_usrstack64
, "Q", "");
2281 SYSCTL_STRING(_kern
, KERN_COREFILE
, corefile
,
2282 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2283 corefilename
, sizeof(corefilename
), "");
/*
 * kern.coredump handler: boolean toggle for core dumps; a no-op stub under
 * SECURE_KERNEL (that branch's body is lost to the extraction gap).
 */
2287 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2289 #ifdef SECURE_KERNEL
2293 int new_value
, changed
;
2294 int error
= sysctl_io_number(req
, do_coredump
, sizeof(int), &new_value
, &changed
);
/* Only 0 or 1 are accepted. */
2296 if ((new_value
== 0) || (new_value
== 1))
2297 do_coredump
= new_value
;
2305 SYSCTL_PROC(_kern
, KERN_COREDUMP
, coredump
,
2306 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2307 0, 0, sysctl_coredump
, "I", "");
2310 sysctl_suid_coredump
2311 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2313 #ifdef SECURE_KERNEL
2317 int new_value
, changed
;
2318 int error
= sysctl_io_number(req
, sugid_coredump
, sizeof(int), &new_value
, &changed
);
2320 if ((new_value
== 0) || (new_value
== 1))
2321 sugid_coredump
= new_value
;
2329 SYSCTL_PROC(_kern
, KERN_SUGID_COREDUMP
, sugid_coredump
,
2330 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2331 0, 0, sysctl_suid_coredump
, "I", "");
2333 #endif /* CONFIG_COREDUMP */
2337 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2339 struct proc
*p
= req
->p
;
2340 int new_value
, changed
;
2341 int error
= sysctl_io_number(req
, (req
->p
->p_lflag
& P_LDELAYTERM
)? 1: 0, sizeof(int), &new_value
, &changed
);
2345 req
->p
->p_lflag
|= P_LDELAYTERM
;
2347 req
->p
->p_lflag
&= ~P_LDELAYTERM
;
2353 SYSCTL_PROC(_kern
, KERN_PROCDELAYTERM
, delayterm
,
2354 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2355 0, 0, sysctl_delayterm
, "I", "");
2360 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2362 struct proc
*p
= req
->p
;
2364 int new_value
, old_value
, changed
;
2367 ut
= get_bsdthread_info(current_thread());
2369 if (ut
->uu_flag
& UT_RAGE_VNODES
)
2370 old_value
= KERN_RAGE_THREAD
;
2371 else if (p
->p_lflag
& P_LRAGE_VNODES
)
2372 old_value
= KERN_RAGE_PROC
;
2376 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
2379 switch (new_value
) {
2380 case KERN_RAGE_PROC
:
2382 p
->p_lflag
|= P_LRAGE_VNODES
;
2385 case KERN_UNRAGE_PROC
:
2387 p
->p_lflag
&= ~P_LRAGE_VNODES
;
2391 case KERN_RAGE_THREAD
:
2392 ut
->uu_flag
|= UT_RAGE_VNODES
;
2394 case KERN_UNRAGE_THREAD
:
2395 ut
= get_bsdthread_info(current_thread());
2396 ut
->uu_flag
&= ~UT_RAGE_VNODES
;
2403 SYSCTL_PROC(_kern
, KERN_RAGEVNODE
, rage_vnode
,
2404 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
2405 0, 0, sysctl_rage_vnode
, "I", "");
2407 /* XXX move this interface into libproc and remove this sysctl */
2409 sysctl_setthread_cpupercent
2410 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2412 int new_value
, old_value
;
2414 kern_return_t kret
= KERN_SUCCESS
;
2415 uint8_t percent
= 0;
2423 if ((error
= sysctl_io_number(req
, old_value
, sizeof(old_value
), &new_value
, NULL
)) != 0)
2426 percent
= new_value
& 0xff; /* low 8 bytes for perent */
2427 ms_refill
= (new_value
>> 8) & 0xffffff; /* upper 24bytes represent ms refill value */
2432 * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present.
2434 if ((kret
= thread_set_cpulimit(THREAD_CPULIMIT_BLOCK
, percent
, ms_refill
* (int)NSEC_PER_MSEC
)) != 0)
2440 SYSCTL_PROC(_kern
, OID_AUTO
, setthread_cpupercent
,
2441 CTLTYPE_INT
| CTLFLAG_WR
| CTLFLAG_ANYBODY
,
2442 0, 0, sysctl_setthread_cpupercent
, "I", "set thread cpu percentage limit");
2446 sysctl_kern_check_openevt
2447 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2449 struct proc
*p
= req
->p
;
2450 int new_value
, old_value
, changed
;
2453 if (p
->p_flag
& P_CHECKOPENEVT
) {
2454 old_value
= KERN_OPENEVT_PROC
;
2459 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
2462 switch (new_value
) {
2463 case KERN_OPENEVT_PROC
:
2464 OSBitOrAtomic(P_CHECKOPENEVT
, &p
->p_flag
);
2467 case KERN_UNOPENEVT_PROC
:
2468 OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT
), &p
->p_flag
);
2478 SYSCTL_PROC(_kern
, KERN_CHECKOPENEVT
, check_openevt
, CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
2479 0, 0, sysctl_kern_check_openevt
, "I", "set the per-process check-open-evt flag");
2485 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2487 #ifdef SECURE_KERNEL
2491 int new_value
, changed
;
2494 error
= sysctl_io_number(req
, nx_enabled
, sizeof(nx_enabled
), &new_value
, &changed
);
2499 #if defined(__i386__) || defined(__x86_64__)
2501 * Only allow setting if NX is supported on the chip
2503 if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD
))
2506 nx_enabled
= new_value
;
2509 #endif /* SECURE_KERNEL */
2514 SYSCTL_PROC(_kern
, KERN_NX_PROTECTION
, nx
,
2515 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2516 0, 0, sysctl_nx
, "I", "");
2520 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2522 if (proc_is64bit(req
->p
)) {
2523 struct user64_loadavg loadinfo64
= {};
2524 fill_loadavg64(&averunnable
, &loadinfo64
);
2525 return sysctl_io_opaque(req
, &loadinfo64
, sizeof(loadinfo64
), NULL
);
2527 struct user32_loadavg loadinfo32
= {};
2528 fill_loadavg32(&averunnable
, &loadinfo32
);
2529 return sysctl_io_opaque(req
, &loadinfo32
, sizeof(loadinfo32
), NULL
);
2533 SYSCTL_PROC(_vm
, VM_LOADAVG
, loadavg
,
2534 CTLTYPE_STRUCT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2535 0, 0, sysctl_loadavg
, "S,loadavg", "");
2538 * Note: Thread safe; vm_map_lock protects in vm_toggle_entry_reuse()
2541 sysctl_vm_toggle_address_reuse(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
,
2542 __unused
int arg2
, struct sysctl_req
*req
)
2544 int old_value
=0, new_value
=0, error
=0;
2546 if(vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE
, &old_value
))
2548 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, NULL
);
2550 return (vm_toggle_entry_reuse(new_value
, NULL
));
2555 SYSCTL_PROC(_debug
, OID_AUTO
, toggle_address_reuse
, CTLFLAG_ANYBODY
| CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, 0, sysctl_vm_toggle_address_reuse
,"I","");
2560 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2563 uint64_t swap_total
;
2564 uint64_t swap_avail
;
2565 vm_size_t swap_pagesize
;
2566 boolean_t swap_encrypted
;
2567 struct xsw_usage xsu
= {};
2569 error
= macx_swapinfo(&swap_total
,
2576 xsu
.xsu_total
= swap_total
;
2577 xsu
.xsu_avail
= swap_avail
;
2578 xsu
.xsu_used
= swap_total
- swap_avail
;
2579 xsu
.xsu_pagesize
= swap_pagesize
;
2580 xsu
.xsu_encrypted
= swap_encrypted
;
2581 return sysctl_io_opaque(req
, &xsu
, sizeof(xsu
), NULL
);
2586 SYSCTL_PROC(_vm
, VM_SWAPUSAGE
, swapusage
,
2587 CTLTYPE_STRUCT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2588 0, 0, sysctl_swapusage
, "S,xsw_usage", "");
/*
 * vm.freeze_enabled: enable/disable memorystatus freeze. Rejected when the
 * VM compressor is active. When freeze is being turned off, previously
 * throttled dirty pages are moved back to the active queue.
 * NOTE(review): the surrounding "#if CONFIG_FREEZE" opener and several
 * guard/return lines were dropped by the extraction and restored — verify
 * against upstream.
 */
#if CONFIG_FREEZE
extern void vm_page_reactivate_all_throttled(void);

static int
sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error, val = memorystatus_freeze_enabled ? 1 : 0;
	boolean_t disabled;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
		//assert(req->newptr);
		printf("Failed attempt to set vm.freeze_enabled sysctl\n");
		return EINVAL;
	}

	/*
	 * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue.
	 */
	disabled = (!val && memorystatus_freeze_enabled);

	memorystatus_freeze_enabled = val ? TRUE : FALSE;

	if (disabled) {
		vm_page_reactivate_all_throttled();
	}

	return (0);
}

SYSCTL_PROC(_vm, OID_AUTO, freeze_enabled, CTLTYPE_INT | CTLFLAG_RW,
	    &memorystatus_freeze_enabled, 0, sysctl_freeze_enabled, "I", "");
#endif /* CONFIG_FREEZE */
2627 /* this kernel does NOT implement shared_region_make_private_np() */
2628 SYSCTL_INT(_kern
, KERN_SHREG_PRIVATIZABLE
, shreg_private
,
2629 CTLFLAG_RD
| CTLFLAG_LOCKED
,
2630 (int *)NULL
, 0, "");
2633 fetch_process_cputype(
2637 cpu_type_t
*cputype
)
2639 proc_t p
= PROC_NULL
;
2646 else if (namelen
== 1) {
2647 p
= proc_find(name
[0]);
2656 ret
= cpu_type() & ~CPU_ARCH_MASK
;
2657 if (IS_64BIT_PROCESS(p
))
2658 ret
|= CPU_ARCH_ABI64
;
2669 sysctl_sysctl_native(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
,
2670 struct sysctl_req
*req
)
2673 cpu_type_t proc_cputype
= 0;
2674 if ((error
= fetch_process_cputype(req
->p
, (int *)arg1
, arg2
, &proc_cputype
)) != 0)
2677 if ((proc_cputype
& ~CPU_ARCH_MASK
) != (cpu_type() & ~CPU_ARCH_MASK
))
2679 return SYSCTL_OUT(req
, &res
, sizeof(res
));
2681 SYSCTL_PROC(_sysctl
, OID_AUTO
, proc_native
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, 0, sysctl_sysctl_native
,"I","proc_native");
2684 sysctl_sysctl_cputype(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
,
2685 struct sysctl_req
*req
)
2688 cpu_type_t proc_cputype
= 0;
2689 if ((error
= fetch_process_cputype(req
->p
, (int *)arg1
, arg2
, &proc_cputype
)) != 0)
2691 return SYSCTL_OUT(req
, &proc_cputype
, sizeof(proc_cputype
));
2693 SYSCTL_PROC(_sysctl
, OID_AUTO
, proc_cputype
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, 0, sysctl_sysctl_cputype
,"I","proc_cputype");
2697 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2699 return sysctl_io_number(req
, boothowto
& RB_SAFEBOOT
? 1 : 0, sizeof(int), NULL
, NULL
);
2702 SYSCTL_PROC(_kern
, KERN_SAFEBOOT
, safeboot
,
2703 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2704 0, 0, sysctl_safeboot
, "I", "");
2708 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2710 return sysctl_io_number(req
, boothowto
& RB_SINGLE
? 1 : 0, sizeof(int), NULL
, NULL
);
2713 SYSCTL_PROC(_kern
, OID_AUTO
, singleuser
,
2714 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2715 0, 0, sysctl_singleuser
, "I", "");
2717 STATIC
int sysctl_minimalboot
2718 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2720 return sysctl_io_number(req
, minimalboot
, sizeof(int), NULL
, NULL
);
2723 SYSCTL_PROC(_kern
, OID_AUTO
, minimalboot
,
2724 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2725 0, 0, sysctl_minimalboot
, "I", "");
2728 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
2730 extern boolean_t affinity_sets_enabled
;
2731 extern int affinity_sets_mapping
;
2733 SYSCTL_INT (_kern
, OID_AUTO
, affinity_sets_enabled
,
2734 CTLFLAG_RW
| CTLFLAG_LOCKED
, (int *) &affinity_sets_enabled
, 0, "hinting enabled");
2735 SYSCTL_INT (_kern
, OID_AUTO
, affinity_sets_mapping
,
2736 CTLFLAG_RW
| CTLFLAG_LOCKED
, &affinity_sets_mapping
, 0, "mapping policy");
2739 * Boolean indicating if KASLR is active.
2743 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2747 slide
= vm_kernel_slide
? 1 : 0;
2749 return sysctl_io_number( req
, slide
, sizeof(int), NULL
, NULL
);
2752 SYSCTL_PROC(_kern
, OID_AUTO
, slide
,
2753 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2754 0, 0, sysctl_slide
, "I", "");
2757 * Limit on total memory users can wire.
2759 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
2761 * vm_user_wire_limit - per address space limit on wired memory. This puts a cap on the process's rlimit value.
2763 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
2766 * All values are in bytes.
2769 vm_map_size_t vm_global_no_user_wire_amount
;
2770 vm_map_size_t vm_global_user_wire_limit
;
2771 vm_map_size_t vm_user_wire_limit
;
2774 * There needs to be a more automatic/elegant way to do this
2776 #if defined(__ARM__)
2777 SYSCTL_INT(_vm
, OID_AUTO
, global_no_user_wire_amount
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_global_no_user_wire_amount
, 0, "");
2778 SYSCTL_INT(_vm
, OID_AUTO
, global_user_wire_limit
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_global_user_wire_limit
, 0, "");
2779 SYSCTL_INT(_vm
, OID_AUTO
, user_wire_limit
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_user_wire_limit
, 0, "");
2781 SYSCTL_QUAD(_vm
, OID_AUTO
, global_no_user_wire_amount
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_global_no_user_wire_amount
, "");
2782 SYSCTL_QUAD(_vm
, OID_AUTO
, global_user_wire_limit
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_global_user_wire_limit
, "");
2783 SYSCTL_QUAD(_vm
, OID_AUTO
, user_wire_limit
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_user_wire_limit
, "");
2786 extern int vm_map_copy_overwrite_aligned_src_not_internal
;
2787 extern int vm_map_copy_overwrite_aligned_src_not_symmetric
;
2788 extern int vm_map_copy_overwrite_aligned_src_large
;
2789 SYSCTL_INT(_vm
, OID_AUTO
, vm_copy_src_not_internal
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_map_copy_overwrite_aligned_src_not_internal
, 0, "");
2790 SYSCTL_INT(_vm
, OID_AUTO
, vm_copy_src_not_symmetric
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_map_copy_overwrite_aligned_src_not_symmetric
, 0, "");
2791 SYSCTL_INT(_vm
, OID_AUTO
, vm_copy_src_large
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_map_copy_overwrite_aligned_src_large
, 0, "");
2794 extern uint32_t vm_page_external_count
;
2795 extern uint32_t vm_page_filecache_min
;
2797 SYSCTL_INT(_vm
, OID_AUTO
, vm_page_external_count
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_page_external_count
, 0, "");
2798 SYSCTL_INT(_vm
, OID_AUTO
, vm_page_filecache_min
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_page_filecache_min
, 0, "");
2800 extern int vm_compressor_mode
;
2801 extern int vm_compressor_is_active
;
2802 extern int vm_compressor_available
;
2803 extern uint32_t vm_ripe_target_age
;
2804 extern uint32_t swapout_target_age
;
2805 extern int64_t compressor_bytes_used
;
2806 extern int64_t c_segment_input_bytes
;
2807 extern int64_t c_segment_compressed_bytes
;
2808 extern uint32_t compressor_eval_period_in_msecs
;
2809 extern uint32_t compressor_sample_min_in_msecs
;
2810 extern uint32_t compressor_sample_max_in_msecs
;
2811 extern uint32_t compressor_thrashing_threshold_per_10msecs
;
2812 extern uint32_t compressor_thrashing_min_per_10msecs
;
2813 extern uint32_t vm_compressor_minorcompact_threshold_divisor
;
2814 extern uint32_t vm_compressor_majorcompact_threshold_divisor
;
2815 extern uint32_t vm_compressor_unthrottle_threshold_divisor
;
2816 extern uint32_t vm_compressor_catchup_threshold_divisor
;
2817 extern uint32_t vm_compressor_time_thread
;
2818 #if DEVELOPMENT || DEBUG
2819 extern vmct_stats_t vmct_stats
;
2822 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_input_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &c_segment_input_bytes
, "");
2823 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_compressed_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &c_segment_compressed_bytes
, "");
2824 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_bytes_used
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_bytes_used
, "");
2826 SYSCTL_INT(_vm
, OID_AUTO
, compressor_mode
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_compressor_mode
, 0, "");
2827 SYSCTL_INT(_vm
, OID_AUTO
, compressor_is_active
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_compressor_is_active
, 0, "");
2828 SYSCTL_INT(_vm
, OID_AUTO
, compressor_swapout_target_age
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &swapout_target_age
, 0, "");
2829 SYSCTL_INT(_vm
, OID_AUTO
, compressor_available
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_compressor_available
, 0, "");
2831 SYSCTL_INT(_vm
, OID_AUTO
, vm_ripe_target_age_in_secs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_ripe_target_age
, 0, "");
2833 SYSCTL_INT(_vm
, OID_AUTO
, compressor_eval_period_in_msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_eval_period_in_msecs
, 0, "");
2834 SYSCTL_INT(_vm
, OID_AUTO
, compressor_sample_min_in_msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_sample_min_in_msecs
, 0, "");
2835 SYSCTL_INT(_vm
, OID_AUTO
, compressor_sample_max_in_msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_sample_max_in_msecs
, 0, "");
2836 SYSCTL_INT(_vm
, OID_AUTO
, compressor_thrashing_threshold_per_10msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_thrashing_threshold_per_10msecs
, 0, "");
2837 SYSCTL_INT(_vm
, OID_AUTO
, compressor_thrashing_min_per_10msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_thrashing_min_per_10msecs
, 0, "");
2838 SYSCTL_INT(_vm
, OID_AUTO
, compressor_minorcompact_threshold_divisor
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_minorcompact_threshold_divisor
, 0, "");
2839 SYSCTL_INT(_vm
, OID_AUTO
, compressor_majorcompact_threshold_divisor
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_majorcompact_threshold_divisor
, 0, "");
2840 SYSCTL_INT(_vm
, OID_AUTO
, compressor_unthrottle_threshold_divisor
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_unthrottle_threshold_divisor
, 0, "");
2841 SYSCTL_INT(_vm
, OID_AUTO
, compressor_catchup_threshold_divisor
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_catchup_threshold_divisor
, 0, "");
2843 SYSCTL_STRING(_vm
, OID_AUTO
, swapfileprefix
, CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
, swapfilename
, sizeof(swapfilename
) - SWAPFILENAME_INDEX_LEN
, "");
2845 SYSCTL_INT(_vm
, OID_AUTO
, compressor_timing_enabled
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_time_thread
, 0, "");
/*
 * Per-compressor-thread statistics (development/debug kernels only).
 * NOTE(review): the closing #endif was dropped by the extraction and
 * restored — verify upstream.
 */
#if DEVELOPMENT || DEBUG
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_runtime0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_runtimes[0], "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_runtime1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_runtimes[1], "");

SYSCTL_QUAD(_vm, OID_AUTO, compressor_threads_total, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_cthreads_total, "");

SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_pages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_pages[0], "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_pages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_pages[1], "");

SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_iterations0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_iterations[0], "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_iterations1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_iterations[1], "");

SYSCTL_INT(_vm, OID_AUTO, compressor_thread_minpages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_minpages[0], 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thread_minpages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_minpages[1], 0, "");

SYSCTL_INT(_vm, OID_AUTO, compressor_thread_maxpages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_maxpages[0], 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thread_maxpages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_maxpages[1], 0, "");
#endif
2867 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_compressions
, "");
2868 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_compression_failures
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_compression_failures
, "");
2869 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_compressed_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_compressed_bytes
, "");
2870 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_wk_compression_delta
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_wk_compression_delta
, "");
2871 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_wk_compression_negative_delta
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_wk_compression_negative_delta
, "");
2873 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_decompressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_decompressions
, "");
2874 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_decompressed_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_decompressed_bytes
, "");
2876 SYSCTL_QUAD(_vm
, OID_AUTO
, uc_decompressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.uc_decompressions
, "");
2878 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_compressions
, "");
2880 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_catime
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_cabstime
, "");
2882 SYSCTL_QUAD(_vm
, OID_AUTO
, wkh_catime
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wkh_cabstime
, "");
2883 SYSCTL_QUAD(_vm
, OID_AUTO
, wkh_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wkh_compressions
, "");
2885 SYSCTL_QUAD(_vm
, OID_AUTO
, wks_catime
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wks_cabstime
, "");
2886 SYSCTL_QUAD(_vm
, OID_AUTO
, wks_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wks_compressions
, "");
2888 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_compressions_exclusive
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_compressions_exclusive
, "");
2889 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_sv_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_sv_compressions
, "");
2890 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_mzv_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_mzv_compressions
, "");
2891 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_compression_failures
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_compression_failures
, "");
2892 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_compressed_bytes_exclusive
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_compressed_bytes_exclusive
, "");
2893 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_compressed_bytes_total
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_compressed_bytes_total
, "");
2895 SYSCTL_QUAD(_vm
, OID_AUTO
, wks_compressed_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wks_compressed_bytes
, "");
2896 SYSCTL_QUAD(_vm
, OID_AUTO
, wks_compression_failures
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wks_compression_failures
, "");
2897 SYSCTL_QUAD(_vm
, OID_AUTO
, wks_sv_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wks_sv_compressions
, "");
2900 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_decompressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_decompressions
, "");
2902 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_datime
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_dabstime
, "");
2904 SYSCTL_QUAD(_vm
, OID_AUTO
, wkh_datime
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wkh_dabstime
, "");
2905 SYSCTL_QUAD(_vm
, OID_AUTO
, wkh_decompressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wkh_decompressions
, "");
2907 SYSCTL_QUAD(_vm
, OID_AUTO
, wks_datime
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wks_dabstime
, "");
2908 SYSCTL_QUAD(_vm
, OID_AUTO
, wks_decompressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wks_decompressions
, "");
2910 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_decompressed_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_decompressed_bytes
, "");
2911 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_sv_decompressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_sv_decompressions
, "");
2913 SYSCTL_INT(_vm
, OID_AUTO
, lz4_threshold
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_threshold
, 0, "");
2914 SYSCTL_INT(_vm
, OID_AUTO
, wkdm_reeval_threshold
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.wkdm_reeval_threshold
, 0, "");
2915 SYSCTL_INT(_vm
, OID_AUTO
, lz4_max_failure_skips
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_max_failure_skips
, 0, "");
2916 SYSCTL_INT(_vm
, OID_AUTO
, lz4_max_failure_run_length
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_max_failure_run_length
, 0, "");
2917 SYSCTL_INT(_vm
, OID_AUTO
, lz4_max_preselects
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_max_preselects
, 0, "");
2918 SYSCTL_INT(_vm
, OID_AUTO
, lz4_run_preselection_threshold
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_run_preselection_threshold
, 0, "");
2919 SYSCTL_INT(_vm
, OID_AUTO
, lz4_run_continue_bytes
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_run_continue_bytes
, 0, "");
2920 SYSCTL_INT(_vm
, OID_AUTO
, lz4_profitable_bytes
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_profitable_bytes
, 0, "");
/*
 * Compressor codec selection / test knobs (development/debug kernels only).
 * NOTE(review): the closing #endif was dropped by the extraction and
 * restored — verify upstream.
 */
#if DEVELOPMENT || DEBUG
extern int vm_compressor_current_codec;
extern int vm_compressor_test_seg_wp;
extern boolean_t vm_compressor_force_sw_wkdm;
SYSCTL_INT(_vm, OID_AUTO, compressor_codec, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_current_codec, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_test_wp, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_test_seg_wp, 0, "");

SYSCTL_INT(_vm, OID_AUTO, wksw_force, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_force_sw_wkdm, 0, "");
extern int precompy, wkswhw;

SYSCTL_INT(_vm, OID_AUTO, precompy, CTLFLAG_RW | CTLFLAG_LOCKED, &precompy, 0, "");
SYSCTL_INT(_vm, OID_AUTO, wkswhw, CTLFLAG_RW | CTLFLAG_LOCKED, &wkswhw, 0, "");
extern unsigned int vm_ktrace_enabled;
SYSCTL_INT(_vm, OID_AUTO, vm_ktrace, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_ktrace_enabled, 0, "");
#endif
/*
 * Phantom-cache thrashing detection tuning.
 * NOTE(review): the closing #endif was dropped by the extraction and
 * restored — verify upstream.
 */
#if CONFIG_PHANTOM_CACHE
extern uint32_t phantom_cache_thrashing_threshold;
extern uint32_t phantom_cache_eval_period_in_msecs;
extern uint32_t phantom_cache_thrashing_threshold_ssd;

SYSCTL_INT(_vm, OID_AUTO, phantom_cache_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_eval_period_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold_ssd, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold_ssd, 0, "");
#endif
/*
 * Background page queue counters and policy knobs.
 * NOTE(review): the closing #endif was dropped by the extraction and
 * restored — verify upstream.
 */
#if CONFIG_BACKGROUND_QUEUE

extern uint32_t	vm_page_background_count;
extern uint32_t	vm_page_background_target;
extern uint32_t	vm_page_background_internal_count;
extern uint32_t	vm_page_background_external_count;
extern uint32_t	vm_page_background_mode;
extern uint32_t	vm_page_background_exclude_external;
extern uint64_t	vm_page_background_promoted_count;
extern uint64_t vm_pageout_considered_bq_internal;
extern uint64_t vm_pageout_considered_bq_external;
extern uint64_t vm_pageout_rejected_bq_internal;
extern uint64_t vm_pageout_rejected_bq_external;

SYSCTL_INT(_vm, OID_AUTO, vm_page_background_mode, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_mode, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_exclude_external, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_exclude_external, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_target, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_target, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_internal_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_internal_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_external_count, 0, "");

SYSCTL_QUAD(_vm, OID_AUTO, vm_page_background_promoted_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_promoted_count, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_considered_bq_internal, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_considered_bq_external, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_internal, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_external, "");
#endif
2977 #if (DEVELOPMENT || DEBUG)
2979 SYSCTL_UINT(_vm
, OID_AUTO
, vm_page_creation_throttled_hard
,
2980 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2981 &vm_page_creation_throttled_hard
, 0, "");
2983 SYSCTL_UINT(_vm
, OID_AUTO
, vm_page_creation_throttled_soft
,
2984 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2985 &vm_page_creation_throttled_soft
, 0, "");
2987 extern uint32_t vm_pageout_memorystatus_fb_factor_nr
;
2988 extern uint32_t vm_pageout_memorystatus_fb_factor_dr
;
2989 SYSCTL_INT(_vm
, OID_AUTO
, vm_pageout_memorystatus_fb_factor_nr
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_pageout_memorystatus_fb_factor_nr
, 0, "");
2990 SYSCTL_INT(_vm
, OID_AUTO
, vm_pageout_memorystatus_fb_factor_dr
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_pageout_memorystatus_fb_factor_dr
, 0, "");
2992 extern uint32_t vm_grab_anon_overrides
;
2993 extern uint32_t vm_grab_anon_nops
;
2995 SYSCTL_INT(_vm
, OID_AUTO
, vm_grab_anon_overrides
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_grab_anon_overrides
, 0, "");
2996 SYSCTL_INT(_vm
, OID_AUTO
, vm_grab_anon_nops
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_grab_anon_nops
, 0, "");
2998 /* log message counters for persistence mode */
2999 extern uint32_t oslog_p_total_msgcount
;
3000 extern uint32_t oslog_p_metadata_saved_msgcount
;
3001 extern uint32_t oslog_p_metadata_dropped_msgcount
;
3002 extern uint32_t oslog_p_error_count
;
3003 extern uint32_t oslog_p_saved_msgcount
;
3004 extern uint32_t oslog_p_dropped_msgcount
;
3005 extern uint32_t oslog_p_boot_dropped_msgcount
;
3007 /* log message counters for streaming mode */
3008 extern uint32_t oslog_s_total_msgcount
;
3009 extern uint32_t oslog_s_metadata_msgcount
;
3010 extern uint32_t oslog_s_error_count
;
3011 extern uint32_t oslog_s_streamed_msgcount
;
3012 extern uint32_t oslog_s_dropped_msgcount
;
3014 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_p_total_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_p_total_msgcount
, 0, "");
3015 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_p_metadata_saved_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_p_metadata_saved_msgcount
, 0, "");
3016 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_p_metadata_dropped_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_p_metadata_dropped_msgcount
, 0, "");
3017 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_p_error_count
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_p_error_count
, 0, "");
3018 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_p_saved_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_p_saved_msgcount
, 0, "");
3019 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_p_dropped_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_p_dropped_msgcount
, 0, "");
3020 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_p_boot_dropped_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_p_boot_dropped_msgcount
, 0, "");
3022 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_s_total_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_s_total_msgcount
, 0, "");
3023 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_s_metadata_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_s_metadata_msgcount
, 0, "");
3024 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_s_error_count
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_s_error_count
, 0, "");
3025 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_s_streamed_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_s_streamed_msgcount
, 0, "");
3026 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_s_dropped_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_s_dropped_msgcount
, 0, "");
3029 #endif /* DEVELOPMENT || DEBUG */
3032 * Enable tracing of voucher contents
3034 extern uint32_t ipc_voucher_trace_contents
;
3036 SYSCTL_INT (_kern
, OID_AUTO
, ipc_voucher_trace_contents
,
3037 CTLFLAG_RW
| CTLFLAG_LOCKED
, &ipc_voucher_trace_contents
, 0, "Enable tracing voucher contents");
3040 * Kernel stack size and depth
3042 SYSCTL_INT (_kern
, OID_AUTO
, stack_size
,
3043 CTLFLAG_RD
| CTLFLAG_LOCKED
, (int *) &kernel_stack_size
, 0, "Kernel stack size");
3044 SYSCTL_INT (_kern
, OID_AUTO
, stack_depth_max
,
3045 CTLFLAG_RD
| CTLFLAG_LOCKED
, (int *) &kernel_stack_depth_max
, 0, "Max kernel stack depth at interrupt or context switch");
3047 extern unsigned int kern_feature_overrides
;
3048 SYSCTL_INT (_kern
, OID_AUTO
, kern_feature_overrides
,
3049 CTLFLAG_RD
| CTLFLAG_LOCKED
, &kern_feature_overrides
, 0, "Kernel feature override mask");
3052 * enable back trace for port allocations
3054 extern int ipc_portbt
;
3056 SYSCTL_INT(_kern
, OID_AUTO
, ipc_portbt
,
3057 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
3058 &ipc_portbt
, 0, "");
3064 SYSCTL_STRING(_kern
, OID_AUTO
, sched
,
3065 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
3066 sched_string
, sizeof(sched_string
),
3067 "Timeshare scheduler implementation");
3070 * Only support runtime modification on embedded platforms
3071 * with development config enabled
3075 extern int precise_user_kernel_time
;
3076 SYSCTL_INT(_kern
, OID_AUTO
, precise_user_kernel_time
,
3077 CTLFLAG_RW
| CTLFLAG_LOCKED
,
3078 &precise_user_kernel_time
, 0, "Precise accounting of kernel vs. user time");
3083 /* Parameters related to timer coalescing tuning, to be replaced
3084 * with a dedicated systemcall in the future.
3086 /* Enable processing pending timers in the context of any other interrupt
3087 * Coalescing tuning parameters for various thread/task attributes */
3089 sysctl_timer_user_us_kernel_abstime SYSCTL_HANDLER_ARGS
3091 #pragma unused(oidp)
3092 int size
= arg2
; /* subcommand*/
3095 uint64_t old_value_ns
;
3096 uint64_t new_value_ns
;
3097 uint64_t value_abstime
;
3098 if (size
== sizeof(uint32_t))
3099 value_abstime
= *((uint32_t *)arg1
);
3100 else if (size
== sizeof(uint64_t))
3101 value_abstime
= *((uint64_t *)arg1
);
3102 else return ENOTSUP
;
3104 absolutetime_to_nanoseconds(value_abstime
, &old_value_ns
);
3105 error
= sysctl_io_number(req
, old_value_ns
, sizeof(old_value_ns
), &new_value_ns
, &changed
);
3106 if ((error
) || (!changed
))
3109 nanoseconds_to_absolutetime(new_value_ns
, &value_abstime
);
3110 if (size
== sizeof(uint32_t))
3111 *((uint32_t *)arg1
) = (uint32_t)value_abstime
;
3113 *((uint64_t *)arg1
) = value_abstime
;
3117 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_bg_scale
,
3118 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3119 &tcoal_prio_params
.timer_coalesce_bg_shift
, 0, "");
3120 SYSCTL_PROC(_kern
, OID_AUTO
, timer_resort_threshold_ns
,
3121 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3122 &tcoal_prio_params
.timer_resort_threshold_abstime
,
3123 sizeof(tcoal_prio_params
.timer_resort_threshold_abstime
),
3124 sysctl_timer_user_us_kernel_abstime
,
3126 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_bg_ns_max
,
3127 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3128 &tcoal_prio_params
.timer_coalesce_bg_abstime_max
,
3129 sizeof(tcoal_prio_params
.timer_coalesce_bg_abstime_max
),
3130 sysctl_timer_user_us_kernel_abstime
,
3133 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_kt_scale
,
3134 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3135 &tcoal_prio_params
.timer_coalesce_kt_shift
, 0, "");
3137 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_kt_ns_max
,
3138 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3139 &tcoal_prio_params
.timer_coalesce_kt_abstime_max
,
3140 sizeof(tcoal_prio_params
.timer_coalesce_kt_abstime_max
),
3141 sysctl_timer_user_us_kernel_abstime
,
3144 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_fp_scale
,
3145 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3146 &tcoal_prio_params
.timer_coalesce_fp_shift
, 0, "");
3148 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_fp_ns_max
,
3149 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3150 &tcoal_prio_params
.timer_coalesce_fp_abstime_max
,
3151 sizeof(tcoal_prio_params
.timer_coalesce_fp_abstime_max
),
3152 sysctl_timer_user_us_kernel_abstime
,
3155 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_ts_scale
,
3156 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3157 &tcoal_prio_params
.timer_coalesce_ts_shift
, 0, "");
3159 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_ts_ns_max
,
3160 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3161 &tcoal_prio_params
.timer_coalesce_ts_abstime_max
,
3162 sizeof(tcoal_prio_params
.timer_coalesce_ts_abstime_max
),
3163 sysctl_timer_user_us_kernel_abstime
,
3166 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier0_scale
,
3167 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3168 &tcoal_prio_params
.latency_qos_scale
[0], 0, "");
3170 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier0_ns_max
,
3171 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3172 &tcoal_prio_params
.latency_qos_abstime_max
[0],
3173 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[0]),
3174 sysctl_timer_user_us_kernel_abstime
,
3177 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier1_scale
,
3178 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3179 &tcoal_prio_params
.latency_qos_scale
[1], 0, "");
3181 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier1_ns_max
,
3182 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3183 &tcoal_prio_params
.latency_qos_abstime_max
[1],
3184 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[1]),
3185 sysctl_timer_user_us_kernel_abstime
,
3188 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier2_scale
,
3189 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3190 &tcoal_prio_params
.latency_qos_scale
[2], 0, "");
3192 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier2_ns_max
,
3193 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3194 &tcoal_prio_params
.latency_qos_abstime_max
[2],
3195 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[2]),
3196 sysctl_timer_user_us_kernel_abstime
,
3199 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier3_scale
,
3200 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3201 &tcoal_prio_params
.latency_qos_scale
[3], 0, "");
3203 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier3_ns_max
,
3204 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3205 &tcoal_prio_params
.latency_qos_abstime_max
[3],
3206 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[3]),
3207 sysctl_timer_user_us_kernel_abstime
,
3210 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier4_scale
,
3211 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3212 &tcoal_prio_params
.latency_qos_scale
[4], 0, "");
3214 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier4_ns_max
,
3215 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3216 &tcoal_prio_params
.latency_qos_abstime_max
[4],
3217 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[4]),
3218 sysctl_timer_user_us_kernel_abstime
,
3221 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier5_scale
,
3222 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3223 &tcoal_prio_params
.latency_qos_scale
[5], 0, "");
3225 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier5_ns_max
,
3226 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3227 &tcoal_prio_params
.latency_qos_abstime_max
[5],
3228 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[5]),
3229 sysctl_timer_user_us_kernel_abstime
,
3232 /* Communicate the "user idle level" heuristic to the timer layer, and
3233 * potentially other layers in the future.
3237 timer_user_idle_level(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
) {
3238 int new_value
= 0, old_value
= 0, changed
= 0, error
;
3240 old_value
= timer_get_user_idle_level();
3242 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
3244 if (error
== 0 && changed
) {
3245 if (timer_set_user_idle_level(new_value
) != KERN_SUCCESS
)
3252 SYSCTL_PROC(_machdep
, OID_AUTO
, user_idle_level
,
3253 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3255 timer_user_idle_level
, "I", "User idle level heuristic, 0-128");
3258 SYSCTL_INT(_kern
, OID_AUTO
, hv_support
,
3259 CTLFLAG_KERN
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
3260 &hv_support_available
, 0, "");
3265 sysctl_darkboot SYSCTL_HANDLER_ARGS
3267 int err
= 0, value
= 0;
3268 #pragma unused(oidp, arg1, arg2, err, value, req)
3271 * Handle the sysctl request.
3273 * If this is a read, the function will set the value to the current darkboot value. Otherwise,
3274 * we'll get the request identifier into "value" and then we can honor it.
3276 if ((err
= sysctl_io_number(req
, darkboot
, sizeof(int), &value
, NULL
)) != 0) {
3280 /* writing requested, let's process the request */
3282 /* writing is protected by an entitlement */
3283 if (priv_check_cred(kauth_cred_get(), PRIV_DARKBOOT
, 0) != 0) {
3289 case MEMORY_MAINTENANCE_DARK_BOOT_UNSET
:
3291 * If the darkboot sysctl is unset, the NVRAM variable
3292 * must be unset too. If that's not the case, it means
3293 * someone is doing something crazy and not supported.
3295 if (darkboot
!= 0) {
3296 int ret
= PERemoveNVRAMProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME
);
3304 case MEMORY_MAINTENANCE_DARK_BOOT_SET
:
3307 case MEMORY_MAINTENANCE_DARK_BOOT_SET_PERSISTENT
: {
3309 * Set the NVRAM and update 'darkboot' in case
3310 * of success. Otherwise, do not update
3311 * 'darkboot' and report the failure.
3313 if (PEWriteNVRAMBooleanProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME
, TRUE
)) {
3330 SYSCTL_PROC(_kern
, OID_AUTO
, darkboot
,
3331 CTLFLAG_KERN
| CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
| CTLFLAG_ANYBODY
,
3332 0, 0, sysctl_darkboot
, "I", "");
 * This is set by core audio to tell tailspin (ie background tracing) how long
 * its smallest buffer is. Background tracing can then try to make reasonable
 * decisions to try to avoid introducing so much latency that the buffers will
 * underflow.
/* Smallest audio buffer length, in microseconds (see comment above). */
int min_audio_buffer_usec;
3345 sysctl_audio_buffer SYSCTL_HANDLER_ARGS
3347 #pragma unused(oidp, arg1, arg2)
3348 int err
= 0, value
= 0, changed
= 0;
3349 err
= sysctl_io_number(req
, min_audio_buffer_usec
, sizeof(int), &value
, &changed
);
3353 /* writing is protected by an entitlement */
3354 if (priv_check_cred(kauth_cred_get(), PRIV_AUDIO_LATENCY
, 0) != 0) {
3358 min_audio_buffer_usec
= value
;
3364 SYSCTL_PROC(_kern
, OID_AUTO
, min_audio_buffer_usec
, CTLFLAG_RW
| CTLFLAG_ANYBODY
, 0, 0, sysctl_audio_buffer
, "I", "Minimum audio buffer size, in microseconds");
3366 #if DEVELOPMENT || DEBUG
3367 #include <sys/sysent.h>
3368 /* This should result in a fatal exception, verifying that "sysent" is
3372 kern_sysent_write(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
) {
3373 uint64_t new_value
= 0, old_value
= 0;
3374 int changed
= 0, error
;
3376 error
= sysctl_io_number(req
, old_value
, sizeof(uint64_t), &new_value
, &changed
);
3377 if ((error
== 0) && changed
) {
3378 volatile uint32_t *wraddr
= (uint32_t *) &sysent
[0];
3380 printf("sysent[0] write succeeded\n");
3385 SYSCTL_PROC(_kern
, OID_AUTO
, sysent_const_check
,
3386 CTLTYPE_QUAD
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3388 kern_sysent_write
, "I", "Attempt sysent[0] write");
3392 #if DEVELOPMENT || DEBUG
3393 SYSCTL_COMPAT_INT(_kern
, OID_AUTO
, development
, CTLFLAG_RD
| CTLFLAG_MASKED
, NULL
, 1, "");
3395 SYSCTL_COMPAT_INT(_kern
, OID_AUTO
, development
, CTLFLAG_RD
| CTLFLAG_MASKED
, NULL
, 0, "");
3399 #if DEVELOPMENT || DEBUG
3402 sysctl_panic_test SYSCTL_HANDLER_ARGS
3404 #pragma unused(arg1, arg2)
3406 char str
[32] = "entry prelog postlog postcore";
3408 rval
= sysctl_handle_string(oidp
, str
, sizeof(str
), req
);
3410 if (rval
== 0 && req
->newptr
) {
3411 if (strncmp("entry", str
, strlen("entry")) == 0) {
3412 panic_with_options(0, NULL
, DEBUGGER_OPTION_RECURPANIC_ENTRY
, "test recursive panic at entry");
3413 } else if (strncmp("prelog", str
, strlen("prelog")) == 0) {
3414 panic_with_options(0, NULL
, DEBUGGER_OPTION_RECURPANIC_PRELOG
, "test recursive panic prior to writing a paniclog");
3415 } else if (strncmp("postlog", str
, strlen("postlog")) == 0) {
3416 panic_with_options(0, NULL
, DEBUGGER_OPTION_RECURPANIC_POSTLOG
, "test recursive panic subsequent to paniclog");
3417 } else if (strncmp("postcore", str
, strlen("postcore")) == 0) {
3418 panic_with_options(0, NULL
, DEBUGGER_OPTION_RECURPANIC_POSTCORE
, "test recursive panic subsequent to on-device core");
3426 sysctl_debugger_test SYSCTL_HANDLER_ARGS
3428 #pragma unused(arg1, arg2)
3430 char str
[32] = "entry prelog postlog postcore";
3432 rval
= sysctl_handle_string(oidp
, str
, sizeof(str
), req
);
3434 if (rval
== 0 && req
->newptr
) {
3435 if (strncmp("entry", str
, strlen("entry")) == 0) {
3436 DebuggerWithContext(0, NULL
, "test recursive panic via debugger at entry", DEBUGGER_OPTION_RECURPANIC_ENTRY
);
3437 } else if (strncmp("prelog", str
, strlen("prelog")) == 0) {
3438 DebuggerWithContext(0, NULL
, "test recursive panic via debugger prior to writing a paniclog", DEBUGGER_OPTION_RECURPANIC_PRELOG
);
3439 } else if (strncmp("postlog", str
, strlen("postlog")) == 0) {
3440 DebuggerWithContext(0, NULL
, "test recursive panic via debugger subsequent to paniclog", DEBUGGER_OPTION_RECURPANIC_POSTLOG
);
3441 } else if (strncmp("postcore", str
, strlen("postcore")) == 0) {
3442 DebuggerWithContext(0, NULL
, "test recursive panic via debugger subsequent to on-device core", DEBUGGER_OPTION_RECURPANIC_POSTCORE
);
3449 decl_lck_spin_data(, spinlock_panic_test_lock
)
3451 __attribute__((noreturn
))
3453 spinlock_panic_test_acquire_spinlock(void * arg __unused
, wait_result_t wres __unused
)
3455 lck_spin_lock(&spinlock_panic_test_lock
);
3460 sysctl_spinlock_panic_test SYSCTL_HANDLER_ARGS
3462 #pragma unused(oidp, arg1, arg2)
3463 if (req
->newlen
== 0)
3466 thread_t panic_spinlock_thread
;
3467 /* Initialize panic spinlock */
3468 lck_grp_t
* panic_spinlock_grp
;
3469 lck_grp_attr_t
* panic_spinlock_grp_attr
;
3470 lck_attr_t
* panic_spinlock_attr
;
3472 panic_spinlock_grp_attr
= lck_grp_attr_alloc_init();
3473 panic_spinlock_grp
= lck_grp_alloc_init("panic_spinlock", panic_spinlock_grp_attr
);
3474 panic_spinlock_attr
= lck_attr_alloc_init();
3476 lck_spin_init(&spinlock_panic_test_lock
, panic_spinlock_grp
, panic_spinlock_attr
);
3479 /* Create thread to acquire spinlock */
3480 if (kernel_thread_start(spinlock_panic_test_acquire_spinlock
, NULL
, &panic_spinlock_thread
) != KERN_SUCCESS
) {
3484 /* Try to acquire spinlock -- should panic eventually */
3485 lck_spin_lock(&spinlock_panic_test_lock
);
3489 __attribute__((noreturn
))
3491 simultaneous_panic_worker
3492 (void * arg
, wait_result_t wres __unused
)
3494 atomic_int
*start_panic
= (atomic_int
*)arg
;
3496 while (!atomic_load(start_panic
)) { ; }
3497 panic("SIMULTANEOUS PANIC TEST: INITIATING PANIC FROM CPU %d", cpu_number());
3498 __builtin_unreachable();
3502 sysctl_simultaneous_panic_test SYSCTL_HANDLER_ARGS
3504 #pragma unused(oidp, arg1, arg2)
3505 if (req
->newlen
== 0)
3508 int i
= 0, threads_to_create
= 2 * processor_count
;
3509 atomic_int start_panic
= 0;
3510 unsigned int threads_created
= 0;
3511 thread_t new_panic_thread
;
3513 for (i
= threads_to_create
; i
> 0; i
--) {
3514 if (kernel_thread_start(simultaneous_panic_worker
, (void *) &start_panic
, &new_panic_thread
) == KERN_SUCCESS
) {
3519 /* FAIL if we couldn't create at least processor_count threads */
3520 if (threads_created
< processor_count
) {
3521 panic("SIMULTANEOUS PANIC TEST: FAILED TO CREATE ENOUGH THREADS, ONLY CREATED %d (of %d)",
3522 threads_created
, threads_to_create
);
3525 atomic_exchange(&start_panic
, 1);
3529 SYSCTL_PROC(_debug
, OID_AUTO
, panic_test
, CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_MASKED
, 0, 0, sysctl_panic_test
, "A", "panic test");
3530 SYSCTL_PROC(_debug
, OID_AUTO
, debugger_test
, CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_MASKED
, 0, 0, sysctl_debugger_test
, "A", "debugger test");
3531 SYSCTL_PROC(_debug
, OID_AUTO
, spinlock_panic_test
, CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_MASKED
, 0, 0, sysctl_spinlock_panic_test
, "A", "spinlock panic test");
3532 SYSCTL_PROC(_debug
, OID_AUTO
, simultaneous_panic_test
, CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_MASKED
, 0, 0, sysctl_simultaneous_panic_test
, "A", "simultaneous panic test");
3535 #endif /* DEVELOPMENT || DEBUG */
3537 const uint32_t thread_groups_supported
= 0;
3540 sysctl_thread_groups_supported (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
3542 int value
= thread_groups_supported
;
3543 return sysctl_io_number(req
, value
, sizeof(value
), NULL
, NULL
);
3546 SYSCTL_PROC(_kern
, OID_AUTO
, thread_groups_supported
, CTLFLAG_RD
| CTLFLAG_LOCKED
| CTLFLAG_KERN
,
3547 0, 0, &sysctl_thread_groups_supported
, "I", "thread groups supported");
3550 sysctl_grade_cputype SYSCTL_HANDLER_ARGS
3552 #pragma unused(arg1, arg2, oidp)
3554 int type_tuple
[2] = {};
3555 int return_value
= 0;
3557 error
= SYSCTL_IN(req
, &type_tuple
, sizeof(type_tuple
));
3563 return_value
= grade_binary(type_tuple
[0], type_tuple
[1]);
3565 error
= SYSCTL_OUT(req
, &return_value
, sizeof(return_value
));
3574 SYSCTL_PROC(_kern
, OID_AUTO
, grade_cputype
,
3575 CTLFLAG_RW
|CTLFLAG_ANYBODY
|CTLFLAG_MASKED
|CTLFLAG_LOCKED
|CTLTYPE_OPAQUE
,
3576 0, 0, &sysctl_grade_cputype
, "S",
3577 "grade value of cpu_type_t+cpu_sub_type_t");