/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Mike Karels at Berkeley Software Design, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sysctl.c	8.4 (Berkeley) 4/14/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 * DEPRECATED sysctl system call code
 *
 * Everything in this file is deprecated. Sysctls should be handled
 * by the code in kern_newsysctl.c.
 * The remaining "case" sections are supposed to be converted into
 * SYSCTL_*-style definitions, and as soon as all of them are gone,
 * this source file is supposed to die.
 *
 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
 */
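/*
 * Illustrative sketch (not part of the original file): the new-style
 * definition that kern_newsysctl.c expects.  The variable and OID name
 * here are hypothetical.
 *
 *	static int example_tunable = 0;
 *	SYSCTL_INT(_kern, OID_AUTO, example_tunable,
 *	    CTLFLAG_RW | CTLFLAG_LOCKED,
 *	    &example_tunable, 0, "example tunable registered without a case");
 */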
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/file_internal.h>
#include <sys/vnode_internal.h>
#include <sys/unistd.h>
#include <sys/buf.h>
#include <sys/ioctl.h>
#include <sys/namei.h>
#include <sys/tty.h>
#include <sys/disklabel.h>
#include <sys/vm.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <sys/aio_kern.h>
#include <sys/reboot.h>
#include <sys/memory_maintenance.h>
#include <sys/priv.h>
#include <stdatomic.h>

#include <security/audit/audit.h>
#include <kern/kalloc.h>

#include <machine/smp.h>
#include <machine/atomic.h>
#include <mach/machine.h>
#include <mach/mach_host.h>
#include <mach/mach_types.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <kern/debug.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_group.h>
#include <kern/processor.h>
#include <kern/cpu_number.h>
#include <kern/cpu_quiesce.h>
#include <kern/sched_prim.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <mach/host_info.h>

#include <sys/mount_internal.h>
#include <sys/kdebug.h>
#include <sys/kern_sysctl.h>

#include <IOKit/IOPlatformExpert.h>
#include <pexpert/pexpert.h>

#include <machine/machine_routines.h>
#include <machine/exec.h>

#include <nfs/nfs_conf.h>

#include <vm/vm_protos.h>
#include <vm/vm_pageout.h>
#include <vm/vm_compressor_algorithms.h>
#include <sys/imgsrc.h>
#include <kern/timer_call.h>

#if defined(__i386__) || defined(__x86_64__)
#include <i386/cpuid.h>
#endif

#if CONFIG_FREEZE
#include <sys/kern_memorystatus.h>
#endif

#if KPERF
#include <kperf/kperf.h>
#endif

#if HYPERVISOR
#include <kern/hv_support.h>
#endif
/*
 * deliberately setting max requests to really high number
 * so that runaway settings do not cause MALLOC overflows
 */
#define AIO_MAX_REQUESTS (128 * CONFIG_AIO_MAX)
extern int aio_max_requests;
extern int aio_max_requests_per_process;
extern int aio_worker_threads;
extern int lowpri_IO_window_msecs;
extern int lowpri_IO_delay_msecs;
#if DEVELOPMENT || DEBUG
extern int nx_enabled;
#endif
extern int speculative_reads_disabled;
extern unsigned int speculative_prefetch_max;
extern unsigned int speculative_prefetch_max_iosize;
extern unsigned int preheat_max_bytes;
extern unsigned int preheat_min_bytes;
extern long numvnodes;
extern long num_recycledvnodes;

extern uuid_string_t bootsessionuuid_string;

extern unsigned int vm_max_delayed_work_limit;
extern unsigned int vm_max_batch;

extern unsigned int vm_page_free_min;
extern unsigned int vm_page_free_target;
extern unsigned int vm_page_free_reserved;

#if (DEVELOPMENT || DEBUG)
extern uint32_t vm_page_creation_throttled_hard;
extern uint32_t vm_page_creation_throttled_soft;
#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_LOCKERBOOT
extern const char kernel_protoboot_mount[];
#endif
/*
 * Conditionally allow dtrace to see these functions for debugging purposes.
 */
#define STATIC static
extern boolean_t mach_timer_coalescing_enabled;

extern uint64_t timer_deadline_tracking_bin_1, timer_deadline_tracking_bin_2;
STATIC void
fill_user32_eproc(proc_t, struct user32_eproc *__restrict);
STATIC void
fill_user32_externproc(proc_t, struct user32_extern_proc *__restrict);
STATIC void
fill_user64_eproc(proc_t, struct user64_eproc *__restrict);
STATIC void
fill_user64_proc(proc_t, struct user64_kinfo_proc *__restrict);
STATIC void
fill_user64_externproc(proc_t, struct user64_extern_proc *__restrict);
STATIC void
fill_user32_proc(proc_t, struct user32_kinfo_proc *__restrict);

extern int
kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep);
#if CONFIG_NFS_CLIENT
extern int
netboot_root(void);
#endif
int
sysctl_procargs(int *name, u_int namelen, user_addr_t where,
    size_t *sizep, proc_t cur_proc);
STATIC int
sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
    proc_t cur_proc, int argc_yes);
int
sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
    size_t newlen, void *sp, int len);
STATIC int sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg);
STATIC int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg);
STATIC int sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg);
STATIC int sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg);
STATIC int sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg);
int sysdoproc_callback(proc_t p, void *arg);
/* forward declarations for non-static STATIC */
STATIC void fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64);
STATIC void fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32);
STATIC int sysctl_handle_kern_threadname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_sched_stats(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_sched_stats_enable(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS;
#if COUNT_SYSCALLS
STATIC int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS;
#endif  /* COUNT_SYSCALLS */
#if !CONFIG_EMBEDDED
STATIC int sysctl_doprocargs SYSCTL_HANDLER_ARGS;
#endif  /* !CONFIG_EMBEDDED */
STATIC int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS;
STATIC int sysctl_prochandle SYSCTL_HANDLER_ARGS;
STATIC int sysctl_aiomax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_aioprocmax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_aiothreads(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_maxproc(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_osversion(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_sysctl_bootargs(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_maxvnodes(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_securelvl(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_domainname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_hostname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_procname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_boottime(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_symfile(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
#if CONFIG_NFS_CLIENT
STATIC int sysctl_netboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
#endif
#ifdef CONFIG_IMGSRC_ACCESS
STATIC int sysctl_imgsrcdev(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
#endif
STATIC int sysctl_usrstack(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_usrstack64(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_suid_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_delayterm(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_rage_vnode(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_kern_check_openevt(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
#if DEVELOPMENT || DEBUG
STATIC int sysctl_nx(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
#endif
STATIC int sysctl_loadavg(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_vm_toggle_address_reuse(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_swapusage(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int fetch_process_cputype(proc_t cur_proc, int *name, u_int namelen, cpu_type_t *cputype);
STATIC int sysctl_sysctl_native(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_sysctl_cputype(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_safeboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_singleuser(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_minimalboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_slide(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);

#ifdef CONFIG_XNUPOST
#include <tests/xnupost.h>

STATIC int sysctl_debug_test_oslog_ctl(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_debug_test_stackshot_mutex_owner(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_debug_test_stackshot_rwlck_owner(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
#endif
extern void IORegistrySetOSBuildVersion(char * build_version);
STATIC void
fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64)
{
    la64->ldavg[0] = la->ldavg[0];
    la64->ldavg[1] = la->ldavg[1];
    la64->ldavg[2] = la->ldavg[2];
    la64->fscale = (user64_long_t)la->fscale;
}

STATIC void
fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32)
{
    la32->ldavg[0] = la->ldavg[0];
    la32->ldavg[1] = la->ldavg[1];
    la32->ldavg[2] = la->ldavg[2];
    la32->fscale = (user32_long_t)la->fscale;
}
/*
 * Attributes stored in the kernel.
 */
extern char corefilename[MAXPATHLEN + 1];
extern int do_coredump;
extern int sugid_coredump;

#if COUNT_SYSCALLS
extern int do_count_syscalls;
#endif

int securelevel = -1;
STATIC int
sysctl_handle_kern_threadname(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
    int error;
    struct uthread *ut = get_bsdthread_info(current_thread());
    user_addr_t oldp = 0, newp = 0;
    size_t *oldlenp = NULL;
    size_t newlen = 0;

    oldp = req->oldptr;
    oldlenp = &(req->oldlen);
    newp = req->newptr;
    newlen = req->newlen;

    /* We want the current length, and maybe the string itself */
    if (oldlenp) {
        /* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */
        size_t currlen = MAXTHREADNAMESIZE - 1;

        if (ut->pth_name) {
            /* use length of current thread name */
            currlen = strlen(ut->pth_name);
        }
        if (oldp) {
            if (*oldlenp < currlen) {
                return ENOMEM;
            }
            /* NOTE - we do not copy the NULL terminator */
            if (ut->pth_name) {
                error = copyout(ut->pth_name, oldp, currlen);
                if (error) {
                    return error;
                }
            }
        }
        /* return length of thread name minus NULL terminator (just like strlen) */
        req->oldidx = currlen;
    }

    /* We want to set the name to something */
    if (newp) {
        if (newlen > (MAXTHREADNAMESIZE - 1)) {
            return ENAMETOOLONG;
        }

        if (!ut->pth_name) {
            char *tmp_pth_name = (char *)kalloc(MAXTHREADNAMESIZE);
            if (!tmp_pth_name) {
                return ENOMEM;
            }
            bzero(tmp_pth_name, MAXTHREADNAMESIZE);
            if (!OSCompareAndSwapPtr(NULL, tmp_pth_name, &ut->pth_name)) {
                kfree(tmp_pth_name, MAXTHREADNAMESIZE);
            }
        } else {
            kernel_debug_string_simple(TRACE_STRING_THREADNAME_PREV, ut->pth_name);
            bzero(ut->pth_name, MAXTHREADNAMESIZE);
        }

        error = copyin(newp, ut->pth_name, newlen);
        if (error) {
            return error;
        }

        kernel_debug_string_simple(TRACE_STRING_THREADNAME, ut->pth_name);
    }

    return 0;
}

SYSCTL_PROC(_kern, KERN_THREADNAME, threadname, CTLFLAG_ANYBODY | CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_handle_kern_threadname, "A", "");
#define BSD_HOST 1
STATIC int
sysctl_sched_stats(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    host_basic_info_data_t hinfo;
    kern_return_t kret;
    uint32_t size;
    int changed;
    mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
    struct _processor_statistics_np *buf;
    int error;

    kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
    if (kret != KERN_SUCCESS) {
        return EINVAL;
    }

    size = sizeof(struct _processor_statistics_np) * (hinfo.logical_cpu_max + 2); /* One for RT Queue, One for Fair Share Queue */

    if (req->oldlen < size) {
        return EINVAL;
    }

    MALLOC(buf, struct _processor_statistics_np*, size, M_TEMP, M_ZERO | M_WAITOK);

    kret = get_sched_statistics(buf, &size);
    if (kret != KERN_SUCCESS) {
        error = EINVAL;
        goto out;
    }

    error = sysctl_io_opaque(req, buf, size, &changed);
    if (error) {
        goto out;
    }

    if (changed) {
        panic("Sched info changed?!");
    }
out:
    FREE(buf, M_TEMP);
    return error;
}

SYSCTL_PROC(_kern, OID_AUTO, sched_stats, CTLFLAG_LOCKED, 0, 0, sysctl_sched_stats, "-", "");
STATIC int
sysctl_sched_stats_enable(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
{
    boolean_t active;
    int res;

    if (req->newlen != sizeof(active)) {
        return EINVAL;
    }

    res = copyin(req->newptr, &active, sizeof(active));
    if (res != 0) {
        return res;
    }

    return set_sched_stats_active(active);
}

SYSCTL_PROC(_kern, OID_AUTO, sched_stats_enable, CTLFLAG_LOCKED | CTLFLAG_WR, 0, 0, sysctl_sched_stats_enable, "-", "");
extern uint32_t sched_debug_flags;
SYSCTL_INT(_debug, OID_AUTO, sched, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_debug_flags, 0, "scheduler debug");

#if (DEBUG || DEVELOPMENT)
extern boolean_t doprnt_hide_pointers;
SYSCTL_INT(_debug, OID_AUTO, hide_kernel_pointers, CTLFLAG_RW | CTLFLAG_LOCKED, &doprnt_hide_pointers, 0, "hide kernel pointers from log");
#endif
extern int get_kernel_symfile(proc_t, char **);

#if COUNT_SYSCALLS
#define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)

extern const unsigned int nsysent;
extern int syscalls_log[];
extern const char *syscallnames[];
STATIC int
sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
{
    __unused int cmd = oidp->oid_arg2;      /* subcommand*/
    __unused int *name = arg1;              /* oid element argument vector */
    __unused int namelen = arg2;            /* number of oid element arguments */
    user_addr_t oldp = req->oldptr;         /* user buffer copy out address */
    size_t *oldlenp = &req->oldlen;         /* user buffer copy out size */
    user_addr_t newp = req->newptr;         /* user buffer copy in address */
    size_t newlen = req->newlen;            /* user buffer copy in size */
    int error;
    int tmp;

    /* valid values passed in:
     * = 0 means don't keep called counts for each bsd syscall
     * > 0 means keep called counts for each bsd syscall
     * = 2 means dump current counts to the system log
     * = 3 means reset all counts
     * for example, to dump current counts:
     *		sysctl -w kern.count_calls=2
     */
    error = sysctl_int(oldp, oldlenp, newp, newlen, &tmp);
    if (error != 0) {
        return error;
    }

    if (tmp == 1) {
        do_count_syscalls = 1;
    } else if (tmp == 0 || tmp == 2 || tmp == 3) {
        unsigned int i;
        for (i = 0; i < nsysent; i++) {
            if (syscalls_log[i] != 0) {
                if (tmp == 2) {
                    printf("%d calls - name %s \n", syscalls_log[i], syscallnames[i]);
                } else {
                    syscalls_log[i] = 0;
                }
            }
        }
        if (tmp != 0) {
            do_count_syscalls = 1;
        }
    }

    /* adjust index so we return the right required/consumed amount */
    if (!error) {
        req->oldidx += req->oldlen;
    }

    return error;
}
SYSCTL_PROC(_kern, KERN_COUNT_SYSCALLS, count_syscalls, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                              /* Pointer argument (arg1) */
    0,                              /* Integer argument (arg2) */
    sysctl_docountsyscalls,         /* Handler function */
    NULL,                           /* Data pointer */
    "");
#endif  /* COUNT_SYSCALLS */
/*
 * The following sysctl_* functions should not be used
 * any more, as they can only cope with callers in
 * user mode: Use new-style
 *  sysctl_io_number()
 *  sysctl_io_string()
 *  sysctl_io_opaque()
 * instead.
 */

/*
 * Validate parameters and get old / set new parameters
 * for an integer-valued sysctl function.
 */
int
sysctl_int(user_addr_t oldp, size_t *oldlenp,
    user_addr_t newp, size_t newlen, int *valp)
{
    int error = 0;

    if (oldp != USER_ADDR_NULL && oldlenp == NULL) {
        return EFAULT;
    }
    if (oldp && *oldlenp < sizeof(int)) {
        return ENOMEM;
    }
    if (newp && newlen != sizeof(int)) {
        return EINVAL;
    }
    *oldlenp = sizeof(int);
    if (oldp) {
        error = copyout(valp, oldp, sizeof(int));
    }
    if (error == 0 && newp) {
        error = copyin(newp, valp, sizeof(int));
        AUDIT_ARG(value32, *valp);
    }
    return error;
}
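/*
 * Illustrative sketch (not part of the original file): the new-style
 * replacement for a sysctl_int()-based handler.  sysctl_io_number()
 * performs the same validation, copyout and copyin shown above in a
 * single call; the name `example_limit' is hypothetical.
 *
 *	STATIC int
 *	sysctl_example(__unused struct sysctl_oid *oidp, __unused void *arg1,
 *	    __unused int arg2, struct sysctl_req *req)
 *	{
 *		int new_value, changed;
 *		int error = sysctl_io_number(req, example_limit, sizeof(int),
 *		    &new_value, &changed);
 *		if (changed) {
 *			example_limit = new_value;
 *		}
 *		return error;
 *	}
 */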
/*
 * Validate parameters and get old / set new parameters
 * for a quad(64bit)-valued sysctl function.
 */
int
sysctl_quad(user_addr_t oldp, size_t *oldlenp,
    user_addr_t newp, size_t newlen, quad_t *valp)
{
    int error = 0;

    if (oldp != USER_ADDR_NULL && oldlenp == NULL) {
        return EFAULT;
    }
    if (oldp && *oldlenp < sizeof(quad_t)) {
        return ENOMEM;
    }
    if (newp && newlen != sizeof(quad_t)) {
        return EINVAL;
    }
    *oldlenp = sizeof(quad_t);
    if (oldp) {
        error = copyout(valp, oldp, sizeof(quad_t));
    }
    if (error == 0 && newp) {
        error = copyin(newp, valp, sizeof(quad_t));
    }
    return error;
}
STATIC int
sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg)
{
    if (p->p_pid != (pid_t)*(int*)arg) {
        return 0;
    } else {
        return 1;
    }
}

STATIC int
sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg)
{
    if (p->p_pgrpid != (pid_t)*(int*)arg) {
        return 0;
    } else {
        return 1;
    }
}

STATIC int
sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg)
{
    boolean_t retval;
    struct tty *tp;

    /* This is very racy but list lock is held.. Hmmm. */
    if ((p->p_flag & P_CONTROLT) == 0 ||
        (p->p_pgrp == NULL) || (p->p_pgrp->pg_session == NULL) ||
        (tp = SESSION_TP(p->p_pgrp->pg_session)) == TTY_NULL ||
        tp->t_dev != (dev_t)*(int*)arg) {
        retval = 0;
    } else {
        retval = 1;
    }

    return retval;
}

STATIC int
sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg)
{
    kauth_cred_t my_cred;
    uid_t uid;

    if (p->p_ucred == NULL) {
        return 0;
    }
    my_cred = kauth_cred_proc_ref(p);
    uid = kauth_cred_getuid(my_cred);
    kauth_cred_unref(&my_cred);

    if (uid != (uid_t)*(int*)arg) {
        return 0;
    } else {
        return 1;
    }
}

STATIC int
sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg)
{
    kauth_cred_t my_cred;
    uid_t ruid;

    if (p->p_ucred == NULL) {
        return 0;
    }
    my_cred = kauth_cred_proc_ref(p);
    ruid = kauth_cred_getruid(my_cred);
    kauth_cred_unref(&my_cred);

    if (ruid != (uid_t)*(int*)arg) {
        return 0;
    } else {
        return 1;
    }
}
/*
 * try over estimating by 5 procs
 */
#define KERN_PROCSLOP   (5 * sizeof(struct kinfo_proc))
struct sysdoproc_args {
    int buflen;
    void *kprocp;
    boolean_t is_64_bit;
    user_addr_t dp;
    size_t needed;
    int sizeof_kproc;
    int *errorp;
    int uidcheck;
    int ruidcheck;
    int ttycheck;
    int uidval;
};
int
sysdoproc_callback(proc_t p, void *arg)
{
    struct sysdoproc_args *args = arg;

    if (args->buflen >= args->sizeof_kproc) {
        if ((args->ruidcheck != 0) && (sysdoproc_filt_KERN_PROC_RUID(p, &args->uidval) == 0)) {
            return PROC_RETURNED;
        }
        if ((args->uidcheck != 0) && (sysdoproc_filt_KERN_PROC_UID(p, &args->uidval) == 0)) {
            return PROC_RETURNED;
        }
        if ((args->ttycheck != 0) && (sysdoproc_filt_KERN_PROC_TTY(p, &args->uidval) == 0)) {
            return PROC_RETURNED;
        }

        bzero(args->kprocp, args->sizeof_kproc);
        if (args->is_64_bit) {
            fill_user64_proc(p, args->kprocp);
        } else {
            fill_user32_proc(p, args->kprocp);
        }
        int error = copyout(args->kprocp, args->dp, args->sizeof_kproc);
        if (error) {
            *args->errorp = error;
            return PROC_RETURNED_DONE;
        }
        args->dp += args->sizeof_kproc;
        args->buflen -= args->sizeof_kproc;
    }
    args->needed += args->sizeof_kproc;
    return PROC_RETURNED;
}
SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD | CTLFLAG_LOCKED, 0, "");
STATIC int
sysctl_prochandle SYSCTL_HANDLER_ARGS
{
    int cmd = oidp->oid_arg2;       /* subcommand for multiple nodes */
    int *name = arg1;               /* oid element argument vector */
    int namelen = arg2;             /* number of oid element arguments */
    user_addr_t where = req->oldptr;/* user buffer copy out address */

    user_addr_t dp = where;
    size_t needed = 0;
    int buflen = where != USER_ADDR_NULL ? req->oldlen : 0;
    int error = 0;
    boolean_t is_64_bit = proc_is64bit(current_proc());
    struct user32_kinfo_proc  user32_kproc;
    struct user64_kinfo_proc  user_kproc;
    int sizeof_kproc;
    void *kprocp;
    int (*filterfn)(proc_t, void *) = 0;
    struct sysdoproc_args args;
    int uidcheck = 0;
    int ruidcheck = 0;
    int ttycheck = 0;

    if (namelen != 1 && !(namelen == 0 && cmd == KERN_PROC_ALL)) {
        return EINVAL;
    }

    if (is_64_bit) {
        sizeof_kproc = sizeof(user_kproc);
        kprocp = &user_kproc;
    } else {
        sizeof_kproc = sizeof(user32_kproc);
        kprocp = &user32_kproc;
    }

    switch (cmd) {
    case KERN_PROC_PID:
        filterfn = sysdoproc_filt_KERN_PROC_PID;
        break;

    case KERN_PROC_PGRP:
        filterfn = sysdoproc_filt_KERN_PROC_PGRP;
        break;

    case KERN_PROC_TTY:
        ttycheck = 1;
        break;

    case KERN_PROC_UID:
        uidcheck = 1;
        break;

    case KERN_PROC_RUID:
        ruidcheck = 1;
        break;

    case KERN_PROC_ALL:
        break;

    default:
        /* must be kern.proc.<unknown> */
        return ENOTSUP;
    }

    error = 0;
    args.buflen = buflen;
    args.kprocp = kprocp;
    args.is_64_bit = is_64_bit;
    args.dp = dp;
    args.needed = needed;
    args.errorp = &error;
    args.uidcheck = uidcheck;
    args.ruidcheck = ruidcheck;
    args.ttycheck = ttycheck;
    args.sizeof_kproc = sizeof_kproc;
    if (namelen) {
        args.uidval = name[0];
    }

    proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST),
        sysdoproc_callback, &args, filterfn, name);

    if (error) {
        return error;
    }

    dp = args.dp;
    needed = args.needed;

    if (where != USER_ADDR_NULL) {
        req->oldlen = dp - where;
        if (needed > req->oldlen) {
            return ENOMEM;
        }
    } else {
        needed += KERN_PROCSLOP;
        req->oldlen = needed;
    }
    /* adjust index so we return the right required/consumed amount */
    req->oldidx += req->oldlen;
    return 0;
}
/*
 * We specify the subcommand code for multiple nodes as the 'req->arg2' value
 * in the sysctl declaration itself, which comes into the handler function
 * as 'oidp->oid_arg2'.
 *
 * For these particular sysctls, since they have well known OIDs, we could
 * have just obtained it from the '((int *)arg1)[0]' parameter, but that would
 * not demonstrate how to handle multiple sysctls that used OID_AUTO instead
 * of a well known value with a common handler function.  This is desirable,
 * because we want well known values to "go away" at some future date.
 *
 * It should be noted that the value of '((int *)arg1)[1]' is used as an
 * integer parameter to the subcommand for many of these sysctls; we'd
 * rather have used '((int *)arg1)[0]' for that, or even better, an element
 * in a structure passed in as the 'newp' argument to sysctlbyname(3),
 * and then use leaf-node permissions enforcement, but that would have
 * necessitated modifying user space code to correspond to the interface
 * change, and we are striving for binary backward compatibility here; even
 * though these are SPI, and not intended for use by user space applications
 * which are not themselves system tools or libraries, some applications
 * have erroneously used them.
 */
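/*
 * Illustrative sketch (not part of the original file) of the pattern the
 * comment above describes: two OID_AUTO nodes sharing one handler,
 * distinguished only by their arg2 subcommand.  All names here are
 * hypothetical.
 *
 *	SYSCTL_PROC(_kern_example, OID_AUTO, variant_a,
 *	    CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
 *	    0, EXAMPLE_VARIANT_A, sysctl_example_handle, "-", "");
 *	SYSCTL_PROC(_kern_example, OID_AUTO, variant_b,
 *	    CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
 *	    0, EXAMPLE_VARIANT_B, sysctl_example_handle, "-", "");
 *
 * Inside the shared handler, 'oidp->oid_arg2' then selects the subcommand.
 */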
SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                      /* Pointer argument (arg1) */
    KERN_PROC_ALL,          /* Integer argument (arg2) */
    sysctl_prochandle,      /* Handler function */
    NULL,                   /* Data is size variant on ILP32/LP64 */
    "");
SYSCTL_PROC(_kern_proc, KERN_PROC_PID, pid, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                      /* Pointer argument (arg1) */
    KERN_PROC_PID,          /* Integer argument (arg2) */
    sysctl_prochandle,      /* Handler function */
    NULL,                   /* Data is size variant on ILP32/LP64 */
    "");
SYSCTL_PROC(_kern_proc, KERN_PROC_TTY, tty, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                      /* Pointer argument (arg1) */
    KERN_PROC_TTY,          /* Integer argument (arg2) */
    sysctl_prochandle,      /* Handler function */
    NULL,                   /* Data is size variant on ILP32/LP64 */
    "");
SYSCTL_PROC(_kern_proc, KERN_PROC_PGRP, pgrp, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                      /* Pointer argument (arg1) */
    KERN_PROC_PGRP,         /* Integer argument (arg2) */
    sysctl_prochandle,      /* Handler function */
    NULL,                   /* Data is size variant on ILP32/LP64 */
    "");
SYSCTL_PROC(_kern_proc, KERN_PROC_UID, uid, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                      /* Pointer argument (arg1) */
    KERN_PROC_UID,          /* Integer argument (arg2) */
    sysctl_prochandle,      /* Handler function */
    NULL,                   /* Data is size variant on ILP32/LP64 */
    "");
SYSCTL_PROC(_kern_proc, KERN_PROC_RUID, ruid, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                      /* Pointer argument (arg1) */
    KERN_PROC_RUID,         /* Integer argument (arg2) */
    sysctl_prochandle,      /* Handler function */
    NULL,                   /* Data is size variant on ILP32/LP64 */
    "");
SYSCTL_PROC(_kern_proc, KERN_PROC_LCID, lcid, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                      /* Pointer argument (arg1) */
    KERN_PROC_LCID,         /* Integer argument (arg2) */
    sysctl_prochandle,      /* Handler function */
    NULL,                   /* Data is size variant on ILP32/LP64 */
    "");
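/*
 * Illustrative usage sketch (not part of the original file): these nodes
 * are reached from userspace through sysctl(3); for example, fetching one
 * process's kinfo_proc:
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, (int)getpid() };
 *	struct kinfo_proc kp;
 *	size_t len = sizeof(kp);
 *	if (sysctl(mib, 4, &kp, &len, NULL, 0) == 0) {
 *		printf("%s\n", kp.kp_proc.p_comm);
 *	}
 *
 * A NULL old-pointer instead returns the size needed (padded by
 * KERN_PROCSLOP for listings such as KERN_PROC_ALL).
 */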
/*
 * Fill in non-zero fields of an eproc structure for the specified process.
 */
STATIC void
fill_user32_eproc(proc_t p, struct user32_eproc *__restrict ep)
{
    struct tty *tp;
    struct pgrp *pg;
    struct session *sessp;
    kauth_cred_t my_cred;

    pg = proc_pgrp(p);
    sessp = proc_session(p);

    if (pg != PGRP_NULL) {
        ep->e_pgid = p->p_pgrpid;
        ep->e_jobc = pg->pg_jobc;
        if (sessp != SESSION_NULL && sessp->s_ttyvp) {
            ep->e_flag = EPROC_CTTY;
        }
    }
    ep->e_ppid = p->p_ppid;

    my_cred = kauth_cred_proc_ref(p);

    /* A fake historical pcred */
    ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
    ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
    ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
    ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);

    /* A fake historical *kauth_cred_t */
    ep->e_ucred.cr_ref = os_atomic_load(&my_cred->cr_ref, relaxed);
    ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
    ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups;
    bcopy(posix_cred_get(my_cred)->cr_groups,
        ep->e_ucred.cr_groups, NGROUPS * sizeof(gid_t));

    kauth_cred_unref(&my_cred);

    if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
        (tp = SESSION_TP(sessp))) {
        ep->e_tdev = tp->t_dev;
        ep->e_tpgid = sessp->s_ttypgrpid;
    } else {
        ep->e_tdev = NODEV;
    }

    if (sessp != SESSION_NULL) {
        if (SESS_LEADER(p, sessp)) {
            ep->e_flag |= EPROC_SLEADER;
        }
        session_rele(sessp);
    }
    if (pg != PGRP_NULL) {
        pg_rele(pg);
    }
}
/*
 * Fill in non-zero fields of an LP64 eproc structure for the specified process.
 */
STATIC void
fill_user64_eproc(proc_t p, struct user64_eproc *__restrict ep)
{
    struct tty *tp;
    struct pgrp *pg;
    struct session *sessp;
    kauth_cred_t my_cred;

    pg = proc_pgrp(p);
    sessp = proc_session(p);

    if (pg != PGRP_NULL) {
        ep->e_pgid = p->p_pgrpid;
        ep->e_jobc = pg->pg_jobc;
        if (sessp != SESSION_NULL && sessp->s_ttyvp) {
            ep->e_flag = EPROC_CTTY;
        }
    }
    ep->e_ppid = p->p_ppid;

    my_cred = kauth_cred_proc_ref(p);

    /* A fake historical pcred */
    ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
    ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
    ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
    ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);

    /* A fake historical *kauth_cred_t */
    ep->e_ucred.cr_ref = os_atomic_load(&my_cred->cr_ref, relaxed);
    ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
    ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups;
    bcopy(posix_cred_get(my_cred)->cr_groups,
        ep->e_ucred.cr_groups, NGROUPS * sizeof(gid_t));

    kauth_cred_unref(&my_cred);

    if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
        (tp = SESSION_TP(sessp))) {
        ep->e_tdev = tp->t_dev;
        ep->e_tpgid = sessp->s_ttypgrpid;
    } else {
        ep->e_tdev = NODEV;
    }

    if (sessp != SESSION_NULL) {
        if (SESS_LEADER(p, sessp)) {
            ep->e_flag |= EPROC_SLEADER;
        }
        session_rele(sessp);
    }
    if (pg != PGRP_NULL) {
        pg_rele(pg);
    }
}
/*
 * Fill in an eproc structure for the specified process.
 * bzeroed by our caller, so only set non-zero fields.
 */
STATIC void
fill_user32_externproc(proc_t p, struct user32_extern_proc *__restrict exp)
{
    exp->p_starttime.tv_sec = p->p_start.tv_sec;
    exp->p_starttime.tv_usec = p->p_start.tv_usec;
    exp->p_flag = p->p_flag;
    if (p->p_lflag & P_LTRACED) {
        exp->p_flag |= P_TRACED;
    }
    if (p->p_lflag & P_LPPWAIT) {
        exp->p_flag |= P_PPWAIT;
    }
    if (p->p_lflag & P_LEXIT) {
        exp->p_flag |= P_WEXIT;
    }
    exp->p_stat = p->p_stat;
    exp->p_pid = p->p_pid;
    exp->p_oppid = p->p_oppid;

    exp->user_stack = p->user_stack;
    exp->p_debugger = p->p_debugger;
    exp->sigwait = p->sigwait;

#ifdef _PROC_HAS_SCHEDINFO_
    exp->p_estcpu = p->p_estcpu;
    exp->p_pctcpu = p->p_pctcpu;
    exp->p_slptime = p->p_slptime;
#endif
    exp->p_realtimer.it_interval.tv_sec =
        (user32_time_t)p->p_realtimer.it_interval.tv_sec;
    exp->p_realtimer.it_interval.tv_usec =
        (__int32_t)p->p_realtimer.it_interval.tv_usec;

    exp->p_realtimer.it_value.tv_sec =
        (user32_time_t)p->p_realtimer.it_value.tv_sec;
    exp->p_realtimer.it_value.tv_usec =
        (__int32_t)p->p_realtimer.it_value.tv_usec;

    exp->p_rtime.tv_sec = (user32_time_t)p->p_rtime.tv_sec;
    exp->p_rtime.tv_usec = (__int32_t)p->p_rtime.tv_usec;

    exp->p_sigignore = p->p_sigignore;
    exp->p_sigcatch = p->p_sigcatch;
    exp->p_priority = p->p_priority;
    exp->p_nice = p->p_nice;
    bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
    exp->p_xstat = p->p_xstat;
    exp->p_acflag = p->p_acflag;
}
/*
 * Fill in an LP64 version of extern_proc structure for the specified process.
 */
STATIC void
fill_user64_externproc(proc_t p, struct user64_extern_proc *__restrict exp)
{
    exp->p_starttime.tv_sec = p->p_start.tv_sec;
    exp->p_starttime.tv_usec = p->p_start.tv_usec;
    exp->p_flag = p->p_flag;
    if (p->p_lflag & P_LTRACED) {
        exp->p_flag |= P_TRACED;
    }
    if (p->p_lflag & P_LPPWAIT) {
        exp->p_flag |= P_PPWAIT;
    }
    if (p->p_lflag & P_LEXIT) {
        exp->p_flag |= P_WEXIT;
    }
    exp->p_stat = p->p_stat;
    exp->p_pid = p->p_pid;
    exp->p_oppid = p->p_oppid;

    exp->user_stack = p->user_stack;
    exp->p_debugger = p->p_debugger;
    exp->sigwait = p->sigwait;

#ifdef _PROC_HAS_SCHEDINFO_
    exp->p_estcpu = p->p_estcpu;
    exp->p_pctcpu = p->p_pctcpu;
    exp->p_slptime = p->p_slptime;
#endif
    exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
    exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;

    exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
    exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;

    exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
    exp->p_rtime.tv_usec = p->p_rtime.tv_usec;

    exp->p_sigignore = p->p_sigignore;
    exp->p_sigcatch = p->p_sigcatch;
    exp->p_priority = p->p_priority;
    exp->p_nice = p->p_nice;
    bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
    exp->p_xstat = p->p_xstat;
    exp->p_acflag = p->p_acflag;
}
STATIC void
fill_user32_proc(proc_t p, struct user32_kinfo_proc *__restrict kp)
{
    /* on a 64 bit kernel, 32 bit users get some truncated information */
    fill_user32_externproc(p, &kp->kp_proc);
    fill_user32_eproc(p, &kp->kp_eproc);
}

STATIC void
fill_user64_proc(proc_t p, struct user64_kinfo_proc *__restrict kp)
{
    fill_user64_externproc(p, &kp->kp_proc);
    fill_user64_eproc(p, &kp->kp_eproc);
}
STATIC int
sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
{
    __unused int cmd = oidp->oid_arg2;      /* subcommand*/
    int *name = arg1;               /* oid element argument vector */
    int namelen = arg2;             /* number of oid element arguments */
    user_addr_t oldp = req->oldptr; /* user buffer copy out address */
    size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
    // user_addr_t newp = req->newptr;	/* user buffer copy in address */
    // size_t newlen = req->newlen;		/* user buffer copy in size */
    int ret = 0;

    if (namelen == 0) {
        return ENOTSUP;
    }

    switch (name[0]) {
    case KERN_KDEFLAGS:
    case KERN_KDDFLAGS:
    case KERN_KDENABLE:
    case KERN_KDGETBUF:
    case KERN_KDSETUP:
    case KERN_KDREMOVE:
    case KERN_KDSETREG:
    case KERN_KDGETREG:
    case KERN_KDREADTR:
    case KERN_KDWRITETR:
    case KERN_KDWRITEMAP:
    case KERN_KDTEST:
    case KERN_KDPIDTR:
    case KERN_KDTHRMAP:
    case KERN_KDPIDEX:
    case KERN_KDSETBUF:
    case KERN_KDREADCURTHRMAP:
    case KERN_KDSET_TYPEFILTER:
    case KERN_KDBUFWAIT:
    case KERN_KDCPUMAP:
    case KERN_KDWRITEMAP_V3:
    case KERN_KDWRITETR_V3:
        ret = kdbg_control(name, namelen, oldp, oldlenp);
        break;
    default:
        ret = ENOTSUP;
        break;
    }

    /* adjust index so we return the right required/consumed amount */
    if (!ret) {
        req->oldidx += req->oldlen;
    }

    return ret;
}
SYSCTL_PROC(_kern, KERN_KDEBUG, kdebug, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                      /* Pointer argument (arg1) */
    0,                      /* Integer argument (arg2) */
    sysctl_kdebug_ops,      /* Handler function */
    NULL,                   /* Data pointer */
    "");
#if !CONFIG_EMBEDDED
/*
 * Return the top *sizep bytes of the user stack, or the entire area of the
 * user stack down through the saved exec_path, whichever is smaller.
 */
STATIC int
sysctl_doprocargs SYSCTL_HANDLER_ARGS
{
    __unused int cmd = oidp->oid_arg2;      /* subcommand*/
    int *name = arg1;               /* oid element argument vector */
    int namelen = arg2;             /* number of oid element arguments */
    user_addr_t oldp = req->oldptr; /* user buffer copy out address */
    size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
    // user_addr_t newp = req->newptr;	/* user buffer copy in address */
    // size_t newlen = req->newlen;		/* user buffer copy in size */
    int error;

    error = sysctl_procargsx(name, namelen, oldp, oldlenp, current_proc(), 0);

    /* adjust index so we return the right required/consumed amount */
    if (!error) {
        req->oldidx += req->oldlen;
    }

    return error;
}
SYSCTL_PROC(_kern, KERN_PROCARGS, procargs, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                      /* Pointer argument (arg1) */
    0,                      /* Integer argument (arg2) */
    sysctl_doprocargs,      /* Handler function */
    NULL,                   /* Data pointer */
    "");
#endif  /* !CONFIG_EMBEDDED */
STATIC int
sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
{
    __unused int cmd = oidp->oid_arg2;      /* subcommand*/
    int *name = arg1;               /* oid element argument vector */
    int namelen = arg2;             /* number of oid element arguments */
    user_addr_t oldp = req->oldptr; /* user buffer copy out address */
    size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
    // user_addr_t newp = req->newptr;	/* user buffer copy in address */
    // size_t newlen = req->newlen;		/* user buffer copy in size */
    int error;

    error = sysctl_procargsx(name, namelen, oldp, oldlenp, current_proc(), 1);

    /* adjust index so we return the right required/consumed amount */
    if (!error) {
        req->oldidx += req->oldlen;
    }

    return error;
}
SYSCTL_PROC(_kern, KERN_PROCARGS2, procargs2, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                      /* Pointer argument (arg1) */
    0,                      /* Integer argument (arg2) */
    sysctl_doprocargs2,     /* Handler function */
    NULL,                   /* Data pointer */
    "");
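/*
 * Illustrative usage sketch (not part of the original file): the
 * KERN_PROCARGS2 buffer layout produced below is argc as the first word,
 * followed by the NUL-separated strings, beginning with the executable
 * path.  A hypothetical userspace reader:
 *
 *	int mib[3] = { CTL_KERN, KERN_PROCARGS2, pid };
 *	size_t len = 0;
 *	sysctl(mib, 3, NULL, &len, NULL, 0);     // ask for required size
 *	char *buf = malloc(len);
 *	sysctl(mib, 3, buf, &len, NULL, 0);
 *	int argc;
 *	memcpy(&argc, buf, sizeof(argc));        // first word is argc
 */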
STATIC int
sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
    size_t *sizep, proc_t cur_proc, int argc_yes)
{
    proc_t p;
    int buflen = where != USER_ADDR_NULL ? *sizep : 0;
    int error = 0;
    struct _vm_map *proc_map;
    struct task *task;
    vm_map_copy_t tmp;
    user_addr_t arg_addr;
    size_t arg_size;
    caddr_t data;
    size_t argslen = 0;
    int size;
    vm_size_t alloc_size = 0;
    vm_offset_t copy_start, copy_end;
    kern_return_t ret;
    int pid;
    kauth_cred_t my_cred;
    uid_t uid;
    int argc = -1;

    if (namelen < 1) {
        return EINVAL;
    }

    if (argc_yes) {
        buflen -= sizeof(int);          /* reserve first word to return argc */
    }
    /* we only care about buflen when where (oldp from sysctl) is not NULL. */
    /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
    /* is not NULL then the caller wants us to return the length needed to */
    /* hold the data we would return */
    if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
        return EINVAL;
    }
    arg_size = buflen;

    /*
     *	Lookup process by pid
     */
    pid = name[0];
    p = proc_find(pid);
    if (p == NULL) {
        return EINVAL;
    }

    /*
     *	Copy the top N bytes of the stack.
     *	On all machines we have so far, the stack grows
     *	downwards.
     *
     *	If the user expects no more than N bytes of
     *	argument list, use that as a guess for the
     *	size.
     */

    if (!p->user_stack) {
        proc_rele(p);
        return EINVAL;
    }

    if (where == USER_ADDR_NULL) {
        /* caller only wants to know length of proc args data */
        if (sizep == NULL) {
            proc_rele(p);
            return EFAULT;
        }

        size = p->p_argslen;
        proc_rele(p);
        if (argc_yes) {
            size += sizeof(int);
        } else {
            /*
             * old PROCARGS will return the executable's path and plus some
             * extra space for work alignment and data tags
             */
            size += PATH_MAX + (6 * sizeof(int));
        }
        size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
        *sizep = size;
        return 0;
    }

    my_cred = kauth_cred_proc_ref(p);
    uid = kauth_cred_getuid(my_cred);
    kauth_cred_unref(&my_cred);

    if ((uid != kauth_cred_getuid(kauth_cred_get()))
        && suser(kauth_cred_get(), &cur_proc->p_acflag)) {
        proc_rele(p);
        return EINVAL;
    }

    if ((u_int)arg_size > p->p_argslen) {
        arg_size = round_page(p->p_argslen);
    }

    arg_addr = p->user_stack - arg_size;

    /*
     *	Before we can block (any VM code), make another
     *	reference to the map to keep it alive.  We do
     *	that by getting a reference on the task itself.
     */
    task = p->task;
    if (task == NULL) {
        proc_rele(p);
        return EINVAL;
    }

    /* save off argc before releasing the proc */
    argc = p->p_argc;

    argslen = p->p_argslen;
    /*
     * Once we have a task reference we can convert that into a
     * map reference, which we will use in the calls below.  The
     * task/process may change its map after we take this reference
     * (see execve), but the worst that will happen then is a return
     * of stale info (which is always a possibility).
     */
    task_reference(task);
    proc_rele(p);
    proc_map = get_task_map_reference(task);
    task_deallocate(task);

    if (proc_map == NULL) {
        return EINVAL;
    }

    alloc_size = round_page(arg_size);
    ret = kmem_alloc(kernel_map, &copy_start, alloc_size, VM_KERN_MEMORY_BSD);
    if (ret != KERN_SUCCESS) {
        vm_map_deallocate(proc_map);
        return ENOMEM;
    }
    bzero((void *)copy_start, alloc_size);

    copy_end = round_page(copy_start + arg_size);

    if (vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
        (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
        vm_map_deallocate(proc_map);
        kmem_free(kernel_map, copy_start,
            round_page(arg_size));
        return EIO;
    }

    /*
     *	Now that we've done the copyin from the process'
     *	map, we can release the reference to it.
     */
    vm_map_deallocate(proc_map);

    if (vm_map_copy_overwrite(kernel_map,
        (vm_map_address_t)copy_start,
        tmp, FALSE) != KERN_SUCCESS) {
        kmem_free(kernel_map, copy_start,
            round_page(arg_size));
        vm_map_copy_discard(tmp);
        return EIO;
    }

    if (arg_size > argslen) {
        data = (caddr_t) (copy_end - argslen);
        size = argslen;
    } else {
        data = (caddr_t) (copy_end - arg_size);
        size = arg_size;
    }

    /*
     * When these sysctls were introduced, the first string in the strings
     * section was just the bare path of the executable.  However, for security
     * reasons we now prefix this string with executable_path= so it can be
     * parsed getenv style.  To avoid binary compatibility issues with existing
     * callers of this sysctl, we strip it off here if present.
     * (rdar://problem/13746466)
     */
#define EXECUTABLE_KEY "executable_path="
    if (strncmp(EXECUTABLE_KEY, data, strlen(EXECUTABLE_KEY)) == 0) {
        data += strlen(EXECUTABLE_KEY);
        size -= strlen(EXECUTABLE_KEY);
    }

    if (argc_yes) {
        /* Put processes argc as the first word in the copyout buffer */
        suword(where, argc);
        error = copyout(data, (where + sizeof(int)), size);
        size += sizeof(int);
    } else {
        error = copyout(data, where, size);

        /*
         * Make the old PROCARGS work to return the executable's path
         * But, only if there is enough space in the provided buffer
         *
         * on entry: data [possibly] points to the beginning of the path
         *
         * Note: we keep all pointers&sizes aligned to word boundaries
         */
        if ((!error) && (buflen > 0 && (u_int)buflen > argslen)) {
            int binPath_sz, alignedBinPath_sz = 0;
            int extraSpaceNeeded, addThis;
            user_addr_t placeHere;
            char * str = (char *) data;
            int max_len = size;

            /* Some apps are really bad about messing up their stacks
             *  So, we have to be extra careful about getting the length
             *  of the executing binary.  If we encounter an error, we bail.
             */

            /* Limit ourselves to PATH_MAX paths */
            if (max_len > PATH_MAX) {
                max_len = PATH_MAX;
            }

            binPath_sz = 0;

            while ((binPath_sz < max_len - 1) && (*str++ != 0)) {
                binPath_sz++;
            }

            /* If we have a NUL terminator, copy it, too */
            if (binPath_sz < max_len - 1) {
                binPath_sz += 1;
            }

            /* Pre-Flight the space requirements */

            /* Account for the padding that fills out binPath to the next word */
            alignedBinPath_sz += (binPath_sz & (sizeof(int) - 1)) ? (sizeof(int) - (binPath_sz & (sizeof(int) - 1))) : 0;

            placeHere = where + size;

            /* Account for the bytes needed to keep placeHere word aligned */
            addThis = (placeHere & (sizeof(int) - 1)) ? (sizeof(int) - (placeHere & (sizeof(int) - 1))) : 0;

            /* Add up all the space that is needed */
            extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));

            /* is there is room to tack on argv[0]? */
            if ((buflen & ~(sizeof(int) - 1)) >= (argslen + extraSpaceNeeded)) {
                placeHere += addThis;
                suword(placeHere, 0);
                placeHere += sizeof(int);
                suword(placeHere, 0xBFFF0000);
                placeHere += sizeof(int);
                suword(placeHere, 0);
                placeHere += sizeof(int);
                error = copyout(data, placeHere, binPath_sz);
                if (!error) {
                    placeHere += binPath_sz;
                    suword(placeHere, 0);
                    size += extraSpaceNeeded;
                }
            }
        }
    }

    if (copy_start != (vm_offset_t) 0) {
        kmem_free(kernel_map, copy_start, copy_end - copy_start);
    }
    if (error) {
        return error;
    }

    if (where != USER_ADDR_NULL) {
        *sizep = size;
    }
    return 0;
}
/*
 * Max number of concurrent aio requests
 */
STATIC int
sysctl_aiomax
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, aio_max_requests, sizeof(int), &new_value, &changed);
    if (changed) {
        /* make sure the system-wide limit is greater than the per process limit */
        if (new_value >= aio_max_requests_per_process && new_value <= AIO_MAX_REQUESTS) {
            aio_max_requests = new_value;
        } else {
            error = EINVAL;
        }
    }
    return error;
}

/*
 * Max number of concurrent aio requests per process
 */
STATIC int
sysctl_aioprocmax
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, aio_max_requests_per_process, sizeof(int), &new_value, &changed);
    if (changed) {
        /* make sure per process limit is less than the system-wide limit */
        if (new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX) {
            aio_max_requests_per_process = new_value;
        } else {
            error = EINVAL;
        }
    }
    return error;
}
/*
 * Max number of async IO worker threads
 */
STATIC int
sysctl_aiothreads
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, aio_worker_threads, sizeof(int), &new_value, &changed);
    if (changed) {
        /* we only allow an increase in the number of worker threads */
        if (new_value > aio_worker_threads) {
            _aio_create_worker_threads((new_value - aio_worker_threads));
            aio_worker_threads = new_value;
        } else {
            error = EINVAL;
        }
    }
    return error;
}
/*
 * System-wide limit on the max number of processes
 */
STATIC int
sysctl_maxproc
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, maxproc, sizeof(int), &new_value, &changed);
    if (changed) {
        AUDIT_ARG(value32, new_value);
        /* make sure the system-wide limit is less than the configured hard
         *  limit set at kernel compilation */
        if (new_value <= hard_maxproc && new_value > 0) {
            maxproc = new_value;
        } else {
            error = EINVAL;
        }
    }
    return error;
}
extern int sched_enable_smt;
STATIC int
sysctl_sched_enable_smt
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, sched_enable_smt, sizeof(int), &new_value, &changed);
    if (error) {
        return error;
    }
    kern_return_t kret = KERN_SUCCESS;
    if (changed) {
        AUDIT_ARG(value32, new_value);
        if (new_value == 0) {
            sched_enable_smt = 0;
            kret = enable_smt_processors(false);
        } else {
            sched_enable_smt = 1;
            kret = enable_smt_processors(true);
        }
    }
    switch (kret) {
    case KERN_SUCCESS:
        error = 0;
        break;
    case KERN_INVALID_ARGUMENT:
        error = EALREADY;
        break;
    default:
        error = EINVAL;
        break;
    }

    return error;
}
SYSCTL_STRING(_kern, KERN_OSTYPE, ostype,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    ostype, 0, "");
SYSCTL_STRING(_kern, KERN_OSRELEASE, osrelease,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    osrelease, 0, "");
SYSCTL_INT(_kern, KERN_OSREV, osrevision,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, BSD, "");
SYSCTL_STRING(_kern, KERN_VERSION, version,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    version, 0, "");
SYSCTL_STRING(_kern, OID_AUTO, uuid,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &kernel_uuid_string[0], 0, "");

SYSCTL_STRING(_kern, OID_AUTO, osbuildconfig,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_MASKED,
    &osbuild_config[0], 0, "");
STATIC int
sysctl_protoboot(__unused struct sysctl_oid *oidp,
    __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int error = -1;
#if CONFIG_LOCKERBOOT
    char protoboot_buff[24];
    size_t protoboot_len = sizeof(protoboot_buff);

    if (vnode_tag(rootvnode) == VT_LOCKERFS) {
        strlcpy(protoboot_buff, kernel_protoboot_mount, protoboot_len);
        error = sysctl_io_string(req, protoboot_buff, protoboot_len, 0, NULL);
    } else {
        error = EFTYPE;
    }
#else
#pragma unused(req)
    error = ENOTSUP;
#endif

    return error;
}

SYSCTL_PROC(_kern, OID_AUTO, protoboot,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_protoboot, "A", "");
int debug_kprint_syscall = 0;
char debug_kprint_syscall_process[MAXCOMLEN + 1];

/* Thread safe: bits and string value are not used to reclaim state */
SYSCTL_INT(_debug, OID_AUTO, kprint_syscall,
    CTLFLAG_RW | CTLFLAG_LOCKED, &debug_kprint_syscall, 0, "kprintf syscall tracing");
SYSCTL_STRING(_debug, OID_AUTO, kprint_syscall_process,
    CTLFLAG_RW | CTLFLAG_LOCKED, debug_kprint_syscall_process, sizeof(debug_kprint_syscall_process),
    "name of process for kprintf syscall tracing");
int
debug_kprint_current_process(const char **namep)
{
    struct proc *p = current_proc();

    if (p == NULL) {
        return 0;
    }

    if (debug_kprint_syscall_process[0]) {
        /* user asked to scope tracing to a particular process name */
        if (0 == strncmp(debug_kprint_syscall_process,
            p->p_comm, sizeof(debug_kprint_syscall_process))) {
            /* no value in telling the user that we traced what they asked */
            if (namep) {
                *namep = NULL;
            }

            return 1;
        } else {
            return 0;
        }
    }

    /* trace all processes. Tell user what we traced */
    if (namep) {
        *namep = p->p_comm;
    }

    return 1;
}
/* PR-5293665: need to use a callback function for kern.osversion to set
 * osversion in IORegistry */
STATIC int
sysctl_osversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
{
    int rval = 0;

    rval = sysctl_handle_string(oidp, arg1, arg2, req);

    if (req->newptr) {
        IORegistrySetOSBuildVersion((char *)arg1);
    }

    return rval;
}

SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
    CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
    osversion, 256 /* OSVERSIZE*/,
    sysctl_osversion, "A", "");
char osproductversion[48] = { '\0' };

STATIC int
sysctl_osproductversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
{
    if (req->newptr != 0) {
        /*
         * Can only ever be set by launchd, and only once at boot.
         */
        if (req->p->p_pid != 1 || osproductversion[0] != '\0') {
            return EPERM;
        }
    }

    return sysctl_handle_string(oidp, arg1, arg2, req);
}

SYSCTL_PROC(_kern, OID_AUTO, osproductversion,
    CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
    osproductversion, sizeof(osproductversion),
    sysctl_osproductversion, "A", "The ProductVersion from SystemVersion.plist");
static char iossupportversion_string[48] = { '\0' };

STATIC int
sysctl_iossupportversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
{
    if (req->newptr != 0) {
        /*
         * Can only ever be set by launchd, and only once at boot.
         */
        if (req->p->p_pid != 1 || iossupportversion_string[0] != '\0') {
            return EPERM;
        }
    }

    return sysctl_handle_string(oidp, arg1, arg2, req);
}

SYSCTL_PROC(_kern, OID_AUTO, iossupportversion,
    CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
    iossupportversion_string, sizeof(iossupportversion_string),
    sysctl_iossupportversion, "A", "The iOSSupportVersion from SystemVersion.plist");
static uint64_t osvariant_status = 0;

STATIC int
sysctl_osvariant_status(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
{
    if (req->newptr != 0) {
        /*
         * Can only ever be set by launchd, and only once at boot.
         */
        if (req->p->p_pid != 1 || osvariant_status != 0) {
            return EPERM;
        }
    }

    return sysctl_handle_quad(oidp, arg1, arg2, req);
}

SYSCTL_PROC(_kern, OID_AUTO, osvariant_status,
    CTLFLAG_RW | CTLTYPE_QUAD | CTLFLAG_LOCKED | CTLFLAG_MASKED,
    &osvariant_status, sizeof(osvariant_status),
    sysctl_osvariant_status, "Q", "Opaque flags used to cache OS variant information");
extern void commpage_update_dyld_flags(uint64_t);
static uint64_t dyld_system_flags = 0;

STATIC int
sysctl_dyld_system_flags(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
{
    /*
     * Can only ever be set by launchd, possibly several times
     * as dyld may change its mind after a userspace reboot.
     */
    if (req->newptr != 0 && req->p->p_pid != 1) {
        return EPERM;
    }

    int res = sysctl_handle_quad(oidp, arg1, arg2, req);
    if (req->newptr && res == 0) {
        commpage_update_dyld_flags(osvariant_status);
    }
    return res;
}

SYSCTL_PROC(_kern, OID_AUTO, dyld_system_flags,
    CTLFLAG_RW | CTLTYPE_QUAD | CTLFLAG_LOCKED | CTLFLAG_MASKED,
    &dyld_system_flags, sizeof(dyld_system_flags),
    sysctl_dyld_system_flags, "Q", "Opaque flags used to cache dyld system-wide configuration");
#if defined(XNU_TARGET_OS_BRIDGE)
char macosproductversion[MACOS_VERS_LEN] = { '\0' };

SYSCTL_STRING(_kern, OID_AUTO, macosproductversion,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &macosproductversion[0], MACOS_VERS_LEN, "The currently running macOS ProductVersion (from SystemVersion.plist on macOS)");

char macosversion[MACOS_VERS_LEN] = { '\0' };

SYSCTL_STRING(_kern, OID_AUTO, macosversion,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &macosversion[0], MACOS_VERS_LEN, "The currently running macOS build version");
#endif
STATIC int
sysctl_sysctl_bootargs
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int error;
    char buf[BOOT_LINE_LENGTH];

    strlcpy(buf, PE_boot_args(), BOOT_LINE_LENGTH);
    error = sysctl_io_string(req, buf, BOOT_LINE_LENGTH, 0, NULL);
    return error;
}

SYSCTL_PROC(_kern, OID_AUTO, bootargs,
    CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING,
    NULL, 0,
    sysctl_sysctl_bootargs, "A", "bootargs");
STATIC int
sysctl_kernelcacheuuid(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
{
    int rval = ENOENT;
    if (kernelcache_uuid_valid) {
        rval = sysctl_handle_string(oidp, arg1, arg2, req);
    }
    return rval;
}

SYSCTL_PROC(_kern, OID_AUTO, kernelcacheuuid,
    CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
    kernelcache_uuid_string, sizeof(kernelcache_uuid_string),
    sysctl_kernelcacheuuid, "A", "");
SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &maxfiles, 0, "");
SYSCTL_INT(_kern, KERN_ARGMAX, argmax,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, ARG_MAX, "");
SYSCTL_INT(_kern, KERN_POSIX1, posix1version,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, _POSIX_VERSION, "");
SYSCTL_INT(_kern, KERN_NGROUPS, ngroups,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, NGROUPS_MAX, "");
SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, 1, "");
#if 1   /* _POSIX_SAVED_IDS from <unistd.h> */
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, 1, "");
#else
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    NULL, 0, "");
#endif
SYSCTL_INT(_kern, OID_AUTO, num_files,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &nfiles, 0, "");
SYSCTL_COMPAT_INT(_kern, OID_AUTO, num_vnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &numvnodes, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_tasks,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &task_max, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_threads,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &thread_max, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_taskthreads,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &task_threadmax, 0, "");
SYSCTL_LONG(_kern, OID_AUTO, num_recycledvnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &num_recycledvnodes, "");
STATIC int
sysctl_maxvnodes(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int oldval = desiredvnodes;
    int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &desiredvnodes, NULL);

    if (oldval != desiredvnodes) {
        resize_namecache(desiredvnodes);
    }

    return error;
}

SYSCTL_INT(_kern, OID_AUTO, namecache_disabled,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &nc_disabled, 0, "");

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_maxvnodes, "I", "");
SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_maxproc, "I", "");

SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_aiomax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_aioprocmax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_aiothreads, "I", "");

SYSCTL_PROC(_kern, OID_AUTO, sched_enable_smt,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN,
    0, 0, sysctl_sched_enable_smt, "I", "");

extern int sched_allow_NO_SMT_threads;
SYSCTL_INT(_kern, OID_AUTO, sched_allow_NO_SMT_threads,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_allow_NO_SMT_threads, 0, "");
#if (DEVELOPMENT || DEBUG)
extern int sched_smt_balance;
SYSCTL_INT(_kern, OID_AUTO, sched_smt_balance,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_smt_balance, 0, "");
extern int sched_allow_rt_smt;
SYSCTL_INT(_kern, OID_AUTO, sched_allow_rt_smt,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_allow_rt_smt, 0, "");
extern int sched_avoid_cpu0;
SYSCTL_INT(_kern, OID_AUTO, sched_avoid_cpu0,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_avoid_cpu0, 0, "");
#if __arm__ || __arm64__
extern uint32_t perfcontrol_requested_recommended_cores;
SYSCTL_UINT(_kern, OID_AUTO, sched_recommended_cores,
    CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
    &perfcontrol_requested_recommended_cores, 0, "");
/* Scheduler perfcontrol callouts sysctls */
SYSCTL_DECL(_kern_perfcontrol_callout);
SYSCTL_NODE(_kern, OID_AUTO, perfcontrol_callout, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "scheduler perfcontrol callouts");

extern int perfcontrol_callout_stats_enabled;
SYSCTL_INT(_kern_perfcontrol_callout, OID_AUTO, stats_enabled,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &perfcontrol_callout_stats_enabled, 0, "");

extern uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
    perfcontrol_callout_stat_t stat);
/* On-Core Callout */
STATIC int
sysctl_perfcontrol_callout_stat
(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
{
	perfcontrol_callout_stat_t stat = (perfcontrol_callout_stat_t)arg1;
	perfcontrol_callout_type_t type = (perfcontrol_callout_type_t)arg2;
	return sysctl_io_number(req, (int)perfcontrol_callout_stat_avg(type, stat),
	    sizeof(int), NULL, NULL);
}
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, oncore_instr,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_ON_CORE,
    sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, oncore_cycles,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_ON_CORE,
    sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, offcore_instr,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_OFF_CORE,
    sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, offcore_cycles,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_OFF_CORE,
    sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, context_instr,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_CONTEXT,
    sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, context_cycles,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_CONTEXT,
    sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, update_instr,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_STATE_UPDATE,
    sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, update_cycles,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_STATE_UPDATE,
    sysctl_perfcontrol_callout_stat, "I", "");
#if __AMP__
extern int sched_amp_idle_steal;
SYSCTL_INT(_kern, OID_AUTO, sched_amp_idle_steal,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_amp_idle_steal, 0, "");
extern int sched_amp_spill_steal;
SYSCTL_INT(_kern, OID_AUTO, sched_amp_spill_steal,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_amp_spill_steal, 0, "");
extern int sched_amp_spill_count;
SYSCTL_INT(_kern, OID_AUTO, sched_amp_spill_count,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_amp_spill_count, 0, "");
extern int sched_amp_spill_deferred_ipi;
SYSCTL_INT(_kern, OID_AUTO, sched_amp_spill_deferred_ipi,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_amp_spill_deferred_ipi, 0, "");
extern int sched_amp_pcores_preempt_immediate_ipi;
SYSCTL_INT(_kern, OID_AUTO, sched_amp_pcores_preempt_immediate_ipi,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_amp_pcores_preempt_immediate_ipi, 0, "");
#endif /* __AMP__ */
#endif /* __arm__ || __arm64__ */
#if __arm64__
extern int legacy_footprint_entitlement_mode;
SYSCTL_INT(_kern, OID_AUTO, legacy_footprint_entitlement_mode,
    CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
    &legacy_footprint_entitlement_mode, 0, "");
#endif /* __arm64__ */

#endif /* (DEVELOPMENT || DEBUG) */
STATIC int
sysctl_securelvl
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int error = sysctl_io_number(req, securelevel, sizeof(int), &new_value, &changed);
	if (changed) {
		if (!(new_value < securelevel && req->p->p_pid != 1)) {
			proc_list_lock();
			securelevel = new_value;
			proc_list_unlock();
		} else {
			error = EPERM;
		}
	}
	return error;
}

SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_securelvl, "I", "");
STATIC int
sysctl_domainname
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error, changed;
	char tmpname[MAXHOSTNAMELEN] = {};

	lck_mtx_lock(&domainname_lock);
	strlcpy(tmpname, domainname, sizeof(tmpname));
	lck_mtx_unlock(&domainname_lock);

	error = sysctl_io_string(req, tmpname, sizeof(tmpname), 0, &changed);
	if (!error && changed) {
		lck_mtx_lock(&hostname_lock);
		strlcpy(domainname, tmpname, sizeof(domainname));
		lck_mtx_unlock(&hostname_lock);
	}
	return error;
}

SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_domainname, "A", "");
SYSCTL_COMPAT_INT(_kern, KERN_HOSTID, hostid,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &hostid, 0, "");
STATIC int
sysctl_hostname
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error, changed;
	char tmpname[MAXHOSTNAMELEN] = {};

	lck_mtx_lock(&hostname_lock);
	strlcpy(tmpname, hostname, sizeof(tmpname));
	lck_mtx_unlock(&hostname_lock);

	error = sysctl_io_string(req, tmpname, sizeof(tmpname), 1, &changed);
	if (!error && changed) {
		lck_mtx_lock(&hostname_lock);
		strlcpy(hostname, tmpname, sizeof(hostname));
		lck_mtx_unlock(&hostname_lock);
	}
	return error;
}

SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_hostname, "A", "");
STATIC int
sysctl_procname
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	/* Original code allowed writing, I'm copying this, although this all makes
	 * no sense to me. Besides, this sysctl is never used. */
	return sysctl_io_string(req, &req->p->p_name[0], (2 * MAXCOMLEN + 1), 1, NULL);
}

SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, sysctl_procname, "A", "");
SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &speculative_reads_disabled, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, preheat_max_bytes,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &preheat_max_bytes, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, preheat_min_bytes,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &preheat_min_bytes, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &speculative_prefetch_max, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max_iosize,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &speculative_prefetch_max_iosize, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_target,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_free_target, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_min,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_free_min, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_reserved,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_free_reserved, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_percentage,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_pageout_state.vm_page_speculative_percentage, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_q_age_ms,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_pageout_state.vm_page_speculative_q_age_ms, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_max_delayed_work_limit,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_max_delayed_work_limit, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_max_batch,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_max_batch, 0, "");

SYSCTL_STRING(_kern, OID_AUTO, bootsessionuuid,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &bootsessionuuid_string, sizeof(bootsessionuuid_string), "");
STATIC int
sysctl_boottime
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	struct timeval tv;
	boottime_timeval(&tv);
	struct proc *p = req->p;

	if (proc_is64bit(p)) {
		struct user64_timeval t = {};
		t.tv_sec = tv.tv_sec;
		t.tv_usec = tv.tv_usec;
		return sysctl_io_opaque(req, &t, sizeof(t), NULL);
	} else {
		struct user32_timeval t = {};
		t.tv_sec = tv.tv_sec;
		t.tv_usec = tv.tv_usec;
		return sysctl_io_opaque(req, &t, sizeof(t), NULL);
	}
}

SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
    CTLTYPE_STRUCT | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_boottime, "S,timeval", "");
STATIC int
sysctl_symfile
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	char *str;
	int error = get_kernel_symfile(req->p, &str);
	if (error) {
		return error;
	}
	return sysctl_io_string(req, str, 0, 0, NULL);
}

SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_symfile, "A", "");
#if CONFIG_NFS_CLIENT
STATIC int
sysctl_netboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, netboot_root(), sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_netboot, "I", "");
#endif /* CONFIG_NFS_CLIENT */
#ifdef CONFIG_IMGSRC_ACCESS
/*
 * Legacy--act as if only one layer of nesting is possible.
 */
STATIC int
sysctl_imgsrcdev
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	vfs_context_t ctx = vfs_context_current();
	vnode_t devvp;
	int result;

	if (!vfs_context_issuser(ctx)) {
		return EPERM;
	}

	if (imgsrc_rootvnodes[0] == NULL) {
		return ENOENT;
	}

	result = vnode_getwithref(imgsrc_rootvnodes[0]);
	if (result != 0) {
		return result;
	}

	devvp = vnode_mount(imgsrc_rootvnodes[0])->mnt_devvp;
	result = vnode_getwithref(devvp);
	if (result != 0) {
		goto out;
	}

	result = sysctl_io_number(req, vnode_specrdev(devvp), sizeof(dev_t), NULL, NULL);

	vnode_put(devvp);
out:
	vnode_put(imgsrc_rootvnodes[0]);
	return result;
}

SYSCTL_PROC(_kern, OID_AUTO, imgsrcdev,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_imgsrcdev, "I", "");
STATIC int
sysctl_imgsrcinfo
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error;
	struct imgsrc_info info[MAX_IMAGEBOOT_NESTING] = {};    /* 2 for now, no problem */
	uint32_t i;
	vnode_t rvp, devvp;

	if (imgsrc_rootvnodes[0] == NULLVP) {
		return ENXIO;
	}

	for (i = 0; i < MAX_IMAGEBOOT_NESTING; i++) {
		/*
		 * Go get the root vnode.
		 */
		rvp = imgsrc_rootvnodes[i];
		if (rvp == NULLVP) {
			break;
		}

		error = vnode_get(rvp);
		if (error != 0) {
			return error;
		}

		/*
		 * For now, no getting at a non-local volume.
		 */
		devvp = vnode_mount(rvp)->mnt_devvp;
		if (devvp == NULL) {
			vnode_put(rvp);
			return EINVAL;
		}

		error = vnode_getwithref(devvp);
		if (error != 0) {
			vnode_put(rvp);
			return error;
		}

		info[i].ii_dev = vnode_specrdev(devvp);
		info[i].ii_flags = 0;
		info[i].ii_height = i;
		bzero(info[i].ii_reserved, sizeof(info[i].ii_reserved));

		vnode_put(devvp);
		vnode_put(rvp);
	}

	return sysctl_io_opaque(req, info, i * sizeof(info[0]), NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, imgsrcinfo,
    CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_imgsrcinfo, "I", "");

#endif /* CONFIG_IMGSRC_ACCESS */
SYSCTL_DECL(_kern_timer);
SYSCTL_NODE(_kern, OID_AUTO, timer, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "timer");

SYSCTL_INT(_kern_timer, OID_AUTO, coalescing_enabled,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &mach_timer_coalescing_enabled, 0, "");

SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_1,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &timer_deadline_tracking_bin_1, "");
SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_2,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &timer_deadline_tracking_bin_2, "");

SYSCTL_DECL(_kern_timer_longterm);
SYSCTL_NODE(_kern_timer, OID_AUTO, longterm, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "longterm");

/* Must match definition in osfmk/kern/timer_call.c */
enum {
	THRESHOLD, QCOUNT,
	ENQUEUES, DEQUEUES, ESCALATES, SCANS, PREEMPTS,
	LATENCY, LATENCY_MIN, LATENCY_MAX, SCAN_LIMIT, SCAN_INTERVAL, PAUSES
};

extern uint64_t timer_sysctl_get(int);
extern int      timer_sysctl_set(int, uint64_t);
STATIC int
sysctl_timer
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int oid = (int)arg1;
	uint64_t value = timer_sysctl_get(oid);
	uint64_t new_value;
	int error;
	int changed;

	error = sysctl_io_number(req, value, sizeof(value), &new_value, &changed);
	if (changed) {
		error = timer_sysctl_set(oid, new_value);
	}

	return error;
}
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, threshold,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    (void *) THRESHOLD, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_limit,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    (void *) SCAN_LIMIT, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_interval,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    (void *) SCAN_INTERVAL, 0, sysctl_timer, "Q", "");

SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, qlen,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) QCOUNT, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_pauses,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) PAUSES, 0, sysctl_timer, "Q", "");

SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, enqueues,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) ENQUEUES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, dequeues,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) DEQUEUES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, escalates,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) ESCALATES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scans,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) SCANS, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, preempts,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) PREEMPTS, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) LATENCY, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_min,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) LATENCY_MIN, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_max,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) LATENCY_MAX, 0, sysctl_timer, "Q", "");
STATIC int
sysctl_usrstack
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, (int)req->p->user_stack, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_USRSTACK32, usrstack,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_usrstack, "I", "");

STATIC int
sysctl_usrstack64
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, req->p->user_stack, sizeof(req->p->user_stack), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_usrstack64, "Q", "");
#if CONFIG_COREDUMP

SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    corefilename, sizeof(corefilename), "");

STATIC int
sysctl_coredump
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
#pragma unused(req)
	return ENOTSUP;
#else
	int new_value, changed;
	int error = sysctl_io_number(req, do_coredump, sizeof(int), &new_value, &changed);
	if (changed) {
		if ((new_value == 0) || (new_value == 1)) {
			do_coredump = new_value;
		} else {
			error = EINVAL;
		}
	}
	return error;
#endif
}

SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_coredump, "I", "");
STATIC int
sysctl_suid_coredump
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
#pragma unused(req)
	return ENOTSUP;
#else
	int new_value, changed;
	int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed);
	if (changed) {
		if ((new_value == 0) || (new_value == 1)) {
			sugid_coredump = new_value;
		} else {
			error = EINVAL;
		}
	}
	return error;
#endif
}

SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_suid_coredump, "I", "");

#endif /* CONFIG_COREDUMP */
STATIC int
sysctl_delayterm
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	struct proc *p = req->p;
	int new_value, changed;
	int error = sysctl_io_number(req, (req->p->p_lflag & P_LDELAYTERM) ? 1 : 0, sizeof(int), &new_value, &changed);
	if (changed) {
		proc_lock(p);
		if (new_value) {
			req->p->p_lflag |= P_LDELAYTERM;
		} else {
			req->p->p_lflag &= ~P_LDELAYTERM;
		}
		proc_unlock(p);
	}
	return error;
}

SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_delayterm, "I", "");
STATIC int
sysctl_rage_vnode
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	struct proc *p = req->p;
	uthread_t ut;
	int new_value, old_value, changed;
	int error;

	ut = get_bsdthread_info(current_thread());

	if (ut->uu_flag & UT_RAGE_VNODES) {
		old_value = KERN_RAGE_THREAD;
	} else if (p->p_lflag & P_LRAGE_VNODES) {
		old_value = KERN_RAGE_PROC;
	} else {
		old_value = 0;
	}

	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);

	if ((error == 0) && (changed != 0)) {
		switch (new_value) {
		case KERN_RAGE_PROC:
			proc_lock(p);
			p->p_lflag |= P_LRAGE_VNODES;
			proc_unlock(p);
			break;
		case KERN_UNRAGE_PROC:
			proc_lock(p);
			p->p_lflag &= ~P_LRAGE_VNODES;
			proc_unlock(p);
			break;

		case KERN_RAGE_THREAD:
			ut->uu_flag |= UT_RAGE_VNODES;
			break;
		case KERN_UNRAGE_THREAD:
			ut = get_bsdthread_info(current_thread());
			ut->uu_flag &= ~UT_RAGE_VNODES;
			break;
		}
	}
	return error;
}

SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, sysctl_rage_vnode, "I", "");
/* XXX until filecoordinationd fixes a bit of inverted logic. */
STATIC int
sysctl_vfsnspace
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int old_value = 0, new_value, changed;

	return sysctl_io_number(req, old_value, sizeof(int), &new_value,
	           &changed);
}

SYSCTL_PROC(_kern, OID_AUTO, vfsnspace,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, sysctl_vfsnspace, "I", "");
/* XXX move this interface into libproc and remove this sysctl */
STATIC int
sysctl_setthread_cpupercent
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, old_value;
	int error = 0;
	kern_return_t kret = KERN_SUCCESS;
	uint8_t percent = 0;
	int ms_refill = 0;

	if (!req->newptr) {
		return 0;
	}

	old_value = 0;

	if ((error = sysctl_io_number(req, old_value, sizeof(old_value), &new_value, NULL)) != 0) {
		return error;
	}

	percent = new_value & 0xff;                     /* low 8 bits for percent */
	ms_refill = (new_value >> 8) & 0xffffff;        /* upper 24 bits represent ms refill value */
	if (percent > 100) {
		return EINVAL;
	}

	/*
	 * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present.
	 */
	if ((kret = thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, percent, ms_refill * (int)NSEC_PER_MSEC)) != 0) {
		return EIO;
	}

	return 0;
}

SYSCTL_PROC(_kern, OID_AUTO, setthread_cpupercent,
    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_ANYBODY,
    0, 0, sysctl_setthread_cpupercent, "I", "set thread cpu percentage limit");
STATIC int
sysctl_kern_check_openevt
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	struct proc *p = req->p;
	int new_value, old_value, changed;
	int error;

	if (p->p_flag & P_CHECKOPENEVT) {
		old_value = KERN_OPENEVT_PROC;
	} else {
		old_value = 0;
	}

	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);

	if ((error == 0) && (changed != 0)) {
		switch (new_value) {
		case KERN_OPENEVT_PROC:
			OSBitOrAtomic(P_CHECKOPENEVT, &p->p_flag);
			break;

		case KERN_UNOPENEVT_PROC:
			OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT), &p->p_flag);
			break;

		default:
			error = EINVAL;
		}
	}
	return error;
}

SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");
#if DEVELOPMENT || DEBUG
STATIC int
sysctl_nx
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
#pragma unused(req)
	return ENOTSUP;
#else
	int new_value, changed;
	int error;

	error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);
	if (error) {
		return error;
	}

	if (changed) {
#if defined(__x86_64__)
		/*
		 * Only allow setting if NX is supported on the chip
		 */
		if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD)) {
			return ENOTSUP;
		}
#endif
		nx_enabled = new_value;
	}
	return error;
#endif /* SECURE_KERNEL */
}
#endif

#if DEVELOPMENT || DEBUG
SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    0, 0, sysctl_nx, "I", "");
#endif
STATIC int
sysctl_loadavg
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	if (proc_is64bit(req->p)) {
		struct user64_loadavg loadinfo64 = {};
		fill_loadavg64(&averunnable, &loadinfo64);
		return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);
	} else {
		struct user32_loadavg loadinfo32 = {};
		fill_loadavg32(&averunnable, &loadinfo32);
		return sysctl_io_opaque(req, &loadinfo32, sizeof(loadinfo32), NULL);
	}
}

SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_loadavg, "S,loadavg", "");
/*
 * Note:	Thread safe; vm_map_lock protects in vm_toggle_entry_reuse()
 */
STATIC int
sysctl_vm_toggle_address_reuse(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
	int old_value = 0, new_value = 0, error = 0;

	if (vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE, &old_value )) {
		return error;
	}
	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, NULL);
	if (!error) {
		return vm_toggle_entry_reuse(new_value, NULL);
	}
	return error;
}

SYSCTL_PROC(_debug, OID_AUTO, toggle_address_reuse, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_toggle_address_reuse, "I", "");
#ifdef CONFIG_XNUPOST

extern int xnupost_export_testdata(void *outp, uint32_t size, uint32_t *lenp);
extern uint32_t xnupost_get_estimated_testdata_size(void);
extern int xnupost_reset_all_tests(void);

STATIC int
sysctl_handle_xnupost_get_tests SYSCTL_HANDLER_ARGS
{
	/* fixup unused arguments warnings */
	__unused int _oa2 = arg2;
	__unused void * _oa1 = arg1;
	__unused struct sysctl_oid * _oidp = oidp;

	int error = 0;
	user_addr_t oldp = 0;
	user_addr_t newp = 0;
	uint32_t usedbytes = 0;

	oldp = req->oldptr;
	newp = req->newptr;

	if ((void *)oldp == NULL) {
		/* return estimated size for second call where info can be placed */
		req->oldidx = xnupost_get_estimated_testdata_size();
	} else {
		error = xnupost_export_testdata((void *)oldp, req->oldlen, &usedbytes);
		req->oldidx = usedbytes;
	}

	return error;
}

SYSCTL_PROC(_debug,
    OID_AUTO,
    xnupost_get_tests,
    CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,
    0,
    sysctl_handle_xnupost_get_tests,
    "-",
    "read xnupost test data in kernel");
STATIC int
sysctl_debug_xnupost_ctl SYSCTL_HANDLER_ARGS
{
	/* fixup unused arguments warnings */
	__unused int _oa2 = arg2;
	__unused void * _oa1 = arg1;
	__unused struct sysctl_oid * _oidp = oidp;

#define ARRCOUNT 4
	/*
	 * INPUT: ACTION, PARAM1, PARAM2, PARAM3
	 * OUTPUT: RESULTCODE, ADDITIONAL DATA
	 */
	int32_t outval[ARRCOUNT] = {0};
	int32_t input[ARRCOUNT] = {0};
	int32_t out_size = sizeof(outval);
	int32_t in_size = sizeof(input);
	int error = 0;

	/* if this is NULL call to find out size, send out size info */
	if (!req->newptr) {
		goto out;
	}

	/* pull in provided value from userspace */
	error = SYSCTL_IN(req, &input[0], in_size);
	if (error) {
		return error;
	}

	if (input[0] == XTCTL_RESET_TESTDATA) {
		outval[0] = xnupost_reset_all_tests();
		goto out;
	}

out:
	error = SYSCTL_OUT(req, &outval[0], out_size);
	return error;
}

SYSCTL_PROC(_debug,
    OID_AUTO,
    xnupost_testctl,
    CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED,
    0,
    0,
    sysctl_debug_xnupost_ctl,
    "I",
    "xnupost control for kernel testing");
extern void test_oslog_handleOSLogCtl(int32_t * in, int32_t * out, int32_t arraycount);

STATIC int
sysctl_debug_test_oslog_ctl(__unused struct sysctl_oid * oidp, __unused void * arg1, __unused int arg2, struct sysctl_req * req)
{
	int32_t outval[ARRCOUNT] = {0};
	int32_t input[ARRCOUNT] = {0};
	int32_t size_outval = sizeof(outval);
	int32_t size_inval = sizeof(input);
	int32_t error;

	/* if this is NULL call to find out size, send out size info */
	if (!req->newptr) {
		error = SYSCTL_OUT(req, &outval[0], size_outval);
		return error;
	}

	/* pull in provided value from userspace */
	error = SYSCTL_IN(req, &input[0], size_inval);
	if (error) {
		return error;
	}

	test_oslog_handleOSLogCtl(input, outval, ARRCOUNT);

	error = SYSCTL_OUT(req, &outval[0], size_outval);

	return error;
}

SYSCTL_PROC(_debug,
    OID_AUTO,
    test_OSLogCtl,
    CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED,
    0,
    0,
    sysctl_debug_test_oslog_ctl,
    "I",
    "testing oslog in kernel");
#include <mach/task.h>
#include <mach/semaphore.h>

extern lck_grp_t * sysctl_debug_test_stackshot_owner_grp; /* used for both mutexes and rwlocks */
extern lck_mtx_t * sysctl_debug_test_stackshot_owner_init_mtx; /* used to protect lck_*_init */

/* This is a sysctl for testing collection of owner info on a lock in kernel space. A multi-threaded
 * test from userland sets this sysctl in such a way that a thread blocks in kernel mode, and a
 * stackshot is taken to see if the owner of the lock can be identified.
 *
 * We can't return to userland with a kernel lock held, so be sure to unlock before we leave.
 * the semaphores allow us to artificially create cases where the lock is being held and the
 * thread is hanging / taking a long time to do something. */

volatile char   sysctl_debug_test_stackshot_mtx_inited = 0;
semaphore_t     sysctl_debug_test_stackshot_mutex_sem;
lck_mtx_t       sysctl_debug_test_stackshot_owner_lck;

#define SYSCTL_DEBUG_MTX_ACQUIRE_WAIT   1
#define SYSCTL_DEBUG_MTX_ACQUIRE_NOWAIT 2
#define SYSCTL_DEBUG_MTX_SIGNAL         3
#define SYSCTL_DEBUG_MTX_TEARDOWN       4
STATIC int
sysctl_debug_test_stackshot_mutex_owner(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	long long option = -1;
	/* if the user tries to read the sysctl, we tell them what the address of the lock is (to test against stackshot's output) */
	long long mtx_unslid_addr = (long long)VM_KERNEL_UNSLIDE_OR_PERM(&sysctl_debug_test_stackshot_owner_lck);
	int error = sysctl_io_number(req, mtx_unslid_addr, sizeof(long long), (void*)&option, NULL);

	lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx);
	if (!sysctl_debug_test_stackshot_mtx_inited) {
		lck_mtx_init(&sysctl_debug_test_stackshot_owner_lck,
		    sysctl_debug_test_stackshot_owner_grp,
		    LCK_ATTR_NULL);
		semaphore_create(kernel_task,
		    &sysctl_debug_test_stackshot_mutex_sem,
		    SYNC_POLICY_FIFO, 0);
		sysctl_debug_test_stackshot_mtx_inited = 1;
	}
	lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx);

	if (!error) {
		switch (option) {
		case SYSCTL_DEBUG_MTX_ACQUIRE_NOWAIT:
			lck_mtx_lock(&sysctl_debug_test_stackshot_owner_lck);
			lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_lck);
			break;
		case SYSCTL_DEBUG_MTX_ACQUIRE_WAIT:
			lck_mtx_lock(&sysctl_debug_test_stackshot_owner_lck);
			semaphore_wait(sysctl_debug_test_stackshot_mutex_sem);
			lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_lck);
			break;
		case SYSCTL_DEBUG_MTX_SIGNAL:
			semaphore_signal(sysctl_debug_test_stackshot_mutex_sem);
			break;
		case SYSCTL_DEBUG_MTX_TEARDOWN:
			lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx);

			lck_mtx_destroy(&sysctl_debug_test_stackshot_owner_lck,
			    sysctl_debug_test_stackshot_owner_grp);
			semaphore_destroy(kernel_task,
			    sysctl_debug_test_stackshot_mutex_sem);
			sysctl_debug_test_stackshot_mtx_inited = 0;

			lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx);
			break;
		case -1:        /* user just wanted to read the value, so do nothing */
			break;
		default:
			error = EINVAL;
			break;
		}
	}
	return error;
}

/* we can't return to userland with a kernel rwlock held, so be sure to unlock before we leave.
 * the semaphores allow us to artificially create cases where the lock is being held and the
 * thread is hanging / taking a long time to do something. */

SYSCTL_PROC(_debug,
    OID_AUTO,
    test_MutexOwnerCtl,
    CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    0,
    0,
    sysctl_debug_test_stackshot_mutex_owner,
    "-",
    "Testing mutex owner in kernel");
volatile char   sysctl_debug_test_stackshot_rwlck_inited = 0;
lck_rw_t        sysctl_debug_test_stackshot_owner_rwlck;
semaphore_t     sysctl_debug_test_stackshot_rwlck_sem;

#define SYSCTL_DEBUG_KRWLCK_RACQUIRE_NOWAIT 1
#define SYSCTL_DEBUG_KRWLCK_RACQUIRE_WAIT   2
#define SYSCTL_DEBUG_KRWLCK_WACQUIRE_NOWAIT 3
#define SYSCTL_DEBUG_KRWLCK_WACQUIRE_WAIT   4
#define SYSCTL_DEBUG_KRWLCK_SIGNAL          5
#define SYSCTL_DEBUG_KRWLCK_TEARDOWN        6

STATIC int
sysctl_debug_test_stackshot_rwlck_owner(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	long long option = -1;
	/* if the user tries to read the sysctl, we tell them what the address of the lock is
	 * (to test against stackshot's output) */
	long long rwlck_unslid_addr = (long long)VM_KERNEL_UNSLIDE_OR_PERM(&sysctl_debug_test_stackshot_owner_rwlck);
	int error = sysctl_io_number(req, rwlck_unslid_addr, sizeof(long long), (void*)&option, NULL);

	lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx);
	if (!sysctl_debug_test_stackshot_rwlck_inited) {
		lck_rw_init(&sysctl_debug_test_stackshot_owner_rwlck,
		    sysctl_debug_test_stackshot_owner_grp,
		    LCK_ATTR_NULL);
		semaphore_create(kernel_task,
		    &sysctl_debug_test_stackshot_rwlck_sem,
		    SYNC_POLICY_FIFO, 0);
		sysctl_debug_test_stackshot_rwlck_inited = 1;
	}
	lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx);

	if (!error) {
		switch (option) {
		case SYSCTL_DEBUG_KRWLCK_RACQUIRE_NOWAIT:
			lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
			lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
			break;
		case SYSCTL_DEBUG_KRWLCK_RACQUIRE_WAIT:
			lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
			semaphore_wait(sysctl_debug_test_stackshot_rwlck_sem);
			lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
			break;
		case SYSCTL_DEBUG_KRWLCK_WACQUIRE_NOWAIT:
			lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
			lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
			break;
		case SYSCTL_DEBUG_KRWLCK_WACQUIRE_WAIT:
			lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
			semaphore_wait(sysctl_debug_test_stackshot_rwlck_sem);
			lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
			break;
		case SYSCTL_DEBUG_KRWLCK_SIGNAL:
			semaphore_signal(sysctl_debug_test_stackshot_rwlck_sem);
			break;
		case SYSCTL_DEBUG_KRWLCK_TEARDOWN:
			lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx);

			lck_rw_destroy(&sysctl_debug_test_stackshot_owner_rwlck,
			    sysctl_debug_test_stackshot_owner_grp);
			semaphore_destroy(kernel_task,
			    sysctl_debug_test_stackshot_rwlck_sem);
			sysctl_debug_test_stackshot_rwlck_inited = 0;

			lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx);
			break;
		case -1:        /* user just wanted to read the value, so do nothing */
			break;
		default:
			error = EINVAL;
			break;
		}
	}
	return error;
}

SYSCTL_PROC(_debug,
    OID_AUTO,
    test_RWLockOwnerCtl,
    CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    0,
    0,
    sysctl_debug_test_stackshot_rwlck_owner,
    "-",
    "Testing rwlock owner in kernel");
#endif /* !CONFIG_XNUPOST */

STATIC int
sysctl_swapusage
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error;
	uint64_t swap_total;
	uint64_t swap_avail;
	vm_size_t swap_pagesize;
	boolean_t swap_encrypted;
	struct xsw_usage xsu = {};

	error = macx_swapinfo(&swap_total,
	    &swap_avail,
	    &swap_pagesize,
	    &swap_encrypted);
	if (error) {
		return error;
	}

	xsu.xsu_total = swap_total;
	xsu.xsu_avail = swap_avail;
	xsu.xsu_used = swap_total - swap_avail;
	xsu.xsu_pagesize = swap_pagesize;
	xsu.xsu_encrypted = swap_encrypted;
	return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL);
}

SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_swapusage, "S,xsw_usage", "");
#if CONFIG_FREEZE
extern void vm_page_reactivate_all_throttled(void);
extern void memorystatus_disable_freeze(void);

STATIC int
sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error, val = memorystatus_freeze_enabled ? 1 : 0;
	boolean_t disabled;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr) {
		return error;
	}

	if (!VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
		//assert(req->newptr);
		printf("Failed attempt to set vm.freeze_enabled sysctl\n");
		return EINVAL;
	}

	/*
	 * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue.
	 */
	disabled = (!val && memorystatus_freeze_enabled);

	memorystatus_freeze_enabled = val ? TRUE : FALSE;

	if (disabled) {
		vm_page_reactivate_all_throttled();
		memorystatus_disable_freeze();
	}

	return 0;
}

SYSCTL_PROC(_vm, OID_AUTO, freeze_enabled, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY, &memorystatus_freeze_enabled, 0, sysctl_freeze_enabled, "I", "");
#endif /* CONFIG_FREEZE */
#if DEVELOPMENT || DEBUG
extern int vm_num_swap_files_config;
extern int vm_num_swap_files;
extern lck_mtx_t vm_swap_data_lock;
#define VM_MAX_SWAP_FILE_NUM            100

STATIC int
sysctl_vm_config_num_swap_files SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error = 0, val = vm_num_swap_files_config;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr) {
		goto out;
	}

	if (!VM_CONFIG_SWAP_IS_ACTIVE && !VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
		printf("Swap is disabled\n");
		error = EINVAL;
		goto out;
	}

	lck_mtx_lock(&vm_swap_data_lock);

	if (val < vm_num_swap_files) {
		printf("Cannot configure fewer swap files than already exist.\n");
		error = EINVAL;
		lck_mtx_unlock(&vm_swap_data_lock);
		goto out;
	}

	if (val > VM_MAX_SWAP_FILE_NUM) {
		printf("Capping number of swap files to upper bound.\n");
		val = VM_MAX_SWAP_FILE_NUM;
	}

	vm_num_swap_files_config = val;
	lck_mtx_unlock(&vm_swap_data_lock);
out:

	return error;
}

SYSCTL_PROC(_debug, OID_AUTO, num_swap_files_configured, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_config_num_swap_files, "I", "");
#endif /* DEVELOPMENT || DEBUG */
/* this kernel does NOT implement shared_region_make_private_np() */
SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    (int *)NULL, 0, "");
STATIC int
fetch_process_cputype(
	proc_t cur_proc,
	int *name,
	u_int namelen,
	cpu_type_t *cputype)
{
	proc_t p = PROC_NULL;
	int refheld = 0;
	cpu_type_t ret = 0;
	int error = 0;

	if (namelen == 0) {
		p = cur_proc;
	} else if (namelen == 1) {
		p = proc_find(name[0]);
		if (p == NULL) {
			return EINVAL;
		}
		refheld = 1;
	} else {
		return EINVAL;
	}

	ret = cpu_type() & ~CPU_ARCH_MASK;
	if (IS_64BIT_PROCESS(p)) {
		ret |= CPU_ARCH_ABI64;
	}

	*cputype = ret;

	if (refheld != 0) {
		proc_rele(p);
	}

	return error;
}
STATIC int
sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
    struct sysctl_req *req)
{
	int error;
	cpu_type_t proc_cputype = 0;
	if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0) {
		return error;
	}
	int res = 1;
	if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK)) {
		res = 0;
	}
	return SYSCTL_OUT(req, &res, sizeof(res));
}
SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_native, "I", "proc_native");

STATIC int
sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
    struct sysctl_req *req)
{
	int error;
	cpu_type_t proc_cputype = 0;
	if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0) {
		return error;
	}
	return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
}
SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_cputype, "I", "proc_cputype");
STATIC int
sysctl_safeboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, boothowto & RB_SAFEBOOT ? 1 : 0, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_safeboot, "I", "");

STATIC int
sysctl_singleuser
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, boothowto & RB_SINGLE ? 1 : 0, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, singleuser,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_singleuser, "I", "");

STATIC int
sysctl_minimalboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, minimalboot, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, minimalboot,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_minimalboot, "I", "");
/*
 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
 */
extern boolean_t affinity_sets_enabled;
extern int       affinity_sets_mapping;

SYSCTL_INT(_kern, OID_AUTO, affinity_sets_enabled,
    CTLFLAG_RW | CTLFLAG_LOCKED, (int *) &affinity_sets_enabled, 0, "hinting enabled");
SYSCTL_INT(_kern, OID_AUTO, affinity_sets_mapping,
    CTLFLAG_RW | CTLFLAG_LOCKED, &affinity_sets_mapping, 0, "mapping policy");
/*
 * Boolean indicating if KASLR is active.
 */
STATIC int
sysctl_slide
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	uint32_t slide;

	slide = vm_kernel_slide ? 1 : 0;

	return sysctl_io_number(req, slide, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, slide,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_slide, "I", "");
/*
 * Limit on total memory users can wire.
 *
 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
 *
 * vm_per_task_user_wire_limit - per address space limit on wired memory. This puts a cap on the process's rlimit value.
 *
 * These values are initialized to reasonable defaults at boot time based on the available physical memory.
 *
 * All values are in bytes.
 */

vm_map_size_t   vm_global_user_wire_limit;
vm_map_size_t   vm_per_task_user_wire_limit;
extern uint64_t max_mem;

/*
 * We used to have a global in the kernel called vm_global_no_user_wire_limit which was the inverse
 * of vm_global_user_wire_limit. But maintaining both of those is silly, and vm_global_user_wire_limit is the
 * real limit.
 * This function is for backwards compatibility with userspace
 * since we exposed the old global via a sysctl.
 */
STATIC int
sysctl_global_no_user_wire_amount(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	vm_map_size_t old_value;
	vm_map_size_t new_value;
	int changed;
	int error;

	old_value = max_mem - vm_global_user_wire_limit;
	error = sysctl_io_number(req, old_value, sizeof(vm_map_size_t), &new_value, &changed);
	if (changed) {
		if ((uint64_t)new_value > max_mem) {
			error = EINVAL;
		} else {
			vm_global_user_wire_limit = max_mem - new_value;
		}
	}
	return error;
}
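/*
 * Worked example (illustrative numbers): with max_mem = 16 GiB and
 * vm_global_user_wire_limit = 12 GiB, a read of this OID reports
 * 16 GiB - 12 GiB = 4 GiB. Writing a new "no wire" amount N simply stores
 * max_mem - N back into vm_global_user_wire_limit, after the N <= max_mem
 * sanity check above.
 */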
/*
 * There needs to be a more automatic/elegant way to do this
 */
#if defined(__ARM__)
SYSCTL_INT(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, 0, "");
SYSCTL_INT(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_per_task_user_wire_limit, 0, "");
SYSCTL_PROC(_vm, OID_AUTO, global_no_user_wire_amount, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, &sysctl_global_no_user_wire_amount, "I", "");
#else
SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, "");
SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_per_task_user_wire_limit, "");
SYSCTL_PROC(_vm, OID_AUTO, global_no_user_wire_amount, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, &sysctl_global_no_user_wire_amount, "Q", "");
#endif
#if DEVELOPMENT || DEBUG
/* These sysctls are used to test the wired limit. */
extern unsigned int     vm_page_wire_count;
extern uint32_t         vm_lopage_free_count;
SYSCTL_INT(_vm, OID_AUTO, page_wire_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_wire_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lopage_free_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_lopage_free_count, 0, "");
#endif /* DEVELOPMENT */
extern int vm_map_copy_overwrite_aligned_src_not_internal;
extern int vm_map_copy_overwrite_aligned_src_not_symmetric;
extern int vm_map_copy_overwrite_aligned_src_large;
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_internal, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_symmetric, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_symmetric, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_large, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_large, 0, "");

extern uint32_t vm_page_external_count;

SYSCTL_INT(_vm, OID_AUTO, vm_page_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_external_count, 0, "");

SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_filecache_min, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_xpmapped_min, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_xpmapped_min, 0, "");
#if DEVELOPMENT || DEBUG
SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_filecache_min_divisor, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_xpmapped_min_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_xpmapped_min_divisor, 0, "");
#endif
extern int      vm_compressor_mode;
extern int      vm_compressor_is_active;
extern int      vm_compressor_available;
extern uint32_t vm_ripe_target_age;
extern uint32_t swapout_target_age;
extern int64_t  compressor_bytes_used;
extern int64_t  c_segment_input_bytes;
extern int64_t  c_segment_compressed_bytes;
extern uint32_t compressor_eval_period_in_msecs;
extern uint32_t compressor_sample_min_in_msecs;
extern uint32_t compressor_sample_max_in_msecs;
extern uint32_t compressor_thrashing_threshold_per_10msecs;
extern uint32_t compressor_thrashing_min_per_10msecs;
extern uint32_t vm_compressor_time_thread;

#if DEVELOPMENT || DEBUG
extern uint32_t vm_compressor_minorcompact_threshold_divisor;
extern uint32_t vm_compressor_majorcompact_threshold_divisor;
extern uint32_t vm_compressor_unthrottle_threshold_divisor;
extern uint32_t vm_compressor_catchup_threshold_divisor;

extern uint32_t vm_compressor_minorcompact_threshold_divisor_overridden;
extern uint32_t vm_compressor_majorcompact_threshold_divisor_overridden;
extern uint32_t vm_compressor_unthrottle_threshold_divisor_overridden;
extern uint32_t vm_compressor_catchup_threshold_divisor_overridden;

extern vmct_stats_t vmct_stats;
STATIC int
sysctl_minorcompact_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int error = sysctl_io_number(req, vm_compressor_minorcompact_threshold_divisor, sizeof(int), &new_value, &changed);

	if (changed) {
		vm_compressor_minorcompact_threshold_divisor = new_value;
		vm_compressor_minorcompact_threshold_divisor_overridden = 1;
	}
	return error;
}

SYSCTL_PROC(_vm, OID_AUTO, compressor_minorcompact_threshold_divisor,
    CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
    0, 0, sysctl_minorcompact_threshold_divisor, "I", "");
STATIC int
sysctl_majorcompact_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int error = sysctl_io_number(req, vm_compressor_majorcompact_threshold_divisor, sizeof(int), &new_value, &changed);

	if (changed) {
		vm_compressor_majorcompact_threshold_divisor = new_value;
		vm_compressor_majorcompact_threshold_divisor_overridden = 1;
	}
	return error;
}

SYSCTL_PROC(_vm, OID_AUTO, compressor_majorcompact_threshold_divisor,
    CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
    0, 0, sysctl_majorcompact_threshold_divisor, "I", "");
STATIC int
sysctl_unthrottle_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int error = sysctl_io_number(req, vm_compressor_unthrottle_threshold_divisor, sizeof(int), &new_value, &changed);

	if (changed) {
		vm_compressor_unthrottle_threshold_divisor = new_value;
		vm_compressor_unthrottle_threshold_divisor_overridden = 1;
	}
	return error;
}

SYSCTL_PROC(_vm, OID_AUTO, compressor_unthrottle_threshold_divisor,
    CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
    0, 0, sysctl_unthrottle_threshold_divisor, "I", "");
STATIC int
sysctl_catchup_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int error = sysctl_io_number(req, vm_compressor_catchup_threshold_divisor, sizeof(int), &new_value, &changed);

	if (changed) {
		vm_compressor_catchup_threshold_divisor = new_value;
		vm_compressor_catchup_threshold_divisor_overridden = 1;
	}
	return error;
}

SYSCTL_PROC(_vm, OID_AUTO, compressor_catchup_threshold_divisor,
    CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
    0, 0, sysctl_catchup_threshold_divisor, "I", "");
#endif /* DEVELOPMENT || DEBUG */
SYSCTL_QUAD(_vm, OID_AUTO, compressor_input_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_input_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_compressed_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_bytes_used, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_bytes_used, "");

SYSCTL_INT(_vm, OID_AUTO, compressor_mode, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_mode, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_is_active, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_is_active, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_swapout_target_age, CTLFLAG_RD | CTLFLAG_LOCKED, &swapout_target_age, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_available, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_available, 0, "");

SYSCTL_INT(_vm, OID_AUTO, vm_ripe_target_age_in_secs, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_ripe_target_age, 0, "");

SYSCTL_INT(_vm, OID_AUTO, compressor_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_eval_period_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_sample_min_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_min_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_sample_max_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_max_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_threshold_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_threshold_per_10msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_min_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_min_per_10msecs, 0, "");

SYSCTL_STRING(_vm, OID_AUTO, swapfileprefix, CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, swapfilename, sizeof(swapfilename) - SWAPFILENAME_INDEX_LEN, "");

SYSCTL_INT(_vm, OID_AUTO, compressor_timing_enabled, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_time_thread, 0, "");
#if DEVELOPMENT || DEBUG
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_runtime0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_runtimes[0], "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_runtime1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_runtimes[1], "");

SYSCTL_QUAD(_vm, OID_AUTO, compressor_threads_total, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_cthreads_total, "");

SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_pages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_pages[0], "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_pages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_pages[1], "");

SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_iterations0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_iterations[0], "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_iterations1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_iterations[1], "");

SYSCTL_INT(_vm, OID_AUTO, compressor_thread_minpages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_minpages[0], 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thread_minpages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_minpages[1], 0, "");

SYSCTL_INT(_vm, OID_AUTO, compressor_thread_maxpages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_maxpages[0], 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thread_maxpages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_maxpages[1], 0, "");
#endif
SYSCTL_QUAD(_vm, OID_AUTO, lz4_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compressions, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compression_failures, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compressed_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_wk_compression_delta, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_wk_compression_delta, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_wk_compression_negative_delta, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_wk_compression_negative_delta, "");

SYSCTL_QUAD(_vm, OID_AUTO, lz4_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_decompressions, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_decompressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_decompressed_bytes, "");

SYSCTL_QUAD(_vm, OID_AUTO, uc_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.uc_decompressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_cabstime, "");

SYSCTL_QUAD(_vm, OID_AUTO, wkh_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_cabstime, "");
SYSCTL_QUAD(_vm, OID_AUTO, wkh_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_compressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wks_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_cabstime, "");
SYSCTL_QUAD(_vm, OID_AUTO, wks_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_compressions_exclusive, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressions_exclusive, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_sv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_sv_compressions, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_mzv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_mzv_compressions, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compression_failures, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_compressed_bytes_exclusive, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressed_bytes_exclusive, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_compressed_bytes_total, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressed_bytes_total, "");

SYSCTL_QUAD(_vm, OID_AUTO, wks_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compressed_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, wks_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compression_failures, "");
SYSCTL_QUAD(_vm, OID_AUTO, wks_sv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_sv_compressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_decompressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_dabstime, "");

SYSCTL_QUAD(_vm, OID_AUTO, wkh_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_dabstime, "");
SYSCTL_QUAD(_vm, OID_AUTO, wkh_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_decompressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wks_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_dabstime, "");
SYSCTL_QUAD(_vm, OID_AUTO, wks_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_decompressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_decompressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_decompressed_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_sv_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_sv_decompressions, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, wkdm_reeval_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.wkdm_reeval_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_max_failure_skips, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_failure_skips, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_max_failure_run_length, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_failure_run_length, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_max_preselects, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_preselects, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_run_preselection_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_run_preselection_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_run_continue_bytes, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_run_continue_bytes, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_profitable_bytes, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_profitable_bytes, 0, "");
#if DEVELOPMENT || DEBUG
extern int vm_compressor_current_codec;
extern int vm_compressor_test_seg_wp;
extern boolean_t vm_compressor_force_sw_wkdm;
SYSCTL_INT(_vm, OID_AUTO, compressor_codec, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_current_codec, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_test_wp, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_test_seg_wp, 0, "");

SYSCTL_INT(_vm, OID_AUTO, wksw_force, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_force_sw_wkdm, 0, "");
extern int precompy, wkswhw;

SYSCTL_INT(_vm, OID_AUTO, precompy, CTLFLAG_RW | CTLFLAG_LOCKED, &precompy, 0, "");
SYSCTL_INT(_vm, OID_AUTO, wkswhw, CTLFLAG_RW | CTLFLAG_LOCKED, &wkswhw, 0, "");
extern unsigned int vm_ktrace_enabled;
SYSCTL_INT(_vm, OID_AUTO, vm_ktrace, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_ktrace_enabled, 0, "");
#endif /* DEVELOPMENT || DEBUG */
#if CONFIG_PHANTOM_CACHE
extern uint32_t phantom_cache_thrashing_threshold;
extern uint32_t phantom_cache_eval_period_in_msecs;
extern uint32_t phantom_cache_thrashing_threshold_ssd;

SYSCTL_INT(_vm, OID_AUTO, phantom_cache_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_eval_period_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold_ssd, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold_ssd, 0, "");
#endif /* CONFIG_PHANTOM_CACHE */
#if CONFIG_BACKGROUND_QUEUE

extern uint32_t vm_page_background_count;
extern uint32_t vm_page_background_target;
extern uint32_t vm_page_background_internal_count;
extern uint32_t vm_page_background_external_count;
extern uint32_t vm_page_background_mode;
extern uint32_t vm_page_background_exclude_external;
extern uint64_t vm_page_background_promoted_count;
extern uint64_t vm_pageout_rejected_bq_internal;
extern uint64_t vm_pageout_rejected_bq_external;

SYSCTL_INT(_vm, OID_AUTO, vm_page_background_mode, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_mode, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_exclude_external, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_exclude_external, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_target, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_target, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_internal_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_internal_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_external_count, 0, "");

SYSCTL_QUAD(_vm, OID_AUTO, vm_page_background_promoted_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_promoted_count, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_considered_bq_internal, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_considered_bq_external, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_internal, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_external, "");

#endif /* CONFIG_BACKGROUND_QUEUE */
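/*
 * Usage sketch (illustrative): the background-queue gauges above are 32-bit
 * while the promoted/rejected statistics are 64-bit, so a reader must size
 * the output buffer per OID:
 *
 *	uint32_t count = 0;
 *	size_t len = sizeof(count);
 *	sysctlbyname("vm.vm_page_background_count", &count, &len, NULL, 0);
 */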
extern void vm_update_darkwake_mode(boolean_t);
extern boolean_t vm_darkwake_mode;

static int
sysctl_toggle_darkwake_mode(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, vm_darkwake_mode, sizeof(int), &new_value, &changed);

    if (!error && changed) {
        if (new_value != 0 && new_value != 1) {
            printf("Error: Invalid value passed to darkwake sysctl. Acceptable: 0 or 1.\n");
            error = EINVAL;
        } else {
            vm_update_darkwake_mode((boolean_t) new_value);
        }
    }

    return error;
}

SYSCTL_PROC(_vm, OID_AUTO, darkwake_mode,
    CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
    0, 0, sysctl_toggle_darkwake_mode, "I", "");
#if (DEVELOPMENT || DEBUG)

SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_hard,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_creation_throttled_hard, 0, "");

SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_soft,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_creation_throttled_soft, 0, "");

extern uint32_t vm_pageout_memorystatus_fb_factor_nr;
extern uint32_t vm_pageout_memorystatus_fb_factor_dr;
SYSCTL_INT(_vm, OID_AUTO, vm_pageout_memorystatus_fb_factor_nr, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_memorystatus_fb_factor_nr, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_pageout_memorystatus_fb_factor_dr, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_memorystatus_fb_factor_dr, 0, "");

extern uint32_t vm_grab_anon_nops;

SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_overrides, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_debug.vm_grab_anon_overrides, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_nops, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_debug.vm_grab_anon_nops, 0, "");
/* log message counters for persistence mode */
extern uint32_t oslog_p_total_msgcount;
extern uint32_t oslog_p_metadata_saved_msgcount;
extern uint32_t oslog_p_metadata_dropped_msgcount;
extern uint32_t oslog_p_error_count;
extern uint32_t oslog_p_saved_msgcount;
extern uint32_t oslog_p_dropped_msgcount;
extern uint32_t oslog_p_boot_dropped_msgcount;

/* log message counters for streaming mode */
extern uint32_t oslog_s_total_msgcount;
extern uint32_t oslog_s_metadata_msgcount;
extern uint32_t oslog_s_error_count;
extern uint32_t oslog_s_streamed_msgcount;
extern uint32_t oslog_s_dropped_msgcount;
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_total_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_saved_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_dropped_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_error_count, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_saved_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_dropped_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_boot_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_boot_dropped_msgcount, 0, "");

SYSCTL_UINT(_debug, OID_AUTO, oslog_s_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_total_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_metadata_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_metadata_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_error_count, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_streamed_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_streamed_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_dropped_msgcount, 0, "");
#endif /* DEVELOPMENT || DEBUG */

/*
 * Enable tracing of voucher contents
 */
extern uint32_t ipc_voucher_trace_contents;

SYSCTL_INT(_kern, OID_AUTO, ipc_voucher_trace_contents,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ipc_voucher_trace_contents, 0, "Enable tracing voucher contents");
/*
 * Kernel stack size and depth
 */
SYSCTL_INT(_kern, OID_AUTO, stack_size,
    CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_size, 0, "Kernel stack size");
SYSCTL_INT(_kern, OID_AUTO, stack_depth_max,
    CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_depth_max, 0, "Max kernel stack depth at interrupt or context switch");
extern unsigned int kern_feature_overrides;
SYSCTL_INT(_kern, OID_AUTO, kern_feature_overrides,
    CTLFLAG_RD | CTLFLAG_LOCKED, &kern_feature_overrides, 0, "Kernel feature override mask");
/*
 * enable back trace for port allocations
 */
extern int ipc_portbt;

SYSCTL_INT(_kern, OID_AUTO, ipc_portbt,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &ipc_portbt, 0, "");
SYSCTL_STRING(_kern, OID_AUTO, sched,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    sched_string, sizeof(sched_string),
    "Timeshare scheduler implementation");
#if CONFIG_QUIESCE_COUNTER
static int
sysctl_cpu_quiescent_counter_interval SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)

    uint32_t local_min_interval_us = cpu_quiescent_counter_get_min_interval_us();

    int error = sysctl_handle_int(oidp, &local_min_interval_us, 0, req);
    if (error || !req->newptr) {
        return error;
    }

    cpu_quiescent_counter_set_min_interval_us(local_min_interval_us);

    return 0;
}

SYSCTL_PROC(_kern, OID_AUTO, cpu_checkin_interval,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0,
    sysctl_cpu_quiescent_counter_interval, "I",
    "Quiescent CPU checkin interval (microseconds)");
#endif /* CONFIG_QUIESCE_COUNTER */
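/*
 * Usage sketch (illustrative): reading kern.cpu_checkin_interval returns the
 * current minimum checkin interval in microseconds; writing it pushes a new
 * minimum down to the quiescent-counter machinery:
 *
 *	$ sysctl kern.cpu_checkin_interval
 *	$ sudo sysctl kern.cpu_checkin_interval=100
 */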
/*
 * Only support runtime modification on embedded platforms
 * with development config enabled
 */
extern int precise_user_kernel_time;
SYSCTL_INT(_kern, OID_AUTO, precise_user_kernel_time,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &precise_user_kernel_time, 0, "Precise accounting of kernel vs. user time");
/* Parameters related to timer coalescing tuning, to be replaced
 * with a dedicated systemcall in the future.
 */
/* Enable processing pending timers in the context of any other interrupt
 * Coalescing tuning parameters for various thread/task attributes */
static int
sysctl_timer_user_us_kernel_abstime SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp)
    int size = arg2;    /* subcommand*/
    int error;
    int changed = 0;
    uint64_t old_value_ns;
    uint64_t new_value_ns;
    uint64_t value_abstime;

    if (size == sizeof(uint32_t)) {
        value_abstime = *((uint32_t *)arg1);
    } else if (size == sizeof(uint64_t)) {
        value_abstime = *((uint64_t *)arg1);
    } else {
        return ENOTSUP;
    }

    absolutetime_to_nanoseconds(value_abstime, &old_value_ns);
    error = sysctl_io_number(req, old_value_ns, sizeof(old_value_ns), &new_value_ns, &changed);
    if ((error) || (!changed)) {
        return error;
    }

    nanoseconds_to_absolutetime(new_value_ns, &value_abstime);
    if (size == sizeof(uint32_t)) {
        *((uint32_t *)arg1) = (uint32_t)value_abstime;
    } else {
        *((uint64_t *)arg1) = value_abstime;
    }

    return error;
}
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_bg_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_bg_shift, 0, "");
SYSCTL_PROC(_kern, OID_AUTO, timer_resort_threshold_ns,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_resort_threshold_abstime,
    sizeof(tcoal_prio_params.timer_resort_threshold_abstime),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");
SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_bg_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_bg_abstime_max,
    sizeof(tcoal_prio_params.timer_coalesce_bg_abstime_max),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_kt_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_kt_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_kt_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_kt_abstime_max,
    sizeof(tcoal_prio_params.timer_coalesce_kt_abstime_max),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_fp_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_fp_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_fp_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_fp_abstime_max,
    sizeof(tcoal_prio_params.timer_coalesce_fp_abstime_max),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_ts_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_ts_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_ts_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_ts_abstime_max,
    sizeof(tcoal_prio_params.timer_coalesce_ts_abstime_max),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier0_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[0], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier0_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[0],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[0]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier1_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[1], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier1_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[1],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[1]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier2_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[2], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier2_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[2],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[2]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier3_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[3], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier3_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[3],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[3]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier4_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[4], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier4_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[4],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[4]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier5_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[5], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier5_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[5],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[5]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");
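/*
 * Usage sketch (illustrative): the *_ns_max knobs above are surfaced in
 * nanoseconds but stored in Mach absolute time; the shared handler performs
 * the conversion on every read and write. For example:
 *
 *	uint64_t ns = 0;
 *	size_t len = sizeof(ns);
 *	sysctlbyname("kern.timer_coalesce_bg_ns_max", &ns, &len, NULL, 0);
 */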
4109 /* Communicate the "user idle level" heuristic to the timer layer, and
4110 * potentially other layers in the future.
4114 timer_user_idle_level(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
4116 int new_value
= 0, old_value
= 0, changed
= 0, error
;
4118 old_value
= timer_get_user_idle_level();
4120 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
4122 if (error
== 0 && changed
) {
4123 if (timer_set_user_idle_level(new_value
) != KERN_SUCCESS
) {
4131 SYSCTL_PROC(_machdep
, OID_AUTO
, user_idle_level
,
4132 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
4134 timer_user_idle_level
, "I", "User idle level heuristic, 0-128");
SYSCTL_INT(_kern, OID_AUTO, hv_support,
    CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
    &hv_support_available, 0, "");
static int
sysctl_darkboot SYSCTL_HANDLER_ARGS
{
    int err = 0, value = 0;
#pragma unused(oidp, arg1, arg2, err, value, req)

    /*
     * Handle the sysctl request.
     *
     * If this is a read, the function will set the value to the current darkboot value. Otherwise,
     * we'll get the request identifier into "value" and then we can honor it.
     */
    if ((err = sysctl_io_number(req, darkboot, sizeof(int), &value, NULL)) != 0) {
        goto exit;
    }

    /* writing requested, let's process the request */
    if (req->newptr) {
        /* writing is protected by an entitlement */
        if (priv_check_cred(kauth_cred_get(), PRIV_DARKBOOT, 0) != 0) {
            err = EPERM;
            goto exit;
        }

        switch (value) {
        case MEMORY_MAINTENANCE_DARK_BOOT_UNSET:
            /*
             * If the darkboot sysctl is unset, the NVRAM variable
             * must be unset too. If that's not the case, it means
             * someone is doing something crazy and not supported.
             */
            if (darkboot != 0) {
                int ret = PERemoveNVRAMProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME);
                if (ret) {
                    darkboot = 0;
                } else {
                    err = EINVAL;
                }
            }
            break;
        case MEMORY_MAINTENANCE_DARK_BOOT_SET:
            darkboot = 1;
            break;
        case MEMORY_MAINTENANCE_DARK_BOOT_SET_PERSISTENT: {
            /*
             * Set the NVRAM and update 'darkboot' in case
             * of success. Otherwise, do not update
             * 'darkboot' and report the failure.
             */
            if (PEWriteNVRAMBooleanProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME, TRUE)) {
                darkboot = 1;
            } else {
                err = EINVAL;
            }
            break;
        }
        default:
            err = EINVAL;
            break;
        }
    }

exit:
    return err;
}

SYSCTL_PROC(_kern, OID_AUTO, darkboot,
    CTLFLAG_KERN | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
    0, 0, sysctl_darkboot, "I", "");
#if DEVELOPMENT || DEBUG
#include <sys/sysent.h>
/* This should result in a fatal exception, verifying that "sysent" is
 * dynamically made read-only.
 */
static int
kern_sysent_write(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    uint64_t new_value = 0, old_value = 0;
    int changed = 0, error;

    error = sysctl_io_number(req, old_value, sizeof(uint64_t), &new_value, &changed);
    if ((error == 0) && changed) {
        volatile uint32_t *wraddr = (uint32_t *) &sysent[0];
        *wraddr = 0;
        printf("sysent[0] write succeeded\n");
    }
    return error;
}

SYSCTL_PROC(_kern, OID_AUTO, sysent_const_check,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0,
    kern_sysent_write, "I", "Attempt sysent[0] write");

#endif
#if DEVELOPMENT || DEBUG
SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED, NULL, 1, "");
#else
SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED, NULL, 0, "");
#endif
#if DEVELOPMENT || DEBUG

static int
sysctl_panic_test SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int rval = 0;
    char str[32] = "entry prelog postlog postcore";

    rval = sysctl_handle_string(oidp, str, sizeof(str), req);

    if (rval == 0 && req->newptr) {
        if (strncmp("entry", str, strlen("entry")) == 0) {
            panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_ENTRY, "test recursive panic at entry");
        } else if (strncmp("prelog", str, strlen("prelog")) == 0) {
            panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_PRELOG, "test recursive panic prior to writing a paniclog");
        } else if (strncmp("postlog", str, strlen("postlog")) == 0) {
            panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTLOG, "test recursive panic subsequent to paniclog");
        } else if (strncmp("postcore", str, strlen("postcore")) == 0) {
            panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTCORE, "test recursive panic subsequent to on-device core");
        }
    }

    return rval;
}
static int
sysctl_debugger_test SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int rval = 0;
    char str[32] = "entry prelog postlog postcore";

    rval = sysctl_handle_string(oidp, str, sizeof(str), req);

    if (rval == 0 && req->newptr) {
        if (strncmp("entry", str, strlen("entry")) == 0) {
            DebuggerWithContext(0, NULL, "test recursive panic via debugger at entry", DEBUGGER_OPTION_RECURPANIC_ENTRY);
        } else if (strncmp("prelog", str, strlen("prelog")) == 0) {
            DebuggerWithContext(0, NULL, "test recursive panic via debugger prior to writing a paniclog", DEBUGGER_OPTION_RECURPANIC_PRELOG);
        } else if (strncmp("postlog", str, strlen("postlog")) == 0) {
            DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to paniclog", DEBUGGER_OPTION_RECURPANIC_POSTLOG);
        } else if (strncmp("postcore", str, strlen("postcore")) == 0) {
            DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to on-device core", DEBUGGER_OPTION_RECURPANIC_POSTCORE);
        }
    }

    return rval;
}
decl_lck_spin_data(, spinlock_panic_test_lock);

__attribute__((noreturn))
static void
spinlock_panic_test_acquire_spinlock(void * arg __unused, wait_result_t wres __unused)
{
    lck_spin_lock(&spinlock_panic_test_lock);
    while (1) {
        ;
    }
}

static int
sysctl_spinlock_panic_test SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    if (req->newlen == 0) {
        return EINVAL;
    }

    thread_t panic_spinlock_thread;
    /* Initialize panic spinlock */
    lck_grp_t * panic_spinlock_grp;
    lck_grp_attr_t * panic_spinlock_grp_attr;
    lck_attr_t * panic_spinlock_attr;

    panic_spinlock_grp_attr = lck_grp_attr_alloc_init();
    panic_spinlock_grp = lck_grp_alloc_init("panic_spinlock", panic_spinlock_grp_attr);
    panic_spinlock_attr = lck_attr_alloc_init();

    lck_spin_init(&spinlock_panic_test_lock, panic_spinlock_grp, panic_spinlock_attr);

    /* Create thread to acquire spinlock */
    if (kernel_thread_start(spinlock_panic_test_acquire_spinlock, NULL, &panic_spinlock_thread) != KERN_SUCCESS) {
        return EBUSY;
    }

    /* Try to acquire spinlock -- should panic eventually */
    lck_spin_lock(&spinlock_panic_test_lock);
    while (1) {
        ;
    }
}
__attribute__((noreturn))
static void
simultaneous_panic_worker
(void * arg, wait_result_t wres __unused)
{
    atomic_int *start_panic = (atomic_int *)arg;

    while (!atomic_load(start_panic)) {
        ;
    }
    panic("SIMULTANEOUS PANIC TEST: INITIATING PANIC FROM CPU %d", cpu_number());
    __builtin_unreachable();
}

static int
sysctl_simultaneous_panic_test SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    if (req->newlen == 0) {
        return EINVAL;
    }

    int i = 0, threads_to_create = 2 * processor_count;
    atomic_int start_panic = 0;
    unsigned int threads_created = 0;
    thread_t new_panic_thread;

    for (i = threads_to_create; i > 0; i--) {
        if (kernel_thread_start(simultaneous_panic_worker, (void *) &start_panic, &new_panic_thread) == KERN_SUCCESS) {
            threads_created++;
        }
    }

    /* FAIL if we couldn't create at least processor_count threads */
    if (threads_created < processor_count) {
        panic("SIMULTANEOUS PANIC TEST: FAILED TO CREATE ENOUGH THREADS, ONLY CREATED %d (of %d)",
            threads_created, threads_to_create);
    }

    atomic_exchange(&start_panic, 1);
    while (1) {
        ;
    }
}
SYSCTL_PROC(_debug, OID_AUTO, panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_panic_test, "A", "panic test");
SYSCTL_PROC(_debug, OID_AUTO, debugger_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_debugger_test, "A", "debugger test");
SYSCTL_PROC(_debug, OID_AUTO, spinlock_panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_spinlock_panic_test, "A", "spinlock panic test");
SYSCTL_PROC(_debug, OID_AUTO, simultaneous_panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_simultaneous_panic_test, "A", "simultaneous panic test");
extern int exc_resource_threads_enabled;

SYSCTL_INT(_kern, OID_AUTO, exc_resource_threads_enabled, CTLFLAG_RD | CTLFLAG_LOCKED, &exc_resource_threads_enabled, 0, "exc_resource thread limit enabled");

#endif /* DEVELOPMENT || DEBUG */
const uint32_t thread_groups_supported = 0;

static int
sysctl_thread_groups_supported(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int value = thread_groups_supported;
    return sysctl_io_number(req, value, sizeof(value), NULL, NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, thread_groups_supported, CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_KERN,
    0, 0, &sysctl_thread_groups_supported, "I", "thread groups supported");
static int
sysctl_grade_cputype SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2, oidp)
    int error = 0;
    int type_tuple[2] = {};
    int return_value = 0;

    error = SYSCTL_IN(req, &type_tuple, sizeof(type_tuple));

    if (error) {
        return error;
    }

    return_value = grade_binary(type_tuple[0], type_tuple[1], FALSE);

    error = SYSCTL_OUT(req, &return_value, sizeof(return_value));

    if (error) {
        return error;
    }

    return 0;
}

SYSCTL_PROC(_kern, OID_AUTO, grade_cputype,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED | CTLTYPE_OPAQUE,
    0, 0, &sysctl_grade_cputype, "S",
    "grade value of cpu_type_t+cpu_sub_type_t");
#if DEVELOPMENT || DEBUG

extern void do_cseg_wedge_thread(void);
extern void do_cseg_unwedge_thread(void);

static int
cseg_wedge_thread SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)

    int error, val = 0;
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || val == 0) {
        return error;
    }

    do_cseg_wedge_thread();
    return 0;
}
SYSCTL_PROC(_kern, OID_AUTO, cseg_wedge_thread, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, 0, cseg_wedge_thread, "I", "wedge c_seg thread");

static int
cseg_unwedge_thread SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)

    int error, val = 0;
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || val == 0) {
        return error;
    }

    do_cseg_unwedge_thread();
    return 0;
}
SYSCTL_PROC(_kern, OID_AUTO, cseg_unwedge_thread, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, 0, cseg_unwedge_thread, "I", "unstuck c_seg thread");
static atomic_int wedge_thread_should_wake = 0;

static int
unwedge_thread SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int error, val = 0;
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || val == 0) {
        return error;
    }

    atomic_store(&wedge_thread_should_wake, 1);
    return 0;
}

SYSCTL_PROC(_kern, OID_AUTO, unwedge_thread, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, unwedge_thread, "I", "unwedge the thread wedged by kern.wedge_thread");
SYSCTL_LONG(_kern, OID_AUTO, phys_carveout_pa, CTLFLAG_RD | CTLFLAG_LOCKED,
    &phys_carveout_pa,
    "base physical address of the phys_carveout_mb boot-arg region");
SYSCTL_LONG(_kern, OID_AUTO, phys_carveout_size, CTLFLAG_RD | CTLFLAG_LOCKED,
    &phys_carveout_size,
    "size in bytes of the phys_carveout_mb boot-arg region");
static int
wedge_thread SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)

    int error, val = 0;
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || val == 0) {
        return error;
    }

    uint64_t interval = 1;
    nanoseconds_to_absolutetime(1000 * 1000 * 50, &interval);

    atomic_store(&wedge_thread_should_wake, 0);
    while (!atomic_load(&wedge_thread_should_wake)) {
        tsleep1(NULL, 0, "wedge_thread", mach_absolute_time() + interval, NULL);
    }

    return 0;
}

SYSCTL_PROC(_kern, OID_AUTO, wedge_thread, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, wedge_thread, "I", "wedge this thread so it cannot be cleaned up");
extern unsigned long
total_corpses_count(void);

static int
sysctl_total_corpses_count SYSCTL_HANDLER_ARGS;

static int
sysctl_total_corpses_count SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    int corpse_count = total_corpses_count();
    return sysctl_io_opaque(req, &corpse_count, sizeof(int), NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, total_corpses_count, CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, sysctl_total_corpses_count, "I", "total corpses on the system");
static int
sysctl_turnstile_test_prim_lock SYSCTL_HANDLER_ARGS;
static int
sysctl_turnstile_test_prim_unlock SYSCTL_HANDLER_ARGS;
int
tstile_test_prim_lock(boolean_t use_hashtable);
int
tstile_test_prim_unlock(boolean_t use_hashtable);

static int
sysctl_turnstile_test_prim_lock SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int error, val = 0;
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || val == 0) {
        return error;
    }
    switch (val) {
    case SYSCTL_TURNSTILE_TEST_USER_DEFAULT:
    case SYSCTL_TURNSTILE_TEST_USER_HASHTABLE:
    case SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT:
    case SYSCTL_TURNSTILE_TEST_KERNEL_HASHTABLE:
        return tstile_test_prim_lock(val);
    default:
        return error;
    }
}

static int
sysctl_turnstile_test_prim_unlock SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int error, val = 0;
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || val == 0) {
        return error;
    }
    switch (val) {
    case SYSCTL_TURNSTILE_TEST_USER_DEFAULT:
    case SYSCTL_TURNSTILE_TEST_USER_HASHTABLE:
    case SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT:
    case SYSCTL_TURNSTILE_TEST_KERNEL_HASHTABLE:
        return tstile_test_prim_unlock(val);
    default:
        return error;
    }
}

SYSCTL_PROC(_kern, OID_AUTO, turnstiles_test_lock, CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
    0, 0, sysctl_turnstile_test_prim_lock, "I", "turnstiles test lock");

SYSCTL_PROC(_kern, OID_AUTO, turnstiles_test_unlock, CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
    0, 0, sysctl_turnstile_test_prim_unlock, "I", "turnstiles test unlock");
int
turnstile_get_boost_stats_sysctl(void *req);
int
turnstile_get_unboost_stats_sysctl(void *req);
static int
sysctl_turnstile_boost_stats SYSCTL_HANDLER_ARGS;
static int
sysctl_turnstile_unboost_stats SYSCTL_HANDLER_ARGS;
extern uint64_t thread_block_on_turnstile_count;
extern uint64_t thread_block_on_regular_waitq_count;

static int
sysctl_turnstile_boost_stats SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2, oidp)
    return turnstile_get_boost_stats_sysctl(req);
}

static int
sysctl_turnstile_unboost_stats SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2, oidp)
    return turnstile_get_unboost_stats_sysctl(req);
}

SYSCTL_PROC(_kern, OID_AUTO, turnstile_boost_stats, CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLTYPE_STRUCT,
    0, 0, sysctl_turnstile_boost_stats, "S", "turnstiles boost stats");
SYSCTL_PROC(_kern, OID_AUTO, turnstile_unboost_stats, CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLTYPE_STRUCT,
    0, 0, sysctl_turnstile_unboost_stats, "S", "turnstiles unboost stats");
SYSCTL_QUAD(_kern, OID_AUTO, thread_block_count_on_turnstile,
    CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &thread_block_on_turnstile_count, "thread blocked on turnstile count");
SYSCTL_QUAD(_kern, OID_AUTO, thread_block_count_on_reg_waitq,
    CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &thread_block_on_regular_waitq_count, "thread blocked on regular waitq count");
static int
sysctl_erase_all_test_mtx_stats SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int error, val = 0;
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || val == 0) {
        return error;
    }

    if (val == 1) {
        lck_mtx_test_init();
        erase_all_test_mtx_stats();
    }

    return 0;
}

static int
sysctl_get_test_mtx_stats SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    char* buffer;
    int size, buffer_size, error;

    buffer_size = 1000;
    buffer = kalloc(buffer_size);
    if (!buffer) {
        panic("Impossible to allocate memory for %s\n", __func__);
    }

    lck_mtx_test_init();

    size = get_test_mtx_stats_string(buffer, buffer_size);

    error = sysctl_io_string(req, buffer, size, 0, NULL);

    kfree(buffer, buffer_size);

    return error;
}
static int
sysctl_test_mtx_uncontended SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    char* buffer;
    char input_val[40];
    int buffer_size, offset, error, iter;

    if (!req->newptr) {
        return 0;
    }
    if (!req->oldptr) {
        return EINVAL;
    }

    if (req->newlen >= sizeof(input_val)) {
        return EINVAL;
    }

    error = SYSCTL_IN(req, input_val, req->newlen);
    if (error) {
        return error;
    }
    input_val[req->newlen] = '\0';

    iter = 0;
    error = sscanf(input_val, "%d", &iter);
    if (error != 1) {
        printf("%s invalid input\n", __func__);
        return EINVAL;
    }

    if (iter > TEST_MTX_MAX_STATS) {
        printf("%s requested %d iterations, not starting the test\n", __func__, iter);
        return EINVAL;
    }

    lck_mtx_test_init();

    buffer_size = 2000;
    offset = 0;
    buffer = kalloc(buffer_size);
    if (!buffer) {
        panic("Impossible to allocate memory for %s\n", __func__);
    }
    memset(buffer, 0, buffer_size);

    printf("%s starting uncontended mutex test with %d iterations\n", __func__, iter);

    offset = scnprintf(buffer, buffer_size, "STATS INNER LOOP");
    offset += lck_mtx_test_mtx_uncontended(iter, &buffer[offset], buffer_size - offset);

    offset += scnprintf(&buffer[offset], buffer_size - offset, "\nSTATS OUTER LOOP");
    offset += lck_mtx_test_mtx_uncontended_loop_time(iter, &buffer[offset], buffer_size - offset);

    error = SYSCTL_OUT(req, buffer, offset);

    kfree(buffer, buffer_size);
    return error;
}
static int
sysctl_test_mtx_contended SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    char* buffer;
    char input_val[40];
    int buffer_size, offset, error, iter;

    if (!req->newptr) {
        return 0;
    }
    if (!req->oldptr) {
        return EINVAL;
    }

    if (req->newlen >= sizeof(input_val)) {
        return EINVAL;
    }

    error = SYSCTL_IN(req, input_val, req->newlen);
    if (error) {
        return error;
    }
    input_val[req->newlen] = '\0';

    iter = 0;
    error = sscanf(input_val, "%d", &iter);
    if (error != 1) {
        printf("%s invalid input\n", __func__);
        return EINVAL;
    }

    if (iter > TEST_MTX_MAX_STATS) {
        printf("%s requested %d iterations, not starting the test\n", __func__, iter);
        return EINVAL;
    }

    lck_mtx_test_init();
    erase_all_test_mtx_stats();

    buffer_size = 2000;
    offset = 0;
    buffer = kalloc(buffer_size);
    if (!buffer) {
        panic("Impossible to allocate memory for %s\n", __func__);
    }
    memset(buffer, 0, buffer_size);

    printf("%s starting contended mutex test with %d iterations FULL_CONTENDED\n", __func__, iter);

    offset = scnprintf(buffer, buffer_size, "STATS INNER LOOP");
    offset += lck_mtx_test_mtx_contended(iter, &buffer[offset], buffer_size - offset, FULL_CONTENDED);

    printf("%s starting contended mutex loop test with %d iterations FULL_CONTENDED\n", __func__, iter);

    offset += scnprintf(&buffer[offset], buffer_size - offset, "\nSTATS OUTER LOOP");
    offset += lck_mtx_test_mtx_contended_loop_time(iter, &buffer[offset], buffer_size - offset, FULL_CONTENDED);

    printf("%s starting contended mutex test with %d iterations HALF_CONTENDED\n", __func__, iter);

    offset += scnprintf(&buffer[offset], buffer_size - offset, "STATS INNER LOOP");
    offset += lck_mtx_test_mtx_contended(iter, &buffer[offset], buffer_size - offset, HALF_CONTENDED);

    printf("%s starting contended mutex loop test with %d iterations HALF_CONTENDED\n", __func__, iter);

    offset += scnprintf(&buffer[offset], buffer_size - offset, "\nSTATS OUTER LOOP");
    offset += lck_mtx_test_mtx_contended_loop_time(iter, &buffer[offset], buffer_size - offset, HALF_CONTENDED);

    error = SYSCTL_OUT(req, buffer, offset);

    printf("\n%s\n", buffer);
    kfree(buffer, buffer_size);

    return error;
}
SYSCTL_PROC(_kern, OID_AUTO, erase_all_test_mtx_stats, CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
    0, 0, sysctl_erase_all_test_mtx_stats, "I", "erase test_mtx statistics");

SYSCTL_PROC(_kern, OID_AUTO, get_test_mtx_stats, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_KERN | CTLFLAG_LOCKED,
    0, 0, sysctl_get_test_mtx_stats, "A", "get test_mtx statistics");

SYSCTL_PROC(_kern, OID_AUTO, test_mtx_contended, CTLTYPE_STRING | CTLFLAG_MASKED | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    0, 0, sysctl_test_mtx_contended, "A", "get statistics for contended mtx test");

SYSCTL_PROC(_kern, OID_AUTO, test_mtx_uncontended, CTLTYPE_STRING | CTLFLAG_MASKED | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    0, 0, sysctl_test_mtx_uncontended, "A", "get statistics for uncontended mtx test");
extern uint64_t MutexSpin;

SYSCTL_QUAD(_kern, OID_AUTO, mutex_spin_abs, CTLFLAG_RW, &MutexSpin,
    "Spin time in abs for acquiring a kernel mutex");

extern uint64_t low_MutexSpin;
extern int64_t high_MutexSpin;
extern unsigned int real_ncpus;

SYSCTL_QUAD(_kern, OID_AUTO, low_mutex_spin_abs, CTLFLAG_RW, &low_MutexSpin,
    "Low spin threshold in abs for acquiring a kernel mutex");
static int
sysctl_high_mutex_spin_ns SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    int error;
    int64_t val = 0;
    int64_t res;

    /* Check if the user is writing to high_MutexSpin, or just reading it */
    if (req->newptr) {
        error = SYSCTL_IN(req, &val, sizeof(val));
        if (error || (val < 0 && val != -1)) {
            return error;
        }
        high_MutexSpin = val;
    }

    if (high_MutexSpin >= 0) {
        res = high_MutexSpin;
    } else {
        res = low_MutexSpin * real_ncpus;
    }
    return SYSCTL_OUT(req, &res, sizeof(res));
}
SYSCTL_PROC(_kern, OID_AUTO, high_mutex_spin_abs, CTLFLAG_RW | CTLTYPE_QUAD, 0, 0, sysctl_high_mutex_spin_ns, "I",
    "High spin threshold in abs for acquiring a kernel mutex");
#if defined (__x86_64__)

semaphore_t sysctl_test_panic_with_thread_sem;

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Winfinite-recursion" /* rdar://38801963 */
__attribute__((noreturn))
static void
panic_thread_test_child_spin(void * arg, wait_result_t wres)
{
    static int panic_thread_recurse_count = 5;

    if (panic_thread_recurse_count > 0) {
        panic_thread_recurse_count--;
        panic_thread_test_child_spin(arg, wres);
    }

    semaphore_signal(sysctl_test_panic_with_thread_sem);
    while (1) {
        ;
    }
}
#pragma clang diagnostic pop

static void
panic_thread_test_child_park(void * arg __unused, wait_result_t wres __unused)
{
    int event;

    assert_wait(&event, THREAD_UNINT);
    semaphore_signal(sysctl_test_panic_with_thread_sem);
    thread_block(panic_thread_test_child_park);
}

static int
sysctl_test_panic_with_thread SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int rval = 0;
    char str[16] = { '\0' };
    thread_t child_thread = THREAD_NULL;

    rval = sysctl_handle_string(oidp, str, sizeof(str), req);
    if (rval != 0 || !req->newptr) {
        return EINVAL;
    }

    semaphore_create(kernel_task, &sysctl_test_panic_with_thread_sem, SYNC_POLICY_FIFO, 0);

    /* Create thread to spin or park in continuation */
    if (strncmp("spin", str, strlen("spin")) == 0) {
        if (kernel_thread_start(panic_thread_test_child_spin, NULL, &child_thread) != KERN_SUCCESS) {
            semaphore_destroy(kernel_task, sysctl_test_panic_with_thread_sem);
            return EBUSY;
        }
    } else if (strncmp("continuation", str, strlen("continuation")) == 0) {
        if (kernel_thread_start(panic_thread_test_child_park, NULL, &child_thread) != KERN_SUCCESS) {
            semaphore_destroy(kernel_task, sysctl_test_panic_with_thread_sem);
            return EBUSY;
        }
    } else {
        semaphore_destroy(kernel_task, sysctl_test_panic_with_thread_sem);
        return EINVAL;
    }

    semaphore_wait(sysctl_test_panic_with_thread_sem);

    panic_with_thread_context(0, NULL, 0, child_thread, "testing panic_with_thread_context for thread %p", child_thread);

    /* Not reached */
    return EINVAL;
}

SYSCTL_PROC(_kern, OID_AUTO, test_panic_with_thread, CTLFLAG_MASKED | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_WR | CTLTYPE_STRING,
    0, 0, sysctl_test_panic_with_thread, "A", "test panic flow for backtracing a different thread");
#endif /* defined (__x86_64__) */

#endif /* DEVELOPMENT || DEBUG */
static int
sysctl_get_owned_vmobjects SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)

    /* validate */
    if (req->newlen != sizeof(mach_port_name_t) || req->newptr == USER_ADDR_NULL ||
        req->oldidx != 0 || req->newidx != 0 || req->p == NULL) {
        return EINVAL;
    }

    int error;
    mach_port_name_t task_port_name;
    task_t task;
    int buffer_size = (req->oldptr != USER_ADDR_NULL) ? req->oldlen : 0;
    vmobject_list_output_t buffer;
    size_t output_size;

    if (buffer_size) {
        const int min_size = sizeof(vm_object_query_data_t) + sizeof(int64_t);

        if (buffer_size < min_size) {
            buffer_size = min_size;
        }

        buffer = kalloc(buffer_size);

        if (!buffer) {
            error = ENOMEM;
            goto sysctl_get_vmobject_list_exit;
        }
    } else {
        buffer = NULL;
    }

    /* we have a "newptr" (for write) we get a task port name from the caller. */
    error = SYSCTL_IN(req, &task_port_name, sizeof(mach_port_name_t));

    if (error != 0) {
        goto sysctl_get_vmobject_list_exit;
    }

    task = port_name_to_task(task_port_name);
    if (task == TASK_NULL) {
        error = ESRCH;
        goto sysctl_get_vmobject_list_exit;
    }

    /* copy the vmobjects and vmobject data out of the task */
    if (buffer_size == 0) {
        int64_t __size;
        task_copy_vmobjects(task, NULL, 0, &__size);
        output_size = (__size > 0) ? __size * sizeof(vm_object_query_data_t) + sizeof(int64_t) : 0;
    } else {
        task_copy_vmobjects(task, &buffer->data[0], buffer_size - sizeof(int64_t), &buffer->entries);
        output_size = buffer->entries * sizeof(vm_object_query_data_t) + sizeof(int64_t);
    }

    task_deallocate(task);

    error = SYSCTL_OUT(req, (char*) buffer, output_size);

sysctl_get_vmobject_list_exit:
    if (buffer) {
        kfree(buffer, buffer_size);
    }

    return error;
}

SYSCTL_PROC(_vm, OID_AUTO, get_owned_vmobjects, CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
    0, 0, sysctl_get_owned_vmobjects, "A", "get owned vmobjects in task");