/*
 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Mike Karels at Berkeley Software Design, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sysctl.c	8.4 (Berkeley) 4/14/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

/*
 * DEPRECATED sysctl system call code
 *
 * Everything in this file is deprecated. Sysctls should be handled
 * by the code in kern_newsysctl.c.
 * The remaining "case" sections are supposed to be converted into
 * SYSCTL_*-style definitions, and as soon as all of them are gone,
 * this source file is supposed to die.
 *
 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
 */
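/*
 * For reference, a minimal sketch of the preferred new-style definition
 * mentioned above (hypothetical names, kept under "#if 0" so it is
 * illustration only and does not register anything):
 */
#if 0
static int example_tunable = 0;                  /* hypothetical backing variable */
SYSCTL_INT(_kern, OID_AUTO, example_tunable,     /* would appear as kern.example_tunable */
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &example_tunable, 0, "illustrative SYSCTL_INT-style definition");
#endif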
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/file_internal.h>
#include <sys/vnode_internal.h>
#include <sys/unistd.h>
#include <sys/buf.h>
#include <sys/ioctl.h>
#include <sys/namei.h>
#include <sys/tty.h>
#include <sys/disklabel.h>
#include <sys/vm.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <sys/aio_kern.h>
#include <sys/reboot.h>
#include <sys/memory_maintenance.h>
#include <sys/priv.h>
#include <stdatomic.h>

#include <security/audit/audit.h>
#include <kern/kalloc.h>

#include <machine/smp.h>
#include <machine/atomic.h>
#include <mach/machine.h>
#include <mach/mach_host.h>
#include <mach/mach_types.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <kern/debug.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_group.h>
#include <kern/processor.h>
#include <kern/cpu_number.h>
#include <kern/cpu_quiesce.h>
#include <kern/sched_prim.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <mach/host_info.h>

#include <sys/mount_internal.h>
#include <sys/kdebug.h>

#include <IOKit/IOPlatformExpert.h>
#include <pexpert/pexpert.h>

#include <machine/machine_routines.h>
#include <machine/exec.h>

#include <vm/vm_protos.h>
#include <vm/vm_pageout.h>
#include <vm/vm_compressor_algorithms.h>
#include <sys/imgsrc.h>
#include <kern/timer_call.h>

#if defined(__i386__) || defined(__x86_64__)
#include <i386/cpuid.h>
#endif

#if CONFIG_FREEZE
#include <sys/kern_memorystatus.h>
#endif

#if KPERF
#include <kperf/kperf.h>
#endif

#if HYPERVISOR
#include <kern/hv_support.h>
#endif
/*
 * deliberately setting max requests to really high number
 * so that runaway settings do not cause MALLOC overflows
 */
#define AIO_MAX_REQUESTS (128 * CONFIG_AIO_MAX)

extern int aio_max_requests;
extern int aio_max_requests_per_process;
extern int aio_worker_threads;
extern int lowpri_IO_window_msecs;
extern int lowpri_IO_delay_msecs;
#if DEVELOPMENT || DEBUG
extern int nx_enabled;
#endif
extern int speculative_reads_disabled;
extern unsigned int speculative_prefetch_max;
extern unsigned int speculative_prefetch_max_iosize;
extern unsigned int preheat_max_bytes;
extern unsigned int preheat_min_bytes;
extern long numvnodes;
extern long num_recycledvnodes;

extern uuid_string_t bootsessionuuid_string;

extern unsigned int vm_max_delayed_work_limit;
extern unsigned int vm_max_batch;

extern unsigned int vm_page_free_min;
extern unsigned int vm_page_free_target;
extern unsigned int vm_page_free_reserved;

#if (DEVELOPMENT || DEBUG)
extern uint32_t vm_page_creation_throttled_hard;
extern uint32_t vm_page_creation_throttled_soft;
#endif /* DEVELOPMENT || DEBUG */
/*
 * Conditionally allow dtrace to see these functions for debugging purposes.
 */
#ifdef STATIC
#undef STATIC
#endif
#if 0
#define STATIC
#else
#define STATIC static
#endif

extern boolean_t mach_timer_coalescing_enabled;

extern uint64_t timer_deadline_tracking_bin_1, timer_deadline_tracking_bin_2;
STATIC void
fill_user32_eproc(proc_t, struct user32_eproc *__restrict);
STATIC void
fill_user32_externproc(proc_t, struct user32_extern_proc *__restrict);
STATIC void
fill_user64_eproc(proc_t, struct user64_eproc *__restrict);
STATIC void
fill_user64_proc(proc_t, struct user64_kinfo_proc *__restrict);
STATIC void
fill_user64_externproc(proc_t, struct user64_extern_proc *__restrict);
STATIC void
fill_user32_proc(proc_t, struct user32_kinfo_proc *__restrict);

extern int
kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep);

extern int
pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
    proc_t p);

STATIC int
sysctl_procargs(int *name, u_int namelen, user_addr_t where,
    size_t *sizep, proc_t cur_proc);
STATIC int
sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
    proc_t cur_proc, int argc_yes);
int
sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
    size_t newlen, void *sp, int len);

STATIC int sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg);
STATIC int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg);
STATIC int sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg);
STATIC int sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg);
STATIC int sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg);
int sysdoproc_callback(proc_t p, void *arg);

/* forward declarations for non-static STATIC */
STATIC void fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64);
STATIC void fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32);
STATIC int sysctl_handle_kern_threadname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_sched_stats(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_sched_stats_enable(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS;
#if COUNT_SYSCALLS
STATIC int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS;
#endif /* COUNT_SYSCALLS */
#if !CONFIG_EMBEDDED
STATIC int sysctl_doprocargs SYSCTL_HANDLER_ARGS;
#endif /* !CONFIG_EMBEDDED */
STATIC int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS;
STATIC int sysctl_prochandle SYSCTL_HANDLER_ARGS;
STATIC int sysctl_aiomax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_aioprocmax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_aiothreads(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_maxproc(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_osversion(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_sysctl_bootargs(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_maxvnodes(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_securelvl(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_domainname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_hostname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_procname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_boottime(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_symfile(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
#if NFSCLIENT
STATIC int sysctl_netboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
#endif
#ifdef CONFIG_IMGSRC_ACCESS
STATIC int sysctl_imgsrcdev(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
#endif
STATIC int sysctl_usrstack(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_usrstack64(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
#if CONFIG_COREDUMP
STATIC int sysctl_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_suid_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
#endif
STATIC int sysctl_delayterm(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_rage_vnode(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_kern_check_openevt(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
#if DEVELOPMENT || DEBUG
STATIC int sysctl_nx(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
#endif
STATIC int sysctl_loadavg(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_vm_toggle_address_reuse(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_swapusage(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int fetch_process_cputype(proc_t cur_proc, int *name, u_int namelen, cpu_type_t *cputype);
STATIC int sysctl_sysctl_native(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_sysctl_cputype(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_safeboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_singleuser(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_minimalboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_slide(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
#ifdef CONFIG_XNUPOST
#include <tests/xnupost.h>

STATIC int sysctl_debug_test_oslog_ctl(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_debug_test_stackshot_mutex_owner(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_debug_test_stackshot_rwlck_owner(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
#endif

extern void IORegistrySetOSBuildVersion(char * build_version);
STATIC void
fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64)
{
    la64->ldavg[0] = la->ldavg[0];
    la64->ldavg[1] = la->ldavg[1];
    la64->ldavg[2] = la->ldavg[2];
    la64->fscale = (user64_long_t)la->fscale;
}

STATIC void
fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32)
{
    la32->ldavg[0] = la->ldavg[0];
    la32->ldavg[1] = la->ldavg[1];
    la32->ldavg[2] = la->ldavg[2];
    la32->fscale = (user32_long_t)la->fscale;
}
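/*
 * Worked example (illustration, not from the original source): the ldavg
 * values copied above are fixed-point, so user space recovers the familiar
 * 1/5/15-minute load averages by dividing each entry by fscale, e.g.
 *
 *	double one_minute = (double)la32->ldavg[0] / (double)la32->fscale;
 */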
/*
 * Attributes stored in the kernel.
 */
extern char corefilename[MAXPATHLEN + 1];
extern int do_coredump;
extern int sugid_coredump;

#if COUNT_SYSCALLS
extern int do_count_syscalls;
#endif

int securelevel = -1;
STATIC int
sysctl_handle_kern_threadname(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
    int error;
    struct uthread *ut = get_bsdthread_info(current_thread());
    user_addr_t oldp = 0, newp = 0;
    size_t *oldlenp = NULL;
    size_t newlen = 0;

    oldp = req->oldptr;
    oldlenp = &(req->oldlen);
    newp = req->newptr;
    newlen = req->newlen;

    /* We want the current length, and maybe the string itself */
    if (oldlenp) {
        /* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */
        size_t currlen = MAXTHREADNAMESIZE - 1;

        if (ut->pth_name) {
            /* use length of current thread name */
            currlen = strlen(ut->pth_name);
        }
        if (oldp) {
            if (*oldlenp < currlen) {
                return ENOMEM;
            }

            /* NOTE - we do not copy the NULL terminator */
            if (ut->pth_name) {
                error = copyout(ut->pth_name, oldp, currlen);
                if (error) {
                    return error;
                }
            }
        }
        /* return length of thread name minus NULL terminator (just like strlen) */
        req->oldidx = currlen;
    }

    /* We want to set the name to something */
    if (newp) {
        if (newlen > (MAXTHREADNAMESIZE - 1)) {
            return ENAMETOOLONG;
        }

        if (!ut->pth_name) {
            ut->pth_name = (char *)kalloc(MAXTHREADNAMESIZE);
            if (!ut->pth_name) {
                return ENOMEM;
            }
        } else {
            kernel_debug_string_simple(TRACE_STRING_THREADNAME_PREV, ut->pth_name);
        }
        bzero(ut->pth_name, MAXTHREADNAMESIZE);
        error = copyin(newp, ut->pth_name, newlen);
        if (error) {
            return error;
        }

        kernel_debug_string_simple(TRACE_STRING_THREADNAME, ut->pth_name);
    }

    return 0;
}

SYSCTL_PROC(_kern, KERN_THREADNAME, threadname, CTLFLAG_ANYBODY | CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_handle_kern_threadname, "A", "");
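/*
 * User-space usage sketch (illustration only, not compiled here): the handler
 * above backs kern.threadname, which names the *calling* thread. The write
 * path copies newlen bytes in; the read path returns the name without its
 * NUL terminator.
 */
#if 0
#include <sys/sysctl.h>
#include <string.h>

static int
set_thread_name_example(const char *name)      /* hypothetical helper */
{
    /* newp/newlen set the calling thread's name (max MAXTHREADNAMESIZE - 1) */
    return sysctlbyname("kern.threadname", NULL, NULL, (void *)name, strlen(name));
}
#endif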
#define BSD_HOST 1
STATIC int
sysctl_sched_stats(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    host_basic_info_data_t hinfo;
    kern_return_t kret;
    uint32_t size;
    int changed;
    mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
    struct _processor_statistics_np *buf;
    int error;

    kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
    if (kret != KERN_SUCCESS) {
        return EINVAL;
    }

    size = sizeof(struct _processor_statistics_np) * (hinfo.logical_cpu_max + 2); /* One for RT Queue, One for Fair Share Queue */

    if (req->oldlen < size) {
        return EINVAL;
    }

    MALLOC(buf, struct _processor_statistics_np *, size, M_TEMP, M_ZERO | M_WAITOK);

    kret = get_sched_statistics(buf, &size);
    if (kret != KERN_SUCCESS) {
        error = EINVAL;
        goto out;
    }

    error = sysctl_io_opaque(req, buf, size, &changed);
    if (error) {
        goto out;
    }

    if (changed) {
        panic("Sched info changed?!");
    }
out:
    FREE(buf, M_TEMP);
    return error;
}

SYSCTL_PROC(_kern, OID_AUTO, sched_stats, CTLFLAG_LOCKED, 0, 0, sysctl_sched_stats, "-", "");
STATIC int
sysctl_sched_stats_enable(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
{
    boolean_t active;
    int res;

    if (req->newlen != sizeof(active)) {
        return EINVAL;
    }

    res = copyin(req->newptr, &active, sizeof(active));
    if (res != 0) {
        return res;
    }

    return set_sched_stats_active(active);
}

SYSCTL_PROC(_kern, OID_AUTO, sched_stats_enable, CTLFLAG_LOCKED | CTLFLAG_WR, 0, 0, sysctl_sched_stats_enable, "-", "");
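/*
 * Usage sketch (illustration only): the write-only control above expects
 * exactly sizeof(boolean_t) bytes of new data, so user space would do e.g.
 *
 *	boolean_t on = TRUE;
 *	sysctlbyname("kern.sched_stats_enable", NULL, NULL, &on, sizeof(on));
 *
 * Any other newlen is rejected with EINVAL.
 */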
extern uint32_t sched_debug_flags;
SYSCTL_INT(_debug, OID_AUTO, sched, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_debug_flags, 0, "scheduler debug");

#if (DEBUG || DEVELOPMENT)
extern boolean_t doprnt_hide_pointers;
SYSCTL_INT(_debug, OID_AUTO, hide_kernel_pointers, CTLFLAG_RW | CTLFLAG_LOCKED, &doprnt_hide_pointers, 0, "hide kernel pointers from log");
#endif

extern int get_kernel_symfile(proc_t, char **);

#if COUNT_SYSCALLS
#define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)

extern unsigned int nsysent;
extern int syscalls_log[];
extern const char *syscallnames[];
STATIC int
sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
{
    __unused int cmd = oidp->oid_arg2;  /* subcommand*/
    __unused int *name = arg1;          /* oid element argument vector */
    __unused int namelen = arg2;        /* number of oid element arguments */
    user_addr_t oldp = req->oldptr;     /* user buffer copy out address */
    size_t *oldlenp = &req->oldlen;     /* user buffer copy out size */
    user_addr_t newp = req->newptr;     /* user buffer copy in address */
    size_t newlen = req->newlen;        /* user buffer copy in size */
    int error;
    int tmp;

    /* valid values passed in:
     * = 0 means don't keep called counts for each bsd syscall
     * > 0 means keep called counts for each bsd syscall
     * = 2 means dump current counts to the system log
     * = 3 means reset all counts
     * for example, to dump current counts:
     *		sysctl -w kern.count_calls=2
     */
    error = sysctl_int(oldp, oldlenp, newp, newlen, &tmp);
    if (error != 0) {
        return error;
    }

    if (tmp == 1) {
        do_count_syscalls = 1;
    } else if (tmp == 0 || tmp == 2 || tmp == 3) {
        unsigned int i;
        for (i = 0; i < nsysent; i++) {
            if (syscalls_log[i] != 0) {
                if (tmp == 2) {
                    printf("%d calls - name %s \n", syscalls_log[i], syscallnames[i]);
                } else {
                    syscalls_log[i] = 0;
                }
            }
        }
        if (tmp != 0) {
            do_count_syscalls = 1;
        }
    }

    /* adjust index so we return the right required/consumed amount */
    if (!error) {
        req->oldidx += req->oldlen;
    }

    return error;
}
SYSCTL_PROC(_kern, KERN_COUNT_SYSCALLS, count_syscalls, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                       /* Pointer argument (arg1) */
    0,                       /* Integer argument (arg2) */
    sysctl_docountsyscalls,  /* Handler function */
    NULL,                    /* Data pointer */
    "");
#endif /* COUNT_SYSCALLS */
/*
 * The following sysctl_* functions should not be used
 * any more, as they can only cope with callers in
 * user mode: Use new-style
 *  sysctl_io_number()
 *  sysctl_io_string()
 *  sysctl_io_opaque()
 * instead.
 */

/*
 * Validate parameters and get old / set new parameters
 * for an integer-valued sysctl function.
 */
int
sysctl_int(user_addr_t oldp, size_t *oldlenp,
    user_addr_t newp, size_t newlen, int *valp)
{
    int error = 0;

    if (oldp != USER_ADDR_NULL && oldlenp == NULL) {
        return EFAULT;
    }
    if (oldp && *oldlenp < sizeof(int)) {
        return ENOMEM;
    }
    if (newp && newlen != sizeof(int)) {
        return EINVAL;
    }
    *oldlenp = sizeof(int);
    if (oldp) {
        error = copyout(valp, oldp, sizeof(int));
    }
    if (error == 0 && newp) {
        error = copyin(newp, valp, sizeof(int));
        AUDIT_ARG(value32, *valp);
    }
    return error;
}
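/*
 * Equivalent new-style sketch (illustration only): sysctl_io_number()
 * performs the same validate/copyout/copyin dance directly against the
 * request, so a modern handler body reduces to something like
 *
 *	int value = some_tunable;    (hypothetical backing variable)
 *	error = sysctl_io_number(req, value, sizeof(value), &value, &changed);
 *
 * as the handlers later in this file demonstrate.
 */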
/*
 * Validate parameters and get old / set new parameters
 * for a quad(64bit)-valued sysctl function.
 */
int
sysctl_quad(user_addr_t oldp, size_t *oldlenp,
    user_addr_t newp, size_t newlen, quad_t *valp)
{
    int error = 0;

    if (oldp != USER_ADDR_NULL && oldlenp == NULL) {
        return EFAULT;
    }
    if (oldp && *oldlenp < sizeof(quad_t)) {
        return ENOMEM;
    }
    if (newp && newlen != sizeof(quad_t)) {
        return EINVAL;
    }
    *oldlenp = sizeof(quad_t);
    if (oldp) {
        error = copyout(valp, oldp, sizeof(quad_t));
    }
    if (error == 0 && newp) {
        error = copyin(newp, valp, sizeof(quad_t));
    }
    return error;
}
STATIC int
sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg)
{
    if (p->p_pid != (pid_t)*(int*)arg) {
        return 0;
    } else {
        return 1;
    }
}

STATIC int
sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg)
{
    if (p->p_pgrpid != (pid_t)*(int*)arg) {
        return 0;
    } else {
        return 1;
    }
}

STATIC int
sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg)
{
    boolean_t retval;
    struct tty *tp;

    /* This is very racy but list lock is held.. Hmmm. */
    if ((p->p_flag & P_CONTROLT) == 0 ||
        (p->p_pgrp == NULL) || (p->p_pgrp->pg_session == NULL) ||
        (tp = SESSION_TP(p->p_pgrp->pg_session)) == TTY_NULL ||
        tp->t_dev != (dev_t)*(int*)arg) {
        retval = 0;
    } else {
        retval = 1;
    }

    return retval;
}

STATIC int
sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg)
{
    kauth_cred_t my_cred;
    uid_t uid;

    if (p->p_ucred == NULL) {
        return 0;
    }
    my_cred = kauth_cred_proc_ref(p);
    uid = kauth_cred_getuid(my_cred);
    kauth_cred_unref(&my_cred);

    if (uid != (uid_t)*(int*)arg) {
        return 0;
    } else {
        return 1;
    }
}

STATIC int
sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg)
{
    kauth_cred_t my_cred;
    uid_t ruid;

    if (p->p_ucred == NULL) {
        return 0;
    }
    my_cred = kauth_cred_proc_ref(p);
    ruid = kauth_cred_getruid(my_cred);
    kauth_cred_unref(&my_cred);

    if (ruid != (uid_t)*(int*)arg) {
        return 0;
    } else {
        return 1;
    }
}
/*
 * try over estimating by 5 procs
 */
#define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
struct sysdoproc_args {
    int buflen;
    void *kprocp;
    boolean_t is_64_bit;
    user_addr_t dp;
    size_t needed;
    int sizeof_kproc;
    int *errorp;
    int uidcheck;
    int ruidcheck;
    int ttycheck;
    int uidval;
};

int
sysdoproc_callback(proc_t p, void *arg)
{
    struct sysdoproc_args *args = arg;

    if (args->buflen >= args->sizeof_kproc) {
        if ((args->ruidcheck != 0) && (sysdoproc_filt_KERN_PROC_RUID(p, &args->uidval) == 0)) {
            return PROC_RETURNED;
        }
        if ((args->uidcheck != 0) && (sysdoproc_filt_KERN_PROC_UID(p, &args->uidval) == 0)) {
            return PROC_RETURNED;
        }
        if ((args->ttycheck != 0) && (sysdoproc_filt_KERN_PROC_TTY(p, &args->uidval) == 0)) {
            return PROC_RETURNED;
        }

        bzero(args->kprocp, args->sizeof_kproc);
        if (args->is_64_bit) {
            fill_user64_proc(p, args->kprocp);
        } else {
            fill_user32_proc(p, args->kprocp);
        }
        int error = copyout(args->kprocp, args->dp, args->sizeof_kproc);
        if (error) {
            *args->errorp = error;
            return PROC_RETURNED_DONE;
        }
        args->dp += args->sizeof_kproc;
        args->buflen -= args->sizeof_kproc;
    }
    args->needed += args->sizeof_kproc;
    return PROC_RETURNED;
}

SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD | CTLFLAG_LOCKED, 0, "");
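/*
 * User-space sketch (illustration only, not compiled here): the kern.proc.*
 * nodes below are consumed with the classic two-call pattern - probe for the
 * needed size (padded by KERN_PROCSLOP above), then fetch the array.
 */
#if 0
#include <sys/sysctl.h>
#include <stdlib.h>

static struct kinfo_proc *
copy_proc_list_example(size_t *countp)          /* hypothetical helper */
{
    int mib[3] = { CTL_KERN, KERN_PROC, KERN_PROC_ALL };
    size_t len = 0;
    struct kinfo_proc *list;

    /* first call: oldp == NULL, so the kernel just reports the size needed */
    if (sysctl(mib, 3, NULL, &len, NULL, 0) != 0) {
        return NULL;
    }
    list = malloc(len);
    /* second call: actually copy the entries out (len may shrink) */
    if (list != NULL && sysctl(mib, 3, list, &len, NULL, 0) != 0) {
        free(list);
        return NULL;
    }
    *countp = len / sizeof(struct kinfo_proc);
    return list;
}
#endif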
STATIC int
sysctl_prochandle SYSCTL_HANDLER_ARGS
{
    int cmd = oidp->oid_arg2;           /* subcommand for multiple nodes */
    int *name = arg1;                   /* oid element argument vector */
    int namelen = arg2;                 /* number of oid element arguments */
    user_addr_t where = req->oldptr;    /* user buffer copy out address */

    user_addr_t dp = where;
    size_t needed = 0;
    int buflen = where != USER_ADDR_NULL ? req->oldlen : 0;
    int error = 0;
    boolean_t is_64_bit = proc_is64bit(current_proc());
    struct user32_kinfo_proc user32_kproc;
    struct user64_kinfo_proc user_kproc;
    int sizeof_kproc;
    void *kprocp;
    int (*filterfn)(proc_t, void *) = 0;
    struct sysdoproc_args args;
    int uidcheck = 0;
    int ruidcheck = 0;
    int ttycheck = 0;
    int success = 0;

    if (namelen != 1 && !(namelen == 0 && cmd == KERN_PROC_ALL)) {
        return EINVAL;
    }

    if (is_64_bit) {
        sizeof_kproc = sizeof(user_kproc);
        kprocp = &user_kproc;
    } else {
        sizeof_kproc = sizeof(user32_kproc);
        kprocp = &user32_kproc;
    }

    switch (cmd) {
    case KERN_PROC_PID:
        filterfn = sysdoproc_filt_KERN_PROC_PID;
        break;

    case KERN_PROC_PGRP:
        filterfn = sysdoproc_filt_KERN_PROC_PGRP;
        break;

    case KERN_PROC_TTY:
        ttycheck = 1;
        break;

    case KERN_PROC_UID:
        uidcheck = 1;
        break;

    case KERN_PROC_RUID:
        ruidcheck = 1;
        break;

    case KERN_PROC_ALL:
        break;

    default:
        /* must be kern.proc.<unknown> */
        return ENOTSUP;
    }

    error = 0;
    args.buflen = buflen;
    args.kprocp = kprocp;
    args.is_64_bit = is_64_bit;
    args.dp = dp;
    args.needed = needed;
    args.errorp = &error;
    args.uidcheck = uidcheck;
    args.ruidcheck = ruidcheck;
    args.ttycheck = ttycheck;
    args.sizeof_kproc = sizeof_kproc;
    if (namelen) {
        args.uidval = name[0];
    }

    success = proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST),
        sysdoproc_callback, &args, filterfn, name);

    /*
     * rdar://problem/28433391: if we can't iterate over the processes,
     * make sure to return an error.
     */
    if (success != 0) {
        return ENOMEM;
    }

    if (error) {
        return error;
    }

    dp = args.dp;
    needed = args.needed;

    if (where != USER_ADDR_NULL) {
        req->oldlen = dp - where;
        if (needed > req->oldlen) {
            return ENOMEM;
        }
    } else {
        needed += KERN_PROCSLOP;
        req->oldlen = needed;
    }
    /* adjust index so we return the right required/consumed amount */
    req->oldidx += req->oldlen;
    return 0;
}
/*
 * We specify the subcommand code for multiple nodes as the 'req->arg2' value
 * in the sysctl declaration itself, which comes into the handler function
 * as 'oidp->oid_arg2'.
 *
 * For these particular sysctls, since they have well known OIDs, we could
 * have just obtained it from the '((int *)arg1)[0]' parameter, but that would
 * not demonstrate how to handle multiple sysctls that used OID_AUTO instead
 * of a well known value with a common handler function.  This is desirable,
 * because we want well known values to "go away" at some future date.
 *
 * It should be noted that the value of '((int *)arg1)[1]' is used as an
 * integer parameter to the subcommand for many of these sysctls; we'd
 * rather have used '((int *)arg1)[0]' for that, or even better, an element
 * in a structure passed in as the 'newp' argument to sysctlbyname(3),
 * and then use leaf-node permissions enforcement, but that would have
 * necessitated modifying user space code to correspond to the interface
 * change, and we are striving for binary backward compatibility here; even
 * though these are SPI, and not intended for use by user space applications
 * which are not themselves system tools or libraries, some applications
 * have erroneously used them.
 */
SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                  /* Pointer argument (arg1) */
    KERN_PROC_ALL,      /* Integer argument (arg2) */
    sysctl_prochandle,  /* Handler function */
    NULL,               /* Data is size variant on ILP32/LP64 */
    "");
SYSCTL_PROC(_kern_proc, KERN_PROC_PID, pid, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                  /* Pointer argument (arg1) */
    KERN_PROC_PID,      /* Integer argument (arg2) */
    sysctl_prochandle,  /* Handler function */
    NULL,               /* Data is size variant on ILP32/LP64 */
    "");
SYSCTL_PROC(_kern_proc, KERN_PROC_TTY, tty, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                  /* Pointer argument (arg1) */
    KERN_PROC_TTY,      /* Integer argument (arg2) */
    sysctl_prochandle,  /* Handler function */
    NULL,               /* Data is size variant on ILP32/LP64 */
    "");
SYSCTL_PROC(_kern_proc, KERN_PROC_PGRP, pgrp, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                  /* Pointer argument (arg1) */
    KERN_PROC_PGRP,     /* Integer argument (arg2) */
    sysctl_prochandle,  /* Handler function */
    NULL,               /* Data is size variant on ILP32/LP64 */
    "");
SYSCTL_PROC(_kern_proc, KERN_PROC_UID, uid, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                  /* Pointer argument (arg1) */
    KERN_PROC_UID,      /* Integer argument (arg2) */
    sysctl_prochandle,  /* Handler function */
    NULL,               /* Data is size variant on ILP32/LP64 */
    "");
SYSCTL_PROC(_kern_proc, KERN_PROC_RUID, ruid, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                  /* Pointer argument (arg1) */
    KERN_PROC_RUID,     /* Integer argument (arg2) */
    sysctl_prochandle,  /* Handler function */
    NULL,               /* Data is size variant on ILP32/LP64 */
    "");
SYSCTL_PROC(_kern_proc, KERN_PROC_LCID, lcid, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                  /* Pointer argument (arg1) */
    KERN_PROC_LCID,     /* Integer argument (arg2) */
    sysctl_prochandle,  /* Handler function */
    NULL,               /* Data is size variant on ILP32/LP64 */
    "");
/*
 * Fill in non-zero fields of an eproc structure for the specified process.
 */
STATIC void
fill_user32_eproc(proc_t p, struct user32_eproc *__restrict ep)
{
    struct tty *tp;
    struct pgrp *pg;
    struct session *sessp;
    kauth_cred_t my_cred;

    pg = proc_pgrp(p);
    sessp = proc_session(p);

    if (pg != PGRP_NULL) {
        ep->e_pgid = p->p_pgrpid;
        ep->e_jobc = pg->pg_jobc;
        if (sessp != SESSION_NULL && sessp->s_ttyvp) {
            ep->e_flag = EPROC_CTTY;
        }
    }
    ep->e_ppid = p->p_ppid;
    if (p->p_ucred) {
        my_cred = kauth_cred_proc_ref(p);

        /* A fake historical pcred */
        ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
        ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
        ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
        ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);

        /* A fake historical *kauth_cred_t */
        ep->e_ucred.cr_ref = os_atomic_load(&my_cred->cr_ref, relaxed);
        ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
        ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups;
        bcopy(posix_cred_get(my_cred)->cr_groups,
            ep->e_ucred.cr_groups, NGROUPS * sizeof(gid_t));

        kauth_cred_unref(&my_cred);
    }

    if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
        (tp = SESSION_TP(sessp))) {
        ep->e_tdev = tp->t_dev;
        ep->e_tpgid = sessp->s_ttypgrpid;
    } else {
        ep->e_tdev = NODEV;
    }

    if (sessp != SESSION_NULL) {
        if (SESS_LEADER(p, sessp)) {
            ep->e_flag |= EPROC_SLEADER;
        }
        session_rele(sessp);
    }
    if (pg != PGRP_NULL) {
        pg_rele(pg);
    }
}
1014 fill_user64_eproc(proc_t p
, struct user64_eproc
*__restrict ep
)
1018 struct session
*sessp
;
1019 kauth_cred_t my_cred
;
1022 sessp
= proc_session(p
);
1024 if (pg
!= PGRP_NULL
) {
1025 ep
->e_pgid
= p
->p_pgrpid
;
1026 ep
->e_jobc
= pg
->pg_jobc
;
1027 if (sessp
!= SESSION_NULL
&& sessp
->s_ttyvp
) {
1028 ep
->e_flag
= EPROC_CTTY
;
1031 ep
->e_ppid
= p
->p_ppid
;
1033 my_cred
= kauth_cred_proc_ref(p
);
1035 /* A fake historical pcred */
1036 ep
->e_pcred
.p_ruid
= kauth_cred_getruid(my_cred
);
1037 ep
->e_pcred
.p_svuid
= kauth_cred_getsvuid(my_cred
);
1038 ep
->e_pcred
.p_rgid
= kauth_cred_getrgid(my_cred
);
1039 ep
->e_pcred
.p_svgid
= kauth_cred_getsvgid(my_cred
);
1041 /* A fake historical *kauth_cred_t */
1042 ep
->e_ucred
.cr_ref
= os_atomic_load(&my_cred
->cr_ref
, relaxed
);
1043 ep
->e_ucred
.cr_uid
= kauth_cred_getuid(my_cred
);
1044 ep
->e_ucred
.cr_ngroups
= posix_cred_get(my_cred
)->cr_ngroups
;
1045 bcopy(posix_cred_get(my_cred
)->cr_groups
,
1046 ep
->e_ucred
.cr_groups
, NGROUPS
* sizeof(gid_t
));
1048 kauth_cred_unref(&my_cred
);
1051 if ((p
->p_flag
& P_CONTROLT
) && (sessp
!= SESSION_NULL
) &&
1052 (tp
= SESSION_TP(sessp
))) {
1053 ep
->e_tdev
= tp
->t_dev
;
1054 ep
->e_tpgid
= sessp
->s_ttypgrpid
;
1059 if (sessp
!= SESSION_NULL
) {
1060 if (SESS_LEADER(p
, sessp
)) {
1061 ep
->e_flag
|= EPROC_SLEADER
;
1063 session_rele(sessp
);
1065 if (pg
!= PGRP_NULL
) {
/*
 * Fill in an eproc structure for the specified process.
 * bzeroed by our caller, so only set non-zero fields.
 */
STATIC void
fill_user32_externproc(proc_t p, struct user32_extern_proc *__restrict exp)
{
    exp->p_starttime.tv_sec = p->p_start.tv_sec;
    exp->p_starttime.tv_usec = p->p_start.tv_usec;
    exp->p_flag = p->p_flag;
    if (p->p_lflag & P_LTRACED) {
        exp->p_flag |= P_TRACED;
    }
    if (p->p_lflag & P_LPPWAIT) {
        exp->p_flag |= P_PPWAIT;
    }
    if (p->p_lflag & P_LEXIT) {
        exp->p_flag |= P_WEXIT;
    }
    exp->p_stat = p->p_stat;
    exp->p_pid = p->p_pid;
    exp->p_oppid = p->p_oppid;
    /* Mach related */
    exp->user_stack = p->user_stack;
    exp->p_debugger = p->p_debugger;
    exp->sigwait = p->sigwait;
    /* scheduling */
#ifdef _PROC_HAS_SCHEDINFO_
    exp->p_estcpu = p->p_estcpu;
    exp->p_pctcpu = p->p_pctcpu;
    exp->p_slptime = p->p_slptime;
#endif /* _PROC_HAS_SCHEDINFO_ */
    exp->p_realtimer.it_interval.tv_sec =
        (user32_time_t)p->p_realtimer.it_interval.tv_sec;
    exp->p_realtimer.it_interval.tv_usec =
        (__int32_t)p->p_realtimer.it_interval.tv_usec;

    exp->p_realtimer.it_value.tv_sec =
        (user32_time_t)p->p_realtimer.it_value.tv_sec;
    exp->p_realtimer.it_value.tv_usec =
        (__int32_t)p->p_realtimer.it_value.tv_usec;

    exp->p_rtime.tv_sec = (user32_time_t)p->p_rtime.tv_sec;
    exp->p_rtime.tv_usec = (__int32_t)p->p_rtime.tv_usec;

    exp->p_sigignore = p->p_sigignore;
    exp->p_sigcatch = p->p_sigcatch;
    exp->p_priority = p->p_priority;
    exp->p_nice = p->p_nice;
    bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
    exp->p_xstat = p->p_xstat;
    exp->p_acflag = p->p_acflag;
}
/*
 * Fill in an LP64 version of extern_proc structure for the specified process.
 */
STATIC void
fill_user64_externproc(proc_t p, struct user64_extern_proc *__restrict exp)
{
    exp->p_starttime.tv_sec = p->p_start.tv_sec;
    exp->p_starttime.tv_usec = p->p_start.tv_usec;
    exp->p_flag = p->p_flag;
    if (p->p_lflag & P_LTRACED) {
        exp->p_flag |= P_TRACED;
    }
    if (p->p_lflag & P_LPPWAIT) {
        exp->p_flag |= P_PPWAIT;
    }
    if (p->p_lflag & P_LEXIT) {
        exp->p_flag |= P_WEXIT;
    }
    exp->p_stat = p->p_stat;
    exp->p_pid = p->p_pid;
    exp->p_oppid = p->p_oppid;
    /* Mach related */
    exp->user_stack = p->user_stack;
    exp->p_debugger = p->p_debugger;
    exp->sigwait = p->sigwait;
    /* scheduling */
#ifdef _PROC_HAS_SCHEDINFO_
    exp->p_estcpu = p->p_estcpu;
    exp->p_pctcpu = p->p_pctcpu;
    exp->p_slptime = p->p_slptime;
#endif /* _PROC_HAS_SCHEDINFO_ */
    exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
    exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;

    exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
    exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;

    exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
    exp->p_rtime.tv_usec = p->p_rtime.tv_usec;

    exp->p_sigignore = p->p_sigignore;
    exp->p_sigcatch = p->p_sigcatch;
    exp->p_priority = p->p_priority;
    exp->p_nice = p->p_nice;
    bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
    exp->p_xstat = p->p_xstat;
    exp->p_acflag = p->p_acflag;
}
STATIC void
fill_user32_proc(proc_t p, struct user32_kinfo_proc *__restrict kp)
{
    /* on a 64 bit kernel, 32 bit users get some truncated information */
    fill_user32_externproc(p, &kp->kp_proc);
    fill_user32_eproc(p, &kp->kp_eproc);
}

STATIC void
fill_user64_proc(proc_t p, struct user64_kinfo_proc *__restrict kp)
{
    fill_user64_externproc(p, &kp->kp_proc);
    fill_user64_eproc(p, &kp->kp_eproc);
}
STATIC int
sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
{
    __unused int cmd = oidp->oid_arg2;  /* subcommand*/
    int *name = arg1;                   /* oid element argument vector */
    int namelen = arg2;                 /* number of oid element arguments */
    user_addr_t oldp = req->oldptr;     /* user buffer copy out address */
    size_t *oldlenp = &req->oldlen;     /* user buffer copy out size */
//  user_addr_t newp = req->newptr;     /* user buffer copy in address */
//  size_t newlen = req->newlen;        /* user buffer copy in size */

    int ret = 0;

    if (namelen == 0) {
        return ENOTSUP;
    }

    switch (name[0]) {
    case KERN_KDEFLAGS:
    case KERN_KDDFLAGS:
    case KERN_KDENABLE:
    case KERN_KDGETBUF:
    case KERN_KDSETUP:
    case KERN_KDREMOVE:
    case KERN_KDSETREG:
    case KERN_KDGETREG:
    case KERN_KDREADTR:
    case KERN_KDWRITETR:
    case KERN_KDWRITEMAP:
    case KERN_KDTHRMAP:
    case KERN_KDPIDTR:
    case KERN_KDSETBUF:
    case KERN_KDREADCURTHRMAP:
    case KERN_KDSET_TYPEFILTER:
    case KERN_KDBUFWAIT:
    case KERN_KDCPUMAP:
    case KERN_KDWRITEMAP_V3:
    case KERN_KDWRITETR_V3:
        ret = kdbg_control(name, namelen, oldp, oldlenp);
        break;
    default:
        ret = ENOTSUP;
        break;
    }

    /* adjust index so we return the right required/consumed amount */
    if (!ret) {
        req->oldidx += req->oldlen;
    }

    return ret;
}
SYSCTL_PROC(_kern, KERN_KDEBUG, kdebug, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                  /* Pointer argument (arg1) */
    0,                  /* Integer argument (arg2) */
    sysctl_kdebug_ops,  /* Handler function */
    NULL,               /* Data pointer */
    "");
#if !CONFIG_EMBEDDED
/*
 * Return the top *sizep bytes of the user stack, or the entire area of the
 * user stack down through the saved exec_path, whichever is smaller.
 */
STATIC int
sysctl_doprocargs SYSCTL_HANDLER_ARGS
{
    __unused int cmd = oidp->oid_arg2;  /* subcommand*/
    int *name = arg1;                   /* oid element argument vector */
    int namelen = arg2;                 /* number of oid element arguments */
    user_addr_t oldp = req->oldptr;     /* user buffer copy out address */
    size_t *oldlenp = &req->oldlen;     /* user buffer copy out size */
//  user_addr_t newp = req->newptr;     /* user buffer copy in address */
//  size_t newlen = req->newlen;        /* user buffer copy in size */
    int error;

    error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 0);

    /* adjust index so we return the right required/consumed amount */
    if (!error) {
        req->oldidx += req->oldlen;
    }

    return error;
}
SYSCTL_PROC(_kern, KERN_PROCARGS, procargs, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                  /* Pointer argument (arg1) */
    0,                  /* Integer argument (arg2) */
    sysctl_doprocargs,  /* Handler function */
    NULL,               /* Data pointer */
    "");
#endif /* !CONFIG_EMBEDDED */
STATIC int
sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
{
    __unused int cmd = oidp->oid_arg2;  /* subcommand*/
    int *name = arg1;                   /* oid element argument vector */
    int namelen = arg2;                 /* number of oid element arguments */
    user_addr_t oldp = req->oldptr;     /* user buffer copy out address */
    size_t *oldlenp = &req->oldlen;     /* user buffer copy out size */
//  user_addr_t newp = req->newptr;     /* user buffer copy in address */
//  size_t newlen = req->newlen;        /* user buffer copy in size */
    int error;

    error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 1);

    /* adjust index so we return the right required/consumed amount */
    if (!error) {
        req->oldidx += req->oldlen;
    }

    return error;
}
SYSCTL_PROC(_kern, KERN_PROCARGS2, procargs2, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                   /* Pointer argument (arg1) */
    0,                   /* Integer argument (arg2) */
    sysctl_doprocargs2,  /* Handler function */
    NULL,                /* Data pointer */
    "");
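/*
 * User-space sketch (illustration only, not compiled here): KERN_PROCARGS2
 * returns argc as the leading int, followed by the executable path and the
 * NUL-separated argument/environment strings, so a caller might do:
 */
#if 0
#include <sys/sysctl.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>

static int
get_argc_example(pid_t pid)                     /* hypothetical helper */
{
    int mib[3] = { CTL_KERN, KERN_PROCARGS2, (int)pid };
    size_t len = ARG_MAX;                       /* handler rejects larger buffers */
    char *buf = malloc(len);
    int argc = -1;

    /* the first word of the returned buffer is argc (see suword(where, argc)) */
    if (buf != NULL && sysctl(mib, 3, buf, &len, NULL, 0) == 0) {
        memcpy(&argc, buf, sizeof(argc));
    }
    free(buf);
    return argc;
}
#endif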
STATIC int
sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
    size_t *sizep, proc_t cur_proc, int argc_yes)
{
    proc_t p;
    int buflen = where != USER_ADDR_NULL ? *sizep : 0;
    int error = 0;
    struct _vm_map *proc_map;
    struct task * task;
    vm_map_copy_t tmp;
    user_addr_t arg_addr;
    size_t arg_size;
    caddr_t data;
    size_t argslen = 0;
    int size;
    vm_size_t alloc_size = 0;
    vm_offset_t copy_start, copy_end;
    kern_return_t ret;
    int pid;
    kauth_cred_t my_cred;
    uid_t uid;
    int argc = -1;

    if (namelen < 1) {
        return EINVAL;
    }

    if (argc_yes) {
        buflen -= sizeof(int);  /* reserve first word to return argc */
    }
    /* we only care about buflen when where (oldp from sysctl) is not NULL. */
    /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
    /* is not NULL then the caller wants us to return the length needed to */
    /* hold the data we would return */
    if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
        return EINVAL;
    }
    arg_size = buflen;

    /*
     *	Lookup process by pid
     */
    pid = name[0];
    p = proc_find(pid);
    if (p == NULL) {
        return EINVAL;
    }

    /*
     *	Copy the top N bytes of the stack.
     *	On all machines we have so far, the stack grows
     *	downwards.
     *
     *	If the user expects no more than N bytes of
     *	argument list, use that as a guess for the
     *	size.
     */

    if (!p->user_stack) {
        proc_rele(p);
        return EINVAL;
    }

    if (where == USER_ADDR_NULL) {
        /* caller only wants to know length of proc args data */
        if (sizep == NULL) {
            proc_rele(p);
            return EFAULT;
        }

        size = p->p_argslen;
        proc_rele(p);
        if (argc_yes) {
            size += sizeof(int);
        } else {
            /*
             * old PROCARGS will return the executable's path and plus some
             * extra space for work alignment and data tags
             */
            size += PATH_MAX + (6 * sizeof(int));
        }
        size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
        *sizep = size;
        return 0;
    }

    my_cred = kauth_cred_proc_ref(p);
    uid = kauth_cred_getuid(my_cred);
    kauth_cred_unref(&my_cred);

    if ((uid != kauth_cred_getuid(kauth_cred_get()))
        && suser(kauth_cred_get(), &cur_proc->p_acflag)) {
        proc_rele(p);
        return EINVAL;
    }

    if ((u_int)arg_size > p->p_argslen) {
        arg_size = round_page(p->p_argslen);
    }

    arg_addr = p->user_stack - arg_size;

    /*
     *	Before we can block (any VM code), make another
     *	reference to the map to keep it alive.  We do
     *	that by getting a reference on the task itself.
     */
    task = p->task;
    if (task == NULL) {
        proc_rele(p);
        return EINVAL;
    }

    /* save off argc before releasing the proc */
    argc = p->p_argc;

    argslen = p->p_argslen;
    /*
     * Once we have a task reference we can convert that into a
     * map reference, which we will use in the calls below.  The
     * task/process may change its map after we take this reference
     * (see execve), but the worst that will happen then is a return
     * of stale info (which is always a possibility).
     */
    task_reference(task);
    proc_rele(p);
    proc_map = get_task_map_reference(task);
    task_deallocate(task);

    if (proc_map == NULL) {
        return EINVAL;
    }

    alloc_size = round_page(arg_size);
    ret = kmem_alloc(kernel_map, &copy_start, alloc_size, VM_KERN_MEMORY_BSD);
    if (ret != KERN_SUCCESS) {
        vm_map_deallocate(proc_map);
        return ENOMEM;
    }
    bzero((void *)copy_start, alloc_size);

    copy_end = round_page(copy_start + arg_size);

    if (vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
        (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
        vm_map_deallocate(proc_map);
        kmem_free(kernel_map, copy_start,
            round_page(arg_size));
        return EIO;
    }

    /*
     *	Now that we've done the copyin from the process'
     *	map, we can release the reference to it.
     */
    vm_map_deallocate(proc_map);

    if (vm_map_copy_overwrite(kernel_map,
        (vm_map_address_t)copy_start,
        tmp, FALSE) != KERN_SUCCESS) {
        kmem_free(kernel_map, copy_start,
            round_page(arg_size));
        vm_map_copy_discard(tmp);
        return EIO;
    }

    if (arg_size > argslen) {
        data = (caddr_t) (copy_end - argslen);
        size = argslen;
    } else {
        data = (caddr_t) (copy_end - arg_size);
        size = arg_size;
    }

    /*
     * When these sysctls were introduced, the first string in the strings
     * section was just the bare path of the executable.  However, for security
     * reasons we now prefix this string with executable_path= so it can be
     * parsed getenv style.  To avoid binary compatibility issues with existing
     * callers of this sysctl, we strip it off here if present.
     * (rdar://problem/13746466)
     */
#define EXECUTABLE_KEY "executable_path="
    if (strncmp(EXECUTABLE_KEY, data, strlen(EXECUTABLE_KEY)) == 0) {
        data += strlen(EXECUTABLE_KEY);
        size -= strlen(EXECUTABLE_KEY);
    }

    if (argc_yes) {
        /* Put processes argc as the first word in the copyout buffer */
        suword(where, argc);
        error = copyout(data, (where + sizeof(int)), size);
        size += sizeof(int);
    } else {
        error = copyout(data, where, size);

        /*
         * Make the old PROCARGS work to return the executable's path
         * But, only if there is enough space in the provided buffer
         *
         * on entry: data [possibly] points to the beginning of the path
         *
         * Note: we keep all pointers&sizes aligned to word boundaries
         */
        if ((!error) && (buflen > 0 && (u_int)buflen > argslen)) {
            int binPath_sz, alignedBinPath_sz = 0;
            int extraSpaceNeeded, addThis;
            user_addr_t placeHere;
            char * str = (char *) data;
            int max_len = size;

            /* Some apps are really bad about messing up their stacks
             * So, we have to be extra careful about getting the length
             * of the executing binary.  If we encounter an error, we bail.
             */

            /* Limit ourselves to PATH_MAX paths */
            if (max_len > PATH_MAX) {
                max_len = PATH_MAX;
            }

            binPath_sz = 0;

            while ((binPath_sz < max_len - 1) && (*str++ != 0)) {
                binPath_sz++;
            }

            /* If we have a NUL terminator, copy it, too */
            if (binPath_sz < max_len - 1) {
                binPath_sz += 1;
            }

            /* Pre-Flight the space requirements */

            /* Account for the padding that fills out binPath to the next word */
            alignedBinPath_sz += (binPath_sz & (sizeof(int) - 1)) ? (sizeof(int) - (binPath_sz & (sizeof(int) - 1))) : 0;

            placeHere = where + size;

            /* Account for the bytes needed to keep placeHere word aligned */
            addThis = (placeHere & (sizeof(int) - 1)) ? (sizeof(int) - (placeHere & (sizeof(int) - 1))) : 0;

            /* Add up all the space that is needed */
            extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));

            /* is there room to tack on argv[0]? */
            if ((buflen & ~(sizeof(int) - 1)) >= (argslen + extraSpaceNeeded)) {
                placeHere += addThis;
                suword(placeHere, 0);
                placeHere += sizeof(int);
                suword(placeHere, 0xBFFF0000);
                placeHere += sizeof(int);
                suword(placeHere, 0);
                placeHere += sizeof(int);
                error = copyout(data, placeHere, binPath_sz);
                if (!error) {
                    placeHere += binPath_sz;
                    suword(placeHere, 0);
                    size += extraSpaceNeeded;
                }
            }
        }
    }

    if (copy_start != (vm_offset_t) 0) {
        kmem_free(kernel_map, copy_start, copy_end - copy_start);
    }
    if (error) {
        return error;
    }

    if (where != USER_ADDR_NULL) {
        *sizep = size;
    }
    return 0;
}
/*
 * Max number of concurrent aio requests
 */
STATIC int
sysctl_aiomax
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, aio_max_requests, sizeof(int), &new_value, &changed);
    if (changed) {
        /* make sure the system-wide limit is greater than the per process limit */
        if (new_value >= aio_max_requests_per_process && new_value <= AIO_MAX_REQUESTS) {
            aio_max_requests = new_value;
        } else {
            error = EINVAL;
        }
    }
    return error;
}
/*
 * Max number of concurrent aio requests per process
 */
STATIC int
sysctl_aioprocmax
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, aio_max_requests_per_process, sizeof(int), &new_value, &changed);
    if (changed) {
        /* make sure per process limit is less than the system-wide limit */
        if (new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX) {
            aio_max_requests_per_process = new_value;
        } else {
            error = EINVAL;
        }
    }
    return error;
}
/*
 * Max number of async IO worker threads
 */
STATIC int
sysctl_aiothreads
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, aio_worker_threads, sizeof(int), &new_value, &changed);
    if (changed) {
        /* we only allow an increase in the number of worker threads */
        if (new_value > aio_worker_threads) {
            _aio_create_worker_threads((new_value - aio_worker_threads));
            aio_worker_threads = new_value;
        } else {
            error = EINVAL;
        }
    }
    return error;
}
/*
 * System-wide limit on the max number of processes
 */
STATIC int
sysctl_maxproc
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, maxproc, sizeof(int), &new_value, &changed);
    if (changed) {
        AUDIT_ARG(value32, new_value);
        /* make sure the system-wide limit is less than the configured hard
         * limit set at kernel compilation */
        if (new_value <= hard_maxproc && new_value > 0) {
            maxproc = new_value;
        } else {
            error = EINVAL;
        }
    }
    return error;
}

extern int sched_enable_smt;

STATIC int
sysctl_sched_enable_smt
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, sched_enable_smt, sizeof(int), &new_value, &changed);
    if (error) {
        return error;
    }
    kern_return_t kret = KERN_SUCCESS;
    if (changed) {
        AUDIT_ARG(value32, new_value);
        if (new_value == 0) {
            sched_enable_smt = 0;
            kret = enable_smt_processors(false);
        } else {
            sched_enable_smt = 1;
            kret = enable_smt_processors(true);
        }
    }
    switch (kret) {
    case KERN_SUCCESS:
        error = 0;
        break;
    case KERN_INVALID_ARGUMENT:
        error = EINVAL;
        break;
    case KERN_FAILURE:
        error = EBUSY;
        break;
    default:
        error = ENOENT;
        break;
    }

    return error;
}
SYSCTL_STRING(_kern, KERN_OSTYPE, ostype,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    ostype, 0, "");
SYSCTL_STRING(_kern, KERN_OSRELEASE, osrelease,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    osrelease, 0, "");
SYSCTL_INT(_kern, KERN_OSREV, osrevision,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, BSD, "");
SYSCTL_STRING(_kern, KERN_VERSION, version,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    version, 0, "");
SYSCTL_STRING(_kern, OID_AUTO, uuid,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &kernel_uuid_string[0], 0, "");
SYSCTL_STRING(_kern, OID_AUTO, osbuildconfig,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_MASKED,
    &osbuild_config[0], 0, "");
int debug_kprint_syscall = 0;
char debug_kprint_syscall_process[MAXCOMLEN + 1];

/* Thread safe: bits and string value are not used to reclaim state */
SYSCTL_INT(_debug, OID_AUTO, kprint_syscall,
    CTLFLAG_RW | CTLFLAG_LOCKED, &debug_kprint_syscall, 0, "kprintf syscall tracing");
SYSCTL_STRING(_debug, OID_AUTO, kprint_syscall_process,
    CTLFLAG_RW | CTLFLAG_LOCKED, debug_kprint_syscall_process, sizeof(debug_kprint_syscall_process),
    "name of process for kprintf syscall tracing");

int
debug_kprint_current_process(const char **namep)
{
    struct proc *p = current_proc();

    if (p == NULL) {
        return 0;
    }

    if (debug_kprint_syscall_process[0]) {
        /* user asked to scope tracing to a particular process name */
        if (0 == strncmp(debug_kprint_syscall_process,
            p->p_comm, sizeof(debug_kprint_syscall_process))) {
            /* no value in telling the user that we traced what they asked */
            if (namep) {
                *namep = NULL;
            }
            return 1;
        } else {
            return 0;
        }
    }

    /* trace all processes. Tell user what we traced */
    if (namep) {
        *namep = p->p_comm;
    }

    return 1;
}
/* PR-5293665: need to use a callback function for kern.osversion to set
 * osversion in IORegistry */
STATIC int
sysctl_osversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
{
    int rval = 0;

    rval = sysctl_handle_string(oidp, arg1, arg2, req);

    if (rval == 0) {
        IORegistrySetOSBuildVersion((char *)arg1);
    }

    return rval;
}

SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
    CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
    osversion, 256 /* OSVERSIZE*/,
    sysctl_osversion, "A", "");
char osproductversion[48] = { '\0' };

STATIC int
sysctl_osproductversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
{
    if (req->newptr != 0) {
        /*
         * Can only ever be set by launchd, and only once at boot.
         */
        if (req->p->p_pid != 1 || osproductversion[0] != '\0') {
            return EPERM;
        }
    }

    return sysctl_handle_string(oidp, arg1, arg2, req);
}

SYSCTL_PROC(_kern, OID_AUTO, osproductversion,
    CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
    osproductversion, sizeof(osproductversion),
    sysctl_osproductversion, "A", "The ProductVersion from SystemVersion.plist");
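/*
 * Read-side sketch (illustration only): any process may read the value, e.g.
 *
 *	char vers[48]; size_t len = sizeof(vers);
 *	sysctlbyname("kern.osproductversion", vers, &len, NULL, 0);
 *
 * while writes are rejected with EPERM unless the caller is pid 1 and the
 * string has not yet been set.
 */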
static uint64_t osvariant_status = 0;

STATIC int
sysctl_osvariant_status(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
{
    if (req->newptr != 0) {
        /*
         * Can only ever be set by launchd, and only once at boot.
         */
        if (req->p->p_pid != 1 || osvariant_status != 0) {
            return EPERM;
        }
    }

    return sysctl_handle_quad(oidp, arg1, arg2, req);
}

SYSCTL_PROC(_kern, OID_AUTO, osvariant_status,
    CTLFLAG_RW | CTLTYPE_QUAD | CTLFLAG_LOCKED | CTLFLAG_MASKED,
    &osvariant_status, sizeof(osvariant_status),
    sysctl_osvariant_status, "Q", "Opaque flags used to cache OS variant information");
#if defined(XNU_TARGET_OS_BRIDGE)
char macosproductversion[MACOS_VERS_LEN] = { '\0' };

SYSCTL_STRING(_kern, OID_AUTO, macosproductversion,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &macosproductversion[0], MACOS_VERS_LEN, "The currently running macOS ProductVersion (from SystemVersion.plist on macOS)");

char macosversion[MACOS_VERS_LEN] = { '\0' };

SYSCTL_STRING(_kern, OID_AUTO, macosversion,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &macosversion[0], MACOS_VERS_LEN, "The currently running macOS build version");
#endif /* XNU_TARGET_OS_BRIDGE */
STATIC int
sysctl_sysctl_bootargs
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int error;
    /* BOOT_LINE_LENGTH */
#if CONFIG_EMBEDDED
    size_t boot_args_len = 256;
#else
    size_t boot_args_len = 1024;
#endif
    char buf[boot_args_len];

    strlcpy(buf, PE_boot_args(), boot_args_len);
    error = sysctl_io_string(req, buf, boot_args_len, 0, NULL);
    return error;
}

SYSCTL_PROC(_kern, OID_AUTO, bootargs,
    CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING,
    NULL, 0,
    sysctl_sysctl_bootargs, "A", "bootargs");
STATIC int
sysctl_kernelcacheuuid(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
{
    int rval = ENOENT;
    if (kernelcache_uuid_valid) {
        rval = sysctl_handle_string(oidp, arg1, arg2, req);
    }
    return rval;
}

SYSCTL_PROC(_kern, OID_AUTO, kernelcacheuuid,
    CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
    kernelcache_uuid_string, sizeof(kernelcache_uuid_string),
    sysctl_kernelcacheuuid, "A", "");
SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &maxfiles, 0, "");
SYSCTL_INT(_kern, KERN_ARGMAX, argmax,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, ARG_MAX, "");
SYSCTL_INT(_kern, KERN_POSIX1, posix1version,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, _POSIX_VERSION, "");
SYSCTL_INT(_kern, KERN_NGROUPS, ngroups,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, NGROUPS_MAX, "");
SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, 1, "");
#if 1   /* _POSIX_SAVED_IDS from <unistd.h> */
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, 1, "");
#else
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    NULL, 0, "");
#endif
SYSCTL_INT(_kern, OID_AUTO, num_files,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &nfiles, 0, "");
SYSCTL_COMPAT_INT(_kern, OID_AUTO, num_vnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &numvnodes, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_tasks,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &task_max, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_threads,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &thread_max, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_taskthreads,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &task_threadmax, 0, "");
SYSCTL_LONG(_kern, OID_AUTO, num_recycledvnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &num_recycledvnodes, "");
STATIC int
sysctl_maxvnodes(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int oldval = desiredvnodes;
    int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &desiredvnodes, NULL);

    if (oldval != desiredvnodes) {
        resize_namecache(desiredvnodes);
    }

    return error;
}

SYSCTL_INT(_kern, OID_AUTO, namecache_disabled,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &nc_disabled, 0, "");

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_maxvnodes, "I", "");
SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_maxproc, "I", "");

SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_aiomax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_aioprocmax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_aiothreads, "I", "");

SYSCTL_PROC(_kern, OID_AUTO, sched_enable_smt,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN,
    0, 0, sysctl_sched_enable_smt, "I", "");

extern int sched_allow_NO_SMT_threads;
SYSCTL_INT(_kern, OID_AUTO, sched_allow_NO_SMT_threads,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_allow_NO_SMT_threads, 0, "");
#if (DEVELOPMENT || DEBUG)
extern int sched_smt_balance;
SYSCTL_INT(_kern, OID_AUTO, sched_smt_balance,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_smt_balance, 0, "");
extern int sched_allow_rt_smt;
SYSCTL_INT(_kern, OID_AUTO, sched_allow_rt_smt,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_allow_rt_smt, 0, "");
extern int sched_avoid_cpu0;
SYSCTL_INT(_kern, OID_AUTO, sched_avoid_cpu0,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_avoid_cpu0, 0, "");
#if __arm__ || __arm64__
extern uint32_t perfcontrol_requested_recommended_cores;
SYSCTL_UINT(_kern, OID_AUTO, sched_recommended_cores,
    CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
    &perfcontrol_requested_recommended_cores, 0, "");
/* Scheduler perfcontrol callouts sysctls */
SYSCTL_DECL(_kern_perfcontrol_callout);
SYSCTL_NODE(_kern, OID_AUTO, perfcontrol_callout, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "scheduler perfcontrol callouts");

extern int perfcontrol_callout_stats_enabled;
SYSCTL_INT(_kern_perfcontrol_callout, OID_AUTO, stats_enabled,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &perfcontrol_callout_stats_enabled, 0, "");

extern uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
    perfcontrol_callout_stat_t stat);
/* On-Core Callout */
STATIC int
sysctl_perfcontrol_callout_stat
(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
{
    perfcontrol_callout_stat_t stat = (perfcontrol_callout_stat_t)arg1;
    perfcontrol_callout_type_t type = (perfcontrol_callout_type_t)arg2;
    return sysctl_io_number(req, (int)perfcontrol_callout_stat_avg(type, stat),
               sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, oncore_instr,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_ON_CORE,
    sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, oncore_cycles,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_ON_CORE,
    sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, offcore_instr,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_OFF_CORE,
    sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, offcore_cycles,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_OFF_CORE,
    sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, context_instr,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_CONTEXT,
    sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, context_cycles,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_CONTEXT,
    sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, update_instr,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_STATE_UPDATE,
    sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, update_cycles,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_STATE_UPDATE,
    sysctl_perfcontrol_callout_stat, "I", "");

#endif /* __arm__ || __arm64__ */
#endif /* (DEVELOPMENT || DEBUG) */
STATIC int
sysctl_securelvl
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, securelevel, sizeof(int), &new_value, &changed);
    if (changed) {
        if (!(new_value < securelevel && req->p->p_pid != 1)) {
            proc_list_lock();
            securelevel = new_value;
            proc_list_unlock();
        } else {
            error = EPERM;
        }
    }
    return error;
}

SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_securelvl, "I", "");
STATIC int
sysctl_domainname
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int error, changed;
    error = sysctl_io_string(req, domainname, sizeof(domainname), 0, &changed);
    if (changed) {
        domainnamelen = strlen(domainname);
    }
    return error;
}

SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_domainname, "A", "");
SYSCTL_COMPAT_INT(_kern, KERN_HOSTID, hostid,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &hostid, 0, "");
STATIC int
sysctl_hostname
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int error, changed;
    error = sysctl_io_string(req, hostname, sizeof(hostname), 1, &changed);
    if (changed) {
        hostnamelen = req->newlen;
    }
    return error;
}

SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_hostname, "A", "");
STATIC int
sysctl_procname
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    /* Original code allowed writing; I'm copying this, although this all makes
     * no sense to me. Besides, this sysctl is never used. */
    return sysctl_io_string(req, &req->p->p_name[0], (2 * MAXCOMLEN + 1), 1, NULL);
}

SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, sysctl_procname, "A", "");
SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &speculative_reads_disabled, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, preheat_max_bytes,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &preheat_max_bytes, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, preheat_min_bytes,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &preheat_min_bytes, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &speculative_prefetch_max, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max_iosize,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &speculative_prefetch_max_iosize, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_target,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_free_target, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_min,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_free_min, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_reserved,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_free_reserved, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_percentage,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_pageout_state.vm_page_speculative_percentage, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_q_age_ms,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_pageout_state.vm_page_speculative_q_age_ms, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_max_delayed_work_limit,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_max_delayed_work_limit, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_max_batch,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_max_batch, 0, "");

SYSCTL_STRING(_kern, OID_AUTO, bootsessionuuid,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &bootsessionuuid_string, sizeof(bootsessionuuid_string), "");
STATIC int
sysctl_boottime
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    struct timeval tv;
    boottime_timeval(&tv);
    struct proc *p = req->p;

    if (proc_is64bit(p)) {
        struct user64_timeval t = {};
        t.tv_sec = tv.tv_sec;
        t.tv_usec = tv.tv_usec;
        return sysctl_io_opaque(req, &t, sizeof(t), NULL);
    } else {
        struct user32_timeval t = {};
        t.tv_sec = tv.tv_sec;
        t.tv_usec = tv.tv_usec;
        return sysctl_io_opaque(req, &t, sizeof(t), NULL);
    }
}

SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
    CTLTYPE_STRUCT | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_boottime, "S,timeval", "");
STATIC int
sysctl_symfile
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    char *str;
    int error = get_kernel_symfile(req->p, &str);
    if (error) {
        return error;
    }
    return sysctl_io_string(req, str, 0, 0, NULL);
}

SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_symfile, "A", "");
STATIC int
sysctl_netboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    return sysctl_io_number(req, netboot_root(), sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_netboot, "I", "");
#ifdef CONFIG_IMGSRC_ACCESS
/*
 * Legacy--act as if only one layer of nesting is possible.
 */
STATIC int
sysctl_imgsrcdev
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    vfs_context_t ctx = vfs_context_current();
    vnode_t devvp;
    int result;

    if (!vfs_context_issuser(ctx)) {
        return EPERM;
    }

    if (imgsrc_rootvnodes[0] == NULL) {
        return ENOENT;
    }

    result = vnode_getwithref(imgsrc_rootvnodes[0]);
    if (result != 0) {
        return result;
    }

    devvp = vnode_mount(imgsrc_rootvnodes[0])->mnt_devvp;
    result = vnode_getwithref(devvp);
    if (result != 0) {
        goto out;
    }

    result = sysctl_io_number(req, vnode_specrdev(devvp), sizeof(dev_t), NULL, NULL);

    vnode_put(devvp);
out:
    vnode_put(imgsrc_rootvnodes[0]);
    return result;
}

SYSCTL_PROC(_kern, OID_AUTO, imgsrcdev,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_imgsrcdev, "I", "");
STATIC int
sysctl_imgsrcinfo
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int error;
    struct imgsrc_info info[MAX_IMAGEBOOT_NESTING] = {}; /* 2 for now, no problem */
    uint32_t i;
    vnode_t rvp, devvp;

    if (imgsrc_rootvnodes[0] == NULLVP) {
        return ENXIO;
    }

    for (i = 0; i < MAX_IMAGEBOOT_NESTING; i++) {
        /*
         * Go get the root vnode.
         */
        rvp = imgsrc_rootvnodes[i];
        if (rvp == NULLVP) {
            break;
        }

        error = vnode_get(rvp);
        if (error != 0) {
            return error;
        }

        /*
         * For now, no getting at a non-local volume.
         */
        devvp = vnode_mount(rvp)->mnt_devvp;
        if (devvp == NULL) {
            vnode_put(rvp);
            return EINVAL;
        }

        error = vnode_getwithref(devvp);
        if (error != 0) {
            vnode_put(rvp);
            return error;
        }

        info[i].ii_dev = vnode_specrdev(devvp);
        info[i].ii_flags = 0;
        info[i].ii_height = i;
        bzero(info[i].ii_reserved, sizeof(info[i].ii_reserved));

        vnode_put(devvp);
        vnode_put(rvp);
    }

    return sysctl_io_opaque(req, info, i * sizeof(info[0]), NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, imgsrcinfo,
    CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_imgsrcinfo, "I", "");

#endif /* CONFIG_IMGSRC_ACCESS */
SYSCTL_DECL(_kern_timer);
SYSCTL_NODE(_kern, OID_AUTO, timer, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "timer");

SYSCTL_INT(_kern_timer, OID_AUTO, coalescing_enabled,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &mach_timer_coalescing_enabled, 0, "");
SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_1,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &timer_deadline_tracking_bin_1, "");
SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_2,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &timer_deadline_tracking_bin_2, "");

SYSCTL_DECL(_kern_timer_longterm);
SYSCTL_NODE(_kern_timer, OID_AUTO, longterm, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "longterm");
/* Must match definition in osfmk/kern/timer_call.c */
enum {
    THRESHOLD, QCOUNT,
    ENQUEUES, DEQUEUES, ESCALATES, SCANS, PREEMPTS,
    LATENCY, LATENCY_MIN, LATENCY_MAX, SCAN_LIMIT, SCAN_INTERVAL, PAUSES
};
extern uint64_t timer_sysctl_get(int);
extern int      timer_sysctl_set(int, uint64_t);
STATIC int
sysctl_timer
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int oid = (int)arg1;
    uint64_t value = timer_sysctl_get(oid);
    uint64_t new_value;
    int error;
    int changed;

    error = sysctl_io_number(req, value, sizeof(value), &new_value, &changed);
    if (changed) {
        error = timer_sysctl_set(oid, new_value);
    }

    return error;
}
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, threshold,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    (void *) THRESHOLD, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_limit,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    (void *) SCAN_LIMIT, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_interval,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    (void *) SCAN_INTERVAL, 0, sysctl_timer, "Q", "");

SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, qlen,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) QCOUNT, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_pauses,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) PAUSES, 0, sysctl_timer, "Q", "");

SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, enqueues,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) ENQUEUES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, dequeues,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) DEQUEUES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, escalates,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) ESCALATES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scans,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) SCANS, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, preempts,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) PREEMPTS, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) LATENCY, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_min,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) LATENCY_MIN, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_max,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) LATENCY_MAX, 0, sysctl_timer, "Q", "");
STATIC int
sysctl_usrstack
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    return sysctl_io_number(req, (int)req->p->user_stack, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_USRSTACK32, usrstack,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_usrstack, "I", "");
STATIC int
sysctl_usrstack64
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    return sysctl_io_number(req, req->p->user_stack, sizeof(req->p->user_stack), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_usrstack64, "Q", "");
#if CONFIG_COREDUMP

SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    corefilename, sizeof(corefilename), "");
STATIC int
sysctl_coredump
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
    (void)req;
    return ENOTSUP;
#else
    int new_value, changed;
    int error = sysctl_io_number(req, do_coredump, sizeof(int), &new_value, &changed);
    if (changed) {
        if ((new_value == 0) || (new_value == 1)) {
            do_coredump = new_value;
        } else {
            error = EINVAL;
        }
    }
    return error;
#endif
}

SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_coredump, "I", "");
STATIC int
sysctl_suid_coredump
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
    (void)req;
    return ENOTSUP;
#else
    int new_value, changed;
    int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed);
    if (changed) {
        if ((new_value == 0) || (new_value == 1)) {
            sugid_coredump = new_value;
        } else {
            error = EINVAL;
        }
    }
    return error;
#endif
}

SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_suid_coredump, "I", "");

#endif /* CONFIG_COREDUMP */
STATIC int
sysctl_delayterm
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    struct proc *p = req->p;
    int new_value, changed;
    int error = sysctl_io_number(req, (req->p->p_lflag & P_LDELAYTERM) ? 1 : 0, sizeof(int), &new_value, &changed);
    if (changed) {
        proc_lock(p);
        if (new_value) {
            req->p->p_lflag |= P_LDELAYTERM;
        } else {
            req->p->p_lflag &= ~P_LDELAYTERM;
        }
        proc_unlock(p);
    }
    return error;
}

SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_delayterm, "I", "");
STATIC int
sysctl_rage_vnode
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    struct proc *p = req->p;
    uthread_t ut;
    int new_value, old_value, changed;
    int error;

    ut = get_bsdthread_info(current_thread());

    if (ut->uu_flag & UT_RAGE_VNODES) {
        old_value = KERN_RAGE_THREAD;
    } else if (p->p_lflag & P_LRAGE_VNODES) {
        old_value = KERN_RAGE_PROC;
    } else {
        old_value = 0;
    }

    error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);

    if ((error == 0) && (changed != 0)) {
        switch (new_value) {
        case KERN_RAGE_PROC:
            proc_lock(p);
            p->p_lflag |= P_LRAGE_VNODES;
            proc_unlock(p);
            break;
        case KERN_UNRAGE_PROC:
            proc_lock(p);
            p->p_lflag &= ~P_LRAGE_VNODES;
            proc_unlock(p);
            break;
        case KERN_RAGE_THREAD:
            ut->uu_flag |= UT_RAGE_VNODES;
            break;
        case KERN_UNRAGE_THREAD:
            ut = get_bsdthread_info(current_thread());
            ut->uu_flag &= ~UT_RAGE_VNODES;
            break;
        }
    }
    return error;
}

SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, sysctl_rage_vnode, "I", "");
/* XXX move this interface into libproc and remove this sysctl */
STATIC int
sysctl_setthread_cpupercent
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, old_value;
    int error = 0;
    kern_return_t kret = KERN_SUCCESS;
    uint8_t percent = 0;
    int ms_refill = 0;

    if (!req->newptr) {
        return 0;
    }

    old_value = 0;

    if ((error = sysctl_io_number(req, old_value, sizeof(old_value), &new_value, NULL)) != 0) {
        return error;
    }

    percent = new_value & 0xff;                     /* low 8 bits carry the percent */
    ms_refill = (new_value >> 8) & 0xffffff;        /* upper 24 bits carry the ms refill value */
    if (percent > 100) {
        return EINVAL;
    }

    /*
     * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present.
     */
    if ((kret = thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, percent, ms_refill * (int)NSEC_PER_MSEC)) != 0) {
        return EIO;
    }

    return 0;
}

SYSCTL_PROC(_kern, OID_AUTO, setthread_cpupercent,
    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_ANYBODY,
    0, 0, sysctl_setthread_cpupercent, "I", "set thread cpu percentage limit");
STATIC int
sysctl_kern_check_openevt
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    struct proc *p = req->p;
    int new_value, old_value, changed;
    int error;

    if (p->p_flag & P_CHECKOPENEVT) {
        old_value = KERN_OPENEVT_PROC;
    } else {
        old_value = 0;
    }

    error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);

    if ((error == 0) && (changed != 0)) {
        switch (new_value) {
        case KERN_OPENEVT_PROC:
            OSBitOrAtomic(P_CHECKOPENEVT, &p->p_flag);
            break;
        case KERN_UNOPENEVT_PROC:
            OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT), &p->p_flag);
            break;
        default:
            error = EINVAL;
        }
    }
    return error;
}

SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");
#if DEVELOPMENT || DEBUG
STATIC int
sysctl_nx
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
    (void)req;
    return ENOTSUP;
#else
    int new_value, changed;
    int error;

    error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);
    if (error) {
        return error;
    }

    if (changed) {
#if defined(__x86_64__)
        /*
         * Only allow setting if NX is supported on the chip
         */
        if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD)) {
            return ENOTSUP;
        }
#endif
        nx_enabled = new_value;
    }
    return error;
#endif /* SECURE_KERNEL */
}
#endif

#if DEVELOPMENT || DEBUG
SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    0, 0, sysctl_nx, "I", "");
#endif
STATIC int
sysctl_loadavg
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    if (proc_is64bit(req->p)) {
        struct user64_loadavg loadinfo64 = {};
        fill_loadavg64(&averunnable, &loadinfo64);
        return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);
    } else {
        struct user32_loadavg loadinfo32 = {};
        fill_loadavg32(&averunnable, &loadinfo32);
        return sysctl_io_opaque(req, &loadinfo32, sizeof(loadinfo32), NULL);
    }
}

SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_loadavg, "S,loadavg", "");
/*
 * Note:	Thread safe; vm_map_lock protects in vm_toggle_entry_reuse()
 */
STATIC int
sysctl_vm_toggle_address_reuse(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
    int old_value = 0, new_value = 0, error = 0;

    if (vm_toggle_entry_reuse(VM_TOGGLE_GETVALUE, &old_value)) {
        return error;
    }
    error = sysctl_io_number(req, old_value, sizeof(int), &new_value, NULL);
    if (error == 0) {
        return vm_toggle_entry_reuse(new_value, NULL);
    }
    return error;
}

SYSCTL_PROC(_debug, OID_AUTO, toggle_address_reuse, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_toggle_address_reuse, "I", "");
#ifdef CONFIG_XNUPOST

extern int xnupost_export_testdata(void *outp, uint32_t size, uint32_t *lenp);
extern uint32_t xnupost_get_estimated_testdata_size(void);
extern int xnupost_reset_all_tests(void);

STATIC int
sysctl_handle_xnupost_get_tests SYSCTL_HANDLER_ARGS
{
    /* fixup unused arguments warnings */
    __unused int _oa2 = arg2;
    __unused void * _oa1 = arg1;
    __unused struct sysctl_oid * _oidp = oidp;

    int error = 0;
    user_addr_t oldp = 0;
    user_addr_t newp = 0;
    uint32_t usedbytes = 0;

    oldp = req->oldptr;
    newp = req->newptr;

    if ((void *)oldp == NULL) {
        /* return estimated size for second call where info can be placed */
        req->oldidx = xnupost_get_estimated_testdata_size();
    } else {
        error = xnupost_export_testdata((void *)oldp, req->oldlen, &usedbytes);
        req->oldidx = usedbytes;
    }

    return error;
}

SYSCTL_PROC(_debug,
    OID_AUTO,
    xnupost_get_tests,
    CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,
    0,
    sysctl_handle_xnupost_get_tests,
    "-",
    "read xnupost test data in kernel");
STATIC int
sysctl_debug_xnupost_ctl SYSCTL_HANDLER_ARGS
{
    /* fixup unused arguments warnings */
    __unused int _oa2 = arg2;
    __unused void * _oa1 = arg1;
    __unused struct sysctl_oid * _oidp = oidp;

#define ARRCOUNT 4
    /*
     * INPUT: ACTION, PARAM1, PARAM2, PARAM3
     * OUTPUT: RESULTCODE, ADDITIONAL DATA
     */
    int32_t outval[ARRCOUNT] = {0};
    int32_t input[ARRCOUNT] = {0};
    int32_t out_size = sizeof(outval);
    int32_t in_size = sizeof(input);
    int error = 0;

    /* if this is NULL call to find out size, send out size info */
    if (!req->newptr) {
        goto out;
    }

    /* pull in provided value from userspace */
    error = SYSCTL_IN(req, &input[0], in_size);
    if (error) {
        return error;
    }

    if (input[0] == XTCTL_RESET_TESTDATA) {
        outval[0] = xnupost_reset_all_tests();
        goto out;
    }

out:
    error = SYSCTL_OUT(req, &outval[0], out_size);
    return error;
}

SYSCTL_PROC(_debug,
    OID_AUTO,
    xnupost_testctl,
    CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED,
    0,
    0,
    sysctl_debug_xnupost_ctl,
    "I",
    "xnupost control for kernel testing");
extern void test_oslog_handleOSLogCtl(int32_t * in, int32_t * out, int32_t arraycount);

STATIC int
sysctl_debug_test_oslog_ctl(__unused struct sysctl_oid * oidp, __unused void * arg1, __unused int arg2, struct sysctl_req * req)
{
    int32_t outval[ARRCOUNT] = {0};
    int32_t input[ARRCOUNT] = {0};
    int32_t size_outval = sizeof(outval);
    int32_t size_inval = sizeof(input);
    int32_t error;

    /* if this is NULL call to find out size, send out size info */
    if (!req->newptr) {
        error = SYSCTL_OUT(req, &outval[0], size_outval);
        return error;
    }

    /* pull in provided value from userspace */
    error = SYSCTL_IN(req, &input[0], size_inval);
    if (error) {
        return error;
    }

    test_oslog_handleOSLogCtl(input, outval, ARRCOUNT);

    error = SYSCTL_OUT(req, &outval[0], size_outval);

    return error;
}

SYSCTL_PROC(_debug,
    OID_AUTO,
    test_OSLogCtl,
    CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED,
    0,
    0,
    sysctl_debug_test_oslog_ctl,
    "I",
    "testing oslog in kernel");
#include <mach/task.h>
#include <mach/semaphore.h>

extern lck_grp_t * sysctl_debug_test_stackshot_owner_grp; /* used for both mutexes and rwlocks */
extern lck_mtx_t * sysctl_debug_test_stackshot_owner_init_mtx; /* used to protect lck_*_init */

/* This is a sysctl for testing collection of owner info on a lock in kernel space. A multi-threaded
 * test from userland sets this sysctl in such a way that a thread blocks in kernel mode, and a
 * stackshot is taken to see if the owner of the lock can be identified.
 *
 * We can't return to userland with a kernel lock held, so be sure to unlock before we leave.
 * The semaphores allow us to artificially create cases where the lock is being held and the
 * thread is hanging / taking a long time to do something. */

volatile char   sysctl_debug_test_stackshot_mtx_inited = 0;
semaphore_t     sysctl_debug_test_stackshot_mutex_sem;
lck_mtx_t       sysctl_debug_test_stackshot_owner_lck;

#define SYSCTL_DEBUG_MTX_ACQUIRE_WAIT   1
#define SYSCTL_DEBUG_MTX_ACQUIRE_NOWAIT 2
#define SYSCTL_DEBUG_MTX_SIGNAL         3
#define SYSCTL_DEBUG_MTX_TEARDOWN       4
STATIC int
sysctl_debug_test_stackshot_mutex_owner(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    long long option = -1;
    /* if the user tries to read the sysctl, we tell them what the address of the lock is (to test against stackshot's output) */
    long long mtx_unslid_addr = (long long)VM_KERNEL_UNSLIDE_OR_PERM(&sysctl_debug_test_stackshot_owner_lck);
    int error = sysctl_io_number(req, mtx_unslid_addr, sizeof(long long), (void*)&option, NULL);

    lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx);
    if (!sysctl_debug_test_stackshot_mtx_inited) {
        lck_mtx_init(&sysctl_debug_test_stackshot_owner_lck,
            sysctl_debug_test_stackshot_owner_grp,
            LCK_ATTR_NULL);
        semaphore_create(kernel_task,
            &sysctl_debug_test_stackshot_mutex_sem,
            SYNC_POLICY_FIFO, 0);
        sysctl_debug_test_stackshot_mtx_inited = 1;
    }
    lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx);

    if (!error) {
        switch (option) {
        case SYSCTL_DEBUG_MTX_ACQUIRE_NOWAIT:
            lck_mtx_lock(&sysctl_debug_test_stackshot_owner_lck);
            lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_lck);
            break;
        case SYSCTL_DEBUG_MTX_ACQUIRE_WAIT:
            lck_mtx_lock(&sysctl_debug_test_stackshot_owner_lck);
            semaphore_wait(sysctl_debug_test_stackshot_mutex_sem);
            lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_lck);
            break;
        case SYSCTL_DEBUG_MTX_SIGNAL:
            semaphore_signal(sysctl_debug_test_stackshot_mutex_sem);
            break;
        case SYSCTL_DEBUG_MTX_TEARDOWN:
            lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx);

            lck_mtx_destroy(&sysctl_debug_test_stackshot_owner_lck,
                sysctl_debug_test_stackshot_owner_grp);
            semaphore_destroy(kernel_task,
                sysctl_debug_test_stackshot_mutex_sem);
            sysctl_debug_test_stackshot_mtx_inited = 0;

            lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx);
            break;
        case -1: /* user just wanted to read the value, so do nothing */
            break;
        default:
            error = EINVAL;
            break;
        }
    }
    return error;
}

/* we can't return to userland with a kernel rwlock held, so be sure to unlock before we leave.
 * the semaphores allow us to artificially create cases where the lock is being held and the
 * thread is hanging / taking a long time to do something. */

SYSCTL_PROC(_debug,
    OID_AUTO,
    test_MutexOwnerCtl,
    CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    0,
    0,
    sysctl_debug_test_stackshot_mutex_owner,
    "-",
    "Testing mutex owner in kernel");
volatile char   sysctl_debug_test_stackshot_rwlck_inited = 0;
lck_rw_t        sysctl_debug_test_stackshot_owner_rwlck;
semaphore_t     sysctl_debug_test_stackshot_rwlck_sem;

#define SYSCTL_DEBUG_KRWLCK_RACQUIRE_NOWAIT 1
#define SYSCTL_DEBUG_KRWLCK_RACQUIRE_WAIT   2
#define SYSCTL_DEBUG_KRWLCK_WACQUIRE_NOWAIT 3
#define SYSCTL_DEBUG_KRWLCK_WACQUIRE_WAIT   4
#define SYSCTL_DEBUG_KRWLCK_SIGNAL          5
#define SYSCTL_DEBUG_KRWLCK_TEARDOWN        6
STATIC int
sysctl_debug_test_stackshot_rwlck_owner(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    long long option = -1;
    /* if the user tries to read the sysctl, we tell them what the address of the lock is
     * (to test against stackshot's output) */
    long long rwlck_unslid_addr = (long long)VM_KERNEL_UNSLIDE_OR_PERM(&sysctl_debug_test_stackshot_owner_rwlck);
    int error = sysctl_io_number(req, rwlck_unslid_addr, sizeof(long long), (void*)&option, NULL);

    lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx);
    if (!sysctl_debug_test_stackshot_rwlck_inited) {
        lck_rw_init(&sysctl_debug_test_stackshot_owner_rwlck,
            sysctl_debug_test_stackshot_owner_grp,
            LCK_ATTR_NULL);
        semaphore_create(kernel_task,
            &sysctl_debug_test_stackshot_rwlck_sem,
            SYNC_POLICY_FIFO, 0);
        sysctl_debug_test_stackshot_rwlck_inited = 1;
    }
    lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx);

    if (!error) {
        switch (option) {
        case SYSCTL_DEBUG_KRWLCK_RACQUIRE_NOWAIT:
            lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
            lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
            break;
        case SYSCTL_DEBUG_KRWLCK_RACQUIRE_WAIT:
            lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
            semaphore_wait(sysctl_debug_test_stackshot_rwlck_sem);
            lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
            break;
        case SYSCTL_DEBUG_KRWLCK_WACQUIRE_NOWAIT:
            lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
            lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
            break;
        case SYSCTL_DEBUG_KRWLCK_WACQUIRE_WAIT:
            lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
            semaphore_wait(sysctl_debug_test_stackshot_rwlck_sem);
            lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
            break;
        case SYSCTL_DEBUG_KRWLCK_SIGNAL:
            semaphore_signal(sysctl_debug_test_stackshot_rwlck_sem);
            break;
        case SYSCTL_DEBUG_KRWLCK_TEARDOWN:
            lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx);

            lck_rw_destroy(&sysctl_debug_test_stackshot_owner_rwlck,
                sysctl_debug_test_stackshot_owner_grp);
            semaphore_destroy(kernel_task,
                sysctl_debug_test_stackshot_rwlck_sem);
            sysctl_debug_test_stackshot_rwlck_inited = 0;

            lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx);
            break;
        case -1: /* user just wanted to read the value, so do nothing */
            break;
        default:
            error = EINVAL;
            break;
        }
    }
    return error;
}

SYSCTL_PROC(_debug,
    OID_AUTO,
    test_RWLockOwnerCtl,
    CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    0,
    0,
    sysctl_debug_test_stackshot_rwlck_owner,
    "-",
    "Testing rwlock owner in kernel");
#endif /* !CONFIG_XNUPOST */
STATIC int
sysctl_swapusage
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int error;
    uint64_t swap_total;
    uint64_t swap_avail;
    vm_size_t swap_pagesize;
    boolean_t swap_encrypted;
    struct xsw_usage xsu = {};

    error = macx_swapinfo(&swap_total,
        &swap_avail,
        &swap_pagesize,
        &swap_encrypted);
    if (error) {
        return error;
    }

    xsu.xsu_total = swap_total;
    xsu.xsu_avail = swap_avail;
    xsu.xsu_used = swap_total - swap_avail;
    xsu.xsu_pagesize = swap_pagesize;
    xsu.xsu_encrypted = swap_encrypted;
    return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL);
}

SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_swapusage, "S,xsw_usage", "");
#if CONFIG_FREEZE
extern void vm_page_reactivate_all_throttled(void);
extern void memorystatus_disable_freeze(void);

static int
sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int error, val = memorystatus_freeze_enabled ? 1 : 0;
    boolean_t disabled;

    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || !req->newptr) {
        return error;
    }

    if (!VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
        //assert(req->newptr);
        printf("Failed attempt to set vm.freeze_enabled sysctl\n");
        return EINVAL;
    }

    /*
     * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue.
     */
    disabled = (!val && memorystatus_freeze_enabled);

    memorystatus_freeze_enabled = val ? TRUE : FALSE;

    if (disabled) {
        vm_page_reactivate_all_throttled();
        memorystatus_disable_freeze();
    }

    return 0;
}

SYSCTL_PROC(_vm, OID_AUTO, freeze_enabled, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY, &memorystatus_freeze_enabled, 0, sysctl_freeze_enabled, "I", "");
#endif /* CONFIG_FREEZE */
#if DEVELOPMENT || DEBUG
extern int vm_num_swap_files_config;
extern int vm_num_swap_files;
extern lck_mtx_t vm_swap_data_lock;
#define VM_MAX_SWAP_FILE_NUM 100

static int
sysctl_vm_config_num_swap_files SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int error = 0, val = vm_num_swap_files_config;

    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || !req->newptr) {
        goto out;
    }

    if (!VM_CONFIG_SWAP_IS_ACTIVE && !VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
        printf("Swap is disabled\n");
        error = EINVAL;
        goto out;
    }

    lck_mtx_lock(&vm_swap_data_lock);

    if (val < vm_num_swap_files) {
        printf("Cannot configure fewer swap files than already exist.\n");
        error = EINVAL;
        lck_mtx_unlock(&vm_swap_data_lock);
        goto out;
    }

    if (val > VM_MAX_SWAP_FILE_NUM) {
        printf("Capping number of swap files to upper bound.\n");
        val = VM_MAX_SWAP_FILE_NUM;
    }

    vm_num_swap_files_config = val;
    lck_mtx_unlock(&vm_swap_data_lock);
out:
    return error;
}

SYSCTL_PROC(_debug, OID_AUTO, num_swap_files_configured, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_config_num_swap_files, "I", "");
#endif /* DEVELOPMENT || DEBUG */
/* this kernel does NOT implement shared_region_make_private_np() */
SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    (int *)NULL, 0, "");
STATIC int
fetch_process_cputype(
    proc_t cur_proc,
    int *name,
    u_int namelen,
    cpu_type_t *cputype)
{
    proc_t p = PROC_NULL;
    int refheld = 0;
    cpu_type_t ret = 0;
    int error = 0;

    if (namelen == 0) {
        p = cur_proc;
    } else if (namelen == 1) {
        p = proc_find(name[0]);
        if (p == NULL) {
            return EINVAL;
        }
        refheld = 1;
    } else {
        return EINVAL;
    }

    ret = cpu_type() & ~CPU_ARCH_MASK;
    if (IS_64BIT_PROCESS(p)) {
        ret |= CPU_ARCH_ABI64;
    }

    *cputype = ret;

    if (refheld != 0) {
        proc_rele(p);
    }
    return error;
}
STATIC int
sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
    struct sysctl_req *req)
{
    int error;
    cpu_type_t proc_cputype = 0;
    if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0) {
        return error;
    }
    int res = 1;
    if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK)) {
        res = 0;
    }
    return SYSCTL_OUT(req, &res, sizeof(res));
}
SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_native, "I", "proc_native");

STATIC int
sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
    struct sysctl_req *req)
{
    int error;
    cpu_type_t proc_cputype = 0;
    if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0) {
        return error;
    }
    return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
}
SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_cputype, "I", "proc_cputype");
STATIC int
sysctl_safeboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    return sysctl_io_number(req, boothowto & RB_SAFEBOOT ? 1 : 0, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_safeboot, "I", "");
STATIC int
sysctl_singleuser
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    return sysctl_io_number(req, boothowto & RB_SINGLE ? 1 : 0, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, singleuser,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_singleuser, "I", "");
STATIC int
sysctl_minimalboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    return sysctl_io_number(req, minimalboot, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, minimalboot,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_minimalboot, "I", "");
/*
 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
 */
extern boolean_t affinity_sets_enabled;
extern int       affinity_sets_mapping;

SYSCTL_INT(_kern, OID_AUTO, affinity_sets_enabled,
    CTLFLAG_RW | CTLFLAG_LOCKED, (int *) &affinity_sets_enabled, 0, "hinting enabled");
SYSCTL_INT(_kern, OID_AUTO, affinity_sets_mapping,
    CTLFLAG_RW | CTLFLAG_LOCKED, &affinity_sets_mapping, 0, "mapping policy");
/*
 * Boolean indicating if KASLR is active.
 */
STATIC int
sysctl_slide
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    uint32_t slide;

    slide = vm_kernel_slide ? 1 : 0;

    return sysctl_io_number(req, slide, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, slide,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_slide, "I", "");
/*
 * Limit on total memory users can wire.
 *
 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
 *
 * vm_user_wire_limit - per address space limit on wired memory. This puts a cap on the process's rlimit value.
 *
 * These values are initialized to reasonable defaults at boot time based on the available physical memory.
 *
 * All values are in bytes.
 */

vm_map_size_t   vm_global_no_user_wire_amount;
vm_map_size_t   vm_global_user_wire_limit;
vm_map_size_t   vm_user_wire_limit;

/*
 * There needs to be a more automatic/elegant way to do this
 */
#if defined(__ARM__)
SYSCTL_INT(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_no_user_wire_amount, 0, "");
SYSCTL_INT(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, 0, "");
SYSCTL_INT(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_user_wire_limit, 0, "");
#else
SYSCTL_QUAD(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_no_user_wire_amount, "");
SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, "");
SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_user_wire_limit, "");
#endif
extern int vm_map_copy_overwrite_aligned_src_not_internal;
extern int vm_map_copy_overwrite_aligned_src_not_symmetric;
extern int vm_map_copy_overwrite_aligned_src_large;
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_internal, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_symmetric, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_symmetric, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_large, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_large, 0, "");

extern uint32_t vm_page_external_count;

SYSCTL_INT(_vm, OID_AUTO, vm_page_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_external_count, 0, "");

SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_filecache_min, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_xpmapped_min, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_xpmapped_min, 0, "");
#if DEVELOPMENT || DEBUG
SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_filecache_min_divisor, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_xpmapped_min_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_xpmapped_min_divisor, 0, "");
#endif
extern int vm_compressor_mode;
extern int vm_compressor_is_active;
extern int vm_compressor_available;
extern uint32_t vm_ripe_target_age;
extern uint32_t swapout_target_age;
extern int64_t  compressor_bytes_used;
extern int64_t  c_segment_input_bytes;
extern int64_t  c_segment_compressed_bytes;
extern uint32_t compressor_eval_period_in_msecs;
extern uint32_t compressor_sample_min_in_msecs;
extern uint32_t compressor_sample_max_in_msecs;
extern uint32_t compressor_thrashing_threshold_per_10msecs;
extern uint32_t compressor_thrashing_min_per_10msecs;
extern uint32_t vm_compressor_time_thread;
#if DEVELOPMENT || DEBUG
extern uint32_t vm_compressor_minorcompact_threshold_divisor;
extern uint32_t vm_compressor_majorcompact_threshold_divisor;
extern uint32_t vm_compressor_unthrottle_threshold_divisor;
extern uint32_t vm_compressor_catchup_threshold_divisor;

extern uint32_t vm_compressor_minorcompact_threshold_divisor_overridden;
extern uint32_t vm_compressor_majorcompact_threshold_divisor_overridden;
extern uint32_t vm_compressor_unthrottle_threshold_divisor_overridden;
extern uint32_t vm_compressor_catchup_threshold_divisor_overridden;

extern vmct_stats_t vmct_stats;
STATIC int
sysctl_minorcompact_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, vm_compressor_minorcompact_threshold_divisor, sizeof(int), &new_value, &changed);

    if (changed) {
        vm_compressor_minorcompact_threshold_divisor = new_value;
        vm_compressor_minorcompact_threshold_divisor_overridden = 1;
    }
    return error;
}

SYSCTL_PROC(_vm, OID_AUTO, compressor_minorcompact_threshold_divisor,
    CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
    0, 0, sysctl_minorcompact_threshold_divisor, "I", "");

STATIC int
sysctl_majorcompact_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, vm_compressor_majorcompact_threshold_divisor, sizeof(int), &new_value, &changed);

    if (changed) {
        vm_compressor_majorcompact_threshold_divisor = new_value;
        vm_compressor_majorcompact_threshold_divisor_overridden = 1;
    }
    return error;
}

SYSCTL_PROC(_vm, OID_AUTO, compressor_majorcompact_threshold_divisor,
    CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
    0, 0, sysctl_majorcompact_threshold_divisor, "I", "");

STATIC int
sysctl_unthrottle_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, vm_compressor_unthrottle_threshold_divisor, sizeof(int), &new_value, &changed);

    if (changed) {
        vm_compressor_unthrottle_threshold_divisor = new_value;
        vm_compressor_unthrottle_threshold_divisor_overridden = 1;
    }
    return error;
}

SYSCTL_PROC(_vm, OID_AUTO, compressor_unthrottle_threshold_divisor,
    CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
    0, 0, sysctl_unthrottle_threshold_divisor, "I", "");

STATIC int
sysctl_catchup_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, vm_compressor_catchup_threshold_divisor, sizeof(int), &new_value, &changed);

    if (changed) {
        vm_compressor_catchup_threshold_divisor = new_value;
        vm_compressor_catchup_threshold_divisor_overridden = 1;
    }
    return error;
}

SYSCTL_PROC(_vm, OID_AUTO, compressor_catchup_threshold_divisor,
    CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
    0, 0, sysctl_catchup_threshold_divisor, "I", "");
#endif /* DEVELOPMENT || DEBUG */
SYSCTL_QUAD(_vm, OID_AUTO, compressor_input_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_input_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_compressed_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_bytes_used, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_bytes_used, "");

SYSCTL_INT(_vm, OID_AUTO, compressor_mode, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_mode, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_is_active, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_is_active, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_swapout_target_age, CTLFLAG_RD | CTLFLAG_LOCKED, &swapout_target_age, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_available, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_available, 0, "");

SYSCTL_INT(_vm, OID_AUTO, vm_ripe_target_age_in_secs, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_ripe_target_age, 0, "");

SYSCTL_INT(_vm, OID_AUTO, compressor_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_eval_period_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_sample_min_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_min_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_sample_max_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_max_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_threshold_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_threshold_per_10msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_min_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_min_per_10msecs, 0, "");

SYSCTL_STRING(_vm, OID_AUTO, swapfileprefix, CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, swapfilename, sizeof(swapfilename) - SWAPFILENAME_INDEX_LEN, "");

SYSCTL_INT(_vm, OID_AUTO, compressor_timing_enabled, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_time_thread, 0, "");
#if DEVELOPMENT || DEBUG
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_runtime0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_runtimes[0], "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_runtime1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_runtimes[1], "");

SYSCTL_QUAD(_vm, OID_AUTO, compressor_threads_total, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_cthreads_total, "");

SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_pages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_pages[0], "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_pages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_pages[1], "");

SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_iterations0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_iterations[0], "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_iterations1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_iterations[1], "");

SYSCTL_INT(_vm, OID_AUTO, compressor_thread_minpages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_minpages[0], 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thread_minpages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_minpages[1], 0, "");

SYSCTL_INT(_vm, OID_AUTO, compressor_thread_maxpages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_maxpages[0], 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thread_maxpages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_maxpages[1], 0, "");

#endif /* DEVELOPMENT || DEBUG */
SYSCTL_QUAD(_vm, OID_AUTO, lz4_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compressions, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compression_failures, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compressed_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_wk_compression_delta, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_wk_compression_delta, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_wk_compression_negative_delta, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_wk_compression_negative_delta, "");

SYSCTL_QUAD(_vm, OID_AUTO, lz4_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_decompressions, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_decompressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_decompressed_bytes, "");

SYSCTL_QUAD(_vm, OID_AUTO, uc_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.uc_decompressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_cabstime, "");

SYSCTL_QUAD(_vm, OID_AUTO, wkh_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_cabstime, "");
SYSCTL_QUAD(_vm, OID_AUTO, wkh_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_compressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wks_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_cabstime, "");
SYSCTL_QUAD(_vm, OID_AUTO, wks_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_compressions_exclusive, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressions_exclusive, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_sv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_sv_compressions, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_mzv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_mzv_compressions, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compression_failures, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_compressed_bytes_exclusive, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressed_bytes_exclusive, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_compressed_bytes_total, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressed_bytes_total, "");

SYSCTL_QUAD(_vm, OID_AUTO, wks_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compressed_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, wks_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compression_failures, "");
SYSCTL_QUAD(_vm, OID_AUTO, wks_sv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_sv_compressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_decompressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_dabstime, "");

SYSCTL_QUAD(_vm, OID_AUTO, wkh_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_dabstime, "");
SYSCTL_QUAD(_vm, OID_AUTO, wkh_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_decompressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wks_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_dabstime, "");
SYSCTL_QUAD(_vm, OID_AUTO, wks_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_decompressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_decompressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_decompressed_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_sv_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_sv_decompressions, "");

SYSCTL_INT(_vm, OID_AUTO, lz4_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, wkdm_reeval_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.wkdm_reeval_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_max_failure_skips, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_failure_skips, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_max_failure_run_length, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_failure_run_length, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_max_preselects, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_preselects, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_run_preselection_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_run_preselection_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_run_continue_bytes, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_run_continue_bytes, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_profitable_bytes, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_profitable_bytes, 0, "");
#if DEVELOPMENT || DEBUG
extern int vm_compressor_current_codec;
extern int vm_compressor_test_seg_wp;
extern boolean_t vm_compressor_force_sw_wkdm;
SYSCTL_INT(_vm, OID_AUTO, compressor_codec, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_current_codec, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_test_wp, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_test_seg_wp, 0, "");

SYSCTL_INT(_vm, OID_AUTO, wksw_force, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_force_sw_wkdm, 0, "");
extern int precompy, wkswhw;

SYSCTL_INT(_vm, OID_AUTO, precompy, CTLFLAG_RW | CTLFLAG_LOCKED, &precompy, 0, "");
SYSCTL_INT(_vm, OID_AUTO, wkswhw, CTLFLAG_RW | CTLFLAG_LOCKED, &wkswhw, 0, "");
extern unsigned int vm_ktrace_enabled;
SYSCTL_INT(_vm, OID_AUTO, vm_ktrace, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_ktrace_enabled, 0, "");
#endif /* DEVELOPMENT || DEBUG */
#if CONFIG_PHANTOM_CACHE
extern uint32_t phantom_cache_thrashing_threshold;
extern uint32_t phantom_cache_eval_period_in_msecs;
extern uint32_t phantom_cache_thrashing_threshold_ssd;

SYSCTL_INT(_vm, OID_AUTO, phantom_cache_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_eval_period_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold_ssd, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold_ssd, 0, "");
#endif /* CONFIG_PHANTOM_CACHE */
#if CONFIG_BACKGROUND_QUEUE

extern uint32_t vm_page_background_count;
extern uint32_t vm_page_background_target;
extern uint32_t vm_page_background_internal_count;
extern uint32_t vm_page_background_external_count;
extern uint32_t vm_page_background_mode;
extern uint32_t vm_page_background_exclude_external;
extern uint64_t vm_page_background_promoted_count;
extern uint64_t vm_pageout_rejected_bq_internal;
extern uint64_t vm_pageout_rejected_bq_external;

SYSCTL_INT(_vm, OID_AUTO, vm_page_background_mode, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_mode, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_exclude_external, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_exclude_external, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_target, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_target, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_internal_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_internal_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_external_count, 0, "");

SYSCTL_QUAD(_vm, OID_AUTO, vm_page_background_promoted_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_promoted_count, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_considered_bq_internal, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_considered_bq_external, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_internal, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_external, "");

#endif /* CONFIG_BACKGROUND_QUEUE */
extern void vm_update_darkwake_mode(boolean_t);
extern boolean_t vm_darkwake_mode;

STATIC int
sysctl_toggle_darkwake_mode(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, vm_darkwake_mode, sizeof(int), &new_value, &changed);

    if (!error && changed) {
        if (new_value != 0 && new_value != 1) {
            printf("Error: Invalid value passed to darkwake sysctl. Acceptable: 0 or 1.\n");
            error = EINVAL;
        } else {
            vm_update_darkwake_mode((boolean_t) new_value);
        }
    }

    return error;
}

SYSCTL_PROC(_vm, OID_AUTO, darkwake_mode,
    CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
    0, 0, sysctl_toggle_darkwake_mode, "I", "");
3663 #if (DEVELOPMENT || DEBUG)
3665 SYSCTL_UINT(_vm
, OID_AUTO
, vm_page_creation_throttled_hard
,
3666 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
3667 &vm_page_creation_throttled_hard
, 0, "");
3669 SYSCTL_UINT(_vm
, OID_AUTO
, vm_page_creation_throttled_soft
,
3670 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
3671 &vm_page_creation_throttled_soft
, 0, "");
3673 extern uint32_t vm_pageout_memorystatus_fb_factor_nr
;
3674 extern uint32_t vm_pageout_memorystatus_fb_factor_dr
;
3675 SYSCTL_INT(_vm
, OID_AUTO
, vm_pageout_memorystatus_fb_factor_nr
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_pageout_memorystatus_fb_factor_nr
, 0, "");
3676 SYSCTL_INT(_vm
, OID_AUTO
, vm_pageout_memorystatus_fb_factor_dr
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_pageout_memorystatus_fb_factor_dr
, 0, "");
3679 SYSCTL_INT(_vm
, OID_AUTO
, vm_grab_anon_overrides
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_pageout_debug
.vm_grab_anon_overrides
, 0, "");
3680 SYSCTL_INT(_vm
, OID_AUTO
, vm_grab_anon_nops
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_pageout_debug
.vm_grab_anon_nops
, 0, "");
3682 /* log message counters for persistence mode */
3683 extern uint32_t oslog_p_total_msgcount
;
3684 extern uint32_t oslog_p_metadata_saved_msgcount
;
3685 extern uint32_t oslog_p_metadata_dropped_msgcount
;
3686 extern uint32_t oslog_p_error_count
;
3687 extern uint32_t oslog_p_saved_msgcount
;
3688 extern uint32_t oslog_p_dropped_msgcount
;
3689 extern uint32_t oslog_p_boot_dropped_msgcount
;
3691 /* log message counters for streaming mode */
3692 extern uint32_t oslog_s_total_msgcount
;
3693 extern uint32_t oslog_s_metadata_msgcount
;
3694 extern uint32_t oslog_s_error_count
;
3695 extern uint32_t oslog_s_streamed_msgcount
;
3696 extern uint32_t oslog_s_dropped_msgcount
;
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_total_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_saved_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_dropped_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_error_count, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_saved_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_dropped_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_boot_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_boot_dropped_msgcount, 0, "");

SYSCTL_UINT(_debug, OID_AUTO, oslog_s_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_total_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_metadata_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_metadata_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_error_count, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_streamed_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_streamed_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_dropped_msgcount, 0, "");
#endif /* DEVELOPMENT || DEBUG */
/*
 * Enable tracing of voucher contents
 */
extern uint32_t ipc_voucher_trace_contents;

SYSCTL_INT(_kern, OID_AUTO, ipc_voucher_trace_contents,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ipc_voucher_trace_contents, 0, "Enable tracing voucher contents");
/*
 * Kernel stack size and depth
 */
SYSCTL_INT(_kern, OID_AUTO, stack_size,
    CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_size, 0, "Kernel stack size");
SYSCTL_INT(_kern, OID_AUTO, stack_depth_max,
    CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_depth_max, 0, "Max kernel stack depth at interrupt or context switch");
extern unsigned int kern_feature_overrides;
SYSCTL_INT(_kern, OID_AUTO, kern_feature_overrides,
    CTLFLAG_RD | CTLFLAG_LOCKED, &kern_feature_overrides, 0, "Kernel feature override mask");
/*
 * enable back trace for port allocations
 */
extern int ipc_portbt;

SYSCTL_INT(_kern, OID_AUTO, ipc_portbt,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &ipc_portbt, 0, "");
SYSCTL_STRING(_kern, OID_AUTO, sched,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    sched_string, sizeof(sched_string),
    "Timeshare scheduler implementation");
#if CONFIG_QUIESCE_COUNTER
static int
sysctl_cpu_quiescent_counter_interval SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)

	int error = sysctl_handle_int(oidp, &cpu_checkin_min_interval_us, 0, req);
	if (error || !req->newptr) {
		return error;
	}

	cpu_quiescent_counter_set_min_interval_us(cpu_checkin_min_interval_us);

	return 0;
}

SYSCTL_PROC(_kern, OID_AUTO, cpu_checkin_interval,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0,
    sysctl_cpu_quiescent_counter_interval, "I",
    "Quiescent CPU checkin interval (microseconds)");
#endif /* CONFIG_QUIESCE_COUNTER */
/*
 * Only support runtime modification on embedded platforms
 * with development config enabled
 */
extern int precise_user_kernel_time;
SYSCTL_INT(_kern, OID_AUTO, precise_user_kernel_time,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &precise_user_kernel_time, 0, "Precise accounting of kernel vs. user time");
/* Parameters related to timer coalescing tuning, to be replaced
 * with a dedicated systemcall in the future.
 */
/* Enable processing pending timers in the context of any other interrupt
 * Coalescing tuning parameters for various thread/task attributes */
static int
sysctl_timer_user_us_kernel_abstime SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp)
	int size = arg2;	/* subcommand */
	int error;
	int changed = 0;
	uint64_t old_value_ns;
	uint64_t new_value_ns;
	uint64_t value_abstime;

	if (size == sizeof(uint32_t)) {
		value_abstime = *((uint32_t *)arg1);
	} else if (size == sizeof(uint64_t)) {
		value_abstime = *((uint64_t *)arg1);
	} else {
		return ENOTSUP;
	}

	absolutetime_to_nanoseconds(value_abstime, &old_value_ns);
	error = sysctl_io_number(req, old_value_ns, sizeof(old_value_ns), &new_value_ns, &changed);
	if ((error) || (!changed)) {
		return error;
	}

	nanoseconds_to_absolutetime(new_value_ns, &value_abstime);
	if (size == sizeof(uint32_t)) {
		*((uint32_t *)arg1) = (uint32_t)value_abstime;
	} else {
		*((uint64_t *)arg1) = value_abstime;
	}
	return error;
}
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_bg_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_bg_shift, 0, "");
SYSCTL_PROC(_kern, OID_AUTO, timer_resort_threshold_ns,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_resort_threshold_abstime,
    sizeof(tcoal_prio_params.timer_resort_threshold_abstime),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");
SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_bg_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_bg_abstime_max,
    sizeof(tcoal_prio_params.timer_coalesce_bg_abstime_max),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_kt_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_kt_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_kt_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_kt_abstime_max,
    sizeof(tcoal_prio_params.timer_coalesce_kt_abstime_max),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_fp_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_fp_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_fp_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_fp_abstime_max,
    sizeof(tcoal_prio_params.timer_coalesce_fp_abstime_max),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_ts_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_ts_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_ts_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_ts_abstime_max,
    sizeof(tcoal_prio_params.timer_coalesce_ts_abstime_max),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier0_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[0], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier0_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[0],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[0]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier1_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[1], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier1_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[1],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[1]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier2_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[2], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier2_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[2],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[2]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier3_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[3], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier3_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[3],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[3]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier4_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[4], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier4_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[4],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[4]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier5_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[5], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier5_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[5],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[5]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");
/* Communicate the "user idle level" heuristic to the timer layer, and
 * potentially other layers in the future.
 */
static int
timer_user_idle_level(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value = 0, old_value = 0, changed = 0, error;

	old_value = timer_get_user_idle_level();

	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);

	if (error == 0 && changed) {
		if (timer_set_user_idle_level(new_value) != KERN_SUCCESS) {
			return ERANGE;
		}
	}

	return error;
}
SYSCTL_PROC(_machdep, OID_AUTO, user_idle_level,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0,
    timer_user_idle_level, "I", "User idle level heuristic, 0-128");
SYSCTL_INT(_kern, OID_AUTO, hv_support,
    CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
    &hv_support_available, 0, "");
static int
sysctl_darkboot SYSCTL_HANDLER_ARGS
{
	int err = 0, value = 0;
#pragma unused(oidp, arg1, arg2, err, value, req)

	/*
	 * Handle the sysctl request.
	 *
	 * If this is a read, the function will set the value to the current darkboot value. Otherwise,
	 * we'll get the request identifier into "value" and then we can honor it.
	 */
	if ((err = sysctl_io_number(req, darkboot, sizeof(int), &value, NULL)) != 0) {
		goto exit;
	}

	/* writing requested, let's process the request */
	if (req->newptr) {
		/* writing is protected by an entitlement */
		if (priv_check_cred(kauth_cred_get(), PRIV_DARKBOOT, 0) != 0) {
			err = EPERM;
			goto exit;
		}

		switch (value) {
		case MEMORY_MAINTENANCE_DARK_BOOT_UNSET:
			/*
			 * If the darkboot sysctl is unset, the NVRAM variable
			 * must be unset too. If that's not the case, it means
			 * someone is doing something crazy and not supported.
			 */
			if (darkboot != 0) {
				int ret = PERemoveNVRAMProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME);
				if (ret) {
					darkboot = 0;
				} else {
					err = EINVAL;
				}
			}
			break;
		case MEMORY_MAINTENANCE_DARK_BOOT_SET:
			darkboot = value;
			break;
		case MEMORY_MAINTENANCE_DARK_BOOT_SET_PERSISTENT: {
			/*
			 * Set the NVRAM and update 'darkboot' in case
			 * of success. Otherwise, do not update
			 * 'darkboot' and report the failure.
			 */
			if (PEWriteNVRAMBooleanProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME, TRUE)) {
				darkboot = value;
			} else {
				err = EINVAL;
			}
			break;
		}
		default:
			err = EINVAL;
			break;
		}
	}

exit:
	return err;
}
SYSCTL_PROC(_kern, OID_AUTO, darkboot,
    CTLFLAG_KERN | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
    0, 0, sysctl_darkboot, "I", "");
#if DEVELOPMENT || DEBUG
#include <sys/sysent.h>
/* This should result in a fatal exception, verifying that "sysent" is
 * write-protected.
 */
static int
kern_sysent_write(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	uint64_t new_value = 0, old_value = 0;
	int changed = 0, error;

	error = sysctl_io_number(req, old_value, sizeof(uint64_t), &new_value, &changed);
	if ((error == 0) && changed) {
		volatile uint32_t *wraddr = (uint32_t *) &sysent[0];
		*wraddr = 0;
		printf("sysent[0] write succeeded\n");
	}
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, sysent_const_check,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0,
    kern_sysent_write, "I", "Attempt sysent[0] write");

#endif
#if DEVELOPMENT || DEBUG
SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED, NULL, 1, "");
#else
SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED, NULL, 0, "");
#endif
#if DEVELOPMENT || DEBUG

static int
sysctl_panic_test SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int rval = 0;
	char str[32] = "entry prelog postlog postcore";

	rval = sysctl_handle_string(oidp, str, sizeof(str), req);

	if (rval == 0 && req->newptr) {
		if (strncmp("entry", str, strlen("entry")) == 0) {
			panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_ENTRY, "test recursive panic at entry");
		} else if (strncmp("prelog", str, strlen("prelog")) == 0) {
			panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_PRELOG, "test recursive panic prior to writing a paniclog");
		} else if (strncmp("postlog", str, strlen("postlog")) == 0) {
			panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTLOG, "test recursive panic subsequent to paniclog");
		} else if (strncmp("postcore", str, strlen("postcore")) == 0) {
			panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTCORE, "test recursive panic subsequent to on-device core");
		}
	}

	return rval;
}
static int
sysctl_debugger_test SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int rval = 0;
	char str[32] = "entry prelog postlog postcore";

	rval = sysctl_handle_string(oidp, str, sizeof(str), req);

	if (rval == 0 && req->newptr) {
		if (strncmp("entry", str, strlen("entry")) == 0) {
			DebuggerWithContext(0, NULL, "test recursive panic via debugger at entry", DEBUGGER_OPTION_RECURPANIC_ENTRY);
		} else if (strncmp("prelog", str, strlen("prelog")) == 0) {
			DebuggerWithContext(0, NULL, "test recursive panic via debugger prior to writing a paniclog", DEBUGGER_OPTION_RECURPANIC_PRELOG);
		} else if (strncmp("postlog", str, strlen("postlog")) == 0) {
			DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to paniclog", DEBUGGER_OPTION_RECURPANIC_POSTLOG);
		} else if (strncmp("postcore", str, strlen("postcore")) == 0) {
			DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to on-device core", DEBUGGER_OPTION_RECURPANIC_POSTCORE);
		}
	}

	return rval;
}
decl_lck_spin_data(, spinlock_panic_test_lock)

__attribute__((noreturn))
static void
spinlock_panic_test_acquire_spinlock(void * arg __unused, wait_result_t wres __unused)
{
	lck_spin_lock(&spinlock_panic_test_lock);
	while (1) {
		;
	}
}

static int
sysctl_spinlock_panic_test SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	if (req->newlen == 0) {
		return EINVAL;
	}

	thread_t panic_spinlock_thread;
	/* Initialize panic spinlock */
	lck_grp_t * panic_spinlock_grp;
	lck_grp_attr_t * panic_spinlock_grp_attr;
	lck_attr_t * panic_spinlock_attr;

	panic_spinlock_grp_attr = lck_grp_attr_alloc_init();
	panic_spinlock_grp = lck_grp_alloc_init("panic_spinlock", panic_spinlock_grp_attr);
	panic_spinlock_attr = lck_attr_alloc_init();

	lck_spin_init(&spinlock_panic_test_lock, panic_spinlock_grp, panic_spinlock_attr);

	/* Create thread to acquire spinlock */
	if (kernel_thread_start(spinlock_panic_test_acquire_spinlock, NULL, &panic_spinlock_thread) != KERN_SUCCESS) {
		return EBUSY;
	}

	/* Try to acquire spinlock -- should panic eventually */
	lck_spin_lock(&spinlock_panic_test_lock);
	while (1) {
		;
	}
}
__attribute__((noreturn))
static void
simultaneous_panic_worker
(void * arg, wait_result_t wres __unused)
{
	atomic_int *start_panic = (atomic_int *)arg;

	while (!atomic_load(start_panic)) {
		;
	}
	panic("SIMULTANEOUS PANIC TEST: INITIATING PANIC FROM CPU %d", cpu_number());
	__builtin_unreachable();
}

static int
sysctl_simultaneous_panic_test SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	if (req->newlen == 0) {
		return EINVAL;
	}

	int i = 0, threads_to_create = 2 * processor_count;
	atomic_int start_panic = 0;
	unsigned int threads_created = 0;
	thread_t new_panic_thread;

	for (i = threads_to_create; i > 0; i--) {
		if (kernel_thread_start(simultaneous_panic_worker, (void *) &start_panic, &new_panic_thread) == KERN_SUCCESS) {
			threads_created++;
		}
	}

	/* FAIL if we couldn't create at least processor_count threads */
	if (threads_created < processor_count) {
		panic("SIMULTANEOUS PANIC TEST: FAILED TO CREATE ENOUGH THREADS, ONLY CREATED %d (of %d)",
		    threads_created, threads_to_create);
	}

	atomic_exchange(&start_panic, 1);
	while (1) {
		;
	}
}
SYSCTL_PROC(_debug, OID_AUTO, panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_panic_test, "A", "panic test");
SYSCTL_PROC(_debug, OID_AUTO, debugger_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_debugger_test, "A", "debugger test");
SYSCTL_PROC(_debug, OID_AUTO, spinlock_panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_spinlock_panic_test, "A", "spinlock panic test");
SYSCTL_PROC(_debug, OID_AUTO, simultaneous_panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_simultaneous_panic_test, "A", "simultaneous panic test");
extern int exc_resource_threads_enabled;

SYSCTL_INT(_kern, OID_AUTO, exc_resource_threads_enabled, CTLFLAG_RD | CTLFLAG_LOCKED, &exc_resource_threads_enabled, 0, "exc_resource thread limit enabled");

#endif /* DEVELOPMENT || DEBUG */
const uint32_t thread_groups_supported = 0;

static int
sysctl_thread_groups_supported(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int value = thread_groups_supported;
	return sysctl_io_number(req, value, sizeof(value), NULL, NULL);
}
SYSCTL_PROC(_kern, OID_AUTO, thread_groups_supported, CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_KERN,
    0, 0, &sysctl_thread_groups_supported, "I", "thread groups supported");
static int
sysctl_grade_cputype SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2, oidp)
	int error = 0;
	int type_tuple[2] = {};
	int return_value = 0;

	error = SYSCTL_IN(req, &type_tuple, sizeof(type_tuple));
	if (error) {
		return error;
	}

	return_value = grade_binary(type_tuple[0], type_tuple[1]);

	error = SYSCTL_OUT(req, &return_value, sizeof(return_value));
	if (error) {
		return error;
	}

	return error;
}
SYSCTL_PROC(_kern, OID_AUTO, grade_cputype,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED | CTLTYPE_OPAQUE,
    0, 0, &sysctl_grade_cputype, "S",
    "grade value of cpu_type_t+cpu_sub_type_t");
#if DEVELOPMENT || DEBUG

static atomic_int wedge_thread_should_wake = 0;

static int
unwedge_thread SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error, val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || val == 0) {
		return error;
	}

	atomic_store(&wedge_thread_should_wake, 1);
	return 0;
}

SYSCTL_PROC(_kern, OID_AUTO, unwedge_thread, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, unwedge_thread, "I", "unwedge the thread wedged by kern.wedge_thread");
extern uintptr_t phys_carveout_pa;
SYSCTL_LONG(_kern, OID_AUTO, phys_carveout_pa, CTLFLAG_RD | CTLFLAG_LOCKED,
    &phys_carveout_pa,
    "base physical address of the phys_carveout_mb boot-arg region");
extern size_t phys_carveout_size;
SYSCTL_LONG(_kern, OID_AUTO, phys_carveout_size, CTLFLAG_RD | CTLFLAG_LOCKED,
    &phys_carveout_size,
    "size in bytes of the phys_carveout_mb boot-arg region");
static int
wedge_thread SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)

	int error, val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || val == 0) {
		return error;
	}

	uint64_t interval = 1;
	nanoseconds_to_absolutetime(1000 * 1000 * 50, &interval);

	atomic_store(&wedge_thread_should_wake, 0);
	while (!atomic_load(&wedge_thread_should_wake)) {
		tsleep1(NULL, 0, "wedge_thread", mach_absolute_time() + interval, NULL);
	}

	return 0;
}

SYSCTL_PROC(_kern, OID_AUTO, wedge_thread, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, wedge_thread, "I", "wedge this thread so it cannot be cleaned up");
static int
sysctl_turnstile_test_prim_lock SYSCTL_HANDLER_ARGS;
static int
sysctl_turnstile_test_prim_unlock SYSCTL_HANDLER_ARGS;
int
tstile_test_prim_lock(boolean_t use_hashtable);
int
tstile_test_prim_unlock(boolean_t use_hashtable);

#define SYSCTL_TURNSTILE_TEST_DEFAULT            1
#define SYSCTL_TURNSTILE_TEST_GLOBAL_HASHTABLE   2
static int
sysctl_turnstile_test_prim_lock SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error, val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || val == 0) {
		return error;
	}
	boolean_t use_hashtable = (val == SYSCTL_TURNSTILE_TEST_GLOBAL_HASHTABLE) ? true : false;
	return tstile_test_prim_lock(use_hashtable);
}

static int
sysctl_turnstile_test_prim_unlock SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error, val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || val == 0) {
		return error;
	}
	boolean_t use_hashtable = (val == SYSCTL_TURNSTILE_TEST_GLOBAL_HASHTABLE) ? true : false;
	return tstile_test_prim_unlock(use_hashtable);
}
SYSCTL_PROC(_kern, OID_AUTO, turnstiles_test_lock, CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
    0, 0, sysctl_turnstile_test_prim_lock, "I", "turnstiles test lock");

SYSCTL_PROC(_kern, OID_AUTO, turnstiles_test_unlock, CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
    0, 0, sysctl_turnstile_test_prim_unlock, "I", "turnstiles test unlock");
int
turnstile_get_boost_stats_sysctl(void *req);
int
turnstile_get_unboost_stats_sysctl(void *req);
static int
sysctl_turnstile_boost_stats SYSCTL_HANDLER_ARGS;
static int
sysctl_turnstile_unboost_stats SYSCTL_HANDLER_ARGS;
extern uint64_t thread_block_on_turnstile_count;
extern uint64_t thread_block_on_regular_waitq_count;
static int
sysctl_turnstile_boost_stats SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2, oidp)
	return turnstile_get_boost_stats_sysctl(req);
}

static int
sysctl_turnstile_unboost_stats SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2, oidp)
	return turnstile_get_unboost_stats_sysctl(req);
}
SYSCTL_PROC(_kern, OID_AUTO, turnstile_boost_stats, CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLTYPE_STRUCT,
    0, 0, sysctl_turnstile_boost_stats, "S", "turnstiles boost stats");
SYSCTL_PROC(_kern, OID_AUTO, turnstile_unboost_stats, CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLTYPE_STRUCT,
    0, 0, sysctl_turnstile_unboost_stats, "S", "turnstiles unboost stats");
SYSCTL_QUAD(_kern, OID_AUTO, thread_block_count_on_turnstile,
    CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &thread_block_on_turnstile_count, "thread blocked on turnstile count");
SYSCTL_QUAD(_kern, OID_AUTO, thread_block_count_on_reg_waitq,
    CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &thread_block_on_regular_waitq_count, "thread blocked on regular waitq count");
static int
sysctl_lck_mtx_test_lock SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error, val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || val == 0) {
		return error;
	}

	if (val == 1) {
		lck_mtx_test_init();
		lck_mtx_test_lock();
	}

	return 0;
}

static int
sysctl_lck_mtx_test_unlock SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error, val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || val == 0) {
		return error;
	}

	if (val == 1) {
		lck_mtx_test_init();
		lck_mtx_test_unlock();
	}

	return 0;
}
static int
sysctl_erase_all_test_mtx_stats SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error, val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || val == 0) {
		return error;
	}

	if (val == 1) {
		lck_mtx_test_init();
		erase_all_test_mtx_stats();
	}

	return 0;
}
static int
sysctl_get_test_mtx_stats SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	char* buffer;
	int size, buffer_size, error;

	buffer_size = 1000;	/* assumed buffer size; original constant lost */
	buffer = kalloc(buffer_size);
	if (!buffer) {
		panic("Impossible to allocate memory for %s\n", __func__);
	}

	lck_mtx_test_init();

	size = get_test_mtx_stats_string(buffer, buffer_size);

	error = sysctl_io_string(req, buffer, size, 0, NULL);

	kfree(buffer, buffer_size);

	return error;
}
static int
sysctl_test_mtx_uncontended SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	char* buffer;
	char input_val[40];
	int buffer_size, offset, error, iter;

	if (!req->newptr) {
		return 0;
	}

	if (!req->oldptr) {
		return EINVAL;
	}

	if (req->newlen >= sizeof(input_val)) {
		return E2BIG;
	}

	error = SYSCTL_IN(req, input_val, req->newlen);
	if (error) {
		return error;
	}
	input_val[req->newlen] = '\0';

	iter = 0;
	sscanf(input_val, "%d", &iter);
	if (iter <= 0) {
		printf("%s requested %d iterations, not starting the test\n", __func__, iter);
		return EINVAL;
	}

	lck_mtx_test_init();

	buffer_size = 2000;	/* assumed buffer size; original constant lost */
	offset = 0;
	buffer = kalloc(buffer_size);
	if (!buffer) {
		panic("Impossible to allocate memory for %s\n", __func__);
	}
	memset(buffer, 0, buffer_size);

	printf("%s starting uncontended mutex test with %d iterations\n", __func__, iter);

	offset = snprintf(buffer, buffer_size, "STATS INNER LOOP");
	offset += lck_mtx_test_mtx_uncontended(iter, &buffer[offset], buffer_size - offset);

	offset += snprintf(&buffer[offset], buffer_size - offset, "\nSTATS OUTER LOOP");
	offset += lck_mtx_test_mtx_uncontended_loop_time(iter, &buffer[offset], buffer_size - offset);

	error = SYSCTL_OUT(req, buffer, offset);

	kfree(buffer, buffer_size);
	return error;
}
static int
sysctl_test_mtx_contended SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	char* buffer;
	char input_val[40];
	int buffer_size, offset, error, iter;

	printf("%s called\n", __func__);

	if (!req->newptr) {
		return 0;
	}

	if (!req->oldptr) {
		return EINVAL;
	}

	if (req->newlen >= sizeof(input_val)) {
		return E2BIG;
	}

	error = SYSCTL_IN(req, input_val, req->newlen);
	if (error) {
		return error;
	}
	input_val[req->newlen] = '\0';

	iter = 0;
	sscanf(input_val, "%d", &iter);
	if (iter <= 0) {
		printf("%s requested %d iterations, not starting the test\n", __func__, iter);
		return EINVAL;
	}

	lck_mtx_test_init();

	erase_all_test_mtx_stats();

	buffer_size = 2000;	/* assumed buffer size; original constant lost */
	offset = 0;
	buffer = kalloc(buffer_size);
	if (!buffer) {
		panic("Impossible to allocate memory for %s\n", __func__);
	}
	memset(buffer, 0, buffer_size);

	printf("%s starting contended mutex test with %d iterations\n", __func__, iter);

	offset = snprintf(buffer, buffer_size, "STATS INNER LOOP");
	offset += lck_mtx_test_mtx_contended(iter, &buffer[offset], buffer_size - offset);

	printf("%s starting contended mutex loop test with %d iterations\n", __func__, iter);

	offset += snprintf(&buffer[offset], buffer_size - offset, "\nSTATS OUTER LOOP");
	offset += lck_mtx_test_mtx_contended_loop_time(iter, &buffer[offset], buffer_size - offset);

	error = SYSCTL_OUT(req, buffer, offset);

	kfree(buffer, buffer_size);
	return error;
}
SYSCTL_PROC(_kern, OID_AUTO, lck_mtx_test_lock, CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
    0, 0, sysctl_lck_mtx_test_lock, "I", "lck mtx test lock");

SYSCTL_PROC(_kern, OID_AUTO, lck_mtx_test_unlock, CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
    0, 0, sysctl_lck_mtx_test_unlock, "I", "lck mtx test unlock");

SYSCTL_PROC(_kern, OID_AUTO, erase_all_test_mtx_stats, CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
    0, 0, sysctl_erase_all_test_mtx_stats, "I", "erase test_mtx statistics");

SYSCTL_PROC(_kern, OID_AUTO, get_test_mtx_stats, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_KERN | CTLFLAG_LOCKED,
    0, 0, sysctl_get_test_mtx_stats, "A", "get test_mtx statistics");

SYSCTL_PROC(_kern, OID_AUTO, test_mtx_contended, CTLTYPE_STRING | CTLFLAG_MASKED | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    0, 0, sysctl_test_mtx_contended, "A", "get statistics for contended mtx test");

SYSCTL_PROC(_kern, OID_AUTO, test_mtx_uncontended, CTLTYPE_STRING | CTLFLAG_MASKED | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    0, 0, sysctl_test_mtx_uncontended, "A", "get statistics for uncontended mtx test");
extern uint64_t MutexSpin;

SYSCTL_QUAD(_kern, OID_AUTO, mutex_spin_us, CTLFLAG_RW, &MutexSpin,
    "Spin time for acquiring a kernel mutex");
#if defined (__x86_64__)

semaphore_t sysctl_test_panic_with_thread_sem;

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Winfinite-recursion" /* rdar://38801963 */
__attribute__((noreturn))
static void
panic_thread_test_child_spin(void * arg, wait_result_t wres)
{
	static int panic_thread_recurse_count = 5;

	if (panic_thread_recurse_count > 0) {
		panic_thread_recurse_count--;
		panic_thread_test_child_spin(arg, wres);
	}

	semaphore_signal(sysctl_test_panic_with_thread_sem);
	while (1) {
		;
	}
}
#pragma clang diagnostic pop
static void
panic_thread_test_child_park(void * arg __unused, wait_result_t wres __unused)
{
	int event;

	assert_wait(&event, THREAD_UNINT);
	semaphore_signal(sysctl_test_panic_with_thread_sem);
	thread_block(panic_thread_test_child_park);
	while (1) {
		;
	}
}
static int
sysctl_test_panic_with_thread SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int rval = 0;
	char str[16] = { '\0' };
	thread_t child_thread = THREAD_NULL;

	rval = sysctl_handle_string(oidp, str, sizeof(str), req);
	if (rval != 0 || !req->newptr) {
		return EINVAL;
	}

	semaphore_create(kernel_task, &sysctl_test_panic_with_thread_sem, SYNC_POLICY_FIFO, 0);

	/* Create thread to spin or park in continuation */
	if (strncmp("spin", str, strlen("spin")) == 0) {
		if (kernel_thread_start(panic_thread_test_child_spin, NULL, &child_thread) != KERN_SUCCESS) {
			semaphore_destroy(kernel_task, sysctl_test_panic_with_thread_sem);
			return EBUSY;
		}
	} else if (strncmp("continuation", str, strlen("continuation")) == 0) {
		if (kernel_thread_start(panic_thread_test_child_park, NULL, &child_thread) != KERN_SUCCESS) {
			semaphore_destroy(kernel_task, sysctl_test_panic_with_thread_sem);
			return EBUSY;
		}
	} else {
		semaphore_destroy(kernel_task, sysctl_test_panic_with_thread_sem);
		return EINVAL;
	}

	semaphore_wait(sysctl_test_panic_with_thread_sem);

	panic_with_thread_context(0, NULL, 0, child_thread, "testing panic_with_thread_context for thread %p", child_thread);

	/* not reached */
	return EINVAL;
}
SYSCTL_PROC(_kern, OID_AUTO, test_panic_with_thread, CTLFLAG_MASKED | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_WR | CTLTYPE_STRING,
    0, 0, sysctl_test_panic_with_thread, "A", "test panic flow for backtracing a different thread");
#endif /* defined (__x86_64__) */
#endif /* DEVELOPMENT || DEBUG */