/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Mike Karels at Berkeley Software Design, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sysctl.c	8.4 (Berkeley) 4/14/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 * DEPRECATED sysctl system call code
 *
 * Everything in this file is deprecated. Sysctls should be handled
 * by the code in kern_newsysctl.c.
 * The remaining "case" sections are supposed to be converted into
 * SYSCTL_*-style definitions, and as soon as all of them are gone,
 * this source file is supposed to die.
 *
 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
 */
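/*
 * For reference, a new-style definition is a one-liner in the owning
 * source file.  The sketch below is illustrative only; "example_tunable"
 * is a hypothetical variable, not something defined in this file:
 *
 *	static int example_tunable = 0;
 *	SYSCTL_INT(_kern, OID_AUTO, example_tunable,
 *	    CTLFLAG_RW | CTLFLAG_LOCKED,
 *	    &example_tunable, 0, "example read/write tunable");
 */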
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/file_internal.h>
#include <sys/vnode_internal.h>
#include <sys/unistd.h>
#include <sys/ioctl.h>
#include <sys/namei.h>
#include <sys/disklabel.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <sys/aio_kern.h>
#include <sys/reboot.h>
#include <sys/memory_maintenance.h>
#include <sys/priv.h>
#include <stdatomic.h>

#include <security/audit/audit.h>
#include <kern/kalloc.h>

#include <machine/smp.h>
#include <machine/atomic.h>
#include <mach/machine.h>
#include <mach/mach_host.h>
#include <mach/mach_types.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <kern/debug.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_group.h>
#include <kern/processor.h>
#include <kern/cpu_number.h>
#include <kern/cpu_quiesce.h>
#include <kern/sched_prim.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <mach/host_info.h>

#include <sys/mount_internal.h>
#include <sys/kdebug.h>
#include <sys/kern_sysctl.h>

#include <IOKit/IOPlatformExpert.h>
#include <pexpert/pexpert.h>

#include <machine/machine_routines.h>
#include <machine/exec.h>

#include <vm/vm_protos.h>
#include <vm/vm_pageout.h>
#include <vm/vm_compressor_algorithms.h>
#include <sys/imgsrc.h>
#include <kern/timer_call.h>

#if defined(__i386__) || defined(__x86_64__)
#include <i386/cpuid.h>
#endif

#include <sys/kern_memorystatus.h>

#include <kperf/kperf.h>

#include <kern/hv_support.h>
/*
 * deliberately setting max requests to really high number
 * so that runaway settings do not cause MALLOC overflows
 */
#define AIO_MAX_REQUESTS (128 * CONFIG_AIO_MAX)

extern int aio_max_requests;
extern int aio_max_requests_per_process;
extern int aio_worker_threads;
extern int lowpri_IO_window_msecs;
extern int lowpri_IO_delay_msecs;
#if DEVELOPMENT || DEBUG
extern int nx_enabled;
#endif
extern int speculative_reads_disabled;
extern unsigned int speculative_prefetch_max;
extern unsigned int speculative_prefetch_max_iosize;
extern unsigned int preheat_max_bytes;
extern unsigned int preheat_min_bytes;
extern long numvnodes;
extern long num_recycledvnodes;

extern uuid_string_t bootsessionuuid_string;

extern unsigned int vm_max_delayed_work_limit;
extern unsigned int vm_max_batch;

extern unsigned int vm_page_free_min;
extern unsigned int vm_page_free_target;
extern unsigned int vm_page_free_reserved;

#if (DEVELOPMENT || DEBUG)
extern uint32_t vm_page_creation_throttled_hard;
extern uint32_t vm_page_creation_throttled_soft;
#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_LOCKERBOOT
extern const char kernel_protoboot_mount[];
#endif

/*
 * Conditionally allow dtrace to see these functions for debugging purposes.
 */
#define STATIC static

extern boolean_t mach_timer_coalescing_enabled;

extern uint64_t timer_deadline_tracking_bin_1, timer_deadline_tracking_bin_2;
STATIC void
fill_user32_eproc(proc_t, struct user32_eproc *__restrict);
STATIC void
fill_user32_externproc(proc_t, struct user32_extern_proc *__restrict);
STATIC void
fill_user64_eproc(proc_t, struct user64_eproc *__restrict);
STATIC void
fill_user64_proc(proc_t, struct user64_kinfo_proc *__restrict);
STATIC void
fill_user64_externproc(proc_t, struct user64_extern_proc *__restrict);
STATIC void
fill_user32_proc(proc_t, struct user32_kinfo_proc *__restrict);

extern int
kdbg_control(int *name, u_int namelen, user_addr_t where, size_t * sizep);

int
sysctl_procargs(int *name, u_int namelen, user_addr_t where,
    size_t *sizep, proc_t cur_proc);
STATIC int
sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
    proc_t cur_proc, int argc_yes);
int
sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
    size_t newlen, void *sp, int len);

STATIC int sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg);
STATIC int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg);
STATIC int sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg);
STATIC int sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg);
STATIC int sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg);
int sysdoproc_callback(proc_t p, void *arg);
/* forward declarations for non-static STATIC */
STATIC void fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64);
STATIC void fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32);
STATIC int sysctl_handle_kern_threadname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_sched_stats(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_sched_stats_enable(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS;
#if COUNT_SYSCALLS
STATIC int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS;
#endif /* COUNT_SYSCALLS */
#if !CONFIG_EMBEDDED
STATIC int sysctl_doprocargs SYSCTL_HANDLER_ARGS;
#endif /* !CONFIG_EMBEDDED */
STATIC int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS;
STATIC int sysctl_prochandle SYSCTL_HANDLER_ARGS;
STATIC int sysctl_aiomax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_aioprocmax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_aiothreads(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_maxproc(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_osversion(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_sysctl_bootargs(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_maxvnodes(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_securelvl(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_domainname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_hostname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_procname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_boottime(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_symfile(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_netboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
#ifdef CONFIG_IMGSRC_ACCESS
STATIC int sysctl_imgsrcdev(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
#endif
STATIC int sysctl_usrstack(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_usrstack64(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_suid_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_delayterm(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_rage_vnode(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_kern_check_openevt(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
#if DEVELOPMENT || DEBUG
STATIC int sysctl_nx(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
#endif
STATIC int sysctl_loadavg(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_vm_toggle_address_reuse(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_swapusage(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int fetch_process_cputype(proc_t cur_proc, int *name, u_int namelen, cpu_type_t *cputype);
STATIC int sysctl_sysctl_native(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_sysctl_cputype(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_safeboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_singleuser(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_minimalboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_slide(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
#ifdef CONFIG_XNUPOST
#include <tests/xnupost.h>

STATIC int sysctl_debug_test_oslog_ctl(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_debug_test_stackshot_mutex_owner(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_debug_test_stackshot_rwlck_owner(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
#endif

extern void IORegistrySetOSBuildVersion(char * build_version);
STATIC void
fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64)
{
	la64->ldavg[0] = la->ldavg[0];
	la64->ldavg[1] = la->ldavg[1];
	la64->ldavg[2] = la->ldavg[2];
	la64->fscale = (user64_long_t)la->fscale;
}

STATIC void
fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32)
{
	la32->ldavg[0] = la->ldavg[0];
	la32->ldavg[1] = la->ldavg[1];
	la32->ldavg[2] = la->ldavg[2];
	la32->fscale = (user32_long_t)la->fscale;
}

/*
 * Attributes stored in the kernel.
 */
extern char corefilename[MAXPATHLEN + 1];
extern int do_coredump;
extern int sugid_coredump;

extern int do_count_syscalls;

int securelevel = -1;
STATIC int
sysctl_handle_kern_threadname(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
	int error;
	struct uthread *ut = get_bsdthread_info(current_thread());
	user_addr_t oldp = 0, newp = 0;
	size_t *oldlenp = NULL;
	size_t newlen = 0;

	oldp = req->oldptr;
	oldlenp = &(req->oldlen);
	newlen = req->newlen;
	newp = req->newptr;

	/* We want the current length, and maybe the string itself */
	if (oldlenp) {
		/* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */
		size_t currlen = MAXTHREADNAMESIZE - 1;

		if (ut->pth_name) {
			/* use length of current thread name */
			currlen = strlen(ut->pth_name);
		}
		if (oldp) {
			if (*oldlenp < currlen) {
				return ENOMEM;
			}
			/* NOTE - we do not copy the NULL terminator */
			if (ut->pth_name) {
				error = copyout(ut->pth_name, oldp, currlen);
				if (error) {
					return error;
				}
			}
		}
		/* return length of thread name minus NULL terminator (just like strlen) */
		req->oldidx = currlen;
	}

	/* We want to set the name to something */
	if (newp) {
		if (newlen > (MAXTHREADNAMESIZE - 1)) {
			return ENAMETOOLONG;
		}

		if (!ut->pth_name) {
			char *tmp_pth_name = (char *)kalloc(MAXTHREADNAMESIZE);
			if (!tmp_pth_name) {
				return ENOMEM;
			}
			bzero(tmp_pth_name, MAXTHREADNAMESIZE);
			if (!OSCompareAndSwapPtr(NULL, tmp_pth_name, &ut->pth_name)) {
				kfree(tmp_pth_name, MAXTHREADNAMESIZE);
				return EBUSY;
			}
		} else {
			kernel_debug_string_simple(TRACE_STRING_THREADNAME_PREV, ut->pth_name);
			bzero(ut->pth_name, MAXTHREADNAMESIZE);
		}
		error = copyin(newp, ut->pth_name, newlen);
		if (error) {
			return error;
		}

		kernel_debug_string_simple(TRACE_STRING_THREADNAME, ut->pth_name);
	}

	return 0;
}

SYSCTL_PROC(_kern, KERN_THREADNAME, threadname, CTLFLAG_ANYBODY | CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_handle_kern_threadname, "A", "");
#define BSD_HOST 1
STATIC int
sysctl_sched_stats(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	host_basic_info_data_t hinfo;
	kern_return_t kret;
	uint32_t size;
	int changed;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
	struct _processor_statistics_np *buf;
	int error;

	kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
	if (kret != KERN_SUCCESS) {
		return EINVAL;
	}

	size = sizeof(struct _processor_statistics_np) * (hinfo.logical_cpu_max + 2); /* One for RT Queue, One for Fair Share Queue */

	if (req->oldlen < size) {
		return EINVAL;
	}

	MALLOC(buf, struct _processor_statistics_np *, size, M_TEMP, M_ZERO | M_WAITOK);

	kret = get_sched_statistics(buf, &size);
	if (kret != KERN_SUCCESS) {
		error = EINVAL;
		goto out;
	}

	error = sysctl_io_opaque(req, buf, size, &changed);
	if (error) {
		goto out;
	}

	if (changed) {
		panic("Sched info changed?!");
	}
out:
	FREE(buf, M_TEMP);
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, sched_stats, CTLFLAG_LOCKED, 0, 0, sysctl_sched_stats, "-", "");

STATIC int
sysctl_sched_stats_enable(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
{
	boolean_t active;
	int res;

	if (req->newlen != sizeof(active)) {
		return EINVAL;
	}

	res = copyin(req->newptr, &active, sizeof(active));
	if (res != 0) {
		return res;
	}

	return set_sched_stats_active(active);
}

SYSCTL_PROC(_kern, OID_AUTO, sched_stats_enable, CTLFLAG_LOCKED | CTLFLAG_WR, 0, 0, sysctl_sched_stats_enable, "-", "");

extern uint32_t sched_debug_flags;
SYSCTL_INT(_debug, OID_AUTO, sched, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_debug_flags, 0, "scheduler debug");

#if (DEBUG || DEVELOPMENT)
extern boolean_t doprnt_hide_pointers;
SYSCTL_INT(_debug, OID_AUTO, hide_kernel_pointers, CTLFLAG_RW | CTLFLAG_LOCKED, &doprnt_hide_pointers, 0, "hide kernel pointers from log");
#endif

extern int get_kernel_symfile(proc_t, char **);
#if COUNT_SYSCALLS
#define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)

extern const unsigned int nsysent;
extern int syscalls_log[];
extern const char *syscallnames[];

STATIC int
sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
{
	__unused int cmd = oidp->oid_arg2;      /* subcommand*/
	__unused int *name = arg1;              /* oid element argument vector */
	__unused int namelen = arg2;            /* number of oid element arguments */
	user_addr_t oldp = req->oldptr;         /* user buffer copy out address */
	size_t *oldlenp = &req->oldlen;         /* user buffer copy out size */
	user_addr_t newp = req->newptr;         /* user buffer copy in address */
	size_t newlen = req->newlen;            /* user buffer copy in size */
	int error;
	int tmp;

	/* valid values passed in:
	 * = 0 means don't keep called counts for each bsd syscall
	 * > 0 means keep called counts for each bsd syscall
	 * = 2 means dump current counts to the system log
	 * = 3 means reset all counts
	 * for example, to dump current counts:
	 *		sysctl -w kern.count_calls=2
	 */
	error = sysctl_int(oldp, oldlenp, newp, newlen, &tmp);
	if (error != 0) {
		return error;
	}

	if (tmp == 1) {
		do_count_syscalls = 1;
	} else if (tmp == 0 || tmp == 2 || tmp == 3) {
		unsigned int i;
		for (i = 0; i < nsysent; i++) {
			if (syscalls_log[i] != 0) {
				if (tmp == 2) {
					printf("%d calls - name %s \n", syscalls_log[i], syscallnames[i]);
				} else {
					syscalls_log[i] = 0;
				}
			}
		}
		if (tmp != 0) {
			do_count_syscalls = 1;
		}
	}

	/* adjust index so we return the right required/consumed amount */
	if (!error) {
		req->oldidx += req->oldlen;
	}

	return error;
}
SYSCTL_PROC(_kern, KERN_COUNT_SYSCALLS, count_syscalls, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                      /* Pointer argument (arg1) */
    0,                      /* Integer argument (arg2) */
    sysctl_docountsyscalls, /* Handler function */
    NULL,                   /* Data pointer */
    "");
#endif /* COUNT_SYSCALLS */
/*
 * The following sysctl_* functions should not be used
 * any more, as they can only cope with callers in
 * user mode: Use new-style
 *  sysctl_io_number()
 *  sysctl_io_string()
 *  sysctl_io_opaque()
 * instead.
 */
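/*
 * A new-style handler built on sysctl_io_number() looks roughly like
 * the sketch below ("example_value" is a hypothetical global used only
 * for illustration):
 *
 *	STATIC int
 *	sysctl_example(__unused struct sysctl_oid *oidp, __unused void *arg1,
 *	    __unused int arg2, struct sysctl_req *req)
 *	{
 *		int new_value, changed;
 *		int error = sysctl_io_number(req, example_value, sizeof(int),
 *		    &new_value, &changed);
 *		if (changed) {
 *			example_value = new_value;
 *		}
 *		return error;
 *	}
 */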
/*
 * Validate parameters and get old / set new parameters
 * for an integer-valued sysctl function.
 */
int
sysctl_int(user_addr_t oldp, size_t *oldlenp,
    user_addr_t newp, size_t newlen, int *valp)
{
	int error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL) {
		return EFAULT;
	}
	if (oldp && *oldlenp < sizeof(int)) {
		return ENOMEM;
	}
	if (newp && newlen != sizeof(int)) {
		return EINVAL;
	}
	*oldlenp = sizeof(int);
	if (oldp) {
		error = copyout(valp, oldp, sizeof(int));
	}
	if (error == 0 && newp) {
		error = copyin(newp, valp, sizeof(int));
		AUDIT_ARG(value32, *valp);
	}
	return error;
}

/*
 * Validate parameters and get old / set new parameters
 * for a quad(64bit)-valued sysctl function.
 */
int
sysctl_quad(user_addr_t oldp, size_t *oldlenp,
    user_addr_t newp, size_t newlen, quad_t *valp)
{
	int error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL) {
		return EFAULT;
	}
	if (oldp && *oldlenp < sizeof(quad_t)) {
		return ENOMEM;
	}
	if (newp && newlen != sizeof(quad_t)) {
		return EINVAL;
	}
	*oldlenp = sizeof(quad_t);
	if (oldp) {
		error = copyout(valp, oldp, sizeof(quad_t));
	}
	if (error == 0 && newp) {
		error = copyin(newp, valp, sizeof(quad_t));
	}
	return error;
}
STATIC int
sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg)
{
	if (p->p_pid != (pid_t)*(int*)arg) {
		return 0;
	} else {
		return 1;
	}
}

STATIC int
sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg)
{
	if (p->p_pgrpid != (pid_t)*(int*)arg) {
		return 0;
	} else {
		return 1;
	}
}

STATIC int
sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg)
{
	boolean_t retval;
	struct tty *tp;

	/* This is very racy but list lock is held.. Hmmm. */
	if ((p->p_flag & P_CONTROLT) == 0 ||
	    (p->p_pgrp == NULL) || (p->p_pgrp->pg_session == NULL) ||
	    (tp = SESSION_TP(p->p_pgrp->pg_session)) == TTY_NULL ||
	    tp->t_dev != (dev_t)*(int*)arg) {
		retval = FALSE;
	} else {
		retval = TRUE;
	}

	return retval;
}

STATIC int
sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg)
{
	kauth_cred_t my_cred;
	uid_t uid;

	if (p->p_ucred == NULL) {
		return 0;
	}
	my_cred = kauth_cred_proc_ref(p);
	uid = kauth_cred_getuid(my_cred);
	kauth_cred_unref(&my_cred);

	if (uid != (uid_t)*(int*)arg) {
		return 0;
	} else {
		return 1;
	}
}

STATIC int
sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg)
{
	kauth_cred_t my_cred;
	uid_t ruid;

	if (p->p_ucred == NULL) {
		return 0;
	}
	my_cred = kauth_cred_proc_ref(p);
	ruid = kauth_cred_getruid(my_cred);
	kauth_cred_unref(&my_cred);

	if (ruid != (uid_t)*(int*)arg) {
		return 0;
	} else {
		return 1;
	}
}
/*
 * try over estimating by 5 procs
 */
#define KERN_PROCSLOP  (5 * sizeof(struct kinfo_proc))
struct sysdoproc_args {
	int buflen;
	void *kprocp;
	boolean_t is_64_bit;
	user_addr_t dp;
	size_t needed;
	int sizeof_kproc;
	int *errorp;
	int uidcheck;
	int ruidcheck;
	int ttycheck;
	int uidval;
};

int
sysdoproc_callback(proc_t p, void *arg)
{
	struct sysdoproc_args *args = arg;

	if (args->buflen >= args->sizeof_kproc) {
		if ((args->ruidcheck != 0) && (sysdoproc_filt_KERN_PROC_RUID(p, &args->uidval) == 0)) {
			return PROC_RETURNED;
		}
		if ((args->uidcheck != 0) && (sysdoproc_filt_KERN_PROC_UID(p, &args->uidval) == 0)) {
			return PROC_RETURNED;
		}
		if ((args->ttycheck != 0) && (sysdoproc_filt_KERN_PROC_TTY(p, &args->uidval) == 0)) {
			return PROC_RETURNED;
		}

		bzero(args->kprocp, args->sizeof_kproc);
		if (args->is_64_bit) {
			fill_user64_proc(p, args->kprocp);
		} else {
			fill_user32_proc(p, args->kprocp);
		}
		int error = copyout(args->kprocp, args->dp, args->sizeof_kproc);
		if (error) {
			*args->errorp = error;
			return PROC_RETURNED_DONE;
		}
		args->dp += args->sizeof_kproc;
		args->buflen -= args->sizeof_kproc;
	}
	args->needed += args->sizeof_kproc;
	return PROC_RETURNED;
}

SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD | CTLFLAG_LOCKED, 0, "");
STATIC int
sysctl_prochandle SYSCTL_HANDLER_ARGS
{
	int cmd = oidp->oid_arg2;       /* subcommand for multiple nodes */
	int *name = arg1;               /* oid element argument vector */
	int namelen = arg2;             /* number of oid element arguments */
	user_addr_t where = req->oldptr;/* user buffer copy out address */

	user_addr_t dp = where;
	size_t needed = 0;
	int buflen = where != USER_ADDR_NULL ? req->oldlen : 0;
	int error = 0;
	boolean_t is_64_bit = proc_is64bit(current_proc());
	struct user32_kinfo_proc  user32_kproc;
	struct user64_kinfo_proc  user_kproc;
	int sizeof_kproc;
	void *kprocp;
	int (*filterfn)(proc_t, void *) = 0;
	struct sysdoproc_args args;
	int uidcheck = 0;
	int ruidcheck = 0;
	int ttycheck = 0;

	if (namelen != 1 && !(namelen == 0 && cmd == KERN_PROC_ALL)) {
		return EINVAL;
	}

	if (is_64_bit) {
		sizeof_kproc = sizeof(user_kproc);
		kprocp = &user_kproc;
	} else {
		sizeof_kproc = sizeof(user32_kproc);
		kprocp = &user32_kproc;
	}

	switch (cmd) {
	case KERN_PROC_PID:
		filterfn = sysdoproc_filt_KERN_PROC_PID;
		break;

	case KERN_PROC_PGRP:
		filterfn = sysdoproc_filt_KERN_PROC_PGRP;
		break;

	case KERN_PROC_TTY:
		ttycheck = 1;
		break;

	case KERN_PROC_UID:
		uidcheck = 1;
		break;

	case KERN_PROC_RUID:
		ruidcheck = 1;
		break;

	case KERN_PROC_ALL:
		break;

	default:
		/* must be kern.proc.<unknown> */
		return ENOTSUP;
	}

	error = 0;
	args.buflen = buflen;
	args.kprocp = kprocp;
	args.is_64_bit = is_64_bit;
	args.dp = dp;
	args.needed = needed;
	args.errorp = &error;
	args.uidcheck = uidcheck;
	args.ruidcheck = ruidcheck;
	args.ttycheck = ttycheck;
	args.sizeof_kproc = sizeof_kproc;
	if (namelen) {
		args.uidval = name[0];
	}

	proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST),
	    sysdoproc_callback, &args, filterfn, name);

	if (error) {
		return error;
	}

	dp = args.dp;
	needed = args.needed;

	if (where != USER_ADDR_NULL) {
		req->oldlen = dp - where;
		if (needed > req->oldlen) {
			return ENOMEM;
		}
	} else {
		needed += KERN_PROCSLOP;
		req->oldlen = needed;
	}

	/* adjust index so we return the right required/consumed amount */
	req->oldidx += req->oldlen;
	return 0;
}
/*
 * We specify the subcommand code for multiple nodes as the 'req->arg2' value
 * in the sysctl declaration itself, which comes into the handler function
 * as 'oidp->oid_arg2'.
 *
 * For these particular sysctls, since they have well known OIDs, we could
 * have just obtained it from the '((int *)arg1)[0]' parameter, but that would
 * not demonstrate how to handle multiple sysctls that used OID_AUTO instead
 * of a well known value with a common handler function.  This is desirable,
 * because we want well known values to "go away" at some future date.
 *
 * It should be noted that the value of '((int *)arg1)[1]' is used as an
 * integer parameter to the subcommand for many of these sysctls; we'd
 * rather have used '((int *)arg1)[0]' for that, or even better, an element
 * in a structure passed in as the 'newp' argument to sysctlbyname(3),
 * and then use leaf-node permissions enforcement, but that would have
 * necessitated modifying user space code to correspond to the interface
 * change, and we are striving for binary backward compatibility here; even
 * though these are SPI, and not intended for use by user space applications
 * which are not themselves system tools or libraries, some applications
 * have erroneously used them.
 */
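/*
 * User-space usage sketch for one of these nodes (kern.proc.pid):
 * probe for the required size first, then fetch.  Error handling is
 * omitted for brevity:
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid() };
 *	size_t len = 0;
 *	sysctl(mib, 4, NULL, &len, NULL, 0);	// size probe (KERN_PROCSLOP padded)
 *	struct kinfo_proc *kp = malloc(len);
 *	sysctl(mib, 4, kp, &len, NULL, 0);	// actual fetch
 */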
SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                      /* Pointer argument (arg1) */
    KERN_PROC_ALL,          /* Integer argument (arg2) */
    sysctl_prochandle,      /* Handler function */
    NULL,                   /* Data is size variant on ILP32/LP64 */
    "");
SYSCTL_PROC(_kern_proc, KERN_PROC_PID, pid, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                      /* Pointer argument (arg1) */
    KERN_PROC_PID,          /* Integer argument (arg2) */
    sysctl_prochandle,      /* Handler function */
    NULL,                   /* Data is size variant on ILP32/LP64 */
    "");
SYSCTL_PROC(_kern_proc, KERN_PROC_TTY, tty, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                      /* Pointer argument (arg1) */
    KERN_PROC_TTY,          /* Integer argument (arg2) */
    sysctl_prochandle,      /* Handler function */
    NULL,                   /* Data is size variant on ILP32/LP64 */
    "");
SYSCTL_PROC(_kern_proc, KERN_PROC_PGRP, pgrp, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                      /* Pointer argument (arg1) */
    KERN_PROC_PGRP,         /* Integer argument (arg2) */
    sysctl_prochandle,      /* Handler function */
    NULL,                   /* Data is size variant on ILP32/LP64 */
    "");
SYSCTL_PROC(_kern_proc, KERN_PROC_UID, uid, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                      /* Pointer argument (arg1) */
    KERN_PROC_UID,          /* Integer argument (arg2) */
    sysctl_prochandle,      /* Handler function */
    NULL,                   /* Data is size variant on ILP32/LP64 */
    "");
SYSCTL_PROC(_kern_proc, KERN_PROC_RUID, ruid, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                      /* Pointer argument (arg1) */
    KERN_PROC_RUID,         /* Integer argument (arg2) */
    sysctl_prochandle,      /* Handler function */
    NULL,                   /* Data is size variant on ILP32/LP64 */
    "");
SYSCTL_PROC(_kern_proc, KERN_PROC_LCID, lcid, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                      /* Pointer argument (arg1) */
    KERN_PROC_LCID,         /* Integer argument (arg2) */
    sysctl_prochandle,      /* Handler function */
    NULL,                   /* Data is size variant on ILP32/LP64 */
    "");
/*
 * Fill in non-zero fields of an eproc structure for the specified process.
 */
STATIC void
fill_user32_eproc(proc_t p, struct user32_eproc *__restrict ep)
{
	struct tty *tp;
	struct pgrp *pg;
	struct session *sessp;
	kauth_cred_t my_cred;

	pg = proc_pgrp(p);
	sessp = proc_session(p);

	if (pg != PGRP_NULL) {
		ep->e_pgid = p->p_pgrpid;
		ep->e_jobc = pg->pg_jobc;
		if (sessp != SESSION_NULL && sessp->s_ttyvp) {
			ep->e_flag = EPROC_CTTY;
		}
	}
	ep->e_ppid = p->p_ppid;
	if (p->p_ucred) {
		my_cred = kauth_cred_proc_ref(p);

		/* A fake historical pcred */
		ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
		ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
		ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
		ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);

		/* A fake historical *kauth_cred_t */
		ep->e_ucred.cr_ref = os_atomic_load(&my_cred->cr_ref, relaxed);
		ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
		ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups;
		bcopy(posix_cred_get(my_cred)->cr_groups,
		    ep->e_ucred.cr_groups, NGROUPS * sizeof(gid_t));

		kauth_cred_unref(&my_cred);
	}

	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
	    (tp = SESSION_TP(sessp))) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = sessp->s_ttypgrpid;
	}

	if (sessp != SESSION_NULL) {
		if (SESS_LEADER(p, sessp)) {
			ep->e_flag |= EPROC_SLEADER;
		}
		session_rele(sessp);
	}
	if (pg != PGRP_NULL) {
		pg_rele(pg);
	}
}

/*
 * Fill in non-zero fields of an LP64 eproc structure for the specified process.
 */
STATIC void
fill_user64_eproc(proc_t p, struct user64_eproc *__restrict ep)
{
	struct tty *tp;
	struct pgrp *pg;
	struct session *sessp;
	kauth_cred_t my_cred;

	pg = proc_pgrp(p);
	sessp = proc_session(p);

	if (pg != PGRP_NULL) {
		ep->e_pgid = p->p_pgrpid;
		ep->e_jobc = pg->pg_jobc;
		if (sessp != SESSION_NULL && sessp->s_ttyvp) {
			ep->e_flag = EPROC_CTTY;
		}
	}
	ep->e_ppid = p->p_ppid;
	if (p->p_ucred) {
		my_cred = kauth_cred_proc_ref(p);

		/* A fake historical pcred */
		ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
		ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
		ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
		ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);

		/* A fake historical *kauth_cred_t */
		ep->e_ucred.cr_ref = os_atomic_load(&my_cred->cr_ref, relaxed);
		ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
		ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups;
		bcopy(posix_cred_get(my_cred)->cr_groups,
		    ep->e_ucred.cr_groups, NGROUPS * sizeof(gid_t));

		kauth_cred_unref(&my_cred);
	}

	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
	    (tp = SESSION_TP(sessp))) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = sessp->s_ttypgrpid;
	}

	if (sessp != SESSION_NULL) {
		if (SESS_LEADER(p, sessp)) {
			ep->e_flag |= EPROC_SLEADER;
		}
		session_rele(sessp);
	}
	if (pg != PGRP_NULL) {
		pg_rele(pg);
	}
}
/*
 * Fill in an eproc structure for the specified process.
 * bzeroed by our caller, so only set non-zero fields.
 */
STATIC void
fill_user32_externproc(proc_t p, struct user32_extern_proc *__restrict exp)
{
	exp->p_starttime.tv_sec = p->p_start.tv_sec;
	exp->p_starttime.tv_usec = p->p_start.tv_usec;
	exp->p_flag = p->p_flag;
	if (p->p_lflag & P_LTRACED) {
		exp->p_flag |= P_TRACED;
	}
	if (p->p_lflag & P_LPPWAIT) {
		exp->p_flag |= P_PPWAIT;
	}
	if (p->p_lflag & P_LEXIT) {
		exp->p_flag |= P_WEXIT;
	}
	exp->p_stat = p->p_stat;
	exp->p_pid = p->p_pid;
	exp->p_oppid = p->p_oppid;
	/* Mach related */
	exp->user_stack = p->user_stack;
	exp->p_debugger = p->p_debugger;
	exp->sigwait = p->sigwait;
	/* scheduling */
#ifdef _PROC_HAS_SCHEDINFO_
	exp->p_estcpu = p->p_estcpu;
	exp->p_pctcpu = p->p_pctcpu;
	exp->p_slptime = p->p_slptime;
#endif
	exp->p_realtimer.it_interval.tv_sec =
	    (user32_time_t)p->p_realtimer.it_interval.tv_sec;
	exp->p_realtimer.it_interval.tv_usec =
	    (__int32_t)p->p_realtimer.it_interval.tv_usec;

	exp->p_realtimer.it_value.tv_sec =
	    (user32_time_t)p->p_realtimer.it_value.tv_sec;
	exp->p_realtimer.it_value.tv_usec =
	    (__int32_t)p->p_realtimer.it_value.tv_usec;

	exp->p_rtime.tv_sec = (user32_time_t)p->p_rtime.tv_sec;
	exp->p_rtime.tv_usec = (__int32_t)p->p_rtime.tv_usec;

	exp->p_sigignore = p->p_sigignore;
	exp->p_sigcatch = p->p_sigcatch;
	exp->p_priority = p->p_priority;
	exp->p_nice = p->p_nice;
	bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
	exp->p_xstat = p->p_xstat;
	exp->p_acflag = p->p_acflag;
}

/*
 * Fill in an LP64 version of extern_proc structure for the specified process.
 */
STATIC void
fill_user64_externproc(proc_t p, struct user64_extern_proc *__restrict exp)
{
	exp->p_starttime.tv_sec = p->p_start.tv_sec;
	exp->p_starttime.tv_usec = p->p_start.tv_usec;
	exp->p_flag = p->p_flag;
	if (p->p_lflag & P_LTRACED) {
		exp->p_flag |= P_TRACED;
	}
	if (p->p_lflag & P_LPPWAIT) {
		exp->p_flag |= P_PPWAIT;
	}
	if (p->p_lflag & P_LEXIT) {
		exp->p_flag |= P_WEXIT;
	}
	exp->p_stat = p->p_stat;
	exp->p_pid = p->p_pid;
	exp->p_oppid = p->p_oppid;
	/* Mach related */
	exp->user_stack = p->user_stack;
	exp->p_debugger = p->p_debugger;
	exp->sigwait = p->sigwait;
	/* scheduling */
#ifdef _PROC_HAS_SCHEDINFO_
	exp->p_estcpu = p->p_estcpu;
	exp->p_pctcpu = p->p_pctcpu;
	exp->p_slptime = p->p_slptime;
#endif
	exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
	exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;

	exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
	exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;

	exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
	exp->p_rtime.tv_usec = p->p_rtime.tv_usec;

	exp->p_sigignore = p->p_sigignore;
	exp->p_sigcatch = p->p_sigcatch;
	exp->p_priority = p->p_priority;
	exp->p_nice = p->p_nice;
	bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
	exp->p_xstat = p->p_xstat;
	exp->p_acflag = p->p_acflag;
}

STATIC void
fill_user32_proc(proc_t p, struct user32_kinfo_proc *__restrict kp)
{
	/* on a 64 bit kernel, 32 bit users get some truncated information */
	fill_user32_externproc(p, &kp->kp_proc);
	fill_user32_eproc(p, &kp->kp_eproc);
}

STATIC void
fill_user64_proc(proc_t p, struct user64_kinfo_proc *__restrict kp)
{
	fill_user64_externproc(p, &kp->kp_proc);
	fill_user64_eproc(p, &kp->kp_eproc);
}
STATIC int
sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
{
	__unused int cmd = oidp->oid_arg2;      /* subcommand*/
	int *name = arg1;                       /* oid element argument vector */
	int namelen = arg2;                     /* number of oid element arguments */
	user_addr_t oldp = req->oldptr;         /* user buffer copy out address */
	size_t *oldlenp = &req->oldlen;         /* user buffer copy out size */
//	user_addr_t newp = req->newptr;         /* user buffer copy in address */
//	size_t newlen = req->newlen;            /* user buffer copy in size */

	int ret = 0;

	if (namelen == 0) {
		return ENOTSUP;
	}

	switch (name[0]) {
	case KERN_KDEFLAGS:
	case KERN_KDDFLAGS:
	case KERN_KDENABLE:
	case KERN_KDGETBUF:
	case KERN_KDSETUP:
	case KERN_KDREMOVE:
	case KERN_KDSETREG:
	case KERN_KDGETREG:
	case KERN_KDREADTR:
	case KERN_KDWRITETR:
	case KERN_KDWRITEMAP:
	case KERN_KDTEST:
	case KERN_KDPIDTR:
	case KERN_KDTHRMAP:
	case KERN_KDPIDEX:
	case KERN_KDSETBUF:
	case KERN_KDREADCURTHRMAP:
	case KERN_KDSET_TYPEFILTER:
	case KERN_KDBUFWAIT:
	case KERN_KDCPUMAP:
	case KERN_KDWRITEMAP_V3:
	case KERN_KDWRITETR_V3:
		ret = kdbg_control(name, namelen, oldp, oldlenp);
		break;
	default:
		ret = ENOTSUP;
		break;
	}

	/* adjust index so we return the right required/consumed amount */
	if (!ret) {
		req->oldidx += req->oldlen;
	}

	return ret;
}
SYSCTL_PROC(_kern, KERN_KDEBUG, kdebug, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                      /* Pointer argument (arg1) */
    0,                      /* Integer argument (arg2) */
    sysctl_kdebug_ops,      /* Handler function */
    NULL,                   /* Data pointer */
    "");
#if !CONFIG_EMBEDDED
/*
 * Return the top *sizep bytes of the user stack, or the entire area of the
 * user stack down through the saved exec_path, whichever is smaller.
 */
STATIC int
sysctl_doprocargs SYSCTL_HANDLER_ARGS
{
	__unused int cmd = oidp->oid_arg2;      /* subcommand*/
	int *name = arg1;                       /* oid element argument vector */
	int namelen = arg2;                     /* number of oid element arguments */
	user_addr_t oldp = req->oldptr;         /* user buffer copy out address */
	size_t *oldlenp = &req->oldlen;         /* user buffer copy out size */
//	user_addr_t newp = req->newptr;         /* user buffer copy in address */
//	size_t newlen = req->newlen;            /* user buffer copy in size */
	int error;

	error = sysctl_procargsx(name, namelen, oldp, oldlenp, current_proc(), 0);

	/* adjust index so we return the right required/consumed amount */
	if (!error) {
		req->oldidx += req->oldlen;
	}

	return error;
}
SYSCTL_PROC(_kern, KERN_PROCARGS, procargs, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                      /* Pointer argument (arg1) */
    0,                      /* Integer argument (arg2) */
    sysctl_doprocargs,      /* Handler function */
    NULL,                   /* Data pointer */
    "");
#endif /* !CONFIG_EMBEDDED */
STATIC int
sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
{
	__unused int cmd = oidp->oid_arg2;      /* subcommand*/
	int *name = arg1;                       /* oid element argument vector */
	int namelen = arg2;                     /* number of oid element arguments */
	user_addr_t oldp = req->oldptr;         /* user buffer copy out address */
	size_t *oldlenp = &req->oldlen;         /* user buffer copy out size */
//	user_addr_t newp = req->newptr;         /* user buffer copy in address */
//	size_t newlen = req->newlen;            /* user buffer copy in size */
	int error;

	error = sysctl_procargsx(name, namelen, oldp, oldlenp, current_proc(), 1);

	/* adjust index so we return the right required/consumed amount */
	if (!error) {
		req->oldidx += req->oldlen;
	}

	return error;
}
SYSCTL_PROC(_kern, KERN_PROCARGS2, procargs2, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,                      /* Pointer argument (arg1) */
    0,                      /* Integer argument (arg2) */
    sysctl_doprocargs2,     /* Handler function */
    NULL,                   /* Data pointer */
    "");
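/*
 * User-space usage sketch for kern.procargs2: the copied-out buffer
 * begins with argc, followed by the saved executable path and the
 * argument/environment strings (error handling omitted):
 *
 *	int mib[3] = { CTL_KERN, KERN_PROCARGS2, pid };
 *	size_t len = 0;
 *	sysctl(mib, 3, NULL, &len, NULL, 0);	// learn required size
 *	char *buf = malloc(len);
 *	sysctl(mib, 3, buf, &len, NULL, 0);
 *	int argc;
 *	memcpy(&argc, buf, sizeof(argc));	// first word is argc
 */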
STATIC int
sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
    size_t *sizep, proc_t cur_proc, int argc_yes)
{
	proc_t p;
	int buflen = where != USER_ADDR_NULL ? *sizep : 0;
	int error = 0;
	struct _vm_map *proc_map;
	struct task * task;
	vm_map_copy_t tmp;
	user_addr_t arg_addr;
	size_t arg_size;
	caddr_t data;
	size_t argslen = 0;
	int size;
	vm_size_t alloc_size = 0;
	vm_offset_t copy_start, copy_end;
	kern_return_t ret;
	int pid;
	kauth_cred_t my_cred;
	uid_t uid;
	int argc = -1;

	if (namelen < 1) {
		return EINVAL;
	}

	if (argc_yes) {
		buflen -= sizeof(int);          /* reserve first word to return argc */
	}
	/* we only care about buflen when where (oldp from sysctl) is not NULL. */
	/* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
	/* is not NULL then the caller wants us to return the length needed to */
	/* hold the data we would return */
	if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
		return EINVAL;
	}
	arg_size = buflen;

	/*
	 *	Lookup process by pid
	 */
	pid = name[0];
	p = proc_find(pid);
	if (p == NULL) {
		return EINVAL;
	}

	/*
	 *	Copy the top N bytes of the stack.
	 *	On all machines we have so far, the stack grows
	 *	downwards.
	 *
	 *	If the user expects no more than N bytes of
	 *	argument list, use that as a guess for the
	 *	size.
	 */

	if (!p->user_stack) {
		proc_rele(p);
		return EINVAL;
	}

	if (where == USER_ADDR_NULL) {
		/* caller only wants to know length of proc args data */
		if (sizep == NULL) {
			proc_rele(p);
			return EFAULT;
		}

		size = p->p_argslen;
		proc_rele(p);
		if (argc_yes) {
			size += sizeof(int);
		} else {
			/*
			 * old PROCARGS will return the executable's path plus some
			 * extra space for work alignment and data tags
			 */
			size += PATH_MAX + (6 * sizeof(int));
		}
		size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
		*sizep = size;
		return 0;
	}

	my_cred = kauth_cred_proc_ref(p);
	uid = kauth_cred_getuid(my_cred);
	kauth_cred_unref(&my_cred);

	if ((uid != kauth_cred_getuid(kauth_cred_get()))
	    && suser(kauth_cred_get(), &cur_proc->p_acflag)) {
		proc_rele(p);
		return EINVAL;
	}

	if ((u_int)arg_size > p->p_argslen) {
		arg_size = round_page(p->p_argslen);
	}

	arg_addr = p->user_stack - arg_size;

	/*
	 *	Before we can block (any VM code), make another
	 *	reference to the map to keep it alive.  We do
	 *	that by getting a reference on the task itself.
	 */
	task = p->task;
	if (task == NULL) {
		proc_rele(p);
		return EINVAL;
	}

	/* save off argc before releasing the proc */
	argc = p->p_argc;

	argslen = p->p_argslen;
	/*
	 * Once we have a task reference we can convert that into a
	 * map reference, which we will use in the calls below.  The
	 * task/process may change its map after we take this reference
	 * (see execve), but the worst that will happen then is a return
	 * of stale info (which is always a possibility).
	 */
	task_reference(task);
	proc_rele(p);
	proc_map = get_task_map_reference(task);
	task_deallocate(task);

	if (proc_map == NULL) {
		return EINVAL;
	}

	alloc_size = round_page(arg_size);
	ret = kmem_alloc(kernel_map, &copy_start, alloc_size, VM_KERN_MEMORY_BSD);
	if (ret != KERN_SUCCESS) {
		vm_map_deallocate(proc_map);
		return ENOMEM;
	}
	bzero((void *)copy_start, alloc_size);

	copy_end = round_page(copy_start + arg_size);

	if (vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
	    (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
		vm_map_deallocate(proc_map);
		kmem_free(kernel_map, copy_start,
		    round_page(arg_size));
		return EIO;
	}

	/*
	 *	Now that we've done the copyin from the process'
	 *	map, we can release the reference to it.
	 */
	vm_map_deallocate(proc_map);

	if (vm_map_copy_overwrite(kernel_map,
	    (vm_map_address_t)copy_start,
	    tmp, FALSE) != KERN_SUCCESS) {
		kmem_free(kernel_map, copy_start,
		    round_page(arg_size));
		vm_map_copy_discard(tmp);
		return EIO;
	}

	if (arg_size > argslen) {
		data = (caddr_t) (copy_end - argslen);
		size = argslen;
	} else {
		data = (caddr_t) (copy_end - arg_size);
		size = arg_size;
	}

	/*
	 * When these sysctls were introduced, the first string in the strings
	 * section was just the bare path of the executable.  However, for security
	 * reasons we now prefix this string with executable_path= so it can be
	 * parsed getenv style.  To avoid binary compatibility issues with existing
	 * callers of this sysctl, we strip it off here if present.
	 * (rdar://problem/13746466)
	 */
#define EXECUTABLE_KEY "executable_path="
	if (strncmp(EXECUTABLE_KEY, data, strlen(EXECUTABLE_KEY)) == 0) {
		data += strlen(EXECUTABLE_KEY);
		size -= strlen(EXECUTABLE_KEY);
	}

	if (argc_yes) {
		/* Put processes argc as the first word in the copyout buffer */
		suword(where, argc);
		error = copyout(data, (where + sizeof(int)), size);
		size += sizeof(int);
	} else {
		error = copyout(data, where, size);

		/*
		 * Make the old PROCARGS work to return the executable's path
		 * But, only if there is enough space in the provided buffer
		 *
		 * on entry: data [possibly] points to the beginning of the path
		 *
		 * Note: we keep all pointers&sizes aligned to word boundaries
		 */
		if ((!error) && (buflen > 0 && (u_int)buflen > argslen)) {
			int binPath_sz, alignedBinPath_sz = 0;
			int extraSpaceNeeded, addThis;
			user_addr_t placeHere;
			char * str = (char *) data;
			int max_len = size;

			/* Some apps are really bad about messing up their stacks
			 *  So, we have to be extra careful about getting the length
			 *  of the executing binary.  If we encounter an error, we bail.
			 */

			/* Limit ourselves to PATH_MAX paths */
			if (max_len > PATH_MAX) {
				max_len = PATH_MAX;
			}

			binPath_sz = 0;

			while ((binPath_sz < max_len - 1) && (*str++ != 0)) {
				binPath_sz++;
			}

			/* If we have a NUL terminator, copy it, too */
			if (binPath_sz < max_len - 1) {
				binPath_sz += 1;
			}

			/* Pre-Flight the space requirements */

			/* Account for the padding that fills out binPath to the next word */
			alignedBinPath_sz += (binPath_sz & (sizeof(int) - 1)) ? (sizeof(int) - (binPath_sz & (sizeof(int) - 1))) : 0;

			placeHere = where + size;

			/* Account for the bytes needed to keep placeHere word aligned */
			addThis = (placeHere & (sizeof(int) - 1)) ? (sizeof(int) - (placeHere & (sizeof(int) - 1))) : 0;

			/* Add up all the space that is needed */
			extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));

			/* is there is room to tack on argv[0]? */
			if ((buflen & ~(sizeof(int) - 1)) >= (argslen + extraSpaceNeeded)) {
				placeHere += addThis;
				suword(placeHere, 0);
				placeHere += sizeof(int);
				suword(placeHere, 0xBFFF0000);
				placeHere += sizeof(int);
				suword(placeHere, 0);
				placeHere += sizeof(int);
				error = copyout(data, placeHere, binPath_sz);
				if (!error) {
					placeHere += binPath_sz;
					suword(placeHere, 0);
					size += extraSpaceNeeded;
				}
			}
		}
	}

	if (copy_start != (vm_offset_t) 0) {
		kmem_free(kernel_map, copy_start, copy_end - copy_start);
	}
	if (error) {
		return error;
	}

	if (where != USER_ADDR_NULL) {
		*sizep = size;
	}
	return 0;
}
/*
 * Max number of concurrent aio requests
 */
STATIC int
sysctl_aiomax
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int error = sysctl_io_number(req, aio_max_requests, sizeof(int), &new_value, &changed);
	if (changed) {
		/* make sure the system-wide limit is greater than the per process limit */
		if (new_value >= aio_max_requests_per_process && new_value <= AIO_MAX_REQUESTS) {
			aio_max_requests = new_value;
		} else {
			error = EINVAL;
		}
	}
	return error;
}

/*
 * Max number of concurrent aio requests per process
 */
STATIC int
sysctl_aioprocmax
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int error = sysctl_io_number(req, aio_max_requests_per_process, sizeof(int), &new_value, &changed);
	if (changed) {
		/* make sure per process limit is less than the system-wide limit */
		if (new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX) {
			aio_max_requests_per_process = new_value;
		} else {
			error = EINVAL;
		}
	}
	return error;
}

/*
 * Max number of async IO worker threads
 */
STATIC int
sysctl_aiothreads
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int error = sysctl_io_number(req, aio_worker_threads, sizeof(int), &new_value, &changed);
	if (changed) {
		/* we only allow an increase in the number of worker threads */
		if (new_value > aio_worker_threads) {
			_aio_create_worker_threads((new_value - aio_worker_threads));
			aio_worker_threads = new_value;
		} else {
			error = EINVAL;
		}
	}
	return error;
}

/*
 * System-wide limit on the max number of processes
 */
STATIC int
sysctl_maxproc
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int error = sysctl_io_number(req, maxproc, sizeof(int), &new_value, &changed);
	if (changed) {
		AUDIT_ARG(value32, new_value);
		/* make sure the system-wide limit is less than the configured hard
		 *  limit set at kernel compilation */
		if (new_value <= hard_maxproc && new_value > 0) {
			maxproc = new_value;
		} else {
			error = EINVAL;
		}
	}
	return error;
}
extern int sched_enable_smt;
STATIC int
sysctl_sched_enable_smt
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int error = sysctl_io_number(req, sched_enable_smt, sizeof(int), &new_value, &changed);
	if (error) {
		return error;
	}
	kern_return_t kret = KERN_SUCCESS;
	if (changed) {
		AUDIT_ARG(value32, new_value);
		if (new_value == 0) {
			sched_enable_smt = 0;
			kret = enable_smt_processors(false);
		} else {
			sched_enable_smt = 1;
			kret = enable_smt_processors(true);
		}
	}
	switch (kret) {
	case KERN_SUCCESS:
		error = 0;
		break;
	case KERN_INVALID_ARGUMENT:
		error = EINVAL;
		break;
	default:
		error = ENOENT;
		break;
	}

	return error;
}
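/*
 * Usage sketch from a root shell (the exact error returned on failure
 * depends on what enable_smt_processors() reports, as mapped above):
 *
 *	sysctl kern.sched_enable_smt=0	# take secondary SMT threads offline
 *	sysctl kern.sched_enable_smt=1	# bring them back online
 */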
SYSCTL_STRING(_kern, KERN_OSTYPE, ostype,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    ostype, 0, "");
SYSCTL_STRING(_kern, KERN_OSRELEASE, osrelease,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    osrelease, 0, "");
SYSCTL_INT(_kern, KERN_OSREV, osrevision,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, BSD, "");
SYSCTL_STRING(_kern, KERN_VERSION, version,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    version, 0, "");
SYSCTL_STRING(_kern, OID_AUTO, uuid,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &kernel_uuid_string[0], 0, "");
SYSCTL_STRING(_kern, OID_AUTO, osbuildconfig,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_MASKED,
    &osbuild_config[0], 0, "");
STATIC int
sysctl_protoboot(__unused struct sysctl_oid *oidp,
    __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error = -1;
#if CONFIG_LOCKERBOOT
	char protoboot_buff[24];
	size_t protoboot_len = sizeof(protoboot_buff);

	if (vnode_tag(rootvnode) == VT_LOCKERFS) {
		strlcpy(protoboot_buff, kernel_protoboot_mount, protoboot_len);
		error = sysctl_io_string(req, protoboot_buff, protoboot_len, 0, NULL);
	} else {
		error = EFTYPE;
	}
#else
#pragma unused(req)
	error = ENOTSUP;
#endif

	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, protoboot,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_protoboot, "A", "");

int debug_kprint_syscall = 0;
char debug_kprint_syscall_process[MAXCOMLEN + 1];

/* Thread safe: bits and string value are not used to reclaim state */
SYSCTL_INT(_debug, OID_AUTO, kprint_syscall,
    CTLFLAG_RW | CTLFLAG_LOCKED, &debug_kprint_syscall, 0, "kprintf syscall tracing");
SYSCTL_STRING(_debug, OID_AUTO, kprint_syscall_process,
    CTLFLAG_RW | CTLFLAG_LOCKED, debug_kprint_syscall_process, sizeof(debug_kprint_syscall_process),
    "name of process for kprintf syscall tracing");

int
debug_kprint_current_process(const char **namep)
{
	struct proc *p = current_proc();

	if (p == NULL) {
		return 0;
	}

	if (debug_kprint_syscall_process[0]) {
		/* user asked to scope tracing to a particular process name */
		if (0 == strncmp(debug_kprint_syscall_process,
		    p->p_comm, sizeof(debug_kprint_syscall_process))) {
			/* no value in telling the user that we traced what they asked */
			if (namep) {
				*namep = NULL;
			}

			return 1;
		} else {
			return 0;
		}
	}

	/* trace all processes. Tell user what we traced */
	if (namep) {
		*namep = p->p_comm;
	}

	return 1;
}
/* PR-5293665: need to use a callback function for kern.osversion to set
 * osversion in IORegistry */

STATIC int
sysctl_osversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
{
	int rval = 0;

	rval = sysctl_handle_string(oidp, arg1, arg2, req);

	if (rval == 0) {
		IORegistrySetOSBuildVersion((char *)arg1);
	}

	return rval;
}

SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
    CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
    osversion, 256 /* OSVERSIZE*/,
    sysctl_osversion, "A", "");

char osproductversion[48] = { '\0' };

STATIC int
sysctl_osproductversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
{
	if (req->newptr != 0) {
		/*
		 * Can only ever be set by launchd, and only once at boot.
		 */
		if (req->p->p_pid != 1 || osproductversion[0] != '\0') {
			return EPERM;
		}
	}

	return sysctl_handle_string(oidp, arg1, arg2, req);
}

SYSCTL_PROC(_kern, OID_AUTO, osproductversion,
    CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
    osproductversion, sizeof(osproductversion),
    sysctl_osproductversion, "A", "The ProductVersion from SystemVersion.plist");

static uint64_t iossupportversion_string[48];

STATIC int
sysctl_iossupportversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
{
	if (req->newptr != 0) {
		/*
		 * Can only ever be set by launchd, and only once at boot.
		 */
		if (req->p->p_pid != 1 || iossupportversion_string[0] != '\0') {
			return EPERM;
		}
	}

	return sysctl_handle_string(oidp, arg1, arg2, req);
}

SYSCTL_PROC(_kern, OID_AUTO, iossupportversion,
    CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
    iossupportversion_string, sizeof(iossupportversion_string),
    sysctl_iossupportversion, "A", "The iOSSupportVersion from SystemVersion.plist");

static uint64_t osvariant_status = 0;

STATIC int
sysctl_osvariant_status(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
{
	if (req->newptr != 0) {
		/*
		 * Can only ever be set by launchd, and only once at boot.
		 */
		if (req->p->p_pid != 1 || osvariant_status != 0) {
			return EPERM;
		}
	}

	return sysctl_handle_quad(oidp, arg1, arg2, req);
}

SYSCTL_PROC(_kern, OID_AUTO, osvariant_status,
    CTLFLAG_RW | CTLTYPE_QUAD | CTLFLAG_LOCKED | CTLFLAG_MASKED,
    &osvariant_status, sizeof(osvariant_status),
    sysctl_osvariant_status, "Q", "Opaque flags used to cache OS variant information");

extern void commpage_update_dyld_flags(uint64_t);
static uint64_t dyld_system_flags = 0;

STATIC int
sysctl_dyld_system_flags(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
{
	/*
	 * Can only ever be set by launchd, possibly several times
	 * as dyld may change its mind after a userspace reboot.
	 */
	if (req->newptr != 0 && req->p->p_pid != 1) {
		return EPERM;
	}

	int res = sysctl_handle_quad(oidp, arg1, arg2, req);
	if (req->newptr && res == 0) {
		commpage_update_dyld_flags(osvariant_status);
	}
	return res;
}

SYSCTL_PROC(_kern, OID_AUTO, dyld_system_flags,
    CTLFLAG_RW | CTLTYPE_QUAD | CTLFLAG_LOCKED | CTLFLAG_MASKED,
    &dyld_system_flags, sizeof(dyld_system_flags),
    sysctl_dyld_system_flags, "Q", "Opaque flags used to cache dyld system-wide configuration");

#if defined(XNU_TARGET_OS_BRIDGE)
char macosproductversion[MACOS_VERS_LEN] = { '\0' };

SYSCTL_STRING(_kern, OID_AUTO, macosproductversion,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &macosproductversion[0], MACOS_VERS_LEN, "The currently running macOS ProductVersion (from SystemVersion.plist on macOS)");

char macosversion[MACOS_VERS_LEN] = { '\0' };

SYSCTL_STRING(_kern, OID_AUTO, macosversion,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &macosversion[0], MACOS_VERS_LEN, "The currently running macOS build version");
#endif /* XNU_TARGET_OS_BRIDGE */
STATIC int
sysctl_sysctl_bootargs
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error;
	char buf[BOOT_LINE_LENGTH];

	strlcpy(buf, PE_boot_args(), BOOT_LINE_LENGTH);
	error = sysctl_io_string(req, buf, BOOT_LINE_LENGTH, 0, NULL);
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, bootargs,
    CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING,
    NULL, 0,
    sysctl_sysctl_bootargs, "A", "bootargs");

STATIC int
sysctl_kernelcacheuuid(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
{
	int rval = ENOENT;
	if (kernelcache_uuid_valid) {
		rval = sysctl_handle_string(oidp, arg1, arg2, req);
	}
	return rval;
}

SYSCTL_PROC(_kern, OID_AUTO, kernelcacheuuid,
    CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
    kernelcache_uuid_string, sizeof(kernelcache_uuid_string),
    sysctl_kernelcacheuuid, "A", "");
SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &maxfiles, 0, "");
SYSCTL_INT(_kern, KERN_ARGMAX, argmax,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, ARG_MAX, "");
SYSCTL_INT(_kern, KERN_POSIX1, posix1version,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, _POSIX_VERSION, "");
SYSCTL_INT(_kern, KERN_NGROUPS, ngroups,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, NGROUPS_MAX, "");
SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, 1, "");
#if 1   /* _POSIX_SAVED_IDS from <unistd.h> */
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, 1, "");
#else
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    NULL, 0, "");
#endif
SYSCTL_INT(_kern, OID_AUTO, num_files,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &nfiles, 0, "");
SYSCTL_COMPAT_INT(_kern, OID_AUTO, num_vnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &numvnodes, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_tasks,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &task_max, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_threads,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &thread_max, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_taskthreads,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &task_threadmax, 0, "");
SYSCTL_LONG(_kern, OID_AUTO, num_recycledvnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &num_recycledvnodes, "");
2014 sysctl_maxvnodes(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2016 int oldval
= desiredvnodes
;
2017 int error
= sysctl_io_number(req
, desiredvnodes
, sizeof(int), &desiredvnodes
, NULL
);
2019 if (oldval
!= desiredvnodes
) {
2020 resize_namecache(desiredvnodes
);
2026 SYSCTL_INT(_kern
, OID_AUTO
, namecache_disabled
,
2027 CTLFLAG_RW
| CTLFLAG_LOCKED
,
2028 &nc_disabled
, 0, "");
2030 SYSCTL_PROC(_kern
, KERN_MAXVNODES
, maxvnodes
,
2031 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2032 0, 0, sysctl_maxvnodes
, "I", "");
SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_maxproc, "I", "");

SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_aiomax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_aioprocmax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_aiothreads, "I", "");

SYSCTL_PROC(_kern, OID_AUTO, sched_enable_smt,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN,
    0, 0, sysctl_sched_enable_smt, "I", "");

extern int sched_allow_NO_SMT_threads;
SYSCTL_INT(_kern, OID_AUTO, sched_allow_NO_SMT_threads,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_allow_NO_SMT_threads, 0, "");
#if (DEVELOPMENT || DEBUG)
extern int sched_smt_balance;
SYSCTL_INT(_kern, OID_AUTO, sched_smt_balance,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_smt_balance, 0, "");
extern int sched_allow_rt_smt;
SYSCTL_INT(_kern, OID_AUTO, sched_allow_rt_smt,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_allow_rt_smt, 0, "");
extern int sched_avoid_cpu0;
SYSCTL_INT(_kern, OID_AUTO, sched_avoid_cpu0,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_avoid_cpu0, 0, "");
#if __arm__ || __arm64__
extern uint32_t perfcontrol_requested_recommended_cores;
SYSCTL_UINT(_kern, OID_AUTO, sched_recommended_cores,
    CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
    &perfcontrol_requested_recommended_cores, 0, "");
/* Scheduler perfcontrol callouts sysctls */
SYSCTL_DECL(_kern_perfcontrol_callout);
SYSCTL_NODE(_kern, OID_AUTO, perfcontrol_callout, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "scheduler perfcontrol callouts");

extern int perfcontrol_callout_stats_enabled;
SYSCTL_INT(_kern_perfcontrol_callout, OID_AUTO, stats_enabled,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &perfcontrol_callout_stats_enabled, 0, "");

extern uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
    perfcontrol_callout_stat_t stat);

/* On-Core Callout */
STATIC int
sysctl_perfcontrol_callout_stat
(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
    struct sysctl_req *req)
{
    perfcontrol_callout_stat_t stat = (perfcontrol_callout_stat_t)arg1;
    perfcontrol_callout_type_t type = (perfcontrol_callout_type_t)arg2;
    return sysctl_io_number(req, (int)perfcontrol_callout_stat_avg(type, stat),
               sizeof(int), NULL, NULL);
}
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, oncore_instr,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_ON_CORE,
    sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, oncore_cycles,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_ON_CORE,
    sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, offcore_instr,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_OFF_CORE,
    sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, offcore_cycles,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_OFF_CORE,
    sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, context_instr,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_CONTEXT,
    sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, context_cycles,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_CONTEXT,
    sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, update_instr,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_STATE_UPDATE,
    sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, update_cycles,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_STATE_UPDATE,
    sysctl_perfcontrol_callout_stat, "I", "");

#endif /* __arm__ || __arm64__ */
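/*
 * Usage sketch (illustrative only, not compiled here): each of the
 * kern.perfcontrol_callout.* OIDs above reports an average as a plain int,
 * so reading one reduces to an integer sysctlbyname(3) call (arm kernels
 * built DEVELOPMENT || DEBUG only).
 *
 *    #include <stdio.h>
 *    #include <sys/sysctl.h>
 *
 *    int
 *    main(void)
 *    {
 *        int instrs;
 *        size_t len = sizeof(instrs);
 *        if (sysctlbyname("kern.perfcontrol_callout.oncore_instr",
 *            &instrs, &len, NULL, 0) == 0) {
 *            printf("avg instructions per on-core callout: %d\n", instrs);
 *        }
 *        return 0;
 *    }
 */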
#if __arm64__
extern int legacy_footprint_entitlement_mode;
SYSCTL_INT(_kern, OID_AUTO, legacy_footprint_entitlement_mode,
    CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
    &legacy_footprint_entitlement_mode, 0, "");
#endif /* __arm64__ */

#endif /* (DEVELOPMENT || DEBUG) */
STATIC int
sysctl_securelvl
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, securelevel, sizeof(int), &new_value, &changed);
    if (changed) {
        if (!(new_value < securelevel && req->p->p_pid != 1)) {
            proc_list_lock();
            securelevel = new_value;
            proc_list_unlock();
        } else {
            error = EPERM;
        }
    }
    return error;
}

SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_securelvl, "I", "");
STATIC int
sysctl_domainname
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int error, changed;
    char tmpname[MAXHOSTNAMELEN] = {};

    lck_mtx_lock(&domainname_lock);
    strlcpy(tmpname, domainname, sizeof(tmpname));
    lck_mtx_unlock(&domainname_lock);

    error = sysctl_io_string(req, tmpname, sizeof(tmpname), 0, &changed);
    if (!error && changed) {
        /* take domainname_lock, not hostname_lock: the writer must
         * serialize against the same lock the reader above uses */
        lck_mtx_lock(&domainname_lock);
        strlcpy(domainname, tmpname, sizeof(domainname));
        lck_mtx_unlock(&domainname_lock);
    }
    return error;
}

SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_domainname, "A", "");
SYSCTL_COMPAT_INT(_kern, KERN_HOSTID, hostid,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &hostid, 0, "");
STATIC int
sysctl_hostname
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int error, changed;
    char tmpname[MAXHOSTNAMELEN] = {};

    lck_mtx_lock(&hostname_lock);
    strlcpy(tmpname, hostname, sizeof(tmpname));
    lck_mtx_unlock(&hostname_lock);

    error = sysctl_io_string(req, tmpname, sizeof(tmpname), 1, &changed);
    if (!error && changed) {
        lck_mtx_lock(&hostname_lock);
        strlcpy(hostname, tmpname, sizeof(hostname));
        lck_mtx_unlock(&hostname_lock);
    }
    return error;
}

SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_hostname, "A", "");
STATIC int
sysctl_procname
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    /* The original code allowed writing; that behavior is preserved here,
     * although it is of questionable value and this sysctl appears unused. */
    return sysctl_io_string(req, &req->p->p_name[0], (2 * MAXCOMLEN + 1), 1, NULL);
}

SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, sysctl_procname, "A", "");
SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &speculative_reads_disabled, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, preheat_max_bytes,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &preheat_max_bytes, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, preheat_min_bytes,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &preheat_min_bytes, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &speculative_prefetch_max, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max_iosize,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &speculative_prefetch_max_iosize, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_target,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_free_target, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_min,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_free_min, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_reserved,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_free_reserved, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_percentage,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_pageout_state.vm_page_speculative_percentage, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_q_age_ms,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_pageout_state.vm_page_speculative_q_age_ms, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_max_delayed_work_limit,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_max_delayed_work_limit, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_max_batch,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_max_batch, 0, "");

SYSCTL_STRING(_kern, OID_AUTO, bootsessionuuid,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &bootsessionuuid_string, sizeof(bootsessionuuid_string), "");
STATIC int
sysctl_boottime
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    struct timeval tv;
    boottime_timeval(&tv);
    struct proc *p = req->p;

    if (proc_is64bit(p)) {
        struct user64_timeval t = {};
        t.tv_sec = tv.tv_sec;
        t.tv_usec = tv.tv_usec;
        return sysctl_io_opaque(req, &t, sizeof(t), NULL);
    } else {
        struct user32_timeval t = {};
        t.tv_sec = tv.tv_sec;
        t.tv_usec = tv.tv_usec;
        return sysctl_io_opaque(req, &t, sizeof(t), NULL);
    }
}

SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
    CTLTYPE_STRUCT | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_boottime, "S,timeval", "");
STATIC int
sysctl_symfile
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    char *str;
    int error = get_kernel_symfile(req->p, &str);
    if (error) {
        return error;
    }
    return sysctl_io_string(req, str, 0, 0, NULL);
}

SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_symfile, "A", "");
STATIC int
sysctl_netboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    return sysctl_io_number(req, netboot_root(), sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_netboot, "I", "");
#ifdef CONFIG_IMGSRC_ACCESS
/*
 * Legacy--act as if only one layer of nesting is possible.
 */
STATIC int
sysctl_imgsrcdev
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    vfs_context_t ctx = vfs_context_current();
    vnode_t devvp;
    int result;

    if (!vfs_context_issuser(ctx)) {
        return EPERM;
    }

    if (imgsrc_rootvnodes[0] == NULL) {
        return ENOENT;
    }

    result = vnode_getwithref(imgsrc_rootvnodes[0]);
    if (result != 0) {
        return result;
    }

    devvp = vnode_mount(imgsrc_rootvnodes[0])->mnt_devvp;
    result = vnode_getwithref(devvp);
    if (result != 0) {
        goto out;
    }

    result = sysctl_io_number(req, vnode_specrdev(devvp), sizeof(dev_t), NULL, NULL);

    vnode_put(devvp);
out:
    vnode_put(imgsrc_rootvnodes[0]);
    return result;
}

SYSCTL_PROC(_kern, OID_AUTO, imgsrcdev,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_imgsrcdev, "I", "");
STATIC int
sysctl_imgsrcinfo
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int error;
    struct imgsrc_info info[MAX_IMAGEBOOT_NESTING] = {}; /* 2 for now, no problem */
    uint32_t i;
    vnode_t rvp, devvp;

    if (imgsrc_rootvnodes[0] == NULLVP) {
        return ENXIO;
    }

    for (i = 0; i < MAX_IMAGEBOOT_NESTING; i++) {
        /*
         * Go get the root vnode.
         */
        rvp = imgsrc_rootvnodes[i];
        if (rvp == NULLVP) {
            break;
        }

        error = vnode_get(rvp);
        if (error != 0) {
            return error;
        }

        /*
         * For now, no getting at a non-local volume.
         */
        devvp = vnode_mount(rvp)->mnt_devvp;
        if (devvp == NULL) {
            vnode_put(rvp);
            return EINVAL;
        }

        error = vnode_getwithref(devvp);
        if (error != 0) {
            vnode_put(rvp);
            return error;
        }

        /*
         * Fill in info.
         */
        info[i].ii_dev = vnode_specrdev(devvp);
        info[i].ii_flags = 0;
        info[i].ii_height = i;
        bzero(info[i].ii_reserved, sizeof(info[i].ii_reserved));

        vnode_put(devvp);
        vnode_put(rvp);
    }

    return sysctl_io_opaque(req, info, i * sizeof(info[0]), NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, imgsrcinfo,
    CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_imgsrcinfo, "I", "");

#endif /* CONFIG_IMGSRC_ACCESS */
SYSCTL_DECL(_kern_timer);
SYSCTL_NODE(_kern, OID_AUTO, timer, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "timer");

SYSCTL_INT(_kern_timer, OID_AUTO, coalescing_enabled,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &mach_timer_coalescing_enabled, 0, "");
SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_1,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &timer_deadline_tracking_bin_1, "");
SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_2,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &timer_deadline_tracking_bin_2, "");

SYSCTL_DECL(_kern_timer_longterm);
SYSCTL_NODE(_kern_timer, OID_AUTO, longterm, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "longterm");

/* Must match definition in osfmk/kern/timer_call.c */
enum {
    THRESHOLD, QCOUNT,
    ENQUEUES, DEQUEUES, ESCALATES, SCANS, PREEMPTS,
    LATENCY, LATENCY_MIN, LATENCY_MAX, SCAN_LIMIT, SCAN_INTERVAL, PAUSES
};
extern uint64_t timer_sysctl_get(int);
extern int      timer_sysctl_set(int, uint64_t);

STATIC int
sysctl_timer
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int oid = (int)arg1;
    uint64_t value = timer_sysctl_get(oid);
    uint64_t new_value;
    int error;
    int changed;

    error = sysctl_io_number(req, value, sizeof(value), &new_value, &changed);
    if (changed) {
        error = timer_sysctl_set(oid, new_value);
    }

    return error;
}
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, threshold,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    (void *) THRESHOLD, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_limit,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    (void *) SCAN_LIMIT, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_interval,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    (void *) SCAN_INTERVAL, 0, sysctl_timer, "Q", "");

SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, qlen,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) QCOUNT, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_pauses,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) PAUSES, 0, sysctl_timer, "Q", "");

SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, enqueues,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) ENQUEUES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, dequeues,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) DEQUEUES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, escalates,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) ESCALATES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scans,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) SCANS, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, preempts,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) PREEMPTS, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) LATENCY, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_min,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) LATENCY_MIN, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_max,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) LATENCY_MAX, 0, sysctl_timer, "Q", "");
STATIC int
sysctl_usrstack
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    return sysctl_io_number(req, (int)req->p->user_stack, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_USRSTACK32, usrstack,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_usrstack, "I", "");

STATIC int
sysctl_usrstack64
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    return sysctl_io_number(req, req->p->user_stack, sizeof(req->p->user_stack), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_usrstack64, "Q", "");
#if CONFIG_COREDUMP

SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    corefilename, sizeof(corefilename), "");

STATIC int
sysctl_coredump
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
    (void)req;
    return ENOTSUP;
#else
    int new_value, changed;
    int error = sysctl_io_number(req, do_coredump, sizeof(int), &new_value, &changed);
    if (changed) {
        if ((new_value == 0) || (new_value == 1)) {
            do_coredump = new_value;
        } else {
            error = EINVAL;
        }
    }
    return error;
#endif
}

SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_coredump, "I", "");

STATIC int
sysctl_suid_coredump
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
    (void)req;
    return ENOTSUP;
#else
    int new_value, changed;
    int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed);
    if (changed) {
        if ((new_value == 0) || (new_value == 1)) {
            sugid_coredump = new_value;
        } else {
            error = EINVAL;
        }
    }
    return error;
#endif
}

SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_suid_coredump, "I", "");

#endif /* CONFIG_COREDUMP */
STATIC int
sysctl_delayterm
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    struct proc *p = req->p;
    int new_value, changed;
    int error = sysctl_io_number(req, (req->p->p_lflag & P_LDELAYTERM)? 1: 0, sizeof(int), &new_value, &changed);
    if (changed) {
        proc_lock(p);
        if (new_value) {
            req->p->p_lflag |= P_LDELAYTERM;
        } else {
            req->p->p_lflag &= ~P_LDELAYTERM;
        }
        proc_unlock(p);
    }
    return error;
}

SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_delayterm, "I", "");
STATIC int
sysctl_rage_vnode
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    struct proc *p = req->p;
    uthread_t ut;
    int new_value, old_value, changed;
    int error;

    ut = get_bsdthread_info(current_thread());

    if (ut->uu_flag & UT_RAGE_VNODES) {
        old_value = KERN_RAGE_THREAD;
    } else if (p->p_lflag & P_LRAGE_VNODES) {
        old_value = KERN_RAGE_PROC;
    } else {
        old_value = 0;
    }

    error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);

    if ((error == 0) && (changed != 0)) {
        switch (new_value) {
        case KERN_RAGE_PROC:
            proc_lock(p);
            p->p_lflag |= P_LRAGE_VNODES;
            proc_unlock(p);
            break;
        case KERN_UNRAGE_PROC:
            proc_lock(p);
            p->p_lflag &= ~P_LRAGE_VNODES;
            proc_unlock(p);
            break;

        case KERN_RAGE_THREAD:
            ut->uu_flag |= UT_RAGE_VNODES;
            break;
        case KERN_UNRAGE_THREAD:
            ut = get_bsdthread_info(current_thread());
            ut->uu_flag &= ~UT_RAGE_VNODES;
            break;
        }
    }
    return error;
}

SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, sysctl_rage_vnode, "I", "");
/* XXX until filecoordinationd fixes a bit of inverted logic. */
STATIC int
sysctl_vfsnspace
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int old_value = 0, new_value, changed;

    return sysctl_io_number(req, old_value, sizeof(int), &new_value,
               &changed);
}

SYSCTL_PROC(_kern, OID_AUTO, vfsnspace,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, sysctl_vfsnspace, "I", "");
/* XXX move this interface into libproc and remove this sysctl */
STATIC int
sysctl_setthread_cpupercent
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, old_value = 0;
    int error = 0;
    kern_return_t kret = KERN_SUCCESS;
    uint8_t percent = 0;
    int ms_refill = 0;

    if ((error = sysctl_io_number(req, old_value, sizeof(old_value), &new_value, NULL)) != 0) {
        return error;
    }

    percent = new_value & 0xff;                 /* low 8 bits carry the percentage */
    ms_refill = (new_value >> 8) & 0xffffff;    /* upper 24 bits carry the ms refill value */
    if (percent > 100) {
        return EINVAL;
    }

    /*
     * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present.
     */
    if ((kret = thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, percent, ms_refill * (int)NSEC_PER_MSEC)) != 0) {
        return EIO;
    }

    return 0;
}

SYSCTL_PROC(_kern, OID_AUTO, setthread_cpupercent,
    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_ANYBODY,
    0, 0, sysctl_setthread_cpupercent, "I", "set thread cpu percentage limit");
STATIC int
sysctl_kern_check_openevt
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    struct proc *p = req->p;
    int new_value, old_value, changed;
    int error;

    if (p->p_flag & P_CHECKOPENEVT) {
        old_value = KERN_OPENEVT_PROC;
    } else {
        old_value = 0;
    }

    error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);

    if ((error == 0) && (changed != 0)) {
        switch (new_value) {
        case KERN_OPENEVT_PROC:
            OSBitOrAtomic(P_CHECKOPENEVT, &p->p_flag);
            break;

        case KERN_UNOPENEVT_PROC:
            OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT), &p->p_flag);
            break;

        default:
            error = EINVAL;
        }
    }
    return error;
}

SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");
#if DEVELOPMENT || DEBUG
STATIC int
sysctl_nx
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
    (void)req;
    return ENOTSUP;
#else
    int new_value, changed;
    int error;

    error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);
    if (error) {
        return error;
    }

    if (changed) {
#if defined(__x86_64__)
        /*
         * Only allow setting if NX is supported on the chip
         */
        if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD)) {
            return ENOTSUP;
        }
#endif
        nx_enabled = new_value;
    }
    return error;
#endif /* SECURE_KERNEL */
}
#endif

#if DEVELOPMENT || DEBUG
SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    0, 0, sysctl_nx, "I", "");
#endif
STATIC int
sysctl_loadavg
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    if (proc_is64bit(req->p)) {
        struct user64_loadavg loadinfo64 = {};
        fill_loadavg64(&averunnable, &loadinfo64);
        return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);
    } else {
        struct user32_loadavg loadinfo32 = {};
        fill_loadavg32(&averunnable, &loadinfo32);
        return sysctl_io_opaque(req, &loadinfo32, sizeof(loadinfo32), NULL);
    }
}

SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_loadavg, "S,loadavg", "");
/*
 * Note:	Thread safe; vm_map_lock protects in vm_toggle_entry_reuse()
 */
STATIC int
sysctl_vm_toggle_address_reuse(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
    int old_value = 0, new_value = 0, error = 0;

    if (vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE, &old_value )) {
        return error;
    }
    error = sysctl_io_number(req, old_value, sizeof(int), &new_value, NULL);
    if (!error) {
        return vm_toggle_entry_reuse(new_value, NULL);
    }
    return error;
}

SYSCTL_PROC(_debug, OID_AUTO, toggle_address_reuse, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_toggle_address_reuse, "I", "");
#ifdef CONFIG_XNUPOST

extern int xnupost_export_testdata(void *outp, uint32_t size, uint32_t *lenp);
extern uint32_t xnupost_get_estimated_testdata_size(void);
extern int xnupost_reset_all_tests(void);

STATIC int
sysctl_handle_xnupost_get_tests SYSCTL_HANDLER_ARGS
{
    /* fixup unused arguments warnings */
    __unused int _oa2 = arg2;
    __unused void * _oa1 = arg1;
    __unused struct sysctl_oid * _oidp = oidp;

    int error = 0;
    user_addr_t oldp = 0;
    user_addr_t newp = 0;
    uint32_t usedbytes = 0;

    oldp = req->oldptr;
    newp = req->newptr;

    if (newp) {
        return ENOTSUP;
    }

    if ((void *)oldp == NULL) {
        /* return estimated size for second call where info can be placed */
        req->oldidx = xnupost_get_estimated_testdata_size();
    } else {
        error = xnupost_export_testdata((void *)oldp, req->oldlen, &usedbytes);
        req->oldidx = usedbytes;
    }

    return error;
}

SYSCTL_PROC(_debug,
    OID_AUTO,
    xnupost_get_tests,
    CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,
    0,
    sysctl_handle_xnupost_get_tests,
    "-",
    "read xnupost test data in kernel");
STATIC int
sysctl_debug_xnupost_ctl SYSCTL_HANDLER_ARGS
{
    /* fixup unused arguments warnings */
    __unused int _oa2 = arg2;
    __unused void * _oa1 = arg1;
    __unused struct sysctl_oid * _oidp = oidp;

#define ARRCOUNT 4
    /*
     * INPUT: ACTION, PARAM1, PARAM2, PARAM3
     * OUTPUT: RESULTCODE, ADDITIONAL DATA
     */
    int32_t outval[ARRCOUNT] = {0};
    int32_t input[ARRCOUNT] = {0};
    int32_t out_size = sizeof(outval);
    int32_t in_size = sizeof(input);
    int error = 0;

    /* if this is NULL call to find out size, send out size info */
    if (!req->newptr) {
        goto out;
    }

    /* pull in provided value from userspace */
    error = SYSCTL_IN(req, &input[0], in_size);
    if (error) {
        return error;
    }

    if (input[0] == XTCTL_RESET_TESTDATA) {
        outval[0] = xnupost_reset_all_tests();
        goto out;
    }

out:
    error = SYSCTL_OUT(req, &outval[0], out_size);
    return error;
}

SYSCTL_PROC(_debug,
    OID_AUTO,
    xnupost_testctl,
    CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED,
    0,
    0,
    sysctl_debug_xnupost_ctl,
    "-",
    "xnupost control for kernel testing");
extern void test_oslog_handleOSLogCtl(int32_t * in, int32_t * out, int32_t arraycount);

STATIC int
sysctl_debug_test_oslog_ctl(__unused struct sysctl_oid * oidp, __unused void * arg1, __unused int arg2, struct sysctl_req * req)
{
    int32_t outval[ARRCOUNT] = {0};
    int32_t input[ARRCOUNT] = {0};
    int32_t size_outval = sizeof(outval);
    int32_t size_inval = sizeof(input);
    int32_t error;

    /* if this is NULL call to find out size, send out size info */
    if (!req->newptr) {
        error = SYSCTL_OUT(req, &outval[0], size_outval);
        return error;
    }

    /* pull in provided value from userspace */
    error = SYSCTL_IN(req, &input[0], size_inval);
    if (error) {
        return error;
    }

    test_oslog_handleOSLogCtl(input, outval, ARRCOUNT);

    error = SYSCTL_OUT(req, &outval[0], size_outval);

    return error;
}

SYSCTL_PROC(_debug,
    OID_AUTO,
    test_OSLogCtl,
    CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED,
    0,
    0,
    sysctl_debug_test_oslog_ctl,
    "-",
    "testing oslog in kernel");
#include <mach/task.h>
#include <mach/semaphore.h>

extern lck_grp_t * sysctl_debug_test_stackshot_owner_grp; /* used for both mutexes and rwlocks */
extern lck_mtx_t * sysctl_debug_test_stackshot_owner_init_mtx; /* used to protect lck_*_init */

/* This is a sysctl for testing collection of owner info on a lock in kernel space. A multi-threaded
 * test from userland sets this sysctl in such a way that a thread blocks in kernel mode, and a
 * stackshot is taken to see if the owner of the lock can be identified.
 *
 * We can't return to userland with a kernel lock held, so be sure to unlock before we leave.
 * The semaphores allow us to artificially create cases where the lock is being held and the
 * thread is hanging / taking a long time to do something. */

volatile char   sysctl_debug_test_stackshot_mtx_inited = 0;
semaphore_t     sysctl_debug_test_stackshot_mutex_sem;
lck_mtx_t       sysctl_debug_test_stackshot_owner_lck;

#define SYSCTL_DEBUG_MTX_ACQUIRE_WAIT   1
#define SYSCTL_DEBUG_MTX_ACQUIRE_NOWAIT 2
#define SYSCTL_DEBUG_MTX_SIGNAL         3
#define SYSCTL_DEBUG_MTX_TEARDOWN       4

STATIC int
sysctl_debug_test_stackshot_mutex_owner(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    long long option = -1;
    /* if the user tries to read the sysctl, we tell them what the address of the lock is (to test against stackshot's output) */
    long long mtx_unslid_addr = (long long)VM_KERNEL_UNSLIDE_OR_PERM(&sysctl_debug_test_stackshot_owner_lck);
    int error = sysctl_io_number(req, mtx_unslid_addr, sizeof(long long), (void*)&option, NULL);

    lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx);
    if (!sysctl_debug_test_stackshot_mtx_inited) {
        lck_mtx_init(&sysctl_debug_test_stackshot_owner_lck,
            sysctl_debug_test_stackshot_owner_grp,
            LCK_ATTR_NULL);
        semaphore_create(kernel_task,
            &sysctl_debug_test_stackshot_mutex_sem,
            SYNC_POLICY_FIFO, 0);
        sysctl_debug_test_stackshot_mtx_inited = 1;
    }
    lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx);

    if (!error) {
        switch (option) {
        case SYSCTL_DEBUG_MTX_ACQUIRE_NOWAIT:
            lck_mtx_lock(&sysctl_debug_test_stackshot_owner_lck);
            lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_lck);
            break;
        case SYSCTL_DEBUG_MTX_ACQUIRE_WAIT:
            lck_mtx_lock(&sysctl_debug_test_stackshot_owner_lck);
            semaphore_wait(sysctl_debug_test_stackshot_mutex_sem);
            lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_lck);
            break;
        case SYSCTL_DEBUG_MTX_SIGNAL:
            semaphore_signal(sysctl_debug_test_stackshot_mutex_sem);
            break;
        case SYSCTL_DEBUG_MTX_TEARDOWN:
            lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx);

            lck_mtx_destroy(&sysctl_debug_test_stackshot_owner_lck,
                sysctl_debug_test_stackshot_owner_grp);
            semaphore_destroy(kernel_task,
                sysctl_debug_test_stackshot_mutex_sem);
            sysctl_debug_test_stackshot_mtx_inited = 0;

            lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx);
            break;
        case -1: /* user just wanted to read the value, so do nothing */
            break;
        default:
            error = EINVAL;
            break;
        }
    }
    return error;
}

/* we can't return to userland with a kernel rwlock held, so be sure to unlock before we leave.
 * the semaphores allow us to artificially create cases where the lock is being held and the
 * thread is hanging / taking a long time to do something. */

SYSCTL_PROC(_debug,
    OID_AUTO,
    test_MutexOwnerCtl,
    CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    0,
    0,
    sysctl_debug_test_stackshot_mutex_owner,
    "-",
    "Testing mutex owner in kernel");
volatile char   sysctl_debug_test_stackshot_rwlck_inited = 0;
lck_rw_t        sysctl_debug_test_stackshot_owner_rwlck;
semaphore_t     sysctl_debug_test_stackshot_rwlck_sem;

#define SYSCTL_DEBUG_KRWLCK_RACQUIRE_NOWAIT 1
#define SYSCTL_DEBUG_KRWLCK_RACQUIRE_WAIT   2
#define SYSCTL_DEBUG_KRWLCK_WACQUIRE_NOWAIT 3
#define SYSCTL_DEBUG_KRWLCK_WACQUIRE_WAIT   4
#define SYSCTL_DEBUG_KRWLCK_SIGNAL          5
#define SYSCTL_DEBUG_KRWLCK_TEARDOWN        6

STATIC int
sysctl_debug_test_stackshot_rwlck_owner(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    long long option = -1;
    /* if the user tries to read the sysctl, we tell them what the address of the lock is
     * (to test against stackshot's output) */
    long long rwlck_unslid_addr = (long long)VM_KERNEL_UNSLIDE_OR_PERM(&sysctl_debug_test_stackshot_owner_rwlck);
    int error = sysctl_io_number(req, rwlck_unslid_addr, sizeof(long long), (void*)&option, NULL);

    lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx);
    if (!sysctl_debug_test_stackshot_rwlck_inited) {
        lck_rw_init(&sysctl_debug_test_stackshot_owner_rwlck,
            sysctl_debug_test_stackshot_owner_grp,
            LCK_ATTR_NULL);
        semaphore_create(kernel_task,
            &sysctl_debug_test_stackshot_rwlck_sem,
            SYNC_POLICY_FIFO, 0);
        sysctl_debug_test_stackshot_rwlck_inited = 1;
    }
    lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx);

    if (!error) {
        switch (option) {
        case SYSCTL_DEBUG_KRWLCK_RACQUIRE_NOWAIT:
            lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
            lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
            break;
        case SYSCTL_DEBUG_KRWLCK_RACQUIRE_WAIT:
            lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
            semaphore_wait(sysctl_debug_test_stackshot_rwlck_sem);
            lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
            break;
        case SYSCTL_DEBUG_KRWLCK_WACQUIRE_NOWAIT:
            lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
            lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
            break;
        case SYSCTL_DEBUG_KRWLCK_WACQUIRE_WAIT:
            lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
            semaphore_wait(sysctl_debug_test_stackshot_rwlck_sem);
            lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
            break;
        case SYSCTL_DEBUG_KRWLCK_SIGNAL:
            semaphore_signal(sysctl_debug_test_stackshot_rwlck_sem);
            break;
        case SYSCTL_DEBUG_KRWLCK_TEARDOWN:
            lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx);

            lck_rw_destroy(&sysctl_debug_test_stackshot_owner_rwlck,
                sysctl_debug_test_stackshot_owner_grp);
            semaphore_destroy(kernel_task,
                sysctl_debug_test_stackshot_rwlck_sem);
            sysctl_debug_test_stackshot_rwlck_inited = 0;

            lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx);
            break;
        case -1: /* user just wanted to read the value, so do nothing */
            break;
        default:
            error = EINVAL;
            break;
        }
    }
    return error;
}

SYSCTL_PROC(_debug,
    OID_AUTO,
    test_RWLockOwnerCtl,
    CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    0,
    0,
    sysctl_debug_test_stackshot_rwlck_owner,
    "-",
    "Testing rwlock owner in kernel");
#endif /* CONFIG_XNUPOST */
STATIC int
sysctl_swapusage
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int error;
    uint64_t swap_total;
    uint64_t swap_avail;
    vm_size_t swap_pagesize;
    boolean_t swap_encrypted;
    struct xsw_usage xsu = {};

    error = macx_swapinfo(&swap_total,
        &swap_avail,
        &swap_pagesize,
        &swap_encrypted);
    if (error) {
        return error;
    }

    xsu.xsu_total = swap_total;
    xsu.xsu_avail = swap_avail;
    xsu.xsu_used = swap_total - swap_avail;
    xsu.xsu_pagesize = swap_pagesize;
    xsu.xsu_encrypted = swap_encrypted;
    return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL);
}

SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_swapusage, "S,xsw_usage", "");
#if CONFIG_FREEZE
extern void vm_page_reactivate_all_throttled(void);
extern void memorystatus_disable_freeze(void);

static int
sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int error, val = memorystatus_freeze_enabled ? 1 : 0;
    boolean_t disabled;

    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || !req->newptr) {
        return error;
    }

    if (!VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
        //assert(req->newptr);
        printf("Failed attempt to set vm.freeze_enabled sysctl\n");
        return EINVAL;
    }

    /*
     * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue.
     */
    disabled = (!val && memorystatus_freeze_enabled);

    memorystatus_freeze_enabled = val ? TRUE : FALSE;

    if (disabled) {
        vm_page_reactivate_all_throttled();
        memorystatus_disable_freeze();
    }

    return 0;
}

SYSCTL_PROC(_vm, OID_AUTO, freeze_enabled, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY, &memorystatus_freeze_enabled, 0, sysctl_freeze_enabled, "I", "");
#endif /* CONFIG_FREEZE */
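/*
 * Usage sketch (illustrative only, not compiled here): toggling the freezer
 * from userland is a plain integer write; the handler above rejects it
 * unless the freezer swap path is active.
 *
 *    #include <sys/sysctl.h>
 *
 *    int
 *    set_freeze_enabled(int enable)
 *    {
 *        return sysctlbyname("vm.freeze_enabled", NULL, NULL,
 *                   &enable, sizeof(enable));
 *    }
 */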
#if DEVELOPMENT || DEBUG
extern int vm_num_swap_files_config;
extern int vm_num_swap_files;
extern lck_mtx_t vm_swap_data_lock;
#define VM_MAX_SWAP_FILE_NUM 100

static int
sysctl_vm_config_num_swap_files SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int error = 0, val = vm_num_swap_files_config;

    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || !req->newptr) {
        goto out;
    }

    if (!VM_CONFIG_SWAP_IS_ACTIVE && !VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
        printf("Swap is disabled\n");
        error = EINVAL;
        goto out;
    }

    lck_mtx_lock(&vm_swap_data_lock);

    if (val < vm_num_swap_files) {
        printf("Cannot configure fewer swap files than already exist.\n");
        error = EINVAL;
        lck_mtx_unlock(&vm_swap_data_lock);
        goto out;
    }

    if (val > VM_MAX_SWAP_FILE_NUM) {
        printf("Capping number of swap files to upper bound.\n");
        val = VM_MAX_SWAP_FILE_NUM;
    }

    vm_num_swap_files_config = val;
    lck_mtx_unlock(&vm_swap_data_lock);
out:
    return error;
}

SYSCTL_PROC(_debug, OID_AUTO, num_swap_files_configured, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_config_num_swap_files, "I", "");
#endif /* DEVELOPMENT || DEBUG */
/* this kernel does NOT implement shared_region_make_private_np() */
SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    (int *)NULL, 0, "");
STATIC int
fetch_process_cputype(
    proc_t cur_proc,
    int *name,
    u_int namelen,
    cpu_type_t *cputype)
{
    proc_t p = PROC_NULL;
    int refheld = 0;
    cpu_type_t ret = 0;
    int error = 0;

    if (namelen == 0) {
        p = cur_proc;
    } else if (namelen == 1) {
        p = proc_find(name[0]);
        if (p == NULL) {
            return EINVAL;
        }
        refheld = 1;
    } else {
        return EINVAL;
    }

    ret = cpu_type() & ~CPU_ARCH_MASK;
    if (IS_64BIT_PROCESS(p)) {
        ret |= CPU_ARCH_ABI64;
    }

    *cputype = ret;

    if (refheld != 0) {
        proc_rele(p);
    }

    return error;
}
STATIC int
sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
    struct sysctl_req *req)
{
    int error;
    int res = 0;
    cpu_type_t proc_cputype = 0;
    if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0) {
        return error;
    }
    res = 1;
    if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK)) {
        res = 0;
    }
    return SYSCTL_OUT(req, &res, sizeof(res));
}
SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_native, "I", "proc_native");

STATIC int
sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
    struct sysctl_req *req)
{
    int error;
    cpu_type_t proc_cputype = 0;
    if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0) {
        return error;
    }
    return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
}
SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_cputype, "I", "proc_cputype");
STATIC int
sysctl_safeboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    return sysctl_io_number(req, boothowto & RB_SAFEBOOT ? 1 : 0, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_safeboot, "I", "");

STATIC int
sysctl_singleuser
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    return sysctl_io_number(req, boothowto & RB_SINGLE ? 1 : 0, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, singleuser,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_singleuser, "I", "");

STATIC int
sysctl_minimalboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    return sysctl_io_number(req, minimalboot, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, minimalboot,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_minimalboot, "I", "");
/*
 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
 */
extern boolean_t        affinity_sets_enabled;
extern int              affinity_sets_mapping;

SYSCTL_INT(_kern, OID_AUTO, affinity_sets_enabled,
    CTLFLAG_RW | CTLFLAG_LOCKED, (int *) &affinity_sets_enabled, 0, "hinting enabled");
SYSCTL_INT(_kern, OID_AUTO, affinity_sets_mapping,
    CTLFLAG_RW | CTLFLAG_LOCKED, &affinity_sets_mapping, 0, "mapping policy");
/*
 * Boolean indicating if KASLR is active.
 */
STATIC int
sysctl_slide
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    uint32_t slide;

    slide = vm_kernel_slide ? 1 : 0;

    return sysctl_io_number( req, slide, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, slide,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_slide, "I", "");
/*
 * Limit on total memory users can wire.
 *
 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
 *
 * vm_per_task_user_wire_limit - per address space limit on wired memory. This puts a cap on the process's rlimit value.
 *
 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
 * kmem_init().
 *
 * All values are in bytes.
 */

vm_map_size_t   vm_global_user_wire_limit;
vm_map_size_t   vm_per_task_user_wire_limit;
extern uint64_t max_mem;

/*
 * We used to have a global in the kernel called vm_global_no_user_wire_limit which was the inverse
 * of vm_global_user_wire_limit. But maintaining both of those is silly, and vm_global_user_wire_limit is the
 * default.
 * This function is for backwards compatibility with userspace
 * since we exposed the old global via a sysctl.
 */
STATIC int
sysctl_global_no_user_wire_amount(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    vm_map_size_t old_value;
    vm_map_size_t new_value;
    int changed;
    int error;

    old_value = max_mem - vm_global_user_wire_limit;
    error = sysctl_io_number(req, old_value, sizeof(vm_map_size_t), &new_value, &changed);
    if (changed) {
        if ((uint64_t)new_value > max_mem) {
            error = EINVAL;
        } else {
            vm_global_user_wire_limit = max_mem - new_value;
        }
    }
    return error;
}
/*
 * There needs to be a more automatic/elegant way to do this
 */
#if defined(__ARM__)
SYSCTL_INT(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, 0, "");
SYSCTL_INT(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_per_task_user_wire_limit, 0, "");
SYSCTL_PROC(_vm, OID_AUTO, global_no_user_wire_amount, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, &sysctl_global_no_user_wire_amount, "I", "");
#else
SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, "");
SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_per_task_user_wire_limit, "");
SYSCTL_PROC(_vm, OID_AUTO, global_no_user_wire_amount, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, &sysctl_global_no_user_wire_amount, "Q", "");
#endif
#if DEVELOPMENT || DEBUG
/* These sysctls are used to test the wired limit. */
extern unsigned int     vm_page_wire_count;
extern uint32_t         vm_lopage_free_count;
SYSCTL_INT(_vm, OID_AUTO, page_wire_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_wire_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lopage_free_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_lopage_free_count, 0, "");
#endif /* DEVELOPMENT || DEBUG */
extern int vm_map_copy_overwrite_aligned_src_not_internal;
extern int vm_map_copy_overwrite_aligned_src_not_symmetric;
extern int vm_map_copy_overwrite_aligned_src_large;
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_internal, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_symmetric, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_symmetric, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_large, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_large, 0, "");

extern uint32_t vm_page_external_count;

SYSCTL_INT(_vm, OID_AUTO, vm_page_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_external_count, 0, "");

SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_filecache_min, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_xpmapped_min, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_xpmapped_min, 0, "");
#if DEVELOPMENT || DEBUG
SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_filecache_min_divisor, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_xpmapped_min_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_xpmapped_min_divisor, 0, "");
#endif
extern int      vm_compressor_mode;
extern int      vm_compressor_is_active;
extern int      vm_compressor_available;
extern uint32_t vm_ripe_target_age;
extern uint32_t swapout_target_age;
extern int64_t  compressor_bytes_used;
extern int64_t  c_segment_input_bytes;
extern int64_t  c_segment_compressed_bytes;
extern uint32_t compressor_eval_period_in_msecs;
extern uint32_t compressor_sample_min_in_msecs;
extern uint32_t compressor_sample_max_in_msecs;
extern uint32_t compressor_thrashing_threshold_per_10msecs;
extern uint32_t compressor_thrashing_min_per_10msecs;
extern uint32_t vm_compressor_time_thread;

#if DEVELOPMENT || DEBUG
extern uint32_t vm_compressor_minorcompact_threshold_divisor;
extern uint32_t vm_compressor_majorcompact_threshold_divisor;
extern uint32_t vm_compressor_unthrottle_threshold_divisor;
extern uint32_t vm_compressor_catchup_threshold_divisor;

extern uint32_t vm_compressor_minorcompact_threshold_divisor_overridden;
extern uint32_t vm_compressor_majorcompact_threshold_divisor_overridden;
extern uint32_t vm_compressor_unthrottle_threshold_divisor_overridden;
extern uint32_t vm_compressor_catchup_threshold_divisor_overridden;

extern vmct_stats_t vmct_stats;
STATIC int
sysctl_minorcompact_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, vm_compressor_minorcompact_threshold_divisor, sizeof(int), &new_value, &changed);

    if (changed) {
        vm_compressor_minorcompact_threshold_divisor = new_value;
        vm_compressor_minorcompact_threshold_divisor_overridden = 1;
    }
    return error;
}

SYSCTL_PROC(_vm, OID_AUTO, compressor_minorcompact_threshold_divisor,
    CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
    0, 0, sysctl_minorcompact_threshold_divisor, "I", "");

STATIC int
sysctl_majorcompact_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, vm_compressor_majorcompact_threshold_divisor, sizeof(int), &new_value, &changed);

    if (changed) {
        vm_compressor_majorcompact_threshold_divisor = new_value;
        vm_compressor_majorcompact_threshold_divisor_overridden = 1;
    }
    return error;
}

SYSCTL_PROC(_vm, OID_AUTO, compressor_majorcompact_threshold_divisor,
    CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
    0, 0, sysctl_majorcompact_threshold_divisor, "I", "");

STATIC int
sysctl_unthrottle_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, vm_compressor_unthrottle_threshold_divisor, sizeof(int), &new_value, &changed);

    if (changed) {
        vm_compressor_unthrottle_threshold_divisor = new_value;
        vm_compressor_unthrottle_threshold_divisor_overridden = 1;
    }
    return error;
}

SYSCTL_PROC(_vm, OID_AUTO, compressor_unthrottle_threshold_divisor,
    CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
    0, 0, sysctl_unthrottle_threshold_divisor, "I", "");

STATIC int
sysctl_catchup_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, vm_compressor_catchup_threshold_divisor, sizeof(int), &new_value, &changed);

    if (changed) {
        vm_compressor_catchup_threshold_divisor = new_value;
        vm_compressor_catchup_threshold_divisor_overridden = 1;
    }
    return error;
}

SYSCTL_PROC(_vm, OID_AUTO, compressor_catchup_threshold_divisor,
    CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
    0, 0, sysctl_catchup_threshold_divisor, "I", "");
#endif
SYSCTL_QUAD(_vm, OID_AUTO, compressor_input_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_input_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_compressed_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_bytes_used, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_bytes_used, "");

SYSCTL_INT(_vm, OID_AUTO, compressor_mode, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_mode, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_is_active, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_is_active, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_swapout_target_age, CTLFLAG_RD | CTLFLAG_LOCKED, &swapout_target_age, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_available, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_available, 0, "");

SYSCTL_INT(_vm, OID_AUTO, vm_ripe_target_age_in_secs, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_ripe_target_age, 0, "");

SYSCTL_INT(_vm, OID_AUTO, compressor_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_eval_period_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_sample_min_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_min_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_sample_max_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_max_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_threshold_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_threshold_per_10msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_min_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_min_per_10msecs, 0, "");

SYSCTL_STRING(_vm, OID_AUTO, swapfileprefix, CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, swapfilename, sizeof(swapfilename) - SWAPFILENAME_INDEX_LEN, "");

SYSCTL_INT(_vm, OID_AUTO, compressor_timing_enabled, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_time_thread, 0, "");
#if DEVELOPMENT || DEBUG
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_runtime0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_runtimes[0], "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_runtime1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_runtimes[1], "");

SYSCTL_QUAD(_vm, OID_AUTO, compressor_threads_total, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_cthreads_total, "");

SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_pages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_pages[0], "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_pages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_pages[1], "");

SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_iterations0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_iterations[0], "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_iterations1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_iterations[1], "");

SYSCTL_INT(_vm, OID_AUTO, compressor_thread_minpages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_minpages[0], 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thread_minpages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_minpages[1], 0, "");

SYSCTL_INT(_vm, OID_AUTO, compressor_thread_maxpages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_maxpages[0], 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thread_maxpages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_maxpages[1], 0, "");

#endif
SYSCTL_QUAD(_vm, OID_AUTO, lz4_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compressions, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compression_failures, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compressed_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_wk_compression_delta, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_wk_compression_delta, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_wk_compression_negative_delta, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_wk_compression_negative_delta, "");

SYSCTL_QUAD(_vm, OID_AUTO, lz4_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_decompressions, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_decompressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_decompressed_bytes, "");

SYSCTL_QUAD(_vm, OID_AUTO, uc_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.uc_decompressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_cabstime, "");

SYSCTL_QUAD(_vm, OID_AUTO, wkh_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_cabstime, "");
SYSCTL_QUAD(_vm, OID_AUTO, wkh_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_compressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wks_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_cabstime, "");
SYSCTL_QUAD(_vm, OID_AUTO, wks_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_compressions_exclusive, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressions_exclusive, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_sv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_sv_compressions, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_mzv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_mzv_compressions, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compression_failures, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_compressed_bytes_exclusive, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressed_bytes_exclusive, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_compressed_bytes_total, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressed_bytes_total, "");

SYSCTL_QUAD(_vm, OID_AUTO, wks_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compressed_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, wks_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compression_failures, "");
SYSCTL_QUAD(_vm, OID_AUTO, wks_sv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_sv_compressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_decompressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_dabstime, "");

SYSCTL_QUAD(_vm, OID_AUTO, wkh_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_dabstime, "");
SYSCTL_QUAD(_vm, OID_AUTO, wkh_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_decompressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wks_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_dabstime, "");
SYSCTL_QUAD(_vm, OID_AUTO, wks_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_decompressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_decompressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_decompressed_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_sv_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_sv_decompressions, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, wkdm_reeval_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.wkdm_reeval_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_max_failure_skips, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_failure_skips, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_max_failure_run_length, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_failure_run_length, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_max_preselects, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_preselects, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_run_preselection_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_run_preselection_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_run_continue_bytes, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_run_continue_bytes, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_profitable_bytes, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_profitable_bytes, 0, "");
#if DEVELOPMENT || DEBUG
extern int vm_compressor_current_codec;
extern int vm_compressor_test_seg_wp;
extern boolean_t vm_compressor_force_sw_wkdm;
SYSCTL_INT(_vm, OID_AUTO, compressor_codec, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_current_codec, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_test_wp, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_test_seg_wp, 0, "");

SYSCTL_INT(_vm, OID_AUTO, wksw_force, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_force_sw_wkdm, 0, "");
extern int precompy, wkswhw;

SYSCTL_INT(_vm, OID_AUTO, precompy, CTLFLAG_RW | CTLFLAG_LOCKED, &precompy, 0, "");
SYSCTL_INT(_vm, OID_AUTO, wkswhw, CTLFLAG_RW | CTLFLAG_LOCKED, &wkswhw, 0, "");
extern unsigned int vm_ktrace_enabled;
SYSCTL_INT(_vm, OID_AUTO, vm_ktrace, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_ktrace_enabled, 0, "");
#endif /* DEVELOPMENT || DEBUG */
#if CONFIG_PHANTOM_CACHE
extern uint32_t phantom_cache_thrashing_threshold;
extern uint32_t phantom_cache_eval_period_in_msecs;
extern uint32_t phantom_cache_thrashing_threshold_ssd;

SYSCTL_INT(_vm, OID_AUTO, phantom_cache_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_eval_period_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold_ssd, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold_ssd, 0, "");
#endif /* CONFIG_PHANTOM_CACHE */
#if CONFIG_BACKGROUND_QUEUE

extern uint32_t vm_page_background_count;
extern uint32_t vm_page_background_target;
extern uint32_t vm_page_background_internal_count;
extern uint32_t vm_page_background_external_count;
extern uint32_t vm_page_background_mode;
extern uint32_t vm_page_background_exclude_external;
extern uint64_t vm_page_background_promoted_count;
extern uint64_t vm_pageout_rejected_bq_internal;
extern uint64_t vm_pageout_rejected_bq_external;

SYSCTL_INT(_vm, OID_AUTO, vm_page_background_mode, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_mode, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_exclude_external, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_exclude_external, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_target, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_target, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_internal_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_internal_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_external_count, 0, "");

SYSCTL_QUAD(_vm, OID_AUTO, vm_page_background_promoted_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_promoted_count, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_considered_bq_internal, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_considered_bq_external, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_internal, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_external, "");

#endif /* CONFIG_BACKGROUND_QUEUE */
extern void vm_update_darkwake_mode(boolean_t);
extern boolean_t vm_darkwake_mode;

static int
sysctl_toggle_darkwake_mode(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, vm_darkwake_mode, sizeof(int), &new_value, &changed);

    if (!error && changed) {
        if (new_value != 0 && new_value != 1) {
            printf("Error: Invalid value passed to darkwake sysctl. Acceptable: 0 or 1.\n");
            error = EINVAL;
        } else {
            vm_update_darkwake_mode((boolean_t) new_value);
        }
    }

    return error;
}

SYSCTL_PROC(_vm, OID_AUTO, darkwake_mode,
    CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
    0, 0, sysctl_toggle_darkwake_mode, "I", "");
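/*
 * Illustrative usage (not from the original source): the handler above
 * only accepts 0 or 1, so `sysctl vm.darkwake_mode=1` toggles dark-wake
 * mode on and any other value returns EINVAL.
 */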
#if (DEVELOPMENT || DEBUG)

SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_hard,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_creation_throttled_hard, 0, "");

SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_soft,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_creation_throttled_soft, 0, "");

extern uint32_t vm_pageout_memorystatus_fb_factor_nr;
extern uint32_t vm_pageout_memorystatus_fb_factor_dr;
SYSCTL_INT(_vm, OID_AUTO, vm_pageout_memorystatus_fb_factor_nr, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_memorystatus_fb_factor_nr, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_pageout_memorystatus_fb_factor_dr, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_memorystatus_fb_factor_dr, 0, "");

extern uint32_t vm_grab_anon_nops;

SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_overrides, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_debug.vm_grab_anon_overrides, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_nops, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_debug.vm_grab_anon_nops, 0, "");
/* log message counters for persistence mode */
extern uint32_t oslog_p_total_msgcount;
extern uint32_t oslog_p_metadata_saved_msgcount;
extern uint32_t oslog_p_metadata_dropped_msgcount;
extern uint32_t oslog_p_error_count;
extern uint32_t oslog_p_saved_msgcount;
extern uint32_t oslog_p_dropped_msgcount;
extern uint32_t oslog_p_boot_dropped_msgcount;

/* log message counters for streaming mode */
extern uint32_t oslog_s_total_msgcount;
extern uint32_t oslog_s_metadata_msgcount;
extern uint32_t oslog_s_error_count;
extern uint32_t oslog_s_streamed_msgcount;
extern uint32_t oslog_s_dropped_msgcount;

SYSCTL_UINT(_debug, OID_AUTO, oslog_p_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_total_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_saved_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_dropped_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_error_count, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_saved_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_dropped_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_boot_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_boot_dropped_msgcount, 0, "");

SYSCTL_UINT(_debug, OID_AUTO, oslog_s_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_total_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_metadata_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_metadata_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_error_count, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_streamed_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_streamed_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_dropped_msgcount, 0, "");
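/*
 * Illustrative note (not from the original source): the counters above
 * are cumulative os_log diagnostics, e.g.
 * `sysctl debug.oslog_p_total_msgcount` reports the number of messages
 * seen in persistence mode.
 */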
#endif /* DEVELOPMENT || DEBUG */
/*
 * Enable tracing of voucher contents
 */
extern uint32_t ipc_voucher_trace_contents;

SYSCTL_INT(_kern, OID_AUTO, ipc_voucher_trace_contents,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ipc_voucher_trace_contents, 0, "Enable tracing voucher contents");
/*
 * Kernel stack size and depth
 */
SYSCTL_INT(_kern, OID_AUTO, stack_size,
    CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_size, 0, "Kernel stack size");
SYSCTL_INT(_kern, OID_AUTO, stack_depth_max,
    CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_depth_max, 0, "Max kernel stack depth at interrupt or context switch");
extern unsigned int kern_feature_overrides;
SYSCTL_INT(_kern, OID_AUTO, kern_feature_overrides,
    CTLFLAG_RD | CTLFLAG_LOCKED, &kern_feature_overrides, 0, "Kernel feature override mask");
/*
 * enable back trace for port allocations
 */
extern int ipc_portbt;

SYSCTL_INT(_kern, OID_AUTO, ipc_portbt,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &ipc_portbt, 0, "");
SYSCTL_STRING(_kern, OID_AUTO, sched,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    sched_string, sizeof(sched_string),
    "Timeshare scheduler implementation");
#if CONFIG_QUIESCE_COUNTER
static int
sysctl_cpu_quiescent_counter_interval SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)

    uint32_t local_min_interval_us = cpu_quiescent_counter_get_min_interval_us();

    int error = sysctl_handle_int(oidp, &local_min_interval_us, 0, req);
    if (error || !req->newptr) {
        return error;
    }

    cpu_quiescent_counter_set_min_interval_us(local_min_interval_us);

    return 0;
}

SYSCTL_PROC(_kern, OID_AUTO, cpu_checkin_interval,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0,
    sysctl_cpu_quiescent_counter_interval, "I",
    "Quiescent CPU checkin interval (microseconds)");
#endif /* CONFIG_QUIESCE_COUNTER */
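/*
 * Illustrative usage (not from the original source): reading
 * kern.cpu_checkin_interval returns the current minimum interval in
 * microseconds; writing it feeds the new value through
 * cpu_quiescent_counter_set_min_interval_us() above.
 */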
/*
 * Only support runtime modification on embedded platforms
 * with development config enabled
 */
extern int precise_user_kernel_time;
SYSCTL_INT(_kern, OID_AUTO, precise_user_kernel_time,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &precise_user_kernel_time, 0, "Precise accounting of kernel vs. user time");
/* Parameters related to timer coalescing tuning, to be replaced
 * with a dedicated system call in the future.
 */

/* Enable processing pending timers in the context of any other interrupt
 * Coalescing tuning parameters for various thread/task attributes */
static int
sysctl_timer_user_us_kernel_abstime SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp)
    int size = arg2;        /* subcommand */
    int error;
    int changed = 0;
    uint64_t old_value_ns;
    uint64_t new_value_ns;
    uint64_t value_abstime;

    if (size == sizeof(uint32_t)) {
        value_abstime = *((uint32_t *)arg1);
    } else if (size == sizeof(uint64_t)) {
        value_abstime = *((uint64_t *)arg1);
    } else {
        return ENOTSUP;
    }

    absolutetime_to_nanoseconds(value_abstime, &old_value_ns);
    error = sysctl_io_number(req, old_value_ns, sizeof(old_value_ns), &new_value_ns, &changed);
    if ((error) || (!changed)) {
        return error;
    }

    nanoseconds_to_absolutetime(new_value_ns, &value_abstime);
    if (size == sizeof(uint32_t)) {
        *((uint32_t *)arg1) = (uint32_t)value_abstime;
    } else {
        *((uint64_t *)arg1) = value_abstime;
    }

    return error;
}
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_bg_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_bg_shift, 0, "");
SYSCTL_PROC(_kern, OID_AUTO, timer_resort_threshold_ns,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_resort_threshold_abstime,
    sizeof(tcoal_prio_params.timer_resort_threshold_abstime),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");
SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_bg_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_bg_abstime_max,
    sizeof(tcoal_prio_params.timer_coalesce_bg_abstime_max),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_kt_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_kt_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_kt_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_kt_abstime_max,
    sizeof(tcoal_prio_params.timer_coalesce_kt_abstime_max),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_fp_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_fp_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_fp_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_fp_abstime_max,
    sizeof(tcoal_prio_params.timer_coalesce_fp_abstime_max),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_ts_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_ts_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_ts_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_ts_abstime_max,
    sizeof(tcoal_prio_params.timer_coalesce_ts_abstime_max),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier0_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[0], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier0_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[0],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[0]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier1_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[1], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier1_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[1],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[1]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier2_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[2], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier2_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[2],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[2]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier3_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[3], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier3_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[3],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[3]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier4_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[4], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier4_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[4],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[4]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier5_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[5], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier5_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[5],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[5]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");
/* Communicate the "user idle level" heuristic to the timer layer, and
 * potentially other layers in the future.
 */

static int
timer_user_idle_level(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value = 0, old_value = 0, changed = 0, error;

    old_value = timer_get_user_idle_level();

    error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);

    if (error == 0 && changed) {
        if (timer_set_user_idle_level(new_value) != KERN_SUCCESS) {
            return ERANGE;
        }
    }

    return error;
}

SYSCTL_PROC(_machdep, OID_AUTO, user_idle_level,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0,
    timer_user_idle_level, "I", "User idle level heuristic, 0-128");
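/*
 * Illustrative usage (not from the original source):
 * `sysctl machdep.user_idle_level=64` passes the new level to
 * timer_set_user_idle_level(), which rejects out-of-range values.
 */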
#if HYPERVISOR
SYSCTL_INT(_kern, OID_AUTO, hv_support,
    CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
    &hv_support_available, 0, "");
#endif
#if CONFIG_EMBEDDED
static int
sysctl_darkboot SYSCTL_HANDLER_ARGS
{
    int err = 0, value = 0;
#pragma unused(oidp, arg1, arg2, err, value, req)

    /*
     * Handle the sysctl request.
     *
     * If this is a read, the function will set the value to the current darkboot value. Otherwise,
     * we'll get the request identifier into "value" and then we can honor it.
     */
    if ((err = sysctl_io_number(req, darkboot, sizeof(int), &value, NULL)) != 0) {
        goto exit;
    }

    /* writing requested, let's process the request */
    if (req->newptr) {
        /* writing is protected by an entitlement */
        if (priv_check_cred(kauth_cred_get(), PRIV_DARKBOOT, 0) != 0) {
            err = EPERM;
            goto exit;
        }

        switch (value) {
        case MEMORY_MAINTENANCE_DARK_BOOT_UNSET:
            /*
             * If the darkboot sysctl is unset, the NVRAM variable
             * must be unset too. If that's not the case, it means
             * someone is doing something crazy and not supported.
             */
            if (darkboot != 0) {
                int ret = PERemoveNVRAMProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME);
                if (ret) {
                    darkboot = 0;
                } else {
                    err = EINVAL;
                }
            }
            break;
        case MEMORY_MAINTENANCE_DARK_BOOT_SET:
            darkboot = value;
            break;
        case MEMORY_MAINTENANCE_DARK_BOOT_SET_PERSISTENT: {
            /*
             * Set the NVRAM and update 'darkboot' in case
             * of success. Otherwise, do not update
             * 'darkboot' and report the failure.
             */
            if (PEWriteNVRAMBooleanProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME, TRUE)) {
                darkboot = value;
            } else {
                err = EINVAL;
            }

            break;
        }
        default:
            err = EINVAL;
            break;
        }
    }

exit:
    return err;
}

SYSCTL_PROC(_kern, OID_AUTO, darkboot,
    CTLFLAG_KERN | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
    0, 0, sysctl_darkboot, "I", "");
#endif /* CONFIG_EMBEDDED */
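/*
 * Illustrative usage (not from the original source): writes to
 * kern.darkboot must pass the PRIV_DARKBOOT privilege check; the
 * *_SET_PERSISTENT command also records the setting in NVRAM so it
 * survives a reboot.
 */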
#if DEVELOPMENT || DEBUG
#include <sys/sysent.h>
/* This should result in a fatal exception, verifying that "sysent" is
 * write-protected.
 */
static int
kern_sysent_write(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    uint64_t new_value = 0, old_value = 0;
    int changed = 0, error;

    error = sysctl_io_number(req, old_value, sizeof(uint64_t), &new_value, &changed);
    if ((error == 0) && changed) {
        volatile uint32_t *wraddr = (uint32_t *) &sysent[0];
        *wraddr = 0;
        printf("sysent[0] write succeeded\n");
    }
    return error;
}

SYSCTL_PROC(_kern, OID_AUTO, sysent_const_check,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0,
    kern_sysent_write, "I", "Attempt sysent[0] write");

#endif

#if DEVELOPMENT || DEBUG
SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED, NULL, 1, "");
#else
SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED, NULL, 0, "");
#endif
#if DEVELOPMENT || DEBUG

static int
sysctl_panic_test SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int rval = 0;
    char str[32] = "entry prelog postlog postcore";

    rval = sysctl_handle_string(oidp, str, sizeof(str), req);

    if (rval == 0 && req->newptr) {
        if (strncmp("entry", str, strlen("entry")) == 0) {
            panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_ENTRY, "test recursive panic at entry");
        } else if (strncmp("prelog", str, strlen("prelog")) == 0) {
            panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_PRELOG, "test recursive panic prior to writing a paniclog");
        } else if (strncmp("postlog", str, strlen("postlog")) == 0) {
            panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTLOG, "test recursive panic subsequent to paniclog");
        } else if (strncmp("postcore", str, strlen("postcore")) == 0) {
            panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTCORE, "test recursive panic subsequent to on-device core");
        }
    }

    return rval;
}
static int
sysctl_debugger_test SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int rval = 0;
    char str[32] = "entry prelog postlog postcore";

    rval = sysctl_handle_string(oidp, str, sizeof(str), req);

    if (rval == 0 && req->newptr) {
        if (strncmp("entry", str, strlen("entry")) == 0) {
            DebuggerWithContext(0, NULL, "test recursive panic via debugger at entry", DEBUGGER_OPTION_RECURPANIC_ENTRY);
        } else if (strncmp("prelog", str, strlen("prelog")) == 0) {
            DebuggerWithContext(0, NULL, "test recursive panic via debugger prior to writing a paniclog", DEBUGGER_OPTION_RECURPANIC_PRELOG);
        } else if (strncmp("postlog", str, strlen("postlog")) == 0) {
            DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to paniclog", DEBUGGER_OPTION_RECURPANIC_POSTLOG);
        } else if (strncmp("postcore", str, strlen("postcore")) == 0) {
            DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to on-device core", DEBUGGER_OPTION_RECURPANIC_POSTCORE);
        }
    }

    return rval;
}
decl_lck_spin_data(, spinlock_panic_test_lock);

__attribute__((noreturn))
static void
spinlock_panic_test_acquire_spinlock(void * arg __unused, wait_result_t wres __unused)
{
    lck_spin_lock(&spinlock_panic_test_lock);
    while (1) {
        ;
    }
}

static int
sysctl_spinlock_panic_test SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    if (req->newlen == 0) {
        return EINVAL;
    }

    thread_t panic_spinlock_thread;
    /* Initialize panic spinlock */
    lck_grp_t * panic_spinlock_grp;
    lck_grp_attr_t * panic_spinlock_grp_attr;
    lck_attr_t * panic_spinlock_attr;

    panic_spinlock_grp_attr = lck_grp_attr_alloc_init();
    panic_spinlock_grp = lck_grp_alloc_init("panic_spinlock", panic_spinlock_grp_attr);
    panic_spinlock_attr = lck_attr_alloc_init();

    lck_spin_init(&spinlock_panic_test_lock, panic_spinlock_grp, panic_spinlock_attr);

    /* Create thread to acquire spinlock */
    if (kernel_thread_start(spinlock_panic_test_acquire_spinlock, NULL, &panic_spinlock_thread) != KERN_SUCCESS) {
        return EBUSY;
    }

    /* Try to acquire spinlock -- should panic eventually */
    lck_spin_lock(&spinlock_panic_test_lock);
    while (1) {
        ;
    }
}
__attribute__((noreturn))
static void
simultaneous_panic_worker(void * arg, wait_result_t wres __unused)
{
    atomic_int *start_panic = (atomic_int *)arg;

    while (!atomic_load(start_panic)) {
        ;
    }
    panic("SIMULTANEOUS PANIC TEST: INITIATING PANIC FROM CPU %d", cpu_number());
    __builtin_unreachable();
}

static int
sysctl_simultaneous_panic_test SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    if (req->newlen == 0) {
        return EINVAL;
    }

    int i = 0, threads_to_create = 2 * processor_count;
    atomic_int start_panic = 0;
    unsigned int threads_created = 0;
    thread_t new_panic_thread;

    for (i = threads_to_create; i > 0; i--) {
        if (kernel_thread_start(simultaneous_panic_worker, (void *) &start_panic, &new_panic_thread) == KERN_SUCCESS) {
            threads_created++;
        }
    }

    /* FAIL if we couldn't create at least processor_count threads */
    if (threads_created < processor_count) {
        panic("SIMULTANEOUS PANIC TEST: FAILED TO CREATE ENOUGH THREADS, ONLY CREATED %d (of %d)",
            threads_created, threads_to_create);
    }

    atomic_exchange(&start_panic, 1);
    while (1) {
        ;
    }
}
SYSCTL_PROC(_debug, OID_AUTO, panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_panic_test, "A", "panic test");
SYSCTL_PROC(_debug, OID_AUTO, debugger_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_debugger_test, "A", "debugger test");
SYSCTL_PROC(_debug, OID_AUTO, spinlock_panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_spinlock_panic_test, "A", "spinlock panic test");
SYSCTL_PROC(_debug, OID_AUTO, simultaneous_panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_simultaneous_panic_test, "A", "simultaneous panic test");
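/*
 * Illustrative usage (not from the original source): each test OID
 * takes one of the keywords handled above, e.g.
 * `sysctl debug.panic_test=entry` exercises the recursive-panic path
 * at panic entry. Available on DEVELOPMENT/DEBUG kernels only.
 */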
extern int exc_resource_threads_enabled;

SYSCTL_INT(_kern, OID_AUTO, exc_resource_threads_enabled, CTLFLAG_RD | CTLFLAG_LOCKED, &exc_resource_threads_enabled, 0, "exc_resource thread limit enabled");

#endif /* DEVELOPMENT || DEBUG */
const uint32_t thread_groups_supported = 0;

static int
sysctl_thread_groups_supported(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int value = thread_groups_supported;
    return sysctl_io_number(req, value, sizeof(value), NULL, NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, thread_groups_supported, CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_KERN,
    0, 0, &sysctl_thread_groups_supported, "I", "thread groups supported");
static int
sysctl_grade_cputype SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2, oidp)
    int error;
    int type_tuple[2] = {};
    int return_value = 0;

    error = SYSCTL_IN(req, &type_tuple, sizeof(type_tuple));

    if (error) {
        return error;
    }

    return_value = grade_binary(type_tuple[0], type_tuple[1], FALSE);

    error = SYSCTL_OUT(req, &return_value, sizeof(return_value));

    if (error) {
        return error;
    }

    return 0;
}

SYSCTL_PROC(_kern, OID_AUTO, grade_cputype,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED | CTLTYPE_OPAQUE,
    0, 0, &sysctl_grade_cputype, "S",
    "grade value of cpu_type_t+cpu_sub_type_t");
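/*
 * Illustrative user-space caller (not from the original source): the
 * OID is opaque read/write, so the tuple is supplied as "new" data and
 * the grade is returned as "old" data in a single request:
 *
 *	int tuple[2] = { CPU_TYPE_X86_64, CPU_SUBTYPE_X86_64_ALL };
 *	int grade = 0;
 *	size_t len = sizeof(grade);
 *	sysctlbyname("kern.grade_cputype", &grade, &len, tuple, sizeof(tuple));
 */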
#if DEVELOPMENT || DEBUG

static atomic_int wedge_thread_should_wake = 0;

static int
unwedge_thread SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int error, val = 0;
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || val == 0) {
        return error;
    }

    atomic_store(&wedge_thread_should_wake, 1);
    return 0;
}

SYSCTL_PROC(_kern, OID_AUTO, unwedge_thread, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, unwedge_thread, "I", "unwedge the thread wedged by kern.wedge_thread");
SYSCTL_LONG(_kern, OID_AUTO, phys_carveout_pa, CTLFLAG_RD | CTLFLAG_LOCKED,
    &phys_carveout_pa,
    "base physical address of the phys_carveout_mb boot-arg region");
SYSCTL_LONG(_kern, OID_AUTO, phys_carveout_size, CTLFLAG_RD | CTLFLAG_LOCKED,
    &phys_carveout_size,
    "size in bytes of the phys_carveout_mb boot-arg region");
static int
wedge_thread SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)

    int error, val = 0;
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || val == 0) {
        return error;
    }

    uint64_t interval = 1;
    nanoseconds_to_absolutetime(1000 * 1000 * 50, &interval);

    atomic_store(&wedge_thread_should_wake, 0);
    while (!atomic_load(&wedge_thread_should_wake)) {
        tsleep1(NULL, 0, "wedge_thread", mach_absolute_time() + interval, NULL);
    }

    return 0;
}

SYSCTL_PROC(_kern, OID_AUTO, wedge_thread, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, wedge_thread, "I", "wedge this thread so it cannot be cleaned up");
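/*
 * Illustrative usage (not from the original source):
 * `sysctl kern.wedge_thread=1` parks the calling thread in the sleep
 * loop above until `sysctl kern.unwedge_thread=1` (from another shell)
 * sets wedge_thread_should_wake.
 */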
extern unsigned long
total_corpses_count(void);

static int
sysctl_total_corpses_count SYSCTL_HANDLER_ARGS;

static int
sysctl_total_corpses_count SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    int corpse_count = total_corpses_count();
    return sysctl_io_opaque(req, &corpse_count, sizeof(int), NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, total_corpses_count, CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, sysctl_total_corpses_count, "I", "total corpses on the system");
static int
sysctl_turnstile_test_prim_lock SYSCTL_HANDLER_ARGS;
static int
sysctl_turnstile_test_prim_unlock SYSCTL_HANDLER_ARGS;
int
tstile_test_prim_lock(boolean_t use_hashtable);
int
tstile_test_prim_unlock(boolean_t use_hashtable);

static int
sysctl_turnstile_test_prim_lock SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int error, val = 0;
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || val == 0) {
        return error;
    }
    switch (val) {
    case SYSCTL_TURNSTILE_TEST_USER_DEFAULT:
    case SYSCTL_TURNSTILE_TEST_USER_HASHTABLE:
    case SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT:
    case SYSCTL_TURNSTILE_TEST_KERNEL_HASHTABLE:
        return tstile_test_prim_lock(val);
    default:
        return error;
    }
}

static int
sysctl_turnstile_test_prim_unlock SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int error, val = 0;
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || val == 0) {
        return error;
    }
    switch (val) {
    case SYSCTL_TURNSTILE_TEST_USER_DEFAULT:
    case SYSCTL_TURNSTILE_TEST_USER_HASHTABLE:
    case SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT:
    case SYSCTL_TURNSTILE_TEST_KERNEL_HASHTABLE:
        return tstile_test_prim_unlock(val);
    default:
        return error;
    }
}

SYSCTL_PROC(_kern, OID_AUTO, turnstiles_test_lock, CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
    0, 0, sysctl_turnstile_test_prim_lock, "I", "turnstiles test lock");

SYSCTL_PROC(_kern, OID_AUTO, turnstiles_test_unlock, CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
    0, 0, sysctl_turnstile_test_prim_unlock, "I", "turnstiles test unlock");
int
turnstile_get_boost_stats_sysctl(void *req);
int
turnstile_get_unboost_stats_sysctl(void *req);
static int
sysctl_turnstile_boost_stats SYSCTL_HANDLER_ARGS;
static int
sysctl_turnstile_unboost_stats SYSCTL_HANDLER_ARGS;
extern uint64_t thread_block_on_turnstile_count;
extern uint64_t thread_block_on_regular_waitq_count;

static int
sysctl_turnstile_boost_stats SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2, oidp)
    return turnstile_get_boost_stats_sysctl(req);
}

static int
sysctl_turnstile_unboost_stats SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2, oidp)
    return turnstile_get_unboost_stats_sysctl(req);
}

SYSCTL_PROC(_kern, OID_AUTO, turnstile_boost_stats, CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLTYPE_STRUCT,
    0, 0, sysctl_turnstile_boost_stats, "S", "turnstiles boost stats");
SYSCTL_PROC(_kern, OID_AUTO, turnstile_unboost_stats, CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLTYPE_STRUCT,
    0, 0, sysctl_turnstile_unboost_stats, "S", "turnstiles unboost stats");
SYSCTL_QUAD(_kern, OID_AUTO, thread_block_count_on_turnstile,
    CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &thread_block_on_turnstile_count, "thread blocked on turnstile count");
SYSCTL_QUAD(_kern, OID_AUTO, thread_block_count_on_reg_waitq,
    CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &thread_block_on_regular_waitq_count, "thread blocked on regular waitq count");
static int
sysctl_erase_all_test_mtx_stats SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int error, val = 0;
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || val == 0) {
        return error;
    }

    lck_mtx_test_init();
    erase_all_test_mtx_stats();

    return 0;
}

static int
sysctl_get_test_mtx_stats SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    char* buffer;
    int size, buffer_size, error;

    buffer_size = 1000;
    buffer = kalloc(buffer_size);
    if (!buffer) {
        panic("Impossible to allocate memory for %s\n", __func__);
    }

    lck_mtx_test_init();

    size = get_test_mtx_stats_string(buffer, buffer_size);

    error = sysctl_io_string(req, buffer, size, 0, NULL);

    kfree(buffer, buffer_size);

    return error;
}
static int
sysctl_test_mtx_uncontended SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    char* buffer;
    char input_val[40];
    int buffer_size, offset, error, iter;

    if (!req->newptr) {
        return 0;
    }

    if (!req->oldptr) {
        return EINVAL;
    }

    if (req->newlen >= sizeof(input_val)) {
        return EINVAL;
    }

    error = SYSCTL_IN(req, input_val, req->newlen);
    if (error) {
        return error;
    }
    input_val[req->newlen] = '\0';

    iter = 0;
    error = sscanf(input_val, "%d", &iter);
    if (error != 1) {
        printf("%s invalid input\n", __func__);
        return EINVAL;
    }

    if (iter <= 0) {
        printf("%s requested %d iterations, not starting the test\n", __func__, iter);
        return EINVAL;
    }

    lck_mtx_test_init();

    buffer_size = 2000;
    offset = 0;
    buffer = kalloc(buffer_size);
    if (!buffer) {
        panic("Impossible to allocate memory for %s\n", __func__);
    }
    memset(buffer, 0, buffer_size);

    printf("%s starting uncontended mutex test with %d iterations\n", __func__, iter);

    offset = scnprintf(buffer, buffer_size, "STATS INNER LOOP");
    offset += lck_mtx_test_mtx_uncontended(iter, &buffer[offset], buffer_size - offset);

    offset += scnprintf(&buffer[offset], buffer_size - offset, "\nSTATS OUTER LOOP");
    offset += lck_mtx_test_mtx_uncontended_loop_time(iter, &buffer[offset], buffer_size - offset);

    error = SYSCTL_OUT(req, buffer, offset);

    kfree(buffer, buffer_size);
    return error;
}
static int
sysctl_test_mtx_contended SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    char* buffer;
    char input_val[40];
    int buffer_size, offset, error, iter;

    if (!req->newptr) {
        return 0;
    }

    if (!req->oldptr) {
        return EINVAL;
    }

    if (req->newlen >= sizeof(input_val)) {
        return EINVAL;
    }

    error = SYSCTL_IN(req, input_val, req->newlen);
    if (error) {
        return error;
    }
    input_val[req->newlen] = '\0';

    iter = 0;
    error = sscanf(input_val, "%d", &iter);
    if (error != 1) {
        printf("%s invalid input\n", __func__);
        return EINVAL;
    }

    if (iter <= 0) {
        printf("%s requested %d iterations, not starting the test\n", __func__, iter);
        return EINVAL;
    }

    lck_mtx_test_init();

    erase_all_test_mtx_stats();

    buffer_size = 2000;
    offset = 0;
    buffer = kalloc(buffer_size);
    if (!buffer) {
        panic("Impossible to allocate memory for %s\n", __func__);
    }
    memset(buffer, 0, buffer_size);

    printf("%s starting contended mutex test with %d iterations FULL_CONTENDED\n", __func__, iter);

    offset = scnprintf(buffer, buffer_size, "STATS INNER LOOP");
    offset += lck_mtx_test_mtx_contended(iter, &buffer[offset], buffer_size - offset, FULL_CONTENDED);

    printf("%s starting contended mutex loop test with %d iterations FULL_CONTENDED\n", __func__, iter);

    offset += scnprintf(&buffer[offset], buffer_size - offset, "\nSTATS OUTER LOOP");
    offset += lck_mtx_test_mtx_contended_loop_time(iter, &buffer[offset], buffer_size - offset, FULL_CONTENDED);

    printf("%s starting contended mutex test with %d iterations HALF_CONTENDED\n", __func__, iter);

    offset += scnprintf(&buffer[offset], buffer_size - offset, "STATS INNER LOOP");
    offset += lck_mtx_test_mtx_contended(iter, &buffer[offset], buffer_size - offset, HALF_CONTENDED);

    printf("%s starting contended mutex loop test with %d iterations HALF_CONTENDED\n", __func__, iter);

    offset += scnprintf(&buffer[offset], buffer_size - offset, "\nSTATS OUTER LOOP");
    offset += lck_mtx_test_mtx_contended_loop_time(iter, &buffer[offset], buffer_size - offset, HALF_CONTENDED);

    error = SYSCTL_OUT(req, buffer, offset);

    printf("\n%s\n", buffer);
    kfree(buffer, buffer_size);

    return error;
}
SYSCTL_PROC(_kern, OID_AUTO, erase_all_test_mtx_stats, CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
    0, 0, sysctl_erase_all_test_mtx_stats, "I", "erase test_mtx statistics");

SYSCTL_PROC(_kern, OID_AUTO, get_test_mtx_stats, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_KERN | CTLFLAG_LOCKED,
    0, 0, sysctl_get_test_mtx_stats, "A", "get test_mtx statistics");

SYSCTL_PROC(_kern, OID_AUTO, test_mtx_contended, CTLTYPE_STRING | CTLFLAG_MASKED | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    0, 0, sysctl_test_mtx_contended, "A", "get statistics for contended mtx test");

SYSCTL_PROC(_kern, OID_AUTO, test_mtx_uncontended, CTLTYPE_STRING | CTLFLAG_MASKED | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    0, 0, sysctl_test_mtx_uncontended, "A", "get statistics for uncontended mtx test");
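/*
 * Illustrative usage (not from the original source): write the
 * iteration count as a decimal string, e.g.
 * `sysctl kern.test_mtx_uncontended=1000`; the handler parses it with
 * sscanf() and returns the formatted statistics in the old buffer.
 */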
extern uint64_t MutexSpin;

SYSCTL_QUAD(_kern, OID_AUTO, mutex_spin_us, CTLFLAG_RW, &MutexSpin,
    "Spin time for acquiring a kernel mutex");
#if defined (__x86_64__)

semaphore_t sysctl_test_panic_with_thread_sem;

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Winfinite-recursion" /* rdar://38801963 */
__attribute__((noreturn))
static void
panic_thread_test_child_spin(void * arg, wait_result_t wres)
{
    static int panic_thread_recurse_count = 5;

    if (panic_thread_recurse_count > 0) {
        panic_thread_recurse_count--;
        panic_thread_test_child_spin(arg, wres);
    }

    semaphore_signal(sysctl_test_panic_with_thread_sem);
    while (1) {
        ;
    }
}
#pragma clang diagnostic pop

static void
panic_thread_test_child_park(void * arg __unused, wait_result_t wres __unused)
{
    int event;

    assert_wait(&event, THREAD_UNINT);
    semaphore_signal(sysctl_test_panic_with_thread_sem);
    thread_block(panic_thread_test_child_park);
}

static int
sysctl_test_panic_with_thread SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int rval = 0;
    char str[16] = { '\0' };
    thread_t child_thread = THREAD_NULL;

    rval = sysctl_handle_string(oidp, str, sizeof(str), req);
    if (rval != 0 || !req->newptr) {
        return EINVAL;
    }

    semaphore_create(kernel_task, &sysctl_test_panic_with_thread_sem, SYNC_POLICY_FIFO, 0);

    /* Create thread to spin or park in continuation */
    if (strncmp("spin", str, strlen("spin")) == 0) {
        if (kernel_thread_start(panic_thread_test_child_spin, NULL, &child_thread) != KERN_SUCCESS) {
            semaphore_destroy(kernel_task, sysctl_test_panic_with_thread_sem);
            return EBUSY;
        }
    } else if (strncmp("continuation", str, strlen("continuation")) == 0) {
        if (kernel_thread_start(panic_thread_test_child_park, NULL, &child_thread) != KERN_SUCCESS) {
            semaphore_destroy(kernel_task, sysctl_test_panic_with_thread_sem);
            return EBUSY;
        }
    } else {
        semaphore_destroy(kernel_task, sysctl_test_panic_with_thread_sem);
        return EINVAL;
    }

    semaphore_wait(sysctl_test_panic_with_thread_sem);

    panic_with_thread_context(0, NULL, 0, child_thread, "testing panic_with_thread_context for thread %p", child_thread);

    /* Not reached */
    return EINVAL;
}

SYSCTL_PROC(_kern, OID_AUTO, test_panic_with_thread, CTLFLAG_MASKED | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_WR | CTLTYPE_STRING,
    0, 0, sysctl_test_panic_with_thread, "A", "test panic flow for backtracing a different thread");
#endif /* defined (__x86_64__) */

#endif /* DEVELOPMENT || DEBUG */
static int
sysctl_get_owned_vmobjects SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)

    /* validate */
    if (req->newlen != sizeof(mach_port_name_t) || req->newptr == USER_ADDR_NULL ||
        req->oldidx != 0 || req->newidx != 0 || req->p == NULL) {
        return EINVAL;
    }

    int error;
    mach_port_name_t task_port_name;
    task_t task;
    int buffer_size = (req->oldptr != USER_ADDR_NULL) ? req->oldlen : 0;
    vmobject_list_output_t buffer;
    size_t output_size;

    if (buffer_size) {
        const int min_size = sizeof(vm_object_query_data_t) + sizeof(int64_t);

        if (buffer_size < min_size) {
            buffer_size = min_size;
        }

        buffer = kalloc(buffer_size);

        if (!buffer) {
            error = ENOMEM;
            goto sysctl_get_vmobject_list_exit;
        }
    } else {
        buffer = NULL;
    }

    /* we have a "newptr" (for write) we get a task port name from the caller. */
    error = SYSCTL_IN(req, &task_port_name, sizeof(mach_port_name_t));

    if (error != 0) {
        goto sysctl_get_vmobject_list_exit;
    }

    task = port_name_to_task(task_port_name);
    if (task == TASK_NULL) {
        error = ESRCH;
        goto sysctl_get_vmobject_list_exit;
    }

    /* copy the vmobjects and vmobject data out of the task */
    if (buffer_size == 0) {
        int64_t __size;
        task_copy_vmobjects(task, NULL, 0, &__size);
        output_size = (__size > 0) ? __size * sizeof(vm_object_query_data_t) + sizeof(int64_t) : 0;
    } else {
        task_copy_vmobjects(task, &buffer->data[0], buffer_size - sizeof(int64_t), &buffer->entries);
        output_size = buffer->entries * sizeof(vm_object_query_data_t) + sizeof(int64_t);
    }

    task_deallocate(task);

    error = SYSCTL_OUT(req, (char*) buffer, output_size);

sysctl_get_vmobject_list_exit:
    if (buffer) {
        kfree(buffer, buffer_size);
    }

    return error;
}

SYSCTL_PROC(_vm, OID_AUTO, get_owned_vmobjects, CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
    0, 0, sysctl_get_owned_vmobjects, "A", "get owned vmobjects in task");
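/*
 * Layout note (not from the original source): the buffer returned by
 * vm.get_owned_vmobjects begins with a 64-bit entry count followed by
 * one vm_object_query_data_t record per entry; callers passing a NULL
 * old pointer receive only the required size.
 */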