2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
67 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
68 * support for mandatory and extensible security protections. This notice
69 * is included in support of clause 2.2 (b) of the Apple Public License,
74 * DEPRECATED sysctl system call code
76 * Everything in this file is deprecated. Sysctls should be handled
77 * by the code in kern_newsysctl.c.
78 * The remaining "case" sections are supposed to be converted into
79 * SYSCTL_*-style definitions, and as soon as all of them are gone,
80 * this source file is supposed to die.
82 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
83 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/malloc.h>
90 #include <sys/proc_internal.h>
91 #include <sys/kauth.h>
92 #include <sys/file_internal.h>
93 #include <sys/vnode_internal.h>
94 #include <sys/unistd.h>
96 #include <sys/ioctl.h>
97 #include <sys/namei.h>
99 #include <sys/disklabel.h>
101 #include <sys/sysctl.h>
102 #include <sys/user.h>
103 #include <sys/aio_kern.h>
104 #include <sys/reboot.h>
105 #include <sys/memory_maintenance.h>
106 #include <sys/priv.h>
107 #include <stdatomic.h>
109 #include <security/audit/audit.h>
110 #include <kern/kalloc.h>
112 #include <machine/smp.h>
113 #include <mach/machine.h>
114 #include <mach/mach_host.h>
115 #include <mach/mach_types.h>
116 #include <mach/processor_info.h>
117 #include <mach/vm_param.h>
118 #include <kern/debug.h>
119 #include <kern/mach_param.h>
120 #include <kern/task.h>
121 #include <kern/thread.h>
122 #include <kern/thread_group.h>
123 #include <kern/processor.h>
124 #include <kern/cpu_number.h>
125 #include <kern/cpu_quiesce.h>
126 #include <kern/debug.h>
127 #include <kern/sched_prim.h>
128 #include <vm/vm_kern.h>
129 #include <vm/vm_map.h>
130 #include <mach/host_info.h>
132 #include <sys/mount_internal.h>
133 #include <sys/kdebug.h>
135 #include <IOKit/IOPlatformExpert.h>
136 #include <pexpert/pexpert.h>
138 #include <machine/machine_routines.h>
139 #include <machine/exec.h>
141 #include <vm/vm_protos.h>
142 #include <vm/vm_pageout.h>
143 #include <vm/vm_compressor_algorithms.h>
144 #include <sys/imgsrc.h>
145 #include <kern/timer_call.h>
147 #if defined(__i386__) || defined(__x86_64__)
148 #include <i386/cpuid.h>
152 #include <sys/kern_memorystatus.h>
156 #include <kperf/kperf.h>
160 #include <kern/hv_support.h>
164 * deliberately setting max requests to really high number
165 * so that runaway settings do not cause MALLOC overflows
167 #define AIO_MAX_REQUESTS (128 * CONFIG_AIO_MAX)
169 extern int aio_max_requests
;
170 extern int aio_max_requests_per_process
;
171 extern int aio_worker_threads
;
172 extern int lowpri_IO_window_msecs
;
173 extern int lowpri_IO_delay_msecs
;
174 extern int nx_enabled
;
175 extern int speculative_reads_disabled
;
176 extern unsigned int speculative_prefetch_max
;
177 extern unsigned int speculative_prefetch_max_iosize
;
178 extern unsigned int preheat_max_bytes
;
179 extern unsigned int preheat_min_bytes
;
180 extern long numvnodes
;
182 extern uuid_string_t bootsessionuuid_string
;
184 extern unsigned int vm_max_delayed_work_limit
;
185 extern unsigned int vm_max_batch
;
187 extern unsigned int vm_page_free_min
;
188 extern unsigned int vm_page_free_target
;
189 extern unsigned int vm_page_free_reserved
;
191 #if (DEVELOPMENT || DEBUG)
192 extern uint32_t vm_page_creation_throttled_hard
;
193 extern uint32_t vm_page_creation_throttled_soft
;
194 #endif /* DEVELOPMENT || DEBUG */
197 * Conditionally allow dtrace to see these functions for debugging purposes.
205 #define STATIC static
208 extern boolean_t mach_timer_coalescing_enabled
;
210 extern uint64_t timer_deadline_tracking_bin_1
, timer_deadline_tracking_bin_2
;
213 fill_user32_eproc(proc_t
, struct user32_eproc
*__restrict
);
215 fill_user32_externproc(proc_t
, struct user32_extern_proc
*__restrict
);
217 fill_user64_eproc(proc_t
, struct user64_eproc
*__restrict
);
219 fill_user64_proc(proc_t
, struct user64_kinfo_proc
*__restrict
);
221 fill_user64_externproc(proc_t
, struct user64_extern_proc
*__restrict
);
223 fill_user32_proc(proc_t
, struct user32_kinfo_proc
*__restrict
);
226 kdbg_control(int *name
, u_int namelen
, user_addr_t where
, size_t * sizep
);
232 pcsamples_ops(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
,
235 sysctl_procargs(int *name
, u_int namelen
, user_addr_t where
,
236 size_t *sizep
, proc_t cur_proc
);
238 sysctl_procargsx(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
,
239 proc_t cur_proc
, int argc_yes
);
241 sysctl_struct(user_addr_t oldp
, size_t *oldlenp
, user_addr_t newp
,
242 size_t newlen
, void *sp
, int len
);
244 STATIC
int sysdoproc_filt_KERN_PROC_PID(proc_t p
, void * arg
);
245 STATIC
int sysdoproc_filt_KERN_PROC_PGRP(proc_t p
, void * arg
);
246 STATIC
int sysdoproc_filt_KERN_PROC_TTY(proc_t p
, void * arg
);
247 STATIC
int sysdoproc_filt_KERN_PROC_UID(proc_t p
, void * arg
);
248 STATIC
int sysdoproc_filt_KERN_PROC_RUID(proc_t p
, void * arg
);
249 int sysdoproc_callback(proc_t p
, void *arg
);
252 /* forward declarations for non-static STATIC */
253 STATIC
void fill_loadavg64(struct loadavg
*la
, struct user64_loadavg
*la64
);
254 STATIC
void fill_loadavg32(struct loadavg
*la
, struct user32_loadavg
*la32
);
255 STATIC
int sysctl_handle_kern_threadname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
256 STATIC
int sysctl_sched_stats(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
257 STATIC
int sysctl_sched_stats_enable(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
258 STATIC
int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
;
260 STATIC
int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
;
261 #endif /* COUNT_SYSCALLS */
263 STATIC
int sysctl_doprocargs SYSCTL_HANDLER_ARGS
;
264 #endif /* !CONFIG_EMBEDDED */
265 STATIC
int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
;
266 STATIC
int sysctl_prochandle SYSCTL_HANDLER_ARGS
;
267 STATIC
int sysctl_aiomax(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
268 STATIC
int sysctl_aioprocmax(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
269 STATIC
int sysctl_aiothreads(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
270 STATIC
int sysctl_maxproc(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
271 STATIC
int sysctl_osversion(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
272 STATIC
int sysctl_sysctl_bootargs(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
273 STATIC
int sysctl_maxvnodes(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
274 STATIC
int sysctl_securelvl(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
275 STATIC
int sysctl_domainname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
276 STATIC
int sysctl_hostname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
277 STATIC
int sysctl_procname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
278 STATIC
int sysctl_boottime(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
279 STATIC
int sysctl_symfile(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
281 STATIC
int sysctl_netboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
283 #ifdef CONFIG_IMGSRC_ACCESS
284 STATIC
int sysctl_imgsrcdev(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
286 STATIC
int sysctl_usrstack(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
287 STATIC
int sysctl_usrstack64(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
289 STATIC
int sysctl_coredump(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
290 STATIC
int sysctl_suid_coredump(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
292 STATIC
int sysctl_delayterm(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
293 STATIC
int sysctl_rage_vnode(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
294 STATIC
int sysctl_kern_check_openevt(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
295 STATIC
int sysctl_nx(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
296 STATIC
int sysctl_loadavg(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
297 STATIC
int sysctl_vm_toggle_address_reuse(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
298 STATIC
int sysctl_swapusage(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
299 STATIC
int fetch_process_cputype( proc_t cur_proc
, int *name
, u_int namelen
, cpu_type_t
*cputype
);
300 STATIC
int sysctl_sysctl_native(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
301 STATIC
int sysctl_sysctl_cputype(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
302 STATIC
int sysctl_safeboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
303 STATIC
int sysctl_singleuser(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
304 STATIC
int sysctl_minimalboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
305 STATIC
int sysctl_slide(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
307 #ifdef CONFIG_XNUPOST
308 #include <tests/xnupost.h>
310 STATIC
int sysctl_debug_test_oslog_ctl(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
311 STATIC
int sysctl_debug_test_stackshot_mutex_owner(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
312 STATIC
int sysctl_debug_test_stackshot_rwlck_owner(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
315 extern void IORegistrySetOSBuildVersion(char * build_version
);
318 fill_loadavg64(struct loadavg
*la
, struct user64_loadavg
*la64
)
320 la64
->ldavg
[0] = la
->ldavg
[0];
321 la64
->ldavg
[1] = la
->ldavg
[1];
322 la64
->ldavg
[2] = la
->ldavg
[2];
323 la64
->fscale
= (user64_long_t
)la
->fscale
;
327 fill_loadavg32(struct loadavg
*la
, struct user32_loadavg
*la32
)
329 la32
->ldavg
[0] = la
->ldavg
[0];
330 la32
->ldavg
[1] = la
->ldavg
[1];
331 la32
->ldavg
[2] = la
->ldavg
[2];
332 la32
->fscale
= (user32_long_t
)la
->fscale
;
337 * Attributes stored in the kernel.
339 extern char corefilename
[MAXPATHLEN
+1];
340 extern int do_coredump
;
341 extern int sugid_coredump
;
345 extern int do_count_syscalls
;
349 int securelevel
= -1;
355 sysctl_handle_kern_threadname( __unused
struct sysctl_oid
*oidp
, __unused
void *arg1
,
356 __unused
int arg2
, struct sysctl_req
*req
)
359 struct uthread
*ut
= get_bsdthread_info(current_thread());
360 user_addr_t oldp
=0, newp
=0;
361 size_t *oldlenp
=NULL
;
365 oldlenp
= &(req
->oldlen
);
367 newlen
= req
->newlen
;
369 /* We want the current length, and maybe the string itself */
371 /* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */
372 size_t currlen
= MAXTHREADNAMESIZE
- 1;
375 /* use length of current thread name */
376 currlen
= strlen(ut
->pth_name
);
378 if(*oldlenp
< currlen
)
380 /* NOTE - we do not copy the NULL terminator */
382 error
= copyout(ut
->pth_name
,oldp
,currlen
);
387 /* return length of thread name minus NULL terminator (just like strlen) */
388 req
->oldidx
= currlen
;
391 /* We want to set the name to something */
394 if(newlen
> (MAXTHREADNAMESIZE
- 1))
398 ut
->pth_name
= (char*)kalloc( MAXTHREADNAMESIZE
);
402 kernel_debug_string_simple(TRACE_STRING_THREADNAME_PREV
, ut
->pth_name
);
404 bzero(ut
->pth_name
, MAXTHREADNAMESIZE
);
405 error
= copyin(newp
, ut
->pth_name
, newlen
);
410 kernel_debug_string_simple(TRACE_STRING_THREADNAME
, ut
->pth_name
);
416 SYSCTL_PROC(_kern
, KERN_THREADNAME
, threadname
, CTLFLAG_ANYBODY
| CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, 0, sysctl_handle_kern_threadname
,"A","");
420 sysctl_sched_stats(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
422 host_basic_info_data_t hinfo
;
426 mach_msg_type_number_t count
= HOST_BASIC_INFO_COUNT
;
427 struct _processor_statistics_np
*buf
;
430 kret
= host_info((host_t
)BSD_HOST
, HOST_BASIC_INFO
, (host_info_t
)&hinfo
, &count
);
431 if (kret
!= KERN_SUCCESS
) {
435 size
= sizeof(struct _processor_statistics_np
) * (hinfo
.logical_cpu_max
+ 2); /* One for RT Queue, One for Fair Share Queue */
437 if (req
->oldlen
< size
) {
441 MALLOC(buf
, struct _processor_statistics_np
*, size
, M_TEMP
, M_ZERO
| M_WAITOK
);
443 kret
= get_sched_statistics(buf
, &size
);
444 if (kret
!= KERN_SUCCESS
) {
449 error
= sysctl_io_opaque(req
, buf
, size
, &changed
);
455 panic("Sched info changed?!");
462 SYSCTL_PROC(_kern
, OID_AUTO
, sched_stats
, CTLFLAG_LOCKED
, 0, 0, sysctl_sched_stats
, "-", "");
465 sysctl_sched_stats_enable(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, __unused
struct sysctl_req
*req
)
470 if (req
->newlen
!= sizeof(active
)) {
474 res
= copyin(req
->newptr
, &active
, sizeof(active
));
479 return set_sched_stats_active(active
);
482 SYSCTL_PROC(_kern
, OID_AUTO
, sched_stats_enable
, CTLFLAG_LOCKED
| CTLFLAG_WR
, 0, 0, sysctl_sched_stats_enable
, "-", "");
484 extern uint32_t sched_debug_flags
;
485 SYSCTL_INT(_debug
, OID_AUTO
, sched
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &sched_debug_flags
, 0, "scheduler debug");
487 #if (DEBUG || DEVELOPMENT)
488 extern boolean_t doprnt_hide_pointers
;
489 SYSCTL_INT(_debug
, OID_AUTO
, hide_kernel_pointers
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &doprnt_hide_pointers
, 0, "hide kernel pointers from log");
492 extern int get_kernel_symfile(proc_t
, char **);
495 #define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)
497 extern unsigned int nsysent
;
498 extern int syscalls_log
[];
499 extern const char *syscallnames
[];
502 sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
504 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
505 __unused
int *name
= arg1
; /* oid element argument vector */
506 __unused
int namelen
= arg2
; /* number of oid element arguments */
507 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
508 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
509 user_addr_t newp
= req
->newptr
; /* user buffer copy in address */
510 size_t newlen
= req
->newlen
; /* user buffer copy in size */
515 /* valid values passed in:
516 * = 0 means don't keep called counts for each bsd syscall
517 * > 0 means keep called counts for each bsd syscall
518 * = 2 means dump current counts to the system log
519 * = 3 means reset all counts
520 * for example, to dump current counts:
521 * sysctl -w kern.count_calls=2
523 error
= sysctl_int(oldp
, oldlenp
, newp
, newlen
, &tmp
);
529 do_count_syscalls
= 1;
531 else if ( tmp
== 0 || tmp
== 2 || tmp
== 3 ) {
533 for ( i
= 0; i
< nsysent
; i
++ ) {
534 if ( syscalls_log
[i
] != 0 ) {
536 printf("%d calls - name %s \n", syscalls_log
[i
], syscallnames
[i
]);
544 do_count_syscalls
= 1;
548 /* adjust index so we return the right required/consumed amount */
550 req
->oldidx
+= req
->oldlen
;
554 SYSCTL_PROC(_kern
, KERN_COUNT_SYSCALLS
, count_syscalls
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
555 0, /* Pointer argument (arg1) */
556 0, /* Integer argument (arg2) */
557 sysctl_docountsyscalls
, /* Handler function */
558 NULL
, /* Data pointer */
560 #endif /* COUNT_SYSCALLS */
563 * The following sysctl_* functions should not be used
564 * any more, as they can only cope with callers in
565 * user mode: Use new-style
573 * Validate parameters and get old / set new parameters
574 * for an integer-valued sysctl function.
577 sysctl_int(user_addr_t oldp
, size_t *oldlenp
,
578 user_addr_t newp
, size_t newlen
, int *valp
)
582 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
584 if (oldp
&& *oldlenp
< sizeof(int))
586 if (newp
&& newlen
!= sizeof(int))
588 *oldlenp
= sizeof(int);
590 error
= copyout(valp
, oldp
, sizeof(int));
591 if (error
== 0 && newp
) {
592 error
= copyin(newp
, valp
, sizeof(int));
593 AUDIT_ARG(value32
, *valp
);
599 * Validate parameters and get old / set new parameters
600 * for an quad(64bit)-valued sysctl function.
603 sysctl_quad(user_addr_t oldp
, size_t *oldlenp
,
604 user_addr_t newp
, size_t newlen
, quad_t
*valp
)
608 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
610 if (oldp
&& *oldlenp
< sizeof(quad_t
))
612 if (newp
&& newlen
!= sizeof(quad_t
))
614 *oldlenp
= sizeof(quad_t
);
616 error
= copyout(valp
, oldp
, sizeof(quad_t
));
617 if (error
== 0 && newp
)
618 error
= copyin(newp
, valp
, sizeof(quad_t
));
623 sysdoproc_filt_KERN_PROC_PID(proc_t p
, void * arg
)
625 if (p
->p_pid
!= (pid_t
)*(int*)arg
)
632 sysdoproc_filt_KERN_PROC_PGRP(proc_t p
, void * arg
)
634 if (p
->p_pgrpid
!= (pid_t
)*(int*)arg
)
641 sysdoproc_filt_KERN_PROC_TTY(proc_t p
, void * arg
)
646 /* This is very racy but list lock is held.. Hmmm. */
647 if ((p
->p_flag
& P_CONTROLT
) == 0 ||
648 (p
->p_pgrp
== NULL
) || (p
->p_pgrp
->pg_session
== NULL
) ||
649 (tp
= SESSION_TP(p
->p_pgrp
->pg_session
)) == TTY_NULL
||
650 tp
->t_dev
!= (dev_t
)*(int*)arg
)
659 sysdoproc_filt_KERN_PROC_UID(proc_t p
, void * arg
)
661 kauth_cred_t my_cred
;
664 if (p
->p_ucred
== NULL
)
666 my_cred
= kauth_cred_proc_ref(p
);
667 uid
= kauth_cred_getuid(my_cred
);
668 kauth_cred_unref(&my_cred
);
670 if (uid
!= (uid_t
)*(int*)arg
)
678 sysdoproc_filt_KERN_PROC_RUID(proc_t p
, void * arg
)
680 kauth_cred_t my_cred
;
683 if (p
->p_ucred
== NULL
)
685 my_cred
= kauth_cred_proc_ref(p
);
686 ruid
= kauth_cred_getruid(my_cred
);
687 kauth_cred_unref(&my_cred
);
689 if (ruid
!= (uid_t
)*(int*)arg
)
696 * try over estimating by 5 procs
698 #define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
699 struct sysdoproc_args
{
714 sysdoproc_callback(proc_t p
, void *arg
)
716 struct sysdoproc_args
*args
= arg
;
718 if (args
->buflen
>= args
->sizeof_kproc
) {
719 if ((args
->ruidcheck
!= 0) && (sysdoproc_filt_KERN_PROC_RUID(p
, &args
->uidval
) == 0))
720 return (PROC_RETURNED
);
721 if ((args
->uidcheck
!= 0) && (sysdoproc_filt_KERN_PROC_UID(p
, &args
->uidval
) == 0))
722 return (PROC_RETURNED
);
723 if ((args
->ttycheck
!= 0) && (sysdoproc_filt_KERN_PROC_TTY(p
, &args
->uidval
) == 0))
724 return (PROC_RETURNED
);
726 bzero(args
->kprocp
, args
->sizeof_kproc
);
728 fill_user64_proc(p
, args
->kprocp
);
730 fill_user32_proc(p
, args
->kprocp
);
731 int error
= copyout(args
->kprocp
, args
->dp
, args
->sizeof_kproc
);
733 *args
->errorp
= error
;
734 return (PROC_RETURNED_DONE
);
736 args
->dp
+= args
->sizeof_kproc
;
737 args
->buflen
-= args
->sizeof_kproc
;
739 args
->needed
+= args
->sizeof_kproc
;
740 return (PROC_RETURNED
);
743 SYSCTL_NODE(_kern
, KERN_PROC
, proc
, CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, "");
745 sysctl_prochandle SYSCTL_HANDLER_ARGS
747 int cmd
= oidp
->oid_arg2
; /* subcommand for multiple nodes */
748 int *name
= arg1
; /* oid element argument vector */
749 int namelen
= arg2
; /* number of oid element arguments */
750 user_addr_t where
= req
->oldptr
;/* user buffer copy out address */
752 user_addr_t dp
= where
;
754 int buflen
= where
!= USER_ADDR_NULL
? req
->oldlen
: 0;
756 boolean_t is_64_bit
= proc_is64bit(current_proc());
757 struct user32_kinfo_proc user32_kproc
;
758 struct user64_kinfo_proc user_kproc
;
761 int (*filterfn
)(proc_t
, void *) = 0;
762 struct sysdoproc_args args
;
768 if (namelen
!= 1 && !(namelen
== 0 && cmd
== KERN_PROC_ALL
))
772 sizeof_kproc
= sizeof(user_kproc
);
773 kprocp
= &user_kproc
;
775 sizeof_kproc
= sizeof(user32_kproc
);
776 kprocp
= &user32_kproc
;
782 filterfn
= sysdoproc_filt_KERN_PROC_PID
;
786 filterfn
= sysdoproc_filt_KERN_PROC_PGRP
;
805 /* must be kern.proc.<unknown> */
810 args
.buflen
= buflen
;
811 args
.kprocp
= kprocp
;
812 args
.is_64_bit
= is_64_bit
;
814 args
.needed
= needed
;
815 args
.errorp
= &error
;
816 args
.uidcheck
= uidcheck
;
817 args
.ruidcheck
= ruidcheck
;
818 args
.ttycheck
= ttycheck
;
819 args
.sizeof_kproc
= sizeof_kproc
;
821 args
.uidval
= name
[0];
823 success
= proc_iterate((PROC_ALLPROCLIST
| PROC_ZOMBPROCLIST
),
824 sysdoproc_callback
, &args
, filterfn
, name
);
827 * rdar://problem/28433391: if we can't iterate over the processes,
828 * make sure to return an error.
838 needed
= args
.needed
;
840 if (where
!= USER_ADDR_NULL
) {
841 req
->oldlen
= dp
- where
;
842 if (needed
> req
->oldlen
)
845 needed
+= KERN_PROCSLOP
;
846 req
->oldlen
= needed
;
848 /* adjust index so we return the right required/consumed amount */
849 req
->oldidx
+= req
->oldlen
;
854 * We specify the subcommand code for multiple nodes as the 'req->arg2' value
855 * in the sysctl declaration itself, which comes into the handler function
856 * as 'oidp->oid_arg2'.
858 * For these particular sysctls, since they have well known OIDs, we could
859 * have just obtained it from the '((int *)arg1)[0]' parameter, but that would
860 * not demonstrate how to handle multiple sysctls that used OID_AUTO instead
861 * of a well known value with a common handler function. This is desirable,
862 * because we want well known values to "go away" at some future date.
864 * It should be noted that the value of '((int *)arg1)[1]' is used for many
865 * an integer parameter to the subcommand for many of these sysctls; we'd
866 * rather have used '((int *)arg1)[0]' for that, or even better, an element
867 * in a structure passed in as the the 'newp' argument to sysctlbyname(3),
868 * and then use leaf-node permissions enforcement, but that would have
869 * necessitated modifying user space code to correspond to the interface
870 * change, and we are striving for binary backward compatibility here; even
871 * though these are SPI, and not intended for use by user space applications
872 * which are not themselves system tools or libraries, some applications
873 * have erroneously used them.
875 SYSCTL_PROC(_kern_proc
, KERN_PROC_ALL
, all
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
876 0, /* Pointer argument (arg1) */
877 KERN_PROC_ALL
, /* Integer argument (arg2) */
878 sysctl_prochandle
, /* Handler function */
879 NULL
, /* Data is size variant on ILP32/LP64 */
881 SYSCTL_PROC(_kern_proc
, KERN_PROC_PID
, pid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
882 0, /* Pointer argument (arg1) */
883 KERN_PROC_PID
, /* Integer argument (arg2) */
884 sysctl_prochandle
, /* Handler function */
885 NULL
, /* Data is size variant on ILP32/LP64 */
887 SYSCTL_PROC(_kern_proc
, KERN_PROC_TTY
, tty
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
888 0, /* Pointer argument (arg1) */
889 KERN_PROC_TTY
, /* Integer argument (arg2) */
890 sysctl_prochandle
, /* Handler function */
891 NULL
, /* Data is size variant on ILP32/LP64 */
893 SYSCTL_PROC(_kern_proc
, KERN_PROC_PGRP
, pgrp
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
894 0, /* Pointer argument (arg1) */
895 KERN_PROC_PGRP
, /* Integer argument (arg2) */
896 sysctl_prochandle
, /* Handler function */
897 NULL
, /* Data is size variant on ILP32/LP64 */
899 SYSCTL_PROC(_kern_proc
, KERN_PROC_UID
, uid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
900 0, /* Pointer argument (arg1) */
901 KERN_PROC_UID
, /* Integer argument (arg2) */
902 sysctl_prochandle
, /* Handler function */
903 NULL
, /* Data is size variant on ILP32/LP64 */
905 SYSCTL_PROC(_kern_proc
, KERN_PROC_RUID
, ruid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
906 0, /* Pointer argument (arg1) */
907 KERN_PROC_RUID
, /* Integer argument (arg2) */
908 sysctl_prochandle
, /* Handler function */
909 NULL
, /* Data is size variant on ILP32/LP64 */
911 SYSCTL_PROC(_kern_proc
, KERN_PROC_LCID
, lcid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
912 0, /* Pointer argument (arg1) */
913 KERN_PROC_LCID
, /* Integer argument (arg2) */
914 sysctl_prochandle
, /* Handler function */
915 NULL
, /* Data is size variant on ILP32/LP64 */
920 * Fill in non-zero fields of an eproc structure for the specified process.
923 fill_user32_eproc(proc_t p
, struct user32_eproc
*__restrict ep
)
927 struct session
*sessp
;
928 kauth_cred_t my_cred
;
931 sessp
= proc_session(p
);
933 if (pg
!= PGRP_NULL
) {
934 ep
->e_pgid
= p
->p_pgrpid
;
935 ep
->e_jobc
= pg
->pg_jobc
;
936 if (sessp
!= SESSION_NULL
&& sessp
->s_ttyvp
)
937 ep
->e_flag
= EPROC_CTTY
;
939 ep
->e_ppid
= p
->p_ppid
;
941 my_cred
= kauth_cred_proc_ref(p
);
943 /* A fake historical pcred */
944 ep
->e_pcred
.p_ruid
= kauth_cred_getruid(my_cred
);
945 ep
->e_pcred
.p_svuid
= kauth_cred_getsvuid(my_cred
);
946 ep
->e_pcred
.p_rgid
= kauth_cred_getrgid(my_cred
);
947 ep
->e_pcred
.p_svgid
= kauth_cred_getsvgid(my_cred
);
949 /* A fake historical *kauth_cred_t */
950 ep
->e_ucred
.cr_ref
= my_cred
->cr_ref
;
951 ep
->e_ucred
.cr_uid
= kauth_cred_getuid(my_cred
);
952 ep
->e_ucred
.cr_ngroups
= posix_cred_get(my_cred
)->cr_ngroups
;
953 bcopy(posix_cred_get(my_cred
)->cr_groups
,
954 ep
->e_ucred
.cr_groups
, NGROUPS
* sizeof (gid_t
));
956 kauth_cred_unref(&my_cred
);
959 if ((p
->p_flag
& P_CONTROLT
) && (sessp
!= SESSION_NULL
) &&
960 (tp
= SESSION_TP(sessp
))) {
961 ep
->e_tdev
= tp
->t_dev
;
962 ep
->e_tpgid
= sessp
->s_ttypgrpid
;
966 if (sessp
!= SESSION_NULL
) {
967 if (SESS_LEADER(p
, sessp
))
968 ep
->e_flag
|= EPROC_SLEADER
;
976 * Fill in non-zero fields of an LP64 eproc structure for the specified process.
979 fill_user64_eproc(proc_t p
, struct user64_eproc
*__restrict ep
)
983 struct session
*sessp
;
984 kauth_cred_t my_cred
;
987 sessp
= proc_session(p
);
989 if (pg
!= PGRP_NULL
) {
990 ep
->e_pgid
= p
->p_pgrpid
;
991 ep
->e_jobc
= pg
->pg_jobc
;
992 if (sessp
!= SESSION_NULL
&& sessp
->s_ttyvp
)
993 ep
->e_flag
= EPROC_CTTY
;
995 ep
->e_ppid
= p
->p_ppid
;
997 my_cred
= kauth_cred_proc_ref(p
);
999 /* A fake historical pcred */
1000 ep
->e_pcred
.p_ruid
= kauth_cred_getruid(my_cred
);
1001 ep
->e_pcred
.p_svuid
= kauth_cred_getsvuid(my_cred
);
1002 ep
->e_pcred
.p_rgid
= kauth_cred_getrgid(my_cred
);
1003 ep
->e_pcred
.p_svgid
= kauth_cred_getsvgid(my_cred
);
1005 /* A fake historical *kauth_cred_t */
1006 ep
->e_ucred
.cr_ref
= my_cred
->cr_ref
;
1007 ep
->e_ucred
.cr_uid
= kauth_cred_getuid(my_cred
);
1008 ep
->e_ucred
.cr_ngroups
= posix_cred_get(my_cred
)->cr_ngroups
;
1009 bcopy(posix_cred_get(my_cred
)->cr_groups
,
1010 ep
->e_ucred
.cr_groups
, NGROUPS
* sizeof (gid_t
));
1012 kauth_cred_unref(&my_cred
);
1015 if ((p
->p_flag
& P_CONTROLT
) && (sessp
!= SESSION_NULL
) &&
1016 (tp
= SESSION_TP(sessp
))) {
1017 ep
->e_tdev
= tp
->t_dev
;
1018 ep
->e_tpgid
= sessp
->s_ttypgrpid
;
1022 if (sessp
!= SESSION_NULL
) {
1023 if (SESS_LEADER(p
, sessp
))
1024 ep
->e_flag
|= EPROC_SLEADER
;
1025 session_rele(sessp
);
1027 if (pg
!= PGRP_NULL
)
1032 * Fill in an eproc structure for the specified process.
1033 * bzeroed by our caller, so only set non-zero fields.
1036 fill_user32_externproc(proc_t p
, struct user32_extern_proc
*__restrict exp
)
1038 exp
->p_starttime
.tv_sec
= p
->p_start
.tv_sec
;
1039 exp
->p_starttime
.tv_usec
= p
->p_start
.tv_usec
;
1040 exp
->p_flag
= p
->p_flag
;
1041 if (p
->p_lflag
& P_LTRACED
)
1042 exp
->p_flag
|= P_TRACED
;
1043 if (p
->p_lflag
& P_LPPWAIT
)
1044 exp
->p_flag
|= P_PPWAIT
;
1045 if (p
->p_lflag
& P_LEXIT
)
1046 exp
->p_flag
|= P_WEXIT
;
1047 exp
->p_stat
= p
->p_stat
;
1048 exp
->p_pid
= p
->p_pid
;
1049 exp
->p_oppid
= p
->p_oppid
;
1051 exp
->user_stack
= p
->user_stack
;
1052 exp
->p_debugger
= p
->p_debugger
;
1053 exp
->sigwait
= p
->sigwait
;
1055 #ifdef _PROC_HAS_SCHEDINFO_
1056 exp
->p_estcpu
= p
->p_estcpu
;
1057 exp
->p_pctcpu
= p
->p_pctcpu
;
1058 exp
->p_slptime
= p
->p_slptime
;
1060 exp
->p_realtimer
.it_interval
.tv_sec
=
1061 (user32_time_t
)p
->p_realtimer
.it_interval
.tv_sec
;
1062 exp
->p_realtimer
.it_interval
.tv_usec
=
1063 (__int32_t
)p
->p_realtimer
.it_interval
.tv_usec
;
1065 exp
->p_realtimer
.it_value
.tv_sec
=
1066 (user32_time_t
)p
->p_realtimer
.it_value
.tv_sec
;
1067 exp
->p_realtimer
.it_value
.tv_usec
=
1068 (__int32_t
)p
->p_realtimer
.it_value
.tv_usec
;
1070 exp
->p_rtime
.tv_sec
= (user32_time_t
)p
->p_rtime
.tv_sec
;
1071 exp
->p_rtime
.tv_usec
= (__int32_t
)p
->p_rtime
.tv_usec
;
1073 exp
->p_sigignore
= p
->p_sigignore
;
1074 exp
->p_sigcatch
= p
->p_sigcatch
;
1075 exp
->p_priority
= p
->p_priority
;
1076 exp
->p_nice
= p
->p_nice
;
1077 bcopy(&p
->p_comm
, &exp
->p_comm
, MAXCOMLEN
);
1078 exp
->p_xstat
= p
->p_xstat
;
1079 exp
->p_acflag
= p
->p_acflag
;
1083 * Fill in an LP64 version of extern_proc structure for the specified process.
1086 fill_user64_externproc(proc_t p
, struct user64_extern_proc
*__restrict exp
)
1088 exp
->p_starttime
.tv_sec
= p
->p_start
.tv_sec
;
1089 exp
->p_starttime
.tv_usec
= p
->p_start
.tv_usec
;
1090 exp
->p_flag
= p
->p_flag
;
1091 if (p
->p_lflag
& P_LTRACED
)
1092 exp
->p_flag
|= P_TRACED
;
1093 if (p
->p_lflag
& P_LPPWAIT
)
1094 exp
->p_flag
|= P_PPWAIT
;
1095 if (p
->p_lflag
& P_LEXIT
)
1096 exp
->p_flag
|= P_WEXIT
;
1097 exp
->p_stat
= p
->p_stat
;
1098 exp
->p_pid
= p
->p_pid
;
1099 exp
->p_oppid
= p
->p_oppid
;
1101 exp
->user_stack
= p
->user_stack
;
1102 exp
->p_debugger
= p
->p_debugger
;
1103 exp
->sigwait
= p
->sigwait
;
1105 #ifdef _PROC_HAS_SCHEDINFO_
1106 exp
->p_estcpu
= p
->p_estcpu
;
1107 exp
->p_pctcpu
= p
->p_pctcpu
;
1108 exp
->p_slptime
= p
->p_slptime
;
1110 exp
->p_realtimer
.it_interval
.tv_sec
= p
->p_realtimer
.it_interval
.tv_sec
;
1111 exp
->p_realtimer
.it_interval
.tv_usec
= p
->p_realtimer
.it_interval
.tv_usec
;
1113 exp
->p_realtimer
.it_value
.tv_sec
= p
->p_realtimer
.it_value
.tv_sec
;
1114 exp
->p_realtimer
.it_value
.tv_usec
= p
->p_realtimer
.it_value
.tv_usec
;
1116 exp
->p_rtime
.tv_sec
= p
->p_rtime
.tv_sec
;
1117 exp
->p_rtime
.tv_usec
= p
->p_rtime
.tv_usec
;
1119 exp
->p_sigignore
= p
->p_sigignore
;
1120 exp
->p_sigcatch
= p
->p_sigcatch
;
1121 exp
->p_priority
= p
->p_priority
;
1122 exp
->p_nice
= p
->p_nice
;
1123 bcopy(&p
->p_comm
, &exp
->p_comm
, MAXCOMLEN
);
1124 exp
->p_xstat
= p
->p_xstat
;
1125 exp
->p_acflag
= p
->p_acflag
;
1129 fill_user32_proc(proc_t p
, struct user32_kinfo_proc
*__restrict kp
)
1131 /* on a 64 bit kernel, 32 bit users get some truncated information */
1132 fill_user32_externproc(p
, &kp
->kp_proc
);
1133 fill_user32_eproc(p
, &kp
->kp_eproc
);
1137 fill_user64_proc(proc_t p
, struct user64_kinfo_proc
*__restrict kp
)
1139 fill_user64_externproc(p
, &kp
->kp_proc
);
1140 fill_user64_eproc(p
, &kp
->kp_eproc
);
1144 sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
1146 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
1147 int *name
= arg1
; /* oid element argument vector */
1148 int namelen
= arg2
; /* number of oid element arguments */
1149 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1150 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1151 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1152 // size_t newlen = req->newlen; /* user buffer copy in size */
1169 case KERN_KDWRITETR
:
1170 case KERN_KDWRITEMAP
:
1176 case KERN_KDREADCURTHRMAP
:
1177 case KERN_KDSET_TYPEFILTER
:
1178 case KERN_KDBUFWAIT
:
1180 case KERN_KDWRITEMAP_V3
:
1181 case KERN_KDWRITETR_V3
:
1182 ret
= kdbg_control(name
, namelen
, oldp
, oldlenp
);
1189 /* adjust index so we return the right required/consumed amount */
1191 req
->oldidx
+= req
->oldlen
;
1195 SYSCTL_PROC(_kern
, KERN_KDEBUG
, kdebug
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1196 0, /* Pointer argument (arg1) */
1197 0, /* Integer argument (arg2) */
1198 sysctl_kdebug_ops
, /* Handler function */
1199 NULL
, /* Data pointer */
1203 #if !CONFIG_EMBEDDED
1205 * Return the top *sizep bytes of the user stack, or the entire area of the
1206 * user stack down through the saved exec_path, whichever is smaller.
1209 sysctl_doprocargs SYSCTL_HANDLER_ARGS
1211 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
1212 int *name
= arg1
; /* oid element argument vector */
1213 int namelen
= arg2
; /* number of oid element arguments */
1214 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1215 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1216 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1217 // size_t newlen = req->newlen; /* user buffer copy in size */
1220 error
= sysctl_procargsx( name
, namelen
, oldp
, oldlenp
, current_proc(), 0);
1222 /* adjust index so we return the right required/consumed amount */
1224 req
->oldidx
+= req
->oldlen
;
1228 SYSCTL_PROC(_kern
, KERN_PROCARGS
, procargs
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1229 0, /* Pointer argument (arg1) */
1230 0, /* Integer argument (arg2) */
1231 sysctl_doprocargs
, /* Handler function */
1232 NULL
, /* Data pointer */
1234 #endif /* !CONFIG_EMBEDDED */
1237 sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
1239 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
1240 int *name
= arg1
; /* oid element argument vector */
1241 int namelen
= arg2
; /* number of oid element arguments */
1242 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1243 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1244 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1245 // size_t newlen = req->newlen; /* user buffer copy in size */
1248 error
= sysctl_procargsx( name
, namelen
, oldp
, oldlenp
, current_proc(), 1);
1250 /* adjust index so we return the right required/consumed amount */
1252 req
->oldidx
+= req
->oldlen
;
1256 SYSCTL_PROC(_kern
, KERN_PROCARGS2
, procargs2
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1257 0, /* Pointer argument (arg1) */
1258 0, /* Integer argument (arg2) */
1259 sysctl_doprocargs2
, /* Handler function */
1260 NULL
, /* Data pointer */
1264 sysctl_procargsx(int *name
, u_int namelen
, user_addr_t where
,
1265 size_t *sizep
, proc_t cur_proc
, int argc_yes
)
1268 int buflen
= where
!= USER_ADDR_NULL
? *sizep
: 0;
1270 struct _vm_map
*proc_map
;
1273 user_addr_t arg_addr
;
1278 vm_size_t alloc_size
= 0;
1279 vm_offset_t copy_start
, copy_end
;
1282 kauth_cred_t my_cred
;
1290 buflen
-= sizeof(int); /* reserve first word to return argc */
1292 /* we only care about buflen when where (oldp from sysctl) is not NULL. */
1293 /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
1294 /* is not NULL then the caller wants us to return the length needed to */
1295 /* hold the data we would return */
1296 if (where
!= USER_ADDR_NULL
&& (buflen
<= 0 || buflen
> ARG_MAX
)) {
1302 * Lookup process by pid
1311 * Copy the top N bytes of the stack.
1312 * On all machines we have so far, the stack grows
1315 * If the user expects no more than N bytes of
1316 * argument list, use that as a guess for the
1320 if (!p
->user_stack
) {
1325 if (where
== USER_ADDR_NULL
) {
1326 /* caller only wants to know length of proc args data */
1327 if (sizep
== NULL
) {
1332 size
= p
->p_argslen
;
1335 size
+= sizeof(int);
1338 * old PROCARGS will return the executable's path and plus some
1339 * extra space for work alignment and data tags
1341 size
+= PATH_MAX
+ (6 * sizeof(int));
1343 size
+= (size
& (sizeof(int) - 1)) ? (sizeof(int) - (size
& (sizeof(int) - 1))) : 0;
1348 my_cred
= kauth_cred_proc_ref(p
);
1349 uid
= kauth_cred_getuid(my_cred
);
1350 kauth_cred_unref(&my_cred
);
1352 if ((uid
!= kauth_cred_getuid(kauth_cred_get()))
1353 && suser(kauth_cred_get(), &cur_proc
->p_acflag
)) {
1358 if ((u_int
)arg_size
> p
->p_argslen
)
1359 arg_size
= round_page(p
->p_argslen
);
1361 arg_addr
= p
->user_stack
- arg_size
;
1364 * Before we can block (any VM code), make another
1365 * reference to the map to keep it alive. We do
1366 * that by getting a reference on the task itself.
1374 /* save off argc before releasing the proc */
1377 argslen
= p
->p_argslen
;
1379 * Once we have a task reference we can convert that into a
1380 * map reference, which we will use in the calls below. The
1381 * task/process may change its map after we take this reference
1382 * (see execve), but the worst that will happen then is a return
1383 * of stale info (which is always a possibility).
1385 task_reference(task
);
1387 proc_map
= get_task_map_reference(task
);
1388 task_deallocate(task
);
1390 if (proc_map
== NULL
)
1393 alloc_size
= round_page(arg_size
);
1394 ret
= kmem_alloc(kernel_map
, ©_start
, alloc_size
, VM_KERN_MEMORY_BSD
);
1395 if (ret
!= KERN_SUCCESS
) {
1396 vm_map_deallocate(proc_map
);
1399 bzero((void *)copy_start
, alloc_size
);
1401 copy_end
= round_page(copy_start
+ arg_size
);
1403 if( vm_map_copyin(proc_map
, (vm_map_address_t
)arg_addr
,
1404 (vm_map_size_t
)arg_size
, FALSE
, &tmp
) != KERN_SUCCESS
) {
1405 vm_map_deallocate(proc_map
);
1406 kmem_free(kernel_map
, copy_start
,
1407 round_page(arg_size
));
1412 * Now that we've done the copyin from the process'
1413 * map, we can release the reference to it.
1415 vm_map_deallocate(proc_map
);
1417 if( vm_map_copy_overwrite(kernel_map
,
1418 (vm_map_address_t
)copy_start
,
1419 tmp
, FALSE
) != KERN_SUCCESS
) {
1420 kmem_free(kernel_map
, copy_start
,
1421 round_page(arg_size
));
1422 vm_map_copy_discard(tmp
);
1426 if (arg_size
> argslen
) {
1427 data
= (caddr_t
) (copy_end
- argslen
);
1430 data
= (caddr_t
) (copy_end
- arg_size
);
1435 * When these sysctls were introduced, the first string in the strings
1436 * section was just the bare path of the executable. However, for security
1437 * reasons we now prefix this string with executable_path= so it can be
1438 * parsed getenv style. To avoid binary compatability issues with exising
1439 * callers of this sysctl, we strip it off here if present.
1440 * (rdar://problem/13746466)
1442 #define EXECUTABLE_KEY "executable_path="
1443 if (strncmp(EXECUTABLE_KEY
, data
, strlen(EXECUTABLE_KEY
)) == 0){
1444 data
+= strlen(EXECUTABLE_KEY
);
1445 size
-= strlen(EXECUTABLE_KEY
);
1449 /* Put processes argc as the first word in the copyout buffer */
1450 suword(where
, argc
);
1451 error
= copyout(data
, (where
+ sizeof(int)), size
);
1452 size
+= sizeof(int);
1454 error
= copyout(data
, where
, size
);
1457 * Make the old PROCARGS work to return the executable's path
1458 * But, only if there is enough space in the provided buffer
1460 * on entry: data [possibily] points to the beginning of the path
1462 * Note: we keep all pointers&sizes aligned to word boundries
1464 if ( (! error
) && (buflen
> 0 && (u_int
)buflen
> argslen
) )
1466 int binPath_sz
, alignedBinPath_sz
= 0;
1467 int extraSpaceNeeded
, addThis
;
1468 user_addr_t placeHere
;
1469 char * str
= (char *) data
;
1472 /* Some apps are really bad about messing up their stacks
1473 So, we have to be extra careful about getting the length
1474 of the executing binary. If we encounter an error, we bail.
1477 /* Limit ourselves to PATH_MAX paths */
1478 if ( max_len
> PATH_MAX
) max_len
= PATH_MAX
;
1482 while ( (binPath_sz
< max_len
-1) && (*str
++ != 0) )
1485 /* If we have a NUL terminator, copy it, too */
1486 if (binPath_sz
< max_len
-1) binPath_sz
+= 1;
1488 /* Pre-Flight the space requiremnts */
1490 /* Account for the padding that fills out binPath to the next word */
1491 alignedBinPath_sz
+= (binPath_sz
& (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz
& (sizeof(int)-1))) : 0;
1493 placeHere
= where
+ size
;
1495 /* Account for the bytes needed to keep placeHere word aligned */
1496 addThis
= (placeHere
& (sizeof(int)-1)) ? (sizeof(int)-(placeHere
& (sizeof(int)-1))) : 0;
1498 /* Add up all the space that is needed */
1499 extraSpaceNeeded
= alignedBinPath_sz
+ addThis
+ binPath_sz
+ (4 * sizeof(int));
1501 /* is there is room to tack on argv[0]? */
1502 if ( (buflen
& ~(sizeof(int)-1)) >= ( argslen
+ extraSpaceNeeded
))
1504 placeHere
+= addThis
;
1505 suword(placeHere
, 0);
1506 placeHere
+= sizeof(int);
1507 suword(placeHere
, 0xBFFF0000);
1508 placeHere
+= sizeof(int);
1509 suword(placeHere
, 0);
1510 placeHere
+= sizeof(int);
1511 error
= copyout(data
, placeHere
, binPath_sz
);
1514 placeHere
+= binPath_sz
;
1515 suword(placeHere
, 0);
1516 size
+= extraSpaceNeeded
;
1522 if (copy_start
!= (vm_offset_t
) 0) {
1523 kmem_free(kernel_map
, copy_start
, copy_end
- copy_start
);
1529 if (where
!= USER_ADDR_NULL
)
1536 * Max number of concurrent aio requests
1540 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1542 int new_value
, changed
;
1543 int error
= sysctl_io_number(req
, aio_max_requests
, sizeof(int), &new_value
, &changed
);
1545 /* make sure the system-wide limit is greater than the per process limit */
1546 if (new_value
>= aio_max_requests_per_process
&& new_value
<= AIO_MAX_REQUESTS
)
1547 aio_max_requests
= new_value
;
1556 * Max number of concurrent aio requests per process
1560 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1562 int new_value
, changed
;
1563 int error
= sysctl_io_number(req
, aio_max_requests_per_process
, sizeof(int), &new_value
, &changed
);
1565 /* make sure per process limit is less than the system-wide limit */
1566 if (new_value
<= aio_max_requests
&& new_value
>= AIO_LISTIO_MAX
)
1567 aio_max_requests_per_process
= new_value
;
1576 * Max number of async IO worker threads
1580 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1582 int new_value
, changed
;
1583 int error
= sysctl_io_number(req
, aio_worker_threads
, sizeof(int), &new_value
, &changed
);
1585 /* we only allow an increase in the number of worker threads */
1586 if (new_value
> aio_worker_threads
) {
1587 _aio_create_worker_threads((new_value
- aio_worker_threads
));
1588 aio_worker_threads
= new_value
;
1598 * System-wide limit on the max number of processes
1602 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1604 int new_value
, changed
;
1605 int error
= sysctl_io_number(req
, maxproc
, sizeof(int), &new_value
, &changed
);
1607 AUDIT_ARG(value32
, new_value
);
1608 /* make sure the system-wide limit is less than the configured hard
1609 limit set at kernel compilation */
1610 if (new_value
<= hard_maxproc
&& new_value
> 0)
1611 maxproc
= new_value
;
1618 SYSCTL_STRING(_kern
, KERN_OSTYPE
, ostype
,
1619 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1621 SYSCTL_STRING(_kern
, KERN_OSRELEASE
, osrelease
,
1622 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1624 SYSCTL_INT(_kern
, KERN_OSREV
, osrevision
,
1625 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1626 (int *)NULL
, BSD
, "");
1627 SYSCTL_STRING(_kern
, KERN_VERSION
, version
,
1628 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1630 SYSCTL_STRING(_kern
, OID_AUTO
, uuid
,
1631 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1632 &kernel_uuid_string
[0], 0, "");
1634 SYSCTL_STRING(_kern
, OID_AUTO
, osbuildconfig
,
1635 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
| CTLFLAG_MASKED
,
1636 &osbuild_config
[0], 0, "");
1645 int debug_kprint_syscall
= 0;
1646 char debug_kprint_syscall_process
[MAXCOMLEN
+1];
1648 /* Thread safe: bits and string value are not used to reclaim state */
1649 SYSCTL_INT (_debug
, OID_AUTO
, kprint_syscall
,
1650 CTLFLAG_RW
| CTLFLAG_LOCKED
, &debug_kprint_syscall
, 0, "kprintf syscall tracing");
1651 SYSCTL_STRING(_debug
, OID_AUTO
, kprint_syscall_process
,
1652 CTLFLAG_RW
| CTLFLAG_LOCKED
, debug_kprint_syscall_process
, sizeof(debug_kprint_syscall_process
),
1653 "name of process for kprintf syscall tracing");
1655 int debug_kprint_current_process(const char **namep
)
1657 struct proc
*p
= current_proc();
1663 if (debug_kprint_syscall_process
[0]) {
1664 /* user asked to scope tracing to a particular process name */
1665 if(0 == strncmp(debug_kprint_syscall_process
,
1666 p
->p_comm
, sizeof(debug_kprint_syscall_process
))) {
1667 /* no value in telling the user that we traced what they asked */
1668 if(namep
) *namep
= NULL
;
1676 /* trace all processes. Tell user what we traced */
1685 /* PR-5293665: need to use a callback function for kern.osversion to set
1686 * osversion in IORegistry */
1689 sysctl_osversion(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
)
1693 rval
= sysctl_handle_string(oidp
, arg1
, arg2
, req
);
1696 IORegistrySetOSBuildVersion((char *)arg1
);
1702 SYSCTL_PROC(_kern
, KERN_OSVERSION
, osversion
,
1703 CTLFLAG_RW
| CTLFLAG_KERN
| CTLTYPE_STRING
| CTLFLAG_LOCKED
,
1704 osversion
, 256 /* OSVERSIZE*/,
1705 sysctl_osversion
, "A", "");
1707 static uint64_t osproductversion_string
[48];
1710 sysctl_osproductversion(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
)
1712 if (req
->newptr
!= 0) {
1714 * Can only ever be set by launchd, and only once at boot.
1716 if (req
->p
->p_pid
!= 1 || osproductversion_string
[0] != '\0') {
1721 return sysctl_handle_string(oidp
, arg1
, arg2
, req
);
1724 SYSCTL_PROC(_kern
, OID_AUTO
, osproductversion
,
1725 CTLFLAG_RW
| CTLFLAG_KERN
| CTLTYPE_STRING
| CTLFLAG_LOCKED
,
1726 osproductversion_string
, sizeof(osproductversion_string
),
1727 sysctl_osproductversion
, "A", "The ProductVersion from SystemVersion.plist");
1729 static uint64_t osvariant_status
= 0;
1732 sysctl_osvariant_status(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
)
1734 if (req
->newptr
!= 0) {
1736 * Can only ever be set by launchd, and only once at boot.
1738 if (req
->p
->p_pid
!= 1 || osvariant_status
!= 0) {
1743 return sysctl_handle_quad(oidp
, arg1
, arg2
, req
);
1746 SYSCTL_PROC(_kern
, OID_AUTO
, osvariant_status
,
1747 CTLFLAG_RW
| CTLTYPE_QUAD
| CTLFLAG_LOCKED
| CTLFLAG_MASKED
,
1748 &osvariant_status
, sizeof(osvariant_status
),
1749 sysctl_osvariant_status
, "Q", "Opaque flags used to cache OS variant information");
1752 sysctl_sysctl_bootargs
1753 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1756 /* BOOT_LINE_LENGTH */
1758 size_t boot_args_len
= 256;
1760 size_t boot_args_len
= 1024;
1762 char buf
[boot_args_len
];
1764 strlcpy(buf
, PE_boot_args(), boot_args_len
);
1765 error
= sysctl_io_string(req
, buf
, boot_args_len
, 0, NULL
);
1769 SYSCTL_PROC(_kern
, OID_AUTO
, bootargs
,
1770 CTLFLAG_LOCKED
| CTLFLAG_RD
| CTLFLAG_KERN
| CTLTYPE_STRING
,
1772 sysctl_sysctl_bootargs
, "A", "bootargs");
1775 sysctl_kernelcacheuuid(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
)
1778 if (kernelcache_uuid_valid
) {
1779 rval
= sysctl_handle_string(oidp
, arg1
, arg2
, req
);
1784 SYSCTL_PROC(_kern
, OID_AUTO
, kernelcacheuuid
,
1785 CTLFLAG_RD
| CTLFLAG_KERN
| CTLTYPE_STRING
| CTLFLAG_LOCKED
,
1786 kernelcache_uuid_string
, sizeof(kernelcache_uuid_string
),
1787 sysctl_kernelcacheuuid
, "A", "");
1789 SYSCTL_INT(_kern
, KERN_MAXFILES
, maxfiles
,
1790 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1792 SYSCTL_INT(_kern
, KERN_ARGMAX
, argmax
,
1793 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1794 (int *)NULL
, ARG_MAX
, "");
1795 SYSCTL_INT(_kern
, KERN_POSIX1
, posix1version
,
1796 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1797 (int *)NULL
, _POSIX_VERSION
, "");
1798 SYSCTL_INT(_kern
, KERN_NGROUPS
, ngroups
,
1799 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1800 (int *)NULL
, NGROUPS_MAX
, "");
1801 SYSCTL_INT(_kern
, KERN_JOB_CONTROL
, job_control
,
1802 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1803 (int *)NULL
, 1, "");
1804 #if 1 /* _POSIX_SAVED_IDS from <unistd.h> */
1805 SYSCTL_INT(_kern
, KERN_SAVED_IDS
, saved_ids
,
1806 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1807 (int *)NULL
, 1, "");
1809 SYSCTL_INT(_kern
, KERN_SAVED_IDS
, saved_ids
,
1810 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1813 SYSCTL_INT(_kern
, OID_AUTO
, num_files
,
1814 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1816 SYSCTL_COMPAT_INT(_kern
, OID_AUTO
, num_vnodes
,
1817 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1819 SYSCTL_INT(_kern
, OID_AUTO
, num_tasks
,
1820 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1822 SYSCTL_INT(_kern
, OID_AUTO
, num_threads
,
1823 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1824 &thread_max
, 0, "");
1825 SYSCTL_INT(_kern
, OID_AUTO
, num_taskthreads
,
1826 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1827 &task_threadmax
, 0, "");
1830 sysctl_maxvnodes (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1832 int oldval
= desiredvnodes
;
1833 int error
= sysctl_io_number(req
, desiredvnodes
, sizeof(int), &desiredvnodes
, NULL
);
1835 if (oldval
!= desiredvnodes
) {
1836 resize_namecache(desiredvnodes
);
1842 SYSCTL_INT(_kern
, OID_AUTO
, namecache_disabled
,
1843 CTLFLAG_RW
| CTLFLAG_LOCKED
,
1844 &nc_disabled
, 0, "");
1846 SYSCTL_PROC(_kern
, KERN_MAXVNODES
, maxvnodes
,
1847 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1848 0, 0, sysctl_maxvnodes
, "I", "");
1850 SYSCTL_PROC(_kern
, KERN_MAXPROC
, maxproc
,
1851 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1852 0, 0, sysctl_maxproc
, "I", "");
1854 SYSCTL_PROC(_kern
, KERN_AIOMAX
, aiomax
,
1855 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1856 0, 0, sysctl_aiomax
, "I", "");
1858 SYSCTL_PROC(_kern
, KERN_AIOPROCMAX
, aioprocmax
,
1859 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1860 0, 0, sysctl_aioprocmax
, "I", "");
1862 SYSCTL_PROC(_kern
, KERN_AIOTHREADS
, aiothreads
,
1863 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1864 0, 0, sysctl_aiothreads
, "I", "");
1866 #if (DEVELOPMENT || DEBUG)
1867 extern int sched_smt_balance
;
1868 SYSCTL_INT(_kern
, OID_AUTO
, sched_smt_balance
,
1869 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1870 &sched_smt_balance
, 0, "");
1871 extern int sched_allow_rt_smt
;
1872 SYSCTL_INT(_kern
, OID_AUTO
, sched_allow_rt_smt
,
1873 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1874 &sched_allow_rt_smt
, 0, "");
1875 #if __arm__ || __arm64__
1876 extern uint32_t perfcontrol_requested_recommended_cores
;
1877 SYSCTL_UINT(_kern
, OID_AUTO
, sched_recommended_cores
,
1878 CTLFLAG_KERN
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1879 &perfcontrol_requested_recommended_cores
, 0, "");
1881 /* Scheduler perfcontrol callouts sysctls */
1882 SYSCTL_DECL(_kern_perfcontrol_callout
);
1883 SYSCTL_NODE(_kern
, OID_AUTO
, perfcontrol_callout
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0,
1884 "scheduler perfcontrol callouts");
1886 extern int perfcontrol_callout_stats_enabled
;
1887 SYSCTL_INT(_kern_perfcontrol_callout
, OID_AUTO
, stats_enabled
,
1888 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1889 &perfcontrol_callout_stats_enabled
, 0, "");
1891 extern uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type
,
1892 perfcontrol_callout_stat_t stat
);
1894 /* On-Core Callout */
1896 sysctl_perfcontrol_callout_stat
1897 (__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
)
1899 perfcontrol_callout_stat_t stat
= (perfcontrol_callout_stat_t
)arg1
;
1900 perfcontrol_callout_type_t type
= (perfcontrol_callout_type_t
)arg2
;
1901 return sysctl_io_number(req
, (int)perfcontrol_callout_stat_avg(type
, stat
),
1902 sizeof(int), NULL
, NULL
);
1905 SYSCTL_PROC(_kern_perfcontrol_callout
, OID_AUTO
, oncore_instr
,
1906 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1907 (void *)PERFCONTROL_STAT_INSTRS
, PERFCONTROL_CALLOUT_ON_CORE
,
1908 sysctl_perfcontrol_callout_stat
, "I", "");
1909 SYSCTL_PROC(_kern_perfcontrol_callout
, OID_AUTO
, oncore_cycles
,
1910 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1911 (void *)PERFCONTROL_STAT_CYCLES
, PERFCONTROL_CALLOUT_ON_CORE
,
1912 sysctl_perfcontrol_callout_stat
, "I", "");
1913 SYSCTL_PROC(_kern_perfcontrol_callout
, OID_AUTO
, offcore_instr
,
1914 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1915 (void *)PERFCONTROL_STAT_INSTRS
, PERFCONTROL_CALLOUT_OFF_CORE
,
1916 sysctl_perfcontrol_callout_stat
, "I", "");
1917 SYSCTL_PROC(_kern_perfcontrol_callout
, OID_AUTO
, offcore_cycles
,
1918 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1919 (void *)PERFCONTROL_STAT_CYCLES
, PERFCONTROL_CALLOUT_OFF_CORE
,
1920 sysctl_perfcontrol_callout_stat
, "I", "");
1921 SYSCTL_PROC(_kern_perfcontrol_callout
, OID_AUTO
, context_instr
,
1922 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1923 (void *)PERFCONTROL_STAT_INSTRS
, PERFCONTROL_CALLOUT_CONTEXT
,
1924 sysctl_perfcontrol_callout_stat
, "I", "");
1925 SYSCTL_PROC(_kern_perfcontrol_callout
, OID_AUTO
, context_cycles
,
1926 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1927 (void *)PERFCONTROL_STAT_CYCLES
, PERFCONTROL_CALLOUT_CONTEXT
,
1928 sysctl_perfcontrol_callout_stat
, "I", "");
1929 SYSCTL_PROC(_kern_perfcontrol_callout
, OID_AUTO
, update_instr
,
1930 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1931 (void *)PERFCONTROL_STAT_INSTRS
, PERFCONTROL_CALLOUT_STATE_UPDATE
,
1932 sysctl_perfcontrol_callout_stat
, "I", "");
1933 SYSCTL_PROC(_kern_perfcontrol_callout
, OID_AUTO
, update_cycles
,
1934 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1935 (void *)PERFCONTROL_STAT_CYCLES
, PERFCONTROL_CALLOUT_STATE_UPDATE
,
1936 sysctl_perfcontrol_callout_stat
, "I", "");
1938 #endif /* __arm__ || __arm64__ */
1939 #endif /* (DEVELOPMENT || DEBUG) */
1943 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1945 int new_value
, changed
;
1946 int error
= sysctl_io_number(req
, securelevel
, sizeof(int), &new_value
, &changed
);
1948 if (!(new_value
< securelevel
&& req
->p
->p_pid
!= 1)) {
1950 securelevel
= new_value
;
1959 SYSCTL_PROC(_kern
, KERN_SECURELVL
, securelevel
,
1960 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1961 0, 0, sysctl_securelvl
, "I", "");
1966 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1969 error
= sysctl_io_string(req
, domainname
, sizeof(domainname
), 0, &changed
);
1971 domainnamelen
= strlen(domainname
);
1976 SYSCTL_PROC(_kern
, KERN_DOMAINNAME
, nisdomainname
,
1977 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1978 0, 0, sysctl_domainname
, "A", "");
1980 SYSCTL_COMPAT_INT(_kern
, KERN_HOSTID
, hostid
,
1981 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1986 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1989 error
= sysctl_io_string(req
, hostname
, sizeof(hostname
), 1, &changed
);
1991 hostnamelen
= req
->newlen
;
1997 SYSCTL_PROC(_kern
, KERN_HOSTNAME
, hostname
,
1998 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1999 0, 0, sysctl_hostname
, "A", "");
2003 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2005 /* Original code allowed writing, I'm copying this, although this all makes
2006 no sense to me. Besides, this sysctl is never used. */
2007 return sysctl_io_string(req
, &req
->p
->p_name
[0], (2*MAXCOMLEN
+1), 1, NULL
);
2010 SYSCTL_PROC(_kern
, KERN_PROCNAME
, procname
,
2011 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
2012 0, 0, sysctl_procname
, "A", "");
2014 SYSCTL_INT(_kern
, KERN_SPECULATIVE_READS
, speculative_reads_disabled
,
2015 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2016 &speculative_reads_disabled
, 0, "");
2018 SYSCTL_UINT(_kern
, OID_AUTO
, preheat_max_bytes
,
2019 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2020 &preheat_max_bytes
, 0, "");
2022 SYSCTL_UINT(_kern
, OID_AUTO
, preheat_min_bytes
,
2023 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2024 &preheat_min_bytes
, 0, "");
2026 SYSCTL_UINT(_kern
, OID_AUTO
, speculative_prefetch_max
,
2027 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2028 &speculative_prefetch_max
, 0, "");
2030 SYSCTL_UINT(_kern
, OID_AUTO
, speculative_prefetch_max_iosize
,
2031 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2032 &speculative_prefetch_max_iosize
, 0, "");
2034 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_free_target
,
2035 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2036 &vm_page_free_target
, 0, "");
2038 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_free_min
,
2039 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2040 &vm_page_free_min
, 0, "");
2042 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_free_reserved
,
2043 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2044 &vm_page_free_reserved
, 0, "");
2046 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_speculative_percentage
,
2047 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2048 &vm_pageout_state
.vm_page_speculative_percentage
, 0, "");
2050 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_speculative_q_age_ms
,
2051 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2052 &vm_pageout_state
.vm_page_speculative_q_age_ms
, 0, "");
2054 SYSCTL_UINT(_kern
, OID_AUTO
, vm_max_delayed_work_limit
,
2055 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2056 &vm_max_delayed_work_limit
, 0, "");
2058 SYSCTL_UINT(_kern
, OID_AUTO
, vm_max_batch
,
2059 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2060 &vm_max_batch
, 0, "");
2062 SYSCTL_STRING(_kern
, OID_AUTO
, bootsessionuuid
,
2063 CTLFLAG_RD
| CTLFLAG_LOCKED
,
2064 &bootsessionuuid_string
, sizeof(bootsessionuuid_string
) , "");
2068 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2071 boottime_timeval(&tv
);
2072 struct proc
*p
= req
->p
;
2074 if (proc_is64bit(p
)) {
2075 struct user64_timeval t
= {};
2076 t
.tv_sec
= tv
.tv_sec
;
2077 t
.tv_usec
= tv
.tv_usec
;
2078 return sysctl_io_opaque(req
, &t
, sizeof(t
), NULL
);
2080 struct user32_timeval t
= {};
2081 t
.tv_sec
= tv
.tv_sec
;
2082 t
.tv_usec
= tv
.tv_usec
;
2083 return sysctl_io_opaque(req
, &t
, sizeof(t
), NULL
);
2087 SYSCTL_PROC(_kern
, KERN_BOOTTIME
, boottime
,
2088 CTLTYPE_STRUCT
| CTLFLAG_KERN
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2089 0, 0, sysctl_boottime
, "S,timeval", "");
2093 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2096 int error
= get_kernel_symfile(req
->p
, &str
);
2099 return sysctl_io_string(req
, str
, 0, 0, NULL
);
2103 SYSCTL_PROC(_kern
, KERN_SYMFILE
, symfile
,
2104 CTLTYPE_STRING
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2105 0, 0, sysctl_symfile
, "A", "");
2110 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2112 return sysctl_io_number(req
, netboot_root(), sizeof(int), NULL
, NULL
);
2115 SYSCTL_PROC(_kern
, KERN_NETBOOT
, netboot
,
2116 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2117 0, 0, sysctl_netboot
, "I", "");
2120 #ifdef CONFIG_IMGSRC_ACCESS
2122 * Legacy--act as if only one layer of nesting is possible.
2126 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2128 vfs_context_t ctx
= vfs_context_current();
2132 if (!vfs_context_issuser(ctx
)) {
2136 if (imgsrc_rootvnodes
[0] == NULL
) {
2140 result
= vnode_getwithref(imgsrc_rootvnodes
[0]);
2145 devvp
= vnode_mount(imgsrc_rootvnodes
[0])->mnt_devvp
;
2146 result
= vnode_getwithref(devvp
);
2151 result
= sysctl_io_number(req
, vnode_specrdev(devvp
), sizeof(dev_t
), NULL
, NULL
);
2155 vnode_put(imgsrc_rootvnodes
[0]);
2159 SYSCTL_PROC(_kern
, OID_AUTO
, imgsrcdev
,
2160 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2161 0, 0, sysctl_imgsrcdev
, "I", "");
2165 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2168 struct imgsrc_info info
[MAX_IMAGEBOOT_NESTING
] = {}; /* 2 for now, no problem */
2172 if (imgsrc_rootvnodes
[0] == NULLVP
) {
2176 for (i
= 0; i
< MAX_IMAGEBOOT_NESTING
; i
++) {
2178 * Go get the root vnode.
2180 rvp
= imgsrc_rootvnodes
[i
];
2181 if (rvp
== NULLVP
) {
2185 error
= vnode_get(rvp
);
2191 * For now, no getting at a non-local volume.
2193 devvp
= vnode_mount(rvp
)->mnt_devvp
;
2194 if (devvp
== NULL
) {
2199 error
= vnode_getwithref(devvp
);
2208 info
[i
].ii_dev
= vnode_specrdev(devvp
);
2209 info
[i
].ii_flags
= 0;
2210 info
[i
].ii_height
= i
;
2211 bzero(info
[i
].ii_reserved
, sizeof(info
[i
].ii_reserved
));
2217 return sysctl_io_opaque(req
, info
, i
* sizeof(info
[0]), NULL
);
2220 SYSCTL_PROC(_kern
, OID_AUTO
, imgsrcinfo
,
2221 CTLTYPE_OPAQUE
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2222 0, 0, sysctl_imgsrcinfo
, "I", "");
2224 #endif /* CONFIG_IMGSRC_ACCESS */
2227 SYSCTL_DECL(_kern_timer
);
2228 SYSCTL_NODE(_kern
, OID_AUTO
, timer
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, "timer");
2231 SYSCTL_INT(_kern_timer
, OID_AUTO
, coalescing_enabled
,
2232 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2233 &mach_timer_coalescing_enabled
, 0, "");
2235 SYSCTL_QUAD(_kern_timer
, OID_AUTO
, deadline_tracking_bin_1
,
2236 CTLFLAG_RW
| CTLFLAG_LOCKED
,
2237 &timer_deadline_tracking_bin_1
, "");
2238 SYSCTL_QUAD(_kern_timer
, OID_AUTO
, deadline_tracking_bin_2
,
2239 CTLFLAG_RW
| CTLFLAG_LOCKED
,
2240 &timer_deadline_tracking_bin_2
, "");
2242 SYSCTL_DECL(_kern_timer_longterm
);
2243 SYSCTL_NODE(_kern_timer
, OID_AUTO
, longterm
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, "longterm");
2246 /* Must match definition in osfmk/kern/timer_call.c */
2249 ENQUEUES
, DEQUEUES
, ESCALATES
, SCANS
, PREEMPTS
,
2250 LATENCY
, LATENCY_MIN
, LATENCY_MAX
, SCAN_LIMIT
, SCAN_INTERVAL
, PAUSES
2252 extern uint64_t timer_sysctl_get(int);
2253 extern int timer_sysctl_set(int, uint64_t);
2257 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2259 int oid
= (int)arg1
;
2260 uint64_t value
= timer_sysctl_get(oid
);
2265 error
= sysctl_io_number(req
, value
, sizeof(value
), &new_value
, &changed
);
2267 error
= timer_sysctl_set(oid
, new_value
);
2272 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, threshold
,
2273 CTLTYPE_QUAD
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2274 (void *) THRESHOLD
, 0, sysctl_timer
, "Q", "");
2275 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, scan_limit
,
2276 CTLTYPE_QUAD
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2277 (void *) SCAN_LIMIT
, 0, sysctl_timer
, "Q", "");
2278 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, scan_interval
,
2279 CTLTYPE_QUAD
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2280 (void *) SCAN_INTERVAL
, 0, sysctl_timer
, "Q", "");
2282 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, qlen
,
2283 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2284 (void *) QCOUNT
, 0, sysctl_timer
, "Q", "");
2285 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, scan_pauses
,
2286 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2287 (void *) PAUSES
, 0, sysctl_timer
, "Q", "");
2290 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, enqueues
,
2291 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2292 (void *) ENQUEUES
, 0, sysctl_timer
, "Q", "");
2293 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, dequeues
,
2294 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2295 (void *) DEQUEUES
, 0, sysctl_timer
, "Q", "");
2296 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, escalates
,
2297 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2298 (void *) ESCALATES
, 0, sysctl_timer
, "Q", "");
2299 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, scans
,
2300 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2301 (void *) SCANS
, 0, sysctl_timer
, "Q", "");
2302 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, preempts
,
2303 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2304 (void *) PREEMPTS
, 0, sysctl_timer
, "Q", "");
2305 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, latency
,
2306 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2307 (void *) LATENCY
, 0, sysctl_timer
, "Q", "");
2308 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, latency_min
,
2309 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2310 (void *) LATENCY_MIN
, 0, sysctl_timer
, "Q", "");
2311 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, latency_max
,
2312 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2313 (void *) LATENCY_MAX
, 0, sysctl_timer
, "Q", "");
2318 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2320 return sysctl_io_number(req
, (int)req
->p
->user_stack
, sizeof(int), NULL
, NULL
);
2323 SYSCTL_PROC(_kern
, KERN_USRSTACK32
, usrstack
,
2324 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2325 0, 0, sysctl_usrstack
, "I", "");
2329 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2331 return sysctl_io_number(req
, req
->p
->user_stack
, sizeof(req
->p
->user_stack
), NULL
, NULL
);
2334 SYSCTL_PROC(_kern
, KERN_USRSTACK64
, usrstack64
,
2335 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2336 0, 0, sysctl_usrstack64
, "Q", "");
2340 SYSCTL_STRING(_kern
, KERN_COREFILE
, corefile
,
2341 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2342 corefilename
, sizeof(corefilename
), "");
2346 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2348 #ifdef SECURE_KERNEL
2352 int new_value
, changed
;
2353 int error
= sysctl_io_number(req
, do_coredump
, sizeof(int), &new_value
, &changed
);
2355 if ((new_value
== 0) || (new_value
== 1))
2356 do_coredump
= new_value
;
2364 SYSCTL_PROC(_kern
, KERN_COREDUMP
, coredump
,
2365 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2366 0, 0, sysctl_coredump
, "I", "");
2369 sysctl_suid_coredump
2370 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2372 #ifdef SECURE_KERNEL
2376 int new_value
, changed
;
2377 int error
= sysctl_io_number(req
, sugid_coredump
, sizeof(int), &new_value
, &changed
);
2379 if ((new_value
== 0) || (new_value
== 1))
2380 sugid_coredump
= new_value
;
2388 SYSCTL_PROC(_kern
, KERN_SUGID_COREDUMP
, sugid_coredump
,
2389 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2390 0, 0, sysctl_suid_coredump
, "I", "");
2392 #endif /* CONFIG_COREDUMP */
2396 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2398 struct proc
*p
= req
->p
;
2399 int new_value
, changed
;
2400 int error
= sysctl_io_number(req
, (req
->p
->p_lflag
& P_LDELAYTERM
)? 1: 0, sizeof(int), &new_value
, &changed
);
2404 req
->p
->p_lflag
|= P_LDELAYTERM
;
2406 req
->p
->p_lflag
&= ~P_LDELAYTERM
;
2412 SYSCTL_PROC(_kern
, KERN_PROCDELAYTERM
, delayterm
,
2413 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2414 0, 0, sysctl_delayterm
, "I", "");
2419 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2421 struct proc
*p
= req
->p
;
2423 int new_value
, old_value
, changed
;
2426 ut
= get_bsdthread_info(current_thread());
2428 if (ut
->uu_flag
& UT_RAGE_VNODES
)
2429 old_value
= KERN_RAGE_THREAD
;
2430 else if (p
->p_lflag
& P_LRAGE_VNODES
)
2431 old_value
= KERN_RAGE_PROC
;
2435 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
2438 switch (new_value
) {
2439 case KERN_RAGE_PROC
:
2441 p
->p_lflag
|= P_LRAGE_VNODES
;
2444 case KERN_UNRAGE_PROC
:
2446 p
->p_lflag
&= ~P_LRAGE_VNODES
;
2450 case KERN_RAGE_THREAD
:
2451 ut
->uu_flag
|= UT_RAGE_VNODES
;
2453 case KERN_UNRAGE_THREAD
:
2454 ut
= get_bsdthread_info(current_thread());
2455 ut
->uu_flag
&= ~UT_RAGE_VNODES
;
2462 SYSCTL_PROC(_kern
, KERN_RAGEVNODE
, rage_vnode
,
2463 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
2464 0, 0, sysctl_rage_vnode
, "I", "");
2466 /* XXX move this interface into libproc and remove this sysctl */
2468 sysctl_setthread_cpupercent
2469 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2471 int new_value
, old_value
;
2473 kern_return_t kret
= KERN_SUCCESS
;
2474 uint8_t percent
= 0;
2482 if ((error
= sysctl_io_number(req
, old_value
, sizeof(old_value
), &new_value
, NULL
)) != 0)
2485 percent
= new_value
& 0xff; /* low 8 bytes for perent */
2486 ms_refill
= (new_value
>> 8) & 0xffffff; /* upper 24bytes represent ms refill value */
2491 * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present.
2493 if ((kret
= thread_set_cpulimit(THREAD_CPULIMIT_BLOCK
, percent
, ms_refill
* (int)NSEC_PER_MSEC
)) != 0)
2499 SYSCTL_PROC(_kern
, OID_AUTO
, setthread_cpupercent
,
2500 CTLTYPE_INT
| CTLFLAG_WR
| CTLFLAG_ANYBODY
,
2501 0, 0, sysctl_setthread_cpupercent
, "I", "set thread cpu percentage limit");
2505 sysctl_kern_check_openevt
2506 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2508 struct proc
*p
= req
->p
;
2509 int new_value
, old_value
, changed
;
2512 if (p
->p_flag
& P_CHECKOPENEVT
) {
2513 old_value
= KERN_OPENEVT_PROC
;
2518 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
2521 switch (new_value
) {
2522 case KERN_OPENEVT_PROC
:
2523 OSBitOrAtomic(P_CHECKOPENEVT
, &p
->p_flag
);
2526 case KERN_UNOPENEVT_PROC
:
2527 OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT
), &p
->p_flag
);
2537 SYSCTL_PROC(_kern
, KERN_CHECKOPENEVT
, check_openevt
, CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
2538 0, 0, sysctl_kern_check_openevt
, "I", "set the per-process check-open-evt flag");
2544 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2546 #ifdef SECURE_KERNEL
2550 int new_value
, changed
;
2553 error
= sysctl_io_number(req
, nx_enabled
, sizeof(nx_enabled
), &new_value
, &changed
);
2558 #if defined(__i386__) || defined(__x86_64__)
2560 * Only allow setting if NX is supported on the chip
2562 if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD
))
2565 nx_enabled
= new_value
;
2568 #endif /* SECURE_KERNEL */
2573 SYSCTL_PROC(_kern
, KERN_NX_PROTECTION
, nx
,
2574 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2575 0, 0, sysctl_nx
, "I", "");
2579 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2581 if (proc_is64bit(req
->p
)) {
2582 struct user64_loadavg loadinfo64
= {};
2583 fill_loadavg64(&averunnable
, &loadinfo64
);
2584 return sysctl_io_opaque(req
, &loadinfo64
, sizeof(loadinfo64
), NULL
);
2586 struct user32_loadavg loadinfo32
= {};
2587 fill_loadavg32(&averunnable
, &loadinfo32
);
2588 return sysctl_io_opaque(req
, &loadinfo32
, sizeof(loadinfo32
), NULL
);
2592 SYSCTL_PROC(_vm
, VM_LOADAVG
, loadavg
,
2593 CTLTYPE_STRUCT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2594 0, 0, sysctl_loadavg
, "S,loadavg", "");
2597 * Note: Thread safe; vm_map_lock protects in vm_toggle_entry_reuse()
2600 sysctl_vm_toggle_address_reuse(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
,
2601 __unused
int arg2
, struct sysctl_req
*req
)
2603 int old_value
=0, new_value
=0, error
=0;
2605 if(vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE
, &old_value
))
2607 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, NULL
);
2609 return (vm_toggle_entry_reuse(new_value
, NULL
));
2614 SYSCTL_PROC(_debug
, OID_AUTO
, toggle_address_reuse
, CTLFLAG_ANYBODY
| CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, 0, sysctl_vm_toggle_address_reuse
,"I","");
2616 #ifdef CONFIG_XNUPOST
2618 extern int xnupost_export_testdata(void *outp
, uint32_t size
, uint32_t *lenp
);
2619 extern uint32_t xnupost_get_estimated_testdata_size(void);
2621 extern int xnupost_reset_all_tests(void);
2624 sysctl_handle_xnupost_get_tests SYSCTL_HANDLER_ARGS
2626 /* fixup unused arguments warnings */
2627 __unused
int _oa2
= arg2
;
2628 __unused
void * _oa1
= arg1
;
2629 __unused
struct sysctl_oid
* _oidp
= oidp
;
2632 user_addr_t oldp
= 0;
2633 user_addr_t newp
= 0;
2634 uint32_t usedbytes
= 0;
2642 if ((void *)oldp
== NULL
) {
2643 /* return estimated size for second call where info can be placed */
2644 req
->oldidx
= xnupost_get_estimated_testdata_size();
2646 error
= xnupost_export_testdata((void *)oldp
, req
->oldlen
, &usedbytes
);
2647 req
->oldidx
= usedbytes
;
2656 CTLFLAG_MASKED
| CTLFLAG_ANYBODY
| CTLTYPE_OPAQUE
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2659 sysctl_handle_xnupost_get_tests
,
2661 "read xnupost test data in kernel");
2664 sysctl_debug_xnupost_ctl SYSCTL_HANDLER_ARGS
2666 /* fixup unused arguments warnings */
2667 __unused
int _oa2
= arg2
;
2668 __unused
void * _oa1
= arg1
;
2669 __unused
struct sysctl_oid
* _oidp
= oidp
;
2673 * INPUT: ACTION, PARAM1, PARAM2, PARAM3
2674 * OUTPUT: RESULTCODE, ADDITIONAL DATA
2676 int32_t outval
[ARRCOUNT
] = {0};
2677 int32_t input
[ARRCOUNT
] = {0};
2678 int32_t out_size
= sizeof(outval
);
2679 int32_t in_size
= sizeof(input
);
2682 /* if this is NULL call to find out size, send out size info */
2687 /* pull in provided value from userspace */
2688 error
= SYSCTL_IN(req
, &input
[0], in_size
);
2692 if (input
[0] == XTCTL_RESET_TESTDATA
) {
2693 outval
[0] = xnupost_reset_all_tests();
2698 error
= SYSCTL_OUT(req
, &outval
[0], out_size
);
2705 CTLFLAG_MASKED
| CTLFLAG_ANYBODY
| CTLTYPE_OPAQUE
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2708 sysctl_debug_xnupost_ctl
,
2710 "xnupost control for kernel testing");
2712 extern void test_oslog_handleOSLogCtl(int32_t * in
, int32_t * out
, int32_t arraycount
);
2715 sysctl_debug_test_oslog_ctl(__unused
struct sysctl_oid
* oidp
, __unused
void * arg1
, __unused
int arg2
, struct sysctl_req
* req
)
2718 int32_t outval
[ARRCOUNT
] = {0};
2719 int32_t input
[ARRCOUNT
] = {0};
2720 int32_t size_outval
= sizeof(outval
);
2721 int32_t size_inval
= sizeof(input
);
2724 /* if this is NULL call to find out size, send out size info */
2726 error
= SYSCTL_OUT(req
, &outval
[0], size_outval
);
2730 /* pull in provided value from userspace */
2731 error
= SYSCTL_IN(req
, &input
[0], size_inval
);
2735 test_oslog_handleOSLogCtl(input
, outval
, ARRCOUNT
);
2737 error
= SYSCTL_OUT(req
, &outval
[0], size_outval
);
2745 CTLFLAG_MASKED
| CTLFLAG_ANYBODY
| CTLTYPE_OPAQUE
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2748 sysctl_debug_test_oslog_ctl
,
2750 "testing oslog in kernel");
2752 #include <mach/task.h>
2753 #include <mach/semaphore.h>
2755 extern lck_grp_t
* sysctl_debug_test_stackshot_owner_grp
; /* used for both mutexes and rwlocks */
2756 extern lck_mtx_t
* sysctl_debug_test_stackshot_owner_init_mtx
; /* used to protect lck_*_init */
2758 /* This is a sysctl for testing collection of owner info on a lock in kernel space. A multi-threaded
2759 * test from userland sets this sysctl in such a way that a thread blocks in kernel mode, and a
2760 * stackshot is taken to see if the owner of the lock can be identified.
2762 * We can't return to userland with a kernel lock held, so be sure to unlock before we leave.
2763 * the semaphores allow us to artificially create cases where the lock is being held and the
2764 * thread is hanging / taking a long time to do something. */
2766 volatile char sysctl_debug_test_stackshot_mtx_inited
= 0;
2767 semaphore_t sysctl_debug_test_stackshot_mutex_sem
;
2768 lck_mtx_t sysctl_debug_test_stackshot_owner_lck
;
2770 #define SYSCTL_DEBUG_MTX_ACQUIRE_WAIT 1
2771 #define SYSCTL_DEBUG_MTX_ACQUIRE_NOWAIT 2
2772 #define SYSCTL_DEBUG_MTX_SIGNAL 3
2773 #define SYSCTL_DEBUG_MTX_TEARDOWN 4
2776 sysctl_debug_test_stackshot_mutex_owner(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2778 long long option
= -1;
2779 /* if the user tries to read the sysctl, we tell them what the address of the lock is (to test against stackshot's output) */
2780 long long mtx_unslid_addr
= (long long)VM_KERNEL_UNSLIDE_OR_PERM(&sysctl_debug_test_stackshot_owner_lck
);
2781 int error
= sysctl_io_number(req
, mtx_unslid_addr
, sizeof(long long), (void*)&option
, NULL
);
2783 lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx
);
2784 if (!sysctl_debug_test_stackshot_mtx_inited
) {
2785 lck_mtx_init(&sysctl_debug_test_stackshot_owner_lck
,
2786 sysctl_debug_test_stackshot_owner_grp
,
2788 semaphore_create(kernel_task
,
2789 &sysctl_debug_test_stackshot_mutex_sem
,
2790 SYNC_POLICY_FIFO
, 0);
2791 sysctl_debug_test_stackshot_mtx_inited
= 1;
2793 lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx
);
2797 case SYSCTL_DEBUG_MTX_ACQUIRE_NOWAIT
:
2798 lck_mtx_lock(&sysctl_debug_test_stackshot_owner_lck
);
2799 lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_lck
);
2801 case SYSCTL_DEBUG_MTX_ACQUIRE_WAIT
:
2802 lck_mtx_lock(&sysctl_debug_test_stackshot_owner_lck
);
2803 semaphore_wait(sysctl_debug_test_stackshot_mutex_sem
);
2804 lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_lck
);
2806 case SYSCTL_DEBUG_MTX_SIGNAL
:
2807 semaphore_signal(sysctl_debug_test_stackshot_mutex_sem
);
2809 case SYSCTL_DEBUG_MTX_TEARDOWN
:
2810 lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx
);
2812 lck_mtx_destroy(&sysctl_debug_test_stackshot_owner_lck
,
2813 sysctl_debug_test_stackshot_owner_grp
);
2814 semaphore_destroy(kernel_task
,
2815 sysctl_debug_test_stackshot_mutex_sem
);
2816 sysctl_debug_test_stackshot_mtx_inited
= 0;
2818 lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx
);
2820 case -1: /* user just wanted to read the value, so do nothing */
2830 /* we can't return to userland with a kernel rwlock held, so be sure to unlock before we leave.
2831 * the semaphores allow us to artificially create cases where the lock is being held and the
2832 * thread is hanging / taking a long time to do something. */
2837 CTLFLAG_MASKED
| CTLFLAG_ANYBODY
| CTLTYPE_QUAD
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2840 sysctl_debug_test_stackshot_mutex_owner
,
2842 "Testing mutex owner in kernel");
2844 volatile char sysctl_debug_test_stackshot_rwlck_inited
= 0;
2845 lck_rw_t sysctl_debug_test_stackshot_owner_rwlck
;
2846 semaphore_t sysctl_debug_test_stackshot_rwlck_sem
;
2848 #define SYSCTL_DEBUG_KRWLCK_RACQUIRE_NOWAIT 1
2849 #define SYSCTL_DEBUG_KRWLCK_RACQUIRE_WAIT 2
2850 #define SYSCTL_DEBUG_KRWLCK_WACQUIRE_NOWAIT 3
2851 #define SYSCTL_DEBUG_KRWLCK_WACQUIRE_WAIT 4
2852 #define SYSCTL_DEBUG_KRWLCK_SIGNAL 5
2853 #define SYSCTL_DEBUG_KRWLCK_TEARDOWN 6
2856 sysctl_debug_test_stackshot_rwlck_owner(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2858 long long option
= -1;
2859 /* if the user tries to read the sysctl, we tell them what the address of the lock is
2860 * (to test against stackshot's output) */
2861 long long rwlck_unslid_addr
= (long long)VM_KERNEL_UNSLIDE_OR_PERM(&sysctl_debug_test_stackshot_owner_rwlck
);
2862 int error
= sysctl_io_number(req
, rwlck_unslid_addr
, sizeof(long long), (void*)&option
, NULL
);
2864 lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx
);
2865 if (!sysctl_debug_test_stackshot_rwlck_inited
) {
2866 lck_rw_init(&sysctl_debug_test_stackshot_owner_rwlck
,
2867 sysctl_debug_test_stackshot_owner_grp
,
2869 semaphore_create(kernel_task
,
2870 &sysctl_debug_test_stackshot_rwlck_sem
,
2873 sysctl_debug_test_stackshot_rwlck_inited
= 1;
2875 lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx
);
2879 case SYSCTL_DEBUG_KRWLCK_RACQUIRE_NOWAIT
:
2880 lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck
, LCK_RW_TYPE_SHARED
);
2881 lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck
, LCK_RW_TYPE_SHARED
);
2883 case SYSCTL_DEBUG_KRWLCK_RACQUIRE_WAIT
:
2884 lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck
, LCK_RW_TYPE_SHARED
);
2885 semaphore_wait(sysctl_debug_test_stackshot_rwlck_sem
);
2886 lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck
, LCK_RW_TYPE_SHARED
);
2888 case SYSCTL_DEBUG_KRWLCK_WACQUIRE_NOWAIT
:
2889 lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck
, LCK_RW_TYPE_EXCLUSIVE
);
2890 lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck
, LCK_RW_TYPE_EXCLUSIVE
);
2892 case SYSCTL_DEBUG_KRWLCK_WACQUIRE_WAIT
:
2893 lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck
, LCK_RW_TYPE_EXCLUSIVE
);
2894 semaphore_wait(sysctl_debug_test_stackshot_rwlck_sem
);
2895 lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck
, LCK_RW_TYPE_EXCLUSIVE
);
2897 case SYSCTL_DEBUG_KRWLCK_SIGNAL
:
2898 semaphore_signal(sysctl_debug_test_stackshot_rwlck_sem
);
2900 case SYSCTL_DEBUG_KRWLCK_TEARDOWN
:
2901 lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx
);
2903 lck_rw_destroy(&sysctl_debug_test_stackshot_owner_rwlck
,
2904 sysctl_debug_test_stackshot_owner_grp
);
2905 semaphore_destroy(kernel_task
,
2906 sysctl_debug_test_stackshot_rwlck_sem
);
2907 sysctl_debug_test_stackshot_rwlck_inited
= 0;
2909 lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx
);
2911 case -1: /* user just wanted to read the value, so do nothing */
2924 test_RWLockOwnerCtl
,
2925 CTLFLAG_MASKED
| CTLFLAG_ANYBODY
| CTLTYPE_QUAD
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2928 sysctl_debug_test_stackshot_rwlck_owner
,
2930 "Testing rwlock owner in kernel");
2931 #endif /* !CONFIG_XNUPOST */
2935 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2938 uint64_t swap_total
;
2939 uint64_t swap_avail
;
2940 vm_size_t swap_pagesize
;
2941 boolean_t swap_encrypted
;
2942 struct xsw_usage xsu
= {};
2944 error
= macx_swapinfo(&swap_total
,
2951 xsu
.xsu_total
= swap_total
;
2952 xsu
.xsu_avail
= swap_avail
;
2953 xsu
.xsu_used
= swap_total
- swap_avail
;
2954 xsu
.xsu_pagesize
= swap_pagesize
;
2955 xsu
.xsu_encrypted
= swap_encrypted
;
2956 return sysctl_io_opaque(req
, &xsu
, sizeof(xsu
), NULL
);
2961 SYSCTL_PROC(_vm
, VM_SWAPUSAGE
, swapusage
,
2962 CTLTYPE_STRUCT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2963 0, 0, sysctl_swapusage
, "S,xsw_usage", "");
2966 extern void vm_page_reactivate_all_throttled(void);
2967 extern void memorystatus_disable_freeze(void);
2970 sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
2972 #pragma unused(arg1, arg2)
2973 int error
, val
= memorystatus_freeze_enabled
? 1 : 0;
2976 error
= sysctl_handle_int(oidp
, &val
, 0, req
);
2977 if (error
|| !req
->newptr
)
2980 if (! VM_CONFIG_FREEZER_SWAP_IS_ACTIVE
) {
2981 //assert(req->newptr);
2982 printf("Failed attempt to set vm.freeze_enabled sysctl\n");
2987 * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue.
2989 disabled
= (!val
&& memorystatus_freeze_enabled
);
2991 memorystatus_freeze_enabled
= val
? TRUE
: FALSE
;
2994 vm_page_reactivate_all_throttled();
2995 memorystatus_disable_freeze();
3001 SYSCTL_PROC(_vm
, OID_AUTO
, freeze_enabled
, CTLTYPE_INT
|CTLFLAG_RW
|CTLFLAG_ANYBODY
, &memorystatus_freeze_enabled
, 0, sysctl_freeze_enabled
, "I", "");
3002 #endif /* CONFIG_FREEZE */
#if DEVELOPMENT || DEBUG
extern int vm_num_swap_files_config;
extern int vm_num_swap_files;
extern lck_mtx_t vm_swap_data_lock;
#define VM_MAX_SWAP_FILE_NUM 100

/*
 * debug.num_swap_files_configured: read/write the configured maximum number
 * of swap files.  Writes require an active swap or freezer-swap path, may
 * not go below the number of files that already exist, and are capped at
 * VM_MAX_SWAP_FILE_NUM.
 * NOTE(review): error assignments and exit path reconstructed — lines were
 * dropped from the mangled source; verify the final return value against
 * kern_sysctl.c.
 */
STATIC int
sysctl_vm_config_num_swap_files SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error = 0, val = vm_num_swap_files_config;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr) {
		goto out;
	}

	if (!VM_CONFIG_SWAP_IS_ACTIVE && !VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
		printf("Swap is disabled\n");
		error = EINVAL;
		goto out;
	}

	lck_mtx_lock(&vm_swap_data_lock);

	if (val < vm_num_swap_files) {
		printf("Cannot configure fewer swap files than already exist.\n");
		error = EINVAL;
		lck_mtx_unlock(&vm_swap_data_lock);
		goto out;
	}

	if (val > VM_MAX_SWAP_FILE_NUM) {
		printf("Capping number of swap files to upper bound.\n");
		val = VM_MAX_SWAP_FILE_NUM;
	}

	vm_num_swap_files_config = val;
	lck_mtx_unlock(&vm_swap_data_lock);
out:

	return (error);
}

SYSCTL_PROC(_debug, OID_AUTO, num_swap_files_configured,
    CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_vm_config_num_swap_files, "I", "");
#endif /* DEVELOPMENT || DEBUG */
3051 /* this kernel does NOT implement shared_region_make_private_np() */
3052 SYSCTL_INT(_kern
, KERN_SHREG_PRIVATIZABLE
, shreg_private
,
3053 CTLFLAG_RD
| CTLFLAG_LOCKED
,
3054 (int *)NULL
, 0, "");
3057 fetch_process_cputype(
3061 cpu_type_t
*cputype
)
3063 proc_t p
= PROC_NULL
;
3070 else if (namelen
== 1) {
3071 p
= proc_find(name
[0]);
3080 ret
= cpu_type() & ~CPU_ARCH_MASK
;
3081 if (IS_64BIT_PROCESS(p
)) {
3082 ret
|= CPU_ARCH_ABI64
;
3094 sysctl_sysctl_native(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
,
3095 struct sysctl_req
*req
)
3098 cpu_type_t proc_cputype
= 0;
3099 if ((error
= fetch_process_cputype(req
->p
, (int *)arg1
, arg2
, &proc_cputype
)) != 0)
3102 if ((proc_cputype
& ~CPU_ARCH_MASK
) != (cpu_type() & ~CPU_ARCH_MASK
))
3104 return SYSCTL_OUT(req
, &res
, sizeof(res
));
3106 SYSCTL_PROC(_sysctl
, OID_AUTO
, proc_native
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, 0, sysctl_sysctl_native
,"I","proc_native");
3109 sysctl_sysctl_cputype(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
,
3110 struct sysctl_req
*req
)
3113 cpu_type_t proc_cputype
= 0;
3114 if ((error
= fetch_process_cputype(req
->p
, (int *)arg1
, arg2
, &proc_cputype
)) != 0)
3116 return SYSCTL_OUT(req
, &proc_cputype
, sizeof(proc_cputype
));
3118 SYSCTL_PROC(_sysctl
, OID_AUTO
, proc_cputype
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, 0, sysctl_sysctl_cputype
,"I","proc_cputype");
3122 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
3124 return sysctl_io_number(req
, boothowto
& RB_SAFEBOOT
? 1 : 0, sizeof(int), NULL
, NULL
);
3127 SYSCTL_PROC(_kern
, KERN_SAFEBOOT
, safeboot
,
3128 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
3129 0, 0, sysctl_safeboot
, "I", "");
3133 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
3135 return sysctl_io_number(req
, boothowto
& RB_SINGLE
? 1 : 0, sizeof(int), NULL
, NULL
);
3138 SYSCTL_PROC(_kern
, OID_AUTO
, singleuser
,
3139 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
3140 0, 0, sysctl_singleuser
, "I", "");
3142 STATIC
int sysctl_minimalboot
3143 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
3145 return sysctl_io_number(req
, minimalboot
, sizeof(int), NULL
, NULL
);
3148 SYSCTL_PROC(_kern
, OID_AUTO
, minimalboot
,
3149 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
3150 0, 0, sysctl_minimalboot
, "I", "");
3153 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
3155 extern boolean_t affinity_sets_enabled
;
3156 extern int affinity_sets_mapping
;
3158 SYSCTL_INT (_kern
, OID_AUTO
, affinity_sets_enabled
,
3159 CTLFLAG_RW
| CTLFLAG_LOCKED
, (int *) &affinity_sets_enabled
, 0, "hinting enabled");
3160 SYSCTL_INT (_kern
, OID_AUTO
, affinity_sets_mapping
,
3161 CTLFLAG_RW
| CTLFLAG_LOCKED
, &affinity_sets_mapping
, 0, "mapping policy");
3164 * Boolean indicating if KASLR is active.
3168 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
3172 slide
= vm_kernel_slide
? 1 : 0;
3174 return sysctl_io_number( req
, slide
, sizeof(int), NULL
, NULL
);
3177 SYSCTL_PROC(_kern
, OID_AUTO
, slide
,
3178 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
3179 0, 0, sysctl_slide
, "I", "");
3182 * Limit on total memory users can wire.
3184 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
3186 * vm_user_wire_limit - per address space limit on wired memory. This puts a cap on the process's rlimit value.
3188 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
3191 * All values are in bytes.
3194 vm_map_size_t vm_global_no_user_wire_amount
;
3195 vm_map_size_t vm_global_user_wire_limit
;
3196 vm_map_size_t vm_user_wire_limit
;
3199 * There needs to be a more automatic/elegant way to do this
3201 #if defined(__ARM__)
3202 SYSCTL_INT(_vm
, OID_AUTO
, global_no_user_wire_amount
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_global_no_user_wire_amount
, 0, "");
3203 SYSCTL_INT(_vm
, OID_AUTO
, global_user_wire_limit
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_global_user_wire_limit
, 0, "");
3204 SYSCTL_INT(_vm
, OID_AUTO
, user_wire_limit
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_user_wire_limit
, 0, "");
3206 SYSCTL_QUAD(_vm
, OID_AUTO
, global_no_user_wire_amount
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_global_no_user_wire_amount
, "");
3207 SYSCTL_QUAD(_vm
, OID_AUTO
, global_user_wire_limit
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_global_user_wire_limit
, "");
3208 SYSCTL_QUAD(_vm
, OID_AUTO
, user_wire_limit
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_user_wire_limit
, "");
3211 extern int vm_map_copy_overwrite_aligned_src_not_internal
;
3212 extern int vm_map_copy_overwrite_aligned_src_not_symmetric
;
3213 extern int vm_map_copy_overwrite_aligned_src_large
;
3214 SYSCTL_INT(_vm
, OID_AUTO
, vm_copy_src_not_internal
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_map_copy_overwrite_aligned_src_not_internal
, 0, "");
3215 SYSCTL_INT(_vm
, OID_AUTO
, vm_copy_src_not_symmetric
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_map_copy_overwrite_aligned_src_not_symmetric
, 0, "");
3216 SYSCTL_INT(_vm
, OID_AUTO
, vm_copy_src_large
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_map_copy_overwrite_aligned_src_large
, 0, "");
3219 extern uint32_t vm_page_external_count
;
3221 SYSCTL_INT(_vm
, OID_AUTO
, vm_page_external_count
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_page_external_count
, 0, "");
3223 SYSCTL_INT(_vm
, OID_AUTO
, vm_page_filecache_min
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_pageout_state
.vm_page_filecache_min
, 0, "");
3224 SYSCTL_INT(_vm
, OID_AUTO
, vm_page_xpmapped_min
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_pageout_state
.vm_page_xpmapped_min
, 0, "");
3226 #if DEVELOPMENT || DEBUG
3227 SYSCTL_INT(_vm
, OID_AUTO
, vm_page_filecache_min_divisor
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_pageout_state
.vm_page_filecache_min_divisor
, 0, "");
3228 SYSCTL_INT(_vm
, OID_AUTO
, vm_page_xpmapped_min_divisor
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_pageout_state
.vm_page_xpmapped_min_divisor
, 0, "");
3231 extern int vm_compressor_mode
;
3232 extern int vm_compressor_is_active
;
3233 extern int vm_compressor_available
;
3234 extern uint32_t vm_ripe_target_age
;
3235 extern uint32_t swapout_target_age
;
3236 extern int64_t compressor_bytes_used
;
3237 extern int64_t c_segment_input_bytes
;
3238 extern int64_t c_segment_compressed_bytes
;
3239 extern uint32_t compressor_eval_period_in_msecs
;
3240 extern uint32_t compressor_sample_min_in_msecs
;
3241 extern uint32_t compressor_sample_max_in_msecs
;
3242 extern uint32_t compressor_thrashing_threshold_per_10msecs
;
3243 extern uint32_t compressor_thrashing_min_per_10msecs
;
3244 extern uint32_t vm_compressor_time_thread
;
3246 #if DEVELOPMENT || DEBUG
3247 extern uint32_t vm_compressor_minorcompact_threshold_divisor
;
3248 extern uint32_t vm_compressor_majorcompact_threshold_divisor
;
3249 extern uint32_t vm_compressor_unthrottle_threshold_divisor
;
3250 extern uint32_t vm_compressor_catchup_threshold_divisor
;
3252 extern uint32_t vm_compressor_minorcompact_threshold_divisor_overridden
;
3253 extern uint32_t vm_compressor_majorcompact_threshold_divisor_overridden
;
3254 extern uint32_t vm_compressor_unthrottle_threshold_divisor_overridden
;
3255 extern uint32_t vm_compressor_catchup_threshold_divisor_overridden
;
3257 extern vmct_stats_t vmct_stats
;
3261 sysctl_minorcompact_threshold_divisor(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
3263 int new_value
, changed
;
3264 int error
= sysctl_io_number(req
, vm_compressor_minorcompact_threshold_divisor
, sizeof(int), &new_value
, &changed
);
3267 vm_compressor_minorcompact_threshold_divisor
= new_value
;
3268 vm_compressor_minorcompact_threshold_divisor_overridden
= 1;
3273 SYSCTL_PROC(_vm
, OID_AUTO
, compressor_minorcompact_threshold_divisor
,
3274 CTLTYPE_INT
| CTLFLAG_LOCKED
| CTLFLAG_RW
,
3275 0, 0, sysctl_minorcompact_threshold_divisor
, "I", "");
3279 sysctl_majorcompact_threshold_divisor(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
3281 int new_value
, changed
;
3282 int error
= sysctl_io_number(req
, vm_compressor_majorcompact_threshold_divisor
, sizeof(int), &new_value
, &changed
);
3285 vm_compressor_majorcompact_threshold_divisor
= new_value
;
3286 vm_compressor_majorcompact_threshold_divisor_overridden
= 1;
3291 SYSCTL_PROC(_vm
, OID_AUTO
, compressor_majorcompact_threshold_divisor
,
3292 CTLTYPE_INT
| CTLFLAG_LOCKED
| CTLFLAG_RW
,
3293 0, 0, sysctl_majorcompact_threshold_divisor
, "I", "");
3297 sysctl_unthrottle_threshold_divisor(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
3299 int new_value
, changed
;
3300 int error
= sysctl_io_number(req
, vm_compressor_unthrottle_threshold_divisor
, sizeof(int), &new_value
, &changed
);
3303 vm_compressor_unthrottle_threshold_divisor
= new_value
;
3304 vm_compressor_unthrottle_threshold_divisor_overridden
= 1;
3309 SYSCTL_PROC(_vm
, OID_AUTO
, compressor_unthrottle_threshold_divisor
,
3310 CTLTYPE_INT
| CTLFLAG_LOCKED
| CTLFLAG_RW
,
3311 0, 0, sysctl_unthrottle_threshold_divisor
, "I", "");
3315 sysctl_catchup_threshold_divisor(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
3317 int new_value
, changed
;
3318 int error
= sysctl_io_number(req
, vm_compressor_catchup_threshold_divisor
, sizeof(int), &new_value
, &changed
);
3321 vm_compressor_catchup_threshold_divisor
= new_value
;
3322 vm_compressor_catchup_threshold_divisor_overridden
= 1;
3327 SYSCTL_PROC(_vm
, OID_AUTO
, compressor_catchup_threshold_divisor
,
3328 CTLTYPE_INT
| CTLFLAG_LOCKED
| CTLFLAG_RW
,
3329 0, 0, sysctl_catchup_threshold_divisor
, "I", "");
3333 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_input_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &c_segment_input_bytes
, "");
3334 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_compressed_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &c_segment_compressed_bytes
, "");
3335 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_bytes_used
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_bytes_used
, "");
3337 SYSCTL_INT(_vm
, OID_AUTO
, compressor_mode
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_compressor_mode
, 0, "");
3338 SYSCTL_INT(_vm
, OID_AUTO
, compressor_is_active
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_compressor_is_active
, 0, "");
3339 SYSCTL_INT(_vm
, OID_AUTO
, compressor_swapout_target_age
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &swapout_target_age
, 0, "");
3340 SYSCTL_INT(_vm
, OID_AUTO
, compressor_available
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_compressor_available
, 0, "");
3342 SYSCTL_INT(_vm
, OID_AUTO
, vm_ripe_target_age_in_secs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_ripe_target_age
, 0, "");
3344 SYSCTL_INT(_vm
, OID_AUTO
, compressor_eval_period_in_msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_eval_period_in_msecs
, 0, "");
3345 SYSCTL_INT(_vm
, OID_AUTO
, compressor_sample_min_in_msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_sample_min_in_msecs
, 0, "");
3346 SYSCTL_INT(_vm
, OID_AUTO
, compressor_sample_max_in_msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_sample_max_in_msecs
, 0, "");
3347 SYSCTL_INT(_vm
, OID_AUTO
, compressor_thrashing_threshold_per_10msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_thrashing_threshold_per_10msecs
, 0, "");
3348 SYSCTL_INT(_vm
, OID_AUTO
, compressor_thrashing_min_per_10msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_thrashing_min_per_10msecs
, 0, "");
3350 SYSCTL_STRING(_vm
, OID_AUTO
, swapfileprefix
, CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
, swapfilename
, sizeof(swapfilename
) - SWAPFILENAME_INDEX_LEN
, "");
3352 SYSCTL_INT(_vm
, OID_AUTO
, compressor_timing_enabled
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_time_thread
, 0, "");
3354 #if DEVELOPMENT || DEBUG
3355 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_thread_runtime0
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vmct_stats
.vmct_runtimes
[0], "");
3356 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_thread_runtime1
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vmct_stats
.vmct_runtimes
[1], "");
3358 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_threads_total
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vmct_stats
.vmct_cthreads_total
, "");
3360 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_thread_pages0
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vmct_stats
.vmct_pages
[0], "");
3361 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_thread_pages1
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vmct_stats
.vmct_pages
[1], "");
3363 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_thread_iterations0
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vmct_stats
.vmct_iterations
[0], "");
3364 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_thread_iterations1
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vmct_stats
.vmct_iterations
[1], "");
3366 SYSCTL_INT(_vm
, OID_AUTO
, compressor_thread_minpages0
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vmct_stats
.vmct_minpages
[0], 0, "");
3367 SYSCTL_INT(_vm
, OID_AUTO
, compressor_thread_minpages1
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vmct_stats
.vmct_minpages
[1], 0, "");
3369 SYSCTL_INT(_vm
, OID_AUTO
, compressor_thread_maxpages0
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vmct_stats
.vmct_maxpages
[0], 0, "");
3370 SYSCTL_INT(_vm
, OID_AUTO
, compressor_thread_maxpages1
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vmct_stats
.vmct_maxpages
[1], 0, "");
3374 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_compressions
, "");
3375 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_compression_failures
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_compression_failures
, "");
3376 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_compressed_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_compressed_bytes
, "");
3377 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_wk_compression_delta
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_wk_compression_delta
, "");
3378 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_wk_compression_negative_delta
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_wk_compression_negative_delta
, "");
3380 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_decompressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_decompressions
, "");
3381 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_decompressed_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_decompressed_bytes
, "");
3383 SYSCTL_QUAD(_vm
, OID_AUTO
, uc_decompressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.uc_decompressions
, "");
3385 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_compressions
, "");
3387 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_catime
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_cabstime
, "");
3389 SYSCTL_QUAD(_vm
, OID_AUTO
, wkh_catime
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wkh_cabstime
, "");
3390 SYSCTL_QUAD(_vm
, OID_AUTO
, wkh_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wkh_compressions
, "");
3392 SYSCTL_QUAD(_vm
, OID_AUTO
, wks_catime
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wks_cabstime
, "");
3393 SYSCTL_QUAD(_vm
, OID_AUTO
, wks_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wks_compressions
, "");
3395 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_compressions_exclusive
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_compressions_exclusive
, "");
3396 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_sv_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_sv_compressions
, "");
3397 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_mzv_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_mzv_compressions
, "");
3398 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_compression_failures
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_compression_failures
, "");
3399 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_compressed_bytes_exclusive
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_compressed_bytes_exclusive
, "");
3400 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_compressed_bytes_total
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_compressed_bytes_total
, "");
3402 SYSCTL_QUAD(_vm
, OID_AUTO
, wks_compressed_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wks_compressed_bytes
, "");
3403 SYSCTL_QUAD(_vm
, OID_AUTO
, wks_compression_failures
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wks_compression_failures
, "");
3404 SYSCTL_QUAD(_vm
, OID_AUTO
, wks_sv_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wks_sv_compressions
, "");
3407 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_decompressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_decompressions
, "");
3409 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_datime
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_dabstime
, "");
3411 SYSCTL_QUAD(_vm
, OID_AUTO
, wkh_datime
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wkh_dabstime
, "");
3412 SYSCTL_QUAD(_vm
, OID_AUTO
, wkh_decompressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wkh_decompressions
, "");
3414 SYSCTL_QUAD(_vm
, OID_AUTO
, wks_datime
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wks_dabstime
, "");
3415 SYSCTL_QUAD(_vm
, OID_AUTO
, wks_decompressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wks_decompressions
, "");
3417 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_decompressed_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_decompressed_bytes
, "");
3418 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_sv_decompressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_sv_decompressions
, "");
3420 SYSCTL_INT(_vm
, OID_AUTO
, lz4_threshold
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_threshold
, 0, "");
3421 SYSCTL_INT(_vm
, OID_AUTO
, wkdm_reeval_threshold
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.wkdm_reeval_threshold
, 0, "");
3422 SYSCTL_INT(_vm
, OID_AUTO
, lz4_max_failure_skips
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_max_failure_skips
, 0, "");
3423 SYSCTL_INT(_vm
, OID_AUTO
, lz4_max_failure_run_length
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_max_failure_run_length
, 0, "");
3424 SYSCTL_INT(_vm
, OID_AUTO
, lz4_max_preselects
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_max_preselects
, 0, "");
3425 SYSCTL_INT(_vm
, OID_AUTO
, lz4_run_preselection_threshold
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_run_preselection_threshold
, 0, "");
3426 SYSCTL_INT(_vm
, OID_AUTO
, lz4_run_continue_bytes
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_run_continue_bytes
, 0, "");
3427 SYSCTL_INT(_vm
, OID_AUTO
, lz4_profitable_bytes
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_profitable_bytes
, 0, "");
3428 #if DEVELOPMENT || DEBUG
3429 extern int vm_compressor_current_codec
;
3430 extern int vm_compressor_test_seg_wp
;
3431 extern boolean_t vm_compressor_force_sw_wkdm
;
3432 SYSCTL_INT(_vm
, OID_AUTO
, compressor_codec
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_current_codec
, 0, "");
3433 SYSCTL_INT(_vm
, OID_AUTO
, compressor_test_wp
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_test_seg_wp
, 0, "");
3435 SYSCTL_INT(_vm
, OID_AUTO
, wksw_force
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_force_sw_wkdm
, 0, "");
3436 extern int precompy
, wkswhw
;
3438 SYSCTL_INT(_vm
, OID_AUTO
, precompy
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &precompy
, 0, "");
3439 SYSCTL_INT(_vm
, OID_AUTO
, wkswhw
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &wkswhw
, 0, "");
3440 extern unsigned int vm_ktrace_enabled
;
3441 SYSCTL_INT(_vm
, OID_AUTO
, vm_ktrace
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_ktrace_enabled
, 0, "");
3444 #if CONFIG_PHANTOM_CACHE
3445 extern uint32_t phantom_cache_thrashing_threshold
;
3446 extern uint32_t phantom_cache_eval_period_in_msecs
;
3447 extern uint32_t phantom_cache_thrashing_threshold_ssd
;
3450 SYSCTL_INT(_vm
, OID_AUTO
, phantom_cache_eval_period_in_msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &phantom_cache_eval_period_in_msecs
, 0, "");
3451 SYSCTL_INT(_vm
, OID_AUTO
, phantom_cache_thrashing_threshold
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &phantom_cache_thrashing_threshold
, 0, "");
3452 SYSCTL_INT(_vm
, OID_AUTO
, phantom_cache_thrashing_threshold_ssd
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &phantom_cache_thrashing_threshold_ssd
, 0, "");
3455 #if CONFIG_BACKGROUND_QUEUE
3457 extern uint32_t vm_page_background_count
;
3458 extern uint32_t vm_page_background_target
;
3459 extern uint32_t vm_page_background_internal_count
;
3460 extern uint32_t vm_page_background_external_count
;
3461 extern uint32_t vm_page_background_mode
;
3462 extern uint32_t vm_page_background_exclude_external
;
3463 extern uint64_t vm_page_background_promoted_count
;
3464 extern uint64_t vm_pageout_rejected_bq_internal
;
3465 extern uint64_t vm_pageout_rejected_bq_external
;
3467 SYSCTL_INT(_vm
, OID_AUTO
, vm_page_background_mode
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_page_background_mode
, 0, "");
3468 SYSCTL_INT(_vm
, OID_AUTO
, vm_page_background_exclude_external
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_page_background_exclude_external
, 0, "");
3469 SYSCTL_INT(_vm
, OID_AUTO
, vm_page_background_target
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_page_background_target
, 0, "");
3470 SYSCTL_INT(_vm
, OID_AUTO
, vm_page_background_count
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_page_background_count
, 0, "");
3471 SYSCTL_INT(_vm
, OID_AUTO
, vm_page_background_internal_count
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_page_background_internal_count
, 0, "");
3472 SYSCTL_INT(_vm
, OID_AUTO
, vm_page_background_external_count
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_page_background_external_count
, 0, "");
3474 SYSCTL_QUAD(_vm
, OID_AUTO
, vm_page_background_promoted_count
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_page_background_promoted_count
, "");
3475 SYSCTL_QUAD(_vm
, OID_AUTO
, vm_pageout_considered_bq_internal
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_pageout_vminfo
.vm_pageout_considered_bq_internal
, "");
3476 SYSCTL_QUAD(_vm
, OID_AUTO
, vm_pageout_considered_bq_external
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_pageout_vminfo
.vm_pageout_considered_bq_external
, "");
3477 SYSCTL_QUAD(_vm
, OID_AUTO
, vm_pageout_rejected_bq_internal
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_pageout_rejected_bq_internal
, "");
3478 SYSCTL_QUAD(_vm
, OID_AUTO
, vm_pageout_rejected_bq_external
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_pageout_rejected_bq_external
, "");
3480 #endif /* CONFIG_BACKGROUND_QUEUE */
3482 extern void vm_update_darkwake_mode(boolean_t
);
3483 extern boolean_t vm_darkwake_mode
;
3486 sysctl_toggle_darkwake_mode(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
3488 int new_value
, changed
;
3489 int error
= sysctl_io_number(req
, vm_darkwake_mode
, sizeof(int), &new_value
, &changed
);
3491 if ( !error
&& changed
) {
3493 if (new_value
!= 0 && new_value
!= 1) {
3494 printf("Error: Invalid value passed to darkwake sysctl. Acceptable: 0 or 1.\n");
3497 vm_update_darkwake_mode((boolean_t
) new_value
);
3504 SYSCTL_PROC(_vm
, OID_AUTO
, darkwake_mode
,
3505 CTLTYPE_INT
| CTLFLAG_LOCKED
| CTLFLAG_RW
,
3506 0, 0, sysctl_toggle_darkwake_mode
, "I", "");
3508 #if (DEVELOPMENT || DEBUG)
3510 SYSCTL_UINT(_vm
, OID_AUTO
, vm_page_creation_throttled_hard
,
3511 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
3512 &vm_page_creation_throttled_hard
, 0, "");
3514 SYSCTL_UINT(_vm
, OID_AUTO
, vm_page_creation_throttled_soft
,
3515 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
3516 &vm_page_creation_throttled_soft
, 0, "");
3518 extern uint32_t vm_pageout_memorystatus_fb_factor_nr
;
3519 extern uint32_t vm_pageout_memorystatus_fb_factor_dr
;
3520 SYSCTL_INT(_vm
, OID_AUTO
, vm_pageout_memorystatus_fb_factor_nr
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_pageout_memorystatus_fb_factor_nr
, 0, "");
3521 SYSCTL_INT(_vm
, OID_AUTO
, vm_pageout_memorystatus_fb_factor_dr
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_pageout_memorystatus_fb_factor_dr
, 0, "");
3524 SYSCTL_INT(_vm
, OID_AUTO
, vm_grab_anon_overrides
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_pageout_debug
.vm_grab_anon_overrides
, 0, "");
3525 SYSCTL_INT(_vm
, OID_AUTO
, vm_grab_anon_nops
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_pageout_debug
.vm_grab_anon_nops
, 0, "");
3527 /* log message counters for persistence mode */
3528 extern uint32_t oslog_p_total_msgcount
;
3529 extern uint32_t oslog_p_metadata_saved_msgcount
;
3530 extern uint32_t oslog_p_metadata_dropped_msgcount
;
3531 extern uint32_t oslog_p_error_count
;
3532 extern uint32_t oslog_p_saved_msgcount
;
3533 extern uint32_t oslog_p_dropped_msgcount
;
3534 extern uint32_t oslog_p_boot_dropped_msgcount
;
3536 /* log message counters for streaming mode */
3537 extern uint32_t oslog_s_total_msgcount
;
3538 extern uint32_t oslog_s_metadata_msgcount
;
3539 extern uint32_t oslog_s_error_count
;
3540 extern uint32_t oslog_s_streamed_msgcount
;
3541 extern uint32_t oslog_s_dropped_msgcount
;
3543 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_p_total_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_p_total_msgcount
, 0, "");
3544 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_p_metadata_saved_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_p_metadata_saved_msgcount
, 0, "");
3545 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_p_metadata_dropped_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_p_metadata_dropped_msgcount
, 0, "");
3546 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_p_error_count
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_p_error_count
, 0, "");
3547 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_p_saved_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_p_saved_msgcount
, 0, "");
3548 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_p_dropped_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_p_dropped_msgcount
, 0, "");
3549 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_p_boot_dropped_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_p_boot_dropped_msgcount
, 0, "");
3551 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_s_total_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_s_total_msgcount
, 0, "");
3552 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_s_metadata_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_s_metadata_msgcount
, 0, "");
3553 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_s_error_count
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_s_error_count
, 0, "");
3554 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_s_streamed_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_s_streamed_msgcount
, 0, "");
3555 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_s_dropped_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_s_dropped_msgcount
, 0, "");
3558 #endif /* DEVELOPMENT || DEBUG */
3561 * Enable tracing of voucher contents
3563 extern uint32_t ipc_voucher_trace_contents
;
3565 SYSCTL_INT (_kern
, OID_AUTO
, ipc_voucher_trace_contents
,
3566 CTLFLAG_RW
| CTLFLAG_LOCKED
, &ipc_voucher_trace_contents
, 0, "Enable tracing voucher contents");
3569 * Kernel stack size and depth
3571 SYSCTL_INT (_kern
, OID_AUTO
, stack_size
,
3572 CTLFLAG_RD
| CTLFLAG_LOCKED
, (int *) &kernel_stack_size
, 0, "Kernel stack size");
3573 SYSCTL_INT (_kern
, OID_AUTO
, stack_depth_max
,
3574 CTLFLAG_RD
| CTLFLAG_LOCKED
, (int *) &kernel_stack_depth_max
, 0, "Max kernel stack depth at interrupt or context switch");
3576 extern unsigned int kern_feature_overrides
;
3577 SYSCTL_INT (_kern
, OID_AUTO
, kern_feature_overrides
,
3578 CTLFLAG_RD
| CTLFLAG_LOCKED
, &kern_feature_overrides
, 0, "Kernel feature override mask");
3581 * enable back trace for port allocations
3583 extern int ipc_portbt
;
3585 SYSCTL_INT(_kern
, OID_AUTO
, ipc_portbt
,
3586 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
3587 &ipc_portbt
, 0, "");
3593 SYSCTL_STRING(_kern
, OID_AUTO
, sched
,
3594 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
3595 sched_string
, sizeof(sched_string
),
3596 "Timeshare scheduler implementation");
3598 #if CONFIG_QUIESCE_COUNTER
3600 sysctl_cpu_quiescent_counter_interval SYSCTL_HANDLER_ARGS
3602 #pragma unused(arg1, arg2)
3604 int error
= sysctl_handle_int(oidp
, &cpu_checkin_min_interval_us
, 0, req
);
3605 if (error
|| !req
->newptr
)
3608 cpu_quiescent_counter_set_min_interval_us(cpu_checkin_min_interval_us
);
3613 SYSCTL_PROC(_kern
, OID_AUTO
, cpu_checkin_interval
,
3614 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3616 sysctl_cpu_quiescent_counter_interval
, "I",
3617 "Quiescent CPU checkin interval (microseconds)");
3618 #endif /* CONFIG_QUIESCE_COUNTER */
3622 * Only support runtime modification on embedded platforms
3623 * with development config enabled
3627 extern int precise_user_kernel_time
;
3628 SYSCTL_INT(_kern
, OID_AUTO
, precise_user_kernel_time
,
3629 CTLFLAG_RW
| CTLFLAG_LOCKED
,
3630 &precise_user_kernel_time
, 0, "Precise accounting of kernel vs. user time");
3635 /* Parameters related to timer coalescing tuning, to be replaced
3636 * with a dedicated systemcall in the future.
3638 /* Enable processing pending timers in the context of any other interrupt
3639 * Coalescing tuning parameters for various thread/task attributes */
3641 sysctl_timer_user_us_kernel_abstime SYSCTL_HANDLER_ARGS
3643 #pragma unused(oidp)
3644 int size
= arg2
; /* subcommand*/
3647 uint64_t old_value_ns
;
3648 uint64_t new_value_ns
;
3649 uint64_t value_abstime
;
3650 if (size
== sizeof(uint32_t))
3651 value_abstime
= *((uint32_t *)arg1
);
3652 else if (size
== sizeof(uint64_t))
3653 value_abstime
= *((uint64_t *)arg1
);
3654 else return ENOTSUP
;
3656 absolutetime_to_nanoseconds(value_abstime
, &old_value_ns
);
3657 error
= sysctl_io_number(req
, old_value_ns
, sizeof(old_value_ns
), &new_value_ns
, &changed
);
3658 if ((error
) || (!changed
))
3661 nanoseconds_to_absolutetime(new_value_ns
, &value_abstime
);
3662 if (size
== sizeof(uint32_t))
3663 *((uint32_t *)arg1
) = (uint32_t)value_abstime
;
3665 *((uint64_t *)arg1
) = value_abstime
;
3669 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_bg_scale
,
3670 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3671 &tcoal_prio_params
.timer_coalesce_bg_shift
, 0, "");
3672 SYSCTL_PROC(_kern
, OID_AUTO
, timer_resort_threshold_ns
,
3673 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3674 &tcoal_prio_params
.timer_resort_threshold_abstime
,
3675 sizeof(tcoal_prio_params
.timer_resort_threshold_abstime
),
3676 sysctl_timer_user_us_kernel_abstime
,
3678 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_bg_ns_max
,
3679 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3680 &tcoal_prio_params
.timer_coalesce_bg_abstime_max
,
3681 sizeof(tcoal_prio_params
.timer_coalesce_bg_abstime_max
),
3682 sysctl_timer_user_us_kernel_abstime
,
3685 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_kt_scale
,
3686 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3687 &tcoal_prio_params
.timer_coalesce_kt_shift
, 0, "");
3689 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_kt_ns_max
,
3690 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3691 &tcoal_prio_params
.timer_coalesce_kt_abstime_max
,
3692 sizeof(tcoal_prio_params
.timer_coalesce_kt_abstime_max
),
3693 sysctl_timer_user_us_kernel_abstime
,
3696 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_fp_scale
,
3697 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3698 &tcoal_prio_params
.timer_coalesce_fp_shift
, 0, "");
3700 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_fp_ns_max
,
3701 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3702 &tcoal_prio_params
.timer_coalesce_fp_abstime_max
,
3703 sizeof(tcoal_prio_params
.timer_coalesce_fp_abstime_max
),
3704 sysctl_timer_user_us_kernel_abstime
,
3707 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_ts_scale
,
3708 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3709 &tcoal_prio_params
.timer_coalesce_ts_shift
, 0, "");
3711 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_ts_ns_max
,
3712 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3713 &tcoal_prio_params
.timer_coalesce_ts_abstime_max
,
3714 sizeof(tcoal_prio_params
.timer_coalesce_ts_abstime_max
),
3715 sysctl_timer_user_us_kernel_abstime
,
3718 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier0_scale
,
3719 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3720 &tcoal_prio_params
.latency_qos_scale
[0], 0, "");
3722 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier0_ns_max
,
3723 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3724 &tcoal_prio_params
.latency_qos_abstime_max
[0],
3725 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[0]),
3726 sysctl_timer_user_us_kernel_abstime
,
3729 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier1_scale
,
3730 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3731 &tcoal_prio_params
.latency_qos_scale
[1], 0, "");
3733 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier1_ns_max
,
3734 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3735 &tcoal_prio_params
.latency_qos_abstime_max
[1],
3736 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[1]),
3737 sysctl_timer_user_us_kernel_abstime
,
3740 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier2_scale
,
3741 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3742 &tcoal_prio_params
.latency_qos_scale
[2], 0, "");
3744 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier2_ns_max
,
3745 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3746 &tcoal_prio_params
.latency_qos_abstime_max
[2],
3747 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[2]),
3748 sysctl_timer_user_us_kernel_abstime
,
3751 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier3_scale
,
3752 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3753 &tcoal_prio_params
.latency_qos_scale
[3], 0, "");
3755 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier3_ns_max
,
3756 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3757 &tcoal_prio_params
.latency_qos_abstime_max
[3],
3758 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[3]),
3759 sysctl_timer_user_us_kernel_abstime
,
3762 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier4_scale
,
3763 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3764 &tcoal_prio_params
.latency_qos_scale
[4], 0, "");
3766 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier4_ns_max
,
3767 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3768 &tcoal_prio_params
.latency_qos_abstime_max
[4],
3769 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[4]),
3770 sysctl_timer_user_us_kernel_abstime
,
3773 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier5_scale
,
3774 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3775 &tcoal_prio_params
.latency_qos_scale
[5], 0, "");
3777 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier5_ns_max
,
3778 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3779 &tcoal_prio_params
.latency_qos_abstime_max
[5],
3780 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[5]),
3781 sysctl_timer_user_us_kernel_abstime
,
3784 /* Communicate the "user idle level" heuristic to the timer layer, and
3785 * potentially other layers in the future.
3789 timer_user_idle_level(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
) {
3790 int new_value
= 0, old_value
= 0, changed
= 0, error
;
3792 old_value
= timer_get_user_idle_level();
3794 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
3796 if (error
== 0 && changed
) {
3797 if (timer_set_user_idle_level(new_value
) != KERN_SUCCESS
)
3804 SYSCTL_PROC(_machdep
, OID_AUTO
, user_idle_level
,
3805 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3807 timer_user_idle_level
, "I", "User idle level heuristic, 0-128");
3810 SYSCTL_INT(_kern
, OID_AUTO
, hv_support
,
3811 CTLFLAG_KERN
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
3812 &hv_support_available
, 0, "");
3817 sysctl_darkboot SYSCTL_HANDLER_ARGS
3819 int err
= 0, value
= 0;
3820 #pragma unused(oidp, arg1, arg2, err, value, req)
3823 * Handle the sysctl request.
3825 * If this is a read, the function will set the value to the current darkboot value. Otherwise,
3826 * we'll get the request identifier into "value" and then we can honor it.
3828 if ((err
= sysctl_io_number(req
, darkboot
, sizeof(int), &value
, NULL
)) != 0) {
3832 /* writing requested, let's process the request */
3834 /* writing is protected by an entitlement */
3835 if (priv_check_cred(kauth_cred_get(), PRIV_DARKBOOT
, 0) != 0) {
3841 case MEMORY_MAINTENANCE_DARK_BOOT_UNSET
:
3843 * If the darkboot sysctl is unset, the NVRAM variable
3844 * must be unset too. If that's not the case, it means
3845 * someone is doing something crazy and not supported.
3847 if (darkboot
!= 0) {
3848 int ret
= PERemoveNVRAMProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME
);
3856 case MEMORY_MAINTENANCE_DARK_BOOT_SET
:
3859 case MEMORY_MAINTENANCE_DARK_BOOT_SET_PERSISTENT
: {
3861 * Set the NVRAM and update 'darkboot' in case
3862 * of success. Otherwise, do not update
3863 * 'darkboot' and report the failure.
3865 if (PEWriteNVRAMBooleanProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME
, TRUE
)) {
3882 SYSCTL_PROC(_kern
, OID_AUTO
, darkboot
,
3883 CTLFLAG_KERN
| CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
| CTLFLAG_ANYBODY
,
3884 0, 0, sysctl_darkboot
, "I", "");
3887 #if DEVELOPMENT || DEBUG
3888 #include <sys/sysent.h>
3889 /* This should result in a fatal exception, verifying that "sysent" is
3893 kern_sysent_write(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
) {
3894 uint64_t new_value
= 0, old_value
= 0;
3895 int changed
= 0, error
;
3897 error
= sysctl_io_number(req
, old_value
, sizeof(uint64_t), &new_value
, &changed
);
3898 if ((error
== 0) && changed
) {
3899 volatile uint32_t *wraddr
= (uint32_t *) &sysent
[0];
3901 printf("sysent[0] write succeeded\n");
3906 SYSCTL_PROC(_kern
, OID_AUTO
, sysent_const_check
,
3907 CTLTYPE_QUAD
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3909 kern_sysent_write
, "I", "Attempt sysent[0] write");
3913 #if DEVELOPMENT || DEBUG
3914 SYSCTL_COMPAT_INT(_kern
, OID_AUTO
, development
, CTLFLAG_RD
| CTLFLAG_MASKED
, NULL
, 1, "");
3916 SYSCTL_COMPAT_INT(_kern
, OID_AUTO
, development
, CTLFLAG_RD
| CTLFLAG_MASKED
, NULL
, 0, "");
3920 #if DEVELOPMENT || DEBUG
3923 sysctl_panic_test SYSCTL_HANDLER_ARGS
3925 #pragma unused(arg1, arg2)
3927 char str
[32] = "entry prelog postlog postcore";
3929 rval
= sysctl_handle_string(oidp
, str
, sizeof(str
), req
);
3931 if (rval
== 0 && req
->newptr
) {
3932 if (strncmp("entry", str
, strlen("entry")) == 0) {
3933 panic_with_options(0, NULL
, DEBUGGER_OPTION_RECURPANIC_ENTRY
, "test recursive panic at entry");
3934 } else if (strncmp("prelog", str
, strlen("prelog")) == 0) {
3935 panic_with_options(0, NULL
, DEBUGGER_OPTION_RECURPANIC_PRELOG
, "test recursive panic prior to writing a paniclog");
3936 } else if (strncmp("postlog", str
, strlen("postlog")) == 0) {
3937 panic_with_options(0, NULL
, DEBUGGER_OPTION_RECURPANIC_POSTLOG
, "test recursive panic subsequent to paniclog");
3938 } else if (strncmp("postcore", str
, strlen("postcore")) == 0) {
3939 panic_with_options(0, NULL
, DEBUGGER_OPTION_RECURPANIC_POSTCORE
, "test recursive panic subsequent to on-device core");
3947 sysctl_debugger_test SYSCTL_HANDLER_ARGS
3949 #pragma unused(arg1, arg2)
3951 char str
[32] = "entry prelog postlog postcore";
3953 rval
= sysctl_handle_string(oidp
, str
, sizeof(str
), req
);
3955 if (rval
== 0 && req
->newptr
) {
3956 if (strncmp("entry", str
, strlen("entry")) == 0) {
3957 DebuggerWithContext(0, NULL
, "test recursive panic via debugger at entry", DEBUGGER_OPTION_RECURPANIC_ENTRY
);
3958 } else if (strncmp("prelog", str
, strlen("prelog")) == 0) {
3959 DebuggerWithContext(0, NULL
, "test recursive panic via debugger prior to writing a paniclog", DEBUGGER_OPTION_RECURPANIC_PRELOG
);
3960 } else if (strncmp("postlog", str
, strlen("postlog")) == 0) {
3961 DebuggerWithContext(0, NULL
, "test recursive panic via debugger subsequent to paniclog", DEBUGGER_OPTION_RECURPANIC_POSTLOG
);
3962 } else if (strncmp("postcore", str
, strlen("postcore")) == 0) {
3963 DebuggerWithContext(0, NULL
, "test recursive panic via debugger subsequent to on-device core", DEBUGGER_OPTION_RECURPANIC_POSTCORE
);
3970 decl_lck_spin_data(, spinlock_panic_test_lock
)
3972 __attribute__((noreturn
))
3974 spinlock_panic_test_acquire_spinlock(void * arg __unused
, wait_result_t wres __unused
)
3976 lck_spin_lock(&spinlock_panic_test_lock
);
3981 sysctl_spinlock_panic_test SYSCTL_HANDLER_ARGS
3983 #pragma unused(oidp, arg1, arg2)
3984 if (req
->newlen
== 0)
3987 thread_t panic_spinlock_thread
;
3988 /* Initialize panic spinlock */
3989 lck_grp_t
* panic_spinlock_grp
;
3990 lck_grp_attr_t
* panic_spinlock_grp_attr
;
3991 lck_attr_t
* panic_spinlock_attr
;
3993 panic_spinlock_grp_attr
= lck_grp_attr_alloc_init();
3994 panic_spinlock_grp
= lck_grp_alloc_init("panic_spinlock", panic_spinlock_grp_attr
);
3995 panic_spinlock_attr
= lck_attr_alloc_init();
3997 lck_spin_init(&spinlock_panic_test_lock
, panic_spinlock_grp
, panic_spinlock_attr
);
4000 /* Create thread to acquire spinlock */
4001 if (kernel_thread_start(spinlock_panic_test_acquire_spinlock
, NULL
, &panic_spinlock_thread
) != KERN_SUCCESS
) {
4005 /* Try to acquire spinlock -- should panic eventually */
4006 lck_spin_lock(&spinlock_panic_test_lock
);
4010 __attribute__((noreturn
))
4012 simultaneous_panic_worker
4013 (void * arg
, wait_result_t wres __unused
)
4015 atomic_int
*start_panic
= (atomic_int
*)arg
;
4017 while (!atomic_load(start_panic
)) { ; }
4018 panic("SIMULTANEOUS PANIC TEST: INITIATING PANIC FROM CPU %d", cpu_number());
4019 __builtin_unreachable();
4023 sysctl_simultaneous_panic_test SYSCTL_HANDLER_ARGS
4025 #pragma unused(oidp, arg1, arg2)
4026 if (req
->newlen
== 0)
4029 int i
= 0, threads_to_create
= 2 * processor_count
;
4030 atomic_int start_panic
= 0;
4031 unsigned int threads_created
= 0;
4032 thread_t new_panic_thread
;
4034 for (i
= threads_to_create
; i
> 0; i
--) {
4035 if (kernel_thread_start(simultaneous_panic_worker
, (void *) &start_panic
, &new_panic_thread
) == KERN_SUCCESS
) {
4040 /* FAIL if we couldn't create at least processor_count threads */
4041 if (threads_created
< processor_count
) {
4042 panic("SIMULTANEOUS PANIC TEST: FAILED TO CREATE ENOUGH THREADS, ONLY CREATED %d (of %d)",
4043 threads_created
, threads_to_create
);
4046 atomic_exchange(&start_panic
, 1);
4050 SYSCTL_PROC(_debug
, OID_AUTO
, panic_test
, CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_MASKED
, 0, 0, sysctl_panic_test
, "A", "panic test");
4051 SYSCTL_PROC(_debug
, OID_AUTO
, debugger_test
, CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_MASKED
, 0, 0, sysctl_debugger_test
, "A", "debugger test");
4052 SYSCTL_PROC(_debug
, OID_AUTO
, spinlock_panic_test
, CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_MASKED
, 0, 0, sysctl_spinlock_panic_test
, "A", "spinlock panic test");
4053 SYSCTL_PROC(_debug
, OID_AUTO
, simultaneous_panic_test
, CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_MASKED
, 0, 0, sysctl_simultaneous_panic_test
, "A", "simultaneous panic test");
4055 extern int exc_resource_threads_enabled
;
4057 SYSCTL_INT(_kern
, OID_AUTO
, exc_resource_threads_enabled
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &exc_resource_threads_enabled
, 0, "exc_resource thread limit enabled");
4060 #endif /* DEVELOPMENT || DEBUG */
4062 const uint32_t thread_groups_supported
= 0;
4065 sysctl_thread_groups_supported (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
4067 int value
= thread_groups_supported
;
4068 return sysctl_io_number(req
, value
, sizeof(value
), NULL
, NULL
);
4071 SYSCTL_PROC(_kern
, OID_AUTO
, thread_groups_supported
, CTLFLAG_RD
| CTLFLAG_LOCKED
| CTLFLAG_KERN
,
4072 0, 0, &sysctl_thread_groups_supported
, "I", "thread groups supported");
4075 sysctl_grade_cputype SYSCTL_HANDLER_ARGS
4077 #pragma unused(arg1, arg2, oidp)
4079 int type_tuple
[2] = {};
4080 int return_value
= 0;
4082 error
= SYSCTL_IN(req
, &type_tuple
, sizeof(type_tuple
));
4088 return_value
= grade_binary(type_tuple
[0], type_tuple
[1]);
4090 error
= SYSCTL_OUT(req
, &return_value
, sizeof(return_value
));
4099 SYSCTL_PROC(_kern
, OID_AUTO
, grade_cputype
,
4100 CTLFLAG_RW
|CTLFLAG_ANYBODY
|CTLFLAG_MASKED
|CTLFLAG_LOCKED
|CTLTYPE_OPAQUE
,
4101 0, 0, &sysctl_grade_cputype
, "S",
4102 "grade value of cpu_type_t+cpu_sub_type_t");
4105 #if DEVELOPMENT || DEBUG
4107 static atomic_int wedge_thread_should_wake
= 0;
4110 unwedge_thread SYSCTL_HANDLER_ARGS
4112 #pragma unused(arg1, arg2)
4114 error
= sysctl_handle_int(oidp
, &val
, 0, req
);
4115 if (error
|| val
== 0) {
4119 atomic_store(&wedge_thread_should_wake
, 1);
4123 SYSCTL_PROC(_kern
, OID_AUTO
, unwedge_thread
, CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
, 0, 0, unwedge_thread
, "I", "unwedge the thread wedged by kern.wedge_thread");
4125 extern uintptr_t phys_carveout_pa
;
4126 SYSCTL_LONG(_kern
, OID_AUTO
, phys_carveout_pa
, CTLFLAG_RD
| CTLFLAG_LOCKED
,
4128 "base physical address of the phys_carveout_mb boot-arg region");
4129 extern size_t phys_carveout_size
;
4130 SYSCTL_LONG(_kern
, OID_AUTO
, phys_carveout_size
, CTLFLAG_RD
| CTLFLAG_LOCKED
,
4131 &phys_carveout_size
,
4132 "size in bytes of the phys_carveout_mb boot-arg region");
4135 wedge_thread SYSCTL_HANDLER_ARGS
4137 #pragma unused(arg1, arg2)
4140 error
= sysctl_handle_int(oidp
, &val
, 0, req
);
4141 if (error
|| val
== 0) {
4145 uint64_t interval
= 1;
4146 nanoseconds_to_absolutetime(1000 * 1000 * 50, &interval
);
4148 atomic_store(&wedge_thread_should_wake
, 0);
4149 while (!atomic_load(&wedge_thread_should_wake
)) {
4150 tsleep1(NULL
, 0, "wedge_thread", mach_absolute_time()+interval
, NULL
);
4156 SYSCTL_PROC(_kern
, OID_AUTO
, wedge_thread
, CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
, 0, 0, wedge_thread
, "I", "wedge this thread so it cannot be cleaned up");
4159 sysctl_turnstile_test_prim_lock SYSCTL_HANDLER_ARGS
;
4161 sysctl_turnstile_test_prim_unlock SYSCTL_HANDLER_ARGS
;
4163 tstile_test_prim_lock(boolean_t use_hashtable
);
4165 tstile_test_prim_unlock(boolean_t use_hashtable
);
4167 #define SYSCTL_TURNSTILE_TEST_DEFAULT 1
4168 #define SYSCTL_TURNSTILE_TEST_GLOBAL_HASHTABLE 2
4171 sysctl_turnstile_test_prim_lock SYSCTL_HANDLER_ARGS
4173 #pragma unused(arg1, arg2)
4175 error
= sysctl_handle_int(oidp
, &val
, 0, req
);
4176 if (error
|| val
== 0) {
4179 boolean_t use_hashtable
= (val
== SYSCTL_TURNSTILE_TEST_GLOBAL_HASHTABLE
) ? true : false;
4180 return tstile_test_prim_lock(use_hashtable
);
4184 sysctl_turnstile_test_prim_unlock SYSCTL_HANDLER_ARGS
4186 #pragma unused(arg1, arg2)
4188 error
= sysctl_handle_int(oidp
, &val
, 0, req
);
4189 if (error
|| val
== 0) {
4192 boolean_t use_hashtable
= (val
== SYSCTL_TURNSTILE_TEST_GLOBAL_HASHTABLE
) ? true : false;
4193 return tstile_test_prim_unlock(use_hashtable
);
4196 SYSCTL_PROC(_kern
, OID_AUTO
, turnstiles_test_lock
, CTLFLAG_WR
| CTLFLAG_ANYBODY
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
4197 0, 0, sysctl_turnstile_test_prim_lock
, "I", "turnstiles test lock");
4199 SYSCTL_PROC(_kern
, OID_AUTO
, turnstiles_test_unlock
, CTLFLAG_WR
| CTLFLAG_ANYBODY
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
4200 0, 0, sysctl_turnstile_test_prim_unlock
, "I", "turnstiles test unlock");
4203 turnstile_get_boost_stats_sysctl(void *req
);
4205 turnstile_get_unboost_stats_sysctl(void *req
);
4207 sysctl_turnstile_boost_stats SYSCTL_HANDLER_ARGS
;
4209 sysctl_turnstile_unboost_stats SYSCTL_HANDLER_ARGS
;
4210 extern uint64_t thread_block_on_turnstile_count
;
4211 extern uint64_t thread_block_on_regular_waitq_count
;
4214 sysctl_turnstile_boost_stats SYSCTL_HANDLER_ARGS
4216 #pragma unused(arg1, arg2, oidp)
4217 return turnstile_get_boost_stats_sysctl(req
);
4221 sysctl_turnstile_unboost_stats SYSCTL_HANDLER_ARGS
4223 #pragma unused(arg1, arg2, oidp)
4224 return turnstile_get_unboost_stats_sysctl(req
);
4227 SYSCTL_PROC(_kern
, OID_AUTO
, turnstile_boost_stats
, CTLFLAG_RD
| CTLFLAG_ANYBODY
| CTLFLAG_KERN
| CTLFLAG_LOCKED
| CTLTYPE_STRUCT
,
4228 0, 0, sysctl_turnstile_boost_stats
, "S", "turnstiles boost stats");
4229 SYSCTL_PROC(_kern
, OID_AUTO
, turnstile_unboost_stats
, CTLFLAG_RD
| CTLFLAG_ANYBODY
| CTLFLAG_KERN
| CTLFLAG_LOCKED
| CTLTYPE_STRUCT
,
4230 0, 0, sysctl_turnstile_unboost_stats
, "S", "turnstiles unboost stats");
4231 SYSCTL_QUAD(_kern
, OID_AUTO
, thread_block_count_on_turnstile
,
4232 CTLFLAG_RD
| CTLFLAG_ANYBODY
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
4233 &thread_block_on_turnstile_count
, "thread blocked on turnstile count");
4234 SYSCTL_QUAD(_kern
, OID_AUTO
, thread_block_count_on_reg_waitq
,
4235 CTLFLAG_RD
| CTLFLAG_ANYBODY
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
4236 &thread_block_on_regular_waitq_count
, "thread blocked on regular waitq count");
4239 sysctl_lck_mtx_test_lock SYSCTL_HANDLER_ARGS
4241 #pragma unused(arg1, arg2)
4243 error
= sysctl_handle_int(oidp
, &val
, 0, req
);
4244 if (error
|| val
== 0) {
4249 lck_mtx_test_init();
4250 lck_mtx_test_lock();
4257 sysctl_lck_mtx_test_unlock SYSCTL_HANDLER_ARGS
4259 #pragma unused(arg1, arg2)
4261 error
= sysctl_handle_int(oidp
, &val
, 0, req
);
4262 if (error
|| val
== 0) {
4267 lck_mtx_test_init();
4268 lck_mtx_test_unlock();
4275 sysctl_erase_all_test_mtx_stats SYSCTL_HANDLER_ARGS
4277 #pragma unused(arg1, arg2)
4279 error
= sysctl_handle_int(oidp
, &val
, 0, req
);
4280 if (error
|| val
== 0) {
4285 lck_mtx_test_init();
4286 erase_all_test_mtx_stats();
4293 sysctl_get_test_mtx_stats SYSCTL_HANDLER_ARGS
4295 #pragma unused(oidp, arg1, arg2)
4297 int size
, buffer_size
, error
;
4300 buffer
= kalloc(buffer_size
);
4302 panic("Impossible to allocate memory for %s\n", __func__
);
4304 lck_mtx_test_init();
4306 size
= get_test_mtx_stats_string(buffer
, buffer_size
);
4308 error
= sysctl_io_string(req
, buffer
, size
, 0, NULL
);
4310 kfree(buffer
, buffer_size
);
4316 sysctl_test_mtx_uncontended SYSCTL_HANDLER_ARGS
4318 #pragma unused(oidp, arg1, arg2)
4320 int buffer_size
, offset
, error
, iter
;
4331 if (req
->newlen
>= sizeof(input_val
)) {
4335 error
= SYSCTL_IN(req
, input_val
, req
->newlen
);
4339 input_val
[req
->newlen
] = '\0';
4341 sscanf(input_val
, "%d", &iter
);
4344 printf("%s requested %d iterations, not starting the test\n", __func__
, iter
);
4348 lck_mtx_test_init();
4352 buffer
= kalloc(buffer_size
);
4354 panic("Impossible to allocate memory for %s\n", __func__
);
4355 memset(buffer
, 0, buffer_size
);
4357 printf("%s starting uncontended mutex test with %d iterations\n", __func__
, iter
);
4359 offset
= snprintf(buffer
, buffer_size
, "STATS INNER LOOP");
4360 offset
+= lck_mtx_test_mtx_uncontended(iter
, &buffer
[offset
], buffer_size
- offset
);
4362 offset
+= snprintf(&buffer
[offset
], buffer_size
- offset
, "\nSTATS OUTER LOOP");
4363 offset
+= lck_mtx_test_mtx_uncontended_loop_time(iter
, &buffer
[offset
], buffer_size
- offset
);
4365 error
= SYSCTL_OUT(req
, buffer
, offset
);
4367 kfree(buffer
, buffer_size
);
4372 sysctl_test_mtx_contended SYSCTL_HANDLER_ARGS
4374 #pragma unused(oidp, arg1, arg2)
4376 int buffer_size
, offset
, error
, iter
;
4379 printf("%s called\n", __func__
);
4389 if (req
->newlen
>= sizeof(input_val
)) {
4393 error
= SYSCTL_IN(req
, input_val
, req
->newlen
);
4397 input_val
[req
->newlen
] = '\0';
4399 sscanf(input_val
, "%d", &iter
);
4402 printf("%s requested %d iterations, not starting the test\n", __func__
, iter
);
4406 lck_mtx_test_init();
4408 erase_all_test_mtx_stats();
4412 buffer
= kalloc(buffer_size
);
4414 panic("Impossible to allocate memory for %s\n", __func__
);
4415 memset(buffer
, 0, buffer_size
);
4417 printf("%s starting contended mutex test with %d iterations\n", __func__
, iter
);
4419 offset
= snprintf(buffer
, buffer_size
, "STATS INNER LOOP");
4420 offset
+= lck_mtx_test_mtx_contended(iter
, &buffer
[offset
], buffer_size
- offset
);
4422 printf("%s starting contended mutex loop test with %d iterations\n", __func__
, iter
);
4424 offset
+= snprintf(&buffer
[offset
], buffer_size
- offset
, "\nSTATS OUTER LOOP");
4425 offset
+= lck_mtx_test_mtx_contended_loop_time(iter
, &buffer
[offset
], buffer_size
- offset
);
4427 error
= SYSCTL_OUT(req
, buffer
, offset
);
4429 kfree(buffer
, buffer_size
);
4434 SYSCTL_PROC(_kern
, OID_AUTO
, lck_mtx_test_lock
, CTLFLAG_WR
| CTLFLAG_MASKED
| CTLFLAG_ANYBODY
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
4435 0, 0, sysctl_lck_mtx_test_lock
, "I", "lck mtx test lock");
4437 SYSCTL_PROC(_kern
, OID_AUTO
, lck_mtx_test_unlock
, CTLFLAG_WR
| CTLFLAG_MASKED
|CTLFLAG_ANYBODY
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
4438 0, 0, sysctl_lck_mtx_test_unlock
, "I", "lck mtx test unlock");
4440 SYSCTL_PROC(_kern
, OID_AUTO
, erase_all_test_mtx_stats
, CTLFLAG_WR
| CTLFLAG_MASKED
|CTLFLAG_ANYBODY
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
4441 0, 0, sysctl_erase_all_test_mtx_stats
, "I", "erase test_mtx statistics");
4443 SYSCTL_PROC(_kern
, OID_AUTO
, get_test_mtx_stats
, CTLTYPE_STRING
| CTLFLAG_RD
| CTLFLAG_MASKED
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
4444 0, 0, sysctl_get_test_mtx_stats
, "A", "get test_mtx statistics");
4446 SYSCTL_PROC(_kern
, OID_AUTO
, test_mtx_contended
, CTLTYPE_STRING
| CTLFLAG_MASKED
| CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
4447 0, 0, sysctl_test_mtx_contended
, "A", "get statistics for contended mtx test");
4449 SYSCTL_PROC(_kern
, OID_AUTO
, test_mtx_uncontended
, CTLTYPE_STRING
| CTLFLAG_MASKED
| CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
4450 0, 0, sysctl_test_mtx_uncontended
, "A", "get statistics for uncontended mtx test");
4452 #if defined (__x86_64__)
4454 semaphore_t sysctl_test_panic_with_thread_sem
;
4456 #pragma clang diagnostic push
4457 #pragma clang diagnostic ignored "-Winfinite-recursion" /* rdar://38801963 */
4458 __attribute__((noreturn
))
4460 panic_thread_test_child_spin(void * arg
, wait_result_t wres
)
4462 static int panic_thread_recurse_count
= 5;
4464 if (panic_thread_recurse_count
> 0) {
4465 panic_thread_recurse_count
--;
4466 panic_thread_test_child_spin(arg
, wres
);
4469 semaphore_signal(sysctl_test_panic_with_thread_sem
);
4472 #pragma clang diagnostic pop
4475 panic_thread_test_child_park(void * arg __unused
, wait_result_t wres __unused
)
4479 assert_wait(&event
, THREAD_UNINT
);
4480 semaphore_signal(sysctl_test_panic_with_thread_sem
);
4481 thread_block(panic_thread_test_child_park
);
4485 sysctl_test_panic_with_thread SYSCTL_HANDLER_ARGS
4487 #pragma unused(arg1, arg2)
4489 char str
[16] = { '\0' };
4490 thread_t child_thread
= THREAD_NULL
;
4492 rval
= sysctl_handle_string(oidp
, str
, sizeof(str
), req
);
4493 if (rval
!= 0 || !req
->newptr
) {
4497 semaphore_create(kernel_task
, &sysctl_test_panic_with_thread_sem
, SYNC_POLICY_FIFO
, 0);
4499 /* Create thread to spin or park in continuation */
4500 if (strncmp("spin", str
, strlen("spin")) == 0) {
4501 if (kernel_thread_start(panic_thread_test_child_spin
, NULL
, &child_thread
) != KERN_SUCCESS
) {
4502 semaphore_destroy(kernel_task
, sysctl_test_panic_with_thread_sem
);
4505 } else if (strncmp("continuation", str
, strlen("continuation")) == 0) {
4506 if (kernel_thread_start(panic_thread_test_child_park
, NULL
, &child_thread
) != KERN_SUCCESS
) {
4507 semaphore_destroy(kernel_task
, sysctl_test_panic_with_thread_sem
);
4511 semaphore_destroy(kernel_task
, sysctl_test_panic_with_thread_sem
);
4515 semaphore_wait(sysctl_test_panic_with_thread_sem
);
4517 panic_with_thread_context(0, NULL
, 0, child_thread
, "testing panic_with_thread_context for thread %p", child_thread
);
4523 SYSCTL_PROC(_kern
, OID_AUTO
, test_panic_with_thread
, CTLFLAG_MASKED
| CTLFLAG_KERN
| CTLFLAG_LOCKED
| CTLFLAG_WR
| CTLTYPE_STRING
,
4524 0, 0, sysctl_test_panic_with_thread
, "A", "test panic flow for backtracing a different thread");
4525 #endif /* defined (__x86_64__) */
4526 #endif /* DEVELOPMENT || DEBUG */