2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
67 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
68 * support for mandatory and extensible security protections. This notice
69 * is included in support of clause 2.2 (b) of the Apple Public License,
74 * DEPRECATED sysctl system call code
76 * Everything in this file is deprecated. Sysctls should be handled
77 * by the code in kern_newsysctl.c.
78 * The remaining "case" sections are supposed to be converted into
79 * SYSCTL_*-style definitions, and as soon as all of them are gone,
80 * this source file is supposed to die.
82 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
83 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/malloc.h>
90 #include <sys/proc_internal.h>
91 #include <sys/kauth.h>
92 #include <sys/file_internal.h>
93 #include <sys/vnode_internal.h>
94 #include <sys/unistd.h>
96 #include <sys/ioctl.h>
97 #include <sys/namei.h>
99 #include <sys/disklabel.h>
101 #include <sys/sysctl.h>
102 #include <sys/user.h>
103 #include <sys/aio_kern.h>
104 #include <sys/reboot.h>
105 #include <sys/memory_maintenance.h>
106 #include <sys/priv.h>
108 #include <security/audit/audit.h>
109 #include <kern/kalloc.h>
111 #include <mach/machine.h>
112 #include <mach/mach_host.h>
113 #include <mach/mach_types.h>
114 #include <mach/vm_param.h>
115 #include <kern/mach_param.h>
116 #include <kern/task.h>
117 #include <kern/thread.h>
118 #include <kern/processor.h>
119 #include <kern/debug.h>
120 #include <kern/sched_prim.h>
121 #include <vm/vm_kern.h>
122 #include <vm/vm_map.h>
123 #include <mach/host_info.h>
125 #include <sys/mount_internal.h>
126 #include <sys/kdebug.h>
128 #include <IOKit/IOPlatformExpert.h>
129 #include <pexpert/pexpert.h>
131 #include <machine/machine_routines.h>
132 #include <machine/exec.h>
134 #include <vm/vm_protos.h>
135 #include <vm/vm_pageout.h>
136 #include <vm/vm_compressor_algorithms.h>
137 #include <sys/imgsrc.h>
138 #include <kern/timer_call.h>
140 #if defined(__i386__) || defined(__x86_64__)
141 #include <i386/cpuid.h>
145 #include <sys/kern_memorystatus.h>
149 #include <kperf/kperf.h>
153 #include <kern/hv_support.h>
157 * deliberately setting max requests to really high number
158 * so that runaway settings do not cause MALLOC overflows
160 #define AIO_MAX_REQUESTS (128 * CONFIG_AIO_MAX)
162 extern int aio_max_requests
;
163 extern int aio_max_requests_per_process
;
164 extern int aio_worker_threads
;
165 extern int lowpri_IO_window_msecs
;
166 extern int lowpri_IO_delay_msecs
;
167 extern int nx_enabled
;
168 extern int speculative_reads_disabled
;
169 extern int ignore_is_ssd
;
170 extern unsigned int speculative_prefetch_max
;
171 extern unsigned int speculative_prefetch_max_iosize
;
172 extern unsigned int preheat_max_bytes
;
173 extern unsigned int preheat_min_bytes
;
174 extern long numvnodes
;
176 extern uuid_string_t bootsessionuuid_string
;
178 extern unsigned int vm_max_delayed_work_limit
;
179 extern unsigned int vm_max_batch
;
181 extern unsigned int vm_page_free_min
;
182 extern unsigned int vm_page_free_target
;
183 extern unsigned int vm_page_free_reserved
;
184 extern unsigned int vm_page_speculative_percentage
;
185 extern unsigned int vm_page_speculative_q_age_ms
;
187 #if (DEVELOPMENT || DEBUG)
188 extern uint32_t vm_page_creation_throttled_hard
;
189 extern uint32_t vm_page_creation_throttled_soft
;
190 #endif /* DEVELOPMENT || DEBUG */
193 * Conditionally allow dtrace to see these functions for debugging purposes.
201 #define STATIC static
204 extern boolean_t mach_timer_coalescing_enabled
;
206 extern uint64_t timer_deadline_tracking_bin_1
, timer_deadline_tracking_bin_2
;
209 fill_user32_eproc(proc_t
, struct user32_eproc
*__restrict
);
211 fill_user32_externproc(proc_t
, struct user32_extern_proc
*__restrict
);
213 fill_user64_eproc(proc_t
, struct user64_eproc
*__restrict
);
215 fill_user64_proc(proc_t
, struct user64_kinfo_proc
*__restrict
);
217 fill_user64_externproc(proc_t
, struct user64_extern_proc
*__restrict
);
219 fill_user32_proc(proc_t
, struct user32_kinfo_proc
*__restrict
);
222 kdbg_control(int *name
, u_int namelen
, user_addr_t where
, size_t * sizep
);
228 pcsamples_ops(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
,
230 __private_extern__ kern_return_t
231 reset_vmobjectcache(unsigned int val1
, unsigned int val2
);
233 sysctl_procargs(int *name
, u_int namelen
, user_addr_t where
,
234 size_t *sizep
, proc_t cur_proc
);
236 sysctl_procargsx(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
,
237 proc_t cur_proc
, int argc_yes
);
239 sysctl_struct(user_addr_t oldp
, size_t *oldlenp
, user_addr_t newp
,
240 size_t newlen
, void *sp
, int len
);
242 STATIC
int sysdoproc_filt_KERN_PROC_PID(proc_t p
, void * arg
);
243 STATIC
int sysdoproc_filt_KERN_PROC_PGRP(proc_t p
, void * arg
);
244 STATIC
int sysdoproc_filt_KERN_PROC_TTY(proc_t p
, void * arg
);
245 STATIC
int sysdoproc_filt_KERN_PROC_UID(proc_t p
, void * arg
);
246 STATIC
int sysdoproc_filt_KERN_PROC_RUID(proc_t p
, void * arg
);
247 int sysdoproc_callback(proc_t p
, void *arg
);
250 /* forward declarations for non-static STATIC */
251 STATIC
void fill_loadavg64(struct loadavg
*la
, struct user64_loadavg
*la64
);
252 STATIC
void fill_loadavg32(struct loadavg
*la
, struct user32_loadavg
*la32
);
253 STATIC
int sysctl_handle_kern_threadname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
254 STATIC
int sysctl_sched_stats(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
255 STATIC
int sysctl_sched_stats_enable(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
256 STATIC
int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
;
258 STATIC
int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
;
259 #endif /* COUNT_SYSCALLS */
260 STATIC
int sysctl_doprocargs SYSCTL_HANDLER_ARGS
;
261 STATIC
int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
;
262 STATIC
int sysctl_prochandle SYSCTL_HANDLER_ARGS
;
263 STATIC
int sysctl_aiomax(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
264 STATIC
int sysctl_aioprocmax(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
265 STATIC
int sysctl_aiothreads(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
266 STATIC
int sysctl_maxproc(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
267 STATIC
int sysctl_osversion(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
268 STATIC
int sysctl_sysctl_bootargs(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
269 STATIC
int sysctl_maxvnodes(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
270 STATIC
int sysctl_securelvl(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
271 STATIC
int sysctl_domainname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
272 STATIC
int sysctl_hostname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
273 STATIC
int sysctl_procname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
274 STATIC
int sysctl_boottime(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
275 STATIC
int sysctl_symfile(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
277 STATIC
int sysctl_netboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
279 #ifdef CONFIG_IMGSRC_ACCESS
280 STATIC
int sysctl_imgsrcdev(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
282 STATIC
int sysctl_usrstack(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
283 STATIC
int sysctl_usrstack64(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
285 STATIC
int sysctl_coredump(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
286 STATIC
int sysctl_suid_coredump(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
288 STATIC
int sysctl_delayterm(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
289 STATIC
int sysctl_rage_vnode(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
290 STATIC
int sysctl_kern_check_openevt(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
291 STATIC
int sysctl_nx(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
292 STATIC
int sysctl_loadavg(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
293 STATIC
int sysctl_vm_toggle_address_reuse(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
294 STATIC
int sysctl_swapusage(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
295 STATIC
int fetch_process_cputype( proc_t cur_proc
, int *name
, u_int namelen
, cpu_type_t
*cputype
);
296 STATIC
int sysctl_sysctl_native(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
297 STATIC
int sysctl_sysctl_cputype(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
298 STATIC
int sysctl_safeboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
299 STATIC
int sysctl_singleuser(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
300 STATIC
int sysctl_minimalboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
301 STATIC
int sysctl_slide(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
304 extern void IORegistrySetOSBuildVersion(char * build_version
);
307 fill_loadavg64(struct loadavg
*la
, struct user64_loadavg
*la64
)
309 la64
->ldavg
[0] = la
->ldavg
[0];
310 la64
->ldavg
[1] = la
->ldavg
[1];
311 la64
->ldavg
[2] = la
->ldavg
[2];
312 la64
->fscale
= (user64_long_t
)la
->fscale
;
316 fill_loadavg32(struct loadavg
*la
, struct user32_loadavg
*la32
)
318 la32
->ldavg
[0] = la
->ldavg
[0];
319 la32
->ldavg
[1] = la
->ldavg
[1];
320 la32
->ldavg
[2] = la
->ldavg
[2];
321 la32
->fscale
= (user32_long_t
)la
->fscale
;
326 * Attributes stored in the kernel.
328 extern char corefilename
[MAXPATHLEN
+1];
329 extern int do_coredump
;
330 extern int sugid_coredump
;
334 extern int do_count_syscalls
;
338 int securelevel
= -1;
344 sysctl_handle_kern_threadname( __unused
struct sysctl_oid
*oidp
, __unused
void *arg1
,
345 __unused
int arg2
, struct sysctl_req
*req
)
348 struct uthread
*ut
= get_bsdthread_info(current_thread());
349 user_addr_t oldp
=0, newp
=0;
350 size_t *oldlenp
=NULL
;
354 oldlenp
= &(req
->oldlen
);
356 newlen
= req
->newlen
;
358 /* We want the current length, and maybe the string itself */
360 /* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */
361 size_t currlen
= MAXTHREADNAMESIZE
- 1;
364 /* use length of current thread name */
365 currlen
= strlen(ut
->pth_name
);
367 if(*oldlenp
< currlen
)
369 /* NOTE - we do not copy the NULL terminator */
371 error
= copyout(ut
->pth_name
,oldp
,currlen
);
376 /* return length of thread name minus NULL terminator (just like strlen) */
377 req
->oldidx
= currlen
;
380 /* We want to set the name to something */
383 if(newlen
> (MAXTHREADNAMESIZE
- 1))
387 ut
->pth_name
= (char*)kalloc( MAXTHREADNAMESIZE
);
391 kernel_debug_string_simple(TRACE_STRING_THREADNAME_PREV
, ut
->pth_name
);
393 bzero(ut
->pth_name
, MAXTHREADNAMESIZE
);
394 error
= copyin(newp
, ut
->pth_name
, newlen
);
399 kernel_debug_string_simple(TRACE_STRING_THREADNAME
, ut
->pth_name
);
405 SYSCTL_PROC(_kern
, KERN_THREADNAME
, threadname
, CTLFLAG_ANYBODY
| CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, 0, sysctl_handle_kern_threadname
,"A","");
409 sysctl_sched_stats(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
411 host_basic_info_data_t hinfo
;
415 mach_msg_type_number_t count
= HOST_BASIC_INFO_COUNT
;
416 struct _processor_statistics_np
*buf
;
419 kret
= host_info((host_t
)BSD_HOST
, HOST_BASIC_INFO
, (host_info_t
)&hinfo
, &count
);
420 if (kret
!= KERN_SUCCESS
) {
424 size
= sizeof(struct _processor_statistics_np
) * (hinfo
.logical_cpu_max
+ 2); /* One for RT Queue, One for Fair Share Queue */
426 if (req
->oldlen
< size
) {
430 MALLOC(buf
, struct _processor_statistics_np
*, size
, M_TEMP
, M_ZERO
| M_WAITOK
);
432 kret
= get_sched_statistics(buf
, &size
);
433 if (kret
!= KERN_SUCCESS
) {
438 error
= sysctl_io_opaque(req
, buf
, size
, &changed
);
444 panic("Sched info changed?!");
451 SYSCTL_PROC(_kern
, OID_AUTO
, sched_stats
, CTLFLAG_LOCKED
, 0, 0, sysctl_sched_stats
, "-", "");
454 sysctl_sched_stats_enable(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, __unused
struct sysctl_req
*req
)
459 if (req
->newlen
!= sizeof(active
)) {
463 res
= copyin(req
->newptr
, &active
, sizeof(active
));
468 return set_sched_stats_active(active
);
471 SYSCTL_PROC(_kern
, OID_AUTO
, sched_stats_enable
, CTLFLAG_LOCKED
| CTLFLAG_WR
, 0, 0, sysctl_sched_stats_enable
, "-", "");
473 extern uint32_t sched_debug_flags
;
474 SYSCTL_INT(_debug
, OID_AUTO
, sched
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &sched_debug_flags
, 0, "scheduler debug");
476 #if (DEBUG || DEVELOPMENT)
477 extern boolean_t doprnt_hide_pointers
;
478 SYSCTL_INT(_debug
, OID_AUTO
, hide_kernel_pointers
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &doprnt_hide_pointers
, 0, "hide kernel pointers from log");
481 extern int get_kernel_symfile(proc_t
, char **);
484 #define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)
486 extern unsigned int nsysent
;
487 extern int syscalls_log
[];
488 extern const char *syscallnames
[];
491 sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
493 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
494 __unused
int *name
= arg1
; /* oid element argument vector */
495 __unused
int namelen
= arg2
; /* number of oid element arguments */
496 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
497 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
498 user_addr_t newp
= req
->newptr
; /* user buffer copy in address */
499 size_t newlen
= req
->newlen
; /* user buffer copy in size */
504 /* valid values passed in:
505 * = 0 means don't keep called counts for each bsd syscall
506 * > 0 means keep called counts for each bsd syscall
507 * = 2 means dump current counts to the system log
508 * = 3 means reset all counts
509 * for example, to dump current counts:
510 * sysctl -w kern.count_calls=2
512 error
= sysctl_int(oldp
, oldlenp
, newp
, newlen
, &tmp
);
518 do_count_syscalls
= 1;
520 else if ( tmp
== 0 || tmp
== 2 || tmp
== 3 ) {
522 for ( i
= 0; i
< nsysent
; i
++ ) {
523 if ( syscalls_log
[i
] != 0 ) {
525 printf("%d calls - name %s \n", syscalls_log
[i
], syscallnames
[i
]);
533 do_count_syscalls
= 1;
537 /* adjust index so we return the right required/consumed amount */
539 req
->oldidx
+= req
->oldlen
;
543 SYSCTL_PROC(_kern
, KERN_COUNT_SYSCALLS
, count_syscalls
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
544 0, /* Pointer argument (arg1) */
545 0, /* Integer argument (arg2) */
546 sysctl_docountsyscalls
, /* Handler function */
547 NULL
, /* Data pointer */
549 #endif /* COUNT_SYSCALLS */
552 * The following sysctl_* functions should not be used
553 * any more, as they can only cope with callers in
554 * user mode: Use new-style
562 * Validate parameters and get old / set new parameters
563 * for an integer-valued sysctl function.
566 sysctl_int(user_addr_t oldp
, size_t *oldlenp
,
567 user_addr_t newp
, size_t newlen
, int *valp
)
571 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
573 if (oldp
&& *oldlenp
< sizeof(int))
575 if (newp
&& newlen
!= sizeof(int))
577 *oldlenp
= sizeof(int);
579 error
= copyout(valp
, oldp
, sizeof(int));
580 if (error
== 0 && newp
) {
581 error
= copyin(newp
, valp
, sizeof(int));
582 AUDIT_ARG(value32
, *valp
);
588 * Validate parameters and get old / set new parameters
589 * for an quad(64bit)-valued sysctl function.
592 sysctl_quad(user_addr_t oldp
, size_t *oldlenp
,
593 user_addr_t newp
, size_t newlen
, quad_t
*valp
)
597 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
599 if (oldp
&& *oldlenp
< sizeof(quad_t
))
601 if (newp
&& newlen
!= sizeof(quad_t
))
603 *oldlenp
= sizeof(quad_t
);
605 error
= copyout(valp
, oldp
, sizeof(quad_t
));
606 if (error
== 0 && newp
)
607 error
= copyin(newp
, valp
, sizeof(quad_t
));
612 sysdoproc_filt_KERN_PROC_PID(proc_t p
, void * arg
)
614 if (p
->p_pid
!= (pid_t
)*(int*)arg
)
621 sysdoproc_filt_KERN_PROC_PGRP(proc_t p
, void * arg
)
623 if (p
->p_pgrpid
!= (pid_t
)*(int*)arg
)
630 sysdoproc_filt_KERN_PROC_TTY(proc_t p
, void * arg
)
635 /* This is very racy but list lock is held.. Hmmm. */
636 if ((p
->p_flag
& P_CONTROLT
) == 0 ||
637 (p
->p_pgrp
== NULL
) || (p
->p_pgrp
->pg_session
== NULL
) ||
638 (tp
= SESSION_TP(p
->p_pgrp
->pg_session
)) == TTY_NULL
||
639 tp
->t_dev
!= (dev_t
)*(int*)arg
)
648 sysdoproc_filt_KERN_PROC_UID(proc_t p
, void * arg
)
650 kauth_cred_t my_cred
;
653 if (p
->p_ucred
== NULL
)
655 my_cred
= kauth_cred_proc_ref(p
);
656 uid
= kauth_cred_getuid(my_cred
);
657 kauth_cred_unref(&my_cred
);
659 if (uid
!= (uid_t
)*(int*)arg
)
667 sysdoproc_filt_KERN_PROC_RUID(proc_t p
, void * arg
)
669 kauth_cred_t my_cred
;
672 if (p
->p_ucred
== NULL
)
674 my_cred
= kauth_cred_proc_ref(p
);
675 ruid
= kauth_cred_getruid(my_cred
);
676 kauth_cred_unref(&my_cred
);
678 if (ruid
!= (uid_t
)*(int*)arg
)
685 * try over estimating by 5 procs
687 #define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
688 struct sysdoproc_args
{
703 sysdoproc_callback(proc_t p
, void *arg
)
705 struct sysdoproc_args
*args
= arg
;
707 if (args
->buflen
>= args
->sizeof_kproc
) {
708 if ((args
->ruidcheck
!= 0) && (sysdoproc_filt_KERN_PROC_RUID(p
, &args
->uidval
) == 0))
709 return (PROC_RETURNED
);
710 if ((args
->uidcheck
!= 0) && (sysdoproc_filt_KERN_PROC_UID(p
, &args
->uidval
) == 0))
711 return (PROC_RETURNED
);
712 if ((args
->ttycheck
!= 0) && (sysdoproc_filt_KERN_PROC_TTY(p
, &args
->uidval
) == 0))
713 return (PROC_RETURNED
);
715 bzero(args
->kprocp
, args
->sizeof_kproc
);
717 fill_user64_proc(p
, args
->kprocp
);
719 fill_user32_proc(p
, args
->kprocp
);
720 int error
= copyout(args
->kprocp
, args
->dp
, args
->sizeof_kproc
);
722 *args
->errorp
= error
;
723 return (PROC_RETURNED_DONE
);
725 args
->dp
+= args
->sizeof_kproc
;
726 args
->buflen
-= args
->sizeof_kproc
;
728 args
->needed
+= args
->sizeof_kproc
;
729 return (PROC_RETURNED
);
732 SYSCTL_NODE(_kern
, KERN_PROC
, proc
, CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, "");
734 sysctl_prochandle SYSCTL_HANDLER_ARGS
736 int cmd
= oidp
->oid_arg2
; /* subcommand for multiple nodes */
737 int *name
= arg1
; /* oid element argument vector */
738 int namelen
= arg2
; /* number of oid element arguments */
739 user_addr_t where
= req
->oldptr
;/* user buffer copy out address */
741 user_addr_t dp
= where
;
743 int buflen
= where
!= USER_ADDR_NULL
? req
->oldlen
: 0;
745 boolean_t is_64_bit
= proc_is64bit(current_proc());
746 struct user32_kinfo_proc user32_kproc
;
747 struct user64_kinfo_proc user_kproc
;
750 int (*filterfn
)(proc_t
, void *) = 0;
751 struct sysdoproc_args args
;
756 if (namelen
!= 1 && !(namelen
== 0 && cmd
== KERN_PROC_ALL
))
760 sizeof_kproc
= sizeof(user_kproc
);
761 kprocp
= &user_kproc
;
763 sizeof_kproc
= sizeof(user32_kproc
);
764 kprocp
= &user32_kproc
;
770 filterfn
= sysdoproc_filt_KERN_PROC_PID
;
774 filterfn
= sysdoproc_filt_KERN_PROC_PGRP
;
793 /* must be kern.proc.<unknown> */
798 args
.buflen
= buflen
;
799 args
.kprocp
= kprocp
;
800 args
.is_64_bit
= is_64_bit
;
802 args
.needed
= needed
;
803 args
.errorp
= &error
;
804 args
.uidcheck
= uidcheck
;
805 args
.ruidcheck
= ruidcheck
;
806 args
.ttycheck
= ttycheck
;
807 args
.sizeof_kproc
= sizeof_kproc
;
809 args
.uidval
= name
[0];
811 proc_iterate((PROC_ALLPROCLIST
| PROC_ZOMBPROCLIST
),
812 sysdoproc_callback
, &args
, filterfn
, name
);
818 needed
= args
.needed
;
820 if (where
!= USER_ADDR_NULL
) {
821 req
->oldlen
= dp
- where
;
822 if (needed
> req
->oldlen
)
825 needed
+= KERN_PROCSLOP
;
826 req
->oldlen
= needed
;
828 /* adjust index so we return the right required/consumed amount */
829 req
->oldidx
+= req
->oldlen
;
834 * We specify the subcommand code for multiple nodes as the 'req->arg2' value
835 * in the sysctl declaration itself, which comes into the handler function
836 * as 'oidp->oid_arg2'.
838 * For these particular sysctls, since they have well known OIDs, we could
839 * have just obtained it from the '((int *)arg1)[0]' parameter, but that would
840 * not demonstrate how to handle multiple sysctls that used OID_AUTO instead
841 * of a well known value with a common handler function. This is desirable,
842 * because we want well known values to "go away" at some future date.
844 * It should be noted that the value of '((int *)arg1)[1]' is used for many
845 * an integer parameter to the subcommand for many of these sysctls; we'd
846 * rather have used '((int *)arg1)[0]' for that, or even better, an element
847 * in a structure passed in as the the 'newp' argument to sysctlbyname(3),
848 * and then use leaf-node permissions enforcement, but that would have
849 * necessitated modifying user space code to correspond to the interface
850 * change, and we are striving for binary backward compatibility here; even
851 * though these are SPI, and not intended for use by user space applications
852 * which are not themselves system tools or libraries, some applications
853 * have erroneously used them.
855 SYSCTL_PROC(_kern_proc
, KERN_PROC_ALL
, all
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
856 0, /* Pointer argument (arg1) */
857 KERN_PROC_ALL
, /* Integer argument (arg2) */
858 sysctl_prochandle
, /* Handler function */
859 NULL
, /* Data is size variant on ILP32/LP64 */
861 SYSCTL_PROC(_kern_proc
, KERN_PROC_PID
, pid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
862 0, /* Pointer argument (arg1) */
863 KERN_PROC_PID
, /* Integer argument (arg2) */
864 sysctl_prochandle
, /* Handler function */
865 NULL
, /* Data is size variant on ILP32/LP64 */
867 SYSCTL_PROC(_kern_proc
, KERN_PROC_TTY
, tty
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
868 0, /* Pointer argument (arg1) */
869 KERN_PROC_TTY
, /* Integer argument (arg2) */
870 sysctl_prochandle
, /* Handler function */
871 NULL
, /* Data is size variant on ILP32/LP64 */
873 SYSCTL_PROC(_kern_proc
, KERN_PROC_PGRP
, pgrp
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
874 0, /* Pointer argument (arg1) */
875 KERN_PROC_PGRP
, /* Integer argument (arg2) */
876 sysctl_prochandle
, /* Handler function */
877 NULL
, /* Data is size variant on ILP32/LP64 */
879 SYSCTL_PROC(_kern_proc
, KERN_PROC_UID
, uid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
880 0, /* Pointer argument (arg1) */
881 KERN_PROC_UID
, /* Integer argument (arg2) */
882 sysctl_prochandle
, /* Handler function */
883 NULL
, /* Data is size variant on ILP32/LP64 */
885 SYSCTL_PROC(_kern_proc
, KERN_PROC_RUID
, ruid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
886 0, /* Pointer argument (arg1) */
887 KERN_PROC_RUID
, /* Integer argument (arg2) */
888 sysctl_prochandle
, /* Handler function */
889 NULL
, /* Data is size variant on ILP32/LP64 */
891 SYSCTL_PROC(_kern_proc
, KERN_PROC_LCID
, lcid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
892 0, /* Pointer argument (arg1) */
893 KERN_PROC_LCID
, /* Integer argument (arg2) */
894 sysctl_prochandle
, /* Handler function */
895 NULL
, /* Data is size variant on ILP32/LP64 */
900 * Fill in non-zero fields of an eproc structure for the specified process.
903 fill_user32_eproc(proc_t p
, struct user32_eproc
*__restrict ep
)
907 struct session
*sessp
;
908 kauth_cred_t my_cred
;
911 sessp
= proc_session(p
);
913 if (pg
!= PGRP_NULL
) {
914 ep
->e_pgid
= p
->p_pgrpid
;
915 ep
->e_jobc
= pg
->pg_jobc
;
916 if (sessp
!= SESSION_NULL
&& sessp
->s_ttyvp
)
917 ep
->e_flag
= EPROC_CTTY
;
919 ep
->e_ppid
= p
->p_ppid
;
921 my_cred
= kauth_cred_proc_ref(p
);
923 /* A fake historical pcred */
924 ep
->e_pcred
.p_ruid
= kauth_cred_getruid(my_cred
);
925 ep
->e_pcred
.p_svuid
= kauth_cred_getsvuid(my_cred
);
926 ep
->e_pcred
.p_rgid
= kauth_cred_getrgid(my_cred
);
927 ep
->e_pcred
.p_svgid
= kauth_cred_getsvgid(my_cred
);
929 /* A fake historical *kauth_cred_t */
930 ep
->e_ucred
.cr_ref
= my_cred
->cr_ref
;
931 ep
->e_ucred
.cr_uid
= kauth_cred_getuid(my_cred
);
932 ep
->e_ucred
.cr_ngroups
= posix_cred_get(my_cred
)->cr_ngroups
;
933 bcopy(posix_cred_get(my_cred
)->cr_groups
,
934 ep
->e_ucred
.cr_groups
, NGROUPS
* sizeof (gid_t
));
936 kauth_cred_unref(&my_cred
);
939 if ((p
->p_flag
& P_CONTROLT
) && (sessp
!= SESSION_NULL
) &&
940 (tp
= SESSION_TP(sessp
))) {
941 ep
->e_tdev
= tp
->t_dev
;
942 ep
->e_tpgid
= sessp
->s_ttypgrpid
;
946 if (sessp
!= SESSION_NULL
) {
947 if (SESS_LEADER(p
, sessp
))
948 ep
->e_flag
|= EPROC_SLEADER
;
956 * Fill in non-zero fields of an LP64 eproc structure for the specified process.
959 fill_user64_eproc(proc_t p
, struct user64_eproc
*__restrict ep
)
963 struct session
*sessp
;
964 kauth_cred_t my_cred
;
967 sessp
= proc_session(p
);
969 if (pg
!= PGRP_NULL
) {
970 ep
->e_pgid
= p
->p_pgrpid
;
971 ep
->e_jobc
= pg
->pg_jobc
;
972 if (sessp
!= SESSION_NULL
&& sessp
->s_ttyvp
)
973 ep
->e_flag
= EPROC_CTTY
;
975 ep
->e_ppid
= p
->p_ppid
;
977 my_cred
= kauth_cred_proc_ref(p
);
979 /* A fake historical pcred */
980 ep
->e_pcred
.p_ruid
= kauth_cred_getruid(my_cred
);
981 ep
->e_pcred
.p_svuid
= kauth_cred_getsvuid(my_cred
);
982 ep
->e_pcred
.p_rgid
= kauth_cred_getrgid(my_cred
);
983 ep
->e_pcred
.p_svgid
= kauth_cred_getsvgid(my_cred
);
985 /* A fake historical *kauth_cred_t */
986 ep
->e_ucred
.cr_ref
= my_cred
->cr_ref
;
987 ep
->e_ucred
.cr_uid
= kauth_cred_getuid(my_cred
);
988 ep
->e_ucred
.cr_ngroups
= posix_cred_get(my_cred
)->cr_ngroups
;
989 bcopy(posix_cred_get(my_cred
)->cr_groups
,
990 ep
->e_ucred
.cr_groups
, NGROUPS
* sizeof (gid_t
));
992 kauth_cred_unref(&my_cred
);
995 if ((p
->p_flag
& P_CONTROLT
) && (sessp
!= SESSION_NULL
) &&
996 (tp
= SESSION_TP(sessp
))) {
997 ep
->e_tdev
= tp
->t_dev
;
998 ep
->e_tpgid
= sessp
->s_ttypgrpid
;
1002 if (sessp
!= SESSION_NULL
) {
1003 if (SESS_LEADER(p
, sessp
))
1004 ep
->e_flag
|= EPROC_SLEADER
;
1005 session_rele(sessp
);
1007 if (pg
!= PGRP_NULL
)
1012 * Fill in an eproc structure for the specified process.
1013 * bzeroed by our caller, so only set non-zero fields.
1016 fill_user32_externproc(proc_t p
, struct user32_extern_proc
*__restrict exp
)
1018 exp
->p_starttime
.tv_sec
= p
->p_start
.tv_sec
;
1019 exp
->p_starttime
.tv_usec
= p
->p_start
.tv_usec
;
1020 exp
->p_flag
= p
->p_flag
;
1021 if (p
->p_lflag
& P_LTRACED
)
1022 exp
->p_flag
|= P_TRACED
;
1023 if (p
->p_lflag
& P_LPPWAIT
)
1024 exp
->p_flag
|= P_PPWAIT
;
1025 if (p
->p_lflag
& P_LEXIT
)
1026 exp
->p_flag
|= P_WEXIT
;
1027 exp
->p_stat
= p
->p_stat
;
1028 exp
->p_pid
= p
->p_pid
;
1029 exp
->p_oppid
= p
->p_oppid
;
1031 exp
->user_stack
= p
->user_stack
;
1032 exp
->p_debugger
= p
->p_debugger
;
1033 exp
->sigwait
= p
->sigwait
;
1035 #ifdef _PROC_HAS_SCHEDINFO_
1036 exp
->p_estcpu
= p
->p_estcpu
;
1037 exp
->p_pctcpu
= p
->p_pctcpu
;
1038 exp
->p_slptime
= p
->p_slptime
;
1040 exp
->p_realtimer
.it_interval
.tv_sec
=
1041 (user32_time_t
)p
->p_realtimer
.it_interval
.tv_sec
;
1042 exp
->p_realtimer
.it_interval
.tv_usec
=
1043 (__int32_t
)p
->p_realtimer
.it_interval
.tv_usec
;
1045 exp
->p_realtimer
.it_value
.tv_sec
=
1046 (user32_time_t
)p
->p_realtimer
.it_value
.tv_sec
;
1047 exp
->p_realtimer
.it_value
.tv_usec
=
1048 (__int32_t
)p
->p_realtimer
.it_value
.tv_usec
;
1050 exp
->p_rtime
.tv_sec
= (user32_time_t
)p
->p_rtime
.tv_sec
;
1051 exp
->p_rtime
.tv_usec
= (__int32_t
)p
->p_rtime
.tv_usec
;
1053 exp
->p_sigignore
= p
->p_sigignore
;
1054 exp
->p_sigcatch
= p
->p_sigcatch
;
1055 exp
->p_priority
= p
->p_priority
;
1056 exp
->p_nice
= p
->p_nice
;
1057 bcopy(&p
->p_comm
, &exp
->p_comm
, MAXCOMLEN
);
1058 exp
->p_xstat
= p
->p_xstat
;
1059 exp
->p_acflag
= p
->p_acflag
;
1063 * Fill in an LP64 version of extern_proc structure for the specified process.
1066 fill_user64_externproc(proc_t p
, struct user64_extern_proc
*__restrict exp
)
1068 exp
->p_starttime
.tv_sec
= p
->p_start
.tv_sec
;
1069 exp
->p_starttime
.tv_usec
= p
->p_start
.tv_usec
;
1070 exp
->p_flag
= p
->p_flag
;
1071 if (p
->p_lflag
& P_LTRACED
)
1072 exp
->p_flag
|= P_TRACED
;
1073 if (p
->p_lflag
& P_LPPWAIT
)
1074 exp
->p_flag
|= P_PPWAIT
;
1075 if (p
->p_lflag
& P_LEXIT
)
1076 exp
->p_flag
|= P_WEXIT
;
1077 exp
->p_stat
= p
->p_stat
;
1078 exp
->p_pid
= p
->p_pid
;
1079 exp
->p_oppid
= p
->p_oppid
;
1081 exp
->user_stack
= p
->user_stack
;
1082 exp
->p_debugger
= p
->p_debugger
;
1083 exp
->sigwait
= p
->sigwait
;
1085 #ifdef _PROC_HAS_SCHEDINFO_
1086 exp
->p_estcpu
= p
->p_estcpu
;
1087 exp
->p_pctcpu
= p
->p_pctcpu
;
1088 exp
->p_slptime
= p
->p_slptime
;
1090 exp
->p_realtimer
.it_interval
.tv_sec
= p
->p_realtimer
.it_interval
.tv_sec
;
1091 exp
->p_realtimer
.it_interval
.tv_usec
= p
->p_realtimer
.it_interval
.tv_usec
;
1093 exp
->p_realtimer
.it_value
.tv_sec
= p
->p_realtimer
.it_value
.tv_sec
;
1094 exp
->p_realtimer
.it_value
.tv_usec
= p
->p_realtimer
.it_value
.tv_usec
;
1096 exp
->p_rtime
.tv_sec
= p
->p_rtime
.tv_sec
;
1097 exp
->p_rtime
.tv_usec
= p
->p_rtime
.tv_usec
;
1099 exp
->p_sigignore
= p
->p_sigignore
;
1100 exp
->p_sigcatch
= p
->p_sigcatch
;
1101 exp
->p_priority
= p
->p_priority
;
1102 exp
->p_nice
= p
->p_nice
;
1103 bcopy(&p
->p_comm
, &exp
->p_comm
, MAXCOMLEN
);
1104 exp
->p_xstat
= p
->p_xstat
;
1105 exp
->p_acflag
= p
->p_acflag
;
1109 fill_user32_proc(proc_t p
, struct user32_kinfo_proc
*__restrict kp
)
1111 /* on a 64 bit kernel, 32 bit users get some truncated information */
1112 fill_user32_externproc(p
, &kp
->kp_proc
);
1113 fill_user32_eproc(p
, &kp
->kp_eproc
);
1117 fill_user64_proc(proc_t p
, struct user64_kinfo_proc
*__restrict kp
)
1119 fill_user64_externproc(p
, &kp
->kp_proc
);
1120 fill_user64_eproc(p
, &kp
->kp_eproc
);
/*
 * Handler for the kern.kdebug sysctl node: dispatches kdebug trace
 * subcommands to kdbg_control() and accounts for the bytes consumed.
 */
STATIC int
sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
{
	__unused int cmd = oidp->oid_arg2;	/* subcommand*/
	int *name = arg1;	/* oid element argument vector */
	int namelen = arg2;	/* number of oid element arguments */
	user_addr_t oldp = req->oldptr;	/* user buffer copy out address */
	size_t *oldlenp = &req->oldlen;	/* user buffer copy out size */
//	user_addr_t newp = req->newptr;	/* user buffer copy in address */
//	size_t newlen = req->newlen;	/* user buffer copy in size */
	int ret = 0;

	/*
	 * NOTE(review): the leading cases of this switch (and any up-front
	 * permission/namelen validation) were elided from this extraction;
	 * only the tail of the dispatch is visible here — confirm against
	 * the full file.
	 */
	switch (name[0]) {
	case KERN_KDWRITETR:
	case KERN_KDWRITEMAP:
	case KERN_KDGETENTROPY:
	case KERN_KDREADCURTHRMAP:
	case KERN_KDSET_TYPEFILTER:
	case KERN_KDBUFWAIT:
	case KERN_KDWRITEMAP_V3:
	case KERN_KDWRITETR_V3:
		/* All trace subcommands funnel through the kdebug control
		 * entry point, which interprets name/oldp/oldlenp itself. */
		ret = kdbg_control(name, namelen, oldp, oldlenp);
		break;
	default:
		ret = ENOTSUP;
		break;
	}

	/* adjust index so we return the right required/consumed amount */
	if (!ret)
		req->oldidx += req->oldlen;

	return (ret);
}
1176 SYSCTL_PROC(_kern
, KERN_KDEBUG
, kdebug
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1177 0, /* Pointer argument (arg1) */
1178 0, /* Integer argument (arg2) */
1179 sysctl_kdebug_ops
, /* Handler function */
1180 NULL
, /* Data pointer */
/*
 * Return the top *sizep bytes of the user stack, or the entire area of the
 * user stack down through the saved exec_path, whichever is smaller.
 */
STATIC int
sysctl_doprocargs SYSCTL_HANDLER_ARGS
{
	__unused int cmd = oidp->oid_arg2;	/* subcommand*/
	int *name = arg1;	/* oid element argument vector */
	int namelen = arg2;	/* number of oid element arguments */
	user_addr_t oldp = req->oldptr;	/* user buffer copy out address */
	size_t *oldlenp = &req->oldlen;	/* user buffer copy out size */
//	user_addr_t newp = req->newptr;	/* user buffer copy in address */
//	size_t newlen = req->newlen;	/* user buffer copy in size */
	int error;

	/* argc_yes == 0: legacy KERN_PROCARGS layout (no leading argc word) */
	error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 0);

	/* adjust index so we return the right required/consumed amount */
	if (!error)
		req->oldidx += req->oldlen;

	return (error);
}
1208 SYSCTL_PROC(_kern
, KERN_PROCARGS
, procargs
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1209 0, /* Pointer argument (arg1) */
1210 0, /* Integer argument (arg2) */
1211 sysctl_doprocargs
, /* Handler function */
1212 NULL
, /* Data pointer */
/*
 * KERN_PROCARGS2 handler: like sysctl_doprocargs, but the copied-out
 * buffer is prefixed with the target process' argc (argc_yes == 1).
 */
STATIC int
sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
{
	__unused int cmd = oidp->oid_arg2;	/* subcommand*/
	int *name = arg1;	/* oid element argument vector */
	int namelen = arg2;	/* number of oid element arguments */
	user_addr_t oldp = req->oldptr;	/* user buffer copy out address */
	size_t *oldlenp = &req->oldlen;	/* user buffer copy out size */
//	user_addr_t newp = req->newptr;	/* user buffer copy in address */
//	size_t newlen = req->newlen;	/* user buffer copy in size */
	int error;

	error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 1);

	/* adjust index so we return the right required/consumed amount */
	if (!error)
		req->oldidx += req->oldlen;

	return (error);
}
1235 SYSCTL_PROC(_kern
, KERN_PROCARGS2
, procargs2
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1236 0, /* Pointer argument (arg1) */
1237 0, /* Integer argument (arg2) */
1238 sysctl_doprocargs2
, /* Handler function */
1239 NULL
, /* Data pointer */
/*
 * Common backend for KERN_PROCARGS / KERN_PROCARGS2.
 *
 * Copies the argument area off the top of the target process' user stack
 * (where exec left argv/envp and the executable path) into the caller's
 * buffer.  With where == USER_ADDR_NULL only the required length is
 * reported via *sizep.  argc_yes selects the PROCARGS2 layout (leading
 * argc word) versus the legacy layout (argv[0]/path tacked on at the end).
 *
 * NOTE(review): several error-path and declaration lines of this function
 * were elided from this extraction and have been reconstructed from the
 * visible control flow — confirm against the full file.
 */
STATIC int
sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
                 size_t *sizep, proc_t cur_proc, int argc_yes)
{
	proc_t p;
	int buflen = where != USER_ADDR_NULL ? *sizep : 0;
	int error = 0;
	struct _vm_map *proc_map;
	struct task * task;
	vm_map_copy_t tmp;
	user_addr_t arg_addr;
	size_t arg_size;
	caddr_t data;
	size_t argslen = 0;
	int size;
	vm_offset_t copy_start, copy_end;
	kern_return_t ret;
	int pid;
	kauth_cred_t my_cred;
	uid_t uid;

	if (namelen < 1)
		return (EINVAL);

	if (argc_yes)
		buflen -= sizeof(int);	/* reserve first word to return argc */

	/* we only care about buflen when where (oldp from sysctl) is not NULL. */
	/* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
	/* is not NULL then the caller wants us to return the length needed to */
	/* hold the data we would return */
	if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
		return (EINVAL);
	}
	arg_size = buflen;

	/*
	 *	Lookup process by pid
	 */
	pid = name[0];
	p = proc_find(pid);
	if (p == NULL) {
		return (EINVAL);
	}

	/*
	 *	Copy the top N bytes of the stack.
	 *	On all machines we have so far, the stack grows
	 *	downwards.
	 *
	 *	If the user expects no more than N bytes of
	 *	argument list, use that as a guess for the
	 *	size.
	 */

	if (!p->user_stack) {
		proc_rele(p);
		return (EINVAL);
	}

	if (where == USER_ADDR_NULL) {
		/* caller only wants to know length of proc args data */
		if (sizep == NULL) {
			proc_rele(p);
			return (EFAULT);
		}

		size = p->p_argslen;
		proc_rele(p);
		if (argc_yes) {
			/* PROCARGS2: one extra word for the leading argc */
			size += sizeof(int);
		} else {
			/*
			 * old PROCARGS will return the executable's path and plus some
			 * extra space for work alignment and data tags
			 */
			size += PATH_MAX + (6 * sizeof(int));
		}
		/* round the reported size up to word alignment */
		size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
		*sizep = size;
		return (0);
	}

	/* Only the owner of the process (or the superuser) may read its args. */
	my_cred = kauth_cred_proc_ref(p);
	uid = kauth_cred_getuid(my_cred);
	kauth_cred_unref(&my_cred);

	if ((uid != kauth_cred_getuid(kauth_cred_get()))
	    && suser(kauth_cred_get(), &cur_proc->p_acflag)) {
		proc_rele(p);
		return (EINVAL);
	}

	if ((u_int)arg_size > p->p_argslen)
		arg_size = round_page(p->p_argslen);

	/* Args live at the very top of the user stack (stack grows down). */
	arg_addr = p->user_stack - arg_size;

	/*
	 *	Before we can block (any VM code), make another
	 *	reference to the map to keep it alive.  We do
	 *	that by getting a reference on the task itself.
	 */
	task = p->task;
	if (task == NULL) {
		proc_rele(p);
		return (EINVAL);
	}

	/* save the size off before we drop our hold on the proc */
	argslen = p->p_argslen;
	/*
	 * Once we have a task reference we can convert that into a
	 * map reference, which we will use in the calls below.  The
	 * task/process may change its map after we take this reference
	 * (see execve), but the worst that will happen then is a return
	 * of stale info (which is always a possibility).
	 */
	task_reference(task);
	proc_rele(p);
	proc_map = get_task_map_reference(task);
	task_deallocate(task);

	if (proc_map == NULL)
		return (EINVAL);

	/* Scratch buffer in the kernel map to land the copied args in. */
	ret = kmem_alloc(kernel_map, &copy_start, round_page(arg_size), VM_KERN_MEMORY_BSD);
	if (ret != KERN_SUCCESS) {
		vm_map_deallocate(proc_map);
		return (ENOMEM);
	}

	copy_end = round_page(copy_start + arg_size);

	if( vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
	    (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
		vm_map_deallocate(proc_map);
		kmem_free(kernel_map, copy_start,
		    round_page(arg_size));
		return (EIO);
	}

	/*
	 *	Now that we've done the copyin from the process'
	 *	map, we can release the reference to it.
	 */
	vm_map_deallocate(proc_map);

	if( vm_map_copy_overwrite(kernel_map,
	    (vm_map_address_t)copy_start,
	    tmp, FALSE) != KERN_SUCCESS) {
		kmem_free(kernel_map, copy_start,
		    round_page(arg_size));
		vm_map_copy_discard(tmp);
		return (EIO);
	}

	/* Point at the real args within the (page-rounded) scratch area. */
	if (arg_size > argslen) {
		data = (caddr_t) (copy_end - argslen);
		size = argslen;
	} else {
		data = (caddr_t) (copy_end - arg_size);
		size = arg_size;
	}

	/*
	 * When these sysctls were introduced, the first string in the strings
	 * section was just the bare path of the executable.  However, for security
	 * reasons we now prefix this string with executable_path= so it can be
	 * parsed getenv style.  To avoid binary compatability issues with exising
	 * callers of this sysctl, we strip it off here if present.
	 * (rdar://problem/13746466)
	 */
#define EXECUTABLE_KEY "executable_path="
	if (strncmp(EXECUTABLE_KEY, data, strlen(EXECUTABLE_KEY)) == 0){
		data += strlen(EXECUTABLE_KEY);
		size -= strlen(EXECUTABLE_KEY);
	}

	if (argc_yes) {
		/* Put processes argc as the first word in the copyout buffer */
		/* NOTE(review): p is dereferenced here after proc_rele() above;
		 * this matches the visible flow, but verify lifetime. */
		suword(where, p->p_argc);
		error = copyout(data, (where + sizeof(int)), size);
		size += sizeof(int);
	} else {
		error = copyout(data, where, size);

		/*
		 * Make the old PROCARGS work to return the executable's path
		 * But, only if there is enough space in the provided buffer
		 *
		 * on entry: data [possibily] points to the beginning of the path
		 *
		 * Note: we keep all pointers&sizes aligned to word boundries
		 */
		if ( (! error) && (buflen > 0 && (u_int)buflen > argslen) )
		{
			int binPath_sz, alignedBinPath_sz = 0;
			int extraSpaceNeeded, addThis;
			user_addr_t placeHere;
			char * str = (char *) data;
			int max_len = size;

			/* Some apps are really bad about messing up their stacks
			   So, we have to be extra careful about getting the length
			   of the executing binary.  If we encounter an error, we bail.
			*/

			/* Limit ourselves to PATH_MAX paths */
			if ( max_len > PATH_MAX ) max_len = PATH_MAX;

			binPath_sz = 0;

			/* Measure the NUL-terminated executable path at *data. */
			while ( (binPath_sz < max_len-1) && (*str++ != 0) )
				binPath_sz++;

			/* If we have a NUL terminator, copy it, too */
			if (binPath_sz < max_len-1) binPath_sz += 1;

			/* Pre-Flight the space requiremnts */

			/* Account for the padding that fills out binPath to the next word */
			alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;

			placeHere = where + size;

			/* Account for the bytes needed to keep placeHere word aligned */
			addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;

			/* Add up all the space that is needed */
			extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));

			/* is there is room to tack on argv[0]? */
			if ( (buflen & ~(sizeof(int)-1)) >= ( argslen + extraSpaceNeeded ))
			{
				placeHere += addThis;
				/* sentinel words framing the appended path */
				suword(placeHere, 0);
				placeHere += sizeof(int);
				suword(placeHere, 0xBFFF0000);
				placeHere += sizeof(int);
				suword(placeHere, 0);
				placeHere += sizeof(int);
				error = copyout(data, placeHere, binPath_sz);
				if ( ! error )
				{
					placeHere += binPath_sz;
					suword(placeHere, 0);
					size += extraSpaceNeeded;
				}
			}
		}
	}

	/* Release the kernel scratch buffer. */
	if (copy_start != (vm_offset_t) 0) {
		kmem_free(kernel_map, copy_start, copy_end - copy_start);
	}
	if (error) {
		return (error);
	}

	/* Report how many bytes were actually produced. */
	if (where != USER_ADDR_NULL)
		*sizep = size;
	return (0);
}
/*
 * Max number of concurrent aio requests
 */
STATIC int
sysctl_aiomax
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int error = sysctl_io_number(req, aio_max_requests, sizeof(int), &new_value, &changed);
	if (changed) {
		/* make sure the system-wide limit is greater than the per process limit */
		if (new_value >= aio_max_requests_per_process && new_value <= AIO_MAX_REQUESTS)
			aio_max_requests = new_value;
		else
			error = EINVAL;
	}
	return(error);
}
/*
 * Max number of concurrent aio requests per process
 */
STATIC int
sysctl_aioprocmax
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int error = sysctl_io_number(req, aio_max_requests_per_process, sizeof(int), &new_value, &changed);
	if (changed) {
		/* make sure per process limit is less than the system-wide limit */
		if (new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX)
			aio_max_requests_per_process = new_value;
		else
			error = EINVAL;
	}
	return(error);
}
/*
 * Max number of async IO worker threads
 */
STATIC int
sysctl_aiothreads
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int error = sysctl_io_number(req, aio_worker_threads, sizeof(int), &new_value, &changed);
	if (changed) {
		/* we only allow an increase in the number of worker threads */
		if (new_value > aio_worker_threads) {
			/* spin up just the additional threads requested */
			_aio_create_worker_threads((new_value - aio_worker_threads));
			aio_worker_threads = new_value;
		}
		else
			error = EINVAL;
	}
	return(error);
}
/*
 * System-wide limit on the max number of processes
 */
STATIC int
sysctl_maxproc
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int error = sysctl_io_number(req, maxproc, sizeof(int), &new_value, &changed);
	if (changed) {
		AUDIT_ARG(value32, new_value);
		/* make sure the system-wide limit is less than the configured hard
		   limit set at kernel compilation */
		if (new_value <= hard_maxproc && new_value > 0)
			maxproc = new_value;
		else
			error = EINVAL;
	}
	return(error);
}
1593 SYSCTL_STRING(_kern
, KERN_OSTYPE
, ostype
,
1594 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1596 SYSCTL_STRING(_kern
, KERN_OSRELEASE
, osrelease
,
1597 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1599 SYSCTL_INT(_kern
, KERN_OSREV
, osrevision
,
1600 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1601 (int *)NULL
, BSD
, "");
1602 SYSCTL_STRING(_kern
, KERN_VERSION
, version
,
1603 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1605 SYSCTL_STRING(_kern
, OID_AUTO
, uuid
,
1606 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1607 &kernel_uuid_string
[0], 0, "");
1610 int debug_kprint_syscall
= 0;
1611 char debug_kprint_syscall_process
[MAXCOMLEN
+1];
1613 /* Thread safe: bits and string value are not used to reclaim state */
1614 SYSCTL_INT (_debug
, OID_AUTO
, kprint_syscall
,
1615 CTLFLAG_RW
| CTLFLAG_LOCKED
, &debug_kprint_syscall
, 0, "kprintf syscall tracing");
1616 SYSCTL_STRING(_debug
, OID_AUTO
, kprint_syscall_process
,
1617 CTLFLAG_RW
| CTLFLAG_LOCKED
, debug_kprint_syscall_process
, sizeof(debug_kprint_syscall_process
),
1618 "name of process for kprintf syscall tracing");
/*
 * Decide whether kprintf syscall tracing applies to the current process.
 * Returns non-zero if tracing should occur; *namep (if non-NULL) is set to
 * the process name to report, or NULL when the user scoped tracing to a
 * specific name (no value in repeating what they asked for).
 */
int debug_kprint_current_process(const char **namep)
{
	struct proc *p = current_proc();

	if (p == NULL) {
		return 0;
	}

	if (debug_kprint_syscall_process[0]) {
		/* user asked to scope tracing to a particular process name */
		if(0 == strncmp(debug_kprint_syscall_process,
				p->p_comm, sizeof(debug_kprint_syscall_process))) {
			/* no value in telling the user that we traced what they asked */
			if(namep) *namep = NULL;

			return 1;
		} else {
			return 0;
		}
	}

	/* trace all processes. Tell user what we traced */
	if (namep) {
		*namep = p->p_comm;
	}

	return 1;
}
/* PR-5293665: need to use a callback function for kern.osversion to set
 * osversion in IORegistry */
STATIC int
sysctl_osversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
{
	int rval = 0;

	/* arg1/arg2 are the osversion buffer and its size (see SYSCTL_PROC below) */
	rval = sysctl_handle_string(oidp, arg1, arg2, req);

	if (req->newptr) {
		/* a write occurred: mirror the new build version into IORegistry */
		IORegistrySetOSBuildVersion((char *)arg1);
	}

	return rval;
}
1667 SYSCTL_PROC(_kern
, KERN_OSVERSION
, osversion
,
1668 CTLFLAG_RW
| CTLFLAG_KERN
| CTLTYPE_STRING
| CTLFLAG_LOCKED
,
1669 osversion
, 256 /* OSVERSIZE*/,
1670 sysctl_osversion
, "A", "");
/*
 * kern.bootargs: report the boot-args string handed to the kernel (read-only).
 */
STATIC int
sysctl_sysctl_bootargs
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error;
	char buf[256];

	/* copy into a local buffer so the platform string is never written */
	strlcpy(buf, PE_boot_args(), 256);
	error = sysctl_io_string(req, buf, 256, 0, NULL);
	return(error);
}
1684 SYSCTL_PROC(_kern
, OID_AUTO
, bootargs
,
1685 CTLFLAG_LOCKED
| CTLFLAG_RD
| CTLFLAG_KERN
| CTLTYPE_STRING
,
1687 sysctl_sysctl_bootargs
, "A", "bootargs");
1689 SYSCTL_INT(_kern
, KERN_MAXFILES
, maxfiles
,
1690 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1692 SYSCTL_INT(_kern
, KERN_ARGMAX
, argmax
,
1693 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1694 (int *)NULL
, ARG_MAX
, "");
1695 SYSCTL_INT(_kern
, KERN_POSIX1
, posix1version
,
1696 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1697 (int *)NULL
, _POSIX_VERSION
, "");
1698 SYSCTL_INT(_kern
, KERN_NGROUPS
, ngroups
,
1699 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1700 (int *)NULL
, NGROUPS_MAX
, "");
1701 SYSCTL_INT(_kern
, KERN_JOB_CONTROL
, job_control
,
1702 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1703 (int *)NULL
, 1, "");
1704 #if 1 /* _POSIX_SAVED_IDS from <unistd.h> */
1705 SYSCTL_INT(_kern
, KERN_SAVED_IDS
, saved_ids
,
1706 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1707 (int *)NULL
, 1, "");
1709 SYSCTL_INT(_kern
, KERN_SAVED_IDS
, saved_ids
,
1710 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1713 SYSCTL_INT(_kern
, OID_AUTO
, num_files
,
1714 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1716 SYSCTL_COMPAT_INT(_kern
, OID_AUTO
, num_vnodes
,
1717 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1719 SYSCTL_INT(_kern
, OID_AUTO
, num_tasks
,
1720 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1722 SYSCTL_INT(_kern
, OID_AUTO
, num_threads
,
1723 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1724 &thread_max
, 0, "");
1725 SYSCTL_INT(_kern
, OID_AUTO
, num_taskthreads
,
1726 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1727 &task_threadmax
, 0, "");
/*
 * kern.maxvnodes: read/write the desired vnode count; on change, resize the
 * VM object cache and the name cache to match.
 */
STATIC int
sysctl_maxvnodes (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int oldval = desiredvnodes;
	/* writes land directly in desiredvnodes (no separate validation) */
	int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &desiredvnodes, NULL);

	if (oldval != desiredvnodes) {
		reset_vmobjectcache(oldval, desiredvnodes);
		resize_namecache(desiredvnodes);
	}

	return(error);
}
1743 SYSCTL_INT(_kern
, OID_AUTO
, namecache_disabled
,
1744 CTLFLAG_RW
| CTLFLAG_LOCKED
,
1745 &nc_disabled
, 0, "");
1747 SYSCTL_PROC(_kern
, KERN_MAXVNODES
, maxvnodes
,
1748 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1749 0, 0, sysctl_maxvnodes
, "I", "");
1751 SYSCTL_PROC(_kern
, KERN_MAXPROC
, maxproc
,
1752 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1753 0, 0, sysctl_maxproc
, "I", "");
1755 SYSCTL_PROC(_kern
, KERN_AIOMAX
, aiomax
,
1756 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1757 0, 0, sysctl_aiomax
, "I", "");
1759 SYSCTL_PROC(_kern
, KERN_AIOPROCMAX
, aioprocmax
,
1760 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1761 0, 0, sysctl_aioprocmax
, "I", "");
1763 SYSCTL_PROC(_kern
, KERN_AIOTHREADS
, aiothreads
,
1764 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1765 0, 0, sysctl_aiothreads
, "I", "");
1767 #if (DEVELOPMENT || DEBUG)
1768 extern int sched_smt_balance
;
1769 SYSCTL_INT(_kern
, OID_AUTO
, sched_smt_balance
,
1770 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1771 &sched_smt_balance
, 0, "");
/*
 * kern.securelevel: only init (pid 1) may lower the securelevel; anyone
 * with write access may raise it.
 */
STATIC int
sysctl_securelvl
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int error = sysctl_io_number(req, securelevel, sizeof(int), &new_value, &changed);
	if (changed) {
		/* reject a decrease unless the requester is pid 1 */
		if (!(new_value < securelevel && req->p->p_pid != 1)) {
			proc_list_lock();
			securelevel = new_value;
			proc_list_unlock();
		} else {
			error = EPERM;
		}
	}
	return(error);
}
1792 SYSCTL_PROC(_kern
, KERN_SECURELVL
, securelevel
,
1793 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1794 0, 0, sysctl_securelvl
, "I", "");
/*
 * kern.nisdomainname: read/write the NIS domain name, keeping the cached
 * length in sync after a write.
 */
STATIC int
sysctl_domainname
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error, changed;
	error = sysctl_io_string(req, domainname, sizeof(domainname), 0, &changed);
	if (changed) {
		domainnamelen = strlen(domainname);
	}
	return(error);
}
1809 SYSCTL_PROC(_kern
, KERN_DOMAINNAME
, nisdomainname
,
1810 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1811 0, 0, sysctl_domainname
, "A", "");
1813 SYSCTL_COMPAT_INT(_kern
, KERN_HOSTID
, hostid
,
1814 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
/*
 * kern.hostname: read/write the hostname; the cached length is taken from
 * the request's newlen after a successful write.
 */
STATIC int
sysctl_hostname
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error, changed;
	error = sysctl_io_string(req, hostname, sizeof(hostname), 1, &changed);
	if (changed) {
		hostnamelen = req->newlen;
	}
	return(error);
}
1830 SYSCTL_PROC(_kern
, KERN_HOSTNAME
, hostname
,
1831 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1832 0, 0, sysctl_hostname
, "A", "");
/*
 * kern.procname: expose (and, historically, allow writing) the calling
 * process' long name buffer.
 */
STATIC int
sysctl_procname
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	/* Original code allowed writing, I'm copying this, although this all makes
	   no sense to me. Besides, this sysctl is never used. */
	return sysctl_io_string(req, &req->p->p_name[0], (2*MAXCOMLEN+1), 1, NULL);
}
1843 SYSCTL_PROC(_kern
, KERN_PROCNAME
, procname
,
1844 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
1845 0, 0, sysctl_procname
, "A", "");
1847 SYSCTL_INT(_kern
, KERN_SPECULATIVE_READS
, speculative_reads_disabled
,
1848 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1849 &speculative_reads_disabled
, 0, "");
1851 SYSCTL_INT(_kern
, OID_AUTO
, ignore_is_ssd
,
1852 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1853 &ignore_is_ssd
, 0, "");
1855 SYSCTL_UINT(_kern
, OID_AUTO
, preheat_max_bytes
,
1856 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1857 &preheat_max_bytes
, 0, "");
1859 SYSCTL_UINT(_kern
, OID_AUTO
, preheat_min_bytes
,
1860 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1861 &preheat_min_bytes
, 0, "");
1863 SYSCTL_UINT(_kern
, OID_AUTO
, speculative_prefetch_max
,
1864 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1865 &speculative_prefetch_max
, 0, "");
1867 SYSCTL_UINT(_kern
, OID_AUTO
, speculative_prefetch_max_iosize
,
1868 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1869 &speculative_prefetch_max_iosize
, 0, "");
1871 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_free_target
,
1872 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1873 &vm_page_free_target
, 0, "");
1875 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_free_min
,
1876 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1877 &vm_page_free_min
, 0, "");
1879 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_free_reserved
,
1880 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1881 &vm_page_free_reserved
, 0, "");
1883 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_speculative_percentage
,
1884 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1885 &vm_page_speculative_percentage
, 0, "");
1887 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_speculative_q_age_ms
,
1888 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1889 &vm_page_speculative_q_age_ms
, 0, "");
1891 SYSCTL_UINT(_kern
, OID_AUTO
, vm_max_delayed_work_limit
,
1892 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1893 &vm_max_delayed_work_limit
, 0, "");
1895 SYSCTL_UINT(_kern
, OID_AUTO
, vm_max_batch
,
1896 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1897 &vm_max_batch
, 0, "");
1899 SYSCTL_STRING(_kern
, OID_AUTO
, bootsessionuuid
,
1900 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1901 &bootsessionuuid_string
, sizeof(bootsessionuuid_string
) , "");
/*
 * kern.boottime: return the boot time as a timeval sized for the caller's
 * ABI (user64_timeval for 64-bit callers, user32_timeval otherwise).
 */
STATIC int
sysctl_boottime
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	struct timeval tv;
	boottime_timeval(&tv);
	struct proc *p = req->p;

	if (proc_is64bit(p)) {
		struct user64_timeval t;
		t.tv_sec = tv.tv_sec;
		t.tv_usec = tv.tv_usec;
		return sysctl_io_opaque(req, &t, sizeof(t), NULL);
	} else {
		struct user32_timeval t;
		t.tv_sec = tv.tv_sec;
		t.tv_usec = tv.tv_usec;
		return sysctl_io_opaque(req, &t, sizeof(t), NULL);
	}
}
1924 SYSCTL_PROC(_kern
, KERN_BOOTTIME
, boottime
,
1925 CTLTYPE_STRUCT
| CTLFLAG_KERN
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1926 0, 0, sysctl_boottime
, "S,timeval", "");
/*
 * kern.symfile: report the path of the kernel symbol file for the
 * requesting process.
 */
STATIC int
sysctl_symfile
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	char *str;
	int error = get_kernel_symfile(req->p, &str);
	if (error)
		return (error);
	return sysctl_io_string(req, str, 0, 0, NULL);
}
1940 SYSCTL_PROC(_kern
, KERN_SYMFILE
, symfile
,
1941 CTLTYPE_STRING
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1942 0, 0, sysctl_symfile
, "A", "");
/*
 * kern.netboot: report whether the system root is network-mounted
 * (read-only integer from netboot_root()).
 */
STATIC int
sysctl_netboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, netboot_root(), sizeof(int), NULL, NULL);
}
1952 SYSCTL_PROC(_kern
, KERN_NETBOOT
, netboot
,
1953 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1954 0, 0, sysctl_netboot
, "I", "");
1957 #ifdef CONFIG_IMGSRC_ACCESS
1959 * Legacy--act as if only one layer of nesting is possible.
/*
 * Legacy--act as if only one layer of nesting is possible.
 * Reports the dev_t backing the first image-boot root volume; superuser only.
 */
STATIC int
sysctl_imgsrcdev
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	vfs_context_t ctx = vfs_context_current();
	vnode_t devvp;
	int result;

	if (!vfs_context_issuser(ctx)) {
		return EPERM;
	}

	if (imgsrc_rootvnodes[0] == NULL) {
		return ENOENT;
	}

	/* take an iocount on the root vnode while we look at its mount */
	result = vnode_getwithref(imgsrc_rootvnodes[0]);
	if (result != 0) {
		return result;
	}

	devvp = vnode_mount(imgsrc_rootvnodes[0])->mnt_devvp;
	result = vnode_getwithref(devvp);
	if (result != 0) {
		goto out;
	}

	result = sysctl_io_number(req, vnode_specrdev(devvp), sizeof(dev_t), NULL, NULL);

	vnode_put(devvp);
out:
	vnode_put(imgsrc_rootvnodes[0]);
	return result;
}
1996 SYSCTL_PROC(_kern
, OID_AUTO
, imgsrcdev
,
1997 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1998 0, 0, sysctl_imgsrcdev
, "I", "");
/*
 * kern.imgsrcinfo: report one imgsrc_info record per nested image-boot
 * root volume (device, flags, nesting height).
 */
STATIC int
sysctl_imgsrcinfo
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error;
	struct imgsrc_info info[MAX_IMAGEBOOT_NESTING];	/* 2 for now, no problem */
	uint32_t i;
	vnode_t rvp, devvp;

	if (imgsrc_rootvnodes[0] == NULLVP) {
		return ENXIO;
	}

	for (i = 0; i < MAX_IMAGEBOOT_NESTING; i++) {
		/*
		 * Go get the root vnode.
		 */
		rvp = imgsrc_rootvnodes[i];
		if (rvp == NULLVP) {
			break;
		}

		error = vnode_get(rvp);
		if (error != 0) {
			return error;
		}

		/*
		 * For now, no getting at a non-local volume.
		 */
		devvp = vnode_mount(rvp)->mnt_devvp;
		if (devvp == NULL) {
			vnode_put(rvp);
			return EINVAL;
		}

		error = vnode_getwithref(devvp);
		if (error != 0) {
			vnode_put(rvp);
			return error;
		}

		/* fill in this nesting level's record */
		info[i].ii_dev = vnode_specrdev(devvp);
		info[i].ii_flags = 0;
		info[i].ii_height = i;
		bzero(info[i].ii_reserved, sizeof(info[i].ii_reserved));

		vnode_put(devvp);
		vnode_put(rvp);
	}

	/* i is the number of levels actually filled in */
	return sysctl_io_opaque(req, info, i * sizeof(info[0]), NULL);
}
2057 SYSCTL_PROC(_kern
, OID_AUTO
, imgsrcinfo
,
2058 CTLTYPE_OPAQUE
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2059 0, 0, sysctl_imgsrcinfo
, "I", "");
2061 #endif /* CONFIG_IMGSRC_ACCESS */
2064 SYSCTL_DECL(_kern_timer
);
2065 SYSCTL_NODE(_kern
, OID_AUTO
, timer
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, "timer");
2068 SYSCTL_INT(_kern_timer
, OID_AUTO
, coalescing_enabled
,
2069 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2070 &mach_timer_coalescing_enabled
, 0, "");
2072 SYSCTL_QUAD(_kern_timer
, OID_AUTO
, deadline_tracking_bin_1
,
2073 CTLFLAG_RW
| CTLFLAG_LOCKED
,
2074 &timer_deadline_tracking_bin_1
, "");
2075 SYSCTL_QUAD(_kern_timer
, OID_AUTO
, deadline_tracking_bin_2
,
2076 CTLFLAG_RW
| CTLFLAG_LOCKED
,
2077 &timer_deadline_tracking_bin_2
, "");
2079 SYSCTL_DECL(_kern_timer_longterm
);
2080 SYSCTL_NODE(_kern_timer
, OID_AUTO
, longterm
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, "longterm");
2083 /* Must match definition in osfmk/kern/timer_call.c */
2086 ENQUEUES
, DEQUEUES
, ESCALATES
, SCANS
, PREEMPTS
,
2087 LATENCY
, LATENCY_MIN
, LATENCY_MAX
2089 extern uint64_t timer_sysctl_get(int);
2090 extern int timer_sysctl_set(int, uint64_t);
/*
 * Shared handler for the kern.timer.longterm.* OIDs: arg1 carries the
 * statistic selector (THRESHOLD, QCOUNT, ...) consumed by
 * timer_sysctl_get()/timer_sysctl_set().
 */
STATIC int
sysctl_timer
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int oid = (int)arg1;
	uint64_t value = timer_sysctl_get(oid);
	uint64_t new_value;
	int error;
	int changed;

	error = sysctl_io_number(req, value, sizeof(value), &new_value, &changed);
	if (changed)
		error = timer_sysctl_set(oid, new_value);

	return error;
}
2109 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, threshold
,
2110 CTLTYPE_QUAD
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2111 (void *) THRESHOLD
, 0, sysctl_timer
, "Q", "");
2112 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, qlen
,
2113 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2114 (void *) QCOUNT
, 0, sysctl_timer
, "Q", "");
2116 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, enqueues
,
2117 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2118 (void *) ENQUEUES
, 0, sysctl_timer
, "Q", "");
2119 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, dequeues
,
2120 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2121 (void *) DEQUEUES
, 0, sysctl_timer
, "Q", "");
2122 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, escalates
,
2123 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2124 (void *) ESCALATES
, 0, sysctl_timer
, "Q", "");
2125 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, scans
,
2126 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2127 (void *) SCANS
, 0, sysctl_timer
, "Q", "");
2128 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, preempts
,
2129 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2130 (void *) PREEMPTS
, 0, sysctl_timer
, "Q", "");
2131 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, latency
,
2132 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2133 (void *) LATENCY
, 0, sysctl_timer
, "Q", "");
2134 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, latency_min
,
2135 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2136 (void *) LATENCY_MIN
, 0, sysctl_timer
, "Q", "");
2137 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, latency_max
,
2138 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2139 (void *) LATENCY_MAX
, 0, sysctl_timer
, "Q", "");
/*
 * kern.usrstack: report the calling process' user stack address,
 * truncated to 32 bits for this legacy OID.
 */
STATIC int
sysctl_usrstack
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, (int)req->p->user_stack, sizeof(int), NULL, NULL);
}
2149 SYSCTL_PROC(_kern
, KERN_USRSTACK32
, usrstack
,
2150 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2151 0, 0, sysctl_usrstack
, "I", "");
/*
 * kern.usrstack64: report the calling process' user stack address at
 * full width.
 */
STATIC int
sysctl_usrstack64
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, req->p->user_stack, sizeof(req->p->user_stack), NULL, NULL);
}
2160 SYSCTL_PROC(_kern
, KERN_USRSTACK64
, usrstack64
,
2161 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2162 0, 0, sysctl_usrstack64
, "Q", "");
2166 SYSCTL_STRING(_kern
, KERN_COREFILE
, corefile
,
2167 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2168 corefilename
, sizeof(corefilename
), "");
/*
 * kern.coredump: toggle core dump generation (0 or 1 only).
 * Compiled out on SECURE_KERNEL builds.
 */
STATIC int
sysctl_coredump
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
	(void)req;
	return (ENOTSUP);
#else
	int new_value, changed;
	int error = sysctl_io_number(req, do_coredump, sizeof(int), &new_value, &changed);
	if (changed) {
		if ((new_value == 0) || (new_value == 1))
			do_coredump = new_value;
		else
			error = EINVAL;
	}
	return(error);
#endif
}
2190 SYSCTL_PROC(_kern
, KERN_COREDUMP
, coredump
,
2191 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2192 0, 0, sysctl_coredump
, "I", "");
/*
 * kern.sugid_coredump: toggle core dumps for set-id processes (0 or 1 only).
 * Compiled out on SECURE_KERNEL builds.
 */
STATIC int
sysctl_suid_coredump
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
	(void)req;
	return (ENOTSUP);
#else
	int new_value, changed;
	int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed);
	if (changed) {
		if ((new_value == 0) || (new_value == 1))
			sugid_coredump = new_value;
		else
			error = EINVAL;
	}
	return(error);
#endif
}
2214 SYSCTL_PROC(_kern
, KERN_SUGID_COREDUMP
, sugid_coredump
,
2215 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2216 0, 0, sysctl_suid_coredump
, "I", "");
2218 #endif /* CONFIG_COREDUMP */
/*
 * kern.delayterm: read/set the calling process' P_LDELAYTERM local flag
 * (delay termination at shutdown).
 */
STATIC int
sysctl_delayterm
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	struct proc *p = req->p;
	int new_value, changed;
	int error = sysctl_io_number(req, (req->p->p_lflag & P_LDELAYTERM)? 1: 0, sizeof(int), &new_value, &changed);
	if (changed) {
		/* p_lflag is protected by the proc lock */
		proc_lock(p);
		if (new_value)
			req->p->p_lflag |=  P_LDELAYTERM;
		else
			req->p->p_lflag &=  ~P_LDELAYTERM;
		proc_unlock(p);
	}
	return(error);
}
2238 SYSCTL_PROC(_kern
, KERN_PROCDELAYTERM
, delayterm
,
2239 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2240 0, 0, sysctl_delayterm
, "I", "");
/*
 * kern.rage_vnode: read/set rapid-aging of vnodes, scoped either to the
 * calling process (P_LRAGE_VNODES) or the calling thread (UT_RAGE_VNODES).
 */
STATIC int
sysctl_rage_vnode
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	struct proc *p = req->p;
	struct uthread *ut;
	int new_value, old_value, changed;
	int error;

	ut = get_bsdthread_info(current_thread());

	/* thread-scoped raging takes precedence in the reported value */
	if (ut->uu_flag & UT_RAGE_VNODES)
		old_value = KERN_RAGE_THREAD;
	else if (p->p_lflag & P_LRAGE_VNODES)
		old_value = KERN_RAGE_PROC;
	else
		old_value = 0;

	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);

	if (error == 0) {
		switch (new_value) {
		case KERN_RAGE_PROC:
			/* p_lflag is protected by the proc lock */
			proc_lock(p);
			p->p_lflag |= P_LRAGE_VNODES;
			proc_unlock(p);
			break;
		case KERN_UNRAGE_PROC:
			proc_lock(p);
			p->p_lflag &= ~P_LRAGE_VNODES;
			proc_unlock(p);
			break;

		case KERN_RAGE_THREAD:
			ut->uu_flag |= UT_RAGE_VNODES;
			break;
		case KERN_UNRAGE_THREAD:
			ut = get_bsdthread_info(current_thread());
			ut->uu_flag &= ~UT_RAGE_VNODES;
			break;
		}
	}
	return(error);
}
2288 SYSCTL_PROC(_kern
, KERN_RAGEVNODE
, rage_vnode
,
2289 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
2290 0, 0, sysctl_rage_vnode
, "I", "");
/* XXX move this interface into libproc and remove this sysctl */
/*
 * kern.setthread_cpupercent (write-only): set a CPU usage limit on the
 * calling thread.  The written word packs the percentage in the low 8
 * bits and the refill period (ms) in the upper 24 bits.
 */
STATIC int
sysctl_setthread_cpupercent
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, old_value;
	int error = 0;
	kern_return_t kret = KERN_SUCCESS;
	uint8_t percent = 0;
	int ms_refill = 0;

	/* NOTE(review): some validation lines of this handler were elided
	 * from this extraction and have been reconstructed — confirm. */
	if (!req->newptr)
		return (0);

	old_value = 0;

	if ((error = sysctl_io_number(req, old_value, sizeof(old_value), &new_value, NULL)) != 0)
		return (error);

	percent = new_value & 0xff;			/* low 8 bytes for perent */
	ms_refill = (new_value >> 8) & 0xffffff;	/* upper 24bytes represent ms refill value */
	if (percent > 100)
		return (EINVAL);

	/*
	 * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present.
	 */
	if ((kret = thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, percent, ms_refill * (int)NSEC_PER_MSEC)) != 0)
		return (EIO);

	return (0);
}
2325 SYSCTL_PROC(_kern
, OID_AUTO
, setthread_cpupercent
,
2326 CTLTYPE_INT
| CTLFLAG_WR
| CTLFLAG_ANYBODY
,
2327 0, 0, sysctl_setthread_cpupercent
, "I", "set thread cpu percentage limit");
/*
 * kern.check_openevt: read/set the per-process P_CHECKOPENEVT flag using
 * atomic bit operations on p_flag.
 */
STATIC int
sysctl_kern_check_openevt
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	struct proc *p = req->p;
	int new_value, old_value, changed;
	int error;

	if (p->p_flag & P_CHECKOPENEVT) {
		old_value = KERN_OPENEVT_PROC;
	} else {
		old_value = 0;
	}

	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);

	if (error == 0) {
		switch (new_value) {
		case KERN_OPENEVT_PROC:
			OSBitOrAtomic(P_CHECKOPENEVT, &p->p_flag);
			break;

		case KERN_UNOPENEVT_PROC:
			OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT), &p->p_flag);
			break;

		default:
			error = EINVAL;
		}
	}
	return(error);
}
2363 SYSCTL_PROC(_kern
, KERN_CHECKOPENEVT
, check_openevt
, CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
2364 0, 0, sysctl_kern_check_openevt
, "I", "set the per-process check-open-evt flag");
/*
 * kern.nx: read/set no-execute page protection.  Writes are refused when
 * the CPU lacks the XD feature (x86) and the whole knob is compiled out on
 * SECURE_KERNEL builds.
 */
STATIC int
sysctl_nx
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
	(void)req;
	return ENOTSUP;
#else
	int new_value, changed;
	int error;

	error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);
	if (error)
		return error;

	if (changed) {
#if defined(__i386__) || defined(__x86_64__)
		/*
		 * Only allow setting if NX is supported on the chip
		 */
		if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD))
			return ENOTSUP;
#endif
		nx_enabled = new_value;
	}
	return(error);
#endif /* SECURE_KERNEL */
}
2399 SYSCTL_PROC(_kern
, KERN_NX_PROTECTION
, nx
,
2400 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2401 0, 0, sysctl_nx
, "I", "");
2405 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2407 if (proc_is64bit(req
->p
)) {
2408 struct user64_loadavg loadinfo64
;
2409 fill_loadavg64(&averunnable
, &loadinfo64
);
2410 return sysctl_io_opaque(req
, &loadinfo64
, sizeof(loadinfo64
), NULL
);
2412 struct user32_loadavg loadinfo32
;
2413 fill_loadavg32(&averunnable
, &loadinfo32
);
2414 return sysctl_io_opaque(req
, &loadinfo32
, sizeof(loadinfo32
), NULL
);
2418 SYSCTL_PROC(_vm
, VM_LOADAVG
, loadavg
,
2419 CTLTYPE_STRUCT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2420 0, 0, sysctl_loadavg
, "S,loadavg", "");
2423 * Note: Thread safe; vm_map_lock protects in vm_toggle_entry_reuse()
2426 sysctl_vm_toggle_address_reuse(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
,
2427 __unused
int arg2
, struct sysctl_req
*req
)
2429 int old_value
=0, new_value
=0, error
=0;
2431 if(vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE
, &old_value
))
2433 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, NULL
);
2435 return (vm_toggle_entry_reuse(new_value
, NULL
));
2440 SYSCTL_PROC(_debug
, OID_AUTO
, toggle_address_reuse
, CTLFLAG_ANYBODY
| CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, 0, sysctl_vm_toggle_address_reuse
,"I","");
2445 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2448 uint64_t swap_total
;
2449 uint64_t swap_avail
;
2450 vm_size_t swap_pagesize
;
2451 boolean_t swap_encrypted
;
2452 struct xsw_usage xsu
;
2454 error
= macx_swapinfo(&swap_total
,
2461 xsu
.xsu_total
= swap_total
;
2462 xsu
.xsu_avail
= swap_avail
;
2463 xsu
.xsu_used
= swap_total
- swap_avail
;
2464 xsu
.xsu_pagesize
= swap_pagesize
;
2465 xsu
.xsu_encrypted
= swap_encrypted
;
2466 return sysctl_io_opaque(req
, &xsu
, sizeof(xsu
), NULL
);
2471 SYSCTL_PROC(_vm
, VM_SWAPUSAGE
, swapusage
,
2472 CTLTYPE_STRUCT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2473 0, 0, sysctl_swapusage
, "S,xsw_usage", "");
2476 extern void vm_page_reactivate_all_throttled(void);
2479 sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
2481 #pragma unused(arg1, arg2)
2482 int error
, val
= memorystatus_freeze_enabled
? 1 : 0;
2485 error
= sysctl_handle_int(oidp
, &val
, 0, req
);
2486 if (error
|| !req
->newptr
)
2489 if (VM_CONFIG_COMPRESSOR_IS_ACTIVE
) {
2490 //assert(req->newptr);
2491 printf("Failed attempt to set vm.freeze_enabled sysctl\n");
2496 * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue.
2498 disabled
= (!val
&& memorystatus_freeze_enabled
);
2500 memorystatus_freeze_enabled
= val
? TRUE
: FALSE
;
2503 vm_page_reactivate_all_throttled();
2509 SYSCTL_PROC(_vm
, OID_AUTO
, freeze_enabled
, CTLTYPE_INT
|CTLFLAG_RW
, &memorystatus_freeze_enabled
, 0, sysctl_freeze_enabled
, "I", "");
2510 #endif /* CONFIG_FREEZE */
2512 /* this kernel does NOT implement shared_region_make_private_np() */
2513 SYSCTL_INT(_kern
, KERN_SHREG_PRIVATIZABLE
, shreg_private
,
2514 CTLFLAG_RD
| CTLFLAG_LOCKED
,
2515 (int *)NULL
, 0, "");
2518 fetch_process_cputype(
2522 cpu_type_t
*cputype
)
2524 proc_t p
= PROC_NULL
;
2531 else if (namelen
== 1) {
2532 p
= proc_find(name
[0]);
2541 ret
= cpu_type() & ~CPU_ARCH_MASK
;
2542 if (IS_64BIT_PROCESS(p
))
2543 ret
|= CPU_ARCH_ABI64
;
2554 sysctl_sysctl_native(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
,
2555 struct sysctl_req
*req
)
2558 cpu_type_t proc_cputype
= 0;
2559 if ((error
= fetch_process_cputype(req
->p
, (int *)arg1
, arg2
, &proc_cputype
)) != 0)
2562 if ((proc_cputype
& ~CPU_ARCH_MASK
) != (cpu_type() & ~CPU_ARCH_MASK
))
2564 return SYSCTL_OUT(req
, &res
, sizeof(res
));
2566 SYSCTL_PROC(_sysctl
, OID_AUTO
, proc_native
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, 0, sysctl_sysctl_native
,"I","proc_native");
2569 sysctl_sysctl_cputype(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
,
2570 struct sysctl_req
*req
)
2573 cpu_type_t proc_cputype
= 0;
2574 if ((error
= fetch_process_cputype(req
->p
, (int *)arg1
, arg2
, &proc_cputype
)) != 0)
2576 return SYSCTL_OUT(req
, &proc_cputype
, sizeof(proc_cputype
));
2578 SYSCTL_PROC(_sysctl
, OID_AUTO
, proc_cputype
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, 0, sysctl_sysctl_cputype
,"I","proc_cputype");
2582 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2584 return sysctl_io_number(req
, boothowto
& RB_SAFEBOOT
? 1 : 0, sizeof(int), NULL
, NULL
);
2587 SYSCTL_PROC(_kern
, KERN_SAFEBOOT
, safeboot
,
2588 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2589 0, 0, sysctl_safeboot
, "I", "");
2593 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2595 return sysctl_io_number(req
, boothowto
& RB_SINGLE
? 1 : 0, sizeof(int), NULL
, NULL
);
2598 SYSCTL_PROC(_kern
, OID_AUTO
, singleuser
,
2599 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2600 0, 0, sysctl_singleuser
, "I", "");
2602 STATIC
int sysctl_minimalboot
2603 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2605 return sysctl_io_number(req
, minimalboot
, sizeof(int), NULL
, NULL
);
2608 SYSCTL_PROC(_kern
, OID_AUTO
, minimalboot
,
2609 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2610 0, 0, sysctl_minimalboot
, "I", "");
2613 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
2615 extern boolean_t affinity_sets_enabled
;
2616 extern int affinity_sets_mapping
;
2618 SYSCTL_INT (_kern
, OID_AUTO
, affinity_sets_enabled
,
2619 CTLFLAG_RW
| CTLFLAG_LOCKED
, (int *) &affinity_sets_enabled
, 0, "hinting enabled");
2620 SYSCTL_INT (_kern
, OID_AUTO
, affinity_sets_mapping
,
2621 CTLFLAG_RW
| CTLFLAG_LOCKED
, &affinity_sets_mapping
, 0, "mapping policy");
2624 * Boolean indicating if KASLR is active.
2628 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2632 slide
= vm_kernel_slide
? 1 : 0;
2634 return sysctl_io_number( req
, slide
, sizeof(int), NULL
, NULL
);
2637 SYSCTL_PROC(_kern
, OID_AUTO
, slide
,
2638 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2639 0, 0, sysctl_slide
, "I", "");
2642 * Limit on total memory users can wire.
2644 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
2646 * vm_user_wire_limit - per address space limit on wired memory. This puts a cap on the process's rlimit value.
2648 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
2651 * All values are in bytes.
2654 vm_map_size_t vm_global_no_user_wire_amount
;
2655 vm_map_size_t vm_global_user_wire_limit
;
2656 vm_map_size_t vm_user_wire_limit
;
2659 * There needs to be a more automatic/elegant way to do this
2661 SYSCTL_QUAD(_vm
, OID_AUTO
, global_no_user_wire_amount
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_global_no_user_wire_amount
, "");
2662 SYSCTL_QUAD(_vm
, OID_AUTO
, global_user_wire_limit
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_global_user_wire_limit
, "");
2663 SYSCTL_QUAD(_vm
, OID_AUTO
, user_wire_limit
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_user_wire_limit
, "");
2665 extern int vm_map_copy_overwrite_aligned_src_not_internal
;
2666 extern int vm_map_copy_overwrite_aligned_src_not_symmetric
;
2667 extern int vm_map_copy_overwrite_aligned_src_large
;
2668 SYSCTL_INT(_vm
, OID_AUTO
, vm_copy_src_not_internal
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_map_copy_overwrite_aligned_src_not_internal
, 0, "");
2669 SYSCTL_INT(_vm
, OID_AUTO
, vm_copy_src_not_symmetric
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_map_copy_overwrite_aligned_src_not_symmetric
, 0, "");
2670 SYSCTL_INT(_vm
, OID_AUTO
, vm_copy_src_large
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_map_copy_overwrite_aligned_src_large
, 0, "");
2673 extern uint32_t vm_page_external_count
;
2674 extern uint32_t vm_page_filecache_min
;
2676 SYSCTL_INT(_vm
, OID_AUTO
, vm_page_external_count
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_page_external_count
, 0, "");
2677 SYSCTL_INT(_vm
, OID_AUTO
, vm_page_filecache_min
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_page_filecache_min
, 0, "");
2679 extern int vm_compressor_mode
;
2680 extern int vm_compressor_is_active
;
2681 extern int vm_compressor_available
;
2682 extern uint32_t vm_ripe_target_age
;
2683 extern uint32_t swapout_target_age
;
2684 extern int64_t compressor_bytes_used
;
2685 extern int64_t c_segment_input_bytes
;
2686 extern int64_t c_segment_compressed_bytes
;
2687 extern uint32_t compressor_eval_period_in_msecs
;
2688 extern uint32_t compressor_sample_min_in_msecs
;
2689 extern uint32_t compressor_sample_max_in_msecs
;
2690 extern uint32_t compressor_thrashing_threshold_per_10msecs
;
2691 extern uint32_t compressor_thrashing_min_per_10msecs
;
2692 extern uint32_t vm_compressor_minorcompact_threshold_divisor
;
2693 extern uint32_t vm_compressor_majorcompact_threshold_divisor
;
2694 extern uint32_t vm_compressor_unthrottle_threshold_divisor
;
2695 extern uint32_t vm_compressor_catchup_threshold_divisor
;
2696 extern uint32_t vm_compressor_time_thread
;
2697 extern uint64_t vm_compressor_thread_runtime
;
2699 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_input_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &c_segment_input_bytes
, "");
2700 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_compressed_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &c_segment_compressed_bytes
, "");
2701 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_bytes_used
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_bytes_used
, "");
2703 SYSCTL_INT(_vm
, OID_AUTO
, compressor_mode
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_compressor_mode
, 0, "");
2704 SYSCTL_INT(_vm
, OID_AUTO
, compressor_is_active
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_compressor_is_active
, 0, "");
2705 SYSCTL_INT(_vm
, OID_AUTO
, compressor_swapout_target_age
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &swapout_target_age
, 0, "");
2706 SYSCTL_INT(_vm
, OID_AUTO
, compressor_available
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_compressor_available
, 0, "");
2708 SYSCTL_INT(_vm
, OID_AUTO
, vm_ripe_target_age_in_secs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_ripe_target_age
, 0, "");
2710 SYSCTL_INT(_vm
, OID_AUTO
, compressor_eval_period_in_msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_eval_period_in_msecs
, 0, "");
2711 SYSCTL_INT(_vm
, OID_AUTO
, compressor_sample_min_in_msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_sample_min_in_msecs
, 0, "");
2712 SYSCTL_INT(_vm
, OID_AUTO
, compressor_sample_max_in_msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_sample_max_in_msecs
, 0, "");
2713 SYSCTL_INT(_vm
, OID_AUTO
, compressor_thrashing_threshold_per_10msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_thrashing_threshold_per_10msecs
, 0, "");
2714 SYSCTL_INT(_vm
, OID_AUTO
, compressor_thrashing_min_per_10msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_thrashing_min_per_10msecs
, 0, "");
2715 SYSCTL_INT(_vm
, OID_AUTO
, compressor_minorcompact_threshold_divisor
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_minorcompact_threshold_divisor
, 0, "");
2716 SYSCTL_INT(_vm
, OID_AUTO
, compressor_majorcompact_threshold_divisor
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_majorcompact_threshold_divisor
, 0, "");
2717 SYSCTL_INT(_vm
, OID_AUTO
, compressor_unthrottle_threshold_divisor
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_unthrottle_threshold_divisor
, 0, "");
2718 SYSCTL_INT(_vm
, OID_AUTO
, compressor_catchup_threshold_divisor
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_catchup_threshold_divisor
, 0, "");
2720 SYSCTL_STRING(_vm
, OID_AUTO
, swapfileprefix
, CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
, swapfilename
, sizeof(swapfilename
) - SWAPFILENAME_INDEX_LEN
, "");
2722 SYSCTL_INT(_vm
, OID_AUTO
, compressor_timing_enabled
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_time_thread
, 0, "");
2723 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_thread_runtime
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_compressor_thread_runtime
, "");
2725 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_compressions
, "");
2726 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_compression_failures
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_compression_failures
, "");
2727 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_compressed_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_compressed_bytes
, "");
2728 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_wk_compression_delta
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_wk_compression_delta
, "");
2729 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_wk_compression_negative_delta
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_wk_compression_negative_delta
, "");
2731 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_decompressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_decompressions
, "");
2732 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_decompressed_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_decompressed_bytes
, "");
2734 SYSCTL_QUAD(_vm
, OID_AUTO
, uc_decompressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.uc_decompressions
, "");
2736 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_compressions
, "");
2737 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_compressions_exclusive
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_compressions_exclusive
, "");
2738 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_sv_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_sv_compressions
, "");
2739 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_mzv_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_mzv_compressions
, "");
2740 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_compression_failures
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_compression_failures
, "");
2741 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_compressed_bytes_exclusive
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_compressed_bytes_exclusive
, "");
2742 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_compressed_bytes_total
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_compressed_bytes_total
, "");
2744 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_decompressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_decompressions
, "");
2745 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_decompressed_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_decompressed_bytes
, "");
2746 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_sv_decompressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_sv_decompressions
, "");
2748 SYSCTL_INT(_vm
, OID_AUTO
, lz4_threshold
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_threshold
, 0, "");
2749 SYSCTL_INT(_vm
, OID_AUTO
, wkdm_reeval_threshold
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.wkdm_reeval_threshold
, 0, "");
2750 SYSCTL_INT(_vm
, OID_AUTO
, lz4_max_failure_skips
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_max_failure_skips
, 0, "");
2751 SYSCTL_INT(_vm
, OID_AUTO
, lz4_max_failure_run_length
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_max_failure_run_length
, 0, "");
2752 SYSCTL_INT(_vm
, OID_AUTO
, lz4_max_preselects
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_max_preselects
, 0, "");
2753 SYSCTL_INT(_vm
, OID_AUTO
, lz4_run_preselection_threshold
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_run_preselection_threshold
, 0, "");
2754 SYSCTL_INT(_vm
, OID_AUTO
, lz4_run_continue_bytes
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_run_continue_bytes
, 0, "");
2755 SYSCTL_INT(_vm
, OID_AUTO
, lz4_profitable_bytes
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_profitable_bytes
, 0, "");
#if CONFIG_PHANTOM_CACHE
extern uint32_t phantom_cache_thrashing_threshold;
extern uint32_t phantom_cache_eval_period_in_msecs;
extern uint32_t phantom_cache_thrashing_threshold_ssd;

SYSCTL_INT(_vm, OID_AUTO, phantom_cache_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_eval_period_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold_ssd, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold_ssd, 0, "");
#endif /* CONFIG_PHANTOM_CACHE */
#if CONFIG_BACKGROUND_QUEUE

/* Background page-queue state and pageout accounting. */
extern uint32_t	vm_page_background_count;
extern uint32_t	vm_page_background_limit;
extern uint32_t	vm_page_background_target;
extern uint32_t	vm_page_background_internal_count;
extern uint32_t	vm_page_background_external_count;
extern uint32_t	vm_page_background_mode;
extern uint32_t	vm_page_background_exclude_external;
extern uint64_t	vm_page_background_promoted_count;
extern uint64_t vm_pageout_considered_bq_internal;
extern uint64_t vm_pageout_considered_bq_external;
extern uint64_t vm_pageout_rejected_bq_internal;
extern uint64_t vm_pageout_rejected_bq_external;

SYSCTL_INT(_vm, OID_AUTO, vm_page_background_mode, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_mode, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_exclude_external, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_exclude_external, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_limit, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_target, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_target, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_internal_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_internal_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_external_count, 0, "");

SYSCTL_QUAD(_vm, OID_AUTO, vm_page_background_promoted_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_promoted_count, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_considered_bq_internal, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_considered_bq_external, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_internal, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_external, "");

#endif /* CONFIG_BACKGROUND_QUEUE */
2799 #if (DEVELOPMENT || DEBUG)
2801 SYSCTL_UINT(_vm
, OID_AUTO
, vm_page_creation_throttled_hard
,
2802 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2803 &vm_page_creation_throttled_hard
, 0, "");
2805 SYSCTL_UINT(_vm
, OID_AUTO
, vm_page_creation_throttled_soft
,
2806 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2807 &vm_page_creation_throttled_soft
, 0, "");
2809 extern uint32_t vm_pageout_memorystatus_fb_factor_nr
;
2810 extern uint32_t vm_pageout_memorystatus_fb_factor_dr
;
2811 SYSCTL_INT(_vm
, OID_AUTO
, vm_pageout_memorystatus_fb_factor_nr
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_pageout_memorystatus_fb_factor_nr
, 0, "");
2812 SYSCTL_INT(_vm
, OID_AUTO
, vm_pageout_memorystatus_fb_factor_dr
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_pageout_memorystatus_fb_factor_dr
, 0, "");
2814 extern uint32_t vm_grab_anon_overrides
;
2815 extern uint32_t vm_grab_anon_nops
;
2817 SYSCTL_INT(_vm
, OID_AUTO
, vm_grab_anon_overrides
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_grab_anon_overrides
, 0, "");
2818 SYSCTL_INT(_vm
, OID_AUTO
, vm_grab_anon_nops
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_grab_anon_nops
, 0, "");
2820 /* log message counters for persistence mode */
2821 extern uint32_t oslog_p_total_msgcount
;
2822 extern uint32_t oslog_p_metadata_saved_msgcount
;
2823 extern uint32_t oslog_p_metadata_dropped_msgcount
;
2824 extern uint32_t oslog_p_error_count
;
2825 extern uint32_t oslog_p_saved_msgcount
;
2826 extern uint32_t oslog_p_dropped_msgcount
;
2827 extern uint32_t oslog_p_boot_dropped_msgcount
;
2829 /* log message counters for streaming mode */
2830 extern uint32_t oslog_s_total_msgcount
;
2831 extern uint32_t oslog_s_metadata_msgcount
;
2832 extern uint32_t oslog_s_error_count
;
2833 extern uint32_t oslog_s_streamed_msgcount
;
2834 extern uint32_t oslog_s_dropped_msgcount
;
2836 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_p_total_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_p_total_msgcount
, 0, "");
2837 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_p_metadata_saved_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_p_metadata_saved_msgcount
, 0, "");
2838 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_p_metadata_dropped_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_p_metadata_dropped_msgcount
, 0, "");
2839 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_p_error_count
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_p_error_count
, 0, "");
2840 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_p_saved_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_p_saved_msgcount
, 0, "");
2841 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_p_dropped_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_p_dropped_msgcount
, 0, "");
2842 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_p_boot_dropped_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_p_boot_dropped_msgcount
, 0, "");
2844 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_s_total_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_s_total_msgcount
, 0, "");
2845 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_s_metadata_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_s_metadata_msgcount
, 0, "");
2846 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_s_error_count
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_s_error_count
, 0, "");
2847 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_s_streamed_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_s_streamed_msgcount
, 0, "");
2848 SYSCTL_UINT(_debug
, OID_AUTO
, oslog_s_dropped_msgcount
, CTLFLAG_ANYBODY
| CTLFLAG_RD
| CTLFLAG_LOCKED
, &oslog_s_dropped_msgcount
, 0, "");
2851 #endif /* DEVELOPMENT || DEBUG */
2854 * Enable tracing of voucher contents
2856 extern uint32_t ipc_voucher_trace_contents
;
2858 SYSCTL_INT (_kern
, OID_AUTO
, ipc_voucher_trace_contents
,
2859 CTLFLAG_RW
| CTLFLAG_LOCKED
, &ipc_voucher_trace_contents
, 0, "Enable tracing voucher contents");
2862 * Kernel stack size and depth
2864 SYSCTL_INT (_kern
, OID_AUTO
, stack_size
,
2865 CTLFLAG_RD
| CTLFLAG_LOCKED
, (int *) &kernel_stack_size
, 0, "Kernel stack size");
2866 SYSCTL_INT (_kern
, OID_AUTO
, stack_depth_max
,
2867 CTLFLAG_RD
| CTLFLAG_LOCKED
, (int *) &kernel_stack_depth_max
, 0, "Max kernel stack depth at interrupt or context switch");
2870 * enable back trace for port allocations
2872 extern int ipc_portbt
;
2874 SYSCTL_INT(_kern
, OID_AUTO
, ipc_portbt
,
2875 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2876 &ipc_portbt
, 0, "");
2882 SYSCTL_STRING(_kern
, OID_AUTO
, sched
,
2883 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2884 sched_string
, sizeof(sched_string
),
2885 "Timeshare scheduler implementation");
2888 * Only support runtime modification on embedded platforms
2889 * with development config enabled
2893 /* Parameters related to timer coalescing tuning, to be replaced
2894 * with a dedicated systemcall in the future.
2896 /* Enable processing pending timers in the context of any other interrupt
2897 * Coalescing tuning parameters for various thread/task attributes */
2899 sysctl_timer_user_us_kernel_abstime SYSCTL_HANDLER_ARGS
2901 #pragma unused(oidp)
2902 int size
= arg2
; /* subcommand*/
2905 uint64_t old_value_ns
;
2906 uint64_t new_value_ns
;
2907 uint64_t value_abstime
;
2908 if (size
== sizeof(uint32_t))
2909 value_abstime
= *((uint32_t *)arg1
);
2910 else if (size
== sizeof(uint64_t))
2911 value_abstime
= *((uint64_t *)arg1
);
2912 else return ENOTSUP
;
2914 absolutetime_to_nanoseconds(value_abstime
, &old_value_ns
);
2915 error
= sysctl_io_number(req
, old_value_ns
, sizeof(old_value_ns
), &new_value_ns
, &changed
);
2916 if ((error
) || (!changed
))
2919 nanoseconds_to_absolutetime(new_value_ns
, &value_abstime
);
2920 if (size
== sizeof(uint32_t))
2921 *((uint32_t *)arg1
) = (uint32_t)value_abstime
;
2923 *((uint64_t *)arg1
) = value_abstime
;
2927 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_bg_scale
,
2928 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2929 &tcoal_prio_params
.timer_coalesce_bg_shift
, 0, "");
2930 SYSCTL_PROC(_kern
, OID_AUTO
, timer_resort_threshold_ns
,
2931 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2932 &tcoal_prio_params
.timer_resort_threshold_abstime
,
2933 sizeof(tcoal_prio_params
.timer_resort_threshold_abstime
),
2934 sysctl_timer_user_us_kernel_abstime
,
2936 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_bg_ns_max
,
2937 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2938 &tcoal_prio_params
.timer_coalesce_bg_abstime_max
,
2939 sizeof(tcoal_prio_params
.timer_coalesce_bg_abstime_max
),
2940 sysctl_timer_user_us_kernel_abstime
,
2943 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_kt_scale
,
2944 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2945 &tcoal_prio_params
.timer_coalesce_kt_shift
, 0, "");
2947 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_kt_ns_max
,
2948 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2949 &tcoal_prio_params
.timer_coalesce_kt_abstime_max
,
2950 sizeof(tcoal_prio_params
.timer_coalesce_kt_abstime_max
),
2951 sysctl_timer_user_us_kernel_abstime
,
2954 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_fp_scale
,
2955 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2956 &tcoal_prio_params
.timer_coalesce_fp_shift
, 0, "");
2958 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_fp_ns_max
,
2959 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2960 &tcoal_prio_params
.timer_coalesce_fp_abstime_max
,
2961 sizeof(tcoal_prio_params
.timer_coalesce_fp_abstime_max
),
2962 sysctl_timer_user_us_kernel_abstime
,
2965 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_ts_scale
,
2966 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2967 &tcoal_prio_params
.timer_coalesce_ts_shift
, 0, "");
2969 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_ts_ns_max
,
2970 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2971 &tcoal_prio_params
.timer_coalesce_ts_abstime_max
,
2972 sizeof(tcoal_prio_params
.timer_coalesce_ts_abstime_max
),
2973 sysctl_timer_user_us_kernel_abstime
,
2976 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier0_scale
,
2977 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2978 &tcoal_prio_params
.latency_qos_scale
[0], 0, "");
2980 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier0_ns_max
,
2981 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2982 &tcoal_prio_params
.latency_qos_abstime_max
[0],
2983 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[0]),
2984 sysctl_timer_user_us_kernel_abstime
,
2987 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier1_scale
,
2988 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2989 &tcoal_prio_params
.latency_qos_scale
[1], 0, "");
2991 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier1_ns_max
,
2992 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2993 &tcoal_prio_params
.latency_qos_abstime_max
[1],
2994 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[1]),
2995 sysctl_timer_user_us_kernel_abstime
,
2998 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier2_scale
,
2999 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3000 &tcoal_prio_params
.latency_qos_scale
[2], 0, "");
3002 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier2_ns_max
,
3003 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3004 &tcoal_prio_params
.latency_qos_abstime_max
[2],
3005 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[2]),
3006 sysctl_timer_user_us_kernel_abstime
,
3009 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier3_scale
,
3010 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3011 &tcoal_prio_params
.latency_qos_scale
[3], 0, "");
3013 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier3_ns_max
,
3014 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3015 &tcoal_prio_params
.latency_qos_abstime_max
[3],
3016 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[3]),
3017 sysctl_timer_user_us_kernel_abstime
,
3020 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier4_scale
,
3021 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3022 &tcoal_prio_params
.latency_qos_scale
[4], 0, "");
3024 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier4_ns_max
,
3025 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3026 &tcoal_prio_params
.latency_qos_abstime_max
[4],
3027 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[4]),
3028 sysctl_timer_user_us_kernel_abstime
,
3031 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier5_scale
,
3032 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3033 &tcoal_prio_params
.latency_qos_scale
[5], 0, "");
3035 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier5_ns_max
,
3036 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3037 &tcoal_prio_params
.latency_qos_abstime_max
[5],
3038 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[5]),
3039 sysctl_timer_user_us_kernel_abstime
,
3042 /* Communicate the "user idle level" heuristic to the timer layer, and
3043 * potentially other layers in the future.
3047 timer_user_idle_level(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
) {
3048 int new_value
= 0, old_value
= 0, changed
= 0, error
;
3050 old_value
= timer_get_user_idle_level();
3052 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
3054 if (error
== 0 && changed
) {
3055 if (timer_set_user_idle_level(new_value
) != KERN_SUCCESS
)
3062 SYSCTL_PROC(_machdep
, OID_AUTO
, user_idle_level
,
3063 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3065 timer_user_idle_level
, "I", "User idle level heuristic, 0-128");
#if HYPERVISOR
/* Boolean: hardware virtualization support availability (read-only). */
SYSCTL_INT(_kern, OID_AUTO, hv_support,
    CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
    &hv_support_available, 0, "");
#endif
/*
 * This is set by core audio to tell tailspin (ie background tracing) how long
 * its smallest buffer is.  Background tracing can then try to make a reasonable
 * decisions to try to avoid introducing so much latency that the buffers will
 * be full.
 */

int min_audio_buffer_usec;
3084 sysctl_audio_buffer SYSCTL_HANDLER_ARGS
3086 #pragma unused(oidp, arg1, arg2)
3087 int err
= 0, value
= 0, changed
= 0;
3088 err
= sysctl_io_number(req
, min_audio_buffer_usec
, sizeof(int), &value
, &changed
);
3092 /* writing is protected by an entitlement */
3093 if (priv_check_cred(kauth_cred_get(), PRIV_AUDIO_LATENCY
, 0) != 0) {
3097 min_audio_buffer_usec
= value
;
3103 SYSCTL_PROC(_kern
, OID_AUTO
, min_audio_buffer_usec
, CTLFLAG_RW
| CTLFLAG_ANYBODY
, 0, 0, sysctl_audio_buffer
, "I", "Minimum audio buffer size, in microseconds");
3105 #if DEVELOPMENT || DEBUG
3106 #include <sys/sysent.h>
3107 /* This should result in a fatal exception, verifying that "sysent" is
3111 kern_sysent_write(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
) {
3112 uint64_t new_value
= 0, old_value
= 0;
3113 int changed
= 0, error
;
3115 error
= sysctl_io_number(req
, old_value
, sizeof(uint64_t), &new_value
, &changed
);
3116 if ((error
== 0) && changed
) {
3117 volatile uint32_t *wraddr
= (uint32_t *) &sysent
[0];
3119 printf("sysent[0] write succeeded\n");
3124 SYSCTL_PROC(_kern
, OID_AUTO
, sysent_const_check
,
3125 CTLTYPE_QUAD
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3127 kern_sysent_write
, "I", "Attempt sysent[0] write");
3131 #if DEVELOPMENT || DEBUG
3132 SYSCTL_COMPAT_INT(_kern
, OID_AUTO
, development
, CTLFLAG_RD
| CTLFLAG_MASKED
, NULL
, 1, "");
3134 SYSCTL_COMPAT_INT(_kern
, OID_AUTO
, development
, CTLFLAG_RD
| CTLFLAG_MASKED
, NULL
, 0, "");