2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
67 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
68 * support for mandatory and extensible security protections. This notice
69 * is included in support of clause 2.2 (b) of the Apple Public License,
74 * DEPRECATED sysctl system call code
76 * Everything in this file is deprecated. Sysctls should be handled
77 * by the code in kern_newsysctl.c.
78 * The remaining "case" sections are supposed to be converted into
79 * SYSCTL_*-style definitions, and as soon as all of them are gone,
80 * this source file is supposed to die.
82 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
83 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/malloc.h>
90 #include <sys/proc_internal.h>
91 #include <sys/kauth.h>
92 #include <sys/file_internal.h>
93 #include <sys/vnode_internal.h>
94 #include <sys/unistd.h>
96 #include <sys/ioctl.h>
97 #include <sys/namei.h>
99 #include <sys/disklabel.h>
101 #include <sys/sysctl.h>
102 #include <sys/user.h>
103 #include <sys/aio_kern.h>
104 #include <sys/reboot.h>
105 #include <sys/memory_maintenance.h>
106 #include <sys/priv.h>
108 #include <security/audit/audit.h>
109 #include <kern/kalloc.h>
111 #include <mach/machine.h>
112 #include <mach/mach_host.h>
113 #include <mach/mach_types.h>
114 #include <mach/vm_param.h>
115 #include <kern/mach_param.h>
116 #include <kern/task.h>
117 #include <kern/thread.h>
118 #include <kern/processor.h>
119 #include <kern/debug.h>
120 #include <kern/sched_prim.h>
121 #include <vm/vm_kern.h>
122 #include <vm/vm_map.h>
123 #include <mach/host_info.h>
125 #include <sys/mount_internal.h>
126 #include <sys/kdebug.h>
128 #include <IOKit/IOPlatformExpert.h>
129 #include <pexpert/pexpert.h>
131 #include <machine/machine_routines.h>
132 #include <machine/exec.h>
134 #include <vm/vm_protos.h>
135 #include <vm/vm_pageout.h>
136 #include <vm/vm_compressor_algorithms.h>
137 #include <sys/imgsrc.h>
138 #include <kern/timer_call.h>
140 #if defined(__i386__) || defined(__x86_64__)
141 #include <i386/cpuid.h>
145 #include <sys/kern_memorystatus.h>
149 #include <kperf/kperf.h>
153 #include <kern/hv_support.h>
157 * deliberately setting max requests to really high number
158 * so that runaway settings do not cause MALLOC overflows
160 #define AIO_MAX_REQUESTS (128 * CONFIG_AIO_MAX)
162 extern int aio_max_requests
;
163 extern int aio_max_requests_per_process
;
164 extern int aio_worker_threads
;
165 extern int lowpri_IO_window_msecs
;
166 extern int lowpri_IO_delay_msecs
;
167 extern int nx_enabled
;
168 extern int speculative_reads_disabled
;
169 extern int ignore_is_ssd
;
170 extern unsigned int speculative_prefetch_max
;
171 extern unsigned int speculative_prefetch_max_iosize
;
172 extern unsigned int preheat_max_bytes
;
173 extern unsigned int preheat_min_bytes
;
174 extern long numvnodes
;
176 extern uuid_string_t bootsessionuuid_string
;
178 extern unsigned int vm_max_delayed_work_limit
;
179 extern unsigned int vm_max_batch
;
181 extern unsigned int vm_page_free_min
;
182 extern unsigned int vm_page_free_target
;
183 extern unsigned int vm_page_free_reserved
;
184 extern unsigned int vm_page_speculative_percentage
;
185 extern unsigned int vm_page_speculative_q_age_ms
;
187 #if (DEVELOPMENT || DEBUG)
188 extern uint32_t vm_page_creation_throttled_hard
;
189 extern uint32_t vm_page_creation_throttled_soft
;
190 #endif /* DEVELOPMENT || DEBUG */
193 * Conditionally allow dtrace to see these functions for debugging purposes.
201 #define STATIC static
204 extern boolean_t mach_timer_coalescing_enabled
;
206 extern uint64_t timer_deadline_tracking_bin_1
, timer_deadline_tracking_bin_2
;
209 fill_user32_eproc(proc_t
, struct user32_eproc
*__restrict
);
211 fill_user32_externproc(proc_t
, struct user32_extern_proc
*__restrict
);
213 fill_user64_eproc(proc_t
, struct user64_eproc
*__restrict
);
215 fill_user64_proc(proc_t
, struct user64_kinfo_proc
*__restrict
);
217 fill_user64_externproc(proc_t
, struct user64_extern_proc
*__restrict
);
219 fill_user32_proc(proc_t
, struct user32_kinfo_proc
*__restrict
);
222 kdbg_control(int *name
, u_int namelen
, user_addr_t where
, size_t * sizep
);
228 pcsamples_ops(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
,
230 __private_extern__ kern_return_t
231 reset_vmobjectcache(unsigned int val1
, unsigned int val2
);
233 sysctl_procargs(int *name
, u_int namelen
, user_addr_t where
,
234 size_t *sizep
, proc_t cur_proc
);
236 sysctl_procargsx(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
,
237 proc_t cur_proc
, int argc_yes
);
239 sysctl_struct(user_addr_t oldp
, size_t *oldlenp
, user_addr_t newp
,
240 size_t newlen
, void *sp
, int len
);
242 STATIC
int sysdoproc_filt_KERN_PROC_PID(proc_t p
, void * arg
);
243 STATIC
int sysdoproc_filt_KERN_PROC_PGRP(proc_t p
, void * arg
);
244 STATIC
int sysdoproc_filt_KERN_PROC_TTY(proc_t p
, void * arg
);
245 STATIC
int sysdoproc_filt_KERN_PROC_UID(proc_t p
, void * arg
);
246 STATIC
int sysdoproc_filt_KERN_PROC_RUID(proc_t p
, void * arg
);
247 int sysdoproc_callback(proc_t p
, void *arg
);
250 /* forward declarations for non-static STATIC */
251 STATIC
void fill_loadavg64(struct loadavg
*la
, struct user64_loadavg
*la64
);
252 STATIC
void fill_loadavg32(struct loadavg
*la
, struct user32_loadavg
*la32
);
253 STATIC
int sysctl_handle_kern_threadname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
254 STATIC
int sysctl_sched_stats(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
255 STATIC
int sysctl_sched_stats_enable(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
256 STATIC
int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
;
258 STATIC
int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
;
259 #endif /* COUNT_SYSCALLS */
260 STATIC
int sysctl_doprocargs SYSCTL_HANDLER_ARGS
;
261 STATIC
int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
;
262 STATIC
int sysctl_prochandle SYSCTL_HANDLER_ARGS
;
263 STATIC
int sysctl_aiomax(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
264 STATIC
int sysctl_aioprocmax(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
265 STATIC
int sysctl_aiothreads(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
266 STATIC
int sysctl_maxproc(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
267 STATIC
int sysctl_osversion(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
268 STATIC
int sysctl_sysctl_bootargs(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
269 STATIC
int sysctl_maxvnodes(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
270 STATIC
int sysctl_securelvl(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
271 STATIC
int sysctl_domainname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
272 STATIC
int sysctl_hostname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
273 STATIC
int sysctl_procname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
274 STATIC
int sysctl_boottime(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
275 STATIC
int sysctl_symfile(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
277 STATIC
int sysctl_netboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
279 #ifdef CONFIG_IMGSRC_ACCESS
280 STATIC
int sysctl_imgsrcdev(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
282 STATIC
int sysctl_usrstack(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
283 STATIC
int sysctl_usrstack64(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
285 STATIC
int sysctl_coredump(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
286 STATIC
int sysctl_suid_coredump(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
288 STATIC
int sysctl_delayterm(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
289 STATIC
int sysctl_rage_vnode(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
290 STATIC
int sysctl_kern_check_openevt(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
291 STATIC
int sysctl_nx(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
292 STATIC
int sysctl_loadavg(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
293 STATIC
int sysctl_vm_toggle_address_reuse(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
294 STATIC
int sysctl_swapusage(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
295 STATIC
int fetch_process_cputype( proc_t cur_proc
, int *name
, u_int namelen
, cpu_type_t
*cputype
);
296 STATIC
int sysctl_sysctl_native(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
297 STATIC
int sysctl_sysctl_cputype(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
298 STATIC
int sysctl_safeboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
299 STATIC
int sysctl_singleuser(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
300 STATIC
int sysctl_minimalboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
301 STATIC
int sysctl_slide(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
304 extern void IORegistrySetOSBuildVersion(char * build_version
);
307 fill_loadavg64(struct loadavg
*la
, struct user64_loadavg
*la64
)
309 la64
->ldavg
[0] = la
->ldavg
[0];
310 la64
->ldavg
[1] = la
->ldavg
[1];
311 la64
->ldavg
[2] = la
->ldavg
[2];
312 la64
->fscale
= (user64_long_t
)la
->fscale
;
316 fill_loadavg32(struct loadavg
*la
, struct user32_loadavg
*la32
)
318 la32
->ldavg
[0] = la
->ldavg
[0];
319 la32
->ldavg
[1] = la
->ldavg
[1];
320 la32
->ldavg
[2] = la
->ldavg
[2];
321 la32
->fscale
= (user32_long_t
)la
->fscale
;
326 * Attributes stored in the kernel.
328 extern char corefilename
[MAXPATHLEN
+1];
329 extern int do_coredump
;
330 extern int sugid_coredump
;
334 extern int do_count_syscalls
;
338 int securelevel
= -1;
344 sysctl_handle_kern_threadname( __unused
struct sysctl_oid
*oidp
, __unused
void *arg1
,
345 __unused
int arg2
, struct sysctl_req
*req
)
348 struct uthread
*ut
= get_bsdthread_info(current_thread());
349 user_addr_t oldp
=0, newp
=0;
350 size_t *oldlenp
=NULL
;
354 oldlenp
= &(req
->oldlen
);
356 newlen
= req
->newlen
;
358 /* We want the current length, and maybe the string itself */
360 /* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */
361 size_t currlen
= MAXTHREADNAMESIZE
- 1;
364 /* use length of current thread name */
365 currlen
= strlen(ut
->pth_name
);
367 if(*oldlenp
< currlen
)
369 /* NOTE - we do not copy the NULL terminator */
371 error
= copyout(ut
->pth_name
,oldp
,currlen
);
376 /* return length of thread name minus NULL terminator (just like strlen) */
377 req
->oldidx
= currlen
;
380 /* We want to set the name to something */
383 if(newlen
> (MAXTHREADNAMESIZE
- 1))
387 ut
->pth_name
= (char*)kalloc( MAXTHREADNAMESIZE
);
391 kernel_debug_string_simple(TRACE_STRING_THREADNAME_PREV
, ut
->pth_name
);
393 bzero(ut
->pth_name
, MAXTHREADNAMESIZE
);
394 error
= copyin(newp
, ut
->pth_name
, newlen
);
399 kernel_debug_string_simple(TRACE_STRING_THREADNAME
, ut
->pth_name
);
405 SYSCTL_PROC(_kern
, KERN_THREADNAME
, threadname
, CTLFLAG_ANYBODY
| CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, 0, sysctl_handle_kern_threadname
,"A","");
409 sysctl_sched_stats(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
411 host_basic_info_data_t hinfo
;
415 mach_msg_type_number_t count
= HOST_BASIC_INFO_COUNT
;
416 struct _processor_statistics_np
*buf
;
419 kret
= host_info((host_t
)BSD_HOST
, HOST_BASIC_INFO
, (host_info_t
)&hinfo
, &count
);
420 if (kret
!= KERN_SUCCESS
) {
424 size
= sizeof(struct _processor_statistics_np
) * (hinfo
.logical_cpu_max
+ 2); /* One for RT Queue, One for Fair Share Queue */
426 if (req
->oldlen
< size
) {
430 MALLOC(buf
, struct _processor_statistics_np
*, size
, M_TEMP
, M_ZERO
| M_WAITOK
);
432 kret
= get_sched_statistics(buf
, &size
);
433 if (kret
!= KERN_SUCCESS
) {
438 error
= sysctl_io_opaque(req
, buf
, size
, &changed
);
444 panic("Sched info changed?!");
451 SYSCTL_PROC(_kern
, OID_AUTO
, sched_stats
, CTLFLAG_LOCKED
, 0, 0, sysctl_sched_stats
, "-", "");
454 sysctl_sched_stats_enable(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, __unused
struct sysctl_req
*req
)
459 if (req
->newlen
!= sizeof(active
)) {
463 res
= copyin(req
->newptr
, &active
, sizeof(active
));
468 return set_sched_stats_active(active
);
471 SYSCTL_PROC(_kern
, OID_AUTO
, sched_stats_enable
, CTLFLAG_LOCKED
| CTLFLAG_WR
, 0, 0, sysctl_sched_stats_enable
, "-", "");
473 extern uint32_t sched_debug_flags
;
474 SYSCTL_INT(_debug
, OID_AUTO
, sched
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &sched_debug_flags
, 0, "scheduler debug");
476 #if (DEBUG || DEVELOPMENT)
477 extern boolean_t doprnt_hide_pointers
;
478 SYSCTL_INT(_debug
, OID_AUTO
, hide_kernel_pointers
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &doprnt_hide_pointers
, 0, "hide kernel pointers from log");
481 extern int get_kernel_symfile(proc_t
, char **);
484 #define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)
486 extern unsigned int nsysent
;
487 extern int syscalls_log
[];
488 extern const char *syscallnames
[];
491 sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
493 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
494 __unused
int *name
= arg1
; /* oid element argument vector */
495 __unused
int namelen
= arg2
; /* number of oid element arguments */
496 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
497 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
498 user_addr_t newp
= req
->newptr
; /* user buffer copy in address */
499 size_t newlen
= req
->newlen
; /* user buffer copy in size */
504 /* valid values passed in:
505 * = 0 means don't keep called counts for each bsd syscall
506 * > 0 means keep called counts for each bsd syscall
507 * = 2 means dump current counts to the system log
508 * = 3 means reset all counts
509 * for example, to dump current counts:
510 * sysctl -w kern.count_calls=2
512 error
= sysctl_int(oldp
, oldlenp
, newp
, newlen
, &tmp
);
518 do_count_syscalls
= 1;
520 else if ( tmp
== 0 || tmp
== 2 || tmp
== 3 ) {
522 for ( i
= 0; i
< nsysent
; i
++ ) {
523 if ( syscalls_log
[i
] != 0 ) {
525 printf("%d calls - name %s \n", syscalls_log
[i
], syscallnames
[i
]);
533 do_count_syscalls
= 1;
537 /* adjust index so we return the right required/consumed amount */
539 req
->oldidx
+= req
->oldlen
;
543 SYSCTL_PROC(_kern
, KERN_COUNT_SYSCALLS
, count_syscalls
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
544 0, /* Pointer argument (arg1) */
545 0, /* Integer argument (arg2) */
546 sysctl_docountsyscalls
, /* Handler function */
547 NULL
, /* Data pointer */
549 #endif /* COUNT_SYSCALLS */
552 * The following sysctl_* functions should not be used
553 * any more, as they can only cope with callers in
554 * user mode: Use new-style
562 * Validate parameters and get old / set new parameters
563 * for an integer-valued sysctl function.
566 sysctl_int(user_addr_t oldp
, size_t *oldlenp
,
567 user_addr_t newp
, size_t newlen
, int *valp
)
571 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
573 if (oldp
&& *oldlenp
< sizeof(int))
575 if (newp
&& newlen
!= sizeof(int))
577 *oldlenp
= sizeof(int);
579 error
= copyout(valp
, oldp
, sizeof(int));
580 if (error
== 0 && newp
) {
581 error
= copyin(newp
, valp
, sizeof(int));
582 AUDIT_ARG(value32
, *valp
);
588 * Validate parameters and get old / set new parameters
589 * for an quad(64bit)-valued sysctl function.
592 sysctl_quad(user_addr_t oldp
, size_t *oldlenp
,
593 user_addr_t newp
, size_t newlen
, quad_t
*valp
)
597 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
599 if (oldp
&& *oldlenp
< sizeof(quad_t
))
601 if (newp
&& newlen
!= sizeof(quad_t
))
603 *oldlenp
= sizeof(quad_t
);
605 error
= copyout(valp
, oldp
, sizeof(quad_t
));
606 if (error
== 0 && newp
)
607 error
= copyin(newp
, valp
, sizeof(quad_t
));
612 sysdoproc_filt_KERN_PROC_PID(proc_t p
, void * arg
)
614 if (p
->p_pid
!= (pid_t
)*(int*)arg
)
621 sysdoproc_filt_KERN_PROC_PGRP(proc_t p
, void * arg
)
623 if (p
->p_pgrpid
!= (pid_t
)*(int*)arg
)
630 sysdoproc_filt_KERN_PROC_TTY(proc_t p
, void * arg
)
635 /* This is very racy but list lock is held.. Hmmm. */
636 if ((p
->p_flag
& P_CONTROLT
) == 0 ||
637 (p
->p_pgrp
== NULL
) || (p
->p_pgrp
->pg_session
== NULL
) ||
638 (tp
= SESSION_TP(p
->p_pgrp
->pg_session
)) == TTY_NULL
||
639 tp
->t_dev
!= (dev_t
)*(int*)arg
)
648 sysdoproc_filt_KERN_PROC_UID(proc_t p
, void * arg
)
650 kauth_cred_t my_cred
;
653 if (p
->p_ucred
== NULL
)
655 my_cred
= kauth_cred_proc_ref(p
);
656 uid
= kauth_cred_getuid(my_cred
);
657 kauth_cred_unref(&my_cred
);
659 if (uid
!= (uid_t
)*(int*)arg
)
667 sysdoproc_filt_KERN_PROC_RUID(proc_t p
, void * arg
)
669 kauth_cred_t my_cred
;
672 if (p
->p_ucred
== NULL
)
674 my_cred
= kauth_cred_proc_ref(p
);
675 ruid
= kauth_cred_getruid(my_cred
);
676 kauth_cred_unref(&my_cred
);
678 if (ruid
!= (uid_t
)*(int*)arg
)
685 * try over estimating by 5 procs
687 #define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
688 struct sysdoproc_args
{
703 sysdoproc_callback(proc_t p
, void *arg
)
705 struct sysdoproc_args
*args
= arg
;
707 if (args
->buflen
>= args
->sizeof_kproc
) {
708 if ((args
->ruidcheck
!= 0) && (sysdoproc_filt_KERN_PROC_RUID(p
, &args
->uidval
) == 0))
709 return (PROC_RETURNED
);
710 if ((args
->uidcheck
!= 0) && (sysdoproc_filt_KERN_PROC_UID(p
, &args
->uidval
) == 0))
711 return (PROC_RETURNED
);
712 if ((args
->ttycheck
!= 0) && (sysdoproc_filt_KERN_PROC_TTY(p
, &args
->uidval
) == 0))
713 return (PROC_RETURNED
);
715 bzero(args
->kprocp
, args
->sizeof_kproc
);
717 fill_user64_proc(p
, args
->kprocp
);
719 fill_user32_proc(p
, args
->kprocp
);
720 int error
= copyout(args
->kprocp
, args
->dp
, args
->sizeof_kproc
);
722 *args
->errorp
= error
;
723 return (PROC_RETURNED_DONE
);
725 args
->dp
+= args
->sizeof_kproc
;
726 args
->buflen
-= args
->sizeof_kproc
;
728 args
->needed
+= args
->sizeof_kproc
;
729 return (PROC_RETURNED
);
732 SYSCTL_NODE(_kern
, KERN_PROC
, proc
, CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, "");
734 sysctl_prochandle SYSCTL_HANDLER_ARGS
736 int cmd
= oidp
->oid_arg2
; /* subcommand for multiple nodes */
737 int *name
= arg1
; /* oid element argument vector */
738 int namelen
= arg2
; /* number of oid element arguments */
739 user_addr_t where
= req
->oldptr
;/* user buffer copy out address */
741 user_addr_t dp
= where
;
743 int buflen
= where
!= USER_ADDR_NULL
? req
->oldlen
: 0;
745 boolean_t is_64_bit
= proc_is64bit(current_proc());
746 struct user32_kinfo_proc user32_kproc
;
747 struct user64_kinfo_proc user_kproc
;
750 int (*filterfn
)(proc_t
, void *) = 0;
751 struct sysdoproc_args args
;
756 if (namelen
!= 1 && !(namelen
== 0 && cmd
== KERN_PROC_ALL
))
760 sizeof_kproc
= sizeof(user_kproc
);
761 kprocp
= &user_kproc
;
763 sizeof_kproc
= sizeof(user32_kproc
);
764 kprocp
= &user32_kproc
;
770 filterfn
= sysdoproc_filt_KERN_PROC_PID
;
774 filterfn
= sysdoproc_filt_KERN_PROC_PGRP
;
793 /* must be kern.proc.<unknown> */
798 args
.buflen
= buflen
;
799 args
.kprocp
= kprocp
;
800 args
.is_64_bit
= is_64_bit
;
802 args
.needed
= needed
;
803 args
.errorp
= &error
;
804 args
.uidcheck
= uidcheck
;
805 args
.ruidcheck
= ruidcheck
;
806 args
.ttycheck
= ttycheck
;
807 args
.sizeof_kproc
= sizeof_kproc
;
809 args
.uidval
= name
[0];
811 proc_iterate((PROC_ALLPROCLIST
| PROC_ZOMBPROCLIST
),
812 sysdoproc_callback
, &args
, filterfn
, name
);
818 needed
= args
.needed
;
820 if (where
!= USER_ADDR_NULL
) {
821 req
->oldlen
= dp
- where
;
822 if (needed
> req
->oldlen
)
825 needed
+= KERN_PROCSLOP
;
826 req
->oldlen
= needed
;
828 /* adjust index so we return the right required/consumed amount */
829 req
->oldidx
+= req
->oldlen
;
834 * We specify the subcommand code for multiple nodes as the 'req->arg2' value
835 * in the sysctl declaration itself, which comes into the handler function
836 * as 'oidp->oid_arg2'.
838 * For these particular sysctls, since they have well known OIDs, we could
839 * have just obtained it from the '((int *)arg1)[0]' parameter, but that would
840 * not demonstrate how to handle multiple sysctls that used OID_AUTO instead
841 * of a well known value with a common handler function. This is desirable,
842 * because we want well known values to "go away" at some future date.
844 * It should be noted that the value of '((int *)arg1)[1]' is used for many
845 * an integer parameter to the subcommand for many of these sysctls; we'd
846 * rather have used '((int *)arg1)[0]' for that, or even better, an element
847 * in a structure passed in as the the 'newp' argument to sysctlbyname(3),
848 * and then use leaf-node permissions enforcement, but that would have
849 * necessitated modifying user space code to correspond to the interface
850 * change, and we are striving for binary backward compatibility here; even
851 * though these are SPI, and not intended for use by user space applications
852 * which are not themselves system tools or libraries, some applications
853 * have erroneously used them.
855 SYSCTL_PROC(_kern_proc
, KERN_PROC_ALL
, all
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
856 0, /* Pointer argument (arg1) */
857 KERN_PROC_ALL
, /* Integer argument (arg2) */
858 sysctl_prochandle
, /* Handler function */
859 NULL
, /* Data is size variant on ILP32/LP64 */
861 SYSCTL_PROC(_kern_proc
, KERN_PROC_PID
, pid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
862 0, /* Pointer argument (arg1) */
863 KERN_PROC_PID
, /* Integer argument (arg2) */
864 sysctl_prochandle
, /* Handler function */
865 NULL
, /* Data is size variant on ILP32/LP64 */
867 SYSCTL_PROC(_kern_proc
, KERN_PROC_TTY
, tty
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
868 0, /* Pointer argument (arg1) */
869 KERN_PROC_TTY
, /* Integer argument (arg2) */
870 sysctl_prochandle
, /* Handler function */
871 NULL
, /* Data is size variant on ILP32/LP64 */
873 SYSCTL_PROC(_kern_proc
, KERN_PROC_PGRP
, pgrp
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
874 0, /* Pointer argument (arg1) */
875 KERN_PROC_PGRP
, /* Integer argument (arg2) */
876 sysctl_prochandle
, /* Handler function */
877 NULL
, /* Data is size variant on ILP32/LP64 */
879 SYSCTL_PROC(_kern_proc
, KERN_PROC_UID
, uid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
880 0, /* Pointer argument (arg1) */
881 KERN_PROC_UID
, /* Integer argument (arg2) */
882 sysctl_prochandle
, /* Handler function */
883 NULL
, /* Data is size variant on ILP32/LP64 */
885 SYSCTL_PROC(_kern_proc
, KERN_PROC_RUID
, ruid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
886 0, /* Pointer argument (arg1) */
887 KERN_PROC_RUID
, /* Integer argument (arg2) */
888 sysctl_prochandle
, /* Handler function */
889 NULL
, /* Data is size variant on ILP32/LP64 */
891 SYSCTL_PROC(_kern_proc
, KERN_PROC_LCID
, lcid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
892 0, /* Pointer argument (arg1) */
893 KERN_PROC_LCID
, /* Integer argument (arg2) */
894 sysctl_prochandle
, /* Handler function */
895 NULL
, /* Data is size variant on ILP32/LP64 */
900 * Fill in non-zero fields of an eproc structure for the specified process.
903 fill_user32_eproc(proc_t p
, struct user32_eproc
*__restrict ep
)
907 struct session
*sessp
;
908 kauth_cred_t my_cred
;
911 sessp
= proc_session(p
);
913 if (pg
!= PGRP_NULL
) {
914 ep
->e_pgid
= p
->p_pgrpid
;
915 ep
->e_jobc
= pg
->pg_jobc
;
916 if (sessp
!= SESSION_NULL
&& sessp
->s_ttyvp
)
917 ep
->e_flag
= EPROC_CTTY
;
919 ep
->e_ppid
= p
->p_ppid
;
921 my_cred
= kauth_cred_proc_ref(p
);
923 /* A fake historical pcred */
924 ep
->e_pcred
.p_ruid
= kauth_cred_getruid(my_cred
);
925 ep
->e_pcred
.p_svuid
= kauth_cred_getsvuid(my_cred
);
926 ep
->e_pcred
.p_rgid
= kauth_cred_getrgid(my_cred
);
927 ep
->e_pcred
.p_svgid
= kauth_cred_getsvgid(my_cred
);
929 /* A fake historical *kauth_cred_t */
930 ep
->e_ucred
.cr_ref
= my_cred
->cr_ref
;
931 ep
->e_ucred
.cr_uid
= kauth_cred_getuid(my_cred
);
932 ep
->e_ucred
.cr_ngroups
= posix_cred_get(my_cred
)->cr_ngroups
;
933 bcopy(posix_cred_get(my_cred
)->cr_groups
,
934 ep
->e_ucred
.cr_groups
, NGROUPS
* sizeof (gid_t
));
936 kauth_cred_unref(&my_cred
);
939 if ((p
->p_flag
& P_CONTROLT
) && (sessp
!= SESSION_NULL
) &&
940 (tp
= SESSION_TP(sessp
))) {
941 ep
->e_tdev
= tp
->t_dev
;
942 ep
->e_tpgid
= sessp
->s_ttypgrpid
;
946 if (sessp
!= SESSION_NULL
) {
947 if (SESS_LEADER(p
, sessp
))
948 ep
->e_flag
|= EPROC_SLEADER
;
956 * Fill in non-zero fields of an LP64 eproc structure for the specified process.
959 fill_user64_eproc(proc_t p
, struct user64_eproc
*__restrict ep
)
963 struct session
*sessp
;
964 kauth_cred_t my_cred
;
967 sessp
= proc_session(p
);
969 if (pg
!= PGRP_NULL
) {
970 ep
->e_pgid
= p
->p_pgrpid
;
971 ep
->e_jobc
= pg
->pg_jobc
;
972 if (sessp
!= SESSION_NULL
&& sessp
->s_ttyvp
)
973 ep
->e_flag
= EPROC_CTTY
;
975 ep
->e_ppid
= p
->p_ppid
;
977 my_cred
= kauth_cred_proc_ref(p
);
979 /* A fake historical pcred */
980 ep
->e_pcred
.p_ruid
= kauth_cred_getruid(my_cred
);
981 ep
->e_pcred
.p_svuid
= kauth_cred_getsvuid(my_cred
);
982 ep
->e_pcred
.p_rgid
= kauth_cred_getrgid(my_cred
);
983 ep
->e_pcred
.p_svgid
= kauth_cred_getsvgid(my_cred
);
985 /* A fake historical *kauth_cred_t */
986 ep
->e_ucred
.cr_ref
= my_cred
->cr_ref
;
987 ep
->e_ucred
.cr_uid
= kauth_cred_getuid(my_cred
);
988 ep
->e_ucred
.cr_ngroups
= posix_cred_get(my_cred
)->cr_ngroups
;
989 bcopy(posix_cred_get(my_cred
)->cr_groups
,
990 ep
->e_ucred
.cr_groups
, NGROUPS
* sizeof (gid_t
));
992 kauth_cred_unref(&my_cred
);
995 if ((p
->p_flag
& P_CONTROLT
) && (sessp
!= SESSION_NULL
) &&
996 (tp
= SESSION_TP(sessp
))) {
997 ep
->e_tdev
= tp
->t_dev
;
998 ep
->e_tpgid
= sessp
->s_ttypgrpid
;
1002 if (sessp
!= SESSION_NULL
) {
1003 if (SESS_LEADER(p
, sessp
))
1004 ep
->e_flag
|= EPROC_SLEADER
;
1005 session_rele(sessp
);
1007 if (pg
!= PGRP_NULL
)
1012 * Fill in an eproc structure for the specified process.
1013 * bzeroed by our caller, so only set non-zero fields.
1016 fill_user32_externproc(proc_t p
, struct user32_extern_proc
*__restrict exp
)
1018 exp
->p_starttime
.tv_sec
= p
->p_start
.tv_sec
;
1019 exp
->p_starttime
.tv_usec
= p
->p_start
.tv_usec
;
1020 exp
->p_flag
= p
->p_flag
;
1021 if (p
->p_lflag
& P_LTRACED
)
1022 exp
->p_flag
|= P_TRACED
;
1023 if (p
->p_lflag
& P_LPPWAIT
)
1024 exp
->p_flag
|= P_PPWAIT
;
1025 if (p
->p_lflag
& P_LEXIT
)
1026 exp
->p_flag
|= P_WEXIT
;
1027 exp
->p_stat
= p
->p_stat
;
1028 exp
->p_pid
= p
->p_pid
;
1029 exp
->p_oppid
= p
->p_oppid
;
1031 exp
->user_stack
= p
->user_stack
;
1032 exp
->p_debugger
= p
->p_debugger
;
1033 exp
->sigwait
= p
->sigwait
;
1035 #ifdef _PROC_HAS_SCHEDINFO_
1036 exp
->p_estcpu
= p
->p_estcpu
;
1037 exp
->p_pctcpu
= p
->p_pctcpu
;
1038 exp
->p_slptime
= p
->p_slptime
;
1040 exp
->p_realtimer
.it_interval
.tv_sec
=
1041 (user32_time_t
)p
->p_realtimer
.it_interval
.tv_sec
;
1042 exp
->p_realtimer
.it_interval
.tv_usec
=
1043 (__int32_t
)p
->p_realtimer
.it_interval
.tv_usec
;
1045 exp
->p_realtimer
.it_value
.tv_sec
=
1046 (user32_time_t
)p
->p_realtimer
.it_value
.tv_sec
;
1047 exp
->p_realtimer
.it_value
.tv_usec
=
1048 (__int32_t
)p
->p_realtimer
.it_value
.tv_usec
;
1050 exp
->p_rtime
.tv_sec
= (user32_time_t
)p
->p_rtime
.tv_sec
;
1051 exp
->p_rtime
.tv_usec
= (__int32_t
)p
->p_rtime
.tv_usec
;
1053 exp
->p_sigignore
= p
->p_sigignore
;
1054 exp
->p_sigcatch
= p
->p_sigcatch
;
1055 exp
->p_priority
= p
->p_priority
;
1056 exp
->p_nice
= p
->p_nice
;
1057 bcopy(&p
->p_comm
, &exp
->p_comm
, MAXCOMLEN
);
1058 exp
->p_xstat
= p
->p_xstat
;
1059 exp
->p_acflag
= p
->p_acflag
;
1063 * Fill in an LP64 version of extern_proc structure for the specified process.
1066 fill_user64_externproc(proc_t p
, struct user64_extern_proc
*__restrict exp
)
1068 exp
->p_starttime
.tv_sec
= p
->p_start
.tv_sec
;
1069 exp
->p_starttime
.tv_usec
= p
->p_start
.tv_usec
;
1070 exp
->p_flag
= p
->p_flag
;
1071 if (p
->p_lflag
& P_LTRACED
)
1072 exp
->p_flag
|= P_TRACED
;
1073 if (p
->p_lflag
& P_LPPWAIT
)
1074 exp
->p_flag
|= P_PPWAIT
;
1075 if (p
->p_lflag
& P_LEXIT
)
1076 exp
->p_flag
|= P_WEXIT
;
1077 exp
->p_stat
= p
->p_stat
;
1078 exp
->p_pid
= p
->p_pid
;
1079 exp
->p_oppid
= p
->p_oppid
;
1081 exp
->user_stack
= p
->user_stack
;
1082 exp
->p_debugger
= p
->p_debugger
;
1083 exp
->sigwait
= p
->sigwait
;
1085 #ifdef _PROC_HAS_SCHEDINFO_
1086 exp
->p_estcpu
= p
->p_estcpu
;
1087 exp
->p_pctcpu
= p
->p_pctcpu
;
1088 exp
->p_slptime
= p
->p_slptime
;
1090 exp
->p_realtimer
.it_interval
.tv_sec
= p
->p_realtimer
.it_interval
.tv_sec
;
1091 exp
->p_realtimer
.it_interval
.tv_usec
= p
->p_realtimer
.it_interval
.tv_usec
;
1093 exp
->p_realtimer
.it_value
.tv_sec
= p
->p_realtimer
.it_value
.tv_sec
;
1094 exp
->p_realtimer
.it_value
.tv_usec
= p
->p_realtimer
.it_value
.tv_usec
;
1096 exp
->p_rtime
.tv_sec
= p
->p_rtime
.tv_sec
;
1097 exp
->p_rtime
.tv_usec
= p
->p_rtime
.tv_usec
;
1099 exp
->p_sigignore
= p
->p_sigignore
;
1100 exp
->p_sigcatch
= p
->p_sigcatch
;
1101 exp
->p_priority
= p
->p_priority
;
1102 exp
->p_nice
= p
->p_nice
;
1103 bcopy(&p
->p_comm
, &exp
->p_comm
, MAXCOMLEN
);
1104 exp
->p_xstat
= p
->p_xstat
;
1105 exp
->p_acflag
= p
->p_acflag
;
1109 fill_user32_proc(proc_t p
, struct user32_kinfo_proc
*__restrict kp
)
1111 /* on a 64 bit kernel, 32 bit users get some truncated information */
1112 fill_user32_externproc(p
, &kp
->kp_proc
);
1113 fill_user32_eproc(p
, &kp
->kp_eproc
);
1117 fill_user64_proc(proc_t p
, struct user64_kinfo_proc
*__restrict kp
)
1119 fill_user64_externproc(p
, &kp
->kp_proc
);
1120 fill_user64_eproc(p
, &kp
->kp_eproc
);
1124 sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
1126 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
1127 int *name
= arg1
; /* oid element argument vector */
1128 int namelen
= arg2
; /* number of oid element arguments */
1129 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1130 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1131 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1132 // size_t newlen = req->newlen; /* user buffer copy in size */
1149 case KERN_KDWRITETR
:
1150 case KERN_KDWRITEMAP
:
1156 case KERN_KDREADCURTHRMAP
:
1157 case KERN_KDSET_TYPEFILTER
:
1158 case KERN_KDBUFWAIT
:
1160 case KERN_KDWRITEMAP_V3
:
1161 case KERN_KDWRITETR_V3
:
1162 ret
= kdbg_control(name
, namelen
, oldp
, oldlenp
);
1169 /* adjust index so we return the right required/consumed amount */
1171 req
->oldidx
+= req
->oldlen
;
1175 SYSCTL_PROC(_kern
, KERN_KDEBUG
, kdebug
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1176 0, /* Pointer argument (arg1) */
1177 0, /* Integer argument (arg2) */
1178 sysctl_kdebug_ops
, /* Handler function */
1179 NULL
, /* Data pointer */
1184 * Return the top *sizep bytes of the user stack, or the entire area of the
1185 * user stack down through the saved exec_path, whichever is smaller.
1188 sysctl_doprocargs SYSCTL_HANDLER_ARGS
1190 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
1191 int *name
= arg1
; /* oid element argument vector */
1192 int namelen
= arg2
; /* number of oid element arguments */
1193 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1194 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1195 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1196 // size_t newlen = req->newlen; /* user buffer copy in size */
1199 error
= sysctl_procargsx( name
, namelen
, oldp
, oldlenp
, current_proc(), 0);
1201 /* adjust index so we return the right required/consumed amount */
1203 req
->oldidx
+= req
->oldlen
;
1207 SYSCTL_PROC(_kern
, KERN_PROCARGS
, procargs
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1208 0, /* Pointer argument (arg1) */
1209 0, /* Integer argument (arg2) */
1210 sysctl_doprocargs
, /* Handler function */
1211 NULL
, /* Data pointer */
1215 sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
1217 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
1218 int *name
= arg1
; /* oid element argument vector */
1219 int namelen
= arg2
; /* number of oid element arguments */
1220 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1221 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1222 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1223 // size_t newlen = req->newlen; /* user buffer copy in size */
1226 error
= sysctl_procargsx( name
, namelen
, oldp
, oldlenp
, current_proc(), 1);
1228 /* adjust index so we return the right required/consumed amount */
1230 req
->oldidx
+= req
->oldlen
;
1234 SYSCTL_PROC(_kern
, KERN_PROCARGS2
, procargs2
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1235 0, /* Pointer argument (arg1) */
1236 0, /* Integer argument (arg2) */
1237 sysctl_doprocargs2
, /* Handler function */
1238 NULL
, /* Data pointer */
1242 sysctl_procargsx(int *name
, u_int namelen
, user_addr_t where
,
1243 size_t *sizep
, proc_t cur_proc
, int argc_yes
)
1246 int buflen
= where
!= USER_ADDR_NULL
? *sizep
: 0;
1248 struct _vm_map
*proc_map
;
1251 user_addr_t arg_addr
;
1256 vm_offset_t copy_start
, copy_end
;
1259 kauth_cred_t my_cred
;
1266 buflen
-= sizeof(int); /* reserve first word to return argc */
1268 /* we only care about buflen when where (oldp from sysctl) is not NULL. */
1269 /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
1270 /* is not NULL then the caller wants us to return the length needed to */
1271 /* hold the data we would return */
1272 if (where
!= USER_ADDR_NULL
&& (buflen
<= 0 || buflen
> ARG_MAX
)) {
1278 * Lookup process by pid
1287 * Copy the top N bytes of the stack.
1288 * On all machines we have so far, the stack grows
1291 * If the user expects no more than N bytes of
1292 * argument list, use that as a guess for the
1296 if (!p
->user_stack
) {
1301 if (where
== USER_ADDR_NULL
) {
1302 /* caller only wants to know length of proc args data */
1303 if (sizep
== NULL
) {
1308 size
= p
->p_argslen
;
1311 size
+= sizeof(int);
1315 * old PROCARGS will return the executable's path and plus some
1316 * extra space for work alignment and data tags
1318 size
+= PATH_MAX
+ (6 * sizeof(int));
1320 size
+= (size
& (sizeof(int) - 1)) ? (sizeof(int) - (size
& (sizeof(int) - 1))) : 0;
1325 my_cred
= kauth_cred_proc_ref(p
);
1326 uid
= kauth_cred_getuid(my_cred
);
1327 kauth_cred_unref(&my_cred
);
1329 if ((uid
!= kauth_cred_getuid(kauth_cred_get()))
1330 && suser(kauth_cred_get(), &cur_proc
->p_acflag
)) {
1335 if ((u_int
)arg_size
> p
->p_argslen
)
1336 arg_size
= round_page(p
->p_argslen
);
1338 arg_addr
= p
->user_stack
- arg_size
;
1342 * Before we can block (any VM code), make another
1343 * reference to the map to keep it alive. We do
1344 * that by getting a reference on the task itself.
1352 argslen
= p
->p_argslen
;
1354 * Once we have a task reference we can convert that into a
1355 * map reference, which we will use in the calls below. The
1356 * task/process may change its map after we take this reference
1357 * (see execve), but the worst that will happen then is a return
1358 * of stale info (which is always a possibility).
1360 task_reference(task
);
1362 proc_map
= get_task_map_reference(task
);
1363 task_deallocate(task
);
1365 if (proc_map
== NULL
)
1369 ret
= kmem_alloc(kernel_map
, ©_start
, round_page(arg_size
), VM_KERN_MEMORY_BSD
);
1370 if (ret
!= KERN_SUCCESS
) {
1371 vm_map_deallocate(proc_map
);
1375 copy_end
= round_page(copy_start
+ arg_size
);
1377 if( vm_map_copyin(proc_map
, (vm_map_address_t
)arg_addr
,
1378 (vm_map_size_t
)arg_size
, FALSE
, &tmp
) != KERN_SUCCESS
) {
1379 vm_map_deallocate(proc_map
);
1380 kmem_free(kernel_map
, copy_start
,
1381 round_page(arg_size
));
1386 * Now that we've done the copyin from the process'
1387 * map, we can release the reference to it.
1389 vm_map_deallocate(proc_map
);
1391 if( vm_map_copy_overwrite(kernel_map
,
1392 (vm_map_address_t
)copy_start
,
1393 tmp
, FALSE
) != KERN_SUCCESS
) {
1394 kmem_free(kernel_map
, copy_start
,
1395 round_page(arg_size
));
1396 vm_map_copy_discard(tmp
);
1400 if (arg_size
> argslen
) {
1401 data
= (caddr_t
) (copy_end
- argslen
);
1404 data
= (caddr_t
) (copy_end
- arg_size
);
1409 * When these sysctls were introduced, the first string in the strings
1410 * section was just the bare path of the executable. However, for security
1411 * reasons we now prefix this string with executable_path= so it can be
1413 * parsed getenv style. To avoid binary compatibility issues with existing
1413 * callers of this sysctl, we strip it off here if present.
1414 * (rdar://problem/13746466)
1416 #define EXECUTABLE_KEY "executable_path="
1417 if (strncmp(EXECUTABLE_KEY
, data
, strlen(EXECUTABLE_KEY
)) == 0){
1418 data
+= strlen(EXECUTABLE_KEY
);
1419 size
-= strlen(EXECUTABLE_KEY
);
1423 /* Put processes argc as the first word in the copyout buffer */
1424 suword(where
, p
->p_argc
);
1425 error
= copyout(data
, (where
+ sizeof(int)), size
);
1426 size
+= sizeof(int);
1428 error
= copyout(data
, where
, size
);
1431 * Make the old PROCARGS work to return the executable's path
1432 * But, only if there is enough space in the provided buffer
1434 * on entry: data [possibly] points to the beginning of the path
1436 * Note: we keep all pointers & sizes aligned to word boundaries
1438 if ( (! error
) && (buflen
> 0 && (u_int
)buflen
> argslen
) )
1440 int binPath_sz
, alignedBinPath_sz
= 0;
1441 int extraSpaceNeeded
, addThis
;
1442 user_addr_t placeHere
;
1443 char * str
= (char *) data
;
1446 /* Some apps are really bad about messing up their stacks
1447 So, we have to be extra careful about getting the length
1448 of the executing binary. If we encounter an error, we bail.
1451 /* Limit ourselves to PATH_MAX paths */
1452 if ( max_len
> PATH_MAX
) max_len
= PATH_MAX
;
1456 while ( (binPath_sz
< max_len
-1) && (*str
++ != 0) )
1459 /* If we have a NUL terminator, copy it, too */
1460 if (binPath_sz
< max_len
-1) binPath_sz
+= 1;
1462 /* Pre-flight the space requirements */
1464 /* Account for the padding that fills out binPath to the next word */
1465 alignedBinPath_sz
+= (binPath_sz
& (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz
& (sizeof(int)-1))) : 0;
1467 placeHere
= where
+ size
;
1469 /* Account for the bytes needed to keep placeHere word aligned */
1470 addThis
= (placeHere
& (sizeof(int)-1)) ? (sizeof(int)-(placeHere
& (sizeof(int)-1))) : 0;
1472 /* Add up all the space that is needed */
1473 extraSpaceNeeded
= alignedBinPath_sz
+ addThis
+ binPath_sz
+ (4 * sizeof(int));
1475 /* is there is room to tack on argv[0]? */
1476 if ( (buflen
& ~(sizeof(int)-1)) >= ( argslen
+ extraSpaceNeeded
))
1478 placeHere
+= addThis
;
1479 suword(placeHere
, 0);
1480 placeHere
+= sizeof(int);
1481 suword(placeHere
, 0xBFFF0000);
1482 placeHere
+= sizeof(int);
1483 suword(placeHere
, 0);
1484 placeHere
+= sizeof(int);
1485 error
= copyout(data
, placeHere
, binPath_sz
);
1488 placeHere
+= binPath_sz
;
1489 suword(placeHere
, 0);
1490 size
+= extraSpaceNeeded
;
1496 if (copy_start
!= (vm_offset_t
) 0) {
1497 kmem_free(kernel_map
, copy_start
, copy_end
- copy_start
);
1503 if (where
!= USER_ADDR_NULL
)
1510 * Max number of concurrent aio requests
1514 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1516 int new_value
, changed
;
1517 int error
= sysctl_io_number(req
, aio_max_requests
, sizeof(int), &new_value
, &changed
);
1519 /* make sure the system-wide limit is greater than the per process limit */
1520 if (new_value
>= aio_max_requests_per_process
&& new_value
<= AIO_MAX_REQUESTS
)
1521 aio_max_requests
= new_value
;
1530 * Max number of concurrent aio requests per process
1534 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1536 int new_value
, changed
;
1537 int error
= sysctl_io_number(req
, aio_max_requests_per_process
, sizeof(int), &new_value
, &changed
);
1539 /* make sure per process limit is less than the system-wide limit */
1540 if (new_value
<= aio_max_requests
&& new_value
>= AIO_LISTIO_MAX
)
1541 aio_max_requests_per_process
= new_value
;
1550 * Max number of async IO worker threads
1554 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1556 int new_value
, changed
;
1557 int error
= sysctl_io_number(req
, aio_worker_threads
, sizeof(int), &new_value
, &changed
);
1559 /* we only allow an increase in the number of worker threads */
1560 if (new_value
> aio_worker_threads
) {
1561 _aio_create_worker_threads((new_value
- aio_worker_threads
));
1562 aio_worker_threads
= new_value
;
1572 * System-wide limit on the max number of processes
1576 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1578 int new_value
, changed
;
1579 int error
= sysctl_io_number(req
, maxproc
, sizeof(int), &new_value
, &changed
);
1581 AUDIT_ARG(value32
, new_value
);
1582 /* make sure the system-wide limit is less than the configured hard
1583 limit set at kernel compilation */
1584 if (new_value
<= hard_maxproc
&& new_value
> 0)
1585 maxproc
= new_value
;
1592 SYSCTL_STRING(_kern
, KERN_OSTYPE
, ostype
,
1593 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1595 SYSCTL_STRING(_kern
, KERN_OSRELEASE
, osrelease
,
1596 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1598 SYSCTL_INT(_kern
, KERN_OSREV
, osrevision
,
1599 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1600 (int *)NULL
, BSD
, "");
1601 SYSCTL_STRING(_kern
, KERN_VERSION
, version
,
1602 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1604 SYSCTL_STRING(_kern
, OID_AUTO
, uuid
,
1605 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1606 &kernel_uuid_string
[0], 0, "");
1609 int debug_kprint_syscall
= 0;
1610 char debug_kprint_syscall_process
[MAXCOMLEN
+1];
1612 /* Thread safe: bits and string value are not used to reclaim state */
1613 SYSCTL_INT (_debug
, OID_AUTO
, kprint_syscall
,
1614 CTLFLAG_RW
| CTLFLAG_LOCKED
, &debug_kprint_syscall
, 0, "kprintf syscall tracing");
1615 SYSCTL_STRING(_debug
, OID_AUTO
, kprint_syscall_process
,
1616 CTLFLAG_RW
| CTLFLAG_LOCKED
, debug_kprint_syscall_process
, sizeof(debug_kprint_syscall_process
),
1617 "name of process for kprintf syscall tracing");
1619 int debug_kprint_current_process(const char **namep
)
1621 struct proc
*p
= current_proc();
1627 if (debug_kprint_syscall_process
[0]) {
1628 /* user asked to scope tracing to a particular process name */
1629 if(0 == strncmp(debug_kprint_syscall_process
,
1630 p
->p_comm
, sizeof(debug_kprint_syscall_process
))) {
1631 /* no value in telling the user that we traced what they asked */
1632 if(namep
) *namep
= NULL
;
1640 /* trace all processes. Tell user what we traced */
1649 /* PR-5293665: need to use a callback function for kern.osversion to set
1650 * osversion in IORegistry */
1653 sysctl_osversion(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
)
1657 rval
= sysctl_handle_string(oidp
, arg1
, arg2
, req
);
1660 IORegistrySetOSBuildVersion((char *)arg1
);
1666 SYSCTL_PROC(_kern
, KERN_OSVERSION
, osversion
,
1667 CTLFLAG_RW
| CTLFLAG_KERN
| CTLTYPE_STRING
| CTLFLAG_LOCKED
,
1668 osversion
, 256 /* OSVERSIZE*/,
1669 sysctl_osversion
, "A", "");
1672 sysctl_sysctl_bootargs
1673 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1678 strlcpy(buf
, PE_boot_args(), 256);
1679 error
= sysctl_io_string(req
, buf
, 256, 0, NULL
);
1683 SYSCTL_PROC(_kern
, OID_AUTO
, bootargs
,
1684 CTLFLAG_LOCKED
| CTLFLAG_RD
| CTLFLAG_KERN
| CTLTYPE_STRING
,
1686 sysctl_sysctl_bootargs
, "A", "bootargs");
1688 SYSCTL_INT(_kern
, KERN_MAXFILES
, maxfiles
,
1689 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1691 SYSCTL_INT(_kern
, KERN_ARGMAX
, argmax
,
1692 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1693 (int *)NULL
, ARG_MAX
, "");
1694 SYSCTL_INT(_kern
, KERN_POSIX1
, posix1version
,
1695 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1696 (int *)NULL
, _POSIX_VERSION
, "");
1697 SYSCTL_INT(_kern
, KERN_NGROUPS
, ngroups
,
1698 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1699 (int *)NULL
, NGROUPS_MAX
, "");
1700 SYSCTL_INT(_kern
, KERN_JOB_CONTROL
, job_control
,
1701 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1702 (int *)NULL
, 1, "");
1703 #if 1 /* _POSIX_SAVED_IDS from <unistd.h> */
1704 SYSCTL_INT(_kern
, KERN_SAVED_IDS
, saved_ids
,
1705 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1706 (int *)NULL
, 1, "");
1708 SYSCTL_INT(_kern
, KERN_SAVED_IDS
, saved_ids
,
1709 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1712 SYSCTL_INT(_kern
, OID_AUTO
, num_files
,
1713 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1715 SYSCTL_COMPAT_INT(_kern
, OID_AUTO
, num_vnodes
,
1716 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1718 SYSCTL_INT(_kern
, OID_AUTO
, num_tasks
,
1719 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1721 SYSCTL_INT(_kern
, OID_AUTO
, num_threads
,
1722 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1723 &thread_max
, 0, "");
1724 SYSCTL_INT(_kern
, OID_AUTO
, num_taskthreads
,
1725 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1726 &task_threadmax
, 0, "");
1729 sysctl_maxvnodes (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1731 int oldval
= desiredvnodes
;
1732 int error
= sysctl_io_number(req
, desiredvnodes
, sizeof(int), &desiredvnodes
, NULL
);
1734 if (oldval
!= desiredvnodes
) {
1735 reset_vmobjectcache(oldval
, desiredvnodes
);
1736 resize_namecache(desiredvnodes
);
1742 SYSCTL_INT(_kern
, OID_AUTO
, namecache_disabled
,
1743 CTLFLAG_RW
| CTLFLAG_LOCKED
,
1744 &nc_disabled
, 0, "");
1746 SYSCTL_PROC(_kern
, KERN_MAXVNODES
, maxvnodes
,
1747 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1748 0, 0, sysctl_maxvnodes
, "I", "");
1750 SYSCTL_PROC(_kern
, KERN_MAXPROC
, maxproc
,
1751 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1752 0, 0, sysctl_maxproc
, "I", "");
1754 SYSCTL_PROC(_kern
, KERN_AIOMAX
, aiomax
,
1755 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1756 0, 0, sysctl_aiomax
, "I", "");
1758 SYSCTL_PROC(_kern
, KERN_AIOPROCMAX
, aioprocmax
,
1759 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1760 0, 0, sysctl_aioprocmax
, "I", "");
1762 SYSCTL_PROC(_kern
, KERN_AIOTHREADS
, aiothreads
,
1763 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1764 0, 0, sysctl_aiothreads
, "I", "");
1766 #if (DEVELOPMENT || DEBUG)
1767 extern int sched_smt_balance
;
1768 SYSCTL_INT(_kern
, OID_AUTO
, sched_smt_balance
,
1769 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1770 &sched_smt_balance
, 0, "");
1775 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1777 int new_value
, changed
;
1778 int error
= sysctl_io_number(req
, securelevel
, sizeof(int), &new_value
, &changed
);
1780 if (!(new_value
< securelevel
&& req
->p
->p_pid
!= 1)) {
1782 securelevel
= new_value
;
1791 SYSCTL_PROC(_kern
, KERN_SECURELVL
, securelevel
,
1792 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1793 0, 0, sysctl_securelvl
, "I", "");
1798 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1801 error
= sysctl_io_string(req
, domainname
, sizeof(domainname
), 0, &changed
);
1803 domainnamelen
= strlen(domainname
);
1808 SYSCTL_PROC(_kern
, KERN_DOMAINNAME
, nisdomainname
,
1809 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1810 0, 0, sysctl_domainname
, "A", "");
1812 SYSCTL_COMPAT_INT(_kern
, KERN_HOSTID
, hostid
,
1813 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1818 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1821 error
= sysctl_io_string(req
, hostname
, sizeof(hostname
), 1, &changed
);
1823 hostnamelen
= req
->newlen
;
1829 SYSCTL_PROC(_kern
, KERN_HOSTNAME
, hostname
,
1830 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1831 0, 0, sysctl_hostname
, "A", "");
1835 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1837 /* Original code allowed writing, I'm copying this, although this all makes
1838 no sense to me. Besides, this sysctl is never used. */
1839 return sysctl_io_string(req
, &req
->p
->p_name
[0], (2*MAXCOMLEN
+1), 1, NULL
);
1842 SYSCTL_PROC(_kern
, KERN_PROCNAME
, procname
,
1843 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
1844 0, 0, sysctl_procname
, "A", "");
1846 SYSCTL_INT(_kern
, KERN_SPECULATIVE_READS
, speculative_reads_disabled
,
1847 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1848 &speculative_reads_disabled
, 0, "");
1850 SYSCTL_INT(_kern
, OID_AUTO
, ignore_is_ssd
,
1851 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1852 &ignore_is_ssd
, 0, "");
1854 SYSCTL_UINT(_kern
, OID_AUTO
, preheat_max_bytes
,
1855 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1856 &preheat_max_bytes
, 0, "");
1858 SYSCTL_UINT(_kern
, OID_AUTO
, preheat_min_bytes
,
1859 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1860 &preheat_min_bytes
, 0, "");
1862 SYSCTL_UINT(_kern
, OID_AUTO
, speculative_prefetch_max
,
1863 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1864 &speculative_prefetch_max
, 0, "");
1866 SYSCTL_UINT(_kern
, OID_AUTO
, speculative_prefetch_max_iosize
,
1867 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1868 &speculative_prefetch_max_iosize
, 0, "");
1870 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_free_target
,
1871 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1872 &vm_page_free_target
, 0, "");
1874 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_free_min
,
1875 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1876 &vm_page_free_min
, 0, "");
1878 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_free_reserved
,
1879 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1880 &vm_page_free_reserved
, 0, "");
1882 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_speculative_percentage
,
1883 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1884 &vm_page_speculative_percentage
, 0, "");
1886 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_speculative_q_age_ms
,
1887 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1888 &vm_page_speculative_q_age_ms
, 0, "");
1890 SYSCTL_UINT(_kern
, OID_AUTO
, vm_max_delayed_work_limit
,
1891 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1892 &vm_max_delayed_work_limit
, 0, "");
1894 SYSCTL_UINT(_kern
, OID_AUTO
, vm_max_batch
,
1895 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1896 &vm_max_batch
, 0, "");
1898 SYSCTL_STRING(_kern
, OID_AUTO
, bootsessionuuid
,
1899 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1900 &bootsessionuuid_string
, sizeof(bootsessionuuid_string
) , "");
1904 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1907 boottime_timeval(&tv
);
1908 struct proc
*p
= req
->p
;
1910 if (proc_is64bit(p
)) {
1911 struct user64_timeval t
;
1912 t
.tv_sec
= tv
.tv_sec
;
1913 t
.tv_usec
= tv
.tv_usec
;
1914 return sysctl_io_opaque(req
, &t
, sizeof(t
), NULL
);
1916 struct user32_timeval t
;
1917 t
.tv_sec
= tv
.tv_sec
;
1918 t
.tv_usec
= tv
.tv_usec
;
1919 return sysctl_io_opaque(req
, &t
, sizeof(t
), NULL
);
1923 SYSCTL_PROC(_kern
, KERN_BOOTTIME
, boottime
,
1924 CTLTYPE_STRUCT
| CTLFLAG_KERN
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1925 0, 0, sysctl_boottime
, "S,timeval", "");
1929 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1932 int error
= get_kernel_symfile(req
->p
, &str
);
1935 return sysctl_io_string(req
, str
, 0, 0, NULL
);
1939 SYSCTL_PROC(_kern
, KERN_SYMFILE
, symfile
,
1940 CTLTYPE_STRING
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1941 0, 0, sysctl_symfile
, "A", "");
1946 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1948 return sysctl_io_number(req
, netboot_root(), sizeof(int), NULL
, NULL
);
1951 SYSCTL_PROC(_kern
, KERN_NETBOOT
, netboot
,
1952 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1953 0, 0, sysctl_netboot
, "I", "");
1956 #ifdef CONFIG_IMGSRC_ACCESS
1958 * Legacy--act as if only one layer of nesting is possible.
1962 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1964 vfs_context_t ctx
= vfs_context_current();
1968 if (!vfs_context_issuser(ctx
)) {
1972 if (imgsrc_rootvnodes
[0] == NULL
) {
1976 result
= vnode_getwithref(imgsrc_rootvnodes
[0]);
1981 devvp
= vnode_mount(imgsrc_rootvnodes
[0])->mnt_devvp
;
1982 result
= vnode_getwithref(devvp
);
1987 result
= sysctl_io_number(req
, vnode_specrdev(devvp
), sizeof(dev_t
), NULL
, NULL
);
1991 vnode_put(imgsrc_rootvnodes
[0]);
1995 SYSCTL_PROC(_kern
, OID_AUTO
, imgsrcdev
,
1996 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1997 0, 0, sysctl_imgsrcdev
, "I", "");
2001 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2004 struct imgsrc_info info
[MAX_IMAGEBOOT_NESTING
]; /* 2 for now, no problem */
2008 if (imgsrc_rootvnodes
[0] == NULLVP
) {
2012 for (i
= 0; i
< MAX_IMAGEBOOT_NESTING
; i
++) {
2014 * Go get the root vnode.
2016 rvp
= imgsrc_rootvnodes
[i
];
2017 if (rvp
== NULLVP
) {
2021 error
= vnode_get(rvp
);
2027 * For now, no getting at a non-local volume.
2029 devvp
= vnode_mount(rvp
)->mnt_devvp
;
2030 if (devvp
== NULL
) {
2035 error
= vnode_getwithref(devvp
);
2044 info
[i
].ii_dev
= vnode_specrdev(devvp
);
2045 info
[i
].ii_flags
= 0;
2046 info
[i
].ii_height
= i
;
2047 bzero(info
[i
].ii_reserved
, sizeof(info
[i
].ii_reserved
));
2053 return sysctl_io_opaque(req
, info
, i
* sizeof(info
[0]), NULL
);
2056 SYSCTL_PROC(_kern
, OID_AUTO
, imgsrcinfo
,
2057 CTLTYPE_OPAQUE
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2058 0, 0, sysctl_imgsrcinfo
, "I", "");
2060 #endif /* CONFIG_IMGSRC_ACCESS */
2063 SYSCTL_DECL(_kern_timer
);
2064 SYSCTL_NODE(_kern
, OID_AUTO
, timer
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, "timer");
2067 SYSCTL_INT(_kern_timer
, OID_AUTO
, coalescing_enabled
,
2068 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2069 &mach_timer_coalescing_enabled
, 0, "");
2071 SYSCTL_QUAD(_kern_timer
, OID_AUTO
, deadline_tracking_bin_1
,
2072 CTLFLAG_RW
| CTLFLAG_LOCKED
,
2073 &timer_deadline_tracking_bin_1
, "");
2074 SYSCTL_QUAD(_kern_timer
, OID_AUTO
, deadline_tracking_bin_2
,
2075 CTLFLAG_RW
| CTLFLAG_LOCKED
,
2076 &timer_deadline_tracking_bin_2
, "");
2078 SYSCTL_DECL(_kern_timer_longterm
);
2079 SYSCTL_NODE(_kern_timer
, OID_AUTO
, longterm
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, "longterm");
2082 /* Must match definition in osfmk/kern/timer_call.c */
2085 ENQUEUES
, DEQUEUES
, ESCALATES
, SCANS
, PREEMPTS
,
2086 LATENCY
, LATENCY_MIN
, LATENCY_MAX
2088 extern uint64_t timer_sysctl_get(int);
2089 extern int timer_sysctl_set(int, uint64_t);
2093 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2095 int oid
= (int)arg1
;
2096 uint64_t value
= timer_sysctl_get(oid
);
2101 error
= sysctl_io_number(req
, value
, sizeof(value
), &new_value
, &changed
);
2103 error
= timer_sysctl_set(oid
, new_value
);
2108 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, threshold
,
2109 CTLTYPE_QUAD
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2110 (void *) THRESHOLD
, 0, sysctl_timer
, "Q", "");
2111 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, qlen
,
2112 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2113 (void *) QCOUNT
, 0, sysctl_timer
, "Q", "");
2115 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, enqueues
,
2116 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2117 (void *) ENQUEUES
, 0, sysctl_timer
, "Q", "");
2118 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, dequeues
,
2119 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2120 (void *) DEQUEUES
, 0, sysctl_timer
, "Q", "");
2121 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, escalates
,
2122 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2123 (void *) ESCALATES
, 0, sysctl_timer
, "Q", "");
2124 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, scans
,
2125 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2126 (void *) SCANS
, 0, sysctl_timer
, "Q", "");
2127 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, preempts
,
2128 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2129 (void *) PREEMPTS
, 0, sysctl_timer
, "Q", "");
2130 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, latency
,
2131 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2132 (void *) LATENCY
, 0, sysctl_timer
, "Q", "");
2133 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, latency_min
,
2134 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2135 (void *) LATENCY_MIN
, 0, sysctl_timer
, "Q", "");
2136 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, latency_max
,
2137 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2138 (void *) LATENCY_MAX
, 0, sysctl_timer
, "Q", "");
2143 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2145 return sysctl_io_number(req
, (int)req
->p
->user_stack
, sizeof(int), NULL
, NULL
);
2148 SYSCTL_PROC(_kern
, KERN_USRSTACK32
, usrstack
,
2149 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2150 0, 0, sysctl_usrstack
, "I", "");
2154 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2156 return sysctl_io_number(req
, req
->p
->user_stack
, sizeof(req
->p
->user_stack
), NULL
, NULL
);
2159 SYSCTL_PROC(_kern
, KERN_USRSTACK64
, usrstack64
,
2160 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2161 0, 0, sysctl_usrstack64
, "Q", "");
2165 SYSCTL_STRING(_kern
, KERN_COREFILE
, corefile
,
2166 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2167 corefilename
, sizeof(corefilename
), "");
2171 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2173 #ifdef SECURE_KERNEL
2177 int new_value
, changed
;
2178 int error
= sysctl_io_number(req
, do_coredump
, sizeof(int), &new_value
, &changed
);
2180 if ((new_value
== 0) || (new_value
== 1))
2181 do_coredump
= new_value
;
2189 SYSCTL_PROC(_kern
, KERN_COREDUMP
, coredump
,
2190 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2191 0, 0, sysctl_coredump
, "I", "");
2194 sysctl_suid_coredump
2195 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2197 #ifdef SECURE_KERNEL
2201 int new_value
, changed
;
2202 int error
= sysctl_io_number(req
, sugid_coredump
, sizeof(int), &new_value
, &changed
);
2204 if ((new_value
== 0) || (new_value
== 1))
2205 sugid_coredump
= new_value
;
2213 SYSCTL_PROC(_kern
, KERN_SUGID_COREDUMP
, sugid_coredump
,
2214 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2215 0, 0, sysctl_suid_coredump
, "I", "");
2217 #endif /* CONFIG_COREDUMP */
2221 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2223 struct proc
*p
= req
->p
;
2224 int new_value
, changed
;
2225 int error
= sysctl_io_number(req
, (req
->p
->p_lflag
& P_LDELAYTERM
)? 1: 0, sizeof(int), &new_value
, &changed
);
2229 req
->p
->p_lflag
|= P_LDELAYTERM
;
2231 req
->p
->p_lflag
&= ~P_LDELAYTERM
;
2237 SYSCTL_PROC(_kern
, KERN_PROCDELAYTERM
, delayterm
,
2238 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2239 0, 0, sysctl_delayterm
, "I", "");
2244 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2246 struct proc
*p
= req
->p
;
2248 int new_value
, old_value
, changed
;
2251 ut
= get_bsdthread_info(current_thread());
2253 if (ut
->uu_flag
& UT_RAGE_VNODES
)
2254 old_value
= KERN_RAGE_THREAD
;
2255 else if (p
->p_lflag
& P_LRAGE_VNODES
)
2256 old_value
= KERN_RAGE_PROC
;
2260 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
2263 switch (new_value
) {
2264 case KERN_RAGE_PROC
:
2266 p
->p_lflag
|= P_LRAGE_VNODES
;
2269 case KERN_UNRAGE_PROC
:
2271 p
->p_lflag
&= ~P_LRAGE_VNODES
;
2275 case KERN_RAGE_THREAD
:
2276 ut
->uu_flag
|= UT_RAGE_VNODES
;
2278 case KERN_UNRAGE_THREAD
:
2279 ut
= get_bsdthread_info(current_thread());
2280 ut
->uu_flag
&= ~UT_RAGE_VNODES
;
2287 SYSCTL_PROC(_kern
, KERN_RAGEVNODE
, rage_vnode
,
2288 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
2289 0, 0, sysctl_rage_vnode
, "I", "");
2291 /* XXX move this interface into libproc and remove this sysctl */
2293 sysctl_setthread_cpupercent
2294 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2296 int new_value
, old_value
;
2298 kern_return_t kret
= KERN_SUCCESS
;
2299 uint8_t percent
= 0;
2307 if ((error
= sysctl_io_number(req
, old_value
, sizeof(old_value
), &new_value
, NULL
)) != 0)
2310 percent
= new_value
& 0xff; /* low 8 bits for percent */
2311 ms_refill
= (new_value
>> 8) & 0xffffff; /* upper 24 bits represent ms refill value */
2316 * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present.
2318 if ((kret
= thread_set_cpulimit(THREAD_CPULIMIT_BLOCK
, percent
, ms_refill
* (int)NSEC_PER_MSEC
)) != 0)
2324 SYSCTL_PROC(_kern
, OID_AUTO
, setthread_cpupercent
,
2325 CTLTYPE_INT
| CTLFLAG_WR
| CTLFLAG_ANYBODY
,
2326 0, 0, sysctl_setthread_cpupercent
, "I", "set thread cpu percentage limit");
2330 sysctl_kern_check_openevt
2331 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2333 struct proc
*p
= req
->p
;
2334 int new_value
, old_value
, changed
;
2337 if (p
->p_flag
& P_CHECKOPENEVT
) {
2338 old_value
= KERN_OPENEVT_PROC
;
2343 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
2346 switch (new_value
) {
2347 case KERN_OPENEVT_PROC
:
2348 OSBitOrAtomic(P_CHECKOPENEVT
, &p
->p_flag
);
2351 case KERN_UNOPENEVT_PROC
:
2352 OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT
), &p
->p_flag
);
2362 SYSCTL_PROC(_kern
, KERN_CHECKOPENEVT
, check_openevt
, CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
2363 0, 0, sysctl_kern_check_openevt
, "I", "set the per-process check-open-evt flag");
2369 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2371 #ifdef SECURE_KERNEL
2375 int new_value
, changed
;
2378 error
= sysctl_io_number(req
, nx_enabled
, sizeof(nx_enabled
), &new_value
, &changed
);
2383 #if defined(__i386__) || defined(__x86_64__)
2385 * Only allow setting if NX is supported on the chip
2387 if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD
))
2390 nx_enabled
= new_value
;
2393 #endif /* SECURE_KERNEL */
2398 SYSCTL_PROC(_kern
, KERN_NX_PROTECTION
, nx
,
2399 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2400 0, 0, sysctl_nx
, "I", "");
2404 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2406 if (proc_is64bit(req
->p
)) {
2407 struct user64_loadavg loadinfo64
;
2408 fill_loadavg64(&averunnable
, &loadinfo64
);
2409 return sysctl_io_opaque(req
, &loadinfo64
, sizeof(loadinfo64
), NULL
);
2411 struct user32_loadavg loadinfo32
;
2412 fill_loadavg32(&averunnable
, &loadinfo32
);
2413 return sysctl_io_opaque(req
, &loadinfo32
, sizeof(loadinfo32
), NULL
);
2417 SYSCTL_PROC(_vm
, VM_LOADAVG
, loadavg
,
2418 CTLTYPE_STRUCT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2419 0, 0, sysctl_loadavg
, "S,loadavg", "");
2422 * Note: Thread safe; vm_map_lock protects in vm_toggle_entry_reuse()
2425 sysctl_vm_toggle_address_reuse(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
,
2426 __unused
int arg2
, struct sysctl_req
*req
)
2428 int old_value
=0, new_value
=0, error
=0;
2430 if(vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE
, &old_value
))
2432 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, NULL
);
2434 return (vm_toggle_entry_reuse(new_value
, NULL
));
2439 SYSCTL_PROC(_debug
, OID_AUTO
, toggle_address_reuse
, CTLFLAG_ANYBODY
| CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, 0, sysctl_vm_toggle_address_reuse
,"I","");
2444 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2447 uint64_t swap_total
;
2448 uint64_t swap_avail
;
2449 vm_size_t swap_pagesize
;
2450 boolean_t swap_encrypted
;
2451 struct xsw_usage xsu
;
2453 error
= macx_swapinfo(&swap_total
,
2460 xsu
.xsu_total
= swap_total
;
2461 xsu
.xsu_avail
= swap_avail
;
2462 xsu
.xsu_used
= swap_total
- swap_avail
;
2463 xsu
.xsu_pagesize
= swap_pagesize
;
2464 xsu
.xsu_encrypted
= swap_encrypted
;
2465 return sysctl_io_opaque(req
, &xsu
, sizeof(xsu
), NULL
);
2470 SYSCTL_PROC(_vm
, VM_SWAPUSAGE
, swapusage
,
2471 CTLTYPE_STRUCT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2472 0, 0, sysctl_swapusage
, "S,xsw_usage", "");
2475 extern void vm_page_reactivate_all_throttled(void);
2478 sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
2480 #pragma unused(arg1, arg2)
2481 int error
, val
= memorystatus_freeze_enabled
? 1 : 0;
2484 error
= sysctl_handle_int(oidp
, &val
, 0, req
);
2485 if (error
|| !req
->newptr
)
2488 if (VM_CONFIG_COMPRESSOR_IS_ACTIVE
) {
2489 //assert(req->newptr);
2490 printf("Failed attempt to set vm.freeze_enabled sysctl\n");
2495 * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue.
2497 disabled
= (!val
&& memorystatus_freeze_enabled
);
2499 memorystatus_freeze_enabled
= val
? TRUE
: FALSE
;
2502 vm_page_reactivate_all_throttled();
2508 SYSCTL_PROC(_vm
, OID_AUTO
, freeze_enabled
, CTLTYPE_INT
|CTLFLAG_RW
, &memorystatus_freeze_enabled
, 0, sysctl_freeze_enabled
, "I", "");
2509 #endif /* CONFIG_FREEZE */
2511 /* this kernel does NOT implement shared_region_make_private_np() */
2512 SYSCTL_INT(_kern
, KERN_SHREG_PRIVATIZABLE
, shreg_private
,
2513 CTLFLAG_RD
| CTLFLAG_LOCKED
,
2514 (int *)NULL
, 0, "");
2517 fetch_process_cputype(
2521 cpu_type_t
*cputype
)
2523 proc_t p
= PROC_NULL
;
2530 else if (namelen
== 1) {
2531 p
= proc_find(name
[0]);
2540 ret
= cpu_type() & ~CPU_ARCH_MASK
;
2541 if (IS_64BIT_PROCESS(p
))
2542 ret
|= CPU_ARCH_ABI64
;
2553 sysctl_sysctl_native(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
,
2554 struct sysctl_req
*req
)
2557 cpu_type_t proc_cputype
= 0;
2558 if ((error
= fetch_process_cputype(req
->p
, (int *)arg1
, arg2
, &proc_cputype
)) != 0)
2561 if ((proc_cputype
& ~CPU_ARCH_MASK
) != (cpu_type() & ~CPU_ARCH_MASK
))
2563 return SYSCTL_OUT(req
, &res
, sizeof(res
));
2565 SYSCTL_PROC(_sysctl
, OID_AUTO
, proc_native
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, 0, sysctl_sysctl_native
,"I","proc_native");
2568 sysctl_sysctl_cputype(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
,
2569 struct sysctl_req
*req
)
2572 cpu_type_t proc_cputype
= 0;
2573 if ((error
= fetch_process_cputype(req
->p
, (int *)arg1
, arg2
, &proc_cputype
)) != 0)
2575 return SYSCTL_OUT(req
, &proc_cputype
, sizeof(proc_cputype
));
2577 SYSCTL_PROC(_sysctl
, OID_AUTO
, proc_cputype
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, 0, sysctl_sysctl_cputype
,"I","proc_cputype");
2581 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2583 return sysctl_io_number(req
, boothowto
& RB_SAFEBOOT
? 1 : 0, sizeof(int), NULL
, NULL
);
2586 SYSCTL_PROC(_kern
, KERN_SAFEBOOT
, safeboot
,
2587 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2588 0, 0, sysctl_safeboot
, "I", "");
2592 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2594 return sysctl_io_number(req
, boothowto
& RB_SINGLE
? 1 : 0, sizeof(int), NULL
, NULL
);
2597 SYSCTL_PROC(_kern
, OID_AUTO
, singleuser
,
2598 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2599 0, 0, sysctl_singleuser
, "I", "");
2601 STATIC
int sysctl_minimalboot
2602 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2604 return sysctl_io_number(req
, minimalboot
, sizeof(int), NULL
, NULL
);
2607 SYSCTL_PROC(_kern
, OID_AUTO
, minimalboot
,
2608 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2609 0, 0, sysctl_minimalboot
, "I", "");
2612 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
2614 extern boolean_t affinity_sets_enabled
;
2615 extern int affinity_sets_mapping
;
2617 SYSCTL_INT (_kern
, OID_AUTO
, affinity_sets_enabled
,
2618 CTLFLAG_RW
| CTLFLAG_LOCKED
, (int *) &affinity_sets_enabled
, 0, "hinting enabled");
2619 SYSCTL_INT (_kern
, OID_AUTO
, affinity_sets_mapping
,
2620 CTLFLAG_RW
| CTLFLAG_LOCKED
, &affinity_sets_mapping
, 0, "mapping policy");
2623 * Boolean indicating if KASLR is active.
2627 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2631 slide
= vm_kernel_slide
? 1 : 0;
2633 return sysctl_io_number( req
, slide
, sizeof(int), NULL
, NULL
);
2636 SYSCTL_PROC(_kern
, OID_AUTO
, slide
,
2637 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2638 0, 0, sysctl_slide
, "I", "");
2641 * Limit on total memory users can wire.
2643 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
2645 * vm_user_wire_limit - per address space limit on wired memory. This puts a cap on the process's rlimit value.
2647 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
2650 * All values are in bytes.
2653 vm_map_size_t vm_global_no_user_wire_amount
;
2654 vm_map_size_t vm_global_user_wire_limit
;
2655 vm_map_size_t vm_user_wire_limit
;
2658 * There needs to be a more automatic/elegant way to do this
2660 SYSCTL_QUAD(_vm
, OID_AUTO
, global_no_user_wire_amount
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_global_no_user_wire_amount
, "");
2661 SYSCTL_QUAD(_vm
, OID_AUTO
, global_user_wire_limit
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_global_user_wire_limit
, "");
2662 SYSCTL_QUAD(_vm
, OID_AUTO
, user_wire_limit
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_user_wire_limit
, "");
2664 extern int vm_map_copy_overwrite_aligned_src_not_internal
;
2665 extern int vm_map_copy_overwrite_aligned_src_not_symmetric
;
2666 extern int vm_map_copy_overwrite_aligned_src_large
;
2667 SYSCTL_INT(_vm
, OID_AUTO
, vm_copy_src_not_internal
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_map_copy_overwrite_aligned_src_not_internal
, 0, "");
2668 SYSCTL_INT(_vm
, OID_AUTO
, vm_copy_src_not_symmetric
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_map_copy_overwrite_aligned_src_not_symmetric
, 0, "");
2669 SYSCTL_INT(_vm
, OID_AUTO
, vm_copy_src_large
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_map_copy_overwrite_aligned_src_large
, 0, "");
2672 extern uint32_t vm_page_external_count
;
2673 extern uint32_t vm_page_filecache_min
;
2675 SYSCTL_INT(_vm
, OID_AUTO
, vm_page_external_count
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_page_external_count
, 0, "");
2676 SYSCTL_INT(_vm
, OID_AUTO
, vm_page_filecache_min
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_page_filecache_min
, 0, "");
2678 extern int vm_compressor_mode
;
2679 extern int vm_compressor_is_active
;
2680 extern int vm_compressor_available
;
2681 extern uint32_t vm_ripe_target_age
;
2682 extern uint32_t swapout_target_age
;
2683 extern int64_t compressor_bytes_used
;
2684 extern int64_t c_segment_input_bytes
;
2685 extern int64_t c_segment_compressed_bytes
;
2686 extern uint32_t compressor_eval_period_in_msecs
;
2687 extern uint32_t compressor_sample_min_in_msecs
;
2688 extern uint32_t compressor_sample_max_in_msecs
;
2689 extern uint32_t compressor_thrashing_threshold_per_10msecs
;
2690 extern uint32_t compressor_thrashing_min_per_10msecs
;
2691 extern uint32_t vm_compressor_minorcompact_threshold_divisor
;
2692 extern uint32_t vm_compressor_majorcompact_threshold_divisor
;
2693 extern uint32_t vm_compressor_unthrottle_threshold_divisor
;
2694 extern uint32_t vm_compressor_catchup_threshold_divisor
;
2695 extern uint32_t vm_compressor_time_thread
;
2696 extern uint64_t vm_compressor_thread_runtime
;
2698 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_input_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &c_segment_input_bytes
, "");
2699 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_compressed_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &c_segment_compressed_bytes
, "");
2700 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_bytes_used
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_bytes_used
, "");
2702 SYSCTL_INT(_vm
, OID_AUTO
, compressor_mode
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_compressor_mode
, 0, "");
2703 SYSCTL_INT(_vm
, OID_AUTO
, compressor_is_active
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_compressor_is_active
, 0, "");
2704 SYSCTL_INT(_vm
, OID_AUTO
, compressor_swapout_target_age
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &swapout_target_age
, 0, "");
2705 SYSCTL_INT(_vm
, OID_AUTO
, compressor_available
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_compressor_available
, 0, "");
2707 SYSCTL_INT(_vm
, OID_AUTO
, vm_ripe_target_age_in_secs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_ripe_target_age
, 0, "");
2709 SYSCTL_INT(_vm
, OID_AUTO
, compressor_eval_period_in_msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_eval_period_in_msecs
, 0, "");
2710 SYSCTL_INT(_vm
, OID_AUTO
, compressor_sample_min_in_msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_sample_min_in_msecs
, 0, "");
2711 SYSCTL_INT(_vm
, OID_AUTO
, compressor_sample_max_in_msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_sample_max_in_msecs
, 0, "");
2712 SYSCTL_INT(_vm
, OID_AUTO
, compressor_thrashing_threshold_per_10msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_thrashing_threshold_per_10msecs
, 0, "");
2713 SYSCTL_INT(_vm
, OID_AUTO
, compressor_thrashing_min_per_10msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_thrashing_min_per_10msecs
, 0, "");
2714 SYSCTL_INT(_vm
, OID_AUTO
, compressor_minorcompact_threshold_divisor
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_minorcompact_threshold_divisor
, 0, "");
2715 SYSCTL_INT(_vm
, OID_AUTO
, compressor_majorcompact_threshold_divisor
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_majorcompact_threshold_divisor
, 0, "");
2716 SYSCTL_INT(_vm
, OID_AUTO
, compressor_unthrottle_threshold_divisor
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_unthrottle_threshold_divisor
, 0, "");
2717 SYSCTL_INT(_vm
, OID_AUTO
, compressor_catchup_threshold_divisor
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_catchup_threshold_divisor
, 0, "");
2719 SYSCTL_STRING(_vm
, OID_AUTO
, swapfileprefix
, CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
, swapfilename
, sizeof(swapfilename
) - SWAPFILENAME_INDEX_LEN
, "");
2721 SYSCTL_INT(_vm
, OID_AUTO
, compressor_timing_enabled
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_time_thread
, 0, "");
2722 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_thread_runtime
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_compressor_thread_runtime
, "");
2724 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_compressions
, "");
2725 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_compression_failures
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_compression_failures
, "");
2726 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_compressed_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_compressed_bytes
, "");
2727 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_wk_compression_delta
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_wk_compression_delta
, "");
2728 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_wk_compression_negative_delta
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_wk_compression_negative_delta
, "");
2730 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_decompressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_decompressions
, "");
2731 SYSCTL_QUAD(_vm
, OID_AUTO
, lz4_decompressed_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.lz4_decompressed_bytes
, "");
2733 SYSCTL_QUAD(_vm
, OID_AUTO
, uc_decompressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.uc_decompressions
, "");
2735 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_compressions
, "");
2736 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_compressions_exclusive
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_compressions_exclusive
, "");
2737 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_sv_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_sv_compressions
, "");
2738 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_mzv_compressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_mzv_compressions
, "");
2739 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_compression_failures
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_compression_failures
, "");
2740 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_compressed_bytes_exclusive
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_compressed_bytes_exclusive
, "");
2741 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_compressed_bytes_total
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_compressed_bytes_total
, "");
2743 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_decompressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_decompressions
, "");
2744 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_decompressed_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_decompressed_bytes
, "");
2745 SYSCTL_QUAD(_vm
, OID_AUTO
, wk_sv_decompressions
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_stats
.wk_sv_decompressions
, "");
2747 SYSCTL_INT(_vm
, OID_AUTO
, lz4_threshold
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_threshold
, 0, "");
2748 SYSCTL_INT(_vm
, OID_AUTO
, wkdm_reeval_threshold
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.wkdm_reeval_threshold
, 0, "");
2749 SYSCTL_INT(_vm
, OID_AUTO
, lz4_max_failure_skips
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_max_failure_skips
, 0, "");
2750 SYSCTL_INT(_vm
, OID_AUTO
, lz4_max_failure_run_length
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_max_failure_run_length
, 0, "");
2751 SYSCTL_INT(_vm
, OID_AUTO
, lz4_max_preselects
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_max_preselects
, 0, "");
2752 SYSCTL_INT(_vm
, OID_AUTO
, lz4_run_preselection_threshold
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_run_preselection_threshold
, 0, "");
2753 SYSCTL_INT(_vm
, OID_AUTO
, lz4_run_continue_bytes
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_run_continue_bytes
, 0, "");
2754 SYSCTL_INT(_vm
, OID_AUTO
, lz4_profitable_bytes
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vmctune
.lz4_profitable_bytes
, 0, "");
#if CONFIG_PHANTOM_CACHE
extern uint32_t phantom_cache_thrashing_threshold;
extern uint32_t phantom_cache_eval_period_in_msecs;
extern uint32_t phantom_cache_thrashing_threshold_ssd;

SYSCTL_INT(_vm, OID_AUTO, phantom_cache_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_eval_period_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold_ssd, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold_ssd, 0, "");
/* NOTE(review): closing #endif was lost in extraction; restored here. */
#endif /* CONFIG_PHANTOM_CACHE */
#if CONFIG_BACKGROUND_QUEUE

/* Background page queue configuration and counters. */
extern uint32_t vm_page_background_count;
extern uint32_t vm_page_background_limit;
extern uint32_t vm_page_background_target;
extern uint32_t vm_page_background_internal_count;
extern uint32_t vm_page_background_external_count;
extern uint32_t vm_page_background_mode;
extern uint32_t vm_page_background_exclude_external;
extern uint64_t vm_page_background_promoted_count;
extern uint64_t vm_pageout_considered_bq_internal;
extern uint64_t vm_pageout_considered_bq_external;
extern uint64_t vm_pageout_rejected_bq_internal;
extern uint64_t vm_pageout_rejected_bq_external;

SYSCTL_INT(_vm, OID_AUTO, vm_page_background_mode, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_mode, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_exclude_external, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_exclude_external, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_limit, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_target, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_target, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_internal_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_internal_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_external_count, 0, "");

SYSCTL_QUAD(_vm, OID_AUTO, vm_page_background_promoted_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_promoted_count, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_considered_bq_internal, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_considered_bq_external, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_internal, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_external, "");

/* NOTE(review): closing #endif was lost in extraction; restored here. */
#endif /* CONFIG_BACKGROUND_QUEUE */
#if (DEVELOPMENT || DEBUG)

SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_hard,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_creation_throttled_hard, 0, "");

SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_soft,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_creation_throttled_soft, 0, "");

extern uint32_t vm_pageout_memorystatus_fb_factor_nr;
extern uint32_t vm_pageout_memorystatus_fb_factor_dr;
SYSCTL_INT(_vm, OID_AUTO, vm_pageout_memorystatus_fb_factor_nr, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_memorystatus_fb_factor_nr, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_pageout_memorystatus_fb_factor_dr, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_memorystatus_fb_factor_dr, 0, "");

extern uint32_t vm_grab_anon_overrides;
extern uint32_t vm_grab_anon_nops;

SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_overrides, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_grab_anon_overrides, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_nops, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_grab_anon_nops, 0, "");

/* log message counters for persistence mode */
extern uint32_t oslog_p_total_msgcount;
extern uint32_t oslog_p_metadata_saved_msgcount;
extern uint32_t oslog_p_metadata_dropped_msgcount;
extern uint32_t oslog_p_error_count;
extern uint32_t oslog_p_saved_msgcount;
extern uint32_t oslog_p_dropped_msgcount;
extern uint32_t oslog_p_boot_dropped_msgcount;

/* log message counters for streaming mode */
extern uint32_t oslog_s_total_msgcount;
extern uint32_t oslog_s_metadata_msgcount;
extern uint32_t oslog_s_error_count;
extern uint32_t oslog_s_streamed_msgcount;
extern uint32_t oslog_s_dropped_msgcount;

SYSCTL_UINT(_debug, OID_AUTO, oslog_p_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_total_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_saved_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_dropped_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_error_count, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_saved_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_dropped_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_boot_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_boot_dropped_msgcount, 0, "");

SYSCTL_UINT(_debug, OID_AUTO, oslog_s_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_total_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_metadata_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_metadata_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_error_count, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_streamed_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_streamed_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_dropped_msgcount, 0, "");

#endif /* DEVELOPMENT || DEBUG */
2853 * Enable tracing of voucher contents
2855 extern uint32_t ipc_voucher_trace_contents
;
2857 SYSCTL_INT (_kern
, OID_AUTO
, ipc_voucher_trace_contents
,
2858 CTLFLAG_RW
| CTLFLAG_LOCKED
, &ipc_voucher_trace_contents
, 0, "Enable tracing voucher contents");
2861 * Kernel stack size and depth
2863 SYSCTL_INT (_kern
, OID_AUTO
, stack_size
,
2864 CTLFLAG_RD
| CTLFLAG_LOCKED
, (int *) &kernel_stack_size
, 0, "Kernel stack size");
2865 SYSCTL_INT (_kern
, OID_AUTO
, stack_depth_max
,
2866 CTLFLAG_RD
| CTLFLAG_LOCKED
, (int *) &kernel_stack_depth_max
, 0, "Max kernel stack depth at interrupt or context switch");
2869 * enable back trace for port allocations
2871 extern int ipc_portbt
;
2873 SYSCTL_INT(_kern
, OID_AUTO
, ipc_portbt
,
2874 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2875 &ipc_portbt
, 0, "");
2881 SYSCTL_STRING(_kern
, OID_AUTO
, sched
,
2882 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2883 sched_string
, sizeof(sched_string
),
2884 "Timeshare scheduler implementation");
2887 * Only support runtime modification on embedded platforms
2888 * with development config enabled
2892 /* Parameters related to timer coalescing tuning, to be replaced
2893 * with a dedicated systemcall in the future.
2895 /* Enable processing pending timers in the context of any other interrupt
2896 * Coalescing tuning parameters for various thread/task attributes */
2898 sysctl_timer_user_us_kernel_abstime SYSCTL_HANDLER_ARGS
2900 #pragma unused(oidp)
2901 int size
= arg2
; /* subcommand*/
2904 uint64_t old_value_ns
;
2905 uint64_t new_value_ns
;
2906 uint64_t value_abstime
;
2907 if (size
== sizeof(uint32_t))
2908 value_abstime
= *((uint32_t *)arg1
);
2909 else if (size
== sizeof(uint64_t))
2910 value_abstime
= *((uint64_t *)arg1
);
2911 else return ENOTSUP
;
2913 absolutetime_to_nanoseconds(value_abstime
, &old_value_ns
);
2914 error
= sysctl_io_number(req
, old_value_ns
, sizeof(old_value_ns
), &new_value_ns
, &changed
);
2915 if ((error
) || (!changed
))
2918 nanoseconds_to_absolutetime(new_value_ns
, &value_abstime
);
2919 if (size
== sizeof(uint32_t))
2920 *((uint32_t *)arg1
) = (uint32_t)value_abstime
;
2922 *((uint64_t *)arg1
) = value_abstime
;
2926 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_bg_scale
,
2927 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2928 &tcoal_prio_params
.timer_coalesce_bg_shift
, 0, "");
2929 SYSCTL_PROC(_kern
, OID_AUTO
, timer_resort_threshold_ns
,
2930 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2931 &tcoal_prio_params
.timer_resort_threshold_abstime
,
2932 sizeof(tcoal_prio_params
.timer_resort_threshold_abstime
),
2933 sysctl_timer_user_us_kernel_abstime
,
2935 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_bg_ns_max
,
2936 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2937 &tcoal_prio_params
.timer_coalesce_bg_abstime_max
,
2938 sizeof(tcoal_prio_params
.timer_coalesce_bg_abstime_max
),
2939 sysctl_timer_user_us_kernel_abstime
,
2942 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_kt_scale
,
2943 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2944 &tcoal_prio_params
.timer_coalesce_kt_shift
, 0, "");
2946 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_kt_ns_max
,
2947 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2948 &tcoal_prio_params
.timer_coalesce_kt_abstime_max
,
2949 sizeof(tcoal_prio_params
.timer_coalesce_kt_abstime_max
),
2950 sysctl_timer_user_us_kernel_abstime
,
2953 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_fp_scale
,
2954 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2955 &tcoal_prio_params
.timer_coalesce_fp_shift
, 0, "");
2957 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_fp_ns_max
,
2958 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2959 &tcoal_prio_params
.timer_coalesce_fp_abstime_max
,
2960 sizeof(tcoal_prio_params
.timer_coalesce_fp_abstime_max
),
2961 sysctl_timer_user_us_kernel_abstime
,
2964 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_ts_scale
,
2965 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2966 &tcoal_prio_params
.timer_coalesce_ts_shift
, 0, "");
2968 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_ts_ns_max
,
2969 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2970 &tcoal_prio_params
.timer_coalesce_ts_abstime_max
,
2971 sizeof(tcoal_prio_params
.timer_coalesce_ts_abstime_max
),
2972 sysctl_timer_user_us_kernel_abstime
,
2975 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier0_scale
,
2976 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2977 &tcoal_prio_params
.latency_qos_scale
[0], 0, "");
2979 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier0_ns_max
,
2980 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2981 &tcoal_prio_params
.latency_qos_abstime_max
[0],
2982 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[0]),
2983 sysctl_timer_user_us_kernel_abstime
,
2986 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier1_scale
,
2987 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2988 &tcoal_prio_params
.latency_qos_scale
[1], 0, "");
2990 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier1_ns_max
,
2991 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2992 &tcoal_prio_params
.latency_qos_abstime_max
[1],
2993 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[1]),
2994 sysctl_timer_user_us_kernel_abstime
,
2997 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier2_scale
,
2998 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2999 &tcoal_prio_params
.latency_qos_scale
[2], 0, "");
3001 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier2_ns_max
,
3002 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3003 &tcoal_prio_params
.latency_qos_abstime_max
[2],
3004 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[2]),
3005 sysctl_timer_user_us_kernel_abstime
,
3008 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier3_scale
,
3009 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3010 &tcoal_prio_params
.latency_qos_scale
[3], 0, "");
3012 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier3_ns_max
,
3013 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3014 &tcoal_prio_params
.latency_qos_abstime_max
[3],
3015 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[3]),
3016 sysctl_timer_user_us_kernel_abstime
,
3019 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier4_scale
,
3020 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3021 &tcoal_prio_params
.latency_qos_scale
[4], 0, "");
3023 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier4_ns_max
,
3024 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3025 &tcoal_prio_params
.latency_qos_abstime_max
[4],
3026 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[4]),
3027 sysctl_timer_user_us_kernel_abstime
,
3030 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier5_scale
,
3031 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3032 &tcoal_prio_params
.latency_qos_scale
[5], 0, "");
3034 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier5_ns_max
,
3035 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3036 &tcoal_prio_params
.latency_qos_abstime_max
[5],
3037 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[5]),
3038 sysctl_timer_user_us_kernel_abstime
,
3041 /* Communicate the "user idle level" heuristic to the timer layer, and
3042 * potentially other layers in the future.
3046 timer_user_idle_level(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
) {
3047 int new_value
= 0, old_value
= 0, changed
= 0, error
;
3049 old_value
= timer_get_user_idle_level();
3051 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
3053 if (error
== 0 && changed
) {
3054 if (timer_set_user_idle_level(new_value
) != KERN_SUCCESS
)
3061 SYSCTL_PROC(_machdep
, OID_AUTO
, user_idle_level
,
3062 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
3064 timer_user_idle_level
, "I", "User idle level heuristic, 0-128");
3067 SYSCTL_INT(_kern
, OID_AUTO
, hv_support
,
3068 CTLFLAG_KERN
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
3069 &hv_support_available
, 0, "");
3074 * This is set by core audio to tell tailspin (ie background tracing) how long
3075 * its smallest buffer is. Background tracing can then try to make a reasonable
3076 * decisions to try to avoid introducing so much latency that the buffers will
3080 int min_audio_buffer_usec
;
3083 sysctl_audio_buffer SYSCTL_HANDLER_ARGS
3085 #pragma unused(oidp, arg1, arg2)
3086 int err
= 0, value
= 0, changed
= 0;
3087 err
= sysctl_io_number(req
, min_audio_buffer_usec
, sizeof(int), &value
, &changed
);
3091 /* writing is protected by an entitlement */
3092 if (priv_check_cred(kauth_cred_get(), PRIV_AUDIO_LATENCY
, 0) != 0) {
3096 min_audio_buffer_usec
= value
;
3102 SYSCTL_PROC(_kern
, OID_AUTO
, min_audio_buffer_usec
, CTLFLAG_RW
| CTLFLAG_ANYBODY
, 0, 0, sysctl_audio_buffer
, "I", "Minimum audio buffer size, in microseconds");
#if DEVELOPMENT || DEBUG
#include <sys/sysent.h>
/* This should result in a fatal exception, verifying that "sysent" is
 * write-protected.
 */
static int
kern_sysent_write(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
    uint64_t new_value = 0, old_value = 0;
    int changed = 0, error;

    error = sysctl_io_number(req, old_value, sizeof(uint64_t), &new_value, &changed);
    if ((error == 0) && changed) {
        /* Deliberate write to the (expected read-only) sysent table; the
         * printf below should never be reached on a correct kernel. */
        volatile uint32_t *wraddr = (uint32_t *)&sysent[0];
        *wraddr = 0;
        printf("sysent[0] write succeeded\n");
    }
    return error;
}

SYSCTL_PROC(_kern, OID_AUTO, sysent_const_check,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0,
    kern_sysent_write, "I", "Attempt sysent[0] write");
#endif
3130 #if DEVELOPMENT || DEBUG
3131 SYSCTL_COMPAT_INT(_kern
, OID_AUTO
, development
, CTLFLAG_RD
| CTLFLAG_MASKED
, NULL
, 1, "");
3133 SYSCTL_COMPAT_INT(_kern
, OID_AUTO
, development
, CTLFLAG_RD
| CTLFLAG_MASKED
, NULL
, 0, "");