2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
67 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
68 * support for mandatory and extensible security protections. This notice
69 * is included in support of clause 2.2 (b) of the Apple Public License,
74 * DEPRECATED sysctl system call code
76 * Everything in this file is deprecated. Sysctls should be handled
77 * by the code in kern_newsysctl.c.
78 * The remaining "case" sections are supposed to be converted into
79 * SYSCTL_*-style definitions, and as soon as all of them are gone,
80 * this source file is supposed to die.
82 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
83 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/malloc.h>
90 #include <sys/proc_internal.h>
91 #include <sys/kauth.h>
92 #include <sys/file_internal.h>
93 #include <sys/vnode_internal.h>
94 #include <sys/unistd.h>
96 #include <sys/ioctl.h>
97 #include <sys/namei.h>
99 #include <sys/disklabel.h>
101 #include <sys/sysctl.h>
102 #include <sys/user.h>
103 #include <sys/aio_kern.h>
104 #include <sys/reboot.h>
105 #include <sys/memory_maintenance.h>
106 #include <sys/priv.h>
108 #include <security/audit/audit.h>
109 #include <kern/kalloc.h>
111 #include <mach/machine.h>
112 #include <mach/mach_host.h>
113 #include <mach/mach_types.h>
114 #include <mach/vm_param.h>
115 #include <kern/mach_param.h>
116 #include <kern/task.h>
117 #include <kern/thread.h>
118 #include <kern/processor.h>
119 #include <kern/debug.h>
120 #include <kern/sched_prim.h>
121 #include <vm/vm_kern.h>
122 #include <vm/vm_map.h>
123 #include <mach/host_info.h>
125 #include <sys/mount_internal.h>
126 #include <sys/kdebug.h>
128 #include <IOKit/IOPlatformExpert.h>
129 #include <pexpert/pexpert.h>
131 #include <machine/machine_routines.h>
132 #include <machine/exec.h>
134 #include <vm/vm_protos.h>
135 #include <vm/vm_pageout.h>
136 #include <sys/imgsrc.h>
137 #include <kern/timer_call.h>
139 #if defined(__i386__) || defined(__x86_64__)
140 #include <i386/cpuid.h>
144 #include <sys/kern_memorystatus.h>
148 #include <kperf/kperf.h>
152 #include <kern/hv_support.h>
156 * deliberately setting max requests to really high number
157 * so that runaway settings do not cause MALLOC overflows
159 #define AIO_MAX_REQUESTS (128 * CONFIG_AIO_MAX)
161 extern int aio_max_requests
;
162 extern int aio_max_requests_per_process
;
163 extern int aio_worker_threads
;
164 extern int lowpri_IO_window_msecs
;
165 extern int lowpri_IO_delay_msecs
;
166 extern int nx_enabled
;
167 extern int speculative_reads_disabled
;
168 extern int ignore_is_ssd
;
169 extern unsigned int speculative_prefetch_max
;
170 extern unsigned int speculative_prefetch_max_iosize
;
171 extern unsigned int preheat_max_bytes
;
172 extern unsigned int preheat_min_bytes
;
173 extern long numvnodes
;
175 extern uuid_string_t bootsessionuuid_string
;
177 extern unsigned int vm_max_delayed_work_limit
;
178 extern unsigned int vm_max_batch
;
180 extern unsigned int vm_page_free_min
;
181 extern unsigned int vm_page_free_target
;
182 extern unsigned int vm_page_free_reserved
;
183 extern unsigned int vm_page_speculative_percentage
;
184 extern unsigned int vm_page_speculative_q_age_ms
;
186 #if (DEVELOPMENT || DEBUG)
187 extern uint32_t vm_page_creation_throttled_hard
;
188 extern uint32_t vm_page_creation_throttled_soft
;
189 #endif /* DEVELOPMENT || DEBUG */
192 * Conditionally allow dtrace to see these functions for debugging purposes.
200 #define STATIC static
203 extern boolean_t mach_timer_coalescing_enabled
;
205 extern uint64_t timer_deadline_tracking_bin_1
, timer_deadline_tracking_bin_2
;
208 fill_user32_eproc(proc_t
, struct user32_eproc
*__restrict
);
210 fill_user32_externproc(proc_t
, struct user32_extern_proc
*__restrict
);
212 fill_user64_eproc(proc_t
, struct user64_eproc
*__restrict
);
214 fill_user64_proc(proc_t
, struct user64_kinfo_proc
*__restrict
);
216 fill_user64_externproc(proc_t
, struct user64_extern_proc
*__restrict
);
218 fill_user32_proc(proc_t
, struct user32_kinfo_proc
*__restrict
);
221 kdbg_control(int *name
, u_int namelen
, user_addr_t where
, size_t * sizep
);
227 pcsamples_ops(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
,
229 __private_extern__ kern_return_t
230 reset_vmobjectcache(unsigned int val1
, unsigned int val2
);
232 sysctl_procargs(int *name
, u_int namelen
, user_addr_t where
,
233 size_t *sizep
, proc_t cur_proc
);
235 sysctl_procargsx(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
,
236 proc_t cur_proc
, int argc_yes
);
238 sysctl_struct(user_addr_t oldp
, size_t *oldlenp
, user_addr_t newp
,
239 size_t newlen
, void *sp
, int len
);
241 STATIC
int sysdoproc_filt_KERN_PROC_PID(proc_t p
, void * arg
);
242 STATIC
int sysdoproc_filt_KERN_PROC_PGRP(proc_t p
, void * arg
);
243 STATIC
int sysdoproc_filt_KERN_PROC_TTY(proc_t p
, void * arg
);
244 STATIC
int sysdoproc_filt_KERN_PROC_UID(proc_t p
, void * arg
);
245 STATIC
int sysdoproc_filt_KERN_PROC_RUID(proc_t p
, void * arg
);
246 int sysdoproc_callback(proc_t p
, void *arg
);
249 /* forward declarations for non-static STATIC */
250 STATIC
void fill_loadavg64(struct loadavg
*la
, struct user64_loadavg
*la64
);
251 STATIC
void fill_loadavg32(struct loadavg
*la
, struct user32_loadavg
*la32
);
252 STATIC
int sysctl_handle_kern_threadname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
253 STATIC
int sysctl_sched_stats(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
254 STATIC
int sysctl_sched_stats_enable(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
255 STATIC
int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
;
257 STATIC
int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
;
258 #endif /* COUNT_SYSCALLS */
259 STATIC
int sysctl_doprocargs SYSCTL_HANDLER_ARGS
;
260 STATIC
int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
;
261 STATIC
int sysctl_prochandle SYSCTL_HANDLER_ARGS
;
262 STATIC
int sysctl_aiomax(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
263 STATIC
int sysctl_aioprocmax(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
264 STATIC
int sysctl_aiothreads(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
265 STATIC
int sysctl_maxproc(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
266 STATIC
int sysctl_osversion(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
267 STATIC
int sysctl_sysctl_bootargs(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
268 STATIC
int sysctl_maxvnodes(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
269 STATIC
int sysctl_securelvl(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
270 STATIC
int sysctl_domainname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
271 STATIC
int sysctl_hostname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
272 STATIC
int sysctl_procname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
273 STATIC
int sysctl_boottime(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
274 STATIC
int sysctl_symfile(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
276 STATIC
int sysctl_netboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
278 #ifdef CONFIG_IMGSRC_ACCESS
279 STATIC
int sysctl_imgsrcdev(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
281 STATIC
int sysctl_usrstack(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
282 STATIC
int sysctl_usrstack64(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
283 STATIC
int sysctl_coredump(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
284 STATIC
int sysctl_suid_coredump(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
285 STATIC
int sysctl_delayterm(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
286 STATIC
int sysctl_rage_vnode(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
287 STATIC
int sysctl_kern_check_openevt(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
288 STATIC
int sysctl_nx(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
289 STATIC
int sysctl_loadavg(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
290 STATIC
int sysctl_vm_toggle_address_reuse(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
291 STATIC
int sysctl_swapusage(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
292 STATIC
int fetch_process_cputype( proc_t cur_proc
, int *name
, u_int namelen
, cpu_type_t
*cputype
);
293 STATIC
int sysctl_sysctl_native(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
294 STATIC
int sysctl_sysctl_cputype(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
295 STATIC
int sysctl_safeboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
296 STATIC
int sysctl_singleuser(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
297 STATIC
int sysctl_minimalboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
298 STATIC
int sysctl_slide(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
301 extern void IORegistrySetOSBuildVersion(char * build_version
);
304 fill_loadavg64(struct loadavg
*la
, struct user64_loadavg
*la64
)
306 la64
->ldavg
[0] = la
->ldavg
[0];
307 la64
->ldavg
[1] = la
->ldavg
[1];
308 la64
->ldavg
[2] = la
->ldavg
[2];
309 la64
->fscale
= (user64_long_t
)la
->fscale
;
313 fill_loadavg32(struct loadavg
*la
, struct user32_loadavg
*la32
)
315 la32
->ldavg
[0] = la
->ldavg
[0];
316 la32
->ldavg
[1] = la
->ldavg
[1];
317 la32
->ldavg
[2] = la
->ldavg
[2];
318 la32
->fscale
= (user32_long_t
)la
->fscale
;
322 * Attributes stored in the kernel.
324 extern char corefilename
[MAXPATHLEN
+1];
325 extern int do_coredump
;
326 extern int sugid_coredump
;
329 extern int do_count_syscalls
;
333 int securelevel
= -1;
339 sysctl_handle_kern_threadname( __unused
struct sysctl_oid
*oidp
, __unused
void *arg1
,
340 __unused
int arg2
, struct sysctl_req
*req
)
343 struct uthread
*ut
= get_bsdthread_info(current_thread());
344 user_addr_t oldp
=0, newp
=0;
345 size_t *oldlenp
=NULL
;
349 oldlenp
= &(req
->oldlen
);
351 newlen
= req
->newlen
;
353 /* We want the current length, and maybe the string itself */
355 /* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */
356 size_t currlen
= MAXTHREADNAMESIZE
- 1;
359 /* use length of current thread name */
360 currlen
= strlen(ut
->pth_name
);
362 if(*oldlenp
< currlen
)
364 /* NOTE - we do not copy the NULL terminator */
366 error
= copyout(ut
->pth_name
,oldp
,currlen
);
371 /* return length of thread name minus NULL terminator (just like strlen) */
372 req
->oldidx
= currlen
;
375 /* We want to set the name to something */
378 if(newlen
> (MAXTHREADNAMESIZE
- 1))
382 ut
->pth_name
= (char*)kalloc( MAXTHREADNAMESIZE
);
386 bzero(ut
->pth_name
, MAXTHREADNAMESIZE
);
387 error
= copyin(newp
, ut
->pth_name
, newlen
);
395 SYSCTL_PROC(_kern
, KERN_THREADNAME
, threadname
, CTLFLAG_ANYBODY
| CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, 0, sysctl_handle_kern_threadname
,"A","");
399 sysctl_sched_stats(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
401 host_basic_info_data_t hinfo
;
405 mach_msg_type_number_t count
= HOST_BASIC_INFO_COUNT
;
406 struct _processor_statistics_np
*buf
;
409 kret
= host_info((host_t
)BSD_HOST
, HOST_BASIC_INFO
, (host_info_t
)&hinfo
, &count
);
410 if (kret
!= KERN_SUCCESS
) {
414 size
= sizeof(struct _processor_statistics_np
) * (hinfo
.logical_cpu_max
+ 2); /* One for RT Queue, One for Fair Share Queue */
416 if (req
->oldlen
< size
) {
420 MALLOC(buf
, struct _processor_statistics_np
*, size
, M_TEMP
, M_ZERO
| M_WAITOK
);
422 kret
= get_sched_statistics(buf
, &size
);
423 if (kret
!= KERN_SUCCESS
) {
428 error
= sysctl_io_opaque(req
, buf
, size
, &changed
);
434 panic("Sched info changed?!");
441 SYSCTL_PROC(_kern
, OID_AUTO
, sched_stats
, CTLFLAG_LOCKED
, 0, 0, sysctl_sched_stats
, "-", "");
444 sysctl_sched_stats_enable(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, __unused
struct sysctl_req
*req
)
449 if (req
->newlen
!= sizeof(active
)) {
453 res
= copyin(req
->newptr
, &active
, sizeof(active
));
458 return set_sched_stats_active(active
);
461 SYSCTL_PROC(_kern
, OID_AUTO
, sched_stats_enable
, CTLFLAG_LOCKED
| CTLFLAG_WR
, 0, 0, sysctl_sched_stats_enable
, "-", "");
463 extern uint32_t sched_debug_flags
;
464 SYSCTL_INT(_debug
, OID_AUTO
, sched
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &sched_debug_flags
, 0, "scheduler debug");
466 #if (DEBUG || DEVELOPMENT)
467 extern boolean_t doprnt_hide_pointers
;
468 SYSCTL_INT(_debug
, OID_AUTO
, hide_kernel_pointers
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &doprnt_hide_pointers
, 0, "hide kernel pointers from log");
471 extern int get_kernel_symfile(proc_t
, char **);
474 #define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)
477 extern int syscalls_log
[];
478 extern const char *syscallnames
[];
481 sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
483 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
484 __unused
int *name
= arg1
; /* oid element argument vector */
485 __unused
int namelen
= arg2
; /* number of oid element arguments */
486 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
487 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
488 user_addr_t newp
= req
->newptr
; /* user buffer copy in address */
489 size_t newlen
= req
->newlen
; /* user buffer copy in size */
494 /* valid values passed in:
495 * = 0 means don't keep called counts for each bsd syscall
496 * > 0 means keep called counts for each bsd syscall
497 * = 2 means dump current counts to the system log
498 * = 3 means reset all counts
499 * for example, to dump current counts:
500 * sysctl -w kern.count_calls=2
502 error
= sysctl_int(oldp
, oldlenp
, newp
, newlen
, &tmp
);
508 do_count_syscalls
= 1;
510 else if ( tmp
== 0 || tmp
== 2 || tmp
== 3 ) {
512 for ( i
= 0; i
< nsysent
; i
++ ) {
513 if ( syscalls_log
[i
] != 0 ) {
515 printf("%d calls - name %s \n", syscalls_log
[i
], syscallnames
[i
]);
523 do_count_syscalls
= 1;
527 /* adjust index so we return the right required/consumed amount */
529 req
->oldidx
+= req
->oldlen
;
533 SYSCTL_PROC(_kern
, KERN_COUNT_SYSCALLS
, count_syscalls
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
534 0, /* Pointer argument (arg1) */
535 0, /* Integer argument (arg2) */
536 sysctl_docountsyscalls
, /* Handler function */
537 NULL
, /* Data pointer */
539 #endif /* COUNT_SYSCALLS */
542 * The following sysctl_* functions should not be used
543 * any more, as they can only cope with callers in
544 * user mode: Use new-style
552 * Validate parameters and get old / set new parameters
553 * for an integer-valued sysctl function.
556 sysctl_int(user_addr_t oldp
, size_t *oldlenp
,
557 user_addr_t newp
, size_t newlen
, int *valp
)
561 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
563 if (oldp
&& *oldlenp
< sizeof(int))
565 if (newp
&& newlen
!= sizeof(int))
567 *oldlenp
= sizeof(int);
569 error
= copyout(valp
, oldp
, sizeof(int));
570 if (error
== 0 && newp
) {
571 error
= copyin(newp
, valp
, sizeof(int));
572 AUDIT_ARG(value32
, *valp
);
578 * Validate parameters and get old / set new parameters
579 * for an quad(64bit)-valued sysctl function.
582 sysctl_quad(user_addr_t oldp
, size_t *oldlenp
,
583 user_addr_t newp
, size_t newlen
, quad_t
*valp
)
587 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
589 if (oldp
&& *oldlenp
< sizeof(quad_t
))
591 if (newp
&& newlen
!= sizeof(quad_t
))
593 *oldlenp
= sizeof(quad_t
);
595 error
= copyout(valp
, oldp
, sizeof(quad_t
));
596 if (error
== 0 && newp
)
597 error
= copyin(newp
, valp
, sizeof(quad_t
));
602 sysdoproc_filt_KERN_PROC_PID(proc_t p
, void * arg
)
604 if (p
->p_pid
!= (pid_t
)*(int*)arg
)
611 sysdoproc_filt_KERN_PROC_PGRP(proc_t p
, void * arg
)
613 if (p
->p_pgrpid
!= (pid_t
)*(int*)arg
)
620 sysdoproc_filt_KERN_PROC_TTY(proc_t p
, void * arg
)
625 /* This is very racy but list lock is held.. Hmmm. */
626 if ((p
->p_flag
& P_CONTROLT
) == 0 ||
627 (p
->p_pgrp
== NULL
) || (p
->p_pgrp
->pg_session
== NULL
) ||
628 (tp
= SESSION_TP(p
->p_pgrp
->pg_session
)) == TTY_NULL
||
629 tp
->t_dev
!= (dev_t
)*(int*)arg
)
638 sysdoproc_filt_KERN_PROC_UID(proc_t p
, void * arg
)
640 kauth_cred_t my_cred
;
643 if (p
->p_ucred
== NULL
)
645 my_cred
= kauth_cred_proc_ref(p
);
646 uid
= kauth_cred_getuid(my_cred
);
647 kauth_cred_unref(&my_cred
);
649 if (uid
!= (uid_t
)*(int*)arg
)
657 sysdoproc_filt_KERN_PROC_RUID(proc_t p
, void * arg
)
659 kauth_cred_t my_cred
;
662 if (p
->p_ucred
== NULL
)
664 my_cred
= kauth_cred_proc_ref(p
);
665 ruid
= kauth_cred_getruid(my_cred
);
666 kauth_cred_unref(&my_cred
);
668 if (ruid
!= (uid_t
)*(int*)arg
)
675 * try over estimating by 5 procs
677 #define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
678 struct sysdoproc_args
{
693 sysdoproc_callback(proc_t p
, void *arg
)
695 struct sysdoproc_args
*args
= arg
;
697 if (args
->buflen
>= args
->sizeof_kproc
) {
698 if ((args
->ruidcheck
!= 0) && (sysdoproc_filt_KERN_PROC_RUID(p
, &args
->uidval
) == 0))
699 return (PROC_RETURNED
);
700 if ((args
->uidcheck
!= 0) && (sysdoproc_filt_KERN_PROC_UID(p
, &args
->uidval
) == 0))
701 return (PROC_RETURNED
);
702 if ((args
->ttycheck
!= 0) && (sysdoproc_filt_KERN_PROC_TTY(p
, &args
->uidval
) == 0))
703 return (PROC_RETURNED
);
705 bzero(args
->kprocp
, args
->sizeof_kproc
);
707 fill_user64_proc(p
, args
->kprocp
);
709 fill_user32_proc(p
, args
->kprocp
);
710 int error
= copyout(args
->kprocp
, args
->dp
, args
->sizeof_kproc
);
712 *args
->errorp
= error
;
713 return (PROC_RETURNED_DONE
);
715 args
->dp
+= args
->sizeof_kproc
;
716 args
->buflen
-= args
->sizeof_kproc
;
718 args
->needed
+= args
->sizeof_kproc
;
719 return (PROC_RETURNED
);
722 SYSCTL_NODE(_kern
, KERN_PROC
, proc
, CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, "");
724 sysctl_prochandle SYSCTL_HANDLER_ARGS
726 int cmd
= oidp
->oid_arg2
; /* subcommand for multiple nodes */
727 int *name
= arg1
; /* oid element argument vector */
728 int namelen
= arg2
; /* number of oid element arguments */
729 user_addr_t where
= req
->oldptr
;/* user buffer copy out address */
731 user_addr_t dp
= where
;
733 int buflen
= where
!= USER_ADDR_NULL
? req
->oldlen
: 0;
735 boolean_t is_64_bit
= proc_is64bit(current_proc());
736 struct user32_kinfo_proc user32_kproc
;
737 struct user64_kinfo_proc user_kproc
;
740 int (*filterfn
)(proc_t
, void *) = 0;
741 struct sysdoproc_args args
;
746 if (namelen
!= 1 && !(namelen
== 0 && cmd
== KERN_PROC_ALL
))
750 sizeof_kproc
= sizeof(user_kproc
);
751 kprocp
= &user_kproc
;
753 sizeof_kproc
= sizeof(user32_kproc
);
754 kprocp
= &user32_kproc
;
760 filterfn
= sysdoproc_filt_KERN_PROC_PID
;
764 filterfn
= sysdoproc_filt_KERN_PROC_PGRP
;
783 /* must be kern.proc.<unknown> */
788 args
.buflen
= buflen
;
789 args
.kprocp
= kprocp
;
790 args
.is_64_bit
= is_64_bit
;
792 args
.needed
= needed
;
793 args
.errorp
= &error
;
794 args
.uidcheck
= uidcheck
;
795 args
.ruidcheck
= ruidcheck
;
796 args
.ttycheck
= ttycheck
;
797 args
.sizeof_kproc
= sizeof_kproc
;
799 args
.uidval
= name
[0];
801 proc_iterate((PROC_ALLPROCLIST
| PROC_ZOMBPROCLIST
),
802 sysdoproc_callback
, &args
, filterfn
, name
);
808 needed
= args
.needed
;
810 if (where
!= USER_ADDR_NULL
) {
811 req
->oldlen
= dp
- where
;
812 if (needed
> req
->oldlen
)
815 needed
+= KERN_PROCSLOP
;
816 req
->oldlen
= needed
;
818 /* adjust index so we return the right required/consumed amount */
819 req
->oldidx
+= req
->oldlen
;
824 * We specify the subcommand code for multiple nodes as the 'req->arg2' value
825 * in the sysctl declaration itself, which comes into the handler function
826 * as 'oidp->oid_arg2'.
828 * For these particular sysctls, since they have well known OIDs, we could
829 * have just obtained it from the '((int *)arg1)[0]' parameter, but that would
830 * not demonstrate how to handle multiple sysctls that used OID_AUTO instead
831 * of a well known value with a common handler function. This is desirable,
832 * because we want well known values to "go away" at some future date.
834 * It should be noted that the value of '((int *)arg1)[1]' is used for many
835 * an integer parameter to the subcommand for many of these sysctls; we'd
836 * rather have used '((int *)arg1)[0]' for that, or even better, an element
837 * in a structure passed in as the the 'newp' argument to sysctlbyname(3),
838 * and then use leaf-node permissions enforcement, but that would have
839 * necessitated modifying user space code to correspond to the interface
840 * change, and we are striving for binary backward compatibility here; even
841 * though these are SPI, and not intended for use by user space applications
842 * which are not themselves system tools or libraries, some applications
843 * have erroneously used them.
845 SYSCTL_PROC(_kern_proc
, KERN_PROC_ALL
, all
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
846 0, /* Pointer argument (arg1) */
847 KERN_PROC_ALL
, /* Integer argument (arg2) */
848 sysctl_prochandle
, /* Handler function */
849 NULL
, /* Data is size variant on ILP32/LP64 */
851 SYSCTL_PROC(_kern_proc
, KERN_PROC_PID
, pid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
852 0, /* Pointer argument (arg1) */
853 KERN_PROC_PID
, /* Integer argument (arg2) */
854 sysctl_prochandle
, /* Handler function */
855 NULL
, /* Data is size variant on ILP32/LP64 */
857 SYSCTL_PROC(_kern_proc
, KERN_PROC_TTY
, tty
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
858 0, /* Pointer argument (arg1) */
859 KERN_PROC_TTY
, /* Integer argument (arg2) */
860 sysctl_prochandle
, /* Handler function */
861 NULL
, /* Data is size variant on ILP32/LP64 */
863 SYSCTL_PROC(_kern_proc
, KERN_PROC_PGRP
, pgrp
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
864 0, /* Pointer argument (arg1) */
865 KERN_PROC_PGRP
, /* Integer argument (arg2) */
866 sysctl_prochandle
, /* Handler function */
867 NULL
, /* Data is size variant on ILP32/LP64 */
869 SYSCTL_PROC(_kern_proc
, KERN_PROC_UID
, uid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
870 0, /* Pointer argument (arg1) */
871 KERN_PROC_UID
, /* Integer argument (arg2) */
872 sysctl_prochandle
, /* Handler function */
873 NULL
, /* Data is size variant on ILP32/LP64 */
875 SYSCTL_PROC(_kern_proc
, KERN_PROC_RUID
, ruid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
876 0, /* Pointer argument (arg1) */
877 KERN_PROC_RUID
, /* Integer argument (arg2) */
878 sysctl_prochandle
, /* Handler function */
879 NULL
, /* Data is size variant on ILP32/LP64 */
881 SYSCTL_PROC(_kern_proc
, KERN_PROC_LCID
, lcid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
882 0, /* Pointer argument (arg1) */
883 KERN_PROC_LCID
, /* Integer argument (arg2) */
884 sysctl_prochandle
, /* Handler function */
885 NULL
, /* Data is size variant on ILP32/LP64 */
890 * Fill in non-zero fields of an eproc structure for the specified process.
893 fill_user32_eproc(proc_t p
, struct user32_eproc
*__restrict ep
)
897 struct session
*sessp
;
898 kauth_cred_t my_cred
;
901 sessp
= proc_session(p
);
903 if (pg
!= PGRP_NULL
) {
904 ep
->e_pgid
= p
->p_pgrpid
;
905 ep
->e_jobc
= pg
->pg_jobc
;
906 if (sessp
!= SESSION_NULL
&& sessp
->s_ttyvp
)
907 ep
->e_flag
= EPROC_CTTY
;
909 ep
->e_ppid
= p
->p_ppid
;
911 my_cred
= kauth_cred_proc_ref(p
);
913 /* A fake historical pcred */
914 ep
->e_pcred
.p_ruid
= kauth_cred_getruid(my_cred
);
915 ep
->e_pcred
.p_svuid
= kauth_cred_getsvuid(my_cred
);
916 ep
->e_pcred
.p_rgid
= kauth_cred_getrgid(my_cred
);
917 ep
->e_pcred
.p_svgid
= kauth_cred_getsvgid(my_cred
);
919 /* A fake historical *kauth_cred_t */
920 ep
->e_ucred
.cr_ref
= my_cred
->cr_ref
;
921 ep
->e_ucred
.cr_uid
= kauth_cred_getuid(my_cred
);
922 ep
->e_ucred
.cr_ngroups
= posix_cred_get(my_cred
)->cr_ngroups
;
923 bcopy(posix_cred_get(my_cred
)->cr_groups
,
924 ep
->e_ucred
.cr_groups
, NGROUPS
* sizeof (gid_t
));
926 kauth_cred_unref(&my_cred
);
929 if ((p
->p_flag
& P_CONTROLT
) && (sessp
!= SESSION_NULL
) &&
930 (tp
= SESSION_TP(sessp
))) {
931 ep
->e_tdev
= tp
->t_dev
;
932 ep
->e_tpgid
= sessp
->s_ttypgrpid
;
936 if (sessp
!= SESSION_NULL
) {
937 if (SESS_LEADER(p
, sessp
))
938 ep
->e_flag
|= EPROC_SLEADER
;
946 * Fill in non-zero fields of an LP64 eproc structure for the specified process.
949 fill_user64_eproc(proc_t p
, struct user64_eproc
*__restrict ep
)
953 struct session
*sessp
;
954 kauth_cred_t my_cred
;
957 sessp
= proc_session(p
);
959 if (pg
!= PGRP_NULL
) {
960 ep
->e_pgid
= p
->p_pgrpid
;
961 ep
->e_jobc
= pg
->pg_jobc
;
962 if (sessp
!= SESSION_NULL
&& sessp
->s_ttyvp
)
963 ep
->e_flag
= EPROC_CTTY
;
965 ep
->e_ppid
= p
->p_ppid
;
967 my_cred
= kauth_cred_proc_ref(p
);
969 /* A fake historical pcred */
970 ep
->e_pcred
.p_ruid
= kauth_cred_getruid(my_cred
);
971 ep
->e_pcred
.p_svuid
= kauth_cred_getsvuid(my_cred
);
972 ep
->e_pcred
.p_rgid
= kauth_cred_getrgid(my_cred
);
973 ep
->e_pcred
.p_svgid
= kauth_cred_getsvgid(my_cred
);
975 /* A fake historical *kauth_cred_t */
976 ep
->e_ucred
.cr_ref
= my_cred
->cr_ref
;
977 ep
->e_ucred
.cr_uid
= kauth_cred_getuid(my_cred
);
978 ep
->e_ucred
.cr_ngroups
= posix_cred_get(my_cred
)->cr_ngroups
;
979 bcopy(posix_cred_get(my_cred
)->cr_groups
,
980 ep
->e_ucred
.cr_groups
, NGROUPS
* sizeof (gid_t
));
982 kauth_cred_unref(&my_cred
);
985 if ((p
->p_flag
& P_CONTROLT
) && (sessp
!= SESSION_NULL
) &&
986 (tp
= SESSION_TP(sessp
))) {
987 ep
->e_tdev
= tp
->t_dev
;
988 ep
->e_tpgid
= sessp
->s_ttypgrpid
;
992 if (sessp
!= SESSION_NULL
) {
993 if (SESS_LEADER(p
, sessp
))
994 ep
->e_flag
|= EPROC_SLEADER
;
1002 * Fill in an eproc structure for the specified process.
1003 * bzeroed by our caller, so only set non-zero fields.
1006 fill_user32_externproc(proc_t p
, struct user32_extern_proc
*__restrict exp
)
1008 exp
->p_starttime
.tv_sec
= p
->p_start
.tv_sec
;
1009 exp
->p_starttime
.tv_usec
= p
->p_start
.tv_usec
;
1010 exp
->p_flag
= p
->p_flag
;
1011 if (p
->p_lflag
& P_LTRACED
)
1012 exp
->p_flag
|= P_TRACED
;
1013 if (p
->p_lflag
& P_LPPWAIT
)
1014 exp
->p_flag
|= P_PPWAIT
;
1015 if (p
->p_lflag
& P_LEXIT
)
1016 exp
->p_flag
|= P_WEXIT
;
1017 exp
->p_stat
= p
->p_stat
;
1018 exp
->p_pid
= p
->p_pid
;
1019 exp
->p_oppid
= p
->p_oppid
;
1021 exp
->user_stack
= p
->user_stack
;
1022 exp
->p_debugger
= p
->p_debugger
;
1023 exp
->sigwait
= p
->sigwait
;
1025 #ifdef _PROC_HAS_SCHEDINFO_
1026 exp
->p_estcpu
= p
->p_estcpu
;
1027 exp
->p_pctcpu
= p
->p_pctcpu
;
1028 exp
->p_slptime
= p
->p_slptime
;
1030 exp
->p_realtimer
.it_interval
.tv_sec
=
1031 (user32_time_t
)p
->p_realtimer
.it_interval
.tv_sec
;
1032 exp
->p_realtimer
.it_interval
.tv_usec
=
1033 (__int32_t
)p
->p_realtimer
.it_interval
.tv_usec
;
1035 exp
->p_realtimer
.it_value
.tv_sec
=
1036 (user32_time_t
)p
->p_realtimer
.it_value
.tv_sec
;
1037 exp
->p_realtimer
.it_value
.tv_usec
=
1038 (__int32_t
)p
->p_realtimer
.it_value
.tv_usec
;
1040 exp
->p_rtime
.tv_sec
= (user32_time_t
)p
->p_rtime
.tv_sec
;
1041 exp
->p_rtime
.tv_usec
= (__int32_t
)p
->p_rtime
.tv_usec
;
1043 exp
->p_sigignore
= p
->p_sigignore
;
1044 exp
->p_sigcatch
= p
->p_sigcatch
;
1045 exp
->p_priority
= p
->p_priority
;
1046 exp
->p_nice
= p
->p_nice
;
1047 bcopy(&p
->p_comm
, &exp
->p_comm
, MAXCOMLEN
);
1048 exp
->p_xstat
= p
->p_xstat
;
1049 exp
->p_acflag
= p
->p_acflag
;
1053 * Fill in an LP64 version of extern_proc structure for the specified process.
1056 fill_user64_externproc(proc_t p
, struct user64_extern_proc
*__restrict exp
)
1058 exp
->p_starttime
.tv_sec
= p
->p_start
.tv_sec
;
1059 exp
->p_starttime
.tv_usec
= p
->p_start
.tv_usec
;
1060 exp
->p_flag
= p
->p_flag
;
1061 if (p
->p_lflag
& P_LTRACED
)
1062 exp
->p_flag
|= P_TRACED
;
1063 if (p
->p_lflag
& P_LPPWAIT
)
1064 exp
->p_flag
|= P_PPWAIT
;
1065 if (p
->p_lflag
& P_LEXIT
)
1066 exp
->p_flag
|= P_WEXIT
;
1067 exp
->p_stat
= p
->p_stat
;
1068 exp
->p_pid
= p
->p_pid
;
1069 exp
->p_oppid
= p
->p_oppid
;
1071 exp
->user_stack
= p
->user_stack
;
1072 exp
->p_debugger
= p
->p_debugger
;
1073 exp
->sigwait
= p
->sigwait
;
1075 #ifdef _PROC_HAS_SCHEDINFO_
1076 exp
->p_estcpu
= p
->p_estcpu
;
1077 exp
->p_pctcpu
= p
->p_pctcpu
;
1078 exp
->p_slptime
= p
->p_slptime
;
1080 exp
->p_realtimer
.it_interval
.tv_sec
= p
->p_realtimer
.it_interval
.tv_sec
;
1081 exp
->p_realtimer
.it_interval
.tv_usec
= p
->p_realtimer
.it_interval
.tv_usec
;
1083 exp
->p_realtimer
.it_value
.tv_sec
= p
->p_realtimer
.it_value
.tv_sec
;
1084 exp
->p_realtimer
.it_value
.tv_usec
= p
->p_realtimer
.it_value
.tv_usec
;
1086 exp
->p_rtime
.tv_sec
= p
->p_rtime
.tv_sec
;
1087 exp
->p_rtime
.tv_usec
= p
->p_rtime
.tv_usec
;
1089 exp
->p_sigignore
= p
->p_sigignore
;
1090 exp
->p_sigcatch
= p
->p_sigcatch
;
1091 exp
->p_priority
= p
->p_priority
;
1092 exp
->p_nice
= p
->p_nice
;
1093 bcopy(&p
->p_comm
, &exp
->p_comm
, MAXCOMLEN
);
1094 exp
->p_xstat
= p
->p_xstat
;
1095 exp
->p_acflag
= p
->p_acflag
;
1099 fill_user32_proc(proc_t p
, struct user32_kinfo_proc
*__restrict kp
)
1101 /* on a 64 bit kernel, 32 bit users get some truncated information */
1102 fill_user32_externproc(p
, &kp
->kp_proc
);
1103 fill_user32_eproc(p
, &kp
->kp_eproc
);
1107 fill_user64_proc(proc_t p
, struct user64_kinfo_proc
*__restrict kp
)
1109 fill_user64_externproc(p
, &kp
->kp_proc
);
1110 fill_user64_eproc(p
, &kp
->kp_eproc
);
/*
 * sysctl handler for the kern.kdebug node: permission-checks the caller
 * (root, or kperf-blessed for trace access), then dispatches the kdebug
 * subcommand to kdbg_control().
 * NOTE(review): garbled extraction — many interior lines (error returns,
 * most case labels, switch header, braces) are missing from this view.
 * Code left byte-identical; comments only.
 */
1114 sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
1116 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
1117 int *name
= arg1
; /* oid element argument vector */
1118 int namelen
= arg2
; /* number of oid element arguments */
1119 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1120 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1121 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1122 // size_t newlen = req->newlen; /* user buffer copy in size */
1124 proc_t p
= current_proc();
/* Primary gate: superuser check against the current process. */
1130 ret
= suser(kauth_cred_get(), &p
->p_acflag
);
1132 /* Non-root processes may be blessed by kperf to access data
1133 * logged into trace.
1136 ret
= kperf_access_check();
/* Recognized kdebug subcommands are all forwarded to kdbg_control(). */
1151 case KERN_KDWRITETR
:
1152 case KERN_KDWRITEMAP
:
1156 case KERN_KDSETRTCDEC
:
1158 case KERN_KDGETENTROPY
:
1159 case KERN_KDENABLE_BG_TRACE
:
1160 case KERN_KDDISABLE_BG_TRACE
:
1161 case KERN_KDREADCURTHRMAP
:
1162 case KERN_KDSET_TYPEFILTER
:
1163 case KERN_KDBUFWAIT
:
1165 case KERN_KDWAIT_BG_TRACE_RESET
:
1166 case KERN_KDSET_BG_TYPEFILTER
:
1167 case KERN_KDWRITEMAP_V3
:
1168 case KERN_KDWRITETR_V3
:
1169 ret
= kdbg_control(name
, namelen
, oldp
, oldlenp
);
1176 /* adjust index so we return the right required/consumed amount */
1178 req
->oldidx
+= req
->oldlen
;
/* Registration of the kern.kdebug node with the handler above. */
1182 SYSCTL_PROC(_kern
, KERN_KDEBUG
, kdebug
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1183 0, /* Pointer argument (arg1) */
1184 0, /* Integer argument (arg2) */
1185 sysctl_kdebug_ops
, /* Handler function */
1186 NULL
, /* Data pointer */
/*
 * kern.procargs handler: thin wrapper that forwards the OID arguments to
 * sysctl_procargsx() with argc_yes == 0 (legacy format, no argc prefix).
 * NOTE(review): garbled extraction — braces/returns missing from this
 * view; code left byte-identical.
 */
1191 * Return the top *sizep bytes of the user stack, or the entire area of the
1192 * user stack down through the saved exec_path, whichever is smaller.
1195 sysctl_doprocargs SYSCTL_HANDLER_ARGS
1197 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
1198 int *name
= arg1
; /* oid element argument vector */
1199 int namelen
= arg2
; /* number of oid element arguments */
1200 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1201 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1202 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1203 // size_t newlen = req->newlen; /* user buffer copy in size */
1206 error
= sysctl_procargsx( name
, namelen
, oldp
, oldlenp
, current_proc(), 0);
1208 /* adjust index so we return the right required/consumed amount */
1210 req
->oldidx
+= req
->oldlen
;
/* Registration of the kern.procargs node. */
1214 SYSCTL_PROC(_kern
, KERN_PROCARGS
, procargs
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1215 0, /* Pointer argument (arg1) */
1216 0, /* Integer argument (arg2) */
1217 sysctl_doprocargs
, /* Handler function */
1218 NULL
, /* Data pointer */
/*
 * kern.procargs2 handler: identical to sysctl_doprocargs except it passes
 * argc_yes == 1, so the output is prefixed with the process argc.
 * NOTE(review): garbled extraction — braces/returns missing from this
 * view; code left byte-identical.
 */
1222 sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
1224 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
1225 int *name
= arg1
; /* oid element argument vector */
1226 int namelen
= arg2
; /* number of oid element arguments */
1227 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1228 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1229 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1230 // size_t newlen = req->newlen; /* user buffer copy in size */
1233 error
= sysctl_procargsx( name
, namelen
, oldp
, oldlenp
, current_proc(), 1);
1235 /* adjust index so we return the right required/consumed amount */
1237 req
->oldidx
+= req
->oldlen
;
/* Registration of the kern.procargs2 node. */
1241 SYSCTL_PROC(_kern
, KERN_PROCARGS2
, procargs2
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1242 0, /* Pointer argument (arg1) */
1243 0, /* Integer argument (arg2) */
1244 sysctl_doprocargs2
, /* Handler function */
1245 NULL
, /* Data pointer */
/*
 * Worker for kern.procargs / kern.procargs2.
 * Copies the top of the target process's user stack (the saved argv/env
 * strings) into the caller's buffer, after a uid/superuser permission
 * check. When argc_yes, the first word of the output is the argc value.
 *
 * NOTE(review): garbled extraction — large interior spans (pid lookup,
 * size calculations, several returns, braces) are missing from this view,
 * and one token below ("©_start") is an entity-corrupted "&copy_start".
 * Code left byte-identical; comments only.
 */
1249 sysctl_procargsx(int *name
, u_int namelen
, user_addr_t where
,
1250 size_t *sizep
, proc_t cur_proc
, int argc_yes
)
/* buflen is only meaningful when the caller supplied an output buffer. */
1253 int buflen
= where
!= USER_ADDR_NULL
? *sizep
: 0;
1255 struct _vm_map
*proc_map
;
1258 user_addr_t arg_addr
;
1263 vm_offset_t copy_start
, copy_end
;
1266 kauth_cred_t my_cred
;
1273 buflen
-= sizeof(int); /* reserve first word to return argc */
1275 /* we only care about buflen when where (oldp from sysctl) is not NULL. */
1276 /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
1277 /* is not NULL then the caller wants us to return the length needed to */
1278 /* hold the data we would return */
1279 if (where
!= USER_ADDR_NULL
&& (buflen
<= 0 || buflen
> ARG_MAX
)) {
1285 * Lookup process by pid
1294 * Copy the top N bytes of the stack.
1295 * On all machines we have so far, the stack grows
1298 * If the user expects no more than N bytes of
1299 * argument list, use that as a guess for the
1303 if (!p
->user_stack
) {
/* Size-probe mode: report the needed length without copying data. */
1308 if (where
== USER_ADDR_NULL
) {
1309 /* caller only wants to know length of proc args data */
1310 if (sizep
== NULL
) {
1315 size
= p
->p_argslen
;
1318 size
+= sizeof(int);
1322 * old PROCARGS will return the executable's path and plus some
1323 * extra space for work alignment and data tags
1325 size
+= PATH_MAX
+ (6 * sizeof(int));
/* Round the reported size up to word alignment. */
1327 size
+= (size
& (sizeof(int) - 1)) ? (sizeof(int) - (size
& (sizeof(int) - 1))) : 0;
/* Permission: same-uid or superuser may read another proc's args. */
1332 my_cred
= kauth_cred_proc_ref(p
);
1333 uid
= kauth_cred_getuid(my_cred
);
1334 kauth_cred_unref(&my_cred
);
1336 if ((uid
!= kauth_cred_getuid(kauth_cred_get()))
1337 && suser(kauth_cred_get(), &cur_proc
->p_acflag
)) {
1342 if ((u_int
)arg_size
> p
->p_argslen
)
1343 arg_size
= round_page(p
->p_argslen
);
/* The args live at the top of the user stack, growing down. */
1345 arg_addr
= p
->user_stack
- arg_size
;
1349 * Before we can block (any VM code), make another
1350 * reference to the map to keep it alive. We do
1351 * that by getting a reference on the task itself.
1359 argslen
= p
->p_argslen
;
1361 * Once we have a task reference we can convert that into a
1362 * map reference, which we will use in the calls below. The
1363 * task/process may change its map after we take this reference
1364 * (see execve), but the worst that will happen then is a return
1365 * of stale info (which is always a possibility).
1367 task_reference(task
);
1369 proc_map
= get_task_map_reference(task
);
1370 task_deallocate(task
);
1372 if (proc_map
== NULL
)
/* Kernel scratch buffer for the copied stack region.
 * NOTE(review): "©_start" below is a mangled "&copy_start" (HTML-entity
 * corruption in extraction) — restore when re-integrating. */
1376 ret
= kmem_alloc(kernel_map
, ©_start
, round_page(arg_size
), VM_KERN_MEMORY_BSD
);
1377 if (ret
!= KERN_SUCCESS
) {
1378 vm_map_deallocate(proc_map
);
1382 copy_end
= round_page(copy_start
+ arg_size
);
/* Pull the target's stack pages into a VM copy object. */
1384 if( vm_map_copyin(proc_map
, (vm_map_address_t
)arg_addr
,
1385 (vm_map_size_t
)arg_size
, FALSE
, &tmp
) != KERN_SUCCESS
) {
1386 vm_map_deallocate(proc_map
);
1387 kmem_free(kernel_map
, copy_start
,
1388 round_page(arg_size
));
1393 * Now that we've done the copyin from the process'
1394 * map, we can release the reference to it.
1396 vm_map_deallocate(proc_map
);
1398 if( vm_map_copy_overwrite(kernel_map
,
1399 (vm_map_address_t
)copy_start
,
1400 tmp
, FALSE
) != KERN_SUCCESS
) {
1401 kmem_free(kernel_map
, copy_start
,
1402 round_page(arg_size
));
/* Point 'data' at the start of the args within the scratch buffer. */
1406 if (arg_size
> argslen
) {
1407 data
= (caddr_t
) (copy_end
- argslen
);
1410 data
= (caddr_t
) (copy_end
- arg_size
);
1415 * When these sysctls were introduced, the first string in the strings
1416 * section was just the bare path of the executable. However, for security
1417 * reasons we now prefix this string with executable_path= so it can be
1418 * parsed getenv style. To avoid binary compatability issues with exising
1419 * callers of this sysctl, we strip it off here if present.
1420 * (rdar://problem/13746466)
1422 #define EXECUTABLE_KEY "executable_path="
1423 if (strncmp(EXECUTABLE_KEY
, data
, strlen(EXECUTABLE_KEY
)) == 0){
1424 data
+= strlen(EXECUTABLE_KEY
);
1425 size
-= strlen(EXECUTABLE_KEY
);
1429 /* Put processes argc as the first word in the copyout buffer */
1430 suword(where
, p
->p_argc
);
1431 error
= copyout(data
, (where
+ sizeof(int)), size
);
1432 size
+= sizeof(int);
1434 error
= copyout(data
, where
, size
);
1437 * Make the old PROCARGS work to return the executable's path
1438 * But, only if there is enough space in the provided buffer
1440 * on entry: data [possibily] points to the beginning of the path
1442 * Note: we keep all pointers&sizes aligned to word boundries
1444 if ( (! error
) && (buflen
> 0 && (u_int
)buflen
> argslen
) )
1446 int binPath_sz
, alignedBinPath_sz
= 0;
1447 int extraSpaceNeeded
, addThis
;
1448 user_addr_t placeHere
;
1449 char * str
= (char *) data
;
1452 /* Some apps are really bad about messing up their stacks
1453 So, we have to be extra careful about getting the length
1454 of the executing binary. If we encounter an error, we bail.
1457 /* Limit ourselves to PATH_MAX paths */
1458 if ( max_len
> PATH_MAX
) max_len
= PATH_MAX
;
/* Hand-rolled bounded strlen over the (untrusted) stack copy. */
1462 while ( (binPath_sz
< max_len
-1) && (*str
++ != 0) )
1465 /* If we have a NUL terminator, copy it, too */
1466 if (binPath_sz
< max_len
-1) binPath_sz
+= 1;
1468 /* Pre-Flight the space requiremnts */
1470 /* Account for the padding that fills out binPath to the next word */
1471 alignedBinPath_sz
+= (binPath_sz
& (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz
& (sizeof(int)-1))) : 0;
1473 placeHere
= where
+ size
;
1475 /* Account for the bytes needed to keep placeHere word aligned */
1476 addThis
= (placeHere
& (sizeof(int)-1)) ? (sizeof(int)-(placeHere
& (sizeof(int)-1))) : 0;
1478 /* Add up all the space that is needed */
1479 extraSpaceNeeded
= alignedBinPath_sz
+ addThis
+ binPath_sz
+ (4 * sizeof(int));
1481 /* is there is room to tack on argv[0]? */
1482 if ( (buflen
& ~(sizeof(int)-1)) >= ( argslen
+ extraSpaceNeeded
))
/* Emit legacy framing words then the path itself. */
1484 placeHere
+= addThis
;
1485 suword(placeHere
, 0);
1486 placeHere
+= sizeof(int);
1487 suword(placeHere
, 0xBFFF0000);
1488 placeHere
+= sizeof(int);
1489 suword(placeHere
, 0);
1490 placeHere
+= sizeof(int);
1491 error
= copyout(data
, placeHere
, binPath_sz
);
1494 placeHere
+= binPath_sz
;
1495 suword(placeHere
, 0);
1496 size
+= extraSpaceNeeded
;
/* Cleanup of the kernel scratch buffer on all exit paths below. */
1502 if (copy_start
!= (vm_offset_t
) 0) {
1503 kmem_free(kernel_map
, copy_start
, copy_end
- copy_start
);
1509 if (where
!= USER_ADDR_NULL
)
/*
 * kern.aiomax handler: reads/writes the system-wide cap on outstanding
 * AIO requests; a new value is accepted only if it is >= the per-process
 * limit and <= AIO_MAX_REQUESTS.
 * NOTE(review): garbled extraction — function name line, braces and
 * returns are missing from this view; code left byte-identical.
 */
1516 * Max number of concurrent aio requests
1520 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1522 int new_value
, changed
;
1523 int error
= sysctl_io_number(req
, aio_max_requests
, sizeof(int), &new_value
, &changed
);
1525 /* make sure the system-wide limit is greater than the per process limit */
1526 if (new_value
>= aio_max_requests_per_process
&& new_value
<= AIO_MAX_REQUESTS
)
1527 aio_max_requests
= new_value
;
/*
 * kern.aioprocmax handler: per-process AIO request cap; accepted only if
 * <= the system-wide limit and >= AIO_LISTIO_MAX.
 * NOTE(review): garbled extraction — name line/braces/returns missing
 * from this view; code left byte-identical.
 */
1536 * Max number of concurrent aio requests per process
1540 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1542 int new_value
, changed
;
1543 int error
= sysctl_io_number(req
, aio_max_requests_per_process
, sizeof(int), &new_value
, &changed
);
1545 /* make sure per process limit is less than the system-wide limit */
1546 if (new_value
<= aio_max_requests
&& new_value
>= AIO_LISTIO_MAX
)
1547 aio_max_requests_per_process
= new_value
;
/*
 * kern.aiothreads handler: number of AIO worker threads. Only increases
 * are honored — the delta of new threads is spawned immediately.
 * NOTE(review): garbled extraction — name line/braces/returns missing
 * from this view; code left byte-identical.
 */
1556 * Max number of async IO worker threads
1560 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1562 int new_value
, changed
;
1563 int error
= sysctl_io_number(req
, aio_worker_threads
, sizeof(int), &new_value
, &changed
);
1565 /* we only allow an increase in the number of worker threads */
1566 if (new_value
> aio_worker_threads
) {
1567 _aio_create_worker_threads((new_value
- aio_worker_threads
));
1568 aio_worker_threads
= new_value
;
/*
 * kern.maxproc handler: system-wide process cap, clamped to the
 * compile-time hard_maxproc ceiling; the new value is audited.
 * NOTE(review): garbled extraction — name line/braces/returns missing
 * from this view; code left byte-identical.
 */
1578 * System-wide limit on the max number of processes
1582 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1584 int new_value
, changed
;
1585 int error
= sysctl_io_number(req
, maxproc
, sizeof(int), &new_value
, &changed
);
1587 AUDIT_ARG(value32
, new_value
);
1588 /* make sure the system-wide limit is less than the configured hard
1589 limit set at kernel compilation */
1590 if (new_value
<= hard_maxproc
&& new_value
> 0)
1591 maxproc
= new_value
;
/*
 * Static sysctl registrations: kern.ostype, kern.osrelease, kern.osrev,
 * kern.version, kern.uuid, plus debug.kprint_syscall tracing knobs.
 * NOTE(review): garbled extraction — several macro argument lines (the
 * backing variables for ostype/osrelease/version) are missing from this
 * view; code left byte-identical.
 */
1598 SYSCTL_STRING(_kern
, KERN_OSTYPE
, ostype
,
1599 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1601 SYSCTL_STRING(_kern
, KERN_OSRELEASE
, osrelease
,
1602 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1604 SYSCTL_INT(_kern
, KERN_OSREV
, osrevision
,
1605 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1606 (int *)NULL
, BSD
, "");
1607 SYSCTL_STRING(_kern
, KERN_VERSION
, version
,
1608 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1610 SYSCTL_STRING(_kern
, OID_AUTO
, uuid
,
1611 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1612 &kernel_uuid_string
[0], 0, "");
/* Globals backing the debug.kprint_syscall knobs below. */
1615 int debug_kprint_syscall
= 0;
1616 char debug_kprint_syscall_process
[MAXCOMLEN
+1];
1618 /* Thread safe: bits and string value are not used to reclaim state */
1619 SYSCTL_INT (_debug
, OID_AUTO
, kprint_syscall
,
1620 CTLFLAG_RW
| CTLFLAG_LOCKED
, &debug_kprint_syscall
, 0, "kprintf syscall tracing");
1621 SYSCTL_STRING(_debug
, OID_AUTO
, kprint_syscall_process
,
1622 CTLFLAG_RW
| CTLFLAG_LOCKED
, debug_kprint_syscall_process
, sizeof(debug_kprint_syscall_process
),
1623 "name of process for kprintf syscall tracing");
/*
 * Decide whether kprintf syscall tracing applies to the current process;
 * when a process-name filter is set, only the matching process is traced
 * (and *namep is cleared since the caller already knows the name).
 * NOTE(review): garbled extraction — braces, early returns and the
 * tail of the function are missing from this view; code byte-identical.
 */
1625 int debug_kprint_current_process(const char **namep
)
1627 struct proc
*p
= current_proc();
1633 if (debug_kprint_syscall_process
[0]) {
1634 /* user asked to scope tracing to a particular process name */
1635 if(0 == strncmp(debug_kprint_syscall_process
,
1636 p
->p_comm
, sizeof(debug_kprint_syscall_process
))) {
1637 /* no value in telling the user that we traced what they asked */
1638 if(namep
) *namep
= NULL
;
1646 /* trace all processes. Tell user what we traced */
/*
 * kern.osversion handler: standard string handling, plus a side effect of
 * pushing the new build-version string into the IORegistry.
 * NOTE(review): garbled extraction — braces/returns missing from this
 * view; code left byte-identical.
 */
1655 /* PR-5293665: need to use a callback function for kern.osversion to set
1656 * osversion in IORegistry */
1659 sysctl_osversion(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
)
1663 rval
= sysctl_handle_string(oidp
, arg1
, arg2
, req
);
1666 IORegistrySetOSBuildVersion((char *)arg1
);
/* Registration: RW string backed by the 256-byte osversion buffer. */
1672 SYSCTL_PROC(_kern
, KERN_OSVERSION
, osversion
,
1673 CTLFLAG_RW
| CTLFLAG_KERN
| CTLTYPE_STRING
| CTLFLAG_LOCKED
,
1674 osversion
, 256 /* OSVERSIZE*/,
1675 sysctl_osversion
, "A", "");
/*
 * kern.bootargs handler: read-only copy of the boot-args string obtained
 * from PE_boot_args(), bounced through a local 256-byte buffer.
 * NOTE(review): garbled extraction — buffer declaration, braces and
 * return are missing from this view; code left byte-identical.
 */
1678 sysctl_sysctl_bootargs
1679 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1684 strlcpy(buf
, PE_boot_args(), 256);
1685 error
= sysctl_io_string(req
, buf
, 256, 0, NULL
);
1689 SYSCTL_PROC(_kern
, OID_AUTO
, bootargs
,
1690 CTLFLAG_LOCKED
| CTLFLAG_RD
| CTLFLAG_KERN
| CTLTYPE_STRING
,
1692 sysctl_sysctl_bootargs
, "A", "bootargs");
/*
 * Static integer sysctl registrations: kern.maxfiles, kern.argmax,
 * kern.posix1version, kern.ngroups, kern.job_control, kern.saved_ids,
 * and object-count gauges (num_files/vnodes/tasks/threads/taskthreads).
 * NOTE(review): garbled extraction — some backing-variable argument
 * lines are missing from this view; code left byte-identical.
 */
1694 SYSCTL_INT(_kern
, KERN_MAXFILES
, maxfiles
,
1695 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1697 SYSCTL_INT(_kern
, KERN_ARGMAX
, argmax
,
1698 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1699 (int *)NULL
, ARG_MAX
, "");
1700 SYSCTL_INT(_kern
, KERN_POSIX1
, posix1version
,
1701 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1702 (int *)NULL
, _POSIX_VERSION
, "");
1703 SYSCTL_INT(_kern
, KERN_NGROUPS
, ngroups
,
1704 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1705 (int *)NULL
, NGROUPS_MAX
, "");
1706 SYSCTL_INT(_kern
, KERN_JOB_CONTROL
, job_control
,
1707 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1708 (int *)NULL
, 1, "");
/* Both arms register kern.saved_ids; value differs by POSIX_SAVED_IDS support. */
1709 #if 1 /* _POSIX_SAVED_IDS from <unistd.h> */
1710 SYSCTL_INT(_kern
, KERN_SAVED_IDS
, saved_ids
,
1711 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1712 (int *)NULL
, 1, "");
1714 SYSCTL_INT(_kern
, KERN_SAVED_IDS
, saved_ids
,
1715 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1718 SYSCTL_INT(_kern
, OID_AUTO
, num_files
,
1719 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1721 SYSCTL_COMPAT_INT(_kern
, OID_AUTO
, num_vnodes
,
1722 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1724 SYSCTL_INT(_kern
, OID_AUTO
, num_tasks
,
1725 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1727 SYSCTL_INT(_kern
, OID_AUTO
, num_threads
,
1728 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1729 &thread_max
, 0, "");
1730 SYSCTL_INT(_kern
, OID_AUTO
, num_taskthreads
,
1731 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1732 &task_threadmax
, 0, "");
/*
 * kern.maxvnodes handler: writes directly into desiredvnodes via
 * sysctl_io_number, then resizes the VM object cache and name cache if
 * the value actually changed.
 * NOTE(review): garbled extraction — braces/return missing from this
 * view; code left byte-identical.
 */
1735 sysctl_maxvnodes (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1737 int oldval
= desiredvnodes
;
1738 int error
= sysctl_io_number(req
, desiredvnodes
, sizeof(int), &desiredvnodes
, NULL
);
1740 if (oldval
!= desiredvnodes
) {
1741 reset_vmobjectcache(oldval
, desiredvnodes
);
1742 resize_namecache(desiredvnodes
);
/*
 * Registrations: kern.namecache_disabled, kern.maxvnodes, kern.maxproc,
 * kern.aiomax, kern.aioprocmax, kern.aiothreads (handlers defined above),
 * and a DEVELOPMENT/DEBUG-only kern.sched_smt_balance toggle.
 */
1748 SYSCTL_INT(_kern
, OID_AUTO
, namecache_disabled
,
1749 CTLFLAG_RW
| CTLFLAG_LOCKED
,
1750 &nc_disabled
, 0, "");
1752 SYSCTL_PROC(_kern
, KERN_MAXVNODES
, maxvnodes
,
1753 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1754 0, 0, sysctl_maxvnodes
, "I", "");
1756 SYSCTL_PROC(_kern
, KERN_MAXPROC
, maxproc
,
1757 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1758 0, 0, sysctl_maxproc
, "I", "");
1760 SYSCTL_PROC(_kern
, KERN_AIOMAX
, aiomax
,
1761 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1762 0, 0, sysctl_aiomax
, "I", "");
1764 SYSCTL_PROC(_kern
, KERN_AIOPROCMAX
, aioprocmax
,
1765 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1766 0, 0, sysctl_aioprocmax
, "I", "");
1768 SYSCTL_PROC(_kern
, KERN_AIOTHREADS
, aiothreads
,
1769 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1770 0, 0, sysctl_aiothreads
, "I", "");
1772 #if (DEVELOPMENT || DEBUG)
1773 extern int sched_smt_balance
;
1774 SYSCTL_INT(_kern
, OID_AUTO
, sched_smt_balance
,
1775 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1776 &sched_smt_balance
, 0, "");
/*
 * kern.securelevel handler: allows raising the securelevel freely;
 * lowering is permitted only for launchd (pid 1).
 * NOTE(review): garbled extraction — function name line, braces and
 * returns are missing from this view; code left byte-identical.
 */
1781 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1783 int new_value
, changed
;
1784 int error
= sysctl_io_number(req
, securelevel
, sizeof(int), &new_value
, &changed
);
1786 if (!(new_value
< securelevel
&& req
->p
->p_pid
!= 1)) {
1788 securelevel
= new_value
;
1797 SYSCTL_PROC(_kern
, KERN_SECURELVL
, securelevel
,
1798 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1799 0, 0, sysctl_securelvl
, "I", "");
/*
 * kern.nisdomainname handler: read/write of the NIS domain name; caches
 * its length on change. Also registers the compat kern.hostid int.
 * NOTE(review): garbled extraction — function name line, braces and
 * returns missing from this view; code left byte-identical.
 */
1804 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1807 error
= sysctl_io_string(req
, domainname
, sizeof(domainname
), 0, &changed
);
1809 domainnamelen
= strlen(domainname
);
1814 SYSCTL_PROC(_kern
, KERN_DOMAINNAME
, nisdomainname
,
1815 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1816 0, 0, sysctl_domainname
, "A", "");
1818 SYSCTL_COMPAT_INT(_kern
, KERN_HOSTID
, hostid
,
1819 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
/*
 * kern.hostname handler: read/write of the hostname buffer; records the
 * new length from the request on change.
 * NOTE(review): garbled extraction — function name line, braces and
 * returns missing from this view; code left byte-identical.
 */
1824 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1827 error
= sysctl_io_string(req
, hostname
, sizeof(hostname
), 1, &changed
);
1829 hostnamelen
= req
->newlen
;
1835 SYSCTL_PROC(_kern
, KERN_HOSTNAME
, hostname
,
1836 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1837 0, 0, sysctl_hostname
, "A", "");
/*
 * kern.procname handler: exposes the calling process's p_name buffer as
 * a writable string (ANYBODY — any process may set its own name).
 * NOTE(review): garbled extraction — function name line and braces
 * missing from this view; code left byte-identical.
 */
1841 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1843 /* Original code allowed writing, I'm copying this, although this all makes
1844 no sense to me. Besides, this sysctl is never used. */
1845 return sysctl_io_string(req
, &req
->p
->p_name
[0], (2*MAXCOMLEN
+1), 1, NULL
);
1848 SYSCTL_PROC(_kern
, KERN_PROCNAME
, procname
,
1849 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
1850 0, 0, sysctl_procname
, "A", "");
/*
 * Static registrations for VM/IO tuning knobs: speculative reads,
 * SSD/CF-drive hints, preheat and speculative-prefetch limits, page-free
 * targets, delayed-work/batch limits, and the boot-session UUID string.
 * All directly back a kernel global; no handler functions involved.
 */
1852 SYSCTL_INT(_kern
, KERN_SPECULATIVE_READS
, speculative_reads_disabled
,
1853 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1854 &speculative_reads_disabled
, 0, "");
1856 SYSCTL_INT(_kern
, OID_AUTO
, ignore_is_ssd
,
1857 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1858 &ignore_is_ssd
, 0, "");
1860 SYSCTL_INT(_kern
, OID_AUTO
, root_is_CF_drive
,
1861 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1862 &root_is_CF_drive
, 0, "");
1864 SYSCTL_UINT(_kern
, OID_AUTO
, preheat_max_bytes
,
1865 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1866 &preheat_max_bytes
, 0, "");
1868 SYSCTL_UINT(_kern
, OID_AUTO
, preheat_min_bytes
,
1869 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1870 &preheat_min_bytes
, 0, "");
1872 SYSCTL_UINT(_kern
, OID_AUTO
, speculative_prefetch_max
,
1873 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1874 &speculative_prefetch_max
, 0, "");
1876 SYSCTL_UINT(_kern
, OID_AUTO
, speculative_prefetch_max_iosize
,
1877 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1878 &speculative_prefetch_max_iosize
, 0, "");
1880 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_free_target
,
1881 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1882 &vm_page_free_target
, 0, "");
1884 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_free_min
,
1885 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1886 &vm_page_free_min
, 0, "");
1888 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_free_reserved
,
1889 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1890 &vm_page_free_reserved
, 0, "");
1892 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_speculative_percentage
,
1893 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1894 &vm_page_speculative_percentage
, 0, "");
1896 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_speculative_q_age_ms
,
1897 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1898 &vm_page_speculative_q_age_ms
, 0, "");
1900 SYSCTL_UINT(_kern
, OID_AUTO
, vm_max_delayed_work_limit
,
1901 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1902 &vm_max_delayed_work_limit
, 0, "");
1904 SYSCTL_UINT(_kern
, OID_AUTO
, vm_max_batch
,
1905 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1906 &vm_max_batch
, 0, "");
1908 SYSCTL_STRING(_kern
, OID_AUTO
, bootsessionuuid
,
1909 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1910 &bootsessionuuid_string
, sizeof(bootsessionuuid_string
) , "");
/*
 * kern.boottime handler: returns the boot time as a timeval sized for the
 * caller's ABI (user64_timeval vs user32_timeval).
 * NOTE(review): garbled extraction — function name line, braces and the
 * lines filling the timeval fields are missing from this view; code left
 * byte-identical.
 */
1914 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1916 time_t tv_sec
= boottime_sec();
1917 struct proc
*p
= req
->p
;
1919 if (proc_is64bit(p
)) {
1920 struct user64_timeval t
;
1923 return sysctl_io_opaque(req
, &t
, sizeof(t
), NULL
);
1925 struct user32_timeval t
;
1928 return sysctl_io_opaque(req
, &t
, sizeof(t
), NULL
);
1932 SYSCTL_PROC(_kern
, KERN_BOOTTIME
, boottime
,
1933 CTLTYPE_STRUCT
| CTLFLAG_KERN
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1934 0, 0, sysctl_boottime
, "S,timeval", "");
/*
 * kern.symfile handler: returns the kernel symbol file path obtained via
 * get_kernel_symfile() for the requesting process.
 * NOTE(review): garbled extraction — function name line, braces and the
 * error-check between the two calls are missing from this view; code
 * left byte-identical.
 */
1938 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1941 int error
= get_kernel_symfile(req
->p
, &str
);
1944 return sysctl_io_string(req
, str
, 0, 0, NULL
);
1948 SYSCTL_PROC(_kern
, KERN_SYMFILE
, symfile
,
1949 CTLTYPE_STRING
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1950 0, 0, sysctl_symfile
, "A", "");
/*
 * kern.netboot handler: read-only int reporting netboot_root().
 * NOTE(review): garbled extraction — function name line and braces
 * missing from this view; code left byte-identical.
 */
1955 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1957 return sysctl_io_number(req
, netboot_root(), sizeof(int), NULL
, NULL
);
1960 SYSCTL_PROC(_kern
, KERN_NETBOOT
, netboot
,
1961 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1962 0, 0, sysctl_netboot
, "I", "");
/*
 * kern.imgsrcdev handler (CONFIG_IMGSRC_ACCESS): legacy single-level
 * variant of imgsrcinfo — superuser-only, reports the dev_t of the
 * device backing the first image-boot root vnode.
 * NOTE(review): garbled extraction — error returns, vnode_put of devvp
 * and braces are missing from this view; code left byte-identical.
 */
1965 #ifdef CONFIG_IMGSRC_ACCESS
1967 * Legacy--act as if only one layer of nesting is possible.
1971 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1973 vfs_context_t ctx
= vfs_context_current();
1977 if (!vfs_context_issuser(ctx
)) {
1981 if (imgsrc_rootvnodes
[0] == NULL
) {
1985 result
= vnode_getwithref(imgsrc_rootvnodes
[0]);
1990 devvp
= vnode_mount(imgsrc_rootvnodes
[0])->mnt_devvp
;
1991 result
= vnode_getwithref(devvp
);
1996 result
= sysctl_io_number(req
, vnode_specrdev(devvp
), sizeof(dev_t
), NULL
, NULL
);
2000 vnode_put(imgsrc_rootvnodes
[0]);
2004 SYSCTL_PROC(_kern
, OID_AUTO
, imgsrcdev
,
2005 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2006 0, 0, sysctl_imgsrcdev
, "I", "");
/*
 * kern.imgsrcinfo handler: walks up to MAX_IMAGEBOOT_NESTING image-boot
 * root vnodes, collecting (dev, flags, height) per level into an
 * imgsrc_info array that is copied out as opaque data.
 * NOTE(review): garbled extraction — error-handling branches, vnode_put
 * calls and braces are missing from this view; code left byte-identical.
 */
2010 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2013 struct imgsrc_info info
[MAX_IMAGEBOOT_NESTING
]; /* 2 for now, no problem */
2017 if (imgsrc_rootvnodes
[0] == NULLVP
) {
2021 for (i
= 0; i
< MAX_IMAGEBOOT_NESTING
; i
++) {
2023 * Go get the root vnode.
2025 rvp
= imgsrc_rootvnodes
[i
];
2026 if (rvp
== NULLVP
) {
2030 error
= vnode_get(rvp
);
2036 * For now, no getting at a non-local volume.
2038 devvp
= vnode_mount(rvp
)->mnt_devvp
;
2039 if (devvp
== NULL
) {
2044 error
= vnode_getwithref(devvp
);
2053 info
[i
].ii_dev
= vnode_specrdev(devvp
);
2054 info
[i
].ii_flags
= 0;
2055 info
[i
].ii_height
= i
;
2056 bzero(info
[i
].ii_reserved
, sizeof(info
[i
].ii_reserved
));
/* Copy out only the 'i' levels actually populated. */
2062 return sysctl_io_opaque(req
, info
, i
* sizeof(info
[0]), NULL
);
2065 SYSCTL_PROC(_kern
, OID_AUTO
, imgsrcinfo
,
2066 CTLTYPE_OPAQUE
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2067 0, 0, sysctl_imgsrcinfo
, "I", "");
2069 #endif /* CONFIG_IMGSRC_ACCESS */
/*
 * kern.timer / kern.timer.longterm sysctl nodes: coalescing toggle,
 * deadline-tracking counters, and the selector enum shared with the
 * timer_call implementation.
 * NOTE(review): garbled extraction — the enum's opening lines (tag and
 * first constants, e.g. THRESHOLD/QCOUNT used below) are missing from
 * this view; code left byte-identical.
 */
2072 SYSCTL_DECL(_kern_timer
);
2073 SYSCTL_NODE(_kern
, OID_AUTO
, timer
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, "timer");
2076 SYSCTL_INT(_kern_timer
, OID_AUTO
, coalescing_enabled
,
2077 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2078 &mach_timer_coalescing_enabled
, 0, "");
2080 SYSCTL_QUAD(_kern_timer
, OID_AUTO
, deadline_tracking_bin_1
,
2081 CTLFLAG_RW
| CTLFLAG_LOCKED
,
2082 &timer_deadline_tracking_bin_1
, "");
2083 SYSCTL_QUAD(_kern_timer
, OID_AUTO
, deadline_tracking_bin_2
,
2084 CTLFLAG_RW
| CTLFLAG_LOCKED
,
2085 &timer_deadline_tracking_bin_2
, "");
2087 SYSCTL_DECL(_kern_timer_longterm
);
2088 SYSCTL_NODE(_kern_timer
, OID_AUTO
, longterm
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, "longterm");
2091 /* Must match definition in osfmk/kern/timer_call.c */
2094 ENQUEUES
, DEQUEUES
, ESCALATES
, SCANS
, PREEMPTS
,
2095 LATENCY
, LATENCY_MIN
, LATENCY_MAX
/*
 * Generic kern.timer.longterm.* handler: arg1 selects which statistic
 * (enum above); reads via timer_sysctl_get(), and on a write pushes the
 * new value through timer_sysctl_set().
 * NOTE(review): garbled extraction — declarations of error/new_value/
 * changed, the 'changed' test and returns are missing from this view;
 * code left byte-identical.
 */
2097 extern uint64_t timer_sysctl_get(int);
2098 extern int timer_sysctl_set(int, uint64_t);
2102 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2104 int oid
= (int)arg1
;
2105 uint64_t value
= timer_sysctl_get(oid
);
2110 error
= sysctl_io_number(req
, value
, sizeof(value
), &new_value
, &changed
);
2112 error
= timer_sysctl_set(oid
, new_value
);
/*
 * kern.timer.longterm.* registrations — all share sysctl_timer with the
 * statistic selector passed via arg1. Only 'threshold' is writable.
 */
2117 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, threshold
,
2118 CTLTYPE_QUAD
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2119 (void *) THRESHOLD
, 0, sysctl_timer
, "Q", "");
2120 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, qlen
,
2121 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2122 (void *) QCOUNT
, 0, sysctl_timer
, "Q", "");
2124 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, enqueues
,
2125 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2126 (void *) ENQUEUES
, 0, sysctl_timer
, "Q", "");
2127 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, dequeues
,
2128 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2129 (void *) DEQUEUES
, 0, sysctl_timer
, "Q", "");
2130 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, escalates
,
2131 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2132 (void *) ESCALATES
, 0, sysctl_timer
, "Q", "");
2133 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, scans
,
2134 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2135 (void *) SCANS
, 0, sysctl_timer
, "Q", "");
2136 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, preempts
,
2137 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2138 (void *) PREEMPTS
, 0, sysctl_timer
, "Q", "");
2139 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, latency
,
2140 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2141 (void *) LATENCY
, 0, sysctl_timer
, "Q", "");
2142 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, latency_min
,
2143 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2144 (void *) LATENCY_MIN
, 0, sysctl_timer
, "Q", "");
2145 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, latency_max
,
2146 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2147 (void *) LATENCY_MAX
, 0, sysctl_timer
, "Q", "");
/*
 * kern.usrstack handler: read-only, returns the calling process's user
 * stack address truncated to a 32-bit int (legacy interface).
 * NOTE(review): garbled extraction — function name line and braces
 * missing from this view; code left byte-identical.
 */
2152 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2154 return sysctl_io_number(req
, (int)req
->p
->user_stack
, sizeof(int), NULL
, NULL
);
2157 SYSCTL_PROC(_kern
, KERN_USRSTACK32
, usrstack
,
2158 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2159 0, 0, sysctl_usrstack
, "I", "");
/*
 * kern.usrstack64 handler: full-width user stack address of the caller.
 * Also registers the kern.corefile template string.
 * NOTE(review): garbled extraction — function name line and braces
 * missing from this view; code left byte-identical.
 */
2163 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2165 return sysctl_io_number(req
, req
->p
->user_stack
, sizeof(req
->p
->user_stack
), NULL
, NULL
);
2168 SYSCTL_PROC(_kern
, KERN_USRSTACK64
, usrstack64
,
2169 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2170 0, 0, sysctl_usrstack64
, "Q", "");
2172 SYSCTL_STRING(_kern
, KERN_COREFILE
, corefile
,
2173 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2174 corefilename
, sizeof(corefilename
), "");
/*
 * kern.coredump handler: boolean toggle for core dumps; compiled out
 * (the SECURE_KERNEL arm, not fully visible here) on secure kernels.
 * NOTE(review): garbled extraction — function name line, #else/#endif
 * and returns missing from this view; code left byte-identical.
 */
2178 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2180 #ifdef SECURE_KERNEL
2184 int new_value
, changed
;
2185 int error
= sysctl_io_number(req
, do_coredump
, sizeof(int), &new_value
, &changed
);
2187 if ((new_value
== 0) || (new_value
== 1))
2188 do_coredump
= new_value
;
2196 SYSCTL_PROC(_kern
, KERN_COREDUMP
, coredump
,
2197 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2198 0, 0, sysctl_coredump
, "I", "");
/*
 * kern.sugid_coredump handler: boolean toggle allowing set-id processes
 * to dump core; same SECURE_KERNEL gating as sysctl_coredump.
 * NOTE(review): garbled extraction — #else/#endif and returns missing
 * from this view; code left byte-identical.
 */
2201 sysctl_suid_coredump
2202 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2204 #ifdef SECURE_KERNEL
2208 int new_value
, changed
;
2209 int error
= sysctl_io_number(req
, sugid_coredump
, sizeof(int), &new_value
, &changed
);
2211 if ((new_value
== 0) || (new_value
== 1))
2212 sugid_coredump
= new_value
;
2220 SYSCTL_PROC(_kern
, KERN_SUGID_COREDUMP
, sugid_coredump
,
2221 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2222 0, 0, sysctl_suid_coredump
, "I", "");
/*
 * kern.delayterm handler: gets/sets the P_LDELAYTERM flag on the calling
 * process (delay termination during shutdown).
 * NOTE(review): garbled extraction — function name line, the 'changed'
 * test, locking around p_lflag and returns are missing from this view;
 * code left byte-identical.
 */
2226 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2228 struct proc
*p
= req
->p
;
2229 int new_value
, changed
;
2230 int error
= sysctl_io_number(req
, (req
->p
->p_lflag
& P_LDELAYTERM
)? 1: 0, sizeof(int), &new_value
, &changed
);
2234 req
->p
->p_lflag
|= P_LDELAYTERM
;
2236 req
->p
->p_lflag
&= ~P_LDELAYTERM
;
2242 SYSCTL_PROC(_kern
, KERN_PROCDELAYTERM
, delayterm
,
2243 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2244 0, 0, sysctl_delayterm
, "I", "");
/*
 * kern.rage_vnode handler: gets/sets "rapid aging" of vnodes at either
 * thread scope (uthread uu_flag UT_RAGE_VNODES) or process scope
 * (p_lflag P_LRAGE_VNODES); ANYBODY may set it for itself.
 * NOTE(review): garbled extraction — locking, break statements and
 * returns are missing from this view; code left byte-identical.
 */
2249 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2251 struct proc
*p
= req
->p
;
2253 int new_value
, old_value
, changed
;
2256 ut
= get_bsdthread_info(current_thread());
/* Report the narrowest scope currently in effect (thread wins). */
2258 if (ut
->uu_flag
& UT_RAGE_VNODES
)
2259 old_value
= KERN_RAGE_THREAD
;
2260 else if (p
->p_lflag
& P_LRAGE_VNODES
)
2261 old_value
= KERN_RAGE_PROC
;
2265 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
2268 switch (new_value
) {
2269 case KERN_RAGE_PROC
:
2271 p
->p_lflag
|= P_LRAGE_VNODES
;
2274 case KERN_UNRAGE_PROC
:
2276 p
->p_lflag
&= ~P_LRAGE_VNODES
;
2280 case KERN_RAGE_THREAD
:
2281 ut
->uu_flag
|= UT_RAGE_VNODES
;
2283 case KERN_UNRAGE_THREAD
:
2284 ut
= get_bsdthread_info(current_thread());
2285 ut
->uu_flag
&= ~UT_RAGE_VNODES
;
2292 SYSCTL_PROC(_kern
, KERN_RAGEVNODE
, rage_vnode
,
2293 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
2294 0, 0, sysctl_rage_vnode
, "I", "");
/*
 * kern.setthread_cpupercent handler (write-only, ANYBODY): packs a CPU
 * limit for the current thread into one int — low 8 bits = percent,
 * upper 24 bits = refill period in ms — and applies it via
 * thread_set_cpulimit(THREAD_CPULIMIT_BLOCK).
 * NOTE(review): garbled extraction — function name line, validation and
 * returns are missing; the "bytes" wording in the two comments below
 * presumably means "bits". Code left byte-identical.
 */
2296 /* XXX move this interface into libproc and remove this sysctl */
2298 sysctl_setthread_cpupercent
2299 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2301 int new_value
, old_value
;
2303 kern_return_t kret
= KERN_SUCCESS
;
2304 uint8_t percent
= 0;
2312 if ((error
= sysctl_io_number(req
, old_value
, sizeof(old_value
), &new_value
, NULL
)) != 0)
2315 percent
= new_value
& 0xff; /* low 8 bytes for perent */
2316 ms_refill
= (new_value
>> 8) & 0xffffff; /* upper 24bytes represent ms refill value */
2321 * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present.
2323 if ((kret
= thread_set_cpulimit(THREAD_CPULIMIT_BLOCK
, percent
, ms_refill
* (int)NSEC_PER_MSEC
)) != 0)
2329 SYSCTL_PROC(_kern
, OID_AUTO
, setthread_cpupercent
,
2330 CTLTYPE_INT
| CTLFLAG_WR
| CTLFLAG_ANYBODY
,
2331 0, 0, sysctl_setthread_cpupercent
, "I", "set thread cpu percentage limit");
/*
 * kern.check_openevt handler: gets/sets the per-process P_CHECKOPENEVT
 * flag using atomic bit operations on p_flag; ANYBODY may toggle it for
 * its own process.
 * NOTE(review): garbled extraction — default case, breaks and returns
 * are missing from this view; code left byte-identical.
 */
2335 sysctl_kern_check_openevt
2336 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2338 struct proc
*p
= req
->p
;
2339 int new_value
, old_value
, changed
;
2342 if (p
->p_flag
& P_CHECKOPENEVT
) {
2343 old_value
= KERN_OPENEVT_PROC
;
2348 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
2351 switch (new_value
) {
2352 case KERN_OPENEVT_PROC
:
2353 OSBitOrAtomic(P_CHECKOPENEVT
, &p
->p_flag
);
2356 case KERN_UNOPENEVT_PROC
:
2357 OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT
), &p
->p_flag
);
2367 SYSCTL_PROC(_kern
, KERN_CHECKOPENEVT
, check_openevt
, CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
2368 0, 0, sysctl_kern_check_openevt
, "I", "set the per-process check-open-evt flag");
/*
 * kern.nx handler: toggles NX (no-execute) enforcement; read-only on
 * SECURE_KERNEL builds, and on x86 a write is honored only when the CPU
 * advertises the XD feature.
 * NOTE(review): garbled extraction — function name line, #else arms and
 * returns missing from this view; code left byte-identical.
 */
2374 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2376 #ifdef SECURE_KERNEL
2380 int new_value
, changed
;
2383 error
= sysctl_io_number(req
, nx_enabled
, sizeof(nx_enabled
), &new_value
, &changed
);
2388 #if defined(__i386__) || defined(__x86_64__)
2390 * Only allow setting if NX is supported on the chip
2392 if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD
))
2395 nx_enabled
= new_value
;
2398 #endif /* SECURE_KERNEL */
2403 SYSCTL_PROC(_kern
, KERN_NX_PROTECTION
, nx
,
2404 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2405 0, 0, sysctl_nx
, "I", "");
2409 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2411 if (proc_is64bit(req
->p
)) {
2412 struct user64_loadavg loadinfo64
;
2413 fill_loadavg64(&averunnable
, &loadinfo64
);
2414 return sysctl_io_opaque(req
, &loadinfo64
, sizeof(loadinfo64
), NULL
);
2416 struct user32_loadavg loadinfo32
;
2417 fill_loadavg32(&averunnable
, &loadinfo32
);
2418 return sysctl_io_opaque(req
, &loadinfo32
, sizeof(loadinfo32
), NULL
);
2422 SYSCTL_PROC(_vm
, VM_LOADAVG
, loadavg
,
2423 CTLTYPE_STRUCT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2424 0, 0, sysctl_loadavg
, "S,loadavg", "");
2427 * Note: Thread safe; vm_map_lock protects in vm_toggle_entry_reuse()
2430 sysctl_vm_toggle_address_reuse(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
,
2431 __unused
int arg2
, struct sysctl_req
*req
)
2433 int old_value
=0, new_value
=0, error
=0;
2435 if(vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE
, &old_value
))
2437 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, NULL
);
2439 return (vm_toggle_entry_reuse(new_value
, NULL
));
2444 SYSCTL_PROC(_debug
, OID_AUTO
, toggle_address_reuse
, CTLFLAG_ANYBODY
| CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, 0, sysctl_vm_toggle_address_reuse
,"I","");
2448 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2451 uint64_t swap_total
;
2452 uint64_t swap_avail
;
2453 vm_size_t swap_pagesize
;
2454 boolean_t swap_encrypted
;
2455 struct xsw_usage xsu
;
2457 error
= macx_swapinfo(&swap_total
,
2464 xsu
.xsu_total
= swap_total
;
2465 xsu
.xsu_avail
= swap_avail
;
2466 xsu
.xsu_used
= swap_total
- swap_avail
;
2467 xsu
.xsu_pagesize
= swap_pagesize
;
2468 xsu
.xsu_encrypted
= swap_encrypted
;
2469 return sysctl_io_opaque(req
, &xsu
, sizeof(xsu
), NULL
);
2474 SYSCTL_PROC(_vm
, VM_SWAPUSAGE
, swapusage
,
2475 CTLTYPE_STRUCT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2476 0, 0, sysctl_swapusage
, "S,xsw_usage", "");
2479 extern void vm_page_reactivate_all_throttled(void);
2482 sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
2484 #pragma unused(arg1, arg2)
2485 int error
, val
= memorystatus_freeze_enabled
? 1 : 0;
2488 error
= sysctl_handle_int(oidp
, &val
, 0, req
);
2489 if (error
|| !req
->newptr
)
2492 if (COMPRESSED_PAGER_IS_ACTIVE
|| DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE
) {
2493 //assert(req->newptr);
2494 printf("Failed attempt to set vm.freeze_enabled sysctl\n");
2499 * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue.
2501 disabled
= (!val
&& memorystatus_freeze_enabled
);
2503 memorystatus_freeze_enabled
= val
? TRUE
: FALSE
;
2506 vm_page_reactivate_all_throttled();
2512 SYSCTL_PROC(_vm
, OID_AUTO
, freeze_enabled
, CTLTYPE_INT
|CTLFLAG_RW
, &memorystatus_freeze_enabled
, 0, sysctl_freeze_enabled
, "I", "");
2513 #endif /* CONFIG_FREEZE */
2515 /* this kernel does NOT implement shared_region_make_private_np() */
2516 SYSCTL_INT(_kern
, KERN_SHREG_PRIVATIZABLE
, shreg_private
,
2517 CTLFLAG_RD
| CTLFLAG_LOCKED
,
2518 (int *)NULL
, 0, "");
2521 fetch_process_cputype(
2525 cpu_type_t
*cputype
)
2527 proc_t p
= PROC_NULL
;
2534 else if (namelen
== 1) {
2535 p
= proc_find(name
[0]);
2544 ret
= cpu_type() & ~CPU_ARCH_MASK
;
2545 if (IS_64BIT_PROCESS(p
))
2546 ret
|= CPU_ARCH_ABI64
;
2557 sysctl_sysctl_native(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
,
2558 struct sysctl_req
*req
)
2561 cpu_type_t proc_cputype
= 0;
2562 if ((error
= fetch_process_cputype(req
->p
, (int *)arg1
, arg2
, &proc_cputype
)) != 0)
2565 if ((proc_cputype
& ~CPU_ARCH_MASK
) != (cpu_type() & ~CPU_ARCH_MASK
))
2567 return SYSCTL_OUT(req
, &res
, sizeof(res
));
2569 SYSCTL_PROC(_sysctl
, OID_AUTO
, proc_native
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, 0, sysctl_sysctl_native
,"I","proc_native");
2572 sysctl_sysctl_cputype(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
,
2573 struct sysctl_req
*req
)
2576 cpu_type_t proc_cputype
= 0;
2577 if ((error
= fetch_process_cputype(req
->p
, (int *)arg1
, arg2
, &proc_cputype
)) != 0)
2579 return SYSCTL_OUT(req
, &proc_cputype
, sizeof(proc_cputype
));
2581 SYSCTL_PROC(_sysctl
, OID_AUTO
, proc_cputype
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, 0, sysctl_sysctl_cputype
,"I","proc_cputype");
2585 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2587 return sysctl_io_number(req
, boothowto
& RB_SAFEBOOT
? 1 : 0, sizeof(int), NULL
, NULL
);
2590 SYSCTL_PROC(_kern
, KERN_SAFEBOOT
, safeboot
,
2591 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2592 0, 0, sysctl_safeboot
, "I", "");
2596 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2598 return sysctl_io_number(req
, boothowto
& RB_SINGLE
? 1 : 0, sizeof(int), NULL
, NULL
);
2601 SYSCTL_PROC(_kern
, OID_AUTO
, singleuser
,
2602 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2603 0, 0, sysctl_singleuser
, "I", "");
2605 STATIC
int sysctl_minimalboot
2606 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2608 return sysctl_io_number(req
, minimalboot
, sizeof(int), NULL
, NULL
);
2611 SYSCTL_PROC(_kern
, OID_AUTO
, minimalboot
,
2612 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2613 0, 0, sysctl_minimalboot
, "I", "");
2616 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
2618 extern boolean_t affinity_sets_enabled
;
2619 extern int affinity_sets_mapping
;
2621 SYSCTL_INT (_kern
, OID_AUTO
, affinity_sets_enabled
,
2622 CTLFLAG_RW
| CTLFLAG_LOCKED
, (int *) &affinity_sets_enabled
, 0, "hinting enabled");
2623 SYSCTL_INT (_kern
, OID_AUTO
, affinity_sets_mapping
,
2624 CTLFLAG_RW
| CTLFLAG_LOCKED
, &affinity_sets_mapping
, 0, "mapping policy");
2627 * Boolean indicating if KASLR is active.
2631 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2635 slide
= vm_kernel_slide
? 1 : 0;
2637 return sysctl_io_number( req
, slide
, sizeof(int), NULL
, NULL
);
2640 SYSCTL_PROC(_kern
, OID_AUTO
, slide
,
2641 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2642 0, 0, sysctl_slide
, "I", "");
2645 * Limit on total memory users can wire.
2647 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
2649 * vm_user_wire_limit - per address space limit on wired memory. This puts a cap on the process's rlimit value.
2651 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
2654 * All values are in bytes.
2657 vm_map_size_t vm_global_no_user_wire_amount
;
2658 vm_map_size_t vm_global_user_wire_limit
;
2659 vm_map_size_t vm_user_wire_limit
;
2662 * There needs to be a more automatic/elegant way to do this
2664 SYSCTL_QUAD(_vm
, OID_AUTO
, global_no_user_wire_amount
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_global_no_user_wire_amount
, "");
2665 SYSCTL_QUAD(_vm
, OID_AUTO
, global_user_wire_limit
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_global_user_wire_limit
, "");
2666 SYSCTL_QUAD(_vm
, OID_AUTO
, user_wire_limit
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_user_wire_limit
, "");
2668 extern int vm_map_copy_overwrite_aligned_src_not_internal
;
2669 extern int vm_map_copy_overwrite_aligned_src_not_symmetric
;
2670 extern int vm_map_copy_overwrite_aligned_src_large
;
2671 SYSCTL_INT(_vm
, OID_AUTO
, vm_copy_src_not_internal
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_map_copy_overwrite_aligned_src_not_internal
, 0, "");
2672 SYSCTL_INT(_vm
, OID_AUTO
, vm_copy_src_not_symmetric
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_map_copy_overwrite_aligned_src_not_symmetric
, 0, "");
2673 SYSCTL_INT(_vm
, OID_AUTO
, vm_copy_src_large
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_map_copy_overwrite_aligned_src_large
, 0, "");
2676 extern uint32_t vm_page_external_count
;
2677 extern uint32_t vm_page_filecache_min
;
2679 SYSCTL_INT(_vm
, OID_AUTO
, vm_page_external_count
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_page_external_count
, 0, "");
2680 SYSCTL_INT(_vm
, OID_AUTO
, vm_page_filecache_min
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_page_filecache_min
, 0, "");
2682 extern int vm_compressor_mode
;
2683 extern int vm_compressor_is_active
;
2684 extern int vm_compressor_available
;
2685 extern uint32_t vm_ripe_target_age
;
2686 extern uint32_t swapout_target_age
;
2687 extern int64_t compressor_bytes_used
;
2688 extern int64_t c_segment_input_bytes
;
2689 extern int64_t c_segment_compressed_bytes
;
2690 extern uint32_t compressor_eval_period_in_msecs
;
2691 extern uint32_t compressor_sample_min_in_msecs
;
2692 extern uint32_t compressor_sample_max_in_msecs
;
2693 extern uint32_t compressor_thrashing_threshold_per_10msecs
;
2694 extern uint32_t compressor_thrashing_min_per_10msecs
;
2695 extern uint32_t vm_compressor_minorcompact_threshold_divisor
;
2696 extern uint32_t vm_compressor_majorcompact_threshold_divisor
;
2697 extern uint32_t vm_compressor_unthrottle_threshold_divisor
;
2698 extern uint32_t vm_compressor_catchup_threshold_divisor
;
2700 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_input_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &c_segment_input_bytes
, "");
2701 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_compressed_bytes
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &c_segment_compressed_bytes
, "");
2702 SYSCTL_QUAD(_vm
, OID_AUTO
, compressor_bytes_used
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &compressor_bytes_used
, "");
2704 SYSCTL_INT(_vm
, OID_AUTO
, compressor_mode
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_compressor_mode
, 0, "");
2705 SYSCTL_INT(_vm
, OID_AUTO
, compressor_is_active
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_compressor_is_active
, 0, "");
2706 SYSCTL_INT(_vm
, OID_AUTO
, compressor_swapout_target_age
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &swapout_target_age
, 0, "");
2707 SYSCTL_INT(_vm
, OID_AUTO
, compressor_available
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &vm_compressor_available
, 0, "");
2709 SYSCTL_INT(_vm
, OID_AUTO
, vm_ripe_target_age_in_secs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_ripe_target_age
, 0, "");
2711 SYSCTL_INT(_vm
, OID_AUTO
, compressor_eval_period_in_msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_eval_period_in_msecs
, 0, "");
2712 SYSCTL_INT(_vm
, OID_AUTO
, compressor_sample_min_in_msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_sample_min_in_msecs
, 0, "");
2713 SYSCTL_INT(_vm
, OID_AUTO
, compressor_sample_max_in_msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_sample_max_in_msecs
, 0, "");
2714 SYSCTL_INT(_vm
, OID_AUTO
, compressor_thrashing_threshold_per_10msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_thrashing_threshold_per_10msecs
, 0, "");
2715 SYSCTL_INT(_vm
, OID_AUTO
, compressor_thrashing_min_per_10msecs
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &compressor_thrashing_min_per_10msecs
, 0, "");
2716 SYSCTL_INT(_vm
, OID_AUTO
, compressor_minorcompact_threshold_divisor
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_minorcompact_threshold_divisor
, 0, "");
2717 SYSCTL_INT(_vm
, OID_AUTO
, compressor_majorcompact_threshold_divisor
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_majorcompact_threshold_divisor
, 0, "");
2718 SYSCTL_INT(_vm
, OID_AUTO
, compressor_unthrottle_threshold_divisor
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_unthrottle_threshold_divisor
, 0, "");
2719 SYSCTL_INT(_vm
, OID_AUTO
, compressor_catchup_threshold_divisor
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_compressor_catchup_threshold_divisor
, 0, "");
2721 SYSCTL_STRING(_vm
, OID_AUTO
, swapfileprefix
, CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
, swapfilename
, sizeof(swapfilename
) - SWAPFILENAME_INDEX_LEN
, "");
#if CONFIG_PHANTOM_CACHE
extern uint32_t phantom_cache_thrashing_threshold;
extern uint32_t phantom_cache_eval_period_in_msecs;
extern uint32_t phantom_cache_thrashing_threshold_ssd;

SYSCTL_INT(_vm, OID_AUTO, phantom_cache_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_eval_period_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold_ssd, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold_ssd, 0, "");
#endif /* CONFIG_PHANTOM_CACHE */
#if (DEVELOPMENT || DEBUG)

extern uint32_t	vm_page_creation_throttled_hard;
extern uint32_t	vm_page_creation_throttled_soft;

SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_hard,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_creation_throttled_hard, 0, "");

SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_soft,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_creation_throttled_soft, 0, "");

#endif /* DEVELOPMENT || DEBUG */
2747 * Enable tracing of voucher contents
2749 extern uint32_t ipc_voucher_trace_contents
;
2751 SYSCTL_INT (_kern
, OID_AUTO
, ipc_voucher_trace_contents
,
2752 CTLFLAG_RW
| CTLFLAG_LOCKED
, &ipc_voucher_trace_contents
, 0, "Enable tracing voucher contents");
2755 * Kernel stack size and depth
2757 SYSCTL_INT (_kern
, OID_AUTO
, stack_size
,
2758 CTLFLAG_RD
| CTLFLAG_LOCKED
, (int *) &kernel_stack_size
, 0, "Kernel stack size");
2759 SYSCTL_INT (_kern
, OID_AUTO
, stack_depth_max
,
2760 CTLFLAG_RD
| CTLFLAG_LOCKED
, (int *) &kernel_stack_depth_max
, 0, "Max kernel stack depth at interrupt or context switch");
2763 * enable back trace for port allocations
2765 extern int ipc_portbt
;
2767 SYSCTL_INT(_kern
, OID_AUTO
, ipc_portbt
,
2768 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2769 &ipc_portbt
, 0, "");
2775 SYSCTL_STRING(_kern
, OID_AUTO
, sched
,
2776 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2777 sched_string
, sizeof(sched_string
),
2778 "Timeshare scheduler implementation");
2781 * Only support runtime modification on embedded platforms
2782 * with development config enabled
2786 /* Parameters related to timer coalescing tuning, to be replaced
2787 * with a dedicated systemcall in the future.
2789 /* Enable processing pending timers in the context of any other interrupt
2790 * Coalescing tuning parameters for various thread/task attributes */
2792 sysctl_timer_user_us_kernel_abstime SYSCTL_HANDLER_ARGS
2794 #pragma unused(oidp)
2795 int size
= arg2
; /* subcommand*/
2798 uint64_t old_value_ns
;
2799 uint64_t new_value_ns
;
2800 uint64_t value_abstime
;
2801 if (size
== sizeof(uint32_t))
2802 value_abstime
= *((uint32_t *)arg1
);
2803 else if (size
== sizeof(uint64_t))
2804 value_abstime
= *((uint64_t *)arg1
);
2805 else return ENOTSUP
;
2807 absolutetime_to_nanoseconds(value_abstime
, &old_value_ns
);
2808 error
= sysctl_io_number(req
, old_value_ns
, sizeof(old_value_ns
), &new_value_ns
, &changed
);
2809 if ((error
) || (!changed
))
2812 nanoseconds_to_absolutetime(new_value_ns
, &value_abstime
);
2813 if (size
== sizeof(uint32_t))
2814 *((uint32_t *)arg1
) = (uint32_t)value_abstime
;
2816 *((uint64_t *)arg1
) = value_abstime
;
2820 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_bg_scale
,
2821 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2822 &tcoal_prio_params
.timer_coalesce_bg_shift
, 0, "");
2823 SYSCTL_PROC(_kern
, OID_AUTO
, timer_resort_threshold_ns
,
2824 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2825 &tcoal_prio_params
.timer_resort_threshold_abstime
,
2826 sizeof(tcoal_prio_params
.timer_resort_threshold_abstime
),
2827 sysctl_timer_user_us_kernel_abstime
,
2829 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_bg_ns_max
,
2830 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2831 &tcoal_prio_params
.timer_coalesce_bg_abstime_max
,
2832 sizeof(tcoal_prio_params
.timer_coalesce_bg_abstime_max
),
2833 sysctl_timer_user_us_kernel_abstime
,
2836 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_kt_scale
,
2837 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2838 &tcoal_prio_params
.timer_coalesce_kt_shift
, 0, "");
2840 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_kt_ns_max
,
2841 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2842 &tcoal_prio_params
.timer_coalesce_kt_abstime_max
,
2843 sizeof(tcoal_prio_params
.timer_coalesce_kt_abstime_max
),
2844 sysctl_timer_user_us_kernel_abstime
,
2847 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_fp_scale
,
2848 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2849 &tcoal_prio_params
.timer_coalesce_fp_shift
, 0, "");
2851 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_fp_ns_max
,
2852 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2853 &tcoal_prio_params
.timer_coalesce_fp_abstime_max
,
2854 sizeof(tcoal_prio_params
.timer_coalesce_fp_abstime_max
),
2855 sysctl_timer_user_us_kernel_abstime
,
2858 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_ts_scale
,
2859 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2860 &tcoal_prio_params
.timer_coalesce_ts_shift
, 0, "");
2862 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_ts_ns_max
,
2863 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2864 &tcoal_prio_params
.timer_coalesce_ts_abstime_max
,
2865 sizeof(tcoal_prio_params
.timer_coalesce_ts_abstime_max
),
2866 sysctl_timer_user_us_kernel_abstime
,
2869 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier0_scale
,
2870 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2871 &tcoal_prio_params
.latency_qos_scale
[0], 0, "");
2873 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier0_ns_max
,
2874 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2875 &tcoal_prio_params
.latency_qos_abstime_max
[0],
2876 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[0]),
2877 sysctl_timer_user_us_kernel_abstime
,
2880 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier1_scale
,
2881 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2882 &tcoal_prio_params
.latency_qos_scale
[1], 0, "");
2884 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier1_ns_max
,
2885 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2886 &tcoal_prio_params
.latency_qos_abstime_max
[1],
2887 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[1]),
2888 sysctl_timer_user_us_kernel_abstime
,
2891 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier2_scale
,
2892 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2893 &tcoal_prio_params
.latency_qos_scale
[2], 0, "");
2895 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier2_ns_max
,
2896 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2897 &tcoal_prio_params
.latency_qos_abstime_max
[2],
2898 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[2]),
2899 sysctl_timer_user_us_kernel_abstime
,
2902 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier3_scale
,
2903 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2904 &tcoal_prio_params
.latency_qos_scale
[3], 0, "");
2906 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier3_ns_max
,
2907 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2908 &tcoal_prio_params
.latency_qos_abstime_max
[3],
2909 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[3]),
2910 sysctl_timer_user_us_kernel_abstime
,
2913 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier4_scale
,
2914 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2915 &tcoal_prio_params
.latency_qos_scale
[4], 0, "");
2917 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier4_ns_max
,
2918 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2919 &tcoal_prio_params
.latency_qos_abstime_max
[4],
2920 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[4]),
2921 sysctl_timer_user_us_kernel_abstime
,
2924 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalesce_tier5_scale
,
2925 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2926 &tcoal_prio_params
.latency_qos_scale
[5], 0, "");
2928 SYSCTL_PROC(_kern
, OID_AUTO
, timer_coalesce_tier5_ns_max
,
2929 CTLTYPE_QUAD
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2930 &tcoal_prio_params
.latency_qos_abstime_max
[5],
2931 sizeof(tcoal_prio_params
.latency_qos_abstime_max
[5]),
2932 sysctl_timer_user_us_kernel_abstime
,
2935 /* Communicate the "user idle level" heuristic to the timer layer, and
2936 * potentially other layers in the future.
2940 timer_user_idle_level(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
) {
2941 int new_value
= 0, old_value
= 0, changed
= 0, error
;
2943 old_value
= timer_get_user_idle_level();
2945 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
2947 if (error
== 0 && changed
) {
2948 if (timer_set_user_idle_level(new_value
) != KERN_SUCCESS
)
2955 SYSCTL_PROC(_machdep
, OID_AUTO
, user_idle_level
,
2956 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2958 timer_user_idle_level
, "I", "User idle level heuristic, 0-128");
2961 SYSCTL_INT(_kern
, OID_AUTO
, hv_support
,
2962 CTLFLAG_KERN
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2963 &hv_support_available
, 0, "");