2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
67 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
68 * support for mandatory and extensible security protections. This notice
69 * is included in support of clause 2.2 (b) of the Apple Public License,
74 * DEPRECATED sysctl system call code
76 * Everything in this file is deprecated. Sysctls should be handled
77 * by the code in kern_newsysctl.c.
78 * The remaining "case" sections are supposed to be converted into
79 * SYSCTL_*-style definitions, and as soon as all of them are gone,
80 * this source file is supposed to die.
82 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
83 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/malloc.h>
90 #include <sys/proc_internal.h>
91 #include <sys/kauth.h>
92 #include <sys/file_internal.h>
93 #include <sys/vnode_internal.h>
94 #include <sys/unistd.h>
96 #include <sys/ioctl.h>
97 #include <sys/namei.h>
99 #include <sys/disklabel.h>
101 #include <sys/sysctl.h>
102 #include <sys/user.h>
103 #include <sys/aio_kern.h>
104 #include <sys/reboot.h>
105 #include <sys/memory_maintenance.h>
106 #include <sys/priv.h>
108 #include <security/audit/audit.h>
109 #include <kern/kalloc.h>
111 #include <mach/machine.h>
112 #include <mach/mach_host.h>
113 #include <mach/mach_types.h>
114 #include <mach/vm_param.h>
115 #include <kern/mach_param.h>
116 #include <kern/task.h>
117 #include <kern/thread.h>
118 #include <kern/processor.h>
119 #include <kern/debug.h>
120 #include <kern/sched_prim.h>
121 #include <vm/vm_kern.h>
122 #include <vm/vm_map.h>
123 #include <mach/host_info.h>
125 #include <sys/mount_internal.h>
126 #include <sys/kdebug.h>
128 #include <IOKit/IOPlatformExpert.h>
129 #include <pexpert/pexpert.h>
131 #include <machine/machine_routines.h>
132 #include <machine/exec.h>
134 #include <vm/vm_protos.h>
135 #include <vm/vm_pageout.h>
136 #include <sys/imgsrc.h>
137 #include <kern/timer_call.h>
139 #if defined(__i386__) || defined(__x86_64__)
140 #include <i386/cpuid.h>
144 #include <sys/kern_memorystatus.h>
148 #include <kperf/kperf.h>
152 #include <kern/hv_support.h>
156 * deliberately setting max requests to really high number
157 * so that runaway settings do not cause MALLOC overflows
159 #define AIO_MAX_REQUESTS (128 * CONFIG_AIO_MAX)
161 extern int aio_max_requests
;
162 extern int aio_max_requests_per_process
;
163 extern int aio_worker_threads
;
164 extern int lowpri_IO_window_msecs
;
165 extern int lowpri_IO_delay_msecs
;
166 extern int nx_enabled
;
167 extern int speculative_reads_disabled
;
168 extern int ignore_is_ssd
;
169 extern unsigned int speculative_prefetch_max
;
170 extern unsigned int speculative_prefetch_max_iosize
;
171 extern unsigned int preheat_max_bytes
;
172 extern unsigned int preheat_min_bytes
;
173 extern long numvnodes
;
175 extern uuid_string_t bootsessionuuid_string
;
177 extern unsigned int vm_max_delayed_work_limit
;
178 extern unsigned int vm_max_batch
;
180 extern unsigned int vm_page_free_min
;
181 extern unsigned int vm_page_free_target
;
182 extern unsigned int vm_page_free_reserved
;
183 extern unsigned int vm_page_speculative_percentage
;
184 extern unsigned int vm_page_speculative_q_age_ms
;
186 #if (DEVELOPMENT || DEBUG)
187 extern uint32_t vm_page_creation_throttled_hard
;
188 extern uint32_t vm_page_creation_throttled_soft
;
189 #endif /* DEVELOPMENT || DEBUG */
192 * Conditionally allow dtrace to see these functions for debugging purposes.
200 #define STATIC static
203 extern boolean_t mach_timer_coalescing_enabled
;
205 extern uint64_t timer_deadline_tracking_bin_1
, timer_deadline_tracking_bin_2
;
208 fill_user32_eproc(proc_t
, struct user32_eproc
*__restrict
);
210 fill_user32_externproc(proc_t
, struct user32_extern_proc
*__restrict
);
212 fill_user64_eproc(proc_t
, struct user64_eproc
*__restrict
);
214 fill_user64_proc(proc_t
, struct user64_kinfo_proc
*__restrict
);
216 fill_user64_externproc(proc_t
, struct user64_extern_proc
*__restrict
);
218 fill_user32_proc(proc_t
, struct user32_kinfo_proc
*__restrict
);
221 kdbg_control(int *name
, u_int namelen
, user_addr_t where
, size_t * sizep
);
227 pcsamples_ops(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
,
229 __private_extern__ kern_return_t
230 reset_vmobjectcache(unsigned int val1
, unsigned int val2
);
232 sysctl_procargs(int *name
, u_int namelen
, user_addr_t where
,
233 size_t *sizep
, proc_t cur_proc
);
235 sysctl_procargsx(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
,
236 proc_t cur_proc
, int argc_yes
);
238 sysctl_struct(user_addr_t oldp
, size_t *oldlenp
, user_addr_t newp
,
239 size_t newlen
, void *sp
, int len
);
241 STATIC
int sysdoproc_filt_KERN_PROC_PID(proc_t p
, void * arg
);
242 STATIC
int sysdoproc_filt_KERN_PROC_PGRP(proc_t p
, void * arg
);
243 STATIC
int sysdoproc_filt_KERN_PROC_TTY(proc_t p
, void * arg
);
244 STATIC
int sysdoproc_filt_KERN_PROC_UID(proc_t p
, void * arg
);
245 STATIC
int sysdoproc_filt_KERN_PROC_RUID(proc_t p
, void * arg
);
246 int sysdoproc_callback(proc_t p
, void *arg
);
249 /* forward declarations for non-static STATIC */
250 STATIC
void fill_loadavg64(struct loadavg
*la
, struct user64_loadavg
*la64
);
251 STATIC
void fill_loadavg32(struct loadavg
*la
, struct user32_loadavg
*la32
);
252 STATIC
int sysctl_handle_kern_threadname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
253 STATIC
int sysctl_sched_stats(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
254 STATIC
int sysctl_sched_stats_enable(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
255 STATIC
int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
;
257 STATIC
int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
;
258 #endif /* COUNT_SYSCALLS */
259 STATIC
int sysctl_doprocargs SYSCTL_HANDLER_ARGS
;
260 STATIC
int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
;
261 STATIC
int sysctl_prochandle SYSCTL_HANDLER_ARGS
;
262 STATIC
int sysctl_aiomax(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
263 STATIC
int sysctl_aioprocmax(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
264 STATIC
int sysctl_aiothreads(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
265 STATIC
int sysctl_maxproc(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
266 STATIC
int sysctl_osversion(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
267 STATIC
int sysctl_sysctl_bootargs(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
268 STATIC
int sysctl_maxvnodes(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
269 STATIC
int sysctl_securelvl(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
270 STATIC
int sysctl_domainname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
271 STATIC
int sysctl_hostname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
272 STATIC
int sysctl_procname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
273 STATIC
int sysctl_boottime(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
274 STATIC
int sysctl_symfile(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
276 STATIC
int sysctl_netboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
278 #ifdef CONFIG_IMGSRC_ACCESS
279 STATIC
int sysctl_imgsrcdev(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
281 STATIC
int sysctl_usrstack(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
282 STATIC
int sysctl_usrstack64(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
283 STATIC
int sysctl_coredump(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
284 STATIC
int sysctl_suid_coredump(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
285 STATIC
int sysctl_delayterm(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
286 STATIC
int sysctl_rage_vnode(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
287 STATIC
int sysctl_kern_check_openevt(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
288 STATIC
int sysctl_nx(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
289 STATIC
int sysctl_loadavg(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
290 STATIC
int sysctl_vm_toggle_address_reuse(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
291 STATIC
int sysctl_swapusage(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
292 STATIC
int fetch_process_cputype( proc_t cur_proc
, int *name
, u_int namelen
, cpu_type_t
*cputype
);
293 STATIC
int sysctl_sysctl_native(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
294 STATIC
int sysctl_sysctl_cputype(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
295 STATIC
int sysctl_safeboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
296 STATIC
int sysctl_singleuser(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
297 STATIC
int sysctl_minimalboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
298 STATIC
int sysctl_slide(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
301 extern void IORegistrySetOSBuildVersion(char * build_version
);
304 fill_loadavg64(struct loadavg
*la
, struct user64_loadavg
*la64
)
306 la64
->ldavg
[0] = la
->ldavg
[0];
307 la64
->ldavg
[1] = la
->ldavg
[1];
308 la64
->ldavg
[2] = la
->ldavg
[2];
309 la64
->fscale
= (user64_long_t
)la
->fscale
;
313 fill_loadavg32(struct loadavg
*la
, struct user32_loadavg
*la32
)
315 la32
->ldavg
[0] = la
->ldavg
[0];
316 la32
->ldavg
[1] = la
->ldavg
[1];
317 la32
->ldavg
[2] = la
->ldavg
[2];
318 la32
->fscale
= (user32_long_t
)la
->fscale
;
322 * Attributes stored in the kernel.
324 extern char corefilename
[MAXPATHLEN
+1];
325 extern int do_coredump
;
326 extern int sugid_coredump
;
329 extern int do_count_syscalls
;
333 int securelevel
= -1;
339 sysctl_handle_kern_threadname( __unused
struct sysctl_oid
*oidp
, __unused
void *arg1
,
340 __unused
int arg2
, struct sysctl_req
*req
)
343 struct uthread
*ut
= get_bsdthread_info(current_thread());
344 user_addr_t oldp
=0, newp
=0;
345 size_t *oldlenp
=NULL
;
349 oldlenp
= &(req
->oldlen
);
351 newlen
= req
->newlen
;
353 /* We want the current length, and maybe the string itself */
355 /* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */
356 size_t currlen
= MAXTHREADNAMESIZE
- 1;
359 /* use length of current thread name */
360 currlen
= strlen(ut
->pth_name
);
362 if(*oldlenp
< currlen
)
364 /* NOTE - we do not copy the NULL terminator */
366 error
= copyout(ut
->pth_name
,oldp
,currlen
);
371 /* return length of thread name minus NULL terminator (just like strlen) */
372 req
->oldidx
= currlen
;
375 /* We want to set the name to something */
378 if(newlen
> (MAXTHREADNAMESIZE
- 1))
382 ut
->pth_name
= (char*)kalloc( MAXTHREADNAMESIZE
);
386 bzero(ut
->pth_name
, MAXTHREADNAMESIZE
);
387 error
= copyin(newp
, ut
->pth_name
, newlen
);
395 SYSCTL_PROC(_kern
, KERN_THREADNAME
, threadname
, CTLFLAG_ANYBODY
| CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, 0, sysctl_handle_kern_threadname
,"A","");
399 sysctl_sched_stats(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
401 host_basic_info_data_t hinfo
;
405 mach_msg_type_number_t count
= HOST_BASIC_INFO_COUNT
;
406 struct _processor_statistics_np
*buf
;
409 kret
= host_info((host_t
)BSD_HOST
, HOST_BASIC_INFO
, (host_info_t
)&hinfo
, &count
);
410 if (kret
!= KERN_SUCCESS
) {
414 size
= sizeof(struct _processor_statistics_np
) * (hinfo
.logical_cpu_max
+ 2); /* One for RT Queue, One for Fair Share Queue */
416 if (req
->oldlen
< size
) {
420 MALLOC(buf
, struct _processor_statistics_np
*, size
, M_TEMP
, M_ZERO
| M_WAITOK
);
422 kret
= get_sched_statistics(buf
, &size
);
423 if (kret
!= KERN_SUCCESS
) {
428 error
= sysctl_io_opaque(req
, buf
, size
, &changed
);
434 panic("Sched info changed?!");
441 SYSCTL_PROC(_kern
, OID_AUTO
, sched_stats
, CTLFLAG_LOCKED
, 0, 0, sysctl_sched_stats
, "-", "");
444 sysctl_sched_stats_enable(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, __unused
struct sysctl_req
*req
)
449 if (req
->newlen
!= sizeof(active
)) {
453 res
= copyin(req
->newptr
, &active
, sizeof(active
));
458 return set_sched_stats_active(active
);
461 SYSCTL_PROC(_kern
, OID_AUTO
, sched_stats_enable
, CTLFLAG_LOCKED
| CTLFLAG_WR
, 0, 0, sysctl_sched_stats_enable
, "-", "");
463 extern uint32_t sched_debug_flags
;
464 SYSCTL_INT(_debug
, OID_AUTO
, sched
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &sched_debug_flags
, 0, "scheduler debug");
/* debug.hide_kernel_pointers: DEBUG/DEVELOPMENT-only log pointer scrubbing
 * (the closing #endif was dropped by the extraction; restored here) */
#if (DEBUG || DEVELOPMENT)
extern boolean_t doprnt_hide_pointers;
SYSCTL_INT(_debug, OID_AUTO, hide_kernel_pointers, CTLFLAG_RW | CTLFLAG_LOCKED,
    &doprnt_hide_pointers, 0, "hide kernel pointers from log");
#endif
471 extern int get_kernel_symfile(proc_t
, char **);
474 #define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)
477 extern int syscalls_log
[];
478 extern const char *syscallnames
[];
481 sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
483 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
484 __unused
int *name
= arg1
; /* oid element argument vector */
485 __unused
int namelen
= arg2
; /* number of oid element arguments */
486 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
487 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
488 user_addr_t newp
= req
->newptr
; /* user buffer copy in address */
489 size_t newlen
= req
->newlen
; /* user buffer copy in size */
494 /* valid values passed in:
495 * = 0 means don't keep called counts for each bsd syscall
496 * > 0 means keep called counts for each bsd syscall
497 * = 2 means dump current counts to the system log
498 * = 3 means reset all counts
499 * for example, to dump current counts:
500 * sysctl -w kern.count_calls=2
502 error
= sysctl_int(oldp
, oldlenp
, newp
, newlen
, &tmp
);
508 do_count_syscalls
= 1;
510 else if ( tmp
== 0 || tmp
== 2 || tmp
== 3 ) {
512 for ( i
= 0; i
< nsysent
; i
++ ) {
513 if ( syscalls_log
[i
] != 0 ) {
515 printf("%d calls - name %s \n", syscalls_log
[i
], syscallnames
[i
]);
523 do_count_syscalls
= 1;
527 /* adjust index so we return the right required/consumed amount */
529 req
->oldidx
+= req
->oldlen
;
533 SYSCTL_PROC(_kern
, KERN_COUNT_SYSCALLS
, count_syscalls
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
534 0, /* Pointer argument (arg1) */
535 0, /* Integer argument (arg2) */
536 sysctl_docountsyscalls
, /* Handler function */
537 NULL
, /* Data pointer */
539 #endif /* COUNT_SYSCALLS */
542 * The following sysctl_* functions should not be used
543 * any more, as they can only cope with callers in
544 * user mode: Use new-style
552 * Validate parameters and get old / set new parameters
553 * for an integer-valued sysctl function.
556 sysctl_int(user_addr_t oldp
, size_t *oldlenp
,
557 user_addr_t newp
, size_t newlen
, int *valp
)
561 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
563 if (oldp
&& *oldlenp
< sizeof(int))
565 if (newp
&& newlen
!= sizeof(int))
567 *oldlenp
= sizeof(int);
569 error
= copyout(valp
, oldp
, sizeof(int));
570 if (error
== 0 && newp
) {
571 error
= copyin(newp
, valp
, sizeof(int));
572 AUDIT_ARG(value32
, *valp
);
578 * Validate parameters and get old / set new parameters
579 * for an quad(64bit)-valued sysctl function.
582 sysctl_quad(user_addr_t oldp
, size_t *oldlenp
,
583 user_addr_t newp
, size_t newlen
, quad_t
*valp
)
587 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
589 if (oldp
&& *oldlenp
< sizeof(quad_t
))
591 if (newp
&& newlen
!= sizeof(quad_t
))
593 *oldlenp
= sizeof(quad_t
);
595 error
= copyout(valp
, oldp
, sizeof(quad_t
));
596 if (error
== 0 && newp
)
597 error
= copyin(newp
, valp
, sizeof(quad_t
));
602 sysdoproc_filt_KERN_PROC_PID(proc_t p
, void * arg
)
604 if (p
->p_pid
!= (pid_t
)*(int*)arg
)
611 sysdoproc_filt_KERN_PROC_PGRP(proc_t p
, void * arg
)
613 if (p
->p_pgrpid
!= (pid_t
)*(int*)arg
)
620 sysdoproc_filt_KERN_PROC_TTY(proc_t p
, void * arg
)
625 /* This is very racy but list lock is held.. Hmmm. */
626 if ((p
->p_flag
& P_CONTROLT
) == 0 ||
627 (p
->p_pgrp
== NULL
) || (p
->p_pgrp
->pg_session
== NULL
) ||
628 (tp
= SESSION_TP(p
->p_pgrp
->pg_session
)) == TTY_NULL
||
629 tp
->t_dev
!= (dev_t
)*(int*)arg
)
638 sysdoproc_filt_KERN_PROC_UID(proc_t p
, void * arg
)
640 kauth_cred_t my_cred
;
643 if (p
->p_ucred
== NULL
)
645 my_cred
= kauth_cred_proc_ref(p
);
646 uid
= kauth_cred_getuid(my_cred
);
647 kauth_cred_unref(&my_cred
);
649 if (uid
!= (uid_t
)*(int*)arg
)
657 sysdoproc_filt_KERN_PROC_RUID(proc_t p
, void * arg
)
659 kauth_cred_t my_cred
;
662 if (p
->p_ucred
== NULL
)
664 my_cred
= kauth_cred_proc_ref(p
);
665 ruid
= kauth_cred_getruid(my_cred
);
666 kauth_cred_unref(&my_cred
);
668 if (ruid
!= (uid_t
)*(int*)arg
)
675 * try over estimating by 5 procs
677 #define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
678 struct sysdoproc_args
{
693 sysdoproc_callback(proc_t p
, void *arg
)
695 struct sysdoproc_args
*args
= arg
;
697 if (args
->buflen
>= args
->sizeof_kproc
) {
698 if ((args
->ruidcheck
!= 0) && (sysdoproc_filt_KERN_PROC_RUID(p
, &args
->uidval
) == 0))
699 return (PROC_RETURNED
);
700 if ((args
->uidcheck
!= 0) && (sysdoproc_filt_KERN_PROC_UID(p
, &args
->uidval
) == 0))
701 return (PROC_RETURNED
);
702 if ((args
->ttycheck
!= 0) && (sysdoproc_filt_KERN_PROC_TTY(p
, &args
->uidval
) == 0))
703 return (PROC_RETURNED
);
705 bzero(args
->kprocp
, args
->sizeof_kproc
);
707 fill_user64_proc(p
, args
->kprocp
);
709 fill_user32_proc(p
, args
->kprocp
);
710 int error
= copyout(args
->kprocp
, args
->dp
, args
->sizeof_kproc
);
712 *args
->errorp
= error
;
713 return (PROC_RETURNED_DONE
);
715 args
->dp
+= args
->sizeof_kproc
;
716 args
->buflen
-= args
->sizeof_kproc
;
718 args
->needed
+= args
->sizeof_kproc
;
719 return (PROC_RETURNED
);
722 SYSCTL_NODE(_kern
, KERN_PROC
, proc
, CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, "");
724 sysctl_prochandle SYSCTL_HANDLER_ARGS
726 int cmd
= oidp
->oid_arg2
; /* subcommand for multiple nodes */
727 int *name
= arg1
; /* oid element argument vector */
728 int namelen
= arg2
; /* number of oid element arguments */
729 user_addr_t where
= req
->oldptr
;/* user buffer copy out address */
731 user_addr_t dp
= where
;
733 int buflen
= where
!= USER_ADDR_NULL
? req
->oldlen
: 0;
735 boolean_t is_64_bit
= proc_is64bit(current_proc());
736 struct user32_kinfo_proc user32_kproc
;
737 struct user64_kinfo_proc user_kproc
;
740 int (*filterfn
)(proc_t
, void *) = 0;
741 struct sysdoproc_args args
;
746 if (namelen
!= 1 && !(namelen
== 0 && cmd
== KERN_PROC_ALL
))
750 sizeof_kproc
= sizeof(user_kproc
);
751 kprocp
= &user_kproc
;
753 sizeof_kproc
= sizeof(user32_kproc
);
754 kprocp
= &user32_kproc
;
760 filterfn
= sysdoproc_filt_KERN_PROC_PID
;
764 filterfn
= sysdoproc_filt_KERN_PROC_PGRP
;
783 /* must be kern.proc.<unknown> */
788 args
.buflen
= buflen
;
789 args
.kprocp
= kprocp
;
790 args
.is_64_bit
= is_64_bit
;
792 args
.needed
= needed
;
793 args
.errorp
= &error
;
794 args
.uidcheck
= uidcheck
;
795 args
.ruidcheck
= ruidcheck
;
796 args
.ttycheck
= ttycheck
;
797 args
.sizeof_kproc
= sizeof_kproc
;
799 args
.uidval
= name
[0];
801 proc_iterate((PROC_ALLPROCLIST
| PROC_ZOMBPROCLIST
),
802 sysdoproc_callback
, &args
, filterfn
, name
);
808 needed
= args
.needed
;
810 if (where
!= USER_ADDR_NULL
) {
811 req
->oldlen
= dp
- where
;
812 if (needed
> req
->oldlen
)
815 needed
+= KERN_PROCSLOP
;
816 req
->oldlen
= needed
;
818 /* adjust index so we return the right required/consumed amount */
819 req
->oldidx
+= req
->oldlen
;
824 * We specify the subcommand code for multiple nodes as the 'req->arg2' value
825 * in the sysctl declaration itself, which comes into the handler function
826 * as 'oidp->oid_arg2'.
828 * For these particular sysctls, since they have well known OIDs, we could
829 * have just obtained it from the '((int *)arg1)[0]' parameter, but that would
830 * not demonstrate how to handle multiple sysctls that used OID_AUTO instead
831 * of a well known value with a common handler function. This is desirable,
832 * because we want well known values to "go away" at some future date.
834 * It should be noted that the value of '((int *)arg1)[1]' is used for many
835 * an integer parameter to the subcommand for many of these sysctls; we'd
836 * rather have used '((int *)arg1)[0]' for that, or even better, an element
837 * in a structure passed in as the the 'newp' argument to sysctlbyname(3),
838 * and then use leaf-node permissions enforcement, but that would have
839 * necessitated modifying user space code to correspond to the interface
840 * change, and we are striving for binary backward compatibility here; even
841 * though these are SPI, and not intended for use by user space applications
842 * which are not themselves system tools or libraries, some applications
843 * have erroneously used them.
845 SYSCTL_PROC(_kern_proc
, KERN_PROC_ALL
, all
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
846 0, /* Pointer argument (arg1) */
847 KERN_PROC_ALL
, /* Integer argument (arg2) */
848 sysctl_prochandle
, /* Handler function */
849 NULL
, /* Data is size variant on ILP32/LP64 */
851 SYSCTL_PROC(_kern_proc
, KERN_PROC_PID
, pid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
852 0, /* Pointer argument (arg1) */
853 KERN_PROC_PID
, /* Integer argument (arg2) */
854 sysctl_prochandle
, /* Handler function */
855 NULL
, /* Data is size variant on ILP32/LP64 */
857 SYSCTL_PROC(_kern_proc
, KERN_PROC_TTY
, tty
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
858 0, /* Pointer argument (arg1) */
859 KERN_PROC_TTY
, /* Integer argument (arg2) */
860 sysctl_prochandle
, /* Handler function */
861 NULL
, /* Data is size variant on ILP32/LP64 */
863 SYSCTL_PROC(_kern_proc
, KERN_PROC_PGRP
, pgrp
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
864 0, /* Pointer argument (arg1) */
865 KERN_PROC_PGRP
, /* Integer argument (arg2) */
866 sysctl_prochandle
, /* Handler function */
867 NULL
, /* Data is size variant on ILP32/LP64 */
869 SYSCTL_PROC(_kern_proc
, KERN_PROC_UID
, uid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
870 0, /* Pointer argument (arg1) */
871 KERN_PROC_UID
, /* Integer argument (arg2) */
872 sysctl_prochandle
, /* Handler function */
873 NULL
, /* Data is size variant on ILP32/LP64 */
875 SYSCTL_PROC(_kern_proc
, KERN_PROC_RUID
, ruid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
876 0, /* Pointer argument (arg1) */
877 KERN_PROC_RUID
, /* Integer argument (arg2) */
878 sysctl_prochandle
, /* Handler function */
879 NULL
, /* Data is size variant on ILP32/LP64 */
881 SYSCTL_PROC(_kern_proc
, KERN_PROC_LCID
, lcid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
882 0, /* Pointer argument (arg1) */
883 KERN_PROC_LCID
, /* Integer argument (arg2) */
884 sysctl_prochandle
, /* Handler function */
885 NULL
, /* Data is size variant on ILP32/LP64 */
890 * Fill in non-zero fields of an eproc structure for the specified process.
893 fill_user32_eproc(proc_t p
, struct user32_eproc
*__restrict ep
)
897 struct session
*sessp
;
898 kauth_cred_t my_cred
;
901 sessp
= proc_session(p
);
903 if (pg
!= PGRP_NULL
) {
904 ep
->e_pgid
= p
->p_pgrpid
;
905 ep
->e_jobc
= pg
->pg_jobc
;
906 if (sessp
!= SESSION_NULL
&& sessp
->s_ttyvp
)
907 ep
->e_flag
= EPROC_CTTY
;
909 ep
->e_ppid
= p
->p_ppid
;
911 my_cred
= kauth_cred_proc_ref(p
);
913 /* A fake historical pcred */
914 ep
->e_pcred
.p_ruid
= kauth_cred_getruid(my_cred
);
915 ep
->e_pcred
.p_svuid
= kauth_cred_getsvuid(my_cred
);
916 ep
->e_pcred
.p_rgid
= kauth_cred_getrgid(my_cred
);
917 ep
->e_pcred
.p_svgid
= kauth_cred_getsvgid(my_cred
);
919 /* A fake historical *kauth_cred_t */
920 ep
->e_ucred
.cr_ref
= my_cred
->cr_ref
;
921 ep
->e_ucred
.cr_uid
= kauth_cred_getuid(my_cred
);
922 ep
->e_ucred
.cr_ngroups
= posix_cred_get(my_cred
)->cr_ngroups
;
923 bcopy(posix_cred_get(my_cred
)->cr_groups
,
924 ep
->e_ucred
.cr_groups
, NGROUPS
* sizeof (gid_t
));
926 kauth_cred_unref(&my_cred
);
929 if ((p
->p_flag
& P_CONTROLT
) && (sessp
!= SESSION_NULL
) &&
930 (tp
= SESSION_TP(sessp
))) {
931 ep
->e_tdev
= tp
->t_dev
;
932 ep
->e_tpgid
= sessp
->s_ttypgrpid
;
936 if (sessp
!= SESSION_NULL
) {
937 if (SESS_LEADER(p
, sessp
))
938 ep
->e_flag
|= EPROC_SLEADER
;
946 * Fill in non-zero fields of an LP64 eproc structure for the specified process.
949 fill_user64_eproc(proc_t p
, struct user64_eproc
*__restrict ep
)
953 struct session
*sessp
;
954 kauth_cred_t my_cred
;
957 sessp
= proc_session(p
);
959 if (pg
!= PGRP_NULL
) {
960 ep
->e_pgid
= p
->p_pgrpid
;
961 ep
->e_jobc
= pg
->pg_jobc
;
962 if (sessp
!= SESSION_NULL
&& sessp
->s_ttyvp
)
963 ep
->e_flag
= EPROC_CTTY
;
965 ep
->e_ppid
= p
->p_ppid
;
967 my_cred
= kauth_cred_proc_ref(p
);
969 /* A fake historical pcred */
970 ep
->e_pcred
.p_ruid
= kauth_cred_getruid(my_cred
);
971 ep
->e_pcred
.p_svuid
= kauth_cred_getsvuid(my_cred
);
972 ep
->e_pcred
.p_rgid
= kauth_cred_getrgid(my_cred
);
973 ep
->e_pcred
.p_svgid
= kauth_cred_getsvgid(my_cred
);
975 /* A fake historical *kauth_cred_t */
976 ep
->e_ucred
.cr_ref
= my_cred
->cr_ref
;
977 ep
->e_ucred
.cr_uid
= kauth_cred_getuid(my_cred
);
978 ep
->e_ucred
.cr_ngroups
= posix_cred_get(my_cred
)->cr_ngroups
;
979 bcopy(posix_cred_get(my_cred
)->cr_groups
,
980 ep
->e_ucred
.cr_groups
, NGROUPS
* sizeof (gid_t
));
982 kauth_cred_unref(&my_cred
);
985 if ((p
->p_flag
& P_CONTROLT
) && (sessp
!= SESSION_NULL
) &&
986 (tp
= SESSION_TP(sessp
))) {
987 ep
->e_tdev
= tp
->t_dev
;
988 ep
->e_tpgid
= sessp
->s_ttypgrpid
;
992 if (sessp
!= SESSION_NULL
) {
993 if (SESS_LEADER(p
, sessp
))
994 ep
->e_flag
|= EPROC_SLEADER
;
1002 * Fill in an eproc structure for the specified process.
1003 * bzeroed by our caller, so only set non-zero fields.
1006 fill_user32_externproc(proc_t p
, struct user32_extern_proc
*__restrict exp
)
1008 exp
->p_starttime
.tv_sec
= p
->p_start
.tv_sec
;
1009 exp
->p_starttime
.tv_usec
= p
->p_start
.tv_usec
;
1010 exp
->p_flag
= p
->p_flag
;
1011 if (p
->p_lflag
& P_LTRACED
)
1012 exp
->p_flag
|= P_TRACED
;
1013 if (p
->p_lflag
& P_LPPWAIT
)
1014 exp
->p_flag
|= P_PPWAIT
;
1015 if (p
->p_lflag
& P_LEXIT
)
1016 exp
->p_flag
|= P_WEXIT
;
1017 exp
->p_stat
= p
->p_stat
;
1018 exp
->p_pid
= p
->p_pid
;
1019 exp
->p_oppid
= p
->p_oppid
;
1021 exp
->user_stack
= p
->user_stack
;
1022 exp
->p_debugger
= p
->p_debugger
;
1023 exp
->sigwait
= p
->sigwait
;
1025 #ifdef _PROC_HAS_SCHEDINFO_
1026 exp
->p_estcpu
= p
->p_estcpu
;
1027 exp
->p_pctcpu
= p
->p_pctcpu
;
1028 exp
->p_slptime
= p
->p_slptime
;
1030 exp
->p_realtimer
.it_interval
.tv_sec
=
1031 (user32_time_t
)p
->p_realtimer
.it_interval
.tv_sec
;
1032 exp
->p_realtimer
.it_interval
.tv_usec
=
1033 (__int32_t
)p
->p_realtimer
.it_interval
.tv_usec
;
1035 exp
->p_realtimer
.it_value
.tv_sec
=
1036 (user32_time_t
)p
->p_realtimer
.it_value
.tv_sec
;
1037 exp
->p_realtimer
.it_value
.tv_usec
=
1038 (__int32_t
)p
->p_realtimer
.it_value
.tv_usec
;
1040 exp
->p_rtime
.tv_sec
= (user32_time_t
)p
->p_rtime
.tv_sec
;
1041 exp
->p_rtime
.tv_usec
= (__int32_t
)p
->p_rtime
.tv_usec
;
1043 exp
->p_sigignore
= p
->p_sigignore
;
1044 exp
->p_sigcatch
= p
->p_sigcatch
;
1045 exp
->p_priority
= p
->p_priority
;
1046 exp
->p_nice
= p
->p_nice
;
1047 bcopy(&p
->p_comm
, &exp
->p_comm
, MAXCOMLEN
);
1048 exp
->p_xstat
= p
->p_xstat
;
1049 exp
->p_acflag
= p
->p_acflag
;
1053 * Fill in an LP64 version of extern_proc structure for the specified process.
1056 fill_user64_externproc(proc_t p
, struct user64_extern_proc
*__restrict exp
)
1058 exp
->p_starttime
.tv_sec
= p
->p_start
.tv_sec
;
1059 exp
->p_starttime
.tv_usec
= p
->p_start
.tv_usec
;
1060 exp
->p_flag
= p
->p_flag
;
1061 if (p
->p_lflag
& P_LTRACED
)
1062 exp
->p_flag
|= P_TRACED
;
1063 if (p
->p_lflag
& P_LPPWAIT
)
1064 exp
->p_flag
|= P_PPWAIT
;
1065 if (p
->p_lflag
& P_LEXIT
)
1066 exp
->p_flag
|= P_WEXIT
;
1067 exp
->p_stat
= p
->p_stat
;
1068 exp
->p_pid
= p
->p_pid
;
1069 exp
->p_oppid
= p
->p_oppid
;
1071 exp
->user_stack
= p
->user_stack
;
1072 exp
->p_debugger
= p
->p_debugger
;
1073 exp
->sigwait
= p
->sigwait
;
1075 #ifdef _PROC_HAS_SCHEDINFO_
1076 exp
->p_estcpu
= p
->p_estcpu
;
1077 exp
->p_pctcpu
= p
->p_pctcpu
;
1078 exp
->p_slptime
= p
->p_slptime
;
1080 exp
->p_realtimer
.it_interval
.tv_sec
= p
->p_realtimer
.it_interval
.tv_sec
;
1081 exp
->p_realtimer
.it_interval
.tv_usec
= p
->p_realtimer
.it_interval
.tv_usec
;
1083 exp
->p_realtimer
.it_value
.tv_sec
= p
->p_realtimer
.it_value
.tv_sec
;
1084 exp
->p_realtimer
.it_value
.tv_usec
= p
->p_realtimer
.it_value
.tv_usec
;
1086 exp
->p_rtime
.tv_sec
= p
->p_rtime
.tv_sec
;
1087 exp
->p_rtime
.tv_usec
= p
->p_rtime
.tv_usec
;
1089 exp
->p_sigignore
= p
->p_sigignore
;
1090 exp
->p_sigcatch
= p
->p_sigcatch
;
1091 exp
->p_priority
= p
->p_priority
;
1092 exp
->p_nice
= p
->p_nice
;
1093 bcopy(&p
->p_comm
, &exp
->p_comm
, MAXCOMLEN
);
1094 exp
->p_xstat
= p
->p_xstat
;
1095 exp
->p_acflag
= p
->p_acflag
;
1099 fill_user32_proc(proc_t p
, struct user32_kinfo_proc
*__restrict kp
)
1101 /* on a 64 bit kernel, 32 bit users get some truncated information */
1102 fill_user32_externproc(p
, &kp
->kp_proc
);
1103 fill_user32_eproc(p
, &kp
->kp_eproc
);
1107 fill_user64_proc(proc_t p
, struct user64_kinfo_proc
*__restrict kp
)
1109 fill_user64_externproc(p
, &kp
->kp_proc
);
1110 fill_user64_eproc(p
, &kp
->kp_eproc
);
/*
 * sysctl handler for the kern.kdebug node: decodes the oid arguments and
 * forwards the supported trace subcommands to kdbg_control().
 * NOTE(review): this fragment is extraction-mangled; the function prologue,
 * permission-check branches, switch header, default case and return are
 * missing between the numbered statements.  Code tokens preserved verbatim.
 */
1114 sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
/* decode the generic sysctl handler arguments into locals */
1116 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
1117 int *name
= arg1
; /* oid element argument vector */
1118 int namelen
= arg2
; /* number of oid element arguments */
1119 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1120 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1121 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1122 // size_t newlen = req->newlen; /* user buffer copy in size */
1124 proc_t p
= current_proc();
/* superuser check against the caller's credentials */
1130 ret
= suser(kauth_cred_get(), &p
->p_acflag
);
1132 /* Non-root processes may be blessed by kperf to access data
1133 * logged into trace.
1136 ret
= kperf_access_check();
/* subcommands below are all routed to kdbg_control()
 * (switch header missing from this fragment) */
1151 case KERN_KDWRITETR
:
1152 case KERN_KDWRITEMAP
:
1156 case KERN_KDSETRTCDEC
:
1158 case KERN_KDGETENTROPY
:
1159 case KERN_KDENABLE_BG_TRACE
:
1160 case KERN_KDDISABLE_BG_TRACE
:
1161 case KERN_KDREADCURTHRMAP
:
1162 case KERN_KDSET_TYPEFILTER
:
1163 case KERN_KDBUFWAIT
:
1165 case KERN_KDWAIT_BG_TRACE_RESET
:
1166 case KERN_KDSET_BG_TYPEFILTER
:
1167 case KERN_KDWRITEMAP_V3
:
1168 case KERN_KDWRITETR_V3
:
1169 ret
= kdbg_control(name
, namelen
, oldp
, oldlenp
);
1176 /* adjust index so we return the right required/consumed amount */
1178 req
->oldidx
+= req
->oldlen
;
1182 SYSCTL_PROC(_kern
, KERN_KDEBUG
, kdebug
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1183 0, /* Pointer argument (arg1) */
1184 0, /* Integer argument (arg2) */
1185 sysctl_kdebug_ops
, /* Handler function */
1186 NULL
, /* Data pointer */
/*
 * sysctl handler for kern.procargs: copies out the target process's saved
 * argument area WITHOUT the argc prefix (argc_yes == 0 in the
 * sysctl_procargsx() call below).
 * NOTE(review): extraction-mangled fragment; the function prologue,
 * error-check and return lines are missing.  Code tokens preserved verbatim.
 */
1191 * Return the top *sizep bytes of the user stack, or the entire area of the
1192 * user stack down through the saved exec_path, whichever is smaller.
1195 sysctl_doprocargs SYSCTL_HANDLER_ARGS
/* decode the generic sysctl handler arguments into locals */
1197 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
1198 int *name
= arg1
; /* oid element argument vector */
1199 int namelen
= arg2
; /* number of oid element arguments */
1200 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1201 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1202 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1203 // size_t newlen = req->newlen; /* user buffer copy in size */
/* do the real work; final 0 = do not prepend argc */
1206 error
= sysctl_procargsx( name
, namelen
, oldp
, oldlenp
, current_proc(), 0);
1208 /* adjust index so we return the right required/consumed amount */
1210 req
->oldidx
+= req
->oldlen
;
1214 SYSCTL_PROC(_kern
, KERN_PROCARGS
, procargs
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1215 0, /* Pointer argument (arg1) */
1216 0, /* Integer argument (arg2) */
1217 sysctl_doprocargs
, /* Handler function */
1218 NULL
, /* Data pointer */
/*
 * sysctl handler for kern.procargs2: identical to sysctl_doprocargs except
 * that argc IS prepended to the copied-out data (argc_yes == 1 in the
 * sysctl_procargsx() call below).
 * NOTE(review): extraction-mangled fragment; the function prologue,
 * error-check and return lines are missing.  Code tokens preserved verbatim.
 */
1222 sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
/* decode the generic sysctl handler arguments into locals */
1224 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
1225 int *name
= arg1
; /* oid element argument vector */
1226 int namelen
= arg2
; /* number of oid element arguments */
1227 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1228 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1229 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1230 // size_t newlen = req->newlen; /* user buffer copy in size */
/* do the real work; final 1 = prepend argc as the first word */
1233 error
= sysctl_procargsx( name
, namelen
, oldp
, oldlenp
, current_proc(), 1);
1235 /* adjust index so we return the right required/consumed amount */
1237 req
->oldidx
+= req
->oldlen
;
1241 SYSCTL_PROC(_kern
, KERN_PROCARGS2
, procargs2
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1242 0, /* Pointer argument (arg1) */
1243 0, /* Integer argument (arg2) */
1244 sysctl_doprocargs2
, /* Handler function */
1245 NULL
, /* Data pointer */
1249 sysctl_procargsx(int *name
, u_int namelen
, user_addr_t where
,
1250 size_t *sizep
, proc_t cur_proc
, int argc_yes
)
1253 int buflen
= where
!= USER_ADDR_NULL
? *sizep
: 0;
1255 struct _vm_map
*proc_map
;
1258 user_addr_t arg_addr
;
1263 vm_offset_t copy_start
, copy_end
;
1266 kauth_cred_t my_cred
;
1273 buflen
-= sizeof(int); /* reserve first word to return argc */
1275 /* we only care about buflen when where (oldp from sysctl) is not NULL. */
1276 /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
1277 /* is not NULL then the caller wants us to return the length needed to */
1278 /* hold the data we would return */
1279 if (where
!= USER_ADDR_NULL
&& (buflen
<= 0 || buflen
> ARG_MAX
)) {
1285 * Lookup process by pid
1294 * Copy the top N bytes of the stack.
1295 * On all machines we have so far, the stack grows
1298 * If the user expects no more than N bytes of
1299 * argument list, use that as a guess for the
1303 if (!p
->user_stack
) {
1308 if (where
== USER_ADDR_NULL
) {
1309 /* caller only wants to know length of proc args data */
1310 if (sizep
== NULL
) {
1315 size
= p
->p_argslen
;
1318 size
+= sizeof(int);
1322 * old PROCARGS will return the executable's path and plus some
1323 * extra space for work alignment and data tags
1325 size
+= PATH_MAX
+ (6 * sizeof(int));
1327 size
+= (size
& (sizeof(int) - 1)) ? (sizeof(int) - (size
& (sizeof(int) - 1))) : 0;
1332 my_cred
= kauth_cred_proc_ref(p
);
1333 uid
= kauth_cred_getuid(my_cred
);
1334 kauth_cred_unref(&my_cred
);
1336 if ((uid
!= kauth_cred_getuid(kauth_cred_get()))
1337 && suser(kauth_cred_get(), &cur_proc
->p_acflag
)) {
1342 if ((u_int
)arg_size
> p
->p_argslen
)
1343 arg_size
= round_page(p
->p_argslen
);
1345 arg_addr
= p
->user_stack
- arg_size
;
1349 * Before we can block (any VM code), make another
1350 * reference to the map to keep it alive. We do
1351 * that by getting a reference on the task itself.
1359 argslen
= p
->p_argslen
;
1361 * Once we have a task reference we can convert that into a
1362 * map reference, which we will use in the calls below. The
1363 * task/process may change its map after we take this reference
1364 * (see execve), but the worst that will happen then is a return
1365 * of stale info (which is always a possibility).
1367 task_reference(task
);
1369 proc_map
= get_task_map_reference(task
);
1370 task_deallocate(task
);
1372 if (proc_map
== NULL
)
1376 ret
= kmem_alloc(kernel_map
, ©_start
, round_page(arg_size
), VM_KERN_MEMORY_BSD
);
1377 if (ret
!= KERN_SUCCESS
) {
1378 vm_map_deallocate(proc_map
);
1382 copy_end
= round_page(copy_start
+ arg_size
);
1384 if( vm_map_copyin(proc_map
, (vm_map_address_t
)arg_addr
,
1385 (vm_map_size_t
)arg_size
, FALSE
, &tmp
) != KERN_SUCCESS
) {
1386 vm_map_deallocate(proc_map
);
1387 kmem_free(kernel_map
, copy_start
,
1388 round_page(arg_size
));
1393 * Now that we've done the copyin from the process'
1394 * map, we can release the reference to it.
1396 vm_map_deallocate(proc_map
);
1398 if( vm_map_copy_overwrite(kernel_map
,
1399 (vm_map_address_t
)copy_start
,
1400 tmp
, FALSE
) != KERN_SUCCESS
) {
1401 kmem_free(kernel_map
, copy_start
,
1402 round_page(arg_size
));
1403 vm_map_copy_discard(tmp
);
1407 if (arg_size
> argslen
) {
1408 data
= (caddr_t
) (copy_end
- argslen
);
1411 data
= (caddr_t
) (copy_end
- arg_size
);
1416 * When these sysctls were introduced, the first string in the strings
1417 * section was just the bare path of the executable. However, for security
1418 * reasons we now prefix this string with executable_path= so it can be
1419 * parsed getenv style. To avoid binary compatibility issues with existing
1420 * callers of this sysctl, we strip it off here if present.
1421 * (rdar://problem/13746466)
1423 #define EXECUTABLE_KEY "executable_path="
1424 if (strncmp(EXECUTABLE_KEY
, data
, strlen(EXECUTABLE_KEY
)) == 0){
1425 data
+= strlen(EXECUTABLE_KEY
);
1426 size
-= strlen(EXECUTABLE_KEY
);
1430 /* Put processes argc as the first word in the copyout buffer */
1431 suword(where
, p
->p_argc
);
1432 error
= copyout(data
, (where
+ sizeof(int)), size
);
1433 size
+= sizeof(int);
1435 error
= copyout(data
, where
, size
);
1438 * Make the old PROCARGS work to return the executable's path
1439 * But, only if there is enough space in the provided buffer
1441 * on entry: data [possibly] points to the beginning of the path
1443 * Note: we keep all pointers&sizes aligned to word boundaries
1445 if ( (! error
) && (buflen
> 0 && (u_int
)buflen
> argslen
) )
1447 int binPath_sz
, alignedBinPath_sz
= 0;
1448 int extraSpaceNeeded
, addThis
;
1449 user_addr_t placeHere
;
1450 char * str
= (char *) data
;
1453 /* Some apps are really bad about messing up their stacks
1454 So, we have to be extra careful about getting the length
1455 of the executing binary. If we encounter an error, we bail.
1458 /* Limit ourselves to PATH_MAX paths */
1459 if ( max_len
> PATH_MAX
) max_len
= PATH_MAX
;
1463 while ( (binPath_sz
< max_len
-1) && (*str
++ != 0) )
1466 /* If we have a NUL terminator, copy it, too */
1467 if (binPath_sz
< max_len
-1) binPath_sz
+= 1;
1469 /* Pre-Flight the space requirements */
1471 /* Account for the padding that fills out binPath to the next word */
1472 alignedBinPath_sz
+= (binPath_sz
& (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz
& (sizeof(int)-1))) : 0;
1474 placeHere
= where
+ size
;
1476 /* Account for the bytes needed to keep placeHere word aligned */
1477 addThis
= (placeHere
& (sizeof(int)-1)) ? (sizeof(int)-(placeHere
& (sizeof(int)-1))) : 0;
1479 /* Add up all the space that is needed */
1480 extraSpaceNeeded
= alignedBinPath_sz
+ addThis
+ binPath_sz
+ (4 * sizeof(int));
1482 /* is there is room to tack on argv[0]? */
1483 if ( (buflen
& ~(sizeof(int)-1)) >= ( argslen
+ extraSpaceNeeded
))
1485 placeHere
+= addThis
;
1486 suword(placeHere
, 0);
1487 placeHere
+= sizeof(int);
1488 suword(placeHere
, 0xBFFF0000);
1489 placeHere
+= sizeof(int);
1490 suword(placeHere
, 0);
1491 placeHere
+= sizeof(int);
1492 error
= copyout(data
, placeHere
, binPath_sz
);
1495 placeHere
+= binPath_sz
;
1496 suword(placeHere
, 0);
1497 size
+= extraSpaceNeeded
;
1503 if (copy_start
!= (vm_offset_t
) 0) {
1504 kmem_free(kernel_map
, copy_start
, copy_end
- copy_start
);
1510 if (where
!= USER_ADDR_NULL
)
1517 * Max number of concurrent aio requests
1521 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1523 int new_value
, changed
;
1524 int error
= sysctl_io_number(req
, aio_max_requests
, sizeof(int), &new_value
, &changed
);
1526 /* make sure the system-wide limit is greater than the per process limit */
1527 if (new_value
>= aio_max_requests_per_process
&& new_value
<= AIO_MAX_REQUESTS
)
1528 aio_max_requests
= new_value
;
1537 * Max number of concurrent aio requests per process
1541 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1543 int new_value
, changed
;
1544 int error
= sysctl_io_number(req
, aio_max_requests_per_process
, sizeof(int), &new_value
, &changed
);
1546 /* make sure per process limit is less than the system-wide limit */
1547 if (new_value
<= aio_max_requests
&& new_value
>= AIO_LISTIO_MAX
)
1548 aio_max_requests_per_process
= new_value
;
1557 * Max number of async IO worker threads
1561 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1563 int new_value
, changed
;
1564 int error
= sysctl_io_number(req
, aio_worker_threads
, sizeof(int), &new_value
, &changed
);
1566 /* we only allow an increase in the number of worker threads */
1567 if (new_value
> aio_worker_threads
) {
1568 _aio_create_worker_threads((new_value
- aio_worker_threads
));
1569 aio_worker_threads
= new_value
;
1579 * System-wide limit on the max number of processes
1583 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1585 int new_value
, changed
;
1586 int error
= sysctl_io_number(req
, maxproc
, sizeof(int), &new_value
, &changed
);
1588 AUDIT_ARG(value32
, new_value
);
1589 /* make sure the system-wide limit is less than the configured hard
1590 limit set at kernel compilation */
1591 if (new_value
<= hard_maxproc
&& new_value
> 0)
1592 maxproc
= new_value
;
1599 SYSCTL_STRING(_kern
, KERN_OSTYPE
, ostype
,
1600 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1602 SYSCTL_STRING(_kern
, KERN_OSRELEASE
, osrelease
,
1603 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1605 SYSCTL_INT(_kern
, KERN_OSREV
, osrevision
,
1606 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1607 (int *)NULL
, BSD
, "");
1608 SYSCTL_STRING(_kern
, KERN_VERSION
, version
,
1609 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1611 SYSCTL_STRING(_kern
, OID_AUTO
, uuid
,
1612 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1613 &kernel_uuid_string
[0], 0, "");
1616 int debug_kprint_syscall
= 0;
1617 char debug_kprint_syscall_process
[MAXCOMLEN
+1];
1619 /* Thread safe: bits and string value are not used to reclaim state */
1620 SYSCTL_INT (_debug
, OID_AUTO
, kprint_syscall
,
1621 CTLFLAG_RW
| CTLFLAG_LOCKED
, &debug_kprint_syscall
, 0, "kprintf syscall tracing");
1622 SYSCTL_STRING(_debug
, OID_AUTO
, kprint_syscall_process
,
1623 CTLFLAG_RW
| CTLFLAG_LOCKED
, debug_kprint_syscall_process
, sizeof(debug_kprint_syscall_process
),
1624 "name of process for kprintf syscall tracing");
1626 int debug_kprint_current_process(const char **namep
)
1628 struct proc
*p
= current_proc();
1634 if (debug_kprint_syscall_process
[0]) {
1635 /* user asked to scope tracing to a particular process name */
1636 if(0 == strncmp(debug_kprint_syscall_process
,
1637 p
->p_comm
, sizeof(debug_kprint_syscall_process
))) {
1638 /* no value in telling the user that we traced what they asked */
1639 if(namep
) *namep
= NULL
;
1647 /* trace all processes. Tell user what we traced */
1656 /* PR-5293665: need to use a callback function for kern.osversion to set
1657 * osversion in IORegistry */
1660 sysctl_osversion(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
)
1664 rval
= sysctl_handle_string(oidp
, arg1
, arg2
, req
);
1667 IORegistrySetOSBuildVersion((char *)arg1
);
1673 SYSCTL_PROC(_kern
, KERN_OSVERSION
, osversion
,
1674 CTLFLAG_RW
| CTLFLAG_KERN
| CTLTYPE_STRING
| CTLFLAG_LOCKED
,
1675 osversion
, 256 /* OSVERSIZE*/,
1676 sysctl_osversion
, "A", "");
1679 sysctl_sysctl_bootargs
1680 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1685 strlcpy(buf
, PE_boot_args(), 256);
1686 error
= sysctl_io_string(req
, buf
, 256, 0, NULL
);
1690 SYSCTL_PROC(_kern
, OID_AUTO
, bootargs
,
1691 CTLFLAG_LOCKED
| CTLFLAG_RD
| CTLFLAG_KERN
| CTLTYPE_STRING
,
1693 sysctl_sysctl_bootargs
, "A", "bootargs");
1695 SYSCTL_INT(_kern
, KERN_MAXFILES
, maxfiles
,
1696 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1698 SYSCTL_INT(_kern
, KERN_ARGMAX
, argmax
,
1699 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1700 (int *)NULL
, ARG_MAX
, "");
1701 SYSCTL_INT(_kern
, KERN_POSIX1
, posix1version
,
1702 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1703 (int *)NULL
, _POSIX_VERSION
, "");
1704 SYSCTL_INT(_kern
, KERN_NGROUPS
, ngroups
,
1705 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1706 (int *)NULL
, NGROUPS_MAX
, "");
1707 SYSCTL_INT(_kern
, KERN_JOB_CONTROL
, job_control
,
1708 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1709 (int *)NULL
, 1, "");
1710 #if 1 /* _POSIX_SAVED_IDS from <unistd.h> */
1711 SYSCTL_INT(_kern
, KERN_SAVED_IDS
, saved_ids
,
1712 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1713 (int *)NULL
, 1, "");
1715 SYSCTL_INT(_kern
, KERN_SAVED_IDS
, saved_ids
,
1716 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1719 SYSCTL_INT(_kern
, OID_AUTO
, num_files
,
1720 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1722 SYSCTL_COMPAT_INT(_kern
, OID_AUTO
, num_vnodes
,
1723 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1725 SYSCTL_INT(_kern
, OID_AUTO
, num_tasks
,
1726 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1728 SYSCTL_INT(_kern
, OID_AUTO
, num_threads
,
1729 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1730 &thread_max
, 0, "");
1731 SYSCTL_INT(_kern
, OID_AUTO
, num_taskthreads
,
1732 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1733 &task_threadmax
, 0, "");
1736 sysctl_maxvnodes (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1738 int oldval
= desiredvnodes
;
1739 int error
= sysctl_io_number(req
, desiredvnodes
, sizeof(int), &desiredvnodes
, NULL
);
1741 if (oldval
!= desiredvnodes
) {
1742 reset_vmobjectcache(oldval
, desiredvnodes
);
1743 resize_namecache(desiredvnodes
);
1749 SYSCTL_INT(_kern
, OID_AUTO
, namecache_disabled
,
1750 CTLFLAG_RW
| CTLFLAG_LOCKED
,
1751 &nc_disabled
, 0, "");
1753 SYSCTL_PROC(_kern
, KERN_MAXVNODES
, maxvnodes
,
1754 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1755 0, 0, sysctl_maxvnodes
, "I", "");
1757 SYSCTL_PROC(_kern
, KERN_MAXPROC
, maxproc
,
1758 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1759 0, 0, sysctl_maxproc
, "I", "");
1761 SYSCTL_PROC(_kern
, KERN_AIOMAX
, aiomax
,
1762 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1763 0, 0, sysctl_aiomax
, "I", "");
1765 SYSCTL_PROC(_kern
, KERN_AIOPROCMAX
, aioprocmax
,
1766 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1767 0, 0, sysctl_aioprocmax
, "I", "");
1769 SYSCTL_PROC(_kern
, KERN_AIOTHREADS
, aiothreads
,
1770 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1771 0, 0, sysctl_aiothreads
, "I", "");
1773 #if (DEVELOPMENT || DEBUG)
1774 extern int sched_smt_balance
;
1775 SYSCTL_INT(_kern
, OID_AUTO
, sched_smt_balance
,
1776 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1777 &sched_smt_balance
, 0, "");
1782 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1784 int new_value
, changed
;
1785 int error
= sysctl_io_number(req
, securelevel
, sizeof(int), &new_value
, &changed
);
1787 if (!(new_value
< securelevel
&& req
->p
->p_pid
!= 1)) {
1789 securelevel
= new_value
;
1798 SYSCTL_PROC(_kern
, KERN_SECURELVL
, securelevel
,
1799 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1800 0, 0, sysctl_securelvl
, "I", "");
1805 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1808 error
= sysctl_io_string(req
, domainname
, sizeof(domainname
), 0, &changed
);
1810 domainnamelen
= strlen(domainname
);
1815 SYSCTL_PROC(_kern
, KERN_DOMAINNAME
, nisdomainname
,
1816 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1817 0, 0, sysctl_domainname
, "A", "");
1819 SYSCTL_COMPAT_INT(_kern
, KERN_HOSTID
, hostid
,
1820 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1825 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1828 error
= sysctl_io_string(req
, hostname
, sizeof(hostname
), 1, &changed
);
1830 hostnamelen
= req
->newlen
;
1836 SYSCTL_PROC(_kern
, KERN_HOSTNAME
, hostname
,
1837 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1838 0, 0, sysctl_hostname
, "A", "");
1842 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1844 /* Original code allowed writing, I'm copying this, although this all makes
1845 no sense to me. Besides, this sysctl is never used. */
1846 return sysctl_io_string(req
, &req
->p
->p_name
[0], (2*MAXCOMLEN
+1), 1, NULL
);
1849 SYSCTL_PROC(_kern
, KERN_PROCNAME
, procname
,
1850 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
1851 0, 0, sysctl_procname
, "A", "");
1853 SYSCTL_INT(_kern
, KERN_SPECULATIVE_READS
, speculative_reads_disabled
,
1854 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1855 &speculative_reads_disabled
, 0, "");
1857 SYSCTL_INT(_kern
, OID_AUTO
, ignore_is_ssd
,
1858 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1859 &ignore_is_ssd
, 0, "");
1861 SYSCTL_INT(_kern
, OID_AUTO
, root_is_CF_drive
,
1862 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1863 &root_is_CF_drive
, 0, "");
1865 SYSCTL_UINT(_kern
, OID_AUTO
, preheat_max_bytes
,
1866 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1867 &preheat_max_bytes
, 0, "");
1869 SYSCTL_UINT(_kern
, OID_AUTO
, preheat_min_bytes
,
1870 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1871 &preheat_min_bytes
, 0, "");
1873 SYSCTL_UINT(_kern
, OID_AUTO
, speculative_prefetch_max
,
1874 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1875 &speculative_prefetch_max
, 0, "");
1877 SYSCTL_UINT(_kern
, OID_AUTO
, speculative_prefetch_max_iosize
,
1878 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1879 &speculative_prefetch_max_iosize
, 0, "");
1881 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_free_target
,
1882 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1883 &vm_page_free_target
, 0, "");
1885 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_free_min
,
1886 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1887 &vm_page_free_min
, 0, "");
1889 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_free_reserved
,
1890 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1891 &vm_page_free_reserved
, 0, "");
1893 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_speculative_percentage
,
1894 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1895 &vm_page_speculative_percentage
, 0, "");
1897 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_speculative_q_age_ms
,
1898 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1899 &vm_page_speculative_q_age_ms
, 0, "");
1901 SYSCTL_UINT(_kern
, OID_AUTO
, vm_max_delayed_work_limit
,
1902 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1903 &vm_max_delayed_work_limit
, 0, "");
1905 SYSCTL_UINT(_kern
, OID_AUTO
, vm_max_batch
,
1906 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
1907 &vm_max_batch
, 0, "");
1909 SYSCTL_STRING(_kern
, OID_AUTO
, bootsessionuuid
,
1910 CTLFLAG_RD
| CTLFLAG_LOCKED
,
1911 &bootsessionuuid_string
, sizeof(bootsessionuuid_string
) , "");
1915 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1917 time_t tv_sec
= boottime_sec();
1918 struct proc
*p
= req
->p
;
1920 if (proc_is64bit(p
)) {
1921 struct user64_timeval t
;
1924 return sysctl_io_opaque(req
, &t
, sizeof(t
), NULL
);
1926 struct user32_timeval t
;
1929 return sysctl_io_opaque(req
, &t
, sizeof(t
), NULL
);
1933 SYSCTL_PROC(_kern
, KERN_BOOTTIME
, boottime
,
1934 CTLTYPE_STRUCT
| CTLFLAG_KERN
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1935 0, 0, sysctl_boottime
, "S,timeval", "");
1939 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1942 int error
= get_kernel_symfile(req
->p
, &str
);
1945 return sysctl_io_string(req
, str
, 0, 0, NULL
);
1949 SYSCTL_PROC(_kern
, KERN_SYMFILE
, symfile
,
1950 CTLTYPE_STRING
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1951 0, 0, sysctl_symfile
, "A", "");
1956 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1958 return sysctl_io_number(req
, netboot_root(), sizeof(int), NULL
, NULL
);
1961 SYSCTL_PROC(_kern
, KERN_NETBOOT
, netboot
,
1962 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
1963 0, 0, sysctl_netboot
, "I", "");
1966 #ifdef CONFIG_IMGSRC_ACCESS
1968 * Legacy--act as if only one layer of nesting is possible.
1972 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1974 vfs_context_t ctx
= vfs_context_current();
1978 if (!vfs_context_issuser(ctx
)) {
1982 if (imgsrc_rootvnodes
[0] == NULL
) {
1986 result
= vnode_getwithref(imgsrc_rootvnodes
[0]);
1991 devvp
= vnode_mount(imgsrc_rootvnodes
[0])->mnt_devvp
;
1992 result
= vnode_getwithref(devvp
);
1997 result
= sysctl_io_number(req
, vnode_specrdev(devvp
), sizeof(dev_t
), NULL
, NULL
);
2001 vnode_put(imgsrc_rootvnodes
[0]);
2005 SYSCTL_PROC(_kern
, OID_AUTO
, imgsrcdev
,
2006 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2007 0, 0, sysctl_imgsrcdev
, "I", "");
2011 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2014 struct imgsrc_info info
[MAX_IMAGEBOOT_NESTING
]; /* 2 for now, no problem */
2018 if (imgsrc_rootvnodes
[0] == NULLVP
) {
2022 for (i
= 0; i
< MAX_IMAGEBOOT_NESTING
; i
++) {
2024 * Go get the root vnode.
2026 rvp
= imgsrc_rootvnodes
[i
];
2027 if (rvp
== NULLVP
) {
2031 error
= vnode_get(rvp
);
2037 * For now, no getting at a non-local volume.
2039 devvp
= vnode_mount(rvp
)->mnt_devvp
;
2040 if (devvp
== NULL
) {
2045 error
= vnode_getwithref(devvp
);
2054 info
[i
].ii_dev
= vnode_specrdev(devvp
);
2055 info
[i
].ii_flags
= 0;
2056 info
[i
].ii_height
= i
;
2057 bzero(info
[i
].ii_reserved
, sizeof(info
[i
].ii_reserved
));
2063 return sysctl_io_opaque(req
, info
, i
* sizeof(info
[0]), NULL
);
2066 SYSCTL_PROC(_kern
, OID_AUTO
, imgsrcinfo
,
2067 CTLTYPE_OPAQUE
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2068 0, 0, sysctl_imgsrcinfo
, "I", "");
2070 #endif /* CONFIG_IMGSRC_ACCESS */
2073 SYSCTL_DECL(_kern_timer
);
2074 SYSCTL_NODE(_kern
, OID_AUTO
, timer
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, "timer");
2077 SYSCTL_INT(_kern_timer
, OID_AUTO
, coalescing_enabled
,
2078 CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2079 &mach_timer_coalescing_enabled
, 0, "");
2081 SYSCTL_QUAD(_kern_timer
, OID_AUTO
, deadline_tracking_bin_1
,
2082 CTLFLAG_RW
| CTLFLAG_LOCKED
,
2083 &timer_deadline_tracking_bin_1
, "");
2084 SYSCTL_QUAD(_kern_timer
, OID_AUTO
, deadline_tracking_bin_2
,
2085 CTLFLAG_RW
| CTLFLAG_LOCKED
,
2086 &timer_deadline_tracking_bin_2
, "");
2088 SYSCTL_DECL(_kern_timer_longterm
);
2089 SYSCTL_NODE(_kern_timer
, OID_AUTO
, longterm
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, "longterm");
2092 /* Must match definition in osfmk/kern/timer_call.c */
2095 ENQUEUES
, DEQUEUES
, ESCALATES
, SCANS
, PREEMPTS
,
2096 LATENCY
, LATENCY_MIN
, LATENCY_MAX
2098 extern uint64_t timer_sysctl_get(int);
2099 extern int timer_sysctl_set(int, uint64_t);
2103 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2105 int oid
= (int)arg1
;
2106 uint64_t value
= timer_sysctl_get(oid
);
2111 error
= sysctl_io_number(req
, value
, sizeof(value
), &new_value
, &changed
);
2113 error
= timer_sysctl_set(oid
, new_value
);
2118 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, threshold
,
2119 CTLTYPE_QUAD
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2120 (void *) THRESHOLD
, 0, sysctl_timer
, "Q", "");
2121 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, qlen
,
2122 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2123 (void *) QCOUNT
, 0, sysctl_timer
, "Q", "");
2125 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, enqueues
,
2126 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2127 (void *) ENQUEUES
, 0, sysctl_timer
, "Q", "");
2128 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, dequeues
,
2129 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2130 (void *) DEQUEUES
, 0, sysctl_timer
, "Q", "");
2131 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, escalates
,
2132 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2133 (void *) ESCALATES
, 0, sysctl_timer
, "Q", "");
2134 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, scans
,
2135 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2136 (void *) SCANS
, 0, sysctl_timer
, "Q", "");
2137 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, preempts
,
2138 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2139 (void *) PREEMPTS
, 0, sysctl_timer
, "Q", "");
2140 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, latency
,
2141 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2142 (void *) LATENCY
, 0, sysctl_timer
, "Q", "");
2143 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, latency_min
,
2144 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2145 (void *) LATENCY_MIN
, 0, sysctl_timer
, "Q", "");
2146 SYSCTL_PROC(_kern_timer_longterm
, OID_AUTO
, latency_max
,
2147 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2148 (void *) LATENCY_MAX
, 0, sysctl_timer
, "Q", "");
2153 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2155 return sysctl_io_number(req
, (int)req
->p
->user_stack
, sizeof(int), NULL
, NULL
);
2158 SYSCTL_PROC(_kern
, KERN_USRSTACK32
, usrstack
,
2159 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2160 0, 0, sysctl_usrstack
, "I", "");
2164 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2166 return sysctl_io_number(req
, req
->p
->user_stack
, sizeof(req
->p
->user_stack
), NULL
, NULL
);
2169 SYSCTL_PROC(_kern
, KERN_USRSTACK64
, usrstack64
,
2170 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2171 0, 0, sysctl_usrstack64
, "Q", "");
2173 SYSCTL_STRING(_kern
, KERN_COREFILE
, corefile
,
2174 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2175 corefilename
, sizeof(corefilename
), "");
2179 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2181 #ifdef SECURE_KERNEL
2185 int new_value
, changed
;
2186 int error
= sysctl_io_number(req
, do_coredump
, sizeof(int), &new_value
, &changed
);
2188 if ((new_value
== 0) || (new_value
== 1))
2189 do_coredump
= new_value
;
2197 SYSCTL_PROC(_kern
, KERN_COREDUMP
, coredump
,
2198 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2199 0, 0, sysctl_coredump
, "I", "");
2202 sysctl_suid_coredump
2203 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2205 #ifdef SECURE_KERNEL
2209 int new_value
, changed
;
2210 int error
= sysctl_io_number(req
, sugid_coredump
, sizeof(int), &new_value
, &changed
);
2212 if ((new_value
== 0) || (new_value
== 1))
2213 sugid_coredump
= new_value
;
2221 SYSCTL_PROC(_kern
, KERN_SUGID_COREDUMP
, sugid_coredump
,
2222 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2223 0, 0, sysctl_suid_coredump
, "I", "");
2227 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2229 struct proc
*p
= req
->p
;
2230 int new_value
, changed
;
2231 int error
= sysctl_io_number(req
, (req
->p
->p_lflag
& P_LDELAYTERM
)? 1: 0, sizeof(int), &new_value
, &changed
);
2235 req
->p
->p_lflag
|= P_LDELAYTERM
;
2237 req
->p
->p_lflag
&= ~P_LDELAYTERM
;
2243 SYSCTL_PROC(_kern
, KERN_PROCDELAYTERM
, delayterm
,
2244 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2245 0, 0, sysctl_delayterm
, "I", "");
2250 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2252 struct proc
*p
= req
->p
;
2254 int new_value
, old_value
, changed
;
2257 ut
= get_bsdthread_info(current_thread());
2259 if (ut
->uu_flag
& UT_RAGE_VNODES
)
2260 old_value
= KERN_RAGE_THREAD
;
2261 else if (p
->p_lflag
& P_LRAGE_VNODES
)
2262 old_value
= KERN_RAGE_PROC
;
2266 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
2269 switch (new_value
) {
2270 case KERN_RAGE_PROC
:
2272 p
->p_lflag
|= P_LRAGE_VNODES
;
2275 case KERN_UNRAGE_PROC
:
2277 p
->p_lflag
&= ~P_LRAGE_VNODES
;
2281 case KERN_RAGE_THREAD
:
2282 ut
->uu_flag
|= UT_RAGE_VNODES
;
2284 case KERN_UNRAGE_THREAD
:
2285 ut
= get_bsdthread_info(current_thread());
2286 ut
->uu_flag
&= ~UT_RAGE_VNODES
;
2293 SYSCTL_PROC(_kern
, KERN_RAGEVNODE
, rage_vnode
,
2294 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
2295 0, 0, sysctl_rage_vnode
, "I", "");
2297 /* XXX move this interface into libproc and remove this sysctl */
2299 sysctl_setthread_cpupercent
2300 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2302 int new_value
, old_value
;
2304 kern_return_t kret
= KERN_SUCCESS
;
2305 uint8_t percent
= 0;
2313 if ((error
= sysctl_io_number(req
, old_value
, sizeof(old_value
), &new_value
, NULL
)) != 0)
2316 percent
= new_value
& 0xff; /* low 8 bits for percent */
2317 ms_refill
= (new_value
>> 8) & 0xffffff; /* upper 24 bits represent ms refill value */
2322 * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present.
2324 if ((kret
= thread_set_cpulimit(THREAD_CPULIMIT_BLOCK
, percent
, ms_refill
* (int)NSEC_PER_MSEC
)) != 0)
2330 SYSCTL_PROC(_kern
, OID_AUTO
, setthread_cpupercent
,
2331 CTLTYPE_INT
| CTLFLAG_WR
| CTLFLAG_ANYBODY
,
2332 0, 0, sysctl_setthread_cpupercent
, "I", "set thread cpu percentage limit");
2336 sysctl_kern_check_openevt
2337 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2339 struct proc
*p
= req
->p
;
2340 int new_value
, old_value
, changed
;
2343 if (p
->p_flag
& P_CHECKOPENEVT
) {
2344 old_value
= KERN_OPENEVT_PROC
;
2349 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
2352 switch (new_value
) {
2353 case KERN_OPENEVT_PROC
:
2354 OSBitOrAtomic(P_CHECKOPENEVT
, &p
->p_flag
);
2357 case KERN_UNOPENEVT_PROC
:
2358 OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT
), &p
->p_flag
);
2368 SYSCTL_PROC(_kern
, KERN_CHECKOPENEVT
, check_openevt
, CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
2369 0, 0, sysctl_kern_check_openevt
, "I", "set the per-process check-open-evt flag");
/*
 * Handler for kern.nx: reads/sets nx_enabled.  On SECURE_KERNEL builds the
 * writable path is compiled out; on x86 the new value is only accepted when
 * the CPU advertises the XD (execute-disable) feature.
 * NOTE(review): excerpt is incomplete — the function header line, braces and
 * return paths are elided here.
 */
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
#ifdef SECURE_KERNEL
	int new_value, changed;

	error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);

#if defined(__i386__) || defined(__x86_64__)
	/*
	 * Only allow setting if NX is supported on the chip
	 */
	if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD))

	nx_enabled = new_value;
#endif /* SECURE_KERNEL */

SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
	0, 0, sysctl_nx, "I", "");
/*
 * Handler for vm.loadavg: copies the global load averages (averunnable) out
 * to userspace, using the 64- or 32-bit user structure layout depending on
 * the requesting process's ABI.
 * NOTE(review): excerpt is incomplete — the function header line and the
 * braces around the if/else arms are elided here.
 */
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	if (proc_is64bit(req->p)) {
		struct user64_loadavg loadinfo64;
		fill_loadavg64(&averunnable, &loadinfo64);
		return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);

		struct user32_loadavg loadinfo32;
		fill_loadavg32(&averunnable, &loadinfo32);
		return sysctl_io_opaque(req, &loadinfo32, sizeof(loadinfo32), NULL);

SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
	CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
	0, 0, sysctl_loadavg, "S,loadavg", "");
/*
 * Note: Thread safe; vm_map_lock protects in vm_toggle_entry_reuse()
 *
 * Handler for debug.toggle_address_reuse: reads the current toggle via
 * VM_TOGGLE_GETVALUE, then passes the written value back into
 * vm_toggle_entry_reuse().
 * NOTE(review): excerpt is incomplete — return type, braces and the
 * error path of the GETVALUE call are elided here.
 */
sysctl_vm_toggle_address_reuse(__unused struct sysctl_oid *oidp, __unused void *arg1,
	__unused int arg2, struct sysctl_req *req)
	int old_value=0, new_value=0, error=0;

	if(vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE, &old_value))

	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, NULL);

	return (vm_toggle_entry_reuse(new_value, NULL));

SYSCTL_PROC(_debug, OID_AUTO, toggle_address_reuse,
	CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	0, 0, sysctl_vm_toggle_address_reuse, "I", "");
/*
 * Handler for vm.swapusage: queries swap statistics via macx_swapinfo(),
 * packages them into a struct xsw_usage and copies it out.
 * NOTE(review): excerpt is incomplete — the function header line, the
 * remaining macx_swapinfo() arguments and its error check are elided here.
 */
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	uint64_t swap_total;
	uint64_t swap_avail;
	vm_size_t swap_pagesize;
	boolean_t swap_encrypted;
	struct xsw_usage xsu;

	error = macx_swapinfo(&swap_total,

	xsu.xsu_total = swap_total;
	xsu.xsu_avail = swap_avail;
	xsu.xsu_used = swap_total - swap_avail;	/* used = total minus available */
	xsu.xsu_pagesize = swap_pagesize;
	xsu.xsu_encrypted = swap_encrypted;
	return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL);

SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
	CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
	0, 0, sysctl_swapusage, "S,xsw_usage", "");
extern void vm_page_reactivate_all_throttled(void);

/*
 * Handler for vm.freeze_enabled: toggles memorystatus_freeze_enabled.
 * Refuses the write when a compressed pager is active; when freeze is being
 * turned off, throttled dirty pages are moved back to the active queue.
 * NOTE(review): excerpt is incomplete — return type, braces, early returns
 * and the declaration of 'disabled' are elided here.
 */
sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
#pragma unused(arg1, arg2)
	int error, val = memorystatus_freeze_enabled ? 1 : 0;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)

	if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
		//assert(req->newptr);
		printf("Failed attempt to set vm.freeze_enabled sysctl\n");

	/*
	 * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue.
	 */
	disabled = (!val && memorystatus_freeze_enabled);

	memorystatus_freeze_enabled = val ? TRUE : FALSE;

		vm_page_reactivate_all_throttled();

SYSCTL_PROC(_vm, OID_AUTO, freeze_enabled, CTLTYPE_INT|CTLFLAG_RW,
	&memorystatus_freeze_enabled, 0, sysctl_freeze_enabled, "I", "");
#endif /* CONFIG_FREEZE */
/* this kernel does NOT implement shared_region_make_private_np() */
/* kern.shreg_private: read-only; NULL backing pointer so it always reads 0. */
SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
	CTLFLAG_RD | CTLFLAG_LOCKED,
	(int *)NULL, 0, "");
/*
 * Looks up the process named by the sysctl MIB and reports its CPU type:
 * the host cpu_type() without the arch bits, with CPU_ARCH_ABI64 OR'd in
 * for 64-bit processes.
 * NOTE(review): excerpt is heavily incomplete — return type, the other
 * parameters, braces, the proc_rele/error paths and the final return are
 * all elided here.
 */
fetch_process_cputype(
	cpu_type_t *cputype)
	proc_t p = PROC_NULL;

	else if (namelen == 1) {
		p = proc_find(name[0]);

	ret = cpu_type() & ~CPU_ARCH_MASK;	/* strip arch bits from host type */
	if (IS_64BIT_PROCESS(p))
		ret |= CPU_ARCH_ABI64;
/*
 * Handler for sysctl.proc_native: reports whether the target process's CPU
 * type (base type, arch bits masked off) matches the host CPU type.
 * NOTE(review): excerpt is incomplete — return type, braces, the error
 * return and the assignment of 'res' are elided here.
 */
sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
	struct sysctl_req *req)
	cpu_type_t proc_cputype = 0;
	if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)

	if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))

	return SYSCTL_OUT(req, &res, sizeof(res));

SYSCTL_PROC(_sysctl, OID_AUTO, proc_native,
	CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
	0, 0, sysctl_sysctl_native, "I", "proc_native");
/*
 * Handler for sysctl.proc_cputype: copies the target process's cpu_type_t
 * (from fetch_process_cputype) out to the caller.
 * NOTE(review): excerpt is incomplete — return type, braces and the error
 * return are elided here.
 */
sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
	struct sysctl_req *req)
	cpu_type_t proc_cputype = 0;
	if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)

	return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));

SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype,
	CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
	0, 0, sysctl_sysctl_cputype, "I", "proc_cputype");
/*
 * Handler for kern.safeboot: read-only 0/1 derived from the RB_SAFEBOOT bit
 * of boothowto.
 * NOTE(review): excerpt is incomplete — the function header line and braces
 * are elided here.
 */
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	return sysctl_io_number(req, boothowto & RB_SAFEBOOT ? 1 : 0, sizeof(int), NULL, NULL);

SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
	CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
	0, 0, sysctl_safeboot, "I", "");
/*
 * Handler for kern.singleuser: read-only 0/1 derived from the RB_SINGLE bit
 * of boothowto.
 * NOTE(review): excerpt is incomplete — the function header line and braces
 * are elided here.
 */
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	return sysctl_io_number(req, boothowto & RB_SINGLE ? 1 : 0, sizeof(int), NULL, NULL);

SYSCTL_PROC(_kern, OID_AUTO, singleuser,
	CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
	0, 0, sysctl_singleuser, "I", "");
2606 STATIC
int sysctl_minimalboot
2607 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2609 return sysctl_io_number(req
, minimalboot
, sizeof(int), NULL
, NULL
);
2612 SYSCTL_PROC(_kern
, OID_AUTO
, minimalboot
,
2613 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2614 0, 0, sysctl_minimalboot
, "I", "");
/*
 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
 */
extern boolean_t affinity_sets_enabled;
extern int affinity_sets_mapping;

/* kern.affinity_sets_enabled / kern.affinity_sets_mapping: RW int knobs
 * backed directly by the globals above (boolean_t cast to int*). */
SYSCTL_INT (_kern, OID_AUTO, affinity_sets_enabled,
	CTLFLAG_RW | CTLFLAG_LOCKED, (int *) &affinity_sets_enabled, 0, "hinting enabled");
SYSCTL_INT (_kern, OID_AUTO, affinity_sets_mapping,
	CTLFLAG_RW | CTLFLAG_LOCKED, &affinity_sets_mapping, 0, "mapping policy");
/*
 * Boolean indicating if KASLR is active.
 *
 * Handler for kern.slide: reports 1 when vm_kernel_slide is non-zero (never
 * the slide value itself).
 * NOTE(review): excerpt is incomplete — the function header line, braces and
 * the declaration of 'slide' are elided here.
 */
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
	slide = vm_kernel_slide ? 1 : 0;

	return sysctl_io_number( req, slide, sizeof(int), NULL, NULL);

SYSCTL_PROC(_kern, OID_AUTO, slide,
	CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
	0, 0, sysctl_slide, "I", "");
/*
 * Limit on total memory users can wire.
 *
 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
 *
 * vm_user_wire_limit - per address space limit on wired memory.  This puts a cap on the process's rlimit value.
 *
 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
 *
 * All values are in bytes.
 */

vm_map_size_t	vm_global_no_user_wire_amount;
vm_map_size_t	vm_global_user_wire_limit;
vm_map_size_t	vm_user_wire_limit;

/*
 * There needs to be a more automatic/elegant way to do this
 */
SYSCTL_QUAD(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_no_user_wire_amount, "");
SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, "");
SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_user_wire_limit, "");
/* Read-only counters from the VM map copy-overwrite path (defined in osfmk). */
extern int vm_map_copy_overwrite_aligned_src_not_internal;
extern int vm_map_copy_overwrite_aligned_src_not_symmetric;
extern int vm_map_copy_overwrite_aligned_src_large;
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_internal, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_symmetric, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_symmetric, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_large, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_large, 0, "");

/* External page count (RO) and the file-cache floor tunable (RW). */
extern uint32_t	vm_page_external_count;
extern uint32_t	vm_page_filecache_min;

SYSCTL_INT(_vm, OID_AUTO, vm_page_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_external_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_filecache_min, 0, "");
/* VM compressor state, statistics and tunables, defined in osfmk; exported
 * through the vm.* sysctls registered below. */
extern int		vm_compressor_mode;
extern int		vm_compressor_is_active;
extern int		vm_compressor_available;
extern uint32_t		vm_ripe_target_age;
extern uint32_t		swapout_target_age;
extern int64_t		compressor_bytes_used;
extern int64_t		c_segment_input_bytes;
extern int64_t		c_segment_compressed_bytes;
extern uint32_t		compressor_eval_period_in_msecs;
extern uint32_t		compressor_sample_min_in_msecs;
extern uint32_t		compressor_sample_max_in_msecs;
extern uint32_t		compressor_thrashing_threshold_per_10msecs;
extern uint32_t		compressor_thrashing_min_per_10msecs;
extern uint32_t		vm_compressor_minorcompact_threshold_divisor;
extern uint32_t		vm_compressor_majorcompact_threshold_divisor;
extern uint32_t		vm_compressor_unthrottle_threshold_divisor;
extern uint32_t		vm_compressor_catchup_threshold_divisor;
/* Read-only compressor statistics (64-bit). */
SYSCTL_QUAD(_vm, OID_AUTO, compressor_input_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_input_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_compressed_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_bytes_used, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_bytes_used, "");

/* Read-only compressor state. */
SYSCTL_INT(_vm, OID_AUTO, compressor_mode, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_mode, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_is_active, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_is_active, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_swapout_target_age, CTLFLAG_RD | CTLFLAG_LOCKED, &swapout_target_age, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_available, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_available, 0, "");

/* Read-write compressor tunables. */
SYSCTL_INT(_vm, OID_AUTO, vm_ripe_target_age_in_secs, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_ripe_target_age, 0, "");

SYSCTL_INT(_vm, OID_AUTO, compressor_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_eval_period_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_sample_min_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_min_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_sample_max_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_max_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_threshold_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_threshold_per_10msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_min_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_min_per_10msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_minorcompact_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_minorcompact_threshold_divisor, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_majorcompact_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_majorcompact_threshold_divisor, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_unthrottle_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_unthrottle_threshold_divisor, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_catchup_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_catchup_threshold_divisor, 0, "");

/* vm.swapfileprefix: the swap file name prefix; the writable length excludes
 * the trailing index suffix reserved by SWAPFILENAME_INDEX_LEN. */
SYSCTL_STRING(_vm, OID_AUTO, swapfileprefix, CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, swapfilename, sizeof(swapfilename) - SWAPFILENAME_INDEX_LEN, "");
#if CONFIG_PHANTOM_CACHE
/* Phantom-cache tunables (the matching #endif is elided in this excerpt). */
extern uint32_t phantom_cache_thrashing_threshold;
extern uint32_t phantom_cache_eval_period_in_msecs;
extern uint32_t phantom_cache_thrashing_threshold_ssd;

SYSCTL_INT(_vm, OID_AUTO, phantom_cache_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_eval_period_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold_ssd, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold_ssd, 0, "");
#if (DEVELOPMENT || DEBUG)
/* Development/debug-only read-only throttle counters. */
SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_hard,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	&vm_page_creation_throttled_hard, 0, "");

SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_soft,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	&vm_page_creation_throttled_soft, 0, "");

#endif /* DEVELOPMENT || DEBUG */
/*
 * Enable tracing of voucher contents
 */
extern uint32_t ipc_voucher_trace_contents;

SYSCTL_INT (_kern, OID_AUTO, ipc_voucher_trace_contents,
	CTLFLAG_RW | CTLFLAG_LOCKED, &ipc_voucher_trace_contents, 0, "Enable tracing voucher contents");
/*
 * Kernel stack size and depth
 */
SYSCTL_INT (_kern, OID_AUTO, stack_size,
	CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_size, 0, "Kernel stack size");
SYSCTL_INT (_kern, OID_AUTO, stack_depth_max,
	CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_depth_max, 0, "Max kernel stack depth at interrupt or context switch");
/*
 * enable back trace for port allocations
 */
extern int ipc_portbt;

SYSCTL_INT(_kern, OID_AUTO, ipc_portbt,
	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
	&ipc_portbt, 0, "");
/* kern.sched: read-only name of the active scheduler implementation. */
SYSCTL_STRING(_kern, OID_AUTO, sched,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	sched_string, sizeof(sched_string),
	"Timeshare scheduler implementation");
/*
 * Only support runtime modification on embedded platforms
 * with development config enabled
 */

/* Parameters related to timer coalescing tuning, to be replaced
 * with a dedicated systemcall in the future.
 */
/* Enable processing pending timers in the context of any other interrupt
 * Coalescing tuning parameters for various thread/task attributes */

/*
 * Shared handler for the timer-coalescing sysctls below: arg1 points at a
 * uint32_t or uint64_t abstime field (arg2 = its size); values are converted
 * to/from nanoseconds for userspace.
 * NOTE(review): excerpt is incomplete — return type, braces, declarations of
 * error/changed, the early return and the 'else' before the 64-bit store are
 * elided here.
 */
sysctl_timer_user_us_kernel_abstime SYSCTL_HANDLER_ARGS
#pragma unused(oidp)
	int size = arg2;	/* subcommand */

	uint64_t old_value_ns;
	uint64_t new_value_ns;
	uint64_t value_abstime;
	if (size == sizeof(uint32_t))
		value_abstime = *((uint32_t *)arg1);
	else if (size == sizeof(uint64_t))
		value_abstime = *((uint64_t *)arg1);
	else return ENOTSUP;	/* only 32- and 64-bit fields supported */

	absolutetime_to_nanoseconds(value_abstime, &old_value_ns);
	error = sysctl_io_number(req, old_value_ns, sizeof(old_value_ns), &new_value_ns, &changed);
	if ((error) || (!changed))

	nanoseconds_to_absolutetime(new_value_ns, &value_abstime);
	if (size == sizeof(uint32_t))
		*((uint32_t *)arg1) = (uint32_t)value_abstime;

		*((uint64_t *)arg1) = value_abstime;
/*
 * Timer coalescing tunables.  *_scale entries are plain ints backed by shift
 * fields of tcoal_prio_params; *_ns_max entries are CTLTYPE_QUAD sysctls
 * routed through sysctl_timer_user_us_kernel_abstime (arg1 = abstime field,
 * arg2 = field size).
 * NOTE(review): this excerpt elides the final format/description argument
 * line (presumably `"Q", "");`) of each SYSCTL_PROC below — confirm against
 * the full source.
 */
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_bg_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.timer_coalesce_bg_shift, 0, "");
SYSCTL_PROC(_kern, OID_AUTO, timer_resort_threshold_ns,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.timer_resort_threshold_abstime,
	sizeof(tcoal_prio_params.timer_resort_threshold_abstime),
	sysctl_timer_user_us_kernel_abstime,

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_bg_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.timer_coalesce_bg_abstime_max,
	sizeof(tcoal_prio_params.timer_coalesce_bg_abstime_max),
	sysctl_timer_user_us_kernel_abstime,

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_kt_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.timer_coalesce_kt_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_kt_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.timer_coalesce_kt_abstime_max,
	sizeof(tcoal_prio_params.timer_coalesce_kt_abstime_max),
	sysctl_timer_user_us_kernel_abstime,

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_fp_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.timer_coalesce_fp_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_fp_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.timer_coalesce_fp_abstime_max,
	sizeof(tcoal_prio_params.timer_coalesce_fp_abstime_max),
	sysctl_timer_user_us_kernel_abstime,

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_ts_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.timer_coalesce_ts_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_ts_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.timer_coalesce_ts_abstime_max,
	sizeof(tcoal_prio_params.timer_coalesce_ts_abstime_max),
	sysctl_timer_user_us_kernel_abstime,

/* Latency-QoS tiers 0-5: per-tier scale shift and abstime maximum. */
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier0_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_scale[0], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier0_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_abstime_max[0],
	sizeof(tcoal_prio_params.latency_qos_abstime_max[0]),
	sysctl_timer_user_us_kernel_abstime,

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier1_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_scale[1], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier1_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_abstime_max[1],
	sizeof(tcoal_prio_params.latency_qos_abstime_max[1]),
	sysctl_timer_user_us_kernel_abstime,

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier2_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_scale[2], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier2_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_abstime_max[2],
	sizeof(tcoal_prio_params.latency_qos_abstime_max[2]),
	sysctl_timer_user_us_kernel_abstime,

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier3_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_scale[3], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier3_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_abstime_max[3],
	sizeof(tcoal_prio_params.latency_qos_abstime_max[3]),
	sysctl_timer_user_us_kernel_abstime,

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier4_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_scale[4], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier4_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_abstime_max[4],
	sizeof(tcoal_prio_params.latency_qos_abstime_max[4]),
	sysctl_timer_user_us_kernel_abstime,

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier5_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_scale[5], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier5_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_abstime_max[5],
	sizeof(tcoal_prio_params.latency_qos_abstime_max[5]),
	sysctl_timer_user_us_kernel_abstime,
/* Communicate the "user idle level" heuristic to the timer layer, and
 * potentially other layers in the future.
 */

/*
 * Handler for machdep.user_idle_level: read via timer_get_user_idle_level(),
 * written through timer_set_user_idle_level().
 * NOTE(review): excerpt is incomplete — return type, closing braces, the
 * error return after a failed set, and the registration's `0, 0,` argument
 * line are elided here.
 */
timer_user_idle_level(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) {
	int new_value = 0, old_value = 0, changed = 0, error;

	old_value = timer_get_user_idle_level();

	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);

	if (error == 0 && changed) {
		if (timer_set_user_idle_level(new_value) != KERN_SUCCESS)

SYSCTL_PROC(_machdep, OID_AUTO, user_idle_level,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	timer_user_idle_level, "I", "User idle level heuristic, 0-128");
/* kern.hv_support: read-only flag for hypervisor framework availability. */
SYSCTL_INT(_kern, OID_AUTO, hv_support,
	CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
	&hv_support_available, 0, "");