2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
67 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
68 * support for mandatory and extensible security protections. This notice
69 * is included in support of clause 2.2 (b) of the Apple Public License,
74 * DEPRECATED sysctl system call code
76 * Everything in this file is deprecated. Sysctls should be handled
77 * by the code in kern_newsysctl.c.
78 * The remaining "case" sections are supposed to be converted into
79 * SYSCTL_*-style definitions, and as soon as all of them are gone,
80 * this source file is supposed to die.
82 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
83 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/malloc.h>
90 #include <sys/proc_internal.h>
91 #include <sys/kauth.h>
92 #include <sys/file_internal.h>
93 #include <sys/vnode_internal.h>
94 #include <sys/unistd.h>
96 #include <sys/ioctl.h>
97 #include <sys/namei.h>
99 #include <sys/disklabel.h>
101 #include <sys/sysctl.h>
102 #include <sys/user.h>
103 #include <sys/aio_kern.h>
104 #include <sys/reboot.h>
106 #include <security/audit/audit.h>
107 #include <kern/kalloc.h>
109 #include <mach/machine.h>
110 #include <mach/mach_host.h>
111 #include <mach/mach_types.h>
112 #include <mach/vm_param.h>
113 #include <kern/mach_param.h>
114 #include <kern/task.h>
115 #include <kern/thread.h>
116 #include <kern/lock.h>
117 #include <kern/processor.h>
118 #include <kern/debug.h>
119 #include <vm/vm_kern.h>
120 #include <vm/vm_map.h>
121 #include <mach/host_info.h>
123 #include <sys/mount_internal.h>
124 #include <sys/kdebug.h>
125 #include <sys/sysproto.h>
127 #include <IOKit/IOPlatformExpert.h>
128 #include <pexpert/pexpert.h>
130 #include <machine/machine_routines.h>
131 #include <machine/exec.h>
133 #include <vm/vm_protos.h>
134 #include <vm/vm_pageout.h>
135 #include <sys/imgsrc.h>
137 #if defined(__i386__) || defined(__x86_64__)
138 #include <i386/cpuid.h>
142 #include <sys/kern_memorystatus.h>
146 #include <kperf/kperf.h>
150 * deliberately setting max requests to really high number
151 * so that runaway settings do not cause MALLOC overflows
153 #define AIO_MAX_REQUESTS (128 * CONFIG_AIO_MAX)
155 extern sysctlfn net_sysctl
;
156 extern sysctlfn cpu_sysctl
;
157 extern int aio_max_requests
;
158 extern int aio_max_requests_per_process
;
159 extern int aio_worker_threads
;
160 extern int lowpri_IO_window_msecs
;
161 extern int lowpri_IO_delay_msecs
;
162 extern int nx_enabled
;
163 extern int speculative_reads_disabled
;
164 extern int ignore_is_ssd
;
165 extern unsigned int speculative_prefetch_max
;
166 extern unsigned int speculative_prefetch_max_iosize
;
167 extern unsigned int preheat_pages_max
;
168 extern unsigned int preheat_pages_min
;
169 extern long numvnodes
;
171 extern uuid_string_t bootsessionuuid_string
;
173 extern unsigned int vm_max_delayed_work_limit
;
174 extern unsigned int vm_max_batch
;
176 extern unsigned int vm_page_free_min
;
177 extern unsigned int vm_page_free_target
;
178 extern unsigned int vm_page_free_reserved
;
179 extern unsigned int vm_page_speculative_percentage
;
180 extern unsigned int vm_page_speculative_q_age_ms
;
183 * Conditionally allow dtrace to see these functions for debugging purposes.
191 #define STATIC static
194 extern boolean_t mach_timer_coalescing_enabled
;
196 extern uint64_t timer_deadline_tracking_bin_1
, timer_deadline_tracking_bin_2
;
199 fill_user32_eproc(proc_t
, struct user32_eproc
*__restrict
);
201 fill_user32_externproc(proc_t
, struct user32_extern_proc
*__restrict
);
203 fill_user64_eproc(proc_t
, struct user64_eproc
*__restrict
);
205 fill_user64_proc(proc_t
, struct user64_kinfo_proc
*__restrict
);
207 fill_user64_externproc(proc_t
, struct user64_extern_proc
*__restrict
);
209 fill_user32_proc(proc_t
, struct user32_kinfo_proc
*__restrict
);
212 kdbg_control(int *name
, u_int namelen
, user_addr_t where
, size_t * sizep
);
218 pcsamples_ops(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
,
220 __private_extern__ kern_return_t
221 reset_vmobjectcache(unsigned int val1
, unsigned int val2
);
223 sysctl_procargs(int *name
, u_int namelen
, user_addr_t where
,
224 size_t *sizep
, proc_t cur_proc
);
226 sysctl_procargsx(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
,
227 proc_t cur_proc
, int argc_yes
);
229 sysctl_struct(user_addr_t oldp
, size_t *oldlenp
, user_addr_t newp
,
230 size_t newlen
, void *sp
, int len
);
232 STATIC
int sysdoproc_filt_KERN_PROC_PID(proc_t p
, void * arg
);
233 STATIC
int sysdoproc_filt_KERN_PROC_PGRP(proc_t p
, void * arg
);
234 STATIC
int sysdoproc_filt_KERN_PROC_TTY(proc_t p
, void * arg
);
235 STATIC
int sysdoproc_filt_KERN_PROC_UID(proc_t p
, void * arg
);
236 STATIC
int sysdoproc_filt_KERN_PROC_RUID(proc_t p
, void * arg
);
238 STATIC
int sysdoproc_filt_KERN_PROC_LCID(proc_t p
, void * arg
);
240 int sysdoproc_callback(proc_t p
, void *arg
);
243 /* forward declarations for non-static STATIC */
244 STATIC
void fill_loadavg64(struct loadavg
*la
, struct user64_loadavg
*la64
);
245 STATIC
void fill_loadavg32(struct loadavg
*la
, struct user32_loadavg
*la32
);
246 STATIC
int sysctl_handle_kern_threadname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
247 STATIC
int sysctl_sched_stats(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
248 STATIC
int sysctl_sched_stats_enable(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
249 STATIC
int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
;
250 STATIC
int sysctl_dotranslate SYSCTL_HANDLER_ARGS
;
251 STATIC
int sysctl_doaffinity SYSCTL_HANDLER_ARGS
;
253 STATIC
int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
;
254 #endif /* COUNT_SYSCALLS */
255 STATIC
int sysctl_doprocargs SYSCTL_HANDLER_ARGS
;
256 STATIC
int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
;
257 STATIC
int sysctl_prochandle SYSCTL_HANDLER_ARGS
;
259 STATIC
int sysctl_dodebug SYSCTL_HANDLER_ARGS
;
261 STATIC
int sysctl_aiomax(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
262 STATIC
int sysctl_aioprocmax(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
263 STATIC
int sysctl_aiothreads(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
264 STATIC
int sysctl_maxproc(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
265 STATIC
int sysctl_osversion(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
266 STATIC
int sysctl_sysctl_bootargs(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
267 STATIC
int sysctl_maxvnodes(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
268 STATIC
int sysctl_securelvl(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
269 STATIC
int sysctl_domainname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
270 STATIC
int sysctl_hostname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
271 STATIC
int sysctl_procname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
272 STATIC
int sysctl_boottime(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
273 STATIC
int sysctl_symfile(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
275 STATIC
int sysctl_netboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
277 #ifdef CONFIG_IMGSRC_ACCESS
278 STATIC
int sysctl_imgsrcdev(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
280 STATIC
int sysctl_usrstack(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
281 STATIC
int sysctl_usrstack64(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
282 STATIC
int sysctl_coredump(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
283 STATIC
int sysctl_suid_coredump(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
284 STATIC
int sysctl_delayterm(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
285 STATIC
int sysctl_rage_vnode(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
286 STATIC
int sysctl_kern_check_openevt(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
287 STATIC
int sysctl_nx(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
288 STATIC
int sysctl_loadavg(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
289 STATIC
int sysctl_vm_toggle_address_reuse(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
290 STATIC
int sysctl_swapusage(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
291 #if defined(__i386__) || defined(__x86_64__)
292 STATIC
int sysctl_sysctl_exec_affinity(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
294 STATIC
int fetch_process_cputype( proc_t cur_proc
, int *name
, u_int namelen
, cpu_type_t
*cputype
);
295 STATIC
int sysctl_sysctl_native(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
296 STATIC
int sysctl_sysctl_cputype(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
297 STATIC
int sysctl_safeboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
298 STATIC
int sysctl_singleuser(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
299 STATIC
int sysctl_slide(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
302 extern void IORegistrySetOSBuildVersion(char * build_version
);
305 fill_loadavg64(struct loadavg
*la
, struct user64_loadavg
*la64
)
307 la64
->ldavg
[0] = la
->ldavg
[0];
308 la64
->ldavg
[1] = la
->ldavg
[1];
309 la64
->ldavg
[2] = la
->ldavg
[2];
310 la64
->fscale
= (user64_long_t
)la
->fscale
;
314 fill_loadavg32(struct loadavg
*la
, struct user32_loadavg
*la32
)
316 la32
->ldavg
[0] = la
->ldavg
[0];
317 la32
->ldavg
[1] = la
->ldavg
[1];
318 la32
->ldavg
[2] = la
->ldavg
[2];
319 la32
->fscale
= (user32_long_t
)la
->fscale
;
325 * Description: Wire down the callers address map on behalf of sysctl's
326 * that perform their own copy operations while holding
327 * locks e.g. in the paging path, which could lead to a
328 * deadlock, or while holding a spinlock.
330 * Parameters: addr User buffer address
331 * len User buffer length
334 * vslock:ENOMEM Insufficient physical pages to wire
335 * vslock:EACCES Bad protection mode
336 * vslock:EINVAL Invalid parameters
338 * Notes: This code is invoked for the first OID element where the
339 * CTLFLAG_LOCKED is not specified for a given OID node
340 * element during OID traversal, and is held for all
341 * subsequent node traversals, and only released after the
342 * leaf node handler invocation is complete.
344 * Legacy: For legacy sysctl's provided by third party code which
345 * expect funnel protection for calls into their code, this
346 * routine will also take the funnel, which will also only
347 * be released after the leaf node handler is complete.
349 * This is to support legacy 32 bit BSD KEXTs and legacy 32
350 * bit single threaded filesystem KEXTs and similar code
351 * which relies on funnel protection, e.g. for things like
352 * FSID based sysctl's.
354 * NEW CODE SHOULD NOT RELY ON THIS BEHAVIOUR! IT WILL BE
355 * REMOVED IN A FUTURE RELEASE OF Mac OS X!
357 * Bugs: This routine does nothing with the new_addr and new_len
358 * at present, but it should, since read from the user space
359 * process address space which could potentially trigger
360 * paging may also be occurring deep down. This is due to
361 * a current limitation of the vslock() routine, which will
362 * always request a wired mapping be read/write, due to not
363 * taking an access mode parameter. Note that this could
364 * also cause problems for output on architectures where
365 * write access does not require read access if the current
366 * mapping lacks read access.
368 * XXX: To be moved to kern_newsysctl.c to avoid __private_extern__
/*
 * sysctl_mem_lock
 *
 * Per the block comment above: intended to wire down the caller's
 * address map on behalf of sysctl handlers that copy while holding
 * locks.  All four parameters are marked __unused in the definition,
 * so the visible code does nothing with them.
 * NOTE(review): the function body appears to have been dropped by
 * extraction; confirm against the original file before relying on it.
 */
370 int sysctl_mem_lock(user_addr_t old_addr
, user_size_t old_len
, user_addr_t new_addr
, user_size_t new_len
);
372 sysctl_mem_lock(__unused user_addr_t old_addr
, __unused user_size_t old_len
, __unused user_addr_t new_addr
, __unused user_size_t new_len
)
/*
 * __sysctl
 *
 * Entry point for the deprecated sysctl(2) system call.  Validates the
 * OID name vector copied in from user space, performs the legacy
 * permission and MAC checks, optionally takes the funnel and wires the
 * user "old" buffer for CTL_VFS/CTL_VM, dispatches to vfs_sysctl() or
 * falls back to userland_sysctl(), and writes the consumed/required
 * length back through uap->oldlenp.
 * NOTE(review): extraction appears to have dropped lines from this
 * function (braces, returns, error paths); the text below is not
 * compilable as-is -- verify against the original file.
 */
381 /* sysctl() syscall */
383 __sysctl(proc_t p
, struct __sysctl_args
*uap
, __unused
int32_t *retval
)
385 boolean_t funnel_state
= FALSE
; /* not held if unknown */
387 size_t savelen
= 0, oldlen
= 0, newlen
;
388 int name
[CTL_MAXNAME
];
390 boolean_t vslock_taken
= FALSE
;
391 boolean_t funnel_taken
= FALSE
;
393 kauth_cred_t my_cred
;
397 * all top-level sysctl names are non-terminal
399 if (uap
->namelen
> CTL_MAXNAME
|| uap
->namelen
< 2)
401 error
= copyin(uap
->name
, &name
[0], uap
->namelen
* sizeof(int));
405 AUDIT_ARG(ctlname
, name
, uap
->namelen
);
407 if (proc_is64bit(p
)) {
408 /* uap->newlen is a size_t value which grows to 64 bits
409 * when coming from a 64-bit process. since it's doubtful we'll
410 * have a sysctl newp buffer greater than 4GB we shrink it to size_t
412 newlen
= CAST_DOWN(size_t, uap
->newlen
);
415 newlen
= uap
->newlen
;
419 * XXX TODO: push down rights check for CTL_HW OIDs; most duplicate
420 * XXX it anyway, which is a performance sink, and requires use
421 * XXX of SUID root programs (see <rdar://3915692>).
423 * Note: Opt out of non-leaf node enforcement by removing this
424 * check for the top level OID value, and then adding
425 * CTLFLAG_ANYBODY to the leaf nodes in question. Enforce as
426 * suser for writes in leaf nodes by omitting this flag.
427 * Enforce with a higher granularity by making the leaf node
428 * of type SYSCTL_PROC() in order to provide a procedural
429 * enforcement call site.
431 * NOTE: This function is called prior to any subfunctions being
432 * called with a fallback to userland_sysctl(); as such, this
433 * permissions check here will veto the fallback operation.
435 /* CTL_UNSPEC is used to get oid to AUTO_OID */
436 if (uap
->new != USER_ADDR_NULL
437 && ((name
[0] == CTL_HW
)
438 || (name
[0] == CTL_VM
))
439 && (error
= suser(kauth_cred_get(), &p
->p_acflag
)))
442 // XXX need to relocate into each terminal instead of leaving this here...
443 // XXX macf preemptory check.
445 my_cred
= kauth_cred_proc_ref(p
);
446 error
= mac_system_check_sysctl(
452 0, /* XXX 1 for CTL_KERN checks */
456 kauth_cred_unref(&my_cred
);
/* fetch the user's old-buffer length; 64-bit reads are clamped below */
461 if (uap
->oldlenp
!= USER_ADDR_NULL
) {
462 uint64_t oldlen64
= fuulong(uap
->oldlenp
);
464 oldlen
= CAST_DOWN(size_t, oldlen64
);
466 * If more than 4G, clamp to 4G - useracc() below will catch
467 * with an EFAULT, if it's actually necessary.
469 if (oldlen64
> 0x00000000ffffffffULL
)
470 oldlen
= 0xffffffffUL
;
473 if ((name
[0] == CTL_VFS
|| name
[0] == CTL_VM
)) {
475 * Always take the funnel for CTL_VFS and CTL_VM
477 * XXX We should also take it for any OID without the
478 * XXX CTLFLAG_LOCKED set on it; fix this later!
480 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
484 * XXX Take the vslock() only when we are copying out; this
485 * XXX erroneously assumes that the copy in will not cause
486 * XXX a fault if called from the paging path due to the
487 * XXX having been recently touched in order to establish
488 * XXX the input data. This is a bad assumption.
490 * Note: This is overkill, but third parties might
491 * already call sysctl internally in KEXTs that
492 * implement mass storage drivers. If you are
493 * writing a new KEXT, don't do that.
495 if(uap
->old
!= USER_ADDR_NULL
) {
496 if (!useracc(uap
->old
, (user_size_t
)oldlen
, B_WRITE
)) {
497 thread_funnel_set(kernel_flock
, funnel_state
);
502 if ((error
= vslock(uap
->old
, (user_size_t
)oldlen
))) {
503 thread_funnel_set(kernel_flock
, funnel_state
);
513 * XXX convert vfs_sysctl subelements to newsysctl; this is hard
514 * XXX because of VFS_NUMMNTOPS being top level.
517 if (name
[0] == CTL_VFS
) {
518 error
= vfs_sysctl(name
+ 1, uap
->namelen
- 1, uap
->old
,
519 &oldlen
, uap
->new, newlen
, p
);
/* if the user buffer was wired above, unwire it before returning */
522 if (vslock_taken
== TRUE
) {
523 error1
= vsunlock(uap
->old
, (user_size_t
)savelen
, B_WRITE
);
/* unknown to the legacy switch: fall back to the new sysctl code */
528 if ( (name
[0] != CTL_VFS
) && (error
== ENOTSUP
) ) {
530 error
= userland_sysctl(p
, name
, uap
->namelen
, uap
->old
, &tmp
,
531 uap
->new, newlen
, &oldlen
);
535 * If we took the funnel, which we only do for CTL_VFS and CTL_VM on
536 * 32 bit architectures, then drop it.
538 * XXX the grabbing and dropping need to move into the leaf nodes,
539 * XXX for sysctl's that are not marked CTLFLAG_LOCKED, but this is
540 * XXX true for the vslock, as well. We have a start at a routine
541 * to wrapper this (above), but it's not turned on. The current code
542 * removed the funnel and the vslock() from all but these two top
543 * level OIDs. Note that VFS only needs to take the funnel if the FS
544 * against which it's operating is not thread safe (but since an FS
545 * can be in the paging path, it still needs to take the vslock()).
548 thread_funnel_set(kernel_flock
, funnel_state
);
550 if ((error
) && (error
!= ENOMEM
))
553 if (uap
->oldlenp
!= USER_ADDR_NULL
)
554 error
= suulong(uap
->oldlenp
, oldlen
);
/*
 * Attributes stored in the kernel: core-dump controls defined elsewhere
 * (corefilename, do_coredump, sugid_coredump), the COUNT_SYSCALLS
 * toggle, and the BSD securelevel, which is defined here and starts at
 * -1 (permanently insecure mode, per kern.securelevel convention).
 */
560 * Attributes stored in the kernel.
562 extern char corefilename
[MAXPATHLEN
+1];
563 extern int do_coredump
;
564 extern int sugid_coredump
;
567 extern int do_count_syscalls
;
571 int securelevel
= -1;
/*
 * sysctl_doaffinity
 *
 * Legacy KERN_AFFINITY handler.  With OID argument {0} it reports
 * whether the calling process has P_AFFINITY set (via sysctl_rdint());
 * with a leading 1 (two-element name) it atomically clears or sets
 * P_AFFINITY on the caller's p_flag.  On success the request's oldidx
 * is advanced so the consumed/required amount is reported correctly.
 * NOTE(review): extraction appears to have dropped lines here (braces,
 * returns); the text is not compilable as-is.
 */
577 sysctl_doaffinity SYSCTL_HANDLER_ARGS
579 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
580 int *name
= arg1
; /* oid element argument vector */
581 int namelen
= arg2
; /* number of oid element arguments */
582 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
583 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
584 user_addr_t newp
= req
->newptr
; /* user buffer copy in address */
585 // size_t newlen = req->newlen; /* user buffer copy in size */
587 int error
= ENOTSUP
; /* Default to failure */
589 proc_t cur_proc
= current_proc();
594 if (name
[0] == 0 && 1 == namelen
) {
595 error
= sysctl_rdint(oldp
, oldlenp
, newp
,
596 (cur_proc
->p_flag
& P_AFFINITY
) ? 1 : 0);
597 } else if (name
[0] == 1 && 2 == namelen
) {
599 OSBitAndAtomic(~((uint32_t)P_AFFINITY
), &cur_proc
->p_flag
);
601 OSBitOrAtomic(P_AFFINITY
, &cur_proc
->p_flag
);
606 /* adjust index so we return the right required/consumed amount */
608 req
->oldidx
+= req
->oldlen
;
/* kern.affinity: legacy node-style OID dispatched to sysctl_doaffinity() */
612 SYSCTL_PROC(_kern
, KERN_AFFINITY
, affinity
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
613 0, /* Pointer argument (arg1) */
614 0, /* Integer argument (arg2) */
615 sysctl_doaffinity
, /* Handler function */
616 NULL
, /* Data pointer */
/*
 * sysctl_dotranslate
 *
 * Legacy KERN_TRANSLATE handler: reports (as 0/1 via sysctl_rdint())
 * whether the process named by name[0] has P_TRANSLATED set.  The
 * caller must either own the target process (matching uid) or pass the
 * suser() check; credentials are taken and dropped around the uid
 * lookup via kauth_cred_proc_ref()/kauth_cred_unref().
 * NOTE(review): extraction appears to have dropped lines here
 * (proc_rele, returns, braces); not compilable as-is.
 */
620 sysctl_dotranslate SYSCTL_HANDLER_ARGS
622 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
623 int *name
= arg1
; /* oid element argument vector */
624 int namelen
= arg2
; /* number of oid element arguments */
625 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
626 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
627 user_addr_t newp
= req
->newptr
; /* user buffer copy in address */
628 // size_t newlen = req->newlen; /* user buffer copy in size */
631 proc_t cur_proc
= current_proc();
633 int istranslated
= 0;
634 kauth_cred_t my_cred
;
640 p
= proc_find(name
[0]);
644 my_cred
= kauth_cred_proc_ref(p
);
645 uid
= kauth_cred_getuid(my_cred
);
646 kauth_cred_unref(&my_cred
);
647 if ((uid
!= kauth_cred_getuid(kauth_cred_get()))
648 && suser(kauth_cred_get(), &cur_proc
->p_acflag
)) {
653 istranslated
= (p
->p_flag
& P_TRANSLATED
);
655 error
= sysctl_rdint(oldp
, oldlenp
, newp
,
656 (istranslated
!= 0) ? 1 : 0);
658 /* adjust index so we return the right required/consumed amount */
660 req
->oldidx
+= req
->oldlen
;
665 * XXX make CTLFLAG_RW so sysctl_rdint() will EPERM on attempts to write;
666 * XXX this may not be necessary.
/* kern.translate: legacy node-style OID dispatched to sysctl_dotranslate() */
668 SYSCTL_PROC(_kern
, KERN_TRANSLATE
, translate
, CTLTYPE_NODE
|CTLFLAG_RW
| CTLFLAG_LOCKED
,
669 0, /* Pointer argument (arg1) */
670 0, /* Integer argument (arg2) */
671 sysctl_dotranslate
, /* Handler function */
672 NULL
, /* Data pointer */
/*
 * sysctl_handle_kern_threadname
 *
 * kern.threadname handler: reads or sets the calling thread's name,
 * stored in uthread->pth_name.  Reads copy out up to strlen(pth_name)
 * bytes (no NUL terminator) and set oldidx to that length, strlen-style.
 * Writes accept at most MAXTHREADNAMESIZE - 1 bytes; pth_name is
 * kalloc'd on first use and bzero'd before the copyin so the stored
 * name is always NUL-terminated.
 * NOTE(review): extraction appears to have dropped lines here (error
 * returns, braces); not compilable as-is.
 */
676 sysctl_handle_kern_threadname( __unused
struct sysctl_oid
*oidp
, __unused
void *arg1
,
677 __unused
int arg2
, struct sysctl_req
*req
)
680 struct uthread
*ut
= get_bsdthread_info(current_thread());
681 user_addr_t oldp
=0, newp
=0;
682 size_t *oldlenp
=NULL
;
686 oldlenp
= &(req
->oldlen
);
688 newlen
= req
->newlen
;
690 /* We want the current length, and maybe the string itself */
692 /* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */
693 size_t currlen
= MAXTHREADNAMESIZE
- 1;
696 /* use length of current thread name */
697 currlen
= strlen(ut
->pth_name
);
699 if(*oldlenp
< currlen
)
701 /* NOTE - we do not copy the NULL terminator */
703 error
= copyout(ut
->pth_name
,oldp
,currlen
);
708 /* return length of thread name minus NULL terminator (just like strlen) */
709 req
->oldidx
= currlen
;
712 /* We want to set the name to something */
715 if(newlen
> (MAXTHREADNAMESIZE
- 1))
719 ut
->pth_name
= (char*)kalloc( MAXTHREADNAMESIZE
);
723 bzero(ut
->pth_name
, MAXTHREADNAMESIZE
);
724 error
= copyin(newp
, ut
->pth_name
, newlen
);
/* kern.threadname: string OID, any user may read/write their own thread name */
732 SYSCTL_PROC(_kern
, KERN_THREADNAME
, threadname
, CTLFLAG_ANYBODY
| CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, 0, sysctl_handle_kern_threadname
,"A","");
/*
 * sysctl_sched_stats
 *
 * kern.sched_stats handler: sizes a buffer for one
 * _processor_statistics_np record per logical CPU plus two extra (the
 * RT queue and the Fair Share queue, per the inline comment), fills it
 * via get_sched_statistics(), and copies it out with
 * sysctl_io_opaque().  Fails if the caller's buffer is too small, and
 * panics if the opaque copy reports the data changed underneath it.
 * NOTE(review): extraction appears to have dropped lines here (FREE of
 * buf, returns, braces); not compilable as-is.
 */
736 sysctl_sched_stats(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
738 host_basic_info_data_t hinfo
;
742 mach_msg_type_number_t count
= HOST_BASIC_INFO_COUNT
;
743 struct _processor_statistics_np
*buf
;
746 kret
= host_info((host_t
)BSD_HOST
, HOST_BASIC_INFO
, (host_info_t
)&hinfo
, &count
);
747 if (kret
!= KERN_SUCCESS
) {
751 size
= sizeof(struct _processor_statistics_np
) * (hinfo
.logical_cpu_max
+ 2); /* One for RT Queue, One for Fair Share Queue */
753 if (req
->oldlen
< size
) {
757 MALLOC(buf
, struct _processor_statistics_np
*, size
, M_TEMP
, M_ZERO
| M_WAITOK
);
759 kret
= get_sched_statistics(buf
, &size
);
760 if (kret
!= KERN_SUCCESS
) {
765 error
= sysctl_io_opaque(req
, buf
, size
, &changed
);
771 panic("Sched info changed?!");
778 SYSCTL_PROC(_kern
, OID_AUTO
, sched_stats
, CTLFLAG_LOCKED
, 0, 0, sysctl_sched_stats
, "-", "");
/*
 * sysctl_sched_stats_enable
 *
 * Write-only kern.sched_stats_enable handler: requires exactly
 * sizeof(active) bytes of new data, copies in the flag, and forwards
 * it to set_sched_stats_active().
 * NOTE(review): 'active'/'res' declarations appear dropped by
 * extraction; not compilable as-is.
 */
781 sysctl_sched_stats_enable(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, __unused
struct sysctl_req
*req
)
786 if (req
->newlen
!= sizeof(active
)) {
790 res
= copyin(req
->newptr
, &active
, sizeof(active
));
795 return set_sched_stats_active(active
);
798 SYSCTL_PROC(_kern
, OID_AUTO
, sched_stats_enable
, CTLFLAG_LOCKED
| CTLFLAG_WR
, 0, 0, sysctl_sched_stats_enable
, "-", "");
800 extern int get_kernel_symfile(proc_t
, char **);
803 #define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)
806 extern int syscalls_log
[];
807 extern const char *syscallnames
[];
/*
 * sysctl_docountsyscalls
 *
 * kern.count_calls handler (COUNT_SYSCALLS builds): reads an integer
 * control value via sysctl_int() and toggles per-syscall call counting
 * in do_count_syscalls; values 2/3 additionally walk syscalls_log[]
 * (printing each non-zero count with its syscallnames[] entry) per the
 * comment block below.  On success oldidx is advanced to report the
 * consumed amount.
 * NOTE(review): extraction appears to have dropped lines here (the
 * tmp/i declarations, reset logic, braces); not compilable as-is.
 */
810 sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
812 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
813 __unused
int *name
= arg1
; /* oid element argument vector */
814 __unused
int namelen
= arg2
; /* number of oid element arguments */
815 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
816 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
817 user_addr_t newp
= req
->newptr
; /* user buffer copy in address */
818 size_t newlen
= req
->newlen
; /* user buffer copy in size */
823 /* valid values passed in:
824 * = 0 means don't keep called counts for each bsd syscall
825 * > 0 means keep called counts for each bsd syscall
826 * = 2 means dump current counts to the system log
827 * = 3 means reset all counts
828 * for example, to dump current counts:
829 * sysctl -w kern.count_calls=2
831 error
= sysctl_int(oldp
, oldlenp
, newp
, newlen
, &tmp
);
837 do_count_syscalls
= 1;
839 else if ( tmp
== 0 || tmp
== 2 || tmp
== 3 ) {
841 for ( i
= 0; i
< nsysent
; i
++ ) {
842 if ( syscalls_log
[i
] != 0 ) {
844 printf("%d calls - name %s \n", syscalls_log
[i
], syscallnames
[i
]);
852 do_count_syscalls
= 1;
856 /* adjust index so we return the right required/consumed amount */
858 req
->oldidx
+= req
->oldlen
;
/* kern.count_calls: legacy node-style OID for syscall call counting */
862 SYSCTL_PROC(_kern
, KERN_COUNT_SYSCALLS
, count_syscalls
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
863 0, /* Pointer argument (arg1) */
864 0, /* Integer argument (arg2) */
865 sysctl_docountsyscalls
, /* Handler function */
866 NULL
, /* Data pointer */
868 #endif /* COUNT_SYSCALLS */
/*
 * Debugging related system variables: twenty ctldebug slots
 * (debug0..debug19) that kernel code can point at a name/value pair,
 * collected into debugvars[] and indexed by the CTL_DEBUG_* OID
 * subcommand in sysctl_dodebug() below.
 */
872 * Debugging related system variables.
876 #endif /* DIAGNOSTIC */
877 struct ctldebug debug0
, debug1
;
878 struct ctldebug debug2
, debug3
, debug4
;
879 struct ctldebug debug5
, debug6
, debug7
, debug8
, debug9
;
880 struct ctldebug debug10
, debug11
, debug12
, debug13
, debug14
;
881 struct ctldebug debug15
, debug16
, debug17
, debug18
, debug19
;
882 STATIC
struct ctldebug
*debugvars
[CTL_DEBUG_MAXID
] = {
883 &debug0
, &debug1
, &debug2
, &debug3
, &debug4
,
884 &debug5
, &debug6
, &debug7
, &debug8
, &debug9
,
885 &debug10
, &debug11
, &debug12
, &debug13
, &debug14
,
886 &debug15
, &debug16
, &debug17
, &debug18
, &debug19
,
/*
 * sysctl_dodebug
 *
 * Handler for the debug.* legacy OIDs: validates the slot index (cmd)
 * against CTL_DEBUG_MAXID, looks up the registered ctldebug entry in
 * debugvars[], and either returns its name via sysctl_rdstring()
 * (CTL_DEBUG_NAME arg2) or reads/writes its integer value via
 * sysctl_int() (CTL_DEBUG_VALUE arg2).  Unregistered slots
 * (debugname == 0) are rejected.
 * NOTE(review): extraction appears to have dropped lines here (the
 * switch head, returns, braces); not compilable as-is.
 */
889 sysctl_dodebug SYSCTL_HANDLER_ARGS
891 int cmd
= oidp
->oid_arg2
; /* subcommand*/
892 int *name
= arg1
; /* oid element argument vector */
893 int namelen
= arg2
; /* number of oid element arguments */
894 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
895 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
896 user_addr_t newp
= req
->newptr
; /* user buffer copy in address */
897 size_t newlen
= req
->newlen
; /* user buffer copy in size */
900 struct ctldebug
*cdp
;
902 /* all sysctl names at this level are name and field */
904 return (ENOTSUP
); /* overloaded */
905 if (cmd
< 0 || cmd
>= CTL_DEBUG_MAXID
)
907 cdp
= debugvars
[cmd
];
908 if (cdp
->debugname
== 0)
912 error
= sysctl_rdstring(oldp
, oldlenp
, newp
, cdp
->debugname
);
914 case CTL_DEBUG_VALUE
:
915 error
= sysctl_int(oldp
, oldlenp
, newp
, newlen
, cdp
->debugvar
);
922 /* adjust index so we return the right required/consumed amount */
924 req
->oldidx
+= req
->oldlen
;
929 * XXX We mark this RW instead of RD to let sysctl_rdstring() return the
930 * XXX historical error.
/* debug.name / debug.value: both dispatch to sysctl_dodebug(), selected by arg2 */
932 SYSCTL_PROC(_debug
, CTL_DEBUG_NAME
, name
, CTLTYPE_NODE
|CTLFLAG_RW
| CTLFLAG_LOCKED
,
933 0, /* Pointer argument (arg1) */
934 CTL_DEBUG_NAME
, /* Integer argument (arg2) */
935 sysctl_dodebug
, /* Handler function */
936 NULL
, /* Data pointer */
938 SYSCTL_PROC(_debug
, CTL_DEBUG_VALUE
, value
, CTLTYPE_NODE
|CTLFLAG_RW
| CTLFLAG_LOCKED
,
939 0, /* Pointer argument (arg1) */
940 CTL_DEBUG_VALUE
, /* Integer argument (arg2) */
941 sysctl_dodebug
, /* Handler function */
942 NULL
, /* Data pointer */
947 * The following sysctl_* functions should not be used
948 * any more, as they can only cope with callers in
949 * user mode: Use new-style
957 * Validate parameters and get old / set new parameters
958 * for an integer-valued sysctl function.
961 sysctl_int(user_addr_t oldp
, size_t *oldlenp
,
962 user_addr_t newp
, size_t newlen
, int *valp
)
966 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
968 if (oldp
&& *oldlenp
< sizeof(int))
970 if (newp
&& newlen
!= sizeof(int))
972 *oldlenp
= sizeof(int);
974 error
= copyout(valp
, oldp
, sizeof(int));
975 if (error
== 0 && newp
) {
976 error
= copyin(newp
, valp
, sizeof(int));
977 AUDIT_ARG(value32
, *valp
);
983 * As above, but read-only.
986 sysctl_rdint(user_addr_t oldp
, size_t *oldlenp
, user_addr_t newp
, int val
)
990 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
992 if (oldp
&& *oldlenp
< sizeof(int))
996 *oldlenp
= sizeof(int);
998 error
= copyout((caddr_t
)&val
, oldp
, sizeof(int));
1003 * Validate parameters and get old / set new parameters
1004 * for an quad(64bit)-valued sysctl function.
1007 sysctl_quad(user_addr_t oldp
, size_t *oldlenp
,
1008 user_addr_t newp
, size_t newlen
, quad_t
*valp
)
1012 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1014 if (oldp
&& *oldlenp
< sizeof(quad_t
))
1016 if (newp
&& newlen
!= sizeof(quad_t
))
1018 *oldlenp
= sizeof(quad_t
);
1020 error
= copyout(valp
, oldp
, sizeof(quad_t
));
1021 if (error
== 0 && newp
)
1022 error
= copyin(newp
, valp
, sizeof(quad_t
));
1027 * As above, but read-only.
1030 sysctl_rdquad(user_addr_t oldp
, size_t *oldlenp
, user_addr_t newp
, quad_t val
)
1034 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1036 if (oldp
&& *oldlenp
< sizeof(quad_t
))
1040 *oldlenp
= sizeof(quad_t
);
1042 error
= copyout((caddr_t
)&val
, oldp
, sizeof(quad_t
));
1047 * Validate parameters and get old / set new parameters
1048 * for a string-valued sysctl function. Unlike sysctl_string, if you
1049 * give it a too small (but larger than 0 bytes) buffer, instead of
1050 * returning ENOMEM, it truncates the returned string to the buffer
1051 * size. This preserves the semantics of some library routines
1052 * implemented via sysctl, which truncate their returned data, rather
1053 * than simply returning an error. The returned string is always NUL
1057 sysctl_trstring(user_addr_t oldp
, size_t *oldlenp
,
1058 user_addr_t newp
, size_t newlen
, char *str
, int maxlen
)
1060 int len
, copylen
, error
= 0;
1062 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1064 copylen
= len
= strlen(str
) + 1;
1065 if (oldp
&& (len
< 0 || *oldlenp
< 1))
1067 if (oldp
&& (*oldlenp
< (size_t)len
))
1068 copylen
= *oldlenp
+ 1;
1069 if (newp
&& (maxlen
< 0 || newlen
>= (size_t)maxlen
))
1071 *oldlenp
= copylen
- 1; /* deal with NULL strings correctly */
1073 error
= copyout(str
, oldp
, copylen
);
1075 unsigned char c
= 0;
1078 error
= copyout((void *)&c
, oldp
, sizeof(char));
1081 if (error
== 0 && newp
) {
1082 error
= copyin(newp
, str
, newlen
);
1084 AUDIT_ARG(text
, (char *)str
);
1090 * Validate parameters and get old / set new parameters
1091 * for a string-valued sysctl function.
1094 sysctl_string(user_addr_t oldp
, size_t *oldlenp
,
1095 user_addr_t newp
, size_t newlen
, char *str
, int maxlen
)
1099 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1101 len
= strlen(str
) + 1;
1102 if (oldp
&& (len
< 0 || *oldlenp
< (size_t)len
))
1104 if (newp
&& (maxlen
< 0 || newlen
>= (size_t)maxlen
))
1106 *oldlenp
= len
-1; /* deal with NULL strings correctly */
1108 error
= copyout(str
, oldp
, len
);
1110 if (error
== 0 && newp
) {
1111 error
= copyin(newp
, str
, newlen
);
1113 AUDIT_ARG(text
, (char *)str
);
1119 * As above, but read-only.
1122 sysctl_rdstring(user_addr_t oldp
, size_t *oldlenp
,
1123 user_addr_t newp
, char *str
)
1127 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1129 len
= strlen(str
) + 1;
1130 if (oldp
&& *oldlenp
< (size_t)len
)
1136 error
= copyout(str
, oldp
, len
);
1141 * Validate parameters and get old / set new parameters
1142 * for a structure oriented sysctl function.
1145 sysctl_struct(user_addr_t oldp
, size_t *oldlenp
,
1146 user_addr_t newp
, size_t newlen
, void *sp
, int len
)
1150 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1152 if (oldp
&& (len
< 0 || *oldlenp
< (size_t)len
))
1154 if (newp
&& (len
< 0 || newlen
> (size_t)len
))
1158 error
= copyout(sp
, oldp
, len
);
1160 if (error
== 0 && newp
)
1161 error
= copyin(newp
, sp
, len
);
1166 * Validate parameters and get old parameters
1167 * for a structure oriented sysctl function.
1170 sysctl_rdstruct(user_addr_t oldp
, size_t *oldlenp
,
1171 user_addr_t newp
, void *sp
, int len
)
1175 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1177 if (oldp
&& (len
< 0 || *oldlenp
< (size_t)len
))
1183 error
= copyout(sp
, oldp
, len
);
1188 sysdoproc_filt_KERN_PROC_PID(proc_t p
, void * arg
)
1190 if (p
->p_pid
!= (pid_t
)*(int*)arg
)
1197 sysdoproc_filt_KERN_PROC_PGRP(proc_t p
, void * arg
)
1199 if (p
->p_pgrpid
!= (pid_t
)*(int*)arg
)
1206 sysdoproc_filt_KERN_PROC_TTY(proc_t p
, void * arg
)
1208 boolean_t funnel_state
;
1213 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
1214 /* This is very racy but list lock is held.. Hmmm. */
1215 if ((p
->p_flag
& P_CONTROLT
) == 0 ||
1216 (p
->p_pgrp
== NULL
) || (p
->p_pgrp
->pg_session
== NULL
) ||
1217 (tp
= SESSION_TP(p
->p_pgrp
->pg_session
)) == TTY_NULL
||
1218 tp
->t_dev
!= (dev_t
)*(int*)arg
)
1223 thread_funnel_set(kernel_flock
, funnel_state
);
1229 sysdoproc_filt_KERN_PROC_UID(proc_t p
, void * arg
)
1231 kauth_cred_t my_cred
;
1234 if (p
->p_ucred
== NULL
)
1236 my_cred
= kauth_cred_proc_ref(p
);
1237 uid
= kauth_cred_getuid(my_cred
);
1238 kauth_cred_unref(&my_cred
);
1240 if (uid
!= (uid_t
)*(int*)arg
)
1248 sysdoproc_filt_KERN_PROC_RUID(proc_t p
, void * arg
)
1250 kauth_cred_t my_cred
;
1253 if (p
->p_ucred
== NULL
)
1255 my_cred
= kauth_cred_proc_ref(p
);
1256 ruid
= kauth_cred_getruid(my_cred
);
1257 kauth_cred_unref(&my_cred
);
1259 if (ruid
!= (uid_t
)*(int*)arg
)
1267 sysdoproc_filt_KERN_PROC_LCID(proc_t p
, void * arg
)
1269 if ((p
->p_lctx
== NULL
) ||
1270 (p
->p_lctx
->lc_id
!= (pid_t
)*(int*)arg
))
1278 * try over estimating by 5 procs
1280 #define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
1281 struct sysdoproc_args
{
1284 boolean_t is_64_bit
;
1296 sysdoproc_callback(proc_t p
, void *arg
)
1298 struct sysdoproc_args
*args
= arg
;
1300 if (args
->buflen
>= args
->sizeof_kproc
) {
1301 if ((args
->ruidcheck
!= 0) && (sysdoproc_filt_KERN_PROC_RUID(p
, &args
->uidval
) == 0))
1302 return (PROC_RETURNED
);
1303 if ((args
->uidcheck
!= 0) && (sysdoproc_filt_KERN_PROC_UID(p
, &args
->uidval
) == 0))
1304 return (PROC_RETURNED
);
1305 if ((args
->ttycheck
!= 0) && (sysdoproc_filt_KERN_PROC_TTY(p
, &args
->uidval
) == 0))
1306 return (PROC_RETURNED
);
1308 bzero(args
->kprocp
, args
->sizeof_kproc
);
1309 if (args
->is_64_bit
)
1310 fill_user64_proc(p
, args
->kprocp
);
1312 fill_user32_proc(p
, args
->kprocp
);
1313 int error
= copyout(args
->kprocp
, args
->dp
, args
->sizeof_kproc
);
1315 *args
->errorp
= error
;
1316 return (PROC_RETURNED_DONE
);
1318 args
->dp
+= args
->sizeof_kproc
;
1319 args
->buflen
-= args
->sizeof_kproc
;
1321 args
->needed
+= args
->sizeof_kproc
;
1322 return (PROC_RETURNED
);
1325 SYSCTL_NODE(_kern
, KERN_PROC
, proc
, CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, "");
1327 sysctl_prochandle SYSCTL_HANDLER_ARGS
1329 int cmd
= oidp
->oid_arg2
; /* subcommand for multiple nodes */
1330 int *name
= arg1
; /* oid element argument vector */
1331 int namelen
= arg2
; /* number of oid element arguments */
1332 user_addr_t where
= req
->oldptr
;/* user buffer copy out address */
1334 user_addr_t dp
= where
;
1336 int buflen
= where
!= USER_ADDR_NULL
? req
->oldlen
: 0;
1338 boolean_t is_64_bit
= proc_is64bit(current_proc());
1339 struct user32_kinfo_proc user32_kproc
;
1340 struct user64_kinfo_proc user_kproc
;
1343 int (*filterfn
)(proc_t
, void *) = 0;
1344 struct sysdoproc_args args
;
1349 if (namelen
!= 1 && !(namelen
== 0 && cmd
== KERN_PROC_ALL
))
1353 sizeof_kproc
= sizeof(user_kproc
);
1354 kprocp
= &user_kproc
;
1356 sizeof_kproc
= sizeof(user32_kproc
);
1357 kprocp
= &user32_kproc
;
1363 filterfn
= sysdoproc_filt_KERN_PROC_PID
;
1366 case KERN_PROC_PGRP
:
1367 filterfn
= sysdoproc_filt_KERN_PROC_PGRP
;
1378 case KERN_PROC_RUID
:
1383 case KERN_PROC_LCID
:
1384 filterfn
= sysdoproc_filt_KERN_PROC_LCID
;
1391 /* must be kern.proc.<unknown> */
1396 args
.buflen
= buflen
;
1397 args
.kprocp
= kprocp
;
1398 args
.is_64_bit
= is_64_bit
;
1400 args
.needed
= needed
;
1401 args
.errorp
= &error
;
1402 args
.uidcheck
= uidcheck
;
1403 args
.ruidcheck
= ruidcheck
;
1404 args
.ttycheck
= ttycheck
;
1405 args
.sizeof_kproc
= sizeof_kproc
;
1407 args
.uidval
= name
[0];
1409 proc_iterate((PROC_ALLPROCLIST
| PROC_ZOMBPROCLIST
),
1410 sysdoproc_callback
, &args
, filterfn
, name
);
1416 needed
= args
.needed
;
1418 if (where
!= USER_ADDR_NULL
) {
1419 req
->oldlen
= dp
- where
;
1420 if (needed
> req
->oldlen
)
1423 needed
+= KERN_PROCSLOP
;
1424 req
->oldlen
= needed
;
1426 /* adjust index so we return the right required/consumed amount */
1427 req
->oldidx
+= req
->oldlen
;
1432 * We specify the subcommand code for multiple nodes as the 'req->arg2' value
1433 * in the sysctl declaration itself, which comes into the handler function
1434 * as 'oidp->oid_arg2'.
1436 * For these particular sysctls, since they have well known OIDs, we could
1437 * have just obtained it from the '((int *)arg1)[0]' parameter, but that would
1438 * not demonstrate how to handle multiple sysctls that used OID_AUTO instead
1439 * of a well known value with a common handler function. This is desirable,
1440 * because we want well known values to "go away" at some future date.
1442 * It should be noted that the value of '((int *)arg1)[1]' is used for many
1443 * an integer parameter to the subcommand for many of these sysctls; we'd
1444 * rather have used '((int *)arg1)[0]' for that, or even better, an element
1445 * in a structure passed in as the the 'newp' argument to sysctlbyname(3),
1446 * and then use leaf-node permissions enforcement, but that would have
1447 * necessitated modifying user space code to correspond to the interface
1448 * change, and we are striving for binary backward compatibility here; even
1449 * though these are SPI, and not intended for use by user space applications
1450 * which are not themselves system tools or libraries, some applications
1451 * have erroneously used them.
1453 SYSCTL_PROC(_kern_proc
, KERN_PROC_ALL
, all
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1454 0, /* Pointer argument (arg1) */
1455 KERN_PROC_ALL
, /* Integer argument (arg2) */
1456 sysctl_prochandle
, /* Handler function */
1457 NULL
, /* Data is size variant on ILP32/LP64 */
1459 SYSCTL_PROC(_kern_proc
, KERN_PROC_PID
, pid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1460 0, /* Pointer argument (arg1) */
1461 KERN_PROC_PID
, /* Integer argument (arg2) */
1462 sysctl_prochandle
, /* Handler function */
1463 NULL
, /* Data is size variant on ILP32/LP64 */
1465 SYSCTL_PROC(_kern_proc
, KERN_PROC_TTY
, tty
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1466 0, /* Pointer argument (arg1) */
1467 KERN_PROC_TTY
, /* Integer argument (arg2) */
1468 sysctl_prochandle
, /* Handler function */
1469 NULL
, /* Data is size variant on ILP32/LP64 */
1471 SYSCTL_PROC(_kern_proc
, KERN_PROC_PGRP
, pgrp
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1472 0, /* Pointer argument (arg1) */
1473 KERN_PROC_PGRP
, /* Integer argument (arg2) */
1474 sysctl_prochandle
, /* Handler function */
1475 NULL
, /* Data is size variant on ILP32/LP64 */
1477 SYSCTL_PROC(_kern_proc
, KERN_PROC_UID
, uid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1478 0, /* Pointer argument (arg1) */
1479 KERN_PROC_UID
, /* Integer argument (arg2) */
1480 sysctl_prochandle
, /* Handler function */
1481 NULL
, /* Data is size variant on ILP32/LP64 */
1483 SYSCTL_PROC(_kern_proc
, KERN_PROC_RUID
, ruid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1484 0, /* Pointer argument (arg1) */
1485 KERN_PROC_RUID
, /* Integer argument (arg2) */
1486 sysctl_prochandle
, /* Handler function */
1487 NULL
, /* Data is size variant on ILP32/LP64 */
1489 SYSCTL_PROC(_kern_proc
, KERN_PROC_LCID
, lcid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1490 0, /* Pointer argument (arg1) */
1491 KERN_PROC_LCID
, /* Integer argument (arg2) */
1492 sysctl_prochandle
, /* Handler function */
1493 NULL
, /* Data is size variant on ILP32/LP64 */
1498 * Fill in non-zero fields of an eproc structure for the specified process.
1501 fill_user32_eproc(proc_t p
, struct user32_eproc
*__restrict ep
)
1505 struct session
*sessp
;
1506 kauth_cred_t my_cred
;
1509 sessp
= proc_session(p
);
1511 if (pg
!= PGRP_NULL
) {
1512 ep
->e_pgid
= p
->p_pgrpid
;
1513 ep
->e_jobc
= pg
->pg_jobc
;
1514 if (sessp
!= SESSION_NULL
&& sessp
->s_ttyvp
)
1515 ep
->e_flag
= EPROC_CTTY
;
1519 ep
->e_lcid
= p
->p_lctx
->lc_id
;
1521 ep
->e_ppid
= p
->p_ppid
;
1523 my_cred
= kauth_cred_proc_ref(p
);
1525 /* A fake historical pcred */
1526 ep
->e_pcred
.p_ruid
= kauth_cred_getruid(my_cred
);
1527 ep
->e_pcred
.p_svuid
= kauth_cred_getsvuid(my_cred
);
1528 ep
->e_pcred
.p_rgid
= kauth_cred_getrgid(my_cred
);
1529 ep
->e_pcred
.p_svgid
= kauth_cred_getsvgid(my_cred
);
1531 /* A fake historical *kauth_cred_t */
1532 ep
->e_ucred
.cr_ref
= my_cred
->cr_ref
;
1533 ep
->e_ucred
.cr_uid
= kauth_cred_getuid(my_cred
);
1534 ep
->e_ucred
.cr_ngroups
= posix_cred_get(my_cred
)->cr_ngroups
;
1535 bcopy(posix_cred_get(my_cred
)->cr_groups
,
1536 ep
->e_ucred
.cr_groups
, NGROUPS
* sizeof (gid_t
));
1538 kauth_cred_unref(&my_cred
);
1541 if ((p
->p_flag
& P_CONTROLT
) && (sessp
!= SESSION_NULL
) &&
1542 (tp
= SESSION_TP(sessp
))) {
1543 ep
->e_tdev
= tp
->t_dev
;
1544 ep
->e_tpgid
= sessp
->s_ttypgrpid
;
1548 if (sessp
!= SESSION_NULL
) {
1549 if (SESS_LEADER(p
, sessp
))
1550 ep
->e_flag
|= EPROC_SLEADER
;
1551 session_rele(sessp
);
1553 if (pg
!= PGRP_NULL
)
1558 * Fill in non-zero fields of an LP64 eproc structure for the specified process.
1561 fill_user64_eproc(proc_t p
, struct user64_eproc
*__restrict ep
)
1565 struct session
*sessp
;
1566 kauth_cred_t my_cred
;
1569 sessp
= proc_session(p
);
1571 if (pg
!= PGRP_NULL
) {
1572 ep
->e_pgid
= p
->p_pgrpid
;
1573 ep
->e_jobc
= pg
->pg_jobc
;
1574 if (sessp
!= SESSION_NULL
&& sessp
->s_ttyvp
)
1575 ep
->e_flag
= EPROC_CTTY
;
1579 ep
->e_lcid
= p
->p_lctx
->lc_id
;
1581 ep
->e_ppid
= p
->p_ppid
;
1583 my_cred
= kauth_cred_proc_ref(p
);
1585 /* A fake historical pcred */
1586 ep
->e_pcred
.p_ruid
= kauth_cred_getruid(my_cred
);
1587 ep
->e_pcred
.p_svuid
= kauth_cred_getsvuid(my_cred
);
1588 ep
->e_pcred
.p_rgid
= kauth_cred_getrgid(my_cred
);
1589 ep
->e_pcred
.p_svgid
= kauth_cred_getsvgid(my_cred
);
1591 /* A fake historical *kauth_cred_t */
1592 ep
->e_ucred
.cr_ref
= my_cred
->cr_ref
;
1593 ep
->e_ucred
.cr_uid
= kauth_cred_getuid(my_cred
);
1594 ep
->e_ucred
.cr_ngroups
= posix_cred_get(my_cred
)->cr_ngroups
;
1595 bcopy(posix_cred_get(my_cred
)->cr_groups
,
1596 ep
->e_ucred
.cr_groups
, NGROUPS
* sizeof (gid_t
));
1598 kauth_cred_unref(&my_cred
);
1601 if ((p
->p_flag
& P_CONTROLT
) && (sessp
!= SESSION_NULL
) &&
1602 (tp
= SESSION_TP(sessp
))) {
1603 ep
->e_tdev
= tp
->t_dev
;
1604 ep
->e_tpgid
= sessp
->s_ttypgrpid
;
1608 if (sessp
!= SESSION_NULL
) {
1609 if (SESS_LEADER(p
, sessp
))
1610 ep
->e_flag
|= EPROC_SLEADER
;
1611 session_rele(sessp
);
1613 if (pg
!= PGRP_NULL
)
1618 * Fill in an eproc structure for the specified process.
1619 * bzeroed by our caller, so only set non-zero fields.
1622 fill_user32_externproc(proc_t p
, struct user32_extern_proc
*__restrict exp
)
1624 exp
->p_starttime
.tv_sec
= p
->p_start
.tv_sec
;
1625 exp
->p_starttime
.tv_usec
= p
->p_start
.tv_usec
;
1626 exp
->p_flag
= p
->p_flag
;
1627 if (p
->p_lflag
& P_LTRACED
)
1628 exp
->p_flag
|= P_TRACED
;
1629 if (p
->p_lflag
& P_LPPWAIT
)
1630 exp
->p_flag
|= P_PPWAIT
;
1631 if (p
->p_lflag
& P_LEXIT
)
1632 exp
->p_flag
|= P_WEXIT
;
1633 exp
->p_stat
= p
->p_stat
;
1634 exp
->p_pid
= p
->p_pid
;
1635 exp
->p_oppid
= p
->p_oppid
;
1637 exp
->user_stack
= p
->user_stack
;
1638 exp
->p_debugger
= p
->p_debugger
;
1639 exp
->sigwait
= p
->sigwait
;
1641 #ifdef _PROC_HAS_SCHEDINFO_
1642 exp
->p_estcpu
= p
->p_estcpu
;
1643 exp
->p_pctcpu
= p
->p_pctcpu
;
1644 exp
->p_slptime
= p
->p_slptime
;
1646 exp
->p_realtimer
.it_interval
.tv_sec
=
1647 (user32_time_t
)p
->p_realtimer
.it_interval
.tv_sec
;
1648 exp
->p_realtimer
.it_interval
.tv_usec
=
1649 (__int32_t
)p
->p_realtimer
.it_interval
.tv_usec
;
1651 exp
->p_realtimer
.it_value
.tv_sec
=
1652 (user32_time_t
)p
->p_realtimer
.it_value
.tv_sec
;
1653 exp
->p_realtimer
.it_value
.tv_usec
=
1654 (__int32_t
)p
->p_realtimer
.it_value
.tv_usec
;
1656 exp
->p_rtime
.tv_sec
= (user32_time_t
)p
->p_rtime
.tv_sec
;
1657 exp
->p_rtime
.tv_usec
= (__int32_t
)p
->p_rtime
.tv_usec
;
1659 exp
->p_sigignore
= p
->p_sigignore
;
1660 exp
->p_sigcatch
= p
->p_sigcatch
;
1661 exp
->p_priority
= p
->p_priority
;
1662 exp
->p_nice
= p
->p_nice
;
1663 bcopy(&p
->p_comm
, &exp
->p_comm
, MAXCOMLEN
);
1664 exp
->p_xstat
= p
->p_xstat
;
1665 exp
->p_acflag
= p
->p_acflag
;
1669 * Fill in an LP64 version of extern_proc structure for the specified process.
1672 fill_user64_externproc(proc_t p
, struct user64_extern_proc
*__restrict exp
)
1674 exp
->p_starttime
.tv_sec
= p
->p_start
.tv_sec
;
1675 exp
->p_starttime
.tv_usec
= p
->p_start
.tv_usec
;
1676 exp
->p_flag
= p
->p_flag
;
1677 if (p
->p_lflag
& P_LTRACED
)
1678 exp
->p_flag
|= P_TRACED
;
1679 if (p
->p_lflag
& P_LPPWAIT
)
1680 exp
->p_flag
|= P_PPWAIT
;
1681 if (p
->p_lflag
& P_LEXIT
)
1682 exp
->p_flag
|= P_WEXIT
;
1683 exp
->p_stat
= p
->p_stat
;
1684 exp
->p_pid
= p
->p_pid
;
1685 exp
->p_oppid
= p
->p_oppid
;
1687 exp
->user_stack
= p
->user_stack
;
1688 exp
->p_debugger
= p
->p_debugger
;
1689 exp
->sigwait
= p
->sigwait
;
1691 #ifdef _PROC_HAS_SCHEDINFO_
1692 exp
->p_estcpu
= p
->p_estcpu
;
1693 exp
->p_pctcpu
= p
->p_pctcpu
;
1694 exp
->p_slptime
= p
->p_slptime
;
1696 exp
->p_realtimer
.it_interval
.tv_sec
= p
->p_realtimer
.it_interval
.tv_sec
;
1697 exp
->p_realtimer
.it_interval
.tv_usec
= p
->p_realtimer
.it_interval
.tv_usec
;
1699 exp
->p_realtimer
.it_value
.tv_sec
= p
->p_realtimer
.it_value
.tv_sec
;
1700 exp
->p_realtimer
.it_value
.tv_usec
= p
->p_realtimer
.it_value
.tv_usec
;
1702 exp
->p_rtime
.tv_sec
= p
->p_rtime
.tv_sec
;
1703 exp
->p_rtime
.tv_usec
= p
->p_rtime
.tv_usec
;
1705 exp
->p_sigignore
= p
->p_sigignore
;
1706 exp
->p_sigcatch
= p
->p_sigcatch
;
1707 exp
->p_priority
= p
->p_priority
;
1708 exp
->p_nice
= p
->p_nice
;
1709 bcopy(&p
->p_comm
, &exp
->p_comm
, MAXCOMLEN
);
1710 exp
->p_xstat
= p
->p_xstat
;
1711 exp
->p_acflag
= p
->p_acflag
;
1715 fill_user32_proc(proc_t p
, struct user32_kinfo_proc
*__restrict kp
)
1717 /* on a 64 bit kernel, 32 bit users get some truncated information */
1718 fill_user32_externproc(p
, &kp
->kp_proc
);
1719 fill_user32_eproc(p
, &kp
->kp_eproc
);
1723 fill_user64_proc(proc_t p
, struct user64_kinfo_proc
*__restrict kp
)
1725 fill_user64_externproc(p
, &kp
->kp_proc
);
1726 fill_user64_eproc(p
, &kp
->kp_eproc
);
1730 sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
1732 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
1733 int *name
= arg1
; /* oid element argument vector */
1734 int namelen
= arg2
; /* number of oid element arguments */
1735 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1736 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1737 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1738 // size_t newlen = req->newlen; /* user buffer copy in size */
1740 proc_t p
= current_proc();
1746 ret
= suser(kauth_cred_get(), &p
->p_acflag
);
1748 /* Non-root processes may be blessed by kperf to access data
1749 * logged into trace.
1752 ret
= kperf_access_check();
1767 case KERN_KDWRITETR
:
1768 case KERN_KDWRITEMAP
:
1772 case KERN_KDSETRTCDEC
:
1774 case KERN_KDGETENTROPY
:
1775 case KERN_KDENABLE_BG_TRACE
:
1776 case KERN_KDDISABLE_BG_TRACE
:
1777 case KERN_KDREADCURTHRMAP
:
1778 case KERN_KDSET_TYPEFILTER
:
1779 case KERN_KDBUFWAIT
:
1782 ret
= kdbg_control(name
, namelen
, oldp
, oldlenp
);
1789 /* adjust index so we return the right required/consumed amount */
1791 req
->oldidx
+= req
->oldlen
;
1795 SYSCTL_PROC(_kern
, KERN_KDEBUG
, kdebug
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1796 0, /* Pointer argument (arg1) */
1797 0, /* Integer argument (arg2) */
1798 sysctl_kdebug_ops
, /* Handler function */
1799 NULL
, /* Data pointer */
1804 * Return the top *sizep bytes of the user stack, or the entire area of the
1805 * user stack down through the saved exec_path, whichever is smaller.
1808 sysctl_doprocargs SYSCTL_HANDLER_ARGS
1810 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
1811 int *name
= arg1
; /* oid element argument vector */
1812 int namelen
= arg2
; /* number of oid element arguments */
1813 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1814 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1815 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1816 // size_t newlen = req->newlen; /* user buffer copy in size */
1819 error
= sysctl_procargsx( name
, namelen
, oldp
, oldlenp
, current_proc(), 0);
1821 /* adjust index so we return the right required/consumed amount */
1823 req
->oldidx
+= req
->oldlen
;
1827 SYSCTL_PROC(_kern
, KERN_PROCARGS
, procargs
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1828 0, /* Pointer argument (arg1) */
1829 0, /* Integer argument (arg2) */
1830 sysctl_doprocargs
, /* Handler function */
1831 NULL
, /* Data pointer */
1835 sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
1837 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
1838 int *name
= arg1
; /* oid element argument vector */
1839 int namelen
= arg2
; /* number of oid element arguments */
1840 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1841 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1842 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1843 // size_t newlen = req->newlen; /* user buffer copy in size */
1846 error
= sysctl_procargsx( name
, namelen
, oldp
, oldlenp
, current_proc(), 1);
1848 /* adjust index so we return the right required/consumed amount */
1850 req
->oldidx
+= req
->oldlen
;
1854 SYSCTL_PROC(_kern
, KERN_PROCARGS2
, procargs2
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1855 0, /* Pointer argument (arg1) */
1856 0, /* Integer argument (arg2) */
1857 sysctl_doprocargs2
, /* Handler function */
1858 NULL
, /* Data pointer */
1862 sysctl_procargsx(int *name
, u_int namelen
, user_addr_t where
,
1863 size_t *sizep
, proc_t cur_proc
, int argc_yes
)
1866 int buflen
= where
!= USER_ADDR_NULL
? *sizep
: 0;
1868 struct _vm_map
*proc_map
;
1871 user_addr_t arg_addr
;
1876 vm_offset_t copy_start
, copy_end
;
1879 kauth_cred_t my_cred
;
1886 buflen
-= sizeof(int); /* reserve first word to return argc */
1888 /* we only care about buflen when where (oldp from sysctl) is not NULL. */
1889 /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
1890 /* is not NULL then the caller wants us to return the length needed to */
1891 /* hold the data we would return */
1892 if (where
!= USER_ADDR_NULL
&& (buflen
<= 0 || buflen
> ARG_MAX
)) {
1898 * Lookup process by pid
1907 * Copy the top N bytes of the stack.
1908 * On all machines we have so far, the stack grows
1911 * If the user expects no more than N bytes of
1912 * argument list, use that as a guess for the
1916 if (!p
->user_stack
) {
1921 if (where
== USER_ADDR_NULL
) {
1922 /* caller only wants to know length of proc args data */
1923 if (sizep
== NULL
) {
1928 size
= p
->p_argslen
;
1931 size
+= sizeof(int);
1935 * old PROCARGS will return the executable's path and plus some
1936 * extra space for work alignment and data tags
1938 size
+= PATH_MAX
+ (6 * sizeof(int));
1940 size
+= (size
& (sizeof(int) - 1)) ? (sizeof(int) - (size
& (sizeof(int) - 1))) : 0;
1945 my_cred
= kauth_cred_proc_ref(p
);
1946 uid
= kauth_cred_getuid(my_cred
);
1947 kauth_cred_unref(&my_cred
);
1949 if ((uid
!= kauth_cred_getuid(kauth_cred_get()))
1950 && suser(kauth_cred_get(), &cur_proc
->p_acflag
)) {
1955 if ((u_int
)arg_size
> p
->p_argslen
)
1956 arg_size
= round_page(p
->p_argslen
);
1958 arg_addr
= p
->user_stack
- arg_size
;
1962 * Before we can block (any VM code), make another
1963 * reference to the map to keep it alive. We do
1964 * that by getting a reference on the task itself.
1972 argslen
= p
->p_argslen
;
1974 * Once we have a task reference we can convert that into a
1975 * map reference, which we will use in the calls below. The
1976 * task/process may change its map after we take this reference
1977 * (see execve), but the worst that will happen then is a return
1978 * of stale info (which is always a possibility).
1980 task_reference(task
);
1982 proc_map
= get_task_map_reference(task
);
1983 task_deallocate(task
);
1985 if (proc_map
== NULL
)
1989 ret
= kmem_alloc(kernel_map
, ©_start
, round_page(arg_size
));
1990 if (ret
!= KERN_SUCCESS
) {
1991 vm_map_deallocate(proc_map
);
1995 copy_end
= round_page(copy_start
+ arg_size
);
1997 if( vm_map_copyin(proc_map
, (vm_map_address_t
)arg_addr
,
1998 (vm_map_size_t
)arg_size
, FALSE
, &tmp
) != KERN_SUCCESS
) {
1999 vm_map_deallocate(proc_map
);
2000 kmem_free(kernel_map
, copy_start
,
2001 round_page(arg_size
));
2006 * Now that we've done the copyin from the process'
2007 * map, we can release the reference to it.
2009 vm_map_deallocate(proc_map
);
2011 if( vm_map_copy_overwrite(kernel_map
,
2012 (vm_map_address_t
)copy_start
,
2013 tmp
, FALSE
) != KERN_SUCCESS
) {
2014 kmem_free(kernel_map
, copy_start
,
2015 round_page(arg_size
));
2019 if (arg_size
> argslen
) {
2020 data
= (caddr_t
) (copy_end
- argslen
);
2023 data
= (caddr_t
) (copy_end
- arg_size
);
2028 /* Put processes argc as the first word in the copyout buffer */
2029 suword(where
, p
->p_argc
);
2030 error
= copyout(data
, (where
+ sizeof(int)), size
);
2031 size
+= sizeof(int);
2033 error
= copyout(data
, where
, size
);
2036 * Make the old PROCARGS work to return the executable's path
2037 * But, only if there is enough space in the provided buffer
2039 * on entry: data [possibily] points to the beginning of the path
2041 * Note: we keep all pointers&sizes aligned to word boundries
2043 if ( (! error
) && (buflen
> 0 && (u_int
)buflen
> argslen
) )
2045 int binPath_sz
, alignedBinPath_sz
= 0;
2046 int extraSpaceNeeded
, addThis
;
2047 user_addr_t placeHere
;
2048 char * str
= (char *) data
;
2051 /* Some apps are really bad about messing up their stacks
2052 So, we have to be extra careful about getting the length
2053 of the executing binary. If we encounter an error, we bail.
2056 /* Limit ourselves to PATH_MAX paths */
2057 if ( max_len
> PATH_MAX
) max_len
= PATH_MAX
;
2061 while ( (binPath_sz
< max_len
-1) && (*str
++ != 0) )
2064 /* If we have a NUL terminator, copy it, too */
2065 if (binPath_sz
< max_len
-1) binPath_sz
+= 1;
2067 /* Pre-Flight the space requiremnts */
2069 /* Account for the padding that fills out binPath to the next word */
2070 alignedBinPath_sz
+= (binPath_sz
& (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz
& (sizeof(int)-1))) : 0;
2072 placeHere
= where
+ size
;
2074 /* Account for the bytes needed to keep placeHere word aligned */
2075 addThis
= (placeHere
& (sizeof(int)-1)) ? (sizeof(int)-(placeHere
& (sizeof(int)-1))) : 0;
2077 /* Add up all the space that is needed */
2078 extraSpaceNeeded
= alignedBinPath_sz
+ addThis
+ binPath_sz
+ (4 * sizeof(int));
2080 /* is there is room to tack on argv[0]? */
2081 if ( (buflen
& ~(sizeof(int)-1)) >= ( argslen
+ extraSpaceNeeded
))
2083 placeHere
+= addThis
;
2084 suword(placeHere
, 0);
2085 placeHere
+= sizeof(int);
2086 suword(placeHere
, 0xBFFF0000);
2087 placeHere
+= sizeof(int);
2088 suword(placeHere
, 0);
2089 placeHere
+= sizeof(int);
2090 error
= copyout(data
, placeHere
, binPath_sz
);
2093 placeHere
+= binPath_sz
;
2094 suword(placeHere
, 0);
2095 size
+= extraSpaceNeeded
;
2101 if (copy_start
!= (vm_offset_t
) 0) {
2102 kmem_free(kernel_map
, copy_start
, copy_end
- copy_start
);
2108 if (where
!= USER_ADDR_NULL
)
2115 * Max number of concurrent aio requests
2119 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2121 int new_value
, changed
;
2122 int error
= sysctl_io_number(req
, aio_max_requests
, sizeof(int), &new_value
, &changed
);
2124 /* make sure the system-wide limit is greater than the per process limit */
2125 if (new_value
>= aio_max_requests_per_process
&& new_value
<= AIO_MAX_REQUESTS
)
2126 aio_max_requests
= new_value
;
2135 * Max number of concurrent aio requests per process
2139 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2141 int new_value
, changed
;
2142 int error
= sysctl_io_number(req
, aio_max_requests_per_process
, sizeof(int), &new_value
, &changed
);
2144 /* make sure per process limit is less than the system-wide limit */
2145 if (new_value
<= aio_max_requests
&& new_value
>= AIO_LISTIO_MAX
)
2146 aio_max_requests_per_process
= new_value
;
2155 * Max number of async IO worker threads
2159 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2161 int new_value
, changed
;
2162 int error
= sysctl_io_number(req
, aio_worker_threads
, sizeof(int), &new_value
, &changed
);
2164 /* we only allow an increase in the number of worker threads */
2165 if (new_value
> aio_worker_threads
) {
2166 _aio_create_worker_threads((new_value
- aio_worker_threads
));
2167 aio_worker_threads
= new_value
;
2177 * System-wide limit on the max number of processes
2181 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2183 int new_value
, changed
;
2184 int error
= sysctl_io_number(req
, maxproc
, sizeof(int), &new_value
, &changed
);
2186 AUDIT_ARG(value32
, new_value
);
2187 /* make sure the system-wide limit is less than the configured hard
2188 limit set at kernel compilation */
2189 if (new_value
<= hard_maxproc
&& new_value
> 0)
2190 maxproc
= new_value
;
2197 SYSCTL_STRING(_kern
, KERN_OSTYPE
, ostype
,
2198 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2200 SYSCTL_STRING(_kern
, KERN_OSRELEASE
, osrelease
,
2201 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
/* kern.osrevision: BSD revision constant, exported read-only as a constant value. */
SYSCTL_INT(_kern, KERN_OSREV, osrevision,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, BSD, "");
2206 SYSCTL_STRING(_kern
, KERN_VERSION
, version
,
2207 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
/* kern.uuid: the kernel's UUID string (read-only). */
SYSCTL_STRING(_kern, OID_AUTO, uuid,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&kernel_uuid_string[0], 0, "");
/* Non-zero enables kprintf-based syscall tracing (see debug.kprint_syscall below). */
int debug_kprint_syscall = 0;
/* Optional process-name filter for the syscall tracing; empty string means all processes. */
char debug_kprint_syscall_process[MAXCOMLEN+1];

/* Thread safe: bits and string value are not used to reclaim state */
SYSCTL_INT (_debug, OID_AUTO, kprint_syscall,
	    CTLFLAG_RW | CTLFLAG_LOCKED, &debug_kprint_syscall, 0, "kprintf syscall tracing");
SYSCTL_STRING(_debug, OID_AUTO, kprint_syscall_process,
	      CTLFLAG_RW | CTLFLAG_LOCKED, debug_kprint_syscall_process, sizeof(debug_kprint_syscall_process),
	      "name of process for kprintf syscall tracing");
2224 int debug_kprint_current_process(const char **namep
)
2226 struct proc
*p
= current_proc();
2232 if (debug_kprint_syscall_process
[0]) {
2233 /* user asked to scope tracing to a particular process name */
2234 if(0 == strncmp(debug_kprint_syscall_process
,
2235 p
->p_comm
, sizeof(debug_kprint_syscall_process
))) {
2236 /* no value in telling the user that we traced what they asked */
2237 if(namep
) *namep
= NULL
;
2245 /* trace all processes. Tell user what we traced */
2254 /* PR-5293665: need to use a callback function for kern.osversion to set
2255 * osversion in IORegistry */
2258 sysctl_osversion(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
)
2262 rval
= sysctl_handle_string(oidp
, arg1
, arg2
, req
);
2265 IORegistrySetOSBuildVersion((char *)arg1
);
/* kern.osversion: writable build-version string; routed through sysctl_osversion
 * so the new value is also pushed into the IORegistry (see PR-5293665 note above). */
SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
		CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
		osversion, 256 /* OSVERSIZE*/,
		sysctl_osversion, "A", "");
2277 sysctl_sysctl_bootargs
2278 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2283 strlcpy(buf
, PE_boot_args(), 256);
2284 error
= sysctl_io_string(req
, buf
, 256, 0, NULL
);
2288 SYSCTL_PROC(_kern
, OID_AUTO
, bootargs
,
2289 CTLFLAG_LOCKED
| CTLFLAG_RD
| CTLFLAG_KERN
| CTLTYPE_STRING
,
2291 sysctl_sysctl_bootargs
, "A", "bootargs");
2293 SYSCTL_INT(_kern
, KERN_MAXFILES
, maxfiles
,
2294 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
/* kern.argmax: max bytes of arguments to exec (constant, read-only). */
SYSCTL_INT(_kern, KERN_ARGMAX, argmax,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, ARG_MAX, "");
/* kern.posix1version: POSIX.1 version the system targets (constant). */
SYSCTL_INT(_kern, KERN_POSIX1, posix1version,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, _POSIX_VERSION, "");
/* kern.ngroups: max supplementary group IDs per credential (constant). */
SYSCTL_INT(_kern, KERN_NGROUPS, ngroups,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, NGROUPS_MAX, "");
/* kern.job_control: job control is available (constant 1). */
SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, 1, "");
#if 1	/* _POSIX_SAVED_IDS from <unistd.h> */
/* kern.saved_ids: saved set-user/group-IDs are supported (constant 1). */
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, 1, "");
2313 SYSCTL_INT(_kern
, KERN_SAVED_IDS
, saved_ids
,
2314 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2317 SYSCTL_INT(_kern
, OID_AUTO
, num_files
,
2318 CTLFLAG_RD
| CTLFLAG_LOCKED
,
2320 SYSCTL_COMPAT_INT(_kern
, OID_AUTO
, num_vnodes
,
2321 CTLFLAG_RD
| CTLFLAG_LOCKED
,
2323 SYSCTL_INT(_kern
, OID_AUTO
, num_tasks
,
2324 CTLFLAG_RD
| CTLFLAG_LOCKED
,
/* kern.num_threads: system-wide thread limit (read-only view of thread_max). */
SYSCTL_INT(_kern, OID_AUTO, num_threads,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&thread_max, 0, "");
/* kern.num_taskthreads: per-task thread limit (read-only view of task_threadmax). */
SYSCTL_INT(_kern, OID_AUTO, num_taskthreads,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&task_threadmax, 0, "");
2334 sysctl_maxvnodes (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2336 int oldval
= desiredvnodes
;
2337 int error
= sysctl_io_number(req
, desiredvnodes
, sizeof(int), &desiredvnodes
, NULL
);
2339 if (oldval
!= desiredvnodes
) {
2340 reset_vmobjectcache(oldval
, desiredvnodes
);
2341 resize_namecache(desiredvnodes
);
/* kern.namecache_disabled: toggle for the name cache (writable). */
SYSCTL_INT(_kern, OID_AUTO, namecache_disabled,
		CTLFLAG_RW | CTLFLAG_LOCKED,
		&nc_disabled, 0, "");

/* kern.maxvnodes: desired vnode count; handled by sysctl_maxvnodes so that
 * dependent caches are resized when the value changes. */
SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_maxvnodes, "I", "");
/* kern.maxproc: system-wide process limit (validated by sysctl_maxproc). */
SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_maxproc, "I", "");

/* kern.aiomax: system-wide cap on concurrent async I/O requests. */
SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_aiomax, "I", "");

/* kern.aioprocmax: per-process cap on concurrent async I/O requests. */
SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_aioprocmax, "I", "");

/* kern.aiothreads: number of AIO worker threads (increase-only; see handler). */
SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_aiothreads, "I", "");
2373 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2375 int new_value
, changed
;
2376 int error
= sysctl_io_number(req
, securelevel
, sizeof(int), &new_value
, &changed
);
2378 if (!(new_value
< securelevel
&& req
->p
->p_pid
!= 1)) {
2380 securelevel
= new_value
;
/* kern.securelevel: system secure level; lowering is restricted by the handler. */
SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_securelvl, "I", "");
2396 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2399 error
= sysctl_io_string(req
, domainname
, sizeof(domainname
), 0, &changed
);
2401 domainnamelen
= strlen(domainname
);
/* kern.nisdomainname: NIS/YP domain name; handler keeps domainnamelen in sync. */
SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
		CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_domainname, "A", "");
2410 SYSCTL_COMPAT_INT(_kern
, KERN_HOSTID
, hostid
,
2411 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2416 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2419 error
= sysctl_io_string(req
, hostname
, sizeof(hostname
), 1, &changed
);
2421 hostnamelen
= req
->newlen
;
/* kern.hostname: system host name; handler keeps hostnamelen in sync. */
SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
		CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_hostname, "A", "");
2433 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2435 /* Original code allowed writing, I'm copying this, although this all makes
2436 no sense to me. Besides, this sysctl is never used. */
2437 return sysctl_io_string(req
, &req
->p
->p_name
[0], (2*MAXCOMLEN
+1), 1, NULL
);
/* kern.procname: the calling process's name; ANYBODY may read/write (see
 * handler comment about the questionable writability being inherited). */
SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
		CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
		0, 0, sysctl_procname, "A", "");
/* kern.speculative_reads_disabled: disables speculative read-ahead when set. */
SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&speculative_reads_disabled, 0, "");

/* kern.ignore_is_ssd: treat SSD-backed devices like rotating media when set. */
SYSCTL_INT(_kern, OID_AUTO, ignore_is_ssd,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&ignore_is_ssd, 0, "");

/* Cluster-read preheat window bounds (pages). */
SYSCTL_UINT(_kern, OID_AUTO, preheat_pages_max,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&preheat_pages_max, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, preheat_pages_min,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&preheat_pages_min, 0, "");

/* Speculative prefetch caps. */
SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&speculative_prefetch_max, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max_iosize,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&speculative_prefetch_max_iosize, 0, "");

/* VM pageout free-list thresholds (writable for tuning). */
SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_target,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_page_free_target, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_min,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_page_free_min, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_reserved,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_page_free_reserved, 0, "");

/* Speculative page queue tuning. */
SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_percentage,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_page_speculative_percentage, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_q_age_ms,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_page_speculative_q_age_ms, 0, "");

/* Pageout batching limits. */
SYSCTL_UINT(_kern, OID_AUTO, vm_max_delayed_work_limit,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_max_delayed_work_limit, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_max_batch,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_max_batch, 0, "");

/* kern.bootsessionuuid: UUID of the current boot session (read-only). */
SYSCTL_STRING(_kern, OID_AUTO, bootsessionuuid,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&bootsessionuuid_string, sizeof(bootsessionuuid_string), "");
2502 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2504 time_t tv_sec
= boottime_sec();
2505 struct proc
*p
= req
->p
;
2507 if (proc_is64bit(p
)) {
2508 struct user64_timeval t
;
2511 return sysctl_io_opaque(req
, &t
, sizeof(t
), NULL
);
2513 struct user32_timeval t
;
2516 return sysctl_io_opaque(req
, &t
, sizeof(t
), NULL
);
/* kern.boottime: boot time as a struct timeval (32/64-bit layout chosen by handler). */
SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
		CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
		0, 0, sysctl_boottime, "S,timeval", "");
2526 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2529 int error
= get_kernel_symfile(req
->p
, &str
);
2532 return sysctl_io_string(req
, str
, 0, 0, NULL
);
/* kern.symfile: path of the kernel symbol file (read-only string). */
SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
		CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
		0, 0, sysctl_symfile, "A", "");
2543 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2545 return sysctl_io_number(req
, netboot_root(), sizeof(int), NULL
, NULL
);
/* kern.netboot: non-zero when the root filesystem is network-booted. */
SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
		CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
		0, 0, sysctl_netboot, "I", "");
2553 #ifdef CONFIG_IMGSRC_ACCESS
2555 * Legacy--act as if only one layer of nesting is possible.
2559 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2561 vfs_context_t ctx
= vfs_context_current();
2565 if (!vfs_context_issuser(ctx
)) {
2569 if (imgsrc_rootvnodes
[0] == NULL
) {
2573 result
= vnode_getwithref(imgsrc_rootvnodes
[0]);
2578 devvp
= vnode_mount(imgsrc_rootvnodes
[0])->mnt_devvp
;
2579 result
= vnode_getwithref(devvp
);
2584 result
= sysctl_io_number(req
, vnode_specrdev(devvp
), sizeof(dev_t
), NULL
, NULL
);
2588 vnode_put(imgsrc_rootvnodes
[0]);
/* kern.imgsrcdev: legacy image-boot source device (superuser only; see handler). */
SYSCTL_PROC(_kern, OID_AUTO, imgsrcdev,
		CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
		0, 0, sysctl_imgsrcdev, "I", "");
2598 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2601 struct imgsrc_info info
[MAX_IMAGEBOOT_NESTING
]; /* 2 for now, no problem */
2605 if (imgsrc_rootvnodes
[0] == NULLVP
) {
2609 for (i
= 0; i
< MAX_IMAGEBOOT_NESTING
; i
++) {
2611 * Go get the root vnode.
2613 rvp
= imgsrc_rootvnodes
[i
];
2614 if (rvp
== NULLVP
) {
2618 error
= vnode_get(rvp
);
2624 * For now, no getting at a non-local volume.
2626 devvp
= vnode_mount(rvp
)->mnt_devvp
;
2627 if (devvp
== NULL
) {
2632 error
= vnode_getwithref(devvp
);
2641 info
[i
].ii_dev
= vnode_specrdev(devvp
);
2642 info
[i
].ii_flags
= 0;
2643 info
[i
].ii_height
= i
;
2644 bzero(info
[i
].ii_reserved
, sizeof(info
[i
].ii_reserved
));
2650 return sysctl_io_opaque(req
, info
, i
* sizeof(info
[0]), NULL
);
/* kern.imgsrcinfo: per-nesting-level image-boot source info records. */
SYSCTL_PROC(_kern, OID_AUTO, imgsrcinfo,
		CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
		0, 0, sysctl_imgsrcinfo, "I", "");
2657 #endif /* CONFIG_IMGSRC_ACCESS */
/* kern.timer subtree: timer coalescing and deadline-tracking knobs. */
SYSCTL_DECL(_kern_timer);
SYSCTL_NODE(_kern, OID_AUTO, timer, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "timer");

/* kern.timer.coalescing_enabled: master switch for Mach timer coalescing. */
SYSCTL_INT(_kern_timer, OID_AUTO, coalescing_enabled,
		CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
		&mach_timer_coalescing_enabled, 0, "");

/* Deadline-tracking histogram bin boundaries. */
SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_1,
		CTLFLAG_RW | CTLFLAG_LOCKED,
		&timer_deadline_tracking_bin_1, "");
SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_2,
		CTLFLAG_RW | CTLFLAG_LOCKED,
		&timer_deadline_tracking_bin_2, "");

/* kern.timer.longterm subtree: long-term timer queue statistics and threshold. */
SYSCTL_DECL(_kern_timer_longterm);
SYSCTL_NODE(_kern_timer, OID_AUTO, longterm, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "longterm");
2677 /* Must match definition in osfmk/kern/timer_call.c */
2680 ENQUEUES
, DEQUEUES
, ESCALATES
, SCANS
, PREEMPTS
,
2681 LATENCY
, LATENCY_MIN
, LATENCY_MAX
/* Accessors implemented in osfmk/kern/timer_call.c; keyed by the enum above. */
extern uint64_t timer_sysctl_get(int);
extern int	timer_sysctl_set(int, uint64_t);
2688 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2690 int oid
= (int)arg1
;
2691 uint64_t value
= timer_sysctl_get(oid
);
2696 error
= sysctl_io_number(req
, value
, sizeof(value
), &new_value
, &changed
);
2698 error
= timer_sysctl_set(oid
, new_value
);
/* kern.timer.longterm.*: each OID passes its enum selector as arg1 to
 * sysctl_timer, which maps it to timer_sysctl_get()/timer_sysctl_set().
 * Only "threshold" is writable; the rest are read-only statistics. */
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, threshold,
		CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
		(void *) THRESHOLD, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, qlen,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *) QCOUNT, 0, sysctl_timer, "Q", "");

SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, enqueues,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *) ENQUEUES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, dequeues,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *) DEQUEUES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, escalates,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *) ESCALATES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scans,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *) SCANS, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, preempts,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *) PREEMPTS, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *) LATENCY, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_min,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *) LATENCY_MIN, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_max,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *) LATENCY_MAX, 0, sysctl_timer, "Q", "");
2738 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2740 return sysctl_io_number(req
, (int)req
->p
->user_stack
, sizeof(int), NULL
, NULL
);
/* kern.usrstack: calling process's user stack address as a 32-bit int (legacy). */
SYSCTL_PROC(_kern, KERN_USRSTACK32, usrstack,
		CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
		0, 0, sysctl_usrstack, "I", "");
2749 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2751 return sysctl_io_number(req
, req
->p
->user_stack
, sizeof(req
->p
->user_stack
), NULL
, NULL
);
/* kern.usrstack64: calling process's user stack address, full width. */
SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		0, 0, sysctl_usrstack64, "Q", "");

/* kern.corefile: core dump filename template (writable). */
SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		corefilename, sizeof(corefilename), "");
2764 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2766 #ifdef SECURE_KERNEL
2769 int new_value
, changed
;
2770 int error
= sysctl_io_number(req
, do_coredump
, sizeof(int), &new_value
, &changed
);
2772 if ((new_value
== 0) || (new_value
== 1))
2773 do_coredump
= new_value
;
/* kern.coredump: enable/disable core dumps (0 or 1; rejected on SECURE_KERNEL). */
SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_coredump, "I", "");
2785 sysctl_suid_coredump
2786 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2788 #ifdef SECURE_KERNEL
2791 int new_value
, changed
;
2792 int error
= sysctl_io_number(req
, sugid_coredump
, sizeof(int), &new_value
, &changed
);
2794 if ((new_value
== 0) || (new_value
== 1))
2795 sugid_coredump
= new_value
;
/* kern.sugid_coredump: allow core dumps from set-id processes (0 or 1). */
SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_suid_coredump, "I", "");
2808 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2810 struct proc
*p
= req
->p
;
2811 int new_value
, changed
;
2812 int error
= sysctl_io_number(req
, (req
->p
->p_lflag
& P_LDELAYTERM
)? 1: 0, sizeof(int), &new_value
, &changed
);
2816 req
->p
->p_lflag
|= P_LDELAYTERM
;
2818 req
->p
->p_lflag
&= ~P_LDELAYTERM
;
/* kern.delayterm: per-process delayed-termination flag (P_LDELAYTERM). */
SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_delayterm, "I", "");
2831 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2833 struct proc
*p
= req
->p
;
2835 int new_value
, old_value
, changed
;
2838 ut
= get_bsdthread_info(current_thread());
2840 if (ut
->uu_flag
& UT_RAGE_VNODES
)
2841 old_value
= KERN_RAGE_THREAD
;
2842 else if (p
->p_lflag
& P_LRAGE_VNODES
)
2843 old_value
= KERN_RAGE_PROC
;
2847 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
2850 switch (new_value
) {
2851 case KERN_RAGE_PROC
:
2853 p
->p_lflag
|= P_LRAGE_VNODES
;
2856 case KERN_UNRAGE_PROC
:
2858 p
->p_lflag
&= ~P_LRAGE_VNODES
;
2862 case KERN_RAGE_THREAD
:
2863 ut
->uu_flag
|= UT_RAGE_VNODES
;
2865 case KERN_UNRAGE_THREAD
:
2866 ut
= get_bsdthread_info(current_thread());
2867 ut
->uu_flag
&= ~UT_RAGE_VNODES
;
/* kern.rage_vnode: set/clear rapid-aging of vnodes for the calling process
 * or thread (KERN_RAGE_*/KERN_UNRAGE_* selectors; ANYBODY may use it). */
SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
		0, 0, sysctl_rage_vnode, "I", "");
2878 /* XXX move this interface into libproc and remove this sysctl */
2880 sysctl_setthread_cpupercent
2881 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2883 int new_value
, old_value
;
2885 kern_return_t kret
= KERN_SUCCESS
;
2886 uint8_t percent
= 0;
2894 if ((error
= sysctl_io_number(req
, old_value
, sizeof(old_value
), &new_value
, NULL
)) != 0)
2897 percent
= new_value
& 0xff; /* low 8 bytes for perent */
2898 ms_refill
= (new_value
>> 8) & 0xffffff; /* upper 24bytes represent ms refill value */
2903 * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present.
2905 if ((kret
= thread_set_cpulimit(THREAD_CPULIMIT_BLOCK
, percent
, ms_refill
* (int)NSEC_PER_MSEC
)) != 0)
/* kern.setthread_cpupercent: write-only; encodes CPU %% in the low byte and
 * refill period (ms) in the upper bits — see sysctl_setthread_cpupercent. */
SYSCTL_PROC(_kern, OID_AUTO, setthread_cpupercent,
		CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_ANYBODY,
		0, 0, sysctl_setthread_cpupercent, "I", "set thread cpu percentage limit");
2917 sysctl_kern_check_openevt
2918 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2920 struct proc
*p
= req
->p
;
2921 int new_value
, old_value
, changed
;
2924 if (p
->p_flag
& P_CHECKOPENEVT
) {
2925 old_value
= KERN_OPENEVT_PROC
;
2930 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
2933 switch (new_value
) {
2934 case KERN_OPENEVT_PROC
:
2935 OSBitOrAtomic(P_CHECKOPENEVT
, &p
->p_flag
);
2938 case KERN_UNOPENEVT_PROC
:
2939 OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT
), &p
->p_flag
);
/* kern.check_openevt: per-process P_CHECKOPENEVT flag (KERN_OPENEVT_* selectors). */
SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
		0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");
2956 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2958 #ifdef SECURE_KERNEL
2961 int new_value
, changed
;
2964 error
= sysctl_io_number(req
, nx_enabled
, sizeof(nx_enabled
), &new_value
, &changed
);
2969 #if defined(__i386__) || defined(__x86_64__)
2971 * Only allow setting if NX is supported on the chip
2973 if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD
))
2976 nx_enabled
= new_value
;
/* kern.nx: NX (no-execute) protection toggle; handler rejects changes on
 * SECURE_KERNEL and when the CPU lacks the XD feature. */
SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		0, 0, sysctl_nx, "I", "");
2989 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2991 if (proc_is64bit(req
->p
)) {
2992 struct user64_loadavg loadinfo64
;
2993 fill_loadavg64(&averunnable
, &loadinfo64
);
2994 return sysctl_io_opaque(req
, &loadinfo64
, sizeof(loadinfo64
), NULL
);
2996 struct user32_loadavg loadinfo32
;
2997 fill_loadavg32(&averunnable
, &loadinfo32
);
2998 return sysctl_io_opaque(req
, &loadinfo32
, sizeof(loadinfo32
), NULL
);
/* vm.loadavg: load averages; 32/64-bit struct layout chosen per caller ABI. */
SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
		CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
		0, 0, sysctl_loadavg, "S,loadavg", "");
3007 * Note: Thread safe; vm_map_lock protects in vm_toggle_entry_reuse()
3010 sysctl_vm_toggle_address_reuse(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
,
3011 __unused
int arg2
, struct sysctl_req
*req
)
3013 int old_value
=0, new_value
=0, error
=0;
3015 if(vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE
, &old_value
))
3017 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, NULL
);
3019 return (vm_toggle_entry_reuse(new_value
, NULL
));
/* debug.toggle_address_reuse: VM map-entry reuse toggle (see vm_toggle_entry_reuse). */
SYSCTL_PROC(_debug, OID_AUTO, toggle_address_reuse, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_toggle_address_reuse,"I","");
3028 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
3031 uint64_t swap_total
;
3032 uint64_t swap_avail
;
3033 vm_size_t swap_pagesize
;
3034 boolean_t swap_encrypted
;
3035 struct xsw_usage xsu
;
3037 error
= macx_swapinfo(&swap_total
,
3044 xsu
.xsu_total
= swap_total
;
3045 xsu
.xsu_avail
= swap_avail
;
3046 xsu
.xsu_used
= swap_total
- swap_avail
;
3047 xsu
.xsu_pagesize
= swap_pagesize
;
3048 xsu
.xsu_encrypted
= swap_encrypted
;
3049 return sysctl_io_opaque(req
, &xsu
, sizeof(xsu
), NULL
);
/* vm.swapusage: swap totals as a struct xsw_usage (filled from macx_swapinfo). */
SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
		CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
		0, 0, sysctl_swapusage, "S,xsw_usage", "");
3059 extern void vm_page_reactivate_all_throttled(void);
3062 sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
3064 #pragma unused(arg1, arg2)
3065 int error
, val
= memorystatus_freeze_enabled
? 1 : 0;
3068 error
= sysctl_handle_int(oidp
, &val
, 0, req
);
3069 if (error
|| !req
->newptr
)
3072 if (COMPRESSED_PAGER_IS_ACTIVE
|| DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE
) {
3073 //assert(req->newptr);
3074 printf("Failed this request to set the sysctl\n");
3079 * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue.
3081 disabled
= (!val
&& memorystatus_freeze_enabled
);
3083 memorystatus_freeze_enabled
= val
? TRUE
: FALSE
;
3086 vm_page_reactivate_all_throttled();
/* vm.freeze_enabled: memorystatus freezer switch (rejected when a compressed
 * pager is active; disabling reactivates throttled pages — see handler). */
SYSCTL_PROC(_vm, OID_AUTO, freeze_enabled, CTLTYPE_INT|CTLFLAG_RW, &memorystatus_freeze_enabled, 0, sysctl_freeze_enabled, "I", "");
3093 #endif /* CONFIG_FREEZE */
/* this kernel does NOT implement shared_region_make_private_np() */
/* kern.shreg_private: constant 0 advertising the above. */
SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		(int *)NULL, 0, "");
3100 #if defined(__i386__) || defined(__x86_64__)
3102 sysctl_sysctl_exec_affinity(__unused
struct sysctl_oid
*oidp
,
3103 __unused
void *arg1
, __unused
int arg2
,
3104 struct sysctl_req
*req
)
3106 proc_t cur_proc
= req
->p
;
3109 if (req
->oldptr
!= USER_ADDR_NULL
) {
3110 cpu_type_t oldcputype
= (cur_proc
->p_flag
& P_AFFINITY
) ? CPU_TYPE_POWERPC
: CPU_TYPE_I386
;
3111 if ((error
= SYSCTL_OUT(req
, &oldcputype
, sizeof(oldcputype
))))
3115 if (req
->newptr
!= USER_ADDR_NULL
) {
3116 cpu_type_t newcputype
;
3117 if ((error
= SYSCTL_IN(req
, &newcputype
, sizeof(newcputype
))))
3119 if (newcputype
== CPU_TYPE_I386
)
3120 OSBitAndAtomic(~((uint32_t)P_AFFINITY
), &cur_proc
->p_flag
);
3121 else if (newcputype
== CPU_TYPE_POWERPC
)
3122 OSBitOrAtomic(P_AFFINITY
, &cur_proc
->p_flag
);
/* sysctl.proc_exec_affinity: legacy Rosetta-era P_AFFINITY control (i386/x86_64 only). */
SYSCTL_PROC(_sysctl, OID_AUTO, proc_exec_affinity, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_exec_affinity ,"I","proc_exec_affinity");
3133 fetch_process_cputype(
3137 cpu_type_t
*cputype
)
3139 proc_t p
= PROC_NULL
;
3146 else if (namelen
== 1) {
3147 p
= proc_find(name
[0]);
3156 #if defined(__i386__) || defined(__x86_64__)
3157 if (p
->p_flag
& P_TRANSLATED
) {
3158 ret
= CPU_TYPE_POWERPC
;
3163 ret
= cpu_type() & ~CPU_ARCH_MASK
;
3164 if (IS_64BIT_PROCESS(p
))
3165 ret
|= CPU_ARCH_ABI64
;
3176 sysctl_sysctl_native(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
,
3177 struct sysctl_req
*req
)
3180 cpu_type_t proc_cputype
= 0;
3181 if ((error
= fetch_process_cputype(req
->p
, (int *)arg1
, arg2
, &proc_cputype
)) != 0)
3184 if ((proc_cputype
& ~CPU_ARCH_MASK
) != (cpu_type() & ~CPU_ARCH_MASK
))
3186 return SYSCTL_OUT(req
, &res
, sizeof(res
));
/* sysctl.proc_native: whether the queried process runs the native CPU type. */
SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_native,"I","proc_native");
3191 sysctl_sysctl_cputype(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
,
3192 struct sysctl_req
*req
)
3195 cpu_type_t proc_cputype
= 0;
3196 if ((error
= fetch_process_cputype(req
->p
, (int *)arg1
, arg2
, &proc_cputype
)) != 0)
3198 return SYSCTL_OUT(req
, &proc_cputype
, sizeof(proc_cputype
));
/* sysctl.proc_cputype: CPU type of the queried process. */
SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_cputype,"I","proc_cputype");
3204 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
3206 return sysctl_io_number(req
, boothowto
& RB_SAFEBOOT
? 1 : 0, sizeof(int), NULL
, NULL
);
/* kern.safeboot: 1 when booted in safe mode (RB_SAFEBOOT in boothowto). */
SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
		CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
		0, 0, sysctl_safeboot, "I", "");
3215 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
3217 return sysctl_io_number(req
, boothowto
& RB_SINGLE
? 1 : 0, sizeof(int), NULL
, NULL
);
/* kern.singleuser: 1 when booted single-user (RB_SINGLE in boothowto). */
SYSCTL_PROC(_kern, OID_AUTO, singleuser,
		CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
		0, 0, sysctl_singleuser, "I", "");
3225 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
/* Affinity-set debug knobs; the variables live in osfmk/kern/affinity.c. */
extern boolean_t affinity_sets_enabled;
extern int	 affinity_sets_mapping;

SYSCTL_INT (_kern, OID_AUTO, affinity_sets_enabled,
	    CTLFLAG_RW | CTLFLAG_LOCKED, (int *) &affinity_sets_enabled, 0, "hinting enabled");
SYSCTL_INT (_kern, OID_AUTO, affinity_sets_mapping,
	    CTLFLAG_RW | CTLFLAG_LOCKED, &affinity_sets_mapping, 0, "mapping policy");
3236 * Boolean indicating if KASLR is active.
3240 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
3244 slide
= vm_kernel_slide
? 1 : 0;
3246 return sysctl_io_number( req
, slide
, sizeof(int), NULL
, NULL
);
/* kern.slide: read-only int, 1 when KASLR slid the kernel at boot. */
SYSCTL_PROC(_kern, OID_AUTO, slide,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_slide, "I", "");
/*
 * Limit on total memory users can wire.
 *
 * vm_global_user_wire_limit - system wide limit on wired memory from all
 * processes combined.
 *
 * vm_user_wire_limit - per address space limit on wired memory.  This puts
 * a cap on the process's rlimit value.
 *
 * These values are initialized to reasonable defaults at boot time based on
 * the available physical memory in the system.
 *
 * All values are in bytes.
 */
vm_map_size_t	vm_global_no_user_wire_amount;
vm_map_size_t	vm_global_user_wire_limit;
vm_map_size_t	vm_user_wire_limit;

/*
 * There needs to be a more automatic/elegant way to do this
 */
/* vm.* wire-limit tunables; writable so limits can be adjusted at runtime. */
SYSCTL_QUAD(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW | CTLFLAG_LOCKED,
    &vm_global_no_user_wire_amount, "");
SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED,
    &vm_global_user_wire_limit, "");
SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED,
    &vm_user_wire_limit, "");
/*
 * Read-only counters exported from the VM map copy-overwrite path
 * (vm_map_copy_overwrite_aligned); useful for diagnosing why aligned
 * source pages could not be handled by the fast path.
 */
extern int vm_map_copy_overwrite_aligned_src_not_internal;
extern int vm_map_copy_overwrite_aligned_src_not_symmetric;
extern int vm_map_copy_overwrite_aligned_src_large;
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_internal, CTLFLAG_RD | CTLFLAG_LOCKED,
    &vm_map_copy_overwrite_aligned_src_not_internal, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_symmetric, CTLFLAG_RD | CTLFLAG_LOCKED,
    &vm_map_copy_overwrite_aligned_src_not_symmetric, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_large, CTLFLAG_RD | CTLFLAG_LOCKED,
    &vm_map_copy_overwrite_aligned_src_large, 0, "");
/*
 * vm.vm_page_external_count: read-only count of externally-backed
 * (file-backed) pages.
 * vm.vm_page_filecache_min: writable floor on file-cache pages used by
 * page-reclaim policy.
 */
extern uint32_t	vm_page_external_count;
extern uint32_t	vm_page_filecache_min;

SYSCTL_INT(_vm, OID_AUTO, vm_page_external_count, CTLFLAG_RD | CTLFLAG_LOCKED,
    &vm_page_external_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min, CTLFLAG_RW | CTLFLAG_LOCKED,
    &vm_page_filecache_min, 0, "");
/*
 * VM compressor state and tunables, defined in the compressor/pageout
 * subsystem and exported here for the vm.compressor_* sysctls below.
 */
extern int		vm_compressor_mode;
extern uint32_t		swapout_target_age;
extern int64_t		compressor_bytes_used;
/* Sampling/thrashing-detection knobs (units per their names: msecs, events/10ms). */
extern uint32_t		compressor_eval_period_in_msecs;
extern uint32_t		compressor_sample_min_in_msecs;
extern uint32_t		compressor_sample_max_in_msecs;
extern uint32_t		compressor_thrashing_threshold_per_10msecs;
extern uint32_t		compressor_thrashing_min_per_10msecs;
/* Divisors controlling minor/major compaction, unthrottle, and catchup thresholds. */
extern uint32_t		vm_compressor_minorcompact_threshold_divisor;
extern uint32_t		vm_compressor_majorcompact_threshold_divisor;
extern uint32_t		vm_compressor_unthrottle_threshold_divisor;
extern uint32_t		vm_compressor_catchup_threshold_divisor;
/*
 * vm.compressor_* sysctls.  Mode, bytes used, and swapout target age are
 * read-only observability; the remaining entries are writable tuning knobs
 * for compressor sampling and compaction thresholds.
 */
SYSCTL_INT(_vm, OID_AUTO, compressor_mode, CTLFLAG_RD | CTLFLAG_LOCKED,
    &vm_compressor_mode, 0, "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_bytes_used, CTLFLAG_RD | CTLFLAG_LOCKED,
    &compressor_bytes_used, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_swapout_target_age, CTLFLAG_RD | CTLFLAG_LOCKED,
    &swapout_target_age, 0, "");

SYSCTL_INT(_vm, OID_AUTO, compressor_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED,
    &compressor_eval_period_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_sample_min_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED,
    &compressor_sample_min_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_sample_max_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED,
    &compressor_sample_max_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_threshold_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED,
    &compressor_thrashing_threshold_per_10msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_min_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED,
    &compressor_thrashing_min_per_10msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_minorcompact_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED,
    &vm_compressor_minorcompact_threshold_divisor, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_majorcompact_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED,
    &vm_compressor_majorcompact_threshold_divisor, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_unthrottle_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED,
    &vm_compressor_unthrottle_threshold_divisor, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_catchup_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED,
    &vm_compressor_catchup_threshold_divisor, 0, "");
/*
 * enable back trace events for thread blocks
 */

extern uint32_t kdebug_thread_block;

/* kern.kdebug_thread_block: runtime toggle for thread_block kdebug tracing. */
SYSCTL_INT (_kern, OID_AUTO, kdebug_thread_block,
    CTLFLAG_RW | CTLFLAG_LOCKED, &kdebug_thread_block, 0, "kdebug thread_block");
/*
 * Kernel stack size and depth
 */
/*
 * NOTE(review): the (int *) casts below assume the underlying variables
 * are int-sized (or that only the low 32 bits are meaningful) — confirm
 * against their declarations; a wider type would misreport on LP64.
 */
SYSCTL_INT (_kern, OID_AUTO, stack_size,
    CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_size, 0, "Kernel stack size");
SYSCTL_INT (_kern, OID_AUTO, stack_depth_max,
    CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_depth_max, 0, "Max kernel stack depth at interrupt or context switch");
/*
 * enable back trace for port allocations
 */

extern int ipc_portbt;

/* kern.ipc_portbt: runtime toggle for recording backtraces at port allocation. */
SYSCTL_INT(_kern, OID_AUTO, ipc_portbt,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &ipc_portbt, 0, "");
/*
 * See osfmk/kern/sched_prim.c for the corresponding definition
 * in osfmk/. If either version changes, update the other.
 */
#define SCHED_STRING_MAX_LENGTH (48)

extern char sched_string[SCHED_STRING_MAX_LENGTH];
/* kern.sched: read-only name of the active timeshare scheduler implementation. */
SYSCTL_STRING(_kern, OID_AUTO, sched,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    sched_string, sizeof(sched_string),
    "Timeshare scheduler implementation");
3361 * Only support runtime modification on embedded platforms
3362 * with development config enabled