2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
67 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
68 * support for mandatory and extensible security protections. This notice
69 * is included in support of clause 2.2 (b) of the Apple Public License,
74 * DEPRECATED sysctl system call code
76 * Everything in this file is deprecated. Sysctls should be handled
77 * by the code in kern_newsysctl.c.
78 * The remaining "case" sections are supposed to be converted into
79 * SYSCTL_*-style definitions, and as soon as all of them are gone,
80 * this source file is supposed to die.
82 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
83 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/malloc.h>
90 #include <sys/proc_internal.h>
91 #include <sys/kauth.h>
92 #include <sys/file_internal.h>
93 #include <sys/vnode_internal.h>
94 #include <sys/unistd.h>
96 #include <sys/ioctl.h>
97 #include <sys/namei.h>
99 #include <sys/disklabel.h>
101 #include <sys/sysctl.h>
102 #include <sys/user.h>
103 #include <sys/aio_kern.h>
104 #include <sys/reboot.h>
106 #include <security/audit/audit.h>
107 #include <kern/kalloc.h>
109 #include <mach/machine.h>
110 #include <mach/mach_host.h>
111 #include <mach/mach_types.h>
112 #include <mach/vm_param.h>
113 #include <kern/mach_param.h>
114 #include <kern/task.h>
115 #include <kern/lock.h>
116 #include <kern/processor.h>
117 #include <kern/debug.h>
118 #include <vm/vm_kern.h>
119 #include <vm/vm_map.h>
120 #include <mach/host_info.h>
122 #include <sys/mount_internal.h>
123 #include <sys/kdebug.h>
124 #include <sys/sysproto.h>
126 #include <IOKit/IOPlatformExpert.h>
127 #include <pexpert/pexpert.h>
129 #include <machine/machine_routines.h>
130 #include <machine/exec.h>
132 #include <vm/vm_protos.h>
133 #include <sys/imgsrc.h>
135 #if defined(__i386__) || defined(__x86_64__)
136 #include <i386/cpuid.h>
139 extern sysctlfn net_sysctl
;
140 extern sysctlfn cpu_sysctl
;
141 extern int aio_max_requests
;
142 extern int aio_max_requests_per_process
;
143 extern int aio_worker_threads
;
144 extern int lowpri_IO_window_msecs
;
145 extern int lowpri_IO_delay_msecs
;
146 extern int nx_enabled
;
147 extern int speculative_reads_disabled
;
148 extern int ignore_is_ssd
;
149 extern unsigned int speculative_prefetch_max
;
150 extern unsigned int preheat_pages_max
;
151 extern unsigned int preheat_pages_min
;
152 extern long numvnodes
;
154 extern unsigned int vm_max_delayed_work_limit
;
155 extern unsigned int vm_max_batch
;
157 extern unsigned int vm_page_free_min
;
158 extern unsigned int vm_page_free_target
;
159 extern unsigned int vm_page_free_reserved
;
160 extern unsigned int vm_page_speculative_percentage
;
161 extern unsigned int vm_page_speculative_q_age_ms
;
164 * Conditionally allow dtrace to see these functions for debugging purposes.
172 #define STATIC static
175 extern boolean_t mach_timer_coalescing_enabled
;
178 fill_user32_eproc(proc_t p
, struct user32_eproc
*ep
);
180 fill_user32_externproc(proc_t p
, struct user32_extern_proc
*exp
);
182 fill_user64_eproc(proc_t p
, struct user64_eproc
*ep
);
184 fill_user64_proc(proc_t p
, struct user64_kinfo_proc
*kp
);
186 fill_user64_externproc(proc_t p
, struct user64_extern_proc
*exp
);
188 kdbg_control(int *name
, u_int namelen
, user_addr_t where
, size_t * sizep
);
194 pcsamples_ops(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
,
196 __private_extern__ kern_return_t
197 reset_vmobjectcache(unsigned int val1
, unsigned int val2
);
199 fill_user32_proc(proc_t p
, struct user32_kinfo_proc
*kp
);
201 sysctl_procargs(int *name
, u_int namelen
, user_addr_t where
,
202 size_t *sizep
, proc_t cur_proc
);
204 sysctl_procargsx(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
,
205 proc_t cur_proc
, int argc_yes
);
207 sysctl_struct(user_addr_t oldp
, size_t *oldlenp
, user_addr_t newp
,
208 size_t newlen
, void *sp
, int len
);
210 STATIC
int sysdoproc_filt_KERN_PROC_PID(proc_t p
, void * arg
);
211 STATIC
int sysdoproc_filt_KERN_PROC_PGRP(proc_t p
, void * arg
);
212 STATIC
int sysdoproc_filt_KERN_PROC_TTY(proc_t p
, void * arg
);
213 STATIC
int sysdoproc_filt_KERN_PROC_UID(proc_t p
, void * arg
);
214 STATIC
int sysdoproc_filt_KERN_PROC_RUID(proc_t p
, void * arg
);
216 STATIC
int sysdoproc_filt_KERN_PROC_LCID(proc_t p
, void * arg
);
218 int sysdoproc_callback(proc_t p
, void *arg
);
221 /* forward declarations for non-static STATIC */
222 STATIC
void fill_loadavg64(struct loadavg
*la
, struct user64_loadavg
*la64
);
223 STATIC
void fill_loadavg32(struct loadavg
*la
, struct user32_loadavg
*la32
);
224 STATIC
int sysctl_handle_exec_archhandler_ppc(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
225 STATIC
int sysctl_handle_kern_threadname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
226 STATIC
int sysctl_sched_stats(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
227 STATIC
int sysctl_sched_stats_enable(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
228 STATIC
int sysctl_file(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
229 STATIC
int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
;
230 STATIC
int sysctl_dotranslate SYSCTL_HANDLER_ARGS
;
231 STATIC
int sysctl_doaffinity SYSCTL_HANDLER_ARGS
;
233 STATIC
int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
;
234 #endif /* COUNT_SYSCALLS */
236 STATIC
int sysctl_doprocargs SYSCTL_HANDLER_ARGS
;
237 #endif /* !CONFIG_EMBEDDED */
238 STATIC
int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
;
239 STATIC
int sysctl_prochandle SYSCTL_HANDLER_ARGS
;
241 STATIC
int sysctl_dodebug SYSCTL_HANDLER_ARGS
;
243 STATIC
int sysctl_aiomax(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
244 STATIC
int sysctl_aioprocmax(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
245 STATIC
int sysctl_aiothreads(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
246 STATIC
int sysctl_maxproc(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
247 STATIC
int sysctl_osversion(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
248 STATIC
int sysctl_sysctl_bootargs(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
249 STATIC
int sysctl_maxvnodes(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
250 STATIC
int sysctl_securelvl(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
251 STATIC
int sysctl_domainname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
252 STATIC
int sysctl_hostname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
253 STATIC
int sysctl_procname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
254 STATIC
int sysctl_boottime(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
255 STATIC
int sysctl_symfile(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
257 STATIC
int sysctl_netboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
259 #ifdef CONFIG_IMGSRC_ACCESS
260 STATIC
int sysctl_imgsrcdev(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
262 STATIC
int sysctl_usrstack(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
263 STATIC
int sysctl_usrstack64(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
264 STATIC
int sysctl_coredump(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
265 STATIC
int sysctl_suid_coredump(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
266 STATIC
int sysctl_delayterm(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
267 STATIC
int sysctl_rage_vnode(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
268 STATIC
int sysctl_kern_check_openevt(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
269 STATIC
int sysctl_nx(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
270 STATIC
int sysctl_loadavg(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
271 STATIC
int sysctl_vm_toggle_address_reuse(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
272 STATIC
int sysctl_swapusage(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
273 #if defined(__i386__) || defined(__x86_64__)
274 STATIC
int sysctl_sysctl_exec_affinity(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
276 STATIC
int fetch_process_cputype( proc_t cur_proc
, int *name
, u_int namelen
, cpu_type_t
*cputype
);
277 STATIC
int sysctl_sysctl_native(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
278 STATIC
int sysctl_sysctl_cputype(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
279 STATIC
int sysctl_safeboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
280 STATIC
int sysctl_singleuser(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
283 extern void IORegistrySetOSBuildVersion(char * build_version
);
286 fill_loadavg64(struct loadavg
*la
, struct user64_loadavg
*la64
)
288 la64
->ldavg
[0] = la
->ldavg
[0];
289 la64
->ldavg
[1] = la
->ldavg
[1];
290 la64
->ldavg
[2] = la
->ldavg
[2];
291 la64
->fscale
= (user64_long_t
)la
->fscale
;
295 fill_loadavg32(struct loadavg
*la
, struct user32_loadavg
*la32
)
297 la32
->ldavg
[0] = la
->ldavg
[0];
298 la32
->ldavg
[1] = la
->ldavg
[1];
299 la32
->ldavg
[2] = la
->ldavg
[2];
300 la32
->fscale
= (user32_long_t
)la
->fscale
;
306 * Description: Wire down the callers address map on behalf of sysctl's
307 * that perform their own copy operations while holding
308 * locks e.g. in the paging path, which could lead to a
309 * deadlock, or while holding a spinlock.
311 * Parameters: addr User buffer address
312 * len User buffer length
315 * vslock:ENOMEM Insufficient physical pages to wire
316 * vslock:EACCES Bad protection mode
317 * vslock:EINVAL Invalid parameters
319 * Notes: This code is invoked for the first OID element where the
320 * CTLFLAG_LOCKED is not specified for a given OID node
321 * element during OID traversal, and is held for all
322 * subsequent node traversals, and only released after the
323 * leaf node handler invocation is complete.
325 * Legacy: For legacy sysctl's provided by third party code which
326 * expect funnel protection for calls into their code, this
327 * routine will also take the funnel, which will also only
328 * be released after the leaf node handler is complete.
330 * This is to support legacy 32 bit BSD KEXTs and legacy 32
331 * bit single threaded filesystem KEXTs and similar code
332 * which relies on funnel protection, e.g. for things like
333 * FSID based sysctl's.
335 * NEW CODE SHOULD NOT RELY ON THIS BEHAVIOUR! IT WILL BE
336 * REMOVED IN A FUTURE RELEASE OF Mac OS X!
338 * Bugs: This routine does nothing with the new_addr and new_len
339 * at present, but it should, since read from the user space
340 * process address space which could potentially trigger
341 * paging may also be occurring deep down. This is due to
342 * a current limitation of the vslock() routine, which will
343 * always request a wired mapping be read/write, due to not
344 * taking an access mode parameter. Note that this could
345 * also cause problems for output on architectures where
346 * write access does not require read access if the current
347 * mapping lacks read access.
349 * XXX: To be moved to kern_newsysctl.c to avoid __private_extern__
351 int sysctl_mem_lock(user_addr_t old_addr
, user_size_t old_len
, user_addr_t new_addr
, user_size_t new_len
);
353 sysctl_mem_lock(__unused user_addr_t old_addr
, __unused user_size_t old_len
, __unused user_addr_t new_addr
, __unused user_size_t new_len
)
362 /* sysctl() syscall */
364 __sysctl(proc_t p
, struct __sysctl_args
*uap
, __unused
int32_t *retval
)
366 boolean_t funnel_state
= FALSE
; /* not held if unknown */
368 size_t savelen
= 0, oldlen
= 0, newlen
;
369 int name
[CTL_MAXNAME
];
371 boolean_t vslock_taken
= FALSE
;
372 boolean_t funnel_taken
= FALSE
;
374 kauth_cred_t my_cred
;
378 * all top-level sysctl names are non-terminal
380 if (uap
->namelen
> CTL_MAXNAME
|| uap
->namelen
< 2)
382 error
= copyin(uap
->name
, &name
[0], uap
->namelen
* sizeof(int));
386 AUDIT_ARG(ctlname
, name
, uap
->namelen
);
388 if (proc_is64bit(p
)) {
389 /* uap->newlen is a size_t value which grows to 64 bits
390 * when coming from a 64-bit process. since it's doubtful we'll
391 * have a sysctl newp buffer greater than 4GB we shrink it to size_t
393 newlen
= CAST_DOWN(size_t, uap
->newlen
);
396 newlen
= uap
->newlen
;
400 * XXX TODO: push down rights check for CTL_HW OIDs; most duplicate
401 * XXX it anyway, which is a performance sink, and requires use
402 * XXX of SUID root programs (see <rdar://3915692>).
404 * Note: Opt out of non-leaf node enforcement by removing this
405 * check for the top level OID value, and then adding
406 * CTLFLAG_ANYBODY to the leaf nodes in question. Enforce as
407 * suser for writed in leaf nodes by omitting this flag.
408 * Enforce with a higher granularity by making the leaf node
409 * of type SYSCTL_PROC() in order to provide a procedural
410 * enforcement call site.
412 * NOTE: This function is called prior to any subfunctions being
413 * called with a fallback to userland_sysctl(); as such, this
414 * permissions check here will veto the fallback operation.
416 /* CTL_UNSPEC is used to get oid to AUTO_OID */
417 if (uap
->new != USER_ADDR_NULL
418 && ((name
[0] == CTL_HW
)
419 || (name
[0] == CTL_VM
))
420 && (error
= suser(kauth_cred_get(), &p
->p_acflag
)))
423 // XXX need to relocate into each terminal instead of leaving this here...
424 // XXX macf preemptory check.
426 my_cred
= kauth_cred_proc_ref(p
);
427 error
= mac_system_check_sysctl(
433 0, /* XXX 1 for CTL_KERN checks */
437 kauth_cred_unref(&my_cred
);
442 if (uap
->oldlenp
!= USER_ADDR_NULL
) {
443 uint64_t oldlen64
= fuulong(uap
->oldlenp
);
445 oldlen
= CAST_DOWN(size_t, oldlen64
);
447 * If more than 4G, clamp to 4G - useracc() below will catch
448 * with an EFAULT, if it's actually necessary.
450 if (oldlen64
> 0x00000000ffffffffULL
)
451 oldlen
= 0xffffffffUL
;
454 if ((name
[0] == CTL_VFS
|| name
[0] == CTL_VM
)) {
456 * Always take the funnel for CTL_VFS and CTL_VM
458 * XXX We should also take it for any OID without the
459 * XXX CTLFLAG_LOCKED set on it; fix this later!
461 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
465 * XXX Take the vslock() only when we are copying out; this
466 * XXX erroneously assumes that the copy in will not cause
467 * XXX a fault if called from the paging path due to the
468 * XXX having been recently touched in order to establish
469 * XXX the input data. This is a bad assumption.
471 * Note: This is overkill, but third parties might
472 * already call sysctl internally in KEXTs that
473 * implement mass storage drivers. If you are
474 * writing a new KEXT, don't do that.
476 if(uap
->old
!= USER_ADDR_NULL
) {
477 if (!useracc(uap
->old
, (user_size_t
)oldlen
, B_WRITE
)) {
478 thread_funnel_set(kernel_flock
, funnel_state
);
483 if ((error
= vslock(uap
->old
, (user_size_t
)oldlen
))) {
484 thread_funnel_set(kernel_flock
, funnel_state
);
494 * XXX convert vfs_sysctl subelements to newsysctl; this is hard
495 * XXX because of VFS_NUMMNTOPS being top level.
498 if (name
[0] == CTL_VFS
) {
499 error
= vfs_sysctl(name
+ 1, uap
->namelen
- 1, uap
->old
,
500 &oldlen
, uap
->new, newlen
, p
);
503 if (vslock_taken
== TRUE
) {
504 error1
= vsunlock(uap
->old
, (user_size_t
)savelen
, B_WRITE
);
509 if ( (name
[0] != CTL_VFS
) && (error
== ENOTSUP
) ) {
511 error
= userland_sysctl(p
, name
, uap
->namelen
, uap
->old
, &tmp
,
512 uap
->new, newlen
, &oldlen
);
516 * If we took the funnel, which we only do for CTL_VFS and CTL_VM on
517 * 32 bit architectures, then drop it.
519 * XXX the grabbing and dropping need to move into the leaf nodes,
520 * XXX for sysctl's that are not marked CTLFLAG_LOCKED, but this is
521 * XXX true for the vslock, as well. We have a start at a routine
522 * to wrapper this (above), but it's not turned on. The current code
523 * removed the funnel and the vslock() from all but these two top
524 * level OIDs. Note that VFS only needs to take the funnel if the FS
525 * against which it's operating is not thread safe (but since an FS
526 * can be in the paging path, it still needs to take the vslock()).
529 thread_funnel_set(kernel_flock
, funnel_state
);
531 if ((error
) && (error
!= ENOMEM
))
534 if (uap
->oldlenp
!= USER_ADDR_NULL
)
535 error
= suulong(uap
->oldlenp
, oldlen
);
541 * Attributes stored in the kernel.
543 __private_extern__
char corefilename
[MAXPATHLEN
+1];
544 __private_extern__
int do_coredump
;
545 __private_extern__
int sugid_coredump
;
548 __private_extern__
int do_count_syscalls
;
552 int securelevel
= -1;
558 sysctl_doaffinity SYSCTL_HANDLER_ARGS
560 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
561 int *name
= arg1
; /* oid element argument vector */
562 int namelen
= arg2
; /* number of oid element arguments */
563 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
564 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
565 user_addr_t newp
= req
->newptr
; /* user buffer copy in address */
566 // size_t newlen = req->newlen; /* user buffer copy in size */
568 int error
= ENOTSUP
; /* Default to failure */
570 proc_t cur_proc
= current_proc();
575 if (name
[0] == 0 && 1 == namelen
) {
576 error
= sysctl_rdint(oldp
, oldlenp
, newp
,
577 (cur_proc
->p_flag
& P_AFFINITY
) ? 1 : 0);
578 } else if (name
[0] == 1 && 2 == namelen
) {
580 OSBitAndAtomic(~((uint32_t)P_AFFINITY
), &cur_proc
->p_flag
);
582 OSBitOrAtomic(P_AFFINITY
, &cur_proc
->p_flag
);
587 /* adjust index so we return the right required/consumed amount */
589 req
->oldidx
+= req
->oldlen
;
593 SYSCTL_PROC(_kern
, KERN_AFFINITY
, affinity
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
594 0, /* Pointer argument (arg1) */
595 0, /* Integer argument (arg2) */
596 sysctl_doaffinity
, /* Handler function */
597 NULL
, /* Data pointer */
601 sysctl_dotranslate SYSCTL_HANDLER_ARGS
603 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
604 int *name
= arg1
; /* oid element argument vector */
605 int namelen
= arg2
; /* number of oid element arguments */
606 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
607 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
608 user_addr_t newp
= req
->newptr
; /* user buffer copy in address */
609 // size_t newlen = req->newlen; /* user buffer copy in size */
612 proc_t cur_proc
= current_proc();
614 int istranslated
= 0;
615 kauth_cred_t my_cred
;
621 p
= proc_find(name
[0]);
625 my_cred
= kauth_cred_proc_ref(p
);
626 uid
= kauth_cred_getuid(my_cred
);
627 kauth_cred_unref(&my_cred
);
628 if ((uid
!= kauth_cred_getuid(kauth_cred_get()))
629 && suser(kauth_cred_get(), &cur_proc
->p_acflag
)) {
634 istranslated
= (p
->p_flag
& P_TRANSLATED
);
636 error
= sysctl_rdint(oldp
, oldlenp
, newp
,
637 (istranslated
!= 0) ? 1 : 0);
639 /* adjust index so we return the right required/consumed amount */
641 req
->oldidx
+= req
->oldlen
;
646 * XXX make CTLFLAG_RW so sysctl_rdint() will EPERM on attempts to write;
647 * XXX this may not be necessary.
649 SYSCTL_PROC(_kern
, KERN_TRANSLATE
, translate
, CTLTYPE_NODE
|CTLFLAG_RW
| CTLFLAG_LOCKED
,
650 0, /* Pointer argument (arg1) */
651 0, /* Integer argument (arg2) */
652 sysctl_dotranslate
, /* Handler function */
653 NULL
, /* Data pointer */
657 set_archhandler(__unused proc_t p
, int arch
)
661 struct vnode_attr va
;
662 vfs_context_t ctx
= vfs_context_current();
663 struct exec_archhandler
*archhandler
;
666 case CPU_TYPE_POWERPC
:
667 archhandler
= &exec_archhandler_ppc
;
673 NDINIT(&nd
, LOOKUP
, OP_GETATTR
, FOLLOW
| LOCKLEAF
, UIO_SYSSPACE
,
674 CAST_USER_ADDR_T(archhandler
->path
), ctx
);
680 /* Check mount point */
681 if ((nd
.ni_vp
->v_mount
->mnt_flag
& MNT_NOEXEC
) ||
682 (nd
.ni_vp
->v_type
!= VREG
)) {
688 VATTR_WANTED(&va
, va_fsid
);
689 VATTR_WANTED(&va
, va_fileid
);
690 error
= vnode_getattr(nd
.ni_vp
, &va
, ctx
);
697 archhandler
->fsid
= va
.va_fsid
;
698 archhandler
->fileid
= va
.va_fileid
;
704 sysctl_handle_exec_archhandler_ppc(struct sysctl_oid
*oidp
, void *arg1
,
705 int arg2
, struct sysctl_req
*req
)
709 if (req
->newptr
&& !kauth_cred_issuser(kauth_cred_get()))
712 error
= sysctl_handle_string(oidp
, arg1
, arg2
, req
);
718 error
= set_archhandler(req
->p
, CPU_TYPE_POWERPC
);
726 sysctl_handle_kern_threadname( __unused
struct sysctl_oid
*oidp
, __unused
void *arg1
,
727 __unused
int arg2
, struct sysctl_req
*req
)
730 struct uthread
*ut
= get_bsdthread_info(current_thread());
731 user_addr_t oldp
=0, newp
=0;
732 size_t *oldlenp
=NULL
;
736 oldlenp
= &(req
->oldlen
);
738 newlen
= req
->newlen
;
740 /* We want the current length, and maybe the string itself */
742 /* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */
743 size_t currlen
= MAXTHREADNAMESIZE
- 1;
746 /* use length of current thread name */
747 currlen
= strlen(ut
->pth_name
);
749 if(*oldlenp
< currlen
)
751 /* NOTE - we do not copy the NULL terminator */
753 error
= copyout(ut
->pth_name
,oldp
,currlen
);
758 /* return length of thread name minus NULL terminator (just like strlen) */
759 req
->oldidx
= currlen
;
762 /* We want to set the name to something */
765 if(newlen
> (MAXTHREADNAMESIZE
- 1))
769 ut
->pth_name
= (char*)kalloc( MAXTHREADNAMESIZE
);
773 bzero(ut
->pth_name
, MAXTHREADNAMESIZE
);
774 error
= copyin(newp
, ut
->pth_name
, newlen
);
782 SYSCTL_PROC(_kern
, KERN_THREADNAME
, threadname
, CTLFLAG_ANYBODY
| CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, 0, sysctl_handle_kern_threadname
,"A","");
784 SYSCTL_NODE(_kern
, KERN_EXEC
, exec
, CTLFLAG_RD
|CTLFLAG_LOCKED
, 0, "");
786 SYSCTL_NODE(_kern_exec
, OID_AUTO
, archhandler
, CTLFLAG_RD
|CTLFLAG_LOCKED
, 0, "");
788 SYSCTL_PROC(_kern_exec_archhandler
, OID_AUTO
, powerpc
,
789 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
790 exec_archhandler_ppc
.path
,
791 sizeof(exec_archhandler_ppc
.path
),
792 sysctl_handle_exec_archhandler_ppc
, "A", "");
796 sysctl_sched_stats(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
798 host_basic_info_data_t hinfo
;
802 mach_msg_type_number_t count
= HOST_BASIC_INFO_COUNT
;
803 struct _processor_statistics_np
*buf
;
806 kret
= host_info((host_t
)BSD_HOST
, HOST_BASIC_INFO
, (host_info_t
)&hinfo
, &count
);
807 if (kret
!= KERN_SUCCESS
) {
811 size
= sizeof(struct _processor_statistics_np
) * (hinfo
.logical_cpu_max
+ 2); /* One for RT Queue, One for Fair Share Queue */
813 if (req
->oldlen
< size
) {
817 MALLOC(buf
, struct _processor_statistics_np
*, size
, M_TEMP
, M_ZERO
| M_WAITOK
);
819 kret
= get_sched_statistics(buf
, &size
);
820 if (kret
!= KERN_SUCCESS
) {
825 error
= sysctl_io_opaque(req
, buf
, size
, &changed
);
831 panic("Sched info changed?!");
838 SYSCTL_PROC(_kern
, OID_AUTO
, sched_stats
, CTLFLAG_LOCKED
, 0, 0, sysctl_sched_stats
, "-", "");
841 sysctl_sched_stats_enable(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, __unused
struct sysctl_req
*req
)
846 if (req
->newlen
!= sizeof(active
)) {
850 res
= copyin(req
->newptr
, &active
, sizeof(active
));
855 return set_sched_stats_active(active
);
858 SYSCTL_PROC(_kern
, OID_AUTO
, sched_stats_enable
, CTLFLAG_LOCKED
| CTLFLAG_WR
, 0, 0, sysctl_sched_stats_enable
, "-", "");
860 extern int get_kernel_symfile(proc_t
, char **);
863 #define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)
866 extern int syscalls_log
[];
867 extern const char *syscallnames
[];
870 sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
872 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
873 __unused
int *name
= arg1
; /* oid element argument vector */
874 __unused
int namelen
= arg2
; /* number of oid element arguments */
875 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
876 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
877 user_addr_t newp
= req
->newptr
; /* user buffer copy in address */
878 size_t newlen
= req
->newlen
; /* user buffer copy in size */
883 /* valid values passed in:
884 * = 0 means don't keep called counts for each bsd syscall
885 * > 0 means keep called counts for each bsd syscall
886 * = 2 means dump current counts to the system log
887 * = 3 means reset all counts
888 * for example, to dump current counts:
889 * sysctl -w kern.count_calls=2
891 error
= sysctl_int(oldp
, oldlenp
, newp
, newlen
, &tmp
);
897 do_count_syscalls
= 1;
899 else if ( tmp
== 0 || tmp
== 2 || tmp
== 3 ) {
901 for ( i
= 0; i
< nsysent
; i
++ ) {
902 if ( syscalls_log
[i
] != 0 ) {
904 printf("%d calls - name %s \n", syscalls_log
[i
], syscallnames
[i
]);
912 do_count_syscalls
= 1;
916 /* adjust index so we return the right required/consumed amount */
918 req
->oldidx
+= req
->oldlen
;
922 SYSCTL_PROC(_kern
, KERN_COUNT_SYSCALLS
, count_syscalls
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
923 0, /* Pointer argument (arg1) */
924 0, /* Integer argument (arg2) */
925 sysctl_docountsyscalls
, /* Handler function */
926 NULL
, /* Data pointer */
928 #endif /* COUNT_SYSCALLS */
932 * Debugging related system variables.
936 #endif /* DIAGNOSTIC */
937 struct ctldebug debug0
, debug1
;
938 struct ctldebug debug2
, debug3
, debug4
;
939 struct ctldebug debug5
, debug6
, debug7
, debug8
, debug9
;
940 struct ctldebug debug10
, debug11
, debug12
, debug13
, debug14
;
941 struct ctldebug debug15
, debug16
, debug17
, debug18
, debug19
;
942 STATIC
struct ctldebug
*debugvars
[CTL_DEBUG_MAXID
] = {
943 &debug0
, &debug1
, &debug2
, &debug3
, &debug4
,
944 &debug5
, &debug6
, &debug7
, &debug8
, &debug9
,
945 &debug10
, &debug11
, &debug12
, &debug13
, &debug14
,
946 &debug15
, &debug16
, &debug17
, &debug18
, &debug19
,
949 sysctl_dodebug SYSCTL_HANDLER_ARGS
951 int cmd
= oidp
->oid_arg2
; /* subcommand*/
952 int *name
= arg1
; /* oid element argument vector */
953 int namelen
= arg2
; /* number of oid element arguments */
954 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
955 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
956 user_addr_t newp
= req
->newptr
; /* user buffer copy in address */
957 size_t newlen
= req
->newlen
; /* user buffer copy in size */
960 struct ctldebug
*cdp
;
962 /* all sysctl names at this level are name and field */
964 return (ENOTSUP
); /* overloaded */
965 if (cmd
< 0 || cmd
>= CTL_DEBUG_MAXID
)
967 cdp
= debugvars
[cmd
];
968 if (cdp
->debugname
== 0)
972 error
= sysctl_rdstring(oldp
, oldlenp
, newp
, cdp
->debugname
);
974 case CTL_DEBUG_VALUE
:
975 error
= sysctl_int(oldp
, oldlenp
, newp
, newlen
, cdp
->debugvar
);
982 /* adjust index so we return the right required/consumed amount */
984 req
->oldidx
+= req
->oldlen
;
989 * XXX We mark this RW instead of RD to let sysctl_rdstring() return the
990 * XXX historical error.
992 SYSCTL_PROC(_debug
, CTL_DEBUG_NAME
, name
, CTLTYPE_NODE
|CTLFLAG_RW
| CTLFLAG_LOCKED
,
993 0, /* Pointer argument (arg1) */
994 CTL_DEBUG_NAME
, /* Integer argument (arg2) */
995 sysctl_dodebug
, /* Handler function */
996 NULL
, /* Data pointer */
998 SYSCTL_PROC(_debug
, CTL_DEBUG_VALUE
, value
, CTLTYPE_NODE
|CTLFLAG_RW
| CTLFLAG_LOCKED
,
999 0, /* Pointer argument (arg1) */
1000 CTL_DEBUG_VALUE
, /* Integer argument (arg2) */
1001 sysctl_dodebug
, /* Handler function */
1002 NULL
, /* Data pointer */
1007 * The following sysctl_* functions should not be used
1008 * any more, as they can only cope with callers in
1009 * user mode: Use new-style
1010 * sysctl_io_number()
1011 * sysctl_io_string()
1012 * sysctl_io_opaque()
1017 * Validate parameters and get old / set new parameters
1018 * for an integer-valued sysctl function.
1021 sysctl_int(user_addr_t oldp
, size_t *oldlenp
,
1022 user_addr_t newp
, size_t newlen
, int *valp
)
1026 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1028 if (oldp
&& *oldlenp
< sizeof(int))
1030 if (newp
&& newlen
!= sizeof(int))
1032 *oldlenp
= sizeof(int);
1034 error
= copyout(valp
, oldp
, sizeof(int));
1035 if (error
== 0 && newp
) {
1036 error
= copyin(newp
, valp
, sizeof(int));
1037 AUDIT_ARG(value32
, *valp
);
1043 * As above, but read-only.
1046 sysctl_rdint(user_addr_t oldp
, size_t *oldlenp
, user_addr_t newp
, int val
)
1050 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1052 if (oldp
&& *oldlenp
< sizeof(int))
1056 *oldlenp
= sizeof(int);
1058 error
= copyout((caddr_t
)&val
, oldp
, sizeof(int));
1063 * Validate parameters and get old / set new parameters
1064 * for an quad(64bit)-valued sysctl function.
1067 sysctl_quad(user_addr_t oldp
, size_t *oldlenp
,
1068 user_addr_t newp
, size_t newlen
, quad_t
*valp
)
1072 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1074 if (oldp
&& *oldlenp
< sizeof(quad_t
))
1076 if (newp
&& newlen
!= sizeof(quad_t
))
1078 *oldlenp
= sizeof(quad_t
);
1080 error
= copyout(valp
, oldp
, sizeof(quad_t
));
1081 if (error
== 0 && newp
)
1082 error
= copyin(newp
, valp
, sizeof(quad_t
));
1087 * As above, but read-only.
1090 sysctl_rdquad(user_addr_t oldp
, size_t *oldlenp
, user_addr_t newp
, quad_t val
)
1094 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1096 if (oldp
&& *oldlenp
< sizeof(quad_t
))
1100 *oldlenp
= sizeof(quad_t
);
1102 error
= copyout((caddr_t
)&val
, oldp
, sizeof(quad_t
));
1107 * Validate parameters and get old / set new parameters
1108 * for a string-valued sysctl function. Unlike sysctl_string, if you
1109 * give it a too small (but larger than 0 bytes) buffer, instead of
1110 * returning ENOMEM, it truncates the returned string to the buffer
1111 * size. This preserves the semantics of some library routines
1112 * implemented via sysctl, which truncate their returned data, rather
1113 * than simply returning an error. The returned string is always NUL
1117 sysctl_trstring(user_addr_t oldp
, size_t *oldlenp
,
1118 user_addr_t newp
, size_t newlen
, char *str
, int maxlen
)
1120 int len
, copylen
, error
= 0;
1122 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1124 copylen
= len
= strlen(str
) + 1;
1125 if (oldp
&& (len
< 0 || *oldlenp
< 1))
1127 if (oldp
&& (*oldlenp
< (size_t)len
))
1128 copylen
= *oldlenp
+ 1;
1129 if (newp
&& (maxlen
< 0 || newlen
>= (size_t)maxlen
))
1131 *oldlenp
= copylen
- 1; /* deal with NULL strings correctly */
1133 error
= copyout(str
, oldp
, copylen
);
1135 unsigned char c
= 0;
1138 error
= copyout((void *)&c
, oldp
, sizeof(char));
1141 if (error
== 0 && newp
) {
1142 error
= copyin(newp
, str
, newlen
);
1144 AUDIT_ARG(text
, (char *)str
);
1150 * Validate parameters and get old / set new parameters
1151 * for a string-valued sysctl function.
1154 sysctl_string(user_addr_t oldp
, size_t *oldlenp
,
1155 user_addr_t newp
, size_t newlen
, char *str
, int maxlen
)
1159 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1161 len
= strlen(str
) + 1;
1162 if (oldp
&& (len
< 0 || *oldlenp
< (size_t)len
))
1164 if (newp
&& (maxlen
< 0 || newlen
>= (size_t)maxlen
))
1166 *oldlenp
= len
-1; /* deal with NULL strings correctly */
1168 error
= copyout(str
, oldp
, len
);
1170 if (error
== 0 && newp
) {
1171 error
= copyin(newp
, str
, newlen
);
1173 AUDIT_ARG(text
, (char *)str
);
1179 * As above, but read-only.
1182 sysctl_rdstring(user_addr_t oldp
, size_t *oldlenp
,
1183 user_addr_t newp
, char *str
)
1187 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1189 len
= strlen(str
) + 1;
1190 if (oldp
&& *oldlenp
< (size_t)len
)
1196 error
= copyout(str
, oldp
, len
);
1201 * Validate parameters and get old / set new parameters
1202 * for a structure oriented sysctl function.
1205 sysctl_struct(user_addr_t oldp
, size_t *oldlenp
,
1206 user_addr_t newp
, size_t newlen
, void *sp
, int len
)
1210 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1212 if (oldp
&& (len
< 0 || *oldlenp
< (size_t)len
))
1214 if (newp
&& (len
< 0 || newlen
> (size_t)len
))
1218 error
= copyout(sp
, oldp
, len
);
1220 if (error
== 0 && newp
)
1221 error
= copyin(newp
, sp
, len
);
1226 * Validate parameters and get old parameters
1227 * for a structure oriented sysctl function.
1230 sysctl_rdstruct(user_addr_t oldp
, size_t *oldlenp
,
1231 user_addr_t newp
, void *sp
, int len
)
1235 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1237 if (oldp
&& (len
< 0 || *oldlenp
< (size_t)len
))
1243 error
= copyout(sp
, oldp
, len
);
1248 * Get file structures.
1252 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
1255 struct fileglob
*fg
;
1256 struct extern_file nef
;
1258 if (req
->oldptr
== USER_ADDR_NULL
) {
1260 * overestimate by 10 files
1262 req
->oldidx
= sizeof(filehead
) + (nfiles
+ 10) * sizeof(struct extern_file
);
1267 * first copyout filehead
1269 error
= SYSCTL_OUT(req
, &filehead
, sizeof(filehead
));
1274 * followed by an array of file structures
1276 for (fg
= filehead
.lh_first
; fg
!= 0; fg
= fg
->f_list
.le_next
) {
1277 nef
.f_list
.le_next
= (struct extern_file
*)fg
->f_list
.le_next
;
1278 nef
.f_list
.le_prev
= (struct extern_file
**)fg
->f_list
.le_prev
;
1279 nef
.f_flag
= (fg
->fg_flag
& FMASK
);
1280 nef
.f_type
= fg
->fg_type
;
1281 nef
.f_count
= fg
->fg_count
;
1282 nef
.f_msgcount
= fg
->fg_msgcount
;
1283 nef
.f_cred
= fg
->fg_cred
;
1284 nef
.f_ops
= fg
->fg_ops
;
1285 nef
.f_offset
= fg
->fg_offset
;
1286 nef
.f_data
= fg
->fg_data
;
1287 error
= SYSCTL_OUT(req
, &nef
, sizeof(nef
));
1294 SYSCTL_PROC(_kern
, KERN_FILE
, file
,
1295 CTLTYPE_STRUCT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1296 0, 0, sysctl_file
, "S,filehead", "");
1299 sysdoproc_filt_KERN_PROC_PID(proc_t p
, void * arg
)
1301 if (p
->p_pid
!= (pid_t
)*(int*)arg
)
1308 sysdoproc_filt_KERN_PROC_PGRP(proc_t p
, void * arg
)
1310 if (p
->p_pgrpid
!= (pid_t
)*(int*)arg
)
1317 sysdoproc_filt_KERN_PROC_TTY(proc_t p
, void * arg
)
1319 boolean_t funnel_state
;
1324 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
1325 /* This is very racy but list lock is held.. Hmmm. */
1326 if ((p
->p_flag
& P_CONTROLT
) == 0 ||
1327 (p
->p_pgrp
== NULL
) || (p
->p_pgrp
->pg_session
== NULL
) ||
1328 (tp
= SESSION_TP(p
->p_pgrp
->pg_session
)) == TTY_NULL
||
1329 tp
->t_dev
!= (dev_t
)*(int*)arg
)
1334 thread_funnel_set(kernel_flock
, funnel_state
);
1340 sysdoproc_filt_KERN_PROC_UID(proc_t p
, void * arg
)
1342 kauth_cred_t my_cred
;
1345 if (p
->p_ucred
== NULL
)
1347 my_cred
= kauth_cred_proc_ref(p
);
1348 uid
= kauth_cred_getuid(my_cred
);
1349 kauth_cred_unref(&my_cred
);
1351 if (uid
!= (uid_t
)*(int*)arg
)
1359 sysdoproc_filt_KERN_PROC_RUID(proc_t p
, void * arg
)
1361 kauth_cred_t my_cred
;
1364 if (p
->p_ucred
== NULL
)
1366 my_cred
= kauth_cred_proc_ref(p
);
1367 ruid
= kauth_cred_getruid(my_cred
);
1368 kauth_cred_unref(&my_cred
);
1370 if (ruid
!= (uid_t
)*(int*)arg
)
1378 sysdoproc_filt_KERN_PROC_LCID(proc_t p
, void * arg
)
1380 if ((p
->p_lctx
== NULL
) ||
1381 (p
->p_lctx
->lc_id
!= (pid_t
)*(int*)arg
))
1389 * try over estimating by 5 procs
1391 #define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
1392 struct sysdoproc_args
{
1395 boolean_t is_64_bit
;
1407 sysdoproc_callback(proc_t p
, void * arg
)
1409 struct sysdoproc_args
* args
= (struct sysdoproc_args
*)arg
;
1412 if (args
->buflen
>= args
->sizeof_kproc
) {
1413 if ((args
->ruidcheck
!= 0) && (sysdoproc_filt_KERN_PROC_RUID(p
, &args
->uidval
) == 0))
1414 return(PROC_RETURNED
);
1415 if ((args
->uidcheck
!= 0) && (sysdoproc_filt_KERN_PROC_UID(p
, &args
->uidval
) == 0))
1416 return(PROC_RETURNED
);
1417 if ((args
->ttycheck
!= 0) && (sysdoproc_filt_KERN_PROC_TTY(p
, &args
->uidval
) == 0))
1418 return(PROC_RETURNED
);
1420 bzero(args
->kprocp
, args
->sizeof_kproc
);
1421 if (args
->is_64_bit
) {
1422 fill_user64_proc(p
, (struct user64_kinfo_proc
*) args
->kprocp
);
1425 fill_user32_proc(p
, (struct user32_kinfo_proc
*) args
->kprocp
);
1427 error
= copyout(args
->kprocp
, args
->dp
, args
->sizeof_kproc
);
1429 *args
->errorp
= error
;
1430 return(PROC_RETURNED_DONE
);
1433 args
->dp
+= args
->sizeof_kproc
;
1434 args
->buflen
-= args
->sizeof_kproc
;
1436 args
->needed
+= args
->sizeof_kproc
;
1437 return(PROC_RETURNED
);
1440 SYSCTL_NODE(_kern
, KERN_PROC
, proc
, CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, "");
1442 sysctl_prochandle SYSCTL_HANDLER_ARGS
1444 int cmd
= oidp
->oid_arg2
; /* subcommand for multiple nodes */
1445 int *name
= arg1
; /* oid element argument vector */
1446 int namelen
= arg2
; /* number of oid element arguments */
1447 user_addr_t where
= req
->oldptr
;/* user buffer copy out address */
1449 user_addr_t dp
= where
;
1451 int buflen
= where
!= USER_ADDR_NULL
? req
->oldlen
: 0;
1453 boolean_t is_64_bit
= FALSE
;
1454 struct user32_kinfo_proc user32_kproc
;
1455 struct user64_kinfo_proc user_kproc
;
1458 int (*filterfn
)(proc_t
, void *) = 0;
1459 struct sysdoproc_args args
;
1464 if (namelen
!= 1 && !(namelen
== 0 && cmd
== KERN_PROC_ALL
))
1467 is_64_bit
= proc_is64bit(current_proc());
1469 sizeof_kproc
= sizeof(user_kproc
);
1470 kprocp
= (caddr_t
) &user_kproc
;
1473 sizeof_kproc
= sizeof(user32_kproc
);
1474 kprocp
= (caddr_t
) &user32_kproc
;
1481 filterfn
= sysdoproc_filt_KERN_PROC_PID
;
1484 case KERN_PROC_PGRP
:
1485 filterfn
= sysdoproc_filt_KERN_PROC_PGRP
;
1496 case KERN_PROC_RUID
:
1501 case KERN_PROC_LCID
:
1502 filterfn
= sysdoproc_filt_KERN_PROC_LCID
;
1509 /* must be kern.proc.<unknown> */
1514 args
.buflen
= buflen
;
1515 args
.kprocp
= kprocp
;
1516 args
.is_64_bit
= is_64_bit
;
1518 args
.needed
= needed
;
1519 args
.errorp
= &error
;
1520 args
.uidcheck
= uidcheck
;
1521 args
.ruidcheck
= ruidcheck
;
1522 args
.ttycheck
= ttycheck
;
1523 args
.sizeof_kproc
= sizeof_kproc
;
1525 args
.uidval
= name
[0];
1527 proc_iterate((PROC_ALLPROCLIST
| PROC_ZOMBPROCLIST
), sysdoproc_callback
, &args
, filterfn
, name
);
1533 needed
= args
.needed
;
1535 if (where
!= USER_ADDR_NULL
) {
1536 req
->oldlen
= dp
- where
;
1537 if (needed
> req
->oldlen
)
1540 needed
+= KERN_PROCSLOP
;
1541 req
->oldlen
= needed
;
1543 /* adjust index so we return the right required/consumed amount */
1544 req
->oldidx
+= req
->oldlen
;
1548 * We specify the subcommand code for multiple nodes as the 'req->arg2' value
1549 * in the sysctl declaration itself, which comes into the handler function
1550 * as 'oidp->oid_arg2'.
1552 * For these particular sysctls, since they have well known OIDs, we could
1553 * have just obtained it from the '((int *)arg1)[0]' parameter, but that would
1554 * not demonstrate how to handle multiple sysctls that used OID_AUTO instead
1555 * of a well known value with a common handler function. This is desirable,
1556 * because we want well known values to "go away" at some future date.
1558 * It should be noted that the value of '((int *)arg1)[1]' is used for many
1559 * an integer parameter to the subcommand for many of these sysctls; we'd
1560 * rather have used '((int *)arg1)[0]' for that, or even better, an element
1561 * in a structure passed in as the the 'newp' argument to sysctlbyname(3),
1562 * and then use leaf-node permissions enforcement, but that would have
1563 * necessitated modifying user space code to correspond to the interface
1564 * change, and we are striving for binary backward compatibility here; even
1565 * though these are SPI, and not intended for use by user space applications
1566 * which are not themselves system tools or libraries, some applications
1567 * have erroneously used them.
1569 SYSCTL_PROC(_kern_proc
, KERN_PROC_ALL
, all
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1570 0, /* Pointer argument (arg1) */
1571 KERN_PROC_ALL
, /* Integer argument (arg2) */
1572 sysctl_prochandle
, /* Handler function */
1573 NULL
, /* Data is size variant on ILP32/LP64 */
1575 SYSCTL_PROC(_kern_proc
, KERN_PROC_PID
, pid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1576 0, /* Pointer argument (arg1) */
1577 KERN_PROC_PID
, /* Integer argument (arg2) */
1578 sysctl_prochandle
, /* Handler function */
1579 NULL
, /* Data is size variant on ILP32/LP64 */
1581 SYSCTL_PROC(_kern_proc
, KERN_PROC_TTY
, tty
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1582 0, /* Pointer argument (arg1) */
1583 KERN_PROC_TTY
, /* Integer argument (arg2) */
1584 sysctl_prochandle
, /* Handler function */
1585 NULL
, /* Data is size variant on ILP32/LP64 */
1587 SYSCTL_PROC(_kern_proc
, KERN_PROC_PGRP
, pgrp
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1588 0, /* Pointer argument (arg1) */
1589 KERN_PROC_PGRP
, /* Integer argument (arg2) */
1590 sysctl_prochandle
, /* Handler function */
1591 NULL
, /* Data is size variant on ILP32/LP64 */
1593 SYSCTL_PROC(_kern_proc
, KERN_PROC_UID
, uid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1594 0, /* Pointer argument (arg1) */
1595 KERN_PROC_UID
, /* Integer argument (arg2) */
1596 sysctl_prochandle
, /* Handler function */
1597 NULL
, /* Data is size variant on ILP32/LP64 */
1599 SYSCTL_PROC(_kern_proc
, KERN_PROC_RUID
, ruid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1600 0, /* Pointer argument (arg1) */
1601 KERN_PROC_RUID
, /* Integer argument (arg2) */
1602 sysctl_prochandle
, /* Handler function */
1603 NULL
, /* Data is size variant on ILP32/LP64 */
1605 SYSCTL_PROC(_kern_proc
, KERN_PROC_LCID
, lcid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1606 0, /* Pointer argument (arg1) */
1607 KERN_PROC_LCID
, /* Integer argument (arg2) */
1608 sysctl_prochandle
, /* Handler function */
1609 NULL
, /* Data is size variant on ILP32/LP64 */
1614 * Fill in an eproc structure for the specified process.
1617 fill_user32_eproc(proc_t p
, struct user32_eproc
*ep
)
1620 kauth_cred_t my_cred
;
1622 struct session
* sessp
;
1625 sessp
= proc_session(p
);
1627 ep
->e_paddr
= CAST_DOWN_EXPLICIT(uint32_t,p
);
1629 if (pg
!= PGRP_NULL
) {
1630 ep
->e_sess
= CAST_DOWN_EXPLICIT(uint32_t,sessp
);
1631 ep
->e_pgid
= p
->p_pgrpid
;
1632 ep
->e_jobc
= pg
->pg_jobc
;
1633 if ((sessp
!= SESSION_NULL
) && sessp
->s_ttyvp
)
1634 ep
->e_flag
= EPROC_CTTY
;
1642 ep
->e_lcid
= p
->p_lctx
->lc_id
;
1647 ep
->e_ppid
= p
->p_ppid
;
1648 /* Pre-zero the fake historical pcred */
1649 bzero(&ep
->e_pcred
, sizeof(ep
->e_pcred
));
1651 my_cred
= kauth_cred_proc_ref(p
);
1653 /* A fake historical pcred */
1654 ep
->e_pcred
.p_ruid
= kauth_cred_getruid(my_cred
);
1655 ep
->e_pcred
.p_svuid
= kauth_cred_getsvuid(my_cred
);
1656 ep
->e_pcred
.p_rgid
= kauth_cred_getrgid(my_cred
);
1657 ep
->e_pcred
.p_svgid
= kauth_cred_getsvgid(my_cred
);
1658 /* A fake historical *kauth_cred_t */
1659 ep
->e_ucred
.cr_ref
= my_cred
->cr_ref
;
1660 ep
->e_ucred
.cr_uid
= kauth_cred_getuid(my_cred
);
1661 ep
->e_ucred
.cr_ngroups
= posix_cred_get(my_cred
)->cr_ngroups
;
1662 bcopy(posix_cred_get(my_cred
)->cr_groups
, ep
->e_ucred
.cr_groups
, NGROUPS
*sizeof(gid_t
));
1664 kauth_cred_unref(&my_cred
);
1666 if (p
->p_stat
== SIDL
|| p
->p_stat
== SZOMB
) {
1667 ep
->e_vm
.vm_tsize
= 0;
1668 ep
->e_vm
.vm_dsize
= 0;
1669 ep
->e_vm
.vm_ssize
= 0;
1671 ep
->e_vm
.vm_rssize
= 0;
1673 if ((p
->p_flag
& P_CONTROLT
) && (sessp
!= SESSION_NULL
) &&
1674 (tp
= SESSION_TP(sessp
))) {
1675 ep
->e_tdev
= tp
->t_dev
;
1676 ep
->e_tpgid
= sessp
->s_ttypgrpid
;
1677 ep
->e_tsess
= CAST_DOWN_EXPLICIT(uint32_t,tp
->t_session
);
1681 if (SESS_LEADER(p
, sessp
))
1682 ep
->e_flag
|= EPROC_SLEADER
;
1683 bzero(&ep
->e_wmesg
[0], WMESGLEN
+1);
1684 ep
->e_xsize
= ep
->e_xrssize
= 0;
1685 ep
->e_xccount
= ep
->e_xswrss
= 0;
1686 if (sessp
!= SESSION_NULL
)
1687 session_rele(sessp
);
1693 * Fill in an LP64 version of eproc structure for the specified process.
1696 fill_user64_eproc(proc_t p
, struct user64_eproc
*ep
)
1699 struct session
*sessp
= NULL
;
1701 kauth_cred_t my_cred
;
1704 sessp
= proc_session(p
);
1706 ep
->e_paddr
= CAST_USER_ADDR_T(p
);
1707 if (pg
!= PGRP_NULL
) {
1708 ep
->e_sess
= CAST_USER_ADDR_T(sessp
);
1709 ep
->e_pgid
= p
->p_pgrpid
;
1710 ep
->e_jobc
= pg
->pg_jobc
;
1711 if (sessp
!= SESSION_NULL
) {
1713 ep
->e_flag
= EPROC_CTTY
;
1716 ep
->e_sess
= USER_ADDR_NULL
;
1722 ep
->e_lcid
= p
->p_lctx
->lc_id
;
1727 ep
->e_ppid
= p
->p_ppid
;
1728 /* Pre-zero the fake historical pcred */
1729 bzero(&ep
->e_pcred
, sizeof(ep
->e_pcred
));
1731 my_cred
= kauth_cred_proc_ref(p
);
1733 /* A fake historical pcred */
1734 ep
->e_pcred
.p_ruid
= kauth_cred_getruid(my_cred
);
1735 ep
->e_pcred
.p_svuid
= kauth_cred_getsvuid(my_cred
);
1736 ep
->e_pcred
.p_rgid
= kauth_cred_getrgid(my_cred
);
1737 ep
->e_pcred
.p_svgid
= kauth_cred_getsvgid(my_cred
);
1739 /* A fake historical *kauth_cred_t */
1740 ep
->e_ucred
.cr_ref
= my_cred
->cr_ref
;
1741 ep
->e_ucred
.cr_uid
= kauth_cred_getuid(my_cred
);
1742 ep
->e_ucred
.cr_ngroups
= posix_cred_get(my_cred
)->cr_ngroups
;
1743 bcopy(posix_cred_get(my_cred
)->cr_groups
, ep
->e_ucred
.cr_groups
, NGROUPS
*sizeof(gid_t
));
1745 kauth_cred_unref(&my_cred
);
1747 if (p
->p_stat
== SIDL
|| p
->p_stat
== SZOMB
) {
1748 ep
->e_vm
.vm_tsize
= 0;
1749 ep
->e_vm
.vm_dsize
= 0;
1750 ep
->e_vm
.vm_ssize
= 0;
1752 ep
->e_vm
.vm_rssize
= 0;
1754 if ((p
->p_flag
& P_CONTROLT
) && (sessp
!= SESSION_NULL
) &&
1755 (tp
= SESSION_TP(sessp
))) {
1756 ep
->e_tdev
= tp
->t_dev
;
1757 ep
->e_tpgid
= sessp
->s_ttypgrpid
;
1758 ep
->e_tsess
= CAST_USER_ADDR_T(tp
->t_session
);
1762 if (SESS_LEADER(p
, sessp
))
1763 ep
->e_flag
|= EPROC_SLEADER
;
1764 bzero(&ep
->e_wmesg
[0], WMESGLEN
+1);
1765 ep
->e_xsize
= ep
->e_xrssize
= 0;
1766 ep
->e_xccount
= ep
->e_xswrss
= 0;
1767 if (sessp
!= SESSION_NULL
)
1768 session_rele(sessp
);
1769 if (pg
!= PGRP_NULL
)
1774 * Fill in an eproc structure for the specified process.
1777 fill_user32_externproc(proc_t p
, struct user32_extern_proc
*exp
)
1779 exp
->p_forw
= exp
->p_back
= 0;
1780 exp
->p_starttime
.tv_sec
= p
->p_start
.tv_sec
;
1781 exp
->p_starttime
.tv_usec
= p
->p_start
.tv_usec
;
1783 exp
->p_sigacts
= CAST_DOWN_EXPLICIT(uint32_t,p
->p_sigacts
);
1784 exp
->p_flag
= p
->p_flag
;
1785 if (p
->p_lflag
& P_LTRACED
)
1786 exp
->p_flag
|= P_TRACED
;
1787 if (p
->p_lflag
& P_LPPWAIT
)
1788 exp
->p_flag
|= P_PPWAIT
;
1789 if (p
->p_lflag
& P_LEXIT
)
1790 exp
->p_flag
|= P_WEXIT
;
1791 exp
->p_stat
= p
->p_stat
;
1792 exp
->p_pid
= p
->p_pid
;
1793 exp
->p_oppid
= p
->p_oppid
;
1795 exp
->user_stack
= p
->user_stack
;
1796 exp
->exit_thread
= CAST_DOWN_EXPLICIT(uint32_t,p
->exit_thread
);
1797 exp
->p_debugger
= p
->p_debugger
;
1798 exp
->sigwait
= p
->sigwait
;
1800 #ifdef _PROC_HAS_SCHEDINFO_
1801 exp
->p_estcpu
= p
->p_estcpu
;
1802 exp
->p_pctcpu
= p
->p_pctcpu
;
1803 exp
->p_slptime
= p
->p_slptime
;
1807 exp
->p_slptime
= 0 ;
1809 exp
->p_cpticks
= 0 ;
1813 bcopy(&p
->p_realtimer
, &exp
->p_realtimer
,sizeof(struct itimerval
));
1814 bcopy(&p
->p_rtime
, &exp
->p_rtime
,sizeof(struct timeval
));
1818 exp
->p_traceflag
= 0;
1820 exp
->p_siglist
= 0 ; /* No longer relevant */
1821 exp
->p_textvp
= CAST_DOWN_EXPLICIT(uint32_t,p
->p_textvp
) ;
1822 exp
->p_holdcnt
= 0 ;
1823 exp
->p_sigmask
= 0 ; /* no longer avaialable */
1824 exp
->p_sigignore
= p
->p_sigignore
;
1825 exp
->p_sigcatch
= p
->p_sigcatch
;
1826 exp
->p_priority
= p
->p_priority
;
1828 exp
->p_nice
= p
->p_nice
;
1829 bcopy(&p
->p_comm
, &exp
->p_comm
,MAXCOMLEN
);
1830 exp
->p_comm
[MAXCOMLEN
] = '\0';
1831 exp
->p_pgrp
= CAST_DOWN_EXPLICIT(uint32_t,p
->p_pgrp
) ;
1833 exp
->p_xstat
= p
->p_xstat
;
1834 exp
->p_acflag
= p
->p_acflag
;
1835 exp
->p_ru
= CAST_DOWN_EXPLICIT(uint32_t,p
->p_ru
) ; /* XXX may be NULL */
1839 * Fill in an LP64 version of extern_proc structure for the specified process.
1842 fill_user64_externproc(proc_t p
, struct user64_extern_proc
*exp
)
1844 exp
->p_forw
= exp
->p_back
= USER_ADDR_NULL
;
1845 exp
->p_starttime
.tv_sec
= p
->p_start
.tv_sec
;
1846 exp
->p_starttime
.tv_usec
= p
->p_start
.tv_usec
;
1847 exp
->p_vmspace
= USER_ADDR_NULL
;
1848 exp
->p_sigacts
= CAST_USER_ADDR_T(p
->p_sigacts
);
1849 exp
->p_flag
= p
->p_flag
;
1850 if (p
->p_lflag
& P_LTRACED
)
1851 exp
->p_flag
|= P_TRACED
;
1852 if (p
->p_lflag
& P_LPPWAIT
)
1853 exp
->p_flag
|= P_PPWAIT
;
1854 if (p
->p_lflag
& P_LEXIT
)
1855 exp
->p_flag
|= P_WEXIT
;
1856 exp
->p_stat
= p
->p_stat
;
1857 exp
->p_pid
= p
->p_pid
;
1858 exp
->p_oppid
= p
->p_oppid
;
1860 exp
->user_stack
= p
->user_stack
;
1861 exp
->exit_thread
= CAST_USER_ADDR_T(p
->exit_thread
);
1862 exp
->p_debugger
= p
->p_debugger
;
1863 exp
->sigwait
= p
->sigwait
;
1865 #ifdef _PROC_HAS_SCHEDINFO_
1866 exp
->p_estcpu
= p
->p_estcpu
;
1867 exp
->p_pctcpu
= p
->p_pctcpu
;
1868 exp
->p_slptime
= p
->p_slptime
;
1872 exp
->p_slptime
= 0 ;
1874 exp
->p_cpticks
= 0 ;
1878 exp
->p_realtimer
.it_interval
.tv_sec
= p
->p_realtimer
.it_interval
.tv_sec
;
1879 exp
->p_realtimer
.it_interval
.tv_usec
= p
->p_realtimer
.it_interval
.tv_usec
;
1880 exp
->p_realtimer
.it_value
.tv_sec
= p
->p_realtimer
.it_value
.tv_sec
;
1881 exp
->p_realtimer
.it_value
.tv_usec
= p
->p_realtimer
.it_value
.tv_usec
;
1882 exp
->p_rtime
.tv_sec
= p
->p_rtime
.tv_sec
;
1883 exp
->p_rtime
.tv_usec
= p
->p_rtime
.tv_usec
;
1887 exp
->p_traceflag
= 0 ;
1889 exp
->p_siglist
= 0 ; /* No longer relevant */
1890 exp
->p_textvp
= CAST_USER_ADDR_T(p
->p_textvp
);
1891 exp
->p_holdcnt
= 0 ;
1892 exp
->p_sigmask
= 0 ; /* no longer avaialable */
1893 exp
->p_sigignore
= p
->p_sigignore
;
1894 exp
->p_sigcatch
= p
->p_sigcatch
;
1895 exp
->p_priority
= p
->p_priority
;
1897 exp
->p_nice
= p
->p_nice
;
1898 bcopy(&p
->p_comm
, &exp
->p_comm
,MAXCOMLEN
);
1899 exp
->p_comm
[MAXCOMLEN
] = '\0';
1900 exp
->p_pgrp
= CAST_USER_ADDR_T(p
->p_pgrp
);
1901 exp
->p_addr
= USER_ADDR_NULL
;
1902 exp
->p_xstat
= p
->p_xstat
;
1903 exp
->p_acflag
= p
->p_acflag
;
1904 exp
->p_ru
= CAST_USER_ADDR_T(p
->p_ru
); /* XXX may be NULL */
1908 fill_user32_proc(proc_t p
, struct user32_kinfo_proc
*kp
)
1910 /* on a 64 bit kernel, 32 bit users will get some truncated information */
1911 fill_user32_externproc(p
, &kp
->kp_proc
);
1912 fill_user32_eproc(p
, &kp
->kp_eproc
);
1916 fill_user64_proc(proc_t p
, struct user64_kinfo_proc
*kp
)
1918 fill_user64_externproc(p
, &kp
->kp_proc
);
1919 fill_user64_eproc(p
, &kp
->kp_eproc
);
1923 sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
1925 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
1926 int *name
= arg1
; /* oid element argument vector */
1927 int namelen
= arg2
; /* number of oid element arguments */
1928 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1929 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1930 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1931 // size_t newlen = req->newlen; /* user buffer copy in size */
1933 proc_t p
= current_proc();
1939 ret
= suser(kauth_cred_get(), &p
->p_acflag
);
1953 case KERN_KDWRITETR
:
1954 case KERN_KDWRITEMAP
:
1958 case KERN_KDSETRTCDEC
:
1960 case KERN_KDGETENTROPY
:
1961 ret
= kdbg_control(name
, namelen
, oldp
, oldlenp
);
1968 /* adjust index so we return the right required/consumed amount */
1970 req
->oldidx
+= req
->oldlen
;
1974 SYSCTL_PROC(_kern
, KERN_KDEBUG
, kdebug
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1975 0, /* Pointer argument (arg1) */
1976 0, /* Integer argument (arg2) */
1977 sysctl_kdebug_ops
, /* Handler function */
1978 NULL
, /* Data pointer */
1982 #if !CONFIG_EMBEDDED
1984 * Return the top *sizep bytes of the user stack, or the entire area of the
1985 * user stack down through the saved exec_path, whichever is smaller.
1988 sysctl_doprocargs SYSCTL_HANDLER_ARGS
1990 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
1991 int *name
= arg1
; /* oid element argument vector */
1992 int namelen
= arg2
; /* number of oid element arguments */
1993 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1994 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1995 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1996 // size_t newlen = req->newlen; /* user buffer copy in size */
1999 error
= sysctl_procargsx( name
, namelen
, oldp
, oldlenp
, current_proc(), 0);
2001 /* adjust index so we return the right required/consumed amount */
2003 req
->oldidx
+= req
->oldlen
;
2007 SYSCTL_PROC(_kern
, KERN_PROCARGS
, procargs
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
2008 0, /* Pointer argument (arg1) */
2009 0, /* Integer argument (arg2) */
2010 sysctl_doprocargs
, /* Handler function */
2011 NULL
, /* Data pointer */
2013 #endif /* !CONFIG_EMBEDDED */
2016 sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
2018 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
2019 int *name
= arg1
; /* oid element argument vector */
2020 int namelen
= arg2
; /* number of oid element arguments */
2021 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
2022 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
2023 // user_addr_t newp = req->newptr; /* user buffer copy in address */
2024 // size_t newlen = req->newlen; /* user buffer copy in size */
2027 error
= sysctl_procargsx( name
, namelen
, oldp
, oldlenp
, current_proc(), 1);
2029 /* adjust index so we return the right required/consumed amount */
2031 req
->oldidx
+= req
->oldlen
;
2035 SYSCTL_PROC(_kern
, KERN_PROCARGS2
, procargs2
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
2036 0, /* Pointer argument (arg1) */
2037 0, /* Integer argument (arg2) */
2038 sysctl_doprocargs2
, /* Handler function */
2039 NULL
, /* Data pointer */
2043 sysctl_procargsx(int *name
, u_int namelen
, user_addr_t where
,
2044 size_t *sizep
, proc_t cur_proc
, int argc_yes
)
2047 int buflen
= where
!= USER_ADDR_NULL
? *sizep
: 0;
2049 struct _vm_map
*proc_map
;
2052 user_addr_t arg_addr
;
2057 vm_offset_t copy_start
, copy_end
;
2060 kauth_cred_t my_cred
;
2067 buflen
-= sizeof(int); /* reserve first word to return argc */
2069 /* we only care about buflen when where (oldp from sysctl) is not NULL. */
2070 /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
2071 /* is not NULL then the caller wants us to return the length needed to */
2072 /* hold the data we would return */
2073 if (where
!= USER_ADDR_NULL
&& (buflen
<= 0 || buflen
> ARG_MAX
)) {
2079 * Lookup process by pid
2088 * Copy the top N bytes of the stack.
2089 * On all machines we have so far, the stack grows
2092 * If the user expects no more than N bytes of
2093 * argument list, use that as a guess for the
2097 if (!p
->user_stack
) {
2102 if (where
== USER_ADDR_NULL
) {
2103 /* caller only wants to know length of proc args data */
2104 if (sizep
== NULL
) {
2109 size
= p
->p_argslen
;
2112 size
+= sizeof(int);
2116 * old PROCARGS will return the executable's path and plus some
2117 * extra space for work alignment and data tags
2119 size
+= PATH_MAX
+ (6 * sizeof(int));
2121 size
+= (size
& (sizeof(int) - 1)) ? (sizeof(int) - (size
& (sizeof(int) - 1))) : 0;
2126 my_cred
= kauth_cred_proc_ref(p
);
2127 uid
= kauth_cred_getuid(my_cred
);
2128 kauth_cred_unref(&my_cred
);
2130 if ((uid
!= kauth_cred_getuid(kauth_cred_get()))
2131 && suser(kauth_cred_get(), &cur_proc
->p_acflag
)) {
2136 if ((u_int
)arg_size
> p
->p_argslen
)
2137 arg_size
= round_page(p
->p_argslen
);
2139 arg_addr
= p
->user_stack
- arg_size
;
2143 * Before we can block (any VM code), make another
2144 * reference to the map to keep it alive. We do
2145 * that by getting a reference on the task itself.
2153 argslen
= p
->p_argslen
;
2155 * Once we have a task reference we can convert that into a
2156 * map reference, which we will use in the calls below. The
2157 * task/process may change its map after we take this reference
2158 * (see execve), but the worst that will happen then is a return
2159 * of stale info (which is always a possibility).
2161 task_reference(task
);
2163 proc_map
= get_task_map_reference(task
);
2164 task_deallocate(task
);
2166 if (proc_map
== NULL
)
2170 ret
= kmem_alloc(kernel_map
, ©_start
, round_page(arg_size
));
2171 if (ret
!= KERN_SUCCESS
) {
2172 vm_map_deallocate(proc_map
);
2176 copy_end
= round_page(copy_start
+ arg_size
);
2178 if( vm_map_copyin(proc_map
, (vm_map_address_t
)arg_addr
,
2179 (vm_map_size_t
)arg_size
, FALSE
, &tmp
) != KERN_SUCCESS
) {
2180 vm_map_deallocate(proc_map
);
2181 kmem_free(kernel_map
, copy_start
,
2182 round_page(arg_size
));
2187 * Now that we've done the copyin from the process'
2188 * map, we can release the reference to it.
2190 vm_map_deallocate(proc_map
);
2192 if( vm_map_copy_overwrite(kernel_map
,
2193 (vm_map_address_t
)copy_start
,
2194 tmp
, FALSE
) != KERN_SUCCESS
) {
2195 kmem_free(kernel_map
, copy_start
,
2196 round_page(arg_size
));
2200 if (arg_size
> argslen
) {
2201 data
= (caddr_t
) (copy_end
- argslen
);
2204 data
= (caddr_t
) (copy_end
- arg_size
);
2209 /* Put processes argc as the first word in the copyout buffer */
2210 suword(where
, p
->p_argc
);
2211 error
= copyout(data
, (where
+ sizeof(int)), size
);
2212 size
+= sizeof(int);
2214 error
= copyout(data
, where
, size
);
2217 * Make the old PROCARGS work to return the executable's path
2218 * But, only if there is enough space in the provided buffer
2220 * on entry: data [possibily] points to the beginning of the path
2222 * Note: we keep all pointers&sizes aligned to word boundries
2224 if ( (! error
) && (buflen
> 0 && (u_int
)buflen
> argslen
) )
2226 int binPath_sz
, alignedBinPath_sz
= 0;
2227 int extraSpaceNeeded
, addThis
;
2228 user_addr_t placeHere
;
2229 char * str
= (char *) data
;
2232 /* Some apps are really bad about messing up their stacks
2233 So, we have to be extra careful about getting the length
2234 of the executing binary. If we encounter an error, we bail.
2237 /* Limit ourselves to PATH_MAX paths */
2238 if ( max_len
> PATH_MAX
) max_len
= PATH_MAX
;
2242 while ( (binPath_sz
< max_len
-1) && (*str
++ != 0) )
2245 /* If we have a NUL terminator, copy it, too */
2246 if (binPath_sz
< max_len
-1) binPath_sz
+= 1;
2248 /* Pre-Flight the space requiremnts */
2250 /* Account for the padding that fills out binPath to the next word */
2251 alignedBinPath_sz
+= (binPath_sz
& (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz
& (sizeof(int)-1))) : 0;
2253 placeHere
= where
+ size
;
2255 /* Account for the bytes needed to keep placeHere word aligned */
2256 addThis
= (placeHere
& (sizeof(int)-1)) ? (sizeof(int)-(placeHere
& (sizeof(int)-1))) : 0;
2258 /* Add up all the space that is needed */
2259 extraSpaceNeeded
= alignedBinPath_sz
+ addThis
+ binPath_sz
+ (4 * sizeof(int));
2261 /* is there is room to tack on argv[0]? */
2262 if ( (buflen
& ~(sizeof(int)-1)) >= ( argslen
+ extraSpaceNeeded
))
2264 placeHere
+= addThis
;
2265 suword(placeHere
, 0);
2266 placeHere
+= sizeof(int);
2267 suword(placeHere
, 0xBFFF0000);
2268 placeHere
+= sizeof(int);
2269 suword(placeHere
, 0);
2270 placeHere
+= sizeof(int);
2271 error
= copyout(data
, placeHere
, binPath_sz
);
2274 placeHere
+= binPath_sz
;
2275 suword(placeHere
, 0);
2276 size
+= extraSpaceNeeded
;
2282 if (copy_start
!= (vm_offset_t
) 0) {
2283 kmem_free(kernel_map
, copy_start
, copy_end
- copy_start
);
2289 if (where
!= USER_ADDR_NULL
)
2296 * Max number of concurrent aio requests
2300 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2302 int new_value
, changed
;
2303 int error
= sysctl_io_number(req
, aio_max_requests
, sizeof(int), &new_value
, &changed
);
2305 /* make sure the system-wide limit is greater than the per process limit */
2306 if (new_value
>= aio_max_requests_per_process
)
2307 aio_max_requests
= new_value
;
2316 * Max number of concurrent aio requests per process
2320 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2322 int new_value
, changed
;
2323 int error
= sysctl_io_number(req
, aio_max_requests_per_process
, sizeof(int), &new_value
, &changed
);
2325 /* make sure per process limit is less than the system-wide limit */
2326 if (new_value
<= aio_max_requests
&& new_value
>= AIO_LISTIO_MAX
)
2327 aio_max_requests_per_process
= new_value
;
2336 * Max number of async IO worker threads
2340 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2342 int new_value
, changed
;
2343 int error
= sysctl_io_number(req
, aio_worker_threads
, sizeof(int), &new_value
, &changed
);
2345 /* we only allow an increase in the number of worker threads */
2346 if (new_value
> aio_worker_threads
) {
2347 _aio_create_worker_threads((new_value
- aio_worker_threads
));
2348 aio_worker_threads
= new_value
;
2358 * System-wide limit on the max number of processes
2362 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2364 int new_value
, changed
;
2365 int error
= sysctl_io_number(req
, maxproc
, sizeof(int), &new_value
, &changed
);
2367 AUDIT_ARG(value32
, new_value
);
2368 /* make sure the system-wide limit is less than the configured hard
2369 limit set at kernel compilation */
2370 if (new_value
<= hard_maxproc
&& new_value
> 0)
2371 maxproc
= new_value
;
2378 SYSCTL_STRING(_kern
, KERN_OSTYPE
, ostype
,
2379 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2381 SYSCTL_STRING(_kern
, KERN_OSRELEASE
, osrelease
,
2382 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2384 SYSCTL_INT(_kern
, KERN_OSREV
, osrevision
,
2385 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2386 (int *)NULL
, BSD
, "");
2387 SYSCTL_STRING(_kern
, KERN_VERSION
, version
,
2388 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2390 SYSCTL_STRING(_kern
, OID_AUTO
, uuid
,
2391 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2392 &kernel_uuid
[0], 0, "");
2395 int debug_kprint_syscall
= 0;
2396 char debug_kprint_syscall_process
[MAXCOMLEN
+1];
2398 /* Thread safe: bits and string value are not used to reclaim state */
2399 SYSCTL_INT (_debug
, OID_AUTO
, kprint_syscall
,
2400 CTLFLAG_RW
| CTLFLAG_LOCKED
, &debug_kprint_syscall
, 0, "kprintf syscall tracing");
2401 SYSCTL_STRING(_debug
, OID_AUTO
, kprint_syscall_process
,
2402 CTLFLAG_RW
| CTLFLAG_LOCKED
, debug_kprint_syscall_process
, sizeof(debug_kprint_syscall_process
),
2403 "name of process for kprintf syscall tracing");
2405 int debug_kprint_current_process(const char **namep
)
2407 struct proc
*p
= current_proc();
2413 if (debug_kprint_syscall_process
[0]) {
2414 /* user asked to scope tracing to a particular process name */
2415 if(0 == strncmp(debug_kprint_syscall_process
,
2416 p
->p_comm
, sizeof(debug_kprint_syscall_process
))) {
2417 /* no value in telling the user that we traced what they asked */
2418 if(namep
) *namep
= NULL
;
2426 /* trace all processes. Tell user what we traced */
2435 /* PR-5293665: need to use a callback function for kern.osversion to set
2436 * osversion in IORegistry */
2439 sysctl_osversion(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
)
2443 rval
= sysctl_handle_string(oidp
, arg1
, arg2
, req
);
2446 IORegistrySetOSBuildVersion((char *)arg1
);
2452 SYSCTL_PROC(_kern
, KERN_OSVERSION
, osversion
,
2453 CTLFLAG_RW
| CTLFLAG_KERN
| CTLTYPE_STRING
| CTLFLAG_LOCKED
,
2454 osversion
, 256 /* OSVERSIZE*/,
2455 sysctl_osversion
, "A", "");
2458 sysctl_sysctl_bootargs
2459 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2464 strlcpy(buf
, PE_boot_args(), 256);
2465 error
= sysctl_io_string(req
, buf
, 256, 0, NULL
);
2469 SYSCTL_PROC(_kern
, OID_AUTO
, bootargs
,
2470 CTLFLAG_LOCKED
| CTLFLAG_RD
| CTLFLAG_KERN
| CTLTYPE_STRING
,
2472 sysctl_sysctl_bootargs
, "A", "bootargs");
2474 SYSCTL_INT(_kern
, KERN_MAXFILES
, maxfiles
,
2475 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2477 SYSCTL_INT(_kern
, KERN_ARGMAX
, argmax
,
2478 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2479 (int *)NULL
, ARG_MAX
, "");
2480 SYSCTL_INT(_kern
, KERN_POSIX1
, posix1version
,
2481 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2482 (int *)NULL
, _POSIX_VERSION
, "");
2483 SYSCTL_INT(_kern
, KERN_NGROUPS
, ngroups
,
2484 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2485 (int *)NULL
, NGROUPS_MAX
, "");
2486 SYSCTL_INT(_kern
, KERN_JOB_CONTROL
, job_control
,
2487 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2488 (int *)NULL
, 1, "");
2489 #if 1 /* _POSIX_SAVED_IDS from <unistd.h> */
2490 SYSCTL_INT(_kern
, KERN_SAVED_IDS
, saved_ids
,
2491 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2492 (int *)NULL
, 1, "");
2494 SYSCTL_INT(_kern
, KERN_SAVED_IDS
, saved_ids
,
2495 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2498 SYSCTL_INT(_kern
, OID_AUTO
, num_files
,
2499 CTLFLAG_RD
| CTLFLAG_LOCKED
,
2501 SYSCTL_COMPAT_INT(_kern
, OID_AUTO
, num_vnodes
,
2502 CTLFLAG_RD
| CTLFLAG_LOCKED
,
2504 SYSCTL_INT(_kern
, OID_AUTO
, num_tasks
,
2505 CTLFLAG_RD
| CTLFLAG_LOCKED
,
2507 SYSCTL_INT(_kern
, OID_AUTO
, num_threads
,
2508 CTLFLAG_RD
| CTLFLAG_LOCKED
,
2509 &thread_max
, 0, "");
2510 SYSCTL_INT(_kern
, OID_AUTO
, num_taskthreads
,
2511 CTLFLAG_RD
| CTLFLAG_LOCKED
,
2512 &task_threadmax
, 0, "");
2515 sysctl_maxvnodes (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2517 int oldval
= desiredvnodes
;
2518 int error
= sysctl_io_number(req
, desiredvnodes
, sizeof(int), &desiredvnodes
, NULL
);
2520 if (oldval
!= desiredvnodes
) {
2521 reset_vmobjectcache(oldval
, desiredvnodes
);
2522 resize_namecache(desiredvnodes
);
2528 SYSCTL_INT(_kern
, OID_AUTO
, namecache_disabled
,
2529 CTLFLAG_RW
| CTLFLAG_LOCKED
,
2530 &nc_disabled
, 0, "");
2532 SYSCTL_PROC(_kern
, KERN_MAXVNODES
, maxvnodes
,
2533 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2534 0, 0, sysctl_maxvnodes
, "I", "");
2536 SYSCTL_PROC(_kern
, KERN_MAXPROC
, maxproc
,
2537 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2538 0, 0, sysctl_maxproc
, "I", "");
2540 SYSCTL_PROC(_kern
, KERN_AIOMAX
, aiomax
,
2541 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2542 0, 0, sysctl_aiomax
, "I", "");
2544 SYSCTL_PROC(_kern
, KERN_AIOPROCMAX
, aioprocmax
,
2545 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2546 0, 0, sysctl_aioprocmax
, "I", "");
2548 SYSCTL_PROC(_kern
, KERN_AIOTHREADS
, aiothreads
,
2549 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2550 0, 0, sysctl_aiothreads
, "I", "");
2554 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2556 int new_value
, changed
;
2557 int error
= sysctl_io_number(req
, securelevel
, sizeof(int), &new_value
, &changed
);
2559 if (!(new_value
< securelevel
&& req
->p
->p_pid
!= 1)) {
2561 securelevel
= new_value
;
2570 SYSCTL_PROC(_kern
, KERN_SECURELVL
, securelevel
,
2571 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2572 0, 0, sysctl_securelvl
, "I", "");
2577 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2580 error
= sysctl_io_string(req
, domainname
, sizeof(domainname
), 0, &changed
);
2582 domainnamelen
= strlen(domainname
);
2587 SYSCTL_PROC(_kern
, KERN_DOMAINNAME
, nisdomainname
,
2588 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2589 0, 0, sysctl_domainname
, "A", "");
2591 SYSCTL_COMPAT_INT(_kern
, KERN_HOSTID
, hostid
,
2592 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2597 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2600 error
= sysctl_io_string(req
, hostname
, sizeof(hostname
), 1, &changed
);
2602 hostnamelen
= req
->newlen
;
2608 SYSCTL_PROC(_kern
, KERN_HOSTNAME
, hostname
,
2609 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2610 0, 0, sysctl_hostname
, "A", "");
2614 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2616 /* Original code allowed writing, I'm copying this, although this all makes
2617 no sense to me. Besides, this sysctl is never used. */
2618 return sysctl_io_string(req
, &req
->p
->p_name
[0], (2*MAXCOMLEN
+1), 1, NULL
);
2621 SYSCTL_PROC(_kern
, KERN_PROCNAME
, procname
,
2622 CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
2623 0, 0, sysctl_procname
, "A", "");
2625 SYSCTL_INT(_kern
, KERN_SPECULATIVE_READS
, speculative_reads_disabled
,
2626 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2627 &speculative_reads_disabled
, 0, "");
2629 SYSCTL_INT(_kern
, OID_AUTO
, ignore_is_ssd
,
2630 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2631 &ignore_is_ssd
, 0, "");
2633 SYSCTL_UINT(_kern
, OID_AUTO
, preheat_pages_max
,
2634 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2635 &preheat_pages_max
, 0, "");
2637 SYSCTL_UINT(_kern
, OID_AUTO
, preheat_pages_min
,
2638 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2639 &preheat_pages_min
, 0, "");
2641 SYSCTL_UINT(_kern
, OID_AUTO
, speculative_prefetch_max
,
2642 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2643 &speculative_prefetch_max
, 0, "");
2645 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_free_target
,
2646 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2647 &vm_page_free_target
, 0, "");
2649 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_free_min
,
2650 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2651 &vm_page_free_min
, 0, "");
2653 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_free_reserved
,
2654 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2655 &vm_page_free_reserved
, 0, "");
2657 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_speculative_percentage
,
2658 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2659 &vm_page_speculative_percentage
, 0, "");
2661 SYSCTL_UINT(_kern
, OID_AUTO
, vm_page_speculative_q_age_ms
,
2662 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2663 &vm_page_speculative_q_age_ms
, 0, "");
2665 SYSCTL_UINT(_kern
, OID_AUTO
, vm_max_delayed_work_limit
,
2666 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2667 &vm_max_delayed_work_limit
, 0, "");
2669 SYSCTL_UINT(_kern
, OID_AUTO
, vm_max_batch
,
2670 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2671 &vm_max_batch
, 0, "");
2676 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2678 time_t tv_sec
= boottime_sec();
2679 struct proc
*p
= req
->p
;
2681 if (proc_is64bit(p
)) {
2682 struct user64_timeval t
;
2685 return sysctl_io_opaque(req
, &t
, sizeof(t
), NULL
);
2687 struct user32_timeval t
;
2690 return sysctl_io_opaque(req
, &t
, sizeof(t
), NULL
);
2694 SYSCTL_PROC(_kern
, KERN_BOOTTIME
, boottime
,
2695 CTLTYPE_STRUCT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2696 0, 0, sysctl_boottime
, "S,timeval", "");
2700 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2703 int error
= get_kernel_symfile(req
->p
, &str
);
2706 return sysctl_io_string(req
, str
, 0, 0, NULL
);
2710 SYSCTL_PROC(_kern
, KERN_SYMFILE
, symfile
,
2711 CTLTYPE_STRING
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2712 0, 0, sysctl_symfile
, "A", "");
2717 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2719 return sysctl_io_number(req
, netboot_root(), sizeof(int), NULL
, NULL
);
2722 SYSCTL_PROC(_kern
, KERN_NETBOOT
, netboot
,
2723 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2724 0, 0, sysctl_netboot
, "I", "");
2727 #ifdef CONFIG_IMGSRC_ACCESS
2729 * Legacy--act as if only one layer of nesting is possible.
2733 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2735 vfs_context_t ctx
= vfs_context_current();
2739 if (!vfs_context_issuser(ctx
)) {
2743 if (imgsrc_rootvnodes
[0] == NULL
) {
2747 result
= vnode_getwithref(imgsrc_rootvnodes
[0]);
2752 devvp
= vnode_mount(imgsrc_rootvnodes
[0])->mnt_devvp
;
2753 result
= vnode_getwithref(devvp
);
2758 result
= sysctl_io_number(req
, vnode_specrdev(devvp
), sizeof(dev_t
), NULL
, NULL
);
2762 vnode_put(imgsrc_rootvnodes
[0]);
2766 SYSCTL_PROC(_kern
, OID_AUTO
, imgsrcdev
,
2767 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2768 0, 0, sysctl_imgsrcdev
, "I", "");
/*
 * sysctl handler (kern.imgsrcinfo): fills one imgsrc_info record per
 * image-boot nesting level (device, flags, height) and copies out only
 * the records actually populated.
 * NOTE(review): this extraction is lossy -- the function-name line,
 * error returns, vnode_put cleanup and closing braces are missing from
 * view; the comments below annotate only the surviving fragments.
 */
2772 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
/* per-level records returned to userland */
2775 struct imgsrc_info info
[MAX_IMAGEBOOT_NESTING
]; /* 2 for now, no problem */
/* not image-booted: nothing to report */
2779 if (imgsrc_rootvnodes
[0] == NULLVP
) {
/* walk nesting levels until an empty slot ends the chain */
2783 for (i
= 0; i
< MAX_IMAGEBOOT_NESTING
; i
++) {
2785 * Go get the root vnode.
2787 rvp
= imgsrc_rootvnodes
[i
];
2788 if (rvp
== NULLVP
) {
/* take an iocount on the level's root vnode before using it */
2792 error
= vnode_get(rvp
);
2798 * For now, no getting at a non-local volume.
2800 devvp
= vnode_mount(rvp
)->mnt_devvp
;
2801 if (devvp
== NULL
) {
2806 error
= vnode_getwithref(devvp
);
/* record the backing device, flags and nesting height */
2815 info
[i
].ii_dev
= vnode_specrdev(devvp
);
2816 info
[i
].ii_flags
= 0;
2817 info
[i
].ii_height
= i
;
2818 bzero(info
[i
].ii_reserved
, sizeof(info
[i
].ii_reserved
));
/* copy out only the i records that were filled in */
2824 return sysctl_io_opaque(req
, info
, i
* sizeof(info
[0]), NULL
);
2827 SYSCTL_PROC(_kern
, OID_AUTO
, imgsrcinfo
,
2828 CTLTYPE_OPAQUE
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2829 0, 0, sysctl_imgsrcinfo
, "I", "");
2831 #endif /* CONFIG_IMGSRC_ACCESS */
2833 SYSCTL_INT(_kern
, OID_AUTO
, timer_coalescing_enabled
,
2834 CTLFLAG_RW
| CTLFLAG_LOCKED
,
2835 &mach_timer_coalescing_enabled
, 0, "");
2839 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2841 return sysctl_io_number(req
, (int)req
->p
->user_stack
, sizeof(int), NULL
, NULL
);
2844 SYSCTL_PROC(_kern
, KERN_USRSTACK32
, usrstack
,
2845 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2846 0, 0, sysctl_usrstack
, "I", "");
2850 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2852 return sysctl_io_number(req
, req
->p
->user_stack
, sizeof(req
->p
->user_stack
), NULL
, NULL
);
2855 SYSCTL_PROC(_kern
, KERN_USRSTACK64
, usrstack64
,
2856 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
2857 0, 0, sysctl_usrstack64
, "Q", "");
2859 SYSCTL_STRING(_kern
, KERN_COREFILE
, corefile
,
2860 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2861 corefilename
, sizeof(corefilename
), "");
2865 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2867 #ifdef SECURE_KERNEL
2870 int new_value
, changed
;
2871 int error
= sysctl_io_number(req
, do_coredump
, sizeof(int), &new_value
, &changed
);
2873 if ((new_value
== 0) || (new_value
== 1))
2874 do_coredump
= new_value
;
2881 SYSCTL_PROC(_kern
, KERN_COREDUMP
, coredump
,
2882 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2883 0, 0, sysctl_coredump
, "I", "");
2886 sysctl_suid_coredump
2887 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2889 #ifdef SECURE_KERNEL
2892 int new_value
, changed
;
2893 int error
= sysctl_io_number(req
, sugid_coredump
, sizeof(int), &new_value
, &changed
);
2895 if ((new_value
== 0) || (new_value
== 1))
2896 sugid_coredump
= new_value
;
2903 SYSCTL_PROC(_kern
, KERN_SUGID_COREDUMP
, sugid_coredump
,
2904 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2905 0, 0, sysctl_suid_coredump
, "I", "");
2909 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2911 struct proc
*p
= req
->p
;
2912 int new_value
, changed
;
2913 int error
= sysctl_io_number(req
, (req
->p
->p_lflag
& P_LDELAYTERM
)? 1: 0, sizeof(int), &new_value
, &changed
);
2917 req
->p
->p_lflag
|= P_LDELAYTERM
;
2919 req
->p
->p_lflag
&= ~P_LDELAYTERM
;
2925 SYSCTL_PROC(_kern
, KERN_PROCDELAYTERM
, delayterm
,
2926 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
2927 0, 0, sysctl_delayterm
, "I", "");
2932 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2934 struct proc
*p
= req
->p
;
2936 int new_value
, old_value
, changed
;
2939 ut
= get_bsdthread_info(current_thread());
2941 if (ut
->uu_flag
& UT_RAGE_VNODES
)
2942 old_value
= KERN_RAGE_THREAD
;
2943 else if (p
->p_lflag
& P_LRAGE_VNODES
)
2944 old_value
= KERN_RAGE_PROC
;
2948 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
2951 switch (new_value
) {
2952 case KERN_RAGE_PROC
:
2954 p
->p_lflag
|= P_LRAGE_VNODES
;
2957 case KERN_UNRAGE_PROC
:
2959 p
->p_lflag
&= ~P_LRAGE_VNODES
;
2963 case KERN_RAGE_THREAD
:
2964 ut
->uu_flag
|= UT_RAGE_VNODES
;
2966 case KERN_UNRAGE_THREAD
:
2967 ut
= get_bsdthread_info(current_thread());
2968 ut
->uu_flag
&= ~UT_RAGE_VNODES
;
2975 SYSCTL_PROC(_kern
, KERN_RAGEVNODE
, rage_vnode
,
2976 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
2977 0, 0, sysctl_rage_vnode
, "I", "");
2981 sysctl_kern_check_openevt
2982 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2984 struct proc
*p
= req
->p
;
2985 int new_value
, old_value
, changed
;
2988 if (p
->p_flag
& P_CHECKOPENEVT
) {
2989 old_value
= KERN_OPENEVT_PROC
;
2994 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
2997 switch (new_value
) {
2998 case KERN_OPENEVT_PROC
:
2999 OSBitOrAtomic(P_CHECKOPENEVT
, &p
->p_flag
);
3002 case KERN_UNOPENEVT_PROC
:
3003 OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT
), &p
->p_flag
);
3013 SYSCTL_PROC(_kern
, KERN_CHECKOPENEVT
, check_openevt
, CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
3014 0, 0, sysctl_kern_check_openevt
, "I", "set the per-process check-open-evt flag");
3020 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
3022 #ifdef SECURE_KERNEL
3025 int new_value
, changed
;
3028 error
= sysctl_io_number(req
, nx_enabled
, sizeof(nx_enabled
), &new_value
, &changed
);
3033 #if defined(__i386__) || defined(__x86_64__)
3035 * Only allow setting if NX is supported on the chip
3037 if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD
))
3040 nx_enabled
= new_value
;
3047 SYSCTL_PROC(_kern
, KERN_NX_PROTECTION
, nx
,
3048 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
3049 0, 0, sysctl_nx
, "I", "");
3053 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
3055 if (proc_is64bit(req
->p
)) {
3056 struct user64_loadavg loadinfo64
;
3057 fill_loadavg64(&averunnable
, &loadinfo64
);
3058 return sysctl_io_opaque(req
, &loadinfo64
, sizeof(loadinfo64
), NULL
);
3060 struct user32_loadavg loadinfo32
;
3061 fill_loadavg32(&averunnable
, &loadinfo32
);
3062 return sysctl_io_opaque(req
, &loadinfo32
, sizeof(loadinfo32
), NULL
);
3066 SYSCTL_PROC(_vm
, VM_LOADAVG
, loadavg
,
3067 CTLTYPE_STRUCT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
3068 0, 0, sysctl_loadavg
, "S,loadavg", "");
3071 * Note: Thread safe; vm_map_lock protects in vm_toggle_entry_reuse()
3074 sysctl_vm_toggle_address_reuse(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
,
3075 __unused
int arg2
, struct sysctl_req
*req
)
3077 int old_value
=0, new_value
=0, error
=0;
3079 if(vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE
, &old_value
))
3081 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, NULL
);
3083 return (vm_toggle_entry_reuse(new_value
, NULL
));
3088 SYSCTL_PROC(_debug
, OID_AUTO
, toggle_address_reuse
, CTLFLAG_ANYBODY
| CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, 0, sysctl_vm_toggle_address_reuse
,"I","");
3092 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
3095 uint64_t swap_total
;
3096 uint64_t swap_avail
;
3097 vm_size_t swap_pagesize
;
3098 boolean_t swap_encrypted
;
3099 struct xsw_usage xsu
;
3101 error
= macx_swapinfo(&swap_total
,
3108 xsu
.xsu_total
= swap_total
;
3109 xsu
.xsu_avail
= swap_avail
;
3110 xsu
.xsu_used
= swap_total
- swap_avail
;
3111 xsu
.xsu_pagesize
= swap_pagesize
;
3112 xsu
.xsu_encrypted
= swap_encrypted
;
3113 return sysctl_io_opaque(req
, &xsu
, sizeof(xsu
), NULL
);
3118 SYSCTL_PROC(_vm
, VM_SWAPUSAGE
, swapusage
,
3119 CTLTYPE_STRUCT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
3120 0, 0, sysctl_swapusage
, "S,xsw_usage", "");
3123 /* <rdar://problem/7688080> */
3124 boolean_t vm_freeze_enabled
= FALSE
;
3125 #endif /* CONFIG_EMBEDDED */
3129 extern void vm_page_reactivate_all_throttled(void);
3132 sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
3134 #pragma unused(arg1, arg2)
3135 int error
, val
= vm_freeze_enabled
? 1 : 0;
3138 error
= sysctl_handle_int(oidp
, &val
, 0, req
);
3139 if (error
|| !req
->newptr
)
3143 * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue.
3145 disabled
= (!val
&& vm_freeze_enabled
);
3147 vm_freeze_enabled
= val
? TRUE
: FALSE
;
3150 vm_page_reactivate_all_throttled();
3156 SYSCTL_PROC(_vm
, OID_AUTO
, freeze_enabled
, CTLTYPE_INT
|CTLFLAG_RW
, &vm_freeze_enabled
, 0, sysctl_freeze_enabled
, "I", "");
3157 #endif /* CONFIG_FREEZE */
3159 /* this kernel does NOT implement shared_region_make_private_np() */
3160 SYSCTL_INT(_kern
, KERN_SHREG_PRIVATIZABLE
, shreg_private
,
3161 CTLFLAG_RD
| CTLFLAG_LOCKED
,
3162 (int *)NULL
, 0, "");
3164 #if defined(__i386__) || defined(__x86_64__)
3166 sysctl_sysctl_exec_affinity(__unused
struct sysctl_oid
*oidp
,
3167 __unused
void *arg1
, __unused
int arg2
,
3168 struct sysctl_req
*req
)
3170 proc_t cur_proc
= req
->p
;
3173 if (req
->oldptr
!= USER_ADDR_NULL
) {
3174 cpu_type_t oldcputype
= (cur_proc
->p_flag
& P_AFFINITY
) ? CPU_TYPE_POWERPC
: CPU_TYPE_I386
;
3175 if ((error
= SYSCTL_OUT(req
, &oldcputype
, sizeof(oldcputype
))))
3179 if (req
->newptr
!= USER_ADDR_NULL
) {
3180 cpu_type_t newcputype
;
3181 if ((error
= SYSCTL_IN(req
, &newcputype
, sizeof(newcputype
))))
3183 if (newcputype
== CPU_TYPE_I386
)
3184 OSBitAndAtomic(~((uint32_t)P_AFFINITY
), &cur_proc
->p_flag
);
3185 else if (newcputype
== CPU_TYPE_POWERPC
)
3186 OSBitOrAtomic(P_AFFINITY
, &cur_proc
->p_flag
);
3193 SYSCTL_PROC(_sysctl
, OID_AUTO
, proc_exec_affinity
, CTLTYPE_INT
|CTLFLAG_RW
|CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
, 0, 0, sysctl_sysctl_exec_affinity
,"I","proc_exec_affinity");
/*
 * fetch_process_cputype: resolve the CPU type (plus the CPU_ARCH_ABI64
 * flag for 64-bit processes) of the process identified by a sysctl MIB
 * name, on behalf of the proc_native/proc_cputype handlers.
 * NOTE(review): this extraction is lossy -- the remaining parameters,
 * error paths, proc_rele and return lines are missing from view; the
 * comments below annotate only the surviving fragments.
 */
3197 fetch_process_cputype(
3201 cpu_type_t
*cputype
)
3203 proc_t p
= PROC_NULL
;
/* a one-element name is a pid; look that process up */
3210 else if (namelen
== 1) {
3211 p
= proc_find(name
[0]);
3220 #if defined(__i386__) || defined(__x86_64__)
/* translated (Rosetta) processes report as PowerPC on x86 */
3221 if (p
->p_flag
& P_TRANSLATED
) {
3222 ret
= CPU_TYPE_POWERPC
;
/* presumably ret defaults to the native cpu type on the missing
   else-path -- TODO confirm against the full source */
3228 if (IS_64BIT_PROCESS(p
))
3229 ret
|= CPU_ARCH_ABI64
;
3240 sysctl_sysctl_native(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
,
3241 struct sysctl_req
*req
)
3244 cpu_type_t proc_cputype
= 0;
3245 if ((error
= fetch_process_cputype(req
->p
, (int *)arg1
, arg2
, &proc_cputype
)) != 0)
3248 if ((proc_cputype
& ~CPU_ARCH_MASK
) != (cpu_type() & ~CPU_ARCH_MASK
))
3250 return SYSCTL_OUT(req
, &res
, sizeof(res
));
3252 SYSCTL_PROC(_sysctl
, OID_AUTO
, proc_native
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, 0, sysctl_sysctl_native
,"I","proc_native");
3255 sysctl_sysctl_cputype(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
,
3256 struct sysctl_req
*req
)
3259 cpu_type_t proc_cputype
= 0;
3260 if ((error
= fetch_process_cputype(req
->p
, (int *)arg1
, arg2
, &proc_cputype
)) != 0)
3262 return SYSCTL_OUT(req
, &proc_cputype
, sizeof(proc_cputype
));
3264 SYSCTL_PROC(_sysctl
, OID_AUTO
, proc_cputype
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, 0, sysctl_sysctl_cputype
,"I","proc_cputype");
3268 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
3270 return sysctl_io_number(req
, boothowto
& RB_SAFEBOOT
? 1 : 0, sizeof(int), NULL
, NULL
);
3273 SYSCTL_PROC(_kern
, KERN_SAFEBOOT
, safeboot
,
3274 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
3275 0, 0, sysctl_safeboot
, "I", "");
3279 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
3281 return sysctl_io_number(req
, boothowto
& RB_SINGLE
? 1 : 0, sizeof(int), NULL
, NULL
);
3284 SYSCTL_PROC(_kern
, OID_AUTO
, singleuser
,
3285 CTLTYPE_INT
| CTLFLAG_RD
| CTLFLAG_LOCKED
,
3286 0, 0, sysctl_singleuser
, "I", "");
3289 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
3291 extern boolean_t affinity_sets_enabled
;
3292 extern int affinity_sets_mapping
;
3294 SYSCTL_INT (_kern
, OID_AUTO
, affinity_sets_enabled
,
3295 CTLFLAG_RW
| CTLFLAG_LOCKED
, (int *) &affinity_sets_enabled
, 0, "hinting enabled");
3296 SYSCTL_INT (_kern
, OID_AUTO
, affinity_sets_mapping
,
3297 CTLFLAG_RW
| CTLFLAG_LOCKED
, &affinity_sets_mapping
, 0, "mapping policy");
3300 * Limit on total memory users can wire.
3302 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
3304 * vm_user_wire_limit - per address space limit on wired memory. This puts a cap on the process's rlimit value.
3306 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
3309 * All values are in bytes.
3312 vm_map_size_t vm_global_no_user_wire_amount
;
3313 vm_map_size_t vm_global_user_wire_limit
;
3314 vm_map_size_t vm_user_wire_limit
;
3317 * There needs to be a more automatic/elegant way to do this
/*
 * Writable tunables for the user-wired-memory caps declared above
 * (all byte counts); see the preceding comment block for the meaning
 * of each limit.
 */
3320 SYSCTL_QUAD(_vm
, OID_AUTO
, global_no_user_wire_amount
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_global_no_user_wire_amount
, "");
3321 SYSCTL_QUAD(_vm
, OID_AUTO
, global_user_wire_limit
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_global_user_wire_limit
, "");
3322 SYSCTL_QUAD(_vm
, OID_AUTO
, user_wire_limit
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &vm_user_wire_limit
, "");
3327 * enable back trace events for thread blocks
3330 extern uint32_t kdebug_thread_block
;
3332 SYSCTL_INT (_kern
, OID_AUTO
, kdebug_thread_block
,
3333 CTLFLAG_RW
| CTLFLAG_LOCKED
, &kdebug_thread_block
, 0, "kdebug thread_block");
3336 * Kernel stack size and depth
3338 SYSCTL_INT (_kern
, OID_AUTO
, stack_size
,
3339 CTLFLAG_RD
| CTLFLAG_LOCKED
, (int *) &kernel_stack_size
, 0, "Kernel stack size");
3340 SYSCTL_INT (_kern
, OID_AUTO
, stack_depth_max
,
3341 CTLFLAG_RD
| CTLFLAG_LOCKED
, (int *) &kernel_stack_depth_max
, 0, "Max kernel stack depth at interrupt or context switch");
3344 * enable back trace for port allocations
3346 extern int ipc_portbt
;
3348 SYSCTL_INT(_kern
, OID_AUTO
, ipc_portbt
,
3349 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
3350 &ipc_portbt
, 0, "");
3357 * See osfmk/kern/sched_prim.c for the corresponding definition
3358 * in osfmk/. If either version changes, update the other.
3360 #define SCHED_STRING_MAX_LENGTH (48)
3362 extern char sched_string
[SCHED_STRING_MAX_LENGTH
];
3363 SYSCTL_STRING(_kern
, OID_AUTO
, sched
,
3364 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
3365 sched_string
, sizeof(sched_string
),
3366 "Timeshare scheduler implementation");